From f371c789d0bce44703d145310a3a799f7ebe0bb1 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Thu, 2 Jun 2022 09:18:10 +0200 Subject: [PATCH] ipq807x: revert all 11.5 changes Fixes: WIFI-7570 Signed-off-by: John Crispin --- feeds/ipq807x/ipq807x/Makefile | 8 +- .../ipq807x/base-files/etc/board.d/02_network | 2 +- feeds/ipq807x/ipq807x/config-4.4 | 829 + .../files/arch/arm/boot/dts/ipq8074-hk14.dts | 5 - .../arch/arm/boot/dts/qcom-ipq5018-eap104.dts | 7 +- .../arm/boot/dts/qcom-ipq6018-cig-wf188n.dts | 2 +- .../boot/dts/qcom-ipq6018-edgecore-eap101.dts | 2 +- .../arm/boot/dts/qcom-ipq6018-gl-ax1800.dts | 2 +- .../arm/boot/dts/qcom-ipq6018-gl-axt1800.dts | 2 +- .../arm/boot/dts/qcom-ipq6018-hfcl-ion4xe.dts | 9 +- .../arm/boot/dts/qcom-ipq6018-hfcl-ion4xi.dts | 9 +- .../dts/qcom-ipq6018-wallys-dr6018-v4.dts | 2 +- .../boot/dts/qcom-ipq6018-wallys-dr6018.dts | 2 +- .../boot/dts/qcom-ipq6018-yuncore-ax840.dts | 2 +- .../arch/arm/boot/dts/qcom-ipq807x-eap102.dts | 11 +- .../arch/arm/boot/dts/qcom-ipq807x-eap106.dts | 11 +- .../arch/arm/boot/dts/qcom-ipq807x-ex227.dts | 11 +- .../arch/arm/boot/dts/qcom-ipq807x-ex447.dts | 11 +- .../arch/arm/boot/dts/qcom-ipq807x-wf194c.dts | 11 +- .../arm/boot/dts/qcom-ipq807x-wf194c4.dts | 11 +- .../arch/arm/boot/dts/qcom-ipq807x-wf196.dts | 11 +- .../boot/dts/qcom/qcom-ipq5018-eap104.dts | 699 +- .../boot/dts/qcom/qcom-ipq5018-eww622-a1.dts | 483 +- .../arm64/boot/dts/qcom/qcom-ipq5018-q14.dts | 934 - .../boot/dts/qcom/qcom-ipq6018-cig-wf188n.dts | 167 +- .../dts/qcom/qcom-ipq6018-edgecore-eap101.dts | 375 +- .../boot/dts/qcom/qcom-ipq6018-gl-ax1800.dtsi | 125 +- .../boot/dts/qcom/qcom-ipq6018-hfcl-ion4x.dts | 305 - .../qcom/qcom-ipq6018-wallys-dr6018-v4.dts | 595 +- .../dts/qcom/qcom-ipq6018-wallys-dr6018.dts | 539 +- .../dts/qcom/qcom-ipq6018-yuncore-ax840.dts | 218 +- .../boot/dts/qcom/qcom-ipq807x-eap102.dts | 1260 +- .../boot/dts/qcom/qcom-ipq807x-eap106.dts | 1182 +- .../boot/dts/qcom/qcom-ipq807x-ex227.dts | 1206 +- 
.../boot/dts/qcom/qcom-ipq807x-ex447.dts | 1267 +- .../boot/dts/qcom/qcom-ipq807x-wf194c.dts | 1164 +- .../boot/dts/qcom/qcom-ipq807x-wf194c4.dts | 1525 +- .../boot/dts/qcom/qcom-ipq807x-wf196.dts | 1485 +- feeds/ipq807x/ipq807x/image/ipq50xx.mk | 4 +- feeds/ipq807x/ipq807x/image/ipq60xx.mk | 10 +- feeds/ipq807x/ipq807x/image/ipq807x.mk | 8 +- feeds/ipq807x/ipq807x/ipq50xx/config-5.4 | 1266 - feeds/ipq807x/ipq807x/ipq50xx/config-default | 85 + feeds/ipq807x/ipq807x/ipq50xx/config-lowmem | 73 + feeds/ipq807x/ipq807x/ipq60xx/config-default | 1279 +- feeds/ipq807x/ipq807x/ipq60xx/target.mk | 2 - feeds/ipq807x/ipq807x/ipq807x/config-default | 1286 +- feeds/ipq807x/ipq807x/modules.mk | 142 +- .../patches/001-backport_kbuild_fix.patch | 25 + ...y-up-lib-crypto-Kconfig-and-Makefile.patch | 118 - ...ve-existing-library-code-into-lib-cr.patch | 668 - ...a-depend-on-generic-chacha-library-i.patch | 192 - ...a-expose-SIMD-ChaCha-routine-as-libr.patch | 205 - ...cha-depend-on-generic-chacha-library.patch | 129 - ...cha-expose-arm64-ChaCha-routine-as-l.patch | 138 - ...a-import-Eric-Biggers-s-scalar-accel.patch | 480 - ...a-remove-dependency-on-generic-ChaCh.patch | 691 - ...a-expose-ARM-ChaCha-routine-as-libra.patch | 108 - ...ha-import-32r2-ChaCha-code-from-Zinc.patch | 451 - ...ha-wire-up-accelerated-32r2-code-fro.patch | 559 - ...cha-unexport-chacha_generic-routines.patch | 115 - ...move-core-routines-into-a-separate-l.patch | 649 - ...305-unify-Poly1305-state-struct-with.patch | 251 - ...expose-init-update-final-library-int.patch | 224 - ...305-depend-on-generic-library-not-ge.patch | 217 - ...305-expose-existing-driver-as-poly13.patch | 163 - ...y1305-incorporate-OpenSSL-CRYPTOGAMS.patch | 2083 - ...305-incorporate-OpenSSL-CRYPTOGAMS-N.patch | 2776 - ...1305-incorporate-OpenSSL-CRYPTOGAMS-.patch | 1563 - ...eneric-C-library-implementation-and-.patch | 1097 - ...o-testmgr-add-test-cases-for-Blake2s.patch | 322 - ...ake2s-implement-generic-shash-driver.patch | 245 - 
...o-blake2s-x86_64-SIMD-implementation.patch | 557 - ...19-generic-C-library-implementations.patch | 1849 - ...6-crypto-curve25519-add-kpp-selftest.patch | 1268 - ...ve25519-implement-generic-KPP-driver.patch | 136 - ...25519-work-around-Clang-stack-spilli.patch | 75 - ...9-x86_64-library-and-KPP-implementat.patch | 2536 - ...25519-import-Bernstein-and-Schwabe-s.patch | 2135 - ...rve25519-wire-up-NEON-implementation.patch | 1058 - ...oly1305-import-construction-and-self.patch | 7677 --- ...a20poly1305-reimplement-crypt_from_s.patch | 295 - ...neric-remove-unnecessary-setkey-func.patch | 68 - ...a-only-unregister-algorithms-if-regi.patch | 31 - ...-chacha20poly1305-use-chacha20_crypt.patch | 83 - ...itionalize-crypto-api-in-arch-glue-f.patch | 275 - ...a-fix-warning-message-in-header-file.patch | 35 - ...25519-add-arch-specific-key-generati.patch | 38 - ...ypto-lib-curve25519-re-add-selftests.patch | 1387 - ...add-new-32-and-64-bit-generic-versio.patch | 1164 - ...305-import-unmodified-cryptogams-imp.patch | 4183 -- ...305-wire-up-faster-implementations-f.patch | 2927 - ...-mips-poly1305-remove-redundant-non-.patch | 171 - ...-curve25519-Fix-selftest-build-error.patch | 102 - ...pto-x86-poly1305-fix-.gitignore-typo.patch | 23 - ...oly1305-add-back-missing-test-vector.patch | 1858 - ...305-emit-does-base-conversion-itself.patch | 36 - ...a-fix-build-failured-when-kernel-mod.patch | 58 - ...llow-tests-to-be-disabled-when-manag.patch | 42 - ...oly1305-prevent-integer-overflow-on-.patch | 40 - ...25519-support-assemblers-with-no-adx.patch | 84 - ...chacha-correctly-walk-through-blocks.patch | 68 - ...25519-replace-with-formally-verified.patch | 3765 -- ...rve25519-leave-r12-as-spare-register.patch | 376 - ...ly1305-add-artifact-to-.gitignore-fi.patch | 35 - ...ch-lib-limit-simd-usage-to-4k-chunks.patch | 243 - ...a20poly1305-Add-missing-function-dec.patch | 38 - ...a-sse3-use-unaligned-loads-for-state.patch | 147 - ...e25519-Remove-unused-carry-variables.patch | 46 - 
...rve25519-include-linux-scatterlist.h.patch | 36 - ...305-Add-prototype-for-poly1305_block.patch | 33 - ...to-curve25519-x86_64-Use-XORL-r32-32.patch | 261 - ...ypto-poly1305-x86_64-Use-XORL-r32-32.patch | 59 - ...305-Remove-assignments-with-no-effec.patch | 29 - ...oly1305-add-back-a-needed-assignment.patch | 33 - ...RYPTO_MANAGER_EXTRA_TESTS-requires-t.patch | 33 - ...a-neon-optimize-for-non-block-size-m.patch | 272 - ...-chacha-simplify-tail-block-handling.patch | 324 - ...a20poly1305-define-empty-module-exit.patch | 37 - ...a-neon-add-missing-counter-increment.patch | 38 - ...-net-WireGuard-secure-network-tunnel.patch | 8071 --- ...sts-import-harness-makefile-for-test.patch | 1078 - ...g-select-parent-dependency-for-crypt.patch | 30 - ...al-fix-spelling-mistakes-in-comments.patch | 66 - ...emove-unused-include-linux-version.h.patch | 28 - ...dips-use-kfree_rcu-instead-of-call_r.patch | 41 - ...sts-remove-ancient-kernel-compatibil.patch | 373 - ...ng-do-not-account-for-pfmemalloc-whe.patch | 39 - ...-mark-skbs-as-not-on-list-when-recei.patch | 34 - ...dips-fix-use-after-free-in-root_remo.patch | 164 - ...reject-peers-with-low-order-public-k.patch | 233 - ...sts-ensure-non-addition-of-peers-wit.patch | 34 - ...sts-tie-socket-waiting-to-target-pid.patch | 77 - ...uard-device-use-icmp_ndo_send-helper.patch | 64 - ...sts-reduce-complexity-and-fix-make-r.patch | 104 - ...eceive-reset-last_under_load-to-zero.patch | 38 - ...guard-send-account-for-mtu-0-devices.patch | 95 - ...-remove-extra-call-to-synchronize_ne.patch | 32 - ...sts-remove-duplicated-include-sys-ty.patch | 27 - ...-queueing-account-for-skb-protocol-0.patch | 100 - ...e-remove-dead-code-from-default-pack.patch | 35 - ...error-out-precomputed-DH-during-hand.patch | 224 - ...emove-errant-newline-from-packet_enc.patch | 29 - ...ng-cleanup-ptr_ring-in-error-path-of.patch | 35 - ...e-use-tunnel-helpers-for-decapsulati.patch | 50 - ...sts-use-normal-kernel-stack-size-on-.patch | 28 - 
...-remove-errant-restriction-on-loopin.patch | 162 - ...eceive-cond_resched-when-processing-.patch | 58 - ...sts-initalize-ipv6-members-to-NULL-t.patch | 51 - ...eceive-use-explicit-unlikely-branch-.patch | 88 - ...ftests-use-newer-iproute2-for-gcc-10.patch | 31 - ...read-preshared-key-while-taking-lock.patch | 61 - ...ng-preserve-flow-hash-across-packet-.patch | 116 - ...separate-receive-counter-from-send-c.patch | 330 - ...do-not-assign-initiation-time-in-if-.patch | 33 - ...vice-avoid-circular-netns-references.patch | 296 - ...e-account-for-napi_gro_receive-never.patch | 42 - ...l-add-header_ops-for-layer-3-devices.patch | 58 - ...ent-header_ops-parse_protocol-for-AF.patch | 36 - ...ng-make-use-of-ip_tunnel_parse_proto.patch | 68 - ...onsistently-use-NLA_POLICY_EXACT_LEN.patch | 49 - ...-consistently-use-NLA_POLICY_MIN_LEN.patch | 39 - ...take-lock-when-removing-handshake-en.patch | 127 - ...okup-take-lock-before-checking-hash-.patch | 62 - ...sts-check-that-route_me_harder-packe.patch | 56 - ...double-unlikely-notation-when-using-.patch | 55 - ...ocket-remove-bogus-__be32-annotation.patch | 52 - ...tests-test-multiple-parallel-streams.patch | 52 - ...ut-frequently-used-members-above-cac.patch | 42 - ...-do-not-generate-ICMP-for-non-IP-pac.patch | 47 - ...ing-get-rid-of-per-peer-ring-buffers.patch | 560 - ...fig-use-arm-chacha-even-with-no-neon.patch | 30 - ...y1305-enable-for-all-MIPS-processors.patch | 60 - ...ps-add-poly1305-core.S-to-.gitignore.patch | 24 - ...fix-poly1305_core_setkey-declaration.patch | 172 - ...sts-remove-old-conntrack-kconfig-val.patch | 29 - ...sts-make-sure-rp_filter-is-disabled-.patch | 31 - ...reguard-0129-wireguard-do-not-use-O3.patch | 33 - ...nchronize_net-rather-than-synchroniz.patch | 66 - ...ireguard-peer-allocate-in-kmem_cache.patch | 125 - ...dips-initialize-list-head-in-selftes.patch | 43 - ...guard-allowedips-remove-nodes-in-O-1.patch | 237 - ...owedips-allocate-nodes-in-kmem_cache.patch | 173 - 
...dips-free-empty-intermediate-nodes-w.patch | 521 - feeds/ipq807x/ipq807x/patches/100-dts.patch | 26 - .../ipq807x/ipq807x/patches/100-qrtr-ns.patch | 976 + .../ipq807x/ipq807x/patches/101-aq_phy.patch | 78 - .../ipq807x/patches/101-squashfs.patch | 16 + .../ipq807x/ipq807x/patches/102-aq-phy.patch | 90 + ...pointer-dereference-in-iptunnel_xmit.patch | 13 - .../patches/103-fix-dtc-gcc10-build.patch | 11 + .../ipq807x/patches/104-log-spam.patch | 27 +- .../ipq807x/patches/105-add-esmt-nand.patch | 36 +- .../ipq807x/ipq807x/patches/106-pstore.patch | 147 + ...ow-bcast-mcast-same-port-hairpinmode.patch | 19 +- .../ipq807x/patches/108-add-W25N01GW.patch | 21 +- .../patches/190-revert-threaded-NAPI.patch | 306 - .../ipq807x/patches/200-bpf_backport.patch | 44780 ++++++++++++++++ ...i-poll-functionality-to-__napi_poll.patch} | 8 +- ...hreaded-able-napi-poll-loop-support.patch} | 100 +- ...ribute-to-control-napi-threaded-mod.patch} | 45 +- ...een-napi-kthread-mode-and-busy-poll.patch} | 67 +- ...p-on-napi_disable-for-threaded-napi.patch} | 4 +- .../220-net-sched-add-clsact-qdisc.patch | 439 + feeds/ipq807x/kmod-sched-cake/Makefile | 43 + .../kmod-sched-cake/patches/100-compat.patch | 20 + feeds/ipq807x/qca-nss-clients/Makefile | 7 +- feeds/ipq807x/qca-nss-dp/Makefile | 15 +- feeds/ipq807x/qca-nss-dp/Makefile.orig | 59 - feeds/ipq807x/qca-nss-dp/src/Makefile | 56 + .../qca-nss-dp/src/exports/nss_dp_api_if.h | 219 + .../src/hal/arch/ipq50xx/nss_ipq50xx.c | 153 + .../src/hal/arch/ipq50xx/nss_ipq50xx.h | 130 + .../src/hal/arch/ipq60xx/nss_ipq60xx.c | 53 + .../src/hal/arch/ipq60xx/nss_ipq60xx.h | 34 + .../src/hal/arch/ipq807x/nss_ipq807x.c | 53 + .../src/hal/arch/ipq807x/nss_ipq807x.h | 34 + .../qca-nss-dp/src/hal/edma/edma_cfg.c | 967 + .../qca-nss-dp/src/hal/edma/edma_data_plane.c | 962 + .../qca-nss-dp/src/hal/edma/edma_data_plane.h | 287 + .../qca-nss-dp/src/hal/edma/edma_regs.h | 454 + .../qca-nss-dp/src/hal/edma/edma_tx_rx.c | 795 + 
.../src/hal/gmac_hal_ops/qcom/qcom_dev.h | 697 + .../src/hal/gmac_hal_ops/qcom/qcom_if.c | 479 + .../src/hal/gmac_hal_ops/qcom/qcom_reg.h | 156 + .../src/hal/gmac_hal_ops/syn/gmac/syn_dev.h} | 18 +- .../src/hal/gmac_hal_ops/syn/gmac/syn_if.c | 959 + .../src/hal/gmac_hal_ops/syn/gmac/syn_reg.h | 531 + .../src/hal/gmac_hal_ops/syn/xgmac/syn_dev.h | 189 + .../src/hal/gmac_hal_ops/syn/xgmac/syn_if.c | 505 + .../src/hal/gmac_hal_ops/syn/xgmac/syn_reg.h | 255 + .../ipq807x/qca-nss-dp/src/hal/include/edma.h | 31 + .../qca-nss-dp/src/hal/include/nss_dp_hal.h | 48 + .../src/hal/include/nss_dp_hal_if.h | 162 + .../src/hal/syn_gmac_dp/syn_data_plane.c | 336 + .../src/hal/syn_gmac_dp/syn_data_plane.h | 109 + .../src/hal/syn_gmac_dp/syn_dma_desc.h | 342 + .../src/hal/syn_gmac_dp/syn_dp_cfg.c | 195 + .../src/hal/syn_gmac_dp/syn_dp_tx_rx.c | 425 + .../qca-nss-dp/src/include/nss_dp_dev.h | 132 + feeds/ipq807x/qca-nss-dp/src/nss_dp_attach.c | 192 + .../ipq807x/qca-nss-dp/src/nss_dp_ethtools.c | 378 + feeds/ipq807x/qca-nss-dp/src/nss_dp_main.c | 830 + .../ipq807x/qca-nss-dp/src/nss_dp_switchdev.c | 367 + .../qca-nss-drv/200-napi_threaded.patch | 11 - feeds/ipq807x/qca-nss-drv/Makefile | 10 - .../patches/200-napi_threaded.patch | 2 +- feeds/ipq807x/qca-nss-drv/src/Makefile | 537 + feeds/ipq807x/qca-nss-drv/src/Makefile.fsm | 123 + .../src/exports/arch/nss_fsm9010.h | 43 + .../src/exports/arch/nss_ipq40xx.h | 43 + .../src/exports/arch/nss_ipq50xx.h | 40 + .../src/exports/arch/nss_ipq50xx_64.h | 40 + .../src/exports/arch/nss_ipq60xx.h | 41 + .../src/exports/arch/nss_ipq60xx_64.h | 41 + .../src/exports/arch/nss_ipq806x.h | 43 + .../src/exports/arch/nss_ipq807x.h | 44 + .../src/exports/arch/nss_ipq807x_64.h | 44 + .../qca-nss-drv/src/exports/nss_api_if.h | 319 + .../qca-nss-drv/src/exports/nss_bridge.h | 362 + .../qca-nss-drv/src/exports/nss_c2c_rx.h | 86 + .../qca-nss-drv/src/exports/nss_c2c_tx.h | 308 + .../qca-nss-drv/src/exports/nss_capwap.h | 659 + 
.../qca-nss-drv/src/exports/nss_clmap.h | 390 + .../ipq807x/qca-nss-drv/src/exports/nss_cmn.h | 478 + .../qca-nss-drv/src/exports/nss_crypto.h | 392 + .../qca-nss-drv/src/exports/nss_crypto_cmn.h | 460 + .../ipq807x/qca-nss-drv/src/exports/nss_def.h | 57 + .../ipq807x/qca-nss-drv/src/exports/nss_dma.h | 333 + .../qca-nss-drv/src/exports/nss_dtls.h | 335 + .../qca-nss-drv/src/exports/nss_dtls_cmn.h | 512 + .../src/exports/nss_dynamic_interface.h | 343 + .../qca-nss-drv/src/exports/nss_edma.h | 375 + .../qca-nss-drv/src/exports/nss_eth_rx.h | 100 + .../qca-nss-drv/src/exports/nss_freq.h | 64 + .../ipq807x/qca-nss-drv/src/exports/nss_gre.h | 494 + .../qca-nss-drv/src/exports/nss_gre_redir.h | 712 + .../src/exports/nss_gre_redir_lag.h | 732 + .../src/exports/nss_gre_redir_mark.h | 338 + .../qca-nss-drv/src/exports/nss_gre_tunnel.h | 428 + .../ipq807x/qca-nss-drv/src/exports/nss_if.h | 454 + .../ipq807x/qca-nss-drv/src/exports/nss_igs.h | 213 + .../qca-nss-drv/src/exports/nss_ipsec.h | 550 + .../qca-nss-drv/src/exports/nss_ipsec_cmn.h | 691 + .../qca-nss-drv/src/exports/nss_ipsecmgr.h | 443 + .../qca-nss-drv/src/exports/nss_ipv4.h | 1310 + .../qca-nss-drv/src/exports/nss_ipv4_reasm.h | 89 + .../qca-nss-drv/src/exports/nss_ipv6.h | 1304 + .../qca-nss-drv/src/exports/nss_ipv6_reasm.h | 92 + .../qca-nss-drv/src/exports/nss_l2tpv2.h | 327 + .../ipq807x/qca-nss-drv/src/exports/nss_lag.h | 211 + .../qca-nss-drv/src/exports/nss_lso_rx.h | 88 + .../qca-nss-drv/src/exports/nss_map_t.h | 382 + .../qca-nss-drv/src/exports/nss_match.h | 296 + .../qca-nss-drv/src/exports/nss_mirror.h | 317 + .../ipq807x/qca-nss-drv/src/exports/nss_n2h.h | 572 + .../ipq807x/qca-nss-drv/src/exports/nss_oam.h | 145 + .../qca-nss-drv/src/exports/nss_phy_if.h | 67 + .../ipq807x/qca-nss-drv/src/exports/nss_pm.h | 114 + .../qca-nss-drv/src/exports/nss_portid.h | 284 + .../ipq807x/qca-nss-drv/src/exports/nss_ppe.h | 91 + .../qca-nss-drv/src/exports/nss_ppe_vp.h | 79 + .../qca-nss-drv/src/exports/nss_pppoe.h 
| 384 + .../qca-nss-drv/src/exports/nss_pptp.h | 345 + .../qca-nss-drv/src/exports/nss_profiler.h | 406 + .../qca-nss-drv/src/exports/nss_project.h | 176 + .../qca-nss-drv/src/exports/nss_pvxlan.h | 371 + .../qca-nss-drv/src/exports/nss_qrfs.h | 193 + .../qca-nss-drv/src/exports/nss_qvpn.h | 522 + .../qca-nss-drv/src/exports/nss_rmnet_rx.h | 392 + .../ipq807x/qca-nss-drv/src/exports/nss_rps.h | 55 + .../qca-nss-drv/src/exports/nss_shaper.h | 901 + .../qca-nss-drv/src/exports/nss_sjack.h | 154 + .../src/exports/nss_stats_public.h | 131 + .../ipq807x/qca-nss-drv/src/exports/nss_tls.h | 469 + .../qca-nss-drv/src/exports/nss_trustsec_tx.h | 234 + .../qca-nss-drv/src/exports/nss_tstamp.h | 125 + .../qca-nss-drv/src/exports/nss_tun6rd.h | 198 + .../qca-nss-drv/src/exports/nss_tunipip6.h | 293 + .../qca-nss-drv/src/exports/nss_udp_st.h | 284 + .../qca-nss-drv/src/exports/nss_unaligned.h | 121 + .../qca-nss-drv/src/exports/nss_virt_if.h | 436 + .../qca-nss-drv/src/exports/nss_vlan.h | 265 + .../qca-nss-drv/src/exports/nss_vxlan.h | 350 + .../qca-nss-drv/src/exports/nss_wifi.h | 1015 + .../src/exports/nss_wifi_ext_vdev_if.h | 297 + .../src/exports/nss_wifi_mac_db_if.h | 277 + .../qca-nss-drv/src/exports/nss_wifi_mesh.h | 1000 + .../qca-nss-drv/src/exports/nss_wifi_vdev.h | 1358 + .../qca-nss-drv/src/exports/nss_wifili_if.h | 2057 + feeds/ipq807x/qca-nss-drv/src/nss_bridge.c | 501 + .../ipq807x/qca-nss-drv/src/nss_bridge_log.c | 135 + .../ipq807x/qca-nss-drv/src/nss_bridge_log.h | 41 + feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx.c | 113 + .../qca-nss-drv/src/nss_c2c_rx_stats.c | 173 + .../qca-nss-drv/src/nss_c2c_rx_stats.h | 63 + .../qca-nss-drv/src/nss_c2c_rx_strings.c | 61 + .../qca-nss-drv/src/nss_c2c_rx_strings.h | 23 + feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx.c | 439 + .../ipq807x/qca-nss-drv/src/nss_c2c_tx_log.c | 121 + .../ipq807x/qca-nss-drv/src/nss_c2c_tx_log.h | 36 + .../qca-nss-drv/src/nss_c2c_tx_stats.c | 168 + .../qca-nss-drv/src/nss_c2c_tx_stats.h | 29 + 
.../qca-nss-drv/src/nss_c2c_tx_strings.c | 61 + .../qca-nss-drv/src/nss_c2c_tx_strings.h | 23 + feeds/ipq807x/qca-nss-drv/src/nss_capwap.c | 606 + .../ipq807x/qca-nss-drv/src/nss_capwap_log.c | 282 + .../ipq807x/qca-nss-drv/src/nss_capwap_log.h | 37 + .../qca-nss-drv/src/nss_capwap_stats.c | 313 + .../qca-nss-drv/src/nss_capwap_stats.h | 26 + .../qca-nss-drv/src/nss_capwap_strings.c | 102 + .../qca-nss-drv/src/nss_capwap_strings.h | 28 + feeds/ipq807x/qca-nss-drv/src/nss_clmap.c | 346 + feeds/ipq807x/qca-nss-drv/src/nss_clmap_log.c | 207 + feeds/ipq807x/qca-nss-drv/src/nss_clmap_log.h | 37 + .../ipq807x/qca-nss-drv/src/nss_clmap_stats.c | 296 + .../ipq807x/qca-nss-drv/src/nss_clmap_stats.h | 42 + .../qca-nss-drv/src/nss_clmap_strings.c | 73 + .../qca-nss-drv/src/nss_clmap_strings.h | 25 + feeds/ipq807x/qca-nss-drv/src/nss_cmn.c | 345 + feeds/ipq807x/qca-nss-drv/src/nss_core.c | 3251 ++ feeds/ipq807x/qca-nss-drv/src/nss_core.h | 1038 + feeds/ipq807x/qca-nss-drv/src/nss_coredump.c | 257 + feeds/ipq807x/qca-nss-drv/src/nss_crypto.c | 302 + .../ipq807x/qca-nss-drv/src/nss_crypto_cmn.c | 388 + .../qca-nss-drv/src/nss_crypto_cmn_log.c | 210 + .../qca-nss-drv/src/nss_crypto_cmn_log.h | 37 + .../qca-nss-drv/src/nss_crypto_cmn_stats.c | 166 + .../qca-nss-drv/src/nss_crypto_cmn_stats.h | 77 + .../qca-nss-drv/src/nss_crypto_cmn_strings.c | 61 + .../qca-nss-drv/src/nss_crypto_cmn_strings.h | 25 + .../ipq807x/qca-nss-drv/src/nss_crypto_log.c | 151 + .../ipq807x/qca-nss-drv/src/nss_crypto_log.h | 37 + .../hal/include/nss_data_plane_hal.h | 54 + .../src/nss_data_plane/hal/nss_ipq50xx.c | 185 + .../src/nss_data_plane/hal/nss_ipq60xx.c | 117 + .../src/nss_data_plane/hal/nss_ipq807x.c | 117 + .../nss_data_plane/include/nss_data_plane.h | 60 + .../src/nss_data_plane/nss_data_plane.c | 386 + .../nss_data_plane/nss_data_plane_common.c | 84 + .../src/nss_data_plane/nss_data_plane_gmac.c | 396 + feeds/ipq807x/qca-nss-drv/src/nss_dma.c | 501 + feeds/ipq807x/qca-nss-drv/src/nss_dma_log.c | 
140 + feeds/ipq807x/qca-nss-drv/src/nss_dma_log.h | 38 + feeds/ipq807x/qca-nss-drv/src/nss_dma_stats.c | 163 + feeds/ipq807x/qca-nss-drv/src/nss_dma_stats.h | 31 + .../ipq807x/qca-nss-drv/src/nss_dma_strings.c | 88 + .../ipq807x/qca-nss-drv/src/nss_dma_strings.h | 25 + feeds/ipq807x/qca-nss-drv/src/nss_drv_stats.c | 166 + feeds/ipq807x/qca-nss-drv/src/nss_drv_stats.h | 80 + .../ipq807x/qca-nss-drv/src/nss_drv_strings.c | 92 + .../ipq807x/qca-nss-drv/src/nss_drv_strings.h | 26 + feeds/ipq807x/qca-nss-drv/src/nss_dscp_map.h | 212 + feeds/ipq807x/qca-nss-drv/src/nss_dtls.c | 468 + feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn.c | 451 + .../qca-nss-drv/src/nss_dtls_cmn_log.c | 178 + .../qca-nss-drv/src/nss_dtls_cmn_log.h | 37 + .../qca-nss-drv/src/nss_dtls_cmn_stats.c | 215 + .../qca-nss-drv/src/nss_dtls_cmn_stats.h | 26 + .../qca-nss-drv/src/nss_dtls_cmn_strings.c | 128 + .../qca-nss-drv/src/nss_dtls_cmn_strings.h | 25 + feeds/ipq807x/qca-nss-drv/src/nss_dtls_log.c | 185 + feeds/ipq807x/qca-nss-drv/src/nss_dtls_log.h | 37 + .../ipq807x/qca-nss-drv/src/nss_dtls_stats.c | 143 + .../ipq807x/qca-nss-drv/src/nss_dtls_stats.h | 115 + .../qca-nss-drv/src/nss_dynamic_interface.c | 420 + .../src/nss_dynamic_interface_log.c | 145 + .../src/nss_dynamic_interface_log.h | 37 + .../src/nss_dynamic_interface_stats.c | 160 + .../src/nss_dynamic_interface_stats.h | 33 + feeds/ipq807x/qca-nss-drv/src/nss_edma.c | 139 + .../ipq807x/qca-nss-drv/src/nss_edma_stats.c | 822 + .../ipq807x/qca-nss-drv/src/nss_edma_stats.h | 36 + .../qca-nss-drv/src/nss_edma_strings.c | 350 + .../qca-nss-drv/src/nss_edma_strings.h | 30 + feeds/ipq807x/qca-nss-drv/src/nss_eth_rx.c | 77 + .../qca-nss-drv/src/nss_eth_rx_stats.c | 187 + .../qca-nss-drv/src/nss_eth_rx_stats.h | 65 + .../qca-nss-drv/src/nss_eth_rx_strings.c | 106 + .../qca-nss-drv/src/nss_eth_rx_strings.h | 26 + feeds/ipq807x/qca-nss-drv/src/nss_freq.c | 467 + feeds/ipq807x/qca-nss-drv/src/nss_freq_log.c | 100 + 
feeds/ipq807x/qca-nss-drv/src/nss_freq_log.h | 35 + .../ipq807x/qca-nss-drv/src/nss_freq_stats.c | 86 + .../ipq807x/qca-nss-drv/src/nss_freq_stats.h | 29 + .../ipq807x/qca-nss-drv/src/nss_gmac_stats.c | 83 + .../ipq807x/qca-nss-drv/src/nss_gmac_stats.h | 33 + feeds/ipq807x/qca-nss-drv/src/nss_gre.c | 411 + feeds/ipq807x/qca-nss-drv/src/nss_gre_log.c | 187 + feeds/ipq807x/qca-nss-drv/src/nss_gre_log.h | 41 + feeds/ipq807x/qca-nss-drv/src/nss_gre_redir.c | 673 + .../qca-nss-drv/src/nss_gre_redir_lag_ds.c | 404 + .../src/nss_gre_redir_lag_ds_log.c | 164 + .../src/nss_gre_redir_lag_ds_log.h | 37 + .../src/nss_gre_redir_lag_ds_stats.c | 211 + .../src/nss_gre_redir_lag_ds_stats.h | 28 + .../src/nss_gre_redir_lag_ds_strings.c | 60 + .../src/nss_gre_redir_lag_ds_strings.h | 25 + .../qca-nss-drv/src/nss_gre_redir_lag_us.c | 665 + .../src/nss_gre_redir_lag_us_log.c | 191 + .../src/nss_gre_redir_lag_us_log.h | 37 + .../src/nss_gre_redir_lag_us_stats.c | 226 + .../src/nss_gre_redir_lag_us_stats.h | 50 + .../src/nss_gre_redir_lag_us_strings.c | 71 + .../src/nss_gre_redir_lag_us_strings.h | 25 + .../qca-nss-drv/src/nss_gre_redir_log.c | 242 + .../qca-nss-drv/src/nss_gre_redir_log.h | 41 + .../qca-nss-drv/src/nss_gre_redir_mark.c | 341 + .../qca-nss-drv/src/nss_gre_redir_mark_log.c | 119 + .../qca-nss-drv/src/nss_gre_redir_mark_log.h | 37 + .../src/nss_gre_redir_mark_stats.c | 230 + .../src/nss_gre_redir_mark_stats.h | 35 + .../src/nss_gre_redir_mark_strings.c | 66 + .../src/nss_gre_redir_mark_strings.h | 25 + .../qca-nss-drv/src/nss_gre_redir_stats.c | 312 + .../qca-nss-drv/src/nss_gre_redir_stats.h | 30 + .../qca-nss-drv/src/nss_gre_redir_strings.c | 87 + .../qca-nss-drv/src/nss_gre_redir_strings.h | 25 + feeds/ipq807x/qca-nss-drv/src/nss_gre_stats.c | 338 + feeds/ipq807x/qca-nss-drv/src/nss_gre_stats.h | 55 + .../ipq807x/qca-nss-drv/src/nss_gre_strings.c | 124 + .../ipq807x/qca-nss-drv/src/nss_gre_strings.h | 26 + .../ipq807x/qca-nss-drv/src/nss_gre_tunnel.c | 395 + 
.../qca-nss-drv/src/nss_gre_tunnel_log.c | 168 + .../qca-nss-drv/src/nss_gre_tunnel_log.h | 41 + .../qca-nss-drv/src/nss_gre_tunnel_stats.c | 282 + .../qca-nss-drv/src/nss_gre_tunnel_stats.h | 44 + .../qca-nss-drv/src/nss_gre_tunnel_strings.c | 77 + .../qca-nss-drv/src/nss_gre_tunnel_strings.h | 25 + .../src/nss_hal/fsm9010/nss_hal_pvt.c | 342 + .../qca-nss-drv/src/nss_hal/include/nss_hal.h | 129 + .../src/nss_hal/include/nss_hal_ops.h | 49 + .../src/nss_hal/include/nss_regs.h | 108 + .../src/nss_hal/ipq50xx/nss_hal_pvt.c | 667 + .../src/nss_hal/ipq60xx/nss_hal_pvt.c | 739 + .../src/nss_hal/ipq806x/nss_clocks.h | 131 + .../src/nss_hal/ipq806x/nss_hal_pvt.c | 1237 + .../src/nss_hal/ipq807x/nss_hal_pvt.c | 771 + .../ipq807x/qca-nss-drv/src/nss_hal/nss_hal.c | 834 + feeds/ipq807x/qca-nss-drv/src/nss_hlos_if.h | 381 + feeds/ipq807x/qca-nss-drv/src/nss_if.c | 354 + feeds/ipq807x/qca-nss-drv/src/nss_if_log.c | 429 + feeds/ipq807x/qca-nss-drv/src/nss_if_log.h | 40 + feeds/ipq807x/qca-nss-drv/src/nss_igs.c | 207 + feeds/ipq807x/qca-nss-drv/src/nss_igs_stats.c | 307 + feeds/ipq807x/qca-nss-drv/src/nss_igs_stats.h | 45 + feeds/ipq807x/qca-nss-drv/src/nss_init.c | 950 + feeds/ipq807x/qca-nss-drv/src/nss_ipsec.c | 597 + feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn.c | 525 + .../qca-nss-drv/src/nss_ipsec_cmn_log.c | 354 + .../qca-nss-drv/src/nss_ipsec_cmn_log.h | 37 + .../qca-nss-drv/src/nss_ipsec_cmn_stats.c | 219 + .../qca-nss-drv/src/nss_ipsec_cmn_stats.h | 28 + .../qca-nss-drv/src/nss_ipsec_cmn_strings.c | 82 + .../qca-nss-drv/src/nss_ipsec_cmn_strings.h | 27 + feeds/ipq807x/qca-nss-drv/src/nss_ipsec_log.c | 205 + feeds/ipq807x/qca-nss-drv/src/nss_ipsec_log.h | 37 + feeds/ipq807x/qca-nss-drv/src/nss_ipv4.c | 782 + feeds/ipq807x/qca-nss-drv/src/nss_ipv4_log.c | 355 + .../ipq807x/qca-nss-drv/src/nss_ipv4_reasm.c | 76 + .../qca-nss-drv/src/nss_ipv4_reasm_stats.c | 167 + .../qca-nss-drv/src/nss_ipv4_reasm_stats.h | 27 + .../qca-nss-drv/src/nss_ipv4_reasm_strings.c | 55 + 
.../qca-nss-drv/src/nss_ipv4_reasm_strings.h | 25 + .../ipq807x/qca-nss-drv/src/nss_ipv4_stats.c | 239 + .../ipq807x/qca-nss-drv/src/nss_ipv4_stats.h | 29 + .../qca-nss-drv/src/nss_ipv4_strings.c | 208 + .../qca-nss-drv/src/nss_ipv4_strings.h | 26 + feeds/ipq807x/qca-nss-drv/src/nss_ipv6.c | 776 + feeds/ipq807x/qca-nss-drv/src/nss_ipv6_log.c | 387 + .../ipq807x/qca-nss-drv/src/nss_ipv6_reasm.c | 72 + .../qca-nss-drv/src/nss_ipv6_reasm_stats.c | 167 + .../qca-nss-drv/src/nss_ipv6_reasm_stats.h | 27 + .../qca-nss-drv/src/nss_ipv6_reasm_strings.c | 55 + .../qca-nss-drv/src/nss_ipv6_reasm_strings.h | 25 + .../ipq807x/qca-nss-drv/src/nss_ipv6_stats.c | 243 + .../ipq807x/qca-nss-drv/src/nss_ipv6_stats.h | 29 + .../qca-nss-drv/src/nss_ipv6_strings.c | 185 + .../qca-nss-drv/src/nss_ipv6_strings.h | 26 + feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2.c | 284 + .../ipq807x/qca-nss-drv/src/nss_l2tpv2_log.c | 143 + .../ipq807x/qca-nss-drv/src/nss_l2tpv2_log.h | 41 + .../qca-nss-drv/src/nss_l2tpv2_stats.c | 156 + .../qca-nss-drv/src/nss_l2tpv2_stats.h | 33 + .../qca-nss-drv/src/nss_l2tpv2_strings.c | 57 + .../qca-nss-drv/src/nss_l2tpv2_strings.h | 25 + feeds/ipq807x/qca-nss-drv/src/nss_lag.c | 273 + feeds/ipq807x/qca-nss-drv/src/nss_lag_log.c | 103 + feeds/ipq807x/qca-nss-drv/src/nss_lag_log.h | 41 + feeds/ipq807x/qca-nss-drv/src/nss_log.c | 602 + feeds/ipq807x/qca-nss-drv/src/nss_log.h | 115 + feeds/ipq807x/qca-nss-drv/src/nss_lso_rx.c | 62 + .../qca-nss-drv/src/nss_lso_rx_stats.c | 172 + .../qca-nss-drv/src/nss_lso_rx_stats.h | 67 + .../qca-nss-drv/src/nss_lso_rx_strings.c | 57 + .../qca-nss-drv/src/nss_lso_rx_strings.h | 25 + feeds/ipq807x/qca-nss-drv/src/nss_map_t.c | 412 + feeds/ipq807x/qca-nss-drv/src/nss_map_t_log.c | 151 + feeds/ipq807x/qca-nss-drv/src/nss_map_t_log.h | 41 + .../ipq807x/qca-nss-drv/src/nss_map_t_stats.c | 154 + .../ipq807x/qca-nss-drv/src/nss_map_t_stats.h | 36 + .../qca-nss-drv/src/nss_map_t_strings.c | 65 + .../qca-nss-drv/src/nss_map_t_strings.h | 25 + 
feeds/ipq807x/qca-nss-drv/src/nss_match.c | 299 + feeds/ipq807x/qca-nss-drv/src/nss_match_log.c | 225 + feeds/ipq807x/qca-nss-drv/src/nss_match_log.h | 39 + .../ipq807x/qca-nss-drv/src/nss_match_stats.c | 245 + .../ipq807x/qca-nss-drv/src/nss_match_stats.h | 81 + .../qca-nss-drv/src/nss_match_strings.c | 92 + .../qca-nss-drv/src/nss_match_strings.h | 27 + feeds/ipq807x/qca-nss-drv/src/nss_meminfo.c | 798 + feeds/ipq807x/qca-nss-drv/src/nss_meminfo.h | 135 + feeds/ipq807x/qca-nss-drv/src/nss_mirror.c | 296 + .../ipq807x/qca-nss-drv/src/nss_mirror_log.c | 198 + .../ipq807x/qca-nss-drv/src/nss_mirror_log.h | 39 + .../qca-nss-drv/src/nss_mirror_stats.c | 324 + .../qca-nss-drv/src/nss_mirror_stats.h | 44 + .../qca-nss-drv/src/nss_mirror_strings.c | 58 + .../qca-nss-drv/src/nss_mirror_strings.h | 27 + feeds/ipq807x/qca-nss-drv/src/nss_n2h.c | 2250 + feeds/ipq807x/qca-nss-drv/src/nss_n2h_stats.c | 214 + feeds/ipq807x/qca-nss-drv/src/nss_n2h_stats.h | 27 + .../ipq807x/qca-nss-drv/src/nss_n2h_strings.c | 85 + .../ipq807x/qca-nss-drv/src/nss_n2h_strings.h | 25 + feeds/ipq807x/qca-nss-drv/src/nss_oam.c | 141 + feeds/ipq807x/qca-nss-drv/src/nss_oam_log.c | 101 + feeds/ipq807x/qca-nss-drv/src/nss_oam_log.h | 41 + feeds/ipq807x/qca-nss-drv/src/nss_phys_if.c | 629 + feeds/ipq807x/qca-nss-drv/src/nss_phys_if.h | 326 + feeds/ipq807x/qca-nss-drv/src/nss_pm.c | 447 + feeds/ipq807x/qca-nss-drv/src/nss_pm.h | 164 + feeds/ipq807x/qca-nss-drv/src/nss_portid.c | 423 + .../ipq807x/qca-nss-drv/src/nss_portid_log.c | 129 + .../ipq807x/qca-nss-drv/src/nss_portid_log.h | 41 + .../qca-nss-drv/src/nss_portid_stats.c | 153 + .../qca-nss-drv/src/nss_portid_stats.h | 39 + feeds/ipq807x/qca-nss-drv/src/nss_ppe.c | 374 + feeds/ipq807x/qca-nss-drv/src/nss_ppe.h | 423 + feeds/ipq807x/qca-nss-drv/src/nss_ppe_log.c | 189 + feeds/ipq807x/qca-nss-drv/src/nss_ppe_stats.c | 925 + feeds/ipq807x/qca-nss-drv/src/nss_ppe_stats.h | 447 + .../ipq807x/qca-nss-drv/src/nss_ppe_strings.c | 532 + 
.../ipq807x/qca-nss-drv/src/nss_ppe_strings.h | 32 + feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp.c | 864 + feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp.h | 130 + .../ipq807x/qca-nss-drv/src/nss_ppe_vp_log.c | 135 + .../qca-nss-drv/src/nss_ppe_vp_stats.c | 229 + .../qca-nss-drv/src/nss_ppe_vp_stats.h | 63 + feeds/ipq807x/qca-nss-drv/src/nss_pppoe.c | 435 + feeds/ipq807x/qca-nss-drv/src/nss_pppoe_log.c | 133 + feeds/ipq807x/qca-nss-drv/src/nss_pppoe_log.h | 41 + .../ipq807x/qca-nss-drv/src/nss_pppoe_stats.c | 265 + .../ipq807x/qca-nss-drv/src/nss_pppoe_stats.h | 28 + .../qca-nss-drv/src/nss_pppoe_strings.c | 121 + .../qca-nss-drv/src/nss_pppoe_strings.h | 26 + feeds/ipq807x/qca-nss-drv/src/nss_pptp.c | 472 + feeds/ipq807x/qca-nss-drv/src/nss_pptp_log.c | 129 + feeds/ipq807x/qca-nss-drv/src/nss_pptp_log.h | 41 + .../ipq807x/qca-nss-drv/src/nss_pptp_stats.c | 154 + .../ipq807x/qca-nss-drv/src/nss_pptp_stats.h | 36 + .../qca-nss-drv/src/nss_pptp_strings.c | 79 + .../qca-nss-drv/src/nss_pptp_strings.h | 25 + feeds/ipq807x/qca-nss-drv/src/nss_profiler.c | 254 + feeds/ipq807x/qca-nss-drv/src/nss_project.c | 338 + feeds/ipq807x/qca-nss-drv/src/nss_pvxlan.c | 446 + .../ipq807x/qca-nss-drv/src/nss_pvxlan_log.c | 244 + .../ipq807x/qca-nss-drv/src/nss_pvxlan_log.h | 37 + .../qca-nss-drv/src/nss_pvxlan_stats.c | 213 + .../qca-nss-drv/src/nss_pvxlan_stats.h | 66 + feeds/ipq807x/qca-nss-drv/src/nss_qrfs.c | 472 + feeds/ipq807x/qca-nss-drv/src/nss_qrfs_log.c | 174 + feeds/ipq807x/qca-nss-drv/src/nss_qrfs_log.h | 37 + .../ipq807x/qca-nss-drv/src/nss_qrfs_stats.c | 148 + .../ipq807x/qca-nss-drv/src/nss_qrfs_stats.h | 38 + feeds/ipq807x/qca-nss-drv/src/nss_qvpn.c | 344 + feeds/ipq807x/qca-nss-drv/src/nss_qvpn_log.c | 262 + feeds/ipq807x/qca-nss-drv/src/nss_qvpn_log.h | 37 + .../ipq807x/qca-nss-drv/src/nss_qvpn_stats.c | 203 + .../ipq807x/qca-nss-drv/src/nss_qvpn_stats.h | 24 + .../qca-nss-drv/src/nss_qvpn_strings.c | 60 + .../qca-nss-drv/src/nss_qvpn_strings.h | 27 + 
feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx.c | 781 + .../qca-nss-drv/src/nss_rmnet_rx_stats.c | 209 + .../qca-nss-drv/src/nss_rmnet_rx_stats.h | 61 + feeds/ipq807x/qca-nss-drv/src/nss_rps.c | 644 + feeds/ipq807x/qca-nss-drv/src/nss_shaper.c | 367 + feeds/ipq807x/qca-nss-drv/src/nss_sjack.c | 189 + feeds/ipq807x/qca-nss-drv/src/nss_sjack_log.c | 133 + feeds/ipq807x/qca-nss-drv/src/nss_sjack_log.h | 41 + .../ipq807x/qca-nss-drv/src/nss_sjack_stats.c | 94 + .../ipq807x/qca-nss-drv/src/nss_sjack_stats.h | 45 + feeds/ipq807x/qca-nss-drv/src/nss_stats.c | 481 + feeds/ipq807x/qca-nss-drv/src/nss_stats.h | 76 + feeds/ipq807x/qca-nss-drv/src/nss_strings.c | 148 + feeds/ipq807x/qca-nss-drv/src/nss_strings.h | 52 + feeds/ipq807x/qca-nss-drv/src/nss_tls.c | 475 + feeds/ipq807x/qca-nss-drv/src/nss_tls_log.c | 167 + feeds/ipq807x/qca-nss-drv/src/nss_tls_log.h | 39 + feeds/ipq807x/qca-nss-drv/src/nss_tls_stats.c | 206 + feeds/ipq807x/qca-nss-drv/src/nss_tls_stats.h | 28 + .../ipq807x/qca-nss-drv/src/nss_tls_strings.c | 88 + .../ipq807x/qca-nss-drv/src/nss_tls_strings.h | 27 + .../ipq807x/qca-nss-drv/src/nss_trustsec_tx.c | 299 + .../qca-nss-drv/src/nss_trustsec_tx_log.c | 170 + .../qca-nss-drv/src/nss_trustsec_tx_log.h | 37 + .../qca-nss-drv/src/nss_trustsec_tx_stats.c | 145 + .../qca-nss-drv/src/nss_trustsec_tx_stats.h | 44 + feeds/ipq807x/qca-nss-drv/src/nss_tstamp.c | 423 + .../qca-nss-drv/src/nss_tstamp_stats.c | 165 + .../qca-nss-drv/src/nss_tstamp_stats.h | 48 + feeds/ipq807x/qca-nss-drv/src/nss_tun6rd.c | 183 + .../ipq807x/qca-nss-drv/src/nss_tun6rd_log.c | 132 + .../ipq807x/qca-nss-drv/src/nss_tun6rd_log.h | 41 + feeds/ipq807x/qca-nss-drv/src/nss_tunipip6.c | 291 + .../qca-nss-drv/src/nss_tunipip6_log.c | 189 + .../qca-nss-drv/src/nss_tunipip6_log.h | 41 + .../qca-nss-drv/src/nss_tunipip6_stats.c | 124 + .../qca-nss-drv/src/nss_tunipip6_stats.h | 34 + .../ipq807x/qca-nss-drv/src/nss_tx_msg_sync.c | 197 + .../ipq807x/qca-nss-drv/src/nss_tx_msg_sync.h | 82 + 
.../qca-nss-drv/src/nss_tx_rx_common.h | 114 + feeds/ipq807x/qca-nss-drv/src/nss_udp_st.c | 233 + .../ipq807x/qca-nss-drv/src/nss_udp_st_log.c | 254 + .../ipq807x/qca-nss-drv/src/nss_udp_st_log.h | 39 + .../qca-nss-drv/src/nss_udp_st_stats.c | 178 + .../qca-nss-drv/src/nss_udp_st_stats.h | 36 + .../qca-nss-drv/src/nss_udp_st_strings.c | 151 + .../qca-nss-drv/src/nss_udp_st_strings.h | 28 + feeds/ipq807x/qca-nss-drv/src/nss_unaligned.c | 91 + .../qca-nss-drv/src/nss_unaligned_log.c | 75 + .../qca-nss-drv/src/nss_unaligned_log.h | 31 + .../qca-nss-drv/src/nss_unaligned_stats.c | 88 + .../qca-nss-drv/src/nss_unaligned_stats.h | 22 + feeds/ipq807x/qca-nss-drv/src/nss_virt_if.c | 736 + .../qca-nss-drv/src/nss_virt_if_stats.c | 339 + .../qca-nss-drv/src/nss_virt_if_stats.h | 51 + feeds/ipq807x/qca-nss-drv/src/nss_vlan.c | 411 + feeds/ipq807x/qca-nss-drv/src/nss_vlan_log.c | 120 + feeds/ipq807x/qca-nss-drv/src/nss_vlan_log.h | 37 + feeds/ipq807x/qca-nss-drv/src/nss_vxlan.c | 326 + feeds/ipq807x/qca-nss-drv/src/nss_vxlan_log.c | 257 + feeds/ipq807x/qca-nss-drv/src/nss_vxlan_log.h | 37 + .../ipq807x/qca-nss-drv/src/nss_vxlan_stats.c | 122 + .../ipq807x/qca-nss-drv/src/nss_vxlan_stats.h | 32 + feeds/ipq807x/qca-nss-drv/src/nss_wifi.c | 198 + .../qca-nss-drv/src/nss_wifi_ext_vdev.c | 338 + .../qca-nss-drv/src/nss_wifi_ext_vdev_log.c | 220 + .../qca-nss-drv/src/nss_wifi_ext_vdev_log.h | 34 + .../qca-nss-drv/src/nss_wifi_ext_vdev_stats.c | 234 + .../qca-nss-drv/src/nss_wifi_ext_vdev_stats.h | 60 + feeds/ipq807x/qca-nss-drv/src/nss_wifi_log.c | 806 + feeds/ipq807x/qca-nss-drv/src/nss_wifi_log.h | 37 + .../ipq807x/qca-nss-drv/src/nss_wifi_mac_db.c | 215 + feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh.c | 242 + .../qca-nss-drv/src/nss_wifi_mesh_log.c | 368 + .../qca-nss-drv/src/nss_wifi_mesh_log.h | 34 + .../qca-nss-drv/src/nss_wifi_mesh_stats.c | 662 + .../qca-nss-drv/src/nss_wifi_mesh_stats.h | 42 + .../qca-nss-drv/src/nss_wifi_mesh_strings.c | 276 + 
.../qca-nss-drv/src/nss_wifi_mesh_strings.h | 32 + .../ipq807x/qca-nss-drv/src/nss_wifi_stats.c | 213 + .../ipq807x/qca-nss-drv/src/nss_wifi_stats.h | 62 + feeds/ipq807x/qca-nss-drv/src/nss_wifi_vdev.c | 379 + feeds/ipq807x/qca-nss-drv/src/nss_wifili.c | 670 + .../ipq807x/qca-nss-drv/src/nss_wifili_log.c | 553 + .../ipq807x/qca-nss-drv/src/nss_wifili_log.h | 37 + .../qca-nss-drv/src/nss_wifili_stats.c | 512 + .../qca-nss-drv/src/nss_wifili_stats.h | 35 + .../qca-nss-drv/src/nss_wifili_strings.c | 366 + .../qca-nss-drv/src/nss_wifili_strings.h | 44 + .../qca-nss-fw/files/IPQ5018/qca-nss0.bin | Bin 781856 -> 764816 bytes feeds/ipq807x/qca-ssdk-shell/Makefile | 9 - feeds/ipq807x/qca-ssdk-shell/src/Makefile | 47 + feeds/ipq807x/qca-ssdk-shell/src/config | 153 + .../src/include/api/api_access.h | 39 + .../qca-ssdk-shell/src/include/api/api_desc.h | 4240 ++ .../qca-ssdk-shell/src/include/api/sw_api.h | 282 + .../qca-ssdk-shell/src/include/api/sw_ioctl.h | 981 + .../src/include/common/aos_head.h | 21 + .../src/include/common/shared_func.h | 110 + .../qca-ssdk-shell/src/include/common/sw.h | 33 + .../src/include/common/sw_config.h | 37 + .../src/include/common/sw_error.h | 62 + .../qca-ssdk-shell/src/include/common/util.h | 92 + .../qca-ssdk-shell/src/include/fal/fal.h | 67 + .../qca-ssdk-shell/src/include/fal/fal_acl.h | 619 + .../qca-ssdk-shell/src/include/fal/fal_api.h | 2115 + .../qca-ssdk-shell/src/include/fal/fal_bm.h | 130 + .../src/include/fal/fal_cosmap.h | 138 + .../src/include/fal/fal_ctrlpkt.h | 90 + .../qca-ssdk-shell/src/include/fal/fal_fdb.h | 364 + .../qca-ssdk-shell/src/include/fal/fal_flow.h | 236 + .../qca-ssdk-shell/src/include/fal/fal_igmp.h | 161 + .../qca-ssdk-shell/src/include/fal/fal_init.h | 80 + .../src/include/fal/fal_interface_ctrl.h | 183 + .../qca-ssdk-shell/src/include/fal/fal_ip.h | 633 + .../src/include/fal/fal_leaky.h | 107 + .../qca-ssdk-shell/src/include/fal/fal_led.h | 123 + .../qca-ssdk-shell/src/include/fal/fal_mib.h | 245 + 
.../src/include/fal/fal_mirror.h | 87 + .../qca-ssdk-shell/src/include/fal/fal_misc.h | 263 + .../src/include/fal/fal_multi.h | 73 + .../qca-ssdk-shell/src/include/fal/fal_nat.h | 293 + .../src/include/fal/fal_policer.h | 149 + .../src/include/fal/fal_port_ctrl.h | 777 + .../src/include/fal/fal_portvlan.h | 725 + .../src/include/fal/fal_pppoe.h | 110 + .../qca-ssdk-shell/src/include/fal/fal_ptp.h | 471 + .../qca-ssdk-shell/src/include/fal/fal_qm.h | 325 + .../qca-ssdk-shell/src/include/fal/fal_qos.h | 456 + .../qca-ssdk-shell/src/include/fal/fal_rate.h | 234 + .../src/include/fal/fal_reg_access.h | 95 + .../src/include/fal/fal_rss_hash.h | 69 + .../qca-ssdk-shell/src/include/fal/fal_sec.h | 250 + .../src/include/fal/fal_servcode.h | 125 + .../qca-ssdk-shell/src/include/fal/fal_sfp.h | 578 + .../src/include/fal/fal_shaper.h | 162 + .../qca-ssdk-shell/src/include/fal/fal_stp.h | 76 + .../src/include/fal/fal_trunk.h | 86 + .../qca-ssdk-shell/src/include/fal/fal_type.h | 135 + .../src/include/fal/fal_uk_if.h | 42 + .../qca-ssdk-shell/src/include/fal/fal_vlan.h | 129 + .../qca-ssdk-shell/src/include/fal/fal_vsi.h | 131 + .../src/include/init/ssdk_init.h | 280 + .../src/include/init/ssdk_plat.h | 178 + .../qca-ssdk-shell/src/include/ref/ref_api.h | 47 + .../qca-ssdk-shell/src/include/ref/ref_vlan.h | 58 + .../src/include/sal/os/aos_lock.h | 47 + .../src/include/sal/os/aos_mem.h | 116 + .../src/include/sal/os/aos_timer.h | 50 + .../src/include/sal/os/aos_types.h | 184 + .../include/sal/os/linux_user/aos_lock_pvt.h | 46 + .../include/sal/os/linux_user/aos_mem_pvt.h | 61 + .../include/sal/os/linux_user/aos_timer_pvt.h | 36 + .../include/sal/os/linux_user/aos_types_pvt.h | 41 + .../sal/sd/linux/uk_interface/sw_api_us.h | 36 + .../qca-ssdk-shell/src/include/sal/sd/sd.h | 58 + .../qca-ssdk-shell/src/include/shell/shell.h | 43 + .../src/include/shell/shell_config.h | 91 + .../src/include/shell/shell_io.h | 1039 + .../src/include/shell/shell_lib.h | 31 + 
.../src/include/shell/shell_sw.h | 53 + .../qca-ssdk-shell/src/make/components.mk | 36 + .../ipq807x/qca-ssdk-shell/src/make/config.mk | 91 + feeds/ipq807x/qca-ssdk-shell/src/make/defs.mk | 28 + .../qca-ssdk-shell/src/make/linux_opt.mk | 328 + .../ipq807x/qca-ssdk-shell/src/make/target.mk | 49 + .../ipq807x/qca-ssdk-shell/src/make/tools.mk | 12 + .../qca-ssdk-shell/src/src/api/Makefile | 12 + .../qca-ssdk-shell/src/src/api/api_access.c | 293 + .../qca-ssdk-shell/src/src/fal_uk/Makefile | 141 + .../qca-ssdk-shell/src/src/fal_uk/fal_acl.c | 207 + .../qca-ssdk-shell/src/src/fal_uk/fal_bm.c | 160 + .../src/src/fal_uk/fal_cosmap.c | 225 + .../src/src/fal_uk/fal_ctrlpkt.c | 96 + .../qca-ssdk-shell/src/src/fal_uk/fal_fdb.c | 459 + .../qca-ssdk-shell/src/src/fal_uk/fal_flow.c | 190 + .../qca-ssdk-shell/src/src/fal_uk/fal_igmp.c | 277 + .../qca-ssdk-shell/src/src/fal_uk/fal_init.c | 60 + .../src/src/fal_uk/fal_interface_ctrl.c | 119 + .../qca-ssdk-shell/src/src/fal_uk/fal_ip.c | 728 + .../qca-ssdk-shell/src/src/fal_uk/fal_leaky.c | 110 + .../qca-ssdk-shell/src/src/fal_uk/fal_led.c | 43 + .../qca-ssdk-shell/src/src/fal_uk/fal_mib.c | 93 + .../src/src/fal_uk/fal_mirror.c | 98 + .../qca-ssdk-shell/src/src/fal_uk/fal_misc.c | 500 + .../qca-ssdk-shell/src/src/fal_uk/fal_nat.c | 360 + .../src/src/fal_uk/fal_policer.c | 138 + .../src/src/fal_uk/fal_port_ctrl.c | 956 + .../src/src/fal_uk/fal_portvlan.c | 754 + .../qca-ssdk-shell/src/src/fal_uk/fal_pppoe.c | 167 + .../qca-ssdk-shell/src/src/fal_uk/fal_ptp.c | 497 + .../qca-ssdk-shell/src/src/fal_uk/fal_qm.c | 404 + .../qca-ssdk-shell/src/src/fal_uk/fal_qos.c | 612 + .../qca-ssdk-shell/src/src/fal_uk/fal_rate.c | 263 + .../src/src/fal_uk/fal_reg_access.c | 159 + .../src/src/fal_uk/fal_rss_hash.c | 41 + .../qca-ssdk-shell/src/src/fal_uk/fal_sec.c | 94 + .../src/src/fal_uk/fal_servcode.c | 57 + .../qca-ssdk-shell/src/src/fal_uk/fal_sfp.c | 202 + .../src/src/fal_uk/fal_shaper.c | 221 + .../qca-ssdk-shell/src/src/fal_uk/fal_stp.c | 42 + 
.../qca-ssdk-shell/src/src/fal_uk/fal_trunk.c | 101 + .../qca-ssdk-shell/src/src/fal_uk/fal_uk_if.c | 67 + .../qca-ssdk-shell/src/src/fal_uk/fal_vlan.c | 145 + .../qca-ssdk-shell/src/src/fal_uk/fal_vsi.c | 148 + .../qca-ssdk-shell/src/src/ref/Makefile | 16 + .../qca-ssdk-shell/src/src/ref/ref_vlan.c | 38 + .../qca-ssdk-shell/src/src/sal/Makefile | 12 + .../qca-ssdk-shell/src/src/sal/sd/Makefile | 12 + .../src/src/sal/sd/linux/Makefile | 12 + .../src/sal/sd/linux/uk_interface/Makefile | 34 + .../sd/linux/uk_interface/sw_api_us_ioctl.c | 127 + .../sd/linux/uk_interface/sw_api_us_netlink.c | 228 + .../qca-ssdk-shell/src/src/sal/sd/sd.c | 204 + .../qca-ssdk-shell/src/src/shell/Makefile | 24 + .../qca-ssdk-shell/src/src/shell/shell.c | 858 + .../src/src/shell/shell_config.c | 1451 + .../qca-ssdk-shell/src/src/shell/shell_io.c | 30033 +++++++++++ .../qca-ssdk-shell/src/src/shell/shell_lib.c | 917 + .../src/src/shell/shell_module_ctrl.c | 1069 + .../qca-ssdk-shell/src/src/shell/shell_sw.c | 739 + feeds/ipq807x/qca-ssdk/Makefile | 110 +- feeds/ipq807x/qca-ssdk/Makefile.orig | 94 - feeds/ipq807x/qca-ssdk/patches/100-aq.patch | 26 +- feeds/ipq807x/qca-ssdk/src/ChangeLog | 129 + feeds/ipq807x/qca-ssdk/src/Makefile | 52 + .../src/app/nathelper/linux/host_helper.c | 2253 + .../app/nathelper/linux/lib/nat_helper_dt.c | 1352 + .../app/nathelper/linux/lib/nat_helper_dt.h | 22 + .../app/nathelper/linux/lib/nat_helper_hsl.c | 879 + .../app/nathelper/linux/lib/nat_helper_hsl.h | 257 + .../src/app/nathelper/linux/napt_acl.c | 1622 + .../src/app/nathelper/linux/napt_acl.h | 94 + .../src/app/nathelper/linux/napt_helper.c | 406 + .../src/app/nathelper/linux/napt_helper.h | 91 + .../src/app/nathelper/linux/napt_procfs.c | 1316 + .../src/app/nathelper/linux/nat_helper.c | 69 + .../src/app/nathelper/linux/nat_helper.h | 54 + .../src/app/nathelper/linux/nat_ipt_helper.c | 761 + feeds/ipq807x/qca-ssdk/src/config | 347 + .../ipq807x/qca-ssdk/src/include/adpt/adpt.h | 1604 + 
.../src/include/adpt/cppe/adpt_cppe_flow.h | 38 + .../src/include/adpt/cppe/adpt_cppe_mib.h | 52 + .../src/include/adpt/cppe/adpt_cppe_misc.h | 37 + .../include/adpt/cppe/adpt_cppe_portctrl.h | 103 + .../src/include/adpt/cppe/adpt_cppe_qm.h | 41 + .../src/include/adpt/cppe/adpt_cppe_qos.h | 68 + .../src/include/adpt/cppe/adpt_cppe_uniphy.h | 47 + .../src/include/adpt/hppe/adpt_hppe.h | 112 + .../qca-ssdk/src/include/adpt/mp/adpt_mp.h | 43 + .../src/include/adpt/mp/adpt_mp_portctrl.h | 63 + .../src/include/adpt/mp/adpt_mp_uniphy.h | 45 + .../qca-ssdk/src/include/adpt/sfp/adpt_sfp.h | 32 + .../qca-ssdk/src/include/api/api_access.h | 39 + .../qca-ssdk/src/include/api/api_desc.h | 4771 ++ .../ipq807x/qca-ssdk/src/include/api/sw_api.h | 276 + .../qca-ssdk/src/include/api/sw_ioctl.h | 989 + .../qca-ssdk/src/include/common/aos_head.h | 19 + .../qca-ssdk/src/include/common/shared_func.h | 120 + .../ipq807x/qca-ssdk/src/include/common/sw.h | 33 + .../qca-ssdk/src/include/common/sw_config.h | 37 + .../qca-ssdk/src/include/common/sw_error.h | 62 + .../qca-ssdk/src/include/common/util.h | 92 + feeds/ipq807x/qca-ssdk/src/include/fal/fal.h | 68 + .../qca-ssdk/src/include/fal/fal_acl.h | 616 + .../qca-ssdk/src/include/fal/fal_api.h | 2428 + .../ipq807x/qca-ssdk/src/include/fal/fal_bm.h | 144 + .../qca-ssdk/src/include/fal/fal_cosmap.h | 140 + .../qca-ssdk/src/include/fal/fal_ctrlpkt.h | 100 + .../qca-ssdk/src/include/fal/fal_fdb.h | 338 + .../qca-ssdk/src/include/fal/fal_flow.h | 244 + .../qca-ssdk/src/include/fal/fal_flowcookie.h | 43 + .../qca-ssdk/src/include/fal/fal_igmp.h | 161 + .../qca-ssdk/src/include/fal/fal_init.h | 84 + .../src/include/fal/fal_interface_ctrl.h | 181 + .../ipq807x/qca-ssdk/src/include/fal/fal_ip.h | 646 + .../qca-ssdk/src/include/fal/fal_leaky.h | 107 + .../qca-ssdk/src/include/fal/fal_led.h | 142 + .../qca-ssdk/src/include/fal/fal_mib.h | 260 + .../qca-ssdk/src/include/fal/fal_mirror.h | 86 + .../qca-ssdk/src/include/fal/fal_misc.h | 256 + 
.../qca-ssdk/src/include/fal/fal_multi.h | 73 + .../qca-ssdk/src/include/fal/fal_nat.h | 294 + .../qca-ssdk/src/include/fal/fal_policer.h | 164 + .../qca-ssdk/src/include/fal/fal_port_ctrl.h | 801 + .../qca-ssdk/src/include/fal/fal_portvlan.h | 738 + .../qca-ssdk/src/include/fal/fal_pppoe.h | 109 + .../qca-ssdk/src/include/fal/fal_ptp.h | 496 + .../ipq807x/qca-ssdk/src/include/fal/fal_qm.h | 344 + .../qca-ssdk/src/include/fal/fal_qos.h | 442 + .../qca-ssdk/src/include/fal/fal_rate.h | 234 + .../qca-ssdk/src/include/fal/fal_reg_access.h | 97 + .../qca-ssdk/src/include/fal/fal_rfs.h | 90 + .../qca-ssdk/src/include/fal/fal_rss_hash.h | 77 + .../qca-ssdk/src/include/fal/fal_sec.h | 251 + .../qca-ssdk/src/include/fal/fal_servcode.h | 137 + .../qca-ssdk/src/include/fal/fal_sfp.h | 578 + .../qca-ssdk/src/include/fal/fal_shaper.h | 163 + .../qca-ssdk/src/include/fal/fal_stp.h | 75 + .../qca-ssdk/src/include/fal/fal_trunk.h | 84 + .../qca-ssdk/src/include/fal/fal_type.h | 137 + .../qca-ssdk/src/include/fal/fal_uk_if.h | 42 + .../qca-ssdk/src/include/fal/fal_vlan.h | 129 + .../qca-ssdk/src/include/fal/fal_vsi.h | 137 + .../src/include/hsl/athena/athena_api.h | 198 + .../src/include/hsl/athena/athena_fdb.h | 76 + .../src/include/hsl/athena/athena_init.h | 45 + .../src/include/hsl/athena/athena_mib.h | 51 + .../src/include/hsl/athena/athena_port_ctrl.h | 127 + .../src/include/hsl/athena/athena_portvlan.h | 94 + .../src/include/hsl/athena/athena_reg.h | 2157 + .../include/hsl/athena/athena_reg_access.h | 63 + .../src/include/hsl/athena/athena_vlan.h | 94 + .../src/include/hsl/cppe/cppe_loopback.h | 213 + .../src/include/hsl/cppe/cppe_loopback_reg.h | 578 + .../src/include/hsl/cppe/cppe_portctrl.h | 204 + .../src/include/hsl/cppe/cppe_portctrl_reg.h | 166 + .../qca-ssdk/src/include/hsl/cppe/cppe_qos.h | 186 + .../src/include/hsl/cppe/cppe_qos_reg.h | 113 + .../qca-ssdk/src/include/hsl/dess/dess_acl.h | 144 + .../qca-ssdk/src/include/hsl/dess/dess_api.h | 1174 + 
.../src/include/hsl/dess/dess_cosmap.h | 141 + .../qca-ssdk/src/include/hsl/dess/dess_fdb.h | 168 + .../src/include/hsl/dess/dess_fdb_prv.h | 49 + .../qca-ssdk/src/include/hsl/dess/dess_igmp.h | 165 + .../qca-ssdk/src/include/hsl/dess/dess_init.h | 44 + .../include/hsl/dess/dess_interface_ctrl.h | 58 + .../qca-ssdk/src/include/hsl/dess/dess_ip.h | 201 + .../src/include/hsl/dess/dess_leaky.h | 94 + .../qca-ssdk/src/include/hsl/dess/dess_led.h | 61 + .../qca-ssdk/src/include/hsl/dess/dess_mib.h | 69 + .../src/include/hsl/dess/dess_mirror.h | 73 + .../qca-ssdk/src/include/hsl/dess/dess_misc.h | 258 + .../qca-ssdk/src/include/hsl/dess/dess_nat.h | 147 + .../src/include/hsl/dess/dess_nat_helper.h | 50 + .../src/include/hsl/dess/dess_port_ctrl.h | 360 + .../src/include/hsl/dess/dess_portvlan.h | 228 + .../src/include/hsl/dess/dess_psgmii.h | 310 + .../qca-ssdk/src/include/hsl/dess/dess_qos.h | 180 + .../qca-ssdk/src/include/hsl/dess/dess_rate.h | 96 + .../qca-ssdk/src/include/hsl/dess/dess_reg.h | 5011 ++ .../src/include/hsl/dess/dess_reg_access.h | 78 + .../qca-ssdk/src/include/hsl/dess/dess_sec.h | 53 + .../qca-ssdk/src/include/hsl/dess/dess_stp.h | 55 + .../src/include/hsl/dess/dess_trunk.h | 59 + .../qca-ssdk/src/include/hsl/dess/dess_vlan.h | 99 + .../src/include/hsl/garuda/garuda_acl.h | 120 + .../src/include/hsl/garuda/garuda_api.h | 531 + .../src/include/hsl/garuda/garuda_fdb.h | 120 + .../src/include/hsl/garuda/garuda_igmp.h | 111 + .../src/include/hsl/garuda/garuda_init.h | 47 + .../src/include/hsl/garuda/garuda_leaky.h | 107 + .../src/include/hsl/garuda/garuda_led.h | 57 + .../src/include/hsl/garuda/garuda_mib.h | 68 + .../src/include/hsl/garuda/garuda_mirror.h | 84 + .../src/include/hsl/garuda/garuda_misc.h | 147 + .../src/include/hsl/garuda/garuda_port_ctrl.h | 145 + .../src/include/hsl/garuda/garuda_portvlan.h | 151 + .../src/include/hsl/garuda/garuda_qos.h | 169 + .../src/include/hsl/garuda/garuda_rate.h | 111 + .../include/hsl/garuda/garuda_reduced_acl.h 
| 54 + .../src/include/hsl/garuda/garuda_reg.h | 3614 ++ .../include/hsl/garuda/garuda_reg_access.h | 63 + .../src/include/hsl/garuda/garuda_stp.h | 62 + .../src/include/hsl/garuda/garuda_vlan.h | 82 + .../src/include/hsl/horus/horus_api.h | 559 + .../src/include/hsl/horus/horus_fdb.h | 108 + .../src/include/hsl/horus/horus_igmp.h | 133 + .../src/include/hsl/horus/horus_init.h | 49 + .../src/include/hsl/horus/horus_leaky.h | 100 + .../src/include/hsl/horus/horus_led.h | 55 + .../src/include/hsl/horus/horus_mib.h | 65 + .../src/include/hsl/horus/horus_mirror.h | 78 + .../src/include/hsl/horus/horus_misc.h | 167 + .../src/include/hsl/horus/horus_port_ctrl.h | 145 + .../src/include/hsl/horus/horus_portvlan.h | 160 + .../src/include/hsl/horus/horus_qos.h | 164 + .../src/include/hsl/horus/horus_rate.h | 89 + .../src/include/hsl/horus/horus_reg.h | 2431 + .../src/include/hsl/horus/horus_reg_access.h | 63 + .../src/include/hsl/horus/horus_stp.h | 61 + .../src/include/hsl/horus/horus_vlan.h | 78 + .../qca-ssdk/src/include/hsl/hppe/hppe_acl.h | 1256 + .../src/include/hsl/hppe/hppe_acl_reg.h | 842 + .../qca-ssdk/src/include/hsl/hppe/hppe_bm.h | 1218 + .../src/include/hsl/hppe/hppe_bm_reg.h | 1036 + .../src/include/hsl/hppe/hppe_ctrlpkt.h | 245 + .../src/include/hsl/hppe/hppe_ctrlpkt_reg.h | 154 + .../qca-ssdk/src/include/hsl/hppe/hppe_fdb.h | 900 + .../src/include/hsl/hppe/hppe_fdb_reg.h | 838 + .../qca-ssdk/src/include/hsl/hppe/hppe_flow.h | 2264 + .../src/include/hsl/hppe/hppe_flow_reg.h | 2856 + .../src/include/hsl/hppe/hppe_global.h | 1204 + .../src/include/hsl/hppe/hppe_global_reg.h | 988 + .../qca-ssdk/src/include/hsl/hppe/hppe_init.h | 117 + .../qca-ssdk/src/include/hsl/hppe/hppe_ip.h | 2696 + .../src/include/hsl/hppe/hppe_ip_reg.h | 2240 + .../qca-ssdk/src/include/hsl/hppe/hppe_mib.h | 1150 + .../src/include/hsl/hppe/hppe_mib_reg.h | 1006 + .../src/include/hsl/hppe/hppe_mirror.h | 92 + .../src/include/hsl/hppe/hppe_mirror_reg.h | 83 + 
.../src/include/hsl/hppe/hppe_policer.h | 1173 + .../src/include/hsl/hppe/hppe_policer_reg.h | 795 + .../src/include/hsl/hppe/hppe_portctrl.h | 1413 + .../src/include/hsl/hppe/hppe_portctrl_reg.h | 1119 + .../src/include/hsl/hppe/hppe_portvlan.h | 1627 + .../src/include/hsl/hppe/hppe_portvlan_reg.h | 1073 + .../src/include/hsl/hppe/hppe_pppoe.h | 173 + .../src/include/hsl/hppe/hppe_pppoe_reg.h | 127 + .../qca-ssdk/src/include/hsl/hppe/hppe_qm.h | 3811 ++ .../src/include/hsl/hppe/hppe_qm_reg.h | 2748 + .../qca-ssdk/src/include/hsl/hppe/hppe_qos.h | 2163 + .../src/include/hsl/hppe/hppe_qos_reg.h | 1676 + .../src/include/hsl/hppe/hppe_reg_access.h | 55 + .../qca-ssdk/src/include/hsl/hppe/hppe_rss.h | 249 + .../src/include/hsl/hppe/hppe_rss_reg.h | 231 + .../qca-ssdk/src/include/hsl/hppe/hppe_sec.h | 416 + .../src/include/hsl/hppe/hppe_sec_reg.h | 373 + .../src/include/hsl/hppe/hppe_servcode.h | 220 + .../src/include/hsl/hppe/hppe_servcode_reg.h | 150 + .../src/include/hsl/hppe/hppe_shaper.h | 1055 + .../src/include/hsl/hppe/hppe_shaper_reg.h | 730 + .../qca-ssdk/src/include/hsl/hppe/hppe_stp.h | 51 + .../src/include/hsl/hppe/hppe_stp_reg.h | 46 + .../src/include/hsl/hppe/hppe_trunk.h | 320 + .../src/include/hsl/hppe/hppe_trunk_reg.h | 230 + .../src/include/hsl/hppe/hppe_uniphy.h | 2440 + .../src/include/hsl/hppe/hppe_uniphy_reg.h | 1594 + .../qca-ssdk/src/include/hsl/hppe/hppe_vsi.h | 270 + .../src/include/hsl/hppe/hppe_vsi_reg.h | 192 + .../src/include/hsl/hppe/hppe_xgmacmib.h | 2252 + .../src/include/hsl/hppe/hppe_xgmacmib_reg.h | 1956 + .../src/include/hsl/hppe/hppe_xgportctrl.h | 2758 + .../include/hsl/hppe/hppe_xgportctrl_reg.h | 1627 + feeds/ipq807x/qca-ssdk/src/include/hsl/hsl.h | 244 + .../qca-ssdk/src/include/hsl/hsl_acl.h | 46 + .../qca-ssdk/src/include/hsl/hsl_api.h | 2619 + .../qca-ssdk/src/include/hsl/hsl_dev.h | 78 + .../qca-ssdk/src/include/hsl/hsl_lock.h | 30 + .../qca-ssdk/src/include/hsl/hsl_port_prop.h | 81 + 
.../qca-ssdk/src/include/hsl/hsl_shared_api.h | 220 + .../qca-ssdk/src/include/hsl/isis/isis_acl.h | 134 + .../qca-ssdk/src/include/hsl/isis/isis_api.h | 992 + .../src/include/hsl/isis/isis_cosmap.h | 109 + .../qca-ssdk/src/include/hsl/isis/isis_fdb.h | 162 + .../qca-ssdk/src/include/hsl/isis/isis_igmp.h | 162 + .../qca-ssdk/src/include/hsl/isis/isis_init.h | 51 + .../include/hsl/isis/isis_interface_ctrl.h | 64 + .../qca-ssdk/src/include/hsl/isis/isis_ip.h | 153 + .../src/include/hsl/isis/isis_leaky.h | 94 + .../qca-ssdk/src/include/hsl/isis/isis_led.h | 57 + .../qca-ssdk/src/include/hsl/isis/isis_mib.h | 59 + .../src/include/hsl/isis/isis_mirror.h | 73 + .../qca-ssdk/src/include/hsl/isis/isis_misc.h | 212 + .../qca-ssdk/src/include/hsl/isis/isis_nat.h | 153 + .../src/include/hsl/isis/isis_nat_helper.h | 50 + .../src/include/hsl/isis/isis_port_ctrl.h | 224 + .../src/include/hsl/isis/isis_portvlan.h | 216 + .../qca-ssdk/src/include/hsl/isis/isis_qos.h | 157 + .../qca-ssdk/src/include/hsl/isis/isis_rate.h | 89 + .../qca-ssdk/src/include/hsl/isis/isis_reg.h | 5229 ++ .../src/include/hsl/isis/isis_reg_access.h | 124 + .../qca-ssdk/src/include/hsl/isis/isis_sec.h | 53 + .../qca-ssdk/src/include/hsl/isis/isis_stp.h | 55 + .../src/include/hsl/isis/isis_trunk.h | 69 + .../qca-ssdk/src/include/hsl/isis/isis_vlan.h | 99 + .../src/include/hsl/isisc/isisc_acl.h | 143 + .../src/include/hsl/isisc/isisc_api.h | 1093 + .../src/include/hsl/isisc/isisc_cosmap.h | 109 + .../src/include/hsl/isisc/isisc_fdb.h | 168 + .../src/include/hsl/isisc/isisc_fdb_prv.h | 49 + .../src/include/hsl/isisc/isisc_igmp.h | 165 + .../src/include/hsl/isisc/isisc_init.h | 51 + .../include/hsl/isisc/isisc_interface_ctrl.h | 78 + .../qca-ssdk/src/include/hsl/isisc/isisc_ip.h | 153 + .../src/include/hsl/isisc/isisc_leaky.h | 94 + .../src/include/hsl/isisc/isisc_led.h | 57 + .../src/include/hsl/isisc/isisc_mib.h | 69 + .../src/include/hsl/isisc/isisc_mirror.h | 73 + .../src/include/hsl/isisc/isisc_misc.h | 238 
+ .../src/include/hsl/isisc/isisc_nat.h | 153 + .../src/include/hsl/isisc/isisc_nat_helper.h | 50 + .../src/include/hsl/isisc/isisc_port_ctrl.h | 226 + .../src/include/hsl/isisc/isisc_portvlan.h | 228 + .../src/include/hsl/isisc/isisc_qos.h | 180 + .../src/include/hsl/isisc/isisc_rate.h | 96 + .../src/include/hsl/isisc/isisc_reg.h | 5478 ++ .../src/include/hsl/isisc/isisc_reg_access.h | 70 + .../src/include/hsl/isisc/isisc_sec.h | 53 + .../src/include/hsl/isisc/isisc_stp.h | 55 + .../src/include/hsl/isisc/isisc_trunk.h | 69 + .../src/include/hsl/isisc/isisc_vlan.h | 99 + .../qca-ssdk/src/include/hsl/mp/mp_mib.h | 392 + .../qca-ssdk/src/include/hsl/mp/mp_mib_reg.h | 875 + .../qca-ssdk/src/include/hsl/mp/mp_portctrl.h | 113 + .../src/include/hsl/mp/mp_portctrl_reg.h | 219 + .../qca-ssdk/src/include/hsl/mp/mp_uniphy.h | 40 + .../src/include/hsl/mp/mp_uniphy_reg.h | 60 + .../src/include/hsl/phy/aquantia_phy.h | 394 + .../qca-ssdk/src/include/hsl/phy/f1_phy.h | 479 + .../qca-ssdk/src/include/hsl/phy/f2_phy.h | 399 + .../qca-ssdk/src/include/hsl/phy/hsl_phy.h | 722 + .../qca-ssdk/src/include/hsl/phy/malibu_phy.h | 676 + .../qca-ssdk/src/include/hsl/phy/mpge_led.h | 41 + .../qca-ssdk/src/include/hsl/phy/mpge_phy.h | 101 + .../src/include/hsl/phy/qca803x_phy.h | 425 + .../qca-ssdk/src/include/hsl/phy/qca808x.h | 135 + .../src/include/hsl/phy/qca808x_led.h | 52 + .../src/include/hsl/phy/qca808x_phy.h | 630 + .../src/include/hsl/phy/qca808x_ptp.h | 223 + .../src/include/hsl/phy/qca808x_ptp_api.h | 5858 ++ .../src/include/hsl/phy/qca808x_ptp_reg.h | 5853 ++ .../qca-ssdk/src/include/hsl/phy/sfp_phy.h | 47 + .../src/include/hsl/scomphy/scomphy_init.h | 42 + .../include/hsl/scomphy/scomphy_port_ctrl.h | 233 + .../include/hsl/scomphy/scomphy_reg_access.h | 62 + .../qca-ssdk/src/include/hsl/sfp/sfp.h | 956 + .../qca-ssdk/src/include/hsl/sfp/sfp_access.h | 53 + .../qca-ssdk/src/include/hsl/sfp/sfp_reg.h | 1319 + .../src/include/hsl/shiva/shiva_acl.h | 114 + 
.../src/include/hsl/shiva/shiva_api.h | 609 + .../src/include/hsl/shiva/shiva_fdb.h | 109 + .../src/include/hsl/shiva/shiva_igmp.h | 133 + .../src/include/hsl/shiva/shiva_init.h | 51 + .../src/include/hsl/shiva/shiva_leaky.h | 100 + .../src/include/hsl/shiva/shiva_led.h | 55 + .../src/include/hsl/shiva/shiva_mib.h | 65 + .../src/include/hsl/shiva/shiva_mirror.h | 79 + .../src/include/hsl/shiva/shiva_misc.h | 188 + .../src/include/hsl/shiva/shiva_port_ctrl.h | 151 + .../src/include/hsl/shiva/shiva_portvlan.h | 211 + .../src/include/hsl/shiva/shiva_qos.h | 164 + .../src/include/hsl/shiva/shiva_rate.h | 102 + .../src/include/hsl/shiva/shiva_reduced_acl.h | 54 + .../src/include/hsl/shiva/shiva_reg.h | 4075 ++ .../src/include/hsl/shiva/shiva_reg_access.h | 63 + .../src/include/hsl/shiva/shiva_stp.h | 61 + .../src/include/hsl/shiva/shiva_vlan.h | 83 + .../qca-ssdk/src/include/init/ssdk_clk.h | 313 + .../qca-ssdk/src/include/init/ssdk_dts.h | 188 + .../qca-ssdk/src/include/init/ssdk_hppe.h | 32 + .../qca-ssdk/src/include/init/ssdk_init.h | 390 + .../src/include/init/ssdk_interrupt.h | 29 + .../qca-ssdk/src/include/init/ssdk_led.h | 53 + .../qca-ssdk/src/include/init/ssdk_mp.h | 35 + .../qca-ssdk/src/include/init/ssdk_phy_i2c.h | 76 + .../qca-ssdk/src/include/init/ssdk_plat.h | 414 + .../qca-ssdk/src/include/init/ssdk_scomphy.h | 33 + .../qca-ssdk/src/include/init/ssdk_uci.h | 31 + .../qca-ssdk/src/include/ref/ref_api.h | 47 + .../qca-ssdk/src/include/ref/ref_fdb.h | 54 + .../qca-ssdk/src/include/ref/ref_mib.h | 50 + .../qca-ssdk/src/include/ref/ref_misc.h | 41 + .../qca-ssdk/src/include/ref/ref_port_ctrl.h | 62 + .../qca-ssdk/src/include/ref/ref_uci.h | 35 + .../qca-ssdk/src/include/ref/ref_vlan.h | 88 + .../qca-ssdk/src/include/ref/ref_vsi.h | 73 + .../qca-ssdk/src/include/sal/os/aos_lock.h | 47 + .../qca-ssdk/src/include/sal/os/aos_mem.h | 112 + .../qca-ssdk/src/include/sal/os/aos_timer.h | 46 + .../qca-ssdk/src/include/sal/os/aos_types.h | 180 + 
.../src/include/sal/os/linux/aos_lock_pvt.h | 47 + .../src/include/sal/os/linux/aos_mem_pvt.h | 60 + .../src/include/sal/os/linux/aos_timer_pvt.h | 43 + .../src/include/sal/os/linux/aos_types_pvt.h | 70 + .../sal/sd/linux/uk_interface/sw_api_ks.h | 34 + .../ipq807x/qca-ssdk/src/include/sal/sd/sd.h | 77 + .../qca-ssdk/src/include/shell_lib/shell.h | 43 + .../src/include/shell_lib/shell_config.h | 78 + .../qca-ssdk/src/include/shell_lib/shell_io.h | 484 + .../qca-ssdk/src/include/shell_lib/shell_sw.h | 32 + feeds/ipq807x/qca-ssdk/src/ko_Makefile | 3 + feeds/ipq807x/qca-ssdk/src/make/.build_number | 1 + feeds/ipq807x/qca-ssdk/src/make/components.mk | 36 + feeds/ipq807x/qca-ssdk/src/make/config.mk | 119 + feeds/ipq807x/qca-ssdk/src/make/defs.mk | 28 + feeds/ipq807x/qca-ssdk/src/make/linux_opt.mk | 698 + feeds/ipq807x/qca-ssdk/src/make/target.mk | 49 + feeds/ipq807x/qca-ssdk/src/make/tools.mk | 12 + feeds/ipq807x/qca-ssdk/src/src/adpt/Makefile | 12 + feeds/ipq807x/qca-ssdk/src/src/adpt/adpt.c | 658 + .../qca-ssdk/src/src/adpt/cppe/Makefile | 48 + .../src/src/adpt/cppe/adpt_cppe_flow.c | 77 + .../src/src/adpt/cppe/adpt_cppe_mib.c | 181 + .../src/src/adpt/cppe/adpt_cppe_misc.c | 121 + .../src/src/adpt/cppe/adpt_cppe_portctrl.c | 541 + .../qca-ssdk/src/src/adpt/cppe/adpt_cppe_qm.c | 60 + .../src/src/adpt/cppe/adpt_cppe_qos.c | 289 + .../src/src/adpt/cppe/adpt_cppe_uniphy.c | 304 + .../qca-ssdk/src/src/adpt/hppe/Makefile | 112 + .../src/src/adpt/hppe/adpt_hppe_acl.c | 4013 ++ .../qca-ssdk/src/src/adpt/hppe/adpt_hppe_bm.c | 452 + .../src/src/adpt/hppe/adpt_hppe_ctrlpkt.c | 385 + .../src/src/adpt/hppe/adpt_hppe_fdb.c | 1520 + .../src/src/adpt/hppe/adpt_hppe_flow.c | 1850 + .../qca-ssdk/src/src/adpt/hppe/adpt_hppe_ip.c | 1355 + .../src/src/adpt/hppe/adpt_hppe_mib.c | 1019 + .../src/src/adpt/hppe/adpt_hppe_mirror.c | 345 + .../src/src/adpt/hppe/adpt_hppe_misc.c | 1011 + .../src/src/adpt/hppe/adpt_hppe_policer.c | 1138 + .../src/src/adpt/hppe/adpt_hppe_portctrl.c | 5591 ++ 
.../src/src/adpt/hppe/adpt_hppe_portvlan.c | 2235 + .../src/src/adpt/hppe/adpt_hppe_pppoe.c | 330 + .../src/src/adpt/hppe/adpt_hppe_ptp.c | 1382 + .../qca-ssdk/src/src/adpt/hppe/adpt_hppe_qm.c | 1413 + .../src/src/adpt/hppe/adpt_hppe_qos.c | 1235 + .../src/src/adpt/hppe/adpt_hppe_rss_hash.c | 296 + .../src/src/adpt/hppe/adpt_hppe_sec.c | 261 + .../src/src/adpt/hppe/adpt_hppe_servcode.c | 172 + .../src/src/adpt/hppe/adpt_hppe_shaper.c | 1591 + .../src/src/adpt/hppe/adpt_hppe_stp.c | 135 + .../src/src/adpt/hppe/adpt_hppe_trunk.c | 345 + .../src/src/adpt/hppe/adpt_hppe_uniphy.c | 765 + .../src/src/adpt/hppe/adpt_hppe_vsi.c | 637 + .../ipq807x/qca-ssdk/src/src/adpt/mp/Makefile | 32 + .../src/src/adpt/mp/adpt_mp_interrupt.c | 119 + .../qca-ssdk/src/src/adpt/mp/adpt_mp_led.c | 138 + .../qca-ssdk/src/src/adpt/mp/adpt_mp_mib.c | 360 + .../src/src/adpt/mp/adpt_mp_portctrl.c | 1344 + .../qca-ssdk/src/src/adpt/mp/adpt_mp_uniphy.c | 398 + .../qca-ssdk/src/src/adpt/sfp/Makefile | 16 + .../qca-ssdk/src/src/adpt/sfp/adpt_sfp.c | 668 + feeds/ipq807x/qca-ssdk/src/src/api/Makefile | 12 + .../ipq807x/qca-ssdk/src/src/api/api_access.c | 177 + feeds/ipq807x/qca-ssdk/src/src/fal/Makefile | 140 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_acl.c | 741 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_bm.c | 422 + .../ipq807x/qca-ssdk/src/src/fal/fal_cosmap.c | 783 + .../qca-ssdk/src/src/fal/fal_ctrlpkt.c | 248 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_fdb.c | 1942 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_flow.c | 467 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_igmp.c | 961 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_init.c | 212 + .../qca-ssdk/src/src/fal/fal_interface_ctrl.c | 386 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_ip.c | 2370 + .../ipq807x/qca-ssdk/src/src/fal/fal_leaky.c | 353 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_led.c | 158 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_mib.c | 688 + .../ipq807x/qca-ssdk/src/src/fal/fal_mirror.c | 344 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_misc.c | 1772 + 
feeds/ipq807x/qca-ssdk/src/src/fal/fal_nat.c | 1341 + .../qca-ssdk/src/src/fal/fal_policer.c | 402 + .../qca-ssdk/src/src/fal/fal_port_ctrl.c | 3728 ++ .../qca-ssdk/src/src/fal/fal_portvlan.c | 2550 + .../ipq807x/qca-ssdk/src/src/fal/fal_pppoe.c | 609 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_ptp.c | 1208 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_qm.c | 1089 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_qos.c | 2000 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_rate.c | 840 + .../qca-ssdk/src/src/fal/fal_reg_access.c | 570 + .../qca-ssdk/src/src/fal/fal_rss_hash.c | 85 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_sec.c | 247 + .../qca-ssdk/src/src/fal/fal_servcode.c | 143 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_sfp.c | 508 + .../ipq807x/qca-ssdk/src/src/fal/fal_shaper.c | 601 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_stp.c | 130 + .../ipq807x/qca-ssdk/src/src/fal/fal_trunk.c | 324 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_vlan.c | 933 + feeds/ipq807x/qca-ssdk/src/src/fal/fal_vsi.c | 343 + feeds/ipq807x/qca-ssdk/src/src/hsl/Makefile | 39 + .../qca-ssdk/src/src/hsl/athena/Makefile | 44 + .../qca-ssdk/src/src/hsl/athena/athena_fdb.c | 639 + .../qca-ssdk/src/src/hsl/athena/athena_init.c | 290 + .../qca-ssdk/src/src/hsl/athena/athena_mib.c | 406 + .../src/src/hsl/athena/athena_port_ctrl.c | 811 + .../src/src/hsl/athena/athena_portvlan.c | 451 + .../src/src/hsl/athena/athena_reg_access.c | 272 + .../qca-ssdk/src/src/hsl/athena/athena_vlan.c | 622 + .../qca-ssdk/src/src/hsl/cppe/Makefile | 23 + .../qca-ssdk/src/src/hsl/cppe/cppe_loopback.c | 440 + .../qca-ssdk/src/src/hsl/cppe/cppe_portctrl.c | 488 + .../qca-ssdk/src/src/hsl/cppe/cppe_qos.c | 428 + .../qca-ssdk/src/src/hsl/dess/Makefile | 114 + .../qca-ssdk/src/src/hsl/dess/dess_acl.c | 2039 + .../src/src/hsl/dess/dess_acl_parse.c | 2452 + .../qca-ssdk/src/src/hsl/dess/dess_acl_prv.h | 126 + .../qca-ssdk/src/src/hsl/dess/dess_cosmap.c | 945 + .../qca-ssdk/src/src/hsl/dess/dess_fdb.c | 2431 + .../qca-ssdk/src/src/hsl/dess/dess_igmp.c | 
1147 + .../qca-ssdk/src/src/hsl/dess/dess_init.c | 343 + .../src/src/hsl/dess/dess_interface_ctrl.c | 299 + .../qca-ssdk/src/src/hsl/dess/dess_ip.c | 3668 ++ .../qca-ssdk/src/src/hsl/dess/dess_leaky.c | 526 + .../qca-ssdk/src/src/hsl/dess/dess_led.c | 670 + .../qca-ssdk/src/src/hsl/dess/dess_mib.c | 861 + .../qca-ssdk/src/src/hsl/dess/dess_mirror.c | 316 + .../qca-ssdk/src/src/hsl/dess/dess_misc.c | 2450 + .../src/src/hsl/dess/dess_multicast_acl.c | 1030 + .../qca-ssdk/src/src/hsl/dess/dess_nat.c | 3213 ++ .../src/src/hsl/dess/dess_port_ctrl.c | 3983 ++ .../qca-ssdk/src/src/hsl/dess/dess_portvlan.c | 2336 + .../qca-ssdk/src/src/hsl/dess/dess_psgmii.c | 722 + .../qca-ssdk/src/src/hsl/dess/dess_qos.c | 1562 + .../qca-ssdk/src/src/hsl/dess/dess_rate.c | 1672 + .../src/src/hsl/dess/dess_reg_access.c | 644 + .../qca-ssdk/src/src/hsl/dess/dess_sec.c | 787 + .../qca-ssdk/src/src/hsl/dess/dess_stp.c | 193 + .../qca-ssdk/src/src/hsl/dess/dess_trunk.c | 327 + .../qca-ssdk/src/src/hsl/dess/dess_vlan.c | 906 + .../qca-ssdk/src/src/hsl/garuda/Makefile | 84 + .../qca-ssdk/src/src/hsl/garuda/garuda_acl.c | 3034 ++ .../qca-ssdk/src/src/hsl/garuda/garuda_fdb.c | 1007 + .../qca-ssdk/src/src/hsl/garuda/garuda_igmp.c | 610 + .../qca-ssdk/src/src/hsl/garuda/garuda_init.c | 642 + .../src/src/hsl/garuda/garuda_leaky.c | 531 + .../qca-ssdk/src/src/hsl/garuda/garuda_led.c | 370 + .../qca-ssdk/src/src/hsl/garuda/garuda_mib.c | 378 + .../src/src/hsl/garuda/garuda_mirror.c | 318 + .../qca-ssdk/src/src/hsl/garuda/garuda_misc.c | 1009 + .../src/src/hsl/garuda/garuda_port_ctrl.c | 1069 + .../src/src/hsl/garuda/garuda_portvlan.c | 912 + .../qca-ssdk/src/src/hsl/garuda/garuda_qos.c | 1191 + .../qca-ssdk/src/src/hsl/garuda/garuda_rate.c | 851 + .../src/src/hsl/garuda/garuda_reduced_acl.c | 170 + .../src/src/hsl/garuda/garuda_reg_access.c | 271 + .../qca-ssdk/src/src/hsl/garuda/garuda_stp.c | 193 + .../qca-ssdk/src/src/hsl/garuda/garuda_vlan.c | 498 + .../qca-ssdk/src/src/hsl/horus/Makefile | 80 + 
.../qca-ssdk/src/src/hsl/horus/horus_fdb.c | 999 + .../qca-ssdk/src/src/hsl/horus/horus_igmp.c | 979 + .../qca-ssdk/src/src/hsl/horus/horus_init.c | 432 + .../qca-ssdk/src/src/hsl/horus/horus_leaky.c | 525 + .../qca-ssdk/src/src/hsl/horus/horus_led.c | 427 + .../qca-ssdk/src/src/hsl/horus/horus_mib.c | 377 + .../qca-ssdk/src/src/hsl/horus/horus_mirror.c | 317 + .../qca-ssdk/src/src/hsl/horus/horus_misc.c | 1396 + .../src/src/hsl/horus/horus_port_ctrl.c | 1061 + .../src/src/hsl/horus/horus_portvlan.c | 1184 + .../qca-ssdk/src/src/hsl/horus/horus_qos.c | 1260 + .../qca-ssdk/src/src/hsl/horus/horus_rate.c | 578 + .../src/src/hsl/horus/horus_reg_access.c | 273 + .../qca-ssdk/src/src/hsl/horus/horus_stp.c | 192 + .../qca-ssdk/src/src/hsl/horus/horus_vlan.c | 518 + .../qca-ssdk/src/src/hsl/hppe/Makefile | 110 + .../qca-ssdk/src/src/hsl/hppe/hppe_acl.c | 3164 ++ .../qca-ssdk/src/src/hsl/hppe/hppe_bm.c | 3027 ++ .../qca-ssdk/src/src/hsl/hppe/hppe_ctrlpkt.c | 581 + .../qca-ssdk/src/src/hsl/hppe/hppe_fdb.c | 2178 + .../qca-ssdk/src/src/hsl/hppe/hppe_flow.c | 5814 ++ .../qca-ssdk/src/src/hsl/hppe/hppe_global.c | 3117 ++ .../qca-ssdk/src/src/hsl/hppe/hppe_init.c | 234 + .../qca-ssdk/src/src/hsl/hppe/hppe_ip.c | 6892 +++ .../qca-ssdk/src/src/hsl/hppe/hppe_mib.c | 2167 + .../qca-ssdk/src/src/hsl/hppe/hppe_mirror.c | 194 + .../qca-ssdk/src/src/hsl/hppe/hppe_policer.c | 2928 + .../qca-ssdk/src/src/hsl/hppe/hppe_portctrl.c | 3441 ++ .../qca-ssdk/src/src/hsl/hppe/hppe_portvlan.c | 4108 ++ .../qca-ssdk/src/src/hsl/hppe/hppe_pppoe.c | 388 + .../qca-ssdk/src/src/hsl/hppe/hppe_qm.c | 9622 ++++ .../qca-ssdk/src/src/hsl/hppe/hppe_qos.c | 4548 ++ .../src/src/hsl/hppe/hppe_reg_access.c | 77 + .../qca-ssdk/src/src/hsl/hppe/hppe_rss.c | 585 + .../qca-ssdk/src/src/hsl/hppe/hppe_sec.c | 1041 + .../qca-ssdk/src/src/hsl/hppe/hppe_servcode.c | 511 + .../qca-ssdk/src/src/hsl/hppe/hppe_shaper.c | 2396 + .../qca-ssdk/src/src/hsl/hppe/hppe_stp.c | 85 + .../qca-ssdk/src/src/hsl/hppe/hppe_trunk.c | 790 
+ .../qca-ssdk/src/src/hsl/hppe/hppe_uniphy.c | 6120 +++ .../qca-ssdk/src/src/hsl/hppe/hppe_vsi.c | 650 + .../qca-ssdk/src/src/hsl/hppe/hppe_xgmacmib.c | 4210 ++ .../src/src/hsl/hppe/hppe_xgportctrl.c | 6074 +++ feeds/ipq807x/qca-ssdk/src/src/hsl/hsl_acl.c | 972 + feeds/ipq807x/qca-ssdk/src/src/hsl/hsl_api.c | 42 + feeds/ipq807x/qca-ssdk/src/src/hsl/hsl_dev.c | 705 + feeds/ipq807x/qca-ssdk/src/src/hsl/hsl_lock.c | 30 + .../qca-ssdk/src/src/hsl/hsl_port_prop.c | 226 + .../qca-ssdk/src/src/hsl/isis/Makefile | 124 + .../qca-ssdk/src/src/hsl/isis/isis_acl.c | 1904 + .../src/src/hsl/isis/isis_acl_parse.c | 2452 + .../qca-ssdk/src/src/hsl/isis/isis_acl_prv.h | 125 + .../qca-ssdk/src/src/hsl/isis/isis_cosmap.c | 637 + .../qca-ssdk/src/src/hsl/isis/isis_fdb.c | 2217 + .../qca-ssdk/src/src/hsl/isis/isis_igmp.c | 1143 + .../qca-ssdk/src/src/hsl/isis/isis_init.c | 340 + .../src/src/hsl/isis/isis_interface_ctrl.c | 1710 + .../qca-ssdk/src/src/hsl/isis/isis_ip.c | 2516 + .../qca-ssdk/src/src/hsl/isis/isis_leaky.c | 526 + .../qca-ssdk/src/src/hsl/isis/isis_led.c | 405 + .../src/src/hsl/isis/isis_mac_block.c | 388 + .../qca-ssdk/src/src/hsl/isis/isis_mib.c | 663 + .../qca-ssdk/src/src/hsl/isis/isis_mirror.c | 315 + .../qca-ssdk/src/src/hsl/isis/isis_misc.c | 1835 + .../src/src/hsl/isis/isis_multicast_acl.c | 985 + .../qca-ssdk/src/src/hsl/isis/isis_nat.c | 2457 + .../src/src/hsl/isis/isis_port_ctrl.c | 2356 + .../qca-ssdk/src/src/hsl/isis/isis_portvlan.c | 2130 + .../qca-ssdk/src/src/hsl/isis/isis_qos.c | 1325 + .../qca-ssdk/src/src/hsl/isis/isis_rate.c | 1549 + .../src/src/hsl/isis/isis_reg_access.c | 507 + .../qca-ssdk/src/src/hsl/isis/isis_sec.c | 787 + .../qca-ssdk/src/src/hsl/isis/isis_stp.c | 193 + .../qca-ssdk/src/src/hsl/isis/isis_trunk.c | 692 + .../qca-ssdk/src/src/hsl/isis/isis_vlan.c | 909 + .../qca-ssdk/src/src/hsl/isisc/Makefile | 122 + .../qca-ssdk/src/src/hsl/isisc/isisc_acl.c | 2035 + .../src/src/hsl/isisc/isisc_acl_parse.c | 2452 + 
.../src/src/hsl/isisc/isisc_acl_prv.h | 126 + .../qca-ssdk/src/src/hsl/isisc/isisc_cosmap.c | 647 + .../qca-ssdk/src/src/hsl/isisc/isisc_fdb.c | 2294 + .../qca-ssdk/src/src/hsl/isisc/isisc_igmp.c | 1147 + .../qca-ssdk/src/src/hsl/isisc/isisc_init.c | 337 + .../src/src/hsl/isisc/isisc_interface_ctrl.c | 2324 + .../qca-ssdk/src/src/hsl/isisc/isisc_ip.c | 2555 + .../qca-ssdk/src/src/hsl/isisc/isisc_leaky.c | 526 + .../qca-ssdk/src/src/hsl/isisc/isisc_led.c | 405 + .../qca-ssdk/src/src/hsl/isisc/isisc_mib.c | 860 + .../qca-ssdk/src/src/hsl/isisc/isisc_mirror.c | 315 + .../qca-ssdk/src/src/hsl/isisc/isisc_misc.c | 2207 + .../src/src/hsl/isisc/isisc_multicast_acl.c | 1024 + .../qca-ssdk/src/src/hsl/isisc/isisc_nat.c | 2468 + .../src/src/hsl/isisc/isisc_port_ctrl.c | 2797 + .../src/src/hsl/isisc/isisc_portvlan.c | 2291 + .../qca-ssdk/src/src/hsl/isisc/isisc_qos.c | 1639 + .../qca-ssdk/src/src/hsl/isisc/isisc_rate.c | 1662 + .../src/src/hsl/isisc/isisc_reg_access.c | 459 + .../qca-ssdk/src/src/hsl/isisc/isisc_sec.c | 787 + .../qca-ssdk/src/src/hsl/isisc/isisc_stp.c | 193 + .../qca-ssdk/src/src/hsl/isisc/isisc_trunk.c | 692 + .../qca-ssdk/src/src/hsl/isisc/isisc_vlan.c | 906 + .../ipq807x/qca-ssdk/src/src/hsl/mp/Makefile | 26 + .../ipq807x/qca-ssdk/src/src/hsl/mp/mp_mib.c | 816 + .../qca-ssdk/src/src/hsl/mp/mp_portctrl.c | 231 + .../qca-ssdk/src/src/hsl/mp/mp_uniphy.c | 55 + .../ipq807x/qca-ssdk/src/src/hsl/phy/Makefile | 96 + .../qca-ssdk/src/src/hsl/phy/aquantia_phy.c | 2205 + .../ipq807x/qca-ssdk/src/src/hsl/phy/f1_phy.c | 1534 + .../ipq807x/qca-ssdk/src/src/hsl/phy/f2_phy.c | 918 + .../qca-ssdk/src/src/hsl/phy/hsl_phy.c | 827 + .../qca-ssdk/src/src/hsl/phy/malibu_phy.c | 2871 + .../qca-ssdk/src/src/hsl/phy/mpge_led.c | 66 + .../qca-ssdk/src/src/hsl/phy/mpge_phy.c | 1117 + .../qca-ssdk/src/src/hsl/phy/qca803x_phy.c | 2341 + .../qca-ssdk/src/src/hsl/phy/qca808x.c | 645 + .../qca-ssdk/src/src/hsl/phy/qca808x_led.c | 272 + .../qca-ssdk/src/src/hsl/phy/qca808x_phc.c | 1472 + 
.../qca-ssdk/src/src/hsl/phy/qca808x_phy.c | 2265 + .../qca-ssdk/src/src/hsl/phy/qca808x_ptp.c | 2843 + .../src/src/hsl/phy/qca808x_ptp_api.c | 15319 ++++++ .../qca-ssdk/src/src/hsl/phy/sfp_phy.c | 253 + .../qca-ssdk/src/src/hsl/scomphy/Makefile | 28 + .../src/src/hsl/scomphy/scomphy_init.c | 196 + .../src/src/hsl/scomphy/scomphy_port_ctrl.c | 2078 + .../src/src/hsl/scomphy/scomphy_reg_access.c | 147 + .../ipq807x/qca-ssdk/src/src/hsl/sfp/Makefile | 16 + feeds/ipq807x/qca-ssdk/src/src/hsl/sfp/sfp.c | 2296 + .../qca-ssdk/src/src/hsl/sfp/sfp_access.c | 59 + .../qca-ssdk/src/src/hsl/shiva/Makefile | 84 + .../qca-ssdk/src/src/hsl/shiva/shiva_acl.c | 3171 ++ .../qca-ssdk/src/src/hsl/shiva/shiva_fdb.c | 1135 + .../qca-ssdk/src/src/hsl/shiva/shiva_igmp.c | 980 + .../qca-ssdk/src/src/hsl/shiva/shiva_init.c | 448 + .../qca-ssdk/src/src/hsl/shiva/shiva_leaky.c | 526 + .../qca-ssdk/src/src/hsl/shiva/shiva_led.c | 428 + .../qca-ssdk/src/src/hsl/shiva/shiva_mib.c | 663 + .../qca-ssdk/src/src/hsl/shiva/shiva_mirror.c | 317 + .../qca-ssdk/src/src/hsl/shiva/shiva_misc.c | 1749 + .../src/src/hsl/shiva/shiva_port_ctrl.c | 1384 + .../src/src/hsl/shiva/shiva_portvlan.c | 1859 + .../qca-ssdk/src/src/hsl/shiva/shiva_qos.c | 1288 + .../qca-ssdk/src/src/hsl/shiva/shiva_rate.c | 852 + .../src/src/hsl/shiva/shiva_reduced_acl.c | 178 + .../src/src/hsl/shiva/shiva_reg_access.c | 271 + .../qca-ssdk/src/src/hsl/shiva/shiva_stp.c | 193 + .../qca-ssdk/src/src/hsl/shiva/shiva_vlan.c | 524 + feeds/ipq807x/qca-ssdk/src/src/init/Makefile | 40 + .../ipq807x/qca-ssdk/src/src/init/ssdk_clk.c | 1305 + .../ipq807x/qca-ssdk/src/src/init/ssdk_dts.c | 1200 + .../ipq807x/qca-ssdk/src/src/init/ssdk_hppe.c | 1286 + .../ipq807x/qca-ssdk/src/src/init/ssdk_init.c | 3693 ++ .../qca-ssdk/src/src/init/ssdk_interrupt.c | 209 + .../ipq807x/qca-ssdk/src/src/init/ssdk_led.c | 159 + feeds/ipq807x/qca-ssdk/src/src/init/ssdk_mp.c | 120 + .../qca-ssdk/src/src/init/ssdk_phy_i2c.c | 462 + 
.../ipq807x/qca-ssdk/src/src/init/ssdk_plat.c | 1417 + .../qca-ssdk/src/src/init/ssdk_scomphy.c | 40 + .../ipq807x/qca-ssdk/src/src/init/ssdk_uci.c | 196 + feeds/ipq807x/qca-ssdk/src/src/ref/Makefile | 45 + feeds/ipq807x/qca-ssdk/src/src/ref/ref_fdb.c | 177 + feeds/ipq807x/qca-ssdk/src/src/ref/ref_mib.c | 520 + feeds/ipq807x/qca-ssdk/src/src/ref/ref_misc.c | 193 + .../qca-ssdk/src/src/ref/ref_port_ctrl.c | 775 + feeds/ipq807x/qca-ssdk/src/src/ref/ref_uci.c | 11839 ++++ feeds/ipq807x/qca-ssdk/src/src/ref/ref_vlan.c | 562 + feeds/ipq807x/qca-ssdk/src/src/ref/ref_vsi.c | 494 + feeds/ipq807x/qca-ssdk/src/src/sal/Makefile | 12 + .../ipq807x/qca-ssdk/src/src/sal/sd/Makefile | 12 + .../qca-ssdk/src/src/sal/sd/linux/Makefile | 12 + .../src/sal/sd/linux/uk_interface/Makefile | 34 + .../sd/linux/uk_interface/sw_api_ks_ioctl.c | 316 + .../sd/linux/uk_interface/sw_api_ks_netlink.c | 789 + feeds/ipq807x/qca-ssdk/src/src/sal/sd/sd.c | 371 + .../qca-ssdk/src/src/shell_lib/Makefile | 12 + .../qca-ssdk/src/src/shell_lib/shell.c | 555 + .../qca-ssdk/src/src/shell_lib/shell_config.c | 1513 + .../qca-ssdk/src/src/shell_lib/shell_io.c | 14042 +++++ .../qca-ssdk/src/src/shell_lib/shell_sw.c | 53 + feeds/ipq807x/qca-ssdk/src/src/util/Makefile | 12 + feeds/ipq807x/qca-ssdk/src/src/util/util.c | 486 + feeds/ipq807x/qca-thermald-10.4/Makefile | 44 + .../qca-thermald-10.4/files/thermal.config | 2 + .../qca-thermald-10.4/files/thermal.init | 23 + .../ipq807x/qca-thermald-10.4/src/Android.mk | 236 + feeds/ipq807x/qca-thermald-10.4/src/Makefile | 74 + .../ipq807x/qca-thermald-10.4/src/Makefile.am | 35 + .../qca-thermald-10.4/src/adc-sensor.c | 119 + .../qca-thermald-10.4/src/adc-sensor.h | 17 + .../qca-thermald-10.4/src/bcl-sensor.c | 674 + .../qca-thermald-10.4/src/bcl-sensor.h | 21 + .../qca-thermald-10.4/src/configure.ac | 67 + .../qca-thermald-10.4/src/gen-sensor.c | 76 + .../qca-thermald-10.4/src/gen-sensor.h | 17 + .../src/ipq-thermald-8064.conf | 77 + .../src/ipq-thermald-8066.conf | 77 
+ .../src/ipq-thermald-8069.conf | 77 + .../src/modem_mitigation_oncrpc.c | 480 + .../src/modem_mitigation_qmi.c | 565 + .../qca-thermald-10.4/src/modem_sensor_qmi.c | 470 + .../qca-thermald-10.4/src/pm8821-sensor.c | 242 + .../qca-thermald-10.4/src/pm8821-sensor.h | 19 + .../qca-thermald-10.4/src/qmi-ts-sensor.c | 372 + .../qca-thermald-10.4/src/qmi-ts-sensor.h | 20 + .../ipq807x/qca-thermald-10.4/src/readme.txt | 173 + .../qca-thermald-10.4/src/sensors-7x30.c | 159 + .../qca-thermald-10.4/src/sensors-8660.c | 223 + .../qca-thermald-10.4/src/sensors-8960.c | 991 + .../qca-thermald-10.4/src/sensors-8974.c | 322 + .../qca-thermald-10.4/src/sensors-8x25.c | 182 + .../qca-thermald-10.4/src/sensors-hw.h | 95 + .../qca-thermald-10.4/src/sensors-ipq.c | 417 + feeds/ipq807x/qca-thermald-10.4/src/thermal.c | 211 + feeds/ipq807x/qca-thermald-10.4/src/thermal.h | 391 + .../qca-thermald-10.4/src/thermal_actions.c | 1418 + .../qca-thermald-10.4/src/thermal_client.c | 357 + .../qca-thermald-10.4/src/thermal_client.h | 22 + .../qca-thermald-10.4/src/thermal_config.c | 1141 + .../src/thermal_lib_common.c | 146 + .../src/thermal_lib_common.h | 38 + .../thermal_mitigation_device_service_v01.c | 231 + .../thermal_mitigation_device_service_v01.h | 338 + .../qca-thermald-10.4/src/thermal_monitor.c | 318 + .../src/thermal_sensor_service_v01.c | 179 + .../src/thermal_sensor_service_v01.h | 272 + .../qca-thermald-10.4/src/thermal_server.c | 467 + .../qca-thermald-10.4/src/thermal_server.h | 25 + .../qca-thermald-10.4/src/thermal_util.c | 492 + .../qca-thermald-10.4/src/thermald-7x30.conf | 9 + .../qca-thermald-10.4/src/thermald-8064.conf | 92 + .../src/thermald-8064ab.conf | 92 + .../qca-thermald-10.4/src/thermald-8660.conf | 15 + .../qca-thermald-10.4/src/thermald-8930.conf | 79 + .../src/thermald-8930ab.conf | 79 + .../qca-thermald-10.4/src/thermald-8960.conf | 44 + .../src/thermald-8960ab.conf | 44 + .../qca-thermald-10.4/src/thermald-8974.conf | 80 + 
.../src/thermald-8x25-msm1-pmic_therm.conf | 10 + .../src/thermald-8x25-msm2-msm_therm.conf | 10 + .../src/thermald-8x25-msm2-pmic_therm.conf | 10 + .../src/thermald.conf_sample | 22 + .../qca-thermald-10.4/src/tsens-sensor.c | 408 + .../qca-thermald-10.4/src/tsens-sensor.h | 25 + .../0009-include-set-kernel-version.mk.patch | 4 +- ...0002-ipq807x-add-qsdk-kernel-support.patch | 85 - ...ystem-patches-required-by-the-target.patch | 4817 ++ ...0003-ipq807x-add-qsdk-kernel-support.patch | 117 - ...x-modules-fix-some-v4.4-dependencies.patch | 643 + ...x-add-the-Qualcomm-AX-target-support.patch | 13 + .../0002-rtkmipsel-add-kernel-version.patch | 18 +- ...kmipsel-select-gcc-5-as-the-compiler.patch | 15 +- profiles/cig_wf196.yml | 3 + profiles/wifi-ax.yml | 3 +- 1591 files changed, 793237 insertions(+), 76178 deletions(-) create mode 100644 feeds/ipq807x/ipq807x/config-4.4 delete mode 100644 feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/ipq8074-hk14.dts delete mode 100755 feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-q14.dts delete mode 100755 feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-hfcl-ion4x.dts delete mode 100644 feeds/ipq807x/ipq807x/ipq50xx/config-5.4 create mode 100644 feeds/ipq807x/ipq807x/ipq50xx/config-default create mode 100644 feeds/ipq807x/ipq807x/ipq50xx/config-lowmem create mode 100644 feeds/ipq807x/ipq807x/patches/001-backport_kbuild_fix.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0001-crypto-lib-tidy-up-lib-crypto-Kconfig-and-Makefile.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0002-crypto-chacha-move-existing-library-code-into-lib-cr.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0003-crypto-x86-chacha-depend-on-generic-chacha-library-i.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0004-crypto-x86-chacha-expose-SIMD-ChaCha-routine-as-libr.patch delete mode 100644 
feeds/ipq807x/ipq807x/patches/080-wireguard-0005-crypto-arm64-chacha-depend-on-generic-chacha-library.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0006-crypto-arm64-chacha-expose-arm64-ChaCha-routine-as-l.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0007-crypto-arm-chacha-import-Eric-Biggers-s-scalar-accel.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0008-crypto-arm-chacha-remove-dependency-on-generic-ChaCh.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0009-crypto-arm-chacha-expose-ARM-ChaCha-routine-as-libra.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0010-crypto-mips-chacha-import-32r2-ChaCha-code-from-Zinc.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0011-crypto-mips-chacha-wire-up-accelerated-32r2-code-fro.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0012-crypto-chacha-unexport-chacha_generic-routines.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0013-crypto-poly1305-move-core-routines-into-a-separate-l.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0014-crypto-x86-poly1305-unify-Poly1305-state-struct-with.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0015-crypto-poly1305-expose-init-update-final-library-int.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0016-crypto-x86-poly1305-depend-on-generic-library-not-ge.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0017-crypto-x86-poly1305-expose-existing-driver-as-poly13.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0018-crypto-arm64-poly1305-incorporate-OpenSSL-CRYPTOGAMS.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0019-crypto-arm-poly1305-incorporate-OpenSSL-CRYPTOGAMS-N.patch delete mode 100644 
feeds/ipq807x/ipq807x/patches/080-wireguard-0020-crypto-mips-poly1305-incorporate-OpenSSL-CRYPTOGAMS-.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0021-crypto-blake2s-generic-C-library-implementation-and-.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0022-crypto-testmgr-add-test-cases-for-Blake2s.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0023-crypto-blake2s-implement-generic-shash-driver.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0024-crypto-blake2s-x86_64-SIMD-implementation.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0025-crypto-curve25519-generic-C-library-implementations.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0026-crypto-curve25519-add-kpp-selftest.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0027-crypto-curve25519-implement-generic-KPP-driver.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0028-crypto-lib-curve25519-work-around-Clang-stack-spilli.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0029-crypto-curve25519-x86_64-library-and-KPP-implementat.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0030-crypto-arm-curve25519-import-Bernstein-and-Schwabe-s.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0031-crypto-arm-curve25519-wire-up-NEON-implementation.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0032-crypto-chacha20poly1305-import-construction-and-self.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0033-crypto-lib-chacha20poly1305-reimplement-crypt_from_s.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0034-crypto-chacha_generic-remove-unnecessary-setkey-func.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0035-crypto-x86-chacha-only-unregister-algorithms-if-regi.patch delete mode 100644 
feeds/ipq807x/ipq807x/patches/080-wireguard-0036-crypto-lib-chacha20poly1305-use-chacha20_crypt.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0037-crypto-arch-conditionalize-crypto-api-in-arch-glue-f.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0038-crypto-chacha-fix-warning-message-in-header-file.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0039-crypto-arm-curve25519-add-arch-specific-key-generati.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0040-crypto-lib-curve25519-re-add-selftests.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0041-crypto-poly1305-add-new-32-and-64-bit-generic-versio.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0042-crypto-x86-poly1305-import-unmodified-cryptogams-imp.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0043-crypto-x86-poly1305-wire-up-faster-implementations-f.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0044-crypto-arm-arm64-mips-poly1305-remove-redundant-non-.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0045-crypto-curve25519-Fix-selftest-build-error.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0046-crypto-x86-poly1305-fix-.gitignore-typo.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0047-crypto-chacha20poly1305-add-back-missing-test-vector.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0048-crypto-x86-poly1305-emit-does-base-conversion-itself.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0049-crypto-arm-chacha-fix-build-failured-when-kernel-mod.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0050-crypto-Kconfig-allow-tests-to-be-disabled-when-manag.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0051-crypto-chacha20poly1305-prevent-integer-overflow-on-.patch delete mode 100644 
feeds/ipq807x/ipq807x/patches/080-wireguard-0052-crypto-x86-curve25519-support-assemblers-with-no-adx.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0053-crypto-arm64-chacha-correctly-walk-through-blocks.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0054-crypto-x86-curve25519-replace-with-formally-verified.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0055-crypto-x86-curve25519-leave-r12-as-spare-register.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0056-crypto-arm-64-poly1305-add-artifact-to-.gitignore-fi.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0057-crypto-arch-lib-limit-simd-usage-to-4k-chunks.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0058-crypto-lib-chacha20poly1305-Add-missing-function-dec.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0059-crypto-x86-chacha-sse3-use-unaligned-loads-for-state.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0060-crypto-x86-curve25519-Remove-unused-carry-variables.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0061-crypto-arm-curve25519-include-linux-scatterlist.h.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0062-crypto-arm-poly1305-Add-prototype-for-poly1305_block.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0063-crypto-curve25519-x86_64-Use-XORL-r32-32.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0064-crypto-poly1305-x86_64-Use-XORL-r32-32.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0065-crypto-x86-poly1305-Remove-assignments-with-no-effec.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0066-crypto-x86-poly1305-add-back-a-needed-assignment.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0067-crypto-Kconfig-CRYPTO_MANAGER_EXTRA_TESTS-requires-t.patch delete mode 100644 
feeds/ipq807x/ipq807x/patches/080-wireguard-0068-crypto-arm-chacha-neon-optimize-for-non-block-size-m.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0069-crypto-arm64-chacha-simplify-tail-block-handling.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0070-crypto-lib-chacha20poly1305-define-empty-module-exit.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0071-crypto-arm-chacha-neon-add-missing-counter-increment.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0072-net-WireGuard-secure-network-tunnel.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0073-wireguard-selftests-import-harness-makefile-for-test.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0074-wireguard-Kconfig-select-parent-dependency-for-crypt.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0075-wireguard-global-fix-spelling-mistakes-in-comments.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0076-wireguard-main-remove-unused-include-linux-version.h.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0077-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0078-wireguard-selftests-remove-ancient-kernel-compatibil.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0079-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0080-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0081-wireguard-allowedips-fix-use-after-free-in-root_remo.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0082-wireguard-noise-reject-peers-with-low-order-public-k.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0083-wireguard-selftests-ensure-non-addition-of-peers-wit.patch delete 
mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0084-wireguard-selftests-tie-socket-waiting-to-target-pid.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0085-wireguard-device-use-icmp_ndo_send-helper.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0086-wireguard-selftests-reduce-complexity-and-fix-make-r.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0087-wireguard-receive-reset-last_under_load-to-zero.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0088-wireguard-send-account-for-mtu-0-devices.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0089-wireguard-socket-remove-extra-call-to-synchronize_ne.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0090-wireguard-selftests-remove-duplicated-include-sys-ty.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0091-wireguard-queueing-account-for-skb-protocol-0.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0092-wireguard-receive-remove-dead-code-from-default-pack.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0093-wireguard-noise-error-out-precomputed-DH-during-hand.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0094-wireguard-send-remove-errant-newline-from-packet_enc.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0095-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0096-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0097-wireguard-selftests-use-normal-kernel-stack-size-on-.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0098-wireguard-socket-remove-errant-restriction-on-loopin.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0099-wireguard-send-receive-cond_resched-when-processing-.patch delete 
mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0100-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0101-wireguard-send-receive-use-explicit-unlikely-branch-.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0102-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0103-wireguard-noise-read-preshared-key-while-taking-lock.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0104-wireguard-queueing-preserve-flow-hash-across-packet-.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0105-wireguard-noise-separate-receive-counter-from-send-c.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0106-wireguard-noise-do-not-assign-initiation-time-in-if-.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0107-wireguard-device-avoid-circular-netns-references.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0108-wireguard-receive-account-for-napi_gro_receive-never.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0109-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0110-wireguard-implement-header_ops-parse_protocol-for-AF.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0111-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0112-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0113-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0114-wireguard-noise-take-lock-when-removing-handshake-en.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0115-wireguard-peerlookup-take-lock-before-checking-hash-.patch 
delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0116-wireguard-selftests-check-that-route_me_harder-packe.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0117-wireguard-avoid-double-unlikely-notation-when-using-.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0118-wireguard-socket-remove-bogus-__be32-annotation.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0119-wireguard-selftests-test-multiple-parallel-streams.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0120-wireguard-peer-put-frequently-used-members-above-cac.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0121-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0122-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0123-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0124-crypto-mips-poly1305-enable-for-all-MIPS-processors.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0125-crypto-mips-add-poly1305-core.S-to-.gitignore.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0126-crypto-poly1305-fix-poly1305_core_setkey-declaration.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0127-wireguard-selftests-remove-old-conntrack-kconfig-val.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0128-wireguard-selftests-make-sure-rp_filter-is-disabled-.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0129-wireguard-do-not-use-O3.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0130-wireguard-use-synchronize_net-rather-than-synchroniz.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0131-wireguard-peer-allocate-in-kmem_cache.patch delete mode 100644 
feeds/ipq807x/ipq807x/patches/080-wireguard-0132-wireguard-allowedips-initialize-list-head-in-selftes.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0133-wireguard-allowedips-remove-nodes-in-O-1.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0134-wireguard-allowedips-allocate-nodes-in-kmem_cache.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/080-wireguard-0135-wireguard-allowedips-free-empty-intermediate-nodes-w.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/100-dts.patch create mode 100644 feeds/ipq807x/ipq807x/patches/100-qrtr-ns.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/101-aq_phy.patch create mode 100644 feeds/ipq807x/ipq807x/patches/101-squashfs.patch create mode 100644 feeds/ipq807x/ipq807x/patches/102-aq-phy.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/102-fix-null-pointer-dereference-in-iptunnel_xmit.patch create mode 100644 feeds/ipq807x/ipq807x/patches/103-fix-dtc-gcc10-build.patch create mode 100644 feeds/ipq807x/ipq807x/patches/106-pstore.patch delete mode 100644 feeds/ipq807x/ipq807x/patches/190-revert-threaded-NAPI.patch create mode 100644 feeds/ipq807x/ipq807x/patches/200-bpf_backport.patch rename feeds/ipq807x/ipq807x/patches/{200-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch => 210-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch} (89%) rename feeds/ipq807x/ipq807x/patches/{201-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch => 211-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch} (72%) rename feeds/ipq807x/ipq807x/patches/{202-v5.12-net-add-sysfs-attribute-to-control-napi-threaded-mod.patch => 212-v5.12-net-add-sysfs-attribute-to-control-napi-threaded-mod.patch} (75%) rename feeds/ipq807x/ipq807x/patches/{203-v5.12-net-fix-race-between-napi-kthread-mode-and-busy-poll.patch => 213-v5.12-net-fix-race-between-napi-kthread-mode-and-busy-poll.patch} (63%) rename 
feeds/ipq807x/ipq807x/patches/{204-v5.12-net-fix-hangup-on-napi_disable-for-threaded-napi.patch => 214-v5.12-net-fix-hangup-on-napi_disable-for-threaded-napi.patch} (93%) create mode 100644 feeds/ipq807x/ipq807x/patches/220-net-sched-add-clsact-qdisc.patch create mode 100644 feeds/ipq807x/kmod-sched-cake/Makefile create mode 100644 feeds/ipq807x/kmod-sched-cake/patches/100-compat.patch delete mode 100644 feeds/ipq807x/qca-nss-dp/Makefile.orig create mode 100644 feeds/ipq807x/qca-nss-dp/src/Makefile create mode 100644 feeds/ipq807x/qca-nss-dp/src/exports/nss_dp_api_if.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq50xx/nss_ipq50xx.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq50xx/nss_ipq50xx.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq60xx/nss_ipq60xx.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq60xx/nss_ipq60xx.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq807x/nss_ipq807x.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq807x/nss_ipq807x.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_cfg.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_data_plane.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_data_plane.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_regs.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_tx_rx.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_dev.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_if.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_reg.h rename feeds/ipq807x/{ipq807x/files/arch/arm/boot/dts/qcom-ipq5018-q14.dts => qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_dev.h} (68%) create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_if.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_reg.h create mode 100644 
feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_dev.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_if.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_reg.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/include/edma.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/include/nss_dp_hal.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/include/nss_dp_hal_if.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_data_plane.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_data_plane.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dma_desc.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dp_cfg.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dp_tx_rx.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/include/nss_dp_dev.h create mode 100644 feeds/ipq807x/qca-nss-dp/src/nss_dp_attach.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/nss_dp_ethtools.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/nss_dp_main.c create mode 100644 feeds/ipq807x/qca-nss-dp/src/nss_dp_switchdev.c delete mode 100644 feeds/ipq807x/qca-nss-drv/200-napi_threaded.patch create mode 100644 feeds/ipq807x/qca-nss-drv/src/Makefile create mode 100644 feeds/ipq807x/qca-nss-drv/src/Makefile.fsm create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_fsm9010.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq40xx.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq50xx.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq50xx_64.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq60xx.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq60xx_64.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq806x.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq807x.h create mode 100644 
feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq807x_64.h create mode 100755 feeds/ipq807x/qca-nss-drv/src/exports/nss_api_if.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_bridge.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_c2c_rx.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_c2c_tx.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_capwap.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_clmap.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_cmn.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_crypto.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_crypto_cmn.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_def.h create mode 100755 feeds/ipq807x/qca-nss-drv/src/exports/nss_dma.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_dtls.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_dtls_cmn.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_dynamic_interface.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_edma.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_eth_rx.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_freq.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_gre.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir_lag.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir_mark.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_tunnel.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_if.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_igs.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsec.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsec_cmn.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsecmgr.h create mode 100644 
feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv4.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv4_reasm.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv6.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv6_reasm.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_l2tpv2.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_lag.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_lso_rx.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_map_t.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_match.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_mirror.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_n2h.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_oam.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_phy_if.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_pm.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_portid.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_ppe.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_ppe_vp.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_pppoe.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_pptp.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_profiler.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_project.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_pvxlan.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_qrfs.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_qvpn.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_rmnet_rx.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_rps.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_shaper.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_sjack.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_stats_public.h 
create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_tls.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_trustsec_tx.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_tstamp.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_tun6rd.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_tunipip6.h create mode 100755 feeds/ipq807x/qca-nss-drv/src/exports/nss_udp_st.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_unaligned.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_virt_if.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_vlan.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_vxlan.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_ext_vdev_if.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_mac_db_if.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_mesh.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_vdev.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/exports/nss_wifili_if.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_bridge.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_bridge_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_bridge_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_stats.c create mode 100644 
feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_capwap.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_capwap_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_capwap_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_capwap_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_capwap_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_capwap_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_capwap_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_clmap.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_clmap_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_clmap_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_clmap_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_clmap_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_clmap_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_clmap_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_cmn.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_core.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_core.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_coredump.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_crypto.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_crypto_log.c create 
mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_crypto_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/include/nss_data_plane_hal.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq50xx.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq60xx.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq807x.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_data_plane/include/nss_data_plane.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane_common.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane_gmac.c create mode 100755 feeds/ipq807x/qca-nss-drv/src/nss_dma.c create mode 100755 feeds/ipq807x/qca-nss-drv/src/nss_dma_log.c create mode 100755 feeds/ipq807x/qca-nss-drv/src/nss_dma_log.h create mode 100755 feeds/ipq807x/qca-nss-drv/src/nss_dma_stats.c create mode 100755 feeds/ipq807x/qca-nss-drv/src/nss_dma_stats.h create mode 100755 feeds/ipq807x/qca-nss-drv/src/nss_dma_strings.c create mode 100755 feeds/ipq807x/qca-nss-drv/src/nss_dma_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_drv_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_drv_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_drv_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_drv_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dscp_map.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_strings.c 
create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dtls_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_edma.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_edma_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_edma_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_edma_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_edma_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_eth_rx.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_freq.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_freq_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_freq_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_freq_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_freq_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gmac_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gmac_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_log.c create mode 100644 
feeds/ipq807x/qca-nss-drv/src/nss_gre_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_stats.h 
create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_hal/fsm9010/nss_hal_pvt.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_hal.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_hal_ops.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_regs.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq50xx/nss_hal_pvt.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq60xx/nss_hal_pvt.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq806x/nss_clocks.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq806x/nss_hal_pvt.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq807x/nss_hal_pvt.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_hal/nss_hal.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_hlos_if.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_if.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_if_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_if_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_igs.c create mode 
100644 feeds/ipq807x/qca-nss-drv/src/nss_igs_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_igs_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_init.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipsec.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipsec_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipsec_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv4.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv4_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv4_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv4_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv4_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv4_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv6.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv6_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_stats.h create mode 100644 
feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv6_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv6_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv6_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ipv6_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_lag.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_lag_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_lag_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_lso_rx.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_map_t.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_map_t_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_map_t_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_map_t_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_map_t_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_map_t_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_map_t_strings.h create mode 100644 
feeds/ipq807x/qca-nss-drv/src/nss_match.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_match_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_match_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_match_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_match_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_match_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_match_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_meminfo.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_meminfo.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_mirror.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_mirror_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_mirror_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_mirror_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_mirror_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_mirror_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_mirror_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_n2h.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_n2h_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_n2h_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_n2h_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_n2h_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_oam.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_oam_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_oam_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_phys_if.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_phys_if.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pm.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pm.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_portid.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_portid_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_portid_log.h create mode 
100644 feeds/ipq807x/qca-nss-drv/src/nss_portid_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_portid_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pppoe.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pppoe_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pppoe_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pppoe_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pppoe_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pppoe_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pppoe_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pptp.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pptp_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pptp_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pptp_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pptp_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pptp_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pptp_strings.h create mode 100755 feeds/ipq807x/qca-nss-drv/src/nss_profiler.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_project.c create mode 100644 
feeds/ipq807x/qca-nss-drv/src/nss_pvxlan.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qrfs.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qrfs_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qrfs_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qrfs_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qrfs_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qvpn.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qvpn_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qvpn_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qvpn_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qvpn_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qvpn_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_qvpn_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_rps.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_shaper.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_sjack.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_sjack_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_sjack_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_sjack_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_sjack_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_strings.h create mode 
100644 feeds/ipq807x/qca-nss-drv/src/nss_tls.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tls_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tls_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tls_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tls_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tls_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tls_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tstamp.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tstamp_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tstamp_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tun6rd.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tun6rd_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tun6rd_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tunipip6.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tx_msg_sync.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tx_msg_sync.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_tx_rx_common.h create mode 100755 feeds/ipq807x/qca-nss-drv/src/nss_udp_st.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_udp_st_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_udp_st_log.h create mode 100755 feeds/ipq807x/qca-nss-drv/src/nss_udp_st_stats.c create mode 
100755 feeds/ipq807x/qca-nss-drv/src/nss_udp_st_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_udp_st_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_udp_st_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_unaligned.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_unaligned_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_unaligned_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_unaligned_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_unaligned_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_virt_if.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_virt_if_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_virt_if_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_vlan.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_vlan_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_vlan_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_vxlan.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_vxlan_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_vxlan_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_vxlan_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_vxlan_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_mac_db.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh.c create mode 100644 
feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_strings.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifi_vdev.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifili.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifili_log.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifili_log.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifili_stats.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifili_stats.h create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifili_strings.c create mode 100644 feeds/ipq807x/qca-nss-drv/src/nss_wifili_strings.h mode change 100644 => 100755 feeds/ipq807x/qca-ssdk-shell/Makefile create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/Makefile create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/config create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/api/api_access.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/api/api_desc.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/api/sw_api.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/api/sw_ioctl.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/common/aos_head.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/common/shared_func.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/common/sw.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/common/sw_config.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/common/sw_error.h create mode 100755 
feeds/ipq807x/qca-ssdk-shell/src/include/common/util.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_acl.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_api.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_bm.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_cosmap.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_ctrlpkt.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_fdb.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_flow.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_igmp.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_init.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_interface_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_ip.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_leaky.h create mode 100644 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_led.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_mib.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_mirror.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_misc.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_multi.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_nat.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_policer.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_port_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_portvlan.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_pppoe.h create mode 100644 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_ptp.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_qm.h create mode 100644 
feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_qos.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_rate.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_reg_access.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_rss_hash.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_sec.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_servcode.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_sfp.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_shaper.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_stp.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_trunk.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_type.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_uk_if.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_vlan.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_vsi.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/init/ssdk_init.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/init/ssdk_plat.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/ref/ref_api.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/ref/ref_vlan.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_lock.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_mem.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_timer.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_types.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_lock_pvt.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_mem_pvt.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_timer_pvt.h create mode 100755 
feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_types_pvt.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/sal/sd/linux/uk_interface/sw_api_us.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/sal/sd/sd.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_config.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_io.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_lib.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_sw.h create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/make/components.mk create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/make/config.mk create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/make/defs.mk create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/make/linux_opt.mk create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/make/target.mk create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/make/tools.mk create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/api/Makefile create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/api/api_access.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/Makefile create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_acl.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_bm.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_cosmap.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ctrlpkt.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_fdb.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_flow.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_igmp.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_init.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_interface_ctrl.c create mode 100755 
feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ip.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_leaky.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_led.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_mib.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_mirror.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_misc.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_nat.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_policer.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_port_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_portvlan.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_pppoe.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ptp.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_qm.c create mode 100644 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_qos.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_rate.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_reg_access.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_rss_hash.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_sec.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_servcode.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_sfp.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_shaper.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_stp.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_trunk.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_uk_if.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_vlan.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_vsi.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/ref/Makefile 
create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/ref/ref_vlan.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/sal/Makefile create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/Makefile create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/Makefile create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/Makefile create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/sw_api_us_ioctl.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/sw_api_us_netlink.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/sd.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/shell/Makefile create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell_config.c create mode 100644 feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell_io.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell_lib.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell_module_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell_sw.c delete mode 100755 feeds/ipq807x/qca-ssdk/Makefile.orig create mode 100644 feeds/ipq807x/qca-ssdk/src/ChangeLog create mode 100755 feeds/ipq807x/qca-ssdk/src/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/host_helper.c create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/lib/nat_helper_dt.c create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/lib/nat_helper_dt.h create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/lib/nat_helper_hsl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/lib/nat_helper_hsl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/napt_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/napt_acl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/napt_helper.c 
create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/napt_helper.h create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/napt_procfs.c create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/nat_helper.c create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/nat_helper.h create mode 100755 feeds/ipq807x/qca-ssdk/src/app/nathelper/linux/nat_ipt_helper.c create mode 100644 feeds/ipq807x/qca-ssdk/src/config create mode 100755 feeds/ipq807x/qca-ssdk/src/include/adpt/adpt.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/adpt/cppe/adpt_cppe_flow.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/adpt/cppe/adpt_cppe_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/adpt/cppe/adpt_cppe_misc.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/adpt/cppe/adpt_cppe_portctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/adpt/cppe/adpt_cppe_qm.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/adpt/cppe/adpt_cppe_qos.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/adpt/cppe/adpt_cppe_uniphy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/adpt/hppe/adpt_hppe.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/adpt/mp/adpt_mp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/adpt/mp/adpt_mp_portctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/adpt/mp/adpt_mp_uniphy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/adpt/sfp/adpt_sfp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/api/api_access.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/api/api_desc.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/api/sw_api.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/api/sw_ioctl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/common/aos_head.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/common/shared_func.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/common/sw.h create mode 100755 
feeds/ipq807x/qca-ssdk/src/include/common/sw_config.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/common/sw_error.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/common/util.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_acl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_api.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_bm.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_cosmap.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_ctrlpkt.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_fdb.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_flow.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_flowcookie.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_igmp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_init.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_interface_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_ip.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_leaky.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/fal/fal_led.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_mirror.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_misc.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_multi.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_nat.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_policer.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_port_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_portvlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_pppoe.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_ptp.h create mode 100755 
feeds/ipq807x/qca-ssdk/src/include/fal/fal_qm.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/fal/fal_qos.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_rate.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_reg_access.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_rfs.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_rss_hash.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_sec.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_servcode.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_sfp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_shaper.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_stp.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/fal/fal_trunk.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_type.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_uk_if.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_vlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/fal/fal_vsi.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/athena/athena_api.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/athena/athena_fdb.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/athena/athena_init.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/athena/athena_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/athena/athena_port_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/athena/athena_portvlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/athena/athena_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/athena/athena_reg_access.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/athena/athena_vlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/cppe/cppe_loopback.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/cppe/cppe_loopback_reg.h create mode 100755 
feeds/ipq807x/qca-ssdk/src/include/hsl/cppe/cppe_portctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/cppe/cppe_portctrl_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/cppe/cppe_qos.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/cppe/cppe_qos_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_acl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_api.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_cosmap.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_fdb.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_fdb_prv.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_igmp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_init.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_interface_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_ip.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_leaky.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_led.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_mirror.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_misc.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_nat.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_nat_helper.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_port_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_portvlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_psgmii.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_qos.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_rate.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_reg.h create mode 100755 
feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_reg_access.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_sec.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_stp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_trunk.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/dess/dess_vlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_acl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_api.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_fdb.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_igmp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_init.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_leaky.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_led.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_mirror.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_misc.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_port_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_portvlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_qos.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_rate.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_reduced_acl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_reg_access.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_stp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/garuda/garuda_vlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_api.h create mode 100755 
feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_fdb.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_igmp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_init.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_leaky.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_led.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_mirror.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_misc.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_port_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_portvlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_qos.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_rate.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_reg_access.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_stp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/horus/horus_vlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_acl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_acl_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_bm.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_bm_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_ctrlpkt.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_ctrlpkt_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_fdb.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_fdb_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_flow.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_flow_reg.h create 
mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_global.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_global_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_init.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_ip.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_ip_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_mib_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_mirror.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_mirror_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_policer.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_policer_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_portctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_portctrl_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_portvlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_portvlan_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_pppoe.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_pppoe_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_qm.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_qm_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_qos.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_qos_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_reg_access.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_rss.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_rss_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_sec.h create mode 100755 
feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_sec_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_servcode.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_servcode_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_shaper.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_shaper_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_stp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_stp_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_trunk.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_trunk_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_uniphy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_uniphy_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_vsi.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_vsi_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_xgmacmib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_xgmacmib_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_xgportctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hppe/hppe_xgportctrl_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hsl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hsl_acl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hsl_api.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hsl_dev.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hsl_lock.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hsl_port_prop.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/hsl_shared_api.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_acl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_api.h create mode 100755 
feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_cosmap.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_fdb.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_igmp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_init.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_interface_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_ip.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_leaky.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_led.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_mirror.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_misc.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_nat.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_nat_helper.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_port_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_portvlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_qos.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_rate.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_reg_access.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_sec.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_stp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_trunk.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isis/isis_vlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_acl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_api.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_cosmap.h create mode 100755 
feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_fdb.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_fdb_prv.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_igmp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_init.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_interface_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_ip.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_leaky.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_led.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_mirror.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_misc.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_nat.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_nat_helper.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_port_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_portvlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_qos.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_rate.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_reg_access.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_sec.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_stp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_trunk.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/isisc/isisc_vlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/mp/mp_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/mp/mp_mib_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/mp/mp_portctrl.h 
create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/mp/mp_portctrl_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/mp/mp_uniphy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/mp/mp_uniphy_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/aquantia_phy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/f1_phy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/f2_phy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/hsl_phy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/malibu_phy.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/mpge_led.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/mpge_phy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/qca803x_phy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/qca808x.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/qca808x_led.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/qca808x_phy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/qca808x_ptp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/qca808x_ptp_api.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/qca808x_ptp_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/phy/sfp_phy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/scomphy/scomphy_init.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/hsl/scomphy/scomphy_port_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/scomphy/scomphy_reg_access.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/hsl/sfp/sfp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/sfp/sfp_access.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/sfp/sfp_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_acl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_api.h create mode 100755 
feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_fdb.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_igmp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_init.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_leaky.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_led.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_mirror.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_misc.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_port_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_portvlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_qos.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_rate.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_reduced_acl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_reg.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_reg_access.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_stp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/hsl/shiva/shiva_vlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/init/ssdk_clk.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/init/ssdk_dts.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/init/ssdk_hppe.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/init/ssdk_init.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/init/ssdk_interrupt.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/init/ssdk_led.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/init/ssdk_mp.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/init/ssdk_phy_i2c.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/init/ssdk_plat.h create mode 100755 
feeds/ipq807x/qca-ssdk/src/include/init/ssdk_scomphy.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/init/ssdk_uci.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/ref/ref_api.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/ref/ref_fdb.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/ref/ref_mib.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/ref/ref_misc.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/ref/ref_port_ctrl.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/ref/ref_uci.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/ref/ref_vlan.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/ref/ref_vsi.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/sal/os/aos_lock.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/sal/os/aos_mem.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/sal/os/aos_timer.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/sal/os/aos_types.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/sal/os/linux/aos_lock_pvt.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/sal/os/linux/aos_mem_pvt.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/sal/os/linux/aos_timer_pvt.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/sal/os/linux/aos_types_pvt.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/sal/sd/linux/uk_interface/sw_api_ks.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/sal/sd/sd.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/shell_lib/shell.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/shell_lib/shell_config.h create mode 100644 feeds/ipq807x/qca-ssdk/src/include/shell_lib/shell_io.h create mode 100755 feeds/ipq807x/qca-ssdk/src/include/shell_lib/shell_sw.h create mode 100755 feeds/ipq807x/qca-ssdk/src/ko_Makefile create mode 100644 feeds/ipq807x/qca-ssdk/src/make/.build_number create mode 100755 feeds/ipq807x/qca-ssdk/src/make/components.mk create mode 100755 
feeds/ipq807x/qca-ssdk/src/make/config.mk create mode 100755 feeds/ipq807x/qca-ssdk/src/make/defs.mk create mode 100755 feeds/ipq807x/qca-ssdk/src/make/linux_opt.mk create mode 100755 feeds/ipq807x/qca-ssdk/src/make/target.mk create mode 100755 feeds/ipq807x/qca-ssdk/src/make/tools.mk create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/Makefile create mode 100644 feeds/ipq807x/qca-ssdk/src/src/adpt/adpt.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/cppe/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/cppe/adpt_cppe_flow.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/cppe/adpt_cppe_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/cppe/adpt_cppe_misc.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/cppe/adpt_cppe_portctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/cppe/adpt_cppe_qm.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/adpt/cppe/adpt_cppe_qos.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/cppe/adpt_cppe_uniphy.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_bm.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_ctrlpkt.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_fdb.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_flow.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_ip.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_mirror.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_misc.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_policer.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_portctrl.c create mode 100755 
feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_portvlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_pppoe.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_ptp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_qm.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_qos.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_rss_hash.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_sec.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_servcode.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_shaper.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_stp.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_trunk.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_uniphy.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/hppe/adpt_hppe_vsi.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/adpt/mp/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/mp/adpt_mp_interrupt.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/adpt/mp/adpt_mp_led.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/mp/adpt_mp_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/mp/adpt_mp_portctrl.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/adpt/mp/adpt_mp_uniphy.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/sfp/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/adpt/sfp/adpt_sfp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/api/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/api/api_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_bm.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_cosmap.c create mode 100755 
feeds/ipq807x/qca-ssdk/src/src/fal/fal_ctrlpkt.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_fdb.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_flow.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_igmp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_init.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_interface_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_ip.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_leaky.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/fal/fal_led.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_mirror.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_misc.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_nat.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_policer.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/fal/fal_port_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_portvlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_pppoe.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_ptp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_qm.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/fal/fal_qos.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_rate.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_reg_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_rss_hash.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_sec.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_servcode.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_sfp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_shaper.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_stp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_trunk.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/fal/fal_vlan.c create mode 100755 
feeds/ipq807x/qca-ssdk/src/src/fal/fal_vsi.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/athena/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/athena/athena_fdb.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/athena/athena_init.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/athena/athena_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/athena/athena_port_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/athena/athena_portvlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/athena/athena_reg_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/athena/athena_vlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/cppe/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/cppe/cppe_loopback.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/cppe/cppe_portctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/cppe/cppe_qos.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_acl_parse.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_acl_prv.h create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_cosmap.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_fdb.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_igmp.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_init.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_interface_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_ip.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_leaky.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_led.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_mib.c create mode 100755 
feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_mirror.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_misc.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_multicast_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_nat.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_port_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_portvlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_psgmii.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_qos.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_rate.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_reg_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_sec.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_stp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_trunk.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/dess/dess_vlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_fdb.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_igmp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_init.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_leaky.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_led.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_mirror.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_misc.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_port_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_portvlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_qos.c create mode 100755 
feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_rate.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_reduced_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_reg_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_stp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/garuda/garuda_vlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_fdb.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_igmp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_init.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_leaky.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_led.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_mirror.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_misc.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_port_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_portvlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_qos.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_rate.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_reg_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_stp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/horus/horus_vlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_bm.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_ctrlpkt.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_fdb.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_flow.c create mode 100755 
feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_global.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_init.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_ip.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_mirror.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_policer.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_portctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_portvlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_pppoe.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_qm.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_qos.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_reg_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_rss.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_sec.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_servcode.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_shaper.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_stp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_trunk.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_uniphy.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_vsi.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_xgmacmib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hppe/hppe_xgportctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hsl_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hsl_api.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hsl_dev.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hsl_lock.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/hsl_port_prop.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/Makefile create mode 100755 
feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_acl_parse.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_acl_prv.h create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_cosmap.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_fdb.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_igmp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_init.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_interface_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_ip.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_leaky.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_led.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_mac_block.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_mirror.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_misc.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_multicast_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_nat.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_port_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_portvlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_qos.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_rate.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_reg_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_sec.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_stp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_trunk.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isis/isis_vlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/Makefile create mode 100755 
feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_acl_parse.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_acl_prv.h create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_cosmap.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_fdb.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_igmp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_init.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_interface_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_ip.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_leaky.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_led.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_mirror.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_misc.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_multicast_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_nat.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_port_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_portvlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_qos.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_rate.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_reg_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_sec.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_stp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_trunk.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/isisc/isisc_vlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/mp/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/mp/mp_mib.c create mode 100755 
feeds/ipq807x/qca-ssdk/src/src/hsl/mp/mp_portctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/mp/mp_uniphy.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/aquantia_phy.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/f1_phy.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/f2_phy.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/hsl_phy.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/malibu_phy.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/mpge_led.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/mpge_phy.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/qca803x_phy.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/qca808x.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/qca808x_led.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/qca808x_phc.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/qca808x_phy.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/qca808x_ptp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/qca808x_ptp_api.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/phy/sfp_phy.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/scomphy/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/scomphy/scomphy_init.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/hsl/scomphy/scomphy_port_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/scomphy/scomphy_reg_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/sfp/Makefile create mode 100644 feeds/ipq807x/qca-ssdk/src/src/hsl/sfp/sfp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/sfp/sfp_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_fdb.c create mode 100755 
feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_igmp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_init.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_leaky.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_led.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_mirror.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_misc.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_port_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_portvlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_qos.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_rate.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_reduced_acl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_reg_access.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_stp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/hsl/shiva/shiva_vlan.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/init/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/init/ssdk_clk.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/init/ssdk_dts.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/init/ssdk_hppe.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/init/ssdk_init.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/init/ssdk_interrupt.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/init/ssdk_led.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/init/ssdk_mp.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/init/ssdk_phy_i2c.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/init/ssdk_plat.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/init/ssdk_scomphy.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/init/ssdk_uci.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/ref/Makefile create mode 100644 
feeds/ipq807x/qca-ssdk/src/src/ref/ref_fdb.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/ref/ref_mib.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/ref/ref_misc.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/ref/ref_port_ctrl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/ref/ref_uci.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/ref/ref_vlan.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/ref/ref_vsi.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/sal/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/sal/sd/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/sal/sd/linux/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/sal/sd/linux/uk_interface/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/sal/sd/linux/uk_interface/sw_api_ks_ioctl.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/sal/sd/linux/uk_interface/sw_api_ks_netlink.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/sal/sd/sd.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/shell_lib/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/shell_lib/shell.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/shell_lib/shell_config.c create mode 100644 feeds/ipq807x/qca-ssdk/src/src/shell_lib/shell_io.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/shell_lib/shell_sw.c create mode 100755 feeds/ipq807x/qca-ssdk/src/src/util/Makefile create mode 100755 feeds/ipq807x/qca-ssdk/src/src/util/util.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/Makefile create mode 100755 feeds/ipq807x/qca-thermald-10.4/files/thermal.config create mode 100755 feeds/ipq807x/qca-thermald-10.4/files/thermal.init create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/Android.mk create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/Makefile create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/Makefile.am create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/adc-sensor.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/adc-sensor.h create 
mode 100755 feeds/ipq807x/qca-thermald-10.4/src/bcl-sensor.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/bcl-sensor.h create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/configure.ac create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/gen-sensor.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/gen-sensor.h create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/ipq-thermald-8064.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/ipq-thermald-8066.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/ipq-thermald-8069.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/modem_mitigation_oncrpc.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/modem_mitigation_qmi.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/modem_sensor_qmi.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/pm8821-sensor.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/pm8821-sensor.h create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/qmi-ts-sensor.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/qmi-ts-sensor.h create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/readme.txt create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/sensors-7x30.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/sensors-8660.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/sensors-8960.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/sensors-8974.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/sensors-8x25.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/sensors-hw.h create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/sensors-ipq.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal.h create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_actions.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_client.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_client.h create mode 
100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_config.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_lib_common.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_lib_common.h create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_mitigation_device_service_v01.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_mitigation_device_service_v01.h create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_monitor.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_sensor_service_v01.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_sensor_service_v01.h create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_server.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_server.h create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermal_util.c create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-7x30.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-8064.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-8064ab.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-8660.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-8930.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-8930ab.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-8960.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-8960ab.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-8974.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-8x25-msm1-pmic_therm.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-8x25-msm2-msm_therm.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald-8x25-msm2-pmic_therm.conf create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/thermald.conf_sample create mode 100755 feeds/ipq807x/qca-thermald-10.4/src/tsens-sensor.c create mode 100755 
feeds/ipq807x/qca-thermald-10.4/src/tsens-sensor.h delete mode 100644 patches/ipq807x/0002-ipq807x-add-qsdk-kernel-support.patch create mode 100644 patches/ipq807x/0002-ipq807x-buildsystem-patches-required-by-the-target.patch delete mode 100644 patches/ipq807x/0003-ipq807x-add-qsdk-kernel-support.patch create mode 100644 patches/ipq807x/0003-linux-modules-fix-some-v4.4-dependencies.patch diff --git a/feeds/ipq807x/ipq807x/Makefile b/feeds/ipq807x/ipq807x/Makefile index e5440198b..dbba8fd6b 100644 --- a/feeds/ipq807x/ipq807x/Makefile +++ b/feeds/ipq807x/ipq807x/Makefile @@ -4,17 +4,17 @@ ARCH:=arm BOARD:=ipq807x BOARDNAME:=Qualcomm Atheros AX SUBTARGETS:=ipq807x ipq60xx ipq50xx -FEATURES:=squashfs ramdisk nand pcie usb usbgadget +FEATURES:=squashfs ramdisk nand pcie usb KERNELNAME:=Image dtbs CPU_TYPE:=cortex-a7 -KERNEL_PATCHVER:=5.4 -KERNEL_NAME_SUFFIX=-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac +KERNEL_PATCHVER:=4.4 +KERNEL_NAME_SUFFIX=-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016 include $(INCLUDE_DIR)/target.mk DEFAULT_PACKAGES += kmod-qca-nss-dp kmod-qca-ssdk swconfig \ kmod-qca-nss-drv \ - kmod-usb-phy-ipq807x kmod-usb-dwc3-qcom-internal \ + kmod-usb-phy-ipq807x kmod-usb-dwc3-of-simple \ kmod-ath11k-ahb kmod-qrtr_mproc wpad \ kmod-gpio-button-hotplug \ qca-thermald-10.4 qca-ssdk-shell kmod-qca-nss-drv-bridge-mgr \ diff --git a/feeds/ipq807x/ipq807x/base-files/etc/board.d/02_network b/feeds/ipq807x/ipq807x/base-files/etc/board.d/02_network index d69410464..b1ccffd03 100755 --- a/feeds/ipq807x/ipq807x/base-files/etc/board.d/02_network +++ b/feeds/ipq807x/ipq807x/base-files/etc/board.d/02_network @@ -79,7 +79,7 @@ qcom_setup_macs() cig,wf194c|\ cig,wf194c4|\ cig,wf196) - mac=$(grep BaseMacAddress= /dev/mtd18 | cut -dx -f2) + mac=$(grep BaseMacAddress= /dev/mtd14 | cut -dx -f2) wan_mac=$(macaddr_canonicalize $mac) lan_mac=$(macaddr_add "$wan_mac" 1) ucidef_set_network_device_mac eth0 $lan_mac diff --git a/feeds/ipq807x/ipq807x/config-4.4 
b/feeds/ipq807x/ipq807x/config-4.4 new file mode 100644 index 000000000..af9953aea --- /dev/null +++ b/feeds/ipq807x/ipq807x/config-4.4 @@ -0,0 +1,829 @@ +# CONFIG_AHCI_IPQ is not set +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_ALLOW_DEV_COREDUMP is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_APM_EMULATION is not set +# CONFIG_APQ_GCC_8084 is not set +# CONFIG_APQ_MMCC_8084 is not set +# CONFIG_AR8216_PHY is not set +CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +CONFIG_ARCH_HAS_SG_CHAIN=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y +# CONFIG_ARCH_IPQ40XX is not set +# CONFIG_ARCH_IPQ806x is not set +# CONFIG_ARCH_IPQ807x is not set +# CONFIG_ARCH_IPQ6018 is not set +# CONFIG_ARCH_IPQ5018 is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +# CONFIG_ARCH_MSM8960 is not set +# CONFIG_ARCH_MSM8974 is not set +CONFIG_ARCH_MSM8X60=y +CONFIG_ARCH_MULTIPLATFORM=y +# CONFIG_ARCH_MULTI_CPU_AUTO is not set +CONFIG_ARCH_MULTI_V6_V7=y +CONFIG_ARCH_MULTI_V7=y +CONFIG_ARCH_NR_GPIO=0 +CONFIG_ARCH_QCOM=y +CONFIG_QSEECOM=m +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y +CONFIG_ARM=y +CONFIG_ARM_AMBA=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +# CONFIG_ARM_ATAG_DTB_COMPAT is not set +CONFIG_ARM_CCI=y +CONFIG_ARM_CCI400_COMMON=y +CONFIG_ARM_CCI400_PMU=y +CONFIG_ARM_CCI_PMU=y +CONFIG_ARM_CPU_SUSPEND=y +CONFIG_ARM_GIC=y +CONFIG_ARM_HAS_SG_CHAIN=y +# CONFIG_ARM_HIGHBANK_CPUIDLE is not set +CONFIG_ARM_CPUIDLE=y +CONFIG_ARM_L1_CACHE_SHIFT=6 +CONFIG_ARM_L1_CACHE_SHIFT_6=y +# CONFIG_ARM_LPAE is not set +CONFIG_ARM_MODULE_PLTS=y 
+CONFIG_ARM_PATCH_PHYS_VIRT=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PSCI=y +CONFIG_ARM_PSCI_FW=y +CONFIG_ARM_QCOM_CPUFREQ=y +# CONFIG_ARM_SMMU is not set +# CONFIG_ARM_SP805_WATCHDOG is not set +CONFIG_ARM_THUMB=y +# CONFIG_ARM_THUMBEE is not set +CONFIG_ARM_UNWIND=y +CONFIG_ARM_VIRT_EXT=y +CONFIG_AT803X_PHY=y +# CONFIG_ATA is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NVME=y +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_VIRTIO_BLK is not set +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_BOUNCE=y +CONFIG_BUILD_BIN2C=y +# CONFIG_CNSS_QCN9000 is not set +# CONFIG_CNSS2 is not set +# CONFIG_CNSS2_GENL is not set +# CONFIG_CNSS2_DEBUG is not set +# CONFIG_CNSS2_PM is not set +# CONFIG_CNSS2_PCI_DRIVER is not set +# CONFIG_CNSS2_CALIBRATION_SUPPORT is not set +# CONFIG_CNSS2_SMMU is not set +# CONFIG_CNSS2_RAMDUMP is not set +# CONFIG_CACHE_L2X0 is not set +CONFIG_CC_OPTIMIZE_FOR_SIZE=y +CONFIG_CC_STACKPROTECTOR=y +# CONFIG_CC_STACKPROTECTOR_NONE is not set +CONFIG_CC_STACKPROTECTOR_REGULAR=y +# CONFIG_CHARGER_QCOM_SMBB is not set +CONFIG_CLEANCACHE=y +CONFIG_CLKDEV_LOOKUP=y +CONFIG_CLKSRC_OF=y +CONFIG_CLKSRC_PROBE=y +CONFIG_CLKSRC_QCOM=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_COMMON_CLK=y +CONFIG_COMMON_CLK_QCOM=y +CONFIG_CONFIGFS_FS=y +CONFIG_COREDUMP=y +# CONFIG_CORESIGHT is not set +# CONFIG_CORESIGHT_CSR is not set +# CONFIG_CORESIGHT_CTI is not set +# NFIG_CORESIGHT_EVENT is not set +# CONFIG_CORESIGHT_HWEVENT is not set +# CONFIG_CORESIGHT_LINKS_AND_SINKS is not set +# CONFIG_CORESIGHT_LINK_AND_SINK_TMC is not set +# CONFIG_CORESIGHT_QCOM_REPLICATOR is not set +# CONFIG_CORESIGHT_QPDI is not set +# CONFIG_CORESIGHT_SINK_ETBV10 is not set +# CONFIG_CORESIGHT_SINK_TPIU is not set +# CONFIG_CORESIGHT_SOURCE_DUMMY is not set +# CONFIG_CORESIGHT_SOURCE_ETM3X is not set +# CONFIG_CORESIGHT_SOURCE_ETM4X is not set +# CONFIG_CORESIGHT_REMOTE_ETM is not set +# CONFIG_CORESIGHT_STM is 
not set +# CONFIG_CORESIGHT_TPDA is not set +# CONFIG_CORESIGHT_TPDM is not set +# CONFIG_CORESIGHT_TPDM_DEFAULT_ENABLE is not set +# CONFIG_CORESIGHT_STREAM is not set +CONFIG_CPUFREQ_DT=y +CONFIG_CPUFREQ_DT_PLATDEV=y +CONFIG_CPU_32v6K=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +# CONFIG_CPU_BIG_ENDIAN is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y +# CONFIG_CPU_SW_DOMAIN_PAN is not set +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_HAS_ASID=y +# CONFIG_CPU_ICACHE_DISABLE is not set +CONFIG_CPU_IDLE=y +CONFIG_CPU_IDLE_GOV_LADDER=y +CONFIG_CPU_IDLE_GOV_MENU=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_PM=y +CONFIG_CPU_RMAP=y +# CONFIG_CPU_THERMAL is not set +CONFIG_CPU_TLB_V7=y +CONFIG_CPU_V7=y +CONFIG_CRC16=y +# CONFIG_CRC32_SARWATE is not set +CONFIG_CRC32_SLICEBY8=y +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_CRYPTO_DEV_QCOM_MSM_QCE is not set +# CONFIG_CRYPTO_DEV_OTA_CRYPTO is not set +# CONFIG_FIPS_ENABLE is not set +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_CMAC=y +# CONFIG_CRYPTO_DEV_QCOM_ICE is not set +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HASH_INFO=y +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_WORKQUEUE=y +CONFIG_CRYPTO_XZ=y +CONFIG_CRYPTO_ARC4=y +CONFIG_CRYPTO_GCM=y +CONFIG_DCACHE_WORD_ACCESS=y 
+CONFIG_DEBUG_BUGVERBOSE=y +CONFIG_DEBUG_GPIO=y +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S" +# CONFIG_DEBUG_MEM_USAGE is not set +# CONFIG_DEBUG_UART_8250 is not set +# CONFIG_DEBUG_USER is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DEVMEM=y +# CONFIG_DIAG_OVER_USB is not set +CONFIG_DMADEVICES=y +CONFIG_DMA_ENGINE=y +CONFIG_DMA_OF=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DTC=y +# CONFIG_DWMAC_GENERIC is not set +# CONFIG_DWMAC_IPQ806X is not set +# CONFIG_DWMAC_SUNXI is not set +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_VHOST_NET is not set +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set +CONFIG_DYNAMIC_DEBUG=y +CONFIG_ETHERNET_PACKET_MANGLE=y +CONFIG_EXT4_FS=y +# CONFIG_EXT4_USE_FOR_EXT2 is not set +CONFIG_FB=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_CMDLINE=y +CONFIG_FB_QCOM_QPIC=y +CONFIG_FB_QCOM_QPIC_ER_SSD1963_PANEL=y +CONFIG_FB_SYS_FOPS=y +CONFIG_FIXED_PHY=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_FS_MBCACHE=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +# CONFIG_GENERIC_CPUFREQ_KRAIT is not set +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_GENERIC_IO=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_PHY=y +CONFIG_GENERIC_PINCONF=y +CONFIG_GENERIC_SCHED_CLOCK=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_IRQCHIP=y +CONFIG_GPIO_DEVRES=y +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_NXP_74HC153 is not set +CONFIG_GPIO_SYSFS=y +CONFIG_HANDLE_DOMAIN_IRQ=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_HAS_DMA=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT_MAP=y +# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y 
+CONFIG_HAVE_ARCH_KGDB=y +CONFIG_HAVE_ARCH_PFN_VALID=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_ARM_ARCH_TIMER=y +# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set +CONFIG_HAVE_BPF_JIT=y +CONFIG_HAVE_CC_STACKPROTECTOR=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_HAVE_CONTEXT_TRACKING=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_HAVE_DMA_API_DEBUG=y +CONFIG_HAVE_DMA_ATTRS=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +# CONFIG_SRD_TRACE is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_IDE=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_XZ=y +# CONFIG_HAVE_KPROBES is not set +# CONFIG_HAVE_KRETPROBES is not set +CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_HAVE_NET_DSA=y +CONFIG_HAVE_OPROFILE=y +# CONFIG_HAVE_OPTPROBES is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_PROC_CPU=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_SMP=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_UID16=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HIGHMEM=y +CONFIG_HIGHPTE=y +CONFIG_HOTPLUG_CPU=y +CONFIG_HWMON=y +CONFIG_HWSPINLOCK=y +CONFIG_HWSPINLOCK_QCOM=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_MSM=y +CONFIG_HZ_FIXED=0 +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_QUP=y +# CONFIG_IIO is not set +# CONFIG_IIO_BUFFER is not set +# CONFIG_IIO_TRIGGER is not set +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_INITRAMFS_SOURCE="" +# CONFIG_INPUT_PM8941_PWRKEY is not set +CONFIG_IOMMU_HELPER=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set +# 
CONFIG_IPQ_DWC3_QTI_EXTCON is not set +# CONFIG_IPQ_GCC_4019 is not set +# CONFIG_IPQ_GCC_5018 is not set +# CONFIG_IPQ_APSS_5018 is not set +# CONFIG_IPQ_GCC_6018 is not set +# CONFIG_IPQ_APSS_6018 is not set +# CONFIG_IPQ_GCC_806X is not set +# CONFIG_IPQ_ADSS_807x is not set +# CONFIG_IPQ_APSS_807x is not set +# CONFIG_IPQ_GCC_807x is not set +# CONFIG_IPQ_ADCC_4019 is not set +# CONFIG_IPQ_LCC_806X is not set +# CONFIG_IPQ_REMOTEPROC_ADSP is not set +# CONFIG_IPQ_SUBSYSTEM_RESTART is not set +# CONFIG_IPQ_SUBSYSTEM_RESTART_TEST is not set +CONFIG_IRQCHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_IRQ_WORK=y +CONFIG_JBD2=y +# CONFIG_IPC_ROUTER is not set +# CONFIG_IPC_ROUTER_SECURITY is not set +# CONFIG_IPC_LOGGING is not set +CONFIG_KPSS_XCC=y +# CONFIG_KRAITCC is not set +# CONFIG_KRAIT_CLOCKS is not set +# CONFIG_KRAIT_L2_ACCESSORS is not set +CONFIG_LEDS_IPQ=y +CONFIG_LEDS_PWM=y +CONFIG_LEDS_TLC591XX=y +# CONFIG_LEDS_PCA9956B is not set +CONFIG_LIBFDT=y +CONFIG_LOCKUP_DETECTOR=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_MDIO=y +CONFIG_MDIO_BITBANG=y +CONFIG_MDIO_BOARDINFO=y +CONFIG_MDIO_GPIO=y +# CONFIG_MDIO_QCA is not set +CONFIG_MFD_QCOM_RPM=y +CONFIG_MFD_SPMI_PMIC=y +# CONFIG_SLIMBUS is not set +# CONFIG_SLIMBUS_MSM_CTRL is not set +# CONFIG_SLIMBUS_MSM_NGD is not set +# CONFIG_OF_SLIMBUS is not set +CONFIG_MFD_SYSCON=y +CONFIG_MIGHT_HAVE_CACHE_L2X0=y +CONFIG_MIGHT_HAVE_PCI=y +CONFIG_MMC=y +CONFIG_MMC_ARMMMCI=y +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_MINORS=32 +CONFIG_MMC_QCOM_DML=y +CONFIG_MMC_QCOM_TUNING=y +CONFIG_MMC_SDHCI=y +CONFIG_MMC_SDHCI_MSM=y +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_PCI is not set +CONFIG_MMC_SDHCI_PLTFM=y +# CONFIG_MMC_TIFM_SD is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_MODULES_USE_ELF_REL=y +# CONFIG_MPLS_ROUTING is not set +# CONFIG_MSM_GCC_8660 is not set +# CONFIG_MSM_GCC_8916 is not set +# CONFIG_MSM_GCC_8960 
is not set +# CONFIG_MSM_GCC_8974 is not set +# CONFIG_MSM_LCC_8960 is not set +# CONFIG_MSM_MMCC_8960 is not set +# CONFIG_MSM_MMCC_8974 is not set +# CONFIG_MSM_MHI is not set +# CONFIG_MSM_IPC_ROUTER_MHI_XPRT is not set +# CONFIG_MSM_MHI_DEBUG is not set +# CONFIG_MSM_MHI_DEV is not set +# CONFIG_MSM_MHI_UCI is not set +# CONFIG_DIAGFWD_BRIDGE_CODE is not set +# CONFIG_MSM_BUS_SCALING is not set +# CONFIG_BUS_TOPOLOGY_ADHOC is not set +# CONFIG_QPNP_REVID is not set +# CONFIG_SPS is not set +# CONFIG_SPS_SUPPORT_NDP_BAM is not set +# CONFIG_USB_BAM is not set +# CONFIG_SPS_SUPPORT_BAMDMA is not set +# CONFIG_IPA is not set +# CONFIG_IPA3 is not set +# CONFIG_EP_PCIE is not set +# CONFIG_GSI is not set +# CONFIG_PFT is not set +# CONFIG_SEEMP_CORE is not set +# CONFIG_GPIO_USB_DETECT is not set +# CONFIG_MSM_GLINK is not set +# CONFIG_MSM_GLINK_LOOPBACK_SERVER is not set +# CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT is not set +# CONFIG_MSM_GLINK_PKT is not set +# CONFIG_MSM_IPC_ROUTER_GLINK_XPRT is not set +# CONFIG_MSM_QMI_INTERFACE is not set +# CONFIG_MSM_TEST_QMI_CLIENT is not set +# CONFIG_GLINK_DEBUG_FS is not set +# CONFIG_MSM_RPM_SMD is not set +# CONFIG_MSM_RPM_GLINK is not set +CONFIG_MSM_RPM_LOG=y +# CONFIG_MSM_SMEM is not set +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_M25P80=y +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_ECC=y +CONFIG_MTD_NAND_QCOM=y +CONFIG_MTD_QCOM_SMEM_PARTS=y +CONFIG_MTD_SPINAND_GIGADEVICE=y +CONFIG_MTD_SPINAND_MT29F=y +CONFIG_MTD_SPINAND_ONDIEECC=y +CONFIG_MTD_SPI_NOR=y +CONFIG_MTD_SPLIT_FIRMWARE=y +CONFIG_MTD_SPLIT_FIT_FW=y +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_BEB_LIMIT=20 +CONFIG_MTD_UBI_BLOCK=y +# CONFIG_MTD_UBI_FASTMAP is not set +CONFIG_MTD_UBI_GLUEBI=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MULTI_IRQ_HANDLER=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_NEON=y +CONFIG_NET=y +# CONFIG_NET_DSA_MV88E6063 is not set +CONFIG_NET_FLOW_LIMIT=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NO_BOOTMEM=y +CONFIG_NO_HZ=y 
+CONFIG_NO_HZ_COMMON=y +CONFIG_NO_HZ_IDLE=y +CONFIG_NR_CPUS=4 +CONFIG_NUM_ALT_PARTITION=8 +CONFIG_NVMEM=y +CONFIG_OF=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_ADDRESS_PCI=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_FLATTREE=y +CONFIG_OF_GPIO=y +CONFIG_OF_IRQ=y +CONFIG_OF_MDIO=y +CONFIG_OF_MTD=y +CONFIG_OF_NET=y +CONFIG_OF_PCI=y +CONFIG_OF_PCI_IRQ=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OLD_SIGACTION=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=5 +CONFIG_PCI=y +# CONFIG_PCIEAER is not set +CONFIG_PCIE_DW=y +# CONFIG_PCIE_DW_PLAT is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_QCOM=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_MSI=y +CONFIG_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +CONFIG_PGTABLE_LEVELS=2 +CONFIG_PHYLIB=y +# CONFIG_PHY_IPQ_BALDUR_USB is not set +# CONFIG_PHY_IPQ_UNIPHY_USB is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +CONFIG_PHY_QCA_PCIE_QMP=y +# CONFIG_PHY_QCOM_UFS is not set +# CONFIG_PHY_IPQ_UNIPHY_PCIE is not set +CONFIG_PINCTRL=y +# CONFIG_PINCTRL_APQ8064 is not set +# CONFIG_PINCTRL_APQ8084 is not set +# CONFIG_PINCTRL_IPQ4019 is not set +# CONFIG_PINCTRL_IPQ6018 is not set +# CONFIG_PINCTRL_IPQ8064 is not set +# CONFIG_PINCTRL_IPQ807x is not set +# CONFIG_PINCTRL_IPQ5018 is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_MSM8660 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8960 is not set +CONFIG_PINCTRL_QCOM_SPMI_PMIC=y +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PL330_DMA is not set +CONFIG_PM=y +CONFIG_PM_CLK=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_GENERIC_DOMAINS=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_OPP=y +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +CONFIG_POWER_RESET=y +CONFIG_POWER_RESET_MSM=y +CONFIG_POWER_SUPPLY=y +CONFIG_PREEMPT=y +CONFIG_PREEMPT_COUNT=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_RCU=y 
+CONFIG_PRINTK_TIME=y +CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_STRIPPED is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_RAM=y +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +CONFIG_PWM=y +CONFIG_PWM_IPQ4019=y +# CONFIG_PWM_PCA9685 is not set +CONFIG_PWM_SYSFS=y +CONFIG_QCOM_ADM=y +# CONFIG_QCOM_APM is not set +CONFIG_QCOM_BAM_DMA=y +# CONFIG_QTI_BT_TTY is not set +# CONFIG_QCOM_COINCELL is not set +# CONFIG_QCOM_DCC is not set +CONFIG_QCOM_GDSC=y +CONFIG_QCOM_GSBI=y +# CONFIG_QCOM_HFPLL is not set +# CONFIG_QCOM_MEMORY_DUMP_V2 is not set +# CONFIG_QCOM_MDT_LOADER is not set +CONFIG_QCOM_QFPROM=y +# CONFIG_QCOM_SPMI_TEMP_ALARM is not set +CONFIG_QCOM_RPM_CLK=y +# CONFIG_QCOM_RTB is not set +# CONFIG_QCOM_PM is not set +CONFIG_QCOM_SCM=y +CONFIG_QCOM_SCM_32=y +# CONFIG_HAVE_ARM_SMCCC is not set +CONFIG_QCA_SCM_RESTART_REASON=y +CONFIG_IPQ_TCSR=y +CONFIG_QCOM_QFPROM=y +# CONFIG_QCOM_SMD is not set +CONFIG_QCOM_SMEM=y +CONFIG_QCOM_SMEM_STATE=y +# CONFIG_QCOM_SMD is not set +CONFIG_QCOM_SMP2P=y +# CONFIG_QCOM_SPMI_VADC is not set +CONFIG_QCOM_TSENS=y +CONFIG_QCOM_TZ_LOG=y +CONFIG_QCOM_WDT=y +CONFIG_QMI_ENCDEC=y +CONFIG_RATIONAL=y +# CONFIG_RCU_BOOST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=21 +# CONFIG_RCU_EXPERT is not set +CONFIG_RCU_STALL_COMMON=y +CONFIG_RD_GZIP=y +CONFIG_REGMAP=y +# CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS is not set +CONFIG_REGMAP_MMIO=y +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_CPR3 is not set +CONFIG_REGULATOR_GPIO=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y +CONFIG_REGULATOR_QCOM_RPM=y +CONFIG_REGULATOR_QCOM_SPMI=y +# CONFIG_REGULATOR_IPQ40XX is not set +# CONFIG_REGULATOR_RPM_SMD is not set +# CONFIG_REGULATOR_RPM_GLINK is not set +CONFIG_RELAY=y +CONFIG_REMOTEPROC=y +# CONFIG_IPQ807X_REMOTEPROC is not set +CONFIG_RESET_CONTROLLER=y +CONFIG_RFS_ACCEL=y +CONFIG_RPS=y +CONFIG_RTC_CLASS=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_RTC_DRV_CMOS is not set +# CONFIG_RTC_DRV_PM8XXX is not set +CONFIG_RWSEM_SPIN_ON_OWNER=y 
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y +# CONFIG_SATA_AHCI is not set +CONFIG_SCHED_HRTICK=y +# CONFIG_SCSI is not set +# CONFIG_SCHED_INFO is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SERIAL_8250 is not set +# CONFIG_SERIAL_8250_CONSOLE is not set +# CONFIG_SERIAL_8250_DMA is not set +# CONFIG_SERIAL_AMBA_PL010 is not set +# CONFIG_SERIAL_AMBA_PL011 is not set +CONFIG_SERIAL_MSM=y +CONFIG_SERIAL_MSM_CONSOLE=y +# CONFIG_VIRTIO_CONSOLE is not set +CONFIG_SMP=y +CONFIG_SMP_ON_UP=y +# CONFIG_SND is not set +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +CONFIG_SND_PROC_FS=y +# CONFIG_SND_COMPRESS_OFFLOAD is not set +CONFIG_SND_PCM=y +CONFIG_SND_SOC=y +# CONFIG_SND_SOC_APQ8016_SBC is not set +CONFIG_SND_SOC_I2C_AND_SPI=y +# CONFIG_SND_SOC_IPQ is not set +# CONFIG_SND_SOC_IPQ806X_LPAIF is not set +# CONFIG_SND_SOC_IPQ806X_PCM_RAW is not set +CONFIG_SND_SOC_IPQ_ADSS=y +CONFIG_SND_SOC_IPQ_CODEC=y +CONFIG_SND_SOC_IPQ_CPU_DAI=y +CONFIG_SND_SOC_IPQ_MBOX=y +CONFIG_SND_SOC_IPQ_PCM_I2S=y +CONFIG_SND_SOC_IPQ_PCM_RAW=y +CONFIG_SND_SOC_IPQ_PCM_SPDIF=y +CONFIG_SND_SOC_IPQ_PCM_TDM=y +CONFIG_SND_SOC_IPQ_STEREO=y +CONFIG_SND_SOC_QCOM=y +# CONFIG_SND_SOC_STORM is not set +CONFIG_SOUND=y +CONFIG_SPARSE_IRQ=y +CONFIG_SPI=y +CONFIG_SPI_MASTER=y +CONFIG_SPI_QUP=y +CONFIG_SPI_SPIDEV=y +# CONFIG_SPI_VSC7385 is not set +CONFIG_SPMI=y +CONFIG_SPMI_MSM_PMIC_ARB=y +CONFIG_SRCU=y +# CONFIG_STRIP_ASM_SYMS is not set +# CONFIG_STOPWATCH is not set +CONFIG_SUSPEND=y +CONFIG_SWCONFIG=y +CONFIG_SWIOTLB=y +# CONFIG_SWAP is not set +CONFIG_SWP_EMULATE=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_THERMAL=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +# CONFIG_THUMB2_KERNEL is not set +# CONFIG_TICK_CPU_ACCOUNTING is not set +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_TRACING_EVENTS_GPIO=y +CONFIG_UBIFS_FS=y +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_UBIFS_FS_LZO=y 
+CONFIG_UBIFS_FS_XZ=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h" +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_USB_GADGET=n +CONFIG_USB_SUPPORT=y +# CONFIG_USB_DWC3_OF_SIMPLE is not set +# CONFIG_USB_QCOM_8X16_PHY is not set +# CONFIG_USB_QCOM_KS_BRIDGE is not set +# CONFIG_USB_QCOM_QUSB_PHY is not set +# CONFIG_USB_QCOM_QMP_PHY is not set +# CONFIG_USB_QCA_M31_PHY is not set +# CONFIG_USB_EHCI_ROOT_HUB_TT is not set +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +# CONFIG_USB_OHCI_LITTLE_ENDIAN is not set +CONFIG_USE_OF=y +CONFIG_VDSO=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_VFP=y +CONFIG_VFPv3=y +CONFIG_VM_EVENT_COUNTERS=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WL_TI is not set +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_XPS=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_BCJ=y +CONFIG_ZBOOT_ROM_BSS=0 +CONFIG_ZBOOT_ROM_TEXT=0 +CONFIG_ZLIB_DEFLATE=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_QCOM_CACHE_DUMP=y +CONFIG_QCOM_CACHE_DUMP_ON_PANIC=y +# CONFIG_QCOM_RESTART_REASON is not set +# CONFIG_QCOM_DLOAD_MODE is not set +CONFIG_FW_AUTH=y +CONFIG_FW_AUTH_TEST=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_PUBLIC_KEY_ALGO_RSA=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=n +CONFIG_KEYS=y +# CONFIG_SKB_RECYCLER is not set +CONFIG_SKB_RECYCLER_MULTI_CPU=y +# CONFIG_SKB_RECYCLER_PREALLOC is not set +# CONFIG_U_SERIAL_CONSOLE is not set +CONFIG_SCSI_SCAN_ASYNC=y +# CONFIG_NF_IPV6_DUMMY_HEADER is not set +# CONFIG_RMNET is not set +# CONFIG_RMNET_DATA is not set +# CONFIG_RMNET_CTL is not set +# CONFIG_MSM_SECURE_BUFFER is not set +# CONFIG_STAGING is not set +# CONFIG_ANDROID is not set +# CONFIG_ION is not set +# CONFIG_ION_DUMMY is not set +# CONFIG_ION_MSM is not set +# CONFIG_ION_TEST is not set +# CONFIG_CMA is not set +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_DMA_CMA is not set +# CONFIG_CMA_AREAS is not set +# 
CONFIG_CMA_SIZE_MBYTES is not set +# CONFIG_CMA_SIZE_SEL_MBYTES is not set +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +# CONFIG_CMA_ALIGNMENT is not set +# CONFIG_ASHMEM is not set +# CONFIG_ANDROID_TIMED_OUTPUT is not set +# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set +# CONFIG_SYNC is not set +# CONFIG_SW_SYNC is not set +# CONFIG_FSL_MC_BUS is not set +# CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS is not set +CONFIG_ALLOC_SKB_PAGE_FRAG_DISABLE=y +# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set +# CONFIG_MAILBOX is not set +# CONFIG_MAILBOX_TEST is not set +# CONFIG_QCOM_APCS_IPC is not set +# CONFIG_QCOM_GLINK_SSR is not set +# CONFIG_QCOM_Q6V5_WCSS is not set +# CONFIG_QCOM_SYSMON is not set +# CONFIG_QRTR is not set +# CONFIG_QRTR_SMD is not set +# CONFIG_QRTR_TUN is not set +# CONFIG_RPMSG is not set +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# CONFIG_RPMSG_CHAR is not set +# CONFIG_RPMSG_QCOM_GLINK_SMEM is not set +# CONFIG_RPMSG_QCOM_SMD is not set +CONFIG_QCA_MINIDUMP=y +# CONFIG_QCA_MINIDUMP_DEBUG is not set +# CONFIG_QRTR_USB is not set +# CONFIG_QRTR_FIFO is not set +CONFIG_QRTR_MHI=y +CONFIG_MHI_BUS=y +# CONFIG_MHI_QTI is not set +# CONFIG_MHI_NETDEV is not set +# CONFIG_MHI_DEBUG is not set +# CONFIG_MHI_UCI is not set +# CONFIG_MHI_SATELLITE is not set +# CONFIG_DIAG_OVER_QRTR is not set +# CONFIG_MSM_ADSPRPC is not set +CONFIG_CRYPTO_MICHAEL_MIC=y +# CONFIG_ARCH_HAS_KCOV is not set +# CONFIG_KCOV is not set +# CONFIG_GCC_PLUGINS is not set +# CONFIG_QTI_Q6V5_ADSP is not set +# CONFIG_MSM_RPM_RPMSG is not set +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_REGULATOR_RPM_GLINK is not set +# CONFIG_MTD_NAND_SERIAL is not set +# CONFIG_ARM_QTI_IPQ60XX_CPUFREQ is not set +# CONFIG_PAGE_SCOPE_MULTI_PAGE_READ is not set +# CONFIG_CRYPTO_NO_ZERO_LEN_HASH is not set +# CONFIG_CRYPTO_DISABLE_AES192_TEST is not set +# CONFIG_QTI_EUD is not set +# 
CONFIG_EUD_EXTCON_SUPPORT is not set +# CONFIG_CLK_TEST_5018 is not set +CONFIG_MAP_E_SUPPORT=y +# CONFIG_IPQ_FLASH_16M_PROFILE is not set +# CONFIG_QGIC2_MSI is not set +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_LEDS_GPIO=y +# CONFIG_ARCH_IPQ256M is not set +CONFIG_SKB_FIXED_SIZE_2K=y +# CONFIG_IPQ_MEM_PROFILE is not set +# CONFIG_VIRTIO_NET is not set +# CONFIG_QCA_85XX_SWITCH is not set +CONFIG_AQ_PHY=y +CONFIG_DIAG_CHAR=y +# CONFIG_HW_RANDOM_VIRTIO is not set +# CONFIG_BOOTCONFIG_PARTITION is not set +# CONFIG_CRYPTO_DEV_QCEDEV is not set +# CONFIG_CRYPTO_DEV_QCRYPTO is not set +# CONFIG_MHI_BUS_TEST is not set diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/ipq8074-hk14.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/ipq8074-hk14.dts deleted file mode 100644 index 7cfd0cb75..000000000 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/ipq8074-hk14.dts +++ /dev/null @@ -1,5 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -// Copyright (c) 2020 The Linux Foundation. All rights reserved. 
- -#include "../../../arm64/boot/dts/qcom/ipq8074-hk14.dts" -#include "ipq8074.dtsi" diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq5018-eap104.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq5018-eap104.dts index 75f65377d..af8b70256 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq5018-eap104.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq5018-eap104.dts @@ -15,4 +15,9 @@ */ #include "../../../arm64/boot/dts/qcom/qcom-ipq5018-eap104.dts" -#include "ipq5018.dtsi" + +/ { + pmuv8: pmu { + compatible = "arm,cortex-a7-pmu"; + }; +}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-cig-wf188n.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-cig-wf188n.dts index 1a05856f2..ffd789922 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-cig-wf188n.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-cig-wf188n.dts @@ -15,4 +15,4 @@ */ #include "../../../arm64/boot/dts/qcom/qcom-ipq6018-cig-wf188n.dts" -#include "ipq6018.dtsi" +#include "qcom-ipq6018.dtsi" diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-edgecore-eap101.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-edgecore-eap101.dts index 9fb3fbab5..531510a76 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-edgecore-eap101.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-edgecore-eap101.dts @@ -15,4 +15,4 @@ */ #include "../../../arm64/boot/dts/qcom/qcom-ipq6018-edgecore-eap101.dts" -#include "ipq6018.dtsi" +#include "qcom-ipq6018.dtsi" diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-gl-ax1800.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-gl-ax1800.dts index 3e5ba245c..29aa5d5e0 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-gl-ax1800.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-gl-ax1800.dts @@ -15,4 +15,4 
@@ */ #include "../../../arm64/boot/dts/qcom/qcom-ipq6018-gl-ax1800.dts" -#include "ipq6018.dtsi" +#include "qcom-ipq6018.dtsi" diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-gl-axt1800.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-gl-axt1800.dts index 6d2c63a20..f09f8b43d 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-gl-axt1800.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-gl-axt1800.dts @@ -15,4 +15,4 @@ */ #include "../../../arm64/boot/dts/qcom/qcom-ipq6018-gl-axt1800.dts" -#include "ipq6018.dtsi" +#include "qcom-ipq6018.dtsi" diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-hfcl-ion4xe.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-hfcl-ion4xe.dts index b6a717744..5a44b9a3e 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-hfcl-ion4xe.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-hfcl-ion4xe.dts @@ -14,10 +14,5 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include "../../../arm64/boot/dts/qcom/qcom-ipq6018-hfcl-ion4x.dts" -#include "ipq6018.dtsi" - -/ { - model = "HFCL ION4Xe"; - compatible = "hfcl,ion4xe", "qcom,ipq6018-cp01", "qcom,ipq6018"; -}; +#include "../../../arm64/boot/dts/qcom/qcom-ipq6018-hfcl-ion4xe.dts" +#include "qcom-ipq6018.dtsi" diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-hfcl-ion4xi.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-hfcl-ion4xi.dts index a2b3085d8..8edd1d817 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-hfcl-ion4xi.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-hfcl-ion4xi.dts @@ -14,10 +14,5 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include "../../../arm64/boot/dts/qcom/qcom-ipq6018-hfcl-ion4x.dts" -#include "ipq6018.dtsi" - -/ { - model = "HFCL ION4Xi"; - compatible = "hfcl,ion4xi", "qcom,ipq6018-cp01", "qcom,ipq6018"; -}; +#include "../../../arm64/boot/dts/qcom/qcom-ipq6018-hfcl-ion4xi.dts" +#include "qcom-ipq6018.dtsi" diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-wallys-dr6018-v4.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-wallys-dr6018-v4.dts index 94af33e3a..470e437e4 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-wallys-dr6018-v4.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-wallys-dr6018-v4.dts @@ -15,4 +15,4 @@ */ #include "../../../arm64/boot/dts/qcom/qcom-ipq6018-wallys-dr6018-v4.dts" -#include "ipq6018.dtsi" +#include "qcom-ipq6018.dtsi" diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-wallys-dr6018.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-wallys-dr6018.dts index 57f7fe0bb..1250eb19f 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-wallys-dr6018.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-wallys-dr6018.dts @@ -15,4 +15,4 @@ */ #include "../../../arm64/boot/dts/qcom/qcom-ipq6018-wallys-dr6018.dts" -#include "ipq6018.dtsi" +#include "qcom-ipq6018.dtsi" diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-yuncore-ax840.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-yuncore-ax840.dts index 7442c79b8..a84bcf361 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-yuncore-ax840.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq6018-yuncore-ax840.dts @@ -15,4 +15,4 @@ */ #include "../../../arm64/boot/dts/qcom/qcom-ipq6018-yuncore-ax840.dts" -#include "ipq6018.dtsi" +#include "qcom-ipq6018.dtsi" diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-eap102.dts 
b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-eap102.dts index f0adf8451..d1d3f3755 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-eap102.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-eap102.dts @@ -14,4 +14,13 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "../../../arm64/boot/dts/qcom/qcom-ipq807x-eap102.dts" -#include "ipq8074.dtsi" + +/ { + soc { + pmu { + compatible = "arm,cortex-a7-pmu"; + interrupts = ; + }; + }; +}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-eap106.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-eap106.dts index 119cdcbbe..1527f81a4 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-eap106.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-eap106.dts @@ -14,4 +14,13 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "../../../arm64/boot/dts/qcom/qcom-ipq807x-eap106.dts" -#include "ipq8074.dtsi" + +/ { + soc { + pmu { + compatible = "arm,cortex-a7-pmu"; + interrupts = ; + }; + }; +}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-ex227.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-ex227.dts index 76a3f8a94..f3b25e263 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-ex227.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-ex227.dts @@ -14,4 +14,13 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "../../../arm64/boot/dts/qcom/qcom-ipq807x-ex227.dts" -#include "ipq8074.dtsi" + +/ { + soc { + pmu { + compatible = "arm,cortex-a7-pmu"; + interrupts = ; + }; + }; +}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-ex447.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-ex447.dts index fcf9ffb22..7cfd66ba0 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-ex447.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-ex447.dts @@ -14,4 +14,13 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "../../../arm64/boot/dts/qcom/qcom-ipq807x-ex447.dts" -#include "ipq8074.dtsi" + +/ { + soc { + pmu { + compatible = "arm,cortex-a7-pmu"; + interrupts = ; + }; + }; +}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf194c.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf194c.dts index 5ed5e76cb..4274b6c70 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf194c.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf194c.dts @@ -14,4 +14,13 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "../../../arm64/boot/dts/qcom/qcom-ipq807x-wf194c.dts" -#include "ipq8074.dtsi" + +/ { + soc { + pmu { + compatible = "arm,cortex-a7-pmu"; + interrupts = ; + }; + }; +}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf194c4.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf194c4.dts index 8deabfa19..372f29df3 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf194c4.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf194c4.dts @@ -14,4 +14,13 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include "../../../arm64/boot/dts/qcom/qcom-ipq807x-wf194c4.dts" -#include "ipq8074.dtsi" + +/ { + soc { + pmu { + compatible = "arm,cortex-a7-pmu"; + interrupts = ; + }; + }; +}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf196.dts b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf196.dts index 3c95097b6..f8d31a052 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf196.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq807x-wf196.dts @@ -14,4 +14,13 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include "../../../arm64/boot/dts/qcom/qcom-ipq807x-wf196.dts" -#include "ipq8074.dtsi" + +/ { + soc { + pmu { + compatible = "arm,cortex-a7-pmu"; + interrupts = ; + }; + }; +}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-eap104.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-eap104.dts index d581a8897..cde789f51 100755 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-eap104.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-eap104.dts @@ -1,7 +1,5 @@ /dts-v1/; -/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. - * - * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -16,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include "ipq5018.dtsi" +#include "qcom-ipq5018.dtsi" / { #address-cells = <0x2>; @@ -40,7 +38,11 @@ chosen { bootargs = "console=ttyMSM0,115200,n8 rw init=/init"; + #ifdef __IPQ_MEM_PROFILE_256_MB__ + bootargs-append = " swiotlb=1"; + #else bootargs-append = " swiotlb=1 coherent_pool=2M"; + #endif stdout-path = "serial0"; }; @@ -78,22 +80,22 @@ * | QDSS | 0x4D200000 | 1MB | * +----------+--------------+-------------------------+ * | QCN6122_1| | | - * | data | 0x4D300000 | 13MB | + * | data | 0x4D300000 | 15MB | * +----------+--------------+-------------------------+ * | QCN6122_1| | | - * | M3 Dump | 0x4E000000 | 1MB | + * | M3 Dump | 0x4E200000 | 1MB | * +----------+--------------+-------------------------+ * | QCN6122_1| | | - * | QDSS | 0x4E100000 | 1MB | + * | QDSS | 0x4E300000 | 1MB | * +----------+--------------+-------------------------+ * | QCN6122_2| | | - * | data | 0x4E200000 | 13MB | + * | data | 0x4E400000 | 15MB | * +----------+--------------+-------------------------+ * | QCN6122_2| | | - * | M3 Dump | 0x4EF00000 | 1MB | + * | M3 Dump | 0x4F300000 | 1MB | * +----------+--------------+-------------------------+ * | QCN6122_2| | | - * | QDSS | 0x4F000000 | 1MB | + * | QDSS | 0x4F400000 | 1MB | * +----------+--------------+-------------------------+ * | | * | Rest of the memory for Linux | @@ -102,7 +104,7 @@ */ q6_mem_regions: q6_mem_regions@4B000000 { no-map; - reg = <0x0 0x4B000000 0x0 0x4100000>; + reg = <0x0 0x4B000000 0x0 0x4500000>; }; q6_code_data: q6_code_data@4B000000 { @@ -127,32 +129,32 @@ q6_qcn6122_data1: q6_qcn6122_data1@4D300000 { no-map; - reg = <0x0 0x4D300000 0x0 0xD00000>; + reg = <0x0 0x4D300000 0x0 0xF00000>; }; - m3_dump_qcn6122_1: m3_dump_qcn6122_1@4E000000 { + m3_dump_qcn6122_1: m3_dump_qcn6122_1@4E200000 { no-map; - reg = <0x0 0x4E000000 0x0 0x100000>; + reg = <0x0 0x4E200000 0x0 0x100000>; }; - q6_qcn6122_etr_1: q6_qcn6122_etr_1@4E100000 { + q6_qcn6122_etr_1: q6_qcn6122_etr_1@4E300000 { no-map; - reg = <0x0 0x4E100000 
0x0 0x100000>; + reg = <0x0 0x4E300000 0x0 0x100000>; }; - q6_qcn6122_data2: q6_qcn6122_data2@4E200000 { + q6_qcn6122_data2: q6_qcn6122_data2@4E400000 { no-map; - reg = <0x0 0x4E200000 0x0 0xD00000>; + reg = <0x0 0x4E400000 0x0 0xF00000>; }; - m3_dump_qcn6122_2: m3_dump_qcn6122_2@4EF00000 { + m3_dump_qcn6122_2: m3_dump_qcn6122_2@4F300000 { no-map; - reg = <0x0 0x4EF00000 0x0 0x100000>; + reg = <0x0 0x4F300000 0x0 0x100000>; }; - q6_qcn6122_etr_2: q6_qcn6122_etr_2@4F000000 { + q6_qcn6122_etr_2: q6_qcn6122_etr_2@4F400000 { no-map; - reg = <0x0 0x4F000000 0x0 0x100000>; + reg = <0x0 0x4F400000 0x0 0x100000>; }; #else /* 512MB/1GB Profiles @@ -299,6 +301,7 @@ blsp1_uart2: serial@78b0000 { pinctrl-0 = <&blsp1_uart_pins>; pinctrl-names = "default"; + status = "ok"; }; qpic_bam: dma@7984000{ @@ -341,6 +344,7 @@ pinctrl-0 = <&mdio1_pins>; pinctrl-names = "default"; phy-reset-gpio = <&tlmm 39 0>; + ethernet-phy@0 { reg = <0>; }; @@ -376,6 +380,8 @@ forced-duplex = <1>; }; }; + + /* led_source@0 { source = <0>; mode = "normal"; @@ -383,13 +389,15 @@ blink_en = "enable"; active = "high"; }; + */ + }; ess-switch1@1 { compatible = "qcom,ess-switch-qca83xx"; device_id = <1>; switch_access_mode = "mdio"; mdio-bus = <&mdio1>; - reset_gpio = <&tlmm 0x27 0>; + reset_gpio = <0x27>; switch_cpu_bmp = <0x40>; /* cpu port bitmap */ switch_lan_bmp = <0x1e>; /* lan port bitmap */ switch_wan_bmp = <0x0>; /* wan port bitmap */ @@ -424,8 +432,8 @@ }; }; - ess-uniphy@98000 { - status = "disabled"; + wifi0: wifi@c000000 { + status = "ok"; }; dp1 { @@ -442,7 +450,6 @@ mdio-bus = <&mdio0>; local-mac-address = [000000000000]; phy-mode = "sgmii"; - qcom,rx-page-mode = <0>; }; dp2 { @@ -456,13 +463,14 @@ qcom,mactype = <2>; local-mac-address = [000000000000]; phy-mode = "sgmii"; - qcom,rx-page-mode = <0>; }; - nss-macsec1 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x1c>; - mdiobus = <&mdio1>; + qcom,test@0 { + status = "ok"; + }; + + lpass: lpass@0xA000000{ + status = "disabled"; }; pcm: 
pcm@0xA3C0000{ @@ -471,151 +479,72 @@ status = "disabled"; }; - leds { - compatible = "gpio-leds"; - pinctrl-0 = <&leds_pins>; - pinctrl-names = "default"; - - led@2 { - label = "green:wifi5"; - gpios = <&tlmm 2 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; - - led@3 { - label = "green:wifi2"; - gpios = <&tlmm 3 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; - - led_power: led@30 { - label = "green:power"; - gpios = <&tlmm 30 GPIO_ACTIVE_HIGH>; - default-state = "on"; - }; - - led@42 { - label = "orange:uplink"; - gpios = <&tlmm 42 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; - - led@43 { - label = "yellow:uplink"; - gpios = <&tlmm 43 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; - - led@46 { - label = "green:cloud"; - gpios = <&tlmm 46 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; + pcm_lb: pcm_lb@0 { + status = "disabled"; }; }; - qcom,test@0 { - status = "ok"; - }; - thermal-zones { status = "ok"; }; + + gpio_keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + wps { + label = "reset"; + linux,code = ; + gpios = <&tlmm 32 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + debounce-interval = <60>; + }; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-0 = <&leds_pins>; + pinctrl-names = "default"; + + led@2 { + label = "green:wifi5"; + gpios = <&tlmm 2 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + led@3 { + label = "green:wifi2"; + gpios = <&tlmm 3 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + led_power: led@30 { + label = "green:power"; + gpios = <&tlmm 30 GPIO_ACTIVE_HIGH>; + default-state = "on"; + }; + led@42 { + label = "orange:uplink"; + gpios = <&tlmm 42 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + led@43 { + label = "yellow:uplink"; + gpios = <&tlmm 43 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + led@46 { + label = "green:cloud"; + gpios = <&tlmm 46 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + }; }; &tlmm { - pinctrl-0 = <&blsp0_uart_pins>; + /* pinctrl-0 = 
<&blsp0_uart_pins &phy_led_pins>; */ + pinctrl-0 = <&blsp0_uart_pins &phy_led_pins &ble_pins>; pinctrl-names = "default"; - blsp0_uart_pins: uart_pins { - blsp0_uart_rx_tx { - pins = "gpio20", "gpio21"; - function = "blsp0_uart0"; - bias-disable; - }; - }; - - blsp1_uart_pins: blsp1_uart_pins { - blsp1_uart_rx_tx { - pins = "gpio23", "gpio25", "gpio24", "gpio26"; - function = "blsp1_uart2"; - bias-disable; - }; - }; - - blsp0_spi_pins: blsp0_spi_pins { - mux { - pins = "gpio10", "gpio11", "gpio12", "gpio13"; - function = "blsp0_spi"; - drive-strength = <2>; - bias-disable; - }; - }; - - qspi_nand_pins: qspi_nand_pins { - qspi_clock { - pins = "gpio9"; - function = "qspi_clk"; - drive-strength = <8>; - bias-disable; - }; - - qspi_cs { - pins = "gpio8"; - function = "qspi_cs"; - drive-strength = <8>; - bias-disable; - }; - - qspi_data { - pins = "gpio4", "gpio5", "gpio6", "gpio7"; - function = "qspi_data"; - drive-strength = <8>; - bias-disable; - }; - }; - - mdio1_pins: mdio_pinmux { - mux_0 { - pins = "gpio36"; - function = "mdc"; - drive-strength = <8>; - bias-pull-up; - }; - - mux_1 { - pins = "gpio37"; - function = "mdio"; - drive-strength = <8>; - bias-pull-up; - }; - }; - - i2c_pins: i2c_pins { - i2c_scl { - pins = "gpio25"; - function = "blsp2_i2c1"; - drive-strength = <8>; - bias-disable; - }; - - i2c_sda { - pins = "gpio26"; - function = "blsp2_i2c1"; - drive-strength = <8>; - bias-disable; - }; - }; - - button_pins: button_pins { - wps_button { - pins = "gpio38"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; - leds_pins: leds_pins { led_5g { pins = "gpio2"; @@ -654,6 +583,131 @@ bias-pull-down; }; }; + blsp0_uart_pins: uart_pins { + blsp0_uart_rx_tx { + pins = "gpio20", "gpio21"; + function = "blsp0_uart0"; + bias-disable; + }; + }; + + blsp1_uart_pins: blsp1_uart_pins { + blsp1_uart_rx_tx { + pins = "gpio23", "gpio25", "gpio24", "gpio26"; + function = "blsp1_uart2"; + bias-disable; + }; + }; + + blsp0_spi_pins: blsp0_spi_pins { + 
mux { + pins = "gpio10", "gpio11", "gpio12", "gpio13"; + function = "blsp0_spi"; + drive-strength = <2>; + bias-disable; + }; + }; + + qspi_nand_pins: qspi_nand_pins { + qspi_clock { + pins = "gpio9"; + function = "qspi_clk"; + drive-strength = <8>; + bias-disable; + }; + qspi_cs { + pins = "gpio8"; + function = "qspi_cs"; + drive-strength = <8>; + bias-disable; + }; + qspi_data_0 { + pins = "gpio7"; + function = "qspi0"; + drive-strength = <8>; + bias-disable; + }; + qspi_data_1 { + pins = "gpio6"; + function = "qspi1"; + drive-strength = <8>; + bias-disable; + }; + qspi_data_2 { + pins = "gpio5"; + function = "qspi2"; + drive-strength = <8>; + bias-disable; + }; + qspi_data_3 { + pins = "gpio4"; + function = "qspi3"; + drive-strength = <8>; + bias-disable; + }; + }; + + mdio1_pins: mdio_pinmux { + mux_0 { + pins = "gpio36"; + function = "mdc"; + drive-strength = <8>; + bias-pull-up; + }; + + mux_1 { + pins = "gpio37"; + function = "mdio"; + drive-strength = <8>; + bias-pull-up; + }; + }; + + phy_led_pins: phy_led_pins { + gephy_led_pin { + pins = "gpio46"; + /* function = "led0"; */ + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + ble_pins: ble_pins { + ble_coex_grant { + pins = "gpio19"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + i2c_pins: i2c_pins { + i2c_scl { + pins = "gpio33"; + function = "blsp2_i2c0"; + drive-strength = <8>; + /* bias-disable; */ + bias-pull-up; + }; + + i2c_sda { + pins = "gpio34"; + function = "blsp2_i2c0"; + drive-strength = <8>; + /* bias-disable; */ + bias-pull-up; + }; + }; + + button_pins: button_pins { + wps_button { + pins = "gpio38"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + }; audio_pins: audio_pinmux { mux_1 { @@ -705,8 +759,10 @@ bias-pull-down; }; }; + }; +/* Disable gpio 38 and 24 &soc { gpio_keys { compatible = "gpio-keys"; @@ -714,174 +770,190 @@ pinctrl-names = "default"; button@1 { - label = "reset"; - linux,code = ; - gpios = <&tlmm 
32 GPIO_ACTIVE_LOW>; + label = "wps"; + linux,code = ; + gpios = <&tlmm 38 GPIO_ACTIVE_LOW>; linux,input-type = <1>; debounce-interval = <60>; }; }; }; -&q6v5_wcss { - compatible = "qcom,ipq5018-q6-mpd"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - firmware = "IPQ5018/q6_fw.mdt"; - reg = <0x0cd00000 0x4040>, - <0x1938000 0x8>, - <0x193d204 0x4>; - reg-names = "qdsp6", - "tcsr-msip", - "tcsr-q6"; - resets = <&gcc GCC_WCSSAON_RESET>, - <&gcc GCC_WCSS_Q6_BCR>; +&usb3 { + status = "ok"; + device-power-gpio = <&tlmm 24 1>; +}; - reset-names = "wcss_aon_reset", - "wcss_q6_reset"; +*/ - clocks = <&gcc GCC_Q6_AXIS_CLK>, - <&gcc GCC_WCSS_ECAHB_CLK>, - <&gcc GCC_Q6_AXIM_CLK>, - <&gcc GCC_Q6_AXIM2_CLK>, - <&gcc GCC_Q6_AHB_CLK>, - <&gcc GCC_Q6_AHB_S_CLK>, - <&gcc GCC_WCSS_AXI_S_CLK>; - clock-names = "gcc_q6_axis_clk", - "gcc_wcss_ecahb_clk", - "gcc_q6_axim_clk", - "gcc_q6_axim2_clk", - "gcc_q6_ahb_clk", - "gcc_q6_ahb_s_clk", - "gcc_wcss_axi_s_clk"; +&eud { + status = "ok"; +}; - #ifdef __IPQ_MEM_PROFILE_256_MB__ - memory-region = <&q6_mem_regions>, <&q6_etr_region>; - #else - memory-region = <&q6_mem_regions>, <&q6_etr_region>, - <&q6_caldb_region>; - #endif +&pcie_x1 { + status = "disabled"; + perst-gpio = <&tlmm 18 1>; +}; - qcom,rproc = <&q6v5_wcss>; +&pcie_x2 { + status = "disabled"; + perst-gpio = <&tlmm 15 1>; +}; + +&dwc_0 { + /delete-property/ #phy-cells; + /delete-property/ phys; + /delete-property/ phy-names; +}; + +&hs_m31phy_0 { + status = "ok"; +}; + +&pcie_x1phy { + status = "disabled"; +}; + +&pcie_x2phy { + status = "disabled"; +}; + +&pcie_x1_rp { + status = "disabled"; + + mhi_0: qcom,mhi@0 { + reg = <0 0 0 0 0 >; + }; +}; + +&pcie_x2_rp { + status = "disabled"; + + mhi_1: qcom,mhi@1 { + reg = <0 0 0 0 0 >; + + }; +}; + +&qfprom { + status = "ok"; +}; + +&tsens { + status = "ok"; +}; + +&qcom_q6v5_wcss { + qcom,multipd_arch; + memory-region = <&q6_mem_regions>; + qcom,share_bootargs; qcom,bootargs_smem = <507>; boot-args = <0x1 0x4 0x3 0x0F 0x0 
0x0>, <0x2 0x4 0x2 0x12 0x0 0x0>; - status = "ok"; - q6_wcss_pd1: remoteproc_pd1@4ab000 { - compatible = "qcom,ipq5018-wcss-ahb-mpd"; - reg = <0x4ab000 0x20>; - reg-names = "rmb"; - firmware = "IPQ5018/q6_fw.mdt"; + /* IPQ5018 */ + q6v5_wcss_userpd1 { m3_firmware = "IPQ5018/m3_fw.mdt"; interrupts-extended = <&wcss_smp2p_in 8 0>, - <&wcss_smp2p_in 9 0>, - <&wcss_smp2p_in 12 0>, - <&wcss_smp2p_in 11 0>; - interrupt-names = "fatal", - "ready", - "spawn-ack", - "stop-ack"; - - resets = <&gcc GCC_WCSSAON_RESET>, - <&gcc GCC_WCSS_BCR>, - <&gcc GCC_CE_BCR>; - reset-names = "wcss_aon_reset", - "wcss_reset", - "ce_reset"; - - clocks = <&gcc GCC_WCSS_AHB_S_CLK>, - <&gcc GCC_WCSS_ACMT_CLK>, - <&gcc GCC_WCSS_AXI_M_CLK>; - clock-names = "gcc_wcss_ahb_s_clk", - "gcc_wcss_acmt_clk", - "gcc_wcss_axi_m_clk"; - - qcom,halt-regs = <&tcsr_q6_block 0xa000 0xd000 0x0>; - + <&wcss_smp2p_in 9 0>, + <&wcss_smp2p_in 12 0>, + <&wcss_smp2p_in 11 0>; + interrupt-names ="fatal", + "ready", + "spawn_ack", + "stop-ack"; qcom,smem-states = <&wcss_smp2p_out 8>, - <&wcss_smp2p_out 9>, - <&wcss_smp2p_out 10>; + <&wcss_smp2p_out 9>, + <&wcss_smp2p_out 10>; qcom,smem-state-names = "shutdown", - "stop", - "spawn"; - #ifdef __IPQ_MEM_PROFILE_256_MB__ + "stop", + "spawn"; + qca,asid = <1>; + qca,auto-restart; + qca,int_radio; + #ifdef __IPQ_MEM_PROFILE_256_MB__ memory-region = <&q6_ipq5018_data>, <&m3_dump>, <&q6_etr_region>; - #else + #else memory-region = <&q6_ipq5018_data>, <&m3_dump>, <&q6_etr_region>, <&q6_caldb_region>; - #endif - + #endif }; - q6_wcss_pd2: remoteproc_pd2 { - compatible = "qcom,ipq5018-wcss-pcie-mpd"; - firmware = "IPQ5018/q6_fw.mdt"; + /* QCN6122 6G */ + q6v5_wcss_userpd2 { m3_firmware = "qcn6122/m3_fw.mdt"; interrupts-extended = <&wcss_smp2p_in 16 0>, - <&wcss_smp2p_in 17 0>, - <&wcss_smp2p_in 20 0>, - <&wcss_smp2p_in 19 0>; - interrupt-names = "fatal", - "ready", - "spawn-ack", - "stop-ack"; - + <&wcss_smp2p_in 17 0>, + <&wcss_smp2p_in 20 0>, + <&wcss_smp2p_in 19 0>; + 
interrupt-names ="fatal", + "ready", + "spawn_ack", + "stop-ack"; qcom,smem-states = <&wcss_smp2p_out 16>, - <&wcss_smp2p_out 17>, - <&wcss_smp2p_out 18>; + <&wcss_smp2p_out 17>, + <&wcss_smp2p_out 18>; qcom,smem-state-names = "shutdown", - "stop", - "spawn"; - #ifdef __IPQ_MEM_PROFILE_256_MB__ + "stop", + "spawn"; + qca,asid = <2>; + qca,auto-restart; + #ifdef __IPQ_MEM_PROFILE_256_MB__ memory-region = <&q6_qcn6122_data1>, <&m3_dump_qcn6122_1>, <&q6_qcn6122_etr_1>; - #else + #else memory-region = <&q6_qcn6122_data1>, <&m3_dump_qcn6122_1>, <&q6_qcn6122_etr_1>, <&q6_qcn6122_caldb_1>; - #endif - + #endif }; - q6_wcss_pd3: remoteproc_pd3 { - compatible = "qcom,ipq5018-wcss-pcie-mpd"; - firmware = "IPQ5018/q6_fw.mdt"; + /* QCN6122 5G */ + q6v5_wcss_userpd3 { + m3_firmware = "qcn6122/m3_fw.mdt"; interrupts-extended = <&wcss_smp2p_in 24 0>, - <&wcss_smp2p_in 25 0>, - <&wcss_smp2p_in 28 0>, - <&wcss_smp2p_in 27 0>; - interrupt-names = "fatal", - "ready", - "spawn-ack", - "stop-ack"; - + <&wcss_smp2p_in 25 0>, + <&wcss_smp2p_in 28 0>, + <&wcss_smp2p_in 27 0>; + interrupt-names ="fatal", + "ready", + "spawn_ack", + "stop-ack"; qcom,smem-states = <&wcss_smp2p_out 24>, - <&wcss_smp2p_out 25>, - <&wcss_smp2p_out 26>; + <&wcss_smp2p_out 25>, + <&wcss_smp2p_out 26>; qcom,smem-state-names = "shutdown", - "stop", - "spawn"; - #ifdef __IPQ_MEM_PROFILE_256_MB__ + "stop", + "spawn"; + qca,asid = <3>; + qca,auto-restart; + #ifdef __IPQ_MEM_PROFILE_256_MB__ memory-region = <&q6_qcn6122_data2>, <&m3_dump_qcn6122_2>, <&q6_qcn6122_etr_2>; - #else + #else memory-region = <&q6_qcn6122_data2>, <&m3_dump_qcn6122_2>, <&q6_qcn6122_etr_2>, <&q6_qcn6122_caldb_2>; - #endif + #endif }; }; &i2c_0 { pinctrl-0 = <&i2c_pins>; pinctrl-names = "default"; - status = "disabled"; + /* status = "disabled"; */ + status = "ok"; +}; + +&qgic_msi_0 { + status = "ok"; +}; + +&qgic_msi_1 { + status = "ok"; }; &wifi0 { /* IPQ5018 */ qcom,multipd_arch; - qcom,rproc = <&q6_wcss_pd1>; qcom,userpd-subsys-name = 
"q6v5_wcss_userpd1"; #ifdef __IPQ_MEM_PROFILE_256_MB__ qcom,tgt-mem-mode = <2>; @@ -897,7 +969,6 @@ m3-dump-addr = <0x4D200000>; #endif qcom,caldb-size = <0x200000>; - mem-region = <&q6_ipq5018_data>; status = "ok"; }; @@ -905,13 +976,12 @@ /* QCN6122 5G */ qcom,multipd_arch; qcom,userpd-subsys-name = "q6v5_wcss_userpd2"; - qcom,rproc = <&q6_wcss_pd2>; #ifdef __IPQ_MEM_PROFILE_256_MB__ qcom,tgt-mem-mode = <2>; #else qcom,tgt-mem-mode = <1>; #endif - qcom,board_id = <0x60>; + qcom,board_id = <0x50>; qcom,bdf-addr = <0x4D600000 0x4D600000 0x4D300000 0x0 0x0>; #ifdef __CNSS2__ qcom,caldb-addr = <0x4E800000 0x4E800000 0 0 0>; @@ -920,7 +990,6 @@ m3-dump-addr = <0x4E600000>; #endif qcom,caldb-size = <0x500000>; - mem-region = <&q6_qcn6122_data1>; status = "disabled"; }; @@ -928,14 +997,13 @@ /* QCN6122 6G */ qcom,multipd_arch; qcom,userpd-subsys-name = "q6v5_wcss_userpd3"; - qcom,rproc = <&q6_wcss_pd3>; #ifdef __IPQ_MEM_PROFILE_256_MB__ qcom,tgt-mem-mode = <2>; #else qcom,tgt-mem-mode = <1>; #endif qcom,board_id = <0xb0>; - qcom,bdf-addr = <0x4ED00000 0x4ED00000 0x4E200000 0x0 0x0>; + qcom,bdf-addr = <0x4ED00000 0x4ED00000 0x4E400000 0x0 0x0>; #ifdef __CNSS2__ qcom,caldb-addr = <0x4FF00000 0x4FF00000 0 0 0>; #else @@ -943,60 +1011,5 @@ m3-dump-addr = <0x4FD00000>; #endif qcom,caldb-size = <0x500000>; - mem-region = <&q6_qcn6122_data2>; status = "ok"; }; - -&usb3 { - status = "ok"; - device-power-gpio = <&tlmm 24 1>; -}; - -&dwc_0 { - /delete-property/ #phy-cells; - /delete-property/ phys; - /delete-property/ phy-names; -}; - -&hs_m31phy_0 { - status = "ok"; -}; - -&eud { - status = "ok"; -}; - -&pcie_x1 { - status = "disabled"; - perst-gpio = <&tlmm 18 GPIO_ACTIVE_LOW>; -}; - -&pcie_x2 { - status = "disabled"; - perst-gpio = <&tlmm 15 GPIO_ACTIVE_LOW>; -}; - -&pcie_x1phy { - status = "disabled"; -}; - -&pcie_x2phy { - status = "disabled"; -}; - -&pcie_x1_rp { - status = "disabled"; - - mhi_0: qcom,mhi@0 { - reg = <0 0 0 0 0 >; - }; -}; - -&pcie_x2_rp { - status = 
"disabled"; - - mhi_1: qcom,mhi@1 { - reg = <0 0 0 0 0 >; - - }; -}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-eww622-a1.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-eww622-a1.dts index a7e238f48..c029130da 100755 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-eww622-a1.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-eww622-a1.dts @@ -1,7 +1,5 @@ /dts-v1/; -/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. - * - * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. +/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -16,14 +14,17 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include "ipq5018.dtsi" +#include "qcom-ipq5018.dtsi" / { #address-cells = <0x2>; #size-cells = <0x2>; model = "Qualcomm Technologies, Inc. 
IPQ5018/AP-MP03.1"; - compatible = "qcom,ipq5018-ap-mp03.1", "qcom,ipq5018-mp03.1", "qcom,ipq5018"; + compatible = "qcom,ipq5018-mp03.1", "qcom,ipq5018"; interrupt-parent = <&intc>; + #ifdef __IPQ_MEM_PROFILE_256_MB__ + AUTO_MOUNT; + #endif aliases { sdhc1 = &sdhc_1; /* SDC1 eMMC slot */ @@ -31,11 +32,19 @@ serial1 = &blsp1_uart2; ethernet0 = "/soc/dp1"; ethernet1 = "/soc/dp2"; + led-boot = &led_sys; + led-failsafe = &led_sys; + led-running = &led_sys; + led-upgrade = &led_sys; }; chosen { bootargs = "console=ttyMSM0,115200,n8 rw init=/init"; + #ifdef __IPQ_MEM_PROFILE_256_MB__ + bootargs-append = " swiotlb=1"; + #else bootargs-append = " swiotlb=1 coherent_pool=2M"; + #endif stdout-path = "serial0"; }; @@ -92,7 +101,7 @@ * | | | | * +--------+--------------+-------------------------+ * | | | | - * | MHI1 | 0x4DA00000 | 9MB | + * | MHI1 | 0x4DA00000 | 16MB | * | | | | * +--------+--------------+-------------------------+ * | | @@ -100,7 +109,7 @@ * | | * +=================================================+ */ - q6_region: memory@4b000000 { + q6_region: wcnss@4b000000 { no-map; reg = <0x0 0x4b000000 0x0 0x01700000>; }; @@ -115,18 +124,16 @@ reg = <0x0 0x4c800000 0x0 0x100000>; }; - qcn9000_pcie0: qcn9000_pcie0@4c900000 { + qcn9000_pcie0@4c900000 { no-map; reg = <0x0 0x4C900000 0x0 0x01100000>; }; - #if defined(__CNSS2__) mhi_region1: dma_pool1@4da00000 { compatible = "shared-dma-pool"; no-map; - reg = <0x0 0x4DA00000 0x0 0x00900000>; + reg = <0x0 0x4da00000 0x0 0x01000000>; }; - #endif #elif __IPQ_MEM_PROFILE_512_MB__ /* 512 MB Profile * +=========+==============+========================+ @@ -176,11 +183,11 @@ * | caldb | 0x4CA00000 | 2MB | * +--------+--------------+-------------------------+ * | | | | - * |QCN9000 | 0x4CC00000 | 38MB | + * |QCN9000 | 0x4CC00000 | 30MB | * | | | | * +--------+--------------+-------------------------+ * | | | | - * | MHI1 | 0x4F200000 | 9MB | + * | MHI1 | 0x4EA00000 | 16MB | * | | | | * 
+--------+--------------+-------------------------+ * | | @@ -188,7 +195,7 @@ * | | * +=================================================+ */ - q6_region: memory@4b000000 { + q6_region: wcnss@4b000000 { no-map; reg = <0x0 0x4b000000 0x0 0x01800000>; }; @@ -208,18 +215,16 @@ reg = <0x0 0x4ca00000 0x0 0x200000>; }; - qcn9000_pcie0: qcn9000_pcie0@4cc00000 { + qcn9000_pcie0@4cc00000 { no-map; - reg = <0x0 0x4CC00000 0x0 0x02600000>; + reg = <0x0 0x4CC00000 0x0 0x01E00000>; }; - #if defined(__CNSS2__) - mhi_region1: dma_pool1@4f200000 { + mhi_region1: dma_pool1@4ea00000 { compatible = "shared-dma-pool"; no-map; - reg = <0x0 0x4f200000 0x0 0x00900000>; + reg = <0x0 0x4ea00000 0x0 0x01000000>; }; - #endif #else /* 1G Profile * +=========+==============+========================+ @@ -269,11 +274,11 @@ * | caldb | 0x4CA00000 | 2MB | * +--------+--------------+-------------------------+ * | | | | - * |QCN9000 | 0x4CC00000 | 53MB | + * |QCN9000 | 0x4CC00000 | 45MB | * | | | | * +--------+--------------+-------------------------+ * | | | | - * | MHI1 | 0x50100000 | 9MB | + * | MHI1 | 0x4F900000 | 24MB | * | | | | * +--------+--------------+-------------------------+ * | | @@ -281,7 +286,7 @@ * | | * +=================================================+ */ - q6_region: memory@4b000000 { + q6_region: wcnss@4b000000 { no-map; reg = <0x0 0x4b000000 0x0 0x01800000>; }; @@ -301,19 +306,17 @@ reg = <0x0 0x4ca00000 0x0 0x200000>; }; - qcn9000_pcie0: qcn9000_pcie0@4cc00000 { + qcn9000_pcie0@4cc00000 { no-map; - reg = <0x0 0x4CC00000 0x0 0x03500000>; + reg = <0x0 0x4CC00000 0x0 0x02D00000>; }; - #if defined(__CNSS2__) - mhi_region1: dma_pool1@50100000 { + mhi_region1: dma_pool1@4F900000 { compatible = "shared-dma-pool"; no-map; - reg = <0x0 0x50100000 0x0 0x00900000>; + reg = <0x0 0x4F900000 0x0 0x01800000>; }; #endif - #endif }; soc { @@ -425,7 +428,7 @@ device_id = <1>; switch_access_mode = "mdio"; mdio-bus = <&mdio1>; - reset_gpio = <&tlmm 0x27 0>; + reset_gpio = <0x27>; switch_cpu_bmp = 
<0x40>; /* cpu port bitmap */ switch_lan_bmp = <0x3c>; /* lan port bitmap */ switch_wan_bmp = <0x0>; /* wan port bitmap */ @@ -464,6 +467,15 @@ }; }; + wifi0: wifi@c000000 { + qcom,bdf-addr = <0x4BA00000 0x4BA00000 0x4BA00000 + 0x0 0x0>; + qcom,caldb-addr = <0x4CA00000 0x4CA00000 0x4CA00000 + 0x0 0x0>; + qcom,caldb-size = <0x200000>; + status = "ok"; + }; + ess-uniphy@98000 { status = "disabled"; }; @@ -472,6 +484,10 @@ status = "ok"; }; + qcom,usbbam@8B04000 { + status = "ok"; + }; + qcom,diag@0 { status = "ok"; }; @@ -490,7 +506,6 @@ mdio-bus = <&mdio0>; local-mac-address = [000000000000]; phy-mode = "sgmii"; - qcom,rx-page-mode = <0>; }; dp2 { @@ -504,74 +519,16 @@ qcom,mactype = <2>; local-mac-address = [000000000000]; phy-mode = "sgmii"; - qcom,rx-page-mode = <0>; }; rpm_etm0 { status = "disabled"; }; - - pcm: pcm@0xA3C0000{ - pinctrl-0 = <&audio_pins>; - pinctrl-names = "default"; - status = "disabled"; - }; - - gpio_keys { - compatible = "gpio-keys"; - pinctrl-0 = <&button_pins>; - pinctrl-names = "default"; - - button@1 { - label = "wps"; - linux,code = ; - gpios = <&tlmm 27 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; - - button@2 { - label = "reset"; - linux,code = ; - gpios = <&tlmm 28 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; - }; - - gpio_leds { - compatible = "gpio-leds"; - pinctrl-0 = <&leds_pins>; - pinctrl-names = "default"; - - led_sys: led@1 { - label = "sys:blue"; - gpios = <&tlmm 1 GPIO_ACTIVE_HIGH>; /* GPIO_1 */ - default-state="on"; - }; - - led@35 { - label = "sys:green"; - gpios = <&tlmm 35 GPIO_ACTIVE_HIGH>; /* GPIO_35 */ - default-state="off"; - }; - - led@31 { - label = "sys:red"; - gpios = <&tlmm 31 GPIO_ACTIVE_HIGH>; /* GPIO_31 */ - default-state="off"; - }; - }; - }; - - qcom,test@0 { - status = "ok"; }; thermal-zones { status = "ok"; }; - }; &tlmm { @@ -610,17 +567,33 @@ drive-strength = <8>; bias-disable; }; - qspi_cs { pins = "gpio8"; function = "qspi_cs"; 
drive-strength = <8>; bias-disable; }; - - qspi_data { - pins = "gpio4", "gpio5", "gpio6", "gpio7"; - function = "qspi_data"; + qspi_data_0 { + pins = "gpio7"; + function = "qspi0"; + drive-strength = <8>; + bias-disable; + }; + qspi_data_1 { + pins = "gpio6"; + function = "qspi1"; + drive-strength = <8>; + bias-disable; + }; + qspi_data_2 { + pins = "gpio5"; + function = "qspi2"; + drive-strength = <8>; + bias-disable; + }; + qspi_data_3 { + pins = "gpio4"; + function = "qspi3"; drive-strength = <8>; bias-disable; }; @@ -643,12 +616,18 @@ }; phy_led_pins: phy_led_pins { - gephy_led_pin { - pins = "gpio46"; - function = "led0"; - drive-strength = <8>; - bias-pull-down; - }; + gephy_led_pin_1g { + pins = "gpio30"; + function = "led2"; + drive-strength = <8>; + bias-pull-down; + }; + gephy_led_pin_100 { + pins = "gpio46"; + function = "led0"; + drive-strength = <8>; + bias-pull-down; + }; }; i2c_pins: i2c_pins { @@ -668,95 +647,87 @@ }; button_pins: button_pins { - wps_button { - pins = "gpio27"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - - reset_button { - pins = "gpio28"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; - - leds_pins: leds_pinmux { - sys_blue { - pins = "gpio1"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - sys_green { - pins = "gpio35"; - function = "gpio"; - drive-strength = <8>; - bias-disable; - }; - sys_red { - pins = "gpio31"; - function = "gpio"; - drive-strength = <8>; - bias-disable; - }; - }; - - audio_pins: audio_pinmux { - mux_1 { - pins = "gpio24"; - function = "audio_rxbclk"; - drive-strength = <8>; - bias-pull-down; - }; - - mux_2 { - pins = "gpio25"; - function = "audio_rxfsync"; - drive-strength = <8>; - bias-pull-down; - }; - - mux_3 { - pins = "gpio26"; - function = "audio_rxd"; - drive-strength = <8>; - bias-pull-down; - }; - - mux_4 { + wps_button { pins = "gpio27"; - function = "audio_txmclk"; + function = "gpio"; drive-strength = <8>; - bias-pull-down; + 
bias-pull-up; }; - - mux_5 { + + reset_button { pins = "gpio28"; - function = "audio_txbclk"; + function = "gpio"; drive-strength = <8>; - bias-pull-down; - }; - - mux_6 { - pins = "gpio29"; - function = "audio_txfsync"; - drive-strength = <8>; - bias-pull-down; - }; - - mux_7 { - pins = "gpio30"; - function = "audio_txd"; - drive-strength = <8>; - bias-pull-down; + bias-pull-up; }; }; + + leds_pins: leds_pinmux { + sys_blue { + pins = "gpio1"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + sys_green { + pins = "gpio35"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + sys_red { + pins = "gpio31"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + }; + }; + }; &soc { + gpio_keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + button@1 { + label = "wps"; + linux,code = ; + gpios = <&tlmm 27 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + debounce-interval = <60>; + }; + + button@2 { + label = "reset"; + linux,code = ; + gpios = <&tlmm 28 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + debounce-interval = <60>; + }; + }; + gpio_leds { + compatible = "gpio-leds"; + pinctrl-0 = <&leds_pins>; + pinctrl-names = "default"; + + led_sys: led@1 { + label = "sys:blue"; + gpios = <&tlmm 1 GPIO_ACTIVE_HIGH>; /* GPIO_1 */ + default-state="on"; + }; + led@35 { + label = "sys:green"; + gpios = <&tlmm 35 GPIO_ACTIVE_HIGH>; /* GPIO_35 */ + default-state="off"; + }; + led@31 { + label = "sys:red"; + gpios = <&tlmm 31 GPIO_ACTIVE_HIGH>; /* GPIO_31 */ + default-state="off"; + }; + }; }; &usb3 { @@ -772,22 +743,42 @@ &pcie_x1 { status = "disabled"; - perst-gpio = <&tlmm 18 GPIO_ACTIVE_LOW>; + perst-gpio = <&tlmm 18 1>; }; &pcie_x2 { status = "ok"; - perst-gpio = <&tlmm 15 GPIO_ACTIVE_LOW>; + perst-gpio = <&tlmm 15 1>; +}; + +&bt { + status = "ok"; }; &wcss { status = "ok"; }; +&q6v5_wcss { + status = "disabled"; +}; + +&q6v5_m3 { + status = "disabled"; +}; + +&tcsr_mutex_block { + status = "ok"; 
+}; + &tcsr_mutex { status = "ok"; }; +&smem { + status = "ok"; +}; + &apcs_glb { status = "ok"; }; @@ -796,13 +787,34 @@ status = "ok"; }; -&q6v5_wcss { +&qcom_q6v5_wcss { #ifdef __IPQ_MEM_PROFILE_256_MB__ memory-region = <&q6_region>, <&q6_etr_region>; #else memory-region = <&q6_region>, <&q6_etr_region>, <&q6_caldb_region>; #endif + /* IPQ5018 */ + q6v5_wcss_userpd1 { + m3_firmware = "IPQ5018/m3_fw.mdt"; + interrupts-extended = <&wcss_smp2p_in 8 0>, + <&wcss_smp2p_in 9 0>, + <&wcss_smp2p_in 12 0>, + <&wcss_smp2p_in 11 0>; + interrupt-names ="fatal", + "ready", + "spawn_ack", + "stop-ack"; + qcom,smem-states = <&wcss_smp2p_out 8>, + <&wcss_smp2p_out 9>, + <&wcss_smp2p_out 10>; + qcom,smem-state-names = "shutdown", + "stop", + "spawn"; + qca,asid = <1>; + qca,auto-restart; + qca,int_radio; + }; }; &i2c_0 { @@ -811,6 +823,14 @@ status = "disabled"; }; +&dbm_1p5 { + status = "ok"; +}; + +&msm_imem { + status = "ok"; +}; + &blsp1_uart1 { status = "ok"; }; @@ -847,108 +867,35 @@ qrtr_instance_id = <0x20>; #address-cells = <0x2>; #size-cells = <0x2>; -#if defined(__CNSS2__) memory-region = <&mhi_region1>; -#else +#if !defined(__CNSS2__) base-addr = <0x4CC00000>; m3-dump-addr = <0x4E000000>; etr-addr = <0x4E100000>; qcom,caldb-addr = <0x4E200000>; - pageable-addr = <0x4EA00000>; qcom,tgt-mem-mode = <0x1>; + mhi,max-channels = <30>; + mhi,timeout = <10000>; #endif }; }; &wifi0 { /* IPQ5018 */ - mem-region = <&q6_region>; qcom,board_id = <0x24>; - - qcom,bdf-addr = <0x4BA00000 0x4BA00000 0x4BA00000 - 0x0 0x0>; - qcom,caldb-addr = <0x4CA00000 0x4CA00000 0x0 0x0 0x0>; - qcom,caldb-size = <0x200000>; status = "ok"; }; &wifi3 { - /* QCN9000 5G */ - board_id = <0xa0>; - hremote_node = <&qcn9000_pcie0>; -#ifdef __IPQ_MEM_PROFILE_256_MB__ - /* QCN9000 tgt-mem-mode=2 layout - 17MB - * +=========+==============+=========+ - * | Region | Start Offset | Size | - * +---------+--------------+---------+ - * | HREMOTE | 0x4C900000 | 11MB | - * +---------+--------------+---------+ - * | 
M3 Dump | 0x4D400000 | 1MB | - * +---------+--------------+---------+ - * | ETR | 0x4D500000 | 1MB | - * +---------+--------------+---------+ - * | Pageable| 0x4D600000 | 4MB | - * +==================================+ - */ - base-addr = <0x4C900000>; - m3-dump-addr = <0x4D400000>; - etr-addr = <0x4D500000>; - caldb-addr = <0>; - pageable-addr = <0x4D600000>; - caldb-size = <0>; - hremote-size = <0xB00000>; - tgt-mem-mode = <0x2>; - pageable-size = <0x400000>; -#elif __IPQ_MEM_PROFILE_512_MB__ - /* QCN9000 tgt-mem-mode=1 layout - 38MB - * +=========+==============+=========+ - * | Region | Start Offset | Size | - * +---------+--------------+---------+ - * | HREMOTE | 0x4CC00000 | 20MB | - * +---------+--------------+---------+ - * | M3 Dump | 0x4E000000 | 1MB | - * +---------+--------------+---------+ - * | ETR | 0x4E100000 | 1MB | - * +---------+--------------+---------+ - * | Caldb | 0x4E200000 | 8MB | - * +---------+--------------+---------+ - * | Pageable| 0x4EA00000 | 8MB | - * +==================================+ - */ - base-addr = <0x4CC00000>; - m3-dump-addr = <0x4E000000>; - etr-addr = <0x4E100000>; - caldb-addr = <0x4E200000>; - pageable-addr = <0x4EA00000>; - caldb-size = <0x800000>; - hremote-size = <0x1400000>; - tgt-mem-mode = <0x1>; - pageable-size = <0x800000>; -#else - /* QCN9000 tgt-mem-mode=0 layout - 53MB - * +=========+==============+=========+ - * | Region | Start Offset | Size | - * +---------+--------------+---------+ - * | HREMOTE | 0x4CC00000 | 35MB | - * +---------+--------------+---------+ - * | M3 Dump | 0x4EF00000 | 1MB | - * +---------+--------------+---------+ - * | ETR | 0x4F000000 | 1MB | - * +---------+--------------+---------+ - * | Caldb | 0x4F100000 | 8MB | - * +---------+--------------+---------+ - * | Pageable| 0x4F900000 | 8MB | - * +==================================+ - */ - base-addr = <0x4CC00000>; - m3-dump-addr = <0x4EF00000>; - etr-addr = <0x4F000000>; - caldb-addr = <0x4F100000>; - pageable-addr = <0x4F900000>; - 
hremote-size = <0x2300000>; - caldb-size = <0x800000>; - tgt-mem-mode = <0x0>; - pageable-size = <0x800000>; -#endif + /* QCN9000 5G */ + board_id = <0xa0>; + status = "ok"; +}; + +&qfprom { + status = "ok"; +}; + +&tsens { status = "ok"; }; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-q14.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-q14.dts deleted file mode 100755 index 40bb66907..000000000 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq5018-q14.dts +++ /dev/null @@ -1,934 +0,0 @@ -/dts-v1/; -/* Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. - * - * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include "ipq5018.dtsi" - -/ { - #address-cells = <0x2>; - #size-cells = <0x2>; - model = "Mototola Q14"; - compatible = "motorola,q14", "qcom,ipq5018-mp03.5", "qcom,ipq5018"; - interrupt-parent = <&intc>; - - aliases { - sdhc1 = &sdhc_1; /* SDC1 eMMC slot */ - serial0 = &blsp1_uart1; - serial1 = &blsp1_uart2; - ethernet0 = "/soc/dp1"; - ethernet1 = "/soc/dp2"; - }; - - chosen { - bootargs = "console=ttyMSM0,115200,n8 rw init=/init"; - bootargs-append = " swiotlb=1 coherent_pool=2M"; - stdout-path = "serial0"; - }; - - reserved-memory { - #ifdef __IPQ_MEM_PROFILE_256_MB__ - /* 256 MB Profile - * +==========+==============+=========================+ - * | | | | - * | Region | Start Offset | Size | - * | | | | - * +----------+--------------+-------------------------+ - * | NSS | 0x40000000 | 8MB | - * +----------+--------------+-------------------------+ - * | Linux | 0x40800000 | Depends on total memory | - * +----------+--------------+-------------------------+ - * | uboot | 0x4A600000 | 4MB | - * +----------+--------------+-------------------------+ - * | SBL | 0x4AA00000 | 1MB | - * +----------+--------------+-------------------------+ - * | smem | 0x4AB00000 | 1MB | - * +----------+--------------+-------------------------+ - * | TZ | 0x4AC00000 | 4MB | - * +----------+--------------+-------------------------+ - * | Q6 | | | - * | code/ | 0x4B000000 | 20MB | - * | data | | | - * +----------+--------------+-------------------------+ - * | IPQ5018 | | | - * | data | 0x4C400000 | 13MB | - * +----------+--------------+-------------------------+ - * | IPQ5018 | | | - * | M3 Dump | 0x4D100000 | 1MB | - * +----------+--------------+-------------------------+ - * | IPQ5018 | | | - * | QDSS | 0x4D200000 | 1MB | - * +----------+--------------+-------------------------+ - * | QCN6122_1| | | - * | data | 0x4D300000 | 13MB | - * +----------+--------------+-------------------------+ - * | QCN6122_1| | | - * | M3 Dump | 0x4E000000 | 1MB | - * 
+----------+--------------+-------------------------+ - * | QCN6122_1| | | - * | QDSS | 0x4E100000 | 1MB | - * +----------+--------------+-------------------------+ - * | QCN6122_2| | | - * | data | 0x4E200000 | 13MB | - * +----------+--------------+-------------------------+ - * | QCN6122_2| | | - * | M3 Dump | 0x4EF00000 | 1MB | - * +----------+--------------+-------------------------+ - * | QCN6122_2| | | - * | QDSS | 0x4F000000 | 1MB | - * +----------+--------------+-------------------------+ - * | | - * | Rest of the memory for Linux | - * | | - * +===================================================+ - */ - q6_mem_regions: q6_mem_regions@4B000000 { - no-map; - reg = <0x0 0x4B000000 0x0 0x4100000>; - }; - - q6_code_data: q6_code_data@4B000000 { - no-map; - reg = <0x0 0x4B000000 0x0 0x1400000>; - }; - - q6_ipq5018_data: q6_ipq5018_data@4C400000 { - no-map; - reg = <0x0 0x4C400000 0x0 0xD00000>; - }; - - m3_dump: m3_dump@4D100000 { - no-map; - reg = <0x0 0x4D100000 0x0 0x100000>; - }; - - q6_etr_region: q6_etr_dump@4D200000 { - no-map; - reg = <0x0 0x4D200000 0x0 0x100000>; - }; - - q6_qcn6122_data1: q6_qcn6122_data1@4D300000 { - no-map; - reg = <0x0 0x4D300000 0x0 0xD00000>; - }; - - m3_dump_qcn6122_1: m3_dump_qcn6122_1@4E000000 { - no-map; - reg = <0x0 0x4E000000 0x0 0x100000>; - }; - - q6_qcn6122_etr_1: q6_qcn6122_etr_1@4E100000 { - no-map; - reg = <0x0 0x4E100000 0x0 0x100000>; - }; - - q6_qcn6122_data2: q6_qcn6122_data2@4E200000 { - no-map; - reg = <0x0 0x4E200000 0x0 0xD00000>; - }; - - m3_dump_qcn6122_2: m3_dump_qcn6122_2@4EF00000 { - no-map; - reg = <0x0 0x4EF00000 0x0 0x100000>; - }; - - q6_qcn6122_etr_2: q6_qcn6122_etr_2@4F000000 { - no-map; - reg = <0x0 0x4F000000 0x0 0x100000>; - }; - #else - /* 512MB/1GB Profiles - * +==========+==============+=========================+ - * | | | | - * | Region | Start Offset | Size | - * | | | | - * +----------+--------------+-------------------------+ - * | NSS | 0x40000000 | 16MB | - * 
+----------+--------------+-------------------------+ - * | Linux | 0x41000000 | Depends on total memory | - * +----------+--------------+-------------------------+ - * | uboot | 0x4A600000 | 4MB | - * +----------+--------------+-------------------------+ - * | SBL | 0x4AA00000 | 1MB | - * +----------+--------------+-------------------------+ - * | smem | 0x4AB00000 | 1MB | - * +----------+--------------+-------------------------+ - * | TZ | 0x4AC00000 | 4MB | - * +----------+--------------+-------------------------+ - * | Q6 | | | - * | code/ | 0x4B000000 | 20MB | - * | data | | | - * +----------+--------------+-------------------------+ - * | IPQ5018 | | | - * | data | 0x4C400000 | 14MB | - * +----------+--------------+-------------------------+ - * | IPQ5018 | | | - * | M3 Dump | 0x4D200000 | 1MB | - * +----------+--------------+-------------------------+ - * | IPQ5018 | | | - * | QDSS | 0x4D300000 | 1MB | - * +----------+--------------+-------------------------+ - * | IPQ5018 | | | - * | Caldb | 0x4D400000 | 2MB | - * +----------+--------------+-------------------------+ - * | QCN6122_1| | | - * | data | 0x4D600000 | 16MB | - * +----------+--------------+-------------------------+ - * | QCN6122_1| | | - * | M3 Dump | 0x4E600000 | 1MB | - * +----------+--------------+-------------------------+ - * | QCN6122_1| | | - * | QDSS | 0x4E700000 | 1MB | - * +----------+--------------+-------------------------+ - * | QCN6122_1| | | - * | Caldb | 0x4E800000 | 5MB | - * +----------+--------------+-------------------------+ - * | QCN6122_2| | | - * | data | 0x4ED00000 | 16MB | - * +----------+--------------+-------------------------+ - * | QCN6122_2| | | - * | M3 Dump | 0x4FD00000 | 1MB | - * +----------+--------------+-------------------------+ - * | QCN6122_2| | | - * | QDSS | 0x4FE00000 | 1MB | - * +----------+--------------+-------------------------+ - * | QCN6122_2| | | - * | Caldb | 0x4FF00000 | 5MB | - * +----------+--------------+-------------------------+ - * | | - 
* | Rest of the memory for Linux | - * | | - * +===================================================+ - */ - q6_mem_regions: q6_mem_regions@4B000000 { - no-map; - reg = <0x0 0x4B000000 0x0 0x5400000>; - }; - - q6_code_data: q6_code_data@4B000000 { - no-map; - reg = <0x0 0x4B000000 0x0 01400000>; - }; - - q6_ipq5018_data: q6_ipq5018_data@4C400000 { - no-map; - reg = <0x0 0x4C400000 0x0 0xE00000>; - }; - - m3_dump: m3_dump@4D200000 { - no-map; - reg = <0x0 0x4D200000 0x0 0x100000>; - }; - - q6_etr_region: q6_etr_dump@4D300000 { - no-map; - reg = <0x0 0x4D300000 0x0 0x100000>; - }; - - q6_caldb_region: q6_caldb_region@4D400000 { - no-map; - reg = <0x0 0x4D400000 0x0 0x200000>; - }; - - q6_qcn6122_data1: q6_qcn6122_data1@4D600000 { - no-map; - reg = <0x0 0x4D600000 0x0 0x1000000>; - }; - - m3_dump_qcn6122_1: m3_dump_qcn6122_1@4E600000 { - no-map; - reg = <0x0 0x4E600000 0x0 0x100000>; - }; - - q6_qcn6122_etr_1: q6_qcn6122_etr_1@4E700000 { - no-map; - reg = <0x0 0x4E700000 0x0 0x100000>; - }; - - q6_qcn6122_caldb_1: q6_qcn6122_caldb_1@4E800000 { - no-map; - reg = <0x0 0x4E800000 0x0 0x500000>; - }; - - q6_qcn6122_data2: q6_qcn6122_data2@4E900000 { - no-map; - reg = <0x0 0x4ED00000 0x0 0x1000000>; - }; - - m3_dump_qcn6122_2: m3_dump_qcn6122_2@4FD00000 { - no-map; - reg = <0x0 0x4FD00000 0x0 0x100000>; - }; - - q6_qcn6122_etr_2: q6_qcn6122_etr_2@4FE00000 { - no-map; - reg = <0x0 0x4FE00000 0x0 0x100000>; - }; - - q6_qcn6122_caldb_2: q6_qcn6122_caldb_2@4FF00000 { - no-map; - reg = <0x0 0x4FF00000 0x0 0x500000>; - }; - - #endif - }; - - soc { - serial@78af000 { - status = "ok"; - }; - - blsp1_uart2: serial@78b0000 { - pinctrl-0 = <&blsp1_uart_pins>; - pinctrl-names = "default"; - }; - - qpic_bam: dma@7984000{ - status = "ok"; - }; - - nand: qpic-nand@79b0000 { - pinctrl-0 = <&qspi_nand_pins>; - pinctrl-names = "default"; - status = "disabled"; - }; - - spi_0: spi@78b5000 { /* BLSP1 QUP0 */ - pinctrl-0 = <&blsp0_spi_pins>; - pinctrl-names = "default"; - cs-select = <0>; - 
status = "ok"; - - m25p80@0 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0>; - compatible = "n25q128a11"; - linux,modalias = "m25p80", "n25q128a11"; - spi-max-frequency = <50000000>; - use-default-sizes; - }; - }; - - mdio0: mdio@88000 { - status = "ok"; - - ethernet-phy@0 { - reg = <7>; - }; - }; - - mdio1: mdio@90000 { - status = "ok"; - pinctrl-0 = <&mdio1_pins>; - pinctrl-names = "default"; - phy-reset-gpio = <&tlmm 39 0>; - - ethernet-phy@0 { - reg = <28>; - }; - }; - - ess-instance { - num_devices = <0x1>; - ess-switch@0x39c00000 { - switch_mac_mode = <0xf>; /* mac mode for uniphy instance*/ - cmnblk_clk = "internal_96MHz"; /* cmnblk clk*/ - qcom,port_phyinfo { - port@0 { - port_id = <1>; - phy_address = <7>; - mdiobus = <&mdio0>; - }; - port@1 { - port_id = <2>; - phy_address = <0x1c>; - mdiobus = <&mdio1>; - port_mac_sel = "QGMAC_PORT"; - }; - }; - led_source@0 { - source = <0>; - mode = "normal"; - speed = "all"; - blink_en = "enable"; - active = "high"; - }; - }; - }; - - dp1 { - device_type = "network"; - compatible = "qcom,nss-dp"; - clocks = <&gcc GCC_SNOC_GMAC0_AXI_CLK>; - clock-names = "nss-snoc-gmac-axi-clk"; - qcom,id = <1>; - reg = <0x39C00000 0x10000>; - interrupts = ; - qcom,mactype = <2>; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <7>; - mdio-bus = <&mdio0>; - local-mac-address = [000000000000]; - phy-mode = "sgmii"; - qcom,rx-page-mode = <0>; - }; - - dp2 { - device_type = "network"; - compatible = "qcom,nss-dp"; - clocks = <&gcc GCC_SNOC_GMAC1_AXI_CLK>; - clock-names = "nss-snoc-gmac-axi-clk"; - qcom,id = <2>; - reg = <0x39D00000 0x10000>; - interrupts = ; - qcom,mactype = <2>; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <28>; - mdio-bus = <&mdio1>; - local-mac-address = [000000000000]; - phy-mode = "sgmii"; - qcom,rx-page-mode = <0>; - }; - - nss-macsec1 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x1c>; - mdiobus = <&mdio1>; - }; - - pcm: pcm@0xA3C0000{ - pinctrl-0 = <&audio_pins>; - pinctrl-names = "default"; - 
status = "disabled"; - }; - - }; - - qcom,test@0 { - status = "ok"; - }; - - thermal-zones { - status = "ok"; - }; -}; - -&sdhc_1 { - status = "ok"; - pinctrl-0 = <&emmc_pins>; - pinctrl-names = "default"; - qcom,nonremovable; -}; - -&tlmm { - pinctrl-0 = <&blsp0_uart_pins &phy_led_pins>; - pinctrl-names = "default"; - - emmc_pins: emmc_pins { - emmc_clk { - pins = "gpio9"; - function = "sdc1_clk"; - drive-strength = <8>; - bias-disable; - }; - emmc_cmd { - pins = "gpio8"; - function = "sdc1_cmd"; - drive-strength = <8>; - bias-pull-up; - }; - emmc_data_0 { - pins = "gpio7"; - function = "sdc10"; - drive-strength = <8>; - bias-disable; - }; - emmc_data_1 { - pins = "gpio6"; - function = "sdc11"; - drive-strength = <8>; - bias-disable; - }; - emmc_data_2 { - pins = "gpio5"; - function = "sdc12"; - drive-strength = <8>; - bias-disable; - }; - emmc_data_3 { - pins = "gpio4"; - function = "sdc13"; - drive-strength = <8>; - bias-disable; - }; - }; - blsp0_uart_pins: uart_pins { - blsp0_uart_rx_tx { - pins = "gpio20", "gpio21"; - function = "blsp0_uart0"; - bias-disable; - }; - }; - - blsp1_uart_pins: blsp1_uart_pins { - blsp1_uart_rx_tx { - pins = "gpio23", "gpio25", "gpio24", "gpio26"; - function = "blsp1_uart2"; - bias-disable; - }; - }; - - blsp0_spi_pins: blsp0_spi_pins { - mux { - pins = "gpio10", "gpio11", "gpio12", "gpio13"; - function = "blsp0_spi"; - drive-strength = <2>; - bias-disable; - }; - }; - - qspi_nand_pins: qspi_nand_pins { - qspi_clock { - pins = "gpio9"; - function = "qspi_clk"; - drive-strength = <8>; - bias-disable; - }; - - qspi_cs { - pins = "gpio8"; - function = "qspi_cs"; - drive-strength = <8>; - bias-disable; - }; - - qspi_data { - pins = "gpio4", "gpio5", "gpio6", "gpio7"; - function = "qspi_data"; - drive-strength = <8>; - bias-disable; - }; - }; - - mdio1_pins: mdio_pinmux { - mux_0 { - pins = "gpio36"; - function = "mdc"; - drive-strength = <8>; - bias-pull-up; - }; - - mux_1 { - pins = "gpio37"; - function = "mdio"; - drive-strength = 
<8>; - bias-pull-up; - }; - }; - - phy_led_pins: phy_led_pins { - gephy_led_pin { - pins = "gpio46"; - function = "led0"; - drive-strength = <8>; - bias-pull-down; - }; - }; - - i2c_pins: i2c_pins { - i2c_scl { - pins = "gpio25"; - function = "blsp2_i2c1"; - drive-strength = <8>; - bias-disable; - }; - - i2c_sda { - pins = "gpio26"; - function = "blsp2_i2c1"; - drive-strength = <8>; - bias-disable; - }; - }; - - button_pins: button_pins { - wps_button { - pins = "gpio38"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - reset_button { - pins = "gpio31"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; - - audio_pins: audio_pinmux { - mux_1 { - pins = "gpio24"; - function = "audio_rxbclk"; - drive-strength = <8>; - bias-pull-down; - }; - - mux_2 { - pins = "gpio25"; - function = "audio_rxfsync"; - drive-strength = <8>; - bias-pull-down; - }; - - mux_3 { - pins = "gpio26"; - function = "audio_rxd"; - drive-strength = <8>; - bias-pull-down; - }; - - mux_4 { - pins = "gpio27"; - function = "audio_txmclk"; - drive-strength = <8>; - bias-pull-down; - }; - - mux_5 { - pins = "gpio28"; - function = "audio_txbclk"; - drive-strength = <8>; - bias-pull-down; - }; - - mux_6 { - pins = "gpio29"; - function = "audio_txfsync"; - drive-strength = <8>; - bias-pull-down; - }; - - mux_7 { - pins = "gpio30"; - function = "audio_txd"; - drive-strength = <8>; - bias-pull-down; - }; - }; -}; - -&soc { - gpio_keys { - compatible = "gpio-keys"; - pinctrl-0 = <&button_pins>; - pinctrl-names = "default"; - - button@1 { - label = "wps"; - linux,code = ; - gpios = <&tlmm 38 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; - button@2 { - label = "reset"; - linux,code = ; - gpios = <&tlmm 31 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; - }; -}; - -&q6v5_wcss { - compatible = "qcom,ipq5018-q6-mpd"; - #address-cells = <1>; - #size-cells = <1>; - ranges; - firmware = "IPQ5018/q6_fw.mdt"; - reg = 
<0x0cd00000 0x4040>, - <0x1938000 0x8>, - <0x193d204 0x4>; - reg-names = "qdsp6", - "tcsr-msip", - "tcsr-q6"; - resets = <&gcc GCC_WCSSAON_RESET>, - <&gcc GCC_WCSS_Q6_BCR>; - - reset-names = "wcss_aon_reset", - "wcss_q6_reset"; - - clocks = <&gcc GCC_Q6_AXIS_CLK>, - <&gcc GCC_WCSS_ECAHB_CLK>, - <&gcc GCC_Q6_AXIM_CLK>, - <&gcc GCC_Q6_AXIM2_CLK>, - <&gcc GCC_Q6_AHB_CLK>, - <&gcc GCC_Q6_AHB_S_CLK>, - <&gcc GCC_WCSS_AXI_S_CLK>; - clock-names = "gcc_q6_axis_clk", - "gcc_wcss_ecahb_clk", - "gcc_q6_axim_clk", - "gcc_q6_axim2_clk", - "gcc_q6_ahb_clk", - "gcc_q6_ahb_s_clk", - "gcc_wcss_axi_s_clk"; - - #ifdef __IPQ_MEM_PROFILE_256_MB__ - memory-region = <&q6_mem_regions>, <&q6_etr_region>; - #else - memory-region = <&q6_mem_regions>, <&q6_etr_region>, - <&q6_caldb_region>; - #endif - - qcom,rproc = <&q6v5_wcss>; - qcom,bootargs_smem = <507>; - boot-args = <0x1 0x4 0x3 0x0F 0x0 0x0>, - <0x2 0x4 0x2 0x12 0x0 0x0>; - status = "ok"; - q6_wcss_pd1: remoteproc_pd1@4ab000 { - compatible = "qcom,ipq5018-wcss-ahb-mpd"; - reg = <0x4ab000 0x20>; - reg-names = "rmb"; - firmware = "IPQ5018/q6_fw.mdt"; - m3_firmware = "IPQ5018/m3_fw.mdt"; - interrupts-extended = <&wcss_smp2p_in 8 0>, - <&wcss_smp2p_in 9 0>, - <&wcss_smp2p_in 12 0>, - <&wcss_smp2p_in 11 0>; - interrupt-names = "fatal", - "ready", - "spawn-ack", - "stop-ack"; - - resets = <&gcc GCC_WCSSAON_RESET>, - <&gcc GCC_WCSS_BCR>, - <&gcc GCC_CE_BCR>; - reset-names = "wcss_aon_reset", - "wcss_reset", - "ce_reset"; - - clocks = <&gcc GCC_WCSS_AHB_S_CLK>, - <&gcc GCC_WCSS_ACMT_CLK>, - <&gcc GCC_WCSS_AXI_M_CLK>; - clock-names = "gcc_wcss_ahb_s_clk", - "gcc_wcss_acmt_clk", - "gcc_wcss_axi_m_clk"; - - qcom,halt-regs = <&tcsr_q6_block 0xa000 0xd000 0x0>; - - qcom,smem-states = <&wcss_smp2p_out 8>, - <&wcss_smp2p_out 9>, - <&wcss_smp2p_out 10>; - qcom,smem-state-names = "shutdown", - "stop", - "spawn"; - #ifdef __IPQ_MEM_PROFILE_256_MB__ - memory-region = <&q6_ipq5018_data>, <&m3_dump>, - <&q6_etr_region>; - #else - memory-region = 
<&q6_ipq5018_data>, <&m3_dump>, - <&q6_etr_region>, <&q6_caldb_region>; - #endif - - }; - - q6_wcss_pd2: remoteproc_pd2 { - compatible = "qcom,ipq5018-wcss-pcie-mpd"; - firmware = "IPQ5018/q6_fw.mdt"; - m3_firmware = "qcn6122/m3_fw.mdt"; - interrupts-extended = <&wcss_smp2p_in 16 0>, - <&wcss_smp2p_in 17 0>, - <&wcss_smp2p_in 20 0>, - <&wcss_smp2p_in 19 0>; - interrupt-names = "fatal", - "ready", - "spawn-ack", - "stop-ack"; - - qcom,smem-states = <&wcss_smp2p_out 16>, - <&wcss_smp2p_out 17>, - <&wcss_smp2p_out 18>; - qcom,smem-state-names = "shutdown", - "stop", - "spawn"; - #ifdef __IPQ_MEM_PROFILE_256_MB__ - memory-region = <&q6_qcn6122_data1>, <&m3_dump_qcn6122_1>, - <&q6_qcn6122_etr_1>; - #else - memory-region = <&q6_qcn6122_data1>, <&m3_dump_qcn6122_1>, - <&q6_qcn6122_etr_1>, <&q6_qcn6122_caldb_1>; - #endif - - }; - - q6_wcss_pd3: remoteproc_pd3 { - compatible = "qcom,ipq5018-wcss-pcie-mpd"; - firmware = "IPQ5018/q6_fw.mdt"; - interrupts-extended = <&wcss_smp2p_in 24 0>, - <&wcss_smp2p_in 25 0>, - <&wcss_smp2p_in 28 0>, - <&wcss_smp2p_in 27 0>; - interrupt-names = "fatal", - "ready", - "spawn-ack", - "stop-ack"; - - qcom,smem-states = <&wcss_smp2p_out 24>, - <&wcss_smp2p_out 25>, - <&wcss_smp2p_out 26>; - qcom,smem-state-names = "shutdown", - "stop", - "spawn"; - #ifdef __IPQ_MEM_PROFILE_256_MB__ - memory-region = <&q6_qcn6122_data2>, <&m3_dump_qcn6122_2>, - <&q6_qcn6122_etr_2>; - #else - memory-region = <&q6_qcn6122_data2>, <&m3_dump_qcn6122_2>, - <&q6_qcn6122_etr_2>, <&q6_qcn6122_caldb_2>; - #endif - }; -}; - -&i2c_0 { - pinctrl-0 = <&i2c_pins>; - pinctrl-names = "default"; - status = "disabled"; -}; - -&wifi0 { - /* IPQ5018 */ - qcom,multipd_arch; - qcom,rproc = <&q6_wcss_pd1>; - qcom,userpd-subsys-name = "q6v5_wcss_userpd1"; -#ifdef __IPQ_MEM_PROFILE_256_MB__ - qcom,tgt-mem-mode = <2>; -#else - qcom,tgt-mem-mode = <1>; -#endif - qcom,board_id = <0x23>; - qcom,bdf-addr = <0x4C400000 0x4C400000 0x4C400000 0x0 0x0>; -#ifdef __CNSS2__ - qcom,caldb-addr = 
<0x4D400000 0x4D400000 0 0 0>; -#else - qcom,caldb-addr = <0x4D400000>; - m3-dump-addr = <0x4D200000>; -#endif - qcom,caldb-size = <0x200000>; - mem-region = <&q6_ipq5018_data>; - status = "ok"; -}; - -&wifi1 { - /* QCN6122 5G */ - qcom,multipd_arch; - qcom,userpd-subsys-name = "q6v5_wcss_userpd2"; - qcom,rproc = <&q6_wcss_pd2>; -#ifdef __IPQ_MEM_PROFILE_256_MB__ - qcom,tgt-mem-mode = <2>; -#else - qcom,tgt-mem-mode = <1>; -#endif - qcom,board_id = <0x60>; - qcom,bdf-addr = <0x4D600000 0x4D600000 0x4D300000 0x0 0x0>; -#ifdef __CNSS2__ - qcom,caldb-addr = <0x4E800000 0x4E800000 0 0 0>; -#else - qcom,caldb-addr = <0x4E800000>; - m3-dump-addr = <0x4E600000>; -#endif - qcom,caldb-size = <0x500000>; - mem-region = <&q6_qcn6122_data1>; - status = "ok"; -}; - -&wifi2 { - /* QCN6122 6G */ - qcom,multipd_arch; - qcom,userpd-subsys-name = "q6v5_wcss_userpd3"; - qcom,rproc = <&q6_wcss_pd3>; -#ifdef __IPQ_MEM_PROFILE_256_MB__ - qcom,tgt-mem-mode = <2>; -#else - qcom,tgt-mem-mode = <1>; -#endif - qcom,board_id = <0xb0>; - qcom,bdf-addr = <0x4ED00000 0x4ED00000 0x4E200000 0x0 0x0>; -#ifdef __CNSS2__ - qcom,caldb-addr = <0x4FF00000 0x4FF00000 0 0 0>; -#else - qcom,caldb-addr = <0x4FF00000>; - m3-dump-addr = <0x4FD00000>; -#endif - qcom,caldb-size = <0x500000>; - mem-region = <&q6_qcn6122_data2>; - status = "ok"; -}; - -&usb3 { - status = "ok"; - device-power-gpio = <&tlmm 24 1>; -}; - -&dwc_0 { - /delete-property/ #phy-cells; - /delete-property/ phys; - /delete-property/ phy-names; -}; - -&hs_m31phy_0 { - status = "ok"; -}; - -&eud { - status = "ok"; -}; - -&pcie_x1 { - status = "disabled"; - perst-gpio = <&tlmm 18 GPIO_ACTIVE_LOW>; -}; - -&pcie_x2 { - status = "disabled"; - perst-gpio = <&tlmm 15 GPIO_ACTIVE_LOW>; -}; - -&pcie_x1phy { - status = "disabled"; -}; - -&pcie_x2phy { - status = "disabled"; -}; - -&pcie_x1_rp { - status = "disabled"; - - mhi_0: qcom,mhi@0 { - reg = <0 0 0 0 0 >; - }; -}; - -&pcie_x2_rp { - status = "disabled"; - - mhi_1: qcom,mhi@1 { - reg = <0 0 0 0 0 
>; - - }; -}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-cig-wf188n.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-cig-wf188n.dts index 2c22e9b15..4956502ce 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-cig-wf188n.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-cig-wf188n.dts @@ -1,6 +1,6 @@ /dts-v1/; /* - * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2019, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -15,9 +15,12 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include "ipq6018.dtsi" -#include "ipq6018-cpr-regulator.dtsi" +#include "qcom-ipq6018.dtsi" +#include "qcom-ipq6018-rpm-regulator.dtsi" +#include "qcom-ipq6018-cpr-regulator.dtsi" +#include "qcom-ipq6018-cp-cpu.dtsi" #include +#include / { #address-cells = <0x2>; @@ -25,9 +28,11 @@ model = "Cigtech WF-188n"; compatible = "cig,wf188n", "qcom,ipq6018-cp03", "qcom,ipq6018"; interrupt-parent = <&intc>; - qcom,msm-id = <0x1A5 0x0>; aliases { + serial0 = &blsp1_uart3; + serial1 = &blsp1_uart2; + /* * Aliases as required by u-boot * to patch MAC addresses @@ -43,11 +48,7 @@ chosen { bootargs = "console=ttyMSM0,115200,n8 rw init=/init"; -#ifdef __IPQ_MEM_PROFILE_256_MB__ - bootargs-append = " swiotlb=1"; -#else bootargs-append = " swiotlb=1 coherent_pool=2M"; -#endif }; /* @@ -105,13 +106,71 @@ }; }; - button_pins: button_pins { - wps_button { - pins = "gpio9"; - function = "gpio"; + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; drive-strength = <8>; bias-pull-down; }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-pull-down; + }; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + 
bias-pull-down; + }; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-pull-down; + }; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-pull-down; + }; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-pull-down; + }; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-pull-down; + }; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-pull-down; + }; + qpic_pad { + pins = "gpio1", "gpio3", "gpio4", + "gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + button_pins: button_pins { + wps_button { + pins = "gpio22"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; }; mdio_pins: mdio_pinmux { @@ -136,17 +195,37 @@ leds_pins: leds_pins { led_5g { - pins = "gpio35"; + pins = "gpio25"; function = "gpio"; drive-strength = <8>; bias-pull-down; }; led_2g { - pins = "gpio37"; + pins = "gpio24"; function = "gpio"; drive-strength = <8>; bias-pull-down; }; + led_eth { + pins = "gpio18"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led_pwr { + pins = "gpio16"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + }; + uart2_pins: uart2_pins { + mux { + pins = "gpio57", "gpio58"; + function = "blsp4_uart"; + drive-strength = <8>; + bias-pull-down; + }; }; }; @@ -164,6 +243,7 @@ }; }; + ess-switch@3a000000 { switch_cpu_bmp = <0x1>; /* cpu port bitmap */ switch_lan_bmp = <0x08>; /* lan port bitmap */ @@ -180,7 +260,7 @@ port@4 { port_id = <4>; phy_address = <3>; - }; + }; }; }; @@ -206,6 +286,7 @@ qcom,link-poll = <1>; qcom,phy-mdio-addr = <3>; phy-mode = "sgmii"; + }; gpio_keys { @@ -221,7 +302,7 @@ debounce-interval = <60>; }; }; - + leds { compatible = "gpio-leds"; pinctrl-0 = <&leds_pins>; @@ -245,17 +326,17 @@ linux,default-trigger = "wf188:green:eth"; default-state = "off"; }; - 
led_power: led@16 { - label = "green:power"; - gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "wf188:green:power"; + led_power: led@16 { + label = "green:power"; + gpios = <&tlmm 16 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "wf188:green:power"; default-state = "on"; }; }; gpio-watchdog { compatible = "linux,wdt-gpio"; - gpios = <&tlmm 35 GPIO_ACTIVE_HIGH>; + gpios = <&tlmm 35 GPIO_ACTIVE_HIGH>; hw_algo = "toggle"; hw_margin_ms = <5000>; always-running; @@ -285,22 +366,22 @@ }; }; +&blsp1_uart2 { + pinctrl-0 = <&uart2_pins>; + pinctrl-names = "default"; + dmas = <&blsp_dma 2>, + <&blsp_dma 3>; + dma-names = "tx", "rx"; + status = "ok"; +}; &qpic_bam { status = "ok"; }; -&qpic_nand { +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; }; &ssphy_0 { @@ -318,3 +399,25 @@ &nss_crypto { status = "ok"; }; + +&cpu0_opp_table { + compatible = "operating-points-v2"; + opp-shared; + opp03 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <3>; + clock-latency-ns = <200000>; + }; + /delete-node/ opp04; + /delete-node/ opp05; + /delete-node/ opp06; +}; + +/* TZAPP is enabled in default memory profile only */ +#if !defined(__IPQ_MEM_PROFILE_256_MB__) && !defined(__IPQ_MEM_PROFILE_512_MB__) +&qseecom { + mem-start = <0x49B00000>; + mem-size = <0x600000>; + status = "ok"; +}; +#endif diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-edgecore-eap101.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-edgecore-eap101.dts index b80b659f4..265398e78 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-edgecore-eap101.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-edgecore-eap101.dts @@ -1,23 +1,38 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/dts-v1/; /* - * 
IPQ6018 CP01 board device tree source + * Copyright (c) 2019, The Linux Foundation. All rights reserved. * - * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/dts-v1/; - -#include "ipq6018.dtsi" -#include "ipq6018-cpr-regulator.dtsi" +#include "qcom-ipq6018.dtsi" +#include "qcom-ipq6018-rpm-regulator.dtsi" +#include "qcom-ipq6018-cpr-regulator.dtsi" +#include "qcom-ipq6018-cp-cpu.dtsi" #include +#include / { + #address-cells = <0x2>; + #size-cells = <0x2>; model = "EdgeCore EAP101"; compatible = "edgecore,eap101", "qcom,ipq6018-cp01", "qcom,ipq6018"; + interrupt-parent = <&intc>; aliases { serial0 = &blsp1_uart3; serial1 = &blsp1_uart2; + /* * Aliases as required by u-boot * to patch MAC addresses @@ -33,84 +48,130 @@ }; chosen { - stdout-path = "serial0:115200n8"; - bootargs-append = " swiotlb=1"; + bootargs = "console=ttyMSM0,115200,n8 rw init=/init"; + bootargs-append = " console=ttyMSM0,115200,n8 swiotlb=1 coherent_pool=2M"; }; -}; -&blsp1_uart3 { - pinctrl-0 = <&serial_3_pins>; - pinctrl-names = "default"; - status = "ok"; -}; + /* + * +=========+==============+========================+ + * | | | | + * | Region | Start Offset | Size | + * | | | | + * +--------+--------------+-------------------------+ + * | | | 
| + * | | | | + * | | | | + * | | | | + * | Linux | 0x41000000 | 139MB | + * | | | | + * | | | | + * | | | | + * +--------+--------------+-------------------------+ + * | TZ App | 0x49B00000 | 6MB | + * +--------+--------------+-------------------------+ + * + * From the available 145 MB for Linux in the first 256 MB, + * we are reserving 6 MB for TZAPP. + * + * Refer arch/arm64/boot/dts/qcom/qcom-ipq6018-memory.dtsi + * for memory layout. + */ -&spi_0 { - pinctrl-0 = <&spi_0_pins>; - pinctrl-names = "default"; - cs-select = <0>; - status = "ok"; - - m25p80@0 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0>; - compatible = "n25q128a11"; - linux,modalias = "m25p80", "n25q128a11"; - spi-max-frequency = <50000000>; - use-default-sizes; +/* TZAPP is enabled only in default memory profile */ +#if !defined(__IPQ_MEM_PROFILE_256_MB__) && !defined(__IPQ_MEM_PROFILE_512_MB__) + reserved-memory { + tzapp:tzapp@49B00000 { /* TZAPPS */ + no-map; + reg = <0x0 0x49B00000 0x0 0x00600000>; + }; }; -}; - -&blsp1_uart2 { - pinctrl-0 = <&hsuart_pins &btcoex_pins>; - pinctrl-names = "default"; - dmas = <&blsp_dma 2>, - <&blsp_dma 3>; - dma-names = "tx", "rx"; - status = "ok"; +#endif }; &tlmm { - spi_0_pins: spi-0-pins { - pins = "gpio38", "gpio39", "gpio40", "gpio41"; - function = "blsp0_spi"; - drive-strength = <8>; - bias-pull-down; - }; - - spi_1_pins: spi_1_pins { + uart_pins: uart_pins { mux { - pins = "gpio69", "gpio71", "gpio72"; - function = "blsp1_spi"; + pins = "gpio44", "gpio45"; + function = "blsp2_uart"; drive-strength = <8>; bias-pull-down; }; - spi_cs { - pins = "gpio70"; - function = "blsp1_spi"; - drive-strength = <8>; - bias-disable; - }; - quartz_interrupt { - pins = "gpio78"; - function = "gpio"; - input; - bias-disable; - }; - quartz_reset { - pins = "gpio79"; - function = "gpio"; - output-low; - bias-disable; - }; - }; - sd_pins: sd-pinmux { - pins = "gpio62"; - function = "sd_card"; - drive-strength = <8>; - bias-pull-up; + spi_0_pins: spi_0_pins { + 
mux { + pins = "gpio38", "gpio39", "gpio40", "gpio41"; + function = "blsp0_spi"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; + drive-strength = <8>; + bias-pull-down; + }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-pull-down; + }; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-pull-down; + }; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-pull-down; + }; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-pull-down; + }; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-pull-down; + }; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-pull-down; + }; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-pull-down; + }; + qpic_pad { + pins = "gpio1", "gpio3", "gpio4", + "gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + extcon_usb_pins: extcon_usb_pins { + mux { + pins = "gpio26"; + function = "gpio"; + drive-strength = <2>; + bias-pull-down; + }; }; button_pins: button_pins { @@ -140,26 +201,6 @@ function = "gpio"; bias-pull-up; }; - mux_3 { - pins = "gpio77"; - function = "gpio"; - bias-pull-up; - }; - }; - - pwm_pins: pwm_pinmux { - pins = "gpio18"; - function = "pwm00"; - drive-strength = <8>; - }; - - hsuart_pins: hsuart_pins { - mux { - pins = "gpio71", "gpio72", "gpio69", "gpio70"; - function = "blsp1_uart"; - drive-strength = <8>; - bias-disable; - }; }; leds_pins: leds_pins { @@ -182,42 +223,22 @@ bias-pull-down; }; }; - - btcoex_pins: btcoex_pins { - mux_0 { - pins = "gpio51"; - function = "pta1_1"; - drive-strength = <6>; - bias-pull-down; - }; - mux_1 { - pins = "gpio53"; - function = "pta1_0"; - drive-strength = <6>; - bias-pull-down; - }; - mux_2 
{ - pins = "gpio52"; - function = "pta1_2"; - drive-strength = <6>; + uart2_pins: uart2_pins { + mux { + pins = "gpio57", "gpio58"; + function = "blsp4_uart"; + drive-strength = <8>; bias-pull-down; }; }; }; &soc { - gpio_keys { - compatible = "gpio-keys"; - pinctrl-0 = <&button_pins>; + extcon_usb: extcon_usb { + pinctrl-0 = <&extcon_usb_pins>; pinctrl-names = "default"; - - wps { - label = "reset"; - linux,code = ; - gpios = <&tlmm 19 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; + id-gpio = <&tlmm 26 GPIO_ACTIVE_LOW>; + status = "ok"; }; mdio: mdio@90000 { @@ -297,11 +318,18 @@ }; }; - nss-macsec0 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x18>; - phy_access_mode = <0>; - mdiobus = <&mdio>; + gpio_keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + wps { + label = "reset"; + linux,code = ; + gpios = <&tlmm 19 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + debounce-interval = <60>; + }; }; leds { @@ -321,56 +349,79 @@ linux,default-trigger = "wf188:green:2g"; default-state = "off"; }; - led_power: led@16 { - label = "green:led_pwr"; - gpios = <&tlmm 74 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "green:power"; + led_power: led@16 { + label = "green:led_pwr"; + gpios = <&tlmm 74 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "green:power"; default-state = "off"; }; - led@61 { - label = "green:lan1"; - gpios = <&tlmm 61 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "green:power"; + led@61 { + label = "green:lan1"; + gpios = <&tlmm 61 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "green:power"; default-state = "off"; }; - led@62 { - label = "green:wan"; - gpios = <&tlmm 62 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "green:power"; + led@62 { + label = "green:wan"; + gpios = <&tlmm 62 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "green:power"; default-state = "off"; }; - led@63 { - label = "green:lan2"; - gpios = <&tlmm 63 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "green:power"; + 
led@63 { + label = "green:lan2"; + gpios = <&tlmm 63 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "green:power"; default-state = "off"; }; }; }; +&blsp1_uart3 { + pinctrl-0 = <&uart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&spi_0 { + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + cs-select = <0>; + status = "ok"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + compatible = "n25q128a11"; + linux,modalias = "m25p80", "n25q128a11"; + spi-max-frequency = <50000000>; + use-default-sizes; + }; +}; + +&blsp1_uart2 { + pinctrl-0 = <&uart2_pins>; + pinctrl-names = "default"; + dmas = <&blsp_dma 2>, + <&blsp_dma 3>; + dma-names = "tx", "rx"; + status = "ok"; +}; &qpic_bam { status = "ok"; }; -&qpic_nand { - status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; -}; - -&pcie_phy { +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; status = "ok"; }; -&pcie0 { +&ssphy_0 { + status = "ok"; +}; + +&qusb_phy_0 { status = "ok"; }; @@ -382,14 +433,6 @@ status = "ok"; }; -&qusb_phy_0 { - status = "ok"; -}; - -&ssphy_0 { - status = "ok"; -}; - &usb3 { status = "ok"; }; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-gl-ax1800.dtsi b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-gl-ax1800.dtsi index 53fc91adb..25f8717a7 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-gl-ax1800.dtsi +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-gl-ax1800.dtsi @@ -1,6 +1,5 @@ -/dts-v1/; /* - * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
* * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -15,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include "ipq6018.dtsi" +#include "qcom-ipq6018.dtsi" #include / { @@ -37,47 +36,8 @@ chosen { bootargs = "console=ttyMSM0,115200,n8 rw init=/init"; -#ifdef __IPQ_MEM_PROFILE_256_MB__ - bootargs-append = " swiotlb=1"; -#else bootargs-append = " swiotlb=1 coherent_pool=2M"; -#endif }; - - /* - * +=========+==============+========================+ - * | | | | - * | Region | Start Offset | Size | - * | | | | - * +--------+--------------+-------------------------+ - * | | | | - * | | | | - * | | | | - * | | | | - * | Linux | 0x41000000 | 139MB | - * | | | | - * | | | | - * | | | | - * +--------+--------------+-------------------------+ - * | TZ App | 0x49B00000 | 6MB | - * +--------+--------------+-------------------------+ - * - * From the available 145 MB for Linux in the first 256 MB, - * we are reserving 6 MB for TZAPP. - * - * Refer arch/arm64/boot/dts/qcom/qcom-ipq6018-memory.dtsi - * for memory layout. 
- */ - -/* TZAPP is enabled only in default memory profile */ -#if !defined(__IPQ_MEM_PROFILE_256_MB__) && !defined(__IPQ_MEM_PROFILE_512_MB__) - reserved-memory { - tzapp:tzapp@49B00000 { /* TZAPPS */ - no-map; - reg = <0x0 0x49B00000 0x0 0x00600000>; - }; - }; -#endif }; &tlmm { @@ -90,10 +50,59 @@ }; }; - spi_0_pins: spi_0_pins { - mux { - pins = "gpio38", "gpio39", "gpio40", "gpio41"; - function = "blsp0_spi"; + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; + drive-strength = <8>; + bias-pull-down; + }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-pull-down; + }; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-pull-down; + }; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-pull-down; + }; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-pull-down; + }; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-pull-down; + }; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-pull-down; + }; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-pull-down; + }; + qpic_pad { + pins = "gpio1", "gpio3", "gpio4", + "gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; drive-strength = <8>; bias-pull-down; }; @@ -262,7 +271,7 @@ switch { label = "switch"; linux,code = ; - gpios = <&tlmm 9 GPIO_ACTIVE_HIGH>; + gpios = <&tlmm 9 GPIO_ACTIVE_LOW>; linux,input-type = <1>; debounce-interval = <60>; }; @@ -286,18 +295,10 @@ status = "ok"; }; -&qpic_nand { +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; }; &ssphy_0 { @@ -318,13 +319,16 @@ status = "ok"; }; +&q6_region { + reg = <0x0 0x4ab00000 0x0 
0x05500000>; +}; + &CPU0 { operating-points = < /* kHz uV (fixed) */ 864000 1100000 1056000 1100000 1200000 1100000 - 1608000 1100000 >; clock-latency = <200000>; }; @@ -335,7 +339,6 @@ 864000 1100000 1056000 1100000 1200000 1100000 - 1608000 1100000 >; clock-latency = <200000>; }; @@ -346,7 +349,6 @@ 864000 1100000 1056000 1100000 1200000 1100000 - 1608000 1100000 >; clock-latency = <200000>; }; @@ -357,7 +359,6 @@ 864000 1100000 1056000 1100000 1200000 1100000 - 1608000 1100000 >; clock-latency = <200000>; }; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-hfcl-ion4x.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-hfcl-ion4x.dts deleted file mode 100755 index dba13e31c..000000000 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-hfcl-ion4x.dts +++ /dev/null @@ -1,305 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) -/* - * IPQ6018 CP01 board device tree source - * - * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 
- */ - -/dts-v1/; - -#include "ipq6018.dtsi" -#include "ipq6018-cpr-regulator.dtsi" -#include - -/ { - aliases { - serial0 = &blsp1_uart3; - serial1 = &blsp1_uart2; - /* - * Aliases as required by u-boot - * to patch MAC addresses - */ - ethernet0 = "/soc/dp1"; - ethernet1 = "/soc/dp2"; - }; - - chosen { - stdout-path = "serial0:115200n8"; - bootargs-append = " swiotlb=1"; - }; -}; - -&blsp1_uart3 { - pinctrl-0 = <&serial_3_pins>; - pinctrl-names = "default"; - status = "ok"; -}; - -&spi_0 { - pinctrl-0 = <&spi_0_pins>; - pinctrl-names = "default"; - cs-select = <0>; - status = "ok"; - - m25p80@0 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0>; - compatible = "n25q128a11"; - linux,modalias = "m25p80", "n25q128a11"; - spi-max-frequency = <50000000>; - use-default-sizes; - }; -}; - -&blsp1_uart2 { - pinctrl-0 = <&hsuart_pins &btcoex_pins>; - pinctrl-names = "default"; - dmas = <&blsp_dma 2>, - <&blsp_dma 3>; - dma-names = "tx", "rx"; - status = "ok"; -}; - -&tlmm { - spi_0_pins: spi-0-pins { - pins = "gpio38", "gpio39", "gpio40", "gpio41"; - function = "blsp0_spi"; - drive-strength = <8>; - bias-pull-down; - }; - - mdio_pins: mdio_pinmux { - mux_0 { - pins = "gpio64"; - function = "mdc"; - drive-strength = <8>; - bias-pull-up; - }; - mux_1 { - pins = "gpio65"; - function = "mdio"; - drive-strength = <8>; - bias-pull-up; - }; - mux_2 { - pins = "gpio75"; - function = "gpio"; - bias-pull-up; - }; - mux_3 { - pins = "gpio77"; - function = "gpio"; - bias-pull-up; - }; - }; - - hsuart_pins: hsuart_pins { - mux { - pins = "gpio71", "gpio72", "gpio69", "gpio70"; - function = "blsp1_uart"; - drive-strength = <8>; - bias-disable; - }; - }; - - button_pins: button_pins { - reset_button { - pins = "gpio53"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; - - leds_pins: leds_pins { - led_5g { - pins = "gpio60"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - led_2g { - pins = "gpio61"; - function = "gpio"; - drive-strength 
= <8>; - bias-pull-down; - }; - }; - - mdio_pins: mdio_pinmux { - mux_0 { - pins = "gpio64"; - function = "mdc"; - drive-strength = <8>; - bias-pull-up; - }; - mux_1 { - pins = "gpio65"; - function = "mdio"; - drive-strength = <8>; - bias-pull-up; - }; - mux_2 { - pins = "gpio75"; - function = "gpio"; - bias-pull-up; - }; - mux_3 { - pins = "gpio77"; - function = "gpio"; - bias-pull-up; - }; - }; - - btcoex_pins: btcoex_pins { - mux_0 { - pins = "gpio51"; - function = "pta1_1"; - drive-strength = <6>; - bias-pull-down; - }; - mux_1 { - pins = "gpio53"; - function = "pta1_0"; - drive-strength = <6>; - bias-pull-down; - }; - mux_2 { - pins = "gpio52"; - function = "pta1_2"; - drive-strength = <6>; - bias-pull-down; - }; - }; -}; - -&soc { - mdio: mdio@90000 { - pinctrl-0 = <&mdio_pins>; - pinctrl-names = "default"; - phy-reset-gpio = <&tlmm 77 0>; - status = "ok"; - phy0: ethernet-phy@0 { - reg = <4>; - }; - phy1: ethernet-phy@1 { - reg = <30>; - }; - }; - - dp1 { - device_type = "network"; - compatible = "qcom,nss-dp"; - qcom,id = <4>; - reg = <0x3a001600 0x200>; - qcom,mactype = <0>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <4>; - phy-mode = "sgmii"; - }; - - dp2 { - device_type = "network"; - compatible = "qcom,nss-dp"; - qcom,id = <5>; - reg = <0x3a003000 0x3fff>; - qcom,mactype = <1>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <30>; - phy-mode = "sgmii"; - }; - - ess-switch@3a000000 { - switch_cpu_bmp = <0x1>; /* cpu port bitmap */ - switch_lan_bmp = <0x10>; /* lan port bitmap */ - switch_wan_bmp = <0x20>; /* wan port bitmap */ - switch_inner_bmp = <0xc0>; /*inner port bitmap*/ - switch_mac_mode = <0xf>; /* mac mode for uniphy instance0*/ - switch_mac_mode1 = <0x14>; /* mac mode for uniphy instance1*/ - switch_mac_mode2 = <0xff>; /* mac mode for uniphy instance2*/ - qcom,port_phyinfo { - port@4 { - port_id = <4>; - phy_address = <4>; - }; - port@5 { - port_id = <5>; - 
phy_address = <30>; - port_mac_sel = "QGMAC_PORT"; - }; - }; - }; - - nss-macsec0 { - compatible = "qcom,nss-macsec"; - phy_addr = <30>; - phy_access_mode = <0>; - mdiobus = <&mdio>; - }; - - gpio_keys { - compatible = "gpio-keys"; - pinctrl-0 = <&button_pins>; - pinctrl-names = "default"; - - reset { - label = "reset"; - linux,code = ; - gpios = <&tlmm 53 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; - }; - - leds { - compatible = "gpio-leds"; - pinctrl-0 = <&leds_pins>; - pinctrl-names = "default"; - - led@60 { - label = "blue:wifi5"; - gpios = <&tlmm 60 GPIO_ACTIVE_LOW>; - linux,default-trigger = "led_5g"; - default-state = "off"; - }; - led@61 { - label = "blue:wifi2"; - gpios = <&tlmm 61 GPIO_ACTIVE_LOW>; - linux,default-trigger = "led_2g"; - default-state = "off"; - }; - }; -}; - -&qpic_bam { - status = "ok"; -}; - -&qpic_nand { - status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; -}; - -&pcie_phy { - status = "ok"; -}; - -&pcie0 { - status = "ok"; -}; - -&nss_crypto { - status = "ok"; -}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-wallys-dr6018-v4.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-wallys-dr6018-v4.dts index dc5535918..a00c106ad 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-wallys-dr6018-v4.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-wallys-dr6018-v4.dts @@ -1,28 +1,49 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/dts-v1/; /* - * IPQ6018 CP01 board device tree source + * Copyright (c) 2019, The Linux Foundation. All rights reserved. * - * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/dts-v1/; - -#include "ipq6018.dtsi" +#include "qcom-ipq6018.dtsi" +#include "qcom-ipq6018-rpm-regulator.dtsi" +#include "qcom-ipq6018-cpr-regulator.dtsi" +#include "qcom-ipq6018-cp-cpu.dtsi" #include +#include / { - model = "Wallys DR6018"; - compatible = "wallys,dr6018", "qcom,ipq6018-cp01", "qcom,ipq6018"; + #address-cells = <0x2>; + #size-cells = <0x2>; + model = "Wallys DR6018 V4"; + compatible = "wallys,dr6018-v4", "qcom,ipq6018-cp01", "qcom,ipq6018"; + interrupt-parent = <&intc>; aliases { serial0 = &blsp1_uart3; serial1 = &blsp1_uart2; + /* * Aliases as required by u-boot * to patch MAC addresses */ ethernet0 = "/soc/dp1"; ethernet1 = "/soc/dp2"; + ethernet2 = "/soc/dp3"; + ethernet3 = "/soc/dp4"; + ethernet4 = "/soc/dp5"; + + sdhc2 = "/soc/sdhci_sd@7804000"; led-boot = &led_power; led-failsafe = &led_power; @@ -31,97 +52,148 @@ }; chosen { - stdout-path = "serial0:115200n8"; - bootargs-append = " swiotlb=1"; + bootargs = "console=ttyMSM0,115200,n8 rw init=/init"; + bootargs-append = " console=ttyMSM0,115200,n8 swiotlb=1 coherent_pool=2M"; }; -}; -&blsp1_uart3 { - pinctrl-0 = <&serial_3_pins>; - pinctrl-names = "default"; - status = "ok"; -}; + /* + * +=========+==============+========================+ + * | | | | + * | Region 
| Start Offset | Size | + * | | | | + * +--------+--------------+-------------------------+ + * | | | | + * | | | | + * | | | | + * | | | | + * | Linux | 0x41000000 | 139MB | + * | | | | + * | | | | + * | | | | + * +--------+--------------+-------------------------+ + * | TZ App | 0x49B00000 | 6MB | + * +--------+--------------+-------------------------+ + * + * From the available 145 MB for Linux in the first 256 MB, + * we are reserving 6 MB for TZAPP. + * + * Refer arch/arm64/boot/dts/qcom/qcom-ipq6018-memory.dtsi + * for memory layout. + */ -&spi_0 { - pinctrl-0 = <&spi_0_pins>; - pinctrl-names = "default"; - cs-select = <0>; - status = "ok"; - - m25p80@0 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0>; - compatible = "n25q128a11"; - linux,modalias = "m25p80", "n25q128a11"; - spi-max-frequency = <50000000>; - use-default-sizes; - }; -}; - -&blsp1_uart2 { - pinctrl-0 = <&hsuart_pins &btcoex_pins>; - pinctrl-names = "default"; - dmas = <&blsp_dma 2>, - <&blsp_dma 3>; - dma-names = "tx", "rx"; - status = "ok"; -}; - -&spi_1 { /* BLSP1 QUP1 */ - pinctrl-0 = <&spi_1_pins>; - pinctrl-names = "default"; - cs-select = <0>; - quartz-reset-gpio = <&tlmm 79 1>; - status = "disabled"; - spidev1: spi@1 { - compatible = "qca,spidev"; - reg = <0>; - spi-max-frequency = <24000000>; +/* TZAPP is enabled only in default memory profile */ +#if !defined(__IPQ_MEM_PROFILE_256_MB__) && !defined(__IPQ_MEM_PROFILE_512_MB__) + reserved-memory { + tzapp:tzapp@49B00000 { /* TZAPPS */ + no-map; + reg = <0x0 0x49B00000 0x0 0x00600000>; + }; }; +#endif }; &tlmm { - spi_0_pins: spi-0-pins { - pins = "gpio38", "gpio39", "gpio40", "gpio41"; - function = "blsp0_spi"; - drive-strength = <8>; - bias-pull-down; - }; - - spi_1_pins: spi_1_pins { + uart_pins: uart_pins { mux { - pins = "gpio69", "gpio71", "gpio72"; - function = "blsp1_spi"; + pins = "gpio44", "gpio45"; + function = "blsp2_uart"; drive-strength = <8>; bias-pull-down; }; - spi_cs { - pins = "gpio70"; - function = 
"blsp1_spi"; - drive-strength = <8>; - bias-disable; - }; - quartz_interrupt { - pins = "gpio78"; - function = "gpio"; - input; - bias-disable; - }; - quartz_reset { - pins = "gpio79"; - function = "gpio"; - output-low; - bias-disable; - }; - }; - sd_pins: sd-pinmux { - pins = "gpio62"; - function = "sd_card"; - drive-strength = <8>; - bias-pull-up; + sd_pins: sd_pins { + mux { + pins = "gpio62"; + function = "sd_card"; + drive-strength = <8>; + bias-pull-up; + }; + }; + + spi_0_pins: spi_0_pins { + mux { + pins = "gpio38", "gpio39", "gpio40", "gpio41"; + function = "blsp0_spi"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; + drive-strength = <8>; + bias-pull-down; + }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-pull-down; + }; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-pull-down; + }; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-pull-down; + }; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-pull-down; + }; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-pull-down; + }; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-pull-down; + }; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-pull-down; + }; + qpic_pad { + pins = "gpio1", "gpio3", "gpio4", + "gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + extcon_usb_pins: extcon_usb_pins { + mux { + pins = "gpio26"; + function = "gpio"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + button_pins: button_pins { + wps_button { + pins = "gpio19"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; }; mdio_pins: mdio_pinmux { @@ -142,101 +214,70 @@ function = "gpio"; bias-pull-up; }; 
- mux_3 { - pins = "gpio77"; - function = "gpio"; - bias-pull-up; - }; - }; - - pwm_pins: pwm_pinmux { - pins = "gpio18"; - function = "pwm00"; - drive-strength = <8>; - }; - - hsuart_pins: hsuart_pins { - mux { - pins = "gpio71", "gpio72", "gpio69", "gpio70"; - function = "blsp1_uart"; - drive-strength = <8>; - bias-disable; - }; - }; - - button_pins: button_pins { - wps_button { - pins = "gpio19"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; }; leds_pins: leds_pins { led_pwr { - pins = "gpio74"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - led_5g { - pins = "gpio35"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - led_2g { - pins = "gpio37"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; + pins = "gpio74"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led_5g { + pins = "gpio35"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led_2g { + pins = "gpio37"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; }; - - btcoex_pins: btcoex_pins { - mux_0 { - pins = "gpio51"; - function = "pta1_1"; - drive-strength = <6>; - bias-pull-down; - }; - mux_1 { - pins = "gpio53"; - function = "pta1_0"; - drive-strength = <6>; - bias-pull-down; - }; - mux_2 { - pins = "gpio52"; - function = "pta1_2"; - drive-strength = <6>; + uart2_pins: uart2_pins { + mux { + pins = "gpio57", "gpio58"; + function = "blsp4_uart"; + drive-strength = <8>; bias-pull-down; }; }; }; &soc { + extcon_usb: extcon_usb { + pinctrl-0 = <&extcon_usb_pins>; + pinctrl-names = "default"; + id-gpio = <&tlmm 26 GPIO_ACTIVE_LOW>; + status = "ok"; + }; + mdio: mdio@90000 { pinctrl-0 = <&mdio_pins>; pinctrl-names = "default"; phy-reset-gpio = <&tlmm 75 0 &tlmm 77 1>; status = "ok"; - phy0: ethernet-phy@0 { - reg = <0>; + ethernet-phy@3 { + reg = <0x03>; }; - phy1: ethernet-phy@1 { - reg = <1>; - }; - phy2: ethernet-phy@2 { - reg = <2>; - }; - phy3: ethernet-phy@3 { - 
reg = <3>; - }; - phy4: ethernet-phy@4 { + + ethernet-phy@4 { reg = <0x18>; }; + + ethernet-phy@1 { + reg = <0x01>; + }; + + ethernet-phy@2 { + reg = <0x02>; + }; + + ethernet-phy@0 { + reg = <0x00>; + }; }; dp1 { @@ -263,51 +304,79 @@ phy-mode = "sgmii"; }; + dp3 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <3>; + reg = <0x3a001400 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <2>; + phy-mode = "sgmii"; + }; + + dp4 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <4>; + reg = <0x3a001600 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <3>; + phy-mode = "sgmii"; + }; + + dp5 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <5>; + reg = <0x3a001800 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <0x18>; + phy-mode = "sgmii"; + }; + ess-switch@3a000000 { switch_cpu_bmp = <0x1>; /* cpu port bitmap */ - switch_lan_bmp = <0x4>; /* lan port bitmap */ + switch_lan_bmp = <0x3c>; /* lan port bitmap */ switch_wan_bmp = <0x2>; /* wan port bitmap */ switch_inner_bmp = <0xc0>; /*inner port bitmap*/ switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ switch_mac_mode1 = <0xf>; /* mac mode for uniphy instance1*/ switch_mac_mode2 = <0xff>; /* mac mode for uniphy instance2*/ qcom,port_phyinfo { - port@0 { - port_id = <1>; - phy_address = <0>; - }; port@1 { - port_id = <2>; - phy_address = <1>; + phy_address = <0x01>; + port_id = <0x02>; }; + + port@0 { + phy_address = <0x00>; + port_id = <0x01>; + }; + port@2 { - port_id = <3>; - phy_address = <2>; + phy_address = <0x02>; + port_id = <0x03>; }; + port@3 { - port_id = <4>; - phy_address = <3>; + phy_address = <0x03>; + port_id = <0x04>; }; + port@4 { - port_id = <5>; phy_address = <0x18>; + port_id = <0x05>; port_mac_sel = "QGMAC_PORT"; }; }; }; - 
nss-macsec0 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x18>; - phy_access_mode = <0>; - mdiobus = <&mdio>; - }; - - pwm { - pinctrl-0 = <&pwm_pins>; - pinctrl-names = "default"; - }; - gpio_keys { compatible = "gpio-keys"; pinctrl-0 = <&button_pins>; @@ -339,38 +408,61 @@ linux,default-trigger = "green:2g"; default-state = "off"; }; - led_power: led@16 { - label = "green:led_pwr"; - gpios = <&tlmm 50 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "green:power"; + led_power: led@16 { + label = "green:led_pwr"; + gpios = <&tlmm 50 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "green:power"; default-state = "off"; }; }; }; +&blsp1_uart3 { + pinctrl-0 = <&uart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&spi_0 { + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + cs-select = <0>; + status = "ok"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + compatible = "n25q128a11"; + linux,modalias = "m25p80", "n25q128a11"; + spi-max-frequency = <50000000>; + use-default-sizes; + }; +}; + +&blsp1_uart2 { + pinctrl-0 = <&uart2_pins>; + pinctrl-names = "default"; + dmas = <&blsp_dma 2>, + <&blsp_dma 3>; + dma-names = "tx", "rx"; + status = "ok"; +}; &qpic_bam { status = "ok"; }; -&qpic_nand { - status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; -}; - -&pcie_phy { +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; status = "ok"; }; -&pcie0 { +&ssphy_0 { + status = "ok"; +}; + +&qusb_phy_0 { status = "ok"; }; @@ -382,21 +474,6 @@ status = "ok"; }; -&sdhc_2 { - pinctrl-0 = <&sd_pins>; - pinctrl-names = "default"; - cd-gpios = <&tlmm 62 GPIO_ACTIVE_LOW>; - status = "ok"; -}; - -&qusb_phy_0 { - status = "ok"; -}; - -&ssphy_0 { - status = "ok"; -}; - &usb3 { status = "ok"; }; @@ -405,77 +482,11 @@ status = "ok"; }; -&CPU0 { - operating-points = < - /* kHz uV (fixed) */ - 864000 1100000 - 1056000 
1100000 - 1320000 1100000 - 1440000 1100000 - 1608000 1100000 - 1800000 1100000 - >; - clock-latency = <200000>; -}; - -&CPU1 { - operating-points = < - /* kHz uV (fixed) */ - 864000 1100000 - 1056000 1100000 - 1320000 1100000 - 1440000 1100000 - 1608000 1100000 - 1800000 1100000 - >; - clock-latency = <200000>; -}; - -&CPU2 { - operating-points = < - /* kHz uV (fixed) */ - 864000 1100000 - 1056000 1100000 - 1320000 1100000 - 1440000 1100000 - 1608000 1100000 - 1800000 1100000 - >; - clock-latency = <200000>; -}; - -&CPU3 { - operating-points = < - /* kHz uV (fixed) */ - 864000 1100000 - 1056000 1100000 - 1320000 1100000 - 1440000 1100000 - 1608000 1100000 - 1800000 1100000 - >; - clock-latency = <200000>; -}; - -&tlmm { - gpio-reserved-ranges = <20 1>; - - i2c_1_pins: i2c_1_pins { - mux { - pins = "gpio42", "gpio43"; - function = "blsp2_i2c"; - drive-strength = <8>; - bias-pull-down; - }; - }; -}; - -&i2c_1 { - pinctrl-0 = <&i2c_1_pins>; +&sdhc_2 { + pinctrl-0 = <&sd_pins>; pinctrl-names = "default"; + cd-gpios = <&tlmm 62 1>; + sd-ldo-gpios = <&tlmm 66 0>; + //vqmmc-supply = <&ipq6018_l2_corner>; status = "ok"; }; - -&rpm_glink { - status = "disabled"; -}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-wallys-dr6018.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-wallys-dr6018.dts index 4b64d672d..12065b62f 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-wallys-dr6018.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-wallys-dr6018.dts @@ -1,22 +1,38 @@ -// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) +/dts-v1/; /* - * IPQ6018 CP01 board device tree source + * Copyright (c) 2019, The Linux Foundation. All rights reserved. * - * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -/dts-v1/; - -#include "ipq6018.dtsi" +#include "qcom-ipq6018.dtsi" +#include "qcom-ipq6018-rpm-regulator.dtsi" +#include "qcom-ipq6018-cpr-regulator.dtsi" +#include "qcom-ipq6018-cp-cpu.dtsi" #include +#include / { + #address-cells = <0x2>; + #size-cells = <0x2>; model = "Wallys DR6018"; compatible = "wallys,dr6018", "qcom,ipq6018-cp01", "qcom,ipq6018"; + interrupt-parent = <&intc>; aliases { serial0 = &blsp1_uart3; serial1 = &blsp1_uart2; + /* * Aliases as required by u-boot * to patch MAC addresses @@ -31,97 +47,139 @@ }; chosen { - stdout-path = "serial0:115200n8"; - bootargs-append = " swiotlb=1"; + bootargs = "console=ttyMSM0,115200,n8 rw init=/init"; + bootargs-append = " console=ttyMSM0,115200,n8 swiotlb=1 coherent_pool=2M"; }; -}; -&blsp1_uart3 { - pinctrl-0 = <&serial_3_pins>; - pinctrl-names = "default"; - status = "ok"; -}; + /* + * +=========+==============+========================+ + * | | | | + * | Region | Start Offset | Size | + * | | | | + * +--------+--------------+-------------------------+ + * | | | | + * | | | | + * | | | | + * | | | | + * | Linux | 0x41000000 | 139MB | + * | | | | + * | | | | + * | | | | + * +--------+--------------+-------------------------+ + * | TZ App | 0x49B00000 | 6MB | + * 
+--------+--------------+-------------------------+ + * + * From the available 145 MB for Linux in the first 256 MB, + * we are reserving 6 MB for TZAPP. + * + * Refer arch/arm64/boot/dts/qcom/qcom-ipq6018-memory.dtsi + * for memory layout. + */ -&spi_0 { - pinctrl-0 = <&spi_0_pins>; - pinctrl-names = "default"; - cs-select = <0>; - status = "ok"; - - m25p80@0 { - #address-cells = <1>; - #size-cells = <1>; - reg = <0>; - compatible = "n25q128a11"; - linux,modalias = "m25p80", "n25q128a11"; - spi-max-frequency = <50000000>; - use-default-sizes; - }; -}; - -&blsp1_uart2 { - pinctrl-0 = <&hsuart_pins &btcoex_pins>; - pinctrl-names = "default"; - dmas = <&blsp_dma 2>, - <&blsp_dma 3>; - dma-names = "tx", "rx"; - status = "ok"; -}; - -&spi_1 { /* BLSP1 QUP1 */ - pinctrl-0 = <&spi_1_pins>; - pinctrl-names = "default"; - cs-select = <0>; - quartz-reset-gpio = <&tlmm 79 1>; - status = "disabled"; - spidev1: spi@1 { - compatible = "qca,spidev"; - reg = <0>; - spi-max-frequency = <24000000>; +/* TZAPP is enabled only in default memory profile */ +#if !defined(__IPQ_MEM_PROFILE_256_MB__) && !defined(__IPQ_MEM_PROFILE_512_MB__) + reserved-memory { + tzapp:tzapp@49B00000 { /* TZAPPS */ + no-map; + reg = <0x0 0x49B00000 0x0 0x00600000>; + }; }; +#endif }; &tlmm { - spi_0_pins: spi-0-pins { - pins = "gpio38", "gpio39", "gpio40", "gpio41"; - function = "blsp0_spi"; - drive-strength = <8>; - bias-pull-down; - }; - - spi_1_pins: spi_1_pins { + uart_pins: uart_pins { mux { - pins = "gpio69", "gpio71", "gpio72"; - function = "blsp1_spi"; + pins = "gpio44", "gpio45"; + function = "blsp2_uart"; drive-strength = <8>; bias-pull-down; }; - spi_cs { - pins = "gpio70"; - function = "blsp1_spi"; - drive-strength = <8>; - bias-disable; - }; - quartz_interrupt { - pins = "gpio78"; - function = "gpio"; - input; - bias-disable; - }; - quartz_reset { - pins = "gpio79"; - function = "gpio"; - output-low; - bias-disable; - }; - }; - sd_pins: sd-pinmux { - pins = "gpio62"; - function = "sd_card"; - 
drive-strength = <8>; - bias-pull-up; + spi_0_pins: spi_0_pins { + mux { + pins = "gpio38", "gpio39", "gpio40", "gpio41"; + function = "blsp0_spi"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; + drive-strength = <8>; + bias-pull-down; + }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-pull-down; + }; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-pull-down; + }; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-pull-down; + }; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-pull-down; + }; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-pull-down; + }; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-pull-down; + }; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-pull-down; + }; + qpic_pad { + pins = "gpio1", "gpio3", "gpio4", + "gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + extcon_usb_pins: extcon_usb_pins { + mux { + pins = "gpio26"; + function = "gpio"; + drive-strength = <2>; + bias-pull-down; + }; + }; + + button_pins: button_pins { + wps_button { + pins = "gpio19"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; }; mdio_pins: mdio_pinmux { @@ -142,101 +200,70 @@ function = "gpio"; bias-pull-up; }; - mux_3 { - pins = "gpio77"; - function = "gpio"; - bias-pull-up; - }; - }; - - pwm_pins: pwm_pinmux { - pins = "gpio18"; - function = "pwm00"; - drive-strength = <8>; - }; - - hsuart_pins: hsuart_pins { - mux { - pins = "gpio71", "gpio72", "gpio69", "gpio70"; - function = "blsp1_uart"; - drive-strength = <8>; - bias-disable; - }; - }; - - button_pins: button_pins { - wps_button { - pins = "gpio19"; - function = "gpio"; - 
drive-strength = <8>; - bias-pull-up; - }; }; leds_pins: leds_pins { led_pwr { - pins = "gpio74"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - led_5g { - pins = "gpio35"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - led_2g { - pins = "gpio37"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; + pins = "gpio74"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led_5g { + pins = "gpio35"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led_2g { + pins = "gpio37"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; }; - - btcoex_pins: btcoex_pins { - mux_0 { - pins = "gpio51"; - function = "pta1_1"; - drive-strength = <6>; - bias-pull-down; - }; - mux_1 { - pins = "gpio53"; - function = "pta1_0"; - drive-strength = <6>; - bias-pull-down; - }; - mux_2 { - pins = "gpio52"; - function = "pta1_2"; - drive-strength = <6>; + uart2_pins: uart2_pins { + mux { + pins = "gpio57", "gpio58"; + function = "blsp4_uart"; + drive-strength = <8>; bias-pull-down; }; }; }; &soc { + extcon_usb: extcon_usb { + pinctrl-0 = <&extcon_usb_pins>; + pinctrl-names = "default"; + id-gpio = <&tlmm 26 GPIO_ACTIVE_LOW>; + status = "ok"; + }; + mdio: mdio@90000 { pinctrl-0 = <&mdio_pins>; pinctrl-names = "default"; phy-reset-gpio = <&tlmm 75 0 &tlmm 77 1>; status = "ok"; - phy0: ethernet-phy@0 { - reg = <0>; + ethernet-phy@3 { + reg = <0x03>; }; - phy1: ethernet-phy@1 { - reg = <1>; - }; - phy2: ethernet-phy@2 { - reg = <2>; - }; - phy3: ethernet-phy@3 { - reg = <3>; - }; - phy4: ethernet-phy@4 { + + ethernet-phy@4 { reg = <0x18>; }; + + ethernet-phy@1 { + reg = <0x01>; + }; + + ethernet-phy@2 { + reg = <0x02>; + }; + + ethernet-phy@0 { + reg = <0x00>; + }; }; dp1 { @@ -272,42 +299,34 @@ switch_mac_mode1 = <0xf>; /* mac mode for uniphy instance1*/ switch_mac_mode2 = <0xff>; /* mac mode for uniphy instance2*/ qcom,port_phyinfo { - port@0 { - port_id = <1>; - phy_address = <0>; 
- }; port@1 { - port_id = <2>; - phy_address = <1>; + phy_address = <0x01>; + port_id = <0x02>; }; + + port@0 { + phy_address = <0x00>; + port_id = <0x01>; + }; + port@2 { - port_id = <3>; - phy_address = <2>; + phy_address = <0x02>; + port_id = <0x03>; }; + port@3 { - port_id = <4>; - phy_address = <3>; + phy_address = <0x03>; + port_id = <0x04>; }; + port@4 { - port_id = <5>; phy_address = <0x18>; + port_id = <0x05>; port_mac_sel = "QGMAC_PORT"; }; }; }; - nss-macsec0 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x18>; - phy_access_mode = <0>; - mdiobus = <&mdio>; - }; - - pwm { - pinctrl-0 = <&pwm_pins>; - pinctrl-names = "default"; - }; - gpio_keys { compatible = "gpio-keys"; pinctrl-0 = <&button_pins>; @@ -320,6 +339,14 @@ linux,input-type = <1>; debounce-interval = <60>; }; + + /* wps { + label = "wps"; + linux,code = <>; + gpios = <&tlmm 9 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + debounce-interval = <60>; + };*/ }; leds { @@ -339,38 +366,61 @@ linux,default-trigger = "green:2g"; default-state = "off"; }; - led_power: led@16 { - label = "green:led_pwr"; - gpios = <&tlmm 50 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "green:power"; + led_power: led@16 { + label = "green:led_pwr"; + gpios = <&tlmm 50 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "green:power"; default-state = "off"; }; }; }; +&blsp1_uart3 { + pinctrl-0 = <&uart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&spi_0 { + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + cs-select = <0>; + status = "ok"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + compatible = "n25q128a11"; + linux,modalias = "m25p80", "n25q128a11"; + spi-max-frequency = <50000000>; + use-default-sizes; + }; +}; + +&blsp1_uart2 { + pinctrl-0 = <&uart2_pins>; + pinctrl-names = "default"; + dmas = <&blsp_dma 2>, + <&blsp_dma 3>; + dma-names = "tx", "rx"; + status = "ok"; +}; &qpic_bam { status = "ok"; }; -&qpic_nand { - status = "ok"; - - nand@0 { - reg = <0>; - 
#address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; -}; - -&pcie_phy { +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; status = "ok"; }; -&pcie0 { +&ssphy_0 { + status = "ok"; +}; + +&qusb_phy_0 { status = "ok"; }; @@ -382,14 +432,6 @@ status = "ok"; }; -&qusb_phy_0 { - status = "ok"; -}; - -&ssphy_0 { - status = "ok"; -}; - &usb3 { status = "ok"; }; @@ -397,78 +439,3 @@ &nss_crypto { status = "ok"; }; - -&CPU0 { - operating-points = < - /* kHz uV (fixed) */ - 864000 1100000 - 1056000 1100000 - 1320000 1100000 - 1440000 1100000 - 1608000 1100000 - 1800000 1100000 - >; - clock-latency = <200000>; -}; - -&CPU1 { - operating-points = < - /* kHz uV (fixed) */ - 864000 1100000 - 1056000 1100000 - 1320000 1100000 - 1440000 1100000 - 1608000 1100000 - 1800000 1100000 - >; - clock-latency = <200000>; -}; - -&CPU2 { - operating-points = < - /* kHz uV (fixed) */ - 864000 1100000 - 1056000 1100000 - 1320000 1100000 - 1440000 1100000 - 1608000 1100000 - 1800000 1100000 - >; - clock-latency = <200000>; -}; - -&CPU3 { - operating-points = < - /* kHz uV (fixed) */ - 864000 1100000 - 1056000 1100000 - 1320000 1100000 - 1440000 1100000 - 1608000 1100000 - 1800000 1100000 - >; - clock-latency = <200000>; -}; - -&tlmm { - gpio-reserved-ranges = <20 1>; - - i2c_1_pins: i2c_1_pins { - mux { - pins = "gpio42", "gpio43"; - function = "blsp2_i2c"; - drive-strength = <8>; - bias-pull-down; - }; - }; -}; - -&i2c_1 { - pinctrl-0 = <&i2c_1_pins>; - pinctrl-names = "default"; - status = "ok"; -}; - -&rpm_glink { - status = "disabled"; -}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-yuncore-ax840.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-yuncore-ax840.dts index fada5f58e..0eb8cd6bc 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-yuncore-ax840.dts +++ 
b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq6018-yuncore-ax840.dts @@ -1,39 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-or-later OR MIT /dts-v1/; -/* - * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ -#include "ipq6018.dtsi" -#include "ipq6018-cpr-regulator.dtsi" +#include "qcom-ipq6018.dtsi" +#include "qcom-ipq6018-rpm-regulator.dtsi" +#include "qcom-ipq6018-cpr-regulator.dtsi" +#include "qcom-ipq6018-cp-cpu.dtsi" #include +#include / { - #address-cells = <0x2>; - #size-cells = <0x2>; model = "YunCore AX840"; compatible = "yuncore,ax840", "qcom,ipq6018-cp03", "qcom,ipq6018"; + + #address-cells = <0x2>; + #size-cells = <0x2>; interrupt-parent = <&intc>; - qcom,msm-id = <0x1A5 0x0>; aliases { - /* - * Aliases as required by u-boot - * to patch MAC addresses - */ + /* Aliases as required by u-boot to patch MAC addresses */ ethernet0 = "/soc/dp2"; ethernet1 = "/soc/dp1"; + + serial0 = &blsp1_uart3; + serial1 = &blsp1_uart2; + led-boot = &led_system; led-failsafe = &led_system; led-running = &led_system; @@ -42,47 +32,15 @@ chosen { bootargs = "console=ttyMSM0,115200,n8 rw init=/init"; -#ifdef __IPQ_MEM_PROFILE_256_MB__ - bootargs-append = " swiotlb=1"; -#else bootargs-append = " swiotlb=1 
coherent_pool=2M"; -#endif }; - /* - * +=========+==============+========================+ - * | | | | - * | Region | Start Offset | Size | - * | | | | - * +--------+--------------+-------------------------+ - * | | | | - * | | | | - * | | | | - * | | | | - * | Linux | 0x41000000 | 139MB | - * | | | | - * | | | | - * | | | | - * +--------+--------------+-------------------------+ - * | TZ App | 0x49B00000 | 6MB | - * +--------+--------------+-------------------------+ - * - * From the available 145 MB for Linux in the first 256 MB, - * we are reserving 6 MB for TZAPP. - * - * Refer arch/arm64/boot/dts/qcom/qcom-ipq6018-memory.dtsi - * for memory layout. - */ - -/* TZAPP is enabled only in default memory profile */ -#if !defined(__IPQ_MEM_PROFILE_256_MB__) && !defined(__IPQ_MEM_PROFILE_512_MB__) reserved-memory { tzapp:tzapp@49B00000 { /* TZAPPS */ no-map; reg = <0x0 0x49B00000 0x0 0x00600000>; }; }; -#endif }; &tlmm { @@ -104,13 +62,79 @@ }; }; - button_pins: button_pins { - wps_button { - pins = "gpio9"; - function = "gpio"; + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; drive-strength = <8>; bias-pull-down; }; + + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-pull-down; + }; + + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-pull-down; + }; + + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-pull-down; + }; + + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-pull-down; + }; + + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-pull-down; + }; + + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-pull-down; + }; + + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-pull-down; + }; + + qpic_pad { + pins = "gpio1", "gpio3", "gpio4", + "gpio10", "gpio11", "gpio17"; + function = 
"qpic_pad"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + button_pins: button_pins { + rst_button { + pins = "gpio19"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; }; mdio_pins: mdio_pinmux { @@ -120,12 +144,14 @@ drive-strength = <8>; bias-pull-up; }; + mux_1 { pins = "gpio65"; function = "mdio"; drive-strength = <8>; bias-pull-up; }; + mux_2 { pins = "gpio75"; function = "gpio"; @@ -155,28 +181,22 @@ bias-pull-down; }; }; - - button_pins: button_pins { - rst_button { - pins = "gpio19"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; }; &soc { - mdio@90000 { + mdio: mdio@90000 { + status = "ok"; + pinctrl-0 = <&mdio_pins>; pinctrl-names = "default"; phy-reset-gpio = <&tlmm 75 0>; - status = "ok"; - phy0: ethernet-phy@0 { - reg = <3>; + + ethernet-phy@0 { + reg = <0x03>; }; - phy1: ethernet-phy@1 { - reg = <4>; + + ethernet-phy@1 { + reg = <0x04>; }; }; @@ -243,6 +263,7 @@ leds { compatible = "gpio-leds"; + pinctrl-0 = <&leds_pins>; pinctrl-names = "default"; @@ -290,32 +311,33 @@ status = "ok"; }; -&qpic_nand { - status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; -}; - -&ssphy_0 { - status = "ok"; -}; - -&qusb_phy_0 { - status = "ok"; -}; - -&usb3 { +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; status = "ok"; }; &nss_crypto { status = "ok"; }; + +&cpu0_opp_table { + compatible = "operating-points-v2"; + opp-shared; + + opp03 { + opp-hz = /bits/ 64 <1200000000>; + opp-microvolt = <3>; + clock-latency-ns = <200000>; + }; + + /delete-node/ opp04; + /delete-node/ opp05; + /delete-node/ opp06; +}; + +&qseecom { + mem-start = <0x49B00000>; + mem-size = <0x600000>; + status = "ok"; +}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-eap102.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-eap102.dts index 04382d999..d04cb1020 
100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-eap102.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-eap102.dts @@ -1,9 +1,21 @@ -// SPDX-License-Identifier: GPL-2.0-only /dts-v1/; -/* Copyright (c) 2020 The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include "ipq8074.dtsi" -#include "ipq8074-ac-cpu.dtsi" +#include "qcom-ipq807x-soc.dtsi" +#include "qcom-ipq807x-ac-cpu.dtsi" / { #address-cells = <0x2>; @@ -12,10 +24,14 @@ compatible = "edgecore,eap102", "qcom,ipq807x-ac02", "qcom,ipq807x"; qcom,msm-id = <0x178 0x0>; interrupt-parent = <&intc>; + qcom,board-id = <0x8 0x0>; + qcom,pmic-id = <0x0 0x0 0x0 0x0>; aliases { - serial0 = &blsp1_uart5; - /* Aliases as required by u-boot to patch MAC addresses */ + /* + * Aliases as required by u-boot + * to patch MAC addresses + */ ethernet1 = "/soc/dp5"; ethernet0 = "/soc/dp6"; @@ -26,541 +42,633 @@ }; chosen { - stdout-path = "serial0"; + bootargs = "console=ttyMSM0,115200,n8 root=/dev/ram0 rw \ + init=/init"; + #ifdef __IPQ_MEM_PROFILE_256_MB__ + bootargs-append = " swiotlb=1"; + #else + bootargs-append = " swiotlb=1 coherent_pool=2M"; + #endif + }; +}; + +&tlmm { + pinctrl-0 = <&btcoex_pins>; + pinctrl-names = "default"; + + btcoex_pins: btcoex_pins { + mux_0 { + pins = "gpio64"; + function = "pta1_1"; + drive-strength = <6>; + bias-pull-down; + }; + mux_1 { + pins = "gpio65"; + function = "pta1_2"; + drive-strength = <6>; + bias-pull-down; + }; }; - soc { - pinctrl@1000000 { - button_pins: button_pins { - reset_button { - pins = "gpio66"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; + mdio_pins: mdio_pinmux { + mux_0 { + pins = "gpio68"; + function = "mdc"; + drive-strength = <8>; + bias-pull-up; + }; + mux_1 { + pins = "gpio69"; + function = "mdio"; + drive-strength = <8>; + bias-pull-up; + }; + mux_2 { + pins = "gpio33"; + function = "gpio"; + bias-pull-up; + }; + mux_3 { + pins = "gpio44"; + function = "gpio"; + bias-pull-up; + }; + }; - usb_mux_sel_pins: usb_mux_pins { - mux { - pins = "gpio27"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - }; + uart_pins: uart_pins { + mux { + pins = "gpio23", "gpio24"; + function = "blsp4_uart1"; + drive-strength = <8>; + bias-disable; + }; + }; - pcie0_pins: pcie_pins { - 
pcie0_rst { - pins = "gpio58"; - function = "pcie0_rst"; - drive-strength = <8>; - bias-pull-down; - }; - pcie0_wake { - pins = "gpio59"; - function = "pcie0_wake"; - drive-strength = <8>; - bias-pull-down; - }; - }; + spi_0_pins: spi_0_pins { + mux { + pins = "gpio38", "gpio39", "gpio40", "gpio41"; + function = "blsp0_spi"; + drive-strength = <8>; + bias-disable; + }; + }; - mdio_pins: mdio_pinmux { - mux_0 { - pins = "gpio68"; - function = "mdc"; - drive-strength = <8>; - bias-pull-up; - }; - mux_1 { - pins = "gpio69"; - function = "mdio"; - drive-strength = <8>; - bias-pull-up; - }; - mux_2 { - pins = "gpio33"; - function = "gpio"; - bias-pull-up; - }; - mux_3 { - pins = "gpio44"; - function = "gpio"; - bias-pull-up; - }; - }; - led_pins: led_pins { - led_2g { - pins = "gpio42"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; + drive-strength = <8>; + bias-disable; + }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-disable; + }; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-disable; + }; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-disable; + }; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-disable; + }; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-disable; + }; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-disable; + }; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-disable; + }; + qpic_pad { + pins = "gpio1", "gpio3", "gpio4", + "gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; + drive-strength = <8>; + bias-disable; + }; + }; - led_5g { - pins = "gpio43"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; + hsuart_pins: hsuart_pins { + mux { + pins = "gpio49"; 
+ function = "blsp2_uart"; + drive-strength = <8>; + bias-disable; + }; + }; + + button_pins: button_pins { + + reset_button { + pins = "gpio66"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + }; + + led_pins: led_pins { + led_pwr { + pins = "gpio46"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + + led_2g { + pins = "gpio47"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + + led_5g { + pins = "gpio48"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + + led_bt { + pins = "gpio50"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + usb_mux_sel_pins: usb_mux_pins { + mux { + pins = "gpio27"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + pcie0_pins: pcie_pins { + pcie0_rst { + pins = "gpio58"; + function = "pcie0_rst"; + drive-strength = <8>; + bias-pull-down; + }; + pcie0_wake { + pins = "gpio59"; + function = "pcie0_wake"; + drive-strength = <8>; + bias-pull-down; + }; + }; + +}; + +&soc { + gpio_keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + button@1 { + label = "reset_button"; + linux,code = ; + gpios = <&tlmm 66 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + debounce-interval = <60>; + }; + }; + + mdio: mdio@90000 { + pinctrl-0 = <&mdio_pins>; + pinctrl-names = "default"; + phy-reset-gpio = <&tlmm 37 0 &tlmm 25 1 &tlmm 44 1>; + compatible = "qcom,ipq40xx-mdio", "qcom,qca-mdio"; + phy0: ethernet-phy@0 { + reg = <0>; + }; + phy1: ethernet-phy@1 { + reg = <1>; + }; + phy2: ethernet-phy@2 { + reg = <2>; + }; + phy3: ethernet-phy@3 { + reg = <3>; + }; + phy4: ethernet-phy@4 { + reg = <24>; + }; + phy5: ethernet-phy@5 { + reg = <28>; + }; + }; + + ess-switch@3a000000 { + switch_cpu_bmp = <0x1>; /* cpu port bitmap */ + switch_lan_bmp = <0x3e>; /* lan port bitmap */ + switch_wan_bmp = <0x40>; /* wan port bitmap */ + switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ + 
switch_mac_mode1 = <0xf>; /* mac mode for uniphy instance1*/ + switch_mac_mode2 = <0xf>; /* mac mode for uniphy instance2*/ + bm_tick_mode = <0>; /* bm tick mode */ + tm_tick_mode = <0>; /* tm tick mode */ + qcom,port_phyinfo { + port@0 { + port_id = <1>; + phy_address = <0>; + }; + port@1 { + port_id = <2>; + phy_address = <1>; + }; + port@2 { + port_id = <3>; + phy_address = <2>; + }; + port@3 { + port_id = <4>; + phy_address = <3>; + }; + port@4 { + port_id = <5>; + phy_address = <24>; + port_mac_sel = "QGMAC_PORT"; + }; + port@5 { + port_id = <6>; + phy_address = <28>; + port_mac_sel = "QGMAC_PORT"; }; }; - - serial@78b3000 { - status = "ok"; - }; - - spi@78b5000 { - status = "ok"; - pinctrl-0 = <&spi_0_pins>; - pinctrl-names = "default"; - cs-select = <0>; - - m25p80@0 { - compatible = "n25q128a11"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0>; - spi-max-frequency = <50000000>; + port_scheduler_resource { + port@0 { + port_id = <0>; + ucast_queue = <0 143>; + mcast_queue = <256 271>; + l0sp = <0 35>; + l0cdrr = <0 47>; + l0edrr = <0 47>; + l1cdrr = <0 7>; + l1edrr = <0 7>; + }; + port@1 { + port_id = <1>; + ucast_queue = <144 159>; + mcast_queue = <272 275>; + l0sp = <36 39>; + l0cdrr = <48 63>; + l0edrr = <48 63>; + l1cdrr = <8 11>; + l1edrr = <8 11>; + }; + port@2 { + port_id = <2>; + ucast_queue = <160 175>; + mcast_queue = <276 279>; + l0sp = <40 43>; + l0cdrr = <64 79>; + l0edrr = <64 79>; + l1cdrr = <12 15>; + l1edrr = <12 15>; + }; + port@3 { + port_id = <3>; + ucast_queue = <176 191>; + mcast_queue = <280 283>; + l0sp = <44 47>; + l0cdrr = <80 95>; + l0edrr = <80 95>; + l1cdrr = <16 19>; + l1edrr = <16 19>; + }; + port@4 { + port_id = <4>; + ucast_queue = <192 207>; + mcast_queue = <284 287>; + l0sp = <48 51>; + l0cdrr = <96 111>; + l0edrr = <96 111>; + l1cdrr = <20 23>; + l1edrr = <20 23>; + }; + port@5 { + port_id = <5>; + ucast_queue = <208 223>; + mcast_queue = <288 291>; + l0sp = <52 55>; + l0cdrr = <112 127>; + l0edrr = <112 127>; + 
l1cdrr = <24 27>; + l1edrr = <24 27>; + }; + port@6 { + port_id = <6>; + ucast_queue = <224 239>; + mcast_queue = <292 295>; + l0sp = <56 59>; + l0cdrr = <128 143>; + l0edrr = <128 143>; + l1cdrr = <28 31>; + l1edrr = <28 31>; + }; + port@7 { + port_id = <7>; + ucast_queue = <240 255>; + mcast_queue = <296 299>; + l0sp = <60 63>; + l0cdrr = <144 159>; + l0edrr = <144 159>; + l1cdrr = <32 35>; + l1edrr = <32 35>; }; }; - - dma@7984000 { - status = "ok"; - }; - - nand@79b0000 { - status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; - }; - - qusb@79000 { - status = "ok"; - }; - - ssphy@78000 { - status = "ok"; - }; - - usb3@8A00000 { - status = "ok"; - }; - - usb3@8C00000 { - status = "ok"; - }; - - qusb@59000 { - status = "ok"; - }; - - ssphy@58000 { - status = "ok"; - }; - - usb3@8C00000 { - status = "ok"; - }; - - phy@84000 { - status = "ok"; - }; - - phy@86000 { - status = "ok"; - }; - - pci@20000000 { - perst-gpio = <&tlmm 58 1>; - status = "ok"; - }; - - gpio_keys { - compatible = "gpio-keys"; - pinctrl-0 = <&button_pins>; - pinctrl-names = "default"; - status = "ok"; - - button@1 { - label = "reset"; - linux,code = ; - gpios = <&tlmm 66 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; - }; - - mdio: mdio@90000 { - pinctrl-0 = <&mdio_pins>; - pinctrl-names = "default"; - phy-reset-gpio = <&tlmm 37 0 &tlmm 25 1 &tlmm 44 1>; - phy0: ethernet-phy@0 { - reg = <0>; - }; - phy1: ethernet-phy@1 { - reg = <1>; - }; - phy2: ethernet-phy@2 { - reg = <2>; - }; - phy3: ethernet-phy@3 { - reg = <3>; - }; - phy4: ethernet-phy@4 { - reg = <24>; - }; - phy5: ethernet-phy@5 { - reg = <28>; - }; - }; - - ess-switch@3a000000 { - switch_cpu_bmp = <0x1>; /* cpu port bitmap */ - switch_lan_bmp = <0x3e>; /* lan port bitmap */ - switch_wan_bmp = <0x40>; /* wan port bitmap */ - switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ - 
switch_mac_mode1 = <0xf>; /* mac mode for uniphy instance1*/ - switch_mac_mode2 = <0xf>; /* mac mode for uniphy instance2*/ - bm_tick_mode = <0>; /* bm tick mode */ - tm_tick_mode = <0>; /* tm tick mode */ - qcom,port_phyinfo { - port@0 { - port_id = <1>; - phy_address = <0>; - }; - port@1 { - port_id = <2>; - phy_address = <1>; - }; - port@2 { - port_id = <3>; - phy_address = <2>; - }; - port@3 { - port_id = <4>; - phy_address = <3>; - }; - port@4 { - port_id = <5>; - phy_address = <24>; - port_mac_sel = "QGMAC_PORT"; - }; - port@5 { - port_id = <6>; - phy_address = <28>; - port_mac_sel = "QGMAC_PORT"; - }; - }; - port_scheduler_resource { - port@0 { - port_id = <0>; - ucast_queue = <0 143>; - mcast_queue = <256 271>; - l0sp = <0 35>; - l0cdrr = <0 47>; - l0edrr = <0 47>; - l1cdrr = <0 7>; - l1edrr = <0 7>; - }; - port@1 { - port_id = <1>; - ucast_queue = <144 159>; - mcast_queue = <272 275>; - l0sp = <36 39>; - l0cdrr = <48 63>; - l0edrr = <48 63>; - l1cdrr = <8 11>; - l1edrr = <8 11>; - }; - port@2 { - port_id = <2>; - ucast_queue = <160 175>; - mcast_queue = <276 279>; - l0sp = <40 43>; - l0cdrr = <64 79>; - l0edrr = <64 79>; - l1cdrr = <12 15>; - l1edrr = <12 15>; - }; - port@3 { - port_id = <3>; - ucast_queue = <176 191>; - mcast_queue = <280 283>; - l0sp = <44 47>; - l0cdrr = <80 95>; - l0edrr = <80 95>; - l1cdrr = <16 19>; - l1edrr = <16 19>; - }; - port@4 { - port_id = <4>; - ucast_queue = <192 207>; - mcast_queue = <284 287>; - l0sp = <48 51>; - l0cdrr = <96 111>; - l0edrr = <96 111>; - l1cdrr = <20 23>; - l1edrr = <20 23>; - }; - port@5 { - port_id = <5>; - ucast_queue = <208 223>; - mcast_queue = <288 291>; - l0sp = <52 55>; - l0cdrr = <112 127>; - l0edrr = <112 127>; - l1cdrr = <24 27>; - l1edrr = <24 27>; - }; - port@6 { - port_id = <6>; - ucast_queue = <224 239>; - mcast_queue = <292 295>; - l0sp = <56 59>; - l0cdrr = <128 143>; - l0edrr = <128 143>; - l1cdrr = <28 31>; - l1edrr = <28 31>; - }; - port@7 { - port_id = <7>; - ucast_queue = <240 255>; - 
mcast_queue = <296 299>; - l0sp = <60 63>; - l0cdrr = <144 159>; - l0edrr = <144 159>; - l1cdrr = <32 35>; - l1edrr = <32 35>; - }; - }; - port_scheduler_config { - port@0 { - port_id = <0>; - l1scheduler { - group@0 { - sp = <0 1>; /*L0 SPs*/ - /*cpri cdrr epri edrr*/ - cfg = <0 0 0 0>; - }; - }; - l0scheduler { - group@0 { - /*unicast queues*/ - ucast_queue = <0 4 8>; - /*multicast queues*/ - mcast_queue = <256 260>; - /*sp cpri cdrr epri edrr*/ - cfg = <0 0 0 0 0>; - }; - group@1 { - ucast_queue = <1 5 9>; - mcast_queue = <257 261>; - cfg = <0 1 1 1 1>; - }; - group@2 { - ucast_queue = <2 6 10>; - mcast_queue = <258 262>; - cfg = <0 2 2 2 2>; - }; - group@3 { - ucast_queue = <3 7 11>; - mcast_queue = <259 263>; - cfg = <0 3 3 3 3>; - }; + port_scheduler_config { + port@0 { + port_id = <0>; + l1scheduler { + group@0 { + sp = <0 1>; /*L0 SPs*/ + /*cpri cdrr epri edrr*/ + cfg = <0 0 0 0>; }; }; - port@1 { - port_id = <1>; - l1scheduler { - group@0 { - sp = <36>; - cfg = <0 8 0 8>; - }; - group@1 { - sp = <37>; - cfg = <1 9 1 9>; - }; + l0scheduler { + group@0 { + /*unicast queues*/ + ucast_queue = <0 4 8>; + /*multicast queues*/ + mcast_queue = <256 260>; + /*sp cpri cdrr epri edrr*/ + cfg = <0 0 0 0 0>; }; - l0scheduler { - group@0 { - ucast_queue = <144>; - ucast_loop_pri = <16>; - mcast_queue = <272>; - mcast_loop_pri = <4>; - cfg = <36 0 48 0 48>; - }; + group@1 { + ucast_queue = <1 5 9>; + mcast_queue = <257 261>; + cfg = <0 1 1 1 1>; + }; + group@2 { + ucast_queue = <2 6 10>; + mcast_queue = <258 262>; + cfg = <0 2 2 2 2>; + }; + group@3 { + ucast_queue = <3 7 11>; + mcast_queue = <259 263>; + cfg = <0 3 3 3 3>; }; }; - port@2 { - port_id = <2>; - l1scheduler { - group@0 { - sp = <40>; - cfg = <0 12 0 12>; - }; - group@1 { - sp = <41>; - cfg = <1 13 1 13>; - }; + }; + port@1 { + port_id = <1>; + l1scheduler { + group@0 { + sp = <36>; + cfg = <0 8 0 8>; }; - l0scheduler { - group@0 { - ucast_queue = <160>; - ucast_loop_pri = <16>; - mcast_queue = <276>; - 
mcast_loop_pri = <4>; - cfg = <40 0 64 0 64>; - }; + group@1 { + sp = <37>; + cfg = <1 9 1 9>; }; }; - port@3 { - port_id = <3>; - l1scheduler { - group@0 { - sp = <44>; - cfg = <0 16 0 16>; - }; - group@1 { - sp = <45>; - cfg = <1 17 1 17>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <176>; - ucast_loop_pri = <16>; - mcast_queue = <280>; - mcast_loop_pri = <4>; - cfg = <44 0 80 0 80>; - }; + l0scheduler { + group@0 { + ucast_queue = <144>; + ucast_loop_pri = <16>; + mcast_queue = <272>; + mcast_loop_pri = <4>; + cfg = <36 0 48 0 48>; }; }; - port@4 { - port_id = <4>; - l1scheduler { - group@0 { - sp = <48>; - cfg = <0 20 0 20>; - }; - group@1 { - sp = <49>; - cfg = <1 21 1 21>; - }; + }; + port@2 { + port_id = <2>; + l1scheduler { + group@0 { + sp = <40>; + cfg = <0 12 0 12>; }; - l0scheduler { - group@0 { - ucast_queue = <192>; - ucast_loop_pri = <16>; - mcast_queue = <284>; - mcast_loop_pri = <4>; - cfg = <48 0 96 0 96>; - }; + group@1 { + sp = <41>; + cfg = <1 13 1 13>; }; }; - port@5 { - port_id = <5>; - l1scheduler { - group@0 { - sp = <52>; - cfg = <0 24 0 24>; - }; - group@1 { - sp = <53>; - cfg = <1 25 1 25>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <208>; - ucast_loop_pri = <16>; - mcast_queue = <288>; - mcast_loop_pri = <4>; - cfg = <52 0 112 0 112>; - }; + l0scheduler { + group@0 { + ucast_queue = <160>; + ucast_loop_pri = <16>; + mcast_queue = <276>; + mcast_loop_pri = <4>; + cfg = <40 0 64 0 64>; }; }; - port@6 { - port_id = <6>; - l1scheduler { - group@0 { - sp = <56>; - cfg = <0 28 0 28>; - }; - group@1 { - sp = <57>; - cfg = <1 29 1 29>; - }; + }; + port@3 { + port_id = <3>; + l1scheduler { + group@0 { + sp = <44>; + cfg = <0 16 0 16>; }; - l0scheduler { - group@0 { - ucast_queue = <224>; - ucast_loop_pri = <16>; - mcast_queue = <292>; - mcast_loop_pri = <4>; - cfg = <56 0 128 0 128>; - }; + group@1 { + sp = <45>; + cfg = <1 17 1 17>; }; }; - port@7 { - port_id = <7>; - l1scheduler { - group@0 { - sp = <60>; - cfg = <0 32 0 
32>; - }; - group@1 { - sp = <61>; - cfg = <1 33 1 33>; - }; + l0scheduler { + group@0 { + ucast_queue = <176>; + ucast_loop_pri = <16>; + mcast_queue = <280>; + mcast_loop_pri = <4>; + cfg = <44 0 80 0 80>; }; - l0scheduler { - group@0 { - ucast_queue = <240>; - ucast_loop_pri = <16>; - mcast_queue = <296>; - cfg = <60 0 144 0 144>; - }; + }; + }; + port@4 { + port_id = <4>; + l1scheduler { + group@0 { + sp = <48>; + cfg = <0 20 0 20>; + }; + group@1 { + sp = <49>; + cfg = <1 21 1 21>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <192>; + ucast_loop_pri = <16>; + mcast_queue = <284>; + mcast_loop_pri = <4>; + cfg = <48 0 96 0 96>; + }; + }; + }; + port@5 { + port_id = <5>; + l1scheduler { + group@0 { + sp = <52>; + cfg = <0 24 0 24>; + }; + group@1 { + sp = <53>; + cfg = <1 25 1 25>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <208>; + ucast_loop_pri = <16>; + mcast_queue = <288>; + mcast_loop_pri = <4>; + cfg = <52 0 112 0 112>; + }; + }; + }; + port@6 { + port_id = <6>; + l1scheduler { + group@0 { + sp = <56>; + cfg = <0 28 0 28>; + }; + group@1 { + sp = <57>; + cfg = <1 29 1 29>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <224>; + ucast_loop_pri = <16>; + mcast_queue = <292>; + mcast_loop_pri = <4>; + cfg = <56 0 128 0 128>; + }; + }; + }; + port@7 { + port_id = <7>; + l1scheduler { + group@0 { + sp = <60>; + cfg = <0 32 0 32>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <240>; + mcast_queue = <296>; + cfg = <60 0 144 0 144>; }; }; }; }; + }; +/* + dp1 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <1>; + reg = <0x3a001000 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <0>; + phy-mode = "sgmii"; + }; - dp6 { - device_type = "network"; - compatible = "qcom,nss-dp"; - qcom,id = <6>; - reg = <0x3a001800 0x200>; - qcom,mactype = <0>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <28>; - phy-mode = 
"sgmii"; - }; + dp2 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <2>; + reg = <0x3a001200 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <1>; + phy-mode = "sgmii"; + }; - dp5 { - device_type = "network"; - compatible = "qcom,nss-dp"; - qcom,id = <5>; - reg = <0x3a001a00 0x200>; - qcom,mactype = <0>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <24>; - phy-mode = "sgmii"; - }; + dp3 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <3>; + reg = <0x3a001400 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <2>; + phy-mode = "sgmii"; + }; - leds { - compatible = "gpio-leds"; - pinctrl-0 = <&led_pins>; - pinctrl-names = "default"; + dp4 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <4>; + reg = <0x3a001600 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <3>; + phy-mode = "sgmii"; + }; +*/ + dp6 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <6>; + reg = <0x3a001800 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <28>; + phy-mode = "sgmii"; + }; + + dp5 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <5>; + reg = <0x3a001a00 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <24>; + phy-mode = "sgmii"; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; led_pwr { label = "green:wan"; @@ -587,18 +695,132 @@ default-state = "on"; linux,default-trigger = "led_bt"; }; - }; - nss-macsec0 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x18>; - mdiobus = <&mdio>; - }; - nss-macsec1 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x1c>; - 
mdiobus = <&mdio>; - }; }; + nss-macsec0 { + compatible = "qcom,nss-macsec"; + phy_addr = <0x18>; + phy_access_mode = <0>; + mdiobus = <&mdio>; + }; + nss-macsec1 { + compatible = "qcom,nss-macsec"; + phy_addr = <0x1c>; + phy_access_mode = <0>; + mdiobus = <&mdio>; + }; +}; + +&serial_blsp4 { + pinctrl-0 = <&uart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&spi_0 { /* BLSP1 QUP1 */ + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + cs-select = <0>; + status = "ok"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + compatible = "n25q128a11"; + linux,modalias = "m25p80", "n25q128a11"; + spi-max-frequency = <50000000>; + use-default-sizes; + }; +}; + +&serial_blsp2 { + pinctrl-0 = <&hsuart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&nss0 { + qcom,low-frequency = <187200000>; + qcom,mid-frequency = <748800000>; + qcom,max-frequency = <1497600000>; +}; + +&msm_imem { + status = "disabled"; +}; + +&ssphy_0 { + status = "ok"; +}; + +&qusb_phy_0 { + status = "ok"; +}; + +&ssphy_1 { + status = "ok"; +}; + +&qusb_phy_1 { + status = "ok"; +}; + +&usb3_0 { + status = "ok"; +}; + +&usb3_1 { + status = "ok"; +}; + +&cryptobam { + status = "ok"; +}; + +&crypto { + status = "ok"; +}; + +&i2c_0 { + status = "disabled"; +}; + +&i2c_1 { + status = "disabled"; +}; + +&qpic_bam { + status = "ok"; +}; + +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&qpic_lcd { + status = "disabled"; +}; + +&qpic_lcd_panel { + status = "disabled"; +}; + +&ledc { + status = "disabled"; +}; + +&pcie0 { + status = "ok"; +}; + +&pcie1 { + status = "disabled"; +}; + +&glink_rpm { + status = "disabled"; }; &apc_cpr { @@ -682,12 +904,6 @@ status = "disabled"; }; -&nss0 { - qcom,low-frequency = <187200000>; - qcom,mid-frequency = <748800000>; - qcom,max-frequency = <1497600000>; -}; - &nss0 { npu-supply = <&dummy_reg>; mx-supply = <&dummy_reg>; diff --git 
a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-eap106.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-eap106.dts index 177019c25..d733513c3 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-eap106.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-eap106.dts @@ -1,562 +1,716 @@ -// SPDX-License-Identifier: GPL-2.0-only /dts-v1/; -/* Copyright (c) 2020 The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include "ipq8074.dtsi" -#include "ipq8074-hk-cpu.dtsi" +#include "qcom-ipq807x-soc.dtsi" +#include "qcom-ipq807x-hk-cpu.dtsi" / { #address-cells = <0x2>; #size-cells = <0x2>; model = "Edgecore EAP106"; compatible = "edgecore,eap106", "qcom,ipq807x-hk02", "qcom,ipq807x"; - qcom,msm-id = <0x158 0x0>, <0x188 0x0>; + qcom,msm-id = <0x143 0x0>; interrupt-parent = <&intc>; + qcom,board-id = <0x8 0x0>; + qcom,pmic-id = <0x0 0x0 0x0 0x0>; aliases { - serial0 = &blsp1_uart5; - /* Aliases as required by u-boot to patch MAC addresses */ + /* + * Aliases as required by u-boot + * to patch MAC addresses + */ ethernet0 = "/soc/dp1"; ethernet1 = "/soc/dp2"; }; - chosen { - stdout-path = "serial0"; -#ifndef __IPQ_MEM_PROFILE_256_MB__ -// bootargs-append = " vmalloc=600M"; -#endif + bootargs = "console=ttyMSM0,115200,n8 root=/dev/ram0 rw init=/init"; + #ifdef __IPQ_MEM_PROFILE_256_MB__ + bootargs-append = " swiotlb=1"; + #else + bootargs-append = " swiotlb=1 coherent_pool=2M"; + #endif + }; +}; + +&tlmm { + leds_pins: leds_pinmux { + + led1_yellow { + pins = "gpio25"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led1_green { + pins = "gpio28"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led2_amber { + pins = "gpio29"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led2_blue { + pins = "gpio32"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; }; - soc { - - dp1 { - device_type = "network"; - compatible = "qcom,nss-dp"; - qcom,id = <4>; - reg = <0x3a001600 0x200>; - qcom,mactype = <4>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <3>; - phy-mode = "sgmii"; + mdio_pins: mdio_pinmux { + mux_0 { + pins = "gpio68"; + function = "mdc"; + drive-strength = <8>; + bias-pull-up; }; - - dp2 { - device_type = "network"; - compatible = "qcom,nss-dp"; - qcom,id = <6>; - reg = <0x3a007000 0x3fff>; - qcom,mactype = <1>; - local-mac-address = [000000000000]; - 
qcom,link-poll = <1>; - qcom,phy-mdio-addr = <8>; - phy-mode = "sgmii"; + mux_1 { + pins = "gpio69"; + function = "mdio"; + drive-strength = <8>; + bias-pull-up; }; + }; - pinctrl@1000000 { - button_pins: button_pins { - wps_button { - pins = "gpio57"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; - - mdio_pins: mdio_pinmux { - mux_0 { - pins = "gpio68"; - function = "mdc"; - drive-strength = <8>; - bias-pull-up; - }; - mux_1 { - pins = "gpio69"; - function = "mdio"; - drive-strength = <8>; - bias-pull-up; - }; - }; - - uniphy_pins: uniphy_pinmux { - mux { - pins = "gpio60"; - function = "rx2"; - bias-disable; - }; - }; - - leds_pins: leds_pinmux { - led1_yellow { - pins = "gpio25"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - - led1_green { - pins = "gpio28"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - - led2_amber { - pins = "gpio29"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - - led2_blue { - pins = "gpio32"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - }; + uart_pins: uart_pins { + mux { + pins = "gpio23", "gpio24"; + function = "blsp4_uart1"; + drive-strength = <8>; + bias-disable; }; + }; - serial@78b3000 { - status = "ok"; + i2c_0_pins: i2c_0_pinmux { + mux { + pins = "gpio42", "gpio43"; + function = "blsp1_i2c"; + drive-strength = <8>; + bias-disable; }; + }; - spi@78b5000 { - status = "ok"; - pinctrl-0 = <&spi_0_pins>; - pinctrl-names = "default"; - cs-select = <0>; - - m25p80@0 { - compatible = "n25q128a11"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0>; - spi-max-frequency = <50000000>; - }; + spi_0_pins: spi_0_pins { + mux { + pins = "gpio38", "gpio39", "gpio40", "gpio41"; + function = "blsp0_spi"; + drive-strength = <8>; + bias-disable; }; + }; - dma@7984000 { - status = "ok"; + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; + drive-strength = <8>; + bias-disable; }; - - nand@79b0000 { 
- status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-disable; }; - - qusb@79000 { - status = "ok"; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-disable; }; - - ssphy@78000 { - status = "ok"; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-disable; }; - - usb3@8A00000 { - status = "ok"; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-disable; }; - - qusb@59000 { - status = "ok"; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-disable; }; - - ssphy@58000 { - status = "ok"; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-disable; }; - - usb3@8C00000 { - status = "ok"; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-disable; }; - - phy@84000 { - status = "ok"; + data_8 { + pins = "gpio16"; + function = "qpic_pad8"; + drive-strength = <8>; + bias-disable; }; - - phy@86000 { - status = "ok"; + qpic_pad { + pins = "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", + "gpio9", "gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; + drive-strength = <8>; + bias-disable; }; + }; - pci@20000000 { - perst-gpio = <&tlmm 58 1>; - status = "ok"; + hsuart_pins: hsuart_pins { + mux { + pins = "gpio46", "gpio47", "gpio48", "gpio49"; + function = "blsp2_uart"; + drive-strength = <8>; + bias-disable; + output-low; }; - - phy@8e000 { - status = "ok"; + mux_1 { + pins = "gpio51"; + function = "gpio"; + drive-strength = <8>; + bias-disable; + output-high; }; + }; - pci@10000000 { - perst-gpio = <&tlmm 61 0x1>; - status = "ok"; + button_pins: button_pins { + wps_button { + pins = "gpio57"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; }; + }; - 
mdio@90000 { - pinctrl-0 = <&mdio_pins>; - pinctrl-names = "default"; - phy-reset-gpio = <&tlmm 37 0>; - phy0: ethernet-phy@0 { - reg = <0>; - }; - phy1: ethernet-phy@1 { - reg = <1>; - }; - phy2: ethernet-phy@2 { - reg = <2>; - }; - phy3: ethernet-phy@3 { - reg = <3>; - }; - phy4: ethernet-phy@4 { - reg = <4>; - }; - phy5: ethernet-phy@5 { - compatible ="ethernet-phy-ieee802.3-c45"; - reg = <8>; - }; + uniphy_pins: uniphy_pinmux { + mux { + pins = "gpio60"; + function = "rx2"; + bias-disable; }; - - ess-switch@3a000000 { - pinctrl-0 = <&uniphy_pins>; - pinctrl-names = "default"; - switch_cpu_bmp = <0x1>; /* cpu port bitmap */ - switch_lan_bmp = <0x30>; /* lan port bitmap */ - switch_wan_bmp = <0x40>; /* wan port bitmap */ - switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ - switch_mac_mode1 = <0xff>; /* mac mode for uniphy instance1*/ - switch_mac_mode2 = <0xd>; /* mac mode for uniphy instance2*/ - bm_tick_mode = <0>; /* bm tick mode */ - tm_tick_mode = <0>; /* tm tick mode */ - port_scheduler_resource { - port@0 { - port_id = <0>; - ucast_queue = <0 143>; - mcast_queue = <256 271>; - l0sp = <0 35>; - l0cdrr = <0 47>; - l0edrr = <0 47>; - l1cdrr = <0 7>; - l1edrr = <0 7>; - }; - port@1 { - port_id = <1>; - ucast_queue = <144 159>; - mcast_queue = <272 275>; - l0sp = <36 39>; - l0cdrr = <48 63>; - l0edrr = <48 63>; - l1cdrr = <8 11>; - l1edrr = <8 11>; - }; - port@2 { - port_id = <2>; - ucast_queue = <160 175>; - mcast_queue = <276 279>; - l0sp = <40 43>; - l0cdrr = <64 79>; - l0edrr = <64 79>; - l1cdrr = <12 15>; - l1edrr = <12 15>; - }; - port@3 { - port_id = <3>; - ucast_queue = <176 191>; - mcast_queue = <280 283>; - l0sp = <44 47>; - l0cdrr = <80 95>; - l0edrr = <80 95>; - l1cdrr = <16 19>; - l1edrr = <16 19>; - }; - port@4 { - port_id = <4>; - ucast_queue = <192 207>; - mcast_queue = <284 287>; - l0sp = <48 51>; - l0cdrr = <96 111>; - l0edrr = <96 111>; - l1cdrr = <20 23>; - l1edrr = <20 23>; - }; - port@5 { - port_id = <5>; - ucast_queue = <208 
223>; - mcast_queue = <288 291>; - l0sp = <52 55>; - l0cdrr = <112 127>; - l0edrr = <112 127>; - l1cdrr = <24 27>; - l1edrr = <24 27>; - }; - port@6 { - port_id = <6>; - ucast_queue = <224 239>; - mcast_queue = <292 295>; - l0sp = <56 59>; - l0cdrr = <128 143>; - l0edrr = <128 143>; - l1cdrr = <28 31>; - l1edrr = <28 31>; - }; - port@7 { - port_id = <7>; - ucast_queue = <240 255>; - mcast_queue = <296 299>; - l0sp = <60 63>; - l0cdrr = <144 159>; - l0edrr = <144 159>; - l1cdrr = <32 35>; - l1edrr = <32 35>; - }; - }; - port_scheduler_config { - port@0 { - port_id = <0>; - l1scheduler { - group@0 { - sp = <0 1>; /*L0 SPs*/ - /*cpri cdrr epri edrr*/ - cfg = <0 0 0 0>; - }; - }; - l0scheduler { - group@0 { - /*unicast queues*/ - ucast_queue = <0 4 8>; - /*multicast queues*/ - mcast_queue = <256 260>; - /*sp cpri cdrr epri edrr*/ - cfg = <0 0 0 0 0>; - }; - group@1 { - ucast_queue = <1 5 9>; - mcast_queue = <257 261>; - cfg = <0 1 1 1 1>; - }; - group@2 { - ucast_queue = <2 6 10>; - mcast_queue = <258 262>; - cfg = <0 2 2 2 2>; - }; - group@3 { - ucast_queue = <3 7 11>; - mcast_queue = <259 263>; - cfg = <0 3 3 3 3>; - }; - }; - }; - port@1 { - port_id = <1>; - l1scheduler { - group@0 { - sp = <36>; - cfg = <0 8 0 8>; - }; - group@1 { - sp = <37>; - cfg = <1 9 1 9>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <144>; - ucast_loop_pri = <16>; - mcast_queue = <272>; - mcast_loop_pri = <4>; - cfg = <36 0 48 0 48>; - }; - }; - }; - port@2 { - port_id = <2>; - l1scheduler { - group@0 { - sp = <40>; - cfg = <0 12 0 12>; - }; - group@1 { - sp = <41>; - cfg = <1 13 1 13>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <160>; - ucast_loop_pri = <16>; - mcast_queue = <276>; - mcast_loop_pri = <4>; - cfg = <40 0 64 0 64>; - }; - }; - }; - port@3 { - port_id = <3>; - l1scheduler { - group@0 { - sp = <44>; - cfg = <0 16 0 16>; - }; - group@1 { - sp = <45>; - cfg = <1 17 1 17>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <176>; - ucast_loop_pri = <16>; - 
mcast_queue = <280>; - mcast_loop_pri = <4>; - cfg = <44 0 80 0 80>; - }; - }; - }; - port@4 { - port_id = <4>; - l1scheduler { - group@0 { - sp = <48>; - cfg = <0 20 0 20>; - }; - group@1 { - sp = <49>; - cfg = <1 21 1 21>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <192>; - ucast_loop_pri = <16>; - mcast_queue = <284>; - mcast_loop_pri = <4>; - cfg = <48 0 96 0 96>; - }; - }; - }; - port@5 { - port_id = <5>; - l1scheduler { - group@0 { - sp = <52>; - cfg = <0 24 0 24>; - }; - group@1 { - sp = <53>; - cfg = <1 25 1 25>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <208>; - ucast_loop_pri = <16>; - mcast_queue = <288>; - mcast_loop_pri = <4>; - cfg = <52 0 112 0 112>; - }; - }; - }; - port@6 { - port_id = <6>; - l1scheduler { - group@0 { - sp = <56>; - cfg = <0 28 0 28>; - }; - group@1 { - sp = <57>; - cfg = <1 29 1 29>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <224>; - ucast_loop_pri = <16>; - mcast_queue = <292>; - mcast_loop_pri = <4>; - cfg = <56 0 128 0 128>; - }; - }; - }; - port@7 { - port_id = <7>; - l1scheduler { - group@0 { - sp = <60>; - cfg = <0 32 0 32>; - }; - group@1 { - sp = <61>; - cfg = <1 33 1 33>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <240>; - ucast_loop_pri = <16>; - mcast_queue = <296>; - cfg = <60 0 144 0 144>; - }; - }; - }; - }; + }; + cnss_wlan_en_active: cnss_wlan_en_active { + mux { + pins = "gpio57"; + function = "gpio"; + drive-strength = <16>; + output-high; + bias-pull-up; }; + }; - gpio_keys { - compatible = "gpio-keys"; - pinctrl-0 = <&button_pins>; - pinctrl-names = "default"; - status = "ok"; - - button@1 { - label = "reset"; - linux,code = ; - gpios = <&tlmm 57 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; - }; - - leds { - compatible = "gpio-leds"; - pinctrl-0 = <&leds_pins>; - pinctrl-names = "default"; - - led@25 { - label = "led1_yellow"; - gpios = <&tlmm 25 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "led1_yellow"; - default-state = "off"; - }; 
- - led@28 { - label = "led1_green"; - gpios = <&tlmm 28 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "led1_green"; - default-state = "off"; - }; - - led@29 { - label = "led2_amber"; - gpios = <&tlmm 29 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "led2_amber"; - default-state = "off"; - }; - - led@32 { - label = "led2_blue"; - gpio = <&tlmm 32 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "led2_blue"; - default-state = "off"; - }; + cnss_wlan_en_sleep: cnss_wlan_en_sleep { + mux { + pins = "gpio57"; + function = "gpio"; + drive-strength = <2>; + output-low; + bias-pull-down; }; }; }; + +&soc { + gpio_keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + button@1 { + label = "wps"; + linux,code = ; + gpios = <&tlmm 57 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + debounce-interval = <60>; + }; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-0 = <&leds_pins>; + pinctrl-names = "default"; + + led@25 { + label = "led1_yellow"; + gpios = <&tlmm 25 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "led1_yellow"; + default-state = "off"; + }; + + led@28 { + label = "led1_green"; + gpios = <&tlmm 28 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "led1_green"; + default-state = "off"; + }; + + led@29 { + label = "led2_amber"; + gpios = <&tlmm 29 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "led2_amber"; + default-state = "off"; + }; + + led@32 { + label = "led2_blue"; + gpio = <&tlmm 32 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "led2_blue"; + default-state = "off"; + }; + }; + + mdio@90000 { + pinctrl-0 = <&mdio_pins>; + pinctrl-names = "default"; + phy-reset-gpio = <&tlmm 37 0>; + phy0: ethernet-phy@0 { + reg = <0>; + }; + phy1: ethernet-phy@1 { + reg = <1>; + }; + phy2: ethernet-phy@2 { + reg = <2>; + }; + phy3: ethernet-phy@3 { + reg = <3>; + }; + phy4: ethernet-phy@4 { + reg = <4>; + }; + phy5: ethernet-phy@5 { + compatible ="ethernet-phy-ieee802.3-c45"; + reg = <8>; + }; + }; + + ess-switch@3a000000 { + pinctrl-0 = 
<&uniphy_pins>; + pinctrl-names = "default"; + switch_cpu_bmp = <0x1>; /* cpu port bitmap */ + switch_lan_bmp = <0x30>; /* lan port bitmap */ + switch_wan_bmp = <0x40>; /* wan port bitmap */ + switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ + switch_mac_mode1 = <0xff>; /* mac mode for uniphy instance1*/ + switch_mac_mode2 = <0xd>; /* mac mode for uniphy instance2*/ + bm_tick_mode = <0>; /* bm tick mode */ + tm_tick_mode = <0>; /* tm tick mode */ + port_scheduler_resource { + port@0 { + port_id = <0>; + ucast_queue = <0 143>; + mcast_queue = <256 271>; + l0sp = <0 35>; + l0cdrr = <0 47>; + l0edrr = <0 47>; + l1cdrr = <0 7>; + l1edrr = <0 7>; + }; + port@1 { + port_id = <1>; + ucast_queue = <144 159>; + mcast_queue = <272 275>; + l0sp = <36 39>; + l0cdrr = <48 63>; + l0edrr = <48 63>; + l1cdrr = <8 11>; + l1edrr = <8 11>; + }; + port@2 { + port_id = <2>; + ucast_queue = <160 175>; + mcast_queue = <276 279>; + l0sp = <40 43>; + l0cdrr = <64 79>; + l0edrr = <64 79>; + l1cdrr = <12 15>; + l1edrr = <12 15>; + }; + port@3 { + port_id = <3>; + ucast_queue = <176 191>; + mcast_queue = <280 283>; + l0sp = <44 47>; + l0cdrr = <80 95>; + l0edrr = <80 95>; + l1cdrr = <16 19>; + l1edrr = <16 19>; + }; + port@4 { + port_id = <4>; + ucast_queue = <192 207>; + mcast_queue = <284 287>; + l0sp = <48 51>; + l0cdrr = <96 111>; + l0edrr = <96 111>; + l1cdrr = <20 23>; + l1edrr = <20 23>; + }; + port@5 { + port_id = <5>; + ucast_queue = <208 223>; + mcast_queue = <288 291>; + l0sp = <52 55>; + l0cdrr = <112 127>; + l0edrr = <112 127>; + l1cdrr = <24 27>; + l1edrr = <24 27>; + }; + port@6 { + port_id = <6>; + ucast_queue = <224 239>; + mcast_queue = <292 295>; + l0sp = <56 59>; + l0cdrr = <128 143>; + l0edrr = <128 143>; + l1cdrr = <28 31>; + l1edrr = <28 31>; + }; + port@7 { + port_id = <7>; + ucast_queue = <240 255>; + mcast_queue = <296 299>; + l0sp = <60 63>; + l0cdrr = <144 159>; + l0edrr = <144 159>; + l1cdrr = <32 35>; + l1edrr = <32 35>; + }; + }; + 
port_scheduler_config { + port@0 { + port_id = <0>; + l1scheduler { + group@0 { + sp = <0 1>; /*L0 SPs*/ + /*cpri cdrr epri edrr*/ + cfg = <0 0 0 0>; + }; + }; + l0scheduler { + group@0 { + /*unicast queues*/ + ucast_queue = <0 4 8>; + /*multicast queues*/ + mcast_queue = <256 260>; + /*sp cpri cdrr epri edrr*/ + cfg = <0 0 0 0 0>; + }; + group@1 { + ucast_queue = <1 5 9>; + mcast_queue = <257 261>; + cfg = <0 1 1 1 1>; + }; + group@2 { + ucast_queue = <2 6 10>; + mcast_queue = <258 262>; + cfg = <0 2 2 2 2>; + }; + group@3 { + ucast_queue = <3 7 11>; + mcast_queue = <259 263>; + cfg = <0 3 3 3 3>; + }; + }; + }; + port@1 { + port_id = <1>; + l1scheduler { + group@0 { + sp = <36>; + cfg = <0 8 0 8>; + }; + group@1 { + sp = <37>; + cfg = <1 9 1 9>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <144>; + ucast_loop_pri = <16>; + mcast_queue = <272>; + mcast_loop_pri = <4>; + cfg = <36 0 48 0 48>; + }; + }; + }; + port@2 { + port_id = <2>; + l1scheduler { + group@0 { + sp = <40>; + cfg = <0 12 0 12>; + }; + group@1 { + sp = <41>; + cfg = <1 13 1 13>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <160>; + ucast_loop_pri = <16>; + mcast_queue = <276>; + mcast_loop_pri = <4>; + cfg = <40 0 64 0 64>; + }; + }; + }; + port@3 { + port_id = <3>; + l1scheduler { + group@0 { + sp = <44>; + cfg = <0 16 0 16>; + }; + group@1 { + sp = <45>; + cfg = <1 17 1 17>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <176>; + ucast_loop_pri = <16>; + mcast_queue = <280>; + mcast_loop_pri = <4>; + cfg = <44 0 80 0 80>; + }; + }; + }; + port@4 { + port_id = <4>; + l1scheduler { + group@0 { + sp = <48>; + cfg = <0 20 0 20>; + }; + group@1 { + sp = <49>; + cfg = <1 21 1 21>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <192>; + ucast_loop_pri = <16>; + mcast_queue = <284>; + mcast_loop_pri = <4>; + cfg = <48 0 96 0 96>; + }; + }; + }; + port@5 { + port_id = <5>; + l1scheduler { + group@0 { + sp = <52>; + cfg = <0 24 0 24>; + }; + group@1 { + sp = <53>; + cfg = <1 
25 1 25>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <208>; + ucast_loop_pri = <16>; + mcast_queue = <288>; + mcast_loop_pri = <4>; + cfg = <52 0 112 0 112>; + }; + }; + }; + port@6 { + port_id = <6>; + l1scheduler { + group@0 { + sp = <56>; + cfg = <0 28 0 28>; + }; + group@1 { + sp = <57>; + cfg = <1 29 1 29>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <224>; + ucast_loop_pri = <16>; + mcast_queue = <292>; + mcast_loop_pri = <4>; + cfg = <56 0 128 0 128>; + }; + }; + }; + port@7 { + port_id = <7>; + l1scheduler { + group@0 { + sp = <60>; + cfg = <0 32 0 32>; + }; + group@1 { + sp = <61>; + cfg = <1 33 1 33>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <240>; + ucast_loop_pri = <16>; + mcast_queue = <296>; + cfg = <60 0 144 0 144>; + }; + }; + }; + }; + }; + + dp1 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <4>; + reg = <0x3a001600 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <3>; + phy-mode = "sgmii"; + }; + + dp2 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <6>; + reg = <0x3a007000 0x3fff>; + qcom,mactype = <1>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <8>; + phy-mode = "sgmii"; + }; + wifi3: wifi3@f00000 { + compatible = "qcom,cnss-qcn9000"; + wlan-en-gpio = <&tlmm 57 0>; + pinctrl-names = "wlan_en_active", "wlan_en_sleep"; + pinctrl-0 = <&cnss_wlan_en_active>; + pinctrl-1 = <&cnss_wlan_en_sleep>; + status = "disabled"; + }; +}; + +&serial_blsp4 { + pinctrl-0 = <&uart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&spi_0 { /* BLSP1 QUP1 */ + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + cs-select = <0>; + status = "ok"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + compatible = "n25q128a11"; + linux,modalias = "m25p80", "n25q128a11"; + spi-max-frequency = <50000000>; + use-default-sizes; + }; +}; + +&serial_blsp2 
{ + pinctrl-0 = <&hsuart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&msm_imem { + status = "disabled"; +}; + +&ssphy_0 { + status = "ok"; +}; + +&qusb_phy_0 { + status = "ok"; +}; + +&ssphy_1 { + status = "ok"; +}; + +&qusb_phy_1 { + status = "ok"; +}; + +&usb3_0 { + status = "ok"; +}; + +&usb3_1 { + status = "ok"; +}; + +&cryptobam { + status = "ok"; +}; + +&crypto { + status = "ok"; +}; + +&i2c_0 { + pinctrl-0 = <&i2c_0_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&i2c_1 { + status = "disabled"; +}; + +&qpic_bam { + status = "ok"; +}; + +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&pcie0 { + status = "disabled"; +}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-ex227.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-ex227.dts index 139f3ed9c..0f34156f2 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-ex227.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-ex227.dts @@ -1,21 +1,37 @@ -// SPDX-License-Identifier: GPL-2.0-only /dts-v1/; -/* Copyright (c) 2020 The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include "ipq8074.dtsi" -#include "ipq8074-hk-cpu.dtsi" +#include "qcom-ipq807x-soc.dtsi" +#include "qcom-ipq807x-hk-cpu.dtsi" / { #address-cells = <0x2>; #size-cells = <0x2>; model = "TP-Link EX227"; - compatible = "tplink,ex227", "qcom,ipq8074-ap-hk07", "qcom,ipq8074"; - qcom,msm-id = <0x156 0x0>, <0x185 0x0>; + compatible = "tplink,ex227", "qcom,ipq807x"; + qcom,msm-id = <0x143 0x0>; interrupt-parent = <&intc>; + qcom,board-id = <0x8 0x0>; + qcom,pmic-id = <0x0 0x0 0x0 0x0>; aliases { - serial0 = &blsp1_uart5; - /* Aliases as required by u-boot to patch MAC addresses */ + /* + * Aliases as required by u-boot + * to patch MAC addresses + */ ethernet0 = "/soc/dp1"; led-boot = &led_power; led-failsafe = &led_power; @@ -24,469 +40,715 @@ }; chosen { - stdout-path = "serial0"; - }; - - soc { - pinctrl@1000000 { - button_pins: button_pins { - wps_button { - pins = "gpio50"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; - - mdio_pins: mdio_pinmux { - mux_0 { - pins = "gpio68"; - function = "mdc"; - drive-strength = <8>; - bias-pull-up; - }; - mux_1 { - pins = "gpio69"; - function = "mdio"; - drive-strength = <8>; - bias-pull-up; - }; - mux_2 { - pins = "gpio37"; - function = "gpio"; - bias-pull-up; - }; - mux_3 { - pins = "gpio25"; - function = "gpio"; - bias-pull-up; - }; - }; - - led_pins: led_pins { - led_power { - pins = "gpio42"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - }; - }; - - serial@78b3000 { - status = "ok"; - }; - - dp1 { - device_type = "network"; - compatible = "qcom,nss-dp"; - qcom,id = <6>; - reg = <0x3a001000 0x200>; - qcom,mactype = <0>; - local-mac-address = 
[000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <4>; - phy-mode = "sgmii"; - }; - - spi@78b5000 { - status = "ok"; - pinctrl-0 = <&spi_0_pins>; - pinctrl-names = "default"; - cs-select = <0>; - - m25p80@0 { - compatible = "n25q128a11"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0>; - spi-max-frequency = <50000000>; - }; - }; - - dma@7984000 { - status = "ok"; - }; - - nand@79b0000 { - status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; - }; - - gpio_keys { - compatible = "gpio-keys"; - pinctrl-0 = <&button_pins>; - pinctrl-names = "default"; - status = "ok"; - - button@1 { - label = "reset"; - linux,code = ; - gpios = <&tlmm 50 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; - }; - - leds { - compatible = "gpio-leds"; - pinctrl-0 = <&led_pins>; - pinctrl-names = "default"; - - led_power: led_power { - label = "led_2g"; - gpio = <&tlmm 42 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; - }; - - mdio: mdio@90000 { - pinctrl-0 = <&mdio_pins>; - pinctrl-names = "default"; - phy-reset-gpio = <&tlmm 37 0 &tlmm 25 1>; - phy0: ethernet-phy@0 { - reg = <0>; - }; - phy1: ethernet-phy@1 { - reg = <1>; - }; - phy2: ethernet-phy@2 { - reg = <2>; - }; - phy3: ethernet-phy@3 { - reg = <3>; - }; - phy4: ethernet-phy@4 { - reg = <28>; - }; - phy5: ethernet-phy@5 { - reg = <4>; - }; - }; - - ess-switch@3a000000 { - switch_cpu_bmp = <0x1>; /* cpu port bitmap */ - switch_lan_bmp = <0x3e>; /* lan port bitmap */ - switch_wan_bmp = <0x40>; /* wan port bitmap */ - switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ - switch_mac_mode1 = <0xff>; /* mac mode for uniphy instance1*/ - switch_mac_mode2 = <0xf>; /* mac mode for uniphy instance2*/ - bm_tick_mode = <0>; /* bm tick mode */ - tm_tick_mode = <0>; /* tm tick mode */ - qcom,port_phyinfo { - port@0 { - port_id = <1>; - phy_address = <0>; - }; - port@1 { 
- port_id = <2>; - phy_address = <1>; - }; - port@2 { - port_id = <3>; - phy_address = <2>; - }; - port@3 { - port_id = <4>; - phy_address = <3>; - }; - port@4 { - port_id = <5>; - phy_address = <28>; - port_mac_sel = "QGMAC_PORT"; - }; - port@5 { - port_id = <6>; - phy_address = <4>; - }; - }; - port_scheduler_resource { - port@0 { - port_id = <0>; - ucast_queue = <0 143>; - mcast_queue = <256 271>; - l0sp = <0 35>; - l0cdrr = <0 47>; - l0edrr = <0 47>; - l1cdrr = <0 7>; - l1edrr = <0 7>; - }; - port@1 { - port_id = <1>; - ucast_queue = <144 159>; - mcast_queue = <272 275>; - l0sp = <36 39>; - l0cdrr = <48 63>; - l0edrr = <48 63>; - l1cdrr = <8 11>; - l1edrr = <8 11>; - }; - port@2 { - port_id = <2>; - ucast_queue = <160 175>; - mcast_queue = <276 279>; - l0sp = <40 43>; - l0cdrr = <64 79>; - l0edrr = <64 79>; - l1cdrr = <12 15>; - l1edrr = <12 15>; - }; - port@3 { - port_id = <3>; - ucast_queue = <176 191>; - mcast_queue = <280 283>; - l0sp = <44 47>; - l0cdrr = <80 95>; - l0edrr = <80 95>; - l1cdrr = <16 19>; - l1edrr = <16 19>; - }; - port@4 { - port_id = <4>; - ucast_queue = <192 207>; - mcast_queue = <284 287>; - l0sp = <48 51>; - l0cdrr = <96 111>; - l0edrr = <96 111>; - l1cdrr = <20 23>; - l1edrr = <20 23>; - }; - port@5 { - port_id = <5>; - ucast_queue = <208 223>; - mcast_queue = <288 291>; - l0sp = <52 55>; - l0cdrr = <112 127>; - l0edrr = <112 127>; - l1cdrr = <24 27>; - l1edrr = <24 27>; - }; - port@6 { - port_id = <6>; - ucast_queue = <224 239>; - mcast_queue = <292 295>; - l0sp = <56 59>; - l0cdrr = <128 143>; - l0edrr = <128 143>; - l1cdrr = <28 31>; - l1edrr = <28 31>; - }; - port@7 { - port_id = <7>; - ucast_queue = <240 255>; - mcast_queue = <296 299>; - l0sp = <60 63>; - l0cdrr = <144 159>; - l0edrr = <144 159>; - l1cdrr = <32 35>; - l1edrr = <32 35>; - }; - }; - port_scheduler_config { - port@0 { - port_id = <0>; - l1scheduler { - group@0 { - sp = <0 1>; /*L0 SPs*/ - /*cpri cdrr epri edrr*/ - cfg = <0 0 0 0>; - }; - }; - l0scheduler { - group@0 
{ - /*unicast queues*/ - ucast_queue = <0 4 8>; - /*multicast queues*/ - mcast_queue = <256 260>; - /*sp cpri cdrr epri edrr*/ - cfg = <0 0 0 0 0>; - }; - group@1 { - ucast_queue = <1 5 9>; - mcast_queue = <257 261>; - cfg = <0 1 1 1 1>; - }; - group@2 { - ucast_queue = <2 6 10>; - mcast_queue = <258 262>; - cfg = <0 2 2 2 2>; - }; - group@3 { - ucast_queue = <3 7 11>; - mcast_queue = <259 263>; - cfg = <0 3 3 3 3>; - }; - }; - }; - port@1 { - port_id = <1>; - l1scheduler { - group@0 { - sp = <36>; - cfg = <0 8 0 8>; - }; - group@1 { - sp = <37>; - cfg = <1 9 1 9>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <144>; - ucast_loop_pri = <16>; - mcast_queue = <272>; - mcast_loop_pri = <4>; - cfg = <36 0 48 0 48>; - }; - }; - }; - port@2 { - port_id = <2>; - l1scheduler { - group@0 { - sp = <40>; - cfg = <0 12 0 12>; - }; - group@1 { - sp = <41>; - cfg = <1 13 1 13>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <160>; - ucast_loop_pri = <16>; - mcast_queue = <276>; - mcast_loop_pri = <4>; - cfg = <40 0 64 0 64>; - }; - }; - }; - port@3 { - port_id = <3>; - l1scheduler { - group@0 { - sp = <44>; - cfg = <0 16 0 16>; - }; - group@1 { - sp = <45>; - cfg = <1 17 1 17>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <176>; - ucast_loop_pri = <16>; - mcast_queue = <280>; - mcast_loop_pri = <4>; - cfg = <44 0 80 0 80>; - }; - }; - }; - port@4 { - port_id = <4>; - l1scheduler { - group@0 { - sp = <48>; - cfg = <0 20 0 20>; - }; - group@1 { - sp = <49>; - cfg = <1 21 1 21>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <192>; - ucast_loop_pri = <16>; - mcast_queue = <284>; - mcast_loop_pri = <4>; - cfg = <48 0 96 0 96>; - }; - }; - }; - port@5 { - port_id = <5>; - l1scheduler { - group@0 { - sp = <52>; - cfg = <0 24 0 24>; - }; - group@1 { - sp = <53>; - cfg = <1 25 1 25>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <208>; - ucast_loop_pri = <16>; - mcast_queue = <288>; - mcast_loop_pri = <4>; - cfg = <52 0 112 0 112>; - }; - }; - }; - 
port@6 { - port_id = <6>; - l1scheduler { - group@0 { - sp = <56>; - cfg = <0 28 0 28>; - }; - group@1 { - sp = <57>; - cfg = <1 29 1 29>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <224>; - ucast_loop_pri = <16>; - mcast_queue = <292>; - mcast_loop_pri = <4>; - cfg = <56 0 128 0 128>; - }; - }; - }; - port@7 { - port_id = <7>; - l1scheduler { - group@0 { - sp = <60>; - cfg = <0 32 0 32>; - }; - group@1 { - sp = <61>; - cfg = <1 33 1 33>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <240>; - ucast_loop_pri = <16>; - mcast_queue = <296>; - cfg = <60 0 144 0 144>; - }; - }; - }; - }; - }; - nss-macsec1 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x1c>; - phy_access_mode = <0>; - mdiobus = <&mdio>; - }; + bootargs = "console=ttyMSM0,115200,n8 root=/dev/ram0 rw \ + init=/init"; + bootargs-append = " swiotlb=1"; }; }; + +&tlmm { + pinctrl-0 = <&btcoex_pins>; + pinctrl-names = "default"; + + btcoex_pins: btcoex_pins { + mux_0 { + pins = "gpio64"; + function = "pta1_1"; + drive-strength = <6>; + bias-pull-down; + }; + mux_1 { + pins = "gpio65"; + function = "pta1_2"; + drive-strength = <6>; + bias-pull-down; + }; + mux_2 { + pins = "gpio66"; + function = "pta1_0"; + drive-strength = <6>; + bias-pull-down; + }; + }; + + mdio_pins: mdio_pinmux { + mux_0 { + pins = "gpio68"; + function = "mdc"; + drive-strength = <8>; + bias-pull-up; + }; + mux_1 { + pins = "gpio69"; + function = "mdio"; + drive-strength = <8>; + bias-pull-up; + }; + mux_2 { + pins = "gpio25"; + function = "gpio"; + bias-pull-up; + }; + mux_3 { + pins = "gpio37"; + function = "gpio"; + bias-pull-up; + }; + }; + + uart_pins: uart_pins { + mux { + pins = "gpio23", "gpio24"; + function = "blsp4_uart1"; + drive-strength = <8>; + bias-disable; + }; + }; + + spi_0_pins: spi_0_pins { + mux { + pins = "gpio38", "gpio39", "gpio40", "gpio41"; + function = "blsp0_spi"; + drive-strength = <8>; + bias-disable; + }; + }; + + spi_3_pins: spi_3_pins { + mux { + pins = "gpio52", "gpio53"; + function 
= "blsp3_spi"; + drive-strength = <8>; + bias-disable; + }; + spi_cs { + pins = "gpio22"; + function = "blsp3_spi2"; + drive-strength = <8>; + bias-disable; + }; + quartz_interrupt { + pins = "gpio47"; + function = "gpio"; + input; + bias-disable; + }; + quartz_reset { + pins = "gpio21"; + function = "gpio"; + output-low; + bias-disable; + }; + }; + + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; + drive-strength = <8>; + bias-disable; + }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-disable; + }; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-disable; + }; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-disable; + }; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-disable; + }; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-disable; + }; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-disable; + }; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-disable; + }; + qpic_pad { + pins = "gpio1", "gpio3", "gpio4", + "gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; + drive-strength = <8>; + bias-disable; + }; + }; + + hsuart_pins: hsuart_pins { + mux { + pins = "gpio46", "gpio47", "gpio48", "gpio49"; + function = "blsp2_uart"; + drive-strength = <8>; + bias-disable; + }; + }; + + /* POWER_LED, TP-Link */ + led_pins: led_pins { + led_power { + pins = "gpio42"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + }; + + /* BUTTON, TP-Link */ + button_pins: button_pins { + reset_button { + pins = "gpio50"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + }; + + usb_mux_sel_pins: usb_mux_pins { + mux { + pins = "gpio27"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + pcie0_pins: pcie_pins { + pcie0_rst { 
+ pins = "gpio58"; + function = "pcie0_rst"; + drive-strength = <8>; + bias-pull-down; + }; + pcie0_wake { + pins = "gpio59"; + function = "pcie0_wake"; + drive-strength = <8>; + bias-pull-down; + }; + }; + +}; + +&soc { + mdio: mdio@90000 { + pinctrl-0 = <&mdio_pins>; + pinctrl-names = "default"; + phy-reset-gpio = <&tlmm 37 0 &tlmm 25 1>; + compatible = "qcom,ipq40xx-mdio", "qcom,qca-mdio"; + phy0: ethernet-phy@0 { + reg = <0>; + }; + phy1: ethernet-phy@1 { + reg = <1>; + }; + phy2: ethernet-phy@2 { + reg = <2>; + }; + phy3: ethernet-phy@3 { + reg = <3>; + }; + phy4: ethernet-phy@4 { + reg = <28>; + }; + phy5: ethernet-phy@5 { + reg = <4>; + }; + }; + + ess-switch@3a000000 { + switch_cpu_bmp = <0x1>; /* cpu port bitmap */ + switch_lan_bmp = <0x3e>; /* lan port bitmap */ + switch_wan_bmp = <0x40>; /* wan port bitmap */ + switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ + switch_mac_mode1 = <0xf>; /* mac mode for uniphy instance1*/ + switch_mac_mode2 = <0xf>; /* mac mode for uniphy instance2*/ + bm_tick_mode = <0>; /* bm tick mode */ + tm_tick_mode = <0>; /* tm tick mode */ + qcom,port_phyinfo { + port@0 { + port_id = <1>; + phy_address = <0>; + }; + port@1 { + port_id = <2>; + phy_address = <1>; + }; + port@2 { + port_id = <3>; + phy_address = <2>; + }; + port@3 { + port_id = <4>; + phy_address = <3>; + }; + port@4 { + port_id = <5>; + phy_address = <28>; + port_mac_sel = "QGMAC_PORT"; + }; + port@5 { + port_id = <6>; + phy_address = <4>; + }; + }; + port_scheduler_resource { + port@0 { + port_id = <0>; + ucast_queue = <0 143>; + mcast_queue = <256 271>; + l0sp = <0 35>; + l0cdrr = <0 47>; + l0edrr = <0 47>; + l1cdrr = <0 7>; + l1edrr = <0 7>; + }; + port@1 { + port_id = <1>; + ucast_queue = <144 159>; + mcast_queue = <272 275>; + l0sp = <36 39>; + l0cdrr = <48 63>; + l0edrr = <48 63>; + l1cdrr = <8 11>; + l1edrr = <8 11>; + }; + port@2 { + port_id = <2>; + ucast_queue = <160 175>; + mcast_queue = <276 279>; + l0sp = <40 43>; + l0cdrr = <64 79>; + 
l0edrr = <64 79>; + l1cdrr = <12 15>; + l1edrr = <12 15>; + }; + port@3 { + port_id = <3>; + ucast_queue = <176 191>; + mcast_queue = <280 283>; + l0sp = <44 47>; + l0cdrr = <80 95>; + l0edrr = <80 95>; + l1cdrr = <16 19>; + l1edrr = <16 19>; + }; + port@4 { + port_id = <4>; + ucast_queue = <192 207>; + mcast_queue = <284 287>; + l0sp = <48 51>; + l0cdrr = <96 111>; + l0edrr = <96 111>; + l1cdrr = <20 23>; + l1edrr = <20 23>; + }; + port@5 { + port_id = <5>; + ucast_queue = <208 223>; + mcast_queue = <288 291>; + l0sp = <52 55>; + l0cdrr = <112 127>; + l0edrr = <112 127>; + l1cdrr = <24 27>; + l1edrr = <24 27>; + }; + port@6 { + port_id = <6>; + ucast_queue = <224 239>; + mcast_queue = <292 295>; + l0sp = <56 59>; + l0cdrr = <128 143>; + l0edrr = <128 143>; + l1cdrr = <28 31>; + l1edrr = <28 31>; + }; + port@7 { + port_id = <7>; + ucast_queue = <240 255>; + mcast_queue = <296 299>; + l0sp = <60 63>; + l0cdrr = <144 159>; + l0edrr = <144 159>; + l1cdrr = <32 35>; + l1edrr = <32 35>; + }; + }; + port_scheduler_config { + port@0 { + port_id = <0>; + l1scheduler { + group@0 { + sp = <0 1>; /*L0 SPs*/ + /*cpri cdrr epri edrr*/ + cfg = <0 0 0 0>; + }; + }; + l0scheduler { + group@0 { + /*unicast queues*/ + ucast_queue = <0 4 8>; + /*multicast queues*/ + mcast_queue = <256 260>; + /*sp cpri cdrr epri edrr*/ + cfg = <0 0 0 0 0>; + }; + group@1 { + ucast_queue = <1 5 9>; + mcast_queue = <257 261>; + cfg = <0 1 1 1 1>; + }; + group@2 { + ucast_queue = <2 6 10>; + mcast_queue = <258 262>; + cfg = <0 2 2 2 2>; + }; + group@3 { + ucast_queue = <3 7 11>; + mcast_queue = <259 263>; + cfg = <0 3 3 3 3>; + }; + }; + }; + port@1 { + port_id = <1>; + l1scheduler { + group@0 { + sp = <36>; + cfg = <0 8 0 8>; + }; + group@1 { + sp = <37>; + cfg = <1 9 1 9>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <144>; + ucast_loop_pri = <16>; + mcast_queue = <272>; + mcast_loop_pri = <4>; + cfg = <36 0 48 0 48>; + }; + }; + }; + port@2 { + port_id = <2>; + l1scheduler { + group@0 { + sp 
= <40>; + cfg = <0 12 0 12>; + }; + group@1 { + sp = <41>; + cfg = <1 13 1 13>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <160>; + ucast_loop_pri = <16>; + mcast_queue = <276>; + mcast_loop_pri = <4>; + cfg = <40 0 64 0 64>; + }; + }; + }; + port@3 { + port_id = <3>; + l1scheduler { + group@0 { + sp = <44>; + cfg = <0 16 0 16>; + }; + group@1 { + sp = <45>; + cfg = <1 17 1 17>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <176>; + ucast_loop_pri = <16>; + mcast_queue = <280>; + mcast_loop_pri = <4>; + cfg = <44 0 80 0 80>; + }; + }; + }; + port@4 { + port_id = <4>; + l1scheduler { + group@0 { + sp = <48>; + cfg = <0 20 0 20>; + }; + group@1 { + sp = <49>; + cfg = <1 21 1 21>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <192>; + ucast_loop_pri = <16>; + mcast_queue = <284>; + mcast_loop_pri = <4>; + cfg = <48 0 96 0 96>; + }; + }; + }; + port@5 { + port_id = <5>; + l1scheduler { + group@0 { + sp = <52>; + cfg = <0 24 0 24>; + }; + group@1 { + sp = <53>; + cfg = <1 25 1 25>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <208>; + ucast_loop_pri = <16>; + mcast_queue = <288>; + mcast_loop_pri = <4>; + cfg = <52 0 112 0 112>; + }; + }; + }; + port@6 { + port_id = <6>; + l1scheduler { + group@0 { + sp = <56>; + cfg = <0 28 0 28>; + }; + group@1 { + sp = <57>; + cfg = <1 29 1 29>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <224>; + ucast_loop_pri = <16>; + mcast_queue = <292>; + mcast_loop_pri = <4>; + cfg = <56 0 128 0 128>; + }; + }; + }; + port@7 { + port_id = <7>; + l1scheduler { + group@0 { + sp = <60>; + cfg = <0 32 0 32>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <240>; + mcast_queue = <296>; + cfg = <60 0 144 0 144>; + }; + }; + }; + }; + }; + + dp1 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <6>; + reg = <0x3a001000 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <4>; + phy-mode = "sgmii"; + }; + + /* POWER 
LED, TP-Link */ + leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; + + led_power: led_power { + label = "blue:power"; + gpio = <&tlmm 42 GPIO_ACTIVE_HIGH>; + default-state = "on"; + }; + }; + + /* BUTTON, TP-Link */ + gpio_keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + button@1 { + label = "reset"; + linux,code = ; + gpios = <&tlmm 50 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + }; + }; + + nss-macsec0 { + compatible = "qcom,nss-macsec"; + phy_addr = <0x18>; + phy_access_mode = <0>; + mdiobus = <&mdio>; + }; + nss-macsec1 { + compatible = "qcom,nss-macsec"; + phy_addr = <0x1c>; + phy_access_mode = <0>; + mdiobus = <&mdio>; + }; +}; + +&serial_blsp4 { + pinctrl-0 = <&uart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&spi_0 { /* BLSP1 QUP1 */ + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + cs-select = <0>; + status = "ok"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + compatible = "n25q128a11"; + linux,modalias = "m25p80", "n25q128a11"; + spi-max-frequency = <50000000>; + use-default-sizes; + }; +}; + +&spi_4 { /* BLSP1 QUP3 */ + pinctrl-0 = <&spi_3_pins>; + pinctrl-names = "default"; + cs-select = <2>; + quartz-reset-gpio = <&tlmm 21 1>; + status = "disabled"; + spidev3: spi@3 { + compatible = "qca,spidev"; + reg = <0>; + spi-max-frequency = <24000000>; + }; +}; + +&serial_blsp2 { + status = "disabled"; +}; + +&msm_imem { + status = "disabled"; +}; + +&ssphy_0 { + status = "ok"; +}; + +&qusb_phy_0 { + status = "ok"; +}; + +&ssphy_1 { + status = "ok"; +}; + +&qusb_phy_1 { + status = "ok"; +}; + +&usb3_0 { + status = "ok"; +}; + +&usb3_1 { + status = "ok"; +}; + +&cryptobam { + status = "ok"; +}; + +&crypto { + status = "ok"; +}; + +&i2c_0 { + status = "disabled"; +}; + +&i2c_1 { + status = "disabled"; +}; + +&qpic_bam { + status = "ok"; +}; + +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; + status = 
"ok"; +}; + +&qpic_lcd { + status = "disabled"; +}; + +&qpic_lcd_panel { + status = "disabled"; +}; + +&ledc { + status = "disabled"; +}; + +&pcie0 { + status = "ok"; +}; + +&pcie1 { + status = "disabled"; +}; + diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-ex447.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-ex447.dts index b71acdcee..f8addc97f 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-ex447.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-ex447.dts @@ -1,563 +1,754 @@ -// SPDX-License-Identifier: GPL-2.0-only /dts-v1/; -/* Copyright (c) 2020 The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include "ipq8074.dtsi" -#include "ipq8074-hk-cpu.dtsi" +#include "qcom-ipq807x-soc.dtsi" +#include "qcom-ipq807x-hk-cpu.dtsi" / { #address-cells = <0x2>; #size-cells = <0x2>; - model = "TP-Link EX447"; - compatible = "tplink,ex447", "qcom,ipq807x"; - qcom,msm-id = <0x157 0x0>, <0x187 0x0>; + model = "TP-Link EX447"; + compatible = "tplink,ex447", "qcom,ipq807x"; + qcom,msm-id = <0x143 0x0>; interrupt-parent = <&intc>; + qcom,board-id = <0x8 0x0>; + qcom,pmic-id = <0x0 0x0 0x0 0x0>; aliases { - serial0 = &blsp1_uart5; - /* Aliases as required by u-boot to patch MAC addresses */ + /* + * Aliases as required by u-boot + * to patch MAC addresses + */ ethernet0 = "/soc/dp1"; }; chosen { - stdout-path = "serial0"; + bootargs = "console=ttyMSM0,115200,n8 root=/dev/ram0 rw \ + init=/init"; + bootargs-append = " swiotlb=1"; led-boot = &led_power; led-failsafe = &led_power; led-running = &led_power; led-upgrade = &led_power; }; +}; - soc { - pinctrl@1000000 { - button_pins: button_pins { - wps_button { - pins = "gpio50"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; +&tlmm { + pinctrl-0 = <&btcoex_pins>; + pinctrl-names = "default"; - usb_mux_sel_pins: usb_mux_pins { - mux { - pins = "gpio27"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - }; - - pcie0_pins: pcie_pins { - pcie0_rst { - pins = "gpio58"; - function = "pcie0_rst"; - drive-strength = <8>; - bias-pull-down; - }; - pcie0_wake { - pins = "gpio59"; - function = "pcie0_wake"; - drive-strength = <8>; - bias-pull-down; - }; - }; - - mdio_pins: mdio_pinmux { - mux_0 { - pins = "gpio68"; - function = "mdc"; - drive-strength = <8>; - bias-pull-up; - }; - mux_1 { - pins = "gpio69"; - function = "mdio"; - drive-strength = <8>; - bias-pull-up; - }; - mux_2 { - pins = "gpio25"; - function = "gpio"; - bias-pull-up; - }; - mux_3 { - pins = "gpio37"; - function = "gpio"; - bias-pull-up; - }; - }; - - led_pins: led_pins { - led_power { - pins = "gpio42"; - function = "gpio"; - 
drive-strength = <8>; - bias-pull-down; - }; - }; - - spi_3_pins: spi_3_pins { - mux { - pins = "gpio50", "gpio52", "gpio53"; - function = "blsp3_spi"; - drive-strength = <8>; - bias-disable; - }; - spi_cs { - pins = "gpio22"; - function = "blsp3_spi2"; - drive-strength = <8>; - bias-disable; - }; - quartz_interrupt { - pins = "gpio47"; - function = "gpio"; - input; - bias-disable; - }; - quartz_reset { - pins = "gpio21"; - function = "gpio"; - output-low; - bias-disable; - }; - }; + btcoex_pins: btcoex_pins { + mux_0 { + pins = "gpio64"; + function = "pta1_1"; + drive-strength = <6>; + bias-pull-down; }; - - serial@78b3000 { - status = "ok"; + mux_1 { + pins = "gpio65"; + function = "pta1_2"; + drive-strength = <6>; + bias-pull-down; }; - - dp1 { - device_type = "network"; - compatible = "qcom,nss-dp"; - qcom,id = <5>; - reg = <0x3a001000 0x200>; - qcom,mactype = <0>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <28>; - phy-mode = "sgmii"; - }; - - spi@78b5000 { - status = "ok"; - pinctrl-0 = <&spi_0_pins>; - pinctrl-names = "default"; - cs-select = <0>; - - m25p80@0 { - compatible = "w25q256jw"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0>; - spi-max-frequency = <50000000>; - }; - }; - - dma@7984000 { - status = "ok"; - }; - - nand@79b0000 { - status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; - }; - - phy@84000 { - status = "ok"; - }; - - phy@86000 { - status = "ok"; - }; - - pci@20000000 { - perst-gpio = <&tlmm 58 1>; - status = "ok"; - }; - - gpio_keys { - compatible = "gpio-keys"; - pinctrl-0 = <&button_pins>; - pinctrl-names = "default"; - status = "ok"; - - button@1 { - label = "reset"; - linux,code = ; - gpios = <&tlmm 50 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; - }; - - leds { - compatible = "gpio-leds"; - pinctrl-0 = <&led_pins>; - pinctrl-names = 
"default"; - - led_power: led_power { - label = "blue:power"; - gpio = <&tlmm 50 GPIO_ACTIVE_LOW>; - default-state = "off"; - }; - }; - - mdio: mdio@90000 { - pinctrl-0 = <&mdio_pins>; - pinctrl-names = "default"; - phy-reset-gpio = <&tlmm 37 0 &tlmm 25 1>; - phy0: ethernet-phy@0 { - reg = <0>; - }; - phy1: ethernet-phy@1 { - reg = <1>; - }; - phy2: ethernet-phy@2 { - reg = <2>; - }; - phy3: ethernet-phy@3 { - reg = <3>; - }; - phy4: ethernet-phy@4 { - reg = <28>; - }; - phy5: ethernet-phy@5 { - reg = <4>; - }; - }; - - ess-switch@3a000000 { - switch_cpu_bmp = <0x1>; /* cpu port bitmap */ - switch_lan_bmp = <0x3e>; /* lan port bitmap */ - switch_wan_bmp = <0x40>; /* wan port bitmap */ - switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ - switch_mac_mode1 = <0xf>; /* mac mode for uniphy instance1*/ - switch_mac_mode2 = <0xf>; /* mac mode for uniphy instance2*/ - bm_tick_mode = <0>; /* bm tick mode */ - tm_tick_mode = <0>; /* tm tick mode */ - qcom,port_phyinfo { - port@0 { - port_id = <1>; - phy_address = <0>; - }; - port@1 { - port_id = <2>; - phy_address = <1>; - }; - port@2 { - port_id = <3>; - phy_address = <2>; - }; - port@3 { - port_id = <4>; - phy_address = <3>; - }; - port@4 { - port_id = <5>; - phy_address = <28>; - port_mac_sel = "QGMAC_PORT"; - }; - port@5 { - port_id = <6>; - phy_address = <4>; - }; - }; - port_scheduler_resource { - port@0 { - port_id = <0>; - ucast_queue = <0 143>; - mcast_queue = <256 271>; - l0sp = <0 35>; - l0cdrr = <0 47>; - l0edrr = <0 47>; - l1cdrr = <0 7>; - l1edrr = <0 7>; - }; - port@1 { - port_id = <1>; - ucast_queue = <144 159>; - mcast_queue = <272 275>; - l0sp = <36 39>; - l0cdrr = <48 63>; - l0edrr = <48 63>; - l1cdrr = <8 11>; - l1edrr = <8 11>; - }; - port@2 { - port_id = <2>; - ucast_queue = <160 175>; - mcast_queue = <276 279>; - l0sp = <40 43>; - l0cdrr = <64 79>; - l0edrr = <64 79>; - l1cdrr = <12 15>; - l1edrr = <12 15>; - }; - port@3 { - port_id = <3>; - ucast_queue = <176 191>; - mcast_queue = <280 
283>; - l0sp = <44 47>; - l0cdrr = <80 95>; - l0edrr = <80 95>; - l1cdrr = <16 19>; - l1edrr = <16 19>; - }; - port@4 { - port_id = <4>; - ucast_queue = <192 207>; - mcast_queue = <284 287>; - l0sp = <48 51>; - l0cdrr = <96 111>; - l0edrr = <96 111>; - l1cdrr = <20 23>; - l1edrr = <20 23>; - }; - port@5 { - port_id = <5>; - ucast_queue = <208 223>; - mcast_queue = <288 291>; - l0sp = <52 55>; - l0cdrr = <112 127>; - l0edrr = <112 127>; - l1cdrr = <24 27>; - l1edrr = <24 27>; - }; - port@6 { - port_id = <6>; - ucast_queue = <224 239>; - mcast_queue = <292 295>; - l0sp = <56 59>; - l0cdrr = <128 143>; - l0edrr = <128 143>; - l1cdrr = <28 31>; - l1edrr = <28 31>; - }; - port@7 { - port_id = <7>; - ucast_queue = <240 255>; - mcast_queue = <296 299>; - l0sp = <60 63>; - l0cdrr = <144 159>; - l0edrr = <144 159>; - l1cdrr = <32 35>; - l1edrr = <32 35>; - }; - }; - port_scheduler_config { - port@0 { - port_id = <0>; - l1scheduler { - group@0 { - sp = <0 1>; /*L0 SPs*/ - /*cpri cdrr epri edrr*/ - cfg = <0 0 0 0>; - }; - }; - l0scheduler { - group@0 { - /*unicast queues*/ - ucast_queue = <0 4 8>; - /*multicast queues*/ - mcast_queue = <256 260>; - /*sp cpri cdrr epri edrr*/ - cfg = <0 0 0 0 0>; - }; - group@1 { - ucast_queue = <1 5 9>; - mcast_queue = <257 261>; - cfg = <0 1 1 1 1>; - }; - group@2 { - ucast_queue = <2 6 10>; - mcast_queue = <258 262>; - cfg = <0 2 2 2 2>; - }; - group@3 { - ucast_queue = <3 7 11>; - mcast_queue = <259 263>; - cfg = <0 3 3 3 3>; - }; - }; - }; - port@1 { - port_id = <1>; - l1scheduler { - group@0 { - sp = <36>; - cfg = <0 8 0 8>; - }; - group@1 { - sp = <37>; - cfg = <1 9 1 9>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <144>; - ucast_loop_pri = <16>; - mcast_queue = <272>; - mcast_loop_pri = <4>; - cfg = <36 0 48 0 48>; - }; - }; - }; - port@2 { - port_id = <2>; - l1scheduler { - group@0 { - sp = <40>; - cfg = <0 12 0 12>; - }; - group@1 { - sp = <41>; - cfg = <1 13 1 13>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = 
<160>; - ucast_loop_pri = <16>; - mcast_queue = <276>; - mcast_loop_pri = <4>; - cfg = <40 0 64 0 64>; - }; - }; - }; - port@3 { - port_id = <3>; - l1scheduler { - group@0 { - sp = <44>; - cfg = <0 16 0 16>; - }; - group@1 { - sp = <45>; - cfg = <1 17 1 17>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <176>; - ucast_loop_pri = <16>; - mcast_queue = <280>; - mcast_loop_pri = <4>; - cfg = <44 0 80 0 80>; - }; - }; - }; - port@4 { - port_id = <4>; - l1scheduler { - group@0 { - sp = <48>; - cfg = <0 20 0 20>; - }; - group@1 { - sp = <49>; - cfg = <1 21 1 21>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <192>; - ucast_loop_pri = <16>; - mcast_queue = <284>; - mcast_loop_pri = <4>; - cfg = <48 0 96 0 96>; - }; - }; - }; - port@5 { - port_id = <5>; - l1scheduler { - group@0 { - sp = <52>; - cfg = <0 24 0 24>; - }; - group@1 { - sp = <53>; - cfg = <1 25 1 25>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <208>; - ucast_loop_pri = <16>; - mcast_queue = <288>; - mcast_loop_pri = <4>; - cfg = <52 0 112 0 112>; - }; - }; - }; - port@6 { - port_id = <6>; - l1scheduler { - group@0 { - sp = <56>; - cfg = <0 28 0 28>; - }; - group@1 { - sp = <57>; - cfg = <1 29 1 29>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <224>; - ucast_loop_pri = <16>; - mcast_queue = <292>; - mcast_loop_pri = <4>; - cfg = <56 0 128 0 128>; - }; - }; - }; - port@7 { - port_id = <7>; - l1scheduler { - group@0 { - sp = <60>; - cfg = <0 32 0 32>; - }; - group@1 { - sp = <61>; - cfg = <1 33 1 33>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <240>; - ucast_loop_pri = <16>; - mcast_queue = <296>; - cfg = <60 0 144 0 144>; - }; - }; - }; - }; - }; - - nss-macsec0 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x18>; - phy_access_mode = <0>; - mdiobus = <&mdio>; - }; - nss-macsec1 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x1c>; - phy_access_mode = <0>; - mdiobus = <&mdio>; + mux_2 { + pins = "gpio66"; + function = "pta1_0"; + drive-strength = <6>; + 
bias-pull-down; }; }; + + mdio_pins: mdio_pinmux { + mux_0 { + pins = "gpio68"; + function = "mdc"; + drive-strength = <8>; + bias-pull-up; + }; + mux_1 { + pins = "gpio69"; + function = "mdio"; + drive-strength = <8>; + bias-pull-up; + }; + mux_2 { + pins = "gpio25"; + function = "gpio"; + bias-pull-up; + }; + mux_3 { + pins = "gpio37"; + function = "gpio"; + bias-pull-up; + }; + }; + + uart_pins: uart_pins { + mux { + pins = "gpio23", "gpio24"; + function = "blsp4_uart1"; + drive-strength = <8>; + bias-disable; + }; + }; + + spi_0_pins: spi_0_pins { + mux { + pins = "gpio38", "gpio39", "gpio40", "gpio41"; + function = "blsp0_spi"; + drive-strength = <8>; + bias-disable; + }; + }; + + spi_3_pins: spi_3_pins { + mux { + pins = "gpio52", "gpio53"; + function = "blsp3_spi"; + drive-strength = <8>; + bias-disable; + }; + spi_cs { + pins = "gpio22"; + function = "blsp3_spi2"; + drive-strength = <8>; + bias-disable; + }; + quartz_interrupt { + pins = "gpio47"; + function = "gpio"; + input; + bias-disable; + }; + quartz_reset { + pins = "gpio21"; + function = "gpio"; + output-low; + bias-disable; + }; + }; + + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; + drive-strength = <8>; + bias-disable; + }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-disable; + }; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-disable; + }; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-disable; + }; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-disable; + }; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-disable; + }; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-disable; + }; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-disable; + }; + qpic_pad { + pins = "gpio1", "gpio3", "gpio4", + 
"gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; + drive-strength = <8>; + bias-disable; + }; + }; + + hsuart_pins: hsuart_pins { + mux { + pins = "gpio46", "gpio47", "gpio48", "gpio49"; + function = "blsp2_uart"; + drive-strength = <8>; + bias-disable; + }; + }; + + /* POWER_LED, TP-Link */ + led_pins: led_pins { + led_power { + pins = "gpio42"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + }; + + /* BUTTON, TP-Link */ + button_pins: button_pins { + reset_button { + pins = "gpio50"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + }; + + usb_mux_sel_pins: usb_mux_pins { + mux { + pins = "gpio27"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + pcie0_pins: pcie_pins { + pcie0_rst { + pins = "gpio58"; + function = "pcie0_rst"; + drive-strength = <8>; + bias-pull-down; + }; + pcie0_wake { + pins = "gpio59"; + function = "pcie0_wake"; + drive-strength = <8>; + bias-pull-down; + }; + }; + }; + +&soc { + mdio: mdio@90000 { + pinctrl-0 = <&mdio_pins>; + pinctrl-names = "default"; + phy-reset-gpio = <&tlmm 37 0 &tlmm 25 1>; + compatible = "qcom,ipq40xx-mdio", "qcom,qca-mdio"; + phy0: ethernet-phy@0 { + reg = <0>; + }; + phy1: ethernet-phy@1 { + reg = <1>; + }; + phy2: ethernet-phy@2 { + reg = <2>; + }; + phy3: ethernet-phy@3 { + reg = <3>; + }; + phy4: ethernet-phy@4 { + reg = <28>; + }; + phy5: ethernet-phy@5 { + reg = <4>; + }; + }; + + ess-switch@3a000000 { + switch_cpu_bmp = <0x1>; /* cpu port bitmap */ + switch_lan_bmp = <0x3e>; /* lan port bitmap */ + switch_wan_bmp = <0x40>; /* wan port bitmap */ + switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ + switch_mac_mode1 = <0xf>; /* mac mode for uniphy instance1*/ + switch_mac_mode2 = <0xf>; /* mac mode for uniphy instance2*/ + bm_tick_mode = <0>; /* bm tick mode */ + tm_tick_mode = <0>; /* tm tick mode */ + qcom,port_phyinfo { + port@0 { + port_id = <1>; + phy_address = <0>; + }; + port@1 { + port_id = <2>; + phy_address = <1>; + }; + 
port@2 { + port_id = <3>; + phy_address = <2>; + }; + port@3 { + port_id = <4>; + phy_address = <3>; + }; + port@4 { + port_id = <5>; + phy_address = <28>; + port_mac_sel = "QGMAC_PORT"; + }; + port@5 { + port_id = <6>; + phy_address = <4>; + }; + }; + port_scheduler_resource { + port@0 { + port_id = <0>; + ucast_queue = <0 143>; + mcast_queue = <256 271>; + l0sp = <0 35>; + l0cdrr = <0 47>; + l0edrr = <0 47>; + l1cdrr = <0 7>; + l1edrr = <0 7>; + }; + port@1 { + port_id = <1>; + ucast_queue = <144 159>; + mcast_queue = <272 275>; + l0sp = <36 39>; + l0cdrr = <48 63>; + l0edrr = <48 63>; + l1cdrr = <8 11>; + l1edrr = <8 11>; + }; + port@2 { + port_id = <2>; + ucast_queue = <160 175>; + mcast_queue = <276 279>; + l0sp = <40 43>; + l0cdrr = <64 79>; + l0edrr = <64 79>; + l1cdrr = <12 15>; + l1edrr = <12 15>; + }; + port@3 { + port_id = <3>; + ucast_queue = <176 191>; + mcast_queue = <280 283>; + l0sp = <44 47>; + l0cdrr = <80 95>; + l0edrr = <80 95>; + l1cdrr = <16 19>; + l1edrr = <16 19>; + }; + port@4 { + port_id = <4>; + ucast_queue = <192 207>; + mcast_queue = <284 287>; + l0sp = <48 51>; + l0cdrr = <96 111>; + l0edrr = <96 111>; + l1cdrr = <20 23>; + l1edrr = <20 23>; + }; + port@5 { + port_id = <5>; + ucast_queue = <208 223>; + mcast_queue = <288 291>; + l0sp = <52 55>; + l0cdrr = <112 127>; + l0edrr = <112 127>; + l1cdrr = <24 27>; + l1edrr = <24 27>; + }; + port@6 { + port_id = <6>; + ucast_queue = <224 239>; + mcast_queue = <292 295>; + l0sp = <56 59>; + l0cdrr = <128 143>; + l0edrr = <128 143>; + l1cdrr = <28 31>; + l1edrr = <28 31>; + }; + port@7 { + port_id = <7>; + ucast_queue = <240 255>; + mcast_queue = <296 299>; + l0sp = <60 63>; + l0cdrr = <144 159>; + l0edrr = <144 159>; + l1cdrr = <32 35>; + l1edrr = <32 35>; + }; + }; + port_scheduler_config { + port@0 { + port_id = <0>; + l1scheduler { + group@0 { + sp = <0 1>; /*L0 SPs*/ + /*cpri cdrr epri edrr*/ + cfg = <0 0 0 0>; + }; + }; + l0scheduler { + group@0 { + /*unicast queues*/ + ucast_queue = <0 4 
8>; + /*multicast queues*/ + mcast_queue = <256 260>; + /*sp cpri cdrr epri edrr*/ + cfg = <0 0 0 0 0>; + }; + group@1 { + ucast_queue = <1 5 9>; + mcast_queue = <257 261>; + cfg = <0 1 1 1 1>; + }; + group@2 { + ucast_queue = <2 6 10>; + mcast_queue = <258 262>; + cfg = <0 2 2 2 2>; + }; + group@3 { + ucast_queue = <3 7 11>; + mcast_queue = <259 263>; + cfg = <0 3 3 3 3>; + }; + }; + }; + port@1 { + port_id = <1>; + l1scheduler { + group@0 { + sp = <36>; + cfg = <0 8 0 8>; + }; + group@1 { + sp = <37>; + cfg = <1 9 1 9>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <144>; + ucast_loop_pri = <16>; + mcast_queue = <272>; + mcast_loop_pri = <4>; + cfg = <36 0 48 0 48>; + }; + }; + }; + port@2 { + port_id = <2>; + l1scheduler { + group@0 { + sp = <40>; + cfg = <0 12 0 12>; + }; + group@1 { + sp = <41>; + cfg = <1 13 1 13>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <160>; + ucast_loop_pri = <16>; + mcast_queue = <276>; + mcast_loop_pri = <4>; + cfg = <40 0 64 0 64>; + }; + }; + }; + port@3 { + port_id = <3>; + l1scheduler { + group@0 { + sp = <44>; + cfg = <0 16 0 16>; + }; + group@1 { + sp = <45>; + cfg = <1 17 1 17>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <176>; + ucast_loop_pri = <16>; + mcast_queue = <280>; + mcast_loop_pri = <4>; + cfg = <44 0 80 0 80>; + }; + }; + }; + port@4 { + port_id = <4>; + l1scheduler { + group@0 { + sp = <48>; + cfg = <0 20 0 20>; + }; + group@1 { + sp = <49>; + cfg = <1 21 1 21>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <192>; + ucast_loop_pri = <16>; + mcast_queue = <284>; + mcast_loop_pri = <4>; + cfg = <48 0 96 0 96>; + }; + }; + }; + port@5 { + port_id = <5>; + l1scheduler { + group@0 { + sp = <52>; + cfg = <0 24 0 24>; + }; + group@1 { + sp = <53>; + cfg = <1 25 1 25>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <208>; + ucast_loop_pri = <16>; + mcast_queue = <288>; + mcast_loop_pri = <4>; + cfg = <52 0 112 0 112>; + }; + }; + }; + port@6 { + port_id = <6>; + l1scheduler { + 
group@0 { + sp = <56>; + cfg = <0 28 0 28>; + }; + group@1 { + sp = <57>; + cfg = <1 29 1 29>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <224>; + ucast_loop_pri = <16>; + mcast_queue = <292>; + mcast_loop_pri = <4>; + cfg = <56 0 128 0 128>; + }; + }; + }; + port@7 { + port_id = <7>; + l1scheduler { + group@0 { + sp = <60>; + cfg = <0 32 0 32>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <240>; + mcast_queue = <296>; + cfg = <60 0 144 0 144>; + }; + }; + }; + }; + }; + + dp1 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <5>; + reg = <0x3a001000 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <28>; + phy-mode = "sgmii"; + }; + + /* POWER LED, TP-Link */ + leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; + + led_power: led_power { + label = "power:blue"; + gpio = <&tlmm 42 GPIO_ACTIVE_HIGH>; + default-state = "on"; + }; + }; + + /* BUTTON, TP-Link */ + gpio_keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + button@1 { + label = "reset"; + linux,code = ; + gpios = <&tlmm 50 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + }; + }; + + nss-macsec0 { + compatible = "qcom,nss-macsec"; + phy_addr = <0x18>; + phy_access_mode = <0>; + mdiobus = <&mdio>; + }; + nss-macsec1 { + compatible = "qcom,nss-macsec"; + phy_addr = <0x1c>; + phy_access_mode = <0>; + mdiobus = <&mdio>; + }; +}; + +&serial_blsp4 { + pinctrl-0 = <&uart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&spi_0 { /* BLSP1 QUP1 */ + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + cs-select = <0>; + status = "ok"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + compatible = "n25q128a11"; + linux,modalias = "m25p80", "n25q128a11"; + spi-max-frequency = <50000000>; + use-default-sizes; + }; +}; + +&spi_4 { /* BLSP1 QUP3 */ + pinctrl-0 = <&spi_3_pins>; + pinctrl-names = 
"default"; + cs-select = <2>; + quartz-reset-gpio = <&tlmm 21 1>; + status = "disabled"; + spidev3: spi@3 { + compatible = "qca,spidev"; + reg = <0>; + spi-max-frequency = <24000000>; + }; +}; + +&serial_blsp2 { + status = "disabled"; +}; + +&msm_imem { + status = "disabled"; +}; + +&ssphy_0 { + status = "ok"; +}; + +&qusb_phy_0 { + status = "ok"; +}; + +&ssphy_1 { + status = "ok"; +}; + +&qusb_phy_1 { + status = "ok"; +}; + +&usb3_0 { + status = "ok"; +}; + +&usb3_1 { + status = "ok"; +}; + +&cryptobam { + status = "ok"; +}; + +&crypto { + status = "ok"; +}; + +&i2c_0 { + status = "disabled"; +}; + +&i2c_1 { + status = "disabled"; +}; + +&qpic_bam { + status = "ok"; +}; + +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&qpic_lcd { + status = "disabled"; +}; + +&qpic_lcd_panel { + status = "disabled"; +}; + +&ledc { + status = "disabled"; +}; + +&pcie0 { + status = "ok"; +}; + +&pcie1 { + status = "disabled"; +}; + diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf194c.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf194c.dts index f346e5b9f..0db1a5c91 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf194c.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf194c.dts @@ -1,461 +1,751 @@ -// SPDX-License-Identifier: GPL-2.0-only /dts-v1/; -/* Copyright (c) 2017, 2020, The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include "ipq8074-hk01.dtsi" -#include "ipq8074-audio.dtsi" +#include "qcom-ipq807x-soc.dtsi" +#include "qcom-ipq807x-audio.dtsi" +#include "qcom-ipq807x-hk-cpu.dtsi" / { - model = "CIG WF194C"; - compatible = "cig,wf194c", "qcom,ipq807x"; + #address-cells = <0x2>; + #size-cells = <0x2>; + model = "CIG WF194C"; + compatible = "cig,wf194c", "qcom,ipq807x"; + qcom,msm-id = <0x143 0x0>, <0x158 0x0>, <0x186 0x0>, <0x188 0x0>; + interrupt-parent = <&intc>; + qcom,board-id = <0x8 0x0>; + qcom,pmic-id = <0x0 0x0 0x0 0x0>; aliases { - /delete-property/ ethernet2; - /delete-property/ ethernet3; - /delete-property/ ethernet4; - /delete-property/ ethernet5; + sdhc1 = &sdhc_1; /* SDC1 eMMC slot */ + sdhc2 = &sdhc_2; /* SDC2 SD slot */ + /* + * Aliases as required by u-boot + * to patch MAC addresses + */ + ethernet0 = "/soc/dp1"; + ethernet1 = "/soc/dp2"; }; - soc { - /delete-node/ ledc@191E000; - /delete-node/ qti,scm_restart_reason; + chosen { + bootargs = "console=ttyMSM0,115200,n8 root=/dev/ram0 rw init=/init"; + #ifdef __IPQ_MEM_PROFILE_256_MB__ + bootargs-append = " swiotlb=1"; + #else + bootargs-append = " swiotlb=1 coherent_pool=2M"; + #endif + }; +}; - pinctrl@1000000 { - button_pins: button_pins { - wps_button { - pins = "gpio57"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; +&tlmm { + pinctrl-0 = <&btcoex_pins>; + pinctrl-names = "default"; - leds_pins: leds_pinmux { - led1_r { - pins = "gpio54"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; + btcoex_pins: btcoex_pins { + mux_0 { + pins = "gpio34"; + function = "gpio"; + drive-strength = <6>; + bias-pull-up; + output-high; + }; + mux_1 
{ + pins = "gpio62"; + function = "gpio"; + drive-strength = <6>; + bias-pull-up; + output-high; + }; + }; - led1_g { - pins = "gpio55"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; + mdio_pins: mdio_pinmux { + mux_0 { + pins = "gpio68"; + function = "mdc"; + drive-strength = <8>; + bias-pull-up; + }; + mux_1 { + pins = "gpio69"; + function = "mdio"; + drive-strength = <8>; + bias-pull-up; + }; + }; - led2_r { - pins = "gpio56"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; + uart_pins: uart_pins { + mux { + pins = "gpio23", "gpio24"; + function = "blsp4_uart1"; + drive-strength = <8>; + bias-disable; + }; + }; - led2_g { - pins = "gpio64"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - }; + i2c_0_pins: i2c_0_pinmux { + mux { + pins = "gpio42", "gpio43"; + function = "blsp1_i2c"; + drive-strength = <8>; + bias-disable; + }; + }; + + spi_0_pins: spi_0_pins { + mux { + pins = "gpio38", "gpio39", "gpio40", "gpio41"; + function = "blsp0_spi"; + drive-strength = <8>; + bias-disable; + }; + }; + + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; + drive-strength = <8>; + bias-disable; + }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-disable; + }; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-disable; + }; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-disable; + }; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-disable; + }; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-disable; + }; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-disable; + }; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-disable; + }; + data_8 { + pins = "gpio16"; + function = "qpic_pad8"; + drive-strength = <8>; + bias-disable; + 
}; + qpic_pad { + pins = "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", + "gpio9", "gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; + drive-strength = <8>; + bias-disable; + }; + }; + + sd_pins: sd_pins { + mux { + pins = "gpio63"; + function = "sd_card"; + drive-strength = <8>; + bias-pull-up; + }; + }; + + hsuart_pins: hsuart_pins { + mux { + pins = "gpio48", "gpio49"; + function = "blsp2_uart"; + drive-strength = <8>; + bias-disable; + }; + }; + + leds_pins: leds_pinmux { + led1_r { + pins = "gpio54"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led1_g { + pins = "gpio55"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led2_r { + pins = "gpio56"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led2_g { + pins = "gpio64"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; }; - gpio_keys { - compatible = "gpio-keys"; - pinctrl-0 = <&button_pins>; - pinctrl-names = "default"; + }; - button@1 { - label = "reset"; - linux,code = ; - gpios = <&tlmm 67 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; + button_pins: button_pins { + wps_button { + pins = "gpio67"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; }; + }; - leds { - compatible = "gpio-leds"; - pinctrl-0 = <&leds_pins>; - pinctrl-names = "default"; - status = "ok"; - - led@54 { - label = "wf194c:red:lan"; - gpios = <&tlmm 54 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; - - led@55 { - label = "wf194c:green:lan"; - gpios = <&tlmm 55 GPIO_ACTIVE_HIGH>; - default-state = "on"; - }; - - led@56 { - label = "wf194c:red:wan"; - gpios = <&tlmm 56 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; - - led@64 { - label = "wf194c:green:wan"; - gpios = <&tlmm 64 GPIO_ACTIVE_HIGH>; - default-state = "on"; - }; + uniphy_pins: uniphy_pinmux { + mux_2 { + pins = "gpio37"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; }; - - dp1 { - device_type = "network"; - compatible = 
"qcom,nss-dp"; - qcom,id = <4>; - reg = <0x3a001600 0x200>; - qcom,mactype = <0>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <3>; - phy-mode = "sgmii"; + mux_3 { + pins = "gpio44"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; }; + }; - dp2 { - device_type = "network"; - compatible = "qcom,nss-dp"; - qcom,id = <6>; - reg = <0x3a007000 0x3fff>; - qcom,mactype = <1>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <0>; - phy-mode = "sgmii"; - }; - - mdio@90000 { - pinctrl-0 = <&mdio_pins>; - pinctrl-names = "default"; - phy-reset-gpio = <&tlmm 37 0 &tlmm 44 0>; - phy0: ethernet-phy@0 { - reg = <0xf>; - }; - phy1: ethernet-phy@1 { - reg = <0xf>; - }; - phy2: ethernet-phy@2 { - reg = <0xf>; - }; - phy3: ethernet-phy@3 { - reg = <3>; - }; - phy4: ethernet-phy@4 { - reg = <4>; - }; - phy5: ethernet-phy@5 { - compatible ="ethernet-phy-ieee802.3-c45"; - reg = <0>; - }; - }; - - ess-switch@3a000000 { - switch_cpu_bmp = <0x1>; /* cpu port bitmap */ - switch_lan_bmp = <0x30>; /* lan port bitmap */ - switch_wan_bmp = <0x40>; /* wan port bitmap */ - switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ - switch_mac_mode1 = <0xff>; /* mac mode for uniphy instance1*/ - switch_mac_mode2 = <0xd>; /* mac mode for uniphy instance2*/ - bm_tick_mode = <0>; /* bm tick mode */ - tm_tick_mode = <0>; /* tm tick mode */ - port_scheduler_resource { - port@0 { - port_id = <0>; - ucast_queue = <0 143>; - mcast_queue = <256 271>; - l0sp = <0 35>; - l0cdrr = <0 47>; - l0edrr = <0 47>; - l1cdrr = <0 7>; - l1edrr = <0 7>; - }; - port@1 { - port_id = <1>; - ucast_queue = <144 159>; - mcast_queue = <272 275>; - l0sp = <36 39>; - l0cdrr = <48 63>; - l0edrr = <48 63>; - l1cdrr = <8 11>; - l1edrr = <8 11>; - }; - port@2 { - port_id = <2>; - ucast_queue = <160 175>; - mcast_queue = <276 279>; - l0sp = <40 43>; - l0cdrr = <64 79>; - l0edrr = <64 79>; - l1cdrr = <12 15>; - l1edrr = <12 15>; - }; - port@3 { 
- port_id = <3>; - ucast_queue = <176 191>; - mcast_queue = <280 283>; - l0sp = <44 47>; - l0cdrr = <80 95>; - l0edrr = <80 95>; - l1cdrr = <16 19>; - l1edrr = <16 19>; - }; - port@4 { - port_id = <4>; - ucast_queue = <192 207>; - mcast_queue = <284 287>; - l0sp = <48 51>; - l0cdrr = <96 111>; - l0edrr = <96 111>; - l1cdrr = <20 23>; - l1edrr = <20 23>; - }; - port@5 { - port_id = <5>; - ucast_queue = <208 223>; - mcast_queue = <288 291>; - l0sp = <52 55>; - l0cdrr = <112 127>; - l0edrr = <112 127>; - l1cdrr = <24 27>; - l1edrr = <24 27>; - }; - port@6 { - port_id = <6>; - ucast_queue = <224 239>; - mcast_queue = <292 295>; - l0sp = <56 59>; - l0cdrr = <128 143>; - l0edrr = <128 143>; - l1cdrr = <28 31>; - l1edrr = <28 31>; - }; - port@7 { - port_id = <7>; - ucast_queue = <240 255>; - mcast_queue = <296 299>; - l0sp = <60 63>; - l0cdrr = <144 159>; - l0edrr = <144 159>; - l1cdrr = <32 35>; - l1edrr = <32 35>; - }; - }; - port_scheduler_config { - port@0 { - port_id = <0>; - l1scheduler { - group@0 { - sp = <0 1>; /*L0 SPs*/ - /*cpri cdrr epri edrr*/ - cfg = <0 0 0 0>; - }; - }; - l0scheduler { - group@0 { - /*unicast queues*/ - ucast_queue = <0 4 8>; - /*multicast queues*/ - mcast_queue = <256 260>; - /*sp cpri cdrr epri edrr*/ - cfg = <0 0 0 0 0>; - }; - group@1 { - ucast_queue = <1 5 9>; - mcast_queue = <257 261>; - cfg = <0 1 1 1 1>; - }; - group@2 { - ucast_queue = <2 6 10>; - mcast_queue = <258 262>; - cfg = <0 2 2 2 2>; - }; - group@3 { - ucast_queue = <3 7 11>; - mcast_queue = <259 263>; - cfg = <0 3 3 3 3>; - }; - }; - }; - port@1 { - port_id = <1>; - l1scheduler { - group@0 { - sp = <36>; - cfg = <0 8 0 8>; - }; - group@1 { - sp = <37>; - cfg = <1 9 1 9>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <144>; - ucast_loop_pri = <16>; - mcast_queue = <272>; - mcast_loop_pri = <4>; - cfg = <36 0 48 0 48>; - }; - }; - }; - port@2 { - port_id = <2>; - l1scheduler { - group@0 { - sp = <40>; - cfg = <0 12 0 12>; - }; - group@1 { - sp = <41>; - cfg = <1 13 
1 13>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <160>; - ucast_loop_pri = <16>; - mcast_queue = <276>; - mcast_loop_pri = <4>; - cfg = <40 0 64 0 64>; - }; - }; - }; - port@3 { - port_id = <3>; - l1scheduler { - group@0 { - sp = <44>; - cfg = <0 16 0 16>; - }; - group@1 { - sp = <45>; - cfg = <1 17 1 17>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <176>; - ucast_loop_pri = <16>; - mcast_queue = <280>; - mcast_loop_pri = <4>; - cfg = <44 0 80 0 80>; - }; - }; - }; - port@4 { - port_id = <4>; - l1scheduler { - group@0 { - sp = <48>; - cfg = <0 20 0 20>; - }; - group@1 { - sp = <49>; - cfg = <1 21 1 21>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <192>; - ucast_loop_pri = <16>; - mcast_queue = <284>; - mcast_loop_pri = <4>; - cfg = <48 0 96 0 96>; - }; - }; - }; - port@5 { - port_id = <5>; - l1scheduler { - group@0 { - sp = <52>; - cfg = <0 24 0 24>; - }; - group@1 { - sp = <53>; - cfg = <1 25 1 25>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <208>; - ucast_loop_pri = <16>; - mcast_queue = <288>; - mcast_loop_pri = <4>; - cfg = <52 0 112 0 112>; - }; - }; - }; - port@6 { - port_id = <6>; - l1scheduler { - group@0 { - sp = <56>; - cfg = <0 28 0 28>; - }; - group@1 { - sp = <57>; - cfg = <1 29 1 29>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <224>; - ucast_loop_pri = <16>; - mcast_queue = <292>; - mcast_loop_pri = <4>; - cfg = <56 0 128 0 128>; - }; - }; - }; - port@7 { - port_id = <7>; - l1scheduler { - group@0 { - sp = <60>; - cfg = <0 32 0 32>; - }; - group@1 { - sp = <61>; - cfg = <1 33 1 33>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <240>; - ucast_loop_pri = <16>; - mcast_queue = <296>; - cfg = <60 0 144 0 144>; - }; - }; - }; - }; - }; - - pwm { - status = "disabled"; - }; - - qti_mdss_qpic@7980000 { - status = "disabled"; - }; - - qti_mdss_qpic_panel { - status = "disabled"; - }; - - i2c@78b6000 { - status = "disabled"; - }; - - sdhci@7824900 { - status = "disabled"; + pwm_pins: pwm_pinmux { + 
mux_1 { + pins = "gpio25"; + function = "pwm02"; + drive-strength = <8>; }; }; }; + +&soc { + pwm { + pinctrl-0 = <&pwm_pins>; + pinctrl-names = "default"; + used-pwm-indices = <1>, <0>, <0>, <0>; + status = "disabled"; + }; + + gpio_keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + button@1 { + label = "reset"; + linux,code = ; + gpios = <&tlmm 67 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + debounce-interval = <60>; + }; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-0 = <&leds_pins>; + pinctrl-names = "default"; + status = "ok"; + + led@54 { + label = "wf194c:red:lan"; + gpios = <&tlmm 54 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + led@55 { + label = "wf194c:green:lan"; + gpios = <&tlmm 55 GPIO_ACTIVE_HIGH>; + default-state = "on"; + }; + led@56 { + label = "wf194c:red:wan"; + gpios = <&tlmm 56 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + led@64 { + label = "wf194c:green:wan"; + gpios = <&tlmm 64 GPIO_ACTIVE_HIGH>; + default-state = "on"; + }; + }; + mdio@90000 { + pinctrl-0 = <&mdio_pins>; + pinctrl-names = "default"; + phy-reset-gpio = <&tlmm 37 0 &tlmm 44 0>; + phy0: ethernet-phy@0 { + reg = <0xf>; /*<0>*/ + }; + phy1: ethernet-phy@1 { + reg = <0xf>; + }; + phy2: ethernet-phy@2 { + reg = <0xf>; + }; + phy3: ethernet-phy@3 { + reg = <3>; + }; + phy4: ethernet-phy@4 { + reg = <4>; + }; + phy5: ethernet-phy@5 { + compatible ="ethernet-phy-ieee802.3-c45"; + reg = <0>; + }; + }; + + ess-switch@3a000000 { + pinctrl-0 = <&uniphy_pins>; + pinctrl-names = "default"; + switch_cpu_bmp = <0x1>; /* cpu port bitmap */ + switch_lan_bmp = <0x30>; /*..0x3e lan port bitmap */ + switch_wan_bmp = <0x40>; /* wan port bitmap */ + switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ + switch_mac_mode1 = <0xff>; /* mac mode for uniphy instance1*/ + switch_mac_mode2 = <0xd>; /* mac mode for uniphy instance2*/ + bm_tick_mode = <0>; /* bm tick mode */ + tm_tick_mode = <0>; /* tm tick mode */ + 
port_scheduler_resource { + port@0 { + port_id = <0>; + ucast_queue = <0 143>; + mcast_queue = <256 271>; + l0sp = <0 35>; + l0cdrr = <0 47>; + l0edrr = <0 47>; + l1cdrr = <0 7>; + l1edrr = <0 7>; + }; + port@1 { + port_id = <1>; + ucast_queue = <144 159>; + mcast_queue = <272 275>; + l0sp = <36 39>; + l0cdrr = <48 63>; + l0edrr = <48 63>; + l1cdrr = <8 11>; + l1edrr = <8 11>; + }; + port@2 { + port_id = <2>; + ucast_queue = <160 175>; + mcast_queue = <276 279>; + l0sp = <40 43>; + l0cdrr = <64 79>; + l0edrr = <64 79>; + l1cdrr = <12 15>; + l1edrr = <12 15>; + }; + port@3 { + port_id = <3>; + ucast_queue = <176 191>; + mcast_queue = <280 283>; + l0sp = <44 47>; + l0cdrr = <80 95>; + l0edrr = <80 95>; + l1cdrr = <16 19>; + l1edrr = <16 19>; + }; + port@4 { + port_id = <4>; + ucast_queue = <192 207>; + mcast_queue = <284 287>; + l0sp = <48 51>; + l0cdrr = <96 111>; + l0edrr = <96 111>; + l1cdrr = <20 23>; + l1edrr = <20 23>; + }; + port@5 { + port_id = <5>; + ucast_queue = <208 223>; + mcast_queue = <288 291>; + l0sp = <52 55>; + l0cdrr = <112 127>; + l0edrr = <112 127>; + l1cdrr = <24 27>; + l1edrr = <24 27>; + }; + port@6 { + port_id = <6>; + ucast_queue = <224 239>; + mcast_queue = <292 295>; + l0sp = <56 59>; + l0cdrr = <128 143>; + l0edrr = <128 143>; + l1cdrr = <28 31>; + l1edrr = <28 31>; + }; + port@7 { + port_id = <7>; + ucast_queue = <240 255>; + mcast_queue = <296 299>; + l0sp = <60 63>; + l0cdrr = <144 159>; + l0edrr = <144 159>; + l1cdrr = <32 35>; + l1edrr = <32 35>; + }; + }; + port_scheduler_config { + port@0 { + port_id = <0>; + l1scheduler { + group@0 { + sp = <0 1>; /*L0 SPs*/ + /*cpri cdrr epri edrr*/ + cfg = <0 0 0 0>; + }; + }; + l0scheduler { + group@0 { + /*unicast queues*/ + ucast_queue = <0 4 8>; + /*multicast queues*/ + mcast_queue = <256 260>; + /*sp cpri cdrr epri edrr*/ + cfg = <0 0 0 0 0>; + }; + group@1 { + ucast_queue = <1 5 9>; + mcast_queue = <257 261>; + cfg = <0 1 1 1 1>; + }; + group@2 { + ucast_queue = <2 6 10>; + mcast_queue = 
<258 262>; + cfg = <0 2 2 2 2>; + }; + group@3 { + ucast_queue = <3 7 11>; + mcast_queue = <259 263>; + cfg = <0 3 3 3 3>; + }; + }; + }; + port@1 { + port_id = <1>; + l1scheduler { + group@0 { + sp = <36>; + cfg = <0 8 0 8>; + }; + group@1 { + sp = <37>; + cfg = <1 9 1 9>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <144>; + ucast_loop_pri = <16>; + mcast_queue = <272>; + mcast_loop_pri = <4>; + cfg = <36 0 48 0 48>; + }; + }; + }; + port@2 { + port_id = <2>; + l1scheduler { + group@0 { + sp = <40>; + cfg = <0 12 0 12>; + }; + group@1 { + sp = <41>; + cfg = <1 13 1 13>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <160>; + ucast_loop_pri = <16>; + mcast_queue = <276>; + mcast_loop_pri = <4>; + cfg = <40 0 64 0 64>; + }; + }; + }; + port@3 { + port_id = <3>; + l1scheduler { + group@0 { + sp = <44>; + cfg = <0 16 0 16>; + }; + group@1 { + sp = <45>; + cfg = <1 17 1 17>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <176>; + ucast_loop_pri = <16>; + mcast_queue = <280>; + mcast_loop_pri = <4>; + cfg = <44 0 80 0 80>; + }; + }; + }; + port@4 { + port_id = <4>; + l1scheduler { + group@0 { + sp = <48>; + cfg = <0 20 0 20>; + }; + group@1 { + sp = <49>; + cfg = <1 21 1 21>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <192>; + ucast_loop_pri = <16>; + mcast_queue = <284>; + mcast_loop_pri = <4>; + cfg = <48 0 96 0 96>; + }; + }; + }; + port@5 { + port_id = <5>; + l1scheduler { + group@0 { + sp = <52>; + cfg = <0 24 0 24>; + }; + group@1 { + sp = <53>; + cfg = <1 25 1 25>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <208>; + ucast_loop_pri = <16>; + mcast_queue = <288>; + mcast_loop_pri = <4>; + cfg = <52 0 112 0 112>; + }; + }; + }; + port@6 { + port_id = <6>; + l1scheduler { + group@0 { + sp = <56>; + cfg = <0 28 0 28>; + }; + group@1 { + sp = <57>; + cfg = <1 29 1 29>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <224>; + ucast_loop_pri = <16>; + mcast_queue = <292>; + mcast_loop_pri = <4>; + cfg = <56 0 128 0 
128>; + }; + }; + }; + port@7 { + port_id = <7>; + l1scheduler { + group@0 { + sp = <60>; + cfg = <0 32 0 32>; + }; + group@1 { + sp = <61>; + cfg = <1 33 1 33>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <240>; + ucast_loop_pri = <16>; + mcast_queue = <296>; + cfg = <60 0 144 0 144>; + }; + }; + }; + }; + }; + dp1 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <4>; + reg = <0x3a001600 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <3>; + phy-mode = "sgmii"; + }; + dp2 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <6>; + reg = <0x3a007000 0x3fff>; + qcom,mactype = <1>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <0>; + phy-mode = "sgmii"; + }; +}; + +&serial_blsp4 { + pinctrl-0 = <&uart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&spi_0 { /* BLSP1 QUP1 */ + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + cs-select = <0>; + status = "ok"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + compatible = "n25q128a11"; + linux,modalias = "m25p80", "n25q128a11"; + spi-max-frequency = <50000000>; + use-default-sizes; + }; +}; + +&serial_blsp2 { + pinctrl-0 = <&hsuart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&msm_imem { + status = "enabled"; +}; + +&ssphy_0 { + status = "ok"; +}; + +&qusb_phy_0 { + status = "ok"; +}; + +&ssphy_1 { + status = "ok"; +}; + +&qusb_phy_1 { + status = "ok"; +}; + +&usb3_0 { + status = "ok"; +}; + +&usb3_1 { + status = "ok"; +}; + +&cryptobam { + status = "ok"; +}; + +&crypto { + status = "ok"; +}; + +&i2c_0 { + pinctrl-0 = <&i2c_0_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&i2c_1 { + status = "disabled"; +}; + +&sdhc_1 { + qcom,clk-rates = <400000 25000000 50000000 100000000 \ + 192000000 384000000>; + qcom,bus-speed-mode = "HS400_1p8v", "HS200_1p8v", "DDR_1p8v"; + qcom,nonremovable; + status = 
"ok"; +}; + +&qpic_bam { + status = "ok"; +}; + +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&sdhc_2 { + qcom,clk-rates = <400000 25000000 50000000 100000000 \ + 192000000>; + qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v"; + pinctrl-0 = <&sd_pins>; + pinctrl-names = "default"; + cd-gpios = <&tlmm 63 1>; + sd-ldo-gpios = <&tlmm 21 0>; + vqmmc-supply = <&ldo11>; + status = "ok"; +}; + +&qpic_lcd { + status = "ok"; +}; + +&qpic_lcd_panel { + status = "ok"; +}; diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf194c4.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf194c4.dts index 6ae0f1f28..4c633fea5 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf194c4.dts +++ b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf194c4.dts @@ -1,619 +1,942 @@ -// SPDX-License-Identifier: GPL-2.0-only /dts-v1/; -/* Copyright (c) 2020 The Linux Foundation. All rights reserved. +/* + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include "ipq8074.dtsi" -#include "ipq8074-hk-cpu.dtsi" +#include "qcom-ipq807x-soc.dtsi" +#include "qcom-ipq807x-hk-cpu.dtsi" / { #address-cells = <0x2>; #size-cells = <0x2>; model = "CIG WF194c4"; compatible = "cig,wf194c4", "qcom,ipq807x"; - qcom,msm-id = <0x157 0x0>, <0x187 0x0>; + qcom,msm-id = <0x156 0x0>; interrupt-parent = <&intc>; + qcom,board-id = <0x8 0x0>; + qcom,pmic-id = <0x0 0x0 0x0 0x0>; aliases { - serial0 = &blsp1_uart5; - /* Aliases as required by u-boot to patch MAC addresses */ + /* + * Aliases as required by u-boot + * to patch MAC addresses + */ ethernet0 = "/soc/dp1"; ethernet1 = "/soc/dp2"; + /* ethernet2 = "/soc/dp3"; + ethernet3 = "/soc/dp4"; + ethernet4 = "/soc/dp5"; + ethernet5 = "/soc/dp6"; + */ }; chosen { - stdout-path = "serial0"; - }; - - soc { - gpio_keys { - compatible = "gpio-keys"; - pinctrl-0 = <&button_pins>; - pinctrl-names = "default"; - - button@1 { - label = "reset"; - linux,code = ; - gpios = <&tlmm 67 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; - }; - - leds { - compatible = "gpio-leds"; - pinctrl-0 = <&leds_pins>; - pinctrl-names = "default"; - status = "ok"; - - led@54 { - label = "red:lan"; - gpios = <&tlmm 54 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "led1_r"; - default-state = "off"; - }; - - led@55 { - label = "green:lan"; - gpios = <&tlmm 55 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "led1_g"; - default-state = "off"; - }; - - led@56 { - label = "red:wan"; - gpios = <&tlmm 56 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "led2_r"; - default-state = "off"; - }; - - led@64 { - label = "green:wan"; - gpios = <&tlmm 64 GPIO_ACTIVE_HIGH>; - linux,default-trigger = "led2_g"; - default-state = "off"; - }; - }; - - pinctrl@1000000 { - button_pins: button_pins { - wps_button { - pins = "gpio34"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; - - usb_mux_sel_pins: usb_mux_pins { - mux { - pins = "gpio27"; - function = "gpio"; - drive-strength = <8>; - 
bias-pull-down; - }; - }; - - pcie0_pins: pcie_pins { - pcie0_rst { - pins = "gpio58"; - function = "pcie0_rst"; - drive-strength = <8>; - bias-pull-down; - }; - pcie0_wake { - pins = "gpio59"; - function = "pcie0_wake"; - drive-strength = <8>; - bias-pull-down; - }; - }; - - mdio_pins: mdio_pinmux { - mux_0 { - pins = "gpio68"; - function = "mdc"; - drive-strength = <8>; - bias-pull-up; - }; - mux_1 { - pins = "gpio69"; - function = "mdio"; - drive-strength = <8>; - bias-pull-up; - }; - mux_2 { - pins = "gpio25"; - function = "gpio"; - bias-pull-up; - }; - mux_3 { - pins = "gpio44"; - function = "gpio"; - bias-pull-up; - }; - }; - - button_pins: button_pins { - wps_button { - pins = "gpio67"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; - }; - }; - - leds_pins: leds_pinmux { - led1_r { - pins = "gpio54"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - led1_g { - pins = "gpio55"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - led2_r { - pins = "gpio56"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - led2_g { - pins = "gpio64"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; - }; - }; - - spi_3_pins: spi_3_pins { - mux { - pins = "gpio50", "gpio52", "gpio53"; - function = "blsp3_spi"; - drive-strength = <8>; - bias-disable; - }; - spi_cs { - pins = "gpio22"; - function = "blsp3_spi2"; - drive-strength = <8>; - bias-disable; - }; - quartz_interrupt { - pins = "gpio47"; - function = "gpio"; - input; - bias-disable; - }; - quartz_reset { - pins = "gpio21"; - function = "gpio"; - output-low; - bias-disable; - }; - }; - }; - - serial@78b3000 { - status = "ok"; - }; - - dp1 { - device_type = "network"; - compatible = "qcom,nss-dp"; - qcom,id = <4>; - reg = <0x3a001600 0x200>; - qcom,mactype = <0>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <0x13>; - phy-mode = "sgmii"; - }; - - dp2 { - device_type = "network"; - compatible = 
"qcom,nss-dp"; - qcom,id = <6>; - reg = <0x3a007000 0x3fff>; - qcom,mactype = <1>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <0>; - phy-mode = "sgmii"; - }; - - spi@78b5000 { - status = "ok"; - pinctrl-0 = <&spi_0_pins>; - pinctrl-names = "default"; - cs-select = <0>; - - m25p80@0 { - compatible = "n25q128a11"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0>; - spi-max-frequency = <50000000>; - }; - }; - - - dma@7984000 { - status = "ok"; - }; - - nand@79b0000 { - status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; - }; - - qusb@79000 { - status = "ok"; - }; - - ssphy@78000 { - status = "ok"; - }; - - usb3@8A00000 { - status = "ok"; - }; - - qusb@59000 { - status = "ok"; - }; - - ssphy@58000 { - status = "ok"; - }; - - usb3@8C00000 { - status = "ok"; - }; - - phy@84000 { - status = "ok"; - }; - - phy@86000 { - status = "ok"; - }; - - pci@20000000 { - perst-gpio = <&tlmm 58 1>; - status = "ok"; - }; - - mdio: mdio@90000 { - pinctrl-0 = <&mdio_pins>; - pinctrl-names = "default"; - phy-reset-gpio = <&tlmm 37 0 &tlmm 25 0 &tlmm 44 0>; - phy0: ethernet-phy@0 { - reg = <0x10>; - }; - phy1: ethernet-phy@1 { - reg = <0x11>; - }; - phy2: ethernet-phy@2 { - reg = <0x12>; - }; - phy3: ethernet-phy@3 { - reg = <0x13>; - }; - phy4: ethernet-phy@4 { - reg = <0x14>; - }; - phy5: ethernet-phy@5 { - reg = <0>; - }; - }; - - ess-switch@3a000000 { - switch_cpu_bmp = <0x1>; /* cpu port bitmap */ - switch_lan_bmp = <0x32>; /* lan port bitmap */ - switch_wan_bmp = <0x40>; /* wan port bitmap */ - switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ - switch_mac_mode1 = <0xff>; /* mac mode for uniphy instance1*/ - switch_mac_mode2 = <0xd>; /* mac mode for uniphy instance2*/ - bm_tick_mode = <0>; /* bm tick mode */ - tm_tick_mode = <0>; /* tm tick mode */ - port_scheduler_resource { - port@0 { - port_id = <0>; - 
ucast_queue = <0 143>; - mcast_queue = <256 271>; - l0sp = <0 35>; - l0cdrr = <0 47>; - l0edrr = <0 47>; - l1cdrr = <0 7>; - l1edrr = <0 7>; - }; - port@1 { - port_id = <1>; - ucast_queue = <144 159>; - mcast_queue = <272 275>; - l0sp = <36 39>; - l0cdrr = <48 63>; - l0edrr = <48 63>; - l1cdrr = <8 11>; - l1edrr = <8 11>; - }; - port@2 { - port_id = <2>; - ucast_queue = <160 175>; - mcast_queue = <276 279>; - l0sp = <40 43>; - l0cdrr = <64 79>; - l0edrr = <64 79>; - l1cdrr = <12 15>; - l1edrr = <12 15>; - }; - port@3 { - port_id = <3>; - ucast_queue = <176 191>; - mcast_queue = <280 283>; - l0sp = <44 47>; - l0cdrr = <80 95>; - l0edrr = <80 95>; - l1cdrr = <16 19>; - l1edrr = <16 19>; - }; - port@4 { - port_id = <4>; - ucast_queue = <192 207>; - mcast_queue = <284 287>; - l0sp = <48 51>; - l0cdrr = <96 111>; - l0edrr = <96 111>; - l1cdrr = <20 23>; - l1edrr = <20 23>; - }; - port@5 { - port_id = <5>; - ucast_queue = <208 223>; - mcast_queue = <288 291>; - l0sp = <52 55>; - l0cdrr = <112 127>; - l0edrr = <112 127>; - l1cdrr = <24 27>; - l1edrr = <24 27>; - }; - port@6 { - port_id = <6>; - ucast_queue = <224 239>; - mcast_queue = <292 295>; - l0sp = <56 59>; - l0cdrr = <128 143>; - l0edrr = <128 143>; - l1cdrr = <28 31>; - l1edrr = <28 31>; - }; - port@7 { - port_id = <7>; - ucast_queue = <240 255>; - mcast_queue = <296 299>; - l0sp = <60 63>; - l0cdrr = <144 159>; - l0edrr = <144 159>; - l1cdrr = <32 35>; - l1edrr = <32 35>; - }; - }; - port_scheduler_config { - port@0 { - port_id = <0>; - l1scheduler { - group@0 { - sp = <0 1>; /*L0 SPs*/ - /*cpri cdrr epri edrr*/ - cfg = <0 0 0 0>; - }; - }; - l0scheduler { - group@0 { - /*unicast queues*/ - ucast_queue = <0 4 8>; - /*multicast queues*/ - mcast_queue = <256 260>; - /*sp cpri cdrr epri edrr*/ - cfg = <0 0 0 0 0>; - }; - group@1 { - ucast_queue = <1 5 9>; - mcast_queue = <257 261>; - cfg = <0 1 1 1 1>; - }; - group@2 { - ucast_queue = <2 6 10>; - mcast_queue = <258 262>; - cfg = <0 2 2 2 2>; - }; - group@3 { - 
ucast_queue = <3 7 11>; - mcast_queue = <259 263>; - cfg = <0 3 3 3 3>; - }; - }; - }; - port@1 { - port_id = <1>; - l1scheduler { - group@0 { - sp = <36>; - cfg = <0 8 0 8>; - }; - group@1 { - sp = <37>; - cfg = <1 9 1 9>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <144>; - ucast_loop_pri = <16>; - mcast_queue = <272>; - mcast_loop_pri = <4>; - cfg = <36 0 48 0 48>; - }; - }; - }; - port@2 { - port_id = <2>; - l1scheduler { - group@0 { - sp = <40>; - cfg = <0 12 0 12>; - }; - group@1 { - sp = <41>; - cfg = <1 13 1 13>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <160>; - ucast_loop_pri = <16>; - mcast_queue = <276>; - mcast_loop_pri = <4>; - cfg = <40 0 64 0 64>; - }; - }; - }; - port@3 { - port_id = <3>; - l1scheduler { - group@0 { - sp = <44>; - cfg = <0 16 0 16>; - }; - group@1 { - sp = <45>; - cfg = <1 17 1 17>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <176>; - ucast_loop_pri = <16>; - mcast_queue = <280>; - mcast_loop_pri = <4>; - cfg = <44 0 80 0 80>; - }; - }; - }; - port@4 { - port_id = <4>; - l1scheduler { - group@0 { - sp = <48>; - cfg = <0 20 0 20>; - }; - group@1 { - sp = <49>; - cfg = <1 21 1 21>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <192>; - ucast_loop_pri = <16>; - mcast_queue = <284>; - mcast_loop_pri = <4>; - cfg = <48 0 96 0 96>; - }; - }; - }; - port@5 { - port_id = <5>; - l1scheduler { - group@0 { - sp = <52>; - cfg = <0 24 0 24>; - }; - group@1 { - sp = <53>; - cfg = <1 25 1 25>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <208>; - ucast_loop_pri = <16>; - mcast_queue = <288>; - mcast_loop_pri = <4>; - cfg = <52 0 112 0 112>; - }; - }; - }; - port@6 { - port_id = <6>; - l1scheduler { - group@0 { - sp = <56>; - cfg = <0 28 0 28>; - }; - group@1 { - sp = <57>; - cfg = <1 29 1 29>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <224>; - ucast_loop_pri = <16>; - mcast_queue = <292>; - mcast_loop_pri = <4>; - cfg = <56 0 128 0 128>; - }; - }; - }; - port@7 { - port_id = <7>; - 
l1scheduler { - group@0 { - sp = <60>; - cfg = <0 32 0 32>; - }; - group@1 { - sp = <61>; - cfg = <1 33 1 33>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <240>; - ucast_loop_pri = <16>; - mcast_queue = <296>; - cfg = <60 0 144 0 144>; - }; - }; - }; - }; - }; - - nss-macsec0 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x18>; - phy_access_mode = <0>; - mdiobus = <&mdio>; - }; - nss-macsec1 { - compatible = "qcom,nss-macsec"; - phy_addr = <0x1c>; - phy_access_mode = <0>; - mdiobus = <&mdio>; - }; + bootargs = "console=ttyMSM0,115200,n8 root=/dev/ram0 rw \ + init=/init"; + #ifdef __IPQ_MEM_PROFILE_256_MB__ + bootargs-append = " swiotlb=1"; + #else + bootargs-append = " swiotlb=1 coherent_pool=2M"; + #endif }; }; + +&tlmm { + pinctrl-0 = <&btcoex_pins>; + pinctrl-names = "default"; + + btcoex_pins: btcoex_pins { +/* + mux_0 { + pins = "gpio64"; + function = "pta1_1"; + drive-strength = <6>; + bias-pull-down; + }; + mux_1 { + pins = "gpio65"; + function = "pta1_2"; + drive-strength = <6>; + bias-pull-down; + }; + mux_2 { + pins = "gpio66"; + function = "pta1_0"; + drive-strength = <6>; + bias-pull-down; + }; + mux_3 { + pins = "gpio54"; + function = "pta2_0"; + drive-strength = <6>; + bias-pull-down; + }; + mux_4 { + pins = "gpio55"; + function = "pta2_1"; + drive-strength = <6>; + bias-pull-down; + }; + mux_5 { + pins = "gpio56"; + function = "pta2_2"; + drive-strength = <6>; + bias-pull-down; + }; +*/ + mux_0 { + pins = "gpio34"; + function = "gpio"; + drive-strength = <6>; + bias-pull-up; + output-high; + }; + mux_1 { + pins = "gpio62"; + function = "gpio"; + drive-strength = <6>; + bias-pull-up; + output-high; + }; + }; + + mdio_pins: mdio_pinmux { + mux_0 { + pins = "gpio68"; + function = "mdc"; + drive-strength = <8>; + bias-pull-up; + }; + mux_1 { + pins = "gpio69"; + function = "mdio"; + drive-strength = <8>; + bias-pull-up; + }; + }; + + uart_pins: uart_pins { + mux { + pins = "gpio23", "gpio24"; + function = "blsp4_uart1"; + drive-strength = 
<8>; + bias-disable; + }; + }; + + spi_0_pins: spi_0_pins { + mux { + pins = "gpio38", "gpio39", "gpio40", "gpio41"; + function = "blsp0_spi"; + drive-strength = <8>; + bias-disable; + }; + }; + + /*spi_3_pins: spi_3_pins { + mux { + pins = "gpio50", "gpio52", "gpio53"; + function = "blsp3_spi"; + drive-strength = <8>; + bias-disable; + }; + spi_cs { + pins = "gpio22"; + function = "blsp3_spi2"; + drive-strength = <8>; + bias-disable; + }; + quartz_interrupt { + pins = "gpio47"; + function = "gpio"; + input; + bias-disable; + }; + quartz_reset { + pins = "gpio21"; + function = "gpio"; + output-low; + bias-disable; + }; + };*/ + + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; + drive-strength = <8>; + bias-disable; + }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-disable; + }; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-disable; + }; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-disable; + }; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-disable; + }; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-disable; + }; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-disable; + }; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-disable; + }; + qpic_pad { + pins = "gpio1", "gpio3", "gpio4", + "gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; + drive-strength = <8>; + bias-disable; + }; + }; + + hsuart_pins: hsuart_pins { + mux { + pins = "gpio48", "gpio49"; + function = "blsp2_uart"; + drive-strength = <8>; + bias-disable; + }; + }; + + button_pins: button_pins { + wps_button { + pins = "gpio67"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + }; + + leds_pins: leds_pinmux { + led1_r { + pins = "gpio54"; + function = "gpio"; + drive-strength 
= <8>; + bias-pull-down; + }; + led1_g { + pins = "gpio55"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led2_r { + pins = "gpio56"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + led2_g { + pins = "gpio64"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + + }; + + /*usb_mux_sel_pins: usb_mux_pins { + mux { + pins = "gpio27"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + pcie0_pins: pcie_pins { + pcie0_rst { + pins = "gpio58"; + function = "pcie0_rst"; + drive-strength = <8>; + bias-pull-down; + }; + pcie0_wake { + pins = "gpio59"; + function = "pcie0_wake"; + drive-strength = <8>; + bias-pull-down; + }; + };*/ + uniphy_pins: uniphy_pinmux { + mux_2 { + pins = "gpio37"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + mux_3 { + pins = "gpio44"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + }; + +}; + +&soc { + gpio_keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + button@1 { + label = "reset"; + linux,code = ; + gpios = <&tlmm 67 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + debounce-interval = <60>; + }; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-0 = <&leds_pins>; + pinctrl-names = "default"; + status = "ok"; + + led@54 { + label = "red:lan"; + gpios = <&tlmm 54 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "led1_r"; + default-state = "off"; + }; + led@55 { + label = "green:lan"; + gpios = <&tlmm 55 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "led1_g"; + default-state = "off"; + }; + led@56 { + label = "red:wan"; + gpios = <&tlmm 56 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "led2_r"; + default-state = "off"; + }; + led@64 { + label = "green:wan"; + gpios = <&tlmm 64 GPIO_ACTIVE_HIGH>; + linux,default-trigger = "led2_g"; + default-state = "off"; + }; + }; + mdio: mdio@90000 { + pinctrl-0 = <&mdio_pins>; + pinctrl-names = "default"; + phy-reset-gpio = <&tlmm 37 0 
&tlmm 44 0>; + phy0: ethernet-phy@0 { + reg = <0x10>; /*<0>*/ + }; + phy1: ethernet-phy@1 { + reg = <0x11>; + }; + phy2: ethernet-phy@2 { + reg = <0x12>; + }; + phy3: ethernet-phy@3 { + reg = <0x13>; + }; + phy4: ethernet-phy@4 { + reg = <0x14>; + }; + phy5: ethernet-phy@5 { + compatible ="ethernet-phy-ieee802.3-c45"; + reg = <0>; + }; + }; + + ess-switch@3a000000 { + pinctrl-0 = <&uniphy_pins>; + pinctrl-names = "default"; + switch_cpu_bmp = <0x1>; /* cpu port bitmap */ + switch_lan_bmp = <0x32>; /*..0x3e lan port bitmap */ + switch_wan_bmp = <0x40>; /* wan port bitmap */ + switch_mac_mode = <0x0>; /* mac mode for uniphy instance0*/ + switch_mac_mode1 = <0xff>; /* mac mode for uniphy instance1*/ + switch_mac_mode2 = <0xd>; /* mac mode for uniphy instance2*/ + bm_tick_mode = <0>; /* bm tick mode */ + tm_tick_mode = <0>; /* tm tick mode */ + /*qcom,port_phyinfo { + port@0 { + port_id = <1>; + phy_address = <0>; + }; + port@1 { + port_id = <2>; + phy_address = <1>; + }; + port@2 { + port_id = <3>; + phy_address = <2>; + }; + port@3 { + port_id = <4>; + phy_address = <3>; + }; + port@4 { + port_id = <5>; + phy_address = <24>; + port_mac_sel = "QGMAC_PORT"; + }; + port@5 { + port_id = <6>; + phy_address = <28>; + port_mac_sel = "QGMAC_PORT"; + }; + };*/ + port_scheduler_resource { + port@0 { + port_id = <0>; + ucast_queue = <0 143>; + mcast_queue = <256 271>; + l0sp = <0 35>; + l0cdrr = <0 47>; + l0edrr = <0 47>; + l1cdrr = <0 7>; + l1edrr = <0 7>; + }; + port@1 { + port_id = <1>; + ucast_queue = <144 159>; + mcast_queue = <272 275>; + l0sp = <36 39>; + l0cdrr = <48 63>; + l0edrr = <48 63>; + l1cdrr = <8 11>; + l1edrr = <8 11>; + }; + port@2 { + port_id = <2>; + ucast_queue = <160 175>; + mcast_queue = <276 279>; + l0sp = <40 43>; + l0cdrr = <64 79>; + l0edrr = <64 79>; + l1cdrr = <12 15>; + l1edrr = <12 15>; + }; + port@3 { + port_id = <3>; + ucast_queue = <176 191>; + mcast_queue = <280 283>; + l0sp = <44 47>; + l0cdrr = <80 95>; + l0edrr = <80 95>; + l1cdrr = <16 
19>; + l1edrr = <16 19>; + }; + port@4 { + port_id = <4>; + ucast_queue = <192 207>; + mcast_queue = <284 287>; + l0sp = <48 51>; + l0cdrr = <96 111>; + l0edrr = <96 111>; + l1cdrr = <20 23>; + l1edrr = <20 23>; + }; + port@5 { + port_id = <5>; + ucast_queue = <208 223>; + mcast_queue = <288 291>; + l0sp = <52 55>; + l0cdrr = <112 127>; + l0edrr = <112 127>; + l1cdrr = <24 27>; + l1edrr = <24 27>; + }; + port@6 { + port_id = <6>; + ucast_queue = <224 239>; + mcast_queue = <292 295>; + l0sp = <56 59>; + l0cdrr = <128 143>; + l0edrr = <128 143>; + l1cdrr = <28 31>; + l1edrr = <28 31>; + }; + port@7 { + port_id = <7>; + ucast_queue = <240 255>; + mcast_queue = <296 299>; + l0sp = <60 63>; + l0cdrr = <144 159>; + l0edrr = <144 159>; + l1cdrr = <32 35>; + l1edrr = <32 35>; + }; + }; + port_scheduler_config { + port@0 { + port_id = <0>; + l1scheduler { + group@0 { + sp = <0 1>; /*L0 SPs*/ + /*cpri cdrr epri edrr*/ + cfg = <0 0 0 0>; + }; + }; + l0scheduler { + group@0 { + /*unicast queues*/ + ucast_queue = <0 4 8>; + /*multicast queues*/ + mcast_queue = <256 260>; + /*sp cpri cdrr epri edrr*/ + cfg = <0 0 0 0 0>; + }; + group@1 { + ucast_queue = <1 5 9>; + mcast_queue = <257 261>; + cfg = <0 1 1 1 1>; + }; + group@2 { + ucast_queue = <2 6 10>; + mcast_queue = <258 262>; + cfg = <0 2 2 2 2>; + }; + group@3 { + ucast_queue = <3 7 11>; + mcast_queue = <259 263>; + cfg = <0 3 3 3 3>; + }; + }; + }; + port@1 { + port_id = <1>; + l1scheduler { + group@0 { + sp = <36>; + cfg = <0 8 0 8>; + }; + group@1 { + sp = <37>; + cfg = <1 9 1 9>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <144>; + ucast_loop_pri = <16>; + mcast_queue = <272>; + mcast_loop_pri = <4>; + cfg = <36 0 48 0 48>; + }; + }; + }; + port@2 { + port_id = <2>; + l1scheduler { + group@0 { + sp = <40>; + cfg = <0 12 0 12>; + }; + group@1 { + sp = <41>; + cfg = <1 13 1 13>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <160>; + ucast_loop_pri = <16>; + mcast_queue = <276>; + mcast_loop_pri = <4>; + 
cfg = <40 0 64 0 64>; + }; + }; + }; + port@3 { + port_id = <3>; + l1scheduler { + group@0 { + sp = <44>; + cfg = <0 16 0 16>; + }; + group@1 { + sp = <45>; + cfg = <1 17 1 17>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <176>; + ucast_loop_pri = <16>; + mcast_queue = <280>; + mcast_loop_pri = <4>; + cfg = <44 0 80 0 80>; + }; + }; + }; + port@4 { + port_id = <4>; + l1scheduler { + group@0 { + sp = <48>; + cfg = <0 20 0 20>; + }; + group@1 { + sp = <49>; + cfg = <1 21 1 21>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <192>; + ucast_loop_pri = <16>; + mcast_queue = <284>; + mcast_loop_pri = <4>; + cfg = <48 0 96 0 96>; + }; + }; + }; + port@5 { + port_id = <5>; + l1scheduler { + group@0 { + sp = <52>; + cfg = <0 24 0 24>; + }; + group@1 { + sp = <53>; + cfg = <1 25 1 25>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <208>; + ucast_loop_pri = <16>; + mcast_queue = <288>; + mcast_loop_pri = <4>; + cfg = <52 0 112 0 112>; + }; + }; + }; + port@6 { + port_id = <6>; + l1scheduler { + group@0 { + sp = <56>; + cfg = <0 28 0 28>; + }; + group@1 { + sp = <57>; + cfg = <1 29 1 29>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <224>; + ucast_loop_pri = <16>; + mcast_queue = <292>; + mcast_loop_pri = <4>; + cfg = <56 0 128 0 128>; + }; + }; + }; + port@7 { + port_id = <7>; + l1scheduler { + group@0 { + sp = <60>; + cfg = <0 32 0 32>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <240>; + mcast_queue = <296>; + cfg = <60 0 144 0 144>; + }; + }; + }; + }; + }; +/* + dp1 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <1>; + reg = <0x3a001000 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <0>; + phy-mode = "sgmii"; + }; + + dp2 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <2>; + reg = <0x3a001200 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <1>; + 
phy-mode = "sgmii"; + }; + + dp3 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <3>; + reg = <0x3a001400 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <2>; + phy-mode = "sgmii"; + }; + + dp4 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <4>; + reg = <0x3a001600 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <3>; + phy-mode = "sgmii"; + }; + + dp5 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <5>; + reg = <0x3a003000 0x3fff>; + qcom,mactype = <1>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <24>; + phy-mode = "sgmii"; + }; + + dp6 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <6>; + reg = <0x3a007000 0x3fff>; + qcom,mactype = <1>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <28>; + phy-mode = "sgmii"; + }; +*/ + dp1 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <4>; + reg = <0x3a001600 0x200>; + // qcom,id = <1>; + // reg = <0x3a001000 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <0x13>; + phy-mode = "sgmii"; + }; + dp2 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <6>; + reg = <0x3a007000 0x3fff>; + qcom,mactype = <1>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <0>; + phy-mode = "sgmii"; + }; +/* + leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; + + led_2g { + label = "led_2g"; + gpio = <&tlmm 42 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + + led_5g { + label = "led_5g"; + gpio = <&tlmm 43 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + }; + + nss-macsec0 { + compatible = "qcom,nss-macsec"; + phy_addr = <0x18>; + phy_access_mode = <0>; + 
mdiobus = <&mdio>; + }; + nss-macsec1 { + compatible = "qcom,nss-macsec"; + phy_addr = <0x1c>; + phy_access_mode = <0>; + mdiobus = <&mdio>; + }; +*/ +}; + +&serial_blsp4 { + pinctrl-0 = <&uart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&spi_0 { /* BLSP1 QUP1 */ + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + cs-select = <0>; + status = "ok"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + compatible = "n25q128a11"; + linux,modalias = "m25p80", "n25q128a11"; + spi-max-frequency = <50000000>; + use-default-sizes; + }; +}; +/* +&spi_4 { + pinctrl-0 = <&spi_3_pins>; + pinctrl-names = "default"; + cs-select = <2>; + quartz-reset-gpio = <&tlmm 21 1>; + status = "ok"; + spidev3: spi@3 { + compatible = "qca,spidev"; + reg = <0>; + spi-max-frequency = <24000000>; + }; +};*/ + +&serial_blsp2 { + pinctrl-0 = <&hsuart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&msm_imem { + status = "disabled"; +}; + +&ssphy_0 { + status = "ok"; +}; + +&qusb_phy_0 { + status = "ok"; +}; + +&ssphy_1 { + status = "ok"; +}; + +&qusb_phy_1 { + status = "ok"; +}; + +&usb3_0 { + status = "ok"; +}; + +&usb3_1 { + status = "ok"; +}; + +&cryptobam { + status = "ok"; +}; + +&crypto { + status = "ok"; +}; + +&i2c_0 { + status = "disabled"; +}; + +&i2c_1 { + status = "disabled"; +}; + +&qpic_bam { + status = "ok"; +}; + +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&qpic_lcd { + status = "disabled"; +}; + +&qpic_lcd_panel { + status = "disabled"; +}; + +&ledc { + status = "disabled"; +}; + +&pcie0 { + status = "disabled"; +}; + +&pcie1 { + status = "disabled"; +}; + diff --git a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf196.dts b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf196.dts index eb1f2a6ca..30ff25fc2 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf196.dts +++ 
b/feeds/ipq807x/ipq807x/files/arch/arm64/boot/dts/qcom/qcom-ipq807x-wf196.dts @@ -1,24 +1,45 @@ -// SPDX-License-Identifier: GPL-2.0-only /dts-v1/; -/* Copyright (c) 2020 The Linux Foundation. All rights reserved. - * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved. +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include "ipq8074.dtsi" -#include "ipq8074-hk-cpu.dtsi" +#include "qcom-ipq807x-soc.dtsi" +#include "qcom-ipq807x-audio.dtsi" +#include "qcom-ipq807x-hk-cpu.dtsi" / { #address-cells = <0x2>; #size-cells = <0x2>; model = "CIG WF196"; compatible = "cig,wf196", "qcom,ipq807x"; - qcom,msm-id = <0x157 0x0>, <0x187 0x0>; + qcom,msm-id = <0x187 0x0>; interrupt-parent = <&intc>; + qcom,board-id = <0x8 0x0>; + qcom,pmic-id = <0x0 0x0 0x0 0x0>; aliases { - serial0 = &blsp1_uart5; - /* Aliases as required by u-boot to patch MAC addresses */ + /* + * Aliases as required by u-boot + * to patch MAC addresses + */ ethernet0 = "/soc/dp1"; ethernet1 = "/soc/dp2"; + ethernet2 = "/soc/dp3"; + ethernet3 = "/soc/dp4"; + ethernet4 = "/soc/dp5"; + ethernet5 = "/soc/dp6"; + led-boot = &led_power; led-failsafe = &led_power; led-running = &led_power; @@ -26,7 +47,12 @@ }; chosen { - stdout-path = "serial0"; + bootargs = "console=ttyMSM0,115200,n8 root=/dev/ram0 rw init=/init"; + #ifdef __IPQ_MEM_PROFILE_256_MB__ + bootargs-append = " swiotlb=1"; + #else + bootargs-append = " swiotlb=1 coherent_pool=2M vmalloc=600M"; + #endif }; reserved-memory { @@ -57,27 +83,26 @@ * +--------+--------------+-------------------------+ * | M3 Dump| 0x4E800000 | 1MB | * +--------+--------------+-------------------------+ - * | Pine | 0x4E900000 | 38MB | + * | Pine0 | 0x4E900000 | 30MB | * +--------+--------------+-------------------------+ - * | MHI0 | 0x50F00000 | 9MB | + * | MHI0 | 0x50700000 | 16MB | * +--------+--------------+-------------------------+ * | | * | Rest of the memory for Linux | * | | * +=================================================+ */ - qcn9000_pcie0: qcn9000_pcie0@4e900000 { + /delete-node/ wifi_dump@4e900000; + qcn9000_pcie0@4e900000 { no-map; - reg = <0x0 0x4e900000 0x0 0x02600000>; + reg = <0x0 0x4e900000 0x0 0x01e00000>; }; -#if defined(__CNSS2__) - mhi_region0: dma_pool0@50F00000 { + mhi_region0: dma_pool0@50700000 { compatible = "shared-dma-pool"; no-map; - reg = <0x0 
0x50F00000 0x0 0x00900000>; + reg = <0x0 0x50700000 0x0 0x01000000>; }; -#endif #else /* Default Profile * +========+==============+=========================+ @@ -103,587 +128,869 @@ * +--------+--------------+-------------------------+ * | M3 Dump| 0x51000000 | 1MB | * +--------+--------------+-------------------------+ - * | Pine0 | 0x51100000 | 53MB | + * | Pine0 | 0x51100000 | 45MB | * +--------+--------------+-------------------------+ - * | MHI0 | 0x54600000 | 9MB | + * | MHI0 | 0x53E00000 | 24MB | * +--------+--------------+-------------------------+ * | | * | Rest of the memory for Linux | * | | * +=================================================+ */ - qcn9000_pcie0: qcn9000_pcie0@51100000 { + qcn9000_pcie0@51100000 { no-map; - reg = <0x0 0x51100000 0x0 0x03500000>; + reg = <0x0 0x51100000 0x0 0x02D00000>; }; + /delete-node/ wifi_dump@51100000; + /delete-node/ wigig_dump@51300000; -#if defined(__CNSS2__) - mhi_region0: dma_pool0@54600000 { + mhi_region0: dma_pool0@53e00000 { compatible = "shared-dma-pool"; no-map; - reg = <0x0 0x54600000 0x0 0x00900000>; + reg = <0x0 0x53E00000 0x0 0x01800000>; }; #endif -#endif #endif }; +}; - soc { - pinctrl@1000000 { - button_pins: button_pins { - wps_button { - pins = "gpio67"; - function = "gpio"; - drive-strength = <8>; - bias-pull-up; +&tlmm { + pinctrl-0 = <&btcoex_pins>; + pinctrl-names = "default"; + + btcoex_pins: btcoex_pins { + mux_0 { + pins = "gpio64"; + function = "pta1_1"; + drive-strength = <6>; + bias-pull-down; + }; + mux_1 { + pins = "gpio65"; + function = "pta1_2"; + drive-strength = <6>; + bias-pull-down; + }; + mux_2 { + pins = "gpio66"; + function = "pta1_0"; + drive-strength = <6>; + bias-pull-down; + }; + }; + + mdio_pins: mdio_pinmux { + mux_0 { + pins = "gpio68"; + function = "mdc"; + drive-strength = <8>; + bias-pull-up; + }; + mux_1 { + pins = "gpio69"; + function = "mdio"; + drive-strength = <8>; + bias-pull-up; + }; + mux_3 { + pins = "gpio44"; + function = "gpio"; + bias-pull-up; + }; + }; 
+ + uart_pins: uart_pins { + mux { + pins = "gpio23", "gpio24"; + function = "blsp4_uart1"; + drive-strength = <8>; + bias-disable; + }; + }; + i2c_5_pins: i2c_5_pinmux { + mux { + pins = "gpio0", "gpio2"; + function = "blsp5_i2c"; + drive-strength = <8>; + bias-disable; + }; + }; + + spi_0_pins: spi_0_pins { + mux { + pins = "gpio38", "gpio39", "gpio40", "gpio41"; + function = "blsp0_spi"; + drive-strength = <8>; + bias-disable; + }; + }; + + spi_3_pins: spi_3_pins { + mux { + pins = "gpio50", "gpio52", "gpio53"; + function = "blsp3_spi"; + drive-strength = <8>; + bias-disable; + }; + spi_cs { + pins = "gpio22"; + function = "blsp3_spi2"; + drive-strength = <8>; + bias-disable; + }; + quartz_interrupt { + pins = "gpio47"; + function = "gpio"; + input; + bias-disable; + }; + quartz_reset { + pins = "gpio21"; + function = "gpio"; + output-low; + bias-disable; + }; + }; + + qpic_pins: qpic_pins { + data_0 { + pins = "gpio15"; + function = "qpic_pad0"; + drive-strength = <8>; + bias-disable; + }; + data_1 { + pins = "gpio12"; + function = "qpic_pad1"; + drive-strength = <8>; + bias-disable; + }; + data_2 { + pins = "gpio13"; + function = "qpic_pad2"; + drive-strength = <8>; + bias-disable; + }; + data_3 { + pins = "gpio14"; + function = "qpic_pad3"; + drive-strength = <8>; + bias-disable; + }; + data_4 { + pins = "gpio5"; + function = "qpic_pad4"; + drive-strength = <8>; + bias-disable; + }; + data_5 { + pins = "gpio6"; + function = "qpic_pad5"; + drive-strength = <8>; + bias-disable; + }; + data_6 { + pins = "gpio7"; + function = "qpic_pad6"; + drive-strength = <8>; + bias-disable; + }; + data_7 { + pins = "gpio8"; + function = "qpic_pad7"; + drive-strength = <8>; + bias-disable; + }; + qpic_pad { + pins = "gpio1", "gpio3", "gpio4", + "gpio10", "gpio11", "gpio17"; + function = "qpic_pad"; + drive-strength = <8>; + bias-disable; + }; + }; + + hsuart_pins: hsuart_pins { + mux { + pins = "gpio46", "gpio47", "gpio48", "gpio49"; + function = "blsp2_uart"; + drive-strength 
= <8>; + bias-disable; + }; + }; + + button_pins: button_pins { + + wps_button { + pins = "gpio67"; + function = "gpio"; + drive-strength = <8>; + bias-pull-up; + }; + }; + + led_pins: led_pins { + led_red { + pins = "gpio25"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + + led_green { + pins = "gpio26"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + + led_blue { + pins = "gpio27"; + function = "gpio"; + drive-strength = <8>; + bias-pull-down; + }; + }; + + pcie0_pins: pcie_pins { + pcie0_rst { + pins = "gpio58"; + function = "pcie0_rst"; + drive-strength = <8>; + bias-pull-down; + }; + pcie0_wake { + pins = "gpio59"; + function = "pcie0_wake"; + drive-strength = <8>; + bias-pull-down; + }; + }; + pwm_pins: pwm_pinmux { + mux_1 { + pins = "gpio25"; + function = "pwm02"; + drive-strength = <8>; + }; + mux_2 { + pins = "gpio26"; + function = "pwm12"; + drive-strength = <8>; + }; + mux_3 { + pins = "gpio27"; + function = "pwm22"; + drive-strength = <8>; + }; + }; + +}; + +&soc { + pwm { + pinctrl-0 = <&pwm_pins>; + pinctrl-names = "default"; + used-pwm-indices = <1>, <1>, <1>, <0>; + // status = "ok"; + }; + gpio_keys { + compatible = "gpio-keys"; + pinctrl-0 = <&button_pins>; + pinctrl-names = "default"; + + button@1 { + label = "reset"; + linux,code = ; + gpios = <&tlmm 67 GPIO_ACTIVE_LOW>; + linux,input-type = <1>; + debounce-interval = <60>; + }; + }; + + mdio: mdio@90000 { + pinctrl-0 = <&mdio_pins>; + pinctrl-names = "default"; + phy-reset-gpio = <&tlmm 64 0 &tlmm 66 0>; + compatible = "qcom,ipq40xx-mdio", "qcom,qca-mdio"; + phy0: ethernet-phy@0 { + reg = <16>; + }; + phy1: ethernet-phy@1 { + reg = <17>; + }; + phy2: ethernet-phy@2 { + reg = <18>; + }; + phy3: ethernet-phy@3 { + reg = <19>; + }; + phy4: ethernet-phy@4 { + compatible ="ethernet-phy-ieee802.3-c45"; + reg = <0>; + }; + phy5: ethernet-phy@5 { + reg = <28>; + }; + }; + + ess-switch@3a000000 { + switch_cpu_bmp = <0x1>; /* cpu port bitmap */ + 
switch_lan_bmp = <0x3e>; /* lan port bitmap */ + switch_wan_bmp = <0xc0>; /* wan port bitmap */ + switch_mac_mode = <0xff>; /* mac mode for uniphy instance0*/ + switch_mac_mode1 = <0xd>; /* mac mode for uniphy instance1*/ + switch_mac_mode2 = <0xf>; /* mac mode for uniphy instance2*/ + bm_tick_mode = <0>; /* bm tick mode */ + tm_tick_mode = <0>; /* tm tick mode */ + qcom,port_phyinfo { + port@0 { + port_id = <1>; + phy_address = <16>; + }; + port@1 { + port_id = <2>; + phy_address = <17>; + }; + port@2 { + port_id = <3>; + phy_address = <18>; + }; + port@3 { + port_id = <4>; + phy_address = <19>; + }; + port@4 { + port_id = <5>; + phy_address = <0>; + port_mac_sel = "QGMAC_PORT"; + ethernet-phy-ieee802.3-c45; + }; + port@5 { + port_id = <6>; + phy_address = <28>; + port_mac_sel = "QGMAC_PORT"; + }; + }; + port_scheduler_resource { + port@0 { + port_id = <0>; + ucast_queue = <0 143>; + mcast_queue = <256 271>; + l0sp = <0 35>; + l0cdrr = <0 47>; + l0edrr = <0 47>; + l1cdrr = <0 7>; + l1edrr = <0 7>; + }; + port@1 { + port_id = <1>; + ucast_queue = <144 159>; + mcast_queue = <272 275>; + l0sp = <36 39>; + l0cdrr = <48 63>; + l0edrr = <48 63>; + l1cdrr = <8 11>; + l1edrr = <8 11>; + }; + port@2 { + port_id = <2>; + ucast_queue = <160 175>; + mcast_queue = <276 279>; + l0sp = <40 43>; + l0cdrr = <64 79>; + l0edrr = <64 79>; + l1cdrr = <12 15>; + l1edrr = <12 15>; + }; + port@3 { + port_id = <3>; + ucast_queue = <176 191>; + mcast_queue = <280 283>; + l0sp = <44 47>; + l0cdrr = <80 95>; + l0edrr = <80 95>; + l1cdrr = <16 19>; + l1edrr = <16 19>; + }; + port@4 { + port_id = <4>; + ucast_queue = <192 207>; + mcast_queue = <284 287>; + l0sp = <48 51>; + l0cdrr = <96 111>; + l0edrr = <96 111>; + l1cdrr = <20 23>; + l1edrr = <20 23>; + }; + port@5 { + port_id = <5>; + ucast_queue = <208 223>; + mcast_queue = <288 291>; + l0sp = <52 55>; + l0cdrr = <112 127>; + l0edrr = <112 127>; + l1cdrr = <24 27>; + l1edrr = <24 27>; + }; + port@6 { + port_id = <6>; + ucast_queue = <224 
239>; + mcast_queue = <292 295>; + l0sp = <56 59>; + l0cdrr = <128 143>; + l0edrr = <128 143>; + l1cdrr = <28 31>; + l1edrr = <28 31>; + }; + port@7 { + port_id = <7>; + ucast_queue = <240 255>; + mcast_queue = <296 299>; + l0sp = <60 63>; + l0cdrr = <144 159>; + l0edrr = <144 159>; + l1cdrr = <32 35>; + l1edrr = <32 35>; + }; + }; + port_scheduler_config { + port@0 { + port_id = <0>; + l1scheduler { + group@0 { + sp = <0 1>; /*L0 SPs*/ + /*cpri cdrr epri edrr*/ + cfg = <0 0 0 0>; + }; + }; + l0scheduler { + group@0 { + /*unicast queues*/ + ucast_queue = <0 4 8>; + /*multicast queues*/ + mcast_queue = <256 260>; + /*sp cpri cdrr epri edrr*/ + cfg = <0 0 0 0 0>; + }; + group@1 { + ucast_queue = <1 5 9>; + mcast_queue = <257 261>; + cfg = <0 1 1 1 1>; + }; + group@2 { + ucast_queue = <2 6 10>; + mcast_queue = <258 262>; + cfg = <0 2 2 2 2>; + }; + group@3 { + ucast_queue = <3 7 11>; + mcast_queue = <259 263>; + cfg = <0 3 3 3 3>; + }; }; }; - - pcie0_pins: pcie_pins { - pcie0_rst { - pins = "gpio58"; - function = "pcie0_rst"; - drive-strength = <8>; - bias-pull-down; + port@1 { + port_id = <1>; + l1scheduler { + group@0 { + sp = <36>; + cfg = <0 8 0 8>; + }; + group@1 { + sp = <37>; + cfg = <1 9 1 9>; + }; }; - pcie0_wake { - pins = "gpio59"; - function = "pcie0_wake"; - drive-strength = <8>; - bias-pull-down; + l0scheduler { + group@0 { + ucast_queue = <144>; + ucast_loop_pri = <16>; + mcast_queue = <272>; + mcast_loop_pri = <4>; + cfg = <36 0 48 0 48>; + }; }; }; - - mdio_pins: mdio_pinmux { - mux_0 { - pins = "gpio68"; - function = "mdc"; - drive-strength = <8>; - bias-pull-up; + port@2 { + port_id = <2>; + l1scheduler { + group@0 { + sp = <40>; + cfg = <0 12 0 12>; + }; + group@1 { + sp = <41>; + cfg = <1 13 1 13>; + }; }; - mux_1 { - pins = "gpio69"; - function = "mdio"; - drive-strength = <8>; - bias-pull-up; - }; - mux_2 { - pins = "gpio44"; - function = "gpio"; - bias-pull-up; + l0scheduler { + group@0 { + ucast_queue = <160>; + ucast_loop_pri = <16>; + 
mcast_queue = <276>; + mcast_loop_pri = <4>; + cfg = <40 0 64 0 64>; + }; }; }; - - uniphy_pins: uniphy_pinmux { - mux { - pins = "gpio60"; - function = "rx2"; - bias-disable; + port@3 { + port_id = <3>; + l1scheduler { + group@0 { + sp = <44>; + cfg = <0 16 0 16>; + }; + group@1 { + sp = <45>; + cfg = <1 17 1 17>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <176>; + ucast_loop_pri = <16>; + mcast_queue = <280>; + mcast_loop_pri = <4>; + cfg = <44 0 80 0 80>; + }; }; }; - - led_pins: led_pins { - led_red { - pins = "gpio25"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; + port@4 { + port_id = <4>; + l1scheduler { + group@0 { + sp = <48>; + cfg = <0 20 0 20>; + }; + group@1 { + sp = <49>; + cfg = <1 21 1 21>; + }; }; - - led_green { - pins = "gpio26"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; + l0scheduler { + group@0 { + ucast_queue = <192>; + ucast_loop_pri = <16>; + mcast_queue = <284>; + mcast_loop_pri = <4>; + cfg = <48 0 96 0 96>; + }; }; - - led_blue { - pins = "gpio27"; - function = "gpio"; - drive-strength = <8>; - bias-pull-down; + }; + port@5 { + port_id = <5>; + l1scheduler { + group@0 { + sp = <52>; + cfg = <0 24 0 24>; + }; + group@1 { + sp = <53>; + cfg = <1 25 1 25>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <208>; + ucast_loop_pri = <16>; + mcast_queue = <288>; + mcast_loop_pri = <4>; + cfg = <52 0 112 0 112>; + }; + }; + }; + port@6 { + port_id = <6>; + l1scheduler { + group@0 { + sp = <56>; + cfg = <0 28 0 28>; + }; + group@1 { + sp = <57>; + cfg = <1 29 1 29>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <224>; + ucast_loop_pri = <16>; + mcast_queue = <292>; + mcast_loop_pri = <4>; + cfg = <56 0 128 0 128>; + }; + }; + }; + port@7 { + port_id = <7>; + l1scheduler { + group@0 { + sp = <60>; + cfg = <0 32 0 32>; + }; + group@1 { + sp = <61>; + cfg = <1 33 1 33>; + }; + }; + l0scheduler { + group@0 { + ucast_queue = <240>; + ucast_loop_pri = <16>; + mcast_queue = <296>; + cfg = <60 
0 144 0 144>; + }; }; }; }; + }; - serial@78b3000 { + dp1 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <1>; + reg = <0x3a001000 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <16>; + phy-mode = "sgmii"; + }; + + dp2 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <2>; + reg = <0x3a001200 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <17>; + phy-mode = "sgmii"; + }; + + dp3 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <3>; + reg = <0x3a001400 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <18>; + phy-mode = "sgmii"; + }; + + dp4 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <4>; + reg = <0x3a001600 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <19>; + phy-mode = "sgmii"; + }; + + dp5 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <5>; + reg = <0x3a001800 0x200>; + qcom,mactype = <1>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <0>; + phy-mode = "sgmii"; + }; + + dp6 { + device_type = "network"; + compatible = "qcom,nss-dp"; + qcom,id = <6>; + reg = <0x3a001a00 0x200>; + qcom,mactype = <0>; + local-mac-address = [000000000000]; + qcom,link-poll = <1>; + qcom,phy-mdio-addr = <28>; + phy-mode = "sgmii"; + }; + + leds { + compatible = "gpio-leds"; + pinctrl-0 = <&led_pins>; + pinctrl-names = "default"; + + led_red { + label = "red:status"; + gpio = <&tlmm 25 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + + led_power: led_green { + label = "green:status"; + gpio = <&tlmm 26 GPIO_ACTIVE_HIGH>; + default-state = "on"; + }; + + led_blue { + label = "blue:status"; + gpio = <&tlmm 27 GPIO_ACTIVE_HIGH>; + default-state = "off"; + }; + }; + 
nss-macsec0 { + compatible = "qcom,nss-macsec"; + phy_addr = <0>; + phy_access_mode = <0>; + mdiobus = <&mdio>; + }; + nss-macsec1 { + compatible = "qcom,nss-macsec"; + phy_addr = <0x1c>; + phy_access_mode = <0>; + mdiobus = <&mdio>; + }; + i2c_5: i2c@78ba000 { + compatible = "qcom,i2c-qup-v2.2.1"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x78ba000 0x600>; + interrupts = ; + clocks = <&gcc GCC_BLSP1_AHB_CLK>, + <&gcc GCC_BLSP1_QUP6_I2C_APPS_CLK>; + clock-names = "iface", "core"; + clock-frequency = <100000>; + dmas = <&blsp_dma 23>, <&blsp_dma 22>; + dma-names = "rx", "tx"; + }; +}; + +&serial_blsp4 { + pinctrl-0 = <&uart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&spi_0 { /* BLSP1 QUP1 */ + pinctrl-0 = <&spi_0_pins>; + pinctrl-names = "default"; + cs-select = <0>; + status = "ok"; + + m25p80@0 { + #address-cells = <1>; + #size-cells = <1>; + reg = <0>; + compatible = "n25q128a11"; + linux,modalias = "m25p80", "n25q128a11"; + spi-max-frequency = <50000000>; + use-default-sizes; + }; +}; + +&serial_blsp2 { + pinctrl-0 = <&hsuart_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&msm_imem { + status = "disabled"; +}; + +&ssphy_0 { + status = "ok"; +}; + +&qusb_phy_0 { + status = "ok"; +}; + +&usb3_0 { + status = "ok"; +}; + +&cryptobam { + status = "ok"; +}; + +&crypto { + status = "ok"; +}; + +&i2c_0 { + status = "disabled"; +}; + +&i2c_1 { + status = "disabled"; +}; + +&i2c_5 { + pinctrl-0 = <&i2c_5_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&qpic_bam { + status = "ok"; +}; + +&nand { + pinctrl-0 = <&qpic_pins>; + pinctrl-names = "default"; + status = "ok"; +}; + +&qpic_lcd { + status = "disabled"; +}; + +&qpic_lcd_panel { + status = "disabled"; +}; + +&pcie0 { + status = "ok"; +}; + + +&pcie0_rp { + status = "ok"; + + mhi_0: qcom,mhi@0 { + reg = <0 0 0 0 0 >; + qrtr_instance_id = <0x20>; + #address-cells = <0x2>; + #size-cells = <0x2>; +#if !defined(__IPQ_MEM_PROFILE_256_MB__) + memory-region = 
<&mhi_region0>; +#endif + +#if !defined(__CNSS2__) + base-addr = <0x51100000>; + m3-dump-addr = <0x53400000>; + etr-addr = <0x53500000>; + qcom,caldb-addr = <0x53600000>; + mhi,max-channels = <30>; + mhi,timeout = <10000>; + qcom,board_id= <0xa4>; + + pcie0_mhi: pcie0_mhi { status = "ok"; }; +#endif + }; +}; - spi@78b5000 { - status = "ok"; - pinctrl-0 = <&spi_0_pins>; - pinctrl-names = "default"; - cs-select = <0>; +&pcie1_rp { + status = "disabled"; - m25p80@0 { - compatible = "n25q128a11"; - #address-cells = <1>; - #size-cells = <1>; - reg = <0>; - spi-max-frequency = <50000000>; - }; - }; - - dma@7984000 { - status = "ok"; - }; - - nand@79b0000 { - status = "ok"; - - nand@0 { - reg = <0>; - #address-cells = <1>; - #size-cells = <1>; - - nand-ecc-strength = <4>; - nand-ecc-step-size = <512>; - nand-bus-width = <8>; - }; - }; - - qusb@79000 { - status = "ok"; - }; - - ssphy@78000 { - status = "ok"; - }; - - usb3@8A00000 { - status = "ok"; - }; - - qcom,test@0 { - status = "ok"; - }; - - phy@84000 { - status = "ok"; - }; - - phy@86000 { - status = "ok"; - }; - - pci@20000000 { - perst-gpio = <&tlmm 58 1>; - status = "ok"; - - pcie0_rp: pcie0_rp { - reg = <0 0 0 0 0>; - }; - }; - - phy@8e000 { - status = "ok"; - }; - - pci@10000000 { - perst-gpio = <&tlmm 61 0x1>; - status = "ok"; - }; - - mdio@90000 { - pinctrl-0 = <&mdio_pins>; - pinctrl-names = "default"; - phy-reset-gpio = <&tlmm 64 0 &tlmm 66 0>; - phy0: ethernet-phy@0 { - reg = <16>; - }; - phy1: ethernet-phy@1 { - reg = <17>; - }; - phy2: ethernet-phy@2 { - reg = <18>; - }; - phy3: ethernet-phy@3 { - reg = <19>; - }; - phy4: ethernet-phy@4 { - compatible ="ethernet-phy-ieee802.3-c45"; - reg = <0>; - }; - phy5: ethernet-phy@5 { - reg = <28>; - }; - }; - - ess-switch@3a000000 { - pinctrl-0 = <&uniphy_pins>; - pinctrl-names = "default"; - switch_cpu_bmp = <0x1>; /* cpu port bitmap */ - switch_lan_bmp = <0x3e>; /* lan port bitmap */ - switch_wan_bmp = <0xc0>; /* wan port bitmap */ - switch_mac_mode = <0xff>; /* 
mac mode for uniphy instance0*/ - switch_mac_mode1 = <0xd>; /* mac mode for uniphy instance1*/ - switch_mac_mode2 = <0xf>; /* mac mode for uniphy instance2*/ - bm_tick_mode = <0>; /* bm tick mode */ - tm_tick_mode = <0>; /* tm tick mode */ - qcom,port_phyinfo { - port@0 { - port_id = <1>; - phy_address = <16>; - }; - port@1 { - port_id = <2>; - phy_address = <17>; - }; - port@2 { - port_id = <3>; - phy_address = <18>; - }; - port@3 { - port_id = <4>; - phy_address = <19>; - }; - port@4 { - port_id = <5>; - phy_address = <0>; - port_mac_sel = "QGMAC_PORT"; - ethernet-phy-ieee802.3-c45; - }; - port@5 { - port_id = <6>; - phy_address = <28>; - port_mac_sel = "QGMAC_PORT"; - }; - }; - port_scheduler_resource { - port@0 { - port_id = <0>; - ucast_queue = <0 143>; - mcast_queue = <256 271>; - l0sp = <0 35>; - l0cdrr = <0 47>; - l0edrr = <0 47>; - l1cdrr = <0 7>; - l1edrr = <0 7>; - }; - port@1 { - port_id = <1>; - ucast_queue = <144 159>; - mcast_queue = <272 275>; - l0sp = <36 39>; - l0cdrr = <48 63>; - l0edrr = <48 63>; - l1cdrr = <8 11>; - l1edrr = <8 11>; - }; - port@2 { - port_id = <2>; - ucast_queue = <160 175>; - mcast_queue = <276 279>; - l0sp = <40 43>; - l0cdrr = <64 79>; - l0edrr = <64 79>; - l1cdrr = <12 15>; - l1edrr = <12 15>; - }; - port@3 { - port_id = <3>; - ucast_queue = <176 191>; - mcast_queue = <280 283>; - l0sp = <44 47>; - l0cdrr = <80 95>; - l0edrr = <80 95>; - l1cdrr = <16 19>; - l1edrr = <16 19>; - }; - port@4 { - port_id = <4>; - ucast_queue = <192 207>; - mcast_queue = <284 287>; - l0sp = <48 51>; - l0cdrr = <96 111>; - l0edrr = <96 111>; - l1cdrr = <20 23>; - l1edrr = <20 23>; - }; - port@5 { - port_id = <5>; - ucast_queue = <208 223>; - mcast_queue = <288 291>; - l0sp = <52 55>; - l0cdrr = <112 127>; - l0edrr = <112 127>; - l1cdrr = <24 27>; - l1edrr = <24 27>; - }; - port@6 { - port_id = <6>; - ucast_queue = <224 239>; - mcast_queue = <292 295>; - l0sp = <56 59>; - l0cdrr = <128 143>; - l0edrr = <128 143>; - l1cdrr = <28 31>; - l1edrr = <28 
31>; - }; - port@7 { - port_id = <7>; - ucast_queue = <240 255>; - mcast_queue = <296 299>; - l0sp = <60 63>; - l0cdrr = <144 159>; - l0edrr = <144 159>; - l1cdrr = <32 35>; - l1edrr = <32 35>; - }; - }; - port_scheduler_config { - port@0 { - port_id = <0>; - l1scheduler { - group@0 { - sp = <0 1>; /*L0 SPs*/ - /*cpri cdrr epri edrr*/ - cfg = <0 0 0 0>; - }; - }; - l0scheduler { - group@0 { - /*unicast queues*/ - ucast_queue = <0 4 8>; - /*multicast queues*/ - mcast_queue = <256 260>; - /*sp cpri cdrr epri edrr*/ - cfg = <0 0 0 0 0>; - }; - group@1 { - ucast_queue = <1 5 9>; - mcast_queue = <257 261>; - cfg = <0 1 1 1 1>; - }; - group@2 { - ucast_queue = <2 6 10>; - mcast_queue = <258 262>; - cfg = <0 2 2 2 2>; - }; - group@3 { - ucast_queue = <3 7 11>; - mcast_queue = <259 263>; - cfg = <0 3 3 3 3>; - }; - }; - }; - port@1 { - port_id = <1>; - l1scheduler { - group@0 { - sp = <36>; - cfg = <0 8 0 8>; - }; - group@1 { - sp = <37>; - cfg = <1 9 1 9>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <144>; - ucast_loop_pri = <16>; - mcast_queue = <272>; - mcast_loop_pri = <4>; - cfg = <36 0 48 0 48>; - }; - }; - }; - port@2 { - port_id = <2>; - l1scheduler { - group@0 { - sp = <40>; - cfg = <0 12 0 12>; - }; - group@1 { - sp = <41>; - cfg = <1 13 1 13>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <160>; - ucast_loop_pri = <16>; - mcast_queue = <276>; - mcast_loop_pri = <4>; - cfg = <40 0 64 0 64>; - }; - }; - }; - port@3 { - port_id = <3>; - l1scheduler { - group@0 { - sp = <44>; - cfg = <0 16 0 16>; - }; - group@1 { - sp = <45>; - cfg = <1 17 1 17>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <176>; - ucast_loop_pri = <16>; - mcast_queue = <280>; - mcast_loop_pri = <4>; - cfg = <44 0 80 0 80>; - }; - }; - }; - port@4 { - port_id = <4>; - l1scheduler { - group@0 { - sp = <48>; - cfg = <0 20 0 20>; - }; - group@1 { - sp = <49>; - cfg = <1 21 1 21>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <192>; - ucast_loop_pri = <16>; - 
mcast_queue = <284>; - mcast_loop_pri = <4>; - cfg = <48 0 96 0 96>; - }; - }; - }; - port@5 { - port_id = <5>; - l1scheduler { - group@0 { - sp = <52>; - cfg = <0 24 0 24>; - }; - group@1 { - sp = <53>; - cfg = <1 25 1 25>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <208>; - ucast_loop_pri = <16>; - mcast_queue = <288>; - mcast_loop_pri = <4>; - cfg = <52 0 112 0 112>; - }; - }; - }; - port@6 { - port_id = <6>; - l1scheduler { - group@0 { - sp = <56>; - cfg = <0 28 0 28>; - }; - group@1 { - sp = <57>; - cfg = <1 29 1 29>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <224>; - ucast_loop_pri = <16>; - mcast_queue = <292>; - mcast_loop_pri = <4>; - cfg = <56 0 128 0 128>; - }; - }; - }; - port@7 { - port_id = <7>; - l1scheduler { - group@0 { - sp = <60>; - cfg = <0 32 0 32>; - }; - group@1 { - sp = <61>; - cfg = <1 33 1 33>; - }; - }; - l0scheduler { - group@0 { - ucast_queue = <240>; - ucast_loop_pri = <16>; - mcast_queue = <296>; - cfg = <60 0 144 0 144>; - }; - }; - }; - }; - }; - gpio_keys { - compatible = "gpio-keys"; - pinctrl-0 = <&button_pins>; - pinctrl-names = "default"; - status = "ok"; - - button@1 { - label = "reset"; - linux,code = ; - gpios = <&tlmm 67 GPIO_ACTIVE_LOW>; - linux,input-type = <1>; - debounce-interval = <60>; - }; - }; - - leds { - compatible = "gpio-leds"; - pinctrl-0 = <&led_pins>; - pinctrl-names = "default"; - - led_red { - label = "red:status"; - gpio = <&tlmm 25 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; - - led_power: led_green { - label = "green:status"; - gpio = <&tlmm 26 GPIO_ACTIVE_HIGH>; - default-state = "on"; - }; - - led_blue { - label = "blue:status"; - gpio = <&tlmm 27 GPIO_ACTIVE_HIGH>; - default-state = "off"; - }; - }; - - dp1 { - device_type = "network"; - compatible = "qcom,nss-dp"; - qcom,id = <5>; - reg = <0x3a001800 0x200>; - qcom,mactype = <1>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <0>; - phy-mode = "sgmii"; - }; - - dp2 { - device_type = 
"network"; - compatible = "qcom,nss-dp"; - qcom,id = <6>; - reg = <0x3a001a00 0x200>; - qcom,mactype = <0>; - local-mac-address = [000000000000]; - qcom,link-poll = <1>; - qcom,phy-mdio-addr = <28>; - phy-mode = "sgmii"; + mhi_1: qcom,mhi@1 { + pcie1_mhi: pcie1_mhi { + status = "disabled"; }; }; }; @@ -704,15 +1011,12 @@ }; #endif -&wifi0 { - status = "disabled"; -}; - &wifi1 { qcom,board_id = <0x294>; status = "ok"; }; +/* No support for QCN9000 in 256M profile */ #if !defined(__IPQ_MEM_PROFILE_256_MB__) &wifi2 { #ifdef __IPQ_MEM_PROFILE_512_MB__ @@ -727,15 +1031,12 @@ * | ETR | 0x4FE00000 | 1MB | * +---------+--------------+---------+ * | Caldb | 0x4FF00000 | 8MB | - * +---------+--------------+---------+ - * | Pageable| 0x50700000 | 8MB | * +==================================+ */ base-addr = <0x4E900000>; m3-dump-addr = <0x4FD00000>; etr-addr = <0x4FE00000>; caldb-addr = <0x4FF00000>; - pageable-addr = <0x50700000>; hremote-size = <0x1400000>; tgt-mem-mode = <0x1>; #else @@ -745,66 +1046,32 @@ * +---------+--------------+---------+ * | HREMOTE | 0x51100000 | 35MB | * +---------+--------------+---------+ - * | M3 DUMP | 0x53400000 | 1MB | + * | M3 Dump | 0x53400000 | 1MB | * +---------+--------------+---------+ * | ETR | 0x53500000 | 1MB | * +---------+--------------+---------+ * | Caldb | 0x53600000 | 8MB | - * +---------+--------------+---------+ - * | Pageable| 0x53E00000 | 8MB | * +==================================+ */ base-addr = <0x51100000>; m3-dump-addr = <0x53400000>; etr-addr = <0x53500000>; caldb-addr = <0x53600000>; - pageable-addr = <0x53E00000>; hremote-size = <0x2300000>; tgt-mem-mode = <0x0>; #endif caldb-size = <0x800000>; - hremote_node = <&qcn9000_pcie0>; - pageable-size = <0x800000>; board_id = <0xa4>; status = "ok"; -}; +#if defined(__CNSS2__) + pcie0_mhi: pcie0_mhi { + status = "ok"; + }; #endif - +}; &wifi3 { status = "disabled"; }; -&pcie0_rp { - status = "ok"; - - mhi_0: qcom,mhi@0 { - reg = <0 0 0 0 0 >; - qrtr_instance_id = <0x20>; - 
#address-cells = <0x2>; - #size-cells = <0x2>; -#if defined(__CNSS2__) -#if !defined(__IPQ_MEM_PROFILE_256_MB__) - memory-region = <&mhi_region0>; +#include "qcom-ipq807x-mhi.dtsi" #endif -#else -#ifdef __IPQ_MEM_PROFILE_512_MB__ - base-addr = <0x4E900000>; - m3-dump-addr = <0x4FD00000>; - etr-addr = <0x4FE00000>; - qcom,caldb-addr = <0x4FF00000>; - pageable-addr = <0x50700000>; -#else - base-addr = <0x51100000>; - m3-dump-addr = <0x53400000>; - etr-addr = <0x53500000>; - qcom,caldb-addr = <0x53600000>; - pageable-addr = <0x53E00000>; -#endif - qcom,board_id= <0xa4>; - - pcie0_mhi: pcie0_mhi { - status = "ok"; - }; -#endif - }; -}; diff --git a/feeds/ipq807x/ipq807x/image/ipq50xx.mk b/feeds/ipq807x/ipq807x/image/ipq50xx.mk index 3bf1c53c8..87b599c5f 100644 --- a/feeds/ipq807x/ipq807x/image/ipq50xx.mk +++ b/feeds/ipq807x/ipq807x/image/ipq50xx.mk @@ -36,7 +36,7 @@ define Device/qcom_mp03_1 DEVICE_PACKAGES := ath11k-wifi-qcom-ipq5018 DEVICE_DTS_CONFIG := config@mp03.1 endef -#TARGET_DEVICES += qcom_mp03_1 +TARGET_DEVICES += qcom_mp03_1 define Device/qcom_mp03_3 DEVICE_TITLE := Qualcomm Maple 03.3 @@ -45,4 +45,4 @@ define Device/qcom_mp03_3 DEVICE_PACKAGES := ath11k-wifi-qcom-ipq5018 DEVICE_DTS_CONFIG := config@mp03.3 endef -#TARGET_DEVICES += qcom_mp03_3 +TARGET_DEVICES += qcom_mp03_3 diff --git a/feeds/ipq807x/ipq807x/image/ipq60xx.mk b/feeds/ipq807x/ipq807x/image/ipq60xx.mk index 8b2b1f678..23057a99e 100644 --- a/feeds/ipq807x/ipq807x/image/ipq60xx.mk +++ b/feeds/ipq807x/ipq807x/image/ipq60xx.mk @@ -50,10 +50,18 @@ define Device/wallys_dr6018_v4 DEVICE_DTS := qcom-ipq6018-wallys-dr6018-v4 DEVICE_DTS_CONFIG := config@cp01-c4 SUPPORTED_DEVICES := wallys,dr6018-v4 - DEVICE_PACKAGES := ath11k-wifi-wallys-dr6018-v4 uboot-envtools ath11k-firmware-qcn9000 + DEVICE_PACKAGES := ath11k-wifi-wallys-dr6018-v4 uboot-envtools endef TARGET_DEVICES += wallys_dr6018_v4 +define Device/qcom_cp01_c1 + DEVICE_TITLE := Qualcomm Cypress C1 + DEVICE_DTS := qcom-ipq6018-cp01-c1 + 
SUPPORTED_DEVICES := qcom,ipq6018-cp01 + DEVICE_PACKAGES := ath11k-wifi-qcom-ipq6018 +endef +TARGET_DEVICES += qcom_cp01_c1 + define Device/glinet_ax1800 DEVICE_TITLE := GL-iNet AX1800 DEVICE_DTS := qcom-ipq6018-gl-ax1800 diff --git a/feeds/ipq807x/ipq807x/image/ipq807x.mk b/feeds/ipq807x/ipq807x/image/ipq807x.mk index 631c95616..c32074f73 100644 --- a/feeds/ipq807x/ipq807x/image/ipq807x.mk +++ b/feeds/ipq807x/ipq807x/image/ipq807x.mk @@ -2,16 +2,16 @@ KERNEL_LOADADDR := 0x41208000 define Device/qcom_hk01 DEVICE_TITLE := Qualcomm Hawkeye HK01 - DEVICE_DTS := ipq8074-hk01 + DEVICE_DTS := qcom-ipq807x-hk01 DEVICE_DTS_CONFIG=config@hk01 SUPPORTED_DEVICES := qcom,ipq807x-hk01 DEVICE_PACKAGES := ath11k-wifi-qcom-ipq8074 endef -#TARGET_DEVICES += qcom_hk01 +TARGET_DEVICES += qcom_hk01 define Device/qcom_hk14 DEVICE_TITLE := Qualcomm Hawkeye HK14 - DEVICE_DTS := ipq8074-hk14 + DEVICE_DTS := qcom-ipq807x-hk14 DEVICE_DTS_CONFIG=config@hk14 SUPPORTED_DEVICES := qcom,ipq807x-hk14 DEVICE_PACKAGES := ath11k-wifi-qcom-ipq8074 kmod-ath11k-pci ath11k-firmware-qcn9000 @@ -41,6 +41,8 @@ define Device/cig_wf196 DEVICE_DTS := qcom-ipq807x-wf196 DEVICE_DTS_CONFIG=config@hk14 SUPPORTED_DEVICES := cig,wf196 + BLOCKSIZE := 256k + PAGESIZE := 4096 DEVICE_PACKAGES := ath11k-wifi-cig-wf196 aq-fw-download uboot-envtools kmod-usb3 kmod-usb2 \ ath11k-firmware-qcn9000 ath11k-wifi-cig-wf196_6g endef diff --git a/feeds/ipq807x/ipq807x/ipq50xx/config-5.4 b/feeds/ipq807x/ipq807x/ipq50xx/config-5.4 deleted file mode 100644 index 5a6f77d3a..000000000 --- a/feeds/ipq807x/ipq807x/ipq50xx/config-5.4 +++ /dev/null @@ -1,1266 +0,0 @@ -# CONFIG_AC97_BUS is not set -# CONFIG_AC97_BUS_NEW is not set -# CONFIG_AD7124 is not set -# CONFIG_AD7606_IFACE_PARALLEL is not set -# CONFIG_AD7606_IFACE_SPI is not set -# CONFIG_AD7768_1 is not set -# CONFIG_AD7949 is not set -# CONFIG_ADF4371 is not set -# CONFIG_ADIN_PHY is not set -# CONFIG_ADIS16460 is not set -# CONFIG_ADXL345_I2C is not set -# CONFIG_ADXL345_SPI is 
not set -# CONFIG_ADXL372_I2C is not set -# CONFIG_ADXL372_SPI is not set -# CONFIG_AHCI_IPQ is not set -CONFIG_ALIGNMENT_TRAP=y -# CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS is not set -# CONFIG_ALLOC_SKB_PAGE_FRAG_DISABLE is not set -# CONFIG_AL_FIC is not set -# CONFIG_AMBA_PL08X is not set -# CONFIG_ANDROID is not set -# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set -# CONFIG_ANDROID_TIMED_OUTPUT is not set -# CONFIG_APM_EMULATION is not set -# CONFIG_APQ_GCC_8084 is not set -# CONFIG_APQ_MMCC_8084 is not set -CONFIG_AQUANTIA_PHY=y -# CONFIG_AR8216_PHY is not set -# CONFIG_ARCHES is not set -CONFIG_ARCH_32BIT_OFF_T=y -# CONFIG_ARCH_AGILEX is not set -# CONFIG_ARCH_BITMAIN is not set -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y -CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN=y -CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -# CONFIG_ARCH_HAS_KCOV is not set -CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y -CONFIG_ARCH_HAS_PHYS_TO_DMA=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_ARCH_HAS_SG_CHAIN=y -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y -CONFIG_ARCH_HAS_TICK_BROADCAST=y -CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y -CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -# CONFIG_ARCH_IPQ6018 is not set -# CONFIG_ARCH_IPQ9574 is not set -CONFIG_ARCH_KEEP_MEMBLOCK=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -# CONFIG_ARCH_MDM9615 is not set -CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y -# CONFIG_ARCH_MILBEAUT is not set -CONFIG_ARCH_MMAP_RND_BITS=18 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 -# CONFIG_ARCH_MSM8960 is not set -# CONFIG_ARCH_MSM8974 is not set -CONFIG_ARCH_MSM8X60=y -CONFIG_ARCH_MULTIPLATFORM=y -# CONFIG_ARCH_MULTI_CPU_AUTO is not 
set -CONFIG_ARCH_MULTI_V6_V7=y -CONFIG_ARCH_MULTI_V7=y -CONFIG_ARCH_NR_GPIO=0 -CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y -CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y -CONFIG_ARCH_QCOM=y -# CONFIG_ARCH_RDA is not set -# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set -# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_ARM=y -# CONFIG_ARM64_CNP is not set -# CONFIG_ARM64_ERRATUM_1165522 is not set -# CONFIG_ARM64_ERRATUM_1286807 is not set -# CONFIG_ARM64_ERRATUM_1418040 is not set -# CONFIG_ARM64_ERRATUM_1542419 is not set -# CONFIG_ARM64_MODULE_PLTS is not set -# CONFIG_ARM64_PMEM is not set -# CONFIG_ARM64_PSEUDO_NMI is not set -# CONFIG_ARM64_PTDUMP_DEBUGFS is not set -# CONFIG_ARM64_PTR_AUTH is not set -# CONFIG_ARM64_SSBD is not set -# CONFIG_ARM64_SVE is not set -# CONFIG_ARM64_TAGGED_ADDR_ABI is not set -# CONFIG_ARM64_UAO is not set -# CONFIG_ARM64_VHE is not set -CONFIG_ARM_AMBA=y -CONFIG_ARM_ARCH_TIMER=y -CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y -# CONFIG_ARM_ATAG_DTB_COMPAT is not set -# CONFIG_ARM_CCI is not set -# CONFIG_ARM_CCI400_COMMON is not set -# CONFIG_ARM_CCI400_PMU is not set -# CONFIG_ARM_CCI_PMU is not set -CONFIG_ARM_CPUIDLE=y -CONFIG_ARM_CPU_SUSPEND=y -# CONFIG_ARM_ERRATA_814220 is not set -# CONFIG_ARM_ERRATA_857272 is not set -CONFIG_ARM_GIC=y -CONFIG_ARM_HAS_SG_CHAIN=y -# CONFIG_ARM_HIGHBANK_CPUIDLE is not set -CONFIG_ARM_L1_CACHE_SHIFT=6 -CONFIG_ARM_L1_CACHE_SHIFT_6=y -# CONFIG_ARM_LPAE is not set -CONFIG_ARM_MODULE_PLTS=y -CONFIG_ARM_PATCH_IDIV=y -CONFIG_ARM_PATCH_PHYS_VIRT=y -CONFIG_ARM_PMU=y -CONFIG_ARM_PSCI=y -# CONFIG_ARM_PSCI_CPUIDLE is not set -CONFIG_ARM_PSCI_FW=y -CONFIG_ARM_QCOM_CPUFREQ=y -# CONFIG_ARM_QCOM_CPUFREQ_HW is not set -# 
CONFIG_ARM_QCOM_CPUFREQ_NVMEM is not set -# CONFIG_ARM_SCMI_PROTOCOL is not set -# CONFIG_ARM_SMMU is not set -# CONFIG_ARM_SP805_WATCHDOG is not set -CONFIG_ARM_THUMB=y -# CONFIG_ARM_THUMBEE is not set -CONFIG_ARM_UNWIND=y -CONFIG_ARM_VIRT_EXT=y -# CONFIG_ASHMEM is not set -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_AT803X_PHY=y -# CONFIG_ATA is not set -# CONFIG_BACKLIGHT_CLASS_DEVICE is not set -# CONFIG_BATTERY_RT5033 is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_NVME=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=4096 -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -CONFIG_BOUNCE=y -# CONFIG_BPFILTER is not set -# CONFIG_BPF_KPROBE_OVERRIDE is not set -CONFIG_BRIDGE_VLAN_FILTERING=y -# CONFIG_BT_HCIBTUSB_MTK is not set -# CONFIG_BT_MTKSDIO is not set -CONFIG_BUILD_BIN2C=y -# CONFIG_BUS_TOPOLOGY_ADHOC is not set -# CONFIG_CACHE_L2X0 is not set -# CONFIG_CAVIUM_TX2_ERRATUM_219 is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_CC_STACKPROTECTOR=y -# CONFIG_CC_STACKPROTECTOR_NONE is not set -CONFIG_CC_STACKPROTECTOR_REGULAR=y -# CONFIG_CHARGER_LT3651 is not set -# CONFIG_CHARGER_QCOM_SMBB is not set -# CONFIG_CHARGER_UCS1002 is not set -CONFIG_CLEANCACHE=y -CONFIG_CLKDEV_LOOKUP=y -CONFIG_CLKSRC_OF=y -CONFIG_CLKSRC_PROBE=y -CONFIG_CLKSRC_QCOM=y -CONFIG_CLONE_BACKWARDS=y -# CONFIG_CMA is not set -# CONFIG_CMA_ALIGNMENT is not set -# CONFIG_CMA_AREAS is not set -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -# CONFIG_CMA_SIZE_MBYTES is not set -# CONFIG_CMA_SIZE_SEL_MAX is not set -# CONFIG_CMA_SIZE_SEL_MBYTES is not set -# CONFIG_CMA_SIZE_SEL_MIN is not set -# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -# CONFIG_CNSS2 is not set -# CONFIG_CNSS2_CALIBRATION_SUPPORT is not set -# CONFIG_CNSS2_DEBUG is not set -CONFIG_CNSS2_GENL=y -# CONFIG_CNSS2_PCI_DRIVER is not set -# CONFIG_CNSS2_PM is not set -# CONFIG_CNSS2_RAMDUMP is not set -# 
CONFIG_CNSS2_SMMU is not set -# CONFIG_CNSS2_QCA9574_SUPPORT is not set -CONFIG_CNSS_QCN9000=y -CONFIG_COMMON_CLK=y -# CONFIG_COMMON_CLK_FIXED_MMIO is not set -CONFIG_COMMON_CLK_QCOM=y -# CONFIG_COMMON_CLK_SI5341 is not set -CONFIG_CONFIGFS_FS=y -CONFIG_COREDUMP=y -CONFIG_CORESIGHT=y -# CONFIG_CORESIGHT_BYTE_CNTR is not set -# CONFIG_CORESIGHT_CATU is not set -# CONFIG_CORESIGHT_CPU_DEBUG is not set -CONFIG_CORESIGHT_CSR=y -CONFIG_CORESIGHT_CTI=y -# CONFIG_CORESIGHT_CTI_SAVE_DISABLE is not set -# CONFIG_CORESIGHT_DUMMY is not set -CONFIG_CORESIGHT_HWEVENT=y -CONFIG_CORESIGHT_LINKS_AND_SINKS=y -CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y -# CONFIG_CORESIGHT_REMOTE_ETM is not set -# CONFIG_CORESIGHT_SINK_ETBV10 is not set -# CONFIG_CORESIGHT_SINK_TPIU is not set -# CONFIG_CORESIGHT_SOURCE_ETM3X is not set -CONFIG_CORESIGHT_SOURCE_ETM4X=y -CONFIG_CORESIGHT_STM=y -CONFIG_CORESIGHT_TPDA=y -CONFIG_CORESIGHT_TPDM=y -# CONFIG_CORESIGHT_TPDM_DEFAULT_ENABLE is not set -# CONFIG_COUNTER is not set -CONFIG_CPUFREQ_DT=y -CONFIG_CPUFREQ_DT_PLATDEV=y -CONFIG_CPU_32v6K=y -CONFIG_CPU_32v7=y -CONFIG_CPU_ABRT_EV7=y -# CONFIG_CPU_BIG_ENDIAN is not set -# CONFIG_CPU_BPREDICT_DISABLE is not set -CONFIG_CPU_CACHE_V7=y -CONFIG_CPU_CACHE_VIPT=y -CONFIG_CPU_COPY_V6=y -CONFIG_CPU_CP15=y -CONFIG_CPU_CP15_MMU=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_HAS_ASID=y -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_CPU_ICACHE_DISABLE is not set -# CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND is not set -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_CPU_IDLE_GOV_TEO is not set -CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y -CONFIG_CPU_PABRT_V7=y -CONFIG_CPU_PM=y -CONFIG_CPU_RMAP=y -# CONFIG_CPU_SW_DOMAIN_PAN is not set -# 
CONFIG_CPU_THERMAL is not set -CONFIG_CPU_TLB_V7=y -CONFIG_CPU_V7=y -CONFIG_CRC16=y -# CONFIG_CRC32_SARWATE is not set -CONFIG_CRC32_SLICEBY8=y -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_CRYPTO_ADIANTUM is not set -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_AES_586=y -# CONFIG_CRYPTO_ALL_CASES is not set -CONFIG_CRYPTO_ARC4=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CCM=y -CONFIG_CRYPTO_CMAC=y -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_DES=y -CONFIG_CRYPTO_DISABLE_AES192_TEST=y -# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -# CONFIG_CRYPTO_DEV_HISI_ZIP is not set -# CONFIG_CRYPTO_DEV_OTA_CRYPTO is not set -# CONFIG_CRYPTO_DEV_QCOM_ICE is not set -# CONFIG_CRYPTO_DEV_QCOM_MSM_QCE is not set -# CONFIG_CRYPTO_DEV_QCOM_RNG is not set -CONFIG_CRYPTO_DISABLE_AHASH_LARGE_KEY_TEST=y -CONFIG_CRYPTO_DISABLE_AHASH_TYPE1_TESTS=y -CONFIG_CRYPTO_DISABLE_AHASH_TYPE2_TESTS=y -CONFIG_CRYPTO_DISABLE_AHASH_TYPE3_TESTS=y -CONFIG_CRYPTO_DISABLE_AUTH_SPLIT_TESTS=y -CONFIG_CRYPTO_DISABLE_HW_UNSUPPORTED_TESTS=y -CONFIG_CRYPTO_DRBG_HMAC=y -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_ECHAINIV=y -# CONFIG_CRYPTO_ECRDSA is not set -# CONFIG_CRYPTO_ESSIV is not set -CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_GF128MUL=y -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_JITTERENTROPY=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_LIB_AES=y -CONFIG_CRYPTO_LIB_SHA256=y -CONFIG_CRYPTO_LZO=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set -CONFIG_CRYPTO_MD5_PPC=y -CONFIG_CRYPTO_MICHAEL_MIC=y -CONFIG_CRYPTO_NO_ZERO_LEN_HASH=y -# CONFIG_CRYPTO_NO_AES_XTS_ZERO_KEY_SUPPORT is not set -# CONFIG_CRYPTO_NO_AES_CTR_UNEVEN_DATA_LEN_SUPPORT is not set -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y 
-CONFIG_CRYPTO_OFB=y -# CONFIG_CRYPTO_PCRYPT is not set -CONFIG_CRYPTO_RMD160=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=y -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA1_PPC=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -# CONFIG_CRYPTO_STREEBOG is not set -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_XTS=y -# CONFIG_CRYPTO_XXHASH is not set -CONFIG_CRYPTO_XZ=y -CONFIG_DCACHE_WORD_ACCESS=y -CONFIG_DEBUG_BUGVERBOSE=y -# CONFIG_DEBUG_EFI is not set -CONFIG_DEBUG_GPIO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -# CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN is not set -CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S" -# CONFIG_DEBUG_MISC is not set -# CONFIG_DEBUG_PLIST is not set -# CONFIG_DEBUG_UART_8250 is not set -# CONFIG_DEBUG_USER is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DEFAULT_SECURITY_DAC=y -CONFIG_DEVMEM=y -# CONFIG_DIAGFWD_BRIDGE_CODE is not set -CONFIG_DIAG_OVER_QRTR=y -# CONFIG_DIAG_OVER_USB is not set -CONFIG_DMADEVICES=y -# CONFIG_DMA_CMA is not set -CONFIG_DMA_ENGINE=y -CONFIG_DMA_OF=y -# CONFIG_DMA_SOUND is not set -CONFIG_DMA_VIRTUAL_CHANNELS=y -# CONFIG_DMI is not set -# CONFIG_DMIID is not set -# CONFIG_DMI_SYSFS is not set -# CONFIG_DM_INIT is not set -# CONFIG_DP83640_PHY is not set -# CONFIG_DPS310 is not set -CONFIG_DTC=y -# CONFIG_DWMAC_GENERIC is not set -# CONFIG_DWMAC_IPQ806X is not set -# CONFIG_DWMAC_SUNXI is not set -# CONFIG_DW_DMAC_PCI is not set -CONFIG_DYNAMIC_DEBUG=y -CONFIG_EDAC_ATOMIC_SCRUB=y -CONFIG_EDAC_SUPPORT=y -# CONFIG_EEPROM_EE1004 is not set -# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -# CONFIG_ENERGY_MODEL is not set -# CONFIG_EP_PCIE is not set -CONFIG_ETHERNET_PACKET_MANGLE=y -# CONFIG_EXPORTFS_BLOCK_OPS is not set -CONFIG_EXT4_FS=y -# CONFIG_EXT4_USE_FOR_EXT2 is not set -# CONFIG_EXTCON_FSA9480 is not set -CONFIG_FB=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_CMDLINE=y 
-# CONFIG_FB_EFI is not set -CONFIG_FB_QTI_QPIC=y -CONFIG_FB_QTI_QPIC_ER_SSD1963_PANEL=y -CONFIG_FB_SYS_FOPS=y -# CONFIG_FIPS_ENABLE is not set -CONFIG_FIXED_PHY=y -CONFIG_FIX_EARLYCON_MEM=y -# CONFIG_FSL_MC_BUS is not set -# CONFIG_FSL_QDMA is not set -CONFIG_FS_MBCACHE=y -# CONFIG_FS_VERITY is not set -# CONFIG_FUJITSU_ERRATUM_010001 is not set -CONFIG_FW_AUTH=y -CONFIG_FW_AUTH_TEST=m -# CONFIG_FW_LOADER_COMPRESS is not set -# CONFIG_FXAS21002C is not set -# CONFIG_FXOS8700_I2C is not set -# CONFIG_FXOS8700_SPI is not set -# CONFIG_GCC_PLUGINS is not set -# CONFIG_GCC_PLUGIN_CYC_COMPLEXITY is not set -# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set -# CONFIG_GCC_PLUGIN_RANDSTRUCT is not set -# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set -# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL is not set -# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -# CONFIG_GENERIC_CPUFREQ_KRAIT is not set -CONFIG_GENERIC_IDLE_POLL_SETUP=y -CONFIG_GENERIC_IO=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_SHOW_LEVEL=y -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_PHY=y -CONFIG_GENERIC_PINCONF=y -CONFIG_GENERIC_PINCTRL_GROUPS=y -CONFIG_GENERIC_PINMUX_FUNCTIONS=y -CONFIG_GENERIC_SCHED_CLOCK=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_TIME_VSYSCALL=y -# CONFIG_GEN_RTC is not set -# CONFIG_GLACIER is not set -# CONFIG_GLINK_DEBUG_FS is not set -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_GPIO_AMD_FCH is not set -# CONFIG_GPIO_CADENCE is not set -CONFIG_GPIO_DEVRES=y -# CONFIG_GPIO_GW_PLD is not set -# CONFIG_GPIO_LATCH is not set -# CONFIG_GPIO_NXP_74HC153 is not set -# CONFIG_GPIO_SAMA5D2_PIOBU is not set -CONFIG_GPIO_SYSFS=y -# CONFIG_GPIO_USB_DETECT is not set -# CONFIG_GSI is not set -# CONFIG_HABANA_AI is 
not set -CONFIG_HANDLE_DOMAIN_IRQ=y -CONFIG_HARDEN_BRANCH_PREDICTOR=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_HAS_DMA=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_HAVE_ARCH_BITREVERSE=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_ARCH_KGDB=y -CONFIG_HAVE_ARCH_PFN_VALID=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_ARM_ARCH_TIMER=y -# CONFIG_HAVE_ARM_SMCCC is not set -# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set -CONFIG_HAVE_BPF_JIT=y -CONFIG_HAVE_CC_STACKPROTECTOR=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_HAVE_DEBUG_KMEMLEAK=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_DMA_ATTRS=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_IDE=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_LZ4=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_HAVE_KERNEL_XZ=y -# CONFIG_HAVE_KPROBES is not set -# CONFIG_HAVE_KRETPROBES is not set -CONFIG_HAVE_MEMBLOCK=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_HAVE_NET_DSA=y -CONFIG_HAVE_OPROFILE=y -# CONFIG_HAVE_OPTPROBES is not set -CONFIG_HAVE_PERF_EVENTS=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_PROC_CPU=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_SMP=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_UID16=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -# CONFIG_HEADERS_INSTALL is not set -# CONFIG_HID_MACALLY is not set -# CONFIG_HID_MALTRON is not set -# CONFIG_HID_VIEWSONIC is not set 
-CONFIG_HIGHMEM=y -CONFIG_HIGHPTE=y -# CONFIG_HIST_TRIGGERS is not set -CONFIG_HOTPLUG_CPU=y -CONFIG_HWSPINLOCK=y -CONFIG_HWSPINLOCK_QCOM=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_MSM_LEGACY=y -CONFIG_HZ_FIXED=0 -CONFIG_I2C=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_HELPER_AUTO=y -# CONFIG_I2C_NVIDIA_GPU is not set -CONFIG_I2C_QUP=y -# CONFIG_I3C is not set -# CONFIG_IGC is not set -CONFIG_IIO=y -# CONFIG_IIO_BUFFER is not set -# CONFIG_IIO_TRIGGER is not set -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -# CONFIG_IKHEADERS is not set -CONFIG_INITRAMFS_SOURCE="" -# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set -# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set -# CONFIG_INPUT_GPIO_VIBRA is not set -# CONFIG_INPUT_MSM_VIBRATOR is not set -# CONFIG_INPUT_PM8941_PWRKEY is not set -# CONFIG_INPUT_PM8XXX_VIBRATOR is not set -# CONFIG_INTERCONNECT is not set -CONFIG_IOMMU_HELPER=y -# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set -# CONFIG_ION is not set -# CONFIG_ION_DUMMY is not set -# CONFIG_ION_MSM is not set -# CONFIG_ION_TEST is not set -# CONFIG_IO_URING is not set -# CONFIG_IPA is not set -# CONFIG_IPA3 is not set -# CONFIG_IPC_LOGGING is not set -# CONFIG_IPC_ROUTER is not set -# CONFIG_IPC_ROUTER_SECURITY is not set -# CONFIG_IPQ807X_REMOTEPROC is not set -# CONFIG_IPQ_ADCC_4019 is not set -# CONFIG_IPQ_ADSS_8074 is not set -# CONFIG_IPQ_APSS_PLL is not set -CONFIG_IPQ_APSS_5018=y -# CONFIG_IPQ_APSS_6018 is not set -# CONFIG_IPQ_APSS_8074 is not set -# CONFIG_IPQ_DWC3_QTI_EXTCON is not set -# CONFIG_IPQ_FLASH_16M_PROFILE is not set -# CONFIG_IPQ_GCC_4019 is not set -CONFIG_IPQ_GCC_5018=y -# CONFIG_IPQ_GCC_6018 is not set -# CONFIG_IPQ_GCC_806X is not set -# CONFIG_IPQ_GCC_8074 is not set -# CONFIG_IPQ_GCC_9574 is not set -# CONFIG_IPQ_LCC_806X is not set -# CONFIG_IPQ_REMOTEPROC_ADSP is not set -CONFIG_IPQ_SUBSYSTEM_DUMP=y -CONFIG_IPQ_SUBSYSTEM_RAMDUMP=y -# CONFIG_IPQ_SUBSYSTEM_RESTART is not set -# CONFIG_IPQ_SUBSYSTEM_RESTART_TEST is not set 
-CONFIG_IPQ_TCSR=y -CONFIG_IRQCHIP=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_IRQ_WORK=y -CONFIG_JBD2=y -# CONFIG_KCOV is not set -CONFIG_KEYS=y -# CONFIG_KEYS_REQUEST_CACHE is not set -# CONFIG_KPSS_XCC is not set -# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set -# CONFIG_KRAITCC is not set -# CONFIG_KRAIT_CLOCKS is not set -# CONFIG_KRAIT_L2_ACCESSORS is not set -# CONFIG_LCD_CLASS_DEVICE is not set -# CONFIG_LEDS_AN30259A is not set -CONFIG_LEDS_IPQ=y -# CONFIG_LEDS_LM3532 is not set -# CONFIG_LEDS_PCA9956B is not set -CONFIG_LEDS_TLC591XX=y -# CONFIG_LEDS_TRIGGER_AUDIO is not set -# CONFIG_LEDS_TRIGGER_PATTERN is not set -CONFIG_LIBFDT=y -CONFIG_LOCKUP_DETECTOR=y -# CONFIG_LOCK_EVENT_COUNTS is not set -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity" -# CONFIG_LTC1660 is not set -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_MAILBOX=y -# CONFIG_MAILBOX_TEST is not set -# CONFIG_MAP_E_SUPPORT is not set -# CONFIG_MAX31856 is not set -# CONFIG_MAX44009 is not set -# CONFIG_MAX5432 is not set -# CONFIG_MB1232 is not set -# CONFIG_MCP3911 is not set -# CONFIG_MCP41010 is not set -CONFIG_MDIO=y -CONFIG_MDIO_BITBANG=y -CONFIG_MDIO_BOARDINFO=y -# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set -CONFIG_MDIO_GPIO=y -CONFIG_MDIO_QCA=y -# CONFIG_MDM_GCC_9615 is not set -# CONFIG_MDM_LCC_9615 is not set -# CONFIG_MEMORY_HOTPLUG is not set -# CONFIG_MFD_LOCHNAGAR is not set -# CONFIG_MFD_MAX77650 is not set -# CONFIG_MFD_QCOM_RPM is not set -# CONFIG_MFD_ROHM_BD70528 is not set -# CONFIG_MFD_SPMI_PMIC is not set -# CONFIG_MFD_STMFX is not set -# CONFIG_MFD_STPMIC1 is not set -CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TQMX86 is not set -CONFIG_MHI_BUS=y -CONFIG_MHI_BUS_DEBUG=y -CONFIG_MHI_NETDEV=y -CONFIG_MHI_QTI=y -# CONFIG_MHI_SATELLITE is not set -CONFIG_MHI_UCI=y -CONFIG_MHI_WWAN_CTRL=y -CONFIG_MIGHT_HAVE_CACHE_L2X0=y -CONFIG_MIGHT_HAVE_PCI=y -CONFIG_MIGRATION=y -# 
CONFIG_MIKROTIK is not set -# CONFIG_MISC_ALCOR_PCI is not set -# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set -CONFIG_MMC=y -CONFIG_MMC_ARMMMCI=y -CONFIG_MMC_BLOCK=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_QCOM_DML=y -CONFIG_MMC_SDHCI=y -# CONFIG_MMC_SDHCI_AM654 is not set -CONFIG_MMC_SDHCI_IO_ACCESSORS=y -CONFIG_MMC_SDHCI_MSM=y -# CONFIG_MMC_SDHCI_MSM_ICE is not set -# CONFIG_MMC_SDHCI_OF_ARASAN is not set -# CONFIG_MMC_SDHCI_OF_ASPEED is not set -# CONFIG_MMC_SDHCI_PCI is not set -CONFIG_MMC_SDHCI_PLTFM=y -# CONFIG_MMC_STM32_SDMMC is not set -# CONFIG_MMC_TIFM_SD is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_MODULES_USE_ELF_REL=y -# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set -# CONFIG_MPLS_ROUTING is not set -# CONFIG_MSM_ADSPRPC is not set -# CONFIG_MSM_BUS_SCALING is not set -# CONFIG_MSM_GCC_8660 is not set -# CONFIG_MSM_GCC_8916 is not set -# CONFIG_MSM_GCC_8960 is not set -# CONFIG_MSM_GCC_8974 is not set -# CONFIG_MSM_GCC_8994 is not set -# CONFIG_MSM_GCC_8996 is not set -# CONFIG_MSM_GCC_8998 is not set -# CONFIG_MSM_GLINK is not set -# CONFIG_MSM_GLINK_LOOPBACK_SERVER is not set -# CONFIG_MSM_GLINK_PKT is not set -# CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT is not set -# CONFIG_MSM_IPC_ROUTER_GLINK_XPRT is not set -# CONFIG_MSM_IPC_ROUTER_MHI_XPRT is not set -# CONFIG_MSM_LCC_8960 is not set -# CONFIG_MSM_MHI is not set -# CONFIG_MSM_MHI_DEBUG is not set -# CONFIG_MSM_MHI_DEV is not set -# CONFIG_MSM_MHI_UCI is not set -# CONFIG_MSM_MMCC_8960 is not set -# CONFIG_MSM_MMCC_8974 is not set -# CONFIG_MSM_MMCC_8996 is not set -# CONFIG_MSM_QMI_INTERFACE is not set -# CONFIG_MSM_RPM_GLINK is not set -# CONFIG_MSM_RPM_LOG is not set -CONFIG_MSM_RPM_RPMSG=y -# CONFIG_MSM_RPM_SMD is not set -# CONFIG_MSM_SECURE_BUFFER is not set -# CONFIG_MSM_SMEM is not set -# CONFIG_MSM_TEST_QMI_CLIENT is not set -CONFIG_MTD_CMDLINE_PARTS=y -# CONFIG_MTD_HYPERBUS is not set -CONFIG_MTD_M25P80=y -# CONFIG_MTD_NAND_ECC_SW_BCH is not set -# 
CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC is not set -CONFIG_MTD_NAND_QCOM=y -# CONFIG_MTD_NAND_QCOM_SERIAL is not set -CONFIG_MTD_RAW_NAND=y -# CONFIG_MTD_ROUTERBOOT_PARTS is not set -CONFIG_MTD_SPINAND_GIGADEVICE=y -CONFIG_MTD_SPINAND_MT29F=y -CONFIG_MTD_SPINAND_ONDIEECC=y -CONFIG_MTD_SPI_NOR=y -CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y -CONFIG_MTD_SPI_NOR_USE_4K_SECTORS_LIMIT=65536 -# CONFIG_MTD_SPLIT_BCM_WFI_FW is not set -# CONFIG_MTD_SPLIT_ELF_FW is not set -CONFIG_MTD_SPLIT_FIRMWARE=y -CONFIG_MTD_SPLIT_FIT_FW=y -CONFIG_MTD_UBI=y -CONFIG_MTD_UBI_BEB_LIMIT=20 -CONFIG_MTD_UBI_BLOCK=y -# CONFIG_MTD_UBI_FASTMAP is not set -CONFIG_MTD_UBI_GLUEBI=y -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MULTI_IRQ_HANDLER=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEON=y -CONFIG_NET=y -# CONFIG_NET_DSA_MV88E6063 is not set -CONFIG_NET_FLOW_LIMIT=y -CONFIG_NET_L3_MASTER_DEV=y -CONFIG_NET_PTP_CLASSIFY=y -# CONFIG_NET_SCH_TAPRIO is not set -# CONFIG_NET_SWITCHDEV is not set -# CONFIG_NET_VENDOR_GOOGLE is not set -# CONFIG_NET_VENDOR_PENSANDO is not set -# CONFIG_NF_CONNTRACK_DSCPREMARK_EXT is not set -# CONFIG_NF_IPV6_DUMMY_HEADER is not set -# CONFIG_NI_XGE_MANAGEMENT_ENET is not set -# CONFIG_NOA1305 is not set -CONFIG_NO_BOOTMEM=y -CONFIG_NO_HZ=y -CONFIG_NO_HZ_COMMON=y -CONFIG_NO_HZ_IDLE=y -CONFIG_NR_CPUS=2 -# CONFIG_NULL_TTY is not set -# CONFIG_NUMA is not set -CONFIG_NUM_ALT_PARTITION=16 -CONFIG_NVMEM=y -# CONFIG_NVMEM_REBOOT_MODE is not set -# CONFIG_NVMEM_SYSFS is not set -# CONFIG_NVME_MULTIPATH is not set -# CONFIG_NVME_TCP is not set -# CONFIG_OCTEONTX2_AF is not set -CONFIG_OF=y -CONFIG_OF_ADDRESS=y -CONFIG_OF_ADDRESS_PCI=y -CONFIG_OF_EARLY_FLATTREE=y -CONFIG_OF_FLATTREE=y -CONFIG_OF_GPIO=y -CONFIG_OF_IRQ=y -CONFIG_OF_KOBJ=y -CONFIG_OF_MDIO=y -CONFIG_OF_MTD=y -CONFIG_OF_NET=y -CONFIG_OF_PCI=y -CONFIG_OF_PCI_IRQ=y -CONFIG_OF_RESERVED_MEM=y -# CONFIG_OF_SLIMBUS is not set -CONFIG_OLD_SIGACTION=y -CONFIG_OLD_SIGSUSPEND3=y -# CONFIG_OPTIMIZE_INLINING is not set 
-# CONFIG_PACKING is not set -CONFIG_PADATA=y -CONFIG_PAGE_OFFSET=0xC0000000 -CONFIG_PANIC_ON_OOPS=y -CONFIG_PANIC_ON_OOPS_VALUE=1 -CONFIG_PANIC_TIMEOUT=5 -CONFIG_PCI=y -# CONFIG_PCIEAER is not set -# CONFIG_PCIE_AL is not set -# CONFIG_PCIE_CADENCE_EP is not set -CONFIG_PCIE_DW=y -CONFIG_PHY_IPQ_UNIPHY_PCIE=y -# CONFIG_PCIE_DW_PLAT is not set -# CONFIG_PCIE_PME is not set -CONFIG_PCIE_QCOM=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -# CONFIG_PCI_MESON is not set -CONFIG_PCI_MSI=y -CONFIG_PERF_EVENTS=y -CONFIG_PERF_USE_VMALLOC=y -# CONFIG_PFT is not set -CONFIG_PGTABLE_LEVELS=2 -CONFIG_PHYLIB=y -# CONFIG_PHY_CADENCE_DP is not set -# CONFIG_PHY_CADENCE_DPHY is not set -# CONFIG_PHY_CADENCE_SIERRA is not set -# CONFIG_PHY_FSL_IMX8MQ_USB is not set -# CONFIG_PHY_IPQ_BALDUR_USB is not set -# CONFIG_PHY_IPQ_UNIPHY_USB is not set -# CONFIG_PHY_MIXEL_MIPI_DPHY is not set -# CONFIG_PHY_OCELOT_SERDES is not set -# CONFIG_PHY_QCA_PCIE_QMP is not set -# CONFIG_PHY_QCOM_APQ8064_SATA is not set -# CONFIG_PHY_QCOM_IPQ806X_SATA is not set -# CONFIG_PHY_QCOM_PCIE2 is not set -# CONFIG_PHY_QCOM_QMP is not set -# CONFIG_PHY_QCOM_QUSB2 is not set -# CONFIG_PHY_QCOM_UFS is not set -CONFIG_PINCTRL=y -# CONFIG_PINCTRL_APQ8064 is not set -# CONFIG_PINCTRL_APQ8084 is not set -# CONFIG_PINCTRL_IPQ4019 is not set -CONFIG_PINCTRL_IPQ5018=y -# CONFIG_PINCTRL_IPQ6018 is not set -# CONFIG_PINCTRL_IPQ8064 is not set -# CONFIG_PINCTRL_IPQ8074 is not set -# CONFIG_PINCTRL_IPQ9574 is not set -# CONFIG_PINCTRL_MDM9615 is not set -CONFIG_PINCTRL_MSM=y -# CONFIG_PINCTRL_MSM8660 is not set -# CONFIG_PINCTRL_MSM8916 is not set -# CONFIG_PINCTRL_MSM8960 is not set -# CONFIG_PINCTRL_MSM8994 is not set -# CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_MSM8998 is not set -# CONFIG_PINCTRL_OCELOT is not set -# CONFIG_PINCTRL_QCOM_SPMI_PMIC is not set -# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set -# CONFIG_PINCTRL_QCS404 is not set -# CONFIG_PINCTRL_SC7180 is not set -# CONFIG_PINCTRL_SDM660 is 
not set -# CONFIG_PINCTRL_SDM845 is not set -# CONFIG_PINCTRL_SINGLE is not set -# CONFIG_PINCTRL_SM8150 is not set -# CONFIG_PINCTRL_STMFX is not set -# CONFIG_PKCS7_TEST_KEY is not set -# CONFIG_PKCS7_MESSAGE_PARSER is not set -# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set -# CONFIG_PL330_DMA is not set -CONFIG_PM=y -# CONFIG_PM8916_WATCHDOG is not set -CONFIG_PM_CLK=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_GENERIC_DOMAINS=y -CONFIG_PM_GENERIC_DOMAINS_OF=y -CONFIG_PM_GENERIC_DOMAINS_SLEEP=y -CONFIG_PM_OPP=y -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_POWER_RESET is not set -# CONFIG_POWER_RESET_MSM is not set -# CONFIG_POWER_RESET_QCOM_PON is not set -# CONFIG_POWER_SUPPLY is not set -CONFIG_PPS=y -CONFIG_PREEMPT=y -CONFIG_PREEMPT_COUNT=y -# CONFIG_PREEMPT_NONE is not set -CONFIG_PREEMPT_RCU=y -# CONFIG_PRINTK_CALLER is not set -CONFIG_PRINTK_TIME=y -CONFIG_PROC_PAGE_MONITOR=y -# CONFIG_PROC_STRIPPED is not set -# CONFIG_PSI is not set -CONFIG_PTP_1588_CLOCK=y -CONFIG_PUBLIC_KEY_ALGO_RSA=y -# CONFIG_PVPANIC is not set -CONFIG_PWM=y -CONFIG_PWM_IPQ=y -# CONFIG_PWM_PCA9685 is not set -CONFIG_PWM_SYSFS=y -# CONFIG_PWRSEQ_EMMC is not set -CONFIG_PWRSEQ_IPQ=y -# CONFIG_PWRSEQ_SIMPLE is not set -CONFIG_QCA_MINIDUMP=y -# CONFIG_QCA_MINIDUMP_DEBUG is not set -# CONFIG_QCOM_A53PLL is not set -CONFIG_QCOM_ADM=y -# CONFIG_QCOM_AOSS_QMP is not set -CONFIG_QCOM_APCS_IPC=y -# CONFIG_QCOM_APM is not set -# CONFIG_QCOM_APR is not set -CONFIG_QCOM_BAM_DMA=y -CONFIG_QCOM_CACHE_DUMP=y -CONFIG_QCOM_CACHE_DUMP_ON_PANIC=y -# CONFIG_QCOM_CLK_APCS_MSM8916 is not set -# CONFIG_QCOM_CLK_RPM is not set -# CONFIG_QCOM_COINCELL is not set -# CONFIG_QCOM_COMMAND_DB is not set -CONFIG_QCOM_DLOAD_MODE=y -CONFIG_QCOM_DLOAD_MODE_APPSBL=y -# CONFIG_QCOM_EBI2 is not set -# CONFIG_QCOM_FASTRPC is not set -CONFIG_QCOM_GDSC=y -# CONFIG_QCOM_GENI_SE is not set -CONFIG_QCOM_GLINK_SSR=y -# CONFIG_QCOM_GSBI is not set -# CONFIG_QCOM_HFPLL is not set -# CONFIG_QCOM_LLCC is not set -# 
CONFIG_QCOM_PDC is not set -# CONFIG_QCOM_PM is not set -# CONFIG_QCOM_Q6V5_ADSP is not set -CONFIG_QCOM_Q6V5_MPD=y -# CONFIG_QCOM_Q6V5_MSS is not set -# CONFIG_QCOM_Q6V5_PAS is not set -CONFIG_QCOM_Q6V5_WCSS=y -CONFIG_QCOM_QFPROM=y -CONFIG_QCOM_QMI_HELPERS=y -CONFIG_QCOM_RESTART_REASON=y -# CONFIG_QCOM_RMTFS_MEM is not set -# CONFIG_QCOM_RPMH is not set -CONFIG_QCOM_RPM_CLK=y -# CONFIG_QCOM_RTB is not set -CONFIG_QCOM_SCM=y -CONFIG_QCOM_SCM_32=y -# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set -# CONFIG_QCOM_SMD is not set -# CONFIG_QCOM_SMD_RPM is not set -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMEM_STATE=y -CONFIG_QCOM_SMP2P=y -# CONFIG_QCOM_SMSM is not set -CONFIG_QCOM_SOCINFO=y -# CONFIG_QCOM_SPMI_ADC5 is not set -# CONFIG_QCOM_SPMI_TEMP_ALARM is not set -# CONFIG_QCOM_SPMI_VADC is not set -CONFIG_QCOM_SYSMON=y -CONFIG_QCOM_TSENS=y -# CONFIG_QCOM_WCNSS_CTRL is not set -# CONFIG_QCOM_WCNSS_PIL is not set -CONFIG_QCOM_WDT=y -# CONFIG_QCS_GCC_404 is not set -# CONFIG_QCS_TURING_404 is not set -CONFIG_QMI_ENCDEC=y -# CONFIG_QPNP_REVID is not set -CONFIG_QRTR=y -# CONFIG_QRTR_FIFO is not set -CONFIG_QRTR_MHI=y -CONFIG_QRTR_SMD=y -# CONFIG_QRTR_TUN is not set -# CONFIG_QRTR_USB is not set -CONFIG_QSEECOM=m -# CONFIG_QTI_APSS_ACC is not set -CONFIG_QTI_CTXT_SAVE=y -CONFIG_QTI_DCC=y -CONFIG_QTI_EUD=y -# CONFIG_EUD_EXTCON_SUPPORT is not set -# CONFIG_QTI_DCC_V2 is not set -# CONFIG_QTI_MEMORY_DUMP_V2 is not set -CONFIG_QTI_SCM_RESTART_REASON=y -CONFIG_QTI_TZ_LOG=y -# CONFIG_RANDOMIZE_BASE is not set -# CONFIG_RANDOM_TRUST_BOOTLOADER is not set -CONFIG_RATIONAL=y -# CONFIG_RCU_BOOST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=21 -# CONFIG_RCU_EXPERT is not set -CONFIG_RCU_STALL_COMMON=y -CONFIG_RD_GZIP=y -# CONFIG_REED_SOLOMON_TEST is not set -CONFIG_REGMAP=y -CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y -CONFIG_REGMAP_MMIO=y -CONFIG_REGULATOR=y -# CONFIG_REGULATOR_CPR3 is not set -# CONFIG_REGULATOR_CPR3_NPU is not set -# CONFIG_REGULATOR_CPR4_APSS is not set 
-CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_GPIO=y -# CONFIG_REGULATOR_IPQ40XX is not set -# CONFIG_REGULATOR_QCOM_RPM is not set -# CONFIG_REGULATOR_QCOM_SPMI is not set -CONFIG_REGULATOR_RPM_GLINK=y -# CONFIG_REGULATOR_RPM_SMD is not set -CONFIG_RELAY=y -CONFIG_REMOTEPROC=y -# CONFIG_RESET_ATTACK_MITIGATION is not set -CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_QCOM_AOSS is not set -# CONFIG_RESET_QCOM_PDC is not set -CONFIG_RFS_ACCEL=y -# CONFIG_RMNET is not set -# CONFIG_RMNET_DATA is not set -# CONFIG_RMNET_DATA_DEBUG_PKT is not set -# CONFIG_RMNET_DATA_FC is not set -# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set -CONFIG_RPMSG=y -CONFIG_RPMSG_CHAR=y -# CONFIG_RPMSG_QCOM_GLINK_RPM is not set -CONFIG_RPMSG_QCOM_GLINK_SMEM=y -CONFIG_RPMSG_QCOM_SMD=y -# CONFIG_RPMSG_VIRTIO is not set -CONFIG_RPS=y -CONFIG_RTC_CLASS=y -# CONFIG_RTC_DRV_CMOS is not set -# CONFIG_RTC_DRV_PM8XXX is not set -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_SAMPLES=y -# CONFIG_SAMPLE_CONFIGFS is not set -# CONFIG_SAMPLE_HW_BREAKPOINT is not set -# CONFIG_SAMPLE_KFIFO is not set -# CONFIG_SAMPLE_KOBJECT is not set -# CONFIG_SAMPLE_KPROBES is not set -# CONFIG_SAMPLE_KRETPROBES is not set -CONFIG_SAMPLE_QMI_CLIENT=m -# CONFIG_SAMPLE_RPMSG_CLIENT is not set -CONFIG_SAMPLE_TRACE_EVENTS=y -# CONFIG_SAMPLE_TRACE_PRINTK is not set -# CONFIG_SAMPLE_VFIO_MDEV_MDPY_FB is not set -# CONFIG_SATA_AHCI is not set -CONFIG_SCHED_HRTICK=y -# CONFIG_SCHED_INFO is not set -# CONFIG_SCSI is not set -# CONFIG_SCSI_DMA is not set -# CONFIG_SCSI_MYRS is not set -CONFIG_SCSI_SCAN_ASYNC=y -# CONFIG_SDM_CAMCC_845 is not set -# CONFIG_SDM_DISPCC_845 is not set -# CONFIG_SDM_GCC_660 is not set -# CONFIG_SDM_GCC_845 is not set -# CONFIG_SDM_GPUCC_845 is not set -# CONFIG_SDM_LPASSCC_845 is not set -# CONFIG_SDM_VIDEOCC_845 is not set -# CONFIG_SEEMP_CORE is not set -# CONFIG_SENSIRION_SGP30 is not set -# CONFIG_SENSORS_DRIVETEMP is not set -# CONFIG_SENSORS_OCC_P8_I2C is not set -# 
CONFIG_SENSORS_RM3100_I2C is not set -# CONFIG_SENSORS_RM3100_SPI is not set -# CONFIG_SERIAL_8250 is not set -# CONFIG_SERIAL_8250_CONSOLE is not set -# CONFIG_SERIAL_8250_DMA is not set -# CONFIG_SERIAL_AMBA_PL010 is not set -# CONFIG_SERIAL_AMBA_PL011 is not set -# CONFIG_SERIAL_FSL_LINFLEXUART is not set -CONFIG_SERIAL_MSM=y -CONFIG_SERIAL_MSM_CONSOLE=y -# CONFIG_SERIAL_SIFIVE is not set -# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set -CONFIG_SKB_RECYCLER=y -CONFIG_SKB_RECYCLER_MULTI_CPU=y -# CONFIG_SKB_RECYCLER_PREALLOC is not set -# CONFIG_SLIMBUS is not set -# CONFIG_SLIMBUS_MSM_CTRL is not set -# CONFIG_SLIMBUS_MSM_NGD is not set -CONFIG_SMP=y -CONFIG_SMP_ON_UP=y -# CONFIG_SM_GCC_8150 is not set -CONFIG_SND=y -# CONFIG_SND_AOA is not set -# CONFIG_SND_COMPRESS_OFFLOAD is not set -# CONFIG_SND_DYNAMIC_MINORS is not set -# CONFIG_SND_PCM is not set -# CONFIG_SND_PROC_FS is not set -CONFIG_SND_SOC=y -# CONFIG_SND_SOC_AK4118 is not set -# CONFIG_SND_SOC_APQ8016_SBC is not set -# CONFIG_SND_SOC_CS35L36 is not set -# CONFIG_SND_SOC_CS4341 is not set -# CONFIG_SND_SOC_CX2072X is not set -# CONFIG_SND_SOC_DMIC is not set -# CONFIG_SND_SOC_FSL_AUDMIX is not set -# CONFIG_SND_SOC_FSL_MICFIL is not set -# CONFIG_SND_SOC_I2C_AND_SPI is not set -CONFIG_SND_SOC_IPQ=y -# CONFIG_SND_SOC_IPQ_ADSS is not set -# CONFIG_SND_SOC_IPQ_CODEC is not set -# CONFIG_SND_SOC_IPQ_CPU_DAI is not set -# CONFIG_SND_SOC_IPQ_MBOX is not set -CONFIG_SND_SOC_IPQ_LPASS=y -CONFIG_SND_SOC_IPQ_LPASS_PCM_RAW=y -# CONFIG_SND_SOC_IPQ_PCM_I2S is not set -# CONFIG_SND_SOC_IPQ_PCM_TDM is not set -# CONFIG_SND_SOC_IPQ_PCM_RAW is not set -# CONFIG_SND_SOC_IPQ_STEREO is not set -# CONFIG_SND_SOC_MAX98088 is not set -# CONFIG_SND_SOC_MAX98357A is not set -# CONFIG_SND_SOC_MT6358 is not set -# CONFIG_SND_SOC_MTK_BTCVSD is not set -# CONFIG_SND_SOC_NAU8822 is not set -# CONFIG_SND_SOC_PCM3060_I2C is not set -# CONFIG_SND_SOC_PCM3060_SPI is not set -CONFIG_SND_SOC_QCOM=y -# CONFIG_SND_SOC_RK3328 is not set -# 
CONFIG_SND_SOC_SOF_TOPLEVEL is not set -# CONFIG_SND_SOC_STORM is not set -# CONFIG_SND_SOC_UDA1334 is not set -# CONFIG_SND_SOC_WM8904 is not set -# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set -# CONFIG_SND_SOC_XILINX_I2S is not set -# CONFIG_SND_SOC_XILINX_SPDIF is not set -CONFIG_SOUND=y -# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set -CONFIG_SPARSE_IRQ=y -CONFIG_SPI=y -CONFIG_SPI_MASTER=y -CONFIG_SPI_MEM=y -# CONFIG_SPI_MTK_QUADSPI is not set -# CONFIG_SPI_QCOM_QSPI is not set -CONFIG_SPI_QUP=y -CONFIG_SPI_SPIDEV=y -# CONFIG_SPI_VSC7385 is not set -# CONFIG_SPMI is not set -# CONFIG_SPMI_MSM_PMIC_ARB is not set -# CONFIG_SPMI_PMIC_CLKDIV is not set -CONFIG_SPS=y -# CONFIG_SPS30 is not set -# CONFIG_SPS_SUPPORT_BAMDMA is not set -CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_SRCU=y -# CONFIG_SRD_TRACE is not set -# CONFIG_STAGING is not set -# CONFIG_STM_PROTO_BASIC is not set -# CONFIG_STM_PROTO_SYS_T is not set -# CONFIG_STM_SOURCE_FTRACE is not set -# CONFIG_STM_SOURCE_HEARTBEAT is not set -# CONFIG_STOPWATCH is not set -# CONFIG_STRIP_ASM_SYMS is not set -# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set -# CONFIG_SECONDARY_TRUSTED_KEYRING is not set -CONFIG_SUSPEND=y -# CONFIG_SWAP is not set -CONFIG_SWCONFIG=y -CONFIG_SWIOTLB=y -CONFIG_SWP_EMULATE=y -# CONFIG_SW_SYNC is not set -# CONFIG_SYNC is not set -CONFIG_SYS_SUPPORTS_APM_EMULATION=y -# CONFIG_TEST_BLACKHOLE_DEV is not set -# CONFIG_TEST_MEMCAT_P is not set -# CONFIG_TEST_MEMINIT is not set -# CONFIG_TEST_STACKINIT is not set -# CONFIG_TEST_STRSCPY is not set -# CONFIG_TEST_VMALLOC is not set -# CONFIG_TEST_XARRAY is not set -CONFIG_THERMAL=y -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_GOV_STEP_WISE=y -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_OF=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -# CONFIG_THUMB2_KERNEL is not set -# CONFIG_TICK_CPU_ACCOUNTING is not set -# CONFIG_TI_ADS124S08 is not set -# CONFIG_TI_ADS8344 is not set -# 
CONFIG_TI_CPSW_PHY_SEL is not set -# CONFIG_TI_DAC7311 is not set -# CONFIG_TI_DAC7612 is not set -CONFIG_TRACING_EVENTS_GPIO=y -# CONFIG_TRUSTED_FOUNDATIONS is not set -CONFIG_UBIFS_FS=y -CONFIG_UBIFS_FS_ADVANCED_COMPR=y -CONFIG_UBIFS_FS_LZO=y -CONFIG_UBIFS_FS_XATTR=y -CONFIG_UBIFS_FS_XZ=y -CONFIG_UBIFS_FS_ZLIB=y -# CONFIG_UBIFS_FS_ZSTD is not set -CONFIG_UEVENT_HELPER_PATH="" -CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h" -# CONFIG_UNICODE is not set -CONFIG_UNINLINE_SPIN_UNLOCK=y -# CONFIG_UNMAP_KERNEL_AT_EL0 is not set -CONFIG_UNWINDER_ARM=y -# CONFIG_UNWINDER_FRAME_POINTER is not set -# CONFIG_USB_BAM is not set -CONFIG_USB_CONFIGFS=y -# CONFIG_USB_CONFIGFS_ACM is not set -# CONFIG_USB_CONFIGFS_ECM is not set -# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set -# CONFIG_USB_CONFIGFS_EEM is not set -# CONFIG_USB_CONFIGFS_F_DIAG is not set -# CONFIG_USB_CONFIGFS_F_FS is not set -# CONFIG_USB_CONFIGFS_F_HID is not set -# CONFIG_USB_CONFIGFS_F_LB_SS is not set -# CONFIG_USB_CONFIGFS_F_MIDI is not set -# CONFIG_USB_CONFIGFS_F_PRINTER is not set -# CONFIG_USB_CONFIGFS_F_QDSS is not set -# CONFIG_USB_CONFIGFS_F_UAC1 is not set -# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set -# CONFIG_USB_CONFIGFS_F_UAC2 is not set -# CONFIG_USB_CONFIGFS_MASS_STORAGE is not set -# CONFIG_USB_CONFIGFS_NCM is not set -# CONFIG_USB_CONFIGFS_OBEX is not set -# CONFIG_USB_CONFIGFS_RNDIS is not set -# CONFIG_USB_CONFIGFS_SERIAL is not set -# CONFIG_USB_CONN_GPIO is not set -# CONFIG_USB_DWC3_OF_SIMPLE is not set -# CONFIG_USB_EHCI_FSL is not set -# CONFIG_USB_EHCI_ROOT_HUB_TT is not set -# CONFIG_USB_EHCI_TT_NEWSCHED is not set -# CONFIG_USB_GADGET is not set -# CONFIG_USB_NET_AQC111 is not set -# CONFIG_USB_OHCI_LITTLE_ENDIAN is not set -CONFIG_USB_QCA_M31_PHY=y -# CONFIG_USB_QCOM_8X16_PHY is not set -# CONFIG_USB_QCOM_QMP_PHY is not set -# CONFIG_USB_QCOM_QUSB_PHY is not set -CONFIG_USB_SUPPORT=y -CONFIG_USE_OF=y -# CONFIG_U_SERIAL_CONSOLE is not set -# CONFIG_VALIDATE_FS_PARSER is not set -# 
CONFIG_VCNL4035 is not set -CONFIG_VDSO=y -CONFIG_VECTORS_BASE=0xffff0000 -# CONFIG_VFIO is not set -CONFIG_VFP=y -CONFIG_VFPv3=y -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set -# CONFIG_VHOST_NET is not set -# CONFIG_VIRT_WIFI is not set -# CONFIG_VIRTIO_BLK is not set -# CONFIG_VIRTIO_CONSOLE is not set -# CONFIG_VIRTIO_FS is not set -# CONFIG_VIRTIO_NET is not set -# CONFIG_VL53L0X_I2C is not set -# CONFIG_VMAP_STACK is not set -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_WATCHDOG_CORE=y -CONFIG_WATCHDOG_OPEN_TIMEOUT=0 -# CONFIG_WL_TI is not set -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -CONFIG_WWAN=y -CONFIG_WWAN_CORE=y -CONFIG_X509_CERTIFICATE_PARSER=y -# CONFIG_XILINX_SDFEC is not set -# CONFIG_XILINX_XADC is not set -CONFIG_XPS=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_BCJ=y -CONFIG_ZBOOT_ROM_BSS=0 -CONFIG_ZBOOT_ROM_TEXT=0 -CONFIG_ZLIB_DEFLATE=y -CONFIG_ZLIB_INFLATE=y -CONFIG_ZONE_DMA_FLAG=0 -# CONFIG_DEBUG_MEM_USAGE is not set -CONFIG_ARCH_IPQ5018=y -CONFIG_ARCH_MMAP_RND_BITS=8 -CONFIG_IPQ_APSS_5018=y -CONFIG_PCIE_DW_PLAT=y -# CONFIG_USB_QCOM_DIAG_BRIDGE is not set -CONFIG_VMSPLIT_2G=y -# CONFIG_VMSPLIT_3G is not set -# CONFIG_SKB_FIXED_SIZE_2K is not set -# CONFIG_ARCH_IPQ256M is not set -# CONFIG_NF_CONNTRACK_CHAIN_EVENTS is not set -# CONFIG_MHI_BUS_TEST is not set -# CONFIG_BOOTCONFIG_PARTITION is not set -# CONFIG_QTI_BT_PIL is not set -# CONFIG_QTI_BT_INTERFACE is not set -CONFIG_LEDS_GPIO=y - - diff --git a/feeds/ipq807x/ipq807x/ipq50xx/config-default b/feeds/ipq807x/ipq807x/ipq50xx/config-default new file mode 100644 index 000000000..ebf414081 --- /dev/null +++ b/feeds/ipq807x/ipq807x/ipq50xx/config-default @@ -0,0 +1,85 @@ +# CONFIG_AHCI_IPQ is not set +CONFIG_ARCH_IPQ5018=y +# CONFIG_DIAGFWD_BRIDGE_CODE is not set +CONFIG_IPQ_ADSS_5018=y +CONFIG_IPQ_APSS_5018=y +CONFIG_IPQ_GCC_5018=y +# CONFIG_NET_SWITCHDEV is not set +CONFIG_NUM_ALT_PARTITION=16 +CONFIG_PINCTRL_IPQ5018=y +# CONFIG_IPC_LOGGING is not set +CONFIG_IPQ_SUBSYSTEM_DUMP=y +CONFIG_SPS=y 
+CONFIG_SPS_SUPPORT_NDP_BAM=y +CONFIG_CORESIGHT=y +CONFIG_CORESIGHT_CSR=y +CONFIG_CORESIGHT_CTI=y +CONFIG_CORESIGHT_EVENT=y +CONFIG_CORESIGHT_HWEVENT=y +CONFIG_CORESIGHT_LINKS_AND_SINKS=y +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y +CONFIG_CORESIGHT_QCOM_REPLICATOR=y +# CONFIG_INPUT_PM8941_PWRKEY is not set +CONFIG_MDIO_QCA=y +# CONFIG_CRYPTO_ALL_CASES is not set +CONFIG_CRYPTO_DEV_QCOM_ICE=y +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_SHA512=y +# CONFIG_CORESIGHT_QPDI is not set +# CONFIG_CORESIGHT_SINK_ETBV10 is not set +CONFIG_CORESIGHT_SINK_TPIU=y +# CONFIG_CORESIGHT_SOURCE_DUMMY is not set +CONFIG_CORESIGHT_SOURCE_ETM3X=y +CONFIG_CORESIGHT_SOURCE_ETM4X=y +# CONFIG_CORESIGHT_REMOTE_ETM is not set +CONFIG_CORESIGHT_STM=y +CONFIG_CORESIGHT_TPDA=y +CONFIG_CORESIGHT_TPDM=y +# CONFIG_CORESIGHT_TPDM_DEFAULT_ENABLE is not set +CONFIG_IIO=y +# CONFIG_IIO_BUFFER is not set +# CONFIG_IIO_TRIGGER is not set +CONFIG_PCIE_DW_PLAT=y +CONFIG_PHY_IPQ_UNIPHY_PCIE=y +CONFIG_VMSPLIT_2G=y +# CONFIG_VMSPLIT_3G is not set +CONFIG_PPS=y +CONFIG_PTP_1588_CLOCK=y +# CONFIG_DP83640_PHY is not set +CONFIG_PWM_IPQ5018=y +CONFIG_QCOM_APM=y +CONFIG_QCOM_DCC=y +# CONFIG_QCOM_SPMI_TEMP_ALARM is not set +CONFIG_MMC_SDHCI_MSM_ICE=y +CONFIG_USB_BAM=y +CONFIG_MAILBOX=y +# CONFIG_USB_QCOM_DIAG_BRIDGE is not set +# CONFIG_USB_CONFIGFS_F_DIAG is not set +# CONFIG_NF_IPV6_DUMMY_HEADER is not set +CONFIG_RMNET_DATA=y +CONFIG_RMNET_DATA_DEBUG_PKT=y +CONFIG_MTD_NAND_SERIAL=y +CONFIG_PAGE_SCOPE_MULTI_PAGE_READ=y +# CONFIG_RMNET_DATA_FC is not set +CONFIG_CRYPTO_NO_ZERO_LEN_HASH=y +CONFIG_CRYPTO_DISABLE_AES192_TEST=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +CONFIG_QTI_EUD=y +CONFIG_USB_QCA_M31_PHY=y +CONFIG_QGIC2_MSI=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +CONFIG_PWM_IPQ4019=y +CONFIG_RMNET=y 
+CONFIG_QCOM_QMI_RMNET=y +CONFIG_QCOM_QMI_DFC=y +CONFIG_QCOM_QMI_POWER_COLLAPSE=y +CONFIG_RMNET_CTL=y +CONFIG_RMNET_CTL_DEBUG=y +CONFIG_SND_SOC_IPQ_LPASS=y +CONFIG_SND_SOC_IPQ_LPASS_PCM_RAW=y +# CONFIG_SND_SOC_IPQ_PCM_RAW is not set +CONFIG_QCOM_RESTART_REASON=y diff --git a/feeds/ipq807x/ipq807x/ipq50xx/config-lowmem b/feeds/ipq807x/ipq807x/ipq50xx/config-lowmem new file mode 100644 index 000000000..b1b817ef6 --- /dev/null +++ b/feeds/ipq807x/ipq807x/ipq50xx/config-lowmem @@ -0,0 +1,73 @@ +# CONFIG_AHCI_IPQ is not set +CONFIG_ARCH_IPQ5018=y +# CONFIG_DIAGFWD_BRIDGE_CODE is not set +CONFIG_IPQ_ADSS_5018=y +CONFIG_IPQ_APSS_5018=y +CONFIG_IPQ_GCC_5018=y +# CONFIG_NET_SWITCHDEV is not set +CONFIG_NUM_ALT_PARTITION=16 +CONFIG_PINCTRL_IPQ5018=y +# CONFIG_IPC_LOGGING is not set +CONFIG_IPQ_SUBSYSTEM_DUMP=y +# CONFIG_SPS is not set +# CONFIG_SPS_SUPPORT_NDP_BAM is not set +# CONFIG_CORESIGHT is not set +# CONFIG_INPUT_PM8941_PWRKEY is not set +CONFIG_MDIO_QCA=y +# CONFIG_CRYPTO_ALL_CASES is not set +# CONFIG_CRYPTO_DEV_QCOM_ICE is not set +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_IIO is not set +# CONFIG_IIO_BUFFER is not set +# CONFIG_IIO_TRIGGER is not set +CONFIG_PCIE_DW_PLAT=y +CONFIG_PHY_IPQ_UNIPHY_PCIE=y +CONFIG_VMSPLIT_2G=y +# CONFIG_VMSPLIT_3G is not set +# CONFIG_PPS is not set +# CONFIG_PTP_1588_CLOCK is not set +# CONFIG_DP83640_PHY is not set +CONFIG_PWM_IPQ5018=y +CONFIG_QCOM_APM=y +# CONFIG_QCOM_DCC is not set +# CONFIG_QCOM_SPMI_TEMP_ALARM is not set +CONFIG_MMC_SDHCI_MSM_ICE=y +CONFIG_USB_BAM=y +CONFIG_MAILBOX=y +# CONFIG_USB_QCOM_DIAG_BRIDGE is not set +# CONFIG_USB_CONFIGFS_F_DIAG is not set +# CONFIG_NF_IPV6_DUMMY_HEADER is not set +# CONFIG_RMNET_DATA is not set +# CONFIG_RMNET_DATA_DEBUG_PKT is not set +CONFIG_MTD_NAND_SERIAL=y +CONFIG_PAGE_SCOPE_MULTI_PAGE_READ=y +# CONFIG_RMNET_DATA_FC is not set +# CONFIG_CRYPTO_NO_ZERO_LEN_HASH is not set +# CONFIG_CRYPTO_DISABLE_AES192_TEST is not set +# 
CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_GOV_PERFORMANCE is not set +# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +CONFIG_QTI_EUD=y +CONFIG_USB_QCA_M31_PHY=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZLIB is not set +# CONFIG_JFFS2_LZMA is not set +CONFIG_JFFS2_ZLIB=y +# CONFIG_LZO_COMPRESS is not set +# CONFIG_LZO_DECOMPRESS is not set +CONFIG_XZ_DEC=y +# CONFIG_XZ_DEC_X86 is not set +# CONFIG_XZ_DEC_POWERPC is not set +# CONFIG_XZ_DEC_IA64 is not set +CONFIG_XZ_DEC_ARM=y +# CONFIG_XZ_DEC_ARMTHUMB is not set +# CONFIG_XZ_DEC_SPARC is not set +CONFIG_XZ_DEC_BCJ=y +# CONFIG_LZO_COMPRESS is not set +# CONFIG_LZO_DECOMPRESS is not set +# CONFIG_CRYPTO is not set +CONFIG_QGIC2_MSI=y +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y diff --git a/feeds/ipq807x/ipq807x/ipq60xx/config-default b/feeds/ipq807x/ipq807x/ipq60xx/config-default index d07fa36d5..3d7c59698 100644 --- a/feeds/ipq807x/ipq807x/ipq60xx/config-default +++ b/feeds/ipq807x/ipq807x/ipq60xx/config-default @@ -1,1249 +1,122 @@ -# CONFIG_AC97_BUS is not set -# CONFIG_AC97_BUS_NEW is not set -# CONFIG_AD7124 is not set -# CONFIG_AD7606_IFACE_PARALLEL is not set -# CONFIG_AD7606_IFACE_SPI is not set -# CONFIG_AD7768_1 is not set -# CONFIG_AD7949 is not set -# CONFIG_ADF4371 is not set -# CONFIG_ADIN_PHY is not set -# CONFIG_ADIS16460 is not set -# CONFIG_ADXL345_I2C is not set -# CONFIG_ADXL345_SPI is not set -# CONFIG_ADXL372_I2C is not set -# CONFIG_ADXL372_SPI is not set -# CONFIG_AHCI_IPQ is not set -CONFIG_ALIGNMENT_TRAP=y -# CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS is not set -# CONFIG_ALLOC_SKB_PAGE_FRAG_DISABLE is not set -# CONFIG_AL_FIC is not set -# CONFIG_AMBA_PL08X is not set -# CONFIG_ANDROID is not set -# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set -# CONFIG_ANDROID_TIMED_OUTPUT is not set -# CONFIG_APM_EMULATION is not set -# CONFIG_APQ_GCC_8084 is not set -# CONFIG_APQ_MMCC_8084 is not set 
-CONFIG_AQUANTIA_PHY=y -# CONFIG_AR8216_PHY is not set -# CONFIG_ARCHES is not set -CONFIG_ARCH_32BIT_OFF_T=y -# CONFIG_ARCH_AGILEX is not set -# CONFIG_ARCH_BITMAIN is not set -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y -CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN=y -CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -# CONFIG_ARCH_HAS_KCOV is not set -CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y -CONFIG_ARCH_HAS_PHYS_TO_DMA=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_ARCH_HAS_SG_CHAIN=y -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y -CONFIG_ARCH_HAS_TICK_BROADCAST=y -CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y -CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS=y +CONFIG_ANDROID=y +# CONFIG_ANDROID_BINDER_IPC is not set +# CONFIG_AQ_PHY is not set CONFIG_ARCH_HIBERNATION_POSSIBLE=y -# CONFIG_ARCH_IPQ6018 is not set -# CONFIG_ARCH_IPQ9574 is not set -CONFIG_ARCH_KEEP_MEMBLOCK=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -# CONFIG_ARCH_MDM9615 is not set -CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y -# CONFIG_ARCH_MILBEAUT is not set -CONFIG_ARCH_MMAP_RND_BITS=18 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 -# CONFIG_ARCH_MSM8960 is not set -# CONFIG_ARCH_MSM8974 is not set +CONFIG_ARCH_IPQ6018=y # CONFIG_ARCH_MSM8X60 is not set -CONFIG_ARCH_MULTIPLATFORM=y -# CONFIG_ARCH_MULTI_CPU_AUTO is not set -CONFIG_ARCH_MULTI_V6_V7=y -CONFIG_ARCH_MULTI_V7=y -CONFIG_ARCH_NR_GPIO=0 -CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y -CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y -CONFIG_ARCH_QCOM=y -# CONFIG_ARCH_RDA is not set -# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set -# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y 
-CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_ARM=y -# CONFIG_ARM64_CNP is not set -# CONFIG_ARM64_ERRATUM_1165522 is not set -# CONFIG_ARM64_ERRATUM_1286807 is not set -# CONFIG_ARM64_ERRATUM_1418040 is not set -# CONFIG_ARM64_ERRATUM_1542419 is not set -# CONFIG_ARM64_MODULE_PLTS is not set -# CONFIG_ARM64_PMEM is not set -# CONFIG_ARM64_PSEUDO_NMI is not set -# CONFIG_ARM64_PTDUMP_DEBUGFS is not set -# CONFIG_ARM64_PTR_AUTH is not set -# CONFIG_ARM64_SSBD is not set -# CONFIG_ARM64_SVE is not set -# CONFIG_ARM64_TAGGED_ADDR_ABI is not set -# CONFIG_ARM64_UAO is not set -# CONFIG_ARM64_VHE is not set -CONFIG_ARM_AMBA=y -CONFIG_ARM_ARCH_TIMER=y -CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y -# CONFIG_ARM_ATAG_DTB_COMPAT is not set -# CONFIG_ARM_CCI is not set -# CONFIG_ARM_CCI400_COMMON is not set -# CONFIG_ARM_CCI400_PMU is not set -# CONFIG_ARM_CCI_PMU is not set -CONFIG_ARM_CPUIDLE=y -CONFIG_ARM_CPU_SUSPEND=y -# CONFIG_ARM_ERRATA_814220 is not set -# CONFIG_ARM_ERRATA_857272 is not set -CONFIG_ARM_GIC=y -CONFIG_ARM_HAS_SG_CHAIN=y -# CONFIG_ARM_HIGHBANK_CPUIDLE is not set -CONFIG_ARM_L1_CACHE_SHIFT=6 -CONFIG_ARM_L1_CACHE_SHIFT_6=y -# CONFIG_ARM_LPAE is not set -CONFIG_ARM_MODULE_PLTS=y -CONFIG_ARM_PATCH_IDIV=y -CONFIG_ARM_PATCH_PHYS_VIRT=y -CONFIG_ARM_PMU=y -CONFIG_ARM_PSCI=y -# CONFIG_ARM_PSCI_CPUIDLE is not set -CONFIG_ARM_PSCI_FW=y -CONFIG_ARM_QCOM_CPUFREQ=y -# CONFIG_ARM_QCOM_CPUFREQ_HW is not set -CONFIG_ARM_QCOM_CPUFREQ_NVMEM=y -# CONFIG_ARM_SCMI_PROTOCOL is not set -# CONFIG_ARM_SMMU is not set -# CONFIG_ARM_SP805_WATCHDOG is not set -CONFIG_ARM_THUMB=y -# CONFIG_ARM_THUMBEE is not set -CONFIG_ARM_UNWIND=y -CONFIG_ARM_VIRT_EXT=y -# CONFIG_ASHMEM is not set -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y 
-CONFIG_AT803X_PHY=y -# CONFIG_ATA is not set -# CONFIG_BACKLIGHT_CLASS_DEVICE is not set -# CONFIG_BATTERY_RT5033 is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_NVME=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=4096 -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -CONFIG_BOUNCE=y -# CONFIG_BPFILTER is not set -CONFIG_BRIDGE_VLAN_FILTERING=y -# CONFIG_BT_HCIBTUSB_MTK is not set -# CONFIG_BT_MTKSDIO is not set -CONFIG_BUILD_BIN2C=y -# CONFIG_BUS_TOPOLOGY_ADHOC is not set -# CONFIG_CACHE_L2X0 is not set -# CONFIG_CAVIUM_TX2_ERRATUM_219 is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_CC_STACKPROTECTOR=y -# CONFIG_CC_STACKPROTECTOR_NONE is not set -CONFIG_CC_STACKPROTECTOR_REGULAR=y -# CONFIG_CHARGER_LT3651 is not set -# CONFIG_CHARGER_QCOM_SMBB is not set -# CONFIG_CHARGER_UCS1002 is not set -CONFIG_CLEANCACHE=y -CONFIG_CLKDEV_LOOKUP=y -CONFIG_CLKSRC_OF=y -CONFIG_CLKSRC_PROBE=y -CONFIG_CLKSRC_QCOM=y -CONFIG_CLONE_BACKWARDS=y -# CONFIG_CMA is not set -# CONFIG_CMA_ALIGNMENT is not set -# CONFIG_CMA_AREAS is not set -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -# CONFIG_CMA_SIZE_MBYTES is not set -# CONFIG_CMA_SIZE_SEL_MAX is not set -# CONFIG_CMA_SIZE_SEL_MBYTES is not set -# CONFIG_CMA_SIZE_SEL_MIN is not set -# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -CONFIG_CNSS2=y -# CONFIG_CNSS2_CALIBRATION_SUPPORT is not set -# CONFIG_CNSS2_DEBUG is not set -CONFIG_CNSS2_GENL=y -# CONFIG_CNSS2_PCI_DRIVER is not set -# CONFIG_CNSS2_PM is not set -# CONFIG_CNSS2_RAMDUMP is not set -# CONFIG_CNSS2_SMMU is not set -# CONFIG_CNSS2_QCA9574_SUPPORT is not set -CONFIG_CNSS_QCN9000=y -CONFIG_COMMON_CLK=y -# CONFIG_COMMON_CLK_FIXED_MMIO is not set -CONFIG_COMMON_CLK_QCOM=y -# CONFIG_COMMON_CLK_SI5341 is not set -CONFIG_CONFIGFS_FS=y -CONFIG_COREDUMP=y +CONFIG_ARM_DMA_IOMMU_ALIGNMENT=8 +CONFIG_ARM_DMA_USE_IOMMU=y +CONFIG_ARM_HEAVY_MB=y +CONFIG_ARM_QTI_IPQ60XX_CPUFREQ=y +CONFIG_ARM_SMMU=y 
+CONFIG_ASN1=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_CACHE_L2X0=y +CONFIG_CLZ_TAB=y +CONFIG_CMA=y +CONFIG_CMA_ALIGNMENT=8 +CONFIG_CMA_AREAS=7 +CONFIG_CMA_DEBUG=y +CONFIG_CMA_DEBUGFS=y +CONFIG_CMA_SIZE_MBYTES=0 +CONFIG_CMA_SIZE_SEL_MBYTES=y CONFIG_CORESIGHT=y -# CONFIG_CORESIGHT_BYTE_CNTR is not set -# CONFIG_CORESIGHT_CATU is not set -# CONFIG_CORESIGHT_CPU_DEBUG is not set CONFIG_CORESIGHT_CSR=y CONFIG_CORESIGHT_CTI=y -# CONFIG_CORESIGHT_CTI_SAVE_DISABLE is not set -# CONFIG_CORESIGHT_DUMMY is not set CONFIG_CORESIGHT_HWEVENT=y CONFIG_CORESIGHT_LINKS_AND_SINKS=y CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y -# CONFIG_CORESIGHT_REMOTE_ETM is not set -# CONFIG_CORESIGHT_SINK_ETBV10 is not set -# CONFIG_CORESIGHT_SINK_TPIU is not set -# CONFIG_CORESIGHT_SOURCE_ETM3X is not set +CONFIG_CORESIGHT_QCOM_REPLICATOR=y +CONFIG_CORESIGHT_SINK_TPIU=y +CONFIG_CORESIGHT_SOURCE_ETM3X=y CONFIG_CORESIGHT_SOURCE_ETM4X=y CONFIG_CORESIGHT_STM=y CONFIG_CORESIGHT_TPDA=y CONFIG_CORESIGHT_TPDM=y -# CONFIG_CORESIGHT_TPDM_DEFAULT_ENABLE is not set -# CONFIG_COUNTER is not set -CONFIG_CPUFREQ_DT=y -CONFIG_CPUFREQ_DT_PLATDEV=y -CONFIG_CPU_32v6K=y -CONFIG_CPU_32v7=y -CONFIG_CPU_ABRT_EV7=y -# CONFIG_CPU_BIG_ENDIAN is not set -# CONFIG_CPU_BPREDICT_DISABLE is not set -CONFIG_CPU_CACHE_V7=y -CONFIG_CPU_CACHE_VIPT=y -CONFIG_CPU_COPY_V6=y -CONFIG_CPU_CP15=y -CONFIG_CPU_CP15_MMU=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_HAS_ASID=y -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_CPU_ICACHE_DISABLE is not set -# CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND is not set -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_CPU_IDLE_GOV_TEO is not set -CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y -CONFIG_CPU_PABRT_V7=y -CONFIG_CPU_PM=y 
-CONFIG_CPU_RMAP=y -# CONFIG_CPU_SW_DOMAIN_PAN is not set -# CONFIG_CPU_THERMAL is not set -CONFIG_CPU_TLB_V7=y -CONFIG_CPU_V7=y -CONFIG_CRC16=y -# CONFIG_CRC32_SARWATE is not set -CONFIG_CRC32_SLICEBY8=y -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_CRYPTO_ADIANTUM is not set -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_AES_586=y -# CONFIG_CRYPTO_ALL_CASES is not set -CONFIG_CRYPTO_ARC4=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CCM=y -CONFIG_CRYPTO_CMAC=y -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -# CONFIG_CRYPTO_DEV_HISI_ZIP is not set -# CONFIG_CRYPTO_DEV_OTA_CRYPTO is not set -CONFIG_CRYPTO_DEV_QCOM_ICE=y -# CONFIG_CRYPTO_DEV_QCOM_MSM_QCE is not set -# CONFIG_CRYPTO_DEV_QCOM_RNG is not set -CONFIG_CRYPTO_DISABLE_AHASH_LARGE_KEY_TEST=y -CONFIG_CRYPTO_DISABLE_AHASH_TYPE1_TESTS=y -CONFIG_CRYPTO_DISABLE_AHASH_TYPE2_TESTS=y -CONFIG_CRYPTO_DISABLE_AHASH_TYPE3_TESTS=y -CONFIG_CRYPTO_DISABLE_AUTH_SPLIT_TESTS=y -CONFIG_CRYPTO_DISABLE_HW_UNSUPPORTED_TESTS=y -CONFIG_CRYPTO_DISABLE_OUTOFPLACE_TESTS=y -# CONFIG_CRYPTO_DISABLE_AES192_TEST is not set +CONFIG_CRC_CCITT=m +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_DRBG=y CONFIG_CRYPTO_DRBG_HMAC=y CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_ECHAINIV=y -# CONFIG_CRYPTO_ECRDSA is not set -# CONFIG_CRYPTO_ESSIV is not set -CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_GF128MUL=y CONFIG_CRYPTO_GHASH=y CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_HW=y CONFIG_CRYPTO_JITTERENTROPY=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_LIB_AES=y -CONFIG_CRYPTO_LIB_SHA256=y -CONFIG_CRYPTO_LZO=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set -CONFIG_CRYPTO_MD5_PPC=y -CONFIG_CRYPTO_MICHAEL_MIC=y 
-CONFIG_CRYPTO_NO_AES_XTS_ZERO_KEY_SUPPORT=y -CONFIG_CRYPTO_NO_AES_CTR_UNEVEN_DATA_LEN_SUPPORT=y -# CONFIG_CRYPTO_NO_ZERO_LEN_HASH is not set -CONFIG_CRYPTO_NULL=y CONFIG_CRYPTO_NULL2=y -CONFIG_CRYPTO_OFB=y -# CONFIG_CRYPTO_PCRYPT is not set -CONFIG_CRYPTO_RMD160=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP2=y CONFIG_CRYPTO_RNG_DEFAULT=y -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA1_PPC=y -CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y -# CONFIG_CRYPTO_STREEBOG is not set -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_XTS=y -# CONFIG_CRYPTO_XXHASH is not set -CONFIG_CRYPTO_XZ=y -CONFIG_DCACHE_WORD_ACCESS=y -CONFIG_DEBUG_BUGVERBOSE=y -# CONFIG_DEBUG_EFI is not set -CONFIG_DEBUG_GPIO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S" -# CONFIG_DEBUG_MISC is not set -# CONFIG_DEBUG_PLIST is not set -# CONFIG_DEBUG_UART_8250 is not set -# CONFIG_DEBUG_USER is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DEFAULT_SECURITY_DAC=y -CONFIG_DEVMEM=y -# CONFIG_DIAGFWD_BRIDGE_CODE is not set -CONFIG_DIAG_OVER_QRTR=y -# CONFIG_DIAG_OVER_USB is not set -CONFIG_DMADEVICES=y -# CONFIG_DMA_CMA is not set -CONFIG_DMA_ENGINE=y -CONFIG_DMA_OF=y -# CONFIG_DMA_SOUND is not set -CONFIG_DMA_VIRTUAL_CHANNELS=y -# CONFIG_DMI is not set -# CONFIG_DMIID is not set -# CONFIG_DMI_SYSFS is not set -# CONFIG_DM_INIT is not set -# CONFIG_DP83640_PHY is not set -# CONFIG_DPS310 is not set -CONFIG_DTC=y -# CONFIG_DWMAC_GENERIC is not set -# CONFIG_DWMAC_IPQ806X is not set -# CONFIG_DWMAC_SUNXI is not set -# CONFIG_DW_DMAC_PCI is not set -CONFIG_DYNAMIC_DEBUG=y +CONFIG_DMA_CMA=y +CONFIG_DMA_SHARED_BUFFER=y +CONFIG_DT_IDLE_STATES=y CONFIG_EDAC_ATOMIC_SCRUB=y CONFIG_EDAC_SUPPORT=y -# CONFIG_EEPROM_EE1004 is not set -# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -# CONFIG_ENERGY_MODEL is not set -# CONFIG_EP_PCIE is not set -CONFIG_ETHERNET_PACKET_MANGLE=y -# 
CONFIG_EXPORTFS_BLOCK_OPS is not set -CONFIG_EXT4_FS=y -# CONFIG_EXT4_USE_FOR_EXT2 is not set -# CONFIG_EXTCON_FSA9480 is not set -CONFIG_FB=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_CMDLINE=y -# CONFIG_FB_EFI is not set -CONFIG_FB_QTI_QPIC=y -CONFIG_FB_QTI_QPIC_ER_SSD1963_PANEL=y -CONFIG_FB_SYS_FOPS=y -# CONFIG_FIPS_ENABLE is not set -CONFIG_FIXED_PHY=y -CONFIG_FIX_EARLYCON_MEM=y -# CONFIG_FSL_MC_BUS is not set -# CONFIG_FSL_QDMA is not set -CONFIG_FS_MBCACHE=y -# CONFIG_FS_VERITY is not set -# CONFIG_FUJITSU_ERRATUM_010001 is not set -CONFIG_FW_AUTH=y -CONFIG_FW_AUTH_TEST=m -# CONFIG_FW_LOADER_COMPRESS is not set -# CONFIG_FXAS21002C is not set -# CONFIG_FXOS8700_I2C is not set -# CONFIG_FXOS8700_SPI is not set -# CONFIG_GCC_PLUGINS is not set -# CONFIG_GCC_PLUGIN_CYC_COMPLEXITY is not set -# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set -# CONFIG_GCC_PLUGIN_RANDSTRUCT is not set -# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set -# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL is not set -# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -# CONFIG_GENERIC_CPUFREQ_KRAIT is not set -CONFIG_GENERIC_IDLE_POLL_SETUP=y -CONFIG_GENERIC_IO=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_SHOW_LEVEL=y -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_PHY=y -CONFIG_GENERIC_PINCONF=y -CONFIG_GENERIC_PINCTRL_GROUPS=y -CONFIG_GENERIC_PINMUX_FUNCTIONS=y -CONFIG_GENERIC_SCHED_CLOCK=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_TIME_VSYSCALL=y -# CONFIG_GEN_RTC is not set -# CONFIG_GLACIER is not set -# CONFIG_GLINK_DEBUG_FS is not set -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_GPIO_AMD_FCH is not set -# CONFIG_GPIO_CADENCE is not set -CONFIG_GPIO_DEVRES=y -# 
CONFIG_GPIO_GW_PLD is not set -# CONFIG_GPIO_LATCH is not set -# CONFIG_GPIO_NXP_74HC153 is not set -# CONFIG_GPIO_SAMA5D2_PIOBU is not set -CONFIG_GPIO_SYSFS=y -# CONFIG_GPIO_USB_DETECT is not set -# CONFIG_GSI is not set -# CONFIG_HABANA_AI is not set -CONFIG_HANDLE_DOMAIN_IRQ=y -CONFIG_HARDEN_BRANCH_PREDICTOR=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_HAS_DMA=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_HAVE_ARCH_BITREVERSE=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_ARCH_KGDB=y -CONFIG_HAVE_ARCH_PFN_VALID=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_ARM_ARCH_TIMER=y -# CONFIG_HAVE_ARM_SMCCC is not set -# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set -CONFIG_HAVE_BPF_JIT=y -CONFIG_HAVE_CC_STACKPROTECTOR=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_HAVE_DEBUG_KMEMLEAK=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_DMA_ATTRS=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_IDE=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_LZ4=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_HAVE_KERNEL_XZ=y -# CONFIG_HAVE_KPROBES is not set -# CONFIG_HAVE_KRETPROBES is not set -CONFIG_HAVE_MEMBLOCK=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_HAVE_NET_DSA=y -CONFIG_HAVE_OPROFILE=y -# CONFIG_HAVE_OPTPROBES is not set -CONFIG_HAVE_PERF_EVENTS=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_PROC_CPU=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_SMP=y 
-CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_UID16=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -# CONFIG_HEADERS_INSTALL is not set -# CONFIG_HID_MACALLY is not set -# CONFIG_HID_MALTRON is not set -# CONFIG_HID_VIEWSONIC is not set -CONFIG_HIGHMEM=y -CONFIG_HIGHPTE=y -CONFIG_HOTPLUG_CPU=y -CONFIG_HWSPINLOCK=y -CONFIG_HWSPINLOCK_QCOM=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_MSM_LEGACY=y -CONFIG_HZ_FIXED=0 -CONFIG_I2C=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_HELPER_AUTO=y -# CONFIG_I2C_NVIDIA_GPU is not set -CONFIG_I2C_QUP=y -# CONFIG_I3C is not set -# CONFIG_IGC is not set -CONFIG_IIO=y -# CONFIG_IIO_BUFFER is not set -# CONFIG_IIO_TRIGGER is not set -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -# CONFIG_IKHEADERS is not set -CONFIG_INITRAMFS_SOURCE="" -# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set -# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set -# CONFIG_INPUT_GPIO_VIBRA is not set -# CONFIG_INPUT_MSM_VIBRATOR is not set -# CONFIG_INPUT_PM8941_PWRKEY is not set -# CONFIG_INPUT_PM8XXX_VIBRATOR is not set -# CONFIG_INTERCONNECT is not set -CONFIG_IOMMU_HELPER=y -# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set -# CONFIG_ION is not set -# CONFIG_ION_DUMMY is not set -# CONFIG_ION_MSM is not set -# CONFIG_ION_TEST is not set -# CONFIG_IO_URING is not set -# CONFIG_IPA is not set -# CONFIG_IPA3 is not set -# CONFIG_IPC_LOGGING is not set -# CONFIG_IPC_ROUTER is not set -# CONFIG_IPC_ROUTER_SECURITY is not set -# CONFIG_IPQ807X_REMOTEPROC is not set -# CONFIG_IPQ_ADCC_4019 is not set -# CONFIG_IPQ_ADSS_8074 is not set -# CONFIG_IPQ_APSS_5018 is not set +CONFIG_EXTCON=y +CONFIG_EXTCON_USB_GPIO=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FREEZER=y +CONFIG_GPIO_WATCHDOG=y +CONFIG_GPIO_WATCHDOG_ARCH_INITCALL=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_IO_PGTABLE=y +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +CONFIG_IOMMU_SUPPORT=y 
+CONFIG_ION=y +CONFIG_ION_MSM=y CONFIG_IPQ_APSS_6018=y -# CONFIG_IPQ_APSS_8074 is not set -# CONFIG_IPQ_DWC3_QTI_EXTCON is not set -# CONFIG_IPQ_FLASH_16M_PROFILE is not set -# CONFIG_IPQ_GCC_4019 is not set -# CONFIG_IPQ_GCC_5018 is not set CONFIG_IPQ_GCC_6018=y -# CONFIG_IPQ_GCC_806X is not set -# CONFIG_IPQ_GCC_8074 is not set -# CONFIG_IPQ_GCC_9574 is not set -# CONFIG_IPQ_LCC_806X is not set -# CONFIG_IPQ_REMOTEPROC_ADSP is not set -CONFIG_IPQ_SUBSYSTEM_DUMP=y -CONFIG_IPQ_SUBSYSTEM_RAMDUMP=y -# CONFIG_IPQ_SUBSYSTEM_RESTART is not set -# CONFIG_IPQ_SUBSYSTEM_RESTART_TEST is not set -CONFIG_IPQ_TCSR=y -CONFIG_IRQCHIP=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_IRQ_WORK=y -CONFIG_JBD2=y -# CONFIG_KCOV is not set -CONFIG_KEYS=y -# CONFIG_KEYS_REQUEST_CACHE is not set -CONFIG_KPSS_XCC=y -# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set -# CONFIG_KRAITCC is not set -# CONFIG_KRAIT_CLOCKS is not set -# CONFIG_KRAIT_L2_ACCESSORS is not set -# CONFIG_LCD_CLASS_DEVICE is not set -# CONFIG_LEDS_AN30259A is not set -CONFIG_LEDS_IPQ=y -# CONFIG_LEDS_LM3532 is not set -# CONFIG_LEDS_PCA9956B is not set -CONFIG_LEDS_TLC591XX=y -# CONFIG_LEDS_TRIGGER_AUDIO is not set -# CONFIG_LEDS_TRIGGER_PATTERN is not set -CONFIG_LIBFDT=y -CONFIG_LOCKUP_DETECTOR=y -# CONFIG_LOCK_EVENT_COUNTS is not set -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity" -# CONFIG_LTC1660 is not set -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_MAILBOX=y -# CONFIG_MAILBOX_TEST is not set -# CONFIG_MAP_E_SUPPORT is not set -# CONFIG_MAX31856 is not set -# CONFIG_MAX44009 is not set -# CONFIG_MAX5432 is not set -# CONFIG_MB1232 is not set -# CONFIG_MCP3911 is not set -# CONFIG_MCP41010 is not set -CONFIG_MDIO=y -CONFIG_MDIO_BITBANG=y -CONFIG_MDIO_BOARDINFO=y -# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set -CONFIG_MDIO_GPIO=y +CONFIG_IPQ_MEM_PROFILE=256 +CONFIG_KASAN_SHADOW_OFFSET=0x5f000000 
+CONFIG_LEDS_PCA9956B=y CONFIG_MDIO_QCA=y -# CONFIG_MDM_GCC_9615 is not set -# CONFIG_MDM_LCC_9615 is not set -# CONFIG_MEMORY_HOTPLUG is not set -# CONFIG_MFD_LOCHNAGAR is not set -# CONFIG_MFD_MAX77650 is not set -CONFIG_MFD_QCOM_RPM=y -# CONFIG_MFD_ROHM_BD70528 is not set -# CONFIG_MFD_SPMI_PMIC is not set -# CONFIG_MFD_STMFX is not set -# CONFIG_MFD_STPMIC1 is not set -CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TQMX86 is not set -CONFIG_MHI_BUS=y -# CONFIG_MHI_BUS_DEBUG is not set -CONFIG_MHI_NETDEV=y -CONFIG_MHI_QTI=y -# CONFIG_MHI_SATELLITE is not set -CONFIG_MHI_UCI=y -# CONFIG_MHI_WWAN_CTRL is not set -CONFIG_MIGHT_HAVE_CACHE_L2X0=y -CONFIG_MIGHT_HAVE_PCI=y +CONFIG_MEMORY_ISOLATION=y CONFIG_MIGRATION=y -# CONFIG_MIKROTIK is not set -# CONFIG_MISC_ALCOR_PCI is not set -# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set -CONFIG_MMC=y -CONFIG_MMC_ARMMMCI=y -CONFIG_MMC_BLOCK=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_QCOM_DML=y -CONFIG_MMC_SDHCI=y -# CONFIG_MMC_SDHCI_AM654 is not set -CONFIG_MMC_SDHCI_IO_ACCESSORS=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y -# CONFIG_MMC_SDHCI_OF_ARASAN is not set -# CONFIG_MMC_SDHCI_OF_ASPEED is not set -# CONFIG_MMC_SDHCI_PCI is not set -CONFIG_MMC_SDHCI_PLTFM=y -# CONFIG_MMC_STM32_SDMMC is not set -# CONFIG_MMC_TIFM_SD is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_MODULES_USE_ELF_REL=y -# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set -# CONFIG_MPLS_ROUTING is not set -# CONFIG_MSM_ADSPRPC is not set -# CONFIG_MSM_BUS_SCALING is not set -# CONFIG_MSM_GCC_8660 is not set -# CONFIG_MSM_GCC_8916 is not set -# CONFIG_MSM_GCC_8960 is not set -# CONFIG_MSM_GCC_8974 is not set -# CONFIG_MSM_GCC_8994 is not set -# CONFIG_MSM_GCC_8996 is not set -# CONFIG_MSM_GCC_8998 is not set -# CONFIG_MSM_GLINK is not set -# CONFIG_MSM_GLINK_LOOPBACK_SERVER is not set -# CONFIG_MSM_GLINK_PKT is not set -# CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT is not set -# CONFIG_MSM_IPC_ROUTER_GLINK_XPRT is not set -# CONFIG_MSM_IPC_ROUTER_MHI_XPRT is not set 
-# CONFIG_MSM_LCC_8960 is not set -# CONFIG_MSM_MHI is not set -# CONFIG_MSM_MHI_DEBUG is not set -# CONFIG_MSM_MHI_DEV is not set -# CONFIG_MSM_MHI_UCI is not set -# CONFIG_MSM_MMCC_8960 is not set -# CONFIG_MSM_MMCC_8974 is not set -# CONFIG_MSM_MMCC_8996 is not set -# CONFIG_MSM_QMI_INTERFACE is not set -# CONFIG_MSM_RPM_GLINK is not set -CONFIG_MSM_RPM_LOG=y -CONFIG_MSM_RPM_RPMSG=y -# CONFIG_MSM_RPM_SMD is not set -# CONFIG_MSM_SECURE_BUFFER is not set -# CONFIG_MSM_SMEM is not set -# CONFIG_MSM_TEST_QMI_CLIENT is not set -CONFIG_MTD_CMDLINE_PARTS=y -# CONFIG_MTD_HYPERBUS is not set -CONFIG_MTD_M25P80=y -# CONFIG_MTD_NAND_ECC_SW_BCH is not set -# CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC is not set -CONFIG_MTD_NAND_QCOM=y -# CONFIG_MTD_NAND_QCOM_SERIAL is not set -CONFIG_MTD_RAW_NAND=y -# CONFIG_MTD_ROUTERBOOT_PARTS is not set -CONFIG_MTD_SPINAND_GIGADEVICE=y -CONFIG_MTD_SPINAND_MT29F=y -CONFIG_MTD_SPINAND_ONDIEECC=y -CONFIG_MTD_SPI_NOR=y -# CONFIG_MTD_SPLIT_BCM_WFI_FW is not set -# CONFIG_MTD_SPLIT_ELF_FW is not set -CONFIG_MTD_SPLIT_FIRMWARE=y -CONFIG_MTD_SPLIT_FIT_FW=y -CONFIG_MTD_UBI=y -CONFIG_MTD_UBI_BEB_LIMIT=20 -CONFIG_MTD_UBI_BLOCK=y -# CONFIG_MTD_UBI_FASTMAP is not set -CONFIG_MTD_UBI_GLUEBI=y -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MULTI_IRQ_HANDLER=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEON=y -CONFIG_NET=y -# CONFIG_NET_DSA_MV88E6063 is not set -CONFIG_NET_FLOW_LIMIT=y -CONFIG_NET_L3_MASTER_DEV=y -CONFIG_NET_PTP_CLASSIFY=y -# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_MPILIB=y +CONFIG_MSM_SECURE_BUFFER=y +CONFIG_NEED_SG_DMA_LENGTH=y CONFIG_NET_SWITCHDEV=y -# CONFIG_NET_VENDOR_GOOGLE is not set -# CONFIG_NET_VENDOR_PENSANDO is not set -# CONFIG_NF_IPV6_DUMMY_HEADER is not set -# CONFIG_NI_XGE_MANAGEMENT_ENET is not set -# CONFIG_NOA1305 is not set -CONFIG_NO_BOOTMEM=y -CONFIG_NO_HZ=y -CONFIG_NO_HZ_COMMON=y -CONFIG_NO_HZ_IDLE=y -CONFIG_NR_CPUS=4 -# CONFIG_NULL_TTY is not set -# CONFIG_NUMA is not set CONFIG_NUM_ALT_PARTITION=16 
-CONFIG_NVMEM=y -# CONFIG_NVMEM_REBOOT_MODE is not set -# CONFIG_NVMEM_SYSFS is not set -# CONFIG_NVME_MULTIPATH is not set -# CONFIG_NVME_TCP is not set -# CONFIG_OCTEONTX2_AF is not set -CONFIG_OF=y -CONFIG_OF_ADDRESS=y -CONFIG_OF_ADDRESS_PCI=y -CONFIG_OF_EARLY_FLATTREE=y -CONFIG_OF_FLATTREE=y -CONFIG_OF_GPIO=y -CONFIG_OF_IRQ=y -CONFIG_OF_KOBJ=y -CONFIG_OF_MDIO=y -CONFIG_OF_MTD=y -CONFIG_OF_NET=y -CONFIG_OF_PCI=y -CONFIG_OF_PCI_IRQ=y -CONFIG_OF_RESERVED_MEM=y -# CONFIG_OF_SLIMBUS is not set -CONFIG_OLD_SIGACTION=y -CONFIG_OLD_SIGSUSPEND3=y -# CONFIG_OPTIMIZE_INLINING is not set -# CONFIG_PACKING is not set -CONFIG_PADATA=y -CONFIG_PAGE_OFFSET=0xC0000000 -CONFIG_PANIC_ON_OOPS=y -CONFIG_PANIC_ON_OOPS_VALUE=1 -CONFIG_PANIC_TIMEOUT=5 -CONFIG_PCI=y -# CONFIG_PCIEAER is not set -# CONFIG_PCIE_AL is not set -# CONFIG_PCIE_CADENCE_EP is not set -CONFIG_PCIE_DW=y -# CONFIG_PCIE_DW_PLAT is not set -# CONFIG_PCIE_PME is not set -CONFIG_PCIE_QCOM=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -# CONFIG_PCI_MESON is not set -CONFIG_PCI_MSI=y -CONFIG_PERF_EVENTS=y -CONFIG_PERF_USE_VMALLOC=y -# CONFIG_PFT is not set -CONFIG_PGTABLE_LEVELS=2 -CONFIG_PHYLIB=y -# CONFIG_PHY_CADENCE_DP is not set -# CONFIG_PHY_CADENCE_DPHY is not set -# CONFIG_PHY_CADENCE_SIERRA is not set -# CONFIG_PHY_FSL_IMX8MQ_USB is not set -# CONFIG_PHY_IPQ_BALDUR_USB is not set -# CONFIG_PHY_IPQ_UNIPHY_PCIE is not set -# CONFIG_PHY_IPQ_UNIPHY_USB is not set -# CONFIG_PHY_MIXEL_MIPI_DPHY is not set -# CONFIG_PHY_OCELOT_SERDES is not set -# CONFIG_PHY_QCA_PCIE_QMP is not set -# CONFIG_PHY_QCOM_APQ8064_SATA is not set -# CONFIG_PHY_QCOM_IPQ806X_SATA is not set -# CONFIG_PHY_QCOM_PCIE2 is not set -CONFIG_PHY_QCOM_QMP=y -# CONFIG_PHY_QCOM_QUSB2 is not set -# CONFIG_PHY_QCOM_UFS is not set -CONFIG_PINCTRL=y -# CONFIG_PINCTRL_APQ8064 is not set -# CONFIG_PINCTRL_APQ8084 is not set -# CONFIG_PINCTRL_IPQ4019 is not set -# CONFIG_PINCTRL_IPQ5018 is not set +CONFIG_OF_IOMMU=y +CONFIG_OID_REGISTRY=y 
+CONFIG_OUTER_CACHE=y +CONFIG_OUTER_CACHE_SYNC=y +CONFIG_PAGE_OFFSET=0x80000000 CONFIG_PINCTRL_IPQ6018=y -# CONFIG_PINCTRL_IPQ8064 is not set -# CONFIG_PINCTRL_IPQ8074 is not set -# CONFIG_PINCTRL_IPQ9574 is not set -# CONFIG_PINCTRL_MDM9615 is not set -CONFIG_PINCTRL_MSM=y -# CONFIG_PINCTRL_MSM8660 is not set -# CONFIG_PINCTRL_MSM8916 is not set -# CONFIG_PINCTRL_MSM8960 is not set -# CONFIG_PINCTRL_MSM8994 is not set -# CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_MSM8998 is not set -# CONFIG_PINCTRL_OCELOT is not set -# CONFIG_PINCTRL_QCOM_SPMI_PMIC is not set -# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set -# CONFIG_PINCTRL_QCS404 is not set -# CONFIG_PINCTRL_SC7180 is not set -# CONFIG_PINCTRL_SDM660 is not set -# CONFIG_PINCTRL_SDM845 is not set -# CONFIG_PINCTRL_SINGLE is not set -# CONFIG_PINCTRL_SM8150 is not set -# CONFIG_PINCTRL_STMFX is not set -# CONFIG_PKCS7_TEST_KEY is not set # CONFIG_PKCS7_MESSAGE_PARSER is not set -# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set -# CONFIG_PL330_DMA is not set -CONFIG_PM=y -# CONFIG_PM8916_WATCHDOG is not set -CONFIG_PM_CLK=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_GENERIC_DOMAINS=y -CONFIG_PM_GENERIC_DOMAINS_OF=y -CONFIG_PM_GENERIC_DOMAINS_SLEEP=y -CONFIG_PM_OPP=y -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_MSM=y -# CONFIG_POWER_RESET_QCOM_PON is not set -CONFIG_POWER_SUPPLY=y +# CONFIG_PL310_ERRATA_588369 is not set +# CONFIG_PL310_ERRATA_727915 is not set +# CONFIG_PL310_ERRATA_753970 is not set +# CONFIG_PL310_ERRATA_769419 is not set CONFIG_PPS=y -CONFIG_PREEMPT=y -CONFIG_PREEMPT_COUNT=y -# CONFIG_PREEMPT_NONE is not set -CONFIG_PREEMPT_RCU=y -# CONFIG_PRINTK_CALLER is not set -CONFIG_PRINTK_TIME=y -CONFIG_PROC_PAGE_MONITOR=y -# CONFIG_PROC_STRIPPED is not set -# CONFIG_PSI is not set CONFIG_PTP_1588_CLOCK=y -CONFIG_PUBLIC_KEY_ALGO_RSA=y -# CONFIG_PVPANIC is not set -CONFIG_PWM=y -CONFIG_PWM_IPQ=y -# CONFIG_PWM_PCA9685 is not set -CONFIG_PWM_SYSFS=y -# 
CONFIG_PWRSEQ_EMMC is not set -CONFIG_PWRSEQ_IPQ=y -# CONFIG_PWRSEQ_SIMPLE is not set -CONFIG_QCA_MINIDUMP=y -# CONFIG_QCA_MINIDUMP_DEBUG is not set -# CONFIG_QCOM_A53PLL is not set -CONFIG_QCOM_ADM=y -# CONFIG_QCOM_AOSS_QMP is not set -CONFIG_QCOM_APCS_IPC=y +CONFIG_PWM_IPQ4019=y CONFIG_QCOM_APM=y -# CONFIG_QCOM_APR is not set -CONFIG_QCOM_BAM_DMA=y -CONFIG_QCOM_CACHE_DUMP=y -CONFIG_QCOM_CACHE_DUMP_ON_PANIC=y -# CONFIG_QCOM_CLK_APCS_MSM8916 is not set -# CONFIG_QCOM_CLK_RPM is not set -# CONFIG_QCOM_CLK_SMD_RPM is not set -# CONFIG_QCOM_COINCELL is not set -# CONFIG_QCOM_COMMAND_DB is not set -CONFIG_QCOM_DLOAD_MODE=y -CONFIG_QCOM_DLOAD_MODE_APPSBL=y -# CONFIG_QCOM_EBI2 is not set -# CONFIG_QCOM_FASTRPC is not set -CONFIG_QCOM_GDSC=y -# CONFIG_QCOM_GENI_SE is not set -CONFIG_QCOM_GLINK_SSR=y -# CONFIG_QCOM_GSBI is not set -# CONFIG_QCOM_HFPLL is not set -# CONFIG_QCOM_LLCC is not set -# CONFIG_QCOM_PDC is not set -# CONFIG_QCOM_PM is not set -# CONFIG_QCOM_Q6V5_ADSP is not set -# CONFIG_QCOM_Q6V5_MPD is not set -# CONFIG_QCOM_Q6V5_MSS is not set -# CONFIG_QCOM_Q6V5_PAS is not set -CONFIG_QCOM_Q6V5_WCSS=y -CONFIG_QCOM_QFPROM=y +CONFIG_QCOM_DCC=y CONFIG_QCOM_QMI_HELPERS=y -CONFIG_QCOM_RESTART_REASON=y -# CONFIG_QCOM_RMTFS_MEM is not set -# CONFIG_QCOM_RPMH is not set -# CONFIG_QCOM_RPMPD is not set -CONFIG_QCOM_RPM_CLK=y -# CONFIG_QCOM_RTB is not set -CONFIG_QCOM_SCM=y -CONFIG_QCOM_SCM_32=y -# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set -# CONFIG_QCOM_SMD is not set -CONFIG_QCOM_SMD_RPM=y -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMEM_STATE=y -CONFIG_QCOM_SMP2P=y -# CONFIG_QCOM_SMSM is not set -CONFIG_QCOM_SOCINFO=y -# CONFIG_QCOM_SPMI_ADC5 is not set -# CONFIG_QCOM_SPMI_TEMP_ALARM is not set -# CONFIG_QCOM_SPMI_VADC is not set -CONFIG_QCOM_SYSMON=y -CONFIG_QCOM_TSENS=y -# CONFIG_QCOM_WCNSS_CTRL is not set -# CONFIG_QCOM_WCNSS_PIL is not set -CONFIG_QCOM_WDT=y -# CONFIG_QCS_GCC_404 is not set -# CONFIG_QCS_TURING_404 is not set -CONFIG_QMI_ENCDEC=y -# CONFIG_QPNP_REVID 
is not set -CONFIG_QRTR=y -# CONFIG_QRTR_FIFO is not set -CONFIG_QRTR_MHI=y -CONFIG_QRTR_SMD=y -# CONFIG_QRTR_TUN is not set -# CONFIG_QRTR_USB is not set -CONFIG_QSEECOM=m -# CONFIG_QTI_APSS_ACC is not set -CONFIG_QTI_CTXT_SAVE=y -CONFIG_QTI_DCC=y -# CONFIG_QTI_DCC_V2 is not set -# CONFIG_QTI_EUD is not set -CONFIG_QTI_MEMORY_DUMP_V2=y -CONFIG_QTI_SCM_RESTART_REASON=y -CONFIG_QTI_TZ_LOG=y -# CONFIG_RANDOMIZE_BASE is not set -# CONFIG_RANDOM_TRUST_BOOTLOADER is not set -CONFIG_RATIONAL=y -# CONFIG_RCU_BOOST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=21 -# CONFIG_RCU_EXPERT is not set -CONFIG_RCU_STALL_COMMON=y -CONFIG_RD_GZIP=y -# CONFIG_REED_SOLOMON_TEST is not set -CONFIG_REGMAP=y -CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y -CONFIG_REGMAP_MMIO=y -CONFIG_REGULATOR=y +CONFIG_REGMAP_I2C=y +CONFIG_REGMAP_SPMI=y CONFIG_REGULATOR_CPR3=y CONFIG_REGULATOR_CPR3_NPU=y CONFIG_REGULATOR_CPR4_APSS=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_GPIO=y -# CONFIG_REGULATOR_IPQ40XX is not set -CONFIG_REGULATOR_QCOM_RPM=y -CONFIG_REGULATOR_QCOM_SMD_RPM=y -# CONFIG_REGULATOR_QCOM_SPMI is not set -CONFIG_REGULATOR_RPM_GLINK=y -# CONFIG_REGULATOR_RPM_SMD is not set -CONFIG_RELAY=y -CONFIG_REMOTEPROC=y -# CONFIG_RESET_ATTACK_MITIGATION is not set -CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_QCOM_AOSS is not set -# CONFIG_RESET_QCOM_PDC is not set -CONFIG_RFS_ACCEL=y -CONFIG_RMNET=y -CONFIG_RMNET_DATA=y -CONFIG_RMNET_DATA_DEBUG_PKT=y -# CONFIG_RMNET_DATA_FC is not set -# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set -CONFIG_RPMSG=y -CONFIG_RPMSG_CHAR=y -CONFIG_RPMSG_QCOM_GLINK_RPM=y -CONFIG_RPMSG_QCOM_GLINK_SMEM=y -CONFIG_RPMSG_QCOM_SMD=y -# CONFIG_RPMSG_VIRTIO is not set -CONFIG_RPS=y -CONFIG_RTC_CLASS=y -# CONFIG_RTC_DRV_CMOS is not set -# CONFIG_RTC_DRV_PM8XXX is not set -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_SAMPLES=y -# CONFIG_SAMPLE_CONFIGFS is not set -# CONFIG_SAMPLE_HW_BREAKPOINT is not set -# CONFIG_SAMPLE_KFIFO is not set -# CONFIG_SAMPLE_KOBJECT 
is not set -# CONFIG_SAMPLE_KPROBES is not set -# CONFIG_SAMPLE_KRETPROBES is not set -CONFIG_SAMPLE_QMI_CLIENT=m -# CONFIG_SAMPLE_RPMSG_CLIENT is not set -CONFIG_SAMPLE_TRACE_EVENTS=y -# CONFIG_SAMPLE_VFIO_MDEV_MDPY_FB is not set -# CONFIG_SATA_AHCI is not set -CONFIG_SCHED_HRTICK=y -# CONFIG_SCHED_INFO is not set -# CONFIG_SCSI is not set -# CONFIG_SCSI_DMA is not set -# CONFIG_SCSI_MYRS is not set -CONFIG_SCSI_SCAN_ASYNC=y -# CONFIG_SDM_CAMCC_845 is not set -# CONFIG_SDM_DISPCC_845 is not set -# CONFIG_SDM_GCC_660 is not set -# CONFIG_SDM_GCC_845 is not set -# CONFIG_SDM_GPUCC_845 is not set -# CONFIG_SDM_LPASSCC_845 is not set -# CONFIG_SDM_VIDEOCC_845 is not set -# CONFIG_SEEMP_CORE is not set -# CONFIG_SENSIRION_SGP30 is not set -# CONFIG_SENSORS_DRIVETEMP is not set -# CONFIG_SENSORS_OCC_P8_I2C is not set -# CONFIG_SENSORS_RM3100_I2C is not set -# CONFIG_SENSORS_RM3100_SPI is not set -# CONFIG_SERIAL_8250 is not set -# CONFIG_SERIAL_8250_CONSOLE is not set -# CONFIG_SERIAL_8250_DMA is not set -# CONFIG_SERIAL_AMBA_PL010 is not set -# CONFIG_SERIAL_AMBA_PL011 is not set -# CONFIG_SERIAL_FSL_LINFLEXUART is not set -CONFIG_SERIAL_MSM=y -CONFIG_SERIAL_MSM_CONSOLE=y -# CONFIG_SERIAL_SIFIVE is not set -# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set -CONFIG_SKB_RECYCLER=y -CONFIG_SKB_RECYCLER_MULTI_CPU=y -# CONFIG_SKB_RECYCLER_PREALLOC is not set -# CONFIG_SLIMBUS is not set -# CONFIG_SLIMBUS_MSM_CTRL is not set -# CONFIG_SLIMBUS_MSM_NGD is not set -CONFIG_SMP=y -CONFIG_SMP_ON_UP=y -# CONFIG_SM_GCC_8150 is not set -# CONFIG_SND is not set -# CONFIG_SND_AOA is not set -# CONFIG_SND_COMPRESS_OFFLOAD is not set -# CONFIG_SND_DYNAMIC_MINORS is not set -# CONFIG_SND_PCM is not set -# CONFIG_SND_PROC_FS is not set -# CONFIG_SND_SOC is not set -# CONFIG_SND_SOC_AK4118 is not set -# CONFIG_SND_SOC_APQ8016_SBC is not set -# CONFIG_SND_SOC_CS35L36 is not set -# CONFIG_SND_SOC_CS4341 is not set -# CONFIG_SND_SOC_CX2072X is not set -# CONFIG_SND_SOC_DMIC is not set -# 
CONFIG_SND_SOC_FSL_AUDMIX is not set -# CONFIG_SND_SOC_FSL_MICFIL is not set -# CONFIG_SND_SOC_I2C_AND_SPI is not set -# CONFIG_SND_SOC_MAX98088 is not set -# CONFIG_SND_SOC_MAX98357A is not set -# CONFIG_SND_SOC_MT6358 is not set -# CONFIG_SND_SOC_MTK_BTCVSD is not set -# CONFIG_SND_SOC_NAU8822 is not set -# CONFIG_SND_SOC_PCM3060_I2C is not set -# CONFIG_SND_SOC_PCM3060_SPI is not set -# CONFIG_SND_SOC_RK3328 is not set -# CONFIG_SND_SOC_SOF_TOPLEVEL is not set -# CONFIG_SND_SOC_STORM is not set -# CONFIG_SND_SOC_UDA1334 is not set -# CONFIG_SND_SOC_WM8904 is not set -# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set -# CONFIG_SND_SOC_XILINX_I2S is not set -# CONFIG_SND_SOC_XILINX_SPDIF is not set -# CONFIG_SOUND is not set -# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set -CONFIG_SPARSE_IRQ=y -CONFIG_SPI=y -CONFIG_SPI_MASTER=y -CONFIG_SPI_MEM=y -# CONFIG_SPI_MTK_QUADSPI is not set -# CONFIG_SPI_QCOM_QSPI is not set -CONFIG_SPI_QUP=y -CONFIG_SPI_SPIDEV=y -# CONFIG_SPI_VSC7385 is not set -# CONFIG_SPMI is not set -# CONFIG_SPMI_MSM_PMIC_ARB is not set -# CONFIG_SPMI_PMIC_CLKDIV is not set +# CONFIG_SKB_FIXED_SIZE_2K is not set +CONFIG_SOC_BUS=y CONFIG_SPS=y -# CONFIG_SPS30 is not set -# CONFIG_SPS_SUPPORT_BAMDMA is not set CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_SRCU=y -# CONFIG_SRD_TRACE is not set -# CONFIG_STAGING is not set -# CONFIG_STM_PROTO_BASIC is not set -# CONFIG_STM_PROTO_SYS_T is not set -# CONFIG_STM_SOURCE_HEARTBEAT is not set -# CONFIG_STOPWATCH is not set -# CONFIG_STRIP_ASM_SYMS is not set -# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set -# CONFIG_SECONDARY_TRUSTED_KEYRING is not set -CONFIG_SUSPEND=y -# CONFIG_SWAP is not set -CONFIG_SWCONFIG=y -CONFIG_SWIOTLB=y -CONFIG_SWP_EMULATE=y -# CONFIG_SW_SYNC is not set -# CONFIG_SYNC is not set -CONFIG_SYS_SUPPORTS_APM_EMULATION=y -# CONFIG_TEST_BLACKHOLE_DEV is not set -# CONFIG_TEST_MEMCAT_P is not set -# CONFIG_TEST_MEMINIT is not set -# CONFIG_TEST_STACKINIT is not set -# CONFIG_TEST_STRSCPY is not set -# 
CONFIG_TEST_VMALLOC is not set -# CONFIG_TEST_XARRAY is not set -CONFIG_THERMAL=y -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_GOV_STEP_WISE=y -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_OF=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -# CONFIG_THUMB2_KERNEL is not set -# CONFIG_TICK_CPU_ACCOUNTING is not set -# CONFIG_TI_ADS124S08 is not set -# CONFIG_TI_ADS8344 is not set -# CONFIG_TI_CPSW_PHY_SEL is not set -# CONFIG_TI_DAC7311 is not set -# CONFIG_TI_DAC7612 is not set -CONFIG_TRACING_EVENTS_GPIO=y -# CONFIG_TRUSTED_FOUNDATIONS is not set -CONFIG_UBIFS_FS=y -CONFIG_UBIFS_FS_ADVANCED_COMPR=y -CONFIG_UBIFS_FS_LZO=y -CONFIG_UBIFS_FS_XATTR=y -CONFIG_UBIFS_FS_XZ=y -CONFIG_UBIFS_FS_ZLIB=y -# CONFIG_UBIFS_FS_ZSTD is not set -CONFIG_UEVENT_HELPER_PATH="" -CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h" -# CONFIG_UNICODE is not set -CONFIG_UNINLINE_SPIN_UNLOCK=y -# CONFIG_UNMAP_KERNEL_AT_EL0 is not set -CONFIG_UNWINDER_ARM=y -# CONFIG_UNWINDER_FRAME_POINTER is not set -# CONFIG_USB_BAM is not set -CONFIG_USB_CONFIGFS=y -# CONFIG_USB_CONFIGFS_ACM is not set -# CONFIG_USB_CONFIGFS_ECM is not set -# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set -# CONFIG_USB_CONFIGFS_EEM is not set -# CONFIG_USB_CONFIGFS_F_DIAG is not set -# CONFIG_USB_CONFIGFS_F_FS is not set -# CONFIG_USB_CONFIGFS_F_HID is not set -# CONFIG_USB_CONFIGFS_F_LB_SS is not set -# CONFIG_USB_CONFIGFS_F_MIDI is not set -# CONFIG_USB_CONFIGFS_F_PRINTER is not set -# CONFIG_USB_CONFIGFS_F_QDSS is not set -# CONFIG_USB_CONFIGFS_F_UAC1 is not set -# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set -# CONFIG_USB_CONFIGFS_F_UAC2 is not set -# CONFIG_USB_CONFIGFS_MASS_STORAGE is not set -# CONFIG_USB_CONFIGFS_NCM is not set -# CONFIG_USB_CONFIGFS_OBEX is not set -# CONFIG_USB_CONFIGFS_RNDIS is not set -# CONFIG_USB_CONFIGFS_SERIAL is not set -# CONFIG_USB_CONN_GPIO is not set -# CONFIG_USB_DWC3_OF_SIMPLE is not set -# CONFIG_USB_EHCI_FSL is not set -# 
CONFIG_USB_EHCI_ROOT_HUB_TT is not set -# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_STAGING=y +CONFIG_SUSPEND_FREEZER=y # CONFIG_USB_GADGET is not set -# CONFIG_USB_NET_AQC111 is not set -# CONFIG_USB_OHCI_LITTLE_ENDIAN is not set -# CONFIG_USB_QCA_M31_PHY is not set -# CONFIG_USB_QCOM_8X16_PHY is not set -# CONFIG_USB_QCOM_QMP_PHY is not set -# CONFIG_USB_QCOM_QUSB_PHY is not set -CONFIG_USB_SUPPORT=y -CONFIG_USE_OF=y -# CONFIG_U_SERIAL_CONSOLE is not set -# CONFIG_VALIDATE_FS_PARSER is not set -# CONFIG_VCNL4035 is not set -CONFIG_VDSO=y -CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_USB_OHCI_LITTLE_ENDIAN=y # CONFIG_VFIO is not set -CONFIG_VFP=y -CONFIG_VFPv3=y -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set -# CONFIG_VHOST_NET is not set -# CONFIG_VIRT_WIFI is not set -# CONFIG_VIRTIO_BLK is not set -# CONFIG_VIRTIO_CONSOLE is not set -# CONFIG_VIRTIO_FS is not set -# CONFIG_VIRTIO_NET is not set -# CONFIG_VL53L0X_I2C is not set -# CONFIG_VMAP_STACK is not set -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_WATCHDOG_CORE=y -CONFIG_WATCHDOG_OPEN_TIMEOUT=0 -# CONFIG_WL_TI is not set -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -# CONFIG_WWAN is not set -# CONFIG_WWAN_CORE is not set -CONFIG_X509_CERTIFICATE_PARSER=y -# CONFIG_XILINX_SDFEC is not set -# CONFIG_XILINX_XADC is not set -CONFIG_XPS=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_BCJ=y -CONFIG_ZBOOT_ROM_BSS=0 -CONFIG_ZBOOT_ROM_TEXT=0 -CONFIG_ZLIB_DEFLATE=y -CONFIG_ZLIB_INFLATE=y -CONFIG_ZONE_DMA_FLAG=0 -# CONFIG_DEBUG_MEM_USAGE is not set -CONFIG_ARCH_IPQ6018=y -CONFIG_ARCH_MMAP_RND_BITS=8 -CONFIG_PCIE_DW_PLAT=y -# CONFIG_USB_QCOM_DIAG_BRIDGE is not set +CONFIG_VIRTIO=y +CONFIG_VIRTUALIZATION=y CONFIG_VMSPLIT_2G=y # CONFIG_VMSPLIT_3G is not set -# CONFIG_SKB_FIXED_SIZE_2K is not set -# CONFIG_ARCH_IPQ256M is not set -# CONFIG_NF_CONNTRACK_DSCPREMARK_EXT is not set -# CONFIG_NF_CONNTRACK_CHAIN_EVENTS is not set -# CONFIG_MHI_BUS_TEST is not set -# CONFIG_DIAG_MHI is not set -# CONFIG_BOOTCONFIG_PARTITION is not set -# 
CONFIG_QTI_BT_PIL is not set -# CONFIG_QTI_BT_INTERFACE is not set -CONFIG_LEDS_GPIO=y -CONFIG_GPIO_WATCHDOG=y -CONFIG_GPIO_WATCHDOG_ARCH_INITCALL=y +CONFIG_WANT_DEV_COREDUMP=y diff --git a/feeds/ipq807x/ipq807x/ipq60xx/target.mk b/feeds/ipq807x/ipq807x/ipq60xx/target.mk index 8f641074c..cf2bf7b9c 100644 --- a/feeds/ipq807x/ipq807x/ipq60xx/target.mk +++ b/feeds/ipq807x/ipq807x/ipq60xx/target.mk @@ -1,8 +1,6 @@ SUBTARGET:=ipq60xx BOARDNAME:=IPQ60xx based boards -KERNEL_PATCHVER:=5.4 - DEFAULT_PACKAGES += ath11k-firmware-ipq60xx qca-nss-fw-ipq60xx define Target/Description diff --git a/feeds/ipq807x/ipq807x/ipq807x/config-default b/feeds/ipq807x/ipq807x/ipq807x/config-default index 34dea205e..f1e8aadc9 100644 --- a/feeds/ipq807x/ipq807x/ipq807x/config-default +++ b/feeds/ipq807x/ipq807x/ipq807x/config-default @@ -1,1260 +1,78 @@ -# CONFIG_AC97_BUS is not set -# CONFIG_AC97_BUS_NEW is not set -# CONFIG_AD7124 is not set -# CONFIG_AD7606_IFACE_PARALLEL is not set -# CONFIG_AD7606_IFACE_SPI is not set -# CONFIG_AD7768_1 is not set -# CONFIG_AD7949 is not set -# CONFIG_ADF4371 is not set -# CONFIG_ADIN_PHY is not set -# CONFIG_ADIS16460 is not set -# CONFIG_ADXL345_I2C is not set -# CONFIG_ADXL345_SPI is not set -# CONFIG_ADXL372_I2C is not set -# CONFIG_ADXL372_SPI is not set # CONFIG_AHCI_IPQ is not set -CONFIG_ALIGNMENT_TRAP=y -# CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS is not set -# CONFIG_ALLOC_SKB_PAGE_FRAG_DISABLE is not set -# CONFIG_AL_FIC is not set -# CONFIG_AMBA_PL08X is not set -# CONFIG_ANDROID is not set -# CONFIG_ANDROID_LOW_MEMORY_KILLER is not set -# CONFIG_ANDROID_TIMED_OUTPUT is not set -# CONFIG_APM_EMULATION is not set -# CONFIG_APQ_GCC_8084 is not set -# CONFIG_APQ_MMCC_8084 is not set -CONFIG_AQUANTIA_PHY=y -# CONFIG_AR8216_PHY is not set -# CONFIG_ARCHES is not set -CONFIG_ARCH_32BIT_OFF_T=y -# CONFIG_ARCH_AGILEX is not set -# CONFIG_ARCH_BITMAIN is not set -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y 
-CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y -CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y -CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y -CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN=y -CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y -CONFIG_ARCH_HAS_ELF_RANDOMIZE=y -CONFIG_ARCH_HAS_FORTIFY_SOURCE=y -CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y -# CONFIG_ARCH_HAS_KCOV is not set -CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y -CONFIG_ARCH_HAS_PHYS_TO_DMA=y -CONFIG_ARCH_HAS_SET_MEMORY=y -CONFIG_ARCH_HAS_SG_CHAIN=y -CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y -CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y -CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y -CONFIG_ARCH_HAS_TICK_BROADCAST=y -CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y -CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -# CONFIG_ARCH_IPQ6018 is not set -# CONFIG_ARCH_IPQ9574 is not set -CONFIG_ARCH_KEEP_MEMBLOCK=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -# CONFIG_ARCH_MDM9615 is not set -CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y -# CONFIG_ARCH_MILBEAUT is not set -CONFIG_ARCH_MMAP_RND_BITS=18 -CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 -# CONFIG_ARCH_MSM8960 is not set -# CONFIG_ARCH_MSM8974 is not set -CONFIG_ARCH_MSM8X60=y -CONFIG_ARCH_MULTIPLATFORM=y -# CONFIG_ARCH_MULTI_CPU_AUTO is not set -CONFIG_ARCH_MULTI_V6_V7=y -CONFIG_ARCH_MULTI_V7=y -CONFIG_ARCH_NR_GPIO=0 -CONFIG_ARCH_OPTIONAL_KERNEL_RWX=y -CONFIG_ARCH_OPTIONAL_KERNEL_RWX_DEFAULT=y -CONFIG_ARCH_QCOM=y -# CONFIG_ARCH_RDA is not set -# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set -# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_ARM=y -# CONFIG_ARM64_CNP is not set -# CONFIG_ARM64_ERRATUM_1165522 is not set -# CONFIG_ARM64_ERRATUM_1286807 is not set -# 
CONFIG_ARM64_ERRATUM_1418040 is not set -# CONFIG_ARM64_ERRATUM_1542419 is not set -# CONFIG_ARM64_MODULE_PLTS is not set -# CONFIG_ARM64_PMEM is not set -# CONFIG_ARM64_PSEUDO_NMI is not set -# CONFIG_ARM64_PTDUMP_DEBUGFS is not set -# CONFIG_ARM64_PTR_AUTH is not set -# CONFIG_ARM64_SSBD is not set -# CONFIG_ARM64_SVE is not set -# CONFIG_ARM64_TAGGED_ADDR_ABI is not set -# CONFIG_ARM64_UAO is not set -# CONFIG_ARM64_VHE is not set -CONFIG_ARM_AMBA=y -CONFIG_ARM_ARCH_TIMER=y -CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y -# CONFIG_ARM_ATAG_DTB_COMPAT is not set -# CONFIG_ARM_CCI is not set -# CONFIG_ARM_CCI400_COMMON is not set -# CONFIG_ARM_CCI400_PMU is not set -# CONFIG_ARM_CCI_PMU is not set -CONFIG_ARM_CPUIDLE=y -CONFIG_ARM_CPU_SUSPEND=y -# CONFIG_ARM_ERRATA_814220 is not set -# CONFIG_ARM_ERRATA_857272 is not set -CONFIG_ARM_GIC=y -CONFIG_ARM_HAS_SG_CHAIN=y -# CONFIG_ARM_HIGHBANK_CPUIDLE is not set -CONFIG_ARM_L1_CACHE_SHIFT=6 -CONFIG_ARM_L1_CACHE_SHIFT_6=y -# CONFIG_ARM_LPAE is not set -CONFIG_ARM_MODULE_PLTS=y -CONFIG_ARM_PATCH_IDIV=y -CONFIG_ARM_PATCH_PHYS_VIRT=y -CONFIG_ARM_PMU=y -CONFIG_ARM_PSCI=y -# CONFIG_ARM_PSCI_CPUIDLE is not set -CONFIG_ARM_PSCI_FW=y -CONFIG_ARM_QCOM_CPUFREQ=y -# CONFIG_ARM_QCOM_CPUFREQ_HW is not set -# CONFIG_ARM_QCOM_CPUFREQ_NVMEM is not set -# CONFIG_ARM_SCMI_PROTOCOL is not set -# CONFIG_ARM_SMMU is not set -# CONFIG_ARM_SP805_WATCHDOG is not set -CONFIG_ARM_THUMB=y -# CONFIG_ARM_THUMBEE is not set -CONFIG_ARM_UNWIND=y -CONFIG_ARM_VIRT_EXT=y -# CONFIG_ASHMEM is not set -CONFIG_ASYMMETRIC_KEY_TYPE=y -CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y -CONFIG_AT803X_PHY=y -# CONFIG_ATA is not set -# CONFIG_BACKLIGHT_CLASS_DEVICE is not set -# CONFIG_BATTERY_RT5033 is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_NVME=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=4096 -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -CONFIG_BOUNCE=y -# CONFIG_BPFILTER is not set -# 
CONFIG_BPF_KPROBE_OVERRIDE is not set -CONFIG_BRIDGE_VLAN_FILTERING=y -# CONFIG_BT_HCIBTUSB_MTK is not set -# CONFIG_BT_MTKSDIO is not set -CONFIG_BUILD_BIN2C=y -# CONFIG_BUS_TOPOLOGY_ADHOC is not set -# CONFIG_CACHE_L2X0 is not set -# CONFIG_CAVIUM_TX2_ERRATUM_219 is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_CC_STACKPROTECTOR=y -# CONFIG_CC_STACKPROTECTOR_NONE is not set -CONFIG_CC_STACKPROTECTOR_REGULAR=y -# CONFIG_CHARGER_LT3651 is not set -# CONFIG_CHARGER_QCOM_SMBB is not set -# CONFIG_CHARGER_UCS1002 is not set -CONFIG_CLEANCACHE=y -CONFIG_CLKDEV_LOOKUP=y -CONFIG_CLKSRC_OF=y -CONFIG_CLKSRC_PROBE=y -CONFIG_CLKSRC_QCOM=y -CONFIG_CLONE_BACKWARDS=y -# CONFIG_CMA is not set -# CONFIG_CMA_ALIGNMENT is not set -# CONFIG_CMA_AREAS is not set -# CONFIG_CMA_DEBUG is not set -# CONFIG_CMA_DEBUGFS is not set -# CONFIG_CMA_SIZE_MBYTES is not set -# CONFIG_CMA_SIZE_SEL_MAX is not set -# CONFIG_CMA_SIZE_SEL_MBYTES is not set -# CONFIG_CMA_SIZE_SEL_MIN is not set -# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -# CONFIG_CNSS2 is not set -# CONFIG_CNSS2_CALIBRATION_SUPPORT is not set -# CONFIG_CNSS2_DEBUG is not set -CONFIG_CNSS2_GENL=y -# CONFIG_CNSS2_PCI_DRIVER is not set -# CONFIG_CNSS2_PM is not set -# CONFIG_CNSS2_RAMDUMP is not set -# CONFIG_CNSS2_SMMU is not set -# CONFIG_CNSS2_QCA9574_SUPPORT is not set -CONFIG_CNSS_QCN9000=y -CONFIG_COMMON_CLK=y -# CONFIG_COMMON_CLK_FIXED_MMIO is not set -CONFIG_COMMON_CLK_QCOM=y -# CONFIG_COMMON_CLK_SI5341 is not set -CONFIG_CONFIGFS_FS=y -CONFIG_COREDUMP=y +CONFIG_ARCH_IPQ807x=y +# CONFIG_DIAGFWD_BRIDGE_CODE is not set +CONFIG_IPQ_ADSS_807x=y +CONFIG_IPQ_APSS_807x=y +CONFIG_IPQ_GCC_807x=y +CONFIG_NET_SWITCHDEV=y +CONFIG_NUM_ALT_PARTITION=16 +CONFIG_PINCTRL_IPQ807x=y +# CONFIG_IPC_LOGGING is not set +CONFIG_IPQ_SUBSYSTEM_DUMP=y +CONFIG_SPS=y +CONFIG_SPS_SUPPORT_NDP_BAM=y CONFIG_CORESIGHT=y -# CONFIG_CORESIGHT_BYTE_CNTR is not set -# CONFIG_CORESIGHT_CATU is not set -# CONFIG_CORESIGHT_CPU_DEBUG is not set CONFIG_CORESIGHT_CSR=y 
CONFIG_CORESIGHT_CTI=y -# CONFIG_CORESIGHT_CTI_SAVE_DISABLE is not set -# CONFIG_CORESIGHT_DUMMY is not set +CONFIG_CORESIGHT_EVENT=y CONFIG_CORESIGHT_HWEVENT=y CONFIG_CORESIGHT_LINKS_AND_SINKS=y CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y -# CONFIG_CORESIGHT_REMOTE_ETM is not set +CONFIG_CORESIGHT_QCOM_REPLICATOR=y +CONFIG_CORESIGHT_STREAM=m +# CONFIG_INPUT_PM8941_PWRKEY is not set +CONFIG_MDIO_QCA=y +# CONFIG_CRYPTO_ALL_CASES is not set +CONFIG_CRYPTO_DEV_QCOM_ICE=y +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_SHA512=y +# CONFIG_CORESIGHT_QPDI is not set # CONFIG_CORESIGHT_SINK_ETBV10 is not set -# CONFIG_CORESIGHT_SINK_TPIU is not set -# CONFIG_CORESIGHT_SOURCE_ETM3X is not set +CONFIG_CORESIGHT_SINK_TPIU=y +# CONFIG_CORESIGHT_SOURCE_DUMMY is not set +CONFIG_CORESIGHT_SOURCE_ETM3X=y CONFIG_CORESIGHT_SOURCE_ETM4X=y +# CONFIG_CORESIGHT_REMOTE_ETM is not set CONFIG_CORESIGHT_STM=y CONFIG_CORESIGHT_TPDA=y CONFIG_CORESIGHT_TPDM=y +CONFIG_AQUANTIA_PHY=y # CONFIG_CORESIGHT_TPDM_DEFAULT_ENABLE is not set -# CONFIG_COUNTER is not set -CONFIG_CPUFREQ_DT=y -CONFIG_CPUFREQ_DT_PLATDEV=y -CONFIG_CPU_32v6K=y -CONFIG_CPU_32v7=y -CONFIG_CPU_ABRT_EV7=y -# CONFIG_CPU_BIG_ENDIAN is not set -# CONFIG_CPU_BPREDICT_DISABLE is not set -CONFIG_CPU_CACHE_V7=y -CONFIG_CPU_CACHE_VIPT=y -CONFIG_CPU_COPY_V6=y -CONFIG_CPU_CP15=y -CONFIG_CPU_CP15_MMU=y -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_STAT=y -CONFIG_CPU_HAS_ASID=y -# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set -# CONFIG_CPU_ICACHE_DISABLE is not set -# CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND is not set -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_CPU_IDLE_GOV_TEO is not set -CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y -CONFIG_CPU_PABRT_V7=y -CONFIG_CPU_PM=y 
-CONFIG_CPU_RMAP=y -# CONFIG_CPU_SW_DOMAIN_PAN is not set -# CONFIG_CPU_THERMAL is not set -CONFIG_CPU_TLB_V7=y -CONFIG_CPU_V7=y -CONFIG_CRC16=y -# CONFIG_CRC32_SARWATE is not set -CONFIG_CRC32_SLICEBY8=y -CONFIG_CROSS_MEMORY_ATTACH=y -# CONFIG_CRYPTO_ADIANTUM is not set -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_AES_586=y -# CONFIG_CRYPTO_ALL_CASES is not set -CONFIG_CRYPTO_ARC4=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CCM=y -CONFIG_CRYPTO_CMAC=y -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set -# CONFIG_CRYPTO_DEV_HISI_ZIP is not set -# CONFIG_CRYPTO_DEV_OTA_CRYPTO is not set -CONFIG_CRYPTO_DEV_QCOM_ICE=y -# CONFIG_CRYPTO_DEV_QCOM_MSM_QCE is not set -# CONFIG_CRYPTO_DEV_QCOM_RNG is not set -CONFIG_CRYPTO_DISABLE_AHASH_LARGE_KEY_TEST=y -CONFIG_CRYPTO_DISABLE_AHASH_TYPE1_TESTS=y -CONFIG_CRYPTO_DISABLE_AHASH_TYPE2_TESTS=y -CONFIG_CRYPTO_DISABLE_AHASH_TYPE3_TESTS=y -CONFIG_CRYPTO_DISABLE_AUTH_SPLIT_TESTS=y -CONFIG_CRYPTO_DISABLE_HW_UNSUPPORTED_TESTS=y -CONFIG_CRYPTO_DISABLE_OUTOFPLACE_TESTS=y -# CONFIG_CRYPTO_DISABLE_AES192_TEST is not set -CONFIG_CRYPTO_DRBG_HMAC=y -CONFIG_CRYPTO_DRBG_MENU=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_ECHAINIV=y -# CONFIG_CRYPTO_ECRDSA is not set -# CONFIG_CRYPTO_ESSIV is not set -CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_GF128MUL=y -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_JITTERENTROPY=y -CONFIG_CRYPTO_KPP2=y -CONFIG_CRYPTO_LIB_AES=y -CONFIG_CRYPTO_LIB_SHA256=y -CONFIG_CRYPTO_LZO=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set -CONFIG_CRYPTO_MD5_PPC=y -CONFIG_CRYPTO_MICHAEL_MIC=y -# CONFIG_CRYPTO_NO_ZERO_LEN_HASH is not set -# CONFIG_CRYPTO_NO_AES_XTS_ZERO_KEY_SUPPORT is not set -# 
CONFIG_CRYPTO_NO_AES_CTR_UNEVEN_DATA_LEN_SUPPORT is not set -CONFIG_CRYPTO_NULL=y -CONFIG_CRYPTO_NULL2=y -CONFIG_CRYPTO_OFB=y -# CONFIG_CRYPTO_PCRYPT is not set -CONFIG_CRYPTO_RMD160=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_RNG_DEFAULT=y -CONFIG_CRYPTO_SEQIV=y -CONFIG_CRYPTO_SHA1=y -CONFIG_CRYPTO_SHA1_PPC=y -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -# CONFIG_CRYPTO_STREEBOG is not set -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_XTS=y -# CONFIG_CRYPTO_XXHASH is not set -CONFIG_CRYPTO_XZ=y -CONFIG_DCACHE_WORD_ACCESS=y -CONFIG_DEBUG_BUGVERBOSE=y -# CONFIG_DEBUG_EFI is not set -CONFIG_DEBUG_GPIO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -# CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN is not set -CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S" -# CONFIG_DEBUG_MISC is not set -# CONFIG_DEBUG_PLIST is not set -# CONFIG_DEBUG_UART_8250 is not set -# CONFIG_DEBUG_USER is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DEFAULT_SECURITY_DAC=y -CONFIG_DEVMEM=y -# CONFIG_DIAGFWD_BRIDGE_CODE is not set -CONFIG_DIAG_OVER_QRTR=y -# CONFIG_DIAG_OVER_USB is not set -CONFIG_DMADEVICES=y -# CONFIG_DMA_CMA is not set -CONFIG_DMA_ENGINE=y -CONFIG_DMA_OF=y -# CONFIG_DMA_SOUND is not set -CONFIG_DMA_VIRTUAL_CHANNELS=y -# CONFIG_DMI is not set -# CONFIG_DMIID is not set -# CONFIG_DMI_SYSFS is not set -# CONFIG_DM_INIT is not set -# CONFIG_DP83640_PHY is not set -# CONFIG_DPS310 is not set -CONFIG_DTC=y -# CONFIG_DWMAC_GENERIC is not set -# CONFIG_DWMAC_IPQ806X is not set -# CONFIG_DWMAC_SUNXI is not set -# CONFIG_DW_DMAC_PCI is not set -CONFIG_DYNAMIC_DEBUG=y -CONFIG_EDAC_ATOMIC_SCRUB=y -CONFIG_EDAC_SUPPORT=y -# CONFIG_EEPROM_EE1004 is not set -# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set -# CONFIG_EFI_CAPSULE_LOADER is not set -# CONFIG_EFI_TEST is not set -# CONFIG_ENERGY_MODEL is not set -# CONFIG_EP_PCIE is not set -CONFIG_ETHERNET_PACKET_MANGLE=y -# CONFIG_EXPORTFS_BLOCK_OPS is not set -CONFIG_EXT4_FS=y -# CONFIG_EXT4_USE_FOR_EXT2 is not set -# CONFIG_EXTCON_FSA9480 is not set 
-CONFIG_FB=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_IMAGEBLIT=y -CONFIG_FB_CMDLINE=y -# CONFIG_FB_EFI is not set -CONFIG_FB_QTI_QPIC=y -CONFIG_FB_QTI_QPIC_ER_SSD1963_PANEL=y -CONFIG_FB_SYS_FOPS=y -# CONFIG_FIPS_ENABLE is not set -CONFIG_FIXED_PHY=y -CONFIG_FIX_EARLYCON_MEM=y -# CONFIG_FSL_MC_BUS is not set -# CONFIG_FSL_QDMA is not set -CONFIG_FS_MBCACHE=y -# CONFIG_FS_VERITY is not set -# CONFIG_FUJITSU_ERRATUM_010001 is not set -CONFIG_FW_AUTH=y -CONFIG_FW_AUTH_TEST=m -# CONFIG_FW_LOADER_COMPRESS is not set -# CONFIG_FXAS21002C is not set -# CONFIG_FXOS8700_I2C is not set -# CONFIG_FXOS8700_SPI is not set -# CONFIG_GCC_PLUGINS is not set -# CONFIG_GCC_PLUGIN_CYC_COMPLEXITY is not set -# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set -# CONFIG_GCC_PLUGIN_RANDSTRUCT is not set -# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF is not set -# CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL is not set -# CONFIG_GCC_PLUGIN_STRUCTLEAK_USER is not set -CONFIG_GENERIC_ALLOCATOR=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -# CONFIG_GENERIC_CPUFREQ_KRAIT is not set -CONFIG_GENERIC_IDLE_POLL_SETUP=y -CONFIG_GENERIC_IO=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_SHOW_LEVEL=y -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_PHY=y -CONFIG_GENERIC_PINCONF=y -CONFIG_GENERIC_PINCTRL_GROUPS=y -CONFIG_GENERIC_PINMUX_FUNCTIONS=y -CONFIG_GENERIC_SCHED_CLOCK=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_TIME_VSYSCALL=y -# CONFIG_GEN_RTC is not set -# CONFIG_GLACIER is not set -# CONFIG_GLINK_DEBUG_FS is not set -CONFIG_GPIOLIB=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_GPIO_AMD_FCH is not set -# CONFIG_GPIO_CADENCE is not set -CONFIG_GPIO_DEVRES=y -# CONFIG_GPIO_GW_PLD is not set -# CONFIG_GPIO_LATCH is not set -# CONFIG_GPIO_NXP_74HC153 is not set -# CONFIG_GPIO_SAMA5D2_PIOBU is not 
set -CONFIG_GPIO_SYSFS=y -# CONFIG_GPIO_USB_DETECT is not set -# CONFIG_GSI is not set -# CONFIG_HABANA_AI is not set -CONFIG_HANDLE_DOMAIN_IRQ=y -CONFIG_HARDEN_BRANCH_PREDICTOR=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_HAS_DMA=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_HAVE_ARCH_BITREVERSE=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_ARCH_KGDB=y -CONFIG_HAVE_ARCH_PFN_VALID=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_ARM_ARCH_TIMER=y -# CONFIG_HAVE_ARM_SMCCC is not set -# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set -CONFIG_HAVE_BPF_JIT=y -CONFIG_HAVE_CC_STACKPROTECTOR=y -CONFIG_HAVE_CLK=y -CONFIG_HAVE_CLK_PREPARE=y -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_HAVE_DEBUG_KMEMLEAK=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_DMA_ATTRS=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_IDE=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_LZ4=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_HAVE_KERNEL_XZ=y -# CONFIG_HAVE_KPROBES is not set -# CONFIG_HAVE_KRETPROBES is not set -CONFIG_HAVE_MEMBLOCK=y -CONFIG_HAVE_MOD_ARCH_SPECIFIC=y -CONFIG_HAVE_NET_DSA=y -CONFIG_HAVE_OPROFILE=y -# CONFIG_HAVE_OPTPROBES is not set -CONFIG_HAVE_PERF_EVENTS=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_PROC_CPU=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_RSEQ=y -CONFIG_HAVE_SMP=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_UID16=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -# CONFIG_HEADERS_INSTALL is not set -# 
CONFIG_HID_MACALLY is not set -# CONFIG_HID_MALTRON is not set -# CONFIG_HID_VIEWSONIC is not set -CONFIG_HIGHMEM=y -CONFIG_HIGHPTE=y -# CONFIG_HIST_TRIGGERS is not set -CONFIG_HOTPLUG_CPU=y -CONFIG_HWSPINLOCK=y -CONFIG_HWSPINLOCK_QCOM=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_MSM_LEGACY=y -CONFIG_HZ_FIXED=0 -CONFIG_I2C=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_HELPER_AUTO=y -# CONFIG_I2C_NVIDIA_GPU is not set -CONFIG_I2C_QUP=y -# CONFIG_I3C is not set -# CONFIG_IGC is not set CONFIG_IIO=y # CONFIG_IIO_BUFFER is not set # CONFIG_IIO_TRIGGER is not set -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -# CONFIG_IKHEADERS is not set -CONFIG_INITRAMFS_SOURCE="" -# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set -# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set -# CONFIG_INPUT_GPIO_VIBRA is not set -# CONFIG_INPUT_MSM_VIBRATOR is not set -# CONFIG_INPUT_PM8941_PWRKEY is not set -# CONFIG_INPUT_PM8XXX_VIBRATOR is not set -# CONFIG_INTERCONNECT is not set -CONFIG_IOMMU_HELPER=y -# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set -# CONFIG_ION is not set -# CONFIG_ION_DUMMY is not set -# CONFIG_ION_MSM is not set -# CONFIG_ION_TEST is not set -# CONFIG_IO_URING is not set -# CONFIG_IPA is not set -# CONFIG_IPA3 is not set -# CONFIG_IPC_LOGGING is not set -# CONFIG_IPC_ROUTER is not set -# CONFIG_IPC_ROUTER_SECURITY is not set -# CONFIG_IPQ807X_REMOTEPROC is not set -# CONFIG_IPQ_ADCC_4019 is not set -CONFIG_IPQ_ADSS_8074=y -# CONFIG_IPQ_APSS_PLL is not set -# CONFIG_IPQ_APSS_5018 is not set -# CONFIG_IPQ_APSS_6018 is not set -CONFIG_IPQ_APSS_8074=y -# CONFIG_IPQ_DWC3_QTI_EXTCON is not set -# CONFIG_IPQ_FLASH_16M_PROFILE is not set -# CONFIG_IPQ_GCC_4019 is not set -# CONFIG_IPQ_GCC_5018 is not set -# CONFIG_IPQ_GCC_6018 is not set -# CONFIG_IPQ_GCC_806X is not set -CONFIG_IPQ_GCC_8074=y -# CONFIG_IPQ_GCC_9574 is not set -# CONFIG_IPQ_LCC_806X is not set -# CONFIG_IPQ_REMOTEPROC_ADSP is not set -CONFIG_IPQ_SUBSYSTEM_DUMP=y -CONFIG_IPQ_SUBSYSTEM_RAMDUMP=y -# 
CONFIG_IPQ_SUBSYSTEM_RESTART is not set -# CONFIG_IPQ_SUBSYSTEM_RESTART_TEST is not set -CONFIG_IPQ_TCSR=y -CONFIG_IRQCHIP=y -CONFIG_IRQ_DOMAIN=y -CONFIG_IRQ_DOMAIN_HIERARCHY=y -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_IRQ_TIME_ACCOUNTING=y -CONFIG_IRQ_WORK=y -CONFIG_JBD2=y -# CONFIG_KCOV is not set -CONFIG_KEYS=y -# CONFIG_KEYS_REQUEST_CACHE is not set -CONFIG_KPSS_XCC=y -# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set -# CONFIG_KRAITCC is not set -# CONFIG_KRAIT_CLOCKS is not set -# CONFIG_KRAIT_L2_ACCESSORS is not set -# CONFIG_LCD_CLASS_DEVICE is not set -# CONFIG_LEDS_AN30259A is not set -CONFIG_LEDS_IPQ=y -# CONFIG_LEDS_LM3532 is not set -# CONFIG_LEDS_PCA9956B is not set -CONFIG_LEDS_TLC591XX=y -# CONFIG_LEDS_TRIGGER_AUDIO is not set -# CONFIG_LEDS_TRIGGER_PATTERN is not set -CONFIG_LIBFDT=y -CONFIG_LOCKUP_DETECTOR=y -# CONFIG_LOCK_EVENT_COUNTS is not set -CONFIG_LOCK_SPIN_ON_OWNER=y -CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity" -# CONFIG_LTC1660 is not set -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_MAILBOX=y -# CONFIG_MAILBOX_TEST is not set -# CONFIG_MAP_E_SUPPORT is not set -# CONFIG_MAX31856 is not set -# CONFIG_MAX44009 is not set -# CONFIG_MAX5432 is not set -# CONFIG_MB1232 is not set -# CONFIG_MCP3911 is not set -# CONFIG_MCP41010 is not set -CONFIG_MDIO=y -CONFIG_MDIO_BITBANG=y -CONFIG_MDIO_BOARDINFO=y -# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set -CONFIG_MDIO_GPIO=y -CONFIG_MDIO_QCA=y -# CONFIG_MDM_GCC_9615 is not set -# CONFIG_MDM_LCC_9615 is not set -# CONFIG_MEMORY_HOTPLUG is not set -# CONFIG_MFD_LOCHNAGAR is not set -# CONFIG_MFD_MAX77650 is not set -CONFIG_MFD_QCOM_RPM=y -# CONFIG_MFD_ROHM_BD70528 is not set -CONFIG_MFD_SPMI_PMIC=y -# CONFIG_MFD_STMFX is not set -# CONFIG_MFD_STPMIC1 is not set -CONFIG_MFD_SYSCON=y -# CONFIG_MFD_TQMX86 is not set -CONFIG_MHI_BUS=y -CONFIG_MHI_BUS_DEBUG=y -CONFIG_MHI_NETDEV=y -CONFIG_MHI_QTI=y -# CONFIG_MHI_SATELLITE is not set -CONFIG_MHI_UCI=y -CONFIG_MHI_WWAN_CTRL=y 
-CONFIG_MIGHT_HAVE_CACHE_L2X0=y -CONFIG_MIGHT_HAVE_PCI=y -CONFIG_MIGRATION=y -# CONFIG_MIKROTIK is not set -# CONFIG_MISC_ALCOR_PCI is not set -# CONFIG_MMAP_ALLOW_UNINITIALIZED is not set -CONFIG_MMC=y -CONFIG_MMC_ARMMMCI=y -CONFIG_MMC_BLOCK=y -CONFIG_MMC_BLOCK_MINORS=32 -CONFIG_MMC_QCOM_DML=y -CONFIG_MMC_SDHCI=y -# CONFIG_MMC_SDHCI_AM654 is not set -CONFIG_MMC_SDHCI_IO_ACCESSORS=y -CONFIG_MMC_SDHCI_MSM=y -CONFIG_MMC_SDHCI_MSM_ICE=y -# CONFIG_MMC_SDHCI_OF_ARASAN is not set -# CONFIG_MMC_SDHCI_OF_ASPEED is not set -# CONFIG_MMC_SDHCI_PCI is not set -CONFIG_MMC_SDHCI_PLTFM=y -# CONFIG_MMC_STM32_SDMMC is not set -# CONFIG_MMC_TIFM_SD is not set -CONFIG_MODULES_TREE_LOOKUP=y -CONFIG_MODULES_USE_ELF_REL=y -# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set -# CONFIG_MPLS_ROUTING is not set -# CONFIG_MSM_ADSPRPC is not set -# CONFIG_MSM_BUS_SCALING is not set -# CONFIG_MSM_GCC_8660 is not set -# CONFIG_MSM_GCC_8916 is not set -# CONFIG_MSM_GCC_8960 is not set -# CONFIG_MSM_GCC_8974 is not set -# CONFIG_MSM_GCC_8994 is not set -# CONFIG_MSM_GCC_8996 is not set -# CONFIG_MSM_GCC_8998 is not set -# CONFIG_MSM_GLINK is not set -# CONFIG_MSM_GLINK_LOOPBACK_SERVER is not set -# CONFIG_MSM_GLINK_PKT is not set -# CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT is not set -# CONFIG_MSM_IPC_ROUTER_GLINK_XPRT is not set -# CONFIG_MSM_IPC_ROUTER_MHI_XPRT is not set -# CONFIG_MSM_LCC_8960 is not set -# CONFIG_MSM_MHI is not set -# CONFIG_MSM_MHI_DEBUG is not set -# CONFIG_MSM_MHI_DEV is not set -# CONFIG_MSM_MHI_UCI is not set -# CONFIG_MSM_MMCC_8960 is not set -# CONFIG_MSM_MMCC_8974 is not set -# CONFIG_MSM_MMCC_8996 is not set -# CONFIG_MSM_QMI_INTERFACE is not set -# CONFIG_MSM_RPM_GLINK is not set -CONFIG_MSM_RPM_LOG=y -CONFIG_MSM_RPM_RPMSG=y -# CONFIG_MSM_RPM_SMD is not set -# CONFIG_MSM_SECURE_BUFFER is not set -# CONFIG_MSM_SMEM is not set -# CONFIG_MSM_TEST_QMI_CLIENT is not set -CONFIG_MTD_CMDLINE_PARTS=y -# CONFIG_MTD_HYPERBUS is not set -CONFIG_MTD_M25P80=y -# 
CONFIG_MTD_NAND_ECC_SW_BCH is not set -# CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC is not set -CONFIG_MTD_NAND_QCOM=y -# CONFIG_MTD_NAND_QCOM_SERIAL is not set -CONFIG_MTD_RAW_NAND=y -# CONFIG_MTD_ROUTERBOOT_PARTS is not set -CONFIG_MTD_SPINAND_GIGADEVICE=y -CONFIG_MTD_SPINAND_MT29F=y -CONFIG_MTD_SPINAND_ONDIEECC=y -CONFIG_MTD_SPI_NOR=y -# CONFIG_MTD_SPLIT_BCM_WFI_FW is not set -# CONFIG_MTD_SPLIT_ELF_FW is not set -CONFIG_MTD_SPLIT_FIRMWARE=y -CONFIG_MTD_SPLIT_FIT_FW=y -CONFIG_MTD_UBI=y -CONFIG_MTD_UBI_BEB_LIMIT=20 -CONFIG_MTD_UBI_BLOCK=y -# CONFIG_MTD_UBI_FASTMAP is not set -CONFIG_MTD_UBI_GLUEBI=y -CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MULTI_IRQ_HANDLER=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEON=y -CONFIG_NET=y -# CONFIG_NET_DSA_MV88E6063 is not set -CONFIG_NET_FLOW_LIMIT=y -CONFIG_NET_L3_MASTER_DEV=y -CONFIG_NET_PTP_CLASSIFY=y -# CONFIG_NET_SCH_TAPRIO is not set -CONFIG_NET_SWITCHDEV=y -# CONFIG_NET_VENDOR_GOOGLE is not set -# CONFIG_NET_VENDOR_PENSANDO is not set -# CONFIG_NF_IPV6_DUMMY_HEADER is not set -# CONFIG_NI_XGE_MANAGEMENT_ENET is not set -# CONFIG_NOA1305 is not set -CONFIG_NO_BOOTMEM=y -CONFIG_NO_HZ=y -CONFIG_NO_HZ_COMMON=y -CONFIG_NO_HZ_IDLE=y -CONFIG_NR_CPUS=4 -# CONFIG_NULL_TTY is not set -# CONFIG_NUMA is not set -CONFIG_NUM_ALT_PARTITION=16 -CONFIG_NVMEM=y -# CONFIG_NVMEM_REBOOT_MODE is not set -# CONFIG_NVMEM_SYSFS is not set -# CONFIG_NVME_MULTIPATH is not set -# CONFIG_NVME_TCP is not set -# CONFIG_OCTEONTX2_AF is not set -CONFIG_OF=y -CONFIG_OF_ADDRESS=y -CONFIG_OF_ADDRESS_PCI=y -CONFIG_OF_EARLY_FLATTREE=y -CONFIG_OF_FLATTREE=y -CONFIG_OF_GPIO=y -CONFIG_OF_IRQ=y -CONFIG_OF_KOBJ=y -CONFIG_OF_MDIO=y -CONFIG_OF_MTD=y -CONFIG_OF_NET=y -CONFIG_OF_PCI=y -CONFIG_OF_PCI_IRQ=y -CONFIG_OF_RESERVED_MEM=y -# CONFIG_OF_SLIMBUS is not set -CONFIG_OLD_SIGACTION=y -CONFIG_OLD_SIGSUSPEND3=y -# CONFIG_OPTIMIZE_INLINING is not set -# CONFIG_PACKING is not set -CONFIG_PADATA=y -CONFIG_PAGE_OFFSET=0xC0000000 -CONFIG_PANIC_ON_OOPS=y 
-CONFIG_PANIC_ON_OOPS_VALUE=1 -CONFIG_PANIC_TIMEOUT=5 -CONFIG_PCI=y -# CONFIG_PCIEAER is not set -# CONFIG_PCIE_AL is not set -# CONFIG_PCIE_CADENCE_EP is not set -CONFIG_PCIE_DW=y -# CONFIG_PCIE_DW_PLAT is not set -# CONFIG_PCIE_PME is not set -CONFIG_PCIE_QCOM=y -CONFIG_PCI_DOMAINS=y -CONFIG_PCI_DOMAINS_GENERIC=y -# CONFIG_PCI_MESON is not set -CONFIG_PCI_MSI=y -CONFIG_PERF_EVENTS=y -CONFIG_PERF_USE_VMALLOC=y -# CONFIG_PFT is not set -CONFIG_PGTABLE_LEVELS=2 -CONFIG_PHYLIB=y -# CONFIG_PHY_CADENCE_DP is not set -# CONFIG_PHY_CADENCE_DPHY is not set -# CONFIG_PHY_CADENCE_SIERRA is not set -# CONFIG_PHY_FSL_IMX8MQ_USB is not set -# CONFIG_PHY_IPQ_BALDUR_USB is not set -# CONFIG_PHY_IPQ_UNIPHY_PCIE is not set -# CONFIG_PHY_IPQ_UNIPHY_USB is not set -# CONFIG_PHY_MIXEL_MIPI_DPHY is not set -# CONFIG_PHY_OCELOT_SERDES is not set -# CONFIG_PHY_QCA_PCIE_QMP is not set -# CONFIG_PHY_QCOM_APQ8064_SATA is not set -# CONFIG_PHY_QCOM_IPQ806X_SATA is not set -# CONFIG_PHY_QCOM_PCIE2 is not set -CONFIG_PHY_QCOM_QMP=y -# CONFIG_PHY_QCOM_QUSB2 is not set -# CONFIG_PHY_QCOM_UFS is not set -CONFIG_PINCTRL=y -# CONFIG_PINCTRL_APQ8064 is not set -# CONFIG_PINCTRL_APQ8084 is not set -# CONFIG_PINCTRL_IPQ4019 is not set -# CONFIG_PINCTRL_IPQ5018 is not set -# CONFIG_PINCTRL_IPQ6018 is not set -# CONFIG_PINCTRL_IPQ8064 is not set -CONFIG_PINCTRL_IPQ8074=y -# CONFIG_PINCTRL_IPQ9574 is not set -# CONFIG_PINCTRL_MDM9615 is not set -CONFIG_PINCTRL_MSM=y -# CONFIG_PINCTRL_MSM8660 is not set -# CONFIG_PINCTRL_MSM8916 is not set -# CONFIG_PINCTRL_MSM8960 is not set -# CONFIG_PINCTRL_MSM8994 is not set -# CONFIG_PINCTRL_MSM8996 is not set -# CONFIG_PINCTRL_MSM8998 is not set -# CONFIG_PINCTRL_OCELOT is not set -CONFIG_PINCTRL_QCOM_SPMI_PMIC=y -# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set -# CONFIG_PINCTRL_QCS404 is not set -# CONFIG_PINCTRL_SC7180 is not set -# CONFIG_PINCTRL_SDM660 is not set -# CONFIG_PINCTRL_SDM845 is not set -# CONFIG_PINCTRL_SINGLE is not set -# CONFIG_PINCTRL_SM8150 is not 
set -# CONFIG_PINCTRL_STMFX is not set -# CONFIG_PKCS7_TEST_KEY is not set -# CONFIG_PKCS7_MESSAGE_PARSER is not set -# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set -# CONFIG_PL330_DMA is not set -CONFIG_PM=y -# CONFIG_PM8916_WATCHDOG is not set -CONFIG_PM_CLK=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_GENERIC_DOMAINS=y -CONFIG_PM_GENERIC_DOMAINS_OF=y -CONFIG_PM_GENERIC_DOMAINS_SLEEP=y -CONFIG_PM_OPP=y -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_MSM=y -# CONFIG_POWER_RESET_QCOM_PON is not set -CONFIG_POWER_SUPPLY=y +CONFIG_PCIE_DW_PLAT=y +CONFIG_VMSPLIT_2G=y +# CONFIG_VMSPLIT_3G is not set CONFIG_PPS=y -CONFIG_PREEMPT=y -CONFIG_PREEMPT_COUNT=y -# CONFIG_PREEMPT_NONE is not set -CONFIG_PREEMPT_RCU=y -# CONFIG_PRINTK_CALLER is not set -CONFIG_PRINTK_TIME=y -CONFIG_PROC_PAGE_MONITOR=y -# CONFIG_PROC_STRIPPED is not set -# CONFIG_PSI is not set CONFIG_PTP_1588_CLOCK=y -CONFIG_PUBLIC_KEY_ALGO_RSA=y -# CONFIG_PVPANIC is not set -CONFIG_PWM=y -CONFIG_PWM_IPQ=y -# CONFIG_PWM_PCA9685 is not set -CONFIG_PWM_SYSFS=y -# CONFIG_PWRSEQ_EMMC is not set -CONFIG_PWRSEQ_IPQ=y -# CONFIG_PWRSEQ_SIMPLE is not set -CONFIG_QCA_MINIDUMP=y -# CONFIG_QCA_MINIDUMP_DEBUG is not set -# CONFIG_QCOM_A53PLL is not set -CONFIG_QCOM_ADM=y -# CONFIG_QCOM_AOSS_QMP is not set -CONFIG_QCOM_APCS_IPC=y +# CONFIG_DP83640_PHY is not set +CONFIG_PWM_IPQ4019=y CONFIG_QCOM_APM=y -# CONFIG_QCOM_APR is not set -CONFIG_QCOM_BAM_DMA=y -CONFIG_QCOM_CACHE_DUMP=y -CONFIG_QCOM_CACHE_DUMP_ON_PANIC=y -# CONFIG_QCOM_CLK_APCS_MSM8916 is not set -# CONFIG_QCOM_CLK_RPM is not set -# CONFIG_QCOM_COINCELL is not set -# CONFIG_QCOM_COMMAND_DB is not set -CONFIG_QCOM_DLOAD_MODE=y -CONFIG_QCOM_DLOAD_MODE_APPSBL=y -# CONFIG_QCOM_EBI2 is not set -# CONFIG_QCOM_FASTRPC is not set -CONFIG_QCOM_GDSC=y -# CONFIG_QCOM_GENI_SE is not set -CONFIG_QCOM_GLINK_SSR=y -# CONFIG_QCOM_GSBI is not set -# CONFIG_QCOM_HFPLL is not set -# CONFIG_QCOM_LLCC is not set -# CONFIG_QCOM_PDC is not set -# 
CONFIG_QCOM_PM is not set -# CONFIG_QCOM_Q6V5_ADSP is not set -# CONFIG_QCOM_Q6V5_MPD is not set -# CONFIG_QCOM_Q6V5_MSS is not set -# CONFIG_QCOM_Q6V5_PAS is not set -CONFIG_QCOM_Q6V5_WCSS=y -CONFIG_QCOM_QFPROM=y -CONFIG_QCOM_QMI_HELPERS=y -CONFIG_QCOM_RESTART_REASON=y -# CONFIG_QCOM_RMTFS_MEM is not set -# CONFIG_QCOM_RPMH is not set -CONFIG_QCOM_RPM_CLK=y -# CONFIG_QCOM_RTB is not set -CONFIG_QCOM_SCM=y -CONFIG_QCOM_SCM_32=y -# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set -# CONFIG_QCOM_SMD is not set -# CONFIG_QCOM_SMD_RPM is not set -CONFIG_QCOM_SMEM=y -CONFIG_QCOM_SMEM_STATE=y -CONFIG_QCOM_SMP2P=y -# CONFIG_QCOM_SMSM is not set -CONFIG_QCOM_SOCINFO=y -# CONFIG_QCOM_SPMI_ADC5 is not set +CONFIG_QCOM_DCC=y # CONFIG_QCOM_SPMI_TEMP_ALARM is not set CONFIG_QCOM_SPMI_VADC=y -CONFIG_QCOM_SYSMON=y -CONFIG_QCOM_TSENS=y -# CONFIG_QCOM_WCNSS_CTRL is not set -# CONFIG_QCOM_WCNSS_PIL is not set -# CONFIG_QTI_BT_PIL is not set -# CONFIG_QTI_BT_INTERFACE is not set -CONFIG_QCOM_WDT=y -# CONFIG_QCS_GCC_404 is not set -# CONFIG_QCS_TURING_404 is not set -CONFIG_QMI_ENCDEC=y -# CONFIG_QPNP_REVID is not set -CONFIG_QRTR=y -# CONFIG_QRTR_FIFO is not set -CONFIG_QRTR_MHI=y -CONFIG_QRTR_SMD=y -# CONFIG_QRTR_TUN is not set -# CONFIG_QRTR_USB is not set -CONFIG_QSEECOM=m -# CONFIG_QTI_APSS_ACC is not set -CONFIG_QTI_CTXT_SAVE=y -CONFIG_QTI_DCC=y -# CONFIG_QTI_DCC_V2 is not set -# CONFIG_QTI_EUD is not set -# CONFIG_EUD_EXTCON_SUPPORT is not set -CONFIG_QTI_MEMORY_DUMP_V2=y -# CONFIG_QTI_SCM_RESTART_REASON is not set -CONFIG_QTI_TZ_LOG=y -# CONFIG_RANDOMIZE_BASE is not set -# CONFIG_RANDOM_TRUST_BOOTLOADER is not set -CONFIG_RATIONAL=y -# CONFIG_RCU_BOOST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=21 -# CONFIG_RCU_EXPERT is not set -CONFIG_RCU_STALL_COMMON=y -CONFIG_RD_GZIP=y -# CONFIG_REED_SOLOMON_TEST is not set -CONFIG_REGMAP=y CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y -CONFIG_REGMAP_MMIO=y -CONFIG_REGULATOR=y CONFIG_REGULATOR_CPR3=y CONFIG_REGULATOR_CPR3_NPU=y 
CONFIG_REGULATOR_CPR4_APSS=y -CONFIG_REGULATOR_FIXED_VOLTAGE=y -CONFIG_REGULATOR_GPIO=y -# CONFIG_REGULATOR_IPQ40XX is not set -CONFIG_REGULATOR_QCOM_RPM=y -CONFIG_REGULATOR_QCOM_SPMI=y -CONFIG_REGULATOR_RPM_GLINK=y -# CONFIG_REGULATOR_RPM_SMD is not set -CONFIG_RELAY=y -CONFIG_REMOTEPROC=y -# CONFIG_RESET_ATTACK_MITIGATION is not set -CONFIG_RESET_CONTROLLER=y -# CONFIG_RESET_QCOM_AOSS is not set -# CONFIG_RESET_QCOM_PDC is not set -CONFIG_RFS_ACCEL=y -# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set -CONFIG_RPMSG=y -CONFIG_RPMSG_CHAR=y -# CONFIG_RPMSG_QCOM_GLINK_RPM is not set -CONFIG_RPMSG_QCOM_GLINK_SMEM=y -CONFIG_RPMSG_QCOM_SMD=y -# CONFIG_RPMSG_VIRTIO is not set -CONFIG_RPS=y -CONFIG_RTC_CLASS=y -# CONFIG_RTC_DRV_CMOS is not set -# CONFIG_RTC_DRV_PM8XXX is not set -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_SAMPLES=y -# CONFIG_SAMPLE_CONFIGFS is not set -# CONFIG_SAMPLE_HW_BREAKPOINT is not set -# CONFIG_SAMPLE_KFIFO is not set -# CONFIG_SAMPLE_KOBJECT is not set -# CONFIG_SAMPLE_KPROBES is not set -# CONFIG_SAMPLE_KRETPROBES is not set -CONFIG_SAMPLE_QMI_CLIENT=m -# CONFIG_SAMPLE_RPMSG_CLIENT is not set -CONFIG_SAMPLE_TRACE_EVENTS=y -# CONFIG_SAMPLE_TRACE_PRINTK is not set -# CONFIG_SAMPLE_VFIO_MDEV_MDPY_FB is not set -# CONFIG_SATA_AHCI is not set -CONFIG_SCHED_HRTICK=y -# CONFIG_SCHED_INFO is not set -# CONFIG_SCSI is not set -# CONFIG_SCSI_DMA is not set -# CONFIG_SCSI_MYRS is not set -CONFIG_SCSI_SCAN_ASYNC=y -# CONFIG_SDM_CAMCC_845 is not set -# CONFIG_SDM_DISPCC_845 is not set -# CONFIG_SDM_GCC_660 is not set -# CONFIG_SDM_GCC_845 is not set -# CONFIG_SDM_GPUCC_845 is not set -# CONFIG_SDM_LPASSCC_845 is not set -# CONFIG_SDM_VIDEOCC_845 is not set -# CONFIG_SEEMP_CORE is not set -# CONFIG_SENSIRION_SGP30 is not set -# CONFIG_SENSORS_DRIVETEMP is not set -# CONFIG_SENSORS_OCC_P8_I2C is not set -# CONFIG_SENSORS_RM3100_I2C is not set -# CONFIG_SENSORS_RM3100_SPI is not set -# CONFIG_SERIAL_8250 is not set -# 
CONFIG_SERIAL_8250_CONSOLE is not set -# CONFIG_SERIAL_8250_DMA is not set -# CONFIG_SERIAL_AMBA_PL010 is not set -# CONFIG_SERIAL_AMBA_PL011 is not set -# CONFIG_SERIAL_FSL_LINFLEXUART is not set -CONFIG_SERIAL_MSM=y -CONFIG_SERIAL_MSM_CONSOLE=y -# CONFIG_SERIAL_SIFIVE is not set -# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set -CONFIG_SKB_RECYCLER=y -CONFIG_SKB_RECYCLER_MULTI_CPU=y -# CONFIG_SKB_RECYCLER_PREALLOC is not set -# CONFIG_SLIMBUS is not set -# CONFIG_SLIMBUS_MSM_CTRL is not set -# CONFIG_SLIMBUS_MSM_NGD is not set -CONFIG_SMP=y -CONFIG_SMP_ON_UP=y -# CONFIG_SM_GCC_8150 is not set -# CONFIG_SND is not set -# CONFIG_SND_AOA is not set -# CONFIG_SND_COMPRESS_OFFLOAD is not set -# CONFIG_SND_DYNAMIC_MINORS is not set -# CONFIG_SND_PCM is not set -# CONFIG_SND_PROC_FS is not set -CONFIG_SND_SOC=y -# CONFIG_SND_SOC_AK4118 is not set -# CONFIG_SND_SOC_APQ8016_SBC is not set -# CONFIG_SND_SOC_CS35L36 is not set -# CONFIG_SND_SOC_CS4341 is not set -# CONFIG_SND_SOC_CX2072X is not set -# CONFIG_SND_SOC_DMIC is not set -# CONFIG_SND_SOC_FSL_AUDMIX is not set -# CONFIG_SND_SOC_FSL_MICFIL is not set -# CONFIG_SND_SOC_I2C_AND_SPI is not set -CONFIG_SND_SOC_IPQ=y -CONFIG_SND_SOC_IPQ_ADSS=y -CONFIG_SND_SOC_IPQ_CODEC=y -CONFIG_SND_SOC_IPQ_CPU_DAI=y -CONFIG_SND_SOC_IPQ_MBOX=y -# CONFIG_SND_SOC_IPQ_LPASS is not set -# CONFIG_SND_SOC_IPQ_LPASS_PCM_RAW is not set -CONFIG_SND_SOC_IPQ_PCM_I2S=y -CONFIG_SND_SOC_IPQ_PCM_TDM=y -CONFIG_SND_SOC_IPQ_PCM_RAW=y -CONFIG_SND_SOC_IPQ_STEREO=y -# CONFIG_SND_SOC_MAX98088 is not set -# CONFIG_SND_SOC_MAX98357A is not set -# CONFIG_SND_SOC_MT6358 is not set -# CONFIG_SND_SOC_MTK_BTCVSD is not set -# CONFIG_SND_SOC_NAU8822 is not set -# CONFIG_SND_SOC_PCM3060_I2C is not set -# CONFIG_SND_SOC_PCM3060_SPI is not set -CONFIG_SND_SOC_QCOM=y -# CONFIG_SND_SOC_RK3328 is not set -# CONFIG_SND_SOC_SOF_TOPLEVEL is not set -# CONFIG_SND_SOC_STORM is not set -# CONFIG_SND_SOC_UDA1334 is not set -# CONFIG_SND_SOC_WM8904 is not set -# 
CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set -# CONFIG_SND_SOC_XILINX_I2S is not set -# CONFIG_SND_SOC_XILINX_SPDIF is not set -CONFIG_SOUND=y -# CONFIG_SOUND_OSS_CORE_PRECLAIM is not set -CONFIG_SPARSE_IRQ=y -CONFIG_SPI=y -CONFIG_SPI_MASTER=y -CONFIG_SPI_MEM=y -# CONFIG_SPI_MTK_QUADSPI is not set -# CONFIG_SPI_QCOM_QSPI is not set -CONFIG_SPI_QUP=y -CONFIG_SPI_SPIDEV=y -# CONFIG_SPI_VSC7385 is not set -CONFIG_SPMI=y -CONFIG_SPMI_MSM_PMIC_ARB=y -# CONFIG_SPMI_PMIC_CLKDIV is not set -CONFIG_SPS=y -# CONFIG_SPS30 is not set -# CONFIG_SPS_SUPPORT_BAMDMA is not set -CONFIG_SPS_SUPPORT_NDP_BAM=y -CONFIG_SRCU=y -# CONFIG_SRD_TRACE is not set -# CONFIG_STAGING is not set -# CONFIG_STM_PROTO_BASIC is not set -# CONFIG_STM_PROTO_SYS_T is not set -# CONFIG_STM_SOURCE_FTRACE is not set -# CONFIG_STM_SOURCE_HEARTBEAT is not set -# CONFIG_STOPWATCH is not set -# CONFIG_STRIP_ASM_SYMS is not set -# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set -# CONFIG_SECONDARY_TRUSTED_KEYRING is not set -CONFIG_SUSPEND=y -# CONFIG_SWAP is not set -CONFIG_SWCONFIG=y -CONFIG_SWIOTLB=y -CONFIG_SWP_EMULATE=y -# CONFIG_SW_SYNC is not set -# CONFIG_SYNC is not set -CONFIG_SYS_SUPPORTS_APM_EMULATION=y -# CONFIG_TEST_BLACKHOLE_DEV is not set -# CONFIG_TEST_MEMCAT_P is not set -# CONFIG_TEST_MEMINIT is not set -# CONFIG_TEST_STACKINIT is not set -# CONFIG_TEST_STRSCPY is not set -# CONFIG_TEST_VMALLOC is not set -# CONFIG_TEST_XARRAY is not set -CONFIG_THERMAL=y -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 -CONFIG_THERMAL_GOV_STEP_WISE=y -CONFIG_THERMAL_GOV_USER_SPACE=y -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_OF=y -CONFIG_THERMAL_WRITABLE_TRIPS=y -# CONFIG_THUMB2_KERNEL is not set -# CONFIG_TICK_CPU_ACCOUNTING is not set -# CONFIG_TI_ADS124S08 is not set -# CONFIG_TI_ADS8344 is not set -# CONFIG_TI_CPSW_PHY_SEL is not set -# CONFIG_TI_DAC7311 is not set -# CONFIG_TI_DAC7612 is not set -CONFIG_TRACING_EVENTS_GPIO=y -# CONFIG_TRUSTED_FOUNDATIONS is not set 
-CONFIG_UBIFS_FS=y -CONFIG_UBIFS_FS_ADVANCED_COMPR=y -CONFIG_UBIFS_FS_LZO=y -CONFIG_UBIFS_FS_XATTR=y -CONFIG_UBIFS_FS_XZ=y -CONFIG_UBIFS_FS_ZLIB=y -# CONFIG_UBIFS_FS_ZSTD is not set -CONFIG_UEVENT_HELPER_PATH="" -CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h" -# CONFIG_UNICODE is not set -CONFIG_UNINLINE_SPIN_UNLOCK=y -# CONFIG_UNMAP_KERNEL_AT_EL0 is not set -CONFIG_UNWINDER_ARM=y -# CONFIG_UNWINDER_FRAME_POINTER is not set -# CONFIG_USB_NET_AQC111 is not set -# CONFIG_USB_BAM is not set -CONFIG_USB_CONFIGFS=y -# CONFIG_USB_CONFIGFS_ACM is not set -# CONFIG_USB_CONFIGFS_ECM is not set -# CONFIG_USB_CONFIGFS_ECM_SUBSET is not set -# CONFIG_USB_CONFIGFS_EEM is not set -# CONFIG_USB_CONFIGFS_F_DIAG is not set -# CONFIG_USB_CONFIGFS_F_FS is not set -# CONFIG_USB_CONFIGFS_F_HID is not set -# CONFIG_USB_CONFIGFS_F_LB_SS is not set -# CONFIG_USB_CONFIGFS_F_MIDI is not set -# CONFIG_USB_CONFIGFS_F_PRINTER is not set -# CONFIG_USB_CONFIGFS_F_QDSS is not set -# CONFIG_USB_CONFIGFS_F_UAC1 is not set -# CONFIG_USB_CONFIGFS_F_UAC1_LEGACY is not set -# CONFIG_USB_CONFIGFS_F_UAC2 is not set -# CONFIG_USB_CONFIGFS_MASS_STORAGE is not set -# CONFIG_USB_CONFIGFS_NCM is not set -# CONFIG_USB_CONFIGFS_OBEX is not set -# CONFIG_USB_CONFIGFS_RNDIS is not set -# CONFIG_USB_CONFIGFS_SERIAL is not set -# CONFIG_USB_CONN_GPIO is not set -# CONFIG_USB_DWC3_OF_SIMPLE is not set -# CONFIG_USB_EHCI_FSL is not set -# CONFIG_USB_EHCI_ROOT_HUB_TT is not set -# CONFIG_USB_EHCI_TT_NEWSCHED is not set -# CONFIG_USB_GADGET is not set -# CONFIG_USB_NET_AQC111 is not set -# CONFIG_USB_OHCI_LITTLE_ENDIAN is not set -# CONFIG_USB_QCA_M31_PHY is not set -# CONFIG_USB_QCOM_8X16_PHY is not set -# CONFIG_USB_QCOM_QMP_PHY is not set -# CONFIG_USB_QCOM_QUSB_PHY is not set -CONFIG_USB_SUPPORT=y -CONFIG_USE_OF=y -# CONFIG_U_SERIAL_CONSOLE is not set -# CONFIG_VALIDATE_FS_PARSER is not set -# CONFIG_VCNL4035 is not set -CONFIG_VDSO=y -CONFIG_VECTORS_BASE=0xffff0000 -# CONFIG_VFIO is not set -CONFIG_VFP=y 
-CONFIG_VFPv3=y -# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set -# CONFIG_VHOST_NET is not set -# CONFIG_VIRT_WIFI is not set -# CONFIG_VIRTIO_BLK is not set -# CONFIG_VIRTIO_CONSOLE is not set -# CONFIG_VIRTIO_FS is not set -# CONFIG_VIRTIO_NET is not set -# CONFIG_VL53L0X_I2C is not set -# CONFIG_VMAP_STACK is not set -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_WATCHDOG_CORE=y -CONFIG_WATCHDOG_OPEN_TIMEOUT=0 -# CONFIG_WL_TI is not set -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -CONFIG_WWAN=y -CONFIG_WWAN_CORE=y -CONFIG_X509_CERTIFICATE_PARSER=y -# CONFIG_XILINX_SDFEC is not set -# CONFIG_XILINX_XADC is not set -CONFIG_XPS=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_BCJ=y -CONFIG_ZBOOT_ROM_BSS=0 -CONFIG_ZBOOT_ROM_TEXT=0 -CONFIG_ZLIB_DEFLATE=y -CONFIG_ZLIB_INFLATE=y -CONFIG_ZONE_DMA_FLAG=0 -# CONFIG_SKB_FIXED_SIZE_2K is not set -# CONFIG_ARCH_IPQ256M is not set -# CONFIG_DEBUG_MEM_USAGE is not set -# CONFIG_NF_CONNTRACK_DSCPREMARK_EXT is not set -# CONFIG_NF_CONNTRACK_CHAIN_EVENTS is not set -# CONFIG_MHI_BUS_TEST is not set -# CONFIG_DIAG_MHI is not set -# CONFIG_BOOTCONFIG_PARTITION is not set -CONFIG_LEDS_GPIO=y -CONFIG_ARCH_IPQ8074=y -CONFIG_ARCH_MMAP_RND_BITS=8 -CONFIG_PCIE_DW_PLAT=y +CONFIG_MMC_SDHCI_MSM_ICE=y +CONFIG_USB_BAM=y +CONFIG_USB_QCOM_KS_BRIDGE=m +CONFIG_MAILBOX=y # CONFIG_USB_QCOM_DIAG_BRIDGE is not set -CONFIG_VMSPLIT_2G=y -# CONFIG_VMSPLIT_3G is not set +# CONFIG_USB_CONFIGFS_F_DIAG is not set +# CONFIG_NF_IPV6_DUMMY_HEADER is not set +CONFIG_RMNET=y +CONFIG_RMNET_DATA=y +CONFIG_RMNET_DATA_DEBUG_PKT=y +# CONFIG_RMNET_DATA_FC is not set +CONFIG_QCOM_QMI_RMNET=y +CONFIG_QCOM_QMI_DFC=y +CONFIG_QCOM_QMI_POWER_COLLAPSE=y +CONFIG_RMNET_CTL=y +CONFIG_RMNET_CTL_DEBUG=y +CONFIG_MHI_BUS_TEST=y +CONFIG_MHI_DEBUG=y +CONFIG_MHI_NETDEV=y +CONFIG_MHI_UCI=y diff --git a/feeds/ipq807x/ipq807x/modules.mk b/feeds/ipq807x/ipq807x/modules.mk index fcbdc0f7b..6d9a3b0a9 100644 --- a/feeds/ipq807x/ipq807x/modules.mk +++ b/feeds/ipq807x/ipq807x/modules.mk @@ -2,15 +2,12 @@ define 
KernelPackage/usb-phy-ipq807x TITLE:=DWC3 USB QCOM PHY driver for IPQ807x DEPENDS:=@TARGET_ipq807x KCONFIG:= \ - CONFIG_PHY_QCOM_QUSB2 \ - CONFIG_PHY_QCOM_QMP=y \ CONFIG_USB_QCOM_QUSB_PHY \ CONFIG_USB_QCOM_QMP_PHY FILES:= \ - $(LINUX_DIR)/drivers/phy/qualcomm/phy-qcom-qusb2.ko@ge5.4 \ - $(LINUX_DIR)/drivers/usb/phy/phy-msm-qusb.ko@le4.4 \ - $(LINUX_DIR)/drivers/usb/phy/phy-msm-ssusb-qmp.ko@le4.4 - AUTOLOAD:=$(call AutoLoad,45,phy-qcom-qusb2 phy-msm-qusb phy-msm-ssusb-qmp,1) + $(LINUX_DIR)/drivers/usb/phy/phy-msm-qusb.ko \ + $(LINUX_DIR)/drivers/usb/phy/phy-msm-ssusb-qmp.ko + AUTOLOAD:=$(call AutoLoad,45,phy-msm-qusb phy-msm-ssusb-qmp,1) $(call AddDepends/usb) endef @@ -22,45 +19,46 @@ endef $(eval $(call KernelPackage,usb-phy-ipq807x)) -define KernelPackage/usb-dwc3-internal - TITLE:=DWC3 USB controller driver - DEPENDS:=+USB_GADGET_SUPPORT:kmod-usb-gadget +kmod-usb-core +define KernelPackage/qrtr_mproc + TITLE:= Ath11k Specific kernel configs for IPQ807x and IPQ60xx + DEPENDS+= @TARGET_ipq807x KCONFIG:= \ - CONFIG_USB_DWC3 \ - CONFIG_USB_DWC3_HOST=n \ - CONFIG_USB_DWC3_GADGET=n \ - CONFIG_USB_DWC3_DUAL_ROLE=y \ - CONFIG_EXTCON=y \ - CONFIG_USB_DWC3_DEBUG=n \ - CONFIG_USB_DWC3_VERBOSE=n - FILES:= $(LINUX_DIR)/drivers/usb/dwc3/dwc3.ko - AUTOLOAD:=$(call AutoLoad,54,dwc3,1) - $(call AddPlatformDepends/usb) + CONFIG_QRTR=y \ + CONFIG_QRTR_MHI=y \ + CONFIG_MHI_BUS=y \ + CONFIG_MHI_QTI=y \ + CONFIG_QCOM_APCS_IPC=y \ + CONFIG_QCOM_GLINK_SSR=y \ + CONFIG_QCOM_Q6V5_WCSS=y \ + CONFIG_MSM_RPM_RPMSG=y \ + CONFIG_RPMSG_QCOM_GLINK_RPM=y \ + CONFIG_REGULATOR_RPM_GLINK=y \ + CONFIG_QCOM_SYSMON=y \ + CONFIG_RPMSG=y \ + CONFIG_RPMSG_CHAR=y \ + CONFIG_RPMSG_QCOM_GLINK_SMEM=y \ + CONFIG_RPMSG_QCOM_SMD=y \ + CONFIG_QRTR_SMD=y \ + CONFIG_QCOM_QMI_HELPERS=y \ + CONFIG_SAMPLES=y \ + CONFIG_SAMPLE_QMI_CLIENT=m \ + CONFIG_SAMPLE_TRACE_EVENTS=n \ + CONFIG_SAMPLE_KOBJECT=n \ + CONFIG_SAMPLE_KPROBES=n \ + CONFIG_SAMPLE_KRETPROBES=n \ + CONFIG_SAMPLE_HW_BREAKPOINT=n \ + CONFIG_SAMPLE_KFIFO=n \ 
+ CONFIG_SAMPLE_CONFIGFS=n \ + CONFIG_SAMPLE_RPMSG_CLIENT=n \ + CONFIG_MAILBOX=y \ + CONFIG_DIAG_OVER_QRTR=y endef -define KernelPackage/usb-dwc3-internal/description - This driver provides support for the Dual Role SuperSpeed - USB Controller based on the Synopsys DesignWare USB3 IP Core +define KernelPackage/qrtr_mproc/description +Kernel configs for ath11k support specific to ipq807x and IPQ60xx endef -$(eval $(call KernelPackage,usb-dwc3-internal)) - -define KernelPackage/usb-dwc3-qcom-internal - TITLE:=DWC3 QTI USB driver - DEPENDS:=@!LINUX_4_14 @(TARGET_ipq807x||TARGET_ipq60xx||TARGET_ipq95xx||TARGET_ipq50xx) +kmod-usb-dwc3-internal - KCONFIG:= CONFIG_USB_DWC3_QCOM - FILES:= $(LINUX_DIR)/drivers/usb/dwc3/dwc3-qcom.ko - AUTOLOAD:=$(call AutoLoad,53,dwc3-qcom,1) - $(call AddPlatformDepends/usb) -endef - -define KernelPackage/usb-dwc3-qcom-internal/description - Some QTI SoCs use DesignWare Core IP for USB2/3 functionality. - This driver also handles Qscratch wrapper which is needed for - peripheral mode support. 
-endef - -$(eval $(call KernelPackage,usb-dwc3-qcom-internal)) +$(eval $(call KernelPackage,qrtr_mproc)) define KernelPackage/bt_tty TITLE:= BT Inter-processor Communication @@ -85,8 +83,8 @@ define KernelPackage/usb-phy-ipq5018 CONFIG_USB_QCA_M31_PHY \ CONFIG_PHY_IPQ_UNIPHY_USB FILES:= \ - $(LINUX_DIR)/drivers/usb/phy/phy-qca-m31.ko \ - $(LINUX_DIR)/drivers/phy/qualcomm/phy-qca-uniphy.ko + $(LINUX_DIR)/drivers/usb/phy/phy-qca-m31.ko \ + $(LINUX_DIR)/drivers/phy/phy-qca-uniphy.ko AUTOLOAD:=$(call AutoLoad,45,phy-qca-m31 phy-qca-uniphy,1) $(call AddDepends/usb) endef @@ -98,30 +96,13 @@ endef $(eval $(call KernelPackage,usb-phy-ipq5018)) -define KernelPackage/usb-f-diag - TITLE:=USB DIAG - KCONFIG:=CONFIG_USB_F_DIAG \ - CONFIG_USB_CONFIGFS_F_DIAG=y \ - CONFIG_DIAG_OVER_USB=y - DEPENDS:=+kmod-usb-lib-composite +kmod-usb-configfs - FILES:=$(LINUX_DIR)/drivers/usb/gadget/function/usb_f_diag.ko - AUTOLOAD:=$(call AutoLoad,52,usb_f_diag) - $(call AddPlatformDepends/usb) -endef - -define KernelPackage/usb-f-diag/description - USB DIAG -endef - -$(eval $(call KernelPackage,usb-f-diag)) - define KernelPackage/diag-char TITLE:=CHAR DIAG KCONFIG:= CONFIG_DIAG_MHI=y@ge5.4 \ CONFIG_DIAG_OVER_PCIE=n@ge5.4 \ CONFIG_DIAGFWD_BRIDGE_CODE=y \ - CONFIG_DIAG_CHAR - DEPENDS:=+kmod-lib-crc-ccitt +kmod-usb-f-diag + CONFIG_DIAG_CHAR=m + DEPENDS:=+kmod-lib-crc-ccitt FILES:=$(LINUX_DIR)/drivers/char/diag/diagchar.ko endef @@ -131,34 +112,21 @@ endef $(eval $(call KernelPackage,diag-char)) -define KernelPackage/usb-configfs - TITLE:= USB functions - KCONFIG:=CONFIG_USB_CONFIGFS \ - CONFIG_USB_CONFIGFS_SERIAL=n \ - CONFIG_USB_CONFIGFS_ACM=n \ - CONFIG_USB_CONFIGFS_OBEX=n \ - CONFIG_USB_CONFIGFS_NCM=n \ - CONFIG_USB_CONFIGFS_ECM=n \ - CONFIG_USB_CONFIGFS_ECM_SUBSET=n \ - CONFIG_USB_CONFIGFS_RNDIS=n \ - CONFIG_USB_CONFIGFS_EEM=n \ - CONFIG_USB_CONFIGFS_MASS_STORAGE=n \ - CONFIG_USB_CONFIGFS_F_LB_SS=n \ - CONFIG_USB_CONFIGFS_F_FS=n \ - CONFIG_USB_CONFIGFS_F_UAC1=n \ - CONFIG_USB_CONFIGFS_F_UAC2=n 
\ - CONFIG_USB_CONFIGFS_F_MIDI=n \ - CONFIG_USB_CONFIGFS_F_HID=n \ - CONFIG_USB_CONFIGFS_F_PRINTER=n \ - CONFIG_USB_CONFIGFS_F_QDSS=n - $(call AddPlatformDepends/usb) +define KernelPackage/usb-dwc3-qcom + TITLE:=DWC3 Qualcomm USB driver + DEPENDS:=@(!LINUX_4_14) @TARGET_ipq807x +kmod-usb-dwc3 + KCONFIG:= CONFIG_USB_DWC3_QCOM + FILES:= $(LINUX_DIR)/drivers/usb/dwc3/dwc3-qcom.ko \ + $(LINUX_DIR)/drivers/usb/dwc3/dbm.ko + AUTOLOAD:=$(call AutoLoad,53,dwc3-qcom dbm,1) + $(call AddDepends/usb) endef -define KernelPackage/usb-configfs/description - USB functions +define KernelPackage/usb-dwc3-qcom/description + Some Qualcomm SoCs use DesignWare Core IP for USB2/3 functionality. + This driver also handles Qscratch wrapper which is needed for + peripheral mode support. endef -$(eval $(call KernelPackage,usb-configfs)) - - +$(eval $(call KernelPackage,usb-dwc3-qcom)) diff --git a/feeds/ipq807x/ipq807x/patches/001-backport_kbuild_fix.patch b/feeds/ipq807x/ipq807x/patches/001-backport_kbuild_fix.patch new file mode 100644 index 000000000..48fe7ec25 --- /dev/null +++ b/feeds/ipq807x/ipq807x/patches/001-backport_kbuild_fix.patch @@ -0,0 +1,25 @@ +--- a/scripts/Makefile.lib ++++ b/scripts/Makefile.lib +@@ -96,10 +96,10 @@ obj-dirs := $(addprefix $(obj)/,$(obj-di + # Note: Files that end up in two or more modules are compiled without the + # KBUILD_MODNAME definition. The reason is that any made-up name would + # differ in different configs. 
+-name-fix = $(subst $(comma),_,$(subst -,_,$1)) +-basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))" ++name-fix = $(squote)$(quote)$(subst $(comma),_,$(subst -,_,$1))$(quote)$(squote) ++basename_flags = -DKBUILD_BASENAME=$(call name-fix,$(basetarget)) + modname_flags = $(if $(filter 1,$(words $(modname))),\ +- -D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))") ++ -DKBUILD_MODNAME=$(call name-fix,$(modname))) + + orig_c_flags = $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(KBUILD_SUBDIR_CCFLAGS) \ + $(ccflags-y) $(CFLAGS_$(basetarget).o) +@@ -155,7 +155,7 @@ endif + + c_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \ + $(__c_flags) $(modkern_cflags) \ +- -D"KBUILD_STR(s)=\#s" $(basename_flags) $(modname_flags) ++ $(basename_flags) $(modname_flags) + + a_flags = -Wp,-MD,$(depfile) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) \ + $(__a_flags) $(modkern_aflags) diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0001-crypto-lib-tidy-up-lib-crypto-Kconfig-and-Makefile.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0001-crypto-lib-tidy-up-lib-crypto-Kconfig-and-Makefile.patch deleted file mode 100644 index c772751ca..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0001-crypto-lib-tidy-up-lib-crypto-Kconfig-and-Makefile.patch +++ /dev/null @@ -1,118 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:07 +0100 -Subject: [PATCH] crypto: lib - tidy up lib/crypto Kconfig and Makefile - -commit 746b2e024c67aa605ac12d135cd7085a49cf9dc4 upstream. - -In preparation of introducing a set of crypto library interfaces, tidy -up the Makefile and split off the Kconfig symbols into a separate file. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - crypto/Kconfig | 13 +------------ - lib/crypto/Kconfig | 15 +++++++++++++++ - lib/crypto/Makefile | 16 ++++++++-------- - 3 files changed, 24 insertions(+), 20 deletions(-) - create mode 100644 lib/crypto/Kconfig - -Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/crypto/Kconfig -=================================================================== ---- linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac.orig/crypto/Kconfig -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/crypto/Kconfig -@@ -942,9 +942,6 @@ config CRYPTO_SHA1_PPC_SPE - SHA-1 secure hash standard (DFIPS 180-4) implemented - using powerpc SPE SIMD instruction set. - --config CRYPTO_LIB_SHA256 -- tristate -- - config CRYPTO_SHA256 - tristate "SHA224 and SHA256 digest algorithm" - select CRYPTO_HASH -@@ -1083,9 +1080,6 @@ config CRYPTO_GHASH_CLMUL_NI_INTEL - - comment "Ciphers" - --config CRYPTO_LIB_AES -- tristate -- - config CRYPTO_AES - tristate "AES cipher algorithms" - select CRYPTO_ALGAPI -@@ -1214,9 +1208,6 @@ config CRYPTO_ANUBIS - - - --config CRYPTO_LIB_ARC4 -- tristate "ARC4 cipher library" -- - config CRYPTO_ARC4 - tristate "ARC4 cipher algorithm" - select CRYPTO_BLKCIPHER -@@ -1403,9 +1394,6 @@ config CRYPTO_CAST6_AVX_X86_64 - This module provides the Cast6 cipher algorithm that processes - eight blocks parallel using the AVX instruction set. 
- --config CRYPTO_LIB_DES -- tristate -- - config CRYPTO_DES - tristate "DES and Triple DES EDE cipher algorithms" - select CRYPTO_ALGAPI -@@ -1909,6 +1897,7 @@ config CRYPTO_STATS - config CRYPTO_HASH_INFO - bool - -+source "lib/crypto/Kconfig" - source "drivers/crypto/Kconfig" - source "crypto/asymmetric_keys/Kconfig" - source "certs/Kconfig" -Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/lib/crypto/Kconfig -=================================================================== ---- /dev/null -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/lib/crypto/Kconfig -@@ -0,0 +1,15 @@ -+# SPDX-License-Identifier: GPL-2.0 -+ -+comment "Crypto library routines" -+ -+config CRYPTO_LIB_AES -+ tristate -+ -+config CRYPTO_LIB_ARC4 -+ tristate -+ -+config CRYPTO_LIB_DES -+ tristate -+ -+config CRYPTO_LIB_SHA256 -+ tristate -Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/lib/crypto/Makefile -=================================================================== ---- linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac.orig/lib/crypto/Makefile -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/lib/crypto/Makefile -@@ -1,13 +1,13 @@ - # SPDX-License-Identifier: GPL-2.0 - --obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o --libaes-y := aes.o -+obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o -+libaes-y := aes.o - --obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o --libarc4-y := arc4.o -+obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o -+libarc4-y := arc4.o - --obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o --libdes-y := des.o -+obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o -+libdes-y := des.o - --obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o --libsha256-y := sha256.o -+obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o -+libsha256-y := sha256.o diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0002-crypto-chacha-move-existing-library-code-into-lib-cr.patch 
b/feeds/ipq807x/ipq807x/patches/080-wireguard-0002-crypto-chacha-move-existing-library-code-into-lib-cr.patch deleted file mode 100644 index 177b5840d..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0002-crypto-chacha-move-existing-library-code-into-lib-cr.patch +++ /dev/null @@ -1,668 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:08 +0100 -Subject: [PATCH] crypto: chacha - move existing library code into lib/crypto - -commit 5fb8ef25803ef33e2eb60b626435828b937bed75 upstream. - -Currently, our generic ChaCha implementation consists of a permute -function in lib/chacha.c that operates on the 64-byte ChaCha state -directly [and which is always included into the core kernel since it -is used by the /dev/random driver], and the crypto API plumbing to -expose it as a skcipher. - -In order to support in-kernel users that need the ChaCha streamcipher -but have no need [or tolerance] for going through the abstractions of -the crypto API, let's expose the streamcipher bits via a library API -as well, in a way that permits the implementation to be superseded by -an architecture specific one if provided. - -So move the streamcipher code into a separate module in lib/crypto, -and expose the init() and crypt() routines to users of the library. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/arm/crypto/chacha-neon-glue.c | 2 +- - arch/arm64/crypto/chacha-neon-glue.c | 2 +- - arch/x86/crypto/chacha_glue.c | 2 +- - crypto/Kconfig | 1 + - crypto/chacha_generic.c | 60 ++-------------------- - include/crypto/chacha.h | 77 ++++++++++++++++++++++------ - include/crypto/internal/chacha.h | 53 +++++++++++++++++++ - lib/Makefile | 3 +- - lib/crypto/Kconfig | 26 ++++++++++ - lib/crypto/Makefile | 4 ++ - lib/{ => crypto}/chacha.c | 20 ++++---- - lib/crypto/libchacha.c | 35 +++++++++++++ - 12 files changed, 199 insertions(+), 86 deletions(-) - create mode 100644 include/crypto/internal/chacha.h - rename lib/{ => crypto}/chacha.c (88%) - create mode 100644 lib/crypto/libchacha.c - ---- a/arch/arm/crypto/chacha-neon-glue.c -+++ b/arch/arm/crypto/chacha-neon-glue.c -@@ -20,7 +20,7 @@ - */ - - #include --#include -+#include - #include - #include - #include ---- a/arch/arm64/crypto/chacha-neon-glue.c -+++ b/arch/arm64/crypto/chacha-neon-glue.c -@@ -20,7 +20,7 @@ - */ - - #include --#include -+#include - #include - #include - #include ---- a/arch/x86/crypto/chacha_glue.c -+++ b/arch/x86/crypto/chacha_glue.c -@@ -7,7 +7,7 @@ - */ - - #include --#include -+#include - #include - #include - #include ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -1393,6 +1393,7 @@ config CRYPTO_SALSA20 - - config CRYPTO_CHACHA20 - tristate "ChaCha stream cipher algorithms" -+ select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_BLKCIPHER - help - The ChaCha20, XChaCha20, and XChaCha12 stream cipher algorithms. 
---- a/crypto/chacha_generic.c -+++ b/crypto/chacha_generic.c -@@ -8,29 +8,10 @@ - - #include - #include --#include -+#include - #include - #include - --static void chacha_docrypt(u32 *state, u8 *dst, const u8 *src, -- unsigned int bytes, int nrounds) --{ -- /* aligned to potentially speed up crypto_xor() */ -- u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long)); -- -- while (bytes >= CHACHA_BLOCK_SIZE) { -- chacha_block(state, stream, nrounds); -- crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE); -- bytes -= CHACHA_BLOCK_SIZE; -- dst += CHACHA_BLOCK_SIZE; -- src += CHACHA_BLOCK_SIZE; -- } -- if (bytes) { -- chacha_block(state, stream, nrounds); -- crypto_xor_cpy(dst, src, stream, bytes); -- } --} -- - static int chacha_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) - { -@@ -48,8 +29,8 @@ static int chacha_stream_xor(struct skci - if (nbytes < walk.total) - nbytes = round_down(nbytes, CHACHA_BLOCK_SIZE); - -- chacha_docrypt(state, walk.dst.virt.addr, walk.src.virt.addr, -- nbytes, ctx->nrounds); -+ chacha_crypt_generic(state, walk.dst.virt.addr, -+ walk.src.virt.addr, nbytes, ctx->nrounds); - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - -@@ -58,41 +39,10 @@ static int chacha_stream_xor(struct skci - - void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv) - { -- state[0] = 0x61707865; /* "expa" */ -- state[1] = 0x3320646e; /* "nd 3" */ -- state[2] = 0x79622d32; /* "2-by" */ -- state[3] = 0x6b206574; /* "te k" */ -- state[4] = ctx->key[0]; -- state[5] = ctx->key[1]; -- state[6] = ctx->key[2]; -- state[7] = ctx->key[3]; -- state[8] = ctx->key[4]; -- state[9] = ctx->key[5]; -- state[10] = ctx->key[6]; -- state[11] = ctx->key[7]; -- state[12] = get_unaligned_le32(iv + 0); -- state[13] = get_unaligned_le32(iv + 4); -- state[14] = get_unaligned_le32(iv + 8); -- state[15] = get_unaligned_le32(iv + 12); -+ chacha_init_generic(state, ctx->key, iv); - } - 
EXPORT_SYMBOL_GPL(crypto_chacha_init); - --static int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key, -- unsigned int keysize, int nrounds) --{ -- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -- int i; -- -- if (keysize != CHACHA_KEY_SIZE) -- return -EINVAL; -- -- for (i = 0; i < ARRAY_SIZE(ctx->key); i++) -- ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); -- -- ctx->nrounds = nrounds; -- return 0; --} -- - int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keysize) - { -@@ -126,7 +76,7 @@ int crypto_xchacha_crypt(struct skcipher - - /* Compute the subkey given the original key and first 128 nonce bits */ - crypto_chacha_init(state, ctx, req->iv); -- hchacha_block(state, subctx.key, ctx->nrounds); -+ hchacha_block_generic(state, subctx.key, ctx->nrounds); - subctx.nrounds = ctx->nrounds; - - /* Build the real IV */ ---- a/include/crypto/chacha.h -+++ b/include/crypto/chacha.h -@@ -15,9 +15,8 @@ - #ifndef _CRYPTO_CHACHA_H - #define _CRYPTO_CHACHA_H - --#include -+#include - #include --#include - - /* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */ - #define CHACHA_IV_SIZE 16 -@@ -29,26 +28,70 @@ - /* 192-bit nonce, then 64-bit stream position */ - #define XCHACHA_IV_SIZE 32 - --struct chacha_ctx { -- u32 key[8]; -- int nrounds; --}; -- --void chacha_block(u32 *state, u8 *stream, int nrounds); -+void chacha_block_generic(u32 *state, u8 *stream, int nrounds); - static inline void chacha20_block(u32 *state, u8 *stream) - { -- chacha_block(state, stream, 20); -+ chacha_block_generic(state, stream, 20); - } --void hchacha_block(const u32 *in, u32 *out, int nrounds); - --void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv); -+void hchacha_block_arch(const u32 *state, u32 *out, int nrounds); -+void hchacha_block_generic(const u32 *state, u32 *out, int nrounds); -+ -+static inline void hchacha_block(const u32 *state, u32 *out, int nrounds) -+{ -+ if 
(IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) -+ hchacha_block_arch(state, out, nrounds); -+ else -+ hchacha_block_generic(state, out, nrounds); -+} - --int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, -- unsigned int keysize); --int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, -- unsigned int keysize); -+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv); -+static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv) -+{ -+ state[0] = 0x61707865; /* "expa" */ -+ state[1] = 0x3320646e; /* "nd 3" */ -+ state[2] = 0x79622d32; /* "2-by" */ -+ state[3] = 0x6b206574; /* "te k" */ -+ state[4] = key[0]; -+ state[5] = key[1]; -+ state[6] = key[2]; -+ state[7] = key[3]; -+ state[8] = key[4]; -+ state[9] = key[5]; -+ state[10] = key[6]; -+ state[11] = key[7]; -+ state[12] = get_unaligned_le32(iv + 0); -+ state[13] = get_unaligned_le32(iv + 4); -+ state[14] = get_unaligned_le32(iv + 8); -+ state[15] = get_unaligned_le32(iv + 12); -+} - --int crypto_chacha_crypt(struct skcipher_request *req); --int crypto_xchacha_crypt(struct skcipher_request *req); -+static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv) -+{ -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) -+ chacha_init_arch(state, key, iv); -+ else -+ chacha_init_generic(state, key, iv); -+} -+ -+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, -+ unsigned int bytes, int nrounds); -+void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src, -+ unsigned int bytes, int nrounds); -+ -+static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src, -+ unsigned int bytes, int nrounds) -+{ -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) -+ chacha_crypt_arch(state, dst, src, bytes, nrounds); -+ else -+ chacha_crypt_generic(state, dst, src, bytes, nrounds); -+} -+ -+static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src, -+ unsigned int bytes) -+{ -+ chacha_crypt(state, dst, src, bytes, 20); 
-+} - - #endif /* _CRYPTO_CHACHA_H */ ---- /dev/null -+++ b/include/crypto/internal/chacha.h -@@ -0,0 +1,53 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+ -+#ifndef _CRYPTO_INTERNAL_CHACHA_H -+#define _CRYPTO_INTERNAL_CHACHA_H -+ -+#include -+#include -+#include -+ -+struct chacha_ctx { -+ u32 key[8]; -+ int nrounds; -+}; -+ -+void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv); -+ -+static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key, -+ unsigned int keysize, int nrounds) -+{ -+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -+ int i; -+ -+ if (keysize != CHACHA_KEY_SIZE) -+ return -EINVAL; -+ -+ for (i = 0; i < ARRAY_SIZE(ctx->key); i++) -+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32)); -+ -+ ctx->nrounds = nrounds; -+ return 0; -+} -+ -+static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, -+ unsigned int keysize) -+{ -+ return chacha_setkey(tfm, key, keysize, 20); -+} -+ -+static int inline chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, -+ unsigned int keysize) -+{ -+ return chacha_setkey(tfm, key, keysize, 12); -+} -+ -+int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, -+ unsigned int keysize); -+int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, -+ unsigned int keysize); -+ -+int crypto_chacha_crypt(struct skcipher_request *req); -+int crypto_xchacha_crypt(struct skcipher_request *req); -+ -+#endif /* _CRYPTO_CHACHA_H */ ---- a/lib/Makefile -+++ b/lib/Makefile -@@ -26,8 +26,7 @@ endif - - lib-y := ctype.o string.o vsprintf.o cmdline.o \ - rbtree.o radix-tree.o timerqueue.o xarray.o \ -- idr.o extable.o \ -- sha1.o chacha.o irq_regs.o argv_split.o \ -+ idr.o extable.o sha1.o irq_regs.o argv_split.o \ - flex_proportions.o ratelimit.o show_mem.o \ - is_single_threaded.o plist.o decompress.o kobject_uevent.o \ - earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ ---- a/lib/crypto/Kconfig -+++ b/lib/crypto/Kconfig -@@ -8,6 
+8,32 @@ config CRYPTO_LIB_AES - config CRYPTO_LIB_ARC4 - tristate - -+config CRYPTO_ARCH_HAVE_LIB_CHACHA -+ tristate -+ help -+ Declares whether the architecture provides an arch-specific -+ accelerated implementation of the ChaCha library interface, -+ either builtin or as a module. -+ -+config CRYPTO_LIB_CHACHA_GENERIC -+ tristate -+ select CRYPTO_ALGAPI -+ help -+ This symbol can be depended upon by arch implementations of the -+ ChaCha library interface that require the generic code as a -+ fallback, e.g., for SIMD implementations. If no arch specific -+ implementation is enabled, this implementation serves the users -+ of CRYPTO_LIB_CHACHA. -+ -+config CRYPTO_LIB_CHACHA -+ tristate "ChaCha library interface" -+ depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA -+ select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n -+ help -+ Enable the ChaCha library interface. This interface may be fulfilled -+ by either the generic implementation or an arch-specific one, if one -+ is available and enabled. 
-+ - config CRYPTO_LIB_DES - tristate - ---- a/lib/crypto/Makefile -+++ b/lib/crypto/Makefile -@@ -1,5 +1,9 @@ - # SPDX-License-Identifier: GPL-2.0 - -+# chacha is used by the /dev/random driver which is always builtin -+obj-y += chacha.o -+obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o -+ - obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o - libaes-y := aes.o - ---- a/lib/chacha.c -+++ /dev/null -@@ -1,113 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0-or-later --/* -- * The "hash function" used as the core of the ChaCha stream cipher (RFC7539) -- * -- * Copyright (C) 2015 Martin Willi -- */ -- --#include --#include --#include --#include --#include --#include -- --static void chacha_permute(u32 *x, int nrounds) --{ -- int i; -- -- /* whitelist the allowed round counts */ -- WARN_ON_ONCE(nrounds != 20 && nrounds != 12); -- -- for (i = 0; i < nrounds; i += 2) { -- x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16); -- x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16); -- x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16); -- x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16); -- -- x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12); -- x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12); -- x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12); -- x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12); -- -- x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8); -- x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8); -- x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8); -- x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8); -- -- x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7); -- x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7); -- x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7); -- x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7); -- -- x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16); -- x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16); -- x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16); -- x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16); -- -- x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12); -- x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12); -- x[8] += x[13]; 
x[7] = rol32(x[7] ^ x[8], 12); -- x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12); -- -- x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 8); -- x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8); -- x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8); -- x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8); -- -- x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7); -- x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7); -- x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7); -- x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7); -- } --} -- --/** -- * chacha_block - generate one keystream block and increment block counter -- * @state: input state matrix (16 32-bit words) -- * @stream: output keystream block (64 bytes) -- * @nrounds: number of rounds (20 or 12; 20 is recommended) -- * -- * This is the ChaCha core, a function from 64-byte strings to 64-byte strings. -- * The caller has already converted the endianness of the input. This function -- * also handles incrementing the block counter in the input matrix. -- */ --void chacha_block(u32 *state, u8 *stream, int nrounds) --{ -- u32 x[16]; -- int i; -- -- memcpy(x, state, 64); -- -- chacha_permute(x, nrounds); -- -- for (i = 0; i < ARRAY_SIZE(x); i++) -- put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]); -- -- state[12]++; --} --EXPORT_SYMBOL(chacha_block); -- --/** -- * hchacha_block - abbreviated ChaCha core, for XChaCha -- * @in: input state matrix (16 32-bit words) -- * @out: output (8 32-bit words) -- * @nrounds: number of rounds (20 or 12; 20 is recommended) -- * -- * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step -- * towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). HChaCha -- * skips the final addition of the initial state, and outputs only certain words -- * of the state. It should not be used for streaming directly. 
-- */ --void hchacha_block(const u32 *in, u32 *out, int nrounds) --{ -- u32 x[16]; -- -- memcpy(x, in, 64); -- -- chacha_permute(x, nrounds); -- -- memcpy(&out[0], &x[0], 16); -- memcpy(&out[4], &x[12], 16); --} --EXPORT_SYMBOL(hchacha_block); ---- /dev/null -+++ b/lib/crypto/chacha.c -@@ -0,0 +1,115 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later -+/* -+ * The "hash function" used as the core of the ChaCha stream cipher (RFC7539) -+ * -+ * Copyright (C) 2015 Martin Willi -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static void chacha_permute(u32 *x, int nrounds) -+{ -+ int i; -+ -+ /* whitelist the allowed round counts */ -+ WARN_ON_ONCE(nrounds != 20 && nrounds != 12); -+ -+ for (i = 0; i < nrounds; i += 2) { -+ x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 16); -+ x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 16); -+ x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 16); -+ x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 16); -+ -+ x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 12); -+ x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 12); -+ x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 12); -+ x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 12); -+ -+ x[0] += x[4]; x[12] = rol32(x[12] ^ x[0], 8); -+ x[1] += x[5]; x[13] = rol32(x[13] ^ x[1], 8); -+ x[2] += x[6]; x[14] = rol32(x[14] ^ x[2], 8); -+ x[3] += x[7]; x[15] = rol32(x[15] ^ x[3], 8); -+ -+ x[8] += x[12]; x[4] = rol32(x[4] ^ x[8], 7); -+ x[9] += x[13]; x[5] = rol32(x[5] ^ x[9], 7); -+ x[10] += x[14]; x[6] = rol32(x[6] ^ x[10], 7); -+ x[11] += x[15]; x[7] = rol32(x[7] ^ x[11], 7); -+ -+ x[0] += x[5]; x[15] = rol32(x[15] ^ x[0], 16); -+ x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 16); -+ x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 16); -+ x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 16); -+ -+ x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 12); -+ x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 12); -+ x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 12); -+ x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 12); -+ -+ x[0] 
+= x[5]; x[15] = rol32(x[15] ^ x[0], 8); -+ x[1] += x[6]; x[12] = rol32(x[12] ^ x[1], 8); -+ x[2] += x[7]; x[13] = rol32(x[13] ^ x[2], 8); -+ x[3] += x[4]; x[14] = rol32(x[14] ^ x[3], 8); -+ -+ x[10] += x[15]; x[5] = rol32(x[5] ^ x[10], 7); -+ x[11] += x[12]; x[6] = rol32(x[6] ^ x[11], 7); -+ x[8] += x[13]; x[7] = rol32(x[7] ^ x[8], 7); -+ x[9] += x[14]; x[4] = rol32(x[4] ^ x[9], 7); -+ } -+} -+ -+/** -+ * chacha_block - generate one keystream block and increment block counter -+ * @state: input state matrix (16 32-bit words) -+ * @stream: output keystream block (64 bytes) -+ * @nrounds: number of rounds (20 or 12; 20 is recommended) -+ * -+ * This is the ChaCha core, a function from 64-byte strings to 64-byte strings. -+ * The caller has already converted the endianness of the input. This function -+ * also handles incrementing the block counter in the input matrix. -+ */ -+void chacha_block_generic(u32 *state, u8 *stream, int nrounds) -+{ -+ u32 x[16]; -+ int i; -+ -+ memcpy(x, state, 64); -+ -+ chacha_permute(x, nrounds); -+ -+ for (i = 0; i < ARRAY_SIZE(x); i++) -+ put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]); -+ -+ state[12]++; -+} -+EXPORT_SYMBOL(chacha_block_generic); -+ -+/** -+ * hchacha_block_generic - abbreviated ChaCha core, for XChaCha -+ * @state: input state matrix (16 32-bit words) -+ * @out: output (8 32-bit words) -+ * @nrounds: number of rounds (20 or 12; 20 is recommended) -+ * -+ * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step -+ * towards XChaCha (see https://cr.yp.to/snuffle/xsalsa-20081128.pdf). HChaCha -+ * skips the final addition of the initial state, and outputs only certain words -+ * of the state. It should not be used for streaming directly. 
-+ */ -+void hchacha_block_generic(const u32 *state, u32 *stream, int nrounds) -+{ -+ u32 x[16]; -+ -+ memcpy(x, state, 64); -+ -+ chacha_permute(x, nrounds); -+ -+ memcpy(&stream[0], &x[0], 16); -+ memcpy(&stream[4], &x[12], 16); -+} -+EXPORT_SYMBOL(hchacha_block_generic); ---- /dev/null -+++ b/lib/crypto/libchacha.c -@@ -0,0 +1,35 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later -+/* -+ * The ChaCha stream cipher (RFC7539) -+ * -+ * Copyright (C) 2015 Martin Willi -+ */ -+ -+#include -+#include -+#include -+ -+#include // for crypto_xor_cpy -+#include -+ -+void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src, -+ unsigned int bytes, int nrounds) -+{ -+ /* aligned to potentially speed up crypto_xor() */ -+ u8 stream[CHACHA_BLOCK_SIZE] __aligned(sizeof(long)); -+ -+ while (bytes >= CHACHA_BLOCK_SIZE) { -+ chacha_block_generic(state, stream, nrounds); -+ crypto_xor_cpy(dst, src, stream, CHACHA_BLOCK_SIZE); -+ bytes -= CHACHA_BLOCK_SIZE; -+ dst += CHACHA_BLOCK_SIZE; -+ src += CHACHA_BLOCK_SIZE; -+ } -+ if (bytes) { -+ chacha_block_generic(state, stream, nrounds); -+ crypto_xor_cpy(dst, src, stream, bytes); -+ } -+} -+EXPORT_SYMBOL(chacha_crypt_generic); -+ -+MODULE_LICENSE("GPL"); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0003-crypto-x86-chacha-depend-on-generic-chacha-library-i.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0003-crypto-x86-chacha-depend-on-generic-chacha-library-i.patch deleted file mode 100644 index b1f59cc38..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0003-crypto-x86-chacha-depend-on-generic-chacha-library-i.patch +++ /dev/null @@ -1,192 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:09 +0100 -Subject: [PATCH] crypto: x86/chacha - depend on generic chacha library instead - of crypto driver - -commit 28e8d89b1ce8d2e7badfb5f69971dd635acb8863 upstream. 
- -In preparation of extending the x86 ChaCha driver to also expose the ChaCha -library interface, drop the dependency on the chacha_generic crypto driver -as a non-SIMD fallback, and depend on the generic ChaCha library directly. -This way, we only pull in the code we actually need, without registering -a set of ChaCha skciphers that we will never use. - -Since turning the FPU on and off is cheap these days, simplify the SIMD -routine by dropping the per-page yield, which makes for a cleaner switch -to the library API as well. This also allows use to invoke the skcipher -walk routines in non-atomic mode. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/chacha_glue.c | 90 ++++++++++++++--------------------- - crypto/Kconfig | 2 +- - 2 files changed, 36 insertions(+), 56 deletions(-) - ---- a/arch/x86/crypto/chacha_glue.c -+++ b/arch/x86/crypto/chacha_glue.c -@@ -123,37 +123,38 @@ static void chacha_dosimd(u32 *state, u8 - } - } - --static int chacha_simd_stream_xor(struct skcipher_walk *walk, -+static int chacha_simd_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) - { - u32 *state, state_buf[16 + 2] __aligned(8); -- int next_yield = 4096; /* bytes until next FPU yield */ -- int err = 0; -+ struct skcipher_walk walk; -+ int err; -+ -+ err = skcipher_walk_virt(&walk, req, false); - - BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); - state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN); - -- crypto_chacha_init(state, ctx, iv); -+ chacha_init_generic(state, ctx->key, iv); - -- while (walk->nbytes > 0) { -- unsigned int nbytes = walk->nbytes; -+ while (walk.nbytes > 0) { -+ unsigned int nbytes = walk.nbytes; - -- if (nbytes < walk->total) { -- nbytes = round_down(nbytes, walk->stride); -- next_yield -= nbytes; -- } -- -- chacha_dosimd(state, walk->dst.virt.addr, walk->src.virt.addr, -- nbytes, ctx->nrounds); -+ if (nbytes < walk.total) -+ nbytes = round_down(nbytes, 
walk.stride); - -- if (next_yield <= 0) { -- /* temporarily allow preemption */ -- kernel_fpu_end(); -+ if (!crypto_simd_usable()) { -+ chacha_crypt_generic(state, walk.dst.virt.addr, -+ walk.src.virt.addr, nbytes, -+ ctx->nrounds); -+ } else { - kernel_fpu_begin(); -- next_yield = 4096; -+ chacha_dosimd(state, walk.dst.virt.addr, -+ walk.src.virt.addr, nbytes, -+ ctx->nrounds); -+ kernel_fpu_end(); - } -- -- err = skcipher_walk_done(walk, walk->nbytes - nbytes); -+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - - return err; -@@ -163,55 +164,34 @@ static int chacha_simd(struct skcipher_r - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -- struct skcipher_walk walk; -- int err; -- -- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable()) -- return crypto_chacha_crypt(req); - -- err = skcipher_walk_virt(&walk, req, true); -- if (err) -- return err; -- -- kernel_fpu_begin(); -- err = chacha_simd_stream_xor(&walk, ctx, req->iv); -- kernel_fpu_end(); -- return err; -+ return chacha_simd_stream_xor(req, ctx, req->iv); - } - - static int xchacha_simd(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -- struct skcipher_walk walk; -- struct chacha_ctx subctx; - u32 *state, state_buf[16 + 2] __aligned(8); -+ struct chacha_ctx subctx; - u8 real_iv[16]; -- int err; -- -- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable()) -- return crypto_xchacha_crypt(req); -- -- err = skcipher_walk_virt(&walk, req, true); -- if (err) -- return err; - - BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); - state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN); -- crypto_chacha_init(state, ctx, req->iv); -+ chacha_init_generic(state, ctx->key, req->iv); - -- kernel_fpu_begin(); -- -- hchacha_block_ssse3(state, subctx.key, ctx->nrounds); -+ if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) { 
-+ kernel_fpu_begin(); -+ hchacha_block_ssse3(state, subctx.key, ctx->nrounds); -+ kernel_fpu_end(); -+ } else { -+ hchacha_block_generic(state, subctx.key, ctx->nrounds); -+ } - subctx.nrounds = ctx->nrounds; - - memcpy(&real_iv[0], req->iv + 24, 8); - memcpy(&real_iv[8], req->iv + 16, 8); -- err = chacha_simd_stream_xor(&walk, &subctx, real_iv); -- -- kernel_fpu_end(); -- -- return err; -+ return chacha_simd_stream_xor(req, &subctx, real_iv); - } - - static struct skcipher_alg algs[] = { -@@ -227,7 +207,7 @@ static struct skcipher_alg algs[] = { - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha20_setkey, -+ .setkey = chacha20_setkey, - .encrypt = chacha_simd, - .decrypt = chacha_simd, - }, { -@@ -242,7 +222,7 @@ static struct skcipher_alg algs[] = { - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha20_setkey, -+ .setkey = chacha20_setkey, - .encrypt = xchacha_simd, - .decrypt = xchacha_simd, - }, { -@@ -257,7 +237,7 @@ static struct skcipher_alg algs[] = { - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha12_setkey, -+ .setkey = chacha12_setkey, - .encrypt = xchacha_simd, - .decrypt = xchacha_simd, - }, ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -1417,7 +1417,7 @@ config CRYPTO_CHACHA20_X86_64 - tristate "ChaCha stream cipher algorithms (x86_64/SSSE3/AVX2/AVX-512VL)" - depends on X86 && 64BIT - select CRYPTO_BLKCIPHER -- select CRYPTO_CHACHA20 -+ select CRYPTO_LIB_CHACHA_GENERIC - help - SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20, - XChaCha20, and XChaCha12 stream ciphers. 
diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0004-crypto-x86-chacha-expose-SIMD-ChaCha-routine-as-libr.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0004-crypto-x86-chacha-expose-SIMD-ChaCha-routine-as-libr.patch deleted file mode 100644 index 0e5462837..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0004-crypto-x86-chacha-expose-SIMD-ChaCha-routine-as-libr.patch +++ /dev/null @@ -1,205 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:10 +0100 -Subject: [PATCH] crypto: x86/chacha - expose SIMD ChaCha routine as library - function - -commit 84e03fa39fbe95a5567d43bff458c6d3b3a23ad1 upstream. - -Wire the existing x86 SIMD ChaCha code into the new ChaCha library -interface, so that users of the library interface will get the -accelerated version when available. - -Given that calls into the library API will always go through the -routines in this module if it is enabled, switch to static keys -to select the optimal implementation available (which may be none -at all, in which case we defer to the generic implementation for -all invocations). - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/x86/crypto/chacha_glue.c | 91 +++++++++++++++++++++++++---------- - crypto/Kconfig | 1 + - include/crypto/chacha.h | 6 +++ - 3 files changed, 73 insertions(+), 25 deletions(-) - ---- a/arch/x86/crypto/chacha_glue.c -+++ b/arch/x86/crypto/chacha_glue.c -@@ -21,24 +21,24 @@ asmlinkage void chacha_block_xor_ssse3(u - asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); - asmlinkage void hchacha_block_ssse3(const u32 *state, u32 *out, int nrounds); --#ifdef CONFIG_AS_AVX2 -+ - asmlinkage void chacha_2block_xor_avx2(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); - asmlinkage void chacha_4block_xor_avx2(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); - asmlinkage void chacha_8block_xor_avx2(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); --static bool chacha_use_avx2; --#ifdef CONFIG_AS_AVX512 -+ - asmlinkage void chacha_2block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); - asmlinkage void chacha_4block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); - asmlinkage void chacha_8block_xor_avx512vl(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); --static bool chacha_use_avx512vl; --#endif --#endif -+ -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_simd); -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx2); -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(chacha_use_avx512vl); - - static unsigned int chacha_advance(unsigned int len, unsigned int maxblocks) - { -@@ -49,9 +49,8 @@ static unsigned int chacha_advance(unsig - static void chacha_dosimd(u32 *state, u8 *dst, const u8 *src, - unsigned int bytes, int nrounds) - { --#ifdef CONFIG_AS_AVX2 --#ifdef CONFIG_AS_AVX512 -- if (chacha_use_avx512vl) { -+ if (IS_ENABLED(CONFIG_AS_AVX512) && -+ static_branch_likely(&chacha_use_avx512vl)) { - while (bytes >= 
CHACHA_BLOCK_SIZE * 8) { - chacha_8block_xor_avx512vl(state, dst, src, bytes, - nrounds); -@@ -79,8 +78,9 @@ static void chacha_dosimd(u32 *state, u8 - return; - } - } --#endif -- if (chacha_use_avx2) { -+ -+ if (IS_ENABLED(CONFIG_AS_AVX2) && -+ static_branch_likely(&chacha_use_avx2)) { - while (bytes >= CHACHA_BLOCK_SIZE * 8) { - chacha_8block_xor_avx2(state, dst, src, bytes, nrounds); - bytes -= CHACHA_BLOCK_SIZE * 8; -@@ -104,7 +104,7 @@ static void chacha_dosimd(u32 *state, u8 - return; - } - } --#endif -+ - while (bytes >= CHACHA_BLOCK_SIZE * 4) { - chacha_4block_xor_ssse3(state, dst, src, bytes, nrounds); - bytes -= CHACHA_BLOCK_SIZE * 4; -@@ -123,6 +123,43 @@ static void chacha_dosimd(u32 *state, u8 - } - } - -+void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) -+{ -+ state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); -+ -+ if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) { -+ hchacha_block_generic(state, stream, nrounds); -+ } else { -+ kernel_fpu_begin(); -+ hchacha_block_ssse3(state, stream, nrounds); -+ kernel_fpu_end(); -+ } -+} -+EXPORT_SYMBOL(hchacha_block_arch); -+ -+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) -+{ -+ state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); -+ -+ chacha_init_generic(state, key, iv); -+} -+EXPORT_SYMBOL(chacha_init_arch); -+ -+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, -+ int nrounds) -+{ -+ state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); -+ -+ if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() || -+ bytes <= CHACHA_BLOCK_SIZE) -+ return chacha_crypt_generic(state, dst, src, bytes, nrounds); -+ -+ kernel_fpu_begin(); -+ chacha_dosimd(state, dst, src, bytes, nrounds); -+ kernel_fpu_end(); -+} -+EXPORT_SYMBOL(chacha_crypt_arch); -+ - static int chacha_simd_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) - { -@@ -143,7 +180,8 @@ static int chacha_simd_stream_xor(struct - if (nbytes < 
walk.total) - nbytes = round_down(nbytes, walk.stride); - -- if (!crypto_simd_usable()) { -+ if (!static_branch_likely(&chacha_use_simd) || -+ !crypto_simd_usable()) { - chacha_crypt_generic(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, - ctx->nrounds); -@@ -246,18 +284,21 @@ static struct skcipher_alg algs[] = { - static int __init chacha_simd_mod_init(void) - { - if (!boot_cpu_has(X86_FEATURE_SSSE3)) -- return -ENODEV; -+ return 0; - --#ifdef CONFIG_AS_AVX2 -- chacha_use_avx2 = boot_cpu_has(X86_FEATURE_AVX) && -- boot_cpu_has(X86_FEATURE_AVX2) && -- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); --#ifdef CONFIG_AS_AVX512 -- chacha_use_avx512vl = chacha_use_avx2 && -- boot_cpu_has(X86_FEATURE_AVX512VL) && -- boot_cpu_has(X86_FEATURE_AVX512BW); /* kmovq */ --#endif --#endif -+ static_branch_enable(&chacha_use_simd); -+ -+ if (IS_ENABLED(CONFIG_AS_AVX2) && -+ boot_cpu_has(X86_FEATURE_AVX) && -+ boot_cpu_has(X86_FEATURE_AVX2) && -+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) { -+ static_branch_enable(&chacha_use_avx2); -+ -+ if (IS_ENABLED(CONFIG_AS_AVX512) && -+ boot_cpu_has(X86_FEATURE_AVX512VL) && -+ boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */ -+ static_branch_enable(&chacha_use_avx512vl); -+ } - return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); - } - ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -1418,6 +1418,7 @@ config CRYPTO_CHACHA20_X86_64 - depends on X86 && 64BIT - select CRYPTO_BLKCIPHER - select CRYPTO_LIB_CHACHA_GENERIC -+ select CRYPTO_ARCH_HAVE_LIB_CHACHA - help - SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20, - XChaCha20, and XChaCha12 stream ciphers. 
---- a/include/crypto/chacha.h -+++ b/include/crypto/chacha.h -@@ -25,6 +25,12 @@ - #define CHACHA_BLOCK_SIZE 64 - #define CHACHAPOLY_IV_SIZE 12 - -+#ifdef CONFIG_X86_64 -+#define CHACHA_STATE_WORDS ((CHACHA_BLOCK_SIZE + 12) / sizeof(u32)) -+#else -+#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32)) -+#endif -+ - /* 192-bit nonce, then 64-bit stream position */ - #define XCHACHA_IV_SIZE 32 - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0005-crypto-arm64-chacha-depend-on-generic-chacha-library.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0005-crypto-arm64-chacha-depend-on-generic-chacha-library.patch deleted file mode 100644 index 10e49c192..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0005-crypto-arm64-chacha-depend-on-generic-chacha-library.patch +++ /dev/null @@ -1,129 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:11 +0100 -Subject: [PATCH] crypto: arm64/chacha - depend on generic chacha library - instead of crypto driver - -commit c77da4867cbb7841177275dbb250f5c09679fae4 upstream. - -Depend on the generic ChaCha library routines instead of pulling in the -generic ChaCha skcipher driver, which is more than we need, and makes -managing the dependencies between the generic library, generic driver, -accelerated library and driver more complicated. - -While at it, drop the logic to prefer the scalar code on short inputs. -Turning the NEON on and off is cheap these days, and one major use case -for ChaCha20 is ChaCha20-Poly1305, which is guaranteed to hit the scalar -path upon every invocation (when doing the Poly1305 nonce generation) - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/arm64/crypto/Kconfig | 2 +- - arch/arm64/crypto/chacha-neon-glue.c | 40 +++++++++++++++------------- - 2 files changed, 23 insertions(+), 19 deletions(-) - ---- a/arch/arm64/crypto/Kconfig -+++ b/arch/arm64/crypto/Kconfig -@@ -103,7 +103,7 @@ config CRYPTO_CHACHA20_NEON - tristate "ChaCha20, XChaCha20, and XChaCha12 stream ciphers using NEON instructions" - depends on KERNEL_MODE_NEON - select CRYPTO_BLKCIPHER -- select CRYPTO_CHACHA20 -+ select CRYPTO_LIB_CHACHA_GENERIC - - config CRYPTO_NHPOLY1305_NEON - tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)" ---- a/arch/arm64/crypto/chacha-neon-glue.c -+++ b/arch/arm64/crypto/chacha-neon-glue.c -@@ -68,7 +68,7 @@ static int chacha_neon_stream_xor(struct - - err = skcipher_walk_virt(&walk, req, false); - -- crypto_chacha_init(state, ctx, iv); -+ chacha_init_generic(state, ctx->key, iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; -@@ -76,10 +76,16 @@ static int chacha_neon_stream_xor(struct - if (nbytes < walk.total) - nbytes = rounddown(nbytes, walk.stride); - -- kernel_neon_begin(); -- chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr, -- nbytes, ctx->nrounds); -- kernel_neon_end(); -+ if (!crypto_simd_usable()) { -+ chacha_crypt_generic(state, walk.dst.virt.addr, -+ walk.src.virt.addr, nbytes, -+ ctx->nrounds); -+ } else { -+ kernel_neon_begin(); -+ chacha_doneon(state, walk.dst.virt.addr, -+ walk.src.virt.addr, nbytes, ctx->nrounds); -+ kernel_neon_end(); -+ } - err = skcipher_walk_done(&walk, walk.nbytes - nbytes); - } - -@@ -91,9 +97,6 @@ static int chacha_neon(struct skcipher_r - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - -- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable()) -- return crypto_chacha_crypt(req); -- - return chacha_neon_stream_xor(req, ctx, req->iv); - } - -@@ -105,14 +108,15 @@ static int xchacha_neon(struct skcipher_ - u32 state[16]; 
- u8 real_iv[16]; - -- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable()) -- return crypto_xchacha_crypt(req); -- -- crypto_chacha_init(state, ctx, req->iv); -+ chacha_init_generic(state, ctx->key, req->iv); - -- kernel_neon_begin(); -- hchacha_block_neon(state, subctx.key, ctx->nrounds); -- kernel_neon_end(); -+ if (crypto_simd_usable()) { -+ kernel_neon_begin(); -+ hchacha_block_neon(state, subctx.key, ctx->nrounds); -+ kernel_neon_end(); -+ } else { -+ hchacha_block_generic(state, subctx.key, ctx->nrounds); -+ } - subctx.nrounds = ctx->nrounds; - - memcpy(&real_iv[0], req->iv + 24, 8); -@@ -134,7 +138,7 @@ static struct skcipher_alg algs[] = { - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .walksize = 5 * CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha20_setkey, -+ .setkey = chacha20_setkey, - .encrypt = chacha_neon, - .decrypt = chacha_neon, - }, { -@@ -150,7 +154,7 @@ static struct skcipher_alg algs[] = { - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .walksize = 5 * CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha20_setkey, -+ .setkey = chacha20_setkey, - .encrypt = xchacha_neon, - .decrypt = xchacha_neon, - }, { -@@ -166,7 +170,7 @@ static struct skcipher_alg algs[] = { - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, - .walksize = 5 * CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha12_setkey, -+ .setkey = chacha12_setkey, - .encrypt = xchacha_neon, - .decrypt = xchacha_neon, - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0006-crypto-arm64-chacha-expose-arm64-ChaCha-routine-as-l.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0006-crypto-arm64-chacha-expose-arm64-ChaCha-routine-as-l.patch deleted file mode 100644 index 71665e8bf..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0006-crypto-arm64-chacha-expose-arm64-ChaCha-routine-as-l.patch +++ /dev/null @@ -1,138 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 
8 Nov 2019 13:22:12 +0100 -Subject: [PATCH] crypto: arm64/chacha - expose arm64 ChaCha routine as library - function - -commit b3aad5bad26a01a4bd8c49a5c5f52aec665f3b7c upstream. - -Expose the accelerated NEON ChaCha routine directly as a symbol -export so that users of the ChaCha library API can use it directly. - -Given that calls into the library API will always go through the -routines in this module if it is enabled, switch to static keys -to select the optimal implementation available (which may be none -at all, in which case we defer to the generic implementation for -all invocations). - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm64/crypto/Kconfig | 1 + - arch/arm64/crypto/chacha-neon-glue.c | 53 ++++++++++++++++++++++------ - 2 files changed, 43 insertions(+), 11 deletions(-) - ---- a/arch/arm64/crypto/Kconfig -+++ b/arch/arm64/crypto/Kconfig -@@ -104,6 +104,7 @@ config CRYPTO_CHACHA20_NEON - depends on KERNEL_MODE_NEON - select CRYPTO_BLKCIPHER - select CRYPTO_LIB_CHACHA_GENERIC -+ select CRYPTO_ARCH_HAVE_LIB_CHACHA - - config CRYPTO_NHPOLY1305_NEON - tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)" ---- a/arch/arm64/crypto/chacha-neon-glue.c -+++ b/arch/arm64/crypto/chacha-neon-glue.c -@@ -23,6 +23,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -36,6 +37,8 @@ asmlinkage void chacha_4block_xor_neon(u - int nrounds, int bytes); - asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds); - -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); -+ - static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, - int bytes, int nrounds) - { -@@ -59,6 +62,37 @@ static void chacha_doneon(u32 *state, u8 - } - } - -+void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) -+{ -+ if (!static_branch_likely(&have_neon) || !crypto_simd_usable()) { -+ hchacha_block_generic(state, stream, nrounds); -+ } else { -+ 
kernel_neon_begin(); -+ hchacha_block_neon(state, stream, nrounds); -+ kernel_neon_end(); -+ } -+} -+EXPORT_SYMBOL(hchacha_block_arch); -+ -+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) -+{ -+ chacha_init_generic(state, key, iv); -+} -+EXPORT_SYMBOL(chacha_init_arch); -+ -+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, -+ int nrounds) -+{ -+ if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE || -+ !crypto_simd_usable()) -+ return chacha_crypt_generic(state, dst, src, bytes, nrounds); -+ -+ kernel_neon_begin(); -+ chacha_doneon(state, dst, src, bytes, nrounds); -+ kernel_neon_end(); -+} -+EXPORT_SYMBOL(chacha_crypt_arch); -+ - static int chacha_neon_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) - { -@@ -76,7 +110,8 @@ static int chacha_neon_stream_xor(struct - if (nbytes < walk.total) - nbytes = rounddown(nbytes, walk.stride); - -- if (!crypto_simd_usable()) { -+ if (!static_branch_likely(&have_neon) || -+ !crypto_simd_usable()) { - chacha_crypt_generic(state, walk.dst.virt.addr, - walk.src.virt.addr, nbytes, - ctx->nrounds); -@@ -109,14 +144,7 @@ static int xchacha_neon(struct skcipher_ - u8 real_iv[16]; - - chacha_init_generic(state, ctx->key, req->iv); -- -- if (crypto_simd_usable()) { -- kernel_neon_begin(); -- hchacha_block_neon(state, subctx.key, ctx->nrounds); -- kernel_neon_end(); -- } else { -- hchacha_block_generic(state, subctx.key, ctx->nrounds); -- } -+ hchacha_block_arch(state, subctx.key, ctx->nrounds); - subctx.nrounds = ctx->nrounds; - - memcpy(&real_iv[0], req->iv + 24, 8); -@@ -179,14 +207,17 @@ static struct skcipher_alg algs[] = { - static int __init chacha_simd_mod_init(void) - { - if (!cpu_have_named_feature(ASIMD)) -- return -ENODEV; -+ return 0; -+ -+ static_branch_enable(&have_neon); - - return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); - } - - static void __exit chacha_simd_mod_fini(void) - { -- 
crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -+ if (cpu_have_named_feature(ASIMD)) -+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); - } - - module_init(chacha_simd_mod_init); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0007-crypto-arm-chacha-import-Eric-Biggers-s-scalar-accel.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0007-crypto-arm-chacha-import-Eric-Biggers-s-scalar-accel.patch deleted file mode 100644 index 978f2f55b..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0007-crypto-arm-chacha-import-Eric-Biggers-s-scalar-accel.patch +++ /dev/null @@ -1,480 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:13 +0100 -Subject: [PATCH] crypto: arm/chacha - import Eric Biggers's scalar accelerated - ChaCha code - -commit 29621d099f9c642b22a69dc8e7e20c108473a392 upstream. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm/crypto/chacha-scalar-core.S | 461 +++++++++++++++++++++++++++ - 1 file changed, 461 insertions(+) - create mode 100644 arch/arm/crypto/chacha-scalar-core.S - ---- /dev/null -+++ b/arch/arm/crypto/chacha-scalar-core.S -@@ -0,0 +1,461 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2018 Google, Inc. -+ */ -+ -+#include -+#include -+ -+/* -+ * Design notes: -+ * -+ * 16 registers would be needed to hold the state matrix, but only 14 are -+ * available because 'sp' and 'pc' cannot be used. So we spill the elements -+ * (x8, x9) to the stack and swap them out with (x10, x11). This adds one -+ * 'ldrd' and one 'strd' instruction per round. -+ * -+ * All rotates are performed using the implicit rotate operand accepted by the -+ * 'add' and 'eor' instructions. This is faster than using explicit rotate -+ * instructions. 
To make this work, we allow the values in the second and last -+ * rows of the ChaCha state matrix (rows 'b' and 'd') to temporarily have the -+ * wrong rotation amount. The rotation amount is then fixed up just in time -+ * when the values are used. 'brot' is the number of bits the values in row 'b' -+ * need to be rotated right to arrive at the correct values, and 'drot' -+ * similarly for row 'd'. (brot, drot) start out as (0, 0) but we make it such -+ * that they end up as (25, 24) after every round. -+ */ -+ -+ // ChaCha state registers -+ X0 .req r0 -+ X1 .req r1 -+ X2 .req r2 -+ X3 .req r3 -+ X4 .req r4 -+ X5 .req r5 -+ X6 .req r6 -+ X7 .req r7 -+ X8_X10 .req r8 // shared by x8 and x10 -+ X9_X11 .req r9 // shared by x9 and x11 -+ X12 .req r10 -+ X13 .req r11 -+ X14 .req r12 -+ X15 .req r14 -+ -+.Lexpand_32byte_k: -+ // "expand 32-byte k" -+ .word 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 -+ -+#ifdef __thumb2__ -+# define adrl adr -+#endif -+ -+.macro __rev out, in, t0, t1, t2 -+.if __LINUX_ARM_ARCH__ >= 6 -+ rev \out, \in -+.else -+ lsl \t0, \in, #24 -+ and \t1, \in, #0xff00 -+ and \t2, \in, #0xff0000 -+ orr \out, \t0, \in, lsr #24 -+ orr \out, \out, \t1, lsl #8 -+ orr \out, \out, \t2, lsr #8 -+.endif -+.endm -+ -+.macro _le32_bswap x, t0, t1, t2 -+#ifdef __ARMEB__ -+ __rev \x, \x, \t0, \t1, \t2 -+#endif -+.endm -+ -+.macro _le32_bswap_4x a, b, c, d, t0, t1, t2 -+ _le32_bswap \a, \t0, \t1, \t2 -+ _le32_bswap \b, \t0, \t1, \t2 -+ _le32_bswap \c, \t0, \t1, \t2 -+ _le32_bswap \d, \t0, \t1, \t2 -+.endm -+ -+.macro __ldrd a, b, src, offset -+#if __LINUX_ARM_ARCH__ >= 6 -+ ldrd \a, \b, [\src, #\offset] -+#else -+ ldr \a, [\src, #\offset] -+ ldr \b, [\src, #\offset + 4] -+#endif -+.endm -+ -+.macro __strd a, b, dst, offset -+#if __LINUX_ARM_ARCH__ >= 6 -+ strd \a, \b, [\dst, #\offset] -+#else -+ str \a, [\dst, #\offset] -+ str \b, [\dst, #\offset + 4] -+#endif -+.endm -+ -+.macro _halfround a1, b1, c1, d1, a2, b2, c2, d2 -+ -+ // a += b; d ^= a; d = rol(d, 
16); -+ add \a1, \a1, \b1, ror #brot -+ add \a2, \a2, \b2, ror #brot -+ eor \d1, \a1, \d1, ror #drot -+ eor \d2, \a2, \d2, ror #drot -+ // drot == 32 - 16 == 16 -+ -+ // c += d; b ^= c; b = rol(b, 12); -+ add \c1, \c1, \d1, ror #16 -+ add \c2, \c2, \d2, ror #16 -+ eor \b1, \c1, \b1, ror #brot -+ eor \b2, \c2, \b2, ror #brot -+ // brot == 32 - 12 == 20 -+ -+ // a += b; d ^= a; d = rol(d, 8); -+ add \a1, \a1, \b1, ror #20 -+ add \a2, \a2, \b2, ror #20 -+ eor \d1, \a1, \d1, ror #16 -+ eor \d2, \a2, \d2, ror #16 -+ // drot == 32 - 8 == 24 -+ -+ // c += d; b ^= c; b = rol(b, 7); -+ add \c1, \c1, \d1, ror #24 -+ add \c2, \c2, \d2, ror #24 -+ eor \b1, \c1, \b1, ror #20 -+ eor \b2, \c2, \b2, ror #20 -+ // brot == 32 - 7 == 25 -+.endm -+ -+.macro _doubleround -+ -+ // column round -+ -+ // quarterrounds: (x0, x4, x8, x12) and (x1, x5, x9, x13) -+ _halfround X0, X4, X8_X10, X12, X1, X5, X9_X11, X13 -+ -+ // save (x8, x9); restore (x10, x11) -+ __strd X8_X10, X9_X11, sp, 0 -+ __ldrd X8_X10, X9_X11, sp, 8 -+ -+ // quarterrounds: (x2, x6, x10, x14) and (x3, x7, x11, x15) -+ _halfround X2, X6, X8_X10, X14, X3, X7, X9_X11, X15 -+ -+ .set brot, 25 -+ .set drot, 24 -+ -+ // diagonal round -+ -+ // quarterrounds: (x0, x5, x10, x15) and (x1, x6, x11, x12) -+ _halfround X0, X5, X8_X10, X15, X1, X6, X9_X11, X12 -+ -+ // save (x10, x11); restore (x8, x9) -+ __strd X8_X10, X9_X11, sp, 8 -+ __ldrd X8_X10, X9_X11, sp, 0 -+ -+ // quarterrounds: (x2, x7, x8, x13) and (x3, x4, x9, x14) -+ _halfround X2, X7, X8_X10, X13, X3, X4, X9_X11, X14 -+.endm -+ -+.macro _chacha_permute nrounds -+ .set brot, 0 -+ .set drot, 0 -+ .rept \nrounds / 2 -+ _doubleround -+ .endr -+.endm -+ -+.macro _chacha nrounds -+ -+.Lnext_block\@: -+ // Stack: unused0-unused1 x10-x11 x0-x15 OUT IN LEN -+ // Registers contain x0-x9,x12-x15. -+ -+ // Do the core ChaCha permutation to update x0-x15. 
-+ _chacha_permute \nrounds -+ -+ add sp, #8 -+ // Stack: x10-x11 orig_x0-orig_x15 OUT IN LEN -+ // Registers contain x0-x9,x12-x15. -+ // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'. -+ -+ // Free up some registers (r8-r12,r14) by pushing (x8-x9,x12-x15). -+ push {X8_X10, X9_X11, X12, X13, X14, X15} -+ -+ // Load (OUT, IN, LEN). -+ ldr r14, [sp, #96] -+ ldr r12, [sp, #100] -+ ldr r11, [sp, #104] -+ -+ orr r10, r14, r12 -+ -+ // Use slow path if fewer than 64 bytes remain. -+ cmp r11, #64 -+ blt .Lxor_slowpath\@ -+ -+ // Use slow path if IN and/or OUT isn't 4-byte aligned. Needed even on -+ // ARMv6+, since ldmia and stmia (used below) still require alignment. -+ tst r10, #3 -+ bne .Lxor_slowpath\@ -+ -+ // Fast path: XOR 64 bytes of aligned data. -+ -+ // Stack: x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN -+ // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is OUT. -+ // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'. -+ -+ // x0-x3 -+ __ldrd r8, r9, sp, 32 -+ __ldrd r10, r11, sp, 40 -+ add X0, X0, r8 -+ add X1, X1, r9 -+ add X2, X2, r10 -+ add X3, X3, r11 -+ _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 -+ ldmia r12!, {r8-r11} -+ eor X0, X0, r8 -+ eor X1, X1, r9 -+ eor X2, X2, r10 -+ eor X3, X3, r11 -+ stmia r14!, {X0-X3} -+ -+ // x4-x7 -+ __ldrd r8, r9, sp, 48 -+ __ldrd r10, r11, sp, 56 -+ add X4, r8, X4, ror #brot -+ add X5, r9, X5, ror #brot -+ ldmia r12!, {X0-X3} -+ add X6, r10, X6, ror #brot -+ add X7, r11, X7, ror #brot -+ _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 -+ eor X4, X4, X0 -+ eor X5, X5, X1 -+ eor X6, X6, X2 -+ eor X7, X7, X3 -+ stmia r14!, {X4-X7} -+ -+ // x8-x15 -+ pop {r0-r7} // (x8-x9,x12-x15,x10-x11) -+ __ldrd r8, r9, sp, 32 -+ __ldrd r10, r11, sp, 40 -+ add r0, r0, r8 // x8 -+ add r1, r1, r9 // x9 -+ add r6, r6, r10 // x10 -+ add r7, r7, r11 // x11 -+ _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 -+ ldmia r12!, {r8-r11} -+ eor r0, r0, r8 // x8 -+ eor r1, r1, r9 // x9 -+ eor r6, r6, r10 // x10 -+ eor 
r7, r7, r11 // x11 -+ stmia r14!, {r0,r1,r6,r7} -+ ldmia r12!, {r0,r1,r6,r7} -+ __ldrd r8, r9, sp, 48 -+ __ldrd r10, r11, sp, 56 -+ add r2, r8, r2, ror #drot // x12 -+ add r3, r9, r3, ror #drot // x13 -+ add r4, r10, r4, ror #drot // x14 -+ add r5, r11, r5, ror #drot // x15 -+ _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 -+ ldr r9, [sp, #72] // load LEN -+ eor r2, r2, r0 // x12 -+ eor r3, r3, r1 // x13 -+ eor r4, r4, r6 // x14 -+ eor r5, r5, r7 // x15 -+ subs r9, #64 // decrement and check LEN -+ stmia r14!, {r2-r5} -+ -+ beq .Ldone\@ -+ -+.Lprepare_for_next_block\@: -+ -+ // Stack: x0-x15 OUT IN LEN -+ -+ // Increment block counter (x12) -+ add r8, #1 -+ -+ // Store updated (OUT, IN, LEN) -+ str r14, [sp, #64] -+ str r12, [sp, #68] -+ str r9, [sp, #72] -+ -+ mov r14, sp -+ -+ // Store updated block counter (x12) -+ str r8, [sp, #48] -+ -+ sub sp, #16 -+ -+ // Reload state and do next block -+ ldmia r14!, {r0-r11} // load x0-x11 -+ __strd r10, r11, sp, 8 // store x10-x11 before state -+ ldmia r14, {r10-r12,r14} // load x12-x15 -+ b .Lnext_block\@ -+ -+.Lxor_slowpath\@: -+ // Slow path: < 64 bytes remaining, or unaligned input or output buffer. -+ // We handle it by storing the 64 bytes of keystream to the stack, then -+ // XOR-ing the needed portion with the data. -+ -+ // Allocate keystream buffer -+ sub sp, #64 -+ mov r14, sp -+ -+ // Stack: ks0-ks15 x8-x9 x12-x15 x10-x11 orig_x0-orig_x15 OUT IN LEN -+ // Registers: r0-r7 are x0-x7; r8-r11 are free; r12 is IN; r14 is &ks0. -+ // x4-x7 are rotated by 'brot'; x12-x15 are rotated by 'drot'. 
-+ -+ // Save keystream for x0-x3 -+ __ldrd r8, r9, sp, 96 -+ __ldrd r10, r11, sp, 104 -+ add X0, X0, r8 -+ add X1, X1, r9 -+ add X2, X2, r10 -+ add X3, X3, r11 -+ _le32_bswap_4x X0, X1, X2, X3, r8, r9, r10 -+ stmia r14!, {X0-X3} -+ -+ // Save keystream for x4-x7 -+ __ldrd r8, r9, sp, 112 -+ __ldrd r10, r11, sp, 120 -+ add X4, r8, X4, ror #brot -+ add X5, r9, X5, ror #brot -+ add X6, r10, X6, ror #brot -+ add X7, r11, X7, ror #brot -+ _le32_bswap_4x X4, X5, X6, X7, r8, r9, r10 -+ add r8, sp, #64 -+ stmia r14!, {X4-X7} -+ -+ // Save keystream for x8-x15 -+ ldm r8, {r0-r7} // (x8-x9,x12-x15,x10-x11) -+ __ldrd r8, r9, sp, 128 -+ __ldrd r10, r11, sp, 136 -+ add r0, r0, r8 // x8 -+ add r1, r1, r9 // x9 -+ add r6, r6, r10 // x10 -+ add r7, r7, r11 // x11 -+ _le32_bswap_4x r0, r1, r6, r7, r8, r9, r10 -+ stmia r14!, {r0,r1,r6,r7} -+ __ldrd r8, r9, sp, 144 -+ __ldrd r10, r11, sp, 152 -+ add r2, r8, r2, ror #drot // x12 -+ add r3, r9, r3, ror #drot // x13 -+ add r4, r10, r4, ror #drot // x14 -+ add r5, r11, r5, ror #drot // x15 -+ _le32_bswap_4x r2, r3, r4, r5, r9, r10, r11 -+ stmia r14, {r2-r5} -+ -+ // Stack: ks0-ks15 unused0-unused7 x0-x15 OUT IN LEN -+ // Registers: r8 is block counter, r12 is IN. -+ -+ ldr r9, [sp, #168] // LEN -+ ldr r14, [sp, #160] // OUT -+ cmp r9, #64 -+ mov r0, sp -+ movle r1, r9 -+ movgt r1, #64 -+ // r1 is number of bytes to XOR, in range [1, 64] -+ -+.if __LINUX_ARM_ARCH__ < 6 -+ orr r2, r12, r14 -+ tst r2, #3 // IN or OUT misaligned? 
-+ bne .Lxor_next_byte\@ -+.endif -+ -+ // XOR a word at a time -+.rept 16 -+ subs r1, #4 -+ blt .Lxor_words_done\@ -+ ldr r2, [r12], #4 -+ ldr r3, [r0], #4 -+ eor r2, r2, r3 -+ str r2, [r14], #4 -+.endr -+ b .Lxor_slowpath_done\@ -+.Lxor_words_done\@: -+ ands r1, r1, #3 -+ beq .Lxor_slowpath_done\@ -+ -+ // XOR a byte at a time -+.Lxor_next_byte\@: -+ ldrb r2, [r12], #1 -+ ldrb r3, [r0], #1 -+ eor r2, r2, r3 -+ strb r2, [r14], #1 -+ subs r1, #1 -+ bne .Lxor_next_byte\@ -+ -+.Lxor_slowpath_done\@: -+ subs r9, #64 -+ add sp, #96 -+ bgt .Lprepare_for_next_block\@ -+ -+.Ldone\@: -+.endm // _chacha -+ -+/* -+ * void chacha20_arm(u8 *out, const u8 *in, size_t len, const u32 key[8], -+ * const u32 iv[4]); -+ */ -+ENTRY(chacha20_arm) -+ cmp r2, #0 // len == 0? -+ reteq lr -+ -+ push {r0-r2,r4-r11,lr} -+ -+ // Push state x0-x15 onto stack. -+ // Also store an extra copy of x10-x11 just before the state. -+ -+ ldr r4, [sp, #48] // iv -+ mov r0, sp -+ sub sp, #80 -+ -+ // iv: x12-x15 -+ ldm r4, {X12,X13,X14,X15} -+ stmdb r0!, {X12,X13,X14,X15} -+ -+ // key: x4-x11 -+ __ldrd X8_X10, X9_X11, r3, 24 -+ __strd X8_X10, X9_X11, sp, 8 -+ stmdb r0!, {X8_X10, X9_X11} -+ ldm r3, {X4-X9_X11} -+ stmdb r0!, {X4-X9_X11} -+ -+ // constants: x0-x3 -+ adrl X3, .Lexpand_32byte_k -+ ldm X3, {X0-X3} -+ __strd X0, X1, sp, 16 -+ __strd X2, X3, sp, 24 -+ -+ _chacha 20 -+ -+ add sp, #76 -+ pop {r4-r11, pc} -+ENDPROC(chacha20_arm) -+ -+/* -+ * void hchacha20_arm(const u32 state[16], u32 out[8]); -+ */ -+ENTRY(hchacha20_arm) -+ push {r1,r4-r11,lr} -+ -+ mov r14, r0 -+ ldmia r14!, {r0-r11} // load x0-x11 -+ push {r10-r11} // store x10-x11 to stack -+ ldm r14, {r10-r12,r14} // load x12-x15 -+ sub sp, #8 -+ -+ _chacha_permute 20 -+ -+ // Skip over (unused0-unused1, x10-x11) -+ add sp, #16 -+ -+ // Fix up rotations of x12-x15 -+ ror X12, X12, #drot -+ ror X13, X13, #drot -+ pop {r4} // load 'out' -+ ror X14, X14, #drot -+ ror X15, X15, #drot -+ -+ // Store (x0-x3,x12-x15) to 'out' -+ stm r4, 
{X0,X1,X2,X3,X12,X13,X14,X15} -+ -+ pop {r4-r11,pc} -+ENDPROC(hchacha20_arm) diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0008-crypto-arm-chacha-remove-dependency-on-generic-ChaCh.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0008-crypto-arm-chacha-remove-dependency-on-generic-ChaCh.patch deleted file mode 100644 index 88c9738db..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0008-crypto-arm-chacha-remove-dependency-on-generic-ChaCh.patch +++ /dev/null @@ -1,691 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:14 +0100 -Subject: [PATCH] crypto: arm/chacha - remove dependency on generic ChaCha - driver - -commit b36d8c09e710c71f6a9690b6586fea2d1c9e1e27 upstream. - -Instead of falling back to the generic ChaCha skcipher driver for -non-SIMD cases, use a fast scalar implementation for ARM authored -by Eric Biggers. This removes the module dependency on chacha-generic -altogether, which also simplifies things when we expose the ChaCha -library interface from this module. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/arm/crypto/Kconfig | 4 +- - arch/arm/crypto/Makefile | 3 +- - arch/arm/crypto/chacha-glue.c | 304 +++++++++++++++++++++++++++ - arch/arm/crypto/chacha-neon-glue.c | 202 ------------------ - arch/arm/crypto/chacha-scalar-core.S | 65 +++--- - arch/arm64/crypto/chacha-neon-glue.c | 2 +- - 6 files changed, 340 insertions(+), 240 deletions(-) - create mode 100644 arch/arm/crypto/chacha-glue.c - delete mode 100644 arch/arm/crypto/chacha-neon-glue.c - ---- a/arch/arm/crypto/Kconfig -+++ b/arch/arm/crypto/Kconfig -@@ -127,10 +127,8 @@ config CRYPTO_CRC32_ARM_CE - select CRYPTO_HASH - - config CRYPTO_CHACHA20_NEON -- tristate "NEON accelerated ChaCha stream cipher algorithms" -- depends on KERNEL_MODE_NEON -+ tristate "NEON and scalar accelerated ChaCha stream cipher algorithms" - select CRYPTO_BLKCIPHER -- select CRYPTO_CHACHA20 - - config CRYPTO_NHPOLY1305_NEON - tristate "NEON accelerated NHPoly1305 hash function (for Adiantum)" ---- a/arch/arm/crypto/Makefile -+++ b/arch/arm/crypto/Makefile -@@ -53,7 +53,8 @@ aes-arm-ce-y := aes-ce-core.o aes-ce-glu - ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o - crct10dif-arm-ce-y := crct10dif-ce-core.o crct10dif-ce-glue.o - crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o --chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o -+chacha-neon-y := chacha-scalar-core.o chacha-glue.o -+chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o - nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o - - ifdef REGENERATE_ARM_CRYPTO ---- /dev/null -+++ b/arch/arm/crypto/chacha-glue.c -@@ -0,0 +1,304 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * ARM NEON accelerated ChaCha and XChaCha stream ciphers, -+ * including ChaCha20 (RFC7539) -+ * -+ * Copyright (C) 2016-2019 Linaro, Ltd. 
-+ * Copyright (C) 2015 Martin Willi -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src, -+ int nrounds); -+asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src, -+ int nrounds); -+asmlinkage void hchacha_block_arm(const u32 *state, u32 *out, int nrounds); -+asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds); -+ -+asmlinkage void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes, -+ const u32 *state, int nrounds); -+ -+static inline bool neon_usable(void) -+{ -+ return crypto_simd_usable(); -+} -+ -+static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, -+ unsigned int bytes, int nrounds) -+{ -+ u8 buf[CHACHA_BLOCK_SIZE]; -+ -+ while (bytes >= CHACHA_BLOCK_SIZE * 4) { -+ chacha_4block_xor_neon(state, dst, src, nrounds); -+ bytes -= CHACHA_BLOCK_SIZE * 4; -+ src += CHACHA_BLOCK_SIZE * 4; -+ dst += CHACHA_BLOCK_SIZE * 4; -+ state[12] += 4; -+ } -+ while (bytes >= CHACHA_BLOCK_SIZE) { -+ chacha_block_xor_neon(state, dst, src, nrounds); -+ bytes -= CHACHA_BLOCK_SIZE; -+ src += CHACHA_BLOCK_SIZE; -+ dst += CHACHA_BLOCK_SIZE; -+ state[12]++; -+ } -+ if (bytes) { -+ memcpy(buf, src, bytes); -+ chacha_block_xor_neon(state, buf, buf, nrounds); -+ memcpy(dst, buf, bytes); -+ } -+} -+ -+static int chacha_stream_xor(struct skcipher_request *req, -+ const struct chacha_ctx *ctx, const u8 *iv, -+ bool neon) -+{ -+ struct skcipher_walk walk; -+ u32 state[16]; -+ int err; -+ -+ err = skcipher_walk_virt(&walk, req, false); -+ -+ chacha_init_generic(state, ctx->key, iv); -+ -+ while (walk.nbytes > 0) { -+ unsigned int nbytes = walk.nbytes; -+ -+ if (nbytes < walk.total) -+ nbytes = round_down(nbytes, walk.stride); -+ -+ if (!neon) { -+ chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr, -+ nbytes, state, ctx->nrounds); -+ state[12] += DIV_ROUND_UP(nbytes, 
CHACHA_BLOCK_SIZE); -+ } else { -+ kernel_neon_begin(); -+ chacha_doneon(state, walk.dst.virt.addr, -+ walk.src.virt.addr, nbytes, ctx->nrounds); -+ kernel_neon_end(); -+ } -+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes); -+ } -+ -+ return err; -+} -+ -+static int do_chacha(struct skcipher_request *req, bool neon) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -+ -+ return chacha_stream_xor(req, ctx, req->iv, neon); -+} -+ -+static int chacha_arm(struct skcipher_request *req) -+{ -+ return do_chacha(req, false); -+} -+ -+static int chacha_neon(struct skcipher_request *req) -+{ -+ return do_chacha(req, neon_usable()); -+} -+ -+static int do_xchacha(struct skcipher_request *req, bool neon) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct chacha_ctx subctx; -+ u32 state[16]; -+ u8 real_iv[16]; -+ -+ chacha_init_generic(state, ctx->key, req->iv); -+ -+ if (!neon) { -+ hchacha_block_arm(state, subctx.key, ctx->nrounds); -+ } else { -+ kernel_neon_begin(); -+ hchacha_block_neon(state, subctx.key, ctx->nrounds); -+ kernel_neon_end(); -+ } -+ subctx.nrounds = ctx->nrounds; -+ -+ memcpy(&real_iv[0], req->iv + 24, 8); -+ memcpy(&real_iv[8], req->iv + 16, 8); -+ return chacha_stream_xor(req, &subctx, real_iv, neon); -+} -+ -+static int xchacha_arm(struct skcipher_request *req) -+{ -+ return do_xchacha(req, false); -+} -+ -+static int xchacha_neon(struct skcipher_request *req) -+{ -+ return do_xchacha(req, neon_usable()); -+} -+ -+static struct skcipher_alg arm_algs[] = { -+ { -+ .base.cra_name = "chacha20", -+ .base.cra_driver_name = "chacha20-arm", -+ .base.cra_priority = 200, -+ .base.cra_blocksize = 1, -+ .base.cra_ctxsize = sizeof(struct chacha_ctx), -+ .base.cra_module = THIS_MODULE, -+ -+ .min_keysize = CHACHA_KEY_SIZE, -+ .max_keysize = CHACHA_KEY_SIZE, -+ .ivsize = CHACHA_IV_SIZE, -+ .chunksize = 
CHACHA_BLOCK_SIZE, -+ .setkey = chacha20_setkey, -+ .encrypt = chacha_arm, -+ .decrypt = chacha_arm, -+ }, { -+ .base.cra_name = "xchacha20", -+ .base.cra_driver_name = "xchacha20-arm", -+ .base.cra_priority = 200, -+ .base.cra_blocksize = 1, -+ .base.cra_ctxsize = sizeof(struct chacha_ctx), -+ .base.cra_module = THIS_MODULE, -+ -+ .min_keysize = CHACHA_KEY_SIZE, -+ .max_keysize = CHACHA_KEY_SIZE, -+ .ivsize = XCHACHA_IV_SIZE, -+ .chunksize = CHACHA_BLOCK_SIZE, -+ .setkey = chacha20_setkey, -+ .encrypt = xchacha_arm, -+ .decrypt = xchacha_arm, -+ }, { -+ .base.cra_name = "xchacha12", -+ .base.cra_driver_name = "xchacha12-arm", -+ .base.cra_priority = 200, -+ .base.cra_blocksize = 1, -+ .base.cra_ctxsize = sizeof(struct chacha_ctx), -+ .base.cra_module = THIS_MODULE, -+ -+ .min_keysize = CHACHA_KEY_SIZE, -+ .max_keysize = CHACHA_KEY_SIZE, -+ .ivsize = XCHACHA_IV_SIZE, -+ .chunksize = CHACHA_BLOCK_SIZE, -+ .setkey = chacha12_setkey, -+ .encrypt = xchacha_arm, -+ .decrypt = xchacha_arm, -+ }, -+}; -+ -+static struct skcipher_alg neon_algs[] = { -+ { -+ .base.cra_name = "chacha20", -+ .base.cra_driver_name = "chacha20-neon", -+ .base.cra_priority = 300, -+ .base.cra_blocksize = 1, -+ .base.cra_ctxsize = sizeof(struct chacha_ctx), -+ .base.cra_module = THIS_MODULE, -+ -+ .min_keysize = CHACHA_KEY_SIZE, -+ .max_keysize = CHACHA_KEY_SIZE, -+ .ivsize = CHACHA_IV_SIZE, -+ .chunksize = CHACHA_BLOCK_SIZE, -+ .walksize = 4 * CHACHA_BLOCK_SIZE, -+ .setkey = chacha20_setkey, -+ .encrypt = chacha_neon, -+ .decrypt = chacha_neon, -+ }, { -+ .base.cra_name = "xchacha20", -+ .base.cra_driver_name = "xchacha20-neon", -+ .base.cra_priority = 300, -+ .base.cra_blocksize = 1, -+ .base.cra_ctxsize = sizeof(struct chacha_ctx), -+ .base.cra_module = THIS_MODULE, -+ -+ .min_keysize = CHACHA_KEY_SIZE, -+ .max_keysize = CHACHA_KEY_SIZE, -+ .ivsize = XCHACHA_IV_SIZE, -+ .chunksize = CHACHA_BLOCK_SIZE, -+ .walksize = 4 * CHACHA_BLOCK_SIZE, -+ .setkey = chacha20_setkey, -+ .encrypt = 
xchacha_neon, -+ .decrypt = xchacha_neon, -+ }, { -+ .base.cra_name = "xchacha12", -+ .base.cra_driver_name = "xchacha12-neon", -+ .base.cra_priority = 300, -+ .base.cra_blocksize = 1, -+ .base.cra_ctxsize = sizeof(struct chacha_ctx), -+ .base.cra_module = THIS_MODULE, -+ -+ .min_keysize = CHACHA_KEY_SIZE, -+ .max_keysize = CHACHA_KEY_SIZE, -+ .ivsize = XCHACHA_IV_SIZE, -+ .chunksize = CHACHA_BLOCK_SIZE, -+ .walksize = 4 * CHACHA_BLOCK_SIZE, -+ .setkey = chacha12_setkey, -+ .encrypt = xchacha_neon, -+ .decrypt = xchacha_neon, -+ } -+}; -+ -+static int __init chacha_simd_mod_init(void) -+{ -+ int err; -+ -+ err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); -+ if (err) -+ return err; -+ -+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) { -+ int i; -+ -+ switch (read_cpuid_part()) { -+ case ARM_CPU_PART_CORTEX_A7: -+ case ARM_CPU_PART_CORTEX_A5: -+ /* -+ * The Cortex-A7 and Cortex-A5 do not perform well with -+ * the NEON implementation but do incredibly with the -+ * scalar one and use less power. 
-+ */ -+ for (i = 0; i < ARRAY_SIZE(neon_algs); i++) -+ neon_algs[i].base.cra_priority = 0; -+ break; -+ } -+ -+ err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs)); -+ if (err) -+ crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); -+ } -+ return err; -+} -+ -+static void __exit chacha_simd_mod_fini(void) -+{ -+ crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); -+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) -+ crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs)); -+} -+ -+module_init(chacha_simd_mod_init); -+module_exit(chacha_simd_mod_fini); -+ -+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (scalar and NEON accelerated)"); -+MODULE_AUTHOR("Ard Biesheuvel "); -+MODULE_LICENSE("GPL v2"); -+MODULE_ALIAS_CRYPTO("chacha20"); -+MODULE_ALIAS_CRYPTO("chacha20-arm"); -+MODULE_ALIAS_CRYPTO("xchacha20"); -+MODULE_ALIAS_CRYPTO("xchacha20-arm"); -+MODULE_ALIAS_CRYPTO("xchacha12"); -+MODULE_ALIAS_CRYPTO("xchacha12-arm"); -+#ifdef CONFIG_KERNEL_MODE_NEON -+MODULE_ALIAS_CRYPTO("chacha20-neon"); -+MODULE_ALIAS_CRYPTO("xchacha20-neon"); -+MODULE_ALIAS_CRYPTO("xchacha12-neon"); -+#endif ---- a/arch/arm/crypto/chacha-neon-glue.c -+++ /dev/null -@@ -1,202 +0,0 @@ --/* -- * ARM NEON accelerated ChaCha and XChaCha stream ciphers, -- * including ChaCha20 (RFC7539) -- * -- * Copyright (C) 2016 Linaro, Ltd. -- * -- * This program is free software; you can redistribute it and/or modify -- * it under the terms of the GNU General Public License version 2 as -- * published by the Free Software Foundation. -- * -- * Based on: -- * ChaCha20 256-bit cipher algorithm, RFC7539, SIMD glue code -- * -- * Copyright (C) 2015 Martin Willi -- * -- * This program is free software; you can redistribute it and/or modify -- * it under the terms of the GNU General Public License as published by -- * the Free Software Foundation; either version 2 of the License, or -- * (at your option) any later version. 
-- */ -- --#include --#include --#include --#include --#include --#include -- --#include --#include --#include -- --asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src, -- int nrounds); --asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src, -- int nrounds); --asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds); -- --static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, -- unsigned int bytes, int nrounds) --{ -- u8 buf[CHACHA_BLOCK_SIZE]; -- -- while (bytes >= CHACHA_BLOCK_SIZE * 4) { -- chacha_4block_xor_neon(state, dst, src, nrounds); -- bytes -= CHACHA_BLOCK_SIZE * 4; -- src += CHACHA_BLOCK_SIZE * 4; -- dst += CHACHA_BLOCK_SIZE * 4; -- state[12] += 4; -- } -- while (bytes >= CHACHA_BLOCK_SIZE) { -- chacha_block_xor_neon(state, dst, src, nrounds); -- bytes -= CHACHA_BLOCK_SIZE; -- src += CHACHA_BLOCK_SIZE; -- dst += CHACHA_BLOCK_SIZE; -- state[12]++; -- } -- if (bytes) { -- memcpy(buf, src, bytes); -- chacha_block_xor_neon(state, buf, buf, nrounds); -- memcpy(dst, buf, bytes); -- } --} -- --static int chacha_neon_stream_xor(struct skcipher_request *req, -- const struct chacha_ctx *ctx, const u8 *iv) --{ -- struct skcipher_walk walk; -- u32 state[16]; -- int err; -- -- err = skcipher_walk_virt(&walk, req, false); -- -- crypto_chacha_init(state, ctx, iv); -- -- while (walk.nbytes > 0) { -- unsigned int nbytes = walk.nbytes; -- -- if (nbytes < walk.total) -- nbytes = round_down(nbytes, walk.stride); -- -- kernel_neon_begin(); -- chacha_doneon(state, walk.dst.virt.addr, walk.src.virt.addr, -- nbytes, ctx->nrounds); -- kernel_neon_end(); -- err = skcipher_walk_done(&walk, walk.nbytes - nbytes); -- } -- -- return err; --} -- --static int chacha_neon(struct skcipher_request *req) --{ -- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -- -- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable()) -- return 
crypto_chacha_crypt(req); -- -- return chacha_neon_stream_xor(req, ctx, req->iv); --} -- --static int xchacha_neon(struct skcipher_request *req) --{ -- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -- struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -- struct chacha_ctx subctx; -- u32 state[16]; -- u8 real_iv[16]; -- -- if (req->cryptlen <= CHACHA_BLOCK_SIZE || !crypto_simd_usable()) -- return crypto_xchacha_crypt(req); -- -- crypto_chacha_init(state, ctx, req->iv); -- -- kernel_neon_begin(); -- hchacha_block_neon(state, subctx.key, ctx->nrounds); -- kernel_neon_end(); -- subctx.nrounds = ctx->nrounds; -- -- memcpy(&real_iv[0], req->iv + 24, 8); -- memcpy(&real_iv[8], req->iv + 16, 8); -- return chacha_neon_stream_xor(req, &subctx, real_iv); --} -- --static struct skcipher_alg algs[] = { -- { -- .base.cra_name = "chacha20", -- .base.cra_driver_name = "chacha20-neon", -- .base.cra_priority = 300, -- .base.cra_blocksize = 1, -- .base.cra_ctxsize = sizeof(struct chacha_ctx), -- .base.cra_module = THIS_MODULE, -- -- .min_keysize = CHACHA_KEY_SIZE, -- .max_keysize = CHACHA_KEY_SIZE, -- .ivsize = CHACHA_IV_SIZE, -- .chunksize = CHACHA_BLOCK_SIZE, -- .walksize = 4 * CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha20_setkey, -- .encrypt = chacha_neon, -- .decrypt = chacha_neon, -- }, { -- .base.cra_name = "xchacha20", -- .base.cra_driver_name = "xchacha20-neon", -- .base.cra_priority = 300, -- .base.cra_blocksize = 1, -- .base.cra_ctxsize = sizeof(struct chacha_ctx), -- .base.cra_module = THIS_MODULE, -- -- .min_keysize = CHACHA_KEY_SIZE, -- .max_keysize = CHACHA_KEY_SIZE, -- .ivsize = XCHACHA_IV_SIZE, -- .chunksize = CHACHA_BLOCK_SIZE, -- .walksize = 4 * CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha20_setkey, -- .encrypt = xchacha_neon, -- .decrypt = xchacha_neon, -- }, { -- .base.cra_name = "xchacha12", -- .base.cra_driver_name = "xchacha12-neon", -- .base.cra_priority = 300, -- .base.cra_blocksize = 1, -- .base.cra_ctxsize = sizeof(struct chacha_ctx), -- 
.base.cra_module = THIS_MODULE, -- -- .min_keysize = CHACHA_KEY_SIZE, -- .max_keysize = CHACHA_KEY_SIZE, -- .ivsize = XCHACHA_IV_SIZE, -- .chunksize = CHACHA_BLOCK_SIZE, -- .walksize = 4 * CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha12_setkey, -- .encrypt = xchacha_neon, -- .decrypt = xchacha_neon, -- } --}; -- --static int __init chacha_simd_mod_init(void) --{ -- if (!(elf_hwcap & HWCAP_NEON)) -- return -ENODEV; -- -- return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); --} -- --static void __exit chacha_simd_mod_fini(void) --{ -- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); --} -- --module_init(chacha_simd_mod_init); --module_exit(chacha_simd_mod_fini); -- --MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (NEON accelerated)"); --MODULE_AUTHOR("Ard Biesheuvel "); --MODULE_LICENSE("GPL v2"); --MODULE_ALIAS_CRYPTO("chacha20"); --MODULE_ALIAS_CRYPTO("chacha20-neon"); --MODULE_ALIAS_CRYPTO("xchacha20"); --MODULE_ALIAS_CRYPTO("xchacha20-neon"); --MODULE_ALIAS_CRYPTO("xchacha12"); --MODULE_ALIAS_CRYPTO("xchacha12-neon"); ---- a/arch/arm/crypto/chacha-scalar-core.S -+++ b/arch/arm/crypto/chacha-scalar-core.S -@@ -41,14 +41,6 @@ - X14 .req r12 - X15 .req r14 - --.Lexpand_32byte_k: -- // "expand 32-byte k" -- .word 0x61707865, 0x3320646e, 0x79622d32, 0x6b206574 -- --#ifdef __thumb2__ --# define adrl adr --#endif -- - .macro __rev out, in, t0, t1, t2 - .if __LINUX_ARM_ARCH__ >= 6 - rev \out, \in -@@ -391,61 +383,65 @@ - .endm // _chacha - - /* -- * void chacha20_arm(u8 *out, const u8 *in, size_t len, const u32 key[8], -- * const u32 iv[4]); -+ * void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes, -+ * const u32 *state, int nrounds); - */ --ENTRY(chacha20_arm) -+ENTRY(chacha_doarm) - cmp r2, #0 // len == 0? - reteq lr - -+ ldr ip, [sp] -+ cmp ip, #12 -+ - push {r0-r2,r4-r11,lr} - - // Push state x0-x15 onto stack. - // Also store an extra copy of x10-x11 just before the state. 
- -- ldr r4, [sp, #48] // iv -- mov r0, sp -- sub sp, #80 -- -- // iv: x12-x15 -- ldm r4, {X12,X13,X14,X15} -- stmdb r0!, {X12,X13,X14,X15} -+ add X12, r3, #48 -+ ldm X12, {X12,X13,X14,X15} -+ push {X12,X13,X14,X15} -+ sub sp, sp, #64 - -- // key: x4-x11 -- __ldrd X8_X10, X9_X11, r3, 24 -+ __ldrd X8_X10, X9_X11, r3, 40 - __strd X8_X10, X9_X11, sp, 8 -- stmdb r0!, {X8_X10, X9_X11} -- ldm r3, {X4-X9_X11} -- stmdb r0!, {X4-X9_X11} -- -- // constants: x0-x3 -- adrl X3, .Lexpand_32byte_k -- ldm X3, {X0-X3} -+ __strd X8_X10, X9_X11, sp, 56 -+ ldm r3, {X0-X9_X11} - __strd X0, X1, sp, 16 - __strd X2, X3, sp, 24 -+ __strd X4, X5, sp, 32 -+ __strd X6, X7, sp, 40 -+ __strd X8_X10, X9_X11, sp, 48 - -+ beq 1f - _chacha 20 - -- add sp, #76 -+0: add sp, #76 - pop {r4-r11, pc} --ENDPROC(chacha20_arm) -+ -+1: _chacha 12 -+ b 0b -+ENDPROC(chacha_doarm) - - /* -- * void hchacha20_arm(const u32 state[16], u32 out[8]); -+ * void hchacha_block_arm(const u32 state[16], u32 out[8], int nrounds); - */ --ENTRY(hchacha20_arm) -+ENTRY(hchacha_block_arm) - push {r1,r4-r11,lr} - -+ cmp r2, #12 // ChaCha12 ? -+ - mov r14, r0 - ldmia r14!, {r0-r11} // load x0-x11 - push {r10-r11} // store x10-x11 to stack - ldm r14, {r10-r12,r14} // load x12-x15 - sub sp, #8 - -+ beq 1f - _chacha_permute 20 - - // Skip over (unused0-unused1, x10-x11) -- add sp, #16 -+0: add sp, #16 - - // Fix up rotations of x12-x15 - ror X12, X12, #drot -@@ -458,4 +454,7 @@ ENTRY(hchacha20_arm) - stm r4, {X0,X1,X2,X3,X12,X13,X14,X15} - - pop {r4-r11,pc} --ENDPROC(hchacha20_arm) -+ -+1: _chacha_permute 12 -+ b 0b -+ENDPROC(hchacha_block_arm) ---- a/arch/arm64/crypto/chacha-neon-glue.c -+++ b/arch/arm64/crypto/chacha-neon-glue.c -@@ -1,5 +1,5 @@ - /* -- * ARM NEON accelerated ChaCha and XChaCha stream ciphers, -+ * ARM NEON and scalar accelerated ChaCha and XChaCha stream ciphers, - * including ChaCha20 (RFC7539) - * - * Copyright (C) 2016 - 2017 Linaro, Ltd. 
diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0009-crypto-arm-chacha-expose-ARM-ChaCha-routine-as-libra.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0009-crypto-arm-chacha-expose-ARM-ChaCha-routine-as-libra.patch deleted file mode 100644 index 4006dc63b..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0009-crypto-arm-chacha-expose-ARM-ChaCha-routine-as-libra.patch +++ /dev/null @@ -1,108 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:15 +0100 -Subject: [PATCH] crypto: arm/chacha - expose ARM ChaCha routine as library - function - -commit a44a3430d71bad4ee56788a59fff099b291ea54c upstream. - -Expose the accelerated NEON ChaCha routine directly as a symbol -export so that users of the ChaCha library API can use it directly. - -Given that calls into the library API will always go through the -routines in this module if it is enabled, switch to static keys -to select the optimal implementation available (which may be none -at all, in which case we defer to the generic implementation for -all invocations). - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/arm/crypto/Kconfig | 1 + - arch/arm/crypto/chacha-glue.c | 41 ++++++++++++++++++++++++++++++++++- - 2 files changed, 41 insertions(+), 1 deletion(-) - ---- a/arch/arm/crypto/Kconfig -+++ b/arch/arm/crypto/Kconfig -@@ -129,6 +129,7 @@ config CRYPTO_CRC32_ARM_CE - config CRYPTO_CHACHA20_NEON - tristate "NEON and scalar accelerated ChaCha stream cipher algorithms" - select CRYPTO_BLKCIPHER -+ select CRYPTO_ARCH_HAVE_LIB_CHACHA - - config CRYPTO_NHPOLY1305_NEON - tristate "NEON accelerated NHPoly1305 hash function (for Adiantum)" ---- a/arch/arm/crypto/chacha-glue.c -+++ b/arch/arm/crypto/chacha-glue.c -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -29,9 +30,11 @@ asmlinkage void hchacha_block_neon(const - asmlinkage void chacha_doarm(u8 *dst, const u8 *src, unsigned int bytes, - const u32 *state, int nrounds); - -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(use_neon); -+ - static inline bool neon_usable(void) - { -- return crypto_simd_usable(); -+ return static_branch_likely(&use_neon) && crypto_simd_usable(); - } - - static void chacha_doneon(u32 *state, u8 *dst, const u8 *src, -@@ -60,6 +63,40 @@ static void chacha_doneon(u32 *state, u8 - } - } - -+void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) -+{ -+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable()) { -+ hchacha_block_arm(state, stream, nrounds); -+ } else { -+ kernel_neon_begin(); -+ hchacha_block_neon(state, stream, nrounds); -+ kernel_neon_end(); -+ } -+} -+EXPORT_SYMBOL(hchacha_block_arch); -+ -+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) -+{ -+ chacha_init_generic(state, key, iv); -+} -+EXPORT_SYMBOL(chacha_init_arch); -+ -+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, -+ int nrounds) -+{ -+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon_usable() || -+ bytes <= CHACHA_BLOCK_SIZE) { -+ chacha_doarm(dst, src, bytes, state, nrounds); -+ state[12] += 
DIV_ROUND_UP(bytes, CHACHA_BLOCK_SIZE); -+ return; -+ } -+ -+ kernel_neon_begin(); -+ chacha_doneon(state, dst, src, bytes, nrounds); -+ kernel_neon_end(); -+} -+EXPORT_SYMBOL(chacha_crypt_arch); -+ - static int chacha_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv, - bool neon) -@@ -269,6 +306,8 @@ static int __init chacha_simd_mod_init(v - for (i = 0; i < ARRAY_SIZE(neon_algs); i++) - neon_algs[i].base.cra_priority = 0; - break; -+ default: -+ static_branch_enable(&use_neon); - } - - err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs)); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0010-crypto-mips-chacha-import-32r2-ChaCha-code-from-Zinc.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0010-crypto-mips-chacha-import-32r2-ChaCha-code-from-Zinc.patch deleted file mode 100644 index 0a2b4c452..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0010-crypto-mips-chacha-import-32r2-ChaCha-code-from-Zinc.patch +++ /dev/null @@ -1,451 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 8 Nov 2019 13:22:16 +0100 -Subject: [PATCH] crypto: mips/chacha - import 32r2 ChaCha code from Zinc -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit 49aa7c00eddf8d8f462b0256bd82e81762d7b0c6 upstream. - -This imports the accelerated MIPS 32r2 ChaCha20 implementation from the -Zinc patch set. - -Co-developed-by: René van Dorst -Signed-off-by: René van Dorst -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/mips/crypto/chacha-core.S | 424 +++++++++++++++++++++++++++++++++ - 1 file changed, 424 insertions(+) - create mode 100644 arch/mips/crypto/chacha-core.S - ---- /dev/null -+++ b/arch/mips/crypto/chacha-core.S -@@ -0,0 +1,424 @@ -+/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -+/* -+ * Copyright (C) 2016-2018 René van Dorst . All Rights Reserved. -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#define MASK_U32 0x3c -+#define CHACHA20_BLOCK_SIZE 64 -+#define STACK_SIZE 32 -+ -+#define X0 $t0 -+#define X1 $t1 -+#define X2 $t2 -+#define X3 $t3 -+#define X4 $t4 -+#define X5 $t5 -+#define X6 $t6 -+#define X7 $t7 -+#define X8 $t8 -+#define X9 $t9 -+#define X10 $v1 -+#define X11 $s6 -+#define X12 $s5 -+#define X13 $s4 -+#define X14 $s3 -+#define X15 $s2 -+/* Use regs which are overwritten on exit for Tx so we don't leak clear data. */ -+#define T0 $s1 -+#define T1 $s0 -+#define T(n) T ## n -+#define X(n) X ## n -+ -+/* Input arguments */ -+#define STATE $a0 -+#define OUT $a1 -+#define IN $a2 -+#define BYTES $a3 -+ -+/* Output argument */ -+/* NONCE[0] is kept in a register and not in memory. -+ * We don't want to touch original value in memory. -+ * Must be incremented every loop iteration. -+ */ -+#define NONCE_0 $v0 -+ -+/* SAVED_X and SAVED_CA are set in the jump table. -+ * Use regs which are overwritten on exit else we don't leak clear data. -+ * They are used to handling the last bytes which are not multiple of 4. 
-+ */ -+#define SAVED_X X15 -+#define SAVED_CA $s7 -+ -+#define IS_UNALIGNED $s7 -+ -+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ -+#define MSB 0 -+#define LSB 3 -+#define ROTx rotl -+#define ROTR(n) rotr n, 24 -+#define CPU_TO_LE32(n) \ -+ wsbh n; \ -+ rotr n, 16; -+#else -+#define MSB 3 -+#define LSB 0 -+#define ROTx rotr -+#define CPU_TO_LE32(n) -+#define ROTR(n) -+#endif -+ -+#define FOR_EACH_WORD(x) \ -+ x( 0); \ -+ x( 1); \ -+ x( 2); \ -+ x( 3); \ -+ x( 4); \ -+ x( 5); \ -+ x( 6); \ -+ x( 7); \ -+ x( 8); \ -+ x( 9); \ -+ x(10); \ -+ x(11); \ -+ x(12); \ -+ x(13); \ -+ x(14); \ -+ x(15); -+ -+#define FOR_EACH_WORD_REV(x) \ -+ x(15); \ -+ x(14); \ -+ x(13); \ -+ x(12); \ -+ x(11); \ -+ x(10); \ -+ x( 9); \ -+ x( 8); \ -+ x( 7); \ -+ x( 6); \ -+ x( 5); \ -+ x( 4); \ -+ x( 3); \ -+ x( 2); \ -+ x( 1); \ -+ x( 0); -+ -+#define PLUS_ONE_0 1 -+#define PLUS_ONE_1 2 -+#define PLUS_ONE_2 3 -+#define PLUS_ONE_3 4 -+#define PLUS_ONE_4 5 -+#define PLUS_ONE_5 6 -+#define PLUS_ONE_6 7 -+#define PLUS_ONE_7 8 -+#define PLUS_ONE_8 9 -+#define PLUS_ONE_9 10 -+#define PLUS_ONE_10 11 -+#define PLUS_ONE_11 12 -+#define PLUS_ONE_12 13 -+#define PLUS_ONE_13 14 -+#define PLUS_ONE_14 15 -+#define PLUS_ONE_15 16 -+#define PLUS_ONE(x) PLUS_ONE_ ## x -+#define _CONCAT3(a,b,c) a ## b ## c -+#define CONCAT3(a,b,c) _CONCAT3(a,b,c) -+ -+#define STORE_UNALIGNED(x) \ -+CONCAT3(.Lchacha20_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \ -+ .if (x != 12); \ -+ lw T0, (x*4)(STATE); \ -+ .endif; \ -+ lwl T1, (x*4)+MSB ## (IN); \ -+ lwr T1, (x*4)+LSB ## (IN); \ -+ .if (x == 12); \ -+ addu X ## x, NONCE_0; \ -+ .else; \ -+ addu X ## x, T0; \ -+ .endif; \ -+ CPU_TO_LE32(X ## x); \ -+ xor X ## x, T1; \ -+ swl X ## x, (x*4)+MSB ## (OUT); \ -+ swr X ## x, (x*4)+LSB ## (OUT); -+ -+#define STORE_ALIGNED(x) \ -+CONCAT3(.Lchacha20_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \ -+ .if (x != 12); \ -+ lw T0, (x*4)(STATE); \ -+ .endif; \ -+ lw T1, (x*4) ## (IN); \ -+ .if (x == 12); \ -+ addu X ## x, NONCE_0; \ -+ .else; \ 
-+ addu X ## x, T0; \ -+ .endif; \ -+ CPU_TO_LE32(X ## x); \ -+ xor X ## x, T1; \ -+ sw X ## x, (x*4) ## (OUT); -+ -+/* Jump table macro. -+ * Used for setup and handling the last bytes, which are not multiple of 4. -+ * X15 is free to store Xn -+ * Every jumptable entry must be equal in size. -+ */ -+#define JMPTBL_ALIGNED(x) \ -+.Lchacha20_mips_jmptbl_aligned_ ## x: ; \ -+ .set noreorder; \ -+ b .Lchacha20_mips_xor_aligned_ ## x ## _b; \ -+ .if (x == 12); \ -+ addu SAVED_X, X ## x, NONCE_0; \ -+ .else; \ -+ addu SAVED_X, X ## x, SAVED_CA; \ -+ .endif; \ -+ .set reorder -+ -+#define JMPTBL_UNALIGNED(x) \ -+.Lchacha20_mips_jmptbl_unaligned_ ## x: ; \ -+ .set noreorder; \ -+ b .Lchacha20_mips_xor_unaligned_ ## x ## _b; \ -+ .if (x == 12); \ -+ addu SAVED_X, X ## x, NONCE_0; \ -+ .else; \ -+ addu SAVED_X, X ## x, SAVED_CA; \ -+ .endif; \ -+ .set reorder -+ -+#define AXR(A, B, C, D, K, L, M, N, V, W, Y, Z, S) \ -+ addu X(A), X(K); \ -+ addu X(B), X(L); \ -+ addu X(C), X(M); \ -+ addu X(D), X(N); \ -+ xor X(V), X(A); \ -+ xor X(W), X(B); \ -+ xor X(Y), X(C); \ -+ xor X(Z), X(D); \ -+ rotl X(V), S; \ -+ rotl X(W), S; \ -+ rotl X(Y), S; \ -+ rotl X(Z), S; -+ -+.text -+.set reorder -+.set noat -+.globl chacha20_mips -+.ent chacha20_mips -+chacha20_mips: -+ .frame $sp, STACK_SIZE, $ra -+ -+ addiu $sp, -STACK_SIZE -+ -+ /* Return bytes = 0. */ -+ beqz BYTES, .Lchacha20_mips_end -+ -+ lw NONCE_0, 48(STATE) -+ -+ /* Save s0-s7 */ -+ sw $s0, 0($sp) -+ sw $s1, 4($sp) -+ sw $s2, 8($sp) -+ sw $s3, 12($sp) -+ sw $s4, 16($sp) -+ sw $s5, 20($sp) -+ sw $s6, 24($sp) -+ sw $s7, 28($sp) -+ -+ /* Test IN or OUT is unaligned. 
-+ * IS_UNALIGNED = ( IN | OUT ) & 0x00000003 -+ */ -+ or IS_UNALIGNED, IN, OUT -+ andi IS_UNALIGNED, 0x3 -+ -+ /* Set number of rounds */ -+ li $at, 20 -+ -+ b .Lchacha20_rounds_start -+ -+.align 4 -+.Loop_chacha20_rounds: -+ addiu IN, CHACHA20_BLOCK_SIZE -+ addiu OUT, CHACHA20_BLOCK_SIZE -+ addiu NONCE_0, 1 -+ -+.Lchacha20_rounds_start: -+ lw X0, 0(STATE) -+ lw X1, 4(STATE) -+ lw X2, 8(STATE) -+ lw X3, 12(STATE) -+ -+ lw X4, 16(STATE) -+ lw X5, 20(STATE) -+ lw X6, 24(STATE) -+ lw X7, 28(STATE) -+ lw X8, 32(STATE) -+ lw X9, 36(STATE) -+ lw X10, 40(STATE) -+ lw X11, 44(STATE) -+ -+ move X12, NONCE_0 -+ lw X13, 52(STATE) -+ lw X14, 56(STATE) -+ lw X15, 60(STATE) -+ -+.Loop_chacha20_xor_rounds: -+ addiu $at, -2 -+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16); -+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12); -+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8); -+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7); -+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16); -+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12); -+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8); -+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7); -+ bnez $at, .Loop_chacha20_xor_rounds -+ -+ addiu BYTES, -(CHACHA20_BLOCK_SIZE) -+ -+ /* Is data src/dst unaligned? Jump */ -+ bnez IS_UNALIGNED, .Loop_chacha20_unaligned -+ -+ /* Set number rounds here to fill delayslot. */ -+ li $at, 20 -+ -+ /* BYTES < 0, it has no full block. */ -+ bltz BYTES, .Lchacha20_mips_no_full_block_aligned -+ -+ FOR_EACH_WORD_REV(STORE_ALIGNED) -+ -+ /* BYTES > 0? Loop again. */ -+ bgtz BYTES, .Loop_chacha20_rounds -+ -+ /* Place this here to fill delay slot */ -+ addiu NONCE_0, 1 -+ -+ /* BYTES < 0? 
Handle last bytes */ -+ bltz BYTES, .Lchacha20_mips_xor_bytes -+ -+.Lchacha20_mips_xor_done: -+ /* Restore used registers */ -+ lw $s0, 0($sp) -+ lw $s1, 4($sp) -+ lw $s2, 8($sp) -+ lw $s3, 12($sp) -+ lw $s4, 16($sp) -+ lw $s5, 20($sp) -+ lw $s6, 24($sp) -+ lw $s7, 28($sp) -+ -+ /* Write NONCE_0 back to right location in state */ -+ sw NONCE_0, 48(STATE) -+ -+.Lchacha20_mips_end: -+ addiu $sp, STACK_SIZE -+ jr $ra -+ -+.Lchacha20_mips_no_full_block_aligned: -+ /* Restore the offset on BYTES */ -+ addiu BYTES, CHACHA20_BLOCK_SIZE -+ -+ /* Get number of full WORDS */ -+ andi $at, BYTES, MASK_U32 -+ -+ /* Load upper half of jump table addr */ -+ lui T0, %hi(.Lchacha20_mips_jmptbl_aligned_0) -+ -+ /* Calculate lower half jump table offset */ -+ ins T0, $at, 1, 6 -+ -+ /* Add offset to STATE */ -+ addu T1, STATE, $at -+ -+ /* Add lower half jump table addr */ -+ addiu T0, %lo(.Lchacha20_mips_jmptbl_aligned_0) -+ -+ /* Read value from STATE */ -+ lw SAVED_CA, 0(T1) -+ -+ /* Store remaining bytecounter as negative value */ -+ subu BYTES, $at, BYTES -+ -+ jr T0 -+ -+ /* Jump table */ -+ FOR_EACH_WORD(JMPTBL_ALIGNED) -+ -+ -+.Loop_chacha20_unaligned: -+ /* Set number rounds here to fill delayslot. */ -+ li $at, 20 -+ -+ /* BYTES > 0, it has no full block. */ -+ bltz BYTES, .Lchacha20_mips_no_full_block_unaligned -+ -+ FOR_EACH_WORD_REV(STORE_UNALIGNED) -+ -+ /* BYTES > 0? Loop again. 
*/ -+ bgtz BYTES, .Loop_chacha20_rounds -+ -+ /* Write NONCE_0 back to right location in state */ -+ sw NONCE_0, 48(STATE) -+ -+ .set noreorder -+ /* Fall through to byte handling */ -+ bgez BYTES, .Lchacha20_mips_xor_done -+.Lchacha20_mips_xor_unaligned_0_b: -+.Lchacha20_mips_xor_aligned_0_b: -+ /* Place this here to fill delay slot */ -+ addiu NONCE_0, 1 -+ .set reorder -+ -+.Lchacha20_mips_xor_bytes: -+ addu IN, $at -+ addu OUT, $at -+ /* First byte */ -+ lbu T1, 0(IN) -+ addiu $at, BYTES, 1 -+ CPU_TO_LE32(SAVED_X) -+ ROTR(SAVED_X) -+ xor T1, SAVED_X -+ sb T1, 0(OUT) -+ beqz $at, .Lchacha20_mips_xor_done -+ /* Second byte */ -+ lbu T1, 1(IN) -+ addiu $at, BYTES, 2 -+ ROTx SAVED_X, 8 -+ xor T1, SAVED_X -+ sb T1, 1(OUT) -+ beqz $at, .Lchacha20_mips_xor_done -+ /* Third byte */ -+ lbu T1, 2(IN) -+ ROTx SAVED_X, 8 -+ xor T1, SAVED_X -+ sb T1, 2(OUT) -+ b .Lchacha20_mips_xor_done -+ -+.Lchacha20_mips_no_full_block_unaligned: -+ /* Restore the offset on BYTES */ -+ addiu BYTES, CHACHA20_BLOCK_SIZE -+ -+ /* Get number of full WORDS */ -+ andi $at, BYTES, MASK_U32 -+ -+ /* Load upper half of jump table addr */ -+ lui T0, %hi(.Lchacha20_mips_jmptbl_unaligned_0) -+ -+ /* Calculate lower half jump table offset */ -+ ins T0, $at, 1, 6 -+ -+ /* Add offset to STATE */ -+ addu T1, STATE, $at -+ -+ /* Add lower half jump table addr */ -+ addiu T0, %lo(.Lchacha20_mips_jmptbl_unaligned_0) -+ -+ /* Read value from STATE */ -+ lw SAVED_CA, 0(T1) -+ -+ /* Store remaining bytecounter as negative value */ -+ subu BYTES, $at, BYTES -+ -+ jr T0 -+ -+ /* Jump table */ -+ FOR_EACH_WORD(JMPTBL_UNALIGNED) -+.end chacha20_mips -+.set at diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0011-crypto-mips-chacha-wire-up-accelerated-32r2-code-fro.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0011-crypto-mips-chacha-wire-up-accelerated-32r2-code-fro.patch deleted file mode 100644 index 0d24ce29e..000000000 --- 
a/feeds/ipq807x/ipq807x/patches/080-wireguard-0011-crypto-mips-chacha-wire-up-accelerated-32r2-code-fro.patch +++ /dev/null @@ -1,559 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:17 +0100 -Subject: [PATCH] crypto: mips/chacha - wire up accelerated 32r2 code from Zinc -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit 3a2f58f3ba4f6f44e33d1a48240d5eadb882cb59 upstream. - -This integrates the accelerated MIPS 32r2 implementation of ChaCha -into both the API and library interfaces of the kernel crypto stack. - -The significance of this is that, in addition to becoming available -as an accelerated library implementation, it can also be used by -existing crypto API code such as Adiantum (for block encryption on -ultra low performance cores) or IPsec using chacha20poly1305. These -are use cases that have already opted into using the abstract crypto -API. In order to support Adiantum, the core assembler routine has -been adapted to take the round count as a function argument rather -than hardcoding it to 20. - -Co-developed-by: René van Dorst -Signed-off-by: René van Dorst -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/mips/Makefile | 2 +- - arch/mips/crypto/Makefile | 4 + - arch/mips/crypto/chacha-core.S | 159 ++++++++++++++++++++++++--------- - arch/mips/crypto/chacha-glue.c | 150 +++++++++++++++++++++++++++++++ - crypto/Kconfig | 6 ++ - 5 files changed, 277 insertions(+), 44 deletions(-) - create mode 100644 arch/mips/crypto/chacha-glue.c - ---- a/arch/mips/Makefile -+++ b/arch/mips/Makefile -@@ -334,7 +334,7 @@ libs-$(CONFIG_MIPS_FP_SUPPORT) += arch/m - # See arch/mips/Kbuild for content of core part of the kernel - core-y += arch/mips/ - --drivers-$(CONFIG_MIPS_CRC_SUPPORT) += arch/mips/crypto/ -+drivers-y += arch/mips/crypto/ - drivers-$(CONFIG_OPROFILE) += arch/mips/oprofile/ - - # suspend and hibernation support ---- a/arch/mips/crypto/Makefile -+++ b/arch/mips/crypto/Makefile -@@ -4,3 +4,7 @@ - # - - obj-$(CONFIG_CRYPTO_CRC32_MIPS) += crc32-mips.o -+ -+obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o -+chacha-mips-y := chacha-core.o chacha-glue.o -+AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots ---- a/arch/mips/crypto/chacha-core.S -+++ b/arch/mips/crypto/chacha-core.S -@@ -125,7 +125,7 @@ - #define CONCAT3(a,b,c) _CONCAT3(a,b,c) - - #define STORE_UNALIGNED(x) \ --CONCAT3(.Lchacha20_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \ -+CONCAT3(.Lchacha_mips_xor_unaligned_, PLUS_ONE(x), _b: ;) \ - .if (x != 12); \ - lw T0, (x*4)(STATE); \ - .endif; \ -@@ -142,7 +142,7 @@ CONCAT3(.Lchacha20_mips_xor_unaligned_, - swr X ## x, (x*4)+LSB ## (OUT); - - #define STORE_ALIGNED(x) \ --CONCAT3(.Lchacha20_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \ -+CONCAT3(.Lchacha_mips_xor_aligned_, PLUS_ONE(x), _b: ;) \ - .if (x != 12); \ - lw T0, (x*4)(STATE); \ - .endif; \ -@@ -162,9 +162,9 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PL - * Every jumptable entry must be equal in size. 
- */ - #define JMPTBL_ALIGNED(x) \ --.Lchacha20_mips_jmptbl_aligned_ ## x: ; \ -+.Lchacha_mips_jmptbl_aligned_ ## x: ; \ - .set noreorder; \ -- b .Lchacha20_mips_xor_aligned_ ## x ## _b; \ -+ b .Lchacha_mips_xor_aligned_ ## x ## _b; \ - .if (x == 12); \ - addu SAVED_X, X ## x, NONCE_0; \ - .else; \ -@@ -173,9 +173,9 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PL - .set reorder - - #define JMPTBL_UNALIGNED(x) \ --.Lchacha20_mips_jmptbl_unaligned_ ## x: ; \ -+.Lchacha_mips_jmptbl_unaligned_ ## x: ; \ - .set noreorder; \ -- b .Lchacha20_mips_xor_unaligned_ ## x ## _b; \ -+ b .Lchacha_mips_xor_unaligned_ ## x ## _b; \ - .if (x == 12); \ - addu SAVED_X, X ## x, NONCE_0; \ - .else; \ -@@ -200,15 +200,18 @@ CONCAT3(.Lchacha20_mips_xor_aligned_, PL - .text - .set reorder - .set noat --.globl chacha20_mips --.ent chacha20_mips --chacha20_mips: -+.globl chacha_crypt_arch -+.ent chacha_crypt_arch -+chacha_crypt_arch: - .frame $sp, STACK_SIZE, $ra - -+ /* Load number of rounds */ -+ lw $at, 16($sp) -+ - addiu $sp, -STACK_SIZE - - /* Return bytes = 0. 
*/ -- beqz BYTES, .Lchacha20_mips_end -+ beqz BYTES, .Lchacha_mips_end - - lw NONCE_0, 48(STATE) - -@@ -228,18 +231,15 @@ chacha20_mips: - or IS_UNALIGNED, IN, OUT - andi IS_UNALIGNED, 0x3 - -- /* Set number of rounds */ -- li $at, 20 -- -- b .Lchacha20_rounds_start -+ b .Lchacha_rounds_start - - .align 4 --.Loop_chacha20_rounds: -+.Loop_chacha_rounds: - addiu IN, CHACHA20_BLOCK_SIZE - addiu OUT, CHACHA20_BLOCK_SIZE - addiu NONCE_0, 1 - --.Lchacha20_rounds_start: -+.Lchacha_rounds_start: - lw X0, 0(STATE) - lw X1, 4(STATE) - lw X2, 8(STATE) -@@ -259,7 +259,7 @@ chacha20_mips: - lw X14, 56(STATE) - lw X15, 60(STATE) - --.Loop_chacha20_xor_rounds: -+.Loop_chacha_xor_rounds: - addiu $at, -2 - AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16); - AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12); -@@ -269,31 +269,31 @@ chacha20_mips: - AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12); - AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8); - AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7); -- bnez $at, .Loop_chacha20_xor_rounds -+ bnez $at, .Loop_chacha_xor_rounds - - addiu BYTES, -(CHACHA20_BLOCK_SIZE) - - /* Is data src/dst unaligned? Jump */ -- bnez IS_UNALIGNED, .Loop_chacha20_unaligned -+ bnez IS_UNALIGNED, .Loop_chacha_unaligned - - /* Set number rounds here to fill delayslot. */ -- li $at, 20 -+ lw $at, (STACK_SIZE+16)($sp) - - /* BYTES < 0, it has no full block. */ -- bltz BYTES, .Lchacha20_mips_no_full_block_aligned -+ bltz BYTES, .Lchacha_mips_no_full_block_aligned - - FOR_EACH_WORD_REV(STORE_ALIGNED) - - /* BYTES > 0? Loop again. */ -- bgtz BYTES, .Loop_chacha20_rounds -+ bgtz BYTES, .Loop_chacha_rounds - - /* Place this here to fill delay slot */ - addiu NONCE_0, 1 - - /* BYTES < 0? 
Handle last bytes */ -- bltz BYTES, .Lchacha20_mips_xor_bytes -+ bltz BYTES, .Lchacha_mips_xor_bytes - --.Lchacha20_mips_xor_done: -+.Lchacha_mips_xor_done: - /* Restore used registers */ - lw $s0, 0($sp) - lw $s1, 4($sp) -@@ -307,11 +307,11 @@ chacha20_mips: - /* Write NONCE_0 back to right location in state */ - sw NONCE_0, 48(STATE) - --.Lchacha20_mips_end: -+.Lchacha_mips_end: - addiu $sp, STACK_SIZE - jr $ra - --.Lchacha20_mips_no_full_block_aligned: -+.Lchacha_mips_no_full_block_aligned: - /* Restore the offset on BYTES */ - addiu BYTES, CHACHA20_BLOCK_SIZE - -@@ -319,7 +319,7 @@ chacha20_mips: - andi $at, BYTES, MASK_U32 - - /* Load upper half of jump table addr */ -- lui T0, %hi(.Lchacha20_mips_jmptbl_aligned_0) -+ lui T0, %hi(.Lchacha_mips_jmptbl_aligned_0) - - /* Calculate lower half jump table offset */ - ins T0, $at, 1, 6 -@@ -328,7 +328,7 @@ chacha20_mips: - addu T1, STATE, $at - - /* Add lower half jump table addr */ -- addiu T0, %lo(.Lchacha20_mips_jmptbl_aligned_0) -+ addiu T0, %lo(.Lchacha_mips_jmptbl_aligned_0) - - /* Read value from STATE */ - lw SAVED_CA, 0(T1) -@@ -342,31 +342,31 @@ chacha20_mips: - FOR_EACH_WORD(JMPTBL_ALIGNED) - - --.Loop_chacha20_unaligned: -+.Loop_chacha_unaligned: - /* Set number rounds here to fill delayslot. */ -- li $at, 20 -+ lw $at, (STACK_SIZE+16)($sp) - - /* BYTES > 0, it has no full block. */ -- bltz BYTES, .Lchacha20_mips_no_full_block_unaligned -+ bltz BYTES, .Lchacha_mips_no_full_block_unaligned - - FOR_EACH_WORD_REV(STORE_UNALIGNED) - - /* BYTES > 0? Loop again. 
*/ -- bgtz BYTES, .Loop_chacha20_rounds -+ bgtz BYTES, .Loop_chacha_rounds - - /* Write NONCE_0 back to right location in state */ - sw NONCE_0, 48(STATE) - - .set noreorder - /* Fall through to byte handling */ -- bgez BYTES, .Lchacha20_mips_xor_done --.Lchacha20_mips_xor_unaligned_0_b: --.Lchacha20_mips_xor_aligned_0_b: -+ bgez BYTES, .Lchacha_mips_xor_done -+.Lchacha_mips_xor_unaligned_0_b: -+.Lchacha_mips_xor_aligned_0_b: - /* Place this here to fill delay slot */ - addiu NONCE_0, 1 - .set reorder - --.Lchacha20_mips_xor_bytes: -+.Lchacha_mips_xor_bytes: - addu IN, $at - addu OUT, $at - /* First byte */ -@@ -376,22 +376,22 @@ chacha20_mips: - ROTR(SAVED_X) - xor T1, SAVED_X - sb T1, 0(OUT) -- beqz $at, .Lchacha20_mips_xor_done -+ beqz $at, .Lchacha_mips_xor_done - /* Second byte */ - lbu T1, 1(IN) - addiu $at, BYTES, 2 - ROTx SAVED_X, 8 - xor T1, SAVED_X - sb T1, 1(OUT) -- beqz $at, .Lchacha20_mips_xor_done -+ beqz $at, .Lchacha_mips_xor_done - /* Third byte */ - lbu T1, 2(IN) - ROTx SAVED_X, 8 - xor T1, SAVED_X - sb T1, 2(OUT) -- b .Lchacha20_mips_xor_done -+ b .Lchacha_mips_xor_done - --.Lchacha20_mips_no_full_block_unaligned: -+.Lchacha_mips_no_full_block_unaligned: - /* Restore the offset on BYTES */ - addiu BYTES, CHACHA20_BLOCK_SIZE - -@@ -399,7 +399,7 @@ chacha20_mips: - andi $at, BYTES, MASK_U32 - - /* Load upper half of jump table addr */ -- lui T0, %hi(.Lchacha20_mips_jmptbl_unaligned_0) -+ lui T0, %hi(.Lchacha_mips_jmptbl_unaligned_0) - - /* Calculate lower half jump table offset */ - ins T0, $at, 1, 6 -@@ -408,7 +408,7 @@ chacha20_mips: - addu T1, STATE, $at - - /* Add lower half jump table addr */ -- addiu T0, %lo(.Lchacha20_mips_jmptbl_unaligned_0) -+ addiu T0, %lo(.Lchacha_mips_jmptbl_unaligned_0) - - /* Read value from STATE */ - lw SAVED_CA, 0(T1) -@@ -420,5 +420,78 @@ chacha20_mips: - - /* Jump table */ - FOR_EACH_WORD(JMPTBL_UNALIGNED) --.end chacha20_mips -+.end chacha_crypt_arch -+.set at -+ -+/* Input arguments -+ * STATE $a0 -+ * OUT $a1 
-+ * NROUND $a2 -+ */ -+ -+#undef X12 -+#undef X13 -+#undef X14 -+#undef X15 -+ -+#define X12 $a3 -+#define X13 $at -+#define X14 $v0 -+#define X15 STATE -+ -+.set noat -+.globl hchacha_block_arch -+.ent hchacha_block_arch -+hchacha_block_arch: -+ .frame $sp, STACK_SIZE, $ra -+ -+ addiu $sp, -STACK_SIZE -+ -+ /* Save X11(s6) */ -+ sw X11, 0($sp) -+ -+ lw X0, 0(STATE) -+ lw X1, 4(STATE) -+ lw X2, 8(STATE) -+ lw X3, 12(STATE) -+ lw X4, 16(STATE) -+ lw X5, 20(STATE) -+ lw X6, 24(STATE) -+ lw X7, 28(STATE) -+ lw X8, 32(STATE) -+ lw X9, 36(STATE) -+ lw X10, 40(STATE) -+ lw X11, 44(STATE) -+ lw X12, 48(STATE) -+ lw X13, 52(STATE) -+ lw X14, 56(STATE) -+ lw X15, 60(STATE) -+ -+.Loop_hchacha_xor_rounds: -+ addiu $a2, -2 -+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 16); -+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 12); -+ AXR( 0, 1, 2, 3, 4, 5, 6, 7, 12,13,14,15, 8); -+ AXR( 8, 9,10,11, 12,13,14,15, 4, 5, 6, 7, 7); -+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 16); -+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 12); -+ AXR( 0, 1, 2, 3, 5, 6, 7, 4, 15,12,13,14, 8); -+ AXR(10,11, 8, 9, 15,12,13,14, 5, 6, 7, 4, 7); -+ bnez $a2, .Loop_hchacha_xor_rounds -+ -+ /* Restore used register */ -+ lw X11, 0($sp) -+ -+ sw X0, 0(OUT) -+ sw X1, 4(OUT) -+ sw X2, 8(OUT) -+ sw X3, 12(OUT) -+ sw X12, 16(OUT) -+ sw X13, 20(OUT) -+ sw X14, 24(OUT) -+ sw X15, 28(OUT) -+ -+ addiu $sp, STACK_SIZE -+ jr $ra -+.end hchacha_block_arch - .set at ---- /dev/null -+++ b/arch/mips/crypto/chacha-glue.c -@@ -0,0 +1,150 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * MIPS accelerated ChaCha and XChaCha stream ciphers, -+ * including ChaCha20 (RFC7539) -+ * -+ * Copyright (C) 2019 Linaro, Ltd. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+asmlinkage void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, -+ unsigned int bytes, int nrounds); -+EXPORT_SYMBOL(chacha_crypt_arch); -+ -+asmlinkage void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds); -+EXPORT_SYMBOL(hchacha_block_arch); -+ -+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) -+{ -+ chacha_init_generic(state, key, iv); -+} -+EXPORT_SYMBOL(chacha_init_arch); -+ -+static int chacha_mips_stream_xor(struct skcipher_request *req, -+ const struct chacha_ctx *ctx, const u8 *iv) -+{ -+ struct skcipher_walk walk; -+ u32 state[16]; -+ int err; -+ -+ err = skcipher_walk_virt(&walk, req, false); -+ -+ chacha_init_generic(state, ctx->key, iv); -+ -+ while (walk.nbytes > 0) { -+ unsigned int nbytes = walk.nbytes; -+ -+ if (nbytes < walk.total) -+ nbytes = round_down(nbytes, walk.stride); -+ -+ chacha_crypt(state, walk.dst.virt.addr, walk.src.virt.addr, -+ nbytes, ctx->nrounds); -+ err = skcipher_walk_done(&walk, walk.nbytes - nbytes); -+ } -+ -+ return err; -+} -+ -+static int chacha_mips(struct skcipher_request *req) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -+ -+ return chacha_mips_stream_xor(req, ctx, req->iv); -+} -+ -+static int xchacha_mips(struct skcipher_request *req) -+{ -+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -+ struct chacha_ctx subctx; -+ u32 state[16]; -+ u8 real_iv[16]; -+ -+ chacha_init_generic(state, ctx->key, req->iv); -+ -+ hchacha_block(state, subctx.key, ctx->nrounds); -+ subctx.nrounds = ctx->nrounds; -+ -+ memcpy(&real_iv[0], req->iv + 24, 8); -+ memcpy(&real_iv[8], req->iv + 16, 8); -+ return chacha_mips_stream_xor(req, &subctx, real_iv); -+} -+ -+static struct skcipher_alg algs[] = { -+ { -+ .base.cra_name = "chacha20", -+ .base.cra_driver_name = "chacha20-mips", -+ 
.base.cra_priority = 200, -+ .base.cra_blocksize = 1, -+ .base.cra_ctxsize = sizeof(struct chacha_ctx), -+ .base.cra_module = THIS_MODULE, -+ -+ .min_keysize = CHACHA_KEY_SIZE, -+ .max_keysize = CHACHA_KEY_SIZE, -+ .ivsize = CHACHA_IV_SIZE, -+ .chunksize = CHACHA_BLOCK_SIZE, -+ .setkey = chacha20_setkey, -+ .encrypt = chacha_mips, -+ .decrypt = chacha_mips, -+ }, { -+ .base.cra_name = "xchacha20", -+ .base.cra_driver_name = "xchacha20-mips", -+ .base.cra_priority = 200, -+ .base.cra_blocksize = 1, -+ .base.cra_ctxsize = sizeof(struct chacha_ctx), -+ .base.cra_module = THIS_MODULE, -+ -+ .min_keysize = CHACHA_KEY_SIZE, -+ .max_keysize = CHACHA_KEY_SIZE, -+ .ivsize = XCHACHA_IV_SIZE, -+ .chunksize = CHACHA_BLOCK_SIZE, -+ .setkey = chacha20_setkey, -+ .encrypt = xchacha_mips, -+ .decrypt = xchacha_mips, -+ }, { -+ .base.cra_name = "xchacha12", -+ .base.cra_driver_name = "xchacha12-mips", -+ .base.cra_priority = 200, -+ .base.cra_blocksize = 1, -+ .base.cra_ctxsize = sizeof(struct chacha_ctx), -+ .base.cra_module = THIS_MODULE, -+ -+ .min_keysize = CHACHA_KEY_SIZE, -+ .max_keysize = CHACHA_KEY_SIZE, -+ .ivsize = XCHACHA_IV_SIZE, -+ .chunksize = CHACHA_BLOCK_SIZE, -+ .setkey = chacha12_setkey, -+ .encrypt = xchacha_mips, -+ .decrypt = xchacha_mips, -+ } -+}; -+ -+static int __init chacha_simd_mod_init(void) -+{ -+ return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); -+} -+ -+static void __exit chacha_simd_mod_fini(void) -+{ -+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -+} -+ -+module_init(chacha_simd_mod_init); -+module_exit(chacha_simd_mod_fini); -+ -+MODULE_DESCRIPTION("ChaCha and XChaCha stream ciphers (MIPS accelerated)"); -+MODULE_AUTHOR("Ard Biesheuvel "); -+MODULE_LICENSE("GPL v2"); -+MODULE_ALIAS_CRYPTO("chacha20"); -+MODULE_ALIAS_CRYPTO("chacha20-mips"); -+MODULE_ALIAS_CRYPTO("xchacha20"); -+MODULE_ALIAS_CRYPTO("xchacha20-mips"); -+MODULE_ALIAS_CRYPTO("xchacha12"); -+MODULE_ALIAS_CRYPTO("xchacha12-mips"); ---- a/crypto/Kconfig -+++ 
b/crypto/Kconfig -@@ -1423,6 +1423,12 @@ config CRYPTO_CHACHA20_X86_64 - SSSE3, AVX2, and AVX-512VL optimized implementations of the ChaCha20, - XChaCha20, and XChaCha12 stream ciphers. - -+config CRYPTO_CHACHA_MIPS -+ tristate "ChaCha stream cipher algorithms (MIPS 32r2 optimized)" -+ depends on CPU_MIPS32_R2 -+ select CRYPTO_BLKCIPHER -+ select CRYPTO_ARCH_HAVE_LIB_CHACHA -+ - config CRYPTO_SEED - tristate "SEED cipher algorithm" - select CRYPTO_ALGAPI diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0012-crypto-chacha-unexport-chacha_generic-routines.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0012-crypto-chacha-unexport-chacha_generic-routines.patch deleted file mode 100644 index d06f47a10..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0012-crypto-chacha-unexport-chacha_generic-routines.patch +++ /dev/null @@ -1,115 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:18 +0100 -Subject: [PATCH] crypto: chacha - unexport chacha_generic routines - -commit 22cf705360707ced15f9fe5423938f313c7df536 upstream. - -Now that all users of generic ChaCha code have moved to the core library, -there is no longer a need for the generic ChaCha skcpiher driver to -export parts of it implementation for reuse by other drivers. So drop -the exports, and make the symbols static. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - crypto/chacha_generic.c | 26 ++++++++------------------ - include/crypto/internal/chacha.h | 10 ---------- - 2 files changed, 8 insertions(+), 28 deletions(-) - ---- a/crypto/chacha_generic.c -+++ b/crypto/chacha_generic.c -@@ -21,7 +21,7 @@ static int chacha_stream_xor(struct skci - - err = skcipher_walk_virt(&walk, req, false); - -- crypto_chacha_init(state, ctx, iv); -+ chacha_init_generic(state, ctx->key, iv); - - while (walk.nbytes > 0) { - unsigned int nbytes = walk.nbytes; -@@ -37,36 +37,27 @@ static int chacha_stream_xor(struct skci - return err; - } - --void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv) --{ -- chacha_init_generic(state, ctx->key, iv); --} --EXPORT_SYMBOL_GPL(crypto_chacha_init); -- --int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, -- unsigned int keysize) -+static int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, -+ unsigned int keysize) - { - return chacha_setkey(tfm, key, keysize, 20); - } --EXPORT_SYMBOL_GPL(crypto_chacha20_setkey); - --int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, -- unsigned int keysize) -+static int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, -+ unsigned int keysize) - { - return chacha_setkey(tfm, key, keysize, 12); - } --EXPORT_SYMBOL_GPL(crypto_chacha12_setkey); - --int crypto_chacha_crypt(struct skcipher_request *req) -+static int crypto_chacha_crypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); - - return chacha_stream_xor(req, ctx, req->iv); - } --EXPORT_SYMBOL_GPL(crypto_chacha_crypt); - --int crypto_xchacha_crypt(struct skcipher_request *req) -+static int crypto_xchacha_crypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -@@ -75,7 +66,7 @@ int crypto_xchacha_crypt(struct 
skcipher - u8 real_iv[16]; - - /* Compute the subkey given the original key and first 128 nonce bits */ -- crypto_chacha_init(state, ctx, req->iv); -+ chacha_init_generic(state, ctx->key, req->iv); - hchacha_block_generic(state, subctx.key, ctx->nrounds); - subctx.nrounds = ctx->nrounds; - -@@ -86,7 +77,6 @@ int crypto_xchacha_crypt(struct skcipher - /* Generate the stream and XOR it with the data */ - return chacha_stream_xor(req, &subctx, real_iv); - } --EXPORT_SYMBOL_GPL(crypto_xchacha_crypt); - - static struct skcipher_alg algs[] = { - { ---- a/include/crypto/internal/chacha.h -+++ b/include/crypto/internal/chacha.h -@@ -12,8 +12,6 @@ struct chacha_ctx { - int nrounds; - }; - --void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv); -- - static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keysize, int nrounds) - { -@@ -42,12 +40,4 @@ static int inline chacha12_setkey(struct - return chacha_setkey(tfm, key, keysize, 12); - } - --int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, -- unsigned int keysize); --int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, -- unsigned int keysize); -- --int crypto_chacha_crypt(struct skcipher_request *req); --int crypto_xchacha_crypt(struct skcipher_request *req); -- - #endif /* _CRYPTO_CHACHA_H */ diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0013-crypto-poly1305-move-core-routines-into-a-separate-l.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0013-crypto-poly1305-move-core-routines-into-a-separate-l.patch deleted file mode 100644 index 960300d2a..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0013-crypto-poly1305-move-core-routines-into-a-separate-l.patch +++ /dev/null @@ -1,649 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:19 +0100 -Subject: [PATCH] crypto: poly1305 - move core routines into a separate library 
- -commit 48ea8c6ebc96bc0990e12ee1c43d0832c23576bb upstream. - -Move the core Poly1305 routines shared between the generic Poly1305 -shash driver and the Adiantum and NHPoly1305 drivers into a separate -library so that using just this pieces does not pull in the crypto -API pieces of the generic Poly1305 routine. - -In a subsequent patch, we will augment this generic library with -init/update/final routines so that Poyl1305 algorithm can be used -directly without the need for using the crypto API's shash abstraction. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/poly1305_glue.c | 2 +- - crypto/Kconfig | 5 +- - crypto/adiantum.c | 5 +- - crypto/nhpoly1305.c | 3 +- - crypto/poly1305_generic.c | 195 ++--------------------------- - include/crypto/internal/poly1305.h | 67 ++++++++++ - include/crypto/poly1305.h | 23 ---- - lib/crypto/Kconfig | 3 + - lib/crypto/Makefile | 3 + - lib/crypto/poly1305.c | 158 +++++++++++++++++++++++ - 10 files changed, 248 insertions(+), 216 deletions(-) - create mode 100644 include/crypto/internal/poly1305.h - create mode 100644 lib/crypto/poly1305.c - ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -7,8 +7,8 @@ - - #include - #include -+#include - #include --#include - #include - #include - #include ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -446,7 +446,7 @@ config CRYPTO_KEYWRAP - config CRYPTO_NHPOLY1305 - tristate - select CRYPTO_HASH -- select CRYPTO_POLY1305 -+ select CRYPTO_LIB_POLY1305_GENERIC - - config CRYPTO_NHPOLY1305_SSE2 - tristate "NHPoly1305 hash function (x86_64 SSE2 implementation)" -@@ -467,7 +467,7 @@ config CRYPTO_NHPOLY1305_AVX2 - config CRYPTO_ADIANTUM - tristate "Adiantum support" - select CRYPTO_CHACHA20 -- select CRYPTO_POLY1305 -+ select CRYPTO_LIB_POLY1305_GENERIC - select CRYPTO_NHPOLY1305 - select CRYPTO_MANAGER - help -@@ -686,6 +686,7 @@ config CRYPTO_GHASH - config CRYPTO_POLY1305 - tristate 
"Poly1305 authenticator algorithm" - select CRYPTO_HASH -+ select CRYPTO_LIB_POLY1305_GENERIC - help - Poly1305 authenticator algorithm, RFC7539. - ---- a/crypto/adiantum.c -+++ b/crypto/adiantum.c -@@ -33,6 +33,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -242,11 +243,11 @@ static void adiantum_hash_header(struct - - BUILD_BUG_ON(sizeof(header) % POLY1305_BLOCK_SIZE != 0); - poly1305_core_blocks(&state, &tctx->header_hash_key, -- &header, sizeof(header) / POLY1305_BLOCK_SIZE); -+ &header, sizeof(header) / POLY1305_BLOCK_SIZE, 1); - - BUILD_BUG_ON(TWEAK_SIZE % POLY1305_BLOCK_SIZE != 0); - poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv, -- TWEAK_SIZE / POLY1305_BLOCK_SIZE); -+ TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1); - - poly1305_core_emit(&state, &rctx->header_hash); - } ---- a/crypto/nhpoly1305.c -+++ b/crypto/nhpoly1305.c -@@ -33,6 +33,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -78,7 +79,7 @@ static void process_nh_hash_value(struct - BUILD_BUG_ON(NH_HASH_BYTES % POLY1305_BLOCK_SIZE != 0); - - poly1305_core_blocks(&state->poly_state, &key->poly_key, state->nh_hash, -- NH_HASH_BYTES / POLY1305_BLOCK_SIZE); -+ NH_HASH_BYTES / POLY1305_BLOCK_SIZE, 1); - } - - /* ---- a/crypto/poly1305_generic.c -+++ b/crypto/poly1305_generic.c -@@ -13,27 +13,12 @@ - - #include - #include --#include -+#include - #include - #include - #include - #include - --static inline u64 mlt(u64 a, u64 b) --{ -- return a * b; --} -- --static inline u32 sr(u64 v, u_char n) --{ -- return v >> n; --} -- --static inline u32 and(u32 v, u32 mask) --{ -- return v & mask; --} -- - int crypto_poly1305_init(struct shash_desc *desc) - { - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -@@ -47,124 +32,8 @@ int crypto_poly1305_init(struct shash_de - } - EXPORT_SYMBOL_GPL(crypto_poly1305_init); - --void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key) --{ -- /* r &= 
0xffffffc0ffffffc0ffffffc0fffffff */ -- key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff; -- key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03; -- key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff; -- key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff; -- key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff; --} --EXPORT_SYMBOL_GPL(poly1305_core_setkey); -- --/* -- * Poly1305 requires a unique key for each tag, which implies that we can't set -- * it on the tfm that gets accessed by multiple users simultaneously. Instead we -- * expect the key as the first 32 bytes in the update() call. -- */ --unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, -- const u8 *src, unsigned int srclen) --{ -- if (!dctx->sset) { -- if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { -- poly1305_core_setkey(&dctx->r, src); -- src += POLY1305_BLOCK_SIZE; -- srclen -= POLY1305_BLOCK_SIZE; -- dctx->rset = true; -- } -- if (srclen >= POLY1305_BLOCK_SIZE) { -- dctx->s[0] = get_unaligned_le32(src + 0); -- dctx->s[1] = get_unaligned_le32(src + 4); -- dctx->s[2] = get_unaligned_le32(src + 8); -- dctx->s[3] = get_unaligned_le32(src + 12); -- src += POLY1305_BLOCK_SIZE; -- srclen -= POLY1305_BLOCK_SIZE; -- dctx->sset = true; -- } -- } -- return srclen; --} --EXPORT_SYMBOL_GPL(crypto_poly1305_setdesckey); -- --static void poly1305_blocks_internal(struct poly1305_state *state, -- const struct poly1305_key *key, -- const void *src, unsigned int nblocks, -- u32 hibit) --{ -- u32 r0, r1, r2, r3, r4; -- u32 s1, s2, s3, s4; -- u32 h0, h1, h2, h3, h4; -- u64 d0, d1, d2, d3, d4; -- -- if (!nblocks) -- return; -- -- r0 = key->r[0]; -- r1 = key->r[1]; -- r2 = key->r[2]; -- r3 = key->r[3]; -- r4 = key->r[4]; -- -- s1 = r1 * 5; -- s2 = r2 * 5; -- s3 = r3 * 5; -- s4 = r4 * 5; -- -- h0 = state->h[0]; -- h1 = state->h[1]; -- h2 = state->h[2]; -- h3 = state->h[3]; -- h4 = state->h[4]; -- -- do { -- /* h += m[i] */ -- h0 += 
(get_unaligned_le32(src + 0) >> 0) & 0x3ffffff; -- h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff; -- h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff; -- h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff; -- h4 += (get_unaligned_le32(src + 12) >> 8) | hibit; -- -- /* h *= r */ -- d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + -- mlt(h3, s2) + mlt(h4, s1); -- d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) + -- mlt(h3, s3) + mlt(h4, s2); -- d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) + -- mlt(h3, s4) + mlt(h4, s3); -- d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) + -- mlt(h3, r0) + mlt(h4, s4); -- d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) + -- mlt(h3, r1) + mlt(h4, r0); -- -- /* (partial) h %= p */ -- d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff); -- d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff); -- d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff); -- d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff); -- h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff); -- h1 += h0 >> 26; h0 = h0 & 0x3ffffff; -- -- src += POLY1305_BLOCK_SIZE; -- } while (--nblocks); -- -- state->h[0] = h0; -- state->h[1] = h1; -- state->h[2] = h2; -- state->h[3] = h3; -- state->h[4] = h4; --} -- --void poly1305_core_blocks(struct poly1305_state *state, -- const struct poly1305_key *key, -- const void *src, unsigned int nblocks) --{ -- poly1305_blocks_internal(state, key, src, nblocks, 1 << 24); --} --EXPORT_SYMBOL_GPL(poly1305_core_blocks); -- --static void poly1305_blocks(struct poly1305_desc_ctx *dctx, -- const u8 *src, unsigned int srclen, u32 hibit) -+static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, -+ unsigned int srclen) - { - unsigned int datalen; - -@@ -174,8 +43,8 @@ static void poly1305_blocks(struct poly1 - srclen = datalen; - } - -- poly1305_blocks_internal(&dctx->h, &dctx->r, -- src, srclen / POLY1305_BLOCK_SIZE, hibit); -+ poly1305_core_blocks(&dctx->h, &dctx->r, src, -+ srclen / POLY1305_BLOCK_SIZE, 1); - } - - int crypto_poly1305_update(struct shash_desc *desc, -@@ 
-193,13 +62,13 @@ int crypto_poly1305_update(struct shash_ - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { - poly1305_blocks(dctx, dctx->buf, -- POLY1305_BLOCK_SIZE, 1 << 24); -+ POLY1305_BLOCK_SIZE); - dctx->buflen = 0; - } - } - - if (likely(srclen >= POLY1305_BLOCK_SIZE)) { -- poly1305_blocks(dctx, src, srclen, 1 << 24); -+ poly1305_blocks(dctx, src, srclen); - src += srclen - (srclen % POLY1305_BLOCK_SIZE); - srclen %= POLY1305_BLOCK_SIZE; - } -@@ -213,54 +82,6 @@ int crypto_poly1305_update(struct shash_ - } - EXPORT_SYMBOL_GPL(crypto_poly1305_update); - --void poly1305_core_emit(const struct poly1305_state *state, void *dst) --{ -- u32 h0, h1, h2, h3, h4; -- u32 g0, g1, g2, g3, g4; -- u32 mask; -- -- /* fully carry h */ -- h0 = state->h[0]; -- h1 = state->h[1]; -- h2 = state->h[2]; -- h3 = state->h[3]; -- h4 = state->h[4]; -- -- h2 += (h1 >> 26); h1 = h1 & 0x3ffffff; -- h3 += (h2 >> 26); h2 = h2 & 0x3ffffff; -- h4 += (h3 >> 26); h3 = h3 & 0x3ffffff; -- h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff; -- h1 += (h0 >> 26); h0 = h0 & 0x3ffffff; -- -- /* compute h + -p */ -- g0 = h0 + 5; -- g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff; -- g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff; -- g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff; -- g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff; -- -- /* select h if h < p, or h + -p if h >= p */ -- mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; -- g0 &= mask; -- g1 &= mask; -- g2 &= mask; -- g3 &= mask; -- g4 &= mask; -- mask = ~mask; -- h0 = (h0 & mask) | g0; -- h1 = (h1 & mask) | g1; -- h2 = (h2 & mask) | g2; -- h3 = (h3 & mask) | g3; -- h4 = (h4 & mask) | g4; -- -- /* h = h % (2^128) */ -- put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0); -- put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4); -- put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8); -- put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12); --} --EXPORT_SYMBOL_GPL(poly1305_core_emit); -- - int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) - { - struct poly1305_desc_ctx *dctx 
= shash_desc_ctx(desc); -@@ -274,7 +95,7 @@ int crypto_poly1305_final(struct shash_d - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + dctx->buflen, 0, - POLY1305_BLOCK_SIZE - dctx->buflen); -- poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 0); -+ poly1305_core_blocks(&dctx->h, &dctx->r, dctx->buf, 1, 0); - } - - poly1305_core_emit(&dctx->h, digest); ---- /dev/null -+++ b/include/crypto/internal/poly1305.h -@@ -0,0 +1,67 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Common values for the Poly1305 algorithm -+ */ -+ -+#ifndef _CRYPTO_INTERNAL_POLY1305_H -+#define _CRYPTO_INTERNAL_POLY1305_H -+ -+#include -+#include -+#include -+ -+struct shash_desc; -+ -+/* -+ * Poly1305 core functions. These implement the ε-almost-∆-universal hash -+ * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce -+ * ("s key") at the end. They also only support block-aligned inputs. -+ */ -+void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key); -+static inline void poly1305_core_init(struct poly1305_state *state) -+{ -+ *state = (struct poly1305_state){}; -+} -+ -+void poly1305_core_blocks(struct poly1305_state *state, -+ const struct poly1305_key *key, const void *src, -+ unsigned int nblocks, u32 hibit); -+void poly1305_core_emit(const struct poly1305_state *state, void *dst); -+ -+/* Crypto API helper functions for the Poly1305 MAC */ -+int crypto_poly1305_init(struct shash_desc *desc); -+ -+int crypto_poly1305_update(struct shash_desc *desc, -+ const u8 *src, unsigned int srclen); -+int crypto_poly1305_final(struct shash_desc *desc, u8 *dst); -+ -+/* -+ * Poly1305 requires a unique key for each tag, which implies that we can't set -+ * it on the tfm that gets accessed by multiple users simultaneously. Instead we -+ * expect the key as the first 32 bytes in the update() call. 
-+ */ -+static inline -+unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, -+ const u8 *src, unsigned int srclen) -+{ -+ if (!dctx->sset) { -+ if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { -+ poly1305_core_setkey(&dctx->r, src); -+ src += POLY1305_BLOCK_SIZE; -+ srclen -= POLY1305_BLOCK_SIZE; -+ dctx->rset = true; -+ } -+ if (srclen >= POLY1305_BLOCK_SIZE) { -+ dctx->s[0] = get_unaligned_le32(src + 0); -+ dctx->s[1] = get_unaligned_le32(src + 4); -+ dctx->s[2] = get_unaligned_le32(src + 8); -+ dctx->s[3] = get_unaligned_le32(src + 12); -+ src += POLY1305_BLOCK_SIZE; -+ srclen -= POLY1305_BLOCK_SIZE; -+ dctx->sset = true; -+ } -+ } -+ return srclen; -+} -+ -+#endif ---- a/include/crypto/poly1305.h -+++ b/include/crypto/poly1305.h -@@ -38,27 +38,4 @@ struct poly1305_desc_ctx { - bool sset; - }; - --/* -- * Poly1305 core functions. These implement the ε-almost-∆-universal hash -- * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce -- * ("s key") at the end. They also only support block-aligned inputs. 
-- */ --void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key); --static inline void poly1305_core_init(struct poly1305_state *state) --{ -- memset(state->h, 0, sizeof(state->h)); --} --void poly1305_core_blocks(struct poly1305_state *state, -- const struct poly1305_key *key, -- const void *src, unsigned int nblocks); --void poly1305_core_emit(const struct poly1305_state *state, void *dst); -- --/* Crypto API helper functions for the Poly1305 MAC */ --int crypto_poly1305_init(struct shash_desc *desc); --unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, -- const u8 *src, unsigned int srclen); --int crypto_poly1305_update(struct shash_desc *desc, -- const u8 *src, unsigned int srclen); --int crypto_poly1305_final(struct shash_desc *desc, u8 *dst); -- - #endif ---- a/lib/crypto/Kconfig -+++ b/lib/crypto/Kconfig -@@ -37,5 +37,8 @@ config CRYPTO_LIB_CHACHA - config CRYPTO_LIB_DES - tristate - -+config CRYPTO_LIB_POLY1305_GENERIC -+ tristate -+ - config CRYPTO_LIB_SHA256 - tristate ---- a/lib/crypto/Makefile -+++ b/lib/crypto/Makefile -@@ -13,5 +13,8 @@ libarc4-y := arc4.o - obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o - libdes-y := des.o - -+obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305.o -+libpoly1305-y := poly1305.o -+ - obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o - libsha256-y := sha256.o ---- /dev/null -+++ b/lib/crypto/poly1305.c -@@ -0,0 +1,158 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later -+/* -+ * Poly1305 authenticator algorithm, RFC7539 -+ * -+ * Copyright (C) 2015 Martin Willi -+ * -+ * Based on public domain code by Andrew Moon and Daniel J. Bernstein. 
-+ */ -+ -+#include -+#include -+#include -+#include -+ -+static inline u64 mlt(u64 a, u64 b) -+{ -+ return a * b; -+} -+ -+static inline u32 sr(u64 v, u_char n) -+{ -+ return v >> n; -+} -+ -+static inline u32 and(u32 v, u32 mask) -+{ -+ return v & mask; -+} -+ -+void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key) -+{ -+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ -+ key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff; -+ key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03; -+ key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff; -+ key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff; -+ key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff; -+} -+EXPORT_SYMBOL_GPL(poly1305_core_setkey); -+ -+void poly1305_core_blocks(struct poly1305_state *state, -+ const struct poly1305_key *key, const void *src, -+ unsigned int nblocks, u32 hibit) -+{ -+ u32 r0, r1, r2, r3, r4; -+ u32 s1, s2, s3, s4; -+ u32 h0, h1, h2, h3, h4; -+ u64 d0, d1, d2, d3, d4; -+ -+ if (!nblocks) -+ return; -+ -+ r0 = key->r[0]; -+ r1 = key->r[1]; -+ r2 = key->r[2]; -+ r3 = key->r[3]; -+ r4 = key->r[4]; -+ -+ s1 = r1 * 5; -+ s2 = r2 * 5; -+ s3 = r3 * 5; -+ s4 = r4 * 5; -+ -+ h0 = state->h[0]; -+ h1 = state->h[1]; -+ h2 = state->h[2]; -+ h3 = state->h[3]; -+ h4 = state->h[4]; -+ -+ do { -+ /* h += m[i] */ -+ h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff; -+ h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff; -+ h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff; -+ h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff; -+ h4 += (get_unaligned_le32(src + 12) >> 8) | (hibit << 24); -+ -+ /* h *= r */ -+ d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + -+ mlt(h3, s2) + mlt(h4, s1); -+ d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) + -+ mlt(h3, s3) + mlt(h4, s2); -+ d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) + -+ mlt(h3, s4) + mlt(h4, s3); -+ d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) + -+ mlt(h3, r0) + mlt(h4, s4); -+ d4 = 
mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) + -+ mlt(h3, r1) + mlt(h4, r0); -+ -+ /* (partial) h %= p */ -+ d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff); -+ d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff); -+ d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff); -+ d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff); -+ h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff); -+ h1 += h0 >> 26; h0 = h0 & 0x3ffffff; -+ -+ src += POLY1305_BLOCK_SIZE; -+ } while (--nblocks); -+ -+ state->h[0] = h0; -+ state->h[1] = h1; -+ state->h[2] = h2; -+ state->h[3] = h3; -+ state->h[4] = h4; -+} -+EXPORT_SYMBOL_GPL(poly1305_core_blocks); -+ -+void poly1305_core_emit(const struct poly1305_state *state, void *dst) -+{ -+ u32 h0, h1, h2, h3, h4; -+ u32 g0, g1, g2, g3, g4; -+ u32 mask; -+ -+ /* fully carry h */ -+ h0 = state->h[0]; -+ h1 = state->h[1]; -+ h2 = state->h[2]; -+ h3 = state->h[3]; -+ h4 = state->h[4]; -+ -+ h2 += (h1 >> 26); h1 = h1 & 0x3ffffff; -+ h3 += (h2 >> 26); h2 = h2 & 0x3ffffff; -+ h4 += (h3 >> 26); h3 = h3 & 0x3ffffff; -+ h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff; -+ h1 += (h0 >> 26); h0 = h0 & 0x3ffffff; -+ -+ /* compute h + -p */ -+ g0 = h0 + 5; -+ g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff; -+ g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff; -+ g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff; -+ g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff; -+ -+ /* select h if h < p, or h + -p if h >= p */ -+ mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; -+ g0 &= mask; -+ g1 &= mask; -+ g2 &= mask; -+ g3 &= mask; -+ g4 &= mask; -+ mask = ~mask; -+ h0 = (h0 & mask) | g0; -+ h1 = (h1 & mask) | g1; -+ h2 = (h2 & mask) | g2; -+ h3 = (h3 & mask) | g3; -+ h4 = (h4 & mask) | g4; -+ -+ /* h = h % (2^128) */ -+ put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0); -+ put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4); -+ put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8); -+ put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12); -+} -+EXPORT_SYMBOL_GPL(poly1305_core_emit); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Martin Willi "); diff --git 
a/feeds/ipq807x/ipq807x/patches/080-wireguard-0014-crypto-x86-poly1305-unify-Poly1305-state-struct-with.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0014-crypto-x86-poly1305-unify-Poly1305-state-struct-with.patch deleted file mode 100644 index 7d237549b..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0014-crypto-x86-poly1305-unify-Poly1305-state-struct-with.patch +++ /dev/null @@ -1,251 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:20 +0100 -Subject: [PATCH] crypto: x86/poly1305 - unify Poly1305 state struct with - generic code - -commit ad8f5b88383ea685f2b8df2a12ee3e08089a1287 upstream. - -In preparation of exposing a Poly1305 library interface directly from -the accelerated x86 driver, align the state descriptor of the x86 code -with the one used by the generic driver. This is needed to make the -library interface unified between all implementations. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/poly1305_glue.c | 88 ++++++++++-------------------- - crypto/poly1305_generic.c | 6 +- - include/crypto/internal/poly1305.h | 4 +- - include/crypto/poly1305.h | 18 +++--- - 4 files changed, 43 insertions(+), 73 deletions(-) - ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -14,40 +14,14 @@ - #include - #include - --struct poly1305_simd_desc_ctx { -- struct poly1305_desc_ctx base; -- /* derived key u set? */ -- bool uset; --#ifdef CONFIG_AS_AVX2 -- /* derived keys r^3, r^4 set? */ -- bool wset; --#endif -- /* derived Poly1305 key r^2 */ -- u32 u[5]; -- /* ... 
silently appended r^3 and r^4 when using AVX2 */ --}; -- - asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src, - const u32 *r, unsigned int blocks); - asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r, - unsigned int blocks, const u32 *u); --#ifdef CONFIG_AS_AVX2 - asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r, - unsigned int blocks, const u32 *u); --static bool poly1305_use_avx2; --#endif - --static int poly1305_simd_init(struct shash_desc *desc) --{ -- struct poly1305_simd_desc_ctx *sctx = shash_desc_ctx(desc); -- -- sctx->uset = false; --#ifdef CONFIG_AS_AVX2 -- sctx->wset = false; --#endif -- -- return crypto_poly1305_init(desc); --} -+static bool poly1305_use_avx2 __ro_after_init; - - static void poly1305_simd_mult(u32 *a, const u32 *b) - { -@@ -63,53 +37,49 @@ static void poly1305_simd_mult(u32 *a, c - static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx, - const u8 *src, unsigned int srclen) - { -- struct poly1305_simd_desc_ctx *sctx; - unsigned int blocks, datalen; - -- BUILD_BUG_ON(offsetof(struct poly1305_simd_desc_ctx, base)); -- sctx = container_of(dctx, struct poly1305_simd_desc_ctx, base); -- - if (unlikely(!dctx->sset)) { - datalen = crypto_poly1305_setdesckey(dctx, src, srclen); - src += srclen - datalen; - srclen = datalen; - } - --#ifdef CONFIG_AS_AVX2 -- if (poly1305_use_avx2 && srclen >= POLY1305_BLOCK_SIZE * 4) { -- if (unlikely(!sctx->wset)) { -- if (!sctx->uset) { -- memcpy(sctx->u, dctx->r.r, sizeof(sctx->u)); -- poly1305_simd_mult(sctx->u, dctx->r.r); -- sctx->uset = true; -+ if (IS_ENABLED(CONFIG_AS_AVX2) && -+ poly1305_use_avx2 && -+ srclen >= POLY1305_BLOCK_SIZE * 4) { -+ if (unlikely(dctx->rset < 4)) { -+ if (dctx->rset < 2) { -+ dctx->r[1] = dctx->r[0]; -+ poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r); - } -- memcpy(sctx->u + 5, sctx->u, sizeof(sctx->u)); -- poly1305_simd_mult(sctx->u + 5, dctx->r.r); -- memcpy(sctx->u + 10, sctx->u + 5, sizeof(sctx->u)); -- 
poly1305_simd_mult(sctx->u + 10, dctx->r.r); -- sctx->wset = true; -+ dctx->r[2] = dctx->r[1]; -+ poly1305_simd_mult(dctx->r[2].r, dctx->r[0].r); -+ dctx->r[3] = dctx->r[2]; -+ poly1305_simd_mult(dctx->r[3].r, dctx->r[0].r); -+ dctx->rset = 4; - } - blocks = srclen / (POLY1305_BLOCK_SIZE * 4); -- poly1305_4block_avx2(dctx->h.h, src, dctx->r.r, blocks, -- sctx->u); -+ poly1305_4block_avx2(dctx->h.h, src, dctx->r[0].r, blocks, -+ dctx->r[1].r); - src += POLY1305_BLOCK_SIZE * 4 * blocks; - srclen -= POLY1305_BLOCK_SIZE * 4 * blocks; - } --#endif -+ - if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) { -- if (unlikely(!sctx->uset)) { -- memcpy(sctx->u, dctx->r.r, sizeof(sctx->u)); -- poly1305_simd_mult(sctx->u, dctx->r.r); -- sctx->uset = true; -+ if (unlikely(dctx->rset < 2)) { -+ dctx->r[1] = dctx->r[0]; -+ poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r); -+ dctx->rset = 2; - } - blocks = srclen / (POLY1305_BLOCK_SIZE * 2); -- poly1305_2block_sse2(dctx->h.h, src, dctx->r.r, blocks, -- sctx->u); -+ poly1305_2block_sse2(dctx->h.h, src, dctx->r[0].r, -+ blocks, dctx->r[1].r); - src += POLY1305_BLOCK_SIZE * 2 * blocks; - srclen -= POLY1305_BLOCK_SIZE * 2 * blocks; - } - if (srclen >= POLY1305_BLOCK_SIZE) { -- poly1305_block_sse2(dctx->h.h, src, dctx->r.r, 1); -+ poly1305_block_sse2(dctx->h.h, src, dctx->r[0].r, 1); - srclen -= POLY1305_BLOCK_SIZE; - } - return srclen; -@@ -159,10 +129,10 @@ static int poly1305_simd_update(struct s - - static struct shash_alg alg = { - .digestsize = POLY1305_DIGEST_SIZE, -- .init = poly1305_simd_init, -+ .init = crypto_poly1305_init, - .update = poly1305_simd_update, - .final = crypto_poly1305_final, -- .descsize = sizeof(struct poly1305_simd_desc_ctx), -+ .descsize = sizeof(struct poly1305_desc_ctx), - .base = { - .cra_name = "poly1305", - .cra_driver_name = "poly1305-simd", -@@ -177,14 +147,14 @@ static int __init poly1305_simd_mod_init - if (!boot_cpu_has(X86_FEATURE_XMM2)) - return -ENODEV; - --#ifdef CONFIG_AS_AVX2 -- poly1305_use_avx2 = 
boot_cpu_has(X86_FEATURE_AVX) && -+ poly1305_use_avx2 = IS_ENABLED(CONFIG_AS_AVX2) && -+ boot_cpu_has(X86_FEATURE_AVX) && - boot_cpu_has(X86_FEATURE_AVX2) && - cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); -- alg.descsize = sizeof(struct poly1305_simd_desc_ctx); -+ alg.descsize = sizeof(struct poly1305_desc_ctx) + 5 * sizeof(u32); - if (poly1305_use_avx2) - alg.descsize += 10 * sizeof(u32); --#endif -+ - return crypto_register_shash(&alg); - } - ---- a/crypto/poly1305_generic.c -+++ b/crypto/poly1305_generic.c -@@ -25,7 +25,7 @@ int crypto_poly1305_init(struct shash_de - - poly1305_core_init(&dctx->h); - dctx->buflen = 0; -- dctx->rset = false; -+ dctx->rset = 0; - dctx->sset = false; - - return 0; -@@ -43,7 +43,7 @@ static void poly1305_blocks(struct poly1 - srclen = datalen; - } - -- poly1305_core_blocks(&dctx->h, &dctx->r, src, -+ poly1305_core_blocks(&dctx->h, dctx->r, src, - srclen / POLY1305_BLOCK_SIZE, 1); - } - -@@ -95,7 +95,7 @@ int crypto_poly1305_final(struct shash_d - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + dctx->buflen, 0, - POLY1305_BLOCK_SIZE - dctx->buflen); -- poly1305_core_blocks(&dctx->h, &dctx->r, dctx->buf, 1, 0); -+ poly1305_core_blocks(&dctx->h, dctx->r, dctx->buf, 1, 0); - } - - poly1305_core_emit(&dctx->h, digest); ---- a/include/crypto/internal/poly1305.h -+++ b/include/crypto/internal/poly1305.h -@@ -46,10 +46,10 @@ unsigned int crypto_poly1305_setdesckey( - { - if (!dctx->sset) { - if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { -- poly1305_core_setkey(&dctx->r, src); -+ poly1305_core_setkey(dctx->r, src); - src += POLY1305_BLOCK_SIZE; - srclen -= POLY1305_BLOCK_SIZE; -- dctx->rset = true; -+ dctx->rset = 1; - } - if (srclen >= POLY1305_BLOCK_SIZE) { - dctx->s[0] = get_unaligned_le32(src + 0); ---- a/include/crypto/poly1305.h -+++ b/include/crypto/poly1305.h -@@ -22,20 +22,20 @@ struct poly1305_state { - }; - - struct poly1305_desc_ctx { -- /* key */ -- struct poly1305_key r; -- /* finalize key */ -- u32 
s[4]; -- /* accumulator */ -- struct poly1305_state h; - /* partial buffer */ - u8 buf[POLY1305_BLOCK_SIZE]; - /* bytes used in partial buffer */ - unsigned int buflen; -- /* r key has been set */ -- bool rset; -- /* s key has been set */ -+ /* how many keys have been set in r[] */ -+ unsigned short rset; -+ /* whether s[] has been set */ - bool sset; -+ /* finalize key */ -+ u32 s[4]; -+ /* accumulator */ -+ struct poly1305_state h; -+ /* key */ -+ struct poly1305_key r[1]; - }; - - #endif diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0015-crypto-poly1305-expose-init-update-final-library-int.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0015-crypto-poly1305-expose-init-update-final-library-int.patch deleted file mode 100644 index bf8e90bf0..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0015-crypto-poly1305-expose-init-update-final-library-int.patch +++ /dev/null @@ -1,224 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:21 +0100 -Subject: [PATCH] crypto: poly1305 - expose init/update/final library interface - -commit a1d93064094cc5e24d64e35cf093e7191d0c9344 upstream. - -Expose the existing generic Poly1305 code via a init/update/final -library interface so that callers are not required to go through -the crypto API's shash abstraction to access it. At the same time, -make some preparations so that the library implementation can be -superseded by an accelerated arch-specific version in the future. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - crypto/poly1305_generic.c | 22 +----------- - include/crypto/poly1305.h | 38 +++++++++++++++++++- - lib/crypto/Kconfig | 26 ++++++++++++++ - lib/crypto/poly1305.c | 74 +++++++++++++++++++++++++++++++++++++++ - 4 files changed, 138 insertions(+), 22 deletions(-) - ---- a/crypto/poly1305_generic.c -+++ b/crypto/poly1305_generic.c -@@ -85,31 +85,11 @@ EXPORT_SYMBOL_GPL(crypto_poly1305_update - int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) - { - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -- __le32 digest[4]; -- u64 f = 0; - - if (unlikely(!dctx->sset)) - return -ENOKEY; - -- if (unlikely(dctx->buflen)) { -- dctx->buf[dctx->buflen++] = 1; -- memset(dctx->buf + dctx->buflen, 0, -- POLY1305_BLOCK_SIZE - dctx->buflen); -- poly1305_core_blocks(&dctx->h, dctx->r, dctx->buf, 1, 0); -- } -- -- poly1305_core_emit(&dctx->h, digest); -- -- /* mac = (h + s) % (2^128) */ -- f = (f >> 32) + le32_to_cpu(digest[0]) + dctx->s[0]; -- put_unaligned_le32(f, dst + 0); -- f = (f >> 32) + le32_to_cpu(digest[1]) + dctx->s[1]; -- put_unaligned_le32(f, dst + 4); -- f = (f >> 32) + le32_to_cpu(digest[2]) + dctx->s[2]; -- put_unaligned_le32(f, dst + 8); -- f = (f >> 32) + le32_to_cpu(digest[3]) + dctx->s[3]; -- put_unaligned_le32(f, dst + 12); -- -+ poly1305_final_generic(dctx, dst); - return 0; - } - EXPORT_SYMBOL_GPL(crypto_poly1305_final); ---- a/include/crypto/poly1305.h -+++ b/include/crypto/poly1305.h -@@ -35,7 +35,43 @@ struct poly1305_desc_ctx { - /* accumulator */ - struct poly1305_state h; - /* key */ -- struct poly1305_key r[1]; -+ struct poly1305_key r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE]; - }; - -+void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key); -+void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key); -+ -+static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key) -+{ -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) -+ poly1305_init_arch(desc, key); -+ else -+ 
poly1305_init_generic(desc, key); -+} -+ -+void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src, -+ unsigned int nbytes); -+void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src, -+ unsigned int nbytes); -+ -+static inline void poly1305_update(struct poly1305_desc_ctx *desc, -+ const u8 *src, unsigned int nbytes) -+{ -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) -+ poly1305_update_arch(desc, src, nbytes); -+ else -+ poly1305_update_generic(desc, src, nbytes); -+} -+ -+void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest); -+void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest); -+ -+static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest) -+{ -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) -+ poly1305_final_arch(desc, digest); -+ else -+ poly1305_final_generic(desc, digest); -+} -+ - #endif ---- a/lib/crypto/Kconfig -+++ b/lib/crypto/Kconfig -@@ -37,8 +37,34 @@ config CRYPTO_LIB_CHACHA - config CRYPTO_LIB_DES - tristate - -+config CRYPTO_LIB_POLY1305_RSIZE -+ int -+ default 1 -+ -+config CRYPTO_ARCH_HAVE_LIB_POLY1305 -+ tristate -+ help -+ Declares whether the architecture provides an arch-specific -+ accelerated implementation of the Poly1305 library interface, -+ either builtin or as a module. -+ - config CRYPTO_LIB_POLY1305_GENERIC - tristate -+ help -+ This symbol can be depended upon by arch implementations of the -+ Poly1305 library interface that require the generic code as a -+ fallback, e.g., for SIMD implementations. If no arch specific -+ implementation is enabled, this implementation serves the users -+ of CRYPTO_LIB_POLY1305. -+ -+config CRYPTO_LIB_POLY1305 -+ tristate "Poly1305 library interface" -+ depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 -+ select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n -+ help -+ Enable the Poly1305 library interface. 
This interface may be fulfilled -+ by either the generic implementation or an arch-specific one, if one -+ is available and enabled. - - config CRYPTO_LIB_SHA256 - tristate ---- a/lib/crypto/poly1305.c -+++ b/lib/crypto/poly1305.c -@@ -154,5 +154,79 @@ void poly1305_core_emit(const struct pol - } - EXPORT_SYMBOL_GPL(poly1305_core_emit); - -+void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key) -+{ -+ poly1305_core_setkey(desc->r, key); -+ desc->s[0] = get_unaligned_le32(key + 16); -+ desc->s[1] = get_unaligned_le32(key + 20); -+ desc->s[2] = get_unaligned_le32(key + 24); -+ desc->s[3] = get_unaligned_le32(key + 28); -+ poly1305_core_init(&desc->h); -+ desc->buflen = 0; -+ desc->sset = true; -+ desc->rset = 1; -+} -+EXPORT_SYMBOL_GPL(poly1305_init_generic); -+ -+void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src, -+ unsigned int nbytes) -+{ -+ unsigned int bytes; -+ -+ if (unlikely(desc->buflen)) { -+ bytes = min(nbytes, POLY1305_BLOCK_SIZE - desc->buflen); -+ memcpy(desc->buf + desc->buflen, src, bytes); -+ src += bytes; -+ nbytes -= bytes; -+ desc->buflen += bytes; -+ -+ if (desc->buflen == POLY1305_BLOCK_SIZE) { -+ poly1305_core_blocks(&desc->h, desc->r, desc->buf, 1, 1); -+ desc->buflen = 0; -+ } -+ } -+ -+ if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { -+ poly1305_core_blocks(&desc->h, desc->r, src, -+ nbytes / POLY1305_BLOCK_SIZE, 1); -+ src += nbytes - (nbytes % POLY1305_BLOCK_SIZE); -+ nbytes %= POLY1305_BLOCK_SIZE; -+ } -+ -+ if (unlikely(nbytes)) { -+ desc->buflen = nbytes; -+ memcpy(desc->buf, src, nbytes); -+ } -+} -+EXPORT_SYMBOL_GPL(poly1305_update_generic); -+ -+void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *dst) -+{ -+ __le32 digest[4]; -+ u64 f = 0; -+ -+ if (unlikely(desc->buflen)) { -+ desc->buf[desc->buflen++] = 1; -+ memset(desc->buf + desc->buflen, 0, -+ POLY1305_BLOCK_SIZE - desc->buflen); -+ poly1305_core_blocks(&desc->h, desc->r, desc->buf, 1, 0); -+ } -+ -+ 
poly1305_core_emit(&desc->h, digest); -+ -+ /* mac = (h + s) % (2^128) */ -+ f = (f >> 32) + le32_to_cpu(digest[0]) + desc->s[0]; -+ put_unaligned_le32(f, dst + 0); -+ f = (f >> 32) + le32_to_cpu(digest[1]) + desc->s[1]; -+ put_unaligned_le32(f, dst + 4); -+ f = (f >> 32) + le32_to_cpu(digest[2]) + desc->s[2]; -+ put_unaligned_le32(f, dst + 8); -+ f = (f >> 32) + le32_to_cpu(digest[3]) + desc->s[3]; -+ put_unaligned_le32(f, dst + 12); -+ -+ *desc = (struct poly1305_desc_ctx){}; -+} -+EXPORT_SYMBOL_GPL(poly1305_final_generic); -+ - MODULE_LICENSE("GPL"); - MODULE_AUTHOR("Martin Willi "); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0016-crypto-x86-poly1305-depend-on-generic-library-not-ge.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0016-crypto-x86-poly1305-depend-on-generic-library-not-ge.patch deleted file mode 100644 index 8ea63f3b9..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0016-crypto-x86-poly1305-depend-on-generic-library-not-ge.patch +++ /dev/null @@ -1,217 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:22 +0100 -Subject: [PATCH] crypto: x86/poly1305 - depend on generic library not generic - shash - -commit 1b2c6a5120489d41c8ea3b8dacd0b4586289b158 upstream. - -Remove the dependency on the generic Poly1305 driver. Instead, depend -on the generic library so that we only reuse code without pulling in -the generic skcipher implementation as well. - -While at it, remove the logic that prefers the non-SIMD path for short -inputs - this is no longer necessary after recent FPU handling changes -on x86. - -Since this removes the last remaining user of the routines exported -by the generic shash driver, unexport them and make them static. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/x86/crypto/poly1305_glue.c | 66 +++++++++++++++++++++++++----- - crypto/Kconfig | 2 +- - crypto/poly1305_generic.c | 11 ++--- - include/crypto/internal/poly1305.h | 9 ---- - 4 files changed, 60 insertions(+), 28 deletions(-) - ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -34,6 +34,24 @@ static void poly1305_simd_mult(u32 *a, c - poly1305_block_sse2(a, m, b, 1); - } - -+static unsigned int poly1305_scalar_blocks(struct poly1305_desc_ctx *dctx, -+ const u8 *src, unsigned int srclen) -+{ -+ unsigned int datalen; -+ -+ if (unlikely(!dctx->sset)) { -+ datalen = crypto_poly1305_setdesckey(dctx, src, srclen); -+ src += srclen - datalen; -+ srclen = datalen; -+ } -+ if (srclen >= POLY1305_BLOCK_SIZE) { -+ poly1305_core_blocks(&dctx->h, dctx->r, src, -+ srclen / POLY1305_BLOCK_SIZE, 1); -+ srclen %= POLY1305_BLOCK_SIZE; -+ } -+ return srclen; -+} -+ - static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx, - const u8 *src, unsigned int srclen) - { -@@ -91,12 +109,6 @@ static int poly1305_simd_update(struct s - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - unsigned int bytes; - -- /* kernel_fpu_begin/end is costly, use fallback for small updates */ -- if (srclen <= 288 || !crypto_simd_usable()) -- return crypto_poly1305_update(desc, src, srclen); -- -- kernel_fpu_begin(); -- - if (unlikely(dctx->buflen)) { - bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); - memcpy(dctx->buf + dctx->buflen, src, bytes); -@@ -105,25 +117,57 @@ static int poly1305_simd_update(struct s - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { -- poly1305_simd_blocks(dctx, dctx->buf, -- POLY1305_BLOCK_SIZE); -+ if (likely(crypto_simd_usable())) { -+ kernel_fpu_begin(); -+ poly1305_simd_blocks(dctx, dctx->buf, -+ POLY1305_BLOCK_SIZE); -+ kernel_fpu_end(); -+ } else { -+ poly1305_scalar_blocks(dctx, dctx->buf, -+ POLY1305_BLOCK_SIZE); -+ } - dctx->buflen = 0; - } - } - - if (likely(srclen 
>= POLY1305_BLOCK_SIZE)) { -- bytes = poly1305_simd_blocks(dctx, src, srclen); -+ if (likely(crypto_simd_usable())) { -+ kernel_fpu_begin(); -+ bytes = poly1305_simd_blocks(dctx, src, srclen); -+ kernel_fpu_end(); -+ } else { -+ bytes = poly1305_scalar_blocks(dctx, src, srclen); -+ } - src += srclen - bytes; - srclen = bytes; - } - -- kernel_fpu_end(); -- - if (unlikely(srclen)) { - dctx->buflen = srclen; - memcpy(dctx->buf, src, srclen); - } -+} -+ -+static int crypto_poly1305_init(struct shash_desc *desc) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ poly1305_core_init(&dctx->h); -+ dctx->buflen = 0; -+ dctx->rset = 0; -+ dctx->sset = false; -+ -+ return 0; -+} -+ -+static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ if (unlikely(!dctx->sset)) -+ return -ENOKEY; - -+ poly1305_final_generic(dctx, dst); - return 0; - } - ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -697,7 +697,7 @@ config CRYPTO_POLY1305 - config CRYPTO_POLY1305_X86_64 - tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)" - depends on X86 && 64BIT -- select CRYPTO_POLY1305 -+ select CRYPTO_LIB_POLY1305_GENERIC - help - Poly1305 authenticator algorithm, RFC7539. 
- ---- a/crypto/poly1305_generic.c -+++ b/crypto/poly1305_generic.c -@@ -19,7 +19,7 @@ - #include - #include - --int crypto_poly1305_init(struct shash_desc *desc) -+static int crypto_poly1305_init(struct shash_desc *desc) - { - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - -@@ -30,7 +30,6 @@ int crypto_poly1305_init(struct shash_de - - return 0; - } --EXPORT_SYMBOL_GPL(crypto_poly1305_init); - - static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int srclen) -@@ -47,8 +46,8 @@ static void poly1305_blocks(struct poly1 - srclen / POLY1305_BLOCK_SIZE, 1); - } - --int crypto_poly1305_update(struct shash_desc *desc, -- const u8 *src, unsigned int srclen) -+static int crypto_poly1305_update(struct shash_desc *desc, -+ const u8 *src, unsigned int srclen) - { - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - unsigned int bytes; -@@ -80,9 +79,8 @@ int crypto_poly1305_update(struct shash_ - - return 0; - } --EXPORT_SYMBOL_GPL(crypto_poly1305_update); - --int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) -+static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) - { - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - -@@ -92,7 +90,6 @@ int crypto_poly1305_final(struct shash_d - poly1305_final_generic(dctx, dst); - return 0; - } --EXPORT_SYMBOL_GPL(crypto_poly1305_final); - - static struct shash_alg poly1305_alg = { - .digestsize = POLY1305_DIGEST_SIZE, ---- a/include/crypto/internal/poly1305.h -+++ b/include/crypto/internal/poly1305.h -@@ -10,8 +10,6 @@ - #include - #include - --struct shash_desc; -- - /* - * Poly1305 core functions. These implement the ε-almost-∆-universal hash - * function underlying the Poly1305 MAC, i.e. 
they don't add an encrypted nonce -@@ -28,13 +26,6 @@ void poly1305_core_blocks(struct poly130 - unsigned int nblocks, u32 hibit); - void poly1305_core_emit(const struct poly1305_state *state, void *dst); - --/* Crypto API helper functions for the Poly1305 MAC */ --int crypto_poly1305_init(struct shash_desc *desc); -- --int crypto_poly1305_update(struct shash_desc *desc, -- const u8 *src, unsigned int srclen); --int crypto_poly1305_final(struct shash_desc *desc, u8 *dst); -- - /* - * Poly1305 requires a unique key for each tag, which implies that we can't set - * it on the tfm that gets accessed by multiple users simultaneously. Instead we diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0017-crypto-x86-poly1305-expose-existing-driver-as-poly13.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0017-crypto-x86-poly1305-expose-existing-driver-as-poly13.patch deleted file mode 100644 index 6514987b4..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0017-crypto-x86-poly1305-expose-existing-driver-as-poly13.patch +++ /dev/null @@ -1,163 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:23 +0100 -Subject: [PATCH] crypto: x86/poly1305 - expose existing driver as poly1305 - library - -commit f0e89bcfbb894e5844cd1bbf6b3cf7c63cb0f5ac upstream. - -Implement the arch init/update/final Poly1305 library routines in the -accelerated SIMD driver for x86 so they are accessible to users of -the Poly1305 library interface as well. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/x86/crypto/poly1305_glue.c | 57 ++++++++++++++++++++++++--------- - crypto/Kconfig | 1 + - lib/crypto/Kconfig | 1 + - 3 files changed, 43 insertions(+), 16 deletions(-) - ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -21,7 +22,8 @@ asmlinkage void poly1305_2block_sse2(u32 - asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r, - unsigned int blocks, const u32 *u); - --static bool poly1305_use_avx2 __ro_after_init; -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_simd); -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2); - - static void poly1305_simd_mult(u32 *a, const u32 *b) - { -@@ -64,7 +66,7 @@ static unsigned int poly1305_simd_blocks - } - - if (IS_ENABLED(CONFIG_AS_AVX2) && -- poly1305_use_avx2 && -+ static_branch_likely(&poly1305_use_avx2) && - srclen >= POLY1305_BLOCK_SIZE * 4) { - if (unlikely(dctx->rset < 4)) { - if (dctx->rset < 2) { -@@ -103,10 +105,15 @@ static unsigned int poly1305_simd_blocks - return srclen; - } - --static int poly1305_simd_update(struct shash_desc *desc, -- const u8 *src, unsigned int srclen) -+void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key) -+{ -+ poly1305_init_generic(desc, key); -+} -+EXPORT_SYMBOL(poly1305_init_arch); -+ -+void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, -+ unsigned int srclen) - { -- struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - unsigned int bytes; - - if (unlikely(dctx->buflen)) { -@@ -117,7 +124,8 @@ static int poly1305_simd_update(struct s - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { -- if (likely(crypto_simd_usable())) { -+ if (static_branch_likely(&poly1305_use_simd) && -+ likely(crypto_simd_usable())) { - kernel_fpu_begin(); - poly1305_simd_blocks(dctx, dctx->buf, - POLY1305_BLOCK_SIZE); -@@ -131,7 +139,8 @@ static int 
poly1305_simd_update(struct s - } - - if (likely(srclen >= POLY1305_BLOCK_SIZE)) { -- if (likely(crypto_simd_usable())) { -+ if (static_branch_likely(&poly1305_use_simd) && -+ likely(crypto_simd_usable())) { - kernel_fpu_begin(); - bytes = poly1305_simd_blocks(dctx, src, srclen); - kernel_fpu_end(); -@@ -147,6 +156,13 @@ static int poly1305_simd_update(struct s - memcpy(dctx->buf, src, srclen); - } - } -+EXPORT_SYMBOL(poly1305_update_arch); -+ -+void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest) -+{ -+ poly1305_final_generic(desc, digest); -+} -+EXPORT_SYMBOL(poly1305_final_arch); - - static int crypto_poly1305_init(struct shash_desc *desc) - { -@@ -171,6 +187,15 @@ static int crypto_poly1305_final(struct - return 0; - } - -+static int poly1305_simd_update(struct shash_desc *desc, -+ const u8 *src, unsigned int srclen) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ poly1305_update_arch(dctx, src, srclen); -+ return 0; -+} -+ - static struct shash_alg alg = { - .digestsize = POLY1305_DIGEST_SIZE, - .init = crypto_poly1305_init, -@@ -189,15 +214,15 @@ static struct shash_alg alg = { - static int __init poly1305_simd_mod_init(void) - { - if (!boot_cpu_has(X86_FEATURE_XMM2)) -- return -ENODEV; -+ return 0; - -- poly1305_use_avx2 = IS_ENABLED(CONFIG_AS_AVX2) && -- boot_cpu_has(X86_FEATURE_AVX) && -- boot_cpu_has(X86_FEATURE_AVX2) && -- cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL); -- alg.descsize = sizeof(struct poly1305_desc_ctx) + 5 * sizeof(u32); -- if (poly1305_use_avx2) -- alg.descsize += 10 * sizeof(u32); -+ static_branch_enable(&poly1305_use_simd); -+ -+ if (IS_ENABLED(CONFIG_AS_AVX2) && -+ boot_cpu_has(X86_FEATURE_AVX) && -+ boot_cpu_has(X86_FEATURE_AVX2) && -+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) -+ static_branch_enable(&poly1305_use_avx2); - - return crypto_register_shash(&alg); - } ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -698,6 +698,7 @@ config CRYPTO_POLY1305_X86_64 - 
tristate "Poly1305 authenticator algorithm (x86_64/SSE2/AVX2)" - depends on X86 && 64BIT - select CRYPTO_LIB_POLY1305_GENERIC -+ select CRYPTO_ARCH_HAVE_LIB_POLY1305 - help - Poly1305 authenticator algorithm, RFC7539. - ---- a/lib/crypto/Kconfig -+++ b/lib/crypto/Kconfig -@@ -39,6 +39,7 @@ config CRYPTO_LIB_DES - - config CRYPTO_LIB_POLY1305_RSIZE - int -+ default 4 if X86_64 - default 1 - - config CRYPTO_ARCH_HAVE_LIB_POLY1305 diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0018-crypto-arm64-poly1305-incorporate-OpenSSL-CRYPTOGAMS.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0018-crypto-arm64-poly1305-incorporate-OpenSSL-CRYPTOGAMS.patch deleted file mode 100644 index 464c6568f..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0018-crypto-arm64-poly1305-incorporate-OpenSSL-CRYPTOGAMS.patch +++ /dev/null @@ -1,2083 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:24 +0100 -Subject: [PATCH] crypto: arm64/poly1305 - incorporate OpenSSL/CRYPTOGAMS NEON - implementation - -commit f569ca16475155013525686d0f73bc379c67e635 upstream. - -This is a straight import of the OpenSSL/CRYPTOGAMS Poly1305 implementation -for NEON authored by Andy Polyakov, and contributed by him to the OpenSSL -project. The file 'poly1305-armv8.pl' is taken straight from this upstream -GitHub repository [0] at commit ec55a08dc0244ce570c4fc7cade330c60798952f, -and already contains all the changes required to build it as part of a -Linux kernel module. - -[0] https://github.com/dot-asm/cryptogams - -Co-developed-by: Andy Polyakov -Signed-off-by: Andy Polyakov -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/arm64/crypto/Kconfig | 6 + - arch/arm64/crypto/Makefile | 10 +- - arch/arm64/crypto/poly1305-armv8.pl | 913 ++++++++++++++++++++++ - arch/arm64/crypto/poly1305-core.S_shipped | 835 ++++++++++++++++++++ - arch/arm64/crypto/poly1305-glue.c | 237 ++++++ - lib/crypto/Kconfig | 1 + - 6 files changed, 2001 insertions(+), 1 deletion(-) - create mode 100644 arch/arm64/crypto/poly1305-armv8.pl - create mode 100644 arch/arm64/crypto/poly1305-core.S_shipped - create mode 100644 arch/arm64/crypto/poly1305-glue.c - ---- a/arch/arm64/crypto/Kconfig -+++ b/arch/arm64/crypto/Kconfig -@@ -106,6 +106,12 @@ config CRYPTO_CHACHA20_NEON - select CRYPTO_LIB_CHACHA_GENERIC - select CRYPTO_ARCH_HAVE_LIB_CHACHA - -+config CRYPTO_POLY1305_NEON -+ tristate "Poly1305 hash function using scalar or NEON instructions" -+ depends on KERNEL_MODE_NEON -+ select CRYPTO_HASH -+ select CRYPTO_ARCH_HAVE_LIB_POLY1305 -+ - config CRYPTO_NHPOLY1305_NEON - tristate "NHPoly1305 hash function using NEON instructions (for Adiantum)" - depends on KERNEL_MODE_NEON ---- a/arch/arm64/crypto/Makefile -+++ b/arch/arm64/crypto/Makefile -@@ -50,6 +50,10 @@ sha512-arm64-y := sha512-glue.o sha512-c - obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o - chacha-neon-y := chacha-neon-core.o chacha-neon-glue.o - -+obj-$(CONFIG_CRYPTO_POLY1305_NEON) += poly1305-neon.o -+poly1305-neon-y := poly1305-core.o poly1305-glue.o -+AFLAGS_poly1305-core.o += -Dpoly1305_init=poly1305_init_arm64 -+ - obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o - nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o - -@@ -68,11 +72,15 @@ ifdef REGENERATE_ARM64_CRYPTO - quiet_cmd_perlasm = PERLASM $@ - cmd_perlasm = $(PERL) $(<) void $(@) - -+$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv8.pl -+ $(call cmd,perlasm) -+ - $(src)/sha256-core.S_shipped: $(src)/sha512-armv8.pl - $(call cmd,perlasm) - - $(src)/sha512-core.S_shipped: $(src)/sha512-armv8.pl - $(call cmd,perlasm) -+ - endif - --clean-files += 
sha256-core.S sha512-core.S -+clean-files += poly1305-core.S sha256-core.S sha512-core.S ---- /dev/null -+++ b/arch/arm64/crypto/poly1305-armv8.pl -@@ -0,0 +1,913 @@ -+#!/usr/bin/env perl -+# SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause -+# -+# ==================================================================== -+# Written by Andy Polyakov, @dot-asm, initially for the OpenSSL -+# project. -+# ==================================================================== -+# -+# This module implements Poly1305 hash for ARMv8. -+# -+# June 2015 -+# -+# Numbers are cycles per processed byte with poly1305_blocks alone. -+# -+# IALU/gcc-4.9 NEON -+# -+# Apple A7 1.86/+5% 0.72 -+# Cortex-A53 2.69/+58% 1.47 -+# Cortex-A57 2.70/+7% 1.14 -+# Denver 1.64/+50% 1.18(*) -+# X-Gene 2.13/+68% 2.27 -+# Mongoose 1.77/+75% 1.12 -+# Kryo 2.70/+55% 1.13 -+# ThunderX2 1.17/+95% 1.36 -+# -+# (*) estimate based on resources availability is less than 1.0, -+# i.e. measured result is worse than expected, presumably binary -+# translator is not almighty; -+ -+$flavour=shift; -+$output=shift; -+ -+if ($flavour && $flavour ne "void") { -+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; -+ ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or -+ ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or -+ die "can't locate arm-xlate.pl"; -+ -+ open STDOUT,"| \"$^X\" $xlate $flavour $output"; -+} else { -+ open STDOUT,">$output"; -+} -+ -+my ($ctx,$inp,$len,$padbit) = map("x$_",(0..3)); -+my ($mac,$nonce)=($inp,$len); -+ -+my ($h0,$h1,$h2,$r0,$r1,$s1,$t0,$t1,$d0,$d1,$d2) = map("x$_",(4..14)); -+ -+$code.=<<___; -+#ifndef __KERNEL__ -+# include "arm_arch.h" -+.extern OPENSSL_armcap_P -+#endif -+ -+.text -+ -+// forward "declarations" are required for Apple -+.globl poly1305_blocks -+.globl poly1305_emit -+ -+.globl poly1305_init -+.type poly1305_init,%function -+.align 5 -+poly1305_init: -+ cmp $inp,xzr -+ stp xzr,xzr,[$ctx] // zero hash value -+ stp xzr,xzr,[$ctx,#16] // [along with is_base2_26] -+ -+ csel 
x0,xzr,x0,eq -+ b.eq .Lno_key -+ -+#ifndef __KERNEL__ -+ adrp x17,OPENSSL_armcap_P -+ ldr w17,[x17,#:lo12:OPENSSL_armcap_P] -+#endif -+ -+ ldp $r0,$r1,[$inp] // load key -+ mov $s1,#0xfffffffc0fffffff -+ movk $s1,#0x0fff,lsl#48 -+#ifdef __AARCH64EB__ -+ rev $r0,$r0 // flip bytes -+ rev $r1,$r1 -+#endif -+ and $r0,$r0,$s1 // &=0ffffffc0fffffff -+ and $s1,$s1,#-4 -+ and $r1,$r1,$s1 // &=0ffffffc0ffffffc -+ mov w#$s1,#-1 -+ stp $r0,$r1,[$ctx,#32] // save key value -+ str w#$s1,[$ctx,#48] // impossible key power value -+ -+#ifndef __KERNEL__ -+ tst w17,#ARMV7_NEON -+ -+ adr $d0,.Lpoly1305_blocks -+ adr $r0,.Lpoly1305_blocks_neon -+ adr $d1,.Lpoly1305_emit -+ -+ csel $d0,$d0,$r0,eq -+ -+# ifdef __ILP32__ -+ stp w#$d0,w#$d1,[$len] -+# else -+ stp $d0,$d1,[$len] -+# endif -+#endif -+ mov x0,#1 -+.Lno_key: -+ ret -+.size poly1305_init,.-poly1305_init -+ -+.type poly1305_blocks,%function -+.align 5 -+poly1305_blocks: -+.Lpoly1305_blocks: -+ ands $len,$len,#-16 -+ b.eq .Lno_data -+ -+ ldp $h0,$h1,[$ctx] // load hash value -+ ldp $h2,x17,[$ctx,#16] // [along with is_base2_26] -+ ldp $r0,$r1,[$ctx,#32] // load key value -+ -+#ifdef __AARCH64EB__ -+ lsr $d0,$h0,#32 -+ mov w#$d1,w#$h0 -+ lsr $d2,$h1,#32 -+ mov w15,w#$h1 -+ lsr x16,$h2,#32 -+#else -+ mov w#$d0,w#$h0 -+ lsr $d1,$h0,#32 -+ mov w#$d2,w#$h1 -+ lsr x15,$h1,#32 -+ mov w16,w#$h2 -+#endif -+ -+ add $d0,$d0,$d1,lsl#26 // base 2^26 -> base 2^64 -+ lsr $d1,$d2,#12 -+ adds $d0,$d0,$d2,lsl#52 -+ add $d1,$d1,x15,lsl#14 -+ adc $d1,$d1,xzr -+ lsr $d2,x16,#24 -+ adds $d1,$d1,x16,lsl#40 -+ adc $d2,$d2,xzr -+ -+ cmp x17,#0 // is_base2_26? 
-+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) -+ csel $h0,$h0,$d0,eq // choose between radixes -+ csel $h1,$h1,$d1,eq -+ csel $h2,$h2,$d2,eq -+ -+.Loop: -+ ldp $t0,$t1,[$inp],#16 // load input -+ sub $len,$len,#16 -+#ifdef __AARCH64EB__ -+ rev $t0,$t0 -+ rev $t1,$t1 -+#endif -+ adds $h0,$h0,$t0 // accumulate input -+ adcs $h1,$h1,$t1 -+ -+ mul $d0,$h0,$r0 // h0*r0 -+ adc $h2,$h2,$padbit -+ umulh $d1,$h0,$r0 -+ -+ mul $t0,$h1,$s1 // h1*5*r1 -+ umulh $t1,$h1,$s1 -+ -+ adds $d0,$d0,$t0 -+ mul $t0,$h0,$r1 // h0*r1 -+ adc $d1,$d1,$t1 -+ umulh $d2,$h0,$r1 -+ -+ adds $d1,$d1,$t0 -+ mul $t0,$h1,$r0 // h1*r0 -+ adc $d2,$d2,xzr -+ umulh $t1,$h1,$r0 -+ -+ adds $d1,$d1,$t0 -+ mul $t0,$h2,$s1 // h2*5*r1 -+ adc $d2,$d2,$t1 -+ mul $t1,$h2,$r0 // h2*r0 -+ -+ adds $d1,$d1,$t0 -+ adc $d2,$d2,$t1 -+ -+ and $t0,$d2,#-4 // final reduction -+ and $h2,$d2,#3 -+ add $t0,$t0,$d2,lsr#2 -+ adds $h0,$d0,$t0 -+ adcs $h1,$d1,xzr -+ adc $h2,$h2,xzr -+ -+ cbnz $len,.Loop -+ -+ stp $h0,$h1,[$ctx] // store hash value -+ stp $h2,xzr,[$ctx,#16] // [and clear is_base2_26] -+ -+.Lno_data: -+ ret -+.size poly1305_blocks,.-poly1305_blocks -+ -+.type poly1305_emit,%function -+.align 5 -+poly1305_emit: -+.Lpoly1305_emit: -+ ldp $h0,$h1,[$ctx] // load hash base 2^64 -+ ldp $h2,$r0,[$ctx,#16] // [along with is_base2_26] -+ ldp $t0,$t1,[$nonce] // load nonce -+ -+#ifdef __AARCH64EB__ -+ lsr $d0,$h0,#32 -+ mov w#$d1,w#$h0 -+ lsr $d2,$h1,#32 -+ mov w15,w#$h1 -+ lsr x16,$h2,#32 -+#else -+ mov w#$d0,w#$h0 -+ lsr $d1,$h0,#32 -+ mov w#$d2,w#$h1 -+ lsr x15,$h1,#32 -+ mov w16,w#$h2 -+#endif -+ -+ add $d0,$d0,$d1,lsl#26 // base 2^26 -> base 2^64 -+ lsr $d1,$d2,#12 -+ adds $d0,$d0,$d2,lsl#52 -+ add $d1,$d1,x15,lsl#14 -+ adc $d1,$d1,xzr -+ lsr $d2,x16,#24 -+ adds $d1,$d1,x16,lsl#40 -+ adc $d2,$d2,xzr -+ -+ cmp $r0,#0 // is_base2_26? 
-+ csel $h0,$h0,$d0,eq // choose between radixes -+ csel $h1,$h1,$d1,eq -+ csel $h2,$h2,$d2,eq -+ -+ adds $d0,$h0,#5 // compare to modulus -+ adcs $d1,$h1,xzr -+ adc $d2,$h2,xzr -+ -+ tst $d2,#-4 // see if it's carried/borrowed -+ -+ csel $h0,$h0,$d0,eq -+ csel $h1,$h1,$d1,eq -+ -+#ifdef __AARCH64EB__ -+ ror $t0,$t0,#32 // flip nonce words -+ ror $t1,$t1,#32 -+#endif -+ adds $h0,$h0,$t0 // accumulate nonce -+ adc $h1,$h1,$t1 -+#ifdef __AARCH64EB__ -+ rev $h0,$h0 // flip output bytes -+ rev $h1,$h1 -+#endif -+ stp $h0,$h1,[$mac] // write result -+ -+ ret -+.size poly1305_emit,.-poly1305_emit -+___ -+my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("v$_.4s",(0..8)); -+my ($IN01_0,$IN01_1,$IN01_2,$IN01_3,$IN01_4) = map("v$_.2s",(9..13)); -+my ($IN23_0,$IN23_1,$IN23_2,$IN23_3,$IN23_4) = map("v$_.2s",(14..18)); -+my ($ACC0,$ACC1,$ACC2,$ACC3,$ACC4) = map("v$_.2d",(19..23)); -+my ($H0,$H1,$H2,$H3,$H4) = map("v$_.2s",(24..28)); -+my ($T0,$T1,$MASK) = map("v$_",(29..31)); -+ -+my ($in2,$zeros)=("x16","x17"); -+my $is_base2_26 = $zeros; # borrow -+ -+$code.=<<___; -+.type poly1305_mult,%function -+.align 5 -+poly1305_mult: -+ mul $d0,$h0,$r0 // h0*r0 -+ umulh $d1,$h0,$r0 -+ -+ mul $t0,$h1,$s1 // h1*5*r1 -+ umulh $t1,$h1,$s1 -+ -+ adds $d0,$d0,$t0 -+ mul $t0,$h0,$r1 // h0*r1 -+ adc $d1,$d1,$t1 -+ umulh $d2,$h0,$r1 -+ -+ adds $d1,$d1,$t0 -+ mul $t0,$h1,$r0 // h1*r0 -+ adc $d2,$d2,xzr -+ umulh $t1,$h1,$r0 -+ -+ adds $d1,$d1,$t0 -+ mul $t0,$h2,$s1 // h2*5*r1 -+ adc $d2,$d2,$t1 -+ mul $t1,$h2,$r0 // h2*r0 -+ -+ adds $d1,$d1,$t0 -+ adc $d2,$d2,$t1 -+ -+ and $t0,$d2,#-4 // final reduction -+ and $h2,$d2,#3 -+ add $t0,$t0,$d2,lsr#2 -+ adds $h0,$d0,$t0 -+ adcs $h1,$d1,xzr -+ adc $h2,$h2,xzr -+ -+ ret -+.size poly1305_mult,.-poly1305_mult -+ -+.type poly1305_splat,%function -+.align 4 -+poly1305_splat: -+ and x12,$h0,#0x03ffffff // base 2^64 -> base 2^26 -+ ubfx x13,$h0,#26,#26 -+ extr x14,$h1,$h0,#52 -+ and x14,x14,#0x03ffffff -+ ubfx x15,$h1,#14,#26 -+ extr x16,$h2,$h1,#40 -+ -+ str 
w12,[$ctx,#16*0] // r0 -+ add w12,w13,w13,lsl#2 // r1*5 -+ str w13,[$ctx,#16*1] // r1 -+ add w13,w14,w14,lsl#2 // r2*5 -+ str w12,[$ctx,#16*2] // s1 -+ str w14,[$ctx,#16*3] // r2 -+ add w14,w15,w15,lsl#2 // r3*5 -+ str w13,[$ctx,#16*4] // s2 -+ str w15,[$ctx,#16*5] // r3 -+ add w15,w16,w16,lsl#2 // r4*5 -+ str w14,[$ctx,#16*6] // s3 -+ str w16,[$ctx,#16*7] // r4 -+ str w15,[$ctx,#16*8] // s4 -+ -+ ret -+.size poly1305_splat,.-poly1305_splat -+ -+#ifdef __KERNEL__ -+.globl poly1305_blocks_neon -+#endif -+.type poly1305_blocks_neon,%function -+.align 5 -+poly1305_blocks_neon: -+.Lpoly1305_blocks_neon: -+ ldr $is_base2_26,[$ctx,#24] -+ cmp $len,#128 -+ b.lo .Lpoly1305_blocks -+ -+ .inst 0xd503233f // paciasp -+ stp x29,x30,[sp,#-80]! -+ add x29,sp,#0 -+ -+ stp d8,d9,[sp,#16] // meet ABI requirements -+ stp d10,d11,[sp,#32] -+ stp d12,d13,[sp,#48] -+ stp d14,d15,[sp,#64] -+ -+ cbz $is_base2_26,.Lbase2_64_neon -+ -+ ldp w10,w11,[$ctx] // load hash value base 2^26 -+ ldp w12,w13,[$ctx,#8] -+ ldr w14,[$ctx,#16] -+ -+ tst $len,#31 -+ b.eq .Leven_neon -+ -+ ldp $r0,$r1,[$ctx,#32] // load key value -+ -+ add $h0,x10,x11,lsl#26 // base 2^26 -> base 2^64 -+ lsr $h1,x12,#12 -+ adds $h0,$h0,x12,lsl#52 -+ add $h1,$h1,x13,lsl#14 -+ adc $h1,$h1,xzr -+ lsr $h2,x14,#24 -+ adds $h1,$h1,x14,lsl#40 -+ adc $d2,$h2,xzr // can be partially reduced... 
-+ -+ ldp $d0,$d1,[$inp],#16 // load input -+ sub $len,$len,#16 -+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) -+ -+#ifdef __AARCH64EB__ -+ rev $d0,$d0 -+ rev $d1,$d1 -+#endif -+ adds $h0,$h0,$d0 // accumulate input -+ adcs $h1,$h1,$d1 -+ adc $h2,$h2,$padbit -+ -+ bl poly1305_mult -+ -+ and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26 -+ ubfx x11,$h0,#26,#26 -+ extr x12,$h1,$h0,#52 -+ and x12,x12,#0x03ffffff -+ ubfx x13,$h1,#14,#26 -+ extr x14,$h2,$h1,#40 -+ -+ b .Leven_neon -+ -+.align 4 -+.Lbase2_64_neon: -+ ldp $r0,$r1,[$ctx,#32] // load key value -+ -+ ldp $h0,$h1,[$ctx] // load hash value base 2^64 -+ ldr $h2,[$ctx,#16] -+ -+ tst $len,#31 -+ b.eq .Linit_neon -+ -+ ldp $d0,$d1,[$inp],#16 // load input -+ sub $len,$len,#16 -+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) -+#ifdef __AARCH64EB__ -+ rev $d0,$d0 -+ rev $d1,$d1 -+#endif -+ adds $h0,$h0,$d0 // accumulate input -+ adcs $h1,$h1,$d1 -+ adc $h2,$h2,$padbit -+ -+ bl poly1305_mult -+ -+.Linit_neon: -+ ldr w17,[$ctx,#48] // first table element -+ and x10,$h0,#0x03ffffff // base 2^64 -> base 2^26 -+ ubfx x11,$h0,#26,#26 -+ extr x12,$h1,$h0,#52 -+ and x12,x12,#0x03ffffff -+ ubfx x13,$h1,#14,#26 -+ extr x14,$h2,$h1,#40 -+ -+ cmp w17,#-1 // is value impossible? 
-+ b.ne .Leven_neon -+ -+ fmov ${H0},x10 -+ fmov ${H1},x11 -+ fmov ${H2},x12 -+ fmov ${H3},x13 -+ fmov ${H4},x14 -+ -+ ////////////////////////////////// initialize r^n table -+ mov $h0,$r0 // r^1 -+ add $s1,$r1,$r1,lsr#2 // s1 = r1 + (r1 >> 2) -+ mov $h1,$r1 -+ mov $h2,xzr -+ add $ctx,$ctx,#48+12 -+ bl poly1305_splat -+ -+ bl poly1305_mult // r^2 -+ sub $ctx,$ctx,#4 -+ bl poly1305_splat -+ -+ bl poly1305_mult // r^3 -+ sub $ctx,$ctx,#4 -+ bl poly1305_splat -+ -+ bl poly1305_mult // r^4 -+ sub $ctx,$ctx,#4 -+ bl poly1305_splat -+ sub $ctx,$ctx,#48 // restore original $ctx -+ b .Ldo_neon -+ -+.align 4 -+.Leven_neon: -+ fmov ${H0},x10 -+ fmov ${H1},x11 -+ fmov ${H2},x12 -+ fmov ${H3},x13 -+ fmov ${H4},x14 -+ -+.Ldo_neon: -+ ldp x8,x12,[$inp,#32] // inp[2:3] -+ subs $len,$len,#64 -+ ldp x9,x13,[$inp,#48] -+ add $in2,$inp,#96 -+ adr $zeros,.Lzeros -+ -+ lsl $padbit,$padbit,#24 -+ add x15,$ctx,#48 -+ -+#ifdef __AARCH64EB__ -+ rev x8,x8 -+ rev x12,x12 -+ rev x9,x9 -+ rev x13,x13 -+#endif -+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 -+ and x5,x9,#0x03ffffff -+ ubfx x6,x8,#26,#26 -+ ubfx x7,x9,#26,#26 -+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 -+ extr x8,x12,x8,#52 -+ extr x9,x13,x9,#52 -+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 -+ fmov $IN23_0,x4 -+ and x8,x8,#0x03ffffff -+ and x9,x9,#0x03ffffff -+ ubfx x10,x12,#14,#26 -+ ubfx x11,x13,#14,#26 -+ add x12,$padbit,x12,lsr#40 -+ add x13,$padbit,x13,lsr#40 -+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 -+ fmov $IN23_1,x6 -+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 -+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 -+ fmov $IN23_2,x8 -+ fmov $IN23_3,x10 -+ fmov $IN23_4,x12 -+ -+ ldp x8,x12,[$inp],#16 // inp[0:1] -+ ldp x9,x13,[$inp],#48 -+ -+ ld1 {$R0,$R1,$S1,$R2},[x15],#64 -+ ld1 {$S2,$R3,$S3,$R4},[x15],#64 -+ ld1 {$S4},[x15] -+ -+#ifdef __AARCH64EB__ -+ rev x8,x8 -+ rev x12,x12 -+ rev x9,x9 -+ rev x13,x13 -+#endif -+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 -+ and x5,x9,#0x03ffffff -+ ubfx x6,x8,#26,#26 -+ ubfx 
x7,x9,#26,#26 -+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 -+ extr x8,x12,x8,#52 -+ extr x9,x13,x9,#52 -+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 -+ fmov $IN01_0,x4 -+ and x8,x8,#0x03ffffff -+ and x9,x9,#0x03ffffff -+ ubfx x10,x12,#14,#26 -+ ubfx x11,x13,#14,#26 -+ add x12,$padbit,x12,lsr#40 -+ add x13,$padbit,x13,lsr#40 -+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 -+ fmov $IN01_1,x6 -+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 -+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 -+ movi $MASK.2d,#-1 -+ fmov $IN01_2,x8 -+ fmov $IN01_3,x10 -+ fmov $IN01_4,x12 -+ ushr $MASK.2d,$MASK.2d,#38 -+ -+ b.ls .Lskip_loop -+ -+.align 4 -+.Loop_neon: -+ //////////////////////////////////////////////////////////////// -+ // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 -+ // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r -+ // \___________________/ -+ // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 -+ // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r -+ // \___________________/ \____________________/ -+ // -+ // Note that we start with inp[2:3]*r^2. This is because it -+ // doesn't depend on reduction in previous iteration. 
-+ //////////////////////////////////////////////////////////////// -+ // d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0 -+ // d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4 -+ // d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3 -+ // d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2 -+ // d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1 -+ -+ subs $len,$len,#64 -+ umull $ACC4,$IN23_0,${R4}[2] -+ csel $in2,$zeros,$in2,lo -+ umull $ACC3,$IN23_0,${R3}[2] -+ umull $ACC2,$IN23_0,${R2}[2] -+ ldp x8,x12,[$in2],#16 // inp[2:3] (or zero) -+ umull $ACC1,$IN23_0,${R1}[2] -+ ldp x9,x13,[$in2],#48 -+ umull $ACC0,$IN23_0,${R0}[2] -+#ifdef __AARCH64EB__ -+ rev x8,x8 -+ rev x12,x12 -+ rev x9,x9 -+ rev x13,x13 -+#endif -+ -+ umlal $ACC4,$IN23_1,${R3}[2] -+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 -+ umlal $ACC3,$IN23_1,${R2}[2] -+ and x5,x9,#0x03ffffff -+ umlal $ACC2,$IN23_1,${R1}[2] -+ ubfx x6,x8,#26,#26 -+ umlal $ACC1,$IN23_1,${R0}[2] -+ ubfx x7,x9,#26,#26 -+ umlal $ACC0,$IN23_1,${S4}[2] -+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 -+ -+ umlal $ACC4,$IN23_2,${R2}[2] -+ extr x8,x12,x8,#52 -+ umlal $ACC3,$IN23_2,${R1}[2] -+ extr x9,x13,x9,#52 -+ umlal $ACC2,$IN23_2,${R0}[2] -+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 -+ umlal $ACC1,$IN23_2,${S4}[2] -+ fmov $IN23_0,x4 -+ umlal $ACC0,$IN23_2,${S3}[2] -+ and x8,x8,#0x03ffffff -+ -+ umlal $ACC4,$IN23_3,${R1}[2] -+ and x9,x9,#0x03ffffff -+ umlal $ACC3,$IN23_3,${R0}[2] -+ ubfx x10,x12,#14,#26 -+ umlal $ACC2,$IN23_3,${S4}[2] -+ ubfx x11,x13,#14,#26 -+ umlal $ACC1,$IN23_3,${S3}[2] -+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 -+ umlal $ACC0,$IN23_3,${S2}[2] -+ fmov $IN23_1,x6 -+ -+ add $IN01_2,$IN01_2,$H2 -+ add x12,$padbit,x12,lsr#40 -+ umlal $ACC4,$IN23_4,${R0}[2] -+ add x13,$padbit,x13,lsr#40 -+ umlal $ACC3,$IN23_4,${S4}[2] -+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 -+ umlal $ACC2,$IN23_4,${S3}[2] -+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 -+ umlal $ACC1,$IN23_4,${S2}[2] -+ fmov $IN23_2,x8 -+ umlal $ACC0,$IN23_4,${S1}[2] -+ 
fmov $IN23_3,x10 -+ -+ //////////////////////////////////////////////////////////////// -+ // (hash+inp[0:1])*r^4 and accumulate -+ -+ add $IN01_0,$IN01_0,$H0 -+ fmov $IN23_4,x12 -+ umlal $ACC3,$IN01_2,${R1}[0] -+ ldp x8,x12,[$inp],#16 // inp[0:1] -+ umlal $ACC0,$IN01_2,${S3}[0] -+ ldp x9,x13,[$inp],#48 -+ umlal $ACC4,$IN01_2,${R2}[0] -+ umlal $ACC1,$IN01_2,${S4}[0] -+ umlal $ACC2,$IN01_2,${R0}[0] -+#ifdef __AARCH64EB__ -+ rev x8,x8 -+ rev x12,x12 -+ rev x9,x9 -+ rev x13,x13 -+#endif -+ -+ add $IN01_1,$IN01_1,$H1 -+ umlal $ACC3,$IN01_0,${R3}[0] -+ umlal $ACC4,$IN01_0,${R4}[0] -+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 -+ umlal $ACC2,$IN01_0,${R2}[0] -+ and x5,x9,#0x03ffffff -+ umlal $ACC0,$IN01_0,${R0}[0] -+ ubfx x6,x8,#26,#26 -+ umlal $ACC1,$IN01_0,${R1}[0] -+ ubfx x7,x9,#26,#26 -+ -+ add $IN01_3,$IN01_3,$H3 -+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 -+ umlal $ACC3,$IN01_1,${R2}[0] -+ extr x8,x12,x8,#52 -+ umlal $ACC4,$IN01_1,${R3}[0] -+ extr x9,x13,x9,#52 -+ umlal $ACC0,$IN01_1,${S4}[0] -+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 -+ umlal $ACC2,$IN01_1,${R1}[0] -+ fmov $IN01_0,x4 -+ umlal $ACC1,$IN01_1,${R0}[0] -+ and x8,x8,#0x03ffffff -+ -+ add $IN01_4,$IN01_4,$H4 -+ and x9,x9,#0x03ffffff -+ umlal $ACC3,$IN01_3,${R0}[0] -+ ubfx x10,x12,#14,#26 -+ umlal $ACC0,$IN01_3,${S2}[0] -+ ubfx x11,x13,#14,#26 -+ umlal $ACC4,$IN01_3,${R1}[0] -+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 -+ umlal $ACC1,$IN01_3,${S3}[0] -+ fmov $IN01_1,x6 -+ umlal $ACC2,$IN01_3,${S4}[0] -+ add x12,$padbit,x12,lsr#40 -+ -+ umlal $ACC3,$IN01_4,${S4}[0] -+ add x13,$padbit,x13,lsr#40 -+ umlal $ACC0,$IN01_4,${S1}[0] -+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 -+ umlal $ACC4,$IN01_4,${R0}[0] -+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 -+ umlal $ACC1,$IN01_4,${S2}[0] -+ fmov $IN01_2,x8 -+ umlal $ACC2,$IN01_4,${S3}[0] -+ fmov $IN01_3,x10 -+ fmov $IN01_4,x12 -+ -+ ///////////////////////////////////////////////////////////////// -+ // lazy reduction as discussed in "NEON crypto" by 
D.J. Bernstein -+ // and P. Schwabe -+ // -+ // [see discussion in poly1305-armv4 module] -+ -+ ushr $T0.2d,$ACC3,#26 -+ xtn $H3,$ACC3 -+ ushr $T1.2d,$ACC0,#26 -+ and $ACC0,$ACC0,$MASK.2d -+ add $ACC4,$ACC4,$T0.2d // h3 -> h4 -+ bic $H3,#0xfc,lsl#24 // &=0x03ffffff -+ add $ACC1,$ACC1,$T1.2d // h0 -> h1 -+ -+ ushr $T0.2d,$ACC4,#26 -+ xtn $H4,$ACC4 -+ ushr $T1.2d,$ACC1,#26 -+ xtn $H1,$ACC1 -+ bic $H4,#0xfc,lsl#24 -+ add $ACC2,$ACC2,$T1.2d // h1 -> h2 -+ -+ add $ACC0,$ACC0,$T0.2d -+ shl $T0.2d,$T0.2d,#2 -+ shrn $T1.2s,$ACC2,#26 -+ xtn $H2,$ACC2 -+ add $ACC0,$ACC0,$T0.2d // h4 -> h0 -+ bic $H1,#0xfc,lsl#24 -+ add $H3,$H3,$T1.2s // h2 -> h3 -+ bic $H2,#0xfc,lsl#24 -+ -+ shrn $T0.2s,$ACC0,#26 -+ xtn $H0,$ACC0 -+ ushr $T1.2s,$H3,#26 -+ bic $H3,#0xfc,lsl#24 -+ bic $H0,#0xfc,lsl#24 -+ add $H1,$H1,$T0.2s // h0 -> h1 -+ add $H4,$H4,$T1.2s // h3 -> h4 -+ -+ b.hi .Loop_neon -+ -+.Lskip_loop: -+ dup $IN23_2,${IN23_2}[0] -+ add $IN01_2,$IN01_2,$H2 -+ -+ //////////////////////////////////////////////////////////////// -+ // multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 -+ -+ adds $len,$len,#32 -+ b.ne .Long_tail -+ -+ dup $IN23_2,${IN01_2}[0] -+ add $IN23_0,$IN01_0,$H0 -+ add $IN23_3,$IN01_3,$H3 -+ add $IN23_1,$IN01_1,$H1 -+ add $IN23_4,$IN01_4,$H4 -+ -+.Long_tail: -+ dup $IN23_0,${IN23_0}[0] -+ umull2 $ACC0,$IN23_2,${S3} -+ umull2 $ACC3,$IN23_2,${R1} -+ umull2 $ACC4,$IN23_2,${R2} -+ umull2 $ACC2,$IN23_2,${R0} -+ umull2 $ACC1,$IN23_2,${S4} -+ -+ dup $IN23_1,${IN23_1}[0] -+ umlal2 $ACC0,$IN23_0,${R0} -+ umlal2 $ACC2,$IN23_0,${R2} -+ umlal2 $ACC3,$IN23_0,${R3} -+ umlal2 $ACC4,$IN23_0,${R4} -+ umlal2 $ACC1,$IN23_0,${R1} -+ -+ dup $IN23_3,${IN23_3}[0] -+ umlal2 $ACC0,$IN23_1,${S4} -+ umlal2 $ACC3,$IN23_1,${R2} -+ umlal2 $ACC2,$IN23_1,${R1} -+ umlal2 $ACC4,$IN23_1,${R3} -+ umlal2 $ACC1,$IN23_1,${R0} -+ -+ dup $IN23_4,${IN23_4}[0] -+ umlal2 $ACC3,$IN23_3,${R0} -+ umlal2 $ACC4,$IN23_3,${R1} -+ umlal2 $ACC0,$IN23_3,${S2} -+ umlal2 $ACC1,$IN23_3,${S3} -+ umlal2 $ACC2,$IN23_3,${S4} -+ -+ 
umlal2 $ACC3,$IN23_4,${S4} -+ umlal2 $ACC0,$IN23_4,${S1} -+ umlal2 $ACC4,$IN23_4,${R0} -+ umlal2 $ACC1,$IN23_4,${S2} -+ umlal2 $ACC2,$IN23_4,${S3} -+ -+ b.eq .Lshort_tail -+ -+ //////////////////////////////////////////////////////////////// -+ // (hash+inp[0:1])*r^4:r^3 and accumulate -+ -+ add $IN01_0,$IN01_0,$H0 -+ umlal $ACC3,$IN01_2,${R1} -+ umlal $ACC0,$IN01_2,${S3} -+ umlal $ACC4,$IN01_2,${R2} -+ umlal $ACC1,$IN01_2,${S4} -+ umlal $ACC2,$IN01_2,${R0} -+ -+ add $IN01_1,$IN01_1,$H1 -+ umlal $ACC3,$IN01_0,${R3} -+ umlal $ACC0,$IN01_0,${R0} -+ umlal $ACC4,$IN01_0,${R4} -+ umlal $ACC1,$IN01_0,${R1} -+ umlal $ACC2,$IN01_0,${R2} -+ -+ add $IN01_3,$IN01_3,$H3 -+ umlal $ACC3,$IN01_1,${R2} -+ umlal $ACC0,$IN01_1,${S4} -+ umlal $ACC4,$IN01_1,${R3} -+ umlal $ACC1,$IN01_1,${R0} -+ umlal $ACC2,$IN01_1,${R1} -+ -+ add $IN01_4,$IN01_4,$H4 -+ umlal $ACC3,$IN01_3,${R0} -+ umlal $ACC0,$IN01_3,${S2} -+ umlal $ACC4,$IN01_3,${R1} -+ umlal $ACC1,$IN01_3,${S3} -+ umlal $ACC2,$IN01_3,${S4} -+ -+ umlal $ACC3,$IN01_4,${S4} -+ umlal $ACC0,$IN01_4,${S1} -+ umlal $ACC4,$IN01_4,${R0} -+ umlal $ACC1,$IN01_4,${S2} -+ umlal $ACC2,$IN01_4,${S3} -+ -+.Lshort_tail: -+ //////////////////////////////////////////////////////////////// -+ // horizontal add -+ -+ addp $ACC3,$ACC3,$ACC3 -+ ldp d8,d9,[sp,#16] // meet ABI requirements -+ addp $ACC0,$ACC0,$ACC0 -+ ldp d10,d11,[sp,#32] -+ addp $ACC4,$ACC4,$ACC4 -+ ldp d12,d13,[sp,#48] -+ addp $ACC1,$ACC1,$ACC1 -+ ldp d14,d15,[sp,#64] -+ addp $ACC2,$ACC2,$ACC2 -+ ldr x30,[sp,#8] -+ .inst 0xd50323bf // autiasp -+ -+ //////////////////////////////////////////////////////////////// -+ // lazy reduction, but without narrowing -+ -+ ushr $T0.2d,$ACC3,#26 -+ and $ACC3,$ACC3,$MASK.2d -+ ushr $T1.2d,$ACC0,#26 -+ and $ACC0,$ACC0,$MASK.2d -+ -+ add $ACC4,$ACC4,$T0.2d // h3 -> h4 -+ add $ACC1,$ACC1,$T1.2d // h0 -> h1 -+ -+ ushr $T0.2d,$ACC4,#26 -+ and $ACC4,$ACC4,$MASK.2d -+ ushr $T1.2d,$ACC1,#26 -+ and $ACC1,$ACC1,$MASK.2d -+ add $ACC2,$ACC2,$T1.2d // h1 -> h2 -+ 
-+ add $ACC0,$ACC0,$T0.2d -+ shl $T0.2d,$T0.2d,#2 -+ ushr $T1.2d,$ACC2,#26 -+ and $ACC2,$ACC2,$MASK.2d -+ add $ACC0,$ACC0,$T0.2d // h4 -> h0 -+ add $ACC3,$ACC3,$T1.2d // h2 -> h3 -+ -+ ushr $T0.2d,$ACC0,#26 -+ and $ACC0,$ACC0,$MASK.2d -+ ushr $T1.2d,$ACC3,#26 -+ and $ACC3,$ACC3,$MASK.2d -+ add $ACC1,$ACC1,$T0.2d // h0 -> h1 -+ add $ACC4,$ACC4,$T1.2d // h3 -> h4 -+ -+ //////////////////////////////////////////////////////////////// -+ // write the result, can be partially reduced -+ -+ st4 {$ACC0,$ACC1,$ACC2,$ACC3}[0],[$ctx],#16 -+ mov x4,#1 -+ st1 {$ACC4}[0],[$ctx] -+ str x4,[$ctx,#8] // set is_base2_26 -+ -+ ldr x29,[sp],#80 -+ ret -+.size poly1305_blocks_neon,.-poly1305_blocks_neon -+ -+.align 5 -+.Lzeros: -+.long 0,0,0,0,0,0,0,0 -+.asciz "Poly1305 for ARMv8, CRYPTOGAMS by \@dot-asm" -+.align 2 -+#if !defined(__KERNEL__) && !defined(_WIN64) -+.comm OPENSSL_armcap_P,4,4 -+.hidden OPENSSL_armcap_P -+#endif -+___ -+ -+foreach (split("\n",$code)) { -+ s/\b(shrn\s+v[0-9]+)\.[24]d/$1.2s/ or -+ s/\b(fmov\s+)v([0-9]+)[^,]*,\s*x([0-9]+)/$1d$2,x$3/ or -+ (m/\bdup\b/ and (s/\.[24]s/.2d/g or 1)) or -+ (m/\b(eor|and)/ and (s/\.[248][sdh]/.16b/g or 1)) or -+ (m/\bum(ul|la)l\b/ and (s/\.4s/.2s/g or 1)) or -+ (m/\bum(ul|la)l2\b/ and (s/\.2s/.4s/g or 1)) or -+ (m/\bst[1-4]\s+{[^}]+}\[/ and (s/\.[24]d/.s/g or 1)); -+ -+ s/\.[124]([sd])\[/.$1\[/; -+ s/w#x([0-9]+)/w$1/g; -+ -+ print $_,"\n"; -+} -+close STDOUT; ---- /dev/null -+++ b/arch/arm64/crypto/poly1305-core.S_shipped -@@ -0,0 +1,835 @@ -+#ifndef __KERNEL__ -+# include "arm_arch.h" -+.extern OPENSSL_armcap_P -+#endif -+ -+.text -+ -+// forward "declarations" are required for Apple -+.globl poly1305_blocks -+.globl poly1305_emit -+ -+.globl poly1305_init -+.type poly1305_init,%function -+.align 5 -+poly1305_init: -+ cmp x1,xzr -+ stp xzr,xzr,[x0] // zero hash value -+ stp xzr,xzr,[x0,#16] // [along with is_base2_26] -+ -+ csel x0,xzr,x0,eq -+ b.eq .Lno_key -+ -+#ifndef __KERNEL__ -+ adrp x17,OPENSSL_armcap_P -+ ldr 
w17,[x17,#:lo12:OPENSSL_armcap_P] -+#endif -+ -+ ldp x7,x8,[x1] // load key -+ mov x9,#0xfffffffc0fffffff -+ movk x9,#0x0fff,lsl#48 -+#ifdef __AARCH64EB__ -+ rev x7,x7 // flip bytes -+ rev x8,x8 -+#endif -+ and x7,x7,x9 // &=0ffffffc0fffffff -+ and x9,x9,#-4 -+ and x8,x8,x9 // &=0ffffffc0ffffffc -+ mov w9,#-1 -+ stp x7,x8,[x0,#32] // save key value -+ str w9,[x0,#48] // impossible key power value -+ -+#ifndef __KERNEL__ -+ tst w17,#ARMV7_NEON -+ -+ adr x12,.Lpoly1305_blocks -+ adr x7,.Lpoly1305_blocks_neon -+ adr x13,.Lpoly1305_emit -+ -+ csel x12,x12,x7,eq -+ -+# ifdef __ILP32__ -+ stp w12,w13,[x2] -+# else -+ stp x12,x13,[x2] -+# endif -+#endif -+ mov x0,#1 -+.Lno_key: -+ ret -+.size poly1305_init,.-poly1305_init -+ -+.type poly1305_blocks,%function -+.align 5 -+poly1305_blocks: -+.Lpoly1305_blocks: -+ ands x2,x2,#-16 -+ b.eq .Lno_data -+ -+ ldp x4,x5,[x0] // load hash value -+ ldp x6,x17,[x0,#16] // [along with is_base2_26] -+ ldp x7,x8,[x0,#32] // load key value -+ -+#ifdef __AARCH64EB__ -+ lsr x12,x4,#32 -+ mov w13,w4 -+ lsr x14,x5,#32 -+ mov w15,w5 -+ lsr x16,x6,#32 -+#else -+ mov w12,w4 -+ lsr x13,x4,#32 -+ mov w14,w5 -+ lsr x15,x5,#32 -+ mov w16,w6 -+#endif -+ -+ add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64 -+ lsr x13,x14,#12 -+ adds x12,x12,x14,lsl#52 -+ add x13,x13,x15,lsl#14 -+ adc x13,x13,xzr -+ lsr x14,x16,#24 -+ adds x13,x13,x16,lsl#40 -+ adc x14,x14,xzr -+ -+ cmp x17,#0 // is_base2_26? 
-+ add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2) -+ csel x4,x4,x12,eq // choose between radixes -+ csel x5,x5,x13,eq -+ csel x6,x6,x14,eq -+ -+.Loop: -+ ldp x10,x11,[x1],#16 // load input -+ sub x2,x2,#16 -+#ifdef __AARCH64EB__ -+ rev x10,x10 -+ rev x11,x11 -+#endif -+ adds x4,x4,x10 // accumulate input -+ adcs x5,x5,x11 -+ -+ mul x12,x4,x7 // h0*r0 -+ adc x6,x6,x3 -+ umulh x13,x4,x7 -+ -+ mul x10,x5,x9 // h1*5*r1 -+ umulh x11,x5,x9 -+ -+ adds x12,x12,x10 -+ mul x10,x4,x8 // h0*r1 -+ adc x13,x13,x11 -+ umulh x14,x4,x8 -+ -+ adds x13,x13,x10 -+ mul x10,x5,x7 // h1*r0 -+ adc x14,x14,xzr -+ umulh x11,x5,x7 -+ -+ adds x13,x13,x10 -+ mul x10,x6,x9 // h2*5*r1 -+ adc x14,x14,x11 -+ mul x11,x6,x7 // h2*r0 -+ -+ adds x13,x13,x10 -+ adc x14,x14,x11 -+ -+ and x10,x14,#-4 // final reduction -+ and x6,x14,#3 -+ add x10,x10,x14,lsr#2 -+ adds x4,x12,x10 -+ adcs x5,x13,xzr -+ adc x6,x6,xzr -+ -+ cbnz x2,.Loop -+ -+ stp x4,x5,[x0] // store hash value -+ stp x6,xzr,[x0,#16] // [and clear is_base2_26] -+ -+.Lno_data: -+ ret -+.size poly1305_blocks,.-poly1305_blocks -+ -+.type poly1305_emit,%function -+.align 5 -+poly1305_emit: -+.Lpoly1305_emit: -+ ldp x4,x5,[x0] // load hash base 2^64 -+ ldp x6,x7,[x0,#16] // [along with is_base2_26] -+ ldp x10,x11,[x2] // load nonce -+ -+#ifdef __AARCH64EB__ -+ lsr x12,x4,#32 -+ mov w13,w4 -+ lsr x14,x5,#32 -+ mov w15,w5 -+ lsr x16,x6,#32 -+#else -+ mov w12,w4 -+ lsr x13,x4,#32 -+ mov w14,w5 -+ lsr x15,x5,#32 -+ mov w16,w6 -+#endif -+ -+ add x12,x12,x13,lsl#26 // base 2^26 -> base 2^64 -+ lsr x13,x14,#12 -+ adds x12,x12,x14,lsl#52 -+ add x13,x13,x15,lsl#14 -+ adc x13,x13,xzr -+ lsr x14,x16,#24 -+ adds x13,x13,x16,lsl#40 -+ adc x14,x14,xzr -+ -+ cmp x7,#0 // is_base2_26? 
-+ csel x4,x4,x12,eq // choose between radixes -+ csel x5,x5,x13,eq -+ csel x6,x6,x14,eq -+ -+ adds x12,x4,#5 // compare to modulus -+ adcs x13,x5,xzr -+ adc x14,x6,xzr -+ -+ tst x14,#-4 // see if it's carried/borrowed -+ -+ csel x4,x4,x12,eq -+ csel x5,x5,x13,eq -+ -+#ifdef __AARCH64EB__ -+ ror x10,x10,#32 // flip nonce words -+ ror x11,x11,#32 -+#endif -+ adds x4,x4,x10 // accumulate nonce -+ adc x5,x5,x11 -+#ifdef __AARCH64EB__ -+ rev x4,x4 // flip output bytes -+ rev x5,x5 -+#endif -+ stp x4,x5,[x1] // write result -+ -+ ret -+.size poly1305_emit,.-poly1305_emit -+.type poly1305_mult,%function -+.align 5 -+poly1305_mult: -+ mul x12,x4,x7 // h0*r0 -+ umulh x13,x4,x7 -+ -+ mul x10,x5,x9 // h1*5*r1 -+ umulh x11,x5,x9 -+ -+ adds x12,x12,x10 -+ mul x10,x4,x8 // h0*r1 -+ adc x13,x13,x11 -+ umulh x14,x4,x8 -+ -+ adds x13,x13,x10 -+ mul x10,x5,x7 // h1*r0 -+ adc x14,x14,xzr -+ umulh x11,x5,x7 -+ -+ adds x13,x13,x10 -+ mul x10,x6,x9 // h2*5*r1 -+ adc x14,x14,x11 -+ mul x11,x6,x7 // h2*r0 -+ -+ adds x13,x13,x10 -+ adc x14,x14,x11 -+ -+ and x10,x14,#-4 // final reduction -+ and x6,x14,#3 -+ add x10,x10,x14,lsr#2 -+ adds x4,x12,x10 -+ adcs x5,x13,xzr -+ adc x6,x6,xzr -+ -+ ret -+.size poly1305_mult,.-poly1305_mult -+ -+.type poly1305_splat,%function -+.align 4 -+poly1305_splat: -+ and x12,x4,#0x03ffffff // base 2^64 -> base 2^26 -+ ubfx x13,x4,#26,#26 -+ extr x14,x5,x4,#52 -+ and x14,x14,#0x03ffffff -+ ubfx x15,x5,#14,#26 -+ extr x16,x6,x5,#40 -+ -+ str w12,[x0,#16*0] // r0 -+ add w12,w13,w13,lsl#2 // r1*5 -+ str w13,[x0,#16*1] // r1 -+ add w13,w14,w14,lsl#2 // r2*5 -+ str w12,[x0,#16*2] // s1 -+ str w14,[x0,#16*3] // r2 -+ add w14,w15,w15,lsl#2 // r3*5 -+ str w13,[x0,#16*4] // s2 -+ str w15,[x0,#16*5] // r3 -+ add w15,w16,w16,lsl#2 // r4*5 -+ str w14,[x0,#16*6] // s3 -+ str w16,[x0,#16*7] // r4 -+ str w15,[x0,#16*8] // s4 -+ -+ ret -+.size poly1305_splat,.-poly1305_splat -+ -+#ifdef __KERNEL__ -+.globl poly1305_blocks_neon -+#endif -+.type poly1305_blocks_neon,%function 
-+.align 5 -+poly1305_blocks_neon: -+.Lpoly1305_blocks_neon: -+ ldr x17,[x0,#24] -+ cmp x2,#128 -+ b.lo .Lpoly1305_blocks -+ -+ .inst 0xd503233f // paciasp -+ stp x29,x30,[sp,#-80]! -+ add x29,sp,#0 -+ -+ stp d8,d9,[sp,#16] // meet ABI requirements -+ stp d10,d11,[sp,#32] -+ stp d12,d13,[sp,#48] -+ stp d14,d15,[sp,#64] -+ -+ cbz x17,.Lbase2_64_neon -+ -+ ldp w10,w11,[x0] // load hash value base 2^26 -+ ldp w12,w13,[x0,#8] -+ ldr w14,[x0,#16] -+ -+ tst x2,#31 -+ b.eq .Leven_neon -+ -+ ldp x7,x8,[x0,#32] // load key value -+ -+ add x4,x10,x11,lsl#26 // base 2^26 -> base 2^64 -+ lsr x5,x12,#12 -+ adds x4,x4,x12,lsl#52 -+ add x5,x5,x13,lsl#14 -+ adc x5,x5,xzr -+ lsr x6,x14,#24 -+ adds x5,x5,x14,lsl#40 -+ adc x14,x6,xzr // can be partially reduced... -+ -+ ldp x12,x13,[x1],#16 // load input -+ sub x2,x2,#16 -+ add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2) -+ -+#ifdef __AARCH64EB__ -+ rev x12,x12 -+ rev x13,x13 -+#endif -+ adds x4,x4,x12 // accumulate input -+ adcs x5,x5,x13 -+ adc x6,x6,x3 -+ -+ bl poly1305_mult -+ -+ and x10,x4,#0x03ffffff // base 2^64 -> base 2^26 -+ ubfx x11,x4,#26,#26 -+ extr x12,x5,x4,#52 -+ and x12,x12,#0x03ffffff -+ ubfx x13,x5,#14,#26 -+ extr x14,x6,x5,#40 -+ -+ b .Leven_neon -+ -+.align 4 -+.Lbase2_64_neon: -+ ldp x7,x8,[x0,#32] // load key value -+ -+ ldp x4,x5,[x0] // load hash value base 2^64 -+ ldr x6,[x0,#16] -+ -+ tst x2,#31 -+ b.eq .Linit_neon -+ -+ ldp x12,x13,[x1],#16 // load input -+ sub x2,x2,#16 -+ add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2) -+#ifdef __AARCH64EB__ -+ rev x12,x12 -+ rev x13,x13 -+#endif -+ adds x4,x4,x12 // accumulate input -+ adcs x5,x5,x13 -+ adc x6,x6,x3 -+ -+ bl poly1305_mult -+ -+.Linit_neon: -+ ldr w17,[x0,#48] // first table element -+ and x10,x4,#0x03ffffff // base 2^64 -> base 2^26 -+ ubfx x11,x4,#26,#26 -+ extr x12,x5,x4,#52 -+ and x12,x12,#0x03ffffff -+ ubfx x13,x5,#14,#26 -+ extr x14,x6,x5,#40 -+ -+ cmp w17,#-1 // is value impossible? 
-+ b.ne .Leven_neon -+ -+ fmov d24,x10 -+ fmov d25,x11 -+ fmov d26,x12 -+ fmov d27,x13 -+ fmov d28,x14 -+ -+ ////////////////////////////////// initialize r^n table -+ mov x4,x7 // r^1 -+ add x9,x8,x8,lsr#2 // s1 = r1 + (r1 >> 2) -+ mov x5,x8 -+ mov x6,xzr -+ add x0,x0,#48+12 -+ bl poly1305_splat -+ -+ bl poly1305_mult // r^2 -+ sub x0,x0,#4 -+ bl poly1305_splat -+ -+ bl poly1305_mult // r^3 -+ sub x0,x0,#4 -+ bl poly1305_splat -+ -+ bl poly1305_mult // r^4 -+ sub x0,x0,#4 -+ bl poly1305_splat -+ sub x0,x0,#48 // restore original x0 -+ b .Ldo_neon -+ -+.align 4 -+.Leven_neon: -+ fmov d24,x10 -+ fmov d25,x11 -+ fmov d26,x12 -+ fmov d27,x13 -+ fmov d28,x14 -+ -+.Ldo_neon: -+ ldp x8,x12,[x1,#32] // inp[2:3] -+ subs x2,x2,#64 -+ ldp x9,x13,[x1,#48] -+ add x16,x1,#96 -+ adr x17,.Lzeros -+ -+ lsl x3,x3,#24 -+ add x15,x0,#48 -+ -+#ifdef __AARCH64EB__ -+ rev x8,x8 -+ rev x12,x12 -+ rev x9,x9 -+ rev x13,x13 -+#endif -+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 -+ and x5,x9,#0x03ffffff -+ ubfx x6,x8,#26,#26 -+ ubfx x7,x9,#26,#26 -+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 -+ extr x8,x12,x8,#52 -+ extr x9,x13,x9,#52 -+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 -+ fmov d14,x4 -+ and x8,x8,#0x03ffffff -+ and x9,x9,#0x03ffffff -+ ubfx x10,x12,#14,#26 -+ ubfx x11,x13,#14,#26 -+ add x12,x3,x12,lsr#40 -+ add x13,x3,x13,lsr#40 -+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 -+ fmov d15,x6 -+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 -+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 -+ fmov d16,x8 -+ fmov d17,x10 -+ fmov d18,x12 -+ -+ ldp x8,x12,[x1],#16 // inp[0:1] -+ ldp x9,x13,[x1],#48 -+ -+ ld1 {v0.4s,v1.4s,v2.4s,v3.4s},[x15],#64 -+ ld1 {v4.4s,v5.4s,v6.4s,v7.4s},[x15],#64 -+ ld1 {v8.4s},[x15] -+ -+#ifdef __AARCH64EB__ -+ rev x8,x8 -+ rev x12,x12 -+ rev x9,x9 -+ rev x13,x13 -+#endif -+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 -+ and x5,x9,#0x03ffffff -+ ubfx x6,x8,#26,#26 -+ ubfx x7,x9,#26,#26 -+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 -+ extr x8,x12,x8,#52 -+ extr 
x9,x13,x9,#52 -+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 -+ fmov d9,x4 -+ and x8,x8,#0x03ffffff -+ and x9,x9,#0x03ffffff -+ ubfx x10,x12,#14,#26 -+ ubfx x11,x13,#14,#26 -+ add x12,x3,x12,lsr#40 -+ add x13,x3,x13,lsr#40 -+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 -+ fmov d10,x6 -+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 -+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 -+ movi v31.2d,#-1 -+ fmov d11,x8 -+ fmov d12,x10 -+ fmov d13,x12 -+ ushr v31.2d,v31.2d,#38 -+ -+ b.ls .Lskip_loop -+ -+.align 4 -+.Loop_neon: -+ //////////////////////////////////////////////////////////////// -+ // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 -+ // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r -+ // ___________________/ -+ // ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 -+ // ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r -+ // ___________________/ ____________________/ -+ // -+ // Note that we start with inp[2:3]*r^2. This is because it -+ // doesn't depend on reduction in previous iteration. 
-+ //////////////////////////////////////////////////////////////// -+ // d4 = h0*r4 + h1*r3 + h2*r2 + h3*r1 + h4*r0 -+ // d3 = h0*r3 + h1*r2 + h2*r1 + h3*r0 + h4*5*r4 -+ // d2 = h0*r2 + h1*r1 + h2*r0 + h3*5*r4 + h4*5*r3 -+ // d1 = h0*r1 + h1*r0 + h2*5*r4 + h3*5*r3 + h4*5*r2 -+ // d0 = h0*r0 + h1*5*r4 + h2*5*r3 + h3*5*r2 + h4*5*r1 -+ -+ subs x2,x2,#64 -+ umull v23.2d,v14.2s,v7.s[2] -+ csel x16,x17,x16,lo -+ umull v22.2d,v14.2s,v5.s[2] -+ umull v21.2d,v14.2s,v3.s[2] -+ ldp x8,x12,[x16],#16 // inp[2:3] (or zero) -+ umull v20.2d,v14.2s,v1.s[2] -+ ldp x9,x13,[x16],#48 -+ umull v19.2d,v14.2s,v0.s[2] -+#ifdef __AARCH64EB__ -+ rev x8,x8 -+ rev x12,x12 -+ rev x9,x9 -+ rev x13,x13 -+#endif -+ -+ umlal v23.2d,v15.2s,v5.s[2] -+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 -+ umlal v22.2d,v15.2s,v3.s[2] -+ and x5,x9,#0x03ffffff -+ umlal v21.2d,v15.2s,v1.s[2] -+ ubfx x6,x8,#26,#26 -+ umlal v20.2d,v15.2s,v0.s[2] -+ ubfx x7,x9,#26,#26 -+ umlal v19.2d,v15.2s,v8.s[2] -+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 -+ -+ umlal v23.2d,v16.2s,v3.s[2] -+ extr x8,x12,x8,#52 -+ umlal v22.2d,v16.2s,v1.s[2] -+ extr x9,x13,x9,#52 -+ umlal v21.2d,v16.2s,v0.s[2] -+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 -+ umlal v20.2d,v16.2s,v8.s[2] -+ fmov d14,x4 -+ umlal v19.2d,v16.2s,v6.s[2] -+ and x8,x8,#0x03ffffff -+ -+ umlal v23.2d,v17.2s,v1.s[2] -+ and x9,x9,#0x03ffffff -+ umlal v22.2d,v17.2s,v0.s[2] -+ ubfx x10,x12,#14,#26 -+ umlal v21.2d,v17.2s,v8.s[2] -+ ubfx x11,x13,#14,#26 -+ umlal v20.2d,v17.2s,v6.s[2] -+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 -+ umlal v19.2d,v17.2s,v4.s[2] -+ fmov d15,x6 -+ -+ add v11.2s,v11.2s,v26.2s -+ add x12,x3,x12,lsr#40 -+ umlal v23.2d,v18.2s,v0.s[2] -+ add x13,x3,x13,lsr#40 -+ umlal v22.2d,v18.2s,v8.s[2] -+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 -+ umlal v21.2d,v18.2s,v6.s[2] -+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 -+ umlal v20.2d,v18.2s,v4.s[2] -+ fmov d16,x8 -+ umlal v19.2d,v18.2s,v2.s[2] -+ fmov d17,x10 -+ -+ 
//////////////////////////////////////////////////////////////// -+ // (hash+inp[0:1])*r^4 and accumulate -+ -+ add v9.2s,v9.2s,v24.2s -+ fmov d18,x12 -+ umlal v22.2d,v11.2s,v1.s[0] -+ ldp x8,x12,[x1],#16 // inp[0:1] -+ umlal v19.2d,v11.2s,v6.s[0] -+ ldp x9,x13,[x1],#48 -+ umlal v23.2d,v11.2s,v3.s[0] -+ umlal v20.2d,v11.2s,v8.s[0] -+ umlal v21.2d,v11.2s,v0.s[0] -+#ifdef __AARCH64EB__ -+ rev x8,x8 -+ rev x12,x12 -+ rev x9,x9 -+ rev x13,x13 -+#endif -+ -+ add v10.2s,v10.2s,v25.2s -+ umlal v22.2d,v9.2s,v5.s[0] -+ umlal v23.2d,v9.2s,v7.s[0] -+ and x4,x8,#0x03ffffff // base 2^64 -> base 2^26 -+ umlal v21.2d,v9.2s,v3.s[0] -+ and x5,x9,#0x03ffffff -+ umlal v19.2d,v9.2s,v0.s[0] -+ ubfx x6,x8,#26,#26 -+ umlal v20.2d,v9.2s,v1.s[0] -+ ubfx x7,x9,#26,#26 -+ -+ add v12.2s,v12.2s,v27.2s -+ add x4,x4,x5,lsl#32 // bfi x4,x5,#32,#32 -+ umlal v22.2d,v10.2s,v3.s[0] -+ extr x8,x12,x8,#52 -+ umlal v23.2d,v10.2s,v5.s[0] -+ extr x9,x13,x9,#52 -+ umlal v19.2d,v10.2s,v8.s[0] -+ add x6,x6,x7,lsl#32 // bfi x6,x7,#32,#32 -+ umlal v21.2d,v10.2s,v1.s[0] -+ fmov d9,x4 -+ umlal v20.2d,v10.2s,v0.s[0] -+ and x8,x8,#0x03ffffff -+ -+ add v13.2s,v13.2s,v28.2s -+ and x9,x9,#0x03ffffff -+ umlal v22.2d,v12.2s,v0.s[0] -+ ubfx x10,x12,#14,#26 -+ umlal v19.2d,v12.2s,v4.s[0] -+ ubfx x11,x13,#14,#26 -+ umlal v23.2d,v12.2s,v1.s[0] -+ add x8,x8,x9,lsl#32 // bfi x8,x9,#32,#32 -+ umlal v20.2d,v12.2s,v6.s[0] -+ fmov d10,x6 -+ umlal v21.2d,v12.2s,v8.s[0] -+ add x12,x3,x12,lsr#40 -+ -+ umlal v22.2d,v13.2s,v8.s[0] -+ add x13,x3,x13,lsr#40 -+ umlal v19.2d,v13.2s,v2.s[0] -+ add x10,x10,x11,lsl#32 // bfi x10,x11,#32,#32 -+ umlal v23.2d,v13.2s,v0.s[0] -+ add x12,x12,x13,lsl#32 // bfi x12,x13,#32,#32 -+ umlal v20.2d,v13.2s,v4.s[0] -+ fmov d11,x8 -+ umlal v21.2d,v13.2s,v6.s[0] -+ fmov d12,x10 -+ fmov d13,x12 -+ -+ ///////////////////////////////////////////////////////////////// -+ // lazy reduction as discussed in "NEON crypto" by D.J. Bernstein -+ // and P. 
Schwabe -+ // -+ // [see discussion in poly1305-armv4 module] -+ -+ ushr v29.2d,v22.2d,#26 -+ xtn v27.2s,v22.2d -+ ushr v30.2d,v19.2d,#26 -+ and v19.16b,v19.16b,v31.16b -+ add v23.2d,v23.2d,v29.2d // h3 -> h4 -+ bic v27.2s,#0xfc,lsl#24 // &=0x03ffffff -+ add v20.2d,v20.2d,v30.2d // h0 -> h1 -+ -+ ushr v29.2d,v23.2d,#26 -+ xtn v28.2s,v23.2d -+ ushr v30.2d,v20.2d,#26 -+ xtn v25.2s,v20.2d -+ bic v28.2s,#0xfc,lsl#24 -+ add v21.2d,v21.2d,v30.2d // h1 -> h2 -+ -+ add v19.2d,v19.2d,v29.2d -+ shl v29.2d,v29.2d,#2 -+ shrn v30.2s,v21.2d,#26 -+ xtn v26.2s,v21.2d -+ add v19.2d,v19.2d,v29.2d // h4 -> h0 -+ bic v25.2s,#0xfc,lsl#24 -+ add v27.2s,v27.2s,v30.2s // h2 -> h3 -+ bic v26.2s,#0xfc,lsl#24 -+ -+ shrn v29.2s,v19.2d,#26 -+ xtn v24.2s,v19.2d -+ ushr v30.2s,v27.2s,#26 -+ bic v27.2s,#0xfc,lsl#24 -+ bic v24.2s,#0xfc,lsl#24 -+ add v25.2s,v25.2s,v29.2s // h0 -> h1 -+ add v28.2s,v28.2s,v30.2s // h3 -> h4 -+ -+ b.hi .Loop_neon -+ -+.Lskip_loop: -+ dup v16.2d,v16.d[0] -+ add v11.2s,v11.2s,v26.2s -+ -+ //////////////////////////////////////////////////////////////// -+ // multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 -+ -+ adds x2,x2,#32 -+ b.ne .Long_tail -+ -+ dup v16.2d,v11.d[0] -+ add v14.2s,v9.2s,v24.2s -+ add v17.2s,v12.2s,v27.2s -+ add v15.2s,v10.2s,v25.2s -+ add v18.2s,v13.2s,v28.2s -+ -+.Long_tail: -+ dup v14.2d,v14.d[0] -+ umull2 v19.2d,v16.4s,v6.4s -+ umull2 v22.2d,v16.4s,v1.4s -+ umull2 v23.2d,v16.4s,v3.4s -+ umull2 v21.2d,v16.4s,v0.4s -+ umull2 v20.2d,v16.4s,v8.4s -+ -+ dup v15.2d,v15.d[0] -+ umlal2 v19.2d,v14.4s,v0.4s -+ umlal2 v21.2d,v14.4s,v3.4s -+ umlal2 v22.2d,v14.4s,v5.4s -+ umlal2 v23.2d,v14.4s,v7.4s -+ umlal2 v20.2d,v14.4s,v1.4s -+ -+ dup v17.2d,v17.d[0] -+ umlal2 v19.2d,v15.4s,v8.4s -+ umlal2 v22.2d,v15.4s,v3.4s -+ umlal2 v21.2d,v15.4s,v1.4s -+ umlal2 v23.2d,v15.4s,v5.4s -+ umlal2 v20.2d,v15.4s,v0.4s -+ -+ dup v18.2d,v18.d[0] -+ umlal2 v22.2d,v17.4s,v0.4s -+ umlal2 v23.2d,v17.4s,v1.4s -+ umlal2 v19.2d,v17.4s,v4.4s -+ umlal2 v20.2d,v17.4s,v6.4s -+ umlal2 
v21.2d,v17.4s,v8.4s -+ -+ umlal2 v22.2d,v18.4s,v8.4s -+ umlal2 v19.2d,v18.4s,v2.4s -+ umlal2 v23.2d,v18.4s,v0.4s -+ umlal2 v20.2d,v18.4s,v4.4s -+ umlal2 v21.2d,v18.4s,v6.4s -+ -+ b.eq .Lshort_tail -+ -+ //////////////////////////////////////////////////////////////// -+ // (hash+inp[0:1])*r^4:r^3 and accumulate -+ -+ add v9.2s,v9.2s,v24.2s -+ umlal v22.2d,v11.2s,v1.2s -+ umlal v19.2d,v11.2s,v6.2s -+ umlal v23.2d,v11.2s,v3.2s -+ umlal v20.2d,v11.2s,v8.2s -+ umlal v21.2d,v11.2s,v0.2s -+ -+ add v10.2s,v10.2s,v25.2s -+ umlal v22.2d,v9.2s,v5.2s -+ umlal v19.2d,v9.2s,v0.2s -+ umlal v23.2d,v9.2s,v7.2s -+ umlal v20.2d,v9.2s,v1.2s -+ umlal v21.2d,v9.2s,v3.2s -+ -+ add v12.2s,v12.2s,v27.2s -+ umlal v22.2d,v10.2s,v3.2s -+ umlal v19.2d,v10.2s,v8.2s -+ umlal v23.2d,v10.2s,v5.2s -+ umlal v20.2d,v10.2s,v0.2s -+ umlal v21.2d,v10.2s,v1.2s -+ -+ add v13.2s,v13.2s,v28.2s -+ umlal v22.2d,v12.2s,v0.2s -+ umlal v19.2d,v12.2s,v4.2s -+ umlal v23.2d,v12.2s,v1.2s -+ umlal v20.2d,v12.2s,v6.2s -+ umlal v21.2d,v12.2s,v8.2s -+ -+ umlal v22.2d,v13.2s,v8.2s -+ umlal v19.2d,v13.2s,v2.2s -+ umlal v23.2d,v13.2s,v0.2s -+ umlal v20.2d,v13.2s,v4.2s -+ umlal v21.2d,v13.2s,v6.2s -+ -+.Lshort_tail: -+ //////////////////////////////////////////////////////////////// -+ // horizontal add -+ -+ addp v22.2d,v22.2d,v22.2d -+ ldp d8,d9,[sp,#16] // meet ABI requirements -+ addp v19.2d,v19.2d,v19.2d -+ ldp d10,d11,[sp,#32] -+ addp v23.2d,v23.2d,v23.2d -+ ldp d12,d13,[sp,#48] -+ addp v20.2d,v20.2d,v20.2d -+ ldp d14,d15,[sp,#64] -+ addp v21.2d,v21.2d,v21.2d -+ ldr x30,[sp,#8] -+ .inst 0xd50323bf // autiasp -+ -+ //////////////////////////////////////////////////////////////// -+ // lazy reduction, but without narrowing -+ -+ ushr v29.2d,v22.2d,#26 -+ and v22.16b,v22.16b,v31.16b -+ ushr v30.2d,v19.2d,#26 -+ and v19.16b,v19.16b,v31.16b -+ -+ add v23.2d,v23.2d,v29.2d // h3 -> h4 -+ add v20.2d,v20.2d,v30.2d // h0 -> h1 -+ -+ ushr v29.2d,v23.2d,#26 -+ and v23.16b,v23.16b,v31.16b -+ ushr v30.2d,v20.2d,#26 -+ and 
v20.16b,v20.16b,v31.16b -+ add v21.2d,v21.2d,v30.2d // h1 -> h2 -+ -+ add v19.2d,v19.2d,v29.2d -+ shl v29.2d,v29.2d,#2 -+ ushr v30.2d,v21.2d,#26 -+ and v21.16b,v21.16b,v31.16b -+ add v19.2d,v19.2d,v29.2d // h4 -> h0 -+ add v22.2d,v22.2d,v30.2d // h2 -> h3 -+ -+ ushr v29.2d,v19.2d,#26 -+ and v19.16b,v19.16b,v31.16b -+ ushr v30.2d,v22.2d,#26 -+ and v22.16b,v22.16b,v31.16b -+ add v20.2d,v20.2d,v29.2d // h0 -> h1 -+ add v23.2d,v23.2d,v30.2d // h3 -> h4 -+ -+ //////////////////////////////////////////////////////////////// -+ // write the result, can be partially reduced -+ -+ st4 {v19.s,v20.s,v21.s,v22.s}[0],[x0],#16 -+ mov x4,#1 -+ st1 {v23.s}[0],[x0] -+ str x4,[x0,#8] // set is_base2_26 -+ -+ ldr x29,[sp],#80 -+ ret -+.size poly1305_blocks_neon,.-poly1305_blocks_neon -+ -+.align 5 -+.Lzeros: -+.long 0,0,0,0,0,0,0,0 -+.asciz "Poly1305 for ARMv8, CRYPTOGAMS by @dot-asm" -+.align 2 -+#if !defined(__KERNEL__) && !defined(_WIN64) -+.comm OPENSSL_armcap_P,4,4 -+.hidden OPENSSL_armcap_P -+#endif ---- /dev/null -+++ b/arch/arm64/crypto/poly1305-glue.c -@@ -0,0 +1,237 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * OpenSSL/Cryptogams accelerated Poly1305 transform for arm64 -+ * -+ * Copyright (C) 2019 Linaro Ltd. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+asmlinkage void poly1305_init_arm64(void *state, const u8 *key); -+asmlinkage void poly1305_blocks(void *state, const u8 *src, u32 len, u32 hibit); -+asmlinkage void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit); -+asmlinkage void poly1305_emit(void *state, __le32 *digest, const u32 *nonce); -+ -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); -+ -+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) -+{ -+ poly1305_init_arm64(&dctx->h, key); -+ dctx->s[0] = get_unaligned_le32(key + 16); -+ dctx->s[1] = get_unaligned_le32(key + 20); -+ dctx->s[2] = get_unaligned_le32(key + 24); -+ dctx->s[3] = get_unaligned_le32(key + 28); -+ dctx->buflen = 0; -+} -+EXPORT_SYMBOL(poly1305_init_arch); -+ -+static int neon_poly1305_init(struct shash_desc *desc) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ dctx->buflen = 0; -+ dctx->rset = 0; -+ dctx->sset = false; -+ -+ return 0; -+} -+ -+static void neon_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, -+ u32 len, u32 hibit, bool do_neon) -+{ -+ if (unlikely(!dctx->sset)) { -+ if (!dctx->rset) { -+ poly1305_init_arch(dctx, src); -+ src += POLY1305_BLOCK_SIZE; -+ len -= POLY1305_BLOCK_SIZE; -+ dctx->rset = 1; -+ } -+ if (len >= POLY1305_BLOCK_SIZE) { -+ dctx->s[0] = get_unaligned_le32(src + 0); -+ dctx->s[1] = get_unaligned_le32(src + 4); -+ dctx->s[2] = get_unaligned_le32(src + 8); -+ dctx->s[3] = get_unaligned_le32(src + 12); -+ src += POLY1305_BLOCK_SIZE; -+ len -= POLY1305_BLOCK_SIZE; -+ dctx->sset = true; -+ } -+ if (len < POLY1305_BLOCK_SIZE) -+ return; -+ } -+ -+ len &= ~(POLY1305_BLOCK_SIZE - 1); -+ -+ if (static_branch_likely(&have_neon) && likely(do_neon)) -+ poly1305_blocks_neon(&dctx->h, src, len, hibit); -+ else -+ poly1305_blocks(&dctx->h, src, len, hibit); -+} -+ -+static void 
neon_poly1305_do_update(struct poly1305_desc_ctx *dctx, -+ const u8 *src, u32 len, bool do_neon) -+{ -+ if (unlikely(dctx->buflen)) { -+ u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen); -+ -+ memcpy(dctx->buf + dctx->buflen, src, bytes); -+ src += bytes; -+ len -= bytes; -+ dctx->buflen += bytes; -+ -+ if (dctx->buflen == POLY1305_BLOCK_SIZE) { -+ neon_poly1305_blocks(dctx, dctx->buf, -+ POLY1305_BLOCK_SIZE, 1, false); -+ dctx->buflen = 0; -+ } -+ } -+ -+ if (likely(len >= POLY1305_BLOCK_SIZE)) { -+ neon_poly1305_blocks(dctx, src, len, 1, do_neon); -+ src += round_down(len, POLY1305_BLOCK_SIZE); -+ len %= POLY1305_BLOCK_SIZE; -+ } -+ -+ if (unlikely(len)) { -+ dctx->buflen = len; -+ memcpy(dctx->buf, src, len); -+ } -+} -+ -+static int neon_poly1305_update(struct shash_desc *desc, -+ const u8 *src, unsigned int srclen) -+{ -+ bool do_neon = crypto_simd_usable() && srclen > 128; -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ if (static_branch_likely(&have_neon) && do_neon) -+ kernel_neon_begin(); -+ neon_poly1305_do_update(dctx, src, srclen, do_neon); -+ if (static_branch_likely(&have_neon) && do_neon) -+ kernel_neon_end(); -+ return 0; -+} -+ -+void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, -+ unsigned int nbytes) -+{ -+ if (unlikely(dctx->buflen)) { -+ u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen); -+ -+ memcpy(dctx->buf + dctx->buflen, src, bytes); -+ src += bytes; -+ nbytes -= bytes; -+ dctx->buflen += bytes; -+ -+ if (dctx->buflen == POLY1305_BLOCK_SIZE) { -+ poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 1); -+ dctx->buflen = 0; -+ } -+ } -+ -+ if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { -+ unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); -+ -+ if (static_branch_likely(&have_neon) && crypto_simd_usable()) { -+ kernel_neon_begin(); -+ poly1305_blocks_neon(&dctx->h, src, len, 1); -+ kernel_neon_end(); -+ } else { -+ poly1305_blocks(&dctx->h, src, len, 1); -+ } -+ src += len; 
-+ nbytes %= POLY1305_BLOCK_SIZE; -+ } -+ -+ if (unlikely(nbytes)) { -+ dctx->buflen = nbytes; -+ memcpy(dctx->buf, src, nbytes); -+ } -+} -+EXPORT_SYMBOL(poly1305_update_arch); -+ -+void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) -+{ -+ __le32 digest[4]; -+ u64 f = 0; -+ -+ if (unlikely(dctx->buflen)) { -+ dctx->buf[dctx->buflen++] = 1; -+ memset(dctx->buf + dctx->buflen, 0, -+ POLY1305_BLOCK_SIZE - dctx->buflen); -+ poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); -+ } -+ -+ poly1305_emit(&dctx->h, digest, dctx->s); -+ -+ /* mac = (h + s) % (2^128) */ -+ f = (f >> 32) + le32_to_cpu(digest[0]); -+ put_unaligned_le32(f, dst); -+ f = (f >> 32) + le32_to_cpu(digest[1]); -+ put_unaligned_le32(f, dst + 4); -+ f = (f >> 32) + le32_to_cpu(digest[2]); -+ put_unaligned_le32(f, dst + 8); -+ f = (f >> 32) + le32_to_cpu(digest[3]); -+ put_unaligned_le32(f, dst + 12); -+ -+ *dctx = (struct poly1305_desc_ctx){}; -+} -+EXPORT_SYMBOL(poly1305_final_arch); -+ -+static int neon_poly1305_final(struct shash_desc *desc, u8 *dst) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ if (unlikely(!dctx->sset)) -+ return -ENOKEY; -+ -+ poly1305_final_arch(dctx, dst); -+ return 0; -+} -+ -+static struct shash_alg neon_poly1305_alg = { -+ .init = neon_poly1305_init, -+ .update = neon_poly1305_update, -+ .final = neon_poly1305_final, -+ .digestsize = POLY1305_DIGEST_SIZE, -+ .descsize = sizeof(struct poly1305_desc_ctx), -+ -+ .base.cra_name = "poly1305", -+ .base.cra_driver_name = "poly1305-neon", -+ .base.cra_priority = 200, -+ .base.cra_blocksize = POLY1305_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+}; -+ -+static int __init neon_poly1305_mod_init(void) -+{ -+ if (!cpu_have_named_feature(ASIMD)) -+ return 0; -+ -+ static_branch_enable(&have_neon); -+ -+ return crypto_register_shash(&neon_poly1305_alg); -+} -+ -+static void __exit neon_poly1305_mod_exit(void) -+{ -+ if (cpu_have_named_feature(ASIMD)) -+ 
crypto_unregister_shash(&neon_poly1305_alg); -+} -+ -+module_init(neon_poly1305_mod_init); -+module_exit(neon_poly1305_mod_exit); -+ -+MODULE_LICENSE("GPL v2"); -+MODULE_ALIAS_CRYPTO("poly1305"); -+MODULE_ALIAS_CRYPTO("poly1305-neon"); ---- a/lib/crypto/Kconfig -+++ b/lib/crypto/Kconfig -@@ -40,6 +40,7 @@ config CRYPTO_LIB_DES - config CRYPTO_LIB_POLY1305_RSIZE - int - default 4 if X86_64 -+ default 9 if ARM64 - default 1 - - config CRYPTO_ARCH_HAVE_LIB_POLY1305 diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0019-crypto-arm-poly1305-incorporate-OpenSSL-CRYPTOGAMS-N.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0019-crypto-arm-poly1305-incorporate-OpenSSL-CRYPTOGAMS-N.patch deleted file mode 100644 index 367b20fc3..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0019-crypto-arm-poly1305-incorporate-OpenSSL-CRYPTOGAMS-N.patch +++ /dev/null @@ -1,2776 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:25 +0100 -Subject: [PATCH] crypto: arm/poly1305 - incorporate OpenSSL/CRYPTOGAMS NEON - implementation - -commit a6b803b3ddc793d6db0c16f12fc12d30d20fa9cc upstream. - -This is a straight import of the OpenSSL/CRYPTOGAMS Poly1305 implementation -for NEON authored by Andy Polyakov, and contributed by him to the OpenSSL -project. The file 'poly1305-armv4.pl' is taken straight from this upstream -GitHub repository [0] at commit ec55a08dc0244ce570c4fc7cade330c60798952f, -and already contains all the changes required to build it as part of a -Linux kernel module. - -[0] https://github.com/dot-asm/cryptogams - -Co-developed-by: Andy Polyakov -Signed-off-by: Andy Polyakov -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/arm/crypto/Kconfig | 5 + - arch/arm/crypto/Makefile | 12 +- - arch/arm/crypto/poly1305-armv4.pl | 1236 +++++++++++++++++++++++ - arch/arm/crypto/poly1305-core.S_shipped | 1158 +++++++++++++++++++++ - arch/arm/crypto/poly1305-glue.c | 276 +++++ - lib/crypto/Kconfig | 2 +- - 6 files changed, 2687 insertions(+), 2 deletions(-) - create mode 100644 arch/arm/crypto/poly1305-armv4.pl - create mode 100644 arch/arm/crypto/poly1305-core.S_shipped - create mode 100644 arch/arm/crypto/poly1305-glue.c - ---- a/arch/arm/crypto/Kconfig -+++ b/arch/arm/crypto/Kconfig -@@ -131,6 +131,11 @@ config CRYPTO_CHACHA20_NEON - select CRYPTO_BLKCIPHER - select CRYPTO_ARCH_HAVE_LIB_CHACHA - -+config CRYPTO_POLY1305_ARM -+ tristate "Accelerated scalar and SIMD Poly1305 hash implementations" -+ select CRYPTO_HASH -+ select CRYPTO_ARCH_HAVE_LIB_POLY1305 -+ - config CRYPTO_NHPOLY1305_NEON - tristate "NEON accelerated NHPoly1305 hash function (for Adiantum)" - depends on KERNEL_MODE_NEON ---- a/arch/arm/crypto/Makefile -+++ b/arch/arm/crypto/Makefile -@@ -10,6 +10,7 @@ obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sh - obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o - obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o - obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o -+obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o - obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o - - ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o -@@ -55,12 +56,16 @@ crct10dif-arm-ce-y := crct10dif-ce-core. 
- crc32-arm-ce-y:= crc32-ce-core.o crc32-ce-glue.o - chacha-neon-y := chacha-scalar-core.o chacha-glue.o - chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o -+poly1305-arm-y := poly1305-core.o poly1305-glue.o - nhpoly1305-neon-y := nh-neon-core.o nhpoly1305-neon-glue.o - - ifdef REGENERATE_ARM_CRYPTO - quiet_cmd_perl = PERL $@ - cmd_perl = $(PERL) $(<) > $(@) - -+$(src)/poly1305-core.S_shipped: $(src)/poly1305-armv4.pl -+ $(call cmd,perl) -+ - $(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl - $(call cmd,perl) - -@@ -68,4 +73,9 @@ $(src)/sha512-core.S_shipped: $(src)/sha - $(call cmd,perl) - endif - --clean-files += sha256-core.S sha512-core.S -+clean-files += poly1305-core.S sha256-core.S sha512-core.S -+ -+# massage the perlasm code a bit so we only get the NEON routine if we need it -+poly1305-aflags-$(CONFIG_CPU_V7) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=5 -+poly1305-aflags-$(CONFIG_KERNEL_MODE_NEON) := -U__LINUX_ARM_ARCH__ -D__LINUX_ARM_ARCH__=7 -+AFLAGS_poly1305-core.o += $(poly1305-aflags-y) ---- /dev/null -+++ b/arch/arm/crypto/poly1305-armv4.pl -@@ -0,0 +1,1236 @@ -+#!/usr/bin/env perl -+# SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause -+# -+# ==================================================================== -+# Written by Andy Polyakov, @dot-asm, initially for the OpenSSL -+# project. -+# ==================================================================== -+# -+# IALU(*)/gcc-4.4 NEON -+# -+# ARM11xx(ARMv6) 7.78/+100% - -+# Cortex-A5 6.35/+130% 3.00 -+# Cortex-A8 6.25/+115% 2.36 -+# Cortex-A9 5.10/+95% 2.55 -+# Cortex-A15 3.85/+85% 1.25(**) -+# Snapdragon S4 5.70/+100% 1.48(**) -+# -+# (*) this is for -march=armv6, i.e. 
with bunch of ldrb loading data; -+# (**) these are trade-off results, they can be improved by ~8% but at -+# the cost of 15/12% regression on Cortex-A5/A7, it's even possible -+# to improve Cortex-A9 result, but then A5/A7 loose more than 20%; -+ -+$flavour = shift; -+if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; } -+else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} } -+ -+if ($flavour && $flavour ne "void") { -+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; -+ ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or -+ ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or -+ die "can't locate arm-xlate.pl"; -+ -+ open STDOUT,"| \"$^X\" $xlate $flavour $output"; -+} else { -+ open STDOUT,">$output"; -+} -+ -+($ctx,$inp,$len,$padbit)=map("r$_",(0..3)); -+ -+$code.=<<___; -+#ifndef __KERNEL__ -+# include "arm_arch.h" -+#else -+# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -+# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__ -+# define poly1305_init poly1305_init_arm -+# define poly1305_blocks poly1305_blocks_arm -+# define poly1305_emit poly1305_emit_arm -+.globl poly1305_blocks_neon -+#endif -+ -+#if defined(__thumb2__) -+.syntax unified -+.thumb -+#else -+.code 32 -+#endif -+ -+.text -+ -+.globl poly1305_emit -+.globl poly1305_blocks -+.globl poly1305_init -+.type poly1305_init,%function -+.align 5 -+poly1305_init: -+.Lpoly1305_init: -+ stmdb sp!,{r4-r11} -+ -+ eor r3,r3,r3 -+ cmp $inp,#0 -+ str r3,[$ctx,#0] @ zero hash value -+ str r3,[$ctx,#4] -+ str r3,[$ctx,#8] -+ str r3,[$ctx,#12] -+ str r3,[$ctx,#16] -+ str r3,[$ctx,#36] @ clear is_base2_26 -+ add $ctx,$ctx,#20 -+ -+#ifdef __thumb2__ -+ it eq -+#endif -+ moveq r0,#0 -+ beq .Lno_key -+ -+#if __ARM_MAX_ARCH__>=7 -+ mov r3,#-1 -+ str r3,[$ctx,#28] @ impossible key power value -+# ifndef __KERNEL__ -+ adr r11,.Lpoly1305_init -+ ldr r12,.LOPENSSL_armcap -+# endif -+#endif -+ ldrb r4,[$inp,#0] -+ mov r10,#0x0fffffff -+ ldrb r5,[$inp,#1] -+ and r3,r10,#-4 @ 0x0ffffffc -+ ldrb r6,[$inp,#2] -+ 
ldrb r7,[$inp,#3] -+ orr r4,r4,r5,lsl#8 -+ ldrb r5,[$inp,#4] -+ orr r4,r4,r6,lsl#16 -+ ldrb r6,[$inp,#5] -+ orr r4,r4,r7,lsl#24 -+ ldrb r7,[$inp,#6] -+ and r4,r4,r10 -+ -+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -+# if !defined(_WIN32) -+ ldr r12,[r11,r12] @ OPENSSL_armcap_P -+# endif -+# if defined(__APPLE__) || defined(_WIN32) -+ ldr r12,[r12] -+# endif -+#endif -+ ldrb r8,[$inp,#7] -+ orr r5,r5,r6,lsl#8 -+ ldrb r6,[$inp,#8] -+ orr r5,r5,r7,lsl#16 -+ ldrb r7,[$inp,#9] -+ orr r5,r5,r8,lsl#24 -+ ldrb r8,[$inp,#10] -+ and r5,r5,r3 -+ -+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -+ tst r12,#ARMV7_NEON @ check for NEON -+# ifdef __thumb2__ -+ adr r9,.Lpoly1305_blocks_neon -+ adr r11,.Lpoly1305_blocks -+ it ne -+ movne r11,r9 -+ adr r12,.Lpoly1305_emit -+ orr r11,r11,#1 @ thumb-ify addresses -+ orr r12,r12,#1 -+# else -+ add r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init) -+ ite eq -+ addeq r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init) -+ addne r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init) -+# endif -+#endif -+ ldrb r9,[$inp,#11] -+ orr r6,r6,r7,lsl#8 -+ ldrb r7,[$inp,#12] -+ orr r6,r6,r8,lsl#16 -+ ldrb r8,[$inp,#13] -+ orr r6,r6,r9,lsl#24 -+ ldrb r9,[$inp,#14] -+ and r6,r6,r3 -+ -+ ldrb r10,[$inp,#15] -+ orr r7,r7,r8,lsl#8 -+ str r4,[$ctx,#0] -+ orr r7,r7,r9,lsl#16 -+ str r5,[$ctx,#4] -+ orr r7,r7,r10,lsl#24 -+ str r6,[$ctx,#8] -+ and r7,r7,r3 -+ str r7,[$ctx,#12] -+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -+ stmia r2,{r11,r12} @ fill functions table -+ mov r0,#1 -+#else -+ mov r0,#0 -+#endif -+.Lno_key: -+ ldmia sp!,{r4-r11} -+#if __ARM_ARCH__>=5 -+ ret @ bx lr -+#else -+ tst lr,#1 -+ moveq pc,lr @ be binary compatible with V4, yet -+ bx lr @ interoperable with Thumb ISA:-) -+#endif -+.size poly1305_init,.-poly1305_init -+___ -+{ -+my ($h0,$h1,$h2,$h3,$h4,$r0,$r1,$r2,$r3)=map("r$_",(4..12)); -+my ($s1,$s2,$s3)=($r1,$r2,$r3); -+ -+$code.=<<___; -+.type poly1305_blocks,%function -+.align 5 -+poly1305_blocks: -+.Lpoly1305_blocks: -+ stmdb 
sp!,{r3-r11,lr} -+ -+ ands $len,$len,#-16 -+ beq .Lno_data -+ -+ add $len,$len,$inp @ end pointer -+ sub sp,sp,#32 -+ -+#if __ARM_ARCH__<7 -+ ldmia $ctx,{$h0-$r3} @ load context -+ add $ctx,$ctx,#20 -+ str $len,[sp,#16] @ offload stuff -+ str $ctx,[sp,#12] -+#else -+ ldr lr,[$ctx,#36] @ is_base2_26 -+ ldmia $ctx!,{$h0-$h4} @ load hash value -+ str $len,[sp,#16] @ offload stuff -+ str $ctx,[sp,#12] -+ -+ adds $r0,$h0,$h1,lsl#26 @ base 2^26 -> base 2^32 -+ mov $r1,$h1,lsr#6 -+ adcs $r1,$r1,$h2,lsl#20 -+ mov $r2,$h2,lsr#12 -+ adcs $r2,$r2,$h3,lsl#14 -+ mov $r3,$h3,lsr#18 -+ adcs $r3,$r3,$h4,lsl#8 -+ mov $len,#0 -+ teq lr,#0 -+ str $len,[$ctx,#16] @ clear is_base2_26 -+ adc $len,$len,$h4,lsr#24 -+ -+ itttt ne -+ movne $h0,$r0 @ choose between radixes -+ movne $h1,$r1 -+ movne $h2,$r2 -+ movne $h3,$r3 -+ ldmia $ctx,{$r0-$r3} @ load key -+ it ne -+ movne $h4,$len -+#endif -+ -+ mov lr,$inp -+ cmp $padbit,#0 -+ str $r1,[sp,#20] -+ str $r2,[sp,#24] -+ str $r3,[sp,#28] -+ b .Loop -+ -+.align 4 -+.Loop: -+#if __ARM_ARCH__<7 -+ ldrb r0,[lr],#16 @ load input -+# ifdef __thumb2__ -+ it hi -+# endif -+ addhi $h4,$h4,#1 @ 1<<128 -+ ldrb r1,[lr,#-15] -+ ldrb r2,[lr,#-14] -+ ldrb r3,[lr,#-13] -+ orr r1,r0,r1,lsl#8 -+ ldrb r0,[lr,#-12] -+ orr r2,r1,r2,lsl#16 -+ ldrb r1,[lr,#-11] -+ orr r3,r2,r3,lsl#24 -+ ldrb r2,[lr,#-10] -+ adds $h0,$h0,r3 @ accumulate input -+ -+ ldrb r3,[lr,#-9] -+ orr r1,r0,r1,lsl#8 -+ ldrb r0,[lr,#-8] -+ orr r2,r1,r2,lsl#16 -+ ldrb r1,[lr,#-7] -+ orr r3,r2,r3,lsl#24 -+ ldrb r2,[lr,#-6] -+ adcs $h1,$h1,r3 -+ -+ ldrb r3,[lr,#-5] -+ orr r1,r0,r1,lsl#8 -+ ldrb r0,[lr,#-4] -+ orr r2,r1,r2,lsl#16 -+ ldrb r1,[lr,#-3] -+ orr r3,r2,r3,lsl#24 -+ ldrb r2,[lr,#-2] -+ adcs $h2,$h2,r3 -+ -+ ldrb r3,[lr,#-1] -+ orr r1,r0,r1,lsl#8 -+ str lr,[sp,#8] @ offload input pointer -+ orr r2,r1,r2,lsl#16 -+ add $s1,$r1,$r1,lsr#2 -+ orr r3,r2,r3,lsl#24 -+#else -+ ldr r0,[lr],#16 @ load input -+ it hi -+ addhi $h4,$h4,#1 @ padbit -+ ldr r1,[lr,#-12] -+ ldr r2,[lr,#-8] -+ ldr r3,[lr,#-4] 
-+# ifdef __ARMEB__ -+ rev r0,r0 -+ rev r1,r1 -+ rev r2,r2 -+ rev r3,r3 -+# endif -+ adds $h0,$h0,r0 @ accumulate input -+ str lr,[sp,#8] @ offload input pointer -+ adcs $h1,$h1,r1 -+ add $s1,$r1,$r1,lsr#2 -+ adcs $h2,$h2,r2 -+#endif -+ add $s2,$r2,$r2,lsr#2 -+ adcs $h3,$h3,r3 -+ add $s3,$r3,$r3,lsr#2 -+ -+ umull r2,r3,$h1,$r0 -+ adc $h4,$h4,#0 -+ umull r0,r1,$h0,$r0 -+ umlal r2,r3,$h4,$s1 -+ umlal r0,r1,$h3,$s1 -+ ldr $r1,[sp,#20] @ reload $r1 -+ umlal r2,r3,$h2,$s3 -+ umlal r0,r1,$h1,$s3 -+ umlal r2,r3,$h3,$s2 -+ umlal r0,r1,$h2,$s2 -+ umlal r2,r3,$h0,$r1 -+ str r0,[sp,#0] @ future $h0 -+ mul r0,$s2,$h4 -+ ldr $r2,[sp,#24] @ reload $r2 -+ adds r2,r2,r1 @ d1+=d0>>32 -+ eor r1,r1,r1 -+ adc lr,r3,#0 @ future $h2 -+ str r2,[sp,#4] @ future $h1 -+ -+ mul r2,$s3,$h4 -+ eor r3,r3,r3 -+ umlal r0,r1,$h3,$s3 -+ ldr $r3,[sp,#28] @ reload $r3 -+ umlal r2,r3,$h3,$r0 -+ umlal r0,r1,$h2,$r0 -+ umlal r2,r3,$h2,$r1 -+ umlal r0,r1,$h1,$r1 -+ umlal r2,r3,$h1,$r2 -+ umlal r0,r1,$h0,$r2 -+ umlal r2,r3,$h0,$r3 -+ ldr $h0,[sp,#0] -+ mul $h4,$r0,$h4 -+ ldr $h1,[sp,#4] -+ -+ adds $h2,lr,r0 @ d2+=d1>>32 -+ ldr lr,[sp,#8] @ reload input pointer -+ adc r1,r1,#0 -+ adds $h3,r2,r1 @ d3+=d2>>32 -+ ldr r0,[sp,#16] @ reload end pointer -+ adc r3,r3,#0 -+ add $h4,$h4,r3 @ h4+=d3>>32 -+ -+ and r1,$h4,#-4 -+ and $h4,$h4,#3 -+ add r1,r1,r1,lsr#2 @ *=5 -+ adds $h0,$h0,r1 -+ adcs $h1,$h1,#0 -+ adcs $h2,$h2,#0 -+ adcs $h3,$h3,#0 -+ adc $h4,$h4,#0 -+ -+ cmp r0,lr @ done yet? 
-+ bhi .Loop -+ -+ ldr $ctx,[sp,#12] -+ add sp,sp,#32 -+ stmdb $ctx,{$h0-$h4} @ store the result -+ -+.Lno_data: -+#if __ARM_ARCH__>=5 -+ ldmia sp!,{r3-r11,pc} -+#else -+ ldmia sp!,{r3-r11,lr} -+ tst lr,#1 -+ moveq pc,lr @ be binary compatible with V4, yet -+ bx lr @ interoperable with Thumb ISA:-) -+#endif -+.size poly1305_blocks,.-poly1305_blocks -+___ -+} -+{ -+my ($ctx,$mac,$nonce)=map("r$_",(0..2)); -+my ($h0,$h1,$h2,$h3,$h4,$g0,$g1,$g2,$g3)=map("r$_",(3..11)); -+my $g4=$ctx; -+ -+$code.=<<___; -+.type poly1305_emit,%function -+.align 5 -+poly1305_emit: -+.Lpoly1305_emit: -+ stmdb sp!,{r4-r11} -+ -+ ldmia $ctx,{$h0-$h4} -+ -+#if __ARM_ARCH__>=7 -+ ldr ip,[$ctx,#36] @ is_base2_26 -+ -+ adds $g0,$h0,$h1,lsl#26 @ base 2^26 -> base 2^32 -+ mov $g1,$h1,lsr#6 -+ adcs $g1,$g1,$h2,lsl#20 -+ mov $g2,$h2,lsr#12 -+ adcs $g2,$g2,$h3,lsl#14 -+ mov $g3,$h3,lsr#18 -+ adcs $g3,$g3,$h4,lsl#8 -+ mov $g4,#0 -+ adc $g4,$g4,$h4,lsr#24 -+ -+ tst ip,ip -+ itttt ne -+ movne $h0,$g0 -+ movne $h1,$g1 -+ movne $h2,$g2 -+ movne $h3,$g3 -+ it ne -+ movne $h4,$g4 -+#endif -+ -+ adds $g0,$h0,#5 @ compare to modulus -+ adcs $g1,$h1,#0 -+ adcs $g2,$h2,#0 -+ adcs $g3,$h3,#0 -+ adc $g4,$h4,#0 -+ tst $g4,#4 @ did it carry/borrow? 
-+ -+#ifdef __thumb2__ -+ it ne -+#endif -+ movne $h0,$g0 -+ ldr $g0,[$nonce,#0] -+#ifdef __thumb2__ -+ it ne -+#endif -+ movne $h1,$g1 -+ ldr $g1,[$nonce,#4] -+#ifdef __thumb2__ -+ it ne -+#endif -+ movne $h2,$g2 -+ ldr $g2,[$nonce,#8] -+#ifdef __thumb2__ -+ it ne -+#endif -+ movne $h3,$g3 -+ ldr $g3,[$nonce,#12] -+ -+ adds $h0,$h0,$g0 -+ adcs $h1,$h1,$g1 -+ adcs $h2,$h2,$g2 -+ adc $h3,$h3,$g3 -+ -+#if __ARM_ARCH__>=7 -+# ifdef __ARMEB__ -+ rev $h0,$h0 -+ rev $h1,$h1 -+ rev $h2,$h2 -+ rev $h3,$h3 -+# endif -+ str $h0,[$mac,#0] -+ str $h1,[$mac,#4] -+ str $h2,[$mac,#8] -+ str $h3,[$mac,#12] -+#else -+ strb $h0,[$mac,#0] -+ mov $h0,$h0,lsr#8 -+ strb $h1,[$mac,#4] -+ mov $h1,$h1,lsr#8 -+ strb $h2,[$mac,#8] -+ mov $h2,$h2,lsr#8 -+ strb $h3,[$mac,#12] -+ mov $h3,$h3,lsr#8 -+ -+ strb $h0,[$mac,#1] -+ mov $h0,$h0,lsr#8 -+ strb $h1,[$mac,#5] -+ mov $h1,$h1,lsr#8 -+ strb $h2,[$mac,#9] -+ mov $h2,$h2,lsr#8 -+ strb $h3,[$mac,#13] -+ mov $h3,$h3,lsr#8 -+ -+ strb $h0,[$mac,#2] -+ mov $h0,$h0,lsr#8 -+ strb $h1,[$mac,#6] -+ mov $h1,$h1,lsr#8 -+ strb $h2,[$mac,#10] -+ mov $h2,$h2,lsr#8 -+ strb $h3,[$mac,#14] -+ mov $h3,$h3,lsr#8 -+ -+ strb $h0,[$mac,#3] -+ strb $h1,[$mac,#7] -+ strb $h2,[$mac,#11] -+ strb $h3,[$mac,#15] -+#endif -+ ldmia sp!,{r4-r11} -+#if __ARM_ARCH__>=5 -+ ret @ bx lr -+#else -+ tst lr,#1 -+ moveq pc,lr @ be binary compatible with V4, yet -+ bx lr @ interoperable with Thumb ISA:-) -+#endif -+.size poly1305_emit,.-poly1305_emit -+___ -+{ -+my ($R0,$R1,$S1,$R2,$S2,$R3,$S3,$R4,$S4) = map("d$_",(0..9)); -+my ($D0,$D1,$D2,$D3,$D4, $H0,$H1,$H2,$H3,$H4) = map("q$_",(5..14)); -+my ($T0,$T1,$MASK) = map("q$_",(15,4,0)); -+ -+my ($in2,$zeros,$tbl0,$tbl1) = map("r$_",(4..7)); -+ -+$code.=<<___; -+#if __ARM_MAX_ARCH__>=7 -+.fpu neon -+ -+.type poly1305_init_neon,%function -+.align 5 -+poly1305_init_neon: -+.Lpoly1305_init_neon: -+ ldr r3,[$ctx,#48] @ first table element -+ cmp r3,#-1 @ is value impossible? 
-+ bne .Lno_init_neon -+ -+ ldr r4,[$ctx,#20] @ load key base 2^32 -+ ldr r5,[$ctx,#24] -+ ldr r6,[$ctx,#28] -+ ldr r7,[$ctx,#32] -+ -+ and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26 -+ mov r3,r4,lsr#26 -+ mov r4,r5,lsr#20 -+ orr r3,r3,r5,lsl#6 -+ mov r5,r6,lsr#14 -+ orr r4,r4,r6,lsl#12 -+ mov r6,r7,lsr#8 -+ orr r5,r5,r7,lsl#18 -+ and r3,r3,#0x03ffffff -+ and r4,r4,#0x03ffffff -+ and r5,r5,#0x03ffffff -+ -+ vdup.32 $R0,r2 @ r^1 in both lanes -+ add r2,r3,r3,lsl#2 @ *5 -+ vdup.32 $R1,r3 -+ add r3,r4,r4,lsl#2 -+ vdup.32 $S1,r2 -+ vdup.32 $R2,r4 -+ add r4,r5,r5,lsl#2 -+ vdup.32 $S2,r3 -+ vdup.32 $R3,r5 -+ add r5,r6,r6,lsl#2 -+ vdup.32 $S3,r4 -+ vdup.32 $R4,r6 -+ vdup.32 $S4,r5 -+ -+ mov $zeros,#2 @ counter -+ -+.Lsquare_neon: -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 -+ @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 -+ @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 -+ @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 -+ @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 -+ -+ vmull.u32 $D0,$R0,${R0}[1] -+ vmull.u32 $D1,$R1,${R0}[1] -+ vmull.u32 $D2,$R2,${R0}[1] -+ vmull.u32 $D3,$R3,${R0}[1] -+ vmull.u32 $D4,$R4,${R0}[1] -+ -+ vmlal.u32 $D0,$R4,${S1}[1] -+ vmlal.u32 $D1,$R0,${R1}[1] -+ vmlal.u32 $D2,$R1,${R1}[1] -+ vmlal.u32 $D3,$R2,${R1}[1] -+ vmlal.u32 $D4,$R3,${R1}[1] -+ -+ vmlal.u32 $D0,$R3,${S2}[1] -+ vmlal.u32 $D1,$R4,${S2}[1] -+ vmlal.u32 $D3,$R1,${R2}[1] -+ vmlal.u32 $D2,$R0,${R2}[1] -+ vmlal.u32 $D4,$R2,${R2}[1] -+ -+ vmlal.u32 $D0,$R2,${S3}[1] -+ vmlal.u32 $D3,$R0,${R3}[1] -+ vmlal.u32 $D1,$R3,${S3}[1] -+ vmlal.u32 $D2,$R4,${S3}[1] -+ vmlal.u32 $D4,$R1,${R3}[1] -+ -+ vmlal.u32 $D3,$R4,${S4}[1] -+ vmlal.u32 $D0,$R1,${S4}[1] -+ vmlal.u32 $D1,$R2,${S4}[1] -+ vmlal.u32 $D2,$R3,${S4}[1] -+ vmlal.u32 $D4,$R0,${R4}[1] -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein -+ @ and P. 
Schwabe -+ @ -+ @ H0>>+H1>>+H2>>+H3>>+H4 -+ @ H3>>+H4>>*5+H0>>+H1 -+ @ -+ @ Trivia. -+ @ -+ @ Result of multiplication of n-bit number by m-bit number is -+ @ n+m bits wide. However! Even though 2^n is a n+1-bit number, -+ @ m-bit number multiplied by 2^n is still n+m bits wide. -+ @ -+ @ Sum of two n-bit numbers is n+1 bits wide, sum of three - n+2, -+ @ and so is sum of four. Sum of 2^m n-m-bit numbers and n-bit -+ @ one is n+1 bits wide. -+ @ -+ @ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that -+ @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4 -+ @ can be 27. However! In cases when their width exceeds 26 bits -+ @ they are limited by 2^26+2^6. This in turn means that *sum* -+ @ of the products with these values can still be viewed as sum -+ @ of 52-bit numbers as long as the amount of addends is not a -+ @ power of 2. For example, -+ @ -+ @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4, -+ @ -+ @ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or -+ @ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than -+ @ 8 * (2^52) or 2^55. However, the value is then multiplied by -+ @ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12), -+ @ which is less than 32 * (2^52) or 2^57. And when processing -+ @ data we are looking at triple as many addends... -+ @ -+ @ In key setup procedure pre-reduced H0 is limited by 5*4+1 and -+ @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the -+ @ input H0 is limited by (5*4+1)*3 addends, or 58 bits, while -+ @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32 -+ @ instruction accepts 2x32-bit input and writes 2x64-bit result. -+ @ This means that result of reduction have to be compressed upon -+ @ loop wrap-around. 
This can be done in the process of reduction -+ @ to minimize amount of instructions [as well as amount of -+ @ 128-bit instructions, which benefits low-end processors], but -+ @ one has to watch for H2 (which is narrower than H0) and 5*H4 -+ @ not being wider than 58 bits, so that result of right shift -+ @ by 26 bits fits in 32 bits. This is also useful on x86, -+ @ because it allows to use paddd in place for paddq, which -+ @ benefits Atom, where paddq is ridiculously slow. -+ -+ vshr.u64 $T0,$D3,#26 -+ vmovn.i64 $D3#lo,$D3 -+ vshr.u64 $T1,$D0,#26 -+ vmovn.i64 $D0#lo,$D0 -+ vadd.i64 $D4,$D4,$T0 @ h3 -> h4 -+ vbic.i32 $D3#lo,#0xfc000000 @ &=0x03ffffff -+ vadd.i64 $D1,$D1,$T1 @ h0 -> h1 -+ vbic.i32 $D0#lo,#0xfc000000 -+ -+ vshrn.u64 $T0#lo,$D4,#26 -+ vmovn.i64 $D4#lo,$D4 -+ vshr.u64 $T1,$D1,#26 -+ vmovn.i64 $D1#lo,$D1 -+ vadd.i64 $D2,$D2,$T1 @ h1 -> h2 -+ vbic.i32 $D4#lo,#0xfc000000 -+ vbic.i32 $D1#lo,#0xfc000000 -+ -+ vadd.i32 $D0#lo,$D0#lo,$T0#lo -+ vshl.u32 $T0#lo,$T0#lo,#2 -+ vshrn.u64 $T1#lo,$D2,#26 -+ vmovn.i64 $D2#lo,$D2 -+ vadd.i32 $D0#lo,$D0#lo,$T0#lo @ h4 -> h0 -+ vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3 -+ vbic.i32 $D2#lo,#0xfc000000 -+ -+ vshr.u32 $T0#lo,$D0#lo,#26 -+ vbic.i32 $D0#lo,#0xfc000000 -+ vshr.u32 $T1#lo,$D3#lo,#26 -+ vbic.i32 $D3#lo,#0xfc000000 -+ vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1 -+ vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4 -+ -+ subs $zeros,$zeros,#1 -+ beq .Lsquare_break_neon -+ -+ add $tbl0,$ctx,#(48+0*9*4) -+ add $tbl1,$ctx,#(48+1*9*4) -+ -+ vtrn.32 $R0,$D0#lo @ r^2:r^1 -+ vtrn.32 $R2,$D2#lo -+ vtrn.32 $R3,$D3#lo -+ vtrn.32 $R1,$D1#lo -+ vtrn.32 $R4,$D4#lo -+ -+ vshl.u32 $S2,$R2,#2 @ *5 -+ vshl.u32 $S3,$R3,#2 -+ vshl.u32 $S1,$R1,#2 -+ vshl.u32 $S4,$R4,#2 -+ vadd.i32 $S2,$S2,$R2 -+ vadd.i32 $S1,$S1,$R1 -+ vadd.i32 $S3,$S3,$R3 -+ vadd.i32 $S4,$S4,$R4 -+ -+ vst4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! -+ vst4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! -+ vst4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! 
-+ vst4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! -+ vst1.32 {${S4}[0]},[$tbl0,:32] -+ vst1.32 {${S4}[1]},[$tbl1,:32] -+ -+ b .Lsquare_neon -+ -+.align 4 -+.Lsquare_break_neon: -+ add $tbl0,$ctx,#(48+2*4*9) -+ add $tbl1,$ctx,#(48+3*4*9) -+ -+ vmov $R0,$D0#lo @ r^4:r^3 -+ vshl.u32 $S1,$D1#lo,#2 @ *5 -+ vmov $R1,$D1#lo -+ vshl.u32 $S2,$D2#lo,#2 -+ vmov $R2,$D2#lo -+ vshl.u32 $S3,$D3#lo,#2 -+ vmov $R3,$D3#lo -+ vshl.u32 $S4,$D4#lo,#2 -+ vmov $R4,$D4#lo -+ vadd.i32 $S1,$S1,$D1#lo -+ vadd.i32 $S2,$S2,$D2#lo -+ vadd.i32 $S3,$S3,$D3#lo -+ vadd.i32 $S4,$S4,$D4#lo -+ -+ vst4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! -+ vst4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! -+ vst4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! -+ vst4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! -+ vst1.32 {${S4}[0]},[$tbl0] -+ vst1.32 {${S4}[1]},[$tbl1] -+ -+.Lno_init_neon: -+ ret @ bx lr -+.size poly1305_init_neon,.-poly1305_init_neon -+ -+.type poly1305_blocks_neon,%function -+.align 5 -+poly1305_blocks_neon: -+.Lpoly1305_blocks_neon: -+ ldr ip,[$ctx,#36] @ is_base2_26 -+ -+ cmp $len,#64 -+ blo .Lpoly1305_blocks -+ -+ stmdb sp!,{r4-r7} -+ vstmdb sp!,{d8-d15} @ ABI specification says so -+ -+ tst ip,ip @ is_base2_26? 
-+ bne .Lbase2_26_neon -+ -+ stmdb sp!,{r1-r3,lr} -+ bl .Lpoly1305_init_neon -+ -+ ldr r4,[$ctx,#0] @ load hash value base 2^32 -+ ldr r5,[$ctx,#4] -+ ldr r6,[$ctx,#8] -+ ldr r7,[$ctx,#12] -+ ldr ip,[$ctx,#16] -+ -+ and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26 -+ mov r3,r4,lsr#26 -+ veor $D0#lo,$D0#lo,$D0#lo -+ mov r4,r5,lsr#20 -+ orr r3,r3,r5,lsl#6 -+ veor $D1#lo,$D1#lo,$D1#lo -+ mov r5,r6,lsr#14 -+ orr r4,r4,r6,lsl#12 -+ veor $D2#lo,$D2#lo,$D2#lo -+ mov r6,r7,lsr#8 -+ orr r5,r5,r7,lsl#18 -+ veor $D3#lo,$D3#lo,$D3#lo -+ and r3,r3,#0x03ffffff -+ orr r6,r6,ip,lsl#24 -+ veor $D4#lo,$D4#lo,$D4#lo -+ and r4,r4,#0x03ffffff -+ mov r1,#1 -+ and r5,r5,#0x03ffffff -+ str r1,[$ctx,#36] @ set is_base2_26 -+ -+ vmov.32 $D0#lo[0],r2 -+ vmov.32 $D1#lo[0],r3 -+ vmov.32 $D2#lo[0],r4 -+ vmov.32 $D3#lo[0],r5 -+ vmov.32 $D4#lo[0],r6 -+ adr $zeros,.Lzeros -+ -+ ldmia sp!,{r1-r3,lr} -+ b .Lhash_loaded -+ -+.align 4 -+.Lbase2_26_neon: -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ load hash value -+ -+ veor $D0#lo,$D0#lo,$D0#lo -+ veor $D1#lo,$D1#lo,$D1#lo -+ veor $D2#lo,$D2#lo,$D2#lo -+ veor $D3#lo,$D3#lo,$D3#lo -+ veor $D4#lo,$D4#lo,$D4#lo -+ vld4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]! -+ adr $zeros,.Lzeros -+ vld1.32 {$D4#lo[0]},[$ctx] -+ sub $ctx,$ctx,#16 @ rewind -+ -+.Lhash_loaded: -+ add $in2,$inp,#32 -+ mov $padbit,$padbit,lsl#24 -+ tst $len,#31 -+ beq .Leven -+ -+ vld4.32 {$H0#lo[0],$H1#lo[0],$H2#lo[0],$H3#lo[0]},[$inp]! 
-+ vmov.32 $H4#lo[0],$padbit -+ sub $len,$len,#16 -+ add $in2,$inp,#32 -+ -+# ifdef __ARMEB__ -+ vrev32.8 $H0,$H0 -+ vrev32.8 $H3,$H3 -+ vrev32.8 $H1,$H1 -+ vrev32.8 $H2,$H2 -+# endif -+ vsri.u32 $H4#lo,$H3#lo,#8 @ base 2^32 -> base 2^26 -+ vshl.u32 $H3#lo,$H3#lo,#18 -+ -+ vsri.u32 $H3#lo,$H2#lo,#14 -+ vshl.u32 $H2#lo,$H2#lo,#12 -+ vadd.i32 $H4#hi,$H4#lo,$D4#lo @ add hash value and move to #hi -+ -+ vbic.i32 $H3#lo,#0xfc000000 -+ vsri.u32 $H2#lo,$H1#lo,#20 -+ vshl.u32 $H1#lo,$H1#lo,#6 -+ -+ vbic.i32 $H2#lo,#0xfc000000 -+ vsri.u32 $H1#lo,$H0#lo,#26 -+ vadd.i32 $H3#hi,$H3#lo,$D3#lo -+ -+ vbic.i32 $H0#lo,#0xfc000000 -+ vbic.i32 $H1#lo,#0xfc000000 -+ vadd.i32 $H2#hi,$H2#lo,$D2#lo -+ -+ vadd.i32 $H0#hi,$H0#lo,$D0#lo -+ vadd.i32 $H1#hi,$H1#lo,$D1#lo -+ -+ mov $tbl1,$zeros -+ add $tbl0,$ctx,#48 -+ -+ cmp $len,$len -+ b .Long_tail -+ -+.align 4 -+.Leven: -+ subs $len,$len,#64 -+ it lo -+ movlo $in2,$zeros -+ -+ vmov.i32 $H4,#1<<24 @ padbit, yes, always -+ vld4.32 {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp] @ inp[0:1] -+ add $inp,$inp,#64 -+ vld4.32 {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2] @ inp[2:3] (or 0) -+ add $in2,$in2,#64 -+ itt hi -+ addhi $tbl1,$ctx,#(48+1*9*4) -+ addhi $tbl0,$ctx,#(48+3*9*4) -+ -+# ifdef __ARMEB__ -+ vrev32.8 $H0,$H0 -+ vrev32.8 $H3,$H3 -+ vrev32.8 $H1,$H1 -+ vrev32.8 $H2,$H2 -+# endif -+ vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26 -+ vshl.u32 $H3,$H3,#18 -+ -+ vsri.u32 $H3,$H2,#14 -+ vshl.u32 $H2,$H2,#12 -+ -+ vbic.i32 $H3,#0xfc000000 -+ vsri.u32 $H2,$H1,#20 -+ vshl.u32 $H1,$H1,#6 -+ -+ vbic.i32 $H2,#0xfc000000 -+ vsri.u32 $H1,$H0,#26 -+ -+ vbic.i32 $H0,#0xfc000000 -+ vbic.i32 $H1,#0xfc000000 -+ -+ bls .Lskip_loop -+ -+ vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^2 -+ vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^4 -+ vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! -+ vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! 
-+ b .Loop_neon -+ -+.align 5 -+.Loop_neon: -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 -+ @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r -+ @ \___________________/ -+ @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 -+ @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r -+ @ \___________________/ \____________________/ -+ @ -+ @ Note that we start with inp[2:3]*r^2. This is because it -+ @ doesn't depend on reduction in previous iteration. -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 -+ @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 -+ @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 -+ @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 -+ @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ inp[2:3]*r^2 -+ -+ vadd.i32 $H2#lo,$H2#lo,$D2#lo @ accumulate inp[0:1] -+ vmull.u32 $D2,$H2#hi,${R0}[1] -+ vadd.i32 $H0#lo,$H0#lo,$D0#lo -+ vmull.u32 $D0,$H0#hi,${R0}[1] -+ vadd.i32 $H3#lo,$H3#lo,$D3#lo -+ vmull.u32 $D3,$H3#hi,${R0}[1] -+ vmlal.u32 $D2,$H1#hi,${R1}[1] -+ vadd.i32 $H1#lo,$H1#lo,$D1#lo -+ vmull.u32 $D1,$H1#hi,${R0}[1] -+ -+ vadd.i32 $H4#lo,$H4#lo,$D4#lo -+ vmull.u32 $D4,$H4#hi,${R0}[1] -+ subs $len,$len,#64 -+ vmlal.u32 $D0,$H4#hi,${S1}[1] -+ it lo -+ movlo $in2,$zeros -+ vmlal.u32 $D3,$H2#hi,${R1}[1] -+ vld1.32 ${S4}[1],[$tbl1,:32] -+ vmlal.u32 $D1,$H0#hi,${R1}[1] -+ vmlal.u32 $D4,$H3#hi,${R1}[1] -+ -+ vmlal.u32 $D0,$H3#hi,${S2}[1] -+ vmlal.u32 $D3,$H1#hi,${R2}[1] -+ vmlal.u32 $D4,$H2#hi,${R2}[1] -+ vmlal.u32 $D1,$H4#hi,${S2}[1] -+ vmlal.u32 $D2,$H0#hi,${R2}[1] -+ -+ vmlal.u32 $D3,$H0#hi,${R3}[1] -+ vmlal.u32 $D0,$H2#hi,${S3}[1] -+ vmlal.u32 $D4,$H1#hi,${R3}[1] -+ vmlal.u32 $D1,$H3#hi,${S3}[1] -+ vmlal.u32 $D2,$H4#hi,${S3}[1] -+ -+ vmlal.u32 $D3,$H4#hi,${S4}[1] -+ vmlal.u32 $D0,$H1#hi,${S4}[1] -+ vmlal.u32 
$D4,$H0#hi,${R4}[1] -+ vmlal.u32 $D1,$H2#hi,${S4}[1] -+ vmlal.u32 $D2,$H3#hi,${S4}[1] -+ -+ vld4.32 {$H0#hi,$H1#hi,$H2#hi,$H3#hi},[$in2] @ inp[2:3] (or 0) -+ add $in2,$in2,#64 -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ (hash+inp[0:1])*r^4 and accumulate -+ -+ vmlal.u32 $D3,$H3#lo,${R0}[0] -+ vmlal.u32 $D0,$H0#lo,${R0}[0] -+ vmlal.u32 $D4,$H4#lo,${R0}[0] -+ vmlal.u32 $D1,$H1#lo,${R0}[0] -+ vmlal.u32 $D2,$H2#lo,${R0}[0] -+ vld1.32 ${S4}[0],[$tbl0,:32] -+ -+ vmlal.u32 $D3,$H2#lo,${R1}[0] -+ vmlal.u32 $D0,$H4#lo,${S1}[0] -+ vmlal.u32 $D4,$H3#lo,${R1}[0] -+ vmlal.u32 $D1,$H0#lo,${R1}[0] -+ vmlal.u32 $D2,$H1#lo,${R1}[0] -+ -+ vmlal.u32 $D3,$H1#lo,${R2}[0] -+ vmlal.u32 $D0,$H3#lo,${S2}[0] -+ vmlal.u32 $D4,$H2#lo,${R2}[0] -+ vmlal.u32 $D1,$H4#lo,${S2}[0] -+ vmlal.u32 $D2,$H0#lo,${R2}[0] -+ -+ vmlal.u32 $D3,$H0#lo,${R3}[0] -+ vmlal.u32 $D0,$H2#lo,${S3}[0] -+ vmlal.u32 $D4,$H1#lo,${R3}[0] -+ vmlal.u32 $D1,$H3#lo,${S3}[0] -+ vmlal.u32 $D3,$H4#lo,${S4}[0] -+ -+ vmlal.u32 $D2,$H4#lo,${S3}[0] -+ vmlal.u32 $D0,$H1#lo,${S4}[0] -+ vmlal.u32 $D4,$H0#lo,${R4}[0] -+ vmov.i32 $H4,#1<<24 @ padbit, yes, always -+ vmlal.u32 $D1,$H2#lo,${S4}[0] -+ vmlal.u32 $D2,$H3#lo,${S4}[0] -+ -+ vld4.32 {$H0#lo,$H1#lo,$H2#lo,$H3#lo},[$inp] @ inp[0:1] -+ add $inp,$inp,#64 -+# ifdef __ARMEB__ -+ vrev32.8 $H0,$H0 -+ vrev32.8 $H1,$H1 -+ vrev32.8 $H2,$H2 -+ vrev32.8 $H3,$H3 -+# endif -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ lazy reduction interleaved with base 2^32 -> base 2^26 of -+ @ inp[0:3] previously loaded to $H0-$H3 and smashed to $H0-$H4. 
-+ -+ vshr.u64 $T0,$D3,#26 -+ vmovn.i64 $D3#lo,$D3 -+ vshr.u64 $T1,$D0,#26 -+ vmovn.i64 $D0#lo,$D0 -+ vadd.i64 $D4,$D4,$T0 @ h3 -> h4 -+ vbic.i32 $D3#lo,#0xfc000000 -+ vsri.u32 $H4,$H3,#8 @ base 2^32 -> base 2^26 -+ vadd.i64 $D1,$D1,$T1 @ h0 -> h1 -+ vshl.u32 $H3,$H3,#18 -+ vbic.i32 $D0#lo,#0xfc000000 -+ -+ vshrn.u64 $T0#lo,$D4,#26 -+ vmovn.i64 $D4#lo,$D4 -+ vshr.u64 $T1,$D1,#26 -+ vmovn.i64 $D1#lo,$D1 -+ vadd.i64 $D2,$D2,$T1 @ h1 -> h2 -+ vsri.u32 $H3,$H2,#14 -+ vbic.i32 $D4#lo,#0xfc000000 -+ vshl.u32 $H2,$H2,#12 -+ vbic.i32 $D1#lo,#0xfc000000 -+ -+ vadd.i32 $D0#lo,$D0#lo,$T0#lo -+ vshl.u32 $T0#lo,$T0#lo,#2 -+ vbic.i32 $H3,#0xfc000000 -+ vshrn.u64 $T1#lo,$D2,#26 -+ vmovn.i64 $D2#lo,$D2 -+ vaddl.u32 $D0,$D0#lo,$T0#lo @ h4 -> h0 [widen for a sec] -+ vsri.u32 $H2,$H1,#20 -+ vadd.i32 $D3#lo,$D3#lo,$T1#lo @ h2 -> h3 -+ vshl.u32 $H1,$H1,#6 -+ vbic.i32 $D2#lo,#0xfc000000 -+ vbic.i32 $H2,#0xfc000000 -+ -+ vshrn.u64 $T0#lo,$D0,#26 @ re-narrow -+ vmovn.i64 $D0#lo,$D0 -+ vsri.u32 $H1,$H0,#26 -+ vbic.i32 $H0,#0xfc000000 -+ vshr.u32 $T1#lo,$D3#lo,#26 -+ vbic.i32 $D3#lo,#0xfc000000 -+ vbic.i32 $D0#lo,#0xfc000000 -+ vadd.i32 $D1#lo,$D1#lo,$T0#lo @ h0 -> h1 -+ vadd.i32 $D4#lo,$D4#lo,$T1#lo @ h3 -> h4 -+ vbic.i32 $H1,#0xfc000000 -+ -+ bhi .Loop_neon -+ -+.Lskip_loop: -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 -+ -+ add $tbl1,$ctx,#(48+0*9*4) -+ add $tbl0,$ctx,#(48+1*9*4) -+ adds $len,$len,#32 -+ it ne -+ movne $len,#0 -+ bne .Long_tail -+ -+ vadd.i32 $H2#hi,$H2#lo,$D2#lo @ add hash value and move to #hi -+ vadd.i32 $H0#hi,$H0#lo,$D0#lo -+ vadd.i32 $H3#hi,$H3#lo,$D3#lo -+ vadd.i32 $H1#hi,$H1#lo,$D1#lo -+ vadd.i32 $H4#hi,$H4#lo,$D4#lo -+ -+.Long_tail: -+ vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^1 -+ vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! 
@ load r^2 -+ -+ vadd.i32 $H2#lo,$H2#lo,$D2#lo @ can be redundant -+ vmull.u32 $D2,$H2#hi,$R0 -+ vadd.i32 $H0#lo,$H0#lo,$D0#lo -+ vmull.u32 $D0,$H0#hi,$R0 -+ vadd.i32 $H3#lo,$H3#lo,$D3#lo -+ vmull.u32 $D3,$H3#hi,$R0 -+ vadd.i32 $H1#lo,$H1#lo,$D1#lo -+ vmull.u32 $D1,$H1#hi,$R0 -+ vadd.i32 $H4#lo,$H4#lo,$D4#lo -+ vmull.u32 $D4,$H4#hi,$R0 -+ -+ vmlal.u32 $D0,$H4#hi,$S1 -+ vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! -+ vmlal.u32 $D3,$H2#hi,$R1 -+ vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! -+ vmlal.u32 $D1,$H0#hi,$R1 -+ vmlal.u32 $D4,$H3#hi,$R1 -+ vmlal.u32 $D2,$H1#hi,$R1 -+ -+ vmlal.u32 $D3,$H1#hi,$R2 -+ vld1.32 ${S4}[1],[$tbl1,:32] -+ vmlal.u32 $D0,$H3#hi,$S2 -+ vld1.32 ${S4}[0],[$tbl0,:32] -+ vmlal.u32 $D4,$H2#hi,$R2 -+ vmlal.u32 $D1,$H4#hi,$S2 -+ vmlal.u32 $D2,$H0#hi,$R2 -+ -+ vmlal.u32 $D3,$H0#hi,$R3 -+ it ne -+ addne $tbl1,$ctx,#(48+2*9*4) -+ vmlal.u32 $D0,$H2#hi,$S3 -+ it ne -+ addne $tbl0,$ctx,#(48+3*9*4) -+ vmlal.u32 $D4,$H1#hi,$R3 -+ vmlal.u32 $D1,$H3#hi,$S3 -+ vmlal.u32 $D2,$H4#hi,$S3 -+ -+ vmlal.u32 $D3,$H4#hi,$S4 -+ vorn $MASK,$MASK,$MASK @ all-ones, can be redundant -+ vmlal.u32 $D0,$H1#hi,$S4 -+ vshr.u64 $MASK,$MASK,#38 -+ vmlal.u32 $D4,$H0#hi,$R4 -+ vmlal.u32 $D1,$H2#hi,$S4 -+ vmlal.u32 $D2,$H3#hi,$S4 -+ -+ beq .Lshort_tail -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ (hash+inp[0:1])*r^4:r^3 and accumulate -+ -+ vld4.32 {${R0}[1],${R1}[1],${S1}[1],${R2}[1]},[$tbl1]! @ load r^3 -+ vld4.32 {${R0}[0],${R1}[0],${S1}[0],${R2}[0]},[$tbl0]! @ load r^4 -+ -+ vmlal.u32 $D2,$H2#lo,$R0 -+ vmlal.u32 $D0,$H0#lo,$R0 -+ vmlal.u32 $D3,$H3#lo,$R0 -+ vmlal.u32 $D1,$H1#lo,$R0 -+ vmlal.u32 $D4,$H4#lo,$R0 -+ -+ vmlal.u32 $D0,$H4#lo,$S1 -+ vld4.32 {${S2}[1],${R3}[1],${S3}[1],${R4}[1]},[$tbl1]! -+ vmlal.u32 $D3,$H2#lo,$R1 -+ vld4.32 {${S2}[0],${R3}[0],${S3}[0],${R4}[0]},[$tbl0]! 
-+ vmlal.u32 $D1,$H0#lo,$R1 -+ vmlal.u32 $D4,$H3#lo,$R1 -+ vmlal.u32 $D2,$H1#lo,$R1 -+ -+ vmlal.u32 $D3,$H1#lo,$R2 -+ vld1.32 ${S4}[1],[$tbl1,:32] -+ vmlal.u32 $D0,$H3#lo,$S2 -+ vld1.32 ${S4}[0],[$tbl0,:32] -+ vmlal.u32 $D4,$H2#lo,$R2 -+ vmlal.u32 $D1,$H4#lo,$S2 -+ vmlal.u32 $D2,$H0#lo,$R2 -+ -+ vmlal.u32 $D3,$H0#lo,$R3 -+ vmlal.u32 $D0,$H2#lo,$S3 -+ vmlal.u32 $D4,$H1#lo,$R3 -+ vmlal.u32 $D1,$H3#lo,$S3 -+ vmlal.u32 $D2,$H4#lo,$S3 -+ -+ vmlal.u32 $D3,$H4#lo,$S4 -+ vorn $MASK,$MASK,$MASK @ all-ones -+ vmlal.u32 $D0,$H1#lo,$S4 -+ vshr.u64 $MASK,$MASK,#38 -+ vmlal.u32 $D4,$H0#lo,$R4 -+ vmlal.u32 $D1,$H2#lo,$S4 -+ vmlal.u32 $D2,$H3#lo,$S4 -+ -+.Lshort_tail: -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ horizontal addition -+ -+ vadd.i64 $D3#lo,$D3#lo,$D3#hi -+ vadd.i64 $D0#lo,$D0#lo,$D0#hi -+ vadd.i64 $D4#lo,$D4#lo,$D4#hi -+ vadd.i64 $D1#lo,$D1#lo,$D1#hi -+ vadd.i64 $D2#lo,$D2#lo,$D2#hi -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ lazy reduction, but without narrowing -+ -+ vshr.u64 $T0,$D3,#26 -+ vand.i64 $D3,$D3,$MASK -+ vshr.u64 $T1,$D0,#26 -+ vand.i64 $D0,$D0,$MASK -+ vadd.i64 $D4,$D4,$T0 @ h3 -> h4 -+ vadd.i64 $D1,$D1,$T1 @ h0 -> h1 -+ -+ vshr.u64 $T0,$D4,#26 -+ vand.i64 $D4,$D4,$MASK -+ vshr.u64 $T1,$D1,#26 -+ vand.i64 $D1,$D1,$MASK -+ vadd.i64 $D2,$D2,$T1 @ h1 -> h2 -+ -+ vadd.i64 $D0,$D0,$T0 -+ vshl.u64 $T0,$T0,#2 -+ vshr.u64 $T1,$D2,#26 -+ vand.i64 $D2,$D2,$MASK -+ vadd.i64 $D0,$D0,$T0 @ h4 -> h0 -+ vadd.i64 $D3,$D3,$T1 @ h2 -> h3 -+ -+ vshr.u64 $T0,$D0,#26 -+ vand.i64 $D0,$D0,$MASK -+ vshr.u64 $T1,$D3,#26 -+ vand.i64 $D3,$D3,$MASK -+ vadd.i64 $D1,$D1,$T0 @ h0 -> h1 -+ vadd.i64 $D4,$D4,$T1 @ h3 -> h4 -+ -+ cmp $len,#0 -+ bne .Leven -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ store hash value -+ -+ vst4.32 {$D0#lo[0],$D1#lo[0],$D2#lo[0],$D3#lo[0]},[$ctx]! 
-+ vst1.32 {$D4#lo[0]},[$ctx] -+ -+ vldmia sp!,{d8-d15} @ epilogue -+ ldmia sp!,{r4-r7} -+ ret @ bx lr -+.size poly1305_blocks_neon,.-poly1305_blocks_neon -+ -+.align 5 -+.Lzeros: -+.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -+#ifndef __KERNEL__ -+.LOPENSSL_armcap: -+# ifdef _WIN32 -+.word OPENSSL_armcap_P -+# else -+.word OPENSSL_armcap_P-.Lpoly1305_init -+# endif -+.comm OPENSSL_armcap_P,4,4 -+.hidden OPENSSL_armcap_P -+#endif -+#endif -+___ -+} } -+$code.=<<___; -+.asciz "Poly1305 for ARMv4/NEON, CRYPTOGAMS by \@dot-asm" -+.align 2 -+___ -+ -+foreach (split("\n",$code)) { -+ s/\`([^\`]*)\`/eval $1/geo; -+ -+ s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo or -+ s/\bret\b/bx lr/go or -+ s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4 -+ -+ print $_,"\n"; -+} -+close STDOUT; # enforce flush ---- /dev/null -+++ b/arch/arm/crypto/poly1305-core.S_shipped -@@ -0,0 +1,1158 @@ -+#ifndef __KERNEL__ -+# include "arm_arch.h" -+#else -+# define __ARM_ARCH__ __LINUX_ARM_ARCH__ -+# define __ARM_MAX_ARCH__ __LINUX_ARM_ARCH__ -+# define poly1305_init poly1305_init_arm -+# define poly1305_blocks poly1305_blocks_arm -+# define poly1305_emit poly1305_emit_arm -+.globl poly1305_blocks_neon -+#endif -+ -+#if defined(__thumb2__) -+.syntax unified -+.thumb -+#else -+.code 32 -+#endif -+ -+.text -+ -+.globl poly1305_emit -+.globl poly1305_blocks -+.globl poly1305_init -+.type poly1305_init,%function -+.align 5 -+poly1305_init: -+.Lpoly1305_init: -+ stmdb sp!,{r4-r11} -+ -+ eor r3,r3,r3 -+ cmp r1,#0 -+ str r3,[r0,#0] @ zero hash value -+ str r3,[r0,#4] -+ str r3,[r0,#8] -+ str r3,[r0,#12] -+ str r3,[r0,#16] -+ str r3,[r0,#36] @ clear is_base2_26 -+ add r0,r0,#20 -+ -+#ifdef __thumb2__ -+ it eq -+#endif -+ moveq r0,#0 -+ beq .Lno_key -+ -+#if __ARM_MAX_ARCH__>=7 -+ mov r3,#-1 -+ str r3,[r0,#28] @ impossible key power value -+# ifndef __KERNEL__ -+ adr r11,.Lpoly1305_init -+ ldr r12,.LOPENSSL_armcap -+# endif -+#endif -+ ldrb r4,[r1,#0] -+ mov 
r10,#0x0fffffff -+ ldrb r5,[r1,#1] -+ and r3,r10,#-4 @ 0x0ffffffc -+ ldrb r6,[r1,#2] -+ ldrb r7,[r1,#3] -+ orr r4,r4,r5,lsl#8 -+ ldrb r5,[r1,#4] -+ orr r4,r4,r6,lsl#16 -+ ldrb r6,[r1,#5] -+ orr r4,r4,r7,lsl#24 -+ ldrb r7,[r1,#6] -+ and r4,r4,r10 -+ -+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -+# if !defined(_WIN32) -+ ldr r12,[r11,r12] @ OPENSSL_armcap_P -+# endif -+# if defined(__APPLE__) || defined(_WIN32) -+ ldr r12,[r12] -+# endif -+#endif -+ ldrb r8,[r1,#7] -+ orr r5,r5,r6,lsl#8 -+ ldrb r6,[r1,#8] -+ orr r5,r5,r7,lsl#16 -+ ldrb r7,[r1,#9] -+ orr r5,r5,r8,lsl#24 -+ ldrb r8,[r1,#10] -+ and r5,r5,r3 -+ -+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -+ tst r12,#ARMV7_NEON @ check for NEON -+# ifdef __thumb2__ -+ adr r9,.Lpoly1305_blocks_neon -+ adr r11,.Lpoly1305_blocks -+ it ne -+ movne r11,r9 -+ adr r12,.Lpoly1305_emit -+ orr r11,r11,#1 @ thumb-ify addresses -+ orr r12,r12,#1 -+# else -+ add r12,r11,#(.Lpoly1305_emit-.Lpoly1305_init) -+ ite eq -+ addeq r11,r11,#(.Lpoly1305_blocks-.Lpoly1305_init) -+ addne r11,r11,#(.Lpoly1305_blocks_neon-.Lpoly1305_init) -+# endif -+#endif -+ ldrb r9,[r1,#11] -+ orr r6,r6,r7,lsl#8 -+ ldrb r7,[r1,#12] -+ orr r6,r6,r8,lsl#16 -+ ldrb r8,[r1,#13] -+ orr r6,r6,r9,lsl#24 -+ ldrb r9,[r1,#14] -+ and r6,r6,r3 -+ -+ ldrb r10,[r1,#15] -+ orr r7,r7,r8,lsl#8 -+ str r4,[r0,#0] -+ orr r7,r7,r9,lsl#16 -+ str r5,[r0,#4] -+ orr r7,r7,r10,lsl#24 -+ str r6,[r0,#8] -+ and r7,r7,r3 -+ str r7,[r0,#12] -+#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__) -+ stmia r2,{r11,r12} @ fill functions table -+ mov r0,#1 -+#else -+ mov r0,#0 -+#endif -+.Lno_key: -+ ldmia sp!,{r4-r11} -+#if __ARM_ARCH__>=5 -+ bx lr @ bx lr -+#else -+ tst lr,#1 -+ moveq pc,lr @ be binary compatible with V4, yet -+ .word 0xe12fff1e @ interoperable with Thumb ISA:-) -+#endif -+.size poly1305_init,.-poly1305_init -+.type poly1305_blocks,%function -+.align 5 -+poly1305_blocks: -+.Lpoly1305_blocks: -+ stmdb sp!,{r3-r11,lr} -+ -+ ands r2,r2,#-16 -+ beq .Lno_data -+ -+ add r2,r2,r1 
@ end pointer -+ sub sp,sp,#32 -+ -+#if __ARM_ARCH__<7 -+ ldmia r0,{r4-r12} @ load context -+ add r0,r0,#20 -+ str r2,[sp,#16] @ offload stuff -+ str r0,[sp,#12] -+#else -+ ldr lr,[r0,#36] @ is_base2_26 -+ ldmia r0!,{r4-r8} @ load hash value -+ str r2,[sp,#16] @ offload stuff -+ str r0,[sp,#12] -+ -+ adds r9,r4,r5,lsl#26 @ base 2^26 -> base 2^32 -+ mov r10,r5,lsr#6 -+ adcs r10,r10,r6,lsl#20 -+ mov r11,r6,lsr#12 -+ adcs r11,r11,r7,lsl#14 -+ mov r12,r7,lsr#18 -+ adcs r12,r12,r8,lsl#8 -+ mov r2,#0 -+ teq lr,#0 -+ str r2,[r0,#16] @ clear is_base2_26 -+ adc r2,r2,r8,lsr#24 -+ -+ itttt ne -+ movne r4,r9 @ choose between radixes -+ movne r5,r10 -+ movne r6,r11 -+ movne r7,r12 -+ ldmia r0,{r9-r12} @ load key -+ it ne -+ movne r8,r2 -+#endif -+ -+ mov lr,r1 -+ cmp r3,#0 -+ str r10,[sp,#20] -+ str r11,[sp,#24] -+ str r12,[sp,#28] -+ b .Loop -+ -+.align 4 -+.Loop: -+#if __ARM_ARCH__<7 -+ ldrb r0,[lr],#16 @ load input -+# ifdef __thumb2__ -+ it hi -+# endif -+ addhi r8,r8,#1 @ 1<<128 -+ ldrb r1,[lr,#-15] -+ ldrb r2,[lr,#-14] -+ ldrb r3,[lr,#-13] -+ orr r1,r0,r1,lsl#8 -+ ldrb r0,[lr,#-12] -+ orr r2,r1,r2,lsl#16 -+ ldrb r1,[lr,#-11] -+ orr r3,r2,r3,lsl#24 -+ ldrb r2,[lr,#-10] -+ adds r4,r4,r3 @ accumulate input -+ -+ ldrb r3,[lr,#-9] -+ orr r1,r0,r1,lsl#8 -+ ldrb r0,[lr,#-8] -+ orr r2,r1,r2,lsl#16 -+ ldrb r1,[lr,#-7] -+ orr r3,r2,r3,lsl#24 -+ ldrb r2,[lr,#-6] -+ adcs r5,r5,r3 -+ -+ ldrb r3,[lr,#-5] -+ orr r1,r0,r1,lsl#8 -+ ldrb r0,[lr,#-4] -+ orr r2,r1,r2,lsl#16 -+ ldrb r1,[lr,#-3] -+ orr r3,r2,r3,lsl#24 -+ ldrb r2,[lr,#-2] -+ adcs r6,r6,r3 -+ -+ ldrb r3,[lr,#-1] -+ orr r1,r0,r1,lsl#8 -+ str lr,[sp,#8] @ offload input pointer -+ orr r2,r1,r2,lsl#16 -+ add r10,r10,r10,lsr#2 -+ orr r3,r2,r3,lsl#24 -+#else -+ ldr r0,[lr],#16 @ load input -+ it hi -+ addhi r8,r8,#1 @ padbit -+ ldr r1,[lr,#-12] -+ ldr r2,[lr,#-8] -+ ldr r3,[lr,#-4] -+# ifdef __ARMEB__ -+ rev r0,r0 -+ rev r1,r1 -+ rev r2,r2 -+ rev r3,r3 -+# endif -+ adds r4,r4,r0 @ accumulate input -+ str lr,[sp,#8] @ offload input 
pointer -+ adcs r5,r5,r1 -+ add r10,r10,r10,lsr#2 -+ adcs r6,r6,r2 -+#endif -+ add r11,r11,r11,lsr#2 -+ adcs r7,r7,r3 -+ add r12,r12,r12,lsr#2 -+ -+ umull r2,r3,r5,r9 -+ adc r8,r8,#0 -+ umull r0,r1,r4,r9 -+ umlal r2,r3,r8,r10 -+ umlal r0,r1,r7,r10 -+ ldr r10,[sp,#20] @ reload r10 -+ umlal r2,r3,r6,r12 -+ umlal r0,r1,r5,r12 -+ umlal r2,r3,r7,r11 -+ umlal r0,r1,r6,r11 -+ umlal r2,r3,r4,r10 -+ str r0,[sp,#0] @ future r4 -+ mul r0,r11,r8 -+ ldr r11,[sp,#24] @ reload r11 -+ adds r2,r2,r1 @ d1+=d0>>32 -+ eor r1,r1,r1 -+ adc lr,r3,#0 @ future r6 -+ str r2,[sp,#4] @ future r5 -+ -+ mul r2,r12,r8 -+ eor r3,r3,r3 -+ umlal r0,r1,r7,r12 -+ ldr r12,[sp,#28] @ reload r12 -+ umlal r2,r3,r7,r9 -+ umlal r0,r1,r6,r9 -+ umlal r2,r3,r6,r10 -+ umlal r0,r1,r5,r10 -+ umlal r2,r3,r5,r11 -+ umlal r0,r1,r4,r11 -+ umlal r2,r3,r4,r12 -+ ldr r4,[sp,#0] -+ mul r8,r9,r8 -+ ldr r5,[sp,#4] -+ -+ adds r6,lr,r0 @ d2+=d1>>32 -+ ldr lr,[sp,#8] @ reload input pointer -+ adc r1,r1,#0 -+ adds r7,r2,r1 @ d3+=d2>>32 -+ ldr r0,[sp,#16] @ reload end pointer -+ adc r3,r3,#0 -+ add r8,r8,r3 @ h4+=d3>>32 -+ -+ and r1,r8,#-4 -+ and r8,r8,#3 -+ add r1,r1,r1,lsr#2 @ *=5 -+ adds r4,r4,r1 -+ adcs r5,r5,#0 -+ adcs r6,r6,#0 -+ adcs r7,r7,#0 -+ adc r8,r8,#0 -+ -+ cmp r0,lr @ done yet? 
-+ bhi .Loop -+ -+ ldr r0,[sp,#12] -+ add sp,sp,#32 -+ stmdb r0,{r4-r8} @ store the result -+ -+.Lno_data: -+#if __ARM_ARCH__>=5 -+ ldmia sp!,{r3-r11,pc} -+#else -+ ldmia sp!,{r3-r11,lr} -+ tst lr,#1 -+ moveq pc,lr @ be binary compatible with V4, yet -+ .word 0xe12fff1e @ interoperable with Thumb ISA:-) -+#endif -+.size poly1305_blocks,.-poly1305_blocks -+.type poly1305_emit,%function -+.align 5 -+poly1305_emit: -+.Lpoly1305_emit: -+ stmdb sp!,{r4-r11} -+ -+ ldmia r0,{r3-r7} -+ -+#if __ARM_ARCH__>=7 -+ ldr ip,[r0,#36] @ is_base2_26 -+ -+ adds r8,r3,r4,lsl#26 @ base 2^26 -> base 2^32 -+ mov r9,r4,lsr#6 -+ adcs r9,r9,r5,lsl#20 -+ mov r10,r5,lsr#12 -+ adcs r10,r10,r6,lsl#14 -+ mov r11,r6,lsr#18 -+ adcs r11,r11,r7,lsl#8 -+ mov r0,#0 -+ adc r0,r0,r7,lsr#24 -+ -+ tst ip,ip -+ itttt ne -+ movne r3,r8 -+ movne r4,r9 -+ movne r5,r10 -+ movne r6,r11 -+ it ne -+ movne r7,r0 -+#endif -+ -+ adds r8,r3,#5 @ compare to modulus -+ adcs r9,r4,#0 -+ adcs r10,r5,#0 -+ adcs r11,r6,#0 -+ adc r0,r7,#0 -+ tst r0,#4 @ did it carry/borrow? 
-+ -+#ifdef __thumb2__ -+ it ne -+#endif -+ movne r3,r8 -+ ldr r8,[r2,#0] -+#ifdef __thumb2__ -+ it ne -+#endif -+ movne r4,r9 -+ ldr r9,[r2,#4] -+#ifdef __thumb2__ -+ it ne -+#endif -+ movne r5,r10 -+ ldr r10,[r2,#8] -+#ifdef __thumb2__ -+ it ne -+#endif -+ movne r6,r11 -+ ldr r11,[r2,#12] -+ -+ adds r3,r3,r8 -+ adcs r4,r4,r9 -+ adcs r5,r5,r10 -+ adc r6,r6,r11 -+ -+#if __ARM_ARCH__>=7 -+# ifdef __ARMEB__ -+ rev r3,r3 -+ rev r4,r4 -+ rev r5,r5 -+ rev r6,r6 -+# endif -+ str r3,[r1,#0] -+ str r4,[r1,#4] -+ str r5,[r1,#8] -+ str r6,[r1,#12] -+#else -+ strb r3,[r1,#0] -+ mov r3,r3,lsr#8 -+ strb r4,[r1,#4] -+ mov r4,r4,lsr#8 -+ strb r5,[r1,#8] -+ mov r5,r5,lsr#8 -+ strb r6,[r1,#12] -+ mov r6,r6,lsr#8 -+ -+ strb r3,[r1,#1] -+ mov r3,r3,lsr#8 -+ strb r4,[r1,#5] -+ mov r4,r4,lsr#8 -+ strb r5,[r1,#9] -+ mov r5,r5,lsr#8 -+ strb r6,[r1,#13] -+ mov r6,r6,lsr#8 -+ -+ strb r3,[r1,#2] -+ mov r3,r3,lsr#8 -+ strb r4,[r1,#6] -+ mov r4,r4,lsr#8 -+ strb r5,[r1,#10] -+ mov r5,r5,lsr#8 -+ strb r6,[r1,#14] -+ mov r6,r6,lsr#8 -+ -+ strb r3,[r1,#3] -+ strb r4,[r1,#7] -+ strb r5,[r1,#11] -+ strb r6,[r1,#15] -+#endif -+ ldmia sp!,{r4-r11} -+#if __ARM_ARCH__>=5 -+ bx lr @ bx lr -+#else -+ tst lr,#1 -+ moveq pc,lr @ be binary compatible with V4, yet -+ .word 0xe12fff1e @ interoperable with Thumb ISA:-) -+#endif -+.size poly1305_emit,.-poly1305_emit -+#if __ARM_MAX_ARCH__>=7 -+.fpu neon -+ -+.type poly1305_init_neon,%function -+.align 5 -+poly1305_init_neon: -+.Lpoly1305_init_neon: -+ ldr r3,[r0,#48] @ first table element -+ cmp r3,#-1 @ is value impossible? 
-+ bne .Lno_init_neon -+ -+ ldr r4,[r0,#20] @ load key base 2^32 -+ ldr r5,[r0,#24] -+ ldr r6,[r0,#28] -+ ldr r7,[r0,#32] -+ -+ and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26 -+ mov r3,r4,lsr#26 -+ mov r4,r5,lsr#20 -+ orr r3,r3,r5,lsl#6 -+ mov r5,r6,lsr#14 -+ orr r4,r4,r6,lsl#12 -+ mov r6,r7,lsr#8 -+ orr r5,r5,r7,lsl#18 -+ and r3,r3,#0x03ffffff -+ and r4,r4,#0x03ffffff -+ and r5,r5,#0x03ffffff -+ -+ vdup.32 d0,r2 @ r^1 in both lanes -+ add r2,r3,r3,lsl#2 @ *5 -+ vdup.32 d1,r3 -+ add r3,r4,r4,lsl#2 -+ vdup.32 d2,r2 -+ vdup.32 d3,r4 -+ add r4,r5,r5,lsl#2 -+ vdup.32 d4,r3 -+ vdup.32 d5,r5 -+ add r5,r6,r6,lsl#2 -+ vdup.32 d6,r4 -+ vdup.32 d7,r6 -+ vdup.32 d8,r5 -+ -+ mov r5,#2 @ counter -+ -+.Lsquare_neon: -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 -+ @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 -+ @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 -+ @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 -+ @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 -+ -+ vmull.u32 q5,d0,d0[1] -+ vmull.u32 q6,d1,d0[1] -+ vmull.u32 q7,d3,d0[1] -+ vmull.u32 q8,d5,d0[1] -+ vmull.u32 q9,d7,d0[1] -+ -+ vmlal.u32 q5,d7,d2[1] -+ vmlal.u32 q6,d0,d1[1] -+ vmlal.u32 q7,d1,d1[1] -+ vmlal.u32 q8,d3,d1[1] -+ vmlal.u32 q9,d5,d1[1] -+ -+ vmlal.u32 q5,d5,d4[1] -+ vmlal.u32 q6,d7,d4[1] -+ vmlal.u32 q8,d1,d3[1] -+ vmlal.u32 q7,d0,d3[1] -+ vmlal.u32 q9,d3,d3[1] -+ -+ vmlal.u32 q5,d3,d6[1] -+ vmlal.u32 q8,d0,d5[1] -+ vmlal.u32 q6,d5,d6[1] -+ vmlal.u32 q7,d7,d6[1] -+ vmlal.u32 q9,d1,d5[1] -+ -+ vmlal.u32 q8,d7,d8[1] -+ vmlal.u32 q5,d1,d8[1] -+ vmlal.u32 q6,d3,d8[1] -+ vmlal.u32 q7,d5,d8[1] -+ vmlal.u32 q9,d0,d7[1] -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ lazy reduction as discussed in "NEON crypto" by D.J. Bernstein -+ @ and P. Schwabe -+ @ -+ @ H0>>+H1>>+H2>>+H3>>+H4 -+ @ H3>>+H4>>*5+H0>>+H1 -+ @ -+ @ Trivia. 
-+ @ -+ @ Result of multiplication of n-bit number by m-bit number is -+ @ n+m bits wide. However! Even though 2^n is a n+1-bit number, -+ @ m-bit number multiplied by 2^n is still n+m bits wide. -+ @ -+ @ Sum of two n-bit numbers is n+1 bits wide, sum of three - n+2, -+ @ and so is sum of four. Sum of 2^m n-m-bit numbers and n-bit -+ @ one is n+1 bits wide. -+ @ -+ @ >>+ denotes Hnext += Hn>>26, Hn &= 0x3ffffff. This means that -+ @ H0, H2, H3 are guaranteed to be 26 bits wide, while H1 and H4 -+ @ can be 27. However! In cases when their width exceeds 26 bits -+ @ they are limited by 2^26+2^6. This in turn means that *sum* -+ @ of the products with these values can still be viewed as sum -+ @ of 52-bit numbers as long as the amount of addends is not a -+ @ power of 2. For example, -+ @ -+ @ H4 = H4*R0 + H3*R1 + H2*R2 + H1*R3 + H0 * R4, -+ @ -+ @ which can't be larger than 5 * (2^26 + 2^6) * (2^26 + 2^6), or -+ @ 5 * (2^52 + 2*2^32 + 2^12), which in turn is smaller than -+ @ 8 * (2^52) or 2^55. However, the value is then multiplied by -+ @ by 5, so we should be looking at 5 * 5 * (2^52 + 2^33 + 2^12), -+ @ which is less than 32 * (2^52) or 2^57. And when processing -+ @ data we are looking at triple as many addends... -+ @ -+ @ In key setup procedure pre-reduced H0 is limited by 5*4+1 and -+ @ 5*H4 - by 5*5 52-bit addends, or 57 bits. But when hashing the -+ @ input H0 is limited by (5*4+1)*3 addends, or 58 bits, while -+ @ 5*H4 by 5*5*3, or 59[!] bits. How is this relevant? vmlal.u32 -+ @ instruction accepts 2x32-bit input and writes 2x64-bit result. -+ @ This means that result of reduction have to be compressed upon -+ @ loop wrap-around. 
This can be done in the process of reduction -+ @ to minimize amount of instructions [as well as amount of -+ @ 128-bit instructions, which benefits low-end processors], but -+ @ one has to watch for H2 (which is narrower than H0) and 5*H4 -+ @ not being wider than 58 bits, so that result of right shift -+ @ by 26 bits fits in 32 bits. This is also useful on x86, -+ @ because it allows to use paddd in place for paddq, which -+ @ benefits Atom, where paddq is ridiculously slow. -+ -+ vshr.u64 q15,q8,#26 -+ vmovn.i64 d16,q8 -+ vshr.u64 q4,q5,#26 -+ vmovn.i64 d10,q5 -+ vadd.i64 q9,q9,q15 @ h3 -> h4 -+ vbic.i32 d16,#0xfc000000 @ &=0x03ffffff -+ vadd.i64 q6,q6,q4 @ h0 -> h1 -+ vbic.i32 d10,#0xfc000000 -+ -+ vshrn.u64 d30,q9,#26 -+ vmovn.i64 d18,q9 -+ vshr.u64 q4,q6,#26 -+ vmovn.i64 d12,q6 -+ vadd.i64 q7,q7,q4 @ h1 -> h2 -+ vbic.i32 d18,#0xfc000000 -+ vbic.i32 d12,#0xfc000000 -+ -+ vadd.i32 d10,d10,d30 -+ vshl.u32 d30,d30,#2 -+ vshrn.u64 d8,q7,#26 -+ vmovn.i64 d14,q7 -+ vadd.i32 d10,d10,d30 @ h4 -> h0 -+ vadd.i32 d16,d16,d8 @ h2 -> h3 -+ vbic.i32 d14,#0xfc000000 -+ -+ vshr.u32 d30,d10,#26 -+ vbic.i32 d10,#0xfc000000 -+ vshr.u32 d8,d16,#26 -+ vbic.i32 d16,#0xfc000000 -+ vadd.i32 d12,d12,d30 @ h0 -> h1 -+ vadd.i32 d18,d18,d8 @ h3 -> h4 -+ -+ subs r5,r5,#1 -+ beq .Lsquare_break_neon -+ -+ add r6,r0,#(48+0*9*4) -+ add r7,r0,#(48+1*9*4) -+ -+ vtrn.32 d0,d10 @ r^2:r^1 -+ vtrn.32 d3,d14 -+ vtrn.32 d5,d16 -+ vtrn.32 d1,d12 -+ vtrn.32 d7,d18 -+ -+ vshl.u32 d4,d3,#2 @ *5 -+ vshl.u32 d6,d5,#2 -+ vshl.u32 d2,d1,#2 -+ vshl.u32 d8,d7,#2 -+ vadd.i32 d4,d4,d3 -+ vadd.i32 d2,d2,d1 -+ vadd.i32 d6,d6,d5 -+ vadd.i32 d8,d8,d7 -+ -+ vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! -+ vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! -+ vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]! -+ vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]! 
-+ vst1.32 {d8[0]},[r6,:32] -+ vst1.32 {d8[1]},[r7,:32] -+ -+ b .Lsquare_neon -+ -+.align 4 -+.Lsquare_break_neon: -+ add r6,r0,#(48+2*4*9) -+ add r7,r0,#(48+3*4*9) -+ -+ vmov d0,d10 @ r^4:r^3 -+ vshl.u32 d2,d12,#2 @ *5 -+ vmov d1,d12 -+ vshl.u32 d4,d14,#2 -+ vmov d3,d14 -+ vshl.u32 d6,d16,#2 -+ vmov d5,d16 -+ vshl.u32 d8,d18,#2 -+ vmov d7,d18 -+ vadd.i32 d2,d2,d12 -+ vadd.i32 d4,d4,d14 -+ vadd.i32 d6,d6,d16 -+ vadd.i32 d8,d8,d18 -+ -+ vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! -+ vst4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! -+ vst4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]! -+ vst4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]! -+ vst1.32 {d8[0]},[r6] -+ vst1.32 {d8[1]},[r7] -+ -+.Lno_init_neon: -+ bx lr @ bx lr -+.size poly1305_init_neon,.-poly1305_init_neon -+ -+.type poly1305_blocks_neon,%function -+.align 5 -+poly1305_blocks_neon: -+.Lpoly1305_blocks_neon: -+ ldr ip,[r0,#36] @ is_base2_26 -+ -+ cmp r2,#64 -+ blo .Lpoly1305_blocks -+ -+ stmdb sp!,{r4-r7} -+ vstmdb sp!,{d8-d15} @ ABI specification says so -+ -+ tst ip,ip @ is_base2_26? 
-+ bne .Lbase2_26_neon -+ -+ stmdb sp!,{r1-r3,lr} -+ bl .Lpoly1305_init_neon -+ -+ ldr r4,[r0,#0] @ load hash value base 2^32 -+ ldr r5,[r0,#4] -+ ldr r6,[r0,#8] -+ ldr r7,[r0,#12] -+ ldr ip,[r0,#16] -+ -+ and r2,r4,#0x03ffffff @ base 2^32 -> base 2^26 -+ mov r3,r4,lsr#26 -+ veor d10,d10,d10 -+ mov r4,r5,lsr#20 -+ orr r3,r3,r5,lsl#6 -+ veor d12,d12,d12 -+ mov r5,r6,lsr#14 -+ orr r4,r4,r6,lsl#12 -+ veor d14,d14,d14 -+ mov r6,r7,lsr#8 -+ orr r5,r5,r7,lsl#18 -+ veor d16,d16,d16 -+ and r3,r3,#0x03ffffff -+ orr r6,r6,ip,lsl#24 -+ veor d18,d18,d18 -+ and r4,r4,#0x03ffffff -+ mov r1,#1 -+ and r5,r5,#0x03ffffff -+ str r1,[r0,#36] @ set is_base2_26 -+ -+ vmov.32 d10[0],r2 -+ vmov.32 d12[0],r3 -+ vmov.32 d14[0],r4 -+ vmov.32 d16[0],r5 -+ vmov.32 d18[0],r6 -+ adr r5,.Lzeros -+ -+ ldmia sp!,{r1-r3,lr} -+ b .Lhash_loaded -+ -+.align 4 -+.Lbase2_26_neon: -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ load hash value -+ -+ veor d10,d10,d10 -+ veor d12,d12,d12 -+ veor d14,d14,d14 -+ veor d16,d16,d16 -+ veor d18,d18,d18 -+ vld4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]! -+ adr r5,.Lzeros -+ vld1.32 {d18[0]},[r0] -+ sub r0,r0,#16 @ rewind -+ -+.Lhash_loaded: -+ add r4,r1,#32 -+ mov r3,r3,lsl#24 -+ tst r2,#31 -+ beq .Leven -+ -+ vld4.32 {d20[0],d22[0],d24[0],d26[0]},[r1]! 
-+ vmov.32 d28[0],r3 -+ sub r2,r2,#16 -+ add r4,r1,#32 -+ -+# ifdef __ARMEB__ -+ vrev32.8 q10,q10 -+ vrev32.8 q13,q13 -+ vrev32.8 q11,q11 -+ vrev32.8 q12,q12 -+# endif -+ vsri.u32 d28,d26,#8 @ base 2^32 -> base 2^26 -+ vshl.u32 d26,d26,#18 -+ -+ vsri.u32 d26,d24,#14 -+ vshl.u32 d24,d24,#12 -+ vadd.i32 d29,d28,d18 @ add hash value and move to #hi -+ -+ vbic.i32 d26,#0xfc000000 -+ vsri.u32 d24,d22,#20 -+ vshl.u32 d22,d22,#6 -+ -+ vbic.i32 d24,#0xfc000000 -+ vsri.u32 d22,d20,#26 -+ vadd.i32 d27,d26,d16 -+ -+ vbic.i32 d20,#0xfc000000 -+ vbic.i32 d22,#0xfc000000 -+ vadd.i32 d25,d24,d14 -+ -+ vadd.i32 d21,d20,d10 -+ vadd.i32 d23,d22,d12 -+ -+ mov r7,r5 -+ add r6,r0,#48 -+ -+ cmp r2,r2 -+ b .Long_tail -+ -+.align 4 -+.Leven: -+ subs r2,r2,#64 -+ it lo -+ movlo r4,r5 -+ -+ vmov.i32 q14,#1<<24 @ padbit, yes, always -+ vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1] -+ add r1,r1,#64 -+ vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0) -+ add r4,r4,#64 -+ itt hi -+ addhi r7,r0,#(48+1*9*4) -+ addhi r6,r0,#(48+3*9*4) -+ -+# ifdef __ARMEB__ -+ vrev32.8 q10,q10 -+ vrev32.8 q13,q13 -+ vrev32.8 q11,q11 -+ vrev32.8 q12,q12 -+# endif -+ vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26 -+ vshl.u32 q13,q13,#18 -+ -+ vsri.u32 q13,q12,#14 -+ vshl.u32 q12,q12,#12 -+ -+ vbic.i32 q13,#0xfc000000 -+ vsri.u32 q12,q11,#20 -+ vshl.u32 q11,q11,#6 -+ -+ vbic.i32 q12,#0xfc000000 -+ vsri.u32 q11,q10,#26 -+ -+ vbic.i32 q10,#0xfc000000 -+ vbic.i32 q11,#0xfc000000 -+ -+ bls .Lskip_loop -+ -+ vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^2 -+ vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4 -+ vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]! -+ vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]! 
-+ b .Loop_neon -+ -+.align 5 -+.Loop_neon: -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 -+ @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r -+ @ ___________________/ -+ @ ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 -+ @ ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r -+ @ ___________________/ ____________________/ -+ @ -+ @ Note that we start with inp[2:3]*r^2. This is because it -+ @ doesn't depend on reduction in previous iteration. -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 -+ @ d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 -+ @ d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 -+ @ d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 -+ @ d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ inp[2:3]*r^2 -+ -+ vadd.i32 d24,d24,d14 @ accumulate inp[0:1] -+ vmull.u32 q7,d25,d0[1] -+ vadd.i32 d20,d20,d10 -+ vmull.u32 q5,d21,d0[1] -+ vadd.i32 d26,d26,d16 -+ vmull.u32 q8,d27,d0[1] -+ vmlal.u32 q7,d23,d1[1] -+ vadd.i32 d22,d22,d12 -+ vmull.u32 q6,d23,d0[1] -+ -+ vadd.i32 d28,d28,d18 -+ vmull.u32 q9,d29,d0[1] -+ subs r2,r2,#64 -+ vmlal.u32 q5,d29,d2[1] -+ it lo -+ movlo r4,r5 -+ vmlal.u32 q8,d25,d1[1] -+ vld1.32 d8[1],[r7,:32] -+ vmlal.u32 q6,d21,d1[1] -+ vmlal.u32 q9,d27,d1[1] -+ -+ vmlal.u32 q5,d27,d4[1] -+ vmlal.u32 q8,d23,d3[1] -+ vmlal.u32 q9,d25,d3[1] -+ vmlal.u32 q6,d29,d4[1] -+ vmlal.u32 q7,d21,d3[1] -+ -+ vmlal.u32 q8,d21,d5[1] -+ vmlal.u32 q5,d25,d6[1] -+ vmlal.u32 q9,d23,d5[1] -+ vmlal.u32 q6,d27,d6[1] -+ vmlal.u32 q7,d29,d6[1] -+ -+ vmlal.u32 q8,d29,d8[1] -+ vmlal.u32 q5,d23,d8[1] -+ vmlal.u32 q9,d21,d7[1] -+ vmlal.u32 q6,d25,d8[1] -+ vmlal.u32 q7,d27,d8[1] -+ -+ vld4.32 {d21,d23,d25,d27},[r4] @ inp[2:3] (or 0) -+ add r4,r4,#64 -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ (hash+inp[0:1])*r^4 
and accumulate -+ -+ vmlal.u32 q8,d26,d0[0] -+ vmlal.u32 q5,d20,d0[0] -+ vmlal.u32 q9,d28,d0[0] -+ vmlal.u32 q6,d22,d0[0] -+ vmlal.u32 q7,d24,d0[0] -+ vld1.32 d8[0],[r6,:32] -+ -+ vmlal.u32 q8,d24,d1[0] -+ vmlal.u32 q5,d28,d2[0] -+ vmlal.u32 q9,d26,d1[0] -+ vmlal.u32 q6,d20,d1[0] -+ vmlal.u32 q7,d22,d1[0] -+ -+ vmlal.u32 q8,d22,d3[0] -+ vmlal.u32 q5,d26,d4[0] -+ vmlal.u32 q9,d24,d3[0] -+ vmlal.u32 q6,d28,d4[0] -+ vmlal.u32 q7,d20,d3[0] -+ -+ vmlal.u32 q8,d20,d5[0] -+ vmlal.u32 q5,d24,d6[0] -+ vmlal.u32 q9,d22,d5[0] -+ vmlal.u32 q6,d26,d6[0] -+ vmlal.u32 q8,d28,d8[0] -+ -+ vmlal.u32 q7,d28,d6[0] -+ vmlal.u32 q5,d22,d8[0] -+ vmlal.u32 q9,d20,d7[0] -+ vmov.i32 q14,#1<<24 @ padbit, yes, always -+ vmlal.u32 q6,d24,d8[0] -+ vmlal.u32 q7,d26,d8[0] -+ -+ vld4.32 {d20,d22,d24,d26},[r1] @ inp[0:1] -+ add r1,r1,#64 -+# ifdef __ARMEB__ -+ vrev32.8 q10,q10 -+ vrev32.8 q11,q11 -+ vrev32.8 q12,q12 -+ vrev32.8 q13,q13 -+# endif -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ lazy reduction interleaved with base 2^32 -> base 2^26 of -+ @ inp[0:3] previously loaded to q10-q13 and smashed to q10-q14. 
-+ -+ vshr.u64 q15,q8,#26 -+ vmovn.i64 d16,q8 -+ vshr.u64 q4,q5,#26 -+ vmovn.i64 d10,q5 -+ vadd.i64 q9,q9,q15 @ h3 -> h4 -+ vbic.i32 d16,#0xfc000000 -+ vsri.u32 q14,q13,#8 @ base 2^32 -> base 2^26 -+ vadd.i64 q6,q6,q4 @ h0 -> h1 -+ vshl.u32 q13,q13,#18 -+ vbic.i32 d10,#0xfc000000 -+ -+ vshrn.u64 d30,q9,#26 -+ vmovn.i64 d18,q9 -+ vshr.u64 q4,q6,#26 -+ vmovn.i64 d12,q6 -+ vadd.i64 q7,q7,q4 @ h1 -> h2 -+ vsri.u32 q13,q12,#14 -+ vbic.i32 d18,#0xfc000000 -+ vshl.u32 q12,q12,#12 -+ vbic.i32 d12,#0xfc000000 -+ -+ vadd.i32 d10,d10,d30 -+ vshl.u32 d30,d30,#2 -+ vbic.i32 q13,#0xfc000000 -+ vshrn.u64 d8,q7,#26 -+ vmovn.i64 d14,q7 -+ vaddl.u32 q5,d10,d30 @ h4 -> h0 [widen for a sec] -+ vsri.u32 q12,q11,#20 -+ vadd.i32 d16,d16,d8 @ h2 -> h3 -+ vshl.u32 q11,q11,#6 -+ vbic.i32 d14,#0xfc000000 -+ vbic.i32 q12,#0xfc000000 -+ -+ vshrn.u64 d30,q5,#26 @ re-narrow -+ vmovn.i64 d10,q5 -+ vsri.u32 q11,q10,#26 -+ vbic.i32 q10,#0xfc000000 -+ vshr.u32 d8,d16,#26 -+ vbic.i32 d16,#0xfc000000 -+ vbic.i32 d10,#0xfc000000 -+ vadd.i32 d12,d12,d30 @ h0 -> h1 -+ vadd.i32 d18,d18,d8 @ h3 -> h4 -+ vbic.i32 q11,#0xfc000000 -+ -+ bhi .Loop_neon -+ -+.Lskip_loop: -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 -+ -+ add r7,r0,#(48+0*9*4) -+ add r6,r0,#(48+1*9*4) -+ adds r2,r2,#32 -+ it ne -+ movne r2,#0 -+ bne .Long_tail -+ -+ vadd.i32 d25,d24,d14 @ add hash value and move to #hi -+ vadd.i32 d21,d20,d10 -+ vadd.i32 d27,d26,d16 -+ vadd.i32 d23,d22,d12 -+ vadd.i32 d29,d28,d18 -+ -+.Long_tail: -+ vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^1 -+ vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^2 -+ -+ vadd.i32 d24,d24,d14 @ can be redundant -+ vmull.u32 q7,d25,d0 -+ vadd.i32 d20,d20,d10 -+ vmull.u32 q5,d21,d0 -+ vadd.i32 d26,d26,d16 -+ vmull.u32 q8,d27,d0 -+ vadd.i32 d22,d22,d12 -+ vmull.u32 q6,d23,d0 -+ vadd.i32 d28,d28,d18 -+ vmull.u32 q9,d29,d0 -+ -+ vmlal.u32 q5,d29,d2 -+ vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]! 
-+ vmlal.u32 q8,d25,d1 -+ vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]! -+ vmlal.u32 q6,d21,d1 -+ vmlal.u32 q9,d27,d1 -+ vmlal.u32 q7,d23,d1 -+ -+ vmlal.u32 q8,d23,d3 -+ vld1.32 d8[1],[r7,:32] -+ vmlal.u32 q5,d27,d4 -+ vld1.32 d8[0],[r6,:32] -+ vmlal.u32 q9,d25,d3 -+ vmlal.u32 q6,d29,d4 -+ vmlal.u32 q7,d21,d3 -+ -+ vmlal.u32 q8,d21,d5 -+ it ne -+ addne r7,r0,#(48+2*9*4) -+ vmlal.u32 q5,d25,d6 -+ it ne -+ addne r6,r0,#(48+3*9*4) -+ vmlal.u32 q9,d23,d5 -+ vmlal.u32 q6,d27,d6 -+ vmlal.u32 q7,d29,d6 -+ -+ vmlal.u32 q8,d29,d8 -+ vorn q0,q0,q0 @ all-ones, can be redundant -+ vmlal.u32 q5,d23,d8 -+ vshr.u64 q0,q0,#38 -+ vmlal.u32 q9,d21,d7 -+ vmlal.u32 q6,d25,d8 -+ vmlal.u32 q7,d27,d8 -+ -+ beq .Lshort_tail -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ (hash+inp[0:1])*r^4:r^3 and accumulate -+ -+ vld4.32 {d0[1],d1[1],d2[1],d3[1]},[r7]! @ load r^3 -+ vld4.32 {d0[0],d1[0],d2[0],d3[0]},[r6]! @ load r^4 -+ -+ vmlal.u32 q7,d24,d0 -+ vmlal.u32 q5,d20,d0 -+ vmlal.u32 q8,d26,d0 -+ vmlal.u32 q6,d22,d0 -+ vmlal.u32 q9,d28,d0 -+ -+ vmlal.u32 q5,d28,d2 -+ vld4.32 {d4[1],d5[1],d6[1],d7[1]},[r7]! -+ vmlal.u32 q8,d24,d1 -+ vld4.32 {d4[0],d5[0],d6[0],d7[0]},[r6]! 
-+ vmlal.u32 q6,d20,d1 -+ vmlal.u32 q9,d26,d1 -+ vmlal.u32 q7,d22,d1 -+ -+ vmlal.u32 q8,d22,d3 -+ vld1.32 d8[1],[r7,:32] -+ vmlal.u32 q5,d26,d4 -+ vld1.32 d8[0],[r6,:32] -+ vmlal.u32 q9,d24,d3 -+ vmlal.u32 q6,d28,d4 -+ vmlal.u32 q7,d20,d3 -+ -+ vmlal.u32 q8,d20,d5 -+ vmlal.u32 q5,d24,d6 -+ vmlal.u32 q9,d22,d5 -+ vmlal.u32 q6,d26,d6 -+ vmlal.u32 q7,d28,d6 -+ -+ vmlal.u32 q8,d28,d8 -+ vorn q0,q0,q0 @ all-ones -+ vmlal.u32 q5,d22,d8 -+ vshr.u64 q0,q0,#38 -+ vmlal.u32 q9,d20,d7 -+ vmlal.u32 q6,d24,d8 -+ vmlal.u32 q7,d26,d8 -+ -+.Lshort_tail: -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ horizontal addition -+ -+ vadd.i64 d16,d16,d17 -+ vadd.i64 d10,d10,d11 -+ vadd.i64 d18,d18,d19 -+ vadd.i64 d12,d12,d13 -+ vadd.i64 d14,d14,d15 -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ lazy reduction, but without narrowing -+ -+ vshr.u64 q15,q8,#26 -+ vand.i64 q8,q8,q0 -+ vshr.u64 q4,q5,#26 -+ vand.i64 q5,q5,q0 -+ vadd.i64 q9,q9,q15 @ h3 -> h4 -+ vadd.i64 q6,q6,q4 @ h0 -> h1 -+ -+ vshr.u64 q15,q9,#26 -+ vand.i64 q9,q9,q0 -+ vshr.u64 q4,q6,#26 -+ vand.i64 q6,q6,q0 -+ vadd.i64 q7,q7,q4 @ h1 -> h2 -+ -+ vadd.i64 q5,q5,q15 -+ vshl.u64 q15,q15,#2 -+ vshr.u64 q4,q7,#26 -+ vand.i64 q7,q7,q0 -+ vadd.i64 q5,q5,q15 @ h4 -> h0 -+ vadd.i64 q8,q8,q4 @ h2 -> h3 -+ -+ vshr.u64 q15,q5,#26 -+ vand.i64 q5,q5,q0 -+ vshr.u64 q4,q8,#26 -+ vand.i64 q8,q8,q0 -+ vadd.i64 q6,q6,q15 @ h0 -> h1 -+ vadd.i64 q9,q9,q4 @ h3 -> h4 -+ -+ cmp r2,#0 -+ bne .Leven -+ -+ @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ -+ @ store hash value -+ -+ vst4.32 {d10[0],d12[0],d14[0],d16[0]},[r0]! 
-+ vst1.32 {d18[0]},[r0] -+ -+ vldmia sp!,{d8-d15} @ epilogue -+ ldmia sp!,{r4-r7} -+ bx lr @ bx lr -+.size poly1305_blocks_neon,.-poly1305_blocks_neon -+ -+.align 5 -+.Lzeros: -+.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 -+#ifndef __KERNEL__ -+.LOPENSSL_armcap: -+# ifdef _WIN32 -+.word OPENSSL_armcap_P -+# else -+.word OPENSSL_armcap_P-.Lpoly1305_init -+# endif -+.comm OPENSSL_armcap_P,4,4 -+.hidden OPENSSL_armcap_P -+#endif -+#endif -+.asciz "Poly1305 for ARMv4/NEON, CRYPTOGAMS by @dot-asm" -+.align 2 ---- /dev/null -+++ b/arch/arm/crypto/poly1305-glue.c -@@ -0,0 +1,276 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * OpenSSL/Cryptogams accelerated Poly1305 transform for ARM -+ * -+ * Copyright (C) 2019 Linaro Ltd. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+void poly1305_init_arm(void *state, const u8 *key); -+void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit); -+void poly1305_emit_arm(void *state, __le32 *digest, const u32 *nonce); -+ -+void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit) -+{ -+} -+ -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); -+ -+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) -+{ -+ poly1305_init_arm(&dctx->h, key); -+ dctx->s[0] = get_unaligned_le32(key + 16); -+ dctx->s[1] = get_unaligned_le32(key + 20); -+ dctx->s[2] = get_unaligned_le32(key + 24); -+ dctx->s[3] = get_unaligned_le32(key + 28); -+ dctx->buflen = 0; -+} -+EXPORT_SYMBOL(poly1305_init_arch); -+ -+static int arm_poly1305_init(struct shash_desc *desc) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ dctx->buflen = 0; -+ dctx->rset = 0; -+ dctx->sset = false; -+ -+ return 0; -+} -+ -+static void arm_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, -+ u32 len, u32 hibit, bool do_neon) -+{ -+ if (unlikely(!dctx->sset)) { -+ if (!dctx->rset) { -+ 
poly1305_init_arm(&dctx->h, src); -+ src += POLY1305_BLOCK_SIZE; -+ len -= POLY1305_BLOCK_SIZE; -+ dctx->rset = 1; -+ } -+ if (len >= POLY1305_BLOCK_SIZE) { -+ dctx->s[0] = get_unaligned_le32(src + 0); -+ dctx->s[1] = get_unaligned_le32(src + 4); -+ dctx->s[2] = get_unaligned_le32(src + 8); -+ dctx->s[3] = get_unaligned_le32(src + 12); -+ src += POLY1305_BLOCK_SIZE; -+ len -= POLY1305_BLOCK_SIZE; -+ dctx->sset = true; -+ } -+ if (len < POLY1305_BLOCK_SIZE) -+ return; -+ } -+ -+ len &= ~(POLY1305_BLOCK_SIZE - 1); -+ -+ if (static_branch_likely(&have_neon) && likely(do_neon)) -+ poly1305_blocks_neon(&dctx->h, src, len, hibit); -+ else -+ poly1305_blocks_arm(&dctx->h, src, len, hibit); -+} -+ -+static void arm_poly1305_do_update(struct poly1305_desc_ctx *dctx, -+ const u8 *src, u32 len, bool do_neon) -+{ -+ if (unlikely(dctx->buflen)) { -+ u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen); -+ -+ memcpy(dctx->buf + dctx->buflen, src, bytes); -+ src += bytes; -+ len -= bytes; -+ dctx->buflen += bytes; -+ -+ if (dctx->buflen == POLY1305_BLOCK_SIZE) { -+ arm_poly1305_blocks(dctx, dctx->buf, -+ POLY1305_BLOCK_SIZE, 1, false); -+ dctx->buflen = 0; -+ } -+ } -+ -+ if (likely(len >= POLY1305_BLOCK_SIZE)) { -+ arm_poly1305_blocks(dctx, src, len, 1, do_neon); -+ src += round_down(len, POLY1305_BLOCK_SIZE); -+ len %= POLY1305_BLOCK_SIZE; -+ } -+ -+ if (unlikely(len)) { -+ dctx->buflen = len; -+ memcpy(dctx->buf, src, len); -+ } -+} -+ -+static int arm_poly1305_update(struct shash_desc *desc, -+ const u8 *src, unsigned int srclen) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ arm_poly1305_do_update(dctx, src, srclen, false); -+ return 0; -+} -+ -+static int __maybe_unused arm_poly1305_update_neon(struct shash_desc *desc, -+ const u8 *src, -+ unsigned int srclen) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ bool do_neon = crypto_simd_usable() && srclen > 128; -+ -+ if (static_branch_likely(&have_neon) && do_neon) -+ 
kernel_neon_begin(); -+ arm_poly1305_do_update(dctx, src, srclen, do_neon); -+ if (static_branch_likely(&have_neon) && do_neon) -+ kernel_neon_end(); -+ return 0; -+} -+ -+void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, -+ unsigned int nbytes) -+{ -+ bool do_neon = IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && -+ crypto_simd_usable(); -+ -+ if (unlikely(dctx->buflen)) { -+ u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen); -+ -+ memcpy(dctx->buf + dctx->buflen, src, bytes); -+ src += bytes; -+ nbytes -= bytes; -+ dctx->buflen += bytes; -+ -+ if (dctx->buflen == POLY1305_BLOCK_SIZE) { -+ poly1305_blocks_arm(&dctx->h, dctx->buf, -+ POLY1305_BLOCK_SIZE, 1); -+ dctx->buflen = 0; -+ } -+ } -+ -+ if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { -+ unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); -+ -+ if (static_branch_likely(&have_neon) && do_neon) { -+ kernel_neon_begin(); -+ poly1305_blocks_neon(&dctx->h, src, len, 1); -+ kernel_neon_end(); -+ } else { -+ poly1305_blocks_arm(&dctx->h, src, len, 1); -+ } -+ src += len; -+ nbytes %= POLY1305_BLOCK_SIZE; -+ } -+ -+ if (unlikely(nbytes)) { -+ dctx->buflen = nbytes; -+ memcpy(dctx->buf, src, nbytes); -+ } -+} -+EXPORT_SYMBOL(poly1305_update_arch); -+ -+void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) -+{ -+ __le32 digest[4]; -+ u64 f = 0; -+ -+ if (unlikely(dctx->buflen)) { -+ dctx->buf[dctx->buflen++] = 1; -+ memset(dctx->buf + dctx->buflen, 0, -+ POLY1305_BLOCK_SIZE - dctx->buflen); -+ poly1305_blocks_arm(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); -+ } -+ -+ poly1305_emit_arm(&dctx->h, digest, dctx->s); -+ -+ /* mac = (h + s) % (2^128) */ -+ f = (f >> 32) + le32_to_cpu(digest[0]); -+ put_unaligned_le32(f, dst); -+ f = (f >> 32) + le32_to_cpu(digest[1]); -+ put_unaligned_le32(f, dst + 4); -+ f = (f >> 32) + le32_to_cpu(digest[2]); -+ put_unaligned_le32(f, dst + 8); -+ f = (f >> 32) + le32_to_cpu(digest[3]); -+ put_unaligned_le32(f, dst + 12); -+ -+ *dctx = (struct 
poly1305_desc_ctx){}; -+} -+EXPORT_SYMBOL(poly1305_final_arch); -+ -+static int arm_poly1305_final(struct shash_desc *desc, u8 *dst) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ if (unlikely(!dctx->sset)) -+ return -ENOKEY; -+ -+ poly1305_final_arch(dctx, dst); -+ return 0; -+} -+ -+static struct shash_alg arm_poly1305_algs[] = {{ -+ .init = arm_poly1305_init, -+ .update = arm_poly1305_update, -+ .final = arm_poly1305_final, -+ .digestsize = POLY1305_DIGEST_SIZE, -+ .descsize = sizeof(struct poly1305_desc_ctx), -+ -+ .base.cra_name = "poly1305", -+ .base.cra_driver_name = "poly1305-arm", -+ .base.cra_priority = 150, -+ .base.cra_blocksize = POLY1305_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+#ifdef CONFIG_KERNEL_MODE_NEON -+}, { -+ .init = arm_poly1305_init, -+ .update = arm_poly1305_update_neon, -+ .final = arm_poly1305_final, -+ .digestsize = POLY1305_DIGEST_SIZE, -+ .descsize = sizeof(struct poly1305_desc_ctx), -+ -+ .base.cra_name = "poly1305", -+ .base.cra_driver_name = "poly1305-neon", -+ .base.cra_priority = 200, -+ .base.cra_blocksize = POLY1305_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+#endif -+}}; -+ -+static int __init arm_poly1305_mod_init(void) -+{ -+ if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && -+ (elf_hwcap & HWCAP_NEON)) -+ static_branch_enable(&have_neon); -+ else -+ /* register only the first entry */ -+ return crypto_register_shash(&arm_poly1305_algs[0]); -+ -+ return crypto_register_shashes(arm_poly1305_algs, -+ ARRAY_SIZE(arm_poly1305_algs)); -+} -+ -+static void __exit arm_poly1305_mod_exit(void) -+{ -+ if (!static_branch_likely(&have_neon)) { -+ crypto_unregister_shash(&arm_poly1305_algs[0]); -+ return; -+ } -+ crypto_unregister_shashes(arm_poly1305_algs, -+ ARRAY_SIZE(arm_poly1305_algs)); -+} -+ -+module_init(arm_poly1305_mod_init); -+module_exit(arm_poly1305_mod_exit); -+ -+MODULE_LICENSE("GPL v2"); -+MODULE_ALIAS_CRYPTO("poly1305"); -+MODULE_ALIAS_CRYPTO("poly1305-arm"); 
-+MODULE_ALIAS_CRYPTO("poly1305-neon"); ---- a/lib/crypto/Kconfig -+++ b/lib/crypto/Kconfig -@@ -40,7 +40,7 @@ config CRYPTO_LIB_DES - config CRYPTO_LIB_POLY1305_RSIZE - int - default 4 if X86_64 -- default 9 if ARM64 -+ default 9 if ARM || ARM64 - default 1 - - config CRYPTO_ARCH_HAVE_LIB_POLY1305 diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0020-crypto-mips-poly1305-incorporate-OpenSSL-CRYPTOGAMS-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0020-crypto-mips-poly1305-incorporate-OpenSSL-CRYPTOGAMS-.patch deleted file mode 100644 index 272e1797d..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0020-crypto-mips-poly1305-incorporate-OpenSSL-CRYPTOGAMS-.patch +++ /dev/null @@ -1,1563 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:26 +0100 -Subject: [PATCH] crypto: mips/poly1305 - incorporate OpenSSL/CRYPTOGAMS - optimized implementation -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit a11d055e7a64ac34a5e99b6fe731299449cbcd58 upstream. - -This is a straight import of the OpenSSL/CRYPTOGAMS Poly1305 implementation for -MIPS authored by Andy Polyakov, a prior 64-bit only version of which has been -contributed by him to the OpenSSL project. The file 'poly1305-mips.pl' is taken -straight from this upstream GitHub repository [0] at commit -d22ade312a7af958ec955620b0d241cf42c37feb, and already contains all the changes -required to build it as part of a Linux kernel module. - -[0] https://github.com/dot-asm/cryptogams - -Co-developed-by: Andy Polyakov -Signed-off-by: Andy Polyakov -Co-developed-by: René van Dorst -Signed-off-by: René van Dorst -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/mips/crypto/Makefile | 14 + - arch/mips/crypto/poly1305-glue.c | 203 +++++ - arch/mips/crypto/poly1305-mips.pl | 1273 +++++++++++++++++++++++++++++ - crypto/Kconfig | 5 + - lib/crypto/Kconfig | 1 + - 5 files changed, 1496 insertions(+) - create mode 100644 arch/mips/crypto/poly1305-glue.c - create mode 100644 arch/mips/crypto/poly1305-mips.pl - ---- a/arch/mips/crypto/Makefile -+++ b/arch/mips/crypto/Makefile -@@ -8,3 +8,17 @@ obj-$(CONFIG_CRYPTO_CRC32_MIPS) += crc32 - obj-$(CONFIG_CRYPTO_CHACHA_MIPS) += chacha-mips.o - chacha-mips-y := chacha-core.o chacha-glue.o - AFLAGS_chacha-core.o += -O2 # needed to fill branch delay slots -+ -+obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o -+poly1305-mips-y := poly1305-core.o poly1305-glue.o -+ -+perlasm-flavour-$(CONFIG_CPU_MIPS32) := o32 -+perlasm-flavour-$(CONFIG_CPU_MIPS64) := 64 -+ -+quiet_cmd_perlasm = PERLASM $@ -+ cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@) -+ -+$(obj)/poly1305-core.S: $(src)/poly1305-mips.pl FORCE -+ $(call if_changed,perlasm) -+ -+targets += poly1305-core.S ---- /dev/null -+++ b/arch/mips/crypto/poly1305-glue.c -@@ -0,0 +1,203 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * OpenSSL/Cryptogams accelerated Poly1305 transform for MIPS -+ * -+ * Copyright (C) 2019 Linaro Ltd. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+asmlinkage void poly1305_init_mips(void *state, const u8 *key); -+asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit); -+asmlinkage void poly1305_emit_mips(void *state, __le32 *digest, const u32 *nonce); -+ -+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) -+{ -+ poly1305_init_mips(&dctx->h, key); -+ dctx->s[0] = get_unaligned_le32(key + 16); -+ dctx->s[1] = get_unaligned_le32(key + 20); -+ dctx->s[2] = get_unaligned_le32(key + 24); -+ dctx->s[3] = get_unaligned_le32(key + 28); -+ dctx->buflen = 0; -+} -+EXPORT_SYMBOL(poly1305_init_arch); -+ -+static int mips_poly1305_init(struct shash_desc *desc) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ dctx->buflen = 0; -+ dctx->rset = 0; -+ dctx->sset = false; -+ -+ return 0; -+} -+ -+static void mips_poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, -+ u32 len, u32 hibit) -+{ -+ if (unlikely(!dctx->sset)) { -+ if (!dctx->rset) { -+ poly1305_init_mips(&dctx->h, src); -+ src += POLY1305_BLOCK_SIZE; -+ len -= POLY1305_BLOCK_SIZE; -+ dctx->rset = 1; -+ } -+ if (len >= POLY1305_BLOCK_SIZE) { -+ dctx->s[0] = get_unaligned_le32(src + 0); -+ dctx->s[1] = get_unaligned_le32(src + 4); -+ dctx->s[2] = get_unaligned_le32(src + 8); -+ dctx->s[3] = get_unaligned_le32(src + 12); -+ src += POLY1305_BLOCK_SIZE; -+ len -= POLY1305_BLOCK_SIZE; -+ dctx->sset = true; -+ } -+ if (len < POLY1305_BLOCK_SIZE) -+ return; -+ } -+ -+ len &= ~(POLY1305_BLOCK_SIZE - 1); -+ -+ poly1305_blocks_mips(&dctx->h, src, len, hibit); -+} -+ -+static int mips_poly1305_update(struct shash_desc *desc, const u8 *src, -+ unsigned int len) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ if (unlikely(dctx->buflen)) { -+ u32 bytes = min(len, POLY1305_BLOCK_SIZE - dctx->buflen); -+ -+ memcpy(dctx->buf + dctx->buflen, src, bytes); -+ src += bytes; -+ len -= bytes; -+ 
dctx->buflen += bytes; -+ -+ if (dctx->buflen == POLY1305_BLOCK_SIZE) { -+ mips_poly1305_blocks(dctx, dctx->buf, POLY1305_BLOCK_SIZE, 1); -+ dctx->buflen = 0; -+ } -+ } -+ -+ if (likely(len >= POLY1305_BLOCK_SIZE)) { -+ mips_poly1305_blocks(dctx, src, len, 1); -+ src += round_down(len, POLY1305_BLOCK_SIZE); -+ len %= POLY1305_BLOCK_SIZE; -+ } -+ -+ if (unlikely(len)) { -+ dctx->buflen = len; -+ memcpy(dctx->buf, src, len); -+ } -+ return 0; -+} -+ -+void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, -+ unsigned int nbytes) -+{ -+ if (unlikely(dctx->buflen)) { -+ u32 bytes = min(nbytes, POLY1305_BLOCK_SIZE - dctx->buflen); -+ -+ memcpy(dctx->buf + dctx->buflen, src, bytes); -+ src += bytes; -+ nbytes -= bytes; -+ dctx->buflen += bytes; -+ -+ if (dctx->buflen == POLY1305_BLOCK_SIZE) { -+ poly1305_blocks_mips(&dctx->h, dctx->buf, -+ POLY1305_BLOCK_SIZE, 1); -+ dctx->buflen = 0; -+ } -+ } -+ -+ if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { -+ unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); -+ -+ poly1305_blocks_mips(&dctx->h, src, len, 1); -+ src += len; -+ nbytes %= POLY1305_BLOCK_SIZE; -+ } -+ -+ if (unlikely(nbytes)) { -+ dctx->buflen = nbytes; -+ memcpy(dctx->buf, src, nbytes); -+ } -+} -+EXPORT_SYMBOL(poly1305_update_arch); -+ -+void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) -+{ -+ __le32 digest[4]; -+ u64 f = 0; -+ -+ if (unlikely(dctx->buflen)) { -+ dctx->buf[dctx->buflen++] = 1; -+ memset(dctx->buf + dctx->buflen, 0, -+ POLY1305_BLOCK_SIZE - dctx->buflen); -+ poly1305_blocks_mips(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); -+ } -+ -+ poly1305_emit_mips(&dctx->h, digest, dctx->s); -+ -+ /* mac = (h + s) % (2^128) */ -+ f = (f >> 32) + le32_to_cpu(digest[0]); -+ put_unaligned_le32(f, dst); -+ f = (f >> 32) + le32_to_cpu(digest[1]); -+ put_unaligned_le32(f, dst + 4); -+ f = (f >> 32) + le32_to_cpu(digest[2]); -+ put_unaligned_le32(f, dst + 8); -+ f = (f >> 32) + le32_to_cpu(digest[3]); -+ put_unaligned_le32(f, 
dst + 12); -+ -+ *dctx = (struct poly1305_desc_ctx){}; -+} -+EXPORT_SYMBOL(poly1305_final_arch); -+ -+static int mips_poly1305_final(struct shash_desc *desc, u8 *dst) -+{ -+ struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); -+ -+ if (unlikely(!dctx->sset)) -+ return -ENOKEY; -+ -+ poly1305_final_arch(dctx, dst); -+ return 0; -+} -+ -+static struct shash_alg mips_poly1305_alg = { -+ .init = mips_poly1305_init, -+ .update = mips_poly1305_update, -+ .final = mips_poly1305_final, -+ .digestsize = POLY1305_DIGEST_SIZE, -+ .descsize = sizeof(struct poly1305_desc_ctx), -+ -+ .base.cra_name = "poly1305", -+ .base.cra_driver_name = "poly1305-mips", -+ .base.cra_priority = 200, -+ .base.cra_blocksize = POLY1305_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+}; -+ -+static int __init mips_poly1305_mod_init(void) -+{ -+ return crypto_register_shash(&mips_poly1305_alg); -+} -+ -+static void __exit mips_poly1305_mod_exit(void) -+{ -+ crypto_unregister_shash(&mips_poly1305_alg); -+} -+ -+module_init(mips_poly1305_mod_init); -+module_exit(mips_poly1305_mod_exit); -+ -+MODULE_LICENSE("GPL v2"); -+MODULE_ALIAS_CRYPTO("poly1305"); -+MODULE_ALIAS_CRYPTO("poly1305-mips"); ---- /dev/null -+++ b/arch/mips/crypto/poly1305-mips.pl -@@ -0,0 +1,1273 @@ -+#!/usr/bin/env perl -+# SPDX-License-Identifier: GPL-1.0+ OR BSD-3-Clause -+# -+# ==================================================================== -+# Written by Andy Polyakov, @dot-asm, originally for the OpenSSL -+# project. -+# ==================================================================== -+ -+# Poly1305 hash for MIPS. -+# -+# May 2016 -+# -+# Numbers are cycles per processed byte with poly1305_blocks alone. -+# -+# IALU/gcc -+# R1x000 ~5.5/+130% (big-endian) -+# Octeon II 2.50/+70% (little-endian) -+# -+# March 2019 -+# -+# Add 32-bit code path. -+# -+# October 2019 -+# -+# Modulo-scheduling reduction allows to omit dependency chain at the -+# end of inner loop and improve performance. 
Also optimize MIPS32R2 -+# code path for MIPS 1004K core. Per René von Dorst's suggestions. -+# -+# IALU/gcc -+# R1x000 ~9.8/? (big-endian) -+# Octeon II 3.65/+140% (little-endian) -+# MT7621/1004K 4.75/? (little-endian) -+# -+###################################################################### -+# There is a number of MIPS ABI in use, O32 and N32/64 are most -+# widely used. Then there is a new contender: NUBI. It appears that if -+# one picks the latter, it's possible to arrange code in ABI neutral -+# manner. Therefore let's stick to NUBI register layout: -+# -+($zero,$at,$t0,$t1,$t2)=map("\$$_",(0..2,24,25)); -+($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); -+($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8,$s9,$s10,$s11)=map("\$$_",(12..23)); -+($gp,$tp,$sp,$fp,$ra)=map("\$$_",(3,28..31)); -+# -+# The return value is placed in $a0. Following coding rules facilitate -+# interoperability: -+# -+# - never ever touch $tp, "thread pointer", former $gp [o32 can be -+# excluded from the rule, because it's specified volatile]; -+# - copy return value to $t0, former $v0 [or to $a0 if you're adapting -+# old code]; -+# - on O32 populate $a4-$a7 with 'lw $aN,4*N($sp)' if necessary; -+# -+# For reference here is register layout for N32/64 MIPS ABIs: -+# -+# ($zero,$at,$v0,$v1)=map("\$$_",(0..3)); -+# ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$$_",(4..11)); -+# ($t0,$t1,$t2,$t3,$t8,$t9)=map("\$$_",(12..15,24,25)); -+# ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7)=map("\$$_",(16..23)); -+# ($gp,$sp,$fp,$ra)=map("\$$_",(28..31)); -+# -+# -+# -+###################################################################### -+ -+$flavour = shift || "64"; # supported flavours are o32,n32,64,nubi32,nubi64 -+ -+$v0 = ($flavour =~ /nubi/i) ? 
$a0 : $t0; -+ -+if ($flavour =~ /64|n32/i) {{{ -+###################################################################### -+# 64-bit code path -+# -+ -+my ($ctx,$inp,$len,$padbit) = ($a0,$a1,$a2,$a3); -+my ($in0,$in1,$tmp0,$tmp1,$tmp2,$tmp3,$tmp4) = ($a4,$a5,$a6,$a7,$at,$t0,$t1); -+ -+$code.=<<___; -+#if (defined(_MIPS_ARCH_MIPS64R3) || defined(_MIPS_ARCH_MIPS64R5) || \\ -+ defined(_MIPS_ARCH_MIPS64R6)) \\ -+ && !defined(_MIPS_ARCH_MIPS64R2) -+# define _MIPS_ARCH_MIPS64R2 -+#endif -+ -+#if defined(_MIPS_ARCH_MIPS64R6) -+# define dmultu(rs,rt) -+# define mflo(rd,rs,rt) dmulu rd,rs,rt -+# define mfhi(rd,rs,rt) dmuhu rd,rs,rt -+#else -+# define dmultu(rs,rt) dmultu rs,rt -+# define mflo(rd,rs,rt) mflo rd -+# define mfhi(rd,rs,rt) mfhi rd -+#endif -+ -+#ifdef __KERNEL__ -+# define poly1305_init poly1305_init_mips -+# define poly1305_blocks poly1305_blocks_mips -+# define poly1305_emit poly1305_emit_mips -+#endif -+ -+#if defined(__MIPSEB__) && !defined(MIPSEB) -+# define MIPSEB -+#endif -+ -+#ifdef MIPSEB -+# define MSB 0 -+# define LSB 7 -+#else -+# define MSB 7 -+# define LSB 0 -+#endif -+ -+.text -+.set noat -+.set noreorder -+ -+.align 5 -+.globl poly1305_init -+.ent poly1305_init -+poly1305_init: -+ .frame $sp,0,$ra -+ .set reorder -+ -+ sd $zero,0($ctx) -+ sd $zero,8($ctx) -+ sd $zero,16($ctx) -+ -+ beqz $inp,.Lno_key -+ -+#if defined(_MIPS_ARCH_MIPS64R6) -+ andi $tmp0,$inp,7 # $inp % 8 -+ dsubu $inp,$inp,$tmp0 # align $inp -+ sll $tmp0,$tmp0,3 # byte to bit offset -+ ld $in0,0($inp) -+ ld $in1,8($inp) -+ beqz $tmp0,.Laligned_key -+ ld $tmp2,16($inp) -+ -+ subu $tmp1,$zero,$tmp0 -+# ifdef MIPSEB -+ dsllv $in0,$in0,$tmp0 -+ dsrlv $tmp3,$in1,$tmp1 -+ dsllv $in1,$in1,$tmp0 -+ dsrlv $tmp2,$tmp2,$tmp1 -+# else -+ dsrlv $in0,$in0,$tmp0 -+ dsllv $tmp3,$in1,$tmp1 -+ dsrlv $in1,$in1,$tmp0 -+ dsllv $tmp2,$tmp2,$tmp1 -+# endif -+ or $in0,$in0,$tmp3 -+ or $in1,$in1,$tmp2 -+.Laligned_key: -+#else -+ ldl $in0,0+MSB($inp) -+ ldl $in1,8+MSB($inp) -+ ldr $in0,0+LSB($inp) -+ ldr 
$in1,8+LSB($inp) -+#endif -+#ifdef MIPSEB -+# if defined(_MIPS_ARCH_MIPS64R2) -+ dsbh $in0,$in0 # byte swap -+ dsbh $in1,$in1 -+ dshd $in0,$in0 -+ dshd $in1,$in1 -+# else -+ ori $tmp0,$zero,0xFF -+ dsll $tmp2,$tmp0,32 -+ or $tmp0,$tmp2 # 0x000000FF000000FF -+ -+ and $tmp1,$in0,$tmp0 # byte swap -+ and $tmp3,$in1,$tmp0 -+ dsrl $tmp2,$in0,24 -+ dsrl $tmp4,$in1,24 -+ dsll $tmp1,24 -+ dsll $tmp3,24 -+ and $tmp2,$tmp0 -+ and $tmp4,$tmp0 -+ dsll $tmp0,8 # 0x0000FF000000FF00 -+ or $tmp1,$tmp2 -+ or $tmp3,$tmp4 -+ and $tmp2,$in0,$tmp0 -+ and $tmp4,$in1,$tmp0 -+ dsrl $in0,8 -+ dsrl $in1,8 -+ dsll $tmp2,8 -+ dsll $tmp4,8 -+ and $in0,$tmp0 -+ and $in1,$tmp0 -+ or $tmp1,$tmp2 -+ or $tmp3,$tmp4 -+ or $in0,$tmp1 -+ or $in1,$tmp3 -+ dsrl $tmp1,$in0,32 -+ dsrl $tmp3,$in1,32 -+ dsll $in0,32 -+ dsll $in1,32 -+ or $in0,$tmp1 -+ or $in1,$tmp3 -+# endif -+#endif -+ li $tmp0,1 -+ dsll $tmp0,32 # 0x0000000100000000 -+ daddiu $tmp0,-63 # 0x00000000ffffffc1 -+ dsll $tmp0,28 # 0x0ffffffc10000000 -+ daddiu $tmp0,-1 # 0x0ffffffc0fffffff -+ -+ and $in0,$tmp0 -+ daddiu $tmp0,-3 # 0x0ffffffc0ffffffc -+ and $in1,$tmp0 -+ -+ sd $in0,24($ctx) -+ dsrl $tmp0,$in1,2 -+ sd $in1,32($ctx) -+ daddu $tmp0,$in1 # s1 = r1 + (r1 >> 2) -+ sd $tmp0,40($ctx) -+ -+.Lno_key: -+ li $v0,0 # return 0 -+ jr $ra -+.end poly1305_init -+___ -+{ -+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? 
"0x0003f000" : "0x00030000"; -+ -+my ($h0,$h1,$h2,$r0,$r1,$rs1,$d0,$d1,$d2) = -+ ($s0,$s1,$s2,$s3,$s4,$s5,$in0,$in1,$t2); -+my ($shr,$shl) = ($s6,$s7); # used on R6 -+ -+$code.=<<___; -+.align 5 -+.globl poly1305_blocks -+.ent poly1305_blocks -+poly1305_blocks: -+ .set noreorder -+ dsrl $len,4 # number of complete blocks -+ bnez $len,poly1305_blocks_internal -+ nop -+ jr $ra -+ nop -+.end poly1305_blocks -+ -+.align 5 -+.ent poly1305_blocks_internal -+poly1305_blocks_internal: -+ .set noreorder -+#if defined(_MIPS_ARCH_MIPS64R6) -+ .frame $sp,8*8,$ra -+ .mask $SAVED_REGS_MASK|0x000c0000,-8 -+ dsubu $sp,8*8 -+ sd $s7,56($sp) -+ sd $s6,48($sp) -+#else -+ .frame $sp,6*8,$ra -+ .mask $SAVED_REGS_MASK,-8 -+ dsubu $sp,6*8 -+#endif -+ sd $s5,40($sp) -+ sd $s4,32($sp) -+___ -+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue -+ sd $s3,24($sp) -+ sd $s2,16($sp) -+ sd $s1,8($sp) -+ sd $s0,0($sp) -+___ -+$code.=<<___; -+ .set reorder -+ -+#if defined(_MIPS_ARCH_MIPS64R6) -+ andi $shr,$inp,7 -+ dsubu $inp,$inp,$shr # align $inp -+ sll $shr,$shr,3 # byte to bit offset -+ subu $shl,$zero,$shr -+#endif -+ -+ ld $h0,0($ctx) # load hash value -+ ld $h1,8($ctx) -+ ld $h2,16($ctx) -+ -+ ld $r0,24($ctx) # load key -+ ld $r1,32($ctx) -+ ld $rs1,40($ctx) -+ -+ dsll $len,4 -+ daddu $len,$inp # end of buffer -+ b .Loop -+ -+.align 4 -+.Loop: -+#if defined(_MIPS_ARCH_MIPS64R6) -+ ld $in0,0($inp) # load input -+ ld $in1,8($inp) -+ beqz $shr,.Laligned_inp -+ -+ ld $tmp2,16($inp) -+# ifdef MIPSEB -+ dsllv $in0,$in0,$shr -+ dsrlv $tmp3,$in1,$shl -+ dsllv $in1,$in1,$shr -+ dsrlv $tmp2,$tmp2,$shl -+# else -+ dsrlv $in0,$in0,$shr -+ dsllv $tmp3,$in1,$shl -+ dsrlv $in1,$in1,$shr -+ dsllv $tmp2,$tmp2,$shl -+# endif -+ or $in0,$in0,$tmp3 -+ or $in1,$in1,$tmp2 -+.Laligned_inp: -+#else -+ ldl $in0,0+MSB($inp) # load input -+ ldl $in1,8+MSB($inp) -+ ldr $in0,0+LSB($inp) -+ ldr $in1,8+LSB($inp) -+#endif -+ daddiu $inp,16 -+#ifdef MIPSEB -+# if defined(_MIPS_ARCH_MIPS64R2) -+ dsbh 
$in0,$in0 # byte swap -+ dsbh $in1,$in1 -+ dshd $in0,$in0 -+ dshd $in1,$in1 -+# else -+ ori $tmp0,$zero,0xFF -+ dsll $tmp2,$tmp0,32 -+ or $tmp0,$tmp2 # 0x000000FF000000FF -+ -+ and $tmp1,$in0,$tmp0 # byte swap -+ and $tmp3,$in1,$tmp0 -+ dsrl $tmp2,$in0,24 -+ dsrl $tmp4,$in1,24 -+ dsll $tmp1,24 -+ dsll $tmp3,24 -+ and $tmp2,$tmp0 -+ and $tmp4,$tmp0 -+ dsll $tmp0,8 # 0x0000FF000000FF00 -+ or $tmp1,$tmp2 -+ or $tmp3,$tmp4 -+ and $tmp2,$in0,$tmp0 -+ and $tmp4,$in1,$tmp0 -+ dsrl $in0,8 -+ dsrl $in1,8 -+ dsll $tmp2,8 -+ dsll $tmp4,8 -+ and $in0,$tmp0 -+ and $in1,$tmp0 -+ or $tmp1,$tmp2 -+ or $tmp3,$tmp4 -+ or $in0,$tmp1 -+ or $in1,$tmp3 -+ dsrl $tmp1,$in0,32 -+ dsrl $tmp3,$in1,32 -+ dsll $in0,32 -+ dsll $in1,32 -+ or $in0,$tmp1 -+ or $in1,$tmp3 -+# endif -+#endif -+ dsrl $tmp1,$h2,2 # modulo-scheduled reduction -+ andi $h2,$h2,3 -+ dsll $tmp0,$tmp1,2 -+ -+ daddu $d0,$h0,$in0 # accumulate input -+ daddu $tmp1,$tmp0 -+ sltu $tmp0,$d0,$h0 -+ daddu $d0,$d0,$tmp1 # ... and residue -+ sltu $tmp1,$d0,$tmp1 -+ daddu $d1,$h1,$in1 -+ daddu $tmp0,$tmp1 -+ sltu $tmp1,$d1,$h1 -+ daddu $d1,$tmp0 -+ -+ dmultu ($r0,$d0) # h0*r0 -+ daddu $d2,$h2,$padbit -+ sltu $tmp0,$d1,$tmp0 -+ mflo ($h0,$r0,$d0) -+ mfhi ($h1,$r0,$d0) -+ -+ dmultu ($rs1,$d1) # h1*5*r1 -+ daddu $d2,$tmp1 -+ daddu $d2,$tmp0 -+ mflo ($tmp0,$rs1,$d1) -+ mfhi ($tmp1,$rs1,$d1) -+ -+ dmultu ($r1,$d0) # h0*r1 -+ mflo ($tmp2,$r1,$d0) -+ mfhi ($h2,$r1,$d0) -+ daddu $h0,$tmp0 -+ daddu $h1,$tmp1 -+ sltu $tmp0,$h0,$tmp0 -+ -+ dmultu ($r0,$d1) # h1*r0 -+ daddu $h1,$tmp0 -+ daddu $h1,$tmp2 -+ mflo ($tmp0,$r0,$d1) -+ mfhi ($tmp1,$r0,$d1) -+ -+ dmultu ($rs1,$d2) # h2*5*r1 -+ sltu $tmp2,$h1,$tmp2 -+ daddu $h2,$tmp2 -+ mflo ($tmp2,$rs1,$d2) -+ -+ dmultu ($r0,$d2) # h2*r0 -+ daddu $h1,$tmp0 -+ daddu $h2,$tmp1 -+ mflo ($tmp3,$r0,$d2) -+ sltu $tmp0,$h1,$tmp0 -+ daddu $h2,$tmp0 -+ -+ daddu $h1,$tmp2 -+ sltu $tmp2,$h1,$tmp2 -+ daddu $h2,$tmp2 -+ daddu $h2,$tmp3 -+ -+ bne $inp,$len,.Loop -+ -+ sd $h0,0($ctx) # store hash value -+ sd 
$h1,8($ctx) -+ sd $h2,16($ctx) -+ -+ .set noreorder -+#if defined(_MIPS_ARCH_MIPS64R6) -+ ld $s7,56($sp) -+ ld $s6,48($sp) -+#endif -+ ld $s5,40($sp) # epilogue -+ ld $s4,32($sp) -+___ -+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi epilogue -+ ld $s3,24($sp) -+ ld $s2,16($sp) -+ ld $s1,8($sp) -+ ld $s0,0($sp) -+___ -+$code.=<<___; -+ jr $ra -+#if defined(_MIPS_ARCH_MIPS64R6) -+ daddu $sp,8*8 -+#else -+ daddu $sp,6*8 -+#endif -+.end poly1305_blocks_internal -+___ -+} -+{ -+my ($ctx,$mac,$nonce) = ($a0,$a1,$a2); -+ -+$code.=<<___; -+.align 5 -+.globl poly1305_emit -+.ent poly1305_emit -+poly1305_emit: -+ .frame $sp,0,$ra -+ .set reorder -+ -+ ld $tmp2,16($ctx) -+ ld $tmp0,0($ctx) -+ ld $tmp1,8($ctx) -+ -+ li $in0,-4 # final reduction -+ dsrl $in1,$tmp2,2 -+ and $in0,$tmp2 -+ andi $tmp2,$tmp2,3 -+ daddu $in0,$in1 -+ -+ daddu $tmp0,$tmp0,$in0 -+ sltu $in1,$tmp0,$in0 -+ daddiu $in0,$tmp0,5 # compare to modulus -+ daddu $tmp1,$tmp1,$in1 -+ sltiu $tmp3,$in0,5 -+ sltu $tmp4,$tmp1,$in1 -+ daddu $in1,$tmp1,$tmp3 -+ daddu $tmp2,$tmp2,$tmp4 -+ sltu $tmp3,$in1,$tmp3 -+ daddu $tmp2,$tmp2,$tmp3 -+ -+ dsrl $tmp2,2 # see if it carried/borrowed -+ dsubu $tmp2,$zero,$tmp2 -+ -+ xor $in0,$tmp0 -+ xor $in1,$tmp1 -+ and $in0,$tmp2 -+ and $in1,$tmp2 -+ xor $in0,$tmp0 -+ xor $in1,$tmp1 -+ -+ lwu $tmp0,0($nonce) # load nonce -+ lwu $tmp1,4($nonce) -+ lwu $tmp2,8($nonce) -+ lwu $tmp3,12($nonce) -+ dsll $tmp1,32 -+ dsll $tmp3,32 -+ or $tmp0,$tmp1 -+ or $tmp2,$tmp3 -+ -+ daddu $in0,$tmp0 # accumulate nonce -+ daddu $in1,$tmp2 -+ sltu $tmp0,$in0,$tmp0 -+ daddu $in1,$tmp0 -+ -+ dsrl $tmp0,$in0,8 # write mac value -+ dsrl $tmp1,$in0,16 -+ dsrl $tmp2,$in0,24 -+ sb $in0,0($mac) -+ dsrl $tmp3,$in0,32 -+ sb $tmp0,1($mac) -+ dsrl $tmp0,$in0,40 -+ sb $tmp1,2($mac) -+ dsrl $tmp1,$in0,48 -+ sb $tmp2,3($mac) -+ dsrl $tmp2,$in0,56 -+ sb $tmp3,4($mac) -+ dsrl $tmp3,$in1,8 -+ sb $tmp0,5($mac) -+ dsrl $tmp0,$in1,16 -+ sb $tmp1,6($mac) -+ dsrl $tmp1,$in1,24 -+ sb $tmp2,7($mac) -+ -+ sb 
$in1,8($mac) -+ dsrl $tmp2,$in1,32 -+ sb $tmp3,9($mac) -+ dsrl $tmp3,$in1,40 -+ sb $tmp0,10($mac) -+ dsrl $tmp0,$in1,48 -+ sb $tmp1,11($mac) -+ dsrl $tmp1,$in1,56 -+ sb $tmp2,12($mac) -+ sb $tmp3,13($mac) -+ sb $tmp0,14($mac) -+ sb $tmp1,15($mac) -+ -+ jr $ra -+.end poly1305_emit -+.rdata -+.asciiz "Poly1305 for MIPS64, CRYPTOGAMS by \@dot-asm" -+.align 2 -+___ -+} -+}}} else {{{ -+###################################################################### -+# 32-bit code path -+# -+ -+my ($ctx,$inp,$len,$padbit) = ($a0,$a1,$a2,$a3); -+my ($in0,$in1,$in2,$in3,$tmp0,$tmp1,$tmp2,$tmp3) = -+ ($a4,$a5,$a6,$a7,$at,$t0,$t1,$t2); -+ -+$code.=<<___; -+#if (defined(_MIPS_ARCH_MIPS32R3) || defined(_MIPS_ARCH_MIPS32R5) || \\ -+ defined(_MIPS_ARCH_MIPS32R6)) \\ -+ && !defined(_MIPS_ARCH_MIPS32R2) -+# define _MIPS_ARCH_MIPS32R2 -+#endif -+ -+#if defined(_MIPS_ARCH_MIPS32R6) -+# define multu(rs,rt) -+# define mflo(rd,rs,rt) mulu rd,rs,rt -+# define mfhi(rd,rs,rt) muhu rd,rs,rt -+#else -+# define multu(rs,rt) multu rs,rt -+# define mflo(rd,rs,rt) mflo rd -+# define mfhi(rd,rs,rt) mfhi rd -+#endif -+ -+#ifdef __KERNEL__ -+# define poly1305_init poly1305_init_mips -+# define poly1305_blocks poly1305_blocks_mips -+# define poly1305_emit poly1305_emit_mips -+#endif -+ -+#if defined(__MIPSEB__) && !defined(MIPSEB) -+# define MIPSEB -+#endif -+ -+#ifdef MIPSEB -+# define MSB 0 -+# define LSB 3 -+#else -+# define MSB 3 -+# define LSB 0 -+#endif -+ -+.text -+.set noat -+.set noreorder -+ -+.align 5 -+.globl poly1305_init -+.ent poly1305_init -+poly1305_init: -+ .frame $sp,0,$ra -+ .set reorder -+ -+ sw $zero,0($ctx) -+ sw $zero,4($ctx) -+ sw $zero,8($ctx) -+ sw $zero,12($ctx) -+ sw $zero,16($ctx) -+ -+ beqz $inp,.Lno_key -+ -+#if defined(_MIPS_ARCH_MIPS32R6) -+ andi $tmp0,$inp,3 # $inp % 4 -+ subu $inp,$inp,$tmp0 # align $inp -+ sll $tmp0,$tmp0,3 # byte to bit offset -+ lw $in0,0($inp) -+ lw $in1,4($inp) -+ lw $in2,8($inp) -+ lw $in3,12($inp) -+ beqz $tmp0,.Laligned_key -+ -+ lw 
$tmp2,16($inp) -+ subu $tmp1,$zero,$tmp0 -+# ifdef MIPSEB -+ sllv $in0,$in0,$tmp0 -+ srlv $tmp3,$in1,$tmp1 -+ sllv $in1,$in1,$tmp0 -+ or $in0,$in0,$tmp3 -+ srlv $tmp3,$in2,$tmp1 -+ sllv $in2,$in2,$tmp0 -+ or $in1,$in1,$tmp3 -+ srlv $tmp3,$in3,$tmp1 -+ sllv $in3,$in3,$tmp0 -+ or $in2,$in2,$tmp3 -+ srlv $tmp2,$tmp2,$tmp1 -+ or $in3,$in3,$tmp2 -+# else -+ srlv $in0,$in0,$tmp0 -+ sllv $tmp3,$in1,$tmp1 -+ srlv $in1,$in1,$tmp0 -+ or $in0,$in0,$tmp3 -+ sllv $tmp3,$in2,$tmp1 -+ srlv $in2,$in2,$tmp0 -+ or $in1,$in1,$tmp3 -+ sllv $tmp3,$in3,$tmp1 -+ srlv $in3,$in3,$tmp0 -+ or $in2,$in2,$tmp3 -+ sllv $tmp2,$tmp2,$tmp1 -+ or $in3,$in3,$tmp2 -+# endif -+.Laligned_key: -+#else -+ lwl $in0,0+MSB($inp) -+ lwl $in1,4+MSB($inp) -+ lwl $in2,8+MSB($inp) -+ lwl $in3,12+MSB($inp) -+ lwr $in0,0+LSB($inp) -+ lwr $in1,4+LSB($inp) -+ lwr $in2,8+LSB($inp) -+ lwr $in3,12+LSB($inp) -+#endif -+#ifdef MIPSEB -+# if defined(_MIPS_ARCH_MIPS32R2) -+ wsbh $in0,$in0 # byte swap -+ wsbh $in1,$in1 -+ wsbh $in2,$in2 -+ wsbh $in3,$in3 -+ rotr $in0,$in0,16 -+ rotr $in1,$in1,16 -+ rotr $in2,$in2,16 -+ rotr $in3,$in3,16 -+# else -+ srl $tmp0,$in0,24 # byte swap -+ srl $tmp1,$in0,8 -+ andi $tmp2,$in0,0xFF00 -+ sll $in0,$in0,24 -+ andi $tmp1,0xFF00 -+ sll $tmp2,$tmp2,8 -+ or $in0,$tmp0 -+ srl $tmp0,$in1,24 -+ or $tmp1,$tmp2 -+ srl $tmp2,$in1,8 -+ or $in0,$tmp1 -+ andi $tmp1,$in1,0xFF00 -+ sll $in1,$in1,24 -+ andi $tmp2,0xFF00 -+ sll $tmp1,$tmp1,8 -+ or $in1,$tmp0 -+ srl $tmp0,$in2,24 -+ or $tmp2,$tmp1 -+ srl $tmp1,$in2,8 -+ or $in1,$tmp2 -+ andi $tmp2,$in2,0xFF00 -+ sll $in2,$in2,24 -+ andi $tmp1,0xFF00 -+ sll $tmp2,$tmp2,8 -+ or $in2,$tmp0 -+ srl $tmp0,$in3,24 -+ or $tmp1,$tmp2 -+ srl $tmp2,$in3,8 -+ or $in2,$tmp1 -+ andi $tmp1,$in3,0xFF00 -+ sll $in3,$in3,24 -+ andi $tmp2,0xFF00 -+ sll $tmp1,$tmp1,8 -+ or $in3,$tmp0 -+ or $tmp2,$tmp1 -+ or $in3,$tmp2 -+# endif -+#endif -+ lui $tmp0,0x0fff -+ ori $tmp0,0xffff # 0x0fffffff -+ and $in0,$in0,$tmp0 -+ subu $tmp0,3 # 0x0ffffffc -+ and $in1,$in1,$tmp0 -+ and 
$in2,$in2,$tmp0 -+ and $in3,$in3,$tmp0 -+ -+ sw $in0,20($ctx) -+ sw $in1,24($ctx) -+ sw $in2,28($ctx) -+ sw $in3,32($ctx) -+ -+ srl $tmp1,$in1,2 -+ srl $tmp2,$in2,2 -+ srl $tmp3,$in3,2 -+ addu $in1,$in1,$tmp1 # s1 = r1 + (r1 >> 2) -+ addu $in2,$in2,$tmp2 -+ addu $in3,$in3,$tmp3 -+ sw $in1,36($ctx) -+ sw $in2,40($ctx) -+ sw $in3,44($ctx) -+.Lno_key: -+ li $v0,0 -+ jr $ra -+.end poly1305_init -+___ -+{ -+my $SAVED_REGS_MASK = ($flavour =~ /nubi/i) ? "0x00fff000" : "0x00ff0000"; -+ -+my ($h0,$h1,$h2,$h3,$h4, $r0,$r1,$r2,$r3, $rs1,$rs2,$rs3) = -+ ($s0,$s1,$s2,$s3,$s4, $s5,$s6,$s7,$s8, $s9,$s10,$s11); -+my ($d0,$d1,$d2,$d3) = -+ ($a4,$a5,$a6,$a7); -+my $shr = $t2; # used on R6 -+my $one = $t2; # used on R2 -+ -+$code.=<<___; -+.globl poly1305_blocks -+.align 5 -+.ent poly1305_blocks -+poly1305_blocks: -+ .frame $sp,16*4,$ra -+ .mask $SAVED_REGS_MASK,-4 -+ .set noreorder -+ subu $sp, $sp,4*12 -+ sw $s11,4*11($sp) -+ sw $s10,4*10($sp) -+ sw $s9, 4*9($sp) -+ sw $s8, 4*8($sp) -+ sw $s7, 4*7($sp) -+ sw $s6, 4*6($sp) -+ sw $s5, 4*5($sp) -+ sw $s4, 4*4($sp) -+___ -+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue -+ sw $s3, 4*3($sp) -+ sw $s2, 4*2($sp) -+ sw $s1, 4*1($sp) -+ sw $s0, 4*0($sp) -+___ -+$code.=<<___; -+ .set reorder -+ -+ srl $len,4 # number of complete blocks -+ li $one,1 -+ beqz $len,.Labort -+ -+#if defined(_MIPS_ARCH_MIPS32R6) -+ andi $shr,$inp,3 -+ subu $inp,$inp,$shr # align $inp -+ sll $shr,$shr,3 # byte to bit offset -+#endif -+ -+ lw $h0,0($ctx) # load hash value -+ lw $h1,4($ctx) -+ lw $h2,8($ctx) -+ lw $h3,12($ctx) -+ lw $h4,16($ctx) -+ -+ lw $r0,20($ctx) # load key -+ lw $r1,24($ctx) -+ lw $r2,28($ctx) -+ lw $r3,32($ctx) -+ lw $rs1,36($ctx) -+ lw $rs2,40($ctx) -+ lw $rs3,44($ctx) -+ -+ sll $len,4 -+ addu $len,$len,$inp # end of buffer -+ b .Loop -+ -+.align 4 -+.Loop: -+#if defined(_MIPS_ARCH_MIPS32R6) -+ lw $d0,0($inp) # load input -+ lw $d1,4($inp) -+ lw $d2,8($inp) -+ lw $d3,12($inp) -+ beqz $shr,.Laligned_inp -+ -+ lw 
$t0,16($inp) -+ subu $t1,$zero,$shr -+# ifdef MIPSEB -+ sllv $d0,$d0,$shr -+ srlv $at,$d1,$t1 -+ sllv $d1,$d1,$shr -+ or $d0,$d0,$at -+ srlv $at,$d2,$t1 -+ sllv $d2,$d2,$shr -+ or $d1,$d1,$at -+ srlv $at,$d3,$t1 -+ sllv $d3,$d3,$shr -+ or $d2,$d2,$at -+ srlv $t0,$t0,$t1 -+ or $d3,$d3,$t0 -+# else -+ srlv $d0,$d0,$shr -+ sllv $at,$d1,$t1 -+ srlv $d1,$d1,$shr -+ or $d0,$d0,$at -+ sllv $at,$d2,$t1 -+ srlv $d2,$d2,$shr -+ or $d1,$d1,$at -+ sllv $at,$d3,$t1 -+ srlv $d3,$d3,$shr -+ or $d2,$d2,$at -+ sllv $t0,$t0,$t1 -+ or $d3,$d3,$t0 -+# endif -+.Laligned_inp: -+#else -+ lwl $d0,0+MSB($inp) # load input -+ lwl $d1,4+MSB($inp) -+ lwl $d2,8+MSB($inp) -+ lwl $d3,12+MSB($inp) -+ lwr $d0,0+LSB($inp) -+ lwr $d1,4+LSB($inp) -+ lwr $d2,8+LSB($inp) -+ lwr $d3,12+LSB($inp) -+#endif -+#ifdef MIPSEB -+# if defined(_MIPS_ARCH_MIPS32R2) -+ wsbh $d0,$d0 # byte swap -+ wsbh $d1,$d1 -+ wsbh $d2,$d2 -+ wsbh $d3,$d3 -+ rotr $d0,$d0,16 -+ rotr $d1,$d1,16 -+ rotr $d2,$d2,16 -+ rotr $d3,$d3,16 -+# else -+ srl $at,$d0,24 # byte swap -+ srl $t0,$d0,8 -+ andi $t1,$d0,0xFF00 -+ sll $d0,$d0,24 -+ andi $t0,0xFF00 -+ sll $t1,$t1,8 -+ or $d0,$at -+ srl $at,$d1,24 -+ or $t0,$t1 -+ srl $t1,$d1,8 -+ or $d0,$t0 -+ andi $t0,$d1,0xFF00 -+ sll $d1,$d1,24 -+ andi $t1,0xFF00 -+ sll $t0,$t0,8 -+ or $d1,$at -+ srl $at,$d2,24 -+ or $t1,$t0 -+ srl $t0,$d2,8 -+ or $d1,$t1 -+ andi $t1,$d2,0xFF00 -+ sll $d2,$d2,24 -+ andi $t0,0xFF00 -+ sll $t1,$t1,8 -+ or $d2,$at -+ srl $at,$d3,24 -+ or $t0,$t1 -+ srl $t1,$d3,8 -+ or $d2,$t0 -+ andi $t0,$d3,0xFF00 -+ sll $d3,$d3,24 -+ andi $t1,0xFF00 -+ sll $t0,$t0,8 -+ or $d3,$at -+ or $t1,$t0 -+ or $d3,$t1 -+# endif -+#endif -+ srl $t0,$h4,2 # modulo-scheduled reduction -+ andi $h4,$h4,3 -+ sll $at,$t0,2 -+ -+ addu $d0,$d0,$h0 # accumulate input -+ addu $t0,$t0,$at -+ sltu $h0,$d0,$h0 -+ addu $d0,$d0,$t0 # ... 
and residue -+ sltu $at,$d0,$t0 -+ -+ addu $d1,$d1,$h1 -+ addu $h0,$h0,$at # carry -+ sltu $h1,$d1,$h1 -+ addu $d1,$d1,$h0 -+ sltu $h0,$d1,$h0 -+ -+ addu $d2,$d2,$h2 -+ addu $h1,$h1,$h0 # carry -+ sltu $h2,$d2,$h2 -+ addu $d2,$d2,$h1 -+ sltu $h1,$d2,$h1 -+ -+ addu $d3,$d3,$h3 -+ addu $h2,$h2,$h1 # carry -+ sltu $h3,$d3,$h3 -+ addu $d3,$d3,$h2 -+ -+#if defined(_MIPS_ARCH_MIPS32R2) && !defined(_MIPS_ARCH_MIPS32R6) -+ multu $r0,$d0 # d0*r0 -+ sltu $h2,$d3,$h2 -+ maddu $rs3,$d1 # d1*s3 -+ addu $h3,$h3,$h2 # carry -+ maddu $rs2,$d2 # d2*s2 -+ addu $h4,$h4,$padbit -+ maddu $rs1,$d3 # d3*s1 -+ addu $h4,$h4,$h3 -+ mfhi $at -+ mflo $h0 -+ -+ multu $r1,$d0 # d0*r1 -+ maddu $r0,$d1 # d1*r0 -+ maddu $rs3,$d2 # d2*s3 -+ maddu $rs2,$d3 # d3*s2 -+ maddu $rs1,$h4 # h4*s1 -+ maddu $at,$one # hi*1 -+ mfhi $at -+ mflo $h1 -+ -+ multu $r2,$d0 # d0*r2 -+ maddu $r1,$d1 # d1*r1 -+ maddu $r0,$d2 # d2*r0 -+ maddu $rs3,$d3 # d3*s3 -+ maddu $rs2,$h4 # h4*s2 -+ maddu $at,$one # hi*1 -+ mfhi $at -+ mflo $h2 -+ -+ mul $t0,$r0,$h4 # h4*r0 -+ -+ multu $r3,$d0 # d0*r3 -+ maddu $r2,$d1 # d1*r2 -+ maddu $r1,$d2 # d2*r1 -+ maddu $r0,$d3 # d3*r0 -+ maddu $rs3,$h4 # h4*s3 -+ maddu $at,$one # hi*1 -+ mfhi $at -+ mflo $h3 -+ -+ addiu $inp,$inp,16 -+ -+ addu $h4,$t0,$at -+#else -+ multu ($r0,$d0) # d0*r0 -+ mflo ($h0,$r0,$d0) -+ mfhi ($h1,$r0,$d0) -+ -+ sltu $h2,$d3,$h2 -+ addu $h3,$h3,$h2 # carry -+ -+ multu ($rs3,$d1) # d1*s3 -+ mflo ($at,$rs3,$d1) -+ mfhi ($t0,$rs3,$d1) -+ -+ addu $h4,$h4,$padbit -+ addiu $inp,$inp,16 -+ addu $h4,$h4,$h3 -+ -+ multu ($rs2,$d2) # d2*s2 -+ mflo ($a3,$rs2,$d2) -+ mfhi ($t1,$rs2,$d2) -+ addu $h0,$h0,$at -+ addu $h1,$h1,$t0 -+ multu ($rs1,$d3) # d3*s1 -+ sltu $at,$h0,$at -+ addu $h1,$h1,$at -+ -+ mflo ($at,$rs1,$d3) -+ mfhi ($t0,$rs1,$d3) -+ addu $h0,$h0,$a3 -+ addu $h1,$h1,$t1 -+ multu ($r1,$d0) # d0*r1 -+ sltu $a3,$h0,$a3 -+ addu $h1,$h1,$a3 -+ -+ -+ mflo ($a3,$r1,$d0) -+ mfhi ($h2,$r1,$d0) -+ addu $h0,$h0,$at -+ addu $h1,$h1,$t0 -+ multu ($r0,$d1) # d1*r0 -+ sltu 
$at,$h0,$at -+ addu $h1,$h1,$at -+ -+ mflo ($at,$r0,$d1) -+ mfhi ($t0,$r0,$d1) -+ addu $h1,$h1,$a3 -+ sltu $a3,$h1,$a3 -+ multu ($rs3,$d2) # d2*s3 -+ addu $h2,$h2,$a3 -+ -+ mflo ($a3,$rs3,$d2) -+ mfhi ($t1,$rs3,$d2) -+ addu $h1,$h1,$at -+ addu $h2,$h2,$t0 -+ multu ($rs2,$d3) # d3*s2 -+ sltu $at,$h1,$at -+ addu $h2,$h2,$at -+ -+ mflo ($at,$rs2,$d3) -+ mfhi ($t0,$rs2,$d3) -+ addu $h1,$h1,$a3 -+ addu $h2,$h2,$t1 -+ multu ($rs1,$h4) # h4*s1 -+ sltu $a3,$h1,$a3 -+ addu $h2,$h2,$a3 -+ -+ mflo ($a3,$rs1,$h4) -+ addu $h1,$h1,$at -+ addu $h2,$h2,$t0 -+ multu ($r2,$d0) # d0*r2 -+ sltu $at,$h1,$at -+ addu $h2,$h2,$at -+ -+ -+ mflo ($at,$r2,$d0) -+ mfhi ($h3,$r2,$d0) -+ addu $h1,$h1,$a3 -+ sltu $a3,$h1,$a3 -+ multu ($r1,$d1) # d1*r1 -+ addu $h2,$h2,$a3 -+ -+ mflo ($a3,$r1,$d1) -+ mfhi ($t1,$r1,$d1) -+ addu $h2,$h2,$at -+ sltu $at,$h2,$at -+ multu ($r0,$d2) # d2*r0 -+ addu $h3,$h3,$at -+ -+ mflo ($at,$r0,$d2) -+ mfhi ($t0,$r0,$d2) -+ addu $h2,$h2,$a3 -+ addu $h3,$h3,$t1 -+ multu ($rs3,$d3) # d3*s3 -+ sltu $a3,$h2,$a3 -+ addu $h3,$h3,$a3 -+ -+ mflo ($a3,$rs3,$d3) -+ mfhi ($t1,$rs3,$d3) -+ addu $h2,$h2,$at -+ addu $h3,$h3,$t0 -+ multu ($rs2,$h4) # h4*s2 -+ sltu $at,$h2,$at -+ addu $h3,$h3,$at -+ -+ mflo ($at,$rs2,$h4) -+ addu $h2,$h2,$a3 -+ addu $h3,$h3,$t1 -+ multu ($r3,$d0) # d0*r3 -+ sltu $a3,$h2,$a3 -+ addu $h3,$h3,$a3 -+ -+ -+ mflo ($a3,$r3,$d0) -+ mfhi ($t1,$r3,$d0) -+ addu $h2,$h2,$at -+ sltu $at,$h2,$at -+ multu ($r2,$d1) # d1*r2 -+ addu $h3,$h3,$at -+ -+ mflo ($at,$r2,$d1) -+ mfhi ($t0,$r2,$d1) -+ addu $h3,$h3,$a3 -+ sltu $a3,$h3,$a3 -+ multu ($r0,$d3) # d3*r0 -+ addu $t1,$t1,$a3 -+ -+ mflo ($a3,$r0,$d3) -+ mfhi ($d3,$r0,$d3) -+ addu $h3,$h3,$at -+ addu $t1,$t1,$t0 -+ multu ($r1,$d2) # d2*r1 -+ sltu $at,$h3,$at -+ addu $t1,$t1,$at -+ -+ mflo ($at,$r1,$d2) -+ mfhi ($t0,$r1,$d2) -+ addu $h3,$h3,$a3 -+ addu $t1,$t1,$d3 -+ multu ($rs3,$h4) # h4*s3 -+ sltu $a3,$h3,$a3 -+ addu $t1,$t1,$a3 -+ -+ mflo ($a3,$rs3,$h4) -+ addu $h3,$h3,$at -+ addu $t1,$t1,$t0 -+ multu ($r0,$h4) # 
h4*r0 -+ sltu $at,$h3,$at -+ addu $t1,$t1,$at -+ -+ -+ mflo ($h4,$r0,$h4) -+ addu $h3,$h3,$a3 -+ sltu $a3,$h3,$a3 -+ addu $t1,$t1,$a3 -+ addu $h4,$h4,$t1 -+ -+ li $padbit,1 # if we loop, padbit is 1 -+#endif -+ bne $inp,$len,.Loop -+ -+ sw $h0,0($ctx) # store hash value -+ sw $h1,4($ctx) -+ sw $h2,8($ctx) -+ sw $h3,12($ctx) -+ sw $h4,16($ctx) -+ -+ .set noreorder -+.Labort: -+ lw $s11,4*11($sp) -+ lw $s10,4*10($sp) -+ lw $s9, 4*9($sp) -+ lw $s8, 4*8($sp) -+ lw $s7, 4*7($sp) -+ lw $s6, 4*6($sp) -+ lw $s5, 4*5($sp) -+ lw $s4, 4*4($sp) -+___ -+$code.=<<___ if ($flavour =~ /nubi/i); # optimize non-nubi prologue -+ lw $s3, 4*3($sp) -+ lw $s2, 4*2($sp) -+ lw $s1, 4*1($sp) -+ lw $s0, 4*0($sp) -+___ -+$code.=<<___; -+ jr $ra -+ addu $sp,$sp,4*12 -+.end poly1305_blocks -+___ -+} -+{ -+my ($ctx,$mac,$nonce,$tmp4) = ($a0,$a1,$a2,$a3); -+ -+$code.=<<___; -+.align 5 -+.globl poly1305_emit -+.ent poly1305_emit -+poly1305_emit: -+ .frame $sp,0,$ra -+ .set reorder -+ -+ lw $tmp4,16($ctx) -+ lw $tmp0,0($ctx) -+ lw $tmp1,4($ctx) -+ lw $tmp2,8($ctx) -+ lw $tmp3,12($ctx) -+ -+ li $in0,-4 # final reduction -+ srl $ctx,$tmp4,2 -+ and $in0,$in0,$tmp4 -+ andi $tmp4,$tmp4,3 -+ addu $ctx,$ctx,$in0 -+ -+ addu $tmp0,$tmp0,$ctx -+ sltu $ctx,$tmp0,$ctx -+ addiu $in0,$tmp0,5 # compare to modulus -+ addu $tmp1,$tmp1,$ctx -+ sltiu $in1,$in0,5 -+ sltu $ctx,$tmp1,$ctx -+ addu $in1,$in1,$tmp1 -+ addu $tmp2,$tmp2,$ctx -+ sltu $in2,$in1,$tmp1 -+ sltu $ctx,$tmp2,$ctx -+ addu $in2,$in2,$tmp2 -+ addu $tmp3,$tmp3,$ctx -+ sltu $in3,$in2,$tmp2 -+ sltu $ctx,$tmp3,$ctx -+ addu $in3,$in3,$tmp3 -+ addu $tmp4,$tmp4,$ctx -+ sltu $ctx,$in3,$tmp3 -+ addu $ctx,$tmp4 -+ -+ srl $ctx,2 # see if it carried/borrowed -+ subu $ctx,$zero,$ctx -+ -+ xor $in0,$tmp0 -+ xor $in1,$tmp1 -+ xor $in2,$tmp2 -+ xor $in3,$tmp3 -+ and $in0,$ctx -+ and $in1,$ctx -+ and $in2,$ctx -+ and $in3,$ctx -+ xor $in0,$tmp0 -+ xor $in1,$tmp1 -+ xor $in2,$tmp2 -+ xor $in3,$tmp3 -+ -+ lw $tmp0,0($nonce) # load nonce -+ lw $tmp1,4($nonce) -+ lw 
$tmp2,8($nonce) -+ lw $tmp3,12($nonce) -+ -+ addu $in0,$tmp0 # accumulate nonce -+ sltu $ctx,$in0,$tmp0 -+ -+ addu $in1,$tmp1 -+ sltu $tmp1,$in1,$tmp1 -+ addu $in1,$ctx -+ sltu $ctx,$in1,$ctx -+ addu $ctx,$tmp1 -+ -+ addu $in2,$tmp2 -+ sltu $tmp2,$in2,$tmp2 -+ addu $in2,$ctx -+ sltu $ctx,$in2,$ctx -+ addu $ctx,$tmp2 -+ -+ addu $in3,$tmp3 -+ addu $in3,$ctx -+ -+ srl $tmp0,$in0,8 # write mac value -+ srl $tmp1,$in0,16 -+ srl $tmp2,$in0,24 -+ sb $in0, 0($mac) -+ sb $tmp0,1($mac) -+ srl $tmp0,$in1,8 -+ sb $tmp1,2($mac) -+ srl $tmp1,$in1,16 -+ sb $tmp2,3($mac) -+ srl $tmp2,$in1,24 -+ sb $in1, 4($mac) -+ sb $tmp0,5($mac) -+ srl $tmp0,$in2,8 -+ sb $tmp1,6($mac) -+ srl $tmp1,$in2,16 -+ sb $tmp2,7($mac) -+ srl $tmp2,$in2,24 -+ sb $in2, 8($mac) -+ sb $tmp0,9($mac) -+ srl $tmp0,$in3,8 -+ sb $tmp1,10($mac) -+ srl $tmp1,$in3,16 -+ sb $tmp2,11($mac) -+ srl $tmp2,$in3,24 -+ sb $in3, 12($mac) -+ sb $tmp0,13($mac) -+ sb $tmp1,14($mac) -+ sb $tmp2,15($mac) -+ -+ jr $ra -+.end poly1305_emit -+.rdata -+.asciiz "Poly1305 for MIPS32, CRYPTOGAMS by \@dot-asm" -+.align 2 -+___ -+} -+}}} -+ -+$output=pop and open STDOUT,">$output"; -+print $code; -+close STDOUT; ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -707,6 +707,11 @@ config CRYPTO_POLY1305_X86_64 - in IETF protocols. This is the x86_64 assembler implementation using SIMD - instructions. 
- -+config CRYPTO_POLY1305_MIPS -+ tristate "Poly1305 authenticator algorithm (MIPS optimized)" -+ depends on CPU_MIPS32 || (CPU_MIPS64 && 64BIT) -+ select CRYPTO_ARCH_HAVE_LIB_POLY1305 -+ - config CRYPTO_MD4 - tristate "MD4 digest algorithm" - select CRYPTO_HASH ---- a/lib/crypto/Kconfig -+++ b/lib/crypto/Kconfig -@@ -39,6 +39,7 @@ config CRYPTO_LIB_DES - - config CRYPTO_LIB_POLY1305_RSIZE - int -+ default 2 if MIPS - default 4 if X86_64 - default 9 if ARM || ARM64 - default 1 diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0021-crypto-blake2s-generic-C-library-implementation-and-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0021-crypto-blake2s-generic-C-library-implementation-and-.patch deleted file mode 100644 index 97f73b983..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0021-crypto-blake2s-generic-C-library-implementation-and-.patch +++ /dev/null @@ -1,1097 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 8 Nov 2019 13:22:28 +0100 -Subject: [PATCH] crypto: blake2s - generic C library implementation and - selftest - -commit 66d7fb94e4ffe5acc589e0b2b4710aecc1f07a28 upstream. - -The C implementation was originally based on Samuel Neves' public -domain reference implementation but has since been heavily modified -for the kernel. We're able to do compile-time optimizations by moving -some scaffolding around the final function into the header file. - -Information: https://blake2.net/ - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Samuel Neves -Co-developed-by: Samuel Neves -[ardb: - move from lib/zinc to lib/crypto - - remove simd handling - - rewrote selftest for better coverage - - use fixed digest length for blake2s_hmac() and rename to - blake2s256_hmac() ] -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - include/crypto/blake2s.h | 106 +++++ - include/crypto/internal/blake2s.h | 19 + - lib/crypto/Kconfig | 25 ++ - lib/crypto/Makefile | 10 + - lib/crypto/blake2s-generic.c | 111 ++++++ - lib/crypto/blake2s-selftest.c | 622 ++++++++++++++++++++++++++++++ - lib/crypto/blake2s.c | 126 ++++++ - 7 files changed, 1019 insertions(+) - create mode 100644 include/crypto/blake2s.h - create mode 100644 include/crypto/internal/blake2s.h - create mode 100644 lib/crypto/blake2s-generic.c - create mode 100644 lib/crypto/blake2s-selftest.c - create mode 100644 lib/crypto/blake2s.c - ---- /dev/null -+++ b/include/crypto/blake2s.h -@@ -0,0 +1,106 @@ -+/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifndef BLAKE2S_H -+#define BLAKE2S_H -+ -+#include -+#include -+#include -+ -+#include -+ -+enum blake2s_lengths { -+ BLAKE2S_BLOCK_SIZE = 64, -+ BLAKE2S_HASH_SIZE = 32, -+ BLAKE2S_KEY_SIZE = 32, -+ -+ BLAKE2S_128_HASH_SIZE = 16, -+ BLAKE2S_160_HASH_SIZE = 20, -+ BLAKE2S_224_HASH_SIZE = 28, -+ BLAKE2S_256_HASH_SIZE = 32, -+}; -+ -+struct blake2s_state { -+ u32 h[8]; -+ u32 t[2]; -+ u32 f[2]; -+ u8 buf[BLAKE2S_BLOCK_SIZE]; -+ unsigned int buflen; -+ unsigned int outlen; -+}; -+ -+enum blake2s_iv { -+ BLAKE2S_IV0 = 0x6A09E667UL, -+ BLAKE2S_IV1 = 0xBB67AE85UL, -+ BLAKE2S_IV2 = 0x3C6EF372UL, -+ BLAKE2S_IV3 = 0xA54FF53AUL, -+ BLAKE2S_IV4 = 0x510E527FUL, -+ BLAKE2S_IV5 = 0x9B05688CUL, -+ BLAKE2S_IV6 = 0x1F83D9ABUL, -+ BLAKE2S_IV7 = 0x5BE0CD19UL, -+}; -+ -+void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen); -+void blake2s_final(struct blake2s_state *state, u8 *out); -+ -+static inline void blake2s_init_param(struct blake2s_state *state, -+ const u32 param) -+{ -+ *state = (struct blake2s_state){{ -+ BLAKE2S_IV0 ^ param, -+ BLAKE2S_IV1, -+ BLAKE2S_IV2, -+ BLAKE2S_IV3, -+ BLAKE2S_IV4, -+ BLAKE2S_IV5, -+ BLAKE2S_IV6, -+ BLAKE2S_IV7, -+ }}; -+} -+ -+static inline void 
blake2s_init(struct blake2s_state *state, -+ const size_t outlen) -+{ -+ blake2s_init_param(state, 0x01010000 | outlen); -+ state->outlen = outlen; -+} -+ -+static inline void blake2s_init_key(struct blake2s_state *state, -+ const size_t outlen, const void *key, -+ const size_t keylen) -+{ -+ WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE || -+ !key || !keylen || keylen > BLAKE2S_KEY_SIZE)); -+ -+ blake2s_init_param(state, 0x01010000 | keylen << 8 | outlen); -+ memcpy(state->buf, key, keylen); -+ state->buflen = BLAKE2S_BLOCK_SIZE; -+ state->outlen = outlen; -+} -+ -+static inline void blake2s(u8 *out, const u8 *in, const u8 *key, -+ const size_t outlen, const size_t inlen, -+ const size_t keylen) -+{ -+ struct blake2s_state state; -+ -+ WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen || -+ outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE || -+ (!key && keylen))); -+ -+ if (keylen) -+ blake2s_init_key(&state, outlen, key, keylen); -+ else -+ blake2s_init(&state, outlen); -+ -+ blake2s_update(&state, in, inlen); -+ blake2s_final(&state, out); -+} -+ -+void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, -+ const size_t keylen); -+ -+#endif /* BLAKE2S_H */ ---- /dev/null -+++ b/include/crypto/internal/blake2s.h -@@ -0,0 +1,19 @@ -+/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -+ -+#ifndef BLAKE2S_INTERNAL_H -+#define BLAKE2S_INTERNAL_H -+ -+#include -+ -+void blake2s_compress_generic(struct blake2s_state *state,const u8 *block, -+ size_t nblocks, const u32 inc); -+ -+void blake2s_compress_arch(struct blake2s_state *state,const u8 *block, -+ size_t nblocks, const u32 inc); -+ -+static inline void blake2s_set_lastblock(struct blake2s_state *state) -+{ -+ state->f[0] = -1; -+} -+ -+#endif /* BLAKE2S_INTERNAL_H */ ---- a/lib/crypto/Kconfig -+++ b/lib/crypto/Kconfig -@@ -8,6 +8,31 @@ config CRYPTO_LIB_AES - config CRYPTO_LIB_ARC4 - tristate - -+config CRYPTO_ARCH_HAVE_LIB_BLAKE2S -+ tristate -+ help 
-+ Declares whether the architecture provides an arch-specific -+ accelerated implementation of the Blake2s library interface, -+ either builtin or as a module. -+ -+config CRYPTO_LIB_BLAKE2S_GENERIC -+ tristate -+ help -+ This symbol can be depended upon by arch implementations of the -+ Blake2s library interface that require the generic code as a -+ fallback, e.g., for SIMD implementations. If no arch specific -+ implementation is enabled, this implementation serves the users -+ of CRYPTO_LIB_BLAKE2S. -+ -+config CRYPTO_LIB_BLAKE2S -+ tristate "BLAKE2s hash function library" -+ depends on CRYPTO_ARCH_HAVE_LIB_BLAKE2S || !CRYPTO_ARCH_HAVE_LIB_BLAKE2S -+ select CRYPTO_LIB_BLAKE2S_GENERIC if CRYPTO_ARCH_HAVE_LIB_BLAKE2S=n -+ help -+ Enable the Blake2s library interface. This interface may be fulfilled -+ by either the generic implementation or an arch-specific one, if one -+ is available and enabled. -+ - config CRYPTO_ARCH_HAVE_LIB_CHACHA - tristate - help ---- a/lib/crypto/Makefile -+++ b/lib/crypto/Makefile -@@ -10,6 +10,12 @@ libaes-y := aes.o - obj-$(CONFIG_CRYPTO_LIB_ARC4) += libarc4.o - libarc4-y := arc4.o - -+obj-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += libblake2s-generic.o -+libblake2s-generic-y += blake2s-generic.o -+ -+obj-$(CONFIG_CRYPTO_LIB_BLAKE2S) += libblake2s.o -+libblake2s-y += blake2s.o -+ - obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o - libdes-y := des.o - -@@ -18,3 +24,7 @@ libpoly1305-y := poly1305.o - - obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o - libsha256-y := sha256.o -+ -+ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y) -+libblake2s-y += blake2s-selftest.o -+endif ---- /dev/null -+++ b/lib/crypto/blake2s-generic.c -@@ -0,0 +1,111 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * This is an implementation of the BLAKE2s hash and PRF functions. 
-+ * -+ * Information: https://blake2.net/ -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static const u8 blake2s_sigma[10][16] = { -+ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, -+ { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, -+ { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, -+ { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, -+ { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, -+ { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, -+ { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, -+ { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, -+ { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, -+ { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, -+}; -+ -+static inline void blake2s_increment_counter(struct blake2s_state *state, -+ const u32 inc) -+{ -+ state->t[0] += inc; -+ state->t[1] += (state->t[0] < inc); -+} -+ -+void blake2s_compress_generic(struct blake2s_state *state,const u8 *block, -+ size_t nblocks, const u32 inc) -+{ -+ u32 m[16]; -+ u32 v[16]; -+ int i; -+ -+ WARN_ON(IS_ENABLED(DEBUG) && -+ (nblocks > 1 && inc != BLAKE2S_BLOCK_SIZE)); -+ -+ while (nblocks > 0) { -+ blake2s_increment_counter(state, inc); -+ memcpy(m, block, BLAKE2S_BLOCK_SIZE); -+ le32_to_cpu_array(m, ARRAY_SIZE(m)); -+ memcpy(v, state->h, 32); -+ v[ 8] = BLAKE2S_IV0; -+ v[ 9] = BLAKE2S_IV1; -+ v[10] = BLAKE2S_IV2; -+ v[11] = BLAKE2S_IV3; -+ v[12] = BLAKE2S_IV4 ^ state->t[0]; -+ v[13] = BLAKE2S_IV5 ^ state->t[1]; -+ v[14] = BLAKE2S_IV6 ^ state->f[0]; -+ v[15] = BLAKE2S_IV7 ^ state->f[1]; -+ -+#define G(r, i, a, b, c, d) do { \ -+ a += b + m[blake2s_sigma[r][2 * i + 0]]; \ -+ d = ror32(d ^ a, 16); \ -+ c += d; \ -+ b = ror32(b ^ c, 12); \ -+ a += b + m[blake2s_sigma[r][2 * i + 1]]; \ -+ d = ror32(d ^ a, 8); \ -+ c += d; \ -+ b = ror32(b ^ c, 7); \ -+} while (0) -+ -+#define ROUND(r) do { \ -+ G(r, 0, v[0], v[ 4], v[ 8], v[12]); \ -+ G(r, 1, 
v[1], v[ 5], v[ 9], v[13]); \ -+ G(r, 2, v[2], v[ 6], v[10], v[14]); \ -+ G(r, 3, v[3], v[ 7], v[11], v[15]); \ -+ G(r, 4, v[0], v[ 5], v[10], v[15]); \ -+ G(r, 5, v[1], v[ 6], v[11], v[12]); \ -+ G(r, 6, v[2], v[ 7], v[ 8], v[13]); \ -+ G(r, 7, v[3], v[ 4], v[ 9], v[14]); \ -+} while (0) -+ ROUND(0); -+ ROUND(1); -+ ROUND(2); -+ ROUND(3); -+ ROUND(4); -+ ROUND(5); -+ ROUND(6); -+ ROUND(7); -+ ROUND(8); -+ ROUND(9); -+ -+#undef G -+#undef ROUND -+ -+ for (i = 0; i < 8; ++i) -+ state->h[i] ^= v[i] ^ v[i + 8]; -+ -+ block += BLAKE2S_BLOCK_SIZE; -+ --nblocks; -+ } -+} -+ -+EXPORT_SYMBOL(blake2s_compress_generic); -+ -+MODULE_LICENSE("GPL v2"); -+MODULE_DESCRIPTION("BLAKE2s hash function"); -+MODULE_AUTHOR("Jason A. Donenfeld "); ---- /dev/null -+++ b/lib/crypto/blake2s-selftest.c -@@ -0,0 +1,622 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include -+#include -+ -+/* -+ * blake2s_testvecs[] generated with the program below (using libb2-dev and -+ * libssl-dev [OpenSSL]) -+ * -+ * #include -+ * #include -+ * #include -+ * -+ * #include -+ * #include -+ * -+ * #define BLAKE2S_TESTVEC_COUNT 256 -+ * -+ * static void print_vec(const uint8_t vec[], int len) -+ * { -+ * int i; -+ * -+ * printf(" { "); -+ * for (i = 0; i < len; i++) { -+ * if (i && (i % 12) == 0) -+ * printf("\n "); -+ * printf("0x%02x, ", vec[i]); -+ * } -+ * printf("},\n"); -+ * } -+ * -+ * int main(void) -+ * { -+ * uint8_t key[BLAKE2S_KEYBYTES]; -+ * uint8_t buf[BLAKE2S_TESTVEC_COUNT]; -+ * uint8_t hash[BLAKE2S_OUTBYTES]; -+ * int i, j; -+ * -+ * key[0] = key[1] = 1; -+ * for (i = 2; i < BLAKE2S_KEYBYTES; ++i) -+ * key[i] = key[i - 2] + key[i - 1]; -+ * -+ * for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i) -+ * buf[i] = (uint8_t)i; -+ * -+ * printf("static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n"); -+ * -+ * for (i = 0; i < BLAKE2S_TESTVEC_COUNT; ++i) { -+ * int outlen = 1 + i % BLAKE2S_OUTBYTES; 
-+ * int keylen = (13 * i) % (BLAKE2S_KEYBYTES + 1); -+ * -+ * blake2s(hash, buf, key + BLAKE2S_KEYBYTES - keylen, outlen, i, -+ * keylen); -+ * print_vec(hash, outlen); -+ * } -+ * printf("};\n\n"); -+ * -+ * printf("static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = {\n"); -+ * -+ * HMAC(EVP_blake2s256(), key, sizeof(key), buf, sizeof(buf), hash, NULL); -+ * print_vec(hash, BLAKE2S_OUTBYTES); -+ * -+ * HMAC(EVP_blake2s256(), buf, sizeof(buf), key, sizeof(key), hash, NULL); -+ * print_vec(hash, BLAKE2S_OUTBYTES); -+ * -+ * printf("};\n"); -+ * -+ * return 0; -+ *} -+ */ -+static const u8 blake2s_testvecs[][BLAKE2S_HASH_SIZE] __initconst = { -+ { 0xa1, }, -+ { 0x7c, 0x89, }, -+ { 0x74, 0x0e, 0xd4, }, -+ { 0x47, 0x0c, 0x21, 0x15, }, -+ { 0x18, 0xd6, 0x9c, 0xa6, 0xc4, }, -+ { 0x13, 0x5d, 0x16, 0x63, 0x2e, 0xf9, }, -+ { 0x2c, 0xb5, 0x04, 0xb7, 0x99, 0xe2, 0x73, }, -+ { 0x9a, 0x0f, 0xd2, 0x39, 0xd6, 0x68, 0x1b, 0x92, }, -+ { 0xc8, 0xde, 0x7a, 0xea, 0x2f, 0xf4, 0xd2, 0xe3, 0x2b, }, -+ { 0x5b, 0xf9, 0x43, 0x52, 0x0c, 0x12, 0xba, 0xb5, 0x93, 0x9f, }, -+ { 0xc6, 0x2c, 0x4e, 0x80, 0xfc, 0x32, 0x5b, 0x33, 0xb8, 0xb8, 0x0a, }, -+ { 0xa7, 0x5c, 0xfd, 0x3a, 0xcc, 0xbf, 0x90, 0xca, 0xb7, 0x97, 0xde, 0xd8, }, -+ { 0x66, 0xca, 0x3c, 0xc4, 0x19, 0xef, 0x92, 0x66, 0x3f, 0x21, 0x8f, 0xda, -+ 0xb7, }, -+ { 0xba, 0xe5, 0xbb, 0x30, 0x25, 0x94, 0x6d, 0xc3, 0x89, 0x09, 0xc4, 0x25, -+ 0x52, 0x3e, }, -+ { 0xa2, 0xef, 0x0e, 0x52, 0x0b, 0x5f, 0xa2, 0x01, 0x6d, 0x0a, 0x25, 0xbc, -+ 0x57, 0xe2, 0x27, }, -+ { 0x4f, 0xe0, 0xf9, 0x52, 0x12, 0xda, 0x84, 0xb7, 0xab, 0xae, 0xb0, 0xa6, -+ 0x47, 0x2a, 0xc7, 0xf5, }, -+ { 0x56, 0xe7, 0xa8, 0x1c, 0x4c, 0xca, 0xed, 0x90, 0x31, 0xec, 0x87, 0x43, -+ 0xe7, 0x72, 0x08, 0xec, 0xbe, }, -+ { 0x7e, 0xdf, 0x80, 0x1c, 0x93, 0x33, 0xfd, 0x53, 0x44, 0xba, 0xfd, 0x96, -+ 0xe1, 0xbb, 0xb5, 0x65, 0xa5, 0x00, }, -+ { 0xec, 0x6b, 0xed, 0xf7, 0x7b, 0x62, 0x1d, 0x7d, 0xf4, 0x82, 0xf3, 0x1e, -+ 0x18, 0xff, 0x2b, 0xc4, 0x06, 0x20, 0x2a, }, -+ { 0x74, 
0x98, 0xd7, 0x68, 0x63, 0xed, 0x87, 0xe4, 0x5d, 0x8d, 0x9e, 0x1d, -+ 0xfd, 0x2a, 0xbb, 0x86, 0xac, 0xe9, 0x2a, 0x89, }, -+ { 0x89, 0xc3, 0x88, 0xce, 0x2b, 0x33, 0x1e, 0x10, 0xd1, 0x37, 0x20, 0x86, -+ 0x28, 0x43, 0x70, 0xd9, 0xfb, 0x96, 0xd9, 0xb5, 0xd3, }, -+ { 0xcb, 0x56, 0x74, 0x41, 0x8d, 0x80, 0x01, 0x9a, 0x6b, 0x38, 0xe1, 0x41, -+ 0xad, 0x9c, 0x62, 0x74, 0xce, 0x35, 0xd5, 0x6c, 0x89, 0x6e, }, -+ { 0x79, 0xaf, 0x94, 0x59, 0x99, 0x26, 0xe1, 0xc9, 0x34, 0xfe, 0x7c, 0x22, -+ 0xf7, 0x43, 0xd7, 0x65, 0xd4, 0x48, 0x18, 0xac, 0x3d, 0xfd, 0x93, }, -+ { 0x85, 0x0d, 0xff, 0xb8, 0x3e, 0x87, 0x41, 0xb0, 0x95, 0xd3, 0x3d, 0x00, -+ 0x47, 0x55, 0x9e, 0xd2, 0x69, 0xea, 0xbf, 0xe9, 0x7a, 0x2d, 0x61, 0x45, }, -+ { 0x03, 0xe0, 0x85, 0xec, 0x54, 0xb5, 0x16, 0x53, 0xa8, 0xc4, 0x71, 0xe9, -+ 0x6a, 0xe7, 0xcb, 0xc4, 0x15, 0x02, 0xfc, 0x34, 0xa4, 0xa4, 0x28, 0x13, -+ 0xd1, }, -+ { 0xe3, 0x34, 0x4b, 0xe1, 0xd0, 0x4b, 0x55, 0x61, 0x8f, 0xc0, 0x24, 0x05, -+ 0xe6, 0xe0, 0x3d, 0x70, 0x24, 0x4d, 0xda, 0xb8, 0x91, 0x05, 0x29, 0x07, -+ 0x01, 0x3e, }, -+ { 0x61, 0xff, 0x01, 0x72, 0xb1, 0x4d, 0xf6, 0xfe, 0xd1, 0xd1, 0x08, 0x74, -+ 0xe6, 0x91, 0x44, 0xeb, 0x61, 0xda, 0x40, 0xaf, 0xfc, 0x8c, 0x91, 0x6b, -+ 0xec, 0x13, 0xed, }, -+ { 0xd4, 0x40, 0xd2, 0xa0, 0x7f, 0xc1, 0x58, 0x0c, 0x85, 0xa0, 0x86, 0xc7, -+ 0x86, 0xb9, 0x61, 0xc9, 0xea, 0x19, 0x86, 0x1f, 0xab, 0x07, 0xce, 0x37, -+ 0x72, 0x67, 0x09, 0xfc, }, -+ { 0x9e, 0xf8, 0x18, 0x67, 0x93, 0x10, 0x9b, 0x39, 0x75, 0xe8, 0x8b, 0x38, -+ 0x82, 0x7d, 0xb8, 0xb7, 0xa5, 0xaf, 0xe6, 0x6a, 0x22, 0x5e, 0x1f, 0x9c, -+ 0x95, 0x29, 0x19, 0xf2, 0x4b, }, -+ { 0xc8, 0x62, 0x25, 0xf5, 0x98, 0xc9, 0xea, 0xe5, 0x29, 0x3a, 0xd3, 0x22, -+ 0xeb, 0xeb, 0x07, 0x7c, 0x15, 0x07, 0xee, 0x15, 0x61, 0xbb, 0x05, 0x30, -+ 0x99, 0x7f, 0x11, 0xf6, 0x0a, 0x1d, }, -+ { 0x68, 0x70, 0xf7, 0x90, 0xa1, 0x8b, 0x1f, 0x0f, 0xbb, 0xce, 0xd2, 0x0e, -+ 0x33, 0x1f, 0x7f, 0xa9, 0x78, 0xa8, 0xa6, 0x81, 0x66, 0xab, 0x8d, 0xcd, -+ 0x58, 0x55, 0x3a, 0x0b, 0x7a, 0xdb, 0xb5, }, -+ { 0xdd, 0x35, 
0xd2, 0xb4, 0xf6, 0xc7, 0xea, 0xab, 0x64, 0x24, 0x4e, 0xfe, -+ 0xe5, 0x3d, 0x4e, 0x95, 0x8b, 0x6d, 0x6c, 0xbc, 0xb0, 0xf8, 0x88, 0x61, -+ 0x09, 0xb7, 0x78, 0xa3, 0x31, 0xfe, 0xd9, 0x2f, }, -+ { 0x0a, }, -+ { 0x6e, 0xd4, }, -+ { 0x64, 0xe9, 0xd1, }, -+ { 0x30, 0xdd, 0x71, 0xef, }, -+ { 0x11, 0xb5, 0x0c, 0x87, 0xc9, }, -+ { 0x06, 0x1c, 0x6d, 0x04, 0x82, 0xd0, }, -+ { 0x5c, 0x42, 0x0b, 0xee, 0xc5, 0x9c, 0xb2, }, -+ { 0xe8, 0x29, 0xd6, 0xb4, 0x5d, 0xf7, 0x2b, 0x93, }, -+ { 0x18, 0xca, 0x27, 0x72, 0x43, 0x39, 0x16, 0xbc, 0x6a, }, -+ { 0x39, 0x8f, 0xfd, 0x64, 0xf5, 0x57, 0x23, 0xb0, 0x45, 0xf8, }, -+ { 0xbb, 0x3a, 0x78, 0x6b, 0x02, 0x1d, 0x0b, 0x16, 0xe3, 0xb2, 0x9a, }, -+ { 0xb8, 0xb4, 0x0b, 0xe5, 0xd4, 0x1d, 0x0d, 0x85, 0x49, 0x91, 0x35, 0xfa, }, -+ { 0x6d, 0x48, 0x2a, 0x0c, 0x42, 0x08, 0xbd, 0xa9, 0x78, 0x6f, 0x18, 0xaf, -+ 0xe2, }, -+ { 0x10, 0x45, 0xd4, 0x58, 0x88, 0xec, 0x4e, 0x1e, 0xf6, 0x14, 0x92, 0x64, -+ 0x7e, 0xb0, }, -+ { 0x8b, 0x0b, 0x95, 0xee, 0x92, 0xc6, 0x3b, 0x91, 0xf1, 0x1e, 0xeb, 0x51, -+ 0x98, 0x0a, 0x8d, }, -+ { 0xa3, 0x50, 0x4d, 0xa5, 0x1d, 0x03, 0x68, 0xe9, 0x57, 0x78, 0xd6, 0x04, -+ 0xf1, 0xc3, 0x94, 0xd8, }, -+ { 0xb8, 0x66, 0x6e, 0xdd, 0x46, 0x15, 0xae, 0x3d, 0x83, 0x7e, 0xcf, 0xe7, -+ 0x2c, 0xe8, 0x8f, 0xc7, 0x34, }, -+ { 0x2e, 0xc0, 0x1f, 0x29, 0xea, 0xf6, 0xb9, 0xe2, 0xc2, 0x93, 0xeb, 0x41, -+ 0x0d, 0xf0, 0x0a, 0x13, 0x0e, 0xa2, }, -+ { 0x71, 0xb8, 0x33, 0xa9, 0x1b, 0xac, 0xf1, 0xb5, 0x42, 0x8f, 0x5e, 0x81, -+ 0x34, 0x43, 0xb7, 0xa4, 0x18, 0x5c, 0x47, }, -+ { 0xda, 0x45, 0xb8, 0x2e, 0x82, 0x1e, 0xc0, 0x59, 0x77, 0x9d, 0xfa, 0xb4, -+ 0x1c, 0x5e, 0xa0, 0x2b, 0x33, 0x96, 0x5a, 0x58, }, -+ { 0xe3, 0x09, 0x05, 0xa9, 0xeb, 0x48, 0x13, 0xad, 0x71, 0x88, 0x81, 0x9a, -+ 0x3e, 0x2c, 0xe1, 0x23, 0x99, 0x13, 0x35, 0x9f, 0xb5, }, -+ { 0xb7, 0x86, 0x2d, 0x16, 0xe1, 0x04, 0x00, 0x47, 0x47, 0x61, 0x31, 0xfb, -+ 0x14, 0xac, 0xd8, 0xe9, 0xe3, 0x49, 0xbd, 0xf7, 0x9c, 0x3f, }, -+ { 0x7f, 0xd9, 0x95, 0xa8, 0xa7, 0xa0, 0xcc, 0xba, 0xef, 0xb1, 0x0a, 0xa9, -+ 0x21, 
0x62, 0x08, 0x0f, 0x1b, 0xff, 0x7b, 0x9d, 0xae, 0xb2, 0x95, }, -+ { 0x85, 0x99, 0xea, 0x33, 0xe0, 0x56, 0xff, 0x13, 0xc6, 0x61, 0x8c, 0xf9, -+ 0x57, 0x05, 0x03, 0x11, 0xf9, 0xfb, 0x3a, 0xf7, 0xce, 0xbb, 0x52, 0x30, }, -+ { 0xb2, 0x72, 0x9c, 0xf8, 0x77, 0x4e, 0x8f, 0x6b, 0x01, 0x6c, 0xff, 0x4e, -+ 0x4f, 0x02, 0xd2, 0xbc, 0xeb, 0x51, 0x28, 0x99, 0x50, 0xab, 0xc4, 0x42, -+ 0xe3, }, -+ { 0x8b, 0x0a, 0xb5, 0x90, 0x8f, 0xf5, 0x7b, 0xdd, 0xba, 0x47, 0x37, 0xc9, -+ 0x2a, 0xd5, 0x4b, 0x25, 0x08, 0x8b, 0x02, 0x17, 0xa7, 0x9e, 0x6b, 0x6e, -+ 0xe3, 0x90, }, -+ { 0x90, 0xdd, 0xf7, 0x75, 0xa7, 0xa3, 0x99, 0x5e, 0x5b, 0x7d, 0x75, 0xc3, -+ 0x39, 0x6b, 0xa0, 0xe2, 0x44, 0x53, 0xb1, 0x9e, 0xc8, 0xf1, 0x77, 0x10, -+ 0x58, 0x06, 0x9a, }, -+ { 0x99, 0x52, 0xf0, 0x49, 0xa8, 0x8c, 0xec, 0xa6, 0x97, 0x32, 0x13, 0xb5, -+ 0xf7, 0xa3, 0x8e, 0xfb, 0x4b, 0x59, 0x31, 0x3d, 0x01, 0x59, 0x98, 0x5d, -+ 0x53, 0x03, 0x1a, 0x39, }, -+ { 0x9f, 0xe0, 0xc2, 0xe5, 0x5d, 0x93, 0xd6, 0x9b, 0x47, 0x8f, 0x9b, 0xe0, -+ 0x26, 0x35, 0x84, 0x20, 0x1d, 0xc5, 0x53, 0x10, 0x0f, 0x22, 0xb9, 0xb5, -+ 0xd4, 0x36, 0xb1, 0xac, 0x73, }, -+ { 0x30, 0x32, 0x20, 0x3b, 0x10, 0x28, 0xec, 0x1f, 0x4f, 0x9b, 0x47, 0x59, -+ 0xeb, 0x7b, 0xee, 0x45, 0xfb, 0x0c, 0x49, 0xd8, 0x3d, 0x69, 0xbd, 0x90, -+ 0x2c, 0xf0, 0x9e, 0x8d, 0xbf, 0xd5, }, -+ { 0x2a, 0x37, 0x73, 0x7f, 0xf9, 0x96, 0x19, 0xaa, 0x25, 0xd8, 0x13, 0x28, -+ 0x01, 0x29, 0x89, 0xdf, 0x6e, 0x0c, 0x9b, 0x43, 0x44, 0x51, 0xe9, 0x75, -+ 0x26, 0x0c, 0xb7, 0x87, 0x66, 0x0b, 0x5f, }, -+ { 0x23, 0xdf, 0x96, 0x68, 0x91, 0x86, 0xd0, 0x93, 0x55, 0x33, 0x24, 0xf6, -+ 0xba, 0x08, 0x75, 0x5b, 0x59, 0x11, 0x69, 0xb8, 0xb9, 0xe5, 0x2c, 0x77, -+ 0x02, 0xf6, 0x47, 0xee, 0x81, 0xdd, 0xb9, 0x06, }, -+ { 0x9d, }, -+ { 0x9d, 0x7d, }, -+ { 0xfd, 0xc3, 0xda, }, -+ { 0xe8, 0x82, 0xcd, 0x21, }, -+ { 0xc3, 0x1d, 0x42, 0x4c, 0x74, }, -+ { 0xe9, 0xda, 0xf1, 0xa2, 0xe5, 0x7c, }, -+ { 0x52, 0xb8, 0x6f, 0x81, 0x5c, 0x3a, 0x4c, }, -+ { 0x5b, 0x39, 0x26, 0xfc, 0x92, 0x5e, 0xe0, 0x49, }, -+ { 0x59, 0xe4, 
0x7c, 0x93, 0x1c, 0xf9, 0x28, 0x93, 0xde, }, -+ { 0xde, 0xdf, 0xb2, 0x43, 0x61, 0x0b, 0x86, 0x16, 0x4c, 0x2e, }, -+ { 0x14, 0x8f, 0x75, 0x51, 0xaf, 0xb9, 0xee, 0x51, 0x5a, 0xae, 0x23, }, -+ { 0x43, 0x5f, 0x50, 0xd5, 0x70, 0xb0, 0x5b, 0x87, 0xf5, 0xd9, 0xb3, 0x6d, }, -+ { 0x66, 0x0a, 0x64, 0x93, 0x79, 0x71, 0x94, 0x40, 0xb7, 0x68, 0x2d, 0xd3, -+ 0x63, }, -+ { 0x15, 0x00, 0xc4, 0x0c, 0x7d, 0x1b, 0x10, 0xa9, 0x73, 0x1b, 0x90, 0x6f, -+ 0xe6, 0xa9, }, -+ { 0x34, 0x75, 0xf3, 0x86, 0x8f, 0x56, 0xcf, 0x2a, 0x0a, 0xf2, 0x62, 0x0a, -+ 0xf6, 0x0e, 0x20, }, -+ { 0xb1, 0xde, 0xc9, 0xf5, 0xdb, 0xf3, 0x2f, 0x4c, 0xd6, 0x41, 0x7d, 0x39, -+ 0x18, 0x3e, 0xc7, 0xc3, }, -+ { 0xc5, 0x89, 0xb2, 0xf8, 0xb8, 0xc0, 0xa3, 0xb9, 0x3b, 0x10, 0x6d, 0x7c, -+ 0x92, 0xfc, 0x7f, 0x34, 0x41, }, -+ { 0xc4, 0xd8, 0xef, 0xba, 0xef, 0xd2, 0xaa, 0xc5, 0x6c, 0x8e, 0x3e, 0xbb, -+ 0x12, 0xfc, 0x0f, 0x72, 0xbf, 0x0f, }, -+ { 0xdd, 0x91, 0xd1, 0x15, 0x9e, 0x7d, 0xf8, 0xc1, 0xb9, 0x14, 0x63, 0x96, -+ 0xb5, 0xcb, 0x83, 0x1d, 0x35, 0x1c, 0xec, }, -+ { 0xa9, 0xf8, 0x52, 0xc9, 0x67, 0x76, 0x2b, 0xad, 0xfb, 0xd8, 0x3a, 0xa6, -+ 0x74, 0x02, 0xae, 0xb8, 0x25, 0x2c, 0x63, 0x49, }, -+ { 0x77, 0x1f, 0x66, 0x70, 0xfd, 0x50, 0x29, 0xaa, 0xeb, 0xdc, 0xee, 0xba, -+ 0x75, 0x98, 0xdc, 0x93, 0x12, 0x3f, 0xdc, 0x7c, 0x38, }, -+ { 0xe2, 0xe1, 0x89, 0x5c, 0x37, 0x38, 0x6a, 0xa3, 0x40, 0xac, 0x3f, 0xb0, -+ 0xca, 0xfc, 0xa7, 0xf3, 0xea, 0xf9, 0x0f, 0x5d, 0x8e, 0x39, }, -+ { 0x0f, 0x67, 0xc8, 0x38, 0x01, 0xb1, 0xb7, 0xb8, 0xa2, 0xe7, 0x0a, 0x6d, -+ 0xd2, 0x63, 0x69, 0x9e, 0xcc, 0xf0, 0xf2, 0xbe, 0x9b, 0x98, 0xdd, }, -+ { 0x13, 0xe1, 0x36, 0x30, 0xfe, 0xc6, 0x01, 0x8a, 0xa1, 0x63, 0x96, 0x59, -+ 0xc2, 0xa9, 0x68, 0x3f, 0x58, 0xd4, 0x19, 0x0c, 0x40, 0xf3, 0xde, 0x02, }, -+ { 0xa3, 0x9e, 0xce, 0xda, 0x42, 0xee, 0x8c, 0x6c, 0x5a, 0x7d, 0xdc, 0x89, -+ 0x02, 0x77, 0xdd, 0xe7, 0x95, 0xbb, 0xff, 0x0d, 0xa4, 0xb5, 0x38, 0x1e, -+ 0xaf, }, -+ { 0x9a, 0xf6, 0xb5, 0x9a, 0x4f, 0xa9, 0x4f, 0x2c, 0x35, 0x3c, 0x24, 0xdc, -+ 0x97, 0x6f, 0xd9, 0xa1, 
0x7d, 0x1a, 0x85, 0x0b, 0xf5, 0xda, 0x2e, 0xe7, -+ 0xb1, 0x1d, }, -+ { 0x84, 0x1e, 0x8e, 0x3d, 0x45, 0xa5, 0xf2, 0x27, 0xf3, 0x31, 0xfe, 0xb9, -+ 0xfb, 0xc5, 0x45, 0x99, 0x99, 0xdd, 0x93, 0x43, 0x02, 0xee, 0x58, 0xaf, -+ 0xee, 0x6a, 0xbe, }, -+ { 0x07, 0x2f, 0xc0, 0xa2, 0x04, 0xc4, 0xab, 0x7c, 0x26, 0xbb, 0xa8, 0xd8, -+ 0xe3, 0x1c, 0x75, 0x15, 0x64, 0x5d, 0x02, 0x6a, 0xf0, 0x86, 0xe9, 0xcd, -+ 0x5c, 0xef, 0xa3, 0x25, }, -+ { 0x2f, 0x3b, 0x1f, 0xb5, 0x91, 0x8f, 0x86, 0xe0, 0xdc, 0x31, 0x48, 0xb6, -+ 0xa1, 0x8c, 0xfd, 0x75, 0xbb, 0x7d, 0x3d, 0xc1, 0xf0, 0x10, 0x9a, 0xd8, -+ 0x4b, 0x0e, 0xe3, 0x94, 0x9f, }, -+ { 0x29, 0xbb, 0x8f, 0x6c, 0xd1, 0xf2, 0xb6, 0xaf, 0xe5, 0xe3, 0x2d, 0xdc, -+ 0x6f, 0xa4, 0x53, 0x88, 0xd8, 0xcf, 0x4d, 0x45, 0x42, 0x62, 0xdb, 0xdf, -+ 0xf8, 0x45, 0xc2, 0x13, 0xec, 0x35, }, -+ { 0x06, 0x3c, 0xe3, 0x2c, 0x15, 0xc6, 0x43, 0x03, 0x81, 0xfb, 0x08, 0x76, -+ 0x33, 0xcb, 0x02, 0xc1, 0xba, 0x33, 0xe5, 0xe0, 0xd1, 0x92, 0xa8, 0x46, -+ 0x28, 0x3f, 0x3e, 0x9d, 0x2c, 0x44, 0x54, }, -+ { 0xea, 0xbb, 0x96, 0xf8, 0xd1, 0x8b, 0x04, 0x11, 0x40, 0x78, 0x42, 0x02, -+ 0x19, 0xd1, 0xbc, 0x65, 0x92, 0xd3, 0xc3, 0xd6, 0xd9, 0x19, 0xe7, 0xc3, -+ 0x40, 0x97, 0xbd, 0xd4, 0xed, 0xfa, 0x5e, 0x28, }, -+ { 0x02, }, -+ { 0x52, 0xa8, }, -+ { 0x38, 0x25, 0x0d, }, -+ { 0xe3, 0x04, 0xd4, 0x92, }, -+ { 0x97, 0xdb, 0xf7, 0x81, 0xca, }, -+ { 0x8a, 0x56, 0x9d, 0x62, 0x56, 0xcc, }, -+ { 0xa1, 0x8e, 0x3c, 0x72, 0x8f, 0x63, 0x03, }, -+ { 0xf7, 0xf3, 0x39, 0x09, 0x0a, 0xa1, 0xbb, 0x23, }, -+ { 0x6b, 0x03, 0xc0, 0xe9, 0xd9, 0x83, 0x05, 0x22, 0x01, }, -+ { 0x1b, 0x4b, 0xf5, 0xd6, 0x4f, 0x05, 0x75, 0x91, 0x4c, 0x7f, }, -+ { 0x4c, 0x8c, 0x25, 0x20, 0x21, 0xcb, 0xc2, 0x4b, 0x3a, 0x5b, 0x8d, }, -+ { 0x56, 0xe2, 0x77, 0xa0, 0xb6, 0x9f, 0x81, 0xec, 0x83, 0x75, 0xc4, 0xf9, }, -+ { 0x71, 0x70, 0x0f, 0xad, 0x4d, 0x35, 0x81, 0x9d, 0x88, 0x69, 0xf9, 0xaa, -+ 0xd3, }, -+ { 0x50, 0x6e, 0x86, 0x6e, 0x43, 0xc0, 0xc2, 0x44, 0xc2, 0xe2, 0xa0, 0x1c, -+ 0xb7, 0x9a, }, -+ { 0xe4, 0x7e, 0x72, 0xc6, 0x12, 
0x8e, 0x7c, 0xfc, 0xbd, 0xe2, 0x08, 0x31, -+ 0x3d, 0x47, 0x3d, }, -+ { 0x08, 0x97, 0x5b, 0x80, 0xae, 0xc4, 0x1d, 0x50, 0x77, 0xdf, 0x1f, 0xd0, -+ 0x24, 0xf0, 0x17, 0xc0, }, -+ { 0x01, 0xb6, 0x29, 0xf4, 0xaf, 0x78, 0x5f, 0xb6, 0x91, 0xdd, 0x76, 0x76, -+ 0xd2, 0xfd, 0x0c, 0x47, 0x40, }, -+ { 0xa1, 0xd8, 0x09, 0x97, 0x7a, 0xa6, 0xc8, 0x94, 0xf6, 0x91, 0x7b, 0xae, -+ 0x2b, 0x9f, 0x0d, 0x83, 0x48, 0xf7, }, -+ { 0x12, 0xd5, 0x53, 0x7d, 0x9a, 0xb0, 0xbe, 0xd9, 0xed, 0xe9, 0x9e, 0xee, -+ 0x61, 0x5b, 0x42, 0xf2, 0xc0, 0x73, 0xc0, }, -+ { 0xd5, 0x77, 0xd6, 0x5c, 0x6e, 0xa5, 0x69, 0x2b, 0x3b, 0x8c, 0xd6, 0x7d, -+ 0x1d, 0xbe, 0x2c, 0xa1, 0x02, 0x21, 0xcd, 0x29, }, -+ { 0xa4, 0x98, 0x80, 0xca, 0x22, 0xcf, 0x6a, 0xab, 0x5e, 0x40, 0x0d, 0x61, -+ 0x08, 0x21, 0xef, 0xc0, 0x6c, 0x52, 0xb4, 0xb0, 0x53, }, -+ { 0xbf, 0xaf, 0x8f, 0x3b, 0x7a, 0x97, 0x33, 0xe5, 0xca, 0x07, 0x37, 0xfd, -+ 0x15, 0xdf, 0xce, 0x26, 0x2a, 0xb1, 0xa7, 0x0b, 0xb3, 0xac, }, -+ { 0x16, 0x22, 0xe1, 0xbc, 0x99, 0x4e, 0x01, 0xf0, 0xfa, 0xff, 0x8f, 0xa5, -+ 0x0c, 0x61, 0xb0, 0xad, 0xcc, 0xb1, 0xe1, 0x21, 0x46, 0xfa, 0x2e, }, -+ { 0x11, 0x5b, 0x0b, 0x2b, 0xe6, 0x14, 0xc1, 0xd5, 0x4d, 0x71, 0x5e, 0x17, -+ 0xea, 0x23, 0xdd, 0x6c, 0xbd, 0x1d, 0xbe, 0x12, 0x1b, 0xee, 0x4c, 0x1a, }, -+ { 0x40, 0x88, 0x22, 0xf3, 0x20, 0x6c, 0xed, 0xe1, 0x36, 0x34, 0x62, 0x2c, -+ 0x98, 0x83, 0x52, 0xe2, 0x25, 0xee, 0xe9, 0xf5, 0xe1, 0x17, 0xf0, 0x5c, -+ 0xae, }, -+ { 0xc3, 0x76, 0x37, 0xde, 0x95, 0x8c, 0xca, 0x2b, 0x0c, 0x23, 0xe7, 0xb5, -+ 0x38, 0x70, 0x61, 0xcc, 0xff, 0xd3, 0x95, 0x7b, 0xf3, 0xff, 0x1f, 0x9d, -+ 0x59, 0x00, }, -+ { 0x0c, 0x19, 0x52, 0x05, 0x22, 0x53, 0xcb, 0x48, 0xd7, 0x10, 0x0e, 0x7e, -+ 0x14, 0x69, 0xb5, 0xa2, 0x92, 0x43, 0xa3, 0x9e, 0x4b, 0x8f, 0x51, 0x2c, -+ 0x5a, 0x2c, 0x3b, }, -+ { 0xe1, 0x9d, 0x70, 0x70, 0x28, 0xec, 0x86, 0x40, 0x55, 0x33, 0x56, 0xda, -+ 0x88, 0xca, 0xee, 0xc8, 0x6a, 0x20, 0xb1, 0xe5, 0x3d, 0x57, 0xf8, 0x3c, -+ 0x10, 0x07, 0x2a, 0xc4, }, -+ { 0x0b, 0xae, 0xf1, 0xc4, 0x79, 0xee, 0x1b, 0x3d, 0x27, 
0x35, 0x8d, 0x14, -+ 0xd6, 0xae, 0x4e, 0x3c, 0xe9, 0x53, 0x50, 0xb5, 0xcc, 0x0c, 0xf7, 0xdf, -+ 0xee, 0xa1, 0x74, 0xd6, 0x71, }, -+ { 0xe6, 0xa4, 0xf4, 0x99, 0x98, 0xb9, 0x80, 0xea, 0x96, 0x7f, 0x4f, 0x33, -+ 0xcf, 0x74, 0x25, 0x6f, 0x17, 0x6c, 0xbf, 0xf5, 0x5c, 0x38, 0xd0, 0xff, -+ 0x96, 0xcb, 0x13, 0xf9, 0xdf, 0xfd, }, -+ { 0xbe, 0x92, 0xeb, 0xba, 0x44, 0x2c, 0x24, 0x74, 0xd4, 0x03, 0x27, 0x3c, -+ 0x5d, 0x5b, 0x03, 0x30, 0x87, 0x63, 0x69, 0xe0, 0xb8, 0x94, 0xf4, 0x44, -+ 0x7e, 0xad, 0xcd, 0x20, 0x12, 0x16, 0x79, }, -+ { 0x30, 0xf1, 0xc4, 0x8e, 0x05, 0x90, 0x2a, 0x97, 0x63, 0x94, 0x46, 0xff, -+ 0xce, 0xd8, 0x67, 0xa7, 0xac, 0x33, 0x8c, 0x95, 0xb7, 0xcd, 0xa3, 0x23, -+ 0x98, 0x9d, 0x76, 0x6c, 0x9d, 0xa8, 0xd6, 0x8a, }, -+ { 0xbe, }, -+ { 0x17, 0x6c, }, -+ { 0x1a, 0x42, 0x4f, }, -+ { 0xba, 0xaf, 0xb7, 0x65, }, -+ { 0xc2, 0x63, 0x43, 0x6a, 0xea, }, -+ { 0xe4, 0x4d, 0xad, 0xf2, 0x0b, 0x02, }, -+ { 0x04, 0xc7, 0xc4, 0x7f, 0xa9, 0x2b, 0xce, }, -+ { 0x66, 0xf6, 0x67, 0xcb, 0x03, 0x53, 0xc8, 0xf1, }, -+ { 0x56, 0xa3, 0x60, 0x78, 0xc9, 0x5f, 0x70, 0x1b, 0x5e, }, -+ { 0x99, 0xff, 0x81, 0x7c, 0x13, 0x3c, 0x29, 0x79, 0x4b, 0x65, }, -+ { 0x51, 0x10, 0x50, 0x93, 0x01, 0x93, 0xb7, 0x01, 0xc9, 0x18, 0xb7, }, -+ { 0x8e, 0x3c, 0x42, 0x1e, 0x5e, 0x7d, 0xc1, 0x50, 0x70, 0x1f, 0x00, 0x98, }, -+ { 0x5f, 0xd9, 0x9b, 0xc8, 0xd7, 0xb2, 0x72, 0x62, 0x1a, 0x1e, 0xba, 0x92, -+ 0xe9, }, -+ { 0x70, 0x2b, 0xba, 0xfe, 0xad, 0x5d, 0x96, 0x3f, 0x27, 0xc2, 0x41, 0x6d, -+ 0xc4, 0xb3, }, -+ { 0xae, 0xe0, 0xd5, 0xd4, 0xc7, 0xae, 0x15, 0x5e, 0xdc, 0xdd, 0x33, 0x60, -+ 0xd7, 0xd3, 0x5e, }, -+ { 0x79, 0x8e, 0xbc, 0x9e, 0x20, 0xb9, 0x19, 0x4b, 0x63, 0x80, 0xf3, 0x16, -+ 0xaf, 0x39, 0xbd, 0x92, }, -+ { 0xc2, 0x0e, 0x85, 0xa0, 0x0b, 0x9a, 0xb0, 0xec, 0xde, 0x38, 0xd3, 0x10, -+ 0xd9, 0xa7, 0x66, 0x27, 0xcf, }, -+ { 0x0e, 0x3b, 0x75, 0x80, 0x67, 0x14, 0x0c, 0x02, 0x90, 0xd6, 0xb3, 0x02, -+ 0x81, 0xf6, 0xa6, 0x87, 0xce, 0x58, }, -+ { 0x79, 0xb5, 0xe9, 0x5d, 0x52, 0x4d, 0xf7, 0x59, 0xf4, 0x2e, 0x27, 0xdd, -+ 
0xb3, 0xed, 0x57, 0x5b, 0x82, 0xea, 0x6f, }, -+ { 0xa2, 0x97, 0xf5, 0x80, 0x02, 0x3d, 0xde, 0xa3, 0xf9, 0xf6, 0xab, 0xe3, -+ 0x57, 0x63, 0x7b, 0x9b, 0x10, 0x42, 0x6f, 0xf2, }, -+ { 0x12, 0x7a, 0xfc, 0xb7, 0x67, 0x06, 0x0c, 0x78, 0x1a, 0xfe, 0x88, 0x4f, -+ 0xc6, 0xac, 0x52, 0x96, 0x64, 0x28, 0x97, 0x84, 0x06, }, -+ { 0xc5, 0x04, 0x44, 0x6b, 0xb2, 0xa5, 0xa4, 0x66, 0xe1, 0x76, 0xa2, 0x51, -+ 0xf9, 0x59, 0x69, 0x97, 0x56, 0x0b, 0xbf, 0x50, 0xb3, 0x34, }, -+ { 0x21, 0x32, 0x6b, 0x42, 0xb5, 0xed, 0x71, 0x8d, 0xf7, 0x5a, 0x35, 0xe3, -+ 0x90, 0xe2, 0xee, 0xaa, 0x89, 0xf6, 0xc9, 0x9c, 0x4d, 0x73, 0xf4, }, -+ { 0x4c, 0xa6, 0x09, 0xf4, 0x48, 0xe7, 0x46, 0xbc, 0x49, 0xfc, 0xe5, 0xda, -+ 0xd1, 0x87, 0x13, 0x17, 0x4c, 0x59, 0x71, 0x26, 0x5b, 0x2c, 0x42, 0xb7, }, -+ { 0x13, 0x63, 0xf3, 0x40, 0x02, 0xe5, 0xa3, 0x3a, 0x5e, 0x8e, 0xf8, 0xb6, -+ 0x8a, 0x49, 0x60, 0x76, 0x34, 0x72, 0x94, 0x73, 0xf6, 0xd9, 0x21, 0x6a, -+ 0x26, }, -+ { 0xdf, 0x75, 0x16, 0x10, 0x1b, 0x5e, 0x81, 0xc3, 0xc8, 0xde, 0x34, 0x24, -+ 0xb0, 0x98, 0xeb, 0x1b, 0x8f, 0xa1, 0x9b, 0x05, 0xee, 0xa5, 0xe9, 0x35, -+ 0xf4, 0x1d, }, -+ { 0xcd, 0x21, 0x93, 0x6e, 0x5b, 0xa0, 0x26, 0x2b, 0x21, 0x0e, 0xa0, 0xb9, -+ 0x1c, 0xb5, 0xbb, 0xb8, 0xf8, 0x1e, 0xff, 0x5c, 0xa8, 0xf9, 0x39, 0x46, -+ 0x4e, 0x29, 0x26, }, -+ { 0x73, 0x7f, 0x0e, 0x3b, 0x0b, 0x5c, 0xf9, 0x60, 0xaa, 0x88, 0xa1, 0x09, -+ 0xb1, 0x5d, 0x38, 0x7b, 0x86, 0x8f, 0x13, 0x7a, 0x8d, 0x72, 0x7a, 0x98, -+ 0x1a, 0x5b, 0xff, 0xc9, }, -+ { 0xd3, 0x3c, 0x61, 0x71, 0x44, 0x7e, 0x31, 0x74, 0x98, 0x9d, 0x9a, 0xd2, -+ 0x27, 0xf3, 0x46, 0x43, 0x42, 0x51, 0xd0, 0x5f, 0xe9, 0x1c, 0x5c, 0x69, -+ 0xbf, 0xf6, 0xbe, 0x3c, 0x40, }, -+ { 0x31, 0x99, 0x31, 0x9f, 0xaa, 0x43, 0x2e, 0x77, 0x3e, 0x74, 0x26, 0x31, -+ 0x5e, 0x61, 0xf1, 0x87, 0xe2, 0xeb, 0x9b, 0xcd, 0xd0, 0x3a, 0xee, 0x20, -+ 0x7e, 0x10, 0x0a, 0x0b, 0x7e, 0xfa, }, -+ { 0xa4, 0x27, 0x80, 0x67, 0x81, 0x2a, 0xa7, 0x62, 0xf7, 0x6e, 0xda, 0xd4, -+ 0x5c, 0x39, 0x74, 0xad, 0x7e, 0xbe, 0xad, 0xa5, 0x84, 0x7f, 0xa9, 0x30, -+ 0x5d, 
0xdb, 0xe2, 0x05, 0x43, 0xf7, 0x1b, }, -+ { 0x0b, 0x37, 0xd8, 0x02, 0xe1, 0x83, 0xd6, 0x80, 0xf2, 0x35, 0xc2, 0xb0, -+ 0x37, 0xef, 0xef, 0x5e, 0x43, 0x93, 0xf0, 0x49, 0x45, 0x0a, 0xef, 0xb5, -+ 0x76, 0x70, 0x12, 0x44, 0xc4, 0xdb, 0xf5, 0x7a, }, -+ { 0x1f, }, -+ { 0x82, 0x60, }, -+ { 0xcc, 0xe3, 0x08, }, -+ { 0x56, 0x17, 0xe4, 0x59, }, -+ { 0xe2, 0xd7, 0x9e, 0xc4, 0x4c, }, -+ { 0xb2, 0xad, 0xd3, 0x78, 0x58, 0x5a, }, -+ { 0xce, 0x43, 0xb4, 0x02, 0x96, 0xab, 0x3c, }, -+ { 0xe6, 0x05, 0x1a, 0x73, 0x22, 0x32, 0xbb, 0x77, }, -+ { 0x23, 0xe7, 0xda, 0xfe, 0x2c, 0xef, 0x8c, 0x22, 0xec, }, -+ { 0xe9, 0x8e, 0x55, 0x38, 0xd1, 0xd7, 0x35, 0x23, 0x98, 0xc7, }, -+ { 0xb5, 0x81, 0x1a, 0xe5, 0xb5, 0xa5, 0xd9, 0x4d, 0xca, 0x41, 0xe7, }, -+ { 0x41, 0x16, 0x16, 0x95, 0x8d, 0x9e, 0x0c, 0xea, 0x8c, 0x71, 0x9a, 0xc1, }, -+ { 0x7c, 0x33, 0xc0, 0xa4, 0x00, 0x62, 0xea, 0x60, 0x67, 0xe4, 0x20, 0xbc, -+ 0x5b, }, -+ { 0xdb, 0xb1, 0xdc, 0xfd, 0x08, 0xc0, 0xde, 0x82, 0xd1, 0xde, 0x38, 0xc0, -+ 0x90, 0x48, }, -+ { 0x37, 0x18, 0x2e, 0x0d, 0x61, 0xaa, 0x61, 0xd7, 0x86, 0x20, 0x16, 0x60, -+ 0x04, 0xd9, 0xd5, }, -+ { 0xb0, 0xcf, 0x2c, 0x4c, 0x5e, 0x5b, 0x4f, 0x2a, 0x23, 0x25, 0x58, 0x47, -+ 0xe5, 0x31, 0x06, 0x70, }, -+ { 0x91, 0xa0, 0xa3, 0x86, 0x4e, 0xe0, 0x72, 0x38, 0x06, 0x67, 0x59, 0x5c, -+ 0x70, 0x25, 0xdb, 0x33, 0x27, }, -+ { 0x44, 0x58, 0x66, 0xb8, 0x58, 0xc7, 0x13, 0xed, 0x4c, 0xc0, 0xf4, 0x9a, -+ 0x1e, 0x67, 0x75, 0x33, 0xb6, 0xb8, }, -+ { 0x7f, 0x98, 0x4a, 0x8e, 0x50, 0xa2, 0x5c, 0xcd, 0x59, 0xde, 0x72, 0xb3, -+ 0x9d, 0xc3, 0x09, 0x8a, 0xab, 0x56, 0xf1, }, -+ { 0x80, 0x96, 0x49, 0x1a, 0x59, 0xa2, 0xc5, 0xd5, 0xa7, 0x20, 0x8a, 0xb7, -+ 0x27, 0x62, 0x84, 0x43, 0xc6, 0xe1, 0x1b, 0x5d, }, -+ { 0x6b, 0xb7, 0x2b, 0x26, 0x62, 0x14, 0x70, 0x19, 0x3d, 0x4d, 0xac, 0xac, -+ 0x63, 0x58, 0x5e, 0x94, 0xb5, 0xb7, 0xe8, 0xe8, 0xa2, }, -+ { 0x20, 0xa8, 0xc0, 0xfd, 0x63, 0x3d, 0x6e, 0x98, 0xcf, 0x0c, 0x49, 0x98, -+ 0xe4, 0x5a, 0xfe, 0x8c, 0xaa, 0x70, 0x82, 0x1c, 0x7b, 0x74, }, -+ { 0xc8, 0xe8, 0xdd, 0xdf, 
0x69, 0x30, 0x01, 0xc2, 0x0f, 0x7e, 0x2f, 0x11, -+ 0xcc, 0x3e, 0x17, 0xa5, 0x69, 0x40, 0x3f, 0x0e, 0x79, 0x7f, 0xcf, }, -+ { 0xdb, 0x61, 0xc0, 0xe2, 0x2e, 0x49, 0x07, 0x31, 0x1d, 0x91, 0x42, 0x8a, -+ 0xfc, 0x5e, 0xd3, 0xf8, 0x56, 0x1f, 0x2b, 0x73, 0xfd, 0x9f, 0xb2, 0x8e, }, -+ { 0x0c, 0x89, 0x55, 0x0c, 0x1f, 0x59, 0x2c, 0x9d, 0x1b, 0x29, 0x1d, 0x41, -+ 0x1d, 0xe6, 0x47, 0x8f, 0x8c, 0x2b, 0xea, 0x8f, 0xf0, 0xff, 0x21, 0x70, -+ 0x88, }, -+ { 0x12, 0x18, 0x95, 0xa6, 0x59, 0xb1, 0x31, 0x24, 0x45, 0x67, 0x55, 0xa4, -+ 0x1a, 0x2d, 0x48, 0x67, 0x1b, 0x43, 0x88, 0x2d, 0x8e, 0xa0, 0x70, 0xb3, -+ 0xc6, 0xbb, }, -+ { 0xe7, 0xb1, 0x1d, 0xb2, 0x76, 0x4d, 0x68, 0x68, 0x68, 0x23, 0x02, 0x55, -+ 0x3a, 0xe2, 0xe5, 0xd5, 0x4b, 0x43, 0xf9, 0x34, 0x77, 0x5c, 0xa1, 0xf5, -+ 0x55, 0xfd, 0x4f, }, -+ { 0x8c, 0x87, 0x5a, 0x08, 0x3a, 0x73, 0xad, 0x61, 0xe1, 0xe7, 0x99, 0x7e, -+ 0xf0, 0x5d, 0xe9, 0x5d, 0x16, 0x43, 0x80, 0x2f, 0xd0, 0x66, 0x34, 0xe2, -+ 0x42, 0x64, 0x3b, 0x1a, }, -+ { 0x39, 0xc1, 0x99, 0xcf, 0x22, 0xbf, 0x16, 0x8f, 0x9f, 0x80, 0x7f, 0x95, -+ 0x0a, 0x05, 0x67, 0x27, 0xe7, 0x15, 0xdf, 0x9d, 0xb2, 0xfe, 0x1c, 0xb5, -+ 0x1d, 0x60, 0x8f, 0x8a, 0x1d, }, -+ { 0x9b, 0x6e, 0x08, 0x09, 0x06, 0x73, 0xab, 0x68, 0x02, 0x62, 0x1a, 0xe4, -+ 0xd4, 0xdf, 0xc7, 0x02, 0x4c, 0x6a, 0x5f, 0xfd, 0x23, 0xac, 0xae, 0x6d, -+ 0x43, 0xa4, 0x7a, 0x50, 0x60, 0x3c, }, -+ { 0x1d, 0xb4, 0xc6, 0xe1, 0xb1, 0x4b, 0xe3, 0xf2, 0xe2, 0x1a, 0x73, 0x1b, -+ 0xa0, 0x92, 0xa7, 0xf5, 0xff, 0x8f, 0x8b, 0x5d, 0xdf, 0xa8, 0x04, 0xb3, -+ 0xb0, 0xf7, 0xcc, 0x12, 0xfa, 0x35, 0x46, }, -+ { 0x49, 0x45, 0x97, 0x11, 0x0f, 0x1c, 0x60, 0x8e, 0xe8, 0x47, 0x30, 0xcf, -+ 0x60, 0xa8, 0x71, 0xc5, 0x1b, 0xe9, 0x39, 0x4d, 0x49, 0xb6, 0x12, 0x1f, -+ 0x24, 0xab, 0x37, 0xff, 0x83, 0xc2, 0xe1, 0x3a, }, -+ { 0x60, }, -+ { 0x24, 0x26, }, -+ { 0x47, 0xeb, 0xc9, }, -+ { 0x4a, 0xd0, 0xbc, 0xf0, }, -+ { 0x8e, 0x2b, 0xc9, 0x85, 0x3c, }, -+ { 0xa2, 0x07, 0x15, 0xb8, 0x12, 0x74, }, -+ { 0x0f, 0xdb, 0x5b, 0x33, 0x69, 0xfe, 0x4b, }, -+ { 0xa2, 0x86, 
0x54, 0xf4, 0xfd, 0xb2, 0xd4, 0xe6, }, -+ { 0xbb, 0x84, 0x78, 0x49, 0x27, 0x8e, 0x61, 0xda, 0x60, }, -+ { 0x04, 0xc3, 0xcd, 0xaa, 0x8f, 0xa7, 0x03, 0xc9, 0xf9, 0xb6, }, -+ { 0xf8, 0x27, 0x1d, 0x61, 0xdc, 0x21, 0x42, 0xdd, 0xad, 0x92, 0x40, }, -+ { 0x12, 0x87, 0xdf, 0xc2, 0x41, 0x45, 0x5a, 0x36, 0x48, 0x5b, 0x51, 0x2b, }, -+ { 0xbb, 0x37, 0x5d, 0x1f, 0xf1, 0x68, 0x7a, 0xc4, 0xa5, 0xd2, 0xa4, 0x91, -+ 0x8d, }, -+ { 0x5b, 0x27, 0xd1, 0x04, 0x54, 0x52, 0x9f, 0xa3, 0x47, 0x86, 0x33, 0x33, -+ 0xbf, 0xa0, }, -+ { 0xcf, 0x04, 0xea, 0xf8, 0x03, 0x2a, 0x43, 0xff, 0xa6, 0x68, 0x21, 0x4c, -+ 0xd5, 0x4b, 0xed, }, -+ { 0xaf, 0xb8, 0xbc, 0x63, 0x0f, 0x18, 0x4d, 0xe2, 0x7a, 0xdd, 0x46, 0x44, -+ 0xc8, 0x24, 0x0a, 0xb7, }, -+ { 0x3e, 0xdc, 0x36, 0xe4, 0x89, 0xb1, 0xfa, 0xc6, 0x40, 0x93, 0x2e, 0x75, -+ 0xb2, 0x15, 0xd1, 0xb1, 0x10, }, -+ { 0x6c, 0xd8, 0x20, 0x3b, 0x82, 0x79, 0xf9, 0xc8, 0xbc, 0x9d, 0xe0, 0x35, -+ 0xbe, 0x1b, 0x49, 0x1a, 0xbc, 0x3a, }, -+ { 0x78, 0x65, 0x2c, 0xbe, 0x35, 0x67, 0xdc, 0x78, 0xd4, 0x41, 0xf6, 0xc9, -+ 0xde, 0xde, 0x1f, 0x18, 0x13, 0x31, 0x11, }, -+ { 0x8a, 0x7f, 0xb1, 0x33, 0x8f, 0x0c, 0x3c, 0x0a, 0x06, 0x61, 0xf0, 0x47, -+ 0x29, 0x1b, 0x29, 0xbc, 0x1c, 0x47, 0xef, 0x7a, }, -+ { 0x65, 0x91, 0xf1, 0xe6, 0xb3, 0x96, 0xd3, 0x8c, 0xc2, 0x4a, 0x59, 0x35, -+ 0x72, 0x8e, 0x0b, 0x9a, 0x87, 0xca, 0x34, 0x7b, 0x63, }, -+ { 0x5f, 0x08, 0x87, 0x80, 0x56, 0x25, 0x89, 0x77, 0x61, 0x8c, 0x64, 0xa1, -+ 0x59, 0x6d, 0x59, 0x62, 0xe8, 0x4a, 0xc8, 0x58, 0x99, 0xd1, }, -+ { 0x23, 0x87, 0x1d, 0xed, 0x6f, 0xf2, 0x91, 0x90, 0xe2, 0xfe, 0x43, 0x21, -+ 0xaf, 0x97, 0xc6, 0xbc, 0xd7, 0x15, 0xc7, 0x2d, 0x08, 0x77, 0x91, }, -+ { 0x90, 0x47, 0x9a, 0x9e, 0x3a, 0xdf, 0xf3, 0xc9, 0x4c, 0x1e, 0xa7, 0xd4, -+ 0x6a, 0x32, 0x90, 0xfe, 0xb7, 0xb6, 0x7b, 0xfa, 0x96, 0x61, 0xfb, 0xa4, }, -+ { 0xb1, 0x67, 0x60, 0x45, 0xb0, 0x96, 0xc5, 0x15, 0x9f, 0x4d, 0x26, 0xd7, -+ 0x9d, 0xf1, 0xf5, 0x6d, 0x21, 0x00, 0x94, 0x31, 0x64, 0x94, 0xd3, 0xa7, -+ 0xd3, }, -+ { 0x02, 0x3e, 0xaf, 0xf3, 0x79, 0x73, 0xa5, 
0xf5, 0xcc, 0x7a, 0x7f, 0xfb, -+ 0x79, 0x2b, 0x85, 0x8c, 0x88, 0x72, 0x06, 0xbe, 0xfe, 0xaf, 0xc1, 0x16, -+ 0xa6, 0xd6, }, -+ { 0x2a, 0xb0, 0x1a, 0xe5, 0xaa, 0x6e, 0xb3, 0xae, 0x53, 0x85, 0x33, 0x80, -+ 0x75, 0xae, 0x30, 0xe6, 0xb8, 0x72, 0x42, 0xf6, 0x25, 0x4f, 0x38, 0x88, -+ 0x55, 0xd1, 0xa9, }, -+ { 0x90, 0xd8, 0x0c, 0xc0, 0x93, 0x4b, 0x4f, 0x9e, 0x65, 0x6c, 0xa1, 0x54, -+ 0xa6, 0xf6, 0x6e, 0xca, 0xd2, 0xbb, 0x7e, 0x6a, 0x1c, 0xd3, 0xce, 0x46, -+ 0xef, 0xb0, 0x00, 0x8d, }, -+ { 0xed, 0x9c, 0x49, 0xcd, 0xc2, 0xde, 0x38, 0x0e, 0xe9, 0x98, 0x6c, 0xc8, -+ 0x90, 0x9e, 0x3c, 0xd4, 0xd3, 0xeb, 0x88, 0x32, 0xc7, 0x28, 0xe3, 0x94, -+ 0x1c, 0x9f, 0x8b, 0xf3, 0xcb, }, -+ { 0xac, 0xe7, 0x92, 0x16, 0xb4, 0x14, 0xa0, 0xe4, 0x04, 0x79, 0xa2, 0xf4, -+ 0x31, 0xe6, 0x0c, 0x26, 0xdc, 0xbf, 0x2f, 0x69, 0x1b, 0x55, 0x94, 0x67, -+ 0xda, 0x0c, 0xd7, 0x32, 0x1f, 0xef, }, -+ { 0x68, 0x63, 0x85, 0x57, 0x95, 0x9e, 0x42, 0x27, 0x41, 0x43, 0x42, 0x02, -+ 0xa5, 0x78, 0xa7, 0xc6, 0x43, 0xc1, 0x6a, 0xba, 0x70, 0x80, 0xcd, 0x04, -+ 0xb6, 0x78, 0x76, 0x29, 0xf3, 0xe8, 0xa0, }, -+ { 0xe6, 0xac, 0x8d, 0x9d, 0xf0, 0xc0, 0xf7, 0xf7, 0xe3, 0x3e, 0x4e, 0x28, -+ 0x0f, 0x59, 0xb2, 0x67, 0x9e, 0x84, 0x34, 0x42, 0x96, 0x30, 0x2b, 0xca, -+ 0x49, 0xb6, 0xc5, 0x9a, 0x84, 0x59, 0xa7, 0x81, }, -+ { 0x7e, }, -+ { 0x1e, 0x21, }, -+ { 0x26, 0xd3, 0xdd, }, -+ { 0x2c, 0xd4, 0xb3, 0x3d, }, -+ { 0x86, 0x7b, 0x76, 0x3c, 0xf0, }, -+ { 0x12, 0xc3, 0x70, 0x1d, 0x55, 0x18, }, -+ { 0x96, 0xc2, 0xbd, 0x61, 0x55, 0xf4, 0x24, }, -+ { 0x20, 0x51, 0xf7, 0x86, 0x58, 0x8f, 0x07, 0x2a, }, -+ { 0x93, 0x15, 0xa8, 0x1d, 0xda, 0x97, 0xee, 0x0e, 0x6c, }, -+ { 0x39, 0x93, 0xdf, 0xd5, 0x0e, 0xca, 0xdc, 0x7a, 0x92, 0xce, }, -+ { 0x60, 0xd5, 0xfd, 0xf5, 0x1b, 0x26, 0x82, 0x26, 0x73, 0x02, 0xbc, }, -+ { 0x98, 0xf2, 0x34, 0xe1, 0xf5, 0xfb, 0x00, 0xac, 0x10, 0x4a, 0x38, 0x9f, }, -+ { 0xda, 0x3a, 0x92, 0x8a, 0xd0, 0xcd, 0x12, 0xcd, 0x15, 0xbb, 0xab, 0x77, -+ 0x66, }, -+ { 0xa2, 0x92, 0x1a, 0xe5, 0xca, 0x0c, 0x30, 0x75, 0xeb, 0xaf, 0x00, 0x31, 
-+ 0x55, 0x66, }, -+ { 0x06, 0xea, 0xfd, 0x3e, 0x86, 0x38, 0x62, 0x4e, 0xa9, 0x12, 0xa4, 0x12, -+ 0x43, 0xbf, 0xa1, }, -+ { 0xe4, 0x71, 0x7b, 0x94, 0xdb, 0xa0, 0xd2, 0xff, 0x9b, 0xeb, 0xad, 0x8e, -+ 0x95, 0x8a, 0xc5, 0xed, }, -+ { 0x25, 0x5a, 0x77, 0x71, 0x41, 0x0e, 0x7a, 0xe9, 0xed, 0x0c, 0x10, 0xef, -+ 0xf6, 0x2b, 0x3a, 0xba, 0x60, }, -+ { 0xee, 0xe2, 0xa3, 0x67, 0x64, 0x1d, 0xc6, 0x04, 0xc4, 0xe1, 0x68, 0xd2, -+ 0x6e, 0xd2, 0x91, 0x75, 0x53, 0x07, }, -+ { 0xe0, 0xf6, 0x4d, 0x8f, 0x68, 0xfc, 0x06, 0x7e, 0x18, 0x79, 0x7f, 0x2b, -+ 0x6d, 0xef, 0x46, 0x7f, 0xab, 0xb2, 0xad, }, -+ { 0x3d, 0x35, 0x88, 0x9f, 0x2e, 0xcf, 0x96, 0x45, 0x07, 0x60, 0x71, 0x94, -+ 0x00, 0x8d, 0xbf, 0xf4, 0xef, 0x46, 0x2e, 0x3c, }, -+ { 0x43, 0xcf, 0x98, 0xf7, 0x2d, 0xf4, 0x17, 0xe7, 0x8c, 0x05, 0x2d, 0x9b, -+ 0x24, 0xfb, 0x4d, 0xea, 0x4a, 0xec, 0x01, 0x25, 0x29, }, -+ { 0x8e, 0x73, 0x9a, 0x78, 0x11, 0xfe, 0x48, 0xa0, 0x3b, 0x1a, 0x26, 0xdf, -+ 0x25, 0xe9, 0x59, 0x1c, 0x70, 0x07, 0x9f, 0xdc, 0xa0, 0xa6, }, -+ { 0xe8, 0x47, 0x71, 0xc7, 0x3e, 0xdf, 0xb5, 0x13, 0xb9, 0x85, 0x13, 0xa8, -+ 0x54, 0x47, 0x6e, 0x59, 0x96, 0x09, 0x13, 0x5f, 0x82, 0x16, 0x0b, }, -+ { 0xfb, 0xc0, 0x8c, 0x03, 0x21, 0xb3, 0xc4, 0xb5, 0x43, 0x32, 0x6c, 0xea, -+ 0x7f, 0xa8, 0x43, 0x91, 0xe8, 0x4e, 0x3f, 0xbf, 0x45, 0x58, 0x6a, 0xa3, }, -+ { 0x55, 0xf8, 0xf3, 0x00, 0x76, 0x09, 0xef, 0x69, 0x5d, 0xd2, 0x8a, 0xf2, -+ 0x65, 0xc3, 0xcb, 0x9b, 0x43, 0xfd, 0xb1, 0x7e, 0x7f, 0xa1, 0x94, 0xb0, -+ 0xd7, }, -+ { 0xaa, 0x13, 0xc1, 0x51, 0x40, 0x6d, 0x8d, 0x4c, 0x0a, 0x95, 0x64, 0x7b, -+ 0xd1, 0x96, 0xb6, 0x56, 0xb4, 0x5b, 0xcf, 0xd6, 0xd9, 0x15, 0x97, 0xdd, -+ 0xb6, 0xef, }, -+ { 0xaf, 0xb7, 0x36, 0xb0, 0x04, 0xdb, 0xd7, 0x9c, 0x9a, 0x44, 0xc4, 0xf6, -+ 0x1f, 0x12, 0x21, 0x2d, 0x59, 0x30, 0x54, 0xab, 0x27, 0x61, 0xa3, 0x57, -+ 0xef, 0xf8, 0x53, }, -+ { 0x97, 0x34, 0x45, 0x3e, 0xce, 0x7c, 0x35, 0xa2, 0xda, 0x9f, 0x4b, 0x46, -+ 0x6c, 0x11, 0x67, 0xff, 0x2f, 0x76, 0x58, 0x15, 0x71, 0xfa, 0x44, 0x89, -+ 0x89, 0xfd, 0xf7, 0x99, }, -+ { 
0x1f, 0xb1, 0x62, 0xeb, 0x83, 0xc5, 0x9c, 0x89, 0xf9, 0x2c, 0xd2, 0x03, -+ 0x61, 0xbc, 0xbb, 0xa5, 0x74, 0x0e, 0x9b, 0x7e, 0x82, 0x3e, 0x70, 0x0a, -+ 0xa9, 0x8f, 0x2b, 0x59, 0xfb, }, -+ { 0xf8, 0xca, 0x5e, 0x3a, 0x4f, 0x9e, 0x10, 0x69, 0x10, 0xd5, 0x4c, 0xeb, -+ 0x1a, 0x0f, 0x3c, 0x6a, 0x98, 0xf5, 0xb0, 0x97, 0x5b, 0x37, 0x2f, 0x0d, -+ 0xbd, 0x42, 0x4b, 0x69, 0xa1, 0x82, }, -+ { 0x12, 0x8c, 0x6d, 0x52, 0x08, 0xef, 0x74, 0xb2, 0xe6, 0xaa, 0xd3, 0xb0, -+ 0x26, 0xb0, 0xd9, 0x94, 0xb6, 0x11, 0x45, 0x0e, 0x36, 0x71, 0x14, 0x2d, -+ 0x41, 0x8c, 0x21, 0x53, 0x31, 0xe9, 0x68, }, -+ { 0xee, 0xea, 0x0d, 0x89, 0x47, 0x7e, 0x72, 0xd1, 0xd8, 0xce, 0x58, 0x4c, -+ 0x94, 0x1f, 0x0d, 0x51, 0x08, 0xa3, 0xb6, 0x3d, 0xe7, 0x82, 0x46, 0x92, -+ 0xd6, 0x98, 0x6b, 0x07, 0x10, 0x65, 0x52, 0x65, }, -+}; -+ -+static const u8 blake2s_hmac_testvecs[][BLAKE2S_HASH_SIZE] __initconst = { -+ { 0xce, 0xe1, 0x57, 0x69, 0x82, 0xdc, 0xbf, 0x43, 0xad, 0x56, 0x4c, 0x70, -+ 0xed, 0x68, 0x16, 0x96, 0xcf, 0xa4, 0x73, 0xe8, 0xe8, 0xfc, 0x32, 0x79, -+ 0x08, 0x0a, 0x75, 0x82, 0xda, 0x3f, 0x05, 0x11, }, -+ { 0x77, 0x2f, 0x0c, 0x71, 0x41, 0xf4, 0x4b, 0x2b, 0xb3, 0xc6, 0xb6, 0xf9, -+ 0x60, 0xde, 0xe4, 0x52, 0x38, 0x66, 0xe8, 0xbf, 0x9b, 0x96, 0xc4, 0x9f, -+ 0x60, 0xd9, 0x24, 0x37, 0x99, 0xd6, 0xec, 0x31, }, -+}; -+ -+bool __init blake2s_selftest(void) -+{ -+ u8 key[BLAKE2S_KEY_SIZE]; -+ u8 buf[ARRAY_SIZE(blake2s_testvecs)]; -+ u8 hash[BLAKE2S_HASH_SIZE]; -+ struct blake2s_state state; -+ bool success = true; -+ int i, l; -+ -+ key[0] = key[1] = 1; -+ for (i = 2; i < sizeof(key); ++i) -+ key[i] = key[i - 2] + key[i - 1]; -+ -+ for (i = 0; i < sizeof(buf); ++i) -+ buf[i] = (u8)i; -+ -+ for (i = l = 0; i < ARRAY_SIZE(blake2s_testvecs); l = (l + 37) % ++i) { -+ int outlen = 1 + i % BLAKE2S_HASH_SIZE; -+ int keylen = (13 * i) % (BLAKE2S_KEY_SIZE + 1); -+ -+ blake2s(hash, buf, key + BLAKE2S_KEY_SIZE - keylen, outlen, i, -+ keylen); -+ if (memcmp(hash, blake2s_testvecs[i], outlen)) { -+ pr_err("blake2s self-test %d: 
FAIL\n", i + 1); -+ success = false; -+ } -+ -+ if (!keylen) -+ blake2s_init(&state, outlen); -+ else -+ blake2s_init_key(&state, outlen, -+ key + BLAKE2S_KEY_SIZE - keylen, -+ keylen); -+ -+ blake2s_update(&state, buf, l); -+ blake2s_update(&state, buf + l, i - l); -+ blake2s_final(&state, hash); -+ if (memcmp(hash, blake2s_testvecs[i], outlen)) { -+ pr_err("blake2s init/update/final self-test %d: FAIL\n", -+ i + 1); -+ success = false; -+ } -+ } -+ -+ if (success) { -+ blake2s256_hmac(hash, buf, key, sizeof(buf), sizeof(key)); -+ success &= !memcmp(hash, blake2s_hmac_testvecs[0], BLAKE2S_HASH_SIZE); -+ -+ blake2s256_hmac(hash, key, buf, sizeof(key), sizeof(buf)); -+ success &= !memcmp(hash, blake2s_hmac_testvecs[1], BLAKE2S_HASH_SIZE); -+ -+ if (!success) -+ pr_err("blake2s256_hmac self-test: FAIL\n"); -+ } -+ -+ return success; -+} ---- /dev/null -+++ b/lib/crypto/blake2s.c -@@ -0,0 +1,126 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * This is an implementation of the BLAKE2s hash and PRF functions. 
-+ * -+ * Information: https://blake2.net/ -+ * -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+bool blake2s_selftest(void); -+ -+void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen) -+{ -+ const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; -+ -+ if (unlikely(!inlen)) -+ return; -+ if (inlen > fill) { -+ memcpy(state->buf + state->buflen, in, fill); -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)) -+ blake2s_compress_arch(state, state->buf, 1, -+ BLAKE2S_BLOCK_SIZE); -+ else -+ blake2s_compress_generic(state, state->buf, 1, -+ BLAKE2S_BLOCK_SIZE); -+ state->buflen = 0; -+ in += fill; -+ inlen -= fill; -+ } -+ if (inlen > BLAKE2S_BLOCK_SIZE) { -+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); -+ /* Hash one less (full) block than strictly possible */ -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)) -+ blake2s_compress_arch(state, in, nblocks - 1, -+ BLAKE2S_BLOCK_SIZE); -+ else -+ blake2s_compress_generic(state, in, nblocks - 1, -+ BLAKE2S_BLOCK_SIZE); -+ in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); -+ inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); -+ } -+ memcpy(state->buf + state->buflen, in, inlen); -+ state->buflen += inlen; -+} -+EXPORT_SYMBOL(blake2s_update); -+ -+void blake2s_final(struct blake2s_state *state, u8 *out) -+{ -+ WARN_ON(IS_ENABLED(DEBUG) && !out); -+ blake2s_set_lastblock(state); -+ memset(state->buf + state->buflen, 0, -+ BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)) -+ blake2s_compress_arch(state, state->buf, 1, state->buflen); -+ else -+ blake2s_compress_generic(state, state->buf, 1, state->buflen); -+ cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); -+ memcpy(out, state->h, state->outlen); -+ memzero_explicit(state, sizeof(*state)); -+} -+EXPORT_SYMBOL(blake2s_final); -+ -+void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, -+ const size_t 
keylen) -+{ -+ struct blake2s_state state; -+ u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 }; -+ u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32)); -+ int i; -+ -+ if (keylen > BLAKE2S_BLOCK_SIZE) { -+ blake2s_init(&state, BLAKE2S_HASH_SIZE); -+ blake2s_update(&state, key, keylen); -+ blake2s_final(&state, x_key); -+ } else -+ memcpy(x_key, key, keylen); -+ -+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) -+ x_key[i] ^= 0x36; -+ -+ blake2s_init(&state, BLAKE2S_HASH_SIZE); -+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); -+ blake2s_update(&state, in, inlen); -+ blake2s_final(&state, i_hash); -+ -+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i) -+ x_key[i] ^= 0x5c ^ 0x36; -+ -+ blake2s_init(&state, BLAKE2S_HASH_SIZE); -+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE); -+ blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE); -+ blake2s_final(&state, i_hash); -+ -+ memcpy(out, i_hash, BLAKE2S_HASH_SIZE); -+ memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE); -+ memzero_explicit(i_hash, BLAKE2S_HASH_SIZE); -+} -+EXPORT_SYMBOL(blake2s256_hmac); -+ -+static int __init mod_init(void) -+{ -+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && -+ WARN_ON(!blake2s_selftest())) -+ return -ENODEV; -+ return 0; -+} -+ -+static void __exit mod_exit(void) -+{ -+} -+ -+module_init(mod_init); -+module_exit(mod_exit); -+MODULE_LICENSE("GPL v2"); -+MODULE_DESCRIPTION("BLAKE2s hash function"); -+MODULE_AUTHOR("Jason A. 
Donenfeld "); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0022-crypto-testmgr-add-test-cases-for-Blake2s.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0022-crypto-testmgr-add-test-cases-for-Blake2s.patch deleted file mode 100644 index 9adc75eb9..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0022-crypto-testmgr-add-test-cases-for-Blake2s.patch +++ /dev/null @@ -1,322 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:29 +0100 -Subject: [PATCH] crypto: testmgr - add test cases for Blake2s - -commit 17e1df67023a5c9ccaeb5de8bf5b88f63127ecf7 upstream. - -As suggested by Eric for the Blake2b implementation contributed by -David, introduce a set of test vectors for Blake2s covering different -digest and key sizes. - - blake2s-128 blake2s-160 blake2s-224 blake2s-256 - --------------------------------------------------- -len=0 | klen=0 klen=1 klen=16 klen=32 -len=1 | klen=16 klen=32 klen=0 klen=1 -len=7 | klen=32 klen=0 klen=1 klen=16 -len=15 | klen=1 klen=16 klen=32 klen=0 -len=64 | klen=0 klen=1 klen=16 klen=32 -len=247 | klen=16 klen=32 klen=0 klen=1 -len=256 | klen=32 klen=0 klen=1 klen=16 - -Cc: David Sterba -Cc: Eric Biggers -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - crypto/testmgr.c | 24 +++++ - crypto/testmgr.h | 251 +++++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 275 insertions(+) - ---- a/crypto/testmgr.c -+++ b/crypto/testmgr.c -@@ -4035,6 +4035,30 @@ static const struct alg_test_desc alg_te - .test = alg_test_null, - .fips_allowed = 1, - }, { -+ .alg = "blake2s-128", -+ .test = alg_test_hash, -+ .suite = { -+ .hash = __VECS(blakes2s_128_tv_template) -+ } -+ }, { -+ .alg = "blake2s-160", -+ .test = alg_test_hash, -+ .suite = { -+ .hash = __VECS(blakes2s_160_tv_template) -+ } -+ }, { -+ .alg = "blake2s-224", -+ .test = alg_test_hash, -+ .suite = { -+ .hash = __VECS(blakes2s_224_tv_template) -+ } -+ }, { -+ .alg = "blake2s-256", -+ .test = alg_test_hash, -+ .suite = { -+ .hash = __VECS(blakes2s_256_tv_template) -+ } -+ }, { - .alg = "cbc(aes)", - .test = alg_test_skcipher, - .fips_allowed = 1, ---- a/crypto/testmgr.h -+++ b/crypto/testmgr.h -@@ -31567,4 +31567,255 @@ static const struct aead_testvec essiv_h - }, - }; - -+static const char blake2_ordered_sequence[] = -+ "\x00\x01\x02\x03\x04\x05\x06\x07" -+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f" -+ "\x10\x11\x12\x13\x14\x15\x16\x17" -+ "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f" -+ "\x20\x21\x22\x23\x24\x25\x26\x27" -+ "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f" -+ "\x30\x31\x32\x33\x34\x35\x36\x37" -+ "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f" -+ "\x40\x41\x42\x43\x44\x45\x46\x47" -+ "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f" -+ "\x50\x51\x52\x53\x54\x55\x56\x57" -+ "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f" -+ "\x60\x61\x62\x63\x64\x65\x66\x67" -+ "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f" -+ "\x70\x71\x72\x73\x74\x75\x76\x77" -+ "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f" -+ "\x80\x81\x82\x83\x84\x85\x86\x87" -+ "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f" -+ "\x90\x91\x92\x93\x94\x95\x96\x97" -+ "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f" -+ "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7" -+ "\xa8\xa9\xaa\xab\xac\xad\xae\xaf" -+ "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7" -+ "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf" -+ 
"\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7" -+ "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf" -+ "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7" -+ "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf" -+ "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7" -+ "\xe8\xe9\xea\xeb\xec\xed\xee\xef" -+ "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7" -+ "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"; -+ -+static const struct hash_testvec blakes2s_128_tv_template[] = {{ -+ .digest = (u8[]){ 0x64, 0x55, 0x0d, 0x6f, 0xfe, 0x2c, 0x0a, 0x01, -+ 0xa1, 0x4a, 0xba, 0x1e, 0xad, 0xe0, 0x20, 0x0c, }, -+}, { -+ .plaintext = blake2_ordered_sequence, -+ .psize = 64, -+ .digest = (u8[]){ 0xdc, 0x66, 0xca, 0x8f, 0x03, 0x86, 0x58, 0x01, -+ 0xb0, 0xff, 0xe0, 0x6e, 0xd8, 0xa1, 0xa9, 0x0e, }, -+}, { -+ .ksize = 16, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 1, -+ .digest = (u8[]){ 0x88, 0x1e, 0x42, 0xe7, 0xbb, 0x35, 0x80, 0x82, -+ 0x63, 0x7c, 0x0a, 0x0f, 0xd7, 0xec, 0x6c, 0x2f, }, -+}, { -+ .ksize = 32, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 7, -+ .digest = (u8[]){ 0xcf, 0x9e, 0x07, 0x2a, 0xd5, 0x22, 0xf2, 0xcd, -+ 0xa2, 0xd8, 0x25, 0x21, 0x80, 0x86, 0x73, 0x1c, }, -+}, { -+ .ksize = 1, -+ .key = "B", -+ .plaintext = blake2_ordered_sequence, -+ .psize = 15, -+ .digest = (u8[]){ 0xf6, 0x33, 0x5a, 0x2c, 0x22, 0xa0, 0x64, 0xb2, -+ 0xb6, 0x3f, 0xeb, 0xbc, 0xd1, 0xc3, 0xe5, 0xb2, }, -+}, { -+ .ksize = 16, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 247, -+ .digest = (u8[]){ 0x72, 0x66, 0x49, 0x60, 0xf9, 0x4a, 0xea, 0xbe, -+ 0x1f, 0xf4, 0x60, 0xce, 0xb7, 0x81, 0xcb, 0x09, }, -+}, { -+ .ksize = 32, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 256, -+ .digest = (u8[]){ 0xd5, 0xa4, 0x0e, 0xc3, 0x16, 0xc7, 0x51, 0xa6, -+ 0x3c, 0xd0, 0xd9, 0x11, 0x57, 0xfa, 0x1e, 0xbb, }, -+}}; -+ -+static const struct hash_testvec blakes2s_160_tv_template[] = {{ -+ .plaintext = blake2_ordered_sequence, -+ .psize = 7, -+ .digest = 
(u8[]){ 0xb4, 0xf2, 0x03, 0x49, 0x37, 0xed, 0xb1, 0x3e, -+ 0x5b, 0x2a, 0xca, 0x64, 0x82, 0x74, 0xf6, 0x62, -+ 0xe3, 0xf2, 0x84, 0xff, }, -+}, { -+ .plaintext = blake2_ordered_sequence, -+ .psize = 256, -+ .digest = (u8[]){ 0xaa, 0x56, 0x9b, 0xdc, 0x98, 0x17, 0x75, 0xf2, -+ 0xb3, 0x68, 0x83, 0xb7, 0x9b, 0x8d, 0x48, 0xb1, -+ 0x9b, 0x2d, 0x35, 0x05, }, -+}, { -+ .ksize = 1, -+ .key = "B", -+ .digest = (u8[]){ 0x50, 0x16, 0xe7, 0x0c, 0x01, 0xd0, 0xd3, 0xc3, -+ 0xf4, 0x3e, 0xb1, 0x6e, 0x97, 0xa9, 0x4e, 0xd1, -+ 0x79, 0x65, 0x32, 0x93, }, -+}, { -+ .ksize = 32, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 1, -+ .digest = (u8[]){ 0x1c, 0x2b, 0xcd, 0x9a, 0x68, 0xca, 0x8c, 0x71, -+ 0x90, 0x29, 0x6c, 0x54, 0xfa, 0x56, 0x4a, 0xef, -+ 0xa2, 0x3a, 0x56, 0x9c, }, -+}, { -+ .ksize = 16, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 15, -+ .digest = (u8[]){ 0x36, 0xc3, 0x5f, 0x9a, 0xdc, 0x7e, 0xbf, 0x19, -+ 0x68, 0xaa, 0xca, 0xd8, 0x81, 0xbf, 0x09, 0x34, -+ 0x83, 0x39, 0x0f, 0x30, }, -+}, { -+ .ksize = 1, -+ .key = "B", -+ .plaintext = blake2_ordered_sequence, -+ .psize = 64, -+ .digest = (u8[]){ 0x86, 0x80, 0x78, 0xa4, 0x14, 0xec, 0x03, 0xe5, -+ 0xb6, 0x9a, 0x52, 0x0e, 0x42, 0xee, 0x39, 0x9d, -+ 0xac, 0xa6, 0x81, 0x63, }, -+}, { -+ .ksize = 32, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 247, -+ .digest = (u8[]){ 0x2d, 0xd8, 0xd2, 0x53, 0x66, 0xfa, 0xa9, 0x01, -+ 0x1c, 0x9c, 0xaf, 0xa3, 0xe2, 0x9d, 0x9b, 0x10, -+ 0x0a, 0xf6, 0x73, 0xe8, }, -+}}; -+ -+static const struct hash_testvec blakes2s_224_tv_template[] = {{ -+ .plaintext = blake2_ordered_sequence, -+ .psize = 1, -+ .digest = (u8[]){ 0x61, 0xb9, 0x4e, 0xc9, 0x46, 0x22, 0xa3, 0x91, -+ 0xd2, 0xae, 0x42, 0xe6, 0x45, 0x6c, 0x90, 0x12, -+ 0xd5, 0x80, 0x07, 0x97, 0xb8, 0x86, 0x5a, 0xfc, -+ 0x48, 0x21, 0x97, 0xbb, }, -+}, { -+ .plaintext = blake2_ordered_sequence, -+ .psize = 247, -+ .digest = (u8[]){ 
0x9e, 0xda, 0xc7, 0x20, 0x2c, 0xd8, 0x48, 0x2e, -+ 0x31, 0x94, 0xab, 0x46, 0x6d, 0x94, 0xd8, 0xb4, -+ 0x69, 0xcd, 0xae, 0x19, 0x6d, 0x9e, 0x41, 0xcc, -+ 0x2b, 0xa4, 0xd5, 0xf6, }, -+}, { -+ .ksize = 16, -+ .key = blake2_ordered_sequence, -+ .digest = (u8[]){ 0x32, 0xc0, 0xac, 0xf4, 0x3b, 0xd3, 0x07, 0x9f, -+ 0xbe, 0xfb, 0xfa, 0x4d, 0x6b, 0x4e, 0x56, 0xb3, -+ 0xaa, 0xd3, 0x27, 0xf6, 0x14, 0xbf, 0xb9, 0x32, -+ 0xa7, 0x19, 0xfc, 0xb8, }, -+}, { -+ .ksize = 1, -+ .key = "B", -+ .plaintext = blake2_ordered_sequence, -+ .psize = 7, -+ .digest = (u8[]){ 0x73, 0xad, 0x5e, 0x6d, 0xb9, 0x02, 0x8e, 0x76, -+ 0xf2, 0x66, 0x42, 0x4b, 0x4c, 0xfa, 0x1f, 0xe6, -+ 0x2e, 0x56, 0x40, 0xe5, 0xa2, 0xb0, 0x3c, 0xe8, -+ 0x7b, 0x45, 0xfe, 0x05, }, -+}, { -+ .ksize = 32, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 15, -+ .digest = (u8[]){ 0x16, 0x60, 0xfb, 0x92, 0x54, 0xb3, 0x6e, 0x36, -+ 0x81, 0xf4, 0x16, 0x41, 0xc3, 0x3d, 0xd3, 0x43, -+ 0x84, 0xed, 0x10, 0x6f, 0x65, 0x80, 0x7a, 0x3e, -+ 0x25, 0xab, 0xc5, 0x02, }, -+}, { -+ .ksize = 16, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 64, -+ .digest = (u8[]){ 0xca, 0xaa, 0x39, 0x67, 0x9c, 0xf7, 0x6b, 0xc7, -+ 0xb6, 0x82, 0xca, 0x0e, 0x65, 0x36, 0x5b, 0x7c, -+ 0x24, 0x00, 0xfa, 0x5f, 0xda, 0x06, 0x91, 0x93, -+ 0x6a, 0x31, 0x83, 0xb5, }, -+}, { -+ .ksize = 1, -+ .key = "B", -+ .plaintext = blake2_ordered_sequence, -+ .psize = 256, -+ .digest = (u8[]){ 0x90, 0x02, 0x26, 0xb5, 0x06, 0x9c, 0x36, 0x86, -+ 0x94, 0x91, 0x90, 0x1e, 0x7d, 0x2a, 0x71, 0xb2, -+ 0x48, 0xb5, 0xe8, 0x16, 0xfd, 0x64, 0x33, 0x45, -+ 0xb3, 0xd7, 0xec, 0xcc, }, -+}}; -+ -+static const struct hash_testvec blakes2s_256_tv_template[] = {{ -+ .plaintext = blake2_ordered_sequence, -+ .psize = 15, -+ .digest = (u8[]){ 0xd9, 0x7c, 0x82, 0x8d, 0x81, 0x82, 0xa7, 0x21, -+ 0x80, 0xa0, 0x6a, 0x78, 0x26, 0x83, 0x30, 0x67, -+ 0x3f, 0x7c, 0x4e, 0x06, 0x35, 0x94, 0x7c, 0x04, -+ 0xc0, 0x23, 0x23, 0xfd, 0x45, 
0xc0, 0xa5, 0x2d, }, -+}, { -+ .ksize = 32, -+ .key = blake2_ordered_sequence, -+ .digest = (u8[]){ 0x48, 0xa8, 0x99, 0x7d, 0xa4, 0x07, 0x87, 0x6b, -+ 0x3d, 0x79, 0xc0, 0xd9, 0x23, 0x25, 0xad, 0x3b, -+ 0x89, 0xcb, 0xb7, 0x54, 0xd8, 0x6a, 0xb7, 0x1a, -+ 0xee, 0x04, 0x7a, 0xd3, 0x45, 0xfd, 0x2c, 0x49, }, -+}, { -+ .ksize = 1, -+ .key = "B", -+ .plaintext = blake2_ordered_sequence, -+ .psize = 1, -+ .digest = (u8[]){ 0x22, 0x27, 0xae, 0xaa, 0x6e, 0x81, 0x56, 0x03, -+ 0xa7, 0xe3, 0xa1, 0x18, 0xa5, 0x9a, 0x2c, 0x18, -+ 0xf4, 0x63, 0xbc, 0x16, 0x70, 0xf1, 0xe7, 0x4b, -+ 0x00, 0x6d, 0x66, 0x16, 0xae, 0x9e, 0x74, 0x4e, }, -+}, { -+ .ksize = 16, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 7, -+ .digest = (u8[]){ 0x58, 0x5d, 0xa8, 0x60, 0x1c, 0xa4, 0xd8, 0x03, -+ 0x86, 0x86, 0x84, 0x64, 0xd7, 0xa0, 0x8e, 0x15, -+ 0x2f, 0x05, 0xa2, 0x1b, 0xbc, 0xef, 0x7a, 0x34, -+ 0xb3, 0xc5, 0xbc, 0x4b, 0xf0, 0x32, 0xeb, 0x12, }, -+}, { -+ .ksize = 32, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 64, -+ .digest = (u8[]){ 0x89, 0x75, 0xb0, 0x57, 0x7f, 0xd3, 0x55, 0x66, -+ 0xd7, 0x50, 0xb3, 0x62, 0xb0, 0x89, 0x7a, 0x26, -+ 0xc3, 0x99, 0x13, 0x6d, 0xf0, 0x7b, 0xab, 0xab, -+ 0xbd, 0xe6, 0x20, 0x3f, 0xf2, 0x95, 0x4e, 0xd4, }, -+}, { -+ .ksize = 1, -+ .key = "B", -+ .plaintext = blake2_ordered_sequence, -+ .psize = 247, -+ .digest = (u8[]){ 0x2e, 0x74, 0x1c, 0x1d, 0x03, 0xf4, 0x9d, 0x84, -+ 0x6f, 0xfc, 0x86, 0x32, 0x92, 0x49, 0x7e, 0x66, -+ 0xd7, 0xc3, 0x10, 0x88, 0xfe, 0x28, 0xb3, 0xe0, -+ 0xbf, 0x50, 0x75, 0xad, 0x8e, 0xa4, 0xe6, 0xb2, }, -+}, { -+ .ksize = 16, -+ .key = blake2_ordered_sequence, -+ .plaintext = blake2_ordered_sequence, -+ .psize = 256, -+ .digest = (u8[]){ 0xb9, 0xd2, 0x81, 0x0e, 0x3a, 0xb1, 0x62, 0x9b, -+ 0xad, 0x44, 0x05, 0xf4, 0x92, 0x2e, 0x99, 0xc1, -+ 0x4a, 0x47, 0xbb, 0x5b, 0x6f, 0xb2, 0x96, 0xed, -+ 0xd5, 0x06, 0xb5, 0x3a, 0x7c, 0x7a, 0x65, 0x1d, }, -+}}; -+ - #endif /* 
_CRYPTO_TESTMGR_H */ diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0023-crypto-blake2s-implement-generic-shash-driver.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0023-crypto-blake2s-implement-generic-shash-driver.patch deleted file mode 100644 index e25edf5dd..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0023-crypto-blake2s-implement-generic-shash-driver.patch +++ /dev/null @@ -1,245 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:30 +0100 -Subject: [PATCH] crypto: blake2s - implement generic shash driver - -commit 7f9b0880925f1f9d7d59504ea0892d2ae9cfc233 upstream. - -Wire up our newly added Blake2s implementation via the shash API. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - crypto/Kconfig | 18 ++++ - crypto/Makefile | 1 + - crypto/blake2s_generic.c | 171 ++++++++++++++++++++++++++++++ - include/crypto/internal/blake2s.h | 5 + - 4 files changed, 195 insertions(+) - create mode 100644 crypto/blake2s_generic.c - ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -639,6 +639,24 @@ config CRYPTO_XXHASH - xxHash non-cryptographic hash algorithm. Extremely fast, working at - speeds close to RAM limits. - -+config CRYPTO_BLAKE2S -+ tristate "BLAKE2s digest algorithm" -+ select CRYPTO_LIB_BLAKE2S_GENERIC -+ select CRYPTO_HASH -+ help -+ Implementation of cryptographic hash function BLAKE2s -+ optimized for 8-32bit platforms and can produce digests of any size -+ between 1 to 32. The keyed hash is also implemented. -+ -+ This module provides the following algorithms: -+ -+ - blake2s-128 -+ - blake2s-160 -+ - blake2s-224 -+ - blake2s-256 -+ -+ See https://blake2.net for further information. 
-+ - config CRYPTO_CRCT10DIF - tristate "CRCT10DIF algorithm" - select CRYPTO_HASH ---- a/crypto/Makefile -+++ b/crypto/Makefile -@@ -74,6 +74,7 @@ obj-$(CONFIG_CRYPTO_STREEBOG) += streebo - obj-$(CONFIG_CRYPTO_WP512) += wp512.o - CFLAGS_wp512.o := $(call cc-option,-fno-schedule-insns) # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79149 - obj-$(CONFIG_CRYPTO_TGR192) += tgr192.o -+obj-$(CONFIG_CRYPTO_BLAKE2S) += blake2s_generic.o - obj-$(CONFIG_CRYPTO_GF128MUL) += gf128mul.o - obj-$(CONFIG_CRYPTO_ECB) += ecb.o - obj-$(CONFIG_CRYPTO_CBC) += cbc.o ---- /dev/null -+++ b/crypto/blake2s_generic.c -@@ -0,0 +1,171 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key, -+ unsigned int keylen) -+{ -+ struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); -+ -+ if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) { -+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); -+ return -EINVAL; -+ } -+ -+ memcpy(tctx->key, key, keylen); -+ tctx->keylen = keylen; -+ -+ return 0; -+} -+ -+static int crypto_blake2s_init(struct shash_desc *desc) -+{ -+ struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); -+ struct blake2s_state *state = shash_desc_ctx(desc); -+ const int outlen = crypto_shash_digestsize(desc->tfm); -+ -+ if (tctx->keylen) -+ blake2s_init_key(state, outlen, tctx->key, tctx->keylen); -+ else -+ blake2s_init(state, outlen); -+ -+ return 0; -+} -+ -+static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in, -+ unsigned int inlen) -+{ -+ struct blake2s_state *state = shash_desc_ctx(desc); -+ const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; -+ -+ if (unlikely(!inlen)) -+ return 0; -+ if (inlen > fill) { -+ memcpy(state->buf + state->buflen, in, fill); -+ blake2s_compress_generic(state, state->buf, 1, 
BLAKE2S_BLOCK_SIZE); -+ state->buflen = 0; -+ in += fill; -+ inlen -= fill; -+ } -+ if (inlen > BLAKE2S_BLOCK_SIZE) { -+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); -+ /* Hash one less (full) block than strictly possible */ -+ blake2s_compress_generic(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); -+ in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); -+ inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); -+ } -+ memcpy(state->buf + state->buflen, in, inlen); -+ state->buflen += inlen; -+ -+ return 0; -+} -+ -+static int crypto_blake2s_final(struct shash_desc *desc, u8 *out) -+{ -+ struct blake2s_state *state = shash_desc_ctx(desc); -+ -+ blake2s_set_lastblock(state); -+ memset(state->buf + state->buflen, 0, -+ BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ -+ blake2s_compress_generic(state, state->buf, 1, state->buflen); -+ cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); -+ memcpy(out, state->h, state->outlen); -+ memzero_explicit(state, sizeof(*state)); -+ -+ return 0; -+} -+ -+static struct shash_alg blake2s_algs[] = {{ -+ .base.cra_name = "blake2s-128", -+ .base.cra_driver_name = "blake2s-128-generic", -+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, -+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), -+ .base.cra_priority = 200, -+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+ -+ .digestsize = BLAKE2S_128_HASH_SIZE, -+ .setkey = crypto_blake2s_setkey, -+ .init = crypto_blake2s_init, -+ .update = crypto_blake2s_update, -+ .final = crypto_blake2s_final, -+ .descsize = sizeof(struct blake2s_state), -+}, { -+ .base.cra_name = "blake2s-160", -+ .base.cra_driver_name = "blake2s-160-generic", -+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, -+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), -+ .base.cra_priority = 200, -+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+ -+ .digestsize = BLAKE2S_160_HASH_SIZE, -+ .setkey = crypto_blake2s_setkey, -+ .init = crypto_blake2s_init, -+ .update = 
crypto_blake2s_update, -+ .final = crypto_blake2s_final, -+ .descsize = sizeof(struct blake2s_state), -+}, { -+ .base.cra_name = "blake2s-224", -+ .base.cra_driver_name = "blake2s-224-generic", -+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, -+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), -+ .base.cra_priority = 200, -+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+ -+ .digestsize = BLAKE2S_224_HASH_SIZE, -+ .setkey = crypto_blake2s_setkey, -+ .init = crypto_blake2s_init, -+ .update = crypto_blake2s_update, -+ .final = crypto_blake2s_final, -+ .descsize = sizeof(struct blake2s_state), -+}, { -+ .base.cra_name = "blake2s-256", -+ .base.cra_driver_name = "blake2s-256-generic", -+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, -+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), -+ .base.cra_priority = 200, -+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+ -+ .digestsize = BLAKE2S_256_HASH_SIZE, -+ .setkey = crypto_blake2s_setkey, -+ .init = crypto_blake2s_init, -+ .update = crypto_blake2s_update, -+ .final = crypto_blake2s_final, -+ .descsize = sizeof(struct blake2s_state), -+}}; -+ -+static int __init blake2s_mod_init(void) -+{ -+ return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -+} -+ -+static void __exit blake2s_mod_exit(void) -+{ -+ crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -+} -+ -+subsys_initcall(blake2s_mod_init); -+module_exit(blake2s_mod_exit); -+ -+MODULE_ALIAS_CRYPTO("blake2s-128"); -+MODULE_ALIAS_CRYPTO("blake2s-128-generic"); -+MODULE_ALIAS_CRYPTO("blake2s-160"); -+MODULE_ALIAS_CRYPTO("blake2s-160-generic"); -+MODULE_ALIAS_CRYPTO("blake2s-224"); -+MODULE_ALIAS_CRYPTO("blake2s-224-generic"); -+MODULE_ALIAS_CRYPTO("blake2s-256"); -+MODULE_ALIAS_CRYPTO("blake2s-256-generic"); -+MODULE_LICENSE("GPL v2"); ---- a/include/crypto/internal/blake2s.h -+++ b/include/crypto/internal/blake2s.h -@@ -5,6 +5,11 @@ - - #include - -+struct blake2s_tfm_ctx { 
-+ u8 key[BLAKE2S_KEY_SIZE]; -+ unsigned int keylen; -+}; -+ - void blake2s_compress_generic(struct blake2s_state *state,const u8 *block, - size_t nblocks, const u32 inc); - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0024-crypto-blake2s-x86_64-SIMD-implementation.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0024-crypto-blake2s-x86_64-SIMD-implementation.patch deleted file mode 100644 index 04405581d..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0024-crypto-blake2s-x86_64-SIMD-implementation.patch +++ /dev/null @@ -1,557 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 8 Nov 2019 13:22:31 +0100 -Subject: [PATCH] crypto: blake2s - x86_64 SIMD implementation - -commit ed0356eda153f6a95649e11feb7b07083caf9e20 upstream. - -These implementations from Samuel Neves support AVX and AVX-512VL. -Originally this used AVX-512F, but Skylake thermal throttling made -AVX-512VL more attractive and possible to do with negligable difference. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Samuel Neves -Co-developed-by: Samuel Neves -[ardb: move to arch/x86/crypto, wire into lib/crypto framework] -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/x86/crypto/Makefile | 2 + - arch/x86/crypto/blake2s-core.S | 258 +++++++++++++++++++++++++++++++++ - arch/x86/crypto/blake2s-glue.c | 233 +++++++++++++++++++++++++++++ - crypto/Kconfig | 6 + - 4 files changed, 499 insertions(+) - create mode 100644 arch/x86/crypto/blake2s-core.S - create mode 100644 arch/x86/crypto/blake2s-glue.c - ---- a/arch/x86/crypto/Makefile -+++ b/arch/x86/crypto/Makefile -@@ -48,6 +48,7 @@ ifeq ($(avx_supported),yes) - obj-$(CONFIG_CRYPTO_CAST6_AVX_X86_64) += cast6-avx-x86_64.o - obj-$(CONFIG_CRYPTO_TWOFISH_AVX_X86_64) += twofish-avx-x86_64.o - obj-$(CONFIG_CRYPTO_SERPENT_AVX_X86_64) += serpent-avx-x86_64.o -+ obj-$(CONFIG_CRYPTO_BLAKE2S_X86) += blake2s-x86_64.o - endif - - # These modules require assembler to support AVX2. -@@ -70,6 +71,7 @@ serpent-sse2-x86_64-y := serpent-sse2-x8 - aegis128-aesni-y := aegis128-aesni-asm.o aegis128-aesni-glue.o - - nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o -+blake2s-x86_64-y := blake2s-core.o blake2s-glue.o - - ifeq ($(avx_supported),yes) - camellia-aesni-avx-x86_64-y := camellia-aesni-avx-asm_64.o \ ---- /dev/null -+++ b/arch/x86/crypto/blake2s-core.S -@@ -0,0 +1,258 @@ -+/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * Copyright (C) 2017-2019 Samuel Neves . All Rights Reserved. 
-+ */ -+ -+#include -+ -+.section .rodata.cst32.BLAKE2S_IV, "aM", @progbits, 32 -+.align 32 -+IV: .octa 0xA54FF53A3C6EF372BB67AE856A09E667 -+ .octa 0x5BE0CD191F83D9AB9B05688C510E527F -+.section .rodata.cst16.ROT16, "aM", @progbits, 16 -+.align 16 -+ROT16: .octa 0x0D0C0F0E09080B0A0504070601000302 -+.section .rodata.cst16.ROR328, "aM", @progbits, 16 -+.align 16 -+ROR328: .octa 0x0C0F0E0D080B0A090407060500030201 -+.section .rodata.cst64.BLAKE2S_SIGMA, "aM", @progbits, 160 -+.align 64 -+SIGMA: -+.byte 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13 -+.byte 14, 4, 9, 13, 10, 8, 15, 6, 5, 1, 0, 11, 3, 12, 2, 7 -+.byte 11, 12, 5, 15, 8, 0, 2, 13, 9, 10, 3, 7, 4, 14, 6, 1 -+.byte 7, 3, 13, 11, 9, 1, 12, 14, 15, 2, 5, 4, 8, 6, 10, 0 -+.byte 9, 5, 2, 10, 0, 7, 4, 15, 3, 14, 11, 6, 13, 1, 12, 8 -+.byte 2, 6, 0, 8, 12, 10, 11, 3, 1, 4, 7, 15, 9, 13, 5, 14 -+.byte 12, 1, 14, 4, 5, 15, 13, 10, 8, 0, 6, 9, 11, 7, 3, 2 -+.byte 13, 7, 12, 3, 11, 14, 1, 9, 2, 5, 15, 8, 10, 0, 4, 6 -+.byte 6, 14, 11, 0, 15, 9, 3, 8, 10, 12, 13, 1, 5, 2, 7, 4 -+.byte 10, 8, 7, 1, 2, 4, 6, 5, 13, 15, 9, 3, 0, 11, 14, 12 -+#ifdef CONFIG_AS_AVX512 -+.section .rodata.cst64.BLAKE2S_SIGMA2, "aM", @progbits, 640 -+.align 64 -+SIGMA2: -+.long 0, 2, 4, 6, 1, 3, 5, 7, 14, 8, 10, 12, 15, 9, 11, 13 -+.long 8, 2, 13, 15, 10, 9, 12, 3, 6, 4, 0, 14, 5, 11, 1, 7 -+.long 11, 13, 8, 6, 5, 10, 14, 3, 2, 4, 12, 15, 1, 0, 7, 9 -+.long 11, 10, 7, 0, 8, 15, 1, 13, 3, 6, 2, 12, 4, 14, 9, 5 -+.long 4, 10, 9, 14, 15, 0, 11, 8, 1, 7, 3, 13, 2, 5, 6, 12 -+.long 2, 11, 4, 15, 14, 3, 10, 8, 13, 6, 5, 7, 0, 12, 1, 9 -+.long 4, 8, 15, 9, 14, 11, 13, 5, 3, 2, 1, 12, 6, 10, 7, 0 -+.long 6, 13, 0, 14, 12, 2, 1, 11, 15, 4, 5, 8, 7, 9, 3, 10 -+.long 15, 5, 4, 13, 10, 7, 3, 11, 12, 2, 0, 6, 9, 8, 1, 14 -+.long 8, 7, 14, 11, 13, 15, 0, 12, 10, 4, 5, 6, 3, 2, 1, 9 -+#endif /* CONFIG_AS_AVX512 */ -+ -+.text -+#ifdef CONFIG_AS_SSSE3 -+ENTRY(blake2s_compress_ssse3) -+ testq %rdx,%rdx -+ je .Lendofloop -+ movdqu (%rdi),%xmm0 -+ movdqu 
0x10(%rdi),%xmm1 -+ movdqa ROT16(%rip),%xmm12 -+ movdqa ROR328(%rip),%xmm13 -+ movdqu 0x20(%rdi),%xmm14 -+ movq %rcx,%xmm15 -+ leaq SIGMA+0xa0(%rip),%r8 -+ jmp .Lbeginofloop -+ .align 32 -+.Lbeginofloop: -+ movdqa %xmm0,%xmm10 -+ movdqa %xmm1,%xmm11 -+ paddq %xmm15,%xmm14 -+ movdqa IV(%rip),%xmm2 -+ movdqa %xmm14,%xmm3 -+ pxor IV+0x10(%rip),%xmm3 -+ leaq SIGMA(%rip),%rcx -+.Lroundloop: -+ movzbl (%rcx),%eax -+ movd (%rsi,%rax,4),%xmm4 -+ movzbl 0x1(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm5 -+ movzbl 0x2(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm6 -+ movzbl 0x3(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm7 -+ punpckldq %xmm5,%xmm4 -+ punpckldq %xmm7,%xmm6 -+ punpcklqdq %xmm6,%xmm4 -+ paddd %xmm4,%xmm0 -+ paddd %xmm1,%xmm0 -+ pxor %xmm0,%xmm3 -+ pshufb %xmm12,%xmm3 -+ paddd %xmm3,%xmm2 -+ pxor %xmm2,%xmm1 -+ movdqa %xmm1,%xmm8 -+ psrld $0xc,%xmm1 -+ pslld $0x14,%xmm8 -+ por %xmm8,%xmm1 -+ movzbl 0x4(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm5 -+ movzbl 0x5(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm6 -+ movzbl 0x6(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm7 -+ movzbl 0x7(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm4 -+ punpckldq %xmm6,%xmm5 -+ punpckldq %xmm4,%xmm7 -+ punpcklqdq %xmm7,%xmm5 -+ paddd %xmm5,%xmm0 -+ paddd %xmm1,%xmm0 -+ pxor %xmm0,%xmm3 -+ pshufb %xmm13,%xmm3 -+ paddd %xmm3,%xmm2 -+ pxor %xmm2,%xmm1 -+ movdqa %xmm1,%xmm8 -+ psrld $0x7,%xmm1 -+ pslld $0x19,%xmm8 -+ por %xmm8,%xmm1 -+ pshufd $0x93,%xmm0,%xmm0 -+ pshufd $0x4e,%xmm3,%xmm3 -+ pshufd $0x39,%xmm2,%xmm2 -+ movzbl 0x8(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm6 -+ movzbl 0x9(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm7 -+ movzbl 0xa(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm4 -+ movzbl 0xb(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm5 -+ punpckldq %xmm7,%xmm6 -+ punpckldq %xmm5,%xmm4 -+ punpcklqdq %xmm4,%xmm6 -+ paddd %xmm6,%xmm0 -+ paddd %xmm1,%xmm0 -+ pxor %xmm0,%xmm3 -+ pshufb %xmm12,%xmm3 -+ paddd %xmm3,%xmm2 -+ pxor %xmm2,%xmm1 -+ movdqa %xmm1,%xmm8 -+ psrld $0xc,%xmm1 -+ pslld $0x14,%xmm8 -+ por %xmm8,%xmm1 -+ movzbl 0xc(%rcx),%eax -+ movd 
(%rsi,%rax,4),%xmm7 -+ movzbl 0xd(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm4 -+ movzbl 0xe(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm5 -+ movzbl 0xf(%rcx),%eax -+ movd (%rsi,%rax,4),%xmm6 -+ punpckldq %xmm4,%xmm7 -+ punpckldq %xmm6,%xmm5 -+ punpcklqdq %xmm5,%xmm7 -+ paddd %xmm7,%xmm0 -+ paddd %xmm1,%xmm0 -+ pxor %xmm0,%xmm3 -+ pshufb %xmm13,%xmm3 -+ paddd %xmm3,%xmm2 -+ pxor %xmm2,%xmm1 -+ movdqa %xmm1,%xmm8 -+ psrld $0x7,%xmm1 -+ pslld $0x19,%xmm8 -+ por %xmm8,%xmm1 -+ pshufd $0x39,%xmm0,%xmm0 -+ pshufd $0x4e,%xmm3,%xmm3 -+ pshufd $0x93,%xmm2,%xmm2 -+ addq $0x10,%rcx -+ cmpq %r8,%rcx -+ jnz .Lroundloop -+ pxor %xmm2,%xmm0 -+ pxor %xmm3,%xmm1 -+ pxor %xmm10,%xmm0 -+ pxor %xmm11,%xmm1 -+ addq $0x40,%rsi -+ decq %rdx -+ jnz .Lbeginofloop -+ movdqu %xmm0,(%rdi) -+ movdqu %xmm1,0x10(%rdi) -+ movdqu %xmm14,0x20(%rdi) -+.Lendofloop: -+ ret -+ENDPROC(blake2s_compress_ssse3) -+#endif /* CONFIG_AS_SSSE3 */ -+ -+#ifdef CONFIG_AS_AVX512 -+ENTRY(blake2s_compress_avx512) -+ vmovdqu (%rdi),%xmm0 -+ vmovdqu 0x10(%rdi),%xmm1 -+ vmovdqu 0x20(%rdi),%xmm4 -+ vmovq %rcx,%xmm5 -+ vmovdqa IV(%rip),%xmm14 -+ vmovdqa IV+16(%rip),%xmm15 -+ jmp .Lblake2s_compress_avx512_mainloop -+.align 32 -+.Lblake2s_compress_avx512_mainloop: -+ vmovdqa %xmm0,%xmm10 -+ vmovdqa %xmm1,%xmm11 -+ vpaddq %xmm5,%xmm4,%xmm4 -+ vmovdqa %xmm14,%xmm2 -+ vpxor %xmm15,%xmm4,%xmm3 -+ vmovdqu (%rsi),%ymm6 -+ vmovdqu 0x20(%rsi),%ymm7 -+ addq $0x40,%rsi -+ leaq SIGMA2(%rip),%rax -+ movb $0xa,%cl -+.Lblake2s_compress_avx512_roundloop: -+ addq $0x40,%rax -+ vmovdqa -0x40(%rax),%ymm8 -+ vmovdqa -0x20(%rax),%ymm9 -+ vpermi2d %ymm7,%ymm6,%ymm8 -+ vpermi2d %ymm7,%ymm6,%ymm9 -+ vmovdqa %ymm8,%ymm6 -+ vmovdqa %ymm9,%ymm7 -+ vpaddd %xmm8,%xmm0,%xmm0 -+ vpaddd %xmm1,%xmm0,%xmm0 -+ vpxor %xmm0,%xmm3,%xmm3 -+ vprord $0x10,%xmm3,%xmm3 -+ vpaddd %xmm3,%xmm2,%xmm2 -+ vpxor %xmm2,%xmm1,%xmm1 -+ vprord $0xc,%xmm1,%xmm1 -+ vextracti128 $0x1,%ymm8,%xmm8 -+ vpaddd %xmm8,%xmm0,%xmm0 -+ vpaddd %xmm1,%xmm0,%xmm0 -+ vpxor %xmm0,%xmm3,%xmm3 -+ vprord 
$0x8,%xmm3,%xmm3 -+ vpaddd %xmm3,%xmm2,%xmm2 -+ vpxor %xmm2,%xmm1,%xmm1 -+ vprord $0x7,%xmm1,%xmm1 -+ vpshufd $0x93,%xmm0,%xmm0 -+ vpshufd $0x4e,%xmm3,%xmm3 -+ vpshufd $0x39,%xmm2,%xmm2 -+ vpaddd %xmm9,%xmm0,%xmm0 -+ vpaddd %xmm1,%xmm0,%xmm0 -+ vpxor %xmm0,%xmm3,%xmm3 -+ vprord $0x10,%xmm3,%xmm3 -+ vpaddd %xmm3,%xmm2,%xmm2 -+ vpxor %xmm2,%xmm1,%xmm1 -+ vprord $0xc,%xmm1,%xmm1 -+ vextracti128 $0x1,%ymm9,%xmm9 -+ vpaddd %xmm9,%xmm0,%xmm0 -+ vpaddd %xmm1,%xmm0,%xmm0 -+ vpxor %xmm0,%xmm3,%xmm3 -+ vprord $0x8,%xmm3,%xmm3 -+ vpaddd %xmm3,%xmm2,%xmm2 -+ vpxor %xmm2,%xmm1,%xmm1 -+ vprord $0x7,%xmm1,%xmm1 -+ vpshufd $0x39,%xmm0,%xmm0 -+ vpshufd $0x4e,%xmm3,%xmm3 -+ vpshufd $0x93,%xmm2,%xmm2 -+ decb %cl -+ jne .Lblake2s_compress_avx512_roundloop -+ vpxor %xmm10,%xmm0,%xmm0 -+ vpxor %xmm11,%xmm1,%xmm1 -+ vpxor %xmm2,%xmm0,%xmm0 -+ vpxor %xmm3,%xmm1,%xmm1 -+ decq %rdx -+ jne .Lblake2s_compress_avx512_mainloop -+ vmovdqu %xmm0,(%rdi) -+ vmovdqu %xmm1,0x10(%rdi) -+ vmovdqu %xmm4,0x20(%rdi) -+ vzeroupper -+ retq -+ENDPROC(blake2s_compress_avx512) -+#endif /* CONFIG_AS_AVX512 */ ---- /dev/null -+++ b/arch/x86/crypto/blake2s-glue.c -@@ -0,0 +1,233 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+asmlinkage void blake2s_compress_ssse3(struct blake2s_state *state, -+ const u8 *block, const size_t nblocks, -+ const u32 inc); -+asmlinkage void blake2s_compress_avx512(struct blake2s_state *state, -+ const u8 *block, const size_t nblocks, -+ const u32 inc); -+ -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_ssse3); -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(blake2s_use_avx512); -+ -+void blake2s_compress_arch(struct blake2s_state *state, -+ const u8 *block, size_t nblocks, -+ const u32 inc) -+{ -+ /* SIMD disables preemption, so relax after processing each page. */ -+ BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8); -+ -+ if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) { -+ blake2s_compress_generic(state, block, nblocks, inc); -+ return; -+ } -+ -+ for (;;) { -+ const size_t blocks = min_t(size_t, nblocks, -+ PAGE_SIZE / BLAKE2S_BLOCK_SIZE); -+ -+ kernel_fpu_begin(); -+ if (IS_ENABLED(CONFIG_AS_AVX512) && -+ static_branch_likely(&blake2s_use_avx512)) -+ blake2s_compress_avx512(state, block, blocks, inc); -+ else -+ blake2s_compress_ssse3(state, block, blocks, inc); -+ kernel_fpu_end(); -+ -+ nblocks -= blocks; -+ if (!nblocks) -+ break; -+ block += blocks * BLAKE2S_BLOCK_SIZE; -+ } -+} -+EXPORT_SYMBOL(blake2s_compress_arch); -+ -+static int crypto_blake2s_setkey(struct crypto_shash *tfm, const u8 *key, -+ unsigned int keylen) -+{ -+ struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(tfm); -+ -+ if (keylen == 0 || keylen > BLAKE2S_KEY_SIZE) { -+ crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); -+ return -EINVAL; -+ } -+ -+ memcpy(tctx->key, key, keylen); -+ tctx->keylen = keylen; -+ -+ return 0; -+} -+ -+static int crypto_blake2s_init(struct shash_desc *desc) -+{ -+ struct blake2s_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm); -+ struct blake2s_state *state = 
shash_desc_ctx(desc); -+ const int outlen = crypto_shash_digestsize(desc->tfm); -+ -+ if (tctx->keylen) -+ blake2s_init_key(state, outlen, tctx->key, tctx->keylen); -+ else -+ blake2s_init(state, outlen); -+ -+ return 0; -+} -+ -+static int crypto_blake2s_update(struct shash_desc *desc, const u8 *in, -+ unsigned int inlen) -+{ -+ struct blake2s_state *state = shash_desc_ctx(desc); -+ const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen; -+ -+ if (unlikely(!inlen)) -+ return 0; -+ if (inlen > fill) { -+ memcpy(state->buf + state->buflen, in, fill); -+ blake2s_compress_arch(state, state->buf, 1, BLAKE2S_BLOCK_SIZE); -+ state->buflen = 0; -+ in += fill; -+ inlen -= fill; -+ } -+ if (inlen > BLAKE2S_BLOCK_SIZE) { -+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE); -+ /* Hash one less (full) block than strictly possible */ -+ blake2s_compress_arch(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE); -+ in += BLAKE2S_BLOCK_SIZE * (nblocks - 1); -+ inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1); -+ } -+ memcpy(state->buf + state->buflen, in, inlen); -+ state->buflen += inlen; -+ -+ return 0; -+} -+ -+static int crypto_blake2s_final(struct shash_desc *desc, u8 *out) -+{ -+ struct blake2s_state *state = shash_desc_ctx(desc); -+ -+ blake2s_set_lastblock(state); -+ memset(state->buf + state->buflen, 0, -+ BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */ -+ blake2s_compress_arch(state, state->buf, 1, state->buflen); -+ cpu_to_le32_array(state->h, ARRAY_SIZE(state->h)); -+ memcpy(out, state->h, state->outlen); -+ memzero_explicit(state, sizeof(*state)); -+ -+ return 0; -+} -+ -+static struct shash_alg blake2s_algs[] = {{ -+ .base.cra_name = "blake2s-128", -+ .base.cra_driver_name = "blake2s-128-x86", -+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, -+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), -+ .base.cra_priority = 200, -+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+ -+ .digestsize = BLAKE2S_128_HASH_SIZE, -+ .setkey = 
crypto_blake2s_setkey, -+ .init = crypto_blake2s_init, -+ .update = crypto_blake2s_update, -+ .final = crypto_blake2s_final, -+ .descsize = sizeof(struct blake2s_state), -+}, { -+ .base.cra_name = "blake2s-160", -+ .base.cra_driver_name = "blake2s-160-x86", -+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, -+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), -+ .base.cra_priority = 200, -+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+ -+ .digestsize = BLAKE2S_160_HASH_SIZE, -+ .setkey = crypto_blake2s_setkey, -+ .init = crypto_blake2s_init, -+ .update = crypto_blake2s_update, -+ .final = crypto_blake2s_final, -+ .descsize = sizeof(struct blake2s_state), -+}, { -+ .base.cra_name = "blake2s-224", -+ .base.cra_driver_name = "blake2s-224-x86", -+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, -+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), -+ .base.cra_priority = 200, -+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+ -+ .digestsize = BLAKE2S_224_HASH_SIZE, -+ .setkey = crypto_blake2s_setkey, -+ .init = crypto_blake2s_init, -+ .update = crypto_blake2s_update, -+ .final = crypto_blake2s_final, -+ .descsize = sizeof(struct blake2s_state), -+}, { -+ .base.cra_name = "blake2s-256", -+ .base.cra_driver_name = "blake2s-256-x86", -+ .base.cra_flags = CRYPTO_ALG_OPTIONAL_KEY, -+ .base.cra_ctxsize = sizeof(struct blake2s_tfm_ctx), -+ .base.cra_priority = 200, -+ .base.cra_blocksize = BLAKE2S_BLOCK_SIZE, -+ .base.cra_module = THIS_MODULE, -+ -+ .digestsize = BLAKE2S_256_HASH_SIZE, -+ .setkey = crypto_blake2s_setkey, -+ .init = crypto_blake2s_init, -+ .update = crypto_blake2s_update, -+ .final = crypto_blake2s_final, -+ .descsize = sizeof(struct blake2s_state), -+}}; -+ -+static int __init blake2s_mod_init(void) -+{ -+ if (!boot_cpu_has(X86_FEATURE_SSSE3)) -+ return 0; -+ -+ static_branch_enable(&blake2s_use_ssse3); -+ -+ if (IS_ENABLED(CONFIG_AS_AVX512) && -+ boot_cpu_has(X86_FEATURE_AVX) && -+ 
boot_cpu_has(X86_FEATURE_AVX2) && -+ boot_cpu_has(X86_FEATURE_AVX512F) && -+ boot_cpu_has(X86_FEATURE_AVX512VL) && -+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | -+ XFEATURE_MASK_AVX512, NULL)) -+ static_branch_enable(&blake2s_use_avx512); -+ -+ return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -+} -+ -+static void __exit blake2s_mod_exit(void) -+{ -+ if (boot_cpu_has(X86_FEATURE_SSSE3)) -+ crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -+} -+ -+module_init(blake2s_mod_init); -+module_exit(blake2s_mod_exit); -+ -+MODULE_ALIAS_CRYPTO("blake2s-128"); -+MODULE_ALIAS_CRYPTO("blake2s-128-x86"); -+MODULE_ALIAS_CRYPTO("blake2s-160"); -+MODULE_ALIAS_CRYPTO("blake2s-160-x86"); -+MODULE_ALIAS_CRYPTO("blake2s-224"); -+MODULE_ALIAS_CRYPTO("blake2s-224-x86"); -+MODULE_ALIAS_CRYPTO("blake2s-256"); -+MODULE_ALIAS_CRYPTO("blake2s-256-x86"); -+MODULE_LICENSE("GPL v2"); ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -657,6 +657,12 @@ config CRYPTO_BLAKE2S - - See https://blake2.net for further information. - -+config CRYPTO_BLAKE2S_X86 -+ tristate "BLAKE2s digest algorithm (x86 accelerated version)" -+ depends on X86 && 64BIT -+ select CRYPTO_LIB_BLAKE2S_GENERIC -+ select CRYPTO_ARCH_HAVE_LIB_BLAKE2S -+ - config CRYPTO_CRCT10DIF - tristate "CRCT10DIF algorithm" - select CRYPTO_HASH diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0025-crypto-curve25519-generic-C-library-implementations.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0025-crypto-curve25519-generic-C-library-implementations.patch deleted file mode 100644 index e58dda921..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0025-crypto-curve25519-generic-C-library-implementations.patch +++ /dev/null @@ -1,1849 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. 
Donenfeld" -Date: Fri, 8 Nov 2019 13:22:32 +0100 -Subject: [PATCH] crypto: curve25519 - generic C library implementations - -commit 0ed42a6f431e930b2e8fae21955406e09fe75d70 upstream. - -This contains two formally verified C implementations of the Curve25519 -scalar multiplication function, one for 32-bit systems, and one for -64-bit systems whose compiler supports efficient 128-bit integer types. -Not only are these implementations formally verified, but they are also -the fastest available C implementations. They have been modified to be -friendly to kernel space and to be generally less horrendous looking, -but still an effort has been made to retain their formally verified -characteristic, and so the C might look slightly unidiomatic. - -The 64-bit version comes from HACL*: https://github.com/project-everest/hacl-star -The 32-bit version comes from Fiat: https://github.com/mit-plv/fiat-crypto - -Information: https://cr.yp.to/ecdh.html - -Signed-off-by: Jason A. Donenfeld -[ardb: - move from lib/zinc to lib/crypto - - replace .c #includes with Kconfig based object selection - - drop simd handling and simplify support for per-arch versions ] -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - include/crypto/curve25519.h | 71 +++ - lib/crypto/Kconfig | 25 + - lib/crypto/Makefile | 5 + - lib/crypto/curve25519-fiat32.c | 864 +++++++++++++++++++++++++++++++++ - lib/crypto/curve25519-hacl64.c | 788 ++++++++++++++++++++++++++++++ - lib/crypto/curve25519.c | 25 + - 6 files changed, 1778 insertions(+) - create mode 100644 include/crypto/curve25519.h - create mode 100644 lib/crypto/curve25519-fiat32.c - create mode 100644 lib/crypto/curve25519-hacl64.c - create mode 100644 lib/crypto/curve25519.c - ---- /dev/null -+++ b/include/crypto/curve25519.h -@@ -0,0 +1,71 @@ -+/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef CURVE25519_H -+#define CURVE25519_H -+ -+#include // For crypto_memneq. -+#include -+#include -+ -+enum curve25519_lengths { -+ CURVE25519_KEY_SIZE = 32 -+}; -+ -+extern const u8 curve25519_null_point[]; -+extern const u8 curve25519_base_point[]; -+ -+void curve25519_generic(u8 out[CURVE25519_KEY_SIZE], -+ const u8 scalar[CURVE25519_KEY_SIZE], -+ const u8 point[CURVE25519_KEY_SIZE]); -+ -+void curve25519_arch(u8 out[CURVE25519_KEY_SIZE], -+ const u8 scalar[CURVE25519_KEY_SIZE], -+ const u8 point[CURVE25519_KEY_SIZE]); -+ -+void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], -+ const u8 secret[CURVE25519_KEY_SIZE]); -+ -+static inline -+bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE], -+ const u8 secret[CURVE25519_KEY_SIZE], -+ const u8 basepoint[CURVE25519_KEY_SIZE]) -+{ -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519)) -+ curve25519_arch(mypublic, secret, basepoint); -+ else -+ curve25519_generic(mypublic, secret, basepoint); -+ return crypto_memneq(mypublic, curve25519_null_point, -+ CURVE25519_KEY_SIZE); -+} -+ -+static inline bool -+__must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE], -+ const u8 secret[CURVE25519_KEY_SIZE]) -+{ -+ if (unlikely(!crypto_memneq(secret, curve25519_null_point, -+ CURVE25519_KEY_SIZE))) -+ return false; -+ -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519)) -+ curve25519_base_arch(pub, secret); -+ else -+ curve25519_generic(pub, secret, curve25519_base_point); -+ return crypto_memneq(pub, curve25519_null_point, CURVE25519_KEY_SIZE); -+} -+ -+static inline void curve25519_clamp_secret(u8 secret[CURVE25519_KEY_SIZE]) -+{ -+ secret[0] &= 248; -+ secret[31] = (secret[31] & 127) | 64; -+} -+ -+static inline void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE]) -+{ -+ get_random_bytes_wait(secret, CURVE25519_KEY_SIZE); -+ curve25519_clamp_secret(secret); -+} -+ -+#endif /* CURVE25519_H */ ---- a/lib/crypto/Kconfig -+++ b/lib/crypto/Kconfig -@@ -59,6 +59,31 @@ 
config CRYPTO_LIB_CHACHA - by either the generic implementation or an arch-specific one, if one - is available and enabled. - -+config CRYPTO_ARCH_HAVE_LIB_CURVE25519 -+ tristate -+ help -+ Declares whether the architecture provides an arch-specific -+ accelerated implementation of the Curve25519 library interface, -+ either builtin or as a module. -+ -+config CRYPTO_LIB_CURVE25519_GENERIC -+ tristate -+ help -+ This symbol can be depended upon by arch implementations of the -+ Curve25519 library interface that require the generic code as a -+ fallback, e.g., for SIMD implementations. If no arch specific -+ implementation is enabled, this implementation serves the users -+ of CRYPTO_LIB_CURVE25519. -+ -+config CRYPTO_LIB_CURVE25519 -+ tristate "Curve25519 scalar multiplication library" -+ depends on CRYPTO_ARCH_HAVE_LIB_CURVE25519 || !CRYPTO_ARCH_HAVE_LIB_CURVE25519 -+ select CRYPTO_LIB_CURVE25519_GENERIC if CRYPTO_ARCH_HAVE_LIB_CURVE25519=n -+ help -+ Enable the Curve25519 library interface. This interface may be -+ fulfilled by either the generic implementation or an arch-specific -+ one, if one is available and enabled. -+ - config CRYPTO_LIB_DES - tristate - ---- a/lib/crypto/Makefile -+++ b/lib/crypto/Makefile -@@ -16,6 +16,11 @@ libblake2s-generic-y += blake2s-gener - obj-$(CONFIG_CRYPTO_LIB_BLAKE2S) += libblake2s.o - libblake2s-y += blake2s.o - -+obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519.o -+libcurve25519-y := curve25519-fiat32.o -+libcurve25519-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o -+libcurve25519-y += curve25519.o -+ - obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o - libdes-y := des.o - ---- /dev/null -+++ b/lib/crypto/curve25519-fiat32.c -@@ -0,0 +1,864 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2016 The fiat-crypto Authors. -+ * Copyright (C) 2018-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ * -+ * This is a machine-generated formally verified implementation of Curve25519 -+ * ECDH from: . Though originally -+ * machine generated, it has been tweaked to be suitable for use in the kernel. -+ * It is optimized for 32-bit machines and machines that cannot work efficiently -+ * with 128-bit integer types. -+ */ -+ -+#include -+#include -+#include -+ -+/* fe means field element. Here the field is \Z/(2^255-19). An element t, -+ * entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -+ * t[3]+2^102 t[4]+...+2^230 t[9]. -+ * fe limbs are bounded by 1.125*2^26,1.125*2^25,1.125*2^26,1.125*2^25,etc. -+ * Multiplication and carrying produce fe from fe_loose. -+ */ -+typedef struct fe { u32 v[10]; } fe; -+ -+/* fe_loose limbs are bounded by 3.375*2^26,3.375*2^25,3.375*2^26,3.375*2^25,etc -+ * Addition and subtraction produce fe_loose from (fe, fe). -+ */ -+typedef struct fe_loose { u32 v[10]; } fe_loose; -+ -+static __always_inline void fe_frombytes_impl(u32 h[10], const u8 *s) -+{ -+ /* Ignores top bit of s. */ -+ u32 a0 = get_unaligned_le32(s); -+ u32 a1 = get_unaligned_le32(s+4); -+ u32 a2 = get_unaligned_le32(s+8); -+ u32 a3 = get_unaligned_le32(s+12); -+ u32 a4 = get_unaligned_le32(s+16); -+ u32 a5 = get_unaligned_le32(s+20); -+ u32 a6 = get_unaligned_le32(s+24); -+ u32 a7 = get_unaligned_le32(s+28); -+ h[0] = a0&((1<<26)-1); /* 26 used, 32-26 left. 
26 */ -+ h[1] = (a0>>26) | ((a1&((1<<19)-1))<< 6); /* (32-26) + 19 = 6+19 = 25 */ -+ h[2] = (a1>>19) | ((a2&((1<<13)-1))<<13); /* (32-19) + 13 = 13+13 = 26 */ -+ h[3] = (a2>>13) | ((a3&((1<< 6)-1))<<19); /* (32-13) + 6 = 19+ 6 = 25 */ -+ h[4] = (a3>> 6); /* (32- 6) = 26 */ -+ h[5] = a4&((1<<25)-1); /* 25 */ -+ h[6] = (a4>>25) | ((a5&((1<<19)-1))<< 7); /* (32-25) + 19 = 7+19 = 26 */ -+ h[7] = (a5>>19) | ((a6&((1<<12)-1))<<13); /* (32-19) + 12 = 13+12 = 25 */ -+ h[8] = (a6>>12) | ((a7&((1<< 6)-1))<<20); /* (32-12) + 6 = 20+ 6 = 26 */ -+ h[9] = (a7>> 6)&((1<<25)-1); /* 25 */ -+} -+ -+static __always_inline void fe_frombytes(fe *h, const u8 *s) -+{ -+ fe_frombytes_impl(h->v, s); -+} -+ -+static __always_inline u8 /*bool*/ -+addcarryx_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low) -+{ -+ /* This function extracts 25 bits of result and 1 bit of carry -+ * (26 total), so a 32-bit intermediate is sufficient. -+ */ -+ u32 x = a + b + c; -+ *low = x & ((1 << 25) - 1); -+ return (x >> 25) & 1; -+} -+ -+static __always_inline u8 /*bool*/ -+addcarryx_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low) -+{ -+ /* This function extracts 26 bits of result and 1 bit of carry -+ * (27 total), so a 32-bit intermediate is sufficient. -+ */ -+ u32 x = a + b + c; -+ *low = x & ((1 << 26) - 1); -+ return (x >> 26) & 1; -+} -+ -+static __always_inline u8 /*bool*/ -+subborrow_u25(u8 /*bool*/ c, u32 a, u32 b, u32 *low) -+{ -+ /* This function extracts 25 bits of result and 1 bit of borrow -+ * (26 total), so a 32-bit intermediate is sufficient. -+ */ -+ u32 x = a - b - c; -+ *low = x & ((1 << 25) - 1); -+ return x >> 31; -+} -+ -+static __always_inline u8 /*bool*/ -+subborrow_u26(u8 /*bool*/ c, u32 a, u32 b, u32 *low) -+{ -+ /* This function extracts 26 bits of result and 1 bit of borrow -+ *(27 total), so a 32-bit intermediate is sufficient. 
-+ */ -+ u32 x = a - b - c; -+ *low = x & ((1 << 26) - 1); -+ return x >> 31; -+} -+ -+static __always_inline u32 cmovznz32(u32 t, u32 z, u32 nz) -+{ -+ t = -!!t; /* all set if nonzero, 0 if 0 */ -+ return (t&nz) | ((~t)&z); -+} -+ -+static __always_inline void fe_freeze(u32 out[10], const u32 in1[10]) -+{ -+ { const u32 x17 = in1[9]; -+ { const u32 x18 = in1[8]; -+ { const u32 x16 = in1[7]; -+ { const u32 x14 = in1[6]; -+ { const u32 x12 = in1[5]; -+ { const u32 x10 = in1[4]; -+ { const u32 x8 = in1[3]; -+ { const u32 x6 = in1[2]; -+ { const u32 x4 = in1[1]; -+ { const u32 x2 = in1[0]; -+ { u32 x20; u8/*bool*/ x21 = subborrow_u26(0x0, x2, 0x3ffffed, &x20); -+ { u32 x23; u8/*bool*/ x24 = subborrow_u25(x21, x4, 0x1ffffff, &x23); -+ { u32 x26; u8/*bool*/ x27 = subborrow_u26(x24, x6, 0x3ffffff, &x26); -+ { u32 x29; u8/*bool*/ x30 = subborrow_u25(x27, x8, 0x1ffffff, &x29); -+ { u32 x32; u8/*bool*/ x33 = subborrow_u26(x30, x10, 0x3ffffff, &x32); -+ { u32 x35; u8/*bool*/ x36 = subborrow_u25(x33, x12, 0x1ffffff, &x35); -+ { u32 x38; u8/*bool*/ x39 = subborrow_u26(x36, x14, 0x3ffffff, &x38); -+ { u32 x41; u8/*bool*/ x42 = subborrow_u25(x39, x16, 0x1ffffff, &x41); -+ { u32 x44; u8/*bool*/ x45 = subborrow_u26(x42, x18, 0x3ffffff, &x44); -+ { u32 x47; u8/*bool*/ x48 = subborrow_u25(x45, x17, 0x1ffffff, &x47); -+ { u32 x49 = cmovznz32(x48, 0x0, 0xffffffff); -+ { u32 x50 = (x49 & 0x3ffffed); -+ { u32 x52; u8/*bool*/ x53 = addcarryx_u26(0x0, x20, x50, &x52); -+ { u32 x54 = (x49 & 0x1ffffff); -+ { u32 x56; u8/*bool*/ x57 = addcarryx_u25(x53, x23, x54, &x56); -+ { u32 x58 = (x49 & 0x3ffffff); -+ { u32 x60; u8/*bool*/ x61 = addcarryx_u26(x57, x26, x58, &x60); -+ { u32 x62 = (x49 & 0x1ffffff); -+ { u32 x64; u8/*bool*/ x65 = addcarryx_u25(x61, x29, x62, &x64); -+ { u32 x66 = (x49 & 0x3ffffff); -+ { u32 x68; u8/*bool*/ x69 = addcarryx_u26(x65, x32, x66, &x68); -+ { u32 x70 = (x49 & 0x1ffffff); -+ { u32 x72; u8/*bool*/ x73 = addcarryx_u25(x69, x35, x70, &x72); -+ { u32 x74 = (x49 & 
0x3ffffff); -+ { u32 x76; u8/*bool*/ x77 = addcarryx_u26(x73, x38, x74, &x76); -+ { u32 x78 = (x49 & 0x1ffffff); -+ { u32 x80; u8/*bool*/ x81 = addcarryx_u25(x77, x41, x78, &x80); -+ { u32 x82 = (x49 & 0x3ffffff); -+ { u32 x84; u8/*bool*/ x85 = addcarryx_u26(x81, x44, x82, &x84); -+ { u32 x86 = (x49 & 0x1ffffff); -+ { u32 x88; addcarryx_u25(x85, x47, x86, &x88); -+ out[0] = x52; -+ out[1] = x56; -+ out[2] = x60; -+ out[3] = x64; -+ out[4] = x68; -+ out[5] = x72; -+ out[6] = x76; -+ out[7] = x80; -+ out[8] = x84; -+ out[9] = x88; -+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} -+} -+ -+static __always_inline void fe_tobytes(u8 s[32], const fe *f) -+{ -+ u32 h[10]; -+ fe_freeze(h, f->v); -+ s[0] = h[0] >> 0; -+ s[1] = h[0] >> 8; -+ s[2] = h[0] >> 16; -+ s[3] = (h[0] >> 24) | (h[1] << 2); -+ s[4] = h[1] >> 6; -+ s[5] = h[1] >> 14; -+ s[6] = (h[1] >> 22) | (h[2] << 3); -+ s[7] = h[2] >> 5; -+ s[8] = h[2] >> 13; -+ s[9] = (h[2] >> 21) | (h[3] << 5); -+ s[10] = h[3] >> 3; -+ s[11] = h[3] >> 11; -+ s[12] = (h[3] >> 19) | (h[4] << 6); -+ s[13] = h[4] >> 2; -+ s[14] = h[4] >> 10; -+ s[15] = h[4] >> 18; -+ s[16] = h[5] >> 0; -+ s[17] = h[5] >> 8; -+ s[18] = h[5] >> 16; -+ s[19] = (h[5] >> 24) | (h[6] << 1); -+ s[20] = h[6] >> 7; -+ s[21] = h[6] >> 15; -+ s[22] = (h[6] >> 23) | (h[7] << 3); -+ s[23] = h[7] >> 5; -+ s[24] = h[7] >> 13; -+ s[25] = (h[7] >> 21) | (h[8] << 4); -+ s[26] = h[8] >> 4; -+ s[27] = h[8] >> 12; -+ s[28] = (h[8] >> 20) | (h[9] << 6); -+ s[29] = h[9] >> 2; -+ s[30] = h[9] >> 10; -+ s[31] = h[9] >> 18; -+} -+ -+/* h = f */ -+static __always_inline void fe_copy(fe *h, const fe *f) -+{ -+ memmove(h, f, sizeof(u32) * 10); -+} -+ -+static __always_inline void fe_copy_lt(fe_loose *h, const fe *f) -+{ -+ memmove(h, f, sizeof(u32) * 10); -+} -+ -+/* h = 0 */ -+static __always_inline void fe_0(fe *h) -+{ -+ memset(h, 0, sizeof(u32) * 10); -+} -+ -+/* h = 1 */ -+static __always_inline void fe_1(fe *h) -+{ -+ memset(h, 0, sizeof(u32) * 10); -+ h->v[0] = 1; -+} -+ 
-+static void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) -+{ -+ { const u32 x20 = in1[9]; -+ { const u32 x21 = in1[8]; -+ { const u32 x19 = in1[7]; -+ { const u32 x17 = in1[6]; -+ { const u32 x15 = in1[5]; -+ { const u32 x13 = in1[4]; -+ { const u32 x11 = in1[3]; -+ { const u32 x9 = in1[2]; -+ { const u32 x7 = in1[1]; -+ { const u32 x5 = in1[0]; -+ { const u32 x38 = in2[9]; -+ { const u32 x39 = in2[8]; -+ { const u32 x37 = in2[7]; -+ { const u32 x35 = in2[6]; -+ { const u32 x33 = in2[5]; -+ { const u32 x31 = in2[4]; -+ { const u32 x29 = in2[3]; -+ { const u32 x27 = in2[2]; -+ { const u32 x25 = in2[1]; -+ { const u32 x23 = in2[0]; -+ out[0] = (x5 + x23); -+ out[1] = (x7 + x25); -+ out[2] = (x9 + x27); -+ out[3] = (x11 + x29); -+ out[4] = (x13 + x31); -+ out[5] = (x15 + x33); -+ out[6] = (x17 + x35); -+ out[7] = (x19 + x37); -+ out[8] = (x21 + x39); -+ out[9] = (x20 + x38); -+ }}}}}}}}}}}}}}}}}}}} -+} -+ -+/* h = f + g -+ * Can overlap h with f or g. -+ */ -+static __always_inline void fe_add(fe_loose *h, const fe *f, const fe *g) -+{ -+ fe_add_impl(h->v, f->v, g->v); -+} -+ -+static void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) -+{ -+ { const u32 x20 = in1[9]; -+ { const u32 x21 = in1[8]; -+ { const u32 x19 = in1[7]; -+ { const u32 x17 = in1[6]; -+ { const u32 x15 = in1[5]; -+ { const u32 x13 = in1[4]; -+ { const u32 x11 = in1[3]; -+ { const u32 x9 = in1[2]; -+ { const u32 x7 = in1[1]; -+ { const u32 x5 = in1[0]; -+ { const u32 x38 = in2[9]; -+ { const u32 x39 = in2[8]; -+ { const u32 x37 = in2[7]; -+ { const u32 x35 = in2[6]; -+ { const u32 x33 = in2[5]; -+ { const u32 x31 = in2[4]; -+ { const u32 x29 = in2[3]; -+ { const u32 x27 = in2[2]; -+ { const u32 x25 = in2[1]; -+ { const u32 x23 = in2[0]; -+ out[0] = ((0x7ffffda + x5) - x23); -+ out[1] = ((0x3fffffe + x7) - x25); -+ out[2] = ((0x7fffffe + x9) - x27); -+ out[3] = ((0x3fffffe + x11) - x29); -+ out[4] = ((0x7fffffe + x13) - x31); -+ out[5] = ((0x3fffffe + x15) - x33); 
-+ out[6] = ((0x7fffffe + x17) - x35); -+ out[7] = ((0x3fffffe + x19) - x37); -+ out[8] = ((0x7fffffe + x21) - x39); -+ out[9] = ((0x3fffffe + x20) - x38); -+ }}}}}}}}}}}}}}}}}}}} -+} -+ -+/* h = f - g -+ * Can overlap h with f or g. -+ */ -+static __always_inline void fe_sub(fe_loose *h, const fe *f, const fe *g) -+{ -+ fe_sub_impl(h->v, f->v, g->v); -+} -+ -+static void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) -+{ -+ { const u32 x20 = in1[9]; -+ { const u32 x21 = in1[8]; -+ { const u32 x19 = in1[7]; -+ { const u32 x17 = in1[6]; -+ { const u32 x15 = in1[5]; -+ { const u32 x13 = in1[4]; -+ { const u32 x11 = in1[3]; -+ { const u32 x9 = in1[2]; -+ { const u32 x7 = in1[1]; -+ { const u32 x5 = in1[0]; -+ { const u32 x38 = in2[9]; -+ { const u32 x39 = in2[8]; -+ { const u32 x37 = in2[7]; -+ { const u32 x35 = in2[6]; -+ { const u32 x33 = in2[5]; -+ { const u32 x31 = in2[4]; -+ { const u32 x29 = in2[3]; -+ { const u32 x27 = in2[2]; -+ { const u32 x25 = in2[1]; -+ { const u32 x23 = in2[0]; -+ { u64 x40 = ((u64)x23 * x5); -+ { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5)); -+ { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5)); -+ { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5)); -+ { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5)); -+ { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5)); -+ { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5)); -+ { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5)); -+ { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * 
x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + ((u64)x39 * x5)); -+ { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5)); -+ { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9)); -+ { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9)); -+ { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13)); -+ { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13)); -+ { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17)); -+ { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17)); -+ { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19)))); -+ { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21)); -+ { u64 x58 = ((u64)(0x2 * x38) * x20); -+ { u64 x59 = (x48 + (x58 << 0x4)); -+ { u64 x60 = (x59 + (x58 << 0x1)); -+ { u64 x61 = (x60 + x58); -+ { u64 x62 = (x47 + (x57 << 0x4)); -+ { u64 x63 = (x62 + (x57 << 0x1)); -+ { u64 x64 = (x63 + x57); -+ { u64 x65 = (x46 + (x56 << 0x4)); -+ { u64 x66 = (x65 + (x56 << 0x1)); -+ { u64 x67 = (x66 + x56); -+ { u64 x68 = (x45 + (x55 << 0x4)); -+ { u64 x69 = (x68 + (x55 << 0x1)); -+ { u64 x70 = (x69 + x55); -+ { u64 x71 = (x44 + (x54 << 0x4)); -+ { u64 x72 = (x71 + (x54 << 0x1)); -+ { u64 x73 = (x72 + x54); -+ { u64 x74 = (x43 + (x53 << 0x4)); -+ { u64 x75 = (x74 + (x53 << 0x1)); -+ { u64 x76 = (x75 
+ x53); -+ { u64 x77 = (x42 + (x52 << 0x4)); -+ { u64 x78 = (x77 + (x52 << 0x1)); -+ { u64 x79 = (x78 + x52); -+ { u64 x80 = (x41 + (x51 << 0x4)); -+ { u64 x81 = (x80 + (x51 << 0x1)); -+ { u64 x82 = (x81 + x51); -+ { u64 x83 = (x40 + (x50 << 0x4)); -+ { u64 x84 = (x83 + (x50 << 0x1)); -+ { u64 x85 = (x84 + x50); -+ { u64 x86 = (x85 >> 0x1a); -+ { u32 x87 = ((u32)x85 & 0x3ffffff); -+ { u64 x88 = (x86 + x82); -+ { u64 x89 = (x88 >> 0x19); -+ { u32 x90 = ((u32)x88 & 0x1ffffff); -+ { u64 x91 = (x89 + x79); -+ { u64 x92 = (x91 >> 0x1a); -+ { u32 x93 = ((u32)x91 & 0x3ffffff); -+ { u64 x94 = (x92 + x76); -+ { u64 x95 = (x94 >> 0x19); -+ { u32 x96 = ((u32)x94 & 0x1ffffff); -+ { u64 x97 = (x95 + x73); -+ { u64 x98 = (x97 >> 0x1a); -+ { u32 x99 = ((u32)x97 & 0x3ffffff); -+ { u64 x100 = (x98 + x70); -+ { u64 x101 = (x100 >> 0x19); -+ { u32 x102 = ((u32)x100 & 0x1ffffff); -+ { u64 x103 = (x101 + x67); -+ { u64 x104 = (x103 >> 0x1a); -+ { u32 x105 = ((u32)x103 & 0x3ffffff); -+ { u64 x106 = (x104 + x64); -+ { u64 x107 = (x106 >> 0x19); -+ { u32 x108 = ((u32)x106 & 0x1ffffff); -+ { u64 x109 = (x107 + x61); -+ { u64 x110 = (x109 >> 0x1a); -+ { u32 x111 = ((u32)x109 & 0x3ffffff); -+ { u64 x112 = (x110 + x49); -+ { u64 x113 = (x112 >> 0x19); -+ { u32 x114 = ((u32)x112 & 0x1ffffff); -+ { u64 x115 = (x87 + (0x13 * x113)); -+ { u32 x116 = (u32) (x115 >> 0x1a); -+ { u32 x117 = ((u32)x115 & 0x3ffffff); -+ { u32 x118 = (x116 + x90); -+ { u32 x119 = (x118 >> 0x19); -+ { u32 x120 = (x118 & 0x1ffffff); -+ out[0] = x117; -+ out[1] = x120; -+ out[2] = (x119 + x93); -+ out[3] = x96; -+ out[4] = x99; -+ out[5] = x102; -+ out[6] = x105; -+ out[7] = x108; -+ out[8] = x111; -+ out[9] = x114; -+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} -+} -+ -+static __always_inline void fe_mul_ttt(fe *h, const fe *f, const fe *g) -+{ -+ fe_mul_impl(h->v, f->v, g->v); -+} -+ -+static __always_inline void fe_mul_tlt(fe *h, const fe_loose *f, const fe *g) 
-+{ -+ fe_mul_impl(h->v, f->v, g->v); -+} -+ -+static __always_inline void -+fe_mul_tll(fe *h, const fe_loose *f, const fe_loose *g) -+{ -+ fe_mul_impl(h->v, f->v, g->v); -+} -+ -+static void fe_sqr_impl(u32 out[10], const u32 in1[10]) -+{ -+ { const u32 x17 = in1[9]; -+ { const u32 x18 = in1[8]; -+ { const u32 x16 = in1[7]; -+ { const u32 x14 = in1[6]; -+ { const u32 x12 = in1[5]; -+ { const u32 x10 = in1[4]; -+ { const u32 x8 = in1[3]; -+ { const u32 x6 = in1[2]; -+ { const u32 x4 = in1[1]; -+ { const u32 x2 = in1[0]; -+ { u64 x19 = ((u64)x2 * x2); -+ { u64 x20 = ((u64)(0x2 * x2) * x4); -+ { u64 x21 = (0x2 * (((u64)x4 * x4) + ((u64)x2 * x6))); -+ { u64 x22 = (0x2 * (((u64)x4 * x6) + ((u64)x2 * x8))); -+ { u64 x23 = ((((u64)x6 * x6) + ((u64)(0x4 * x4) * x8)) + ((u64)(0x2 * x2) * x10)); -+ { u64 x24 = (0x2 * ((((u64)x6 * x8) + ((u64)x4 * x10)) + ((u64)x2 * x12))); -+ { u64 x25 = (0x2 * (((((u64)x8 * x8) + ((u64)x6 * x10)) + ((u64)x2 * x14)) + ((u64)(0x2 * x4) * x12))); -+ { u64 x26 = (0x2 * (((((u64)x8 * x10) + ((u64)x6 * x12)) + ((u64)x4 * x14)) + ((u64)x2 * x16))); -+ { u64 x27 = (((u64)x10 * x10) + (0x2 * ((((u64)x6 * x14) + ((u64)x2 * x18)) + (0x2 * (((u64)x4 * x16) + ((u64)x8 * x12)))))); -+ { u64 x28 = (0x2 * ((((((u64)x10 * x12) + ((u64)x8 * x14)) + ((u64)x6 * x16)) + ((u64)x4 * x18)) + ((u64)x2 * x17))); -+ { u64 x29 = (0x2 * (((((u64)x12 * x12) + ((u64)x10 * x14)) + ((u64)x6 * x18)) + (0x2 * (((u64)x8 * x16) + ((u64)x4 * x17))))); -+ { u64 x30 = (0x2 * (((((u64)x12 * x14) + ((u64)x10 * x16)) + ((u64)x8 * x18)) + ((u64)x6 * x17))); -+ { u64 x31 = (((u64)x14 * x14) + (0x2 * (((u64)x10 * x18) + (0x2 * (((u64)x12 * x16) + ((u64)x8 * x17)))))); -+ { u64 x32 = (0x2 * ((((u64)x14 * x16) + ((u64)x12 * x18)) + ((u64)x10 * x17))); -+ { u64 x33 = (0x2 * ((((u64)x16 * x16) + ((u64)x14 * x18)) + ((u64)(0x2 * x12) * x17))); -+ { u64 x34 = (0x2 * (((u64)x16 * x18) + ((u64)x14 * x17))); -+ { u64 x35 = (((u64)x18 * x18) + ((u64)(0x4 * x16) * x17)); -+ { u64 x36 = 
((u64)(0x2 * x18) * x17); -+ { u64 x37 = ((u64)(0x2 * x17) * x17); -+ { u64 x38 = (x27 + (x37 << 0x4)); -+ { u64 x39 = (x38 + (x37 << 0x1)); -+ { u64 x40 = (x39 + x37); -+ { u64 x41 = (x26 + (x36 << 0x4)); -+ { u64 x42 = (x41 + (x36 << 0x1)); -+ { u64 x43 = (x42 + x36); -+ { u64 x44 = (x25 + (x35 << 0x4)); -+ { u64 x45 = (x44 + (x35 << 0x1)); -+ { u64 x46 = (x45 + x35); -+ { u64 x47 = (x24 + (x34 << 0x4)); -+ { u64 x48 = (x47 + (x34 << 0x1)); -+ { u64 x49 = (x48 + x34); -+ { u64 x50 = (x23 + (x33 << 0x4)); -+ { u64 x51 = (x50 + (x33 << 0x1)); -+ { u64 x52 = (x51 + x33); -+ { u64 x53 = (x22 + (x32 << 0x4)); -+ { u64 x54 = (x53 + (x32 << 0x1)); -+ { u64 x55 = (x54 + x32); -+ { u64 x56 = (x21 + (x31 << 0x4)); -+ { u64 x57 = (x56 + (x31 << 0x1)); -+ { u64 x58 = (x57 + x31); -+ { u64 x59 = (x20 + (x30 << 0x4)); -+ { u64 x60 = (x59 + (x30 << 0x1)); -+ { u64 x61 = (x60 + x30); -+ { u64 x62 = (x19 + (x29 << 0x4)); -+ { u64 x63 = (x62 + (x29 << 0x1)); -+ { u64 x64 = (x63 + x29); -+ { u64 x65 = (x64 >> 0x1a); -+ { u32 x66 = ((u32)x64 & 0x3ffffff); -+ { u64 x67 = (x65 + x61); -+ { u64 x68 = (x67 >> 0x19); -+ { u32 x69 = ((u32)x67 & 0x1ffffff); -+ { u64 x70 = (x68 + x58); -+ { u64 x71 = (x70 >> 0x1a); -+ { u32 x72 = ((u32)x70 & 0x3ffffff); -+ { u64 x73 = (x71 + x55); -+ { u64 x74 = (x73 >> 0x19); -+ { u32 x75 = ((u32)x73 & 0x1ffffff); -+ { u64 x76 = (x74 + x52); -+ { u64 x77 = (x76 >> 0x1a); -+ { u32 x78 = ((u32)x76 & 0x3ffffff); -+ { u64 x79 = (x77 + x49); -+ { u64 x80 = (x79 >> 0x19); -+ { u32 x81 = ((u32)x79 & 0x1ffffff); -+ { u64 x82 = (x80 + x46); -+ { u64 x83 = (x82 >> 0x1a); -+ { u32 x84 = ((u32)x82 & 0x3ffffff); -+ { u64 x85 = (x83 + x43); -+ { u64 x86 = (x85 >> 0x19); -+ { u32 x87 = ((u32)x85 & 0x1ffffff); -+ { u64 x88 = (x86 + x40); -+ { u64 x89 = (x88 >> 0x1a); -+ { u32 x90 = ((u32)x88 & 0x3ffffff); -+ { u64 x91 = (x89 + x28); -+ { u64 x92 = (x91 >> 0x19); -+ { u32 x93 = ((u32)x91 & 0x1ffffff); -+ { u64 x94 = (x66 + (0x13 * x92)); -+ { u32 x95 = (u32) (x94 >> 0x1a); 
-+ { u32 x96 = ((u32)x94 & 0x3ffffff); -+ { u32 x97 = (x95 + x69); -+ { u32 x98 = (x97 >> 0x19); -+ { u32 x99 = (x97 & 0x1ffffff); -+ out[0] = x96; -+ out[1] = x99; -+ out[2] = (x98 + x72); -+ out[3] = x75; -+ out[4] = x78; -+ out[5] = x81; -+ out[6] = x84; -+ out[7] = x87; -+ out[8] = x90; -+ out[9] = x93; -+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} -+} -+ -+static __always_inline void fe_sq_tl(fe *h, const fe_loose *f) -+{ -+ fe_sqr_impl(h->v, f->v); -+} -+ -+static __always_inline void fe_sq_tt(fe *h, const fe *f) -+{ -+ fe_sqr_impl(h->v, f->v); -+} -+ -+static __always_inline void fe_loose_invert(fe *out, const fe_loose *z) -+{ -+ fe t0; -+ fe t1; -+ fe t2; -+ fe t3; -+ int i; -+ -+ fe_sq_tl(&t0, z); -+ fe_sq_tt(&t1, &t0); -+ for (i = 1; i < 2; ++i) -+ fe_sq_tt(&t1, &t1); -+ fe_mul_tlt(&t1, z, &t1); -+ fe_mul_ttt(&t0, &t0, &t1); -+ fe_sq_tt(&t2, &t0); -+ fe_mul_ttt(&t1, &t1, &t2); -+ fe_sq_tt(&t2, &t1); -+ for (i = 1; i < 5; ++i) -+ fe_sq_tt(&t2, &t2); -+ fe_mul_ttt(&t1, &t2, &t1); -+ fe_sq_tt(&t2, &t1); -+ for (i = 1; i < 10; ++i) -+ fe_sq_tt(&t2, &t2); -+ fe_mul_ttt(&t2, &t2, &t1); -+ fe_sq_tt(&t3, &t2); -+ for (i = 1; i < 20; ++i) -+ fe_sq_tt(&t3, &t3); -+ fe_mul_ttt(&t2, &t3, &t2); -+ fe_sq_tt(&t2, &t2); -+ for (i = 1; i < 10; ++i) -+ fe_sq_tt(&t2, &t2); -+ fe_mul_ttt(&t1, &t2, &t1); -+ fe_sq_tt(&t2, &t1); -+ for (i = 1; i < 50; ++i) -+ fe_sq_tt(&t2, &t2); -+ fe_mul_ttt(&t2, &t2, &t1); -+ fe_sq_tt(&t3, &t2); -+ for (i = 1; i < 100; ++i) -+ fe_sq_tt(&t3, &t3); -+ fe_mul_ttt(&t2, &t3, &t2); -+ fe_sq_tt(&t2, &t2); -+ for (i = 1; i < 50; ++i) -+ fe_sq_tt(&t2, &t2); -+ fe_mul_ttt(&t1, &t2, &t1); -+ fe_sq_tt(&t1, &t1); -+ for (i = 1; i < 5; ++i) -+ fe_sq_tt(&t1, &t1); -+ fe_mul_ttt(out, &t1, &t0); -+} -+ -+static __always_inline void fe_invert(fe *out, const fe *z) -+{ -+ fe_loose l; -+ fe_copy_lt(&l, z); -+ fe_loose_invert(out, &l); -+} -+ -+/* Replace (f,g) with (g,f) if b == 1; -+ * replace (f,g) with (f,g) if 
b == 0. -+ * -+ * Preconditions: b in {0,1} -+ */ -+static __always_inline void fe_cswap(fe *f, fe *g, unsigned int b) -+{ -+ unsigned i; -+ b = 0 - b; -+ for (i = 0; i < 10; i++) { -+ u32 x = f->v[i] ^ g->v[i]; -+ x &= b; -+ f->v[i] ^= x; -+ g->v[i] ^= x; -+ } -+} -+ -+/* NOTE: based on fiat-crypto fe_mul, edited for in2=121666, 0, 0.*/ -+static __always_inline void fe_mul_121666_impl(u32 out[10], const u32 in1[10]) -+{ -+ { const u32 x20 = in1[9]; -+ { const u32 x21 = in1[8]; -+ { const u32 x19 = in1[7]; -+ { const u32 x17 = in1[6]; -+ { const u32 x15 = in1[5]; -+ { const u32 x13 = in1[4]; -+ { const u32 x11 = in1[3]; -+ { const u32 x9 = in1[2]; -+ { const u32 x7 = in1[1]; -+ { const u32 x5 = in1[0]; -+ { const u32 x38 = 0; -+ { const u32 x39 = 0; -+ { const u32 x37 = 0; -+ { const u32 x35 = 0; -+ { const u32 x33 = 0; -+ { const u32 x31 = 0; -+ { const u32 x29 = 0; -+ { const u32 x27 = 0; -+ { const u32 x25 = 0; -+ { const u32 x23 = 121666; -+ { u64 x40 = ((u64)x23 * x5); -+ { u64 x41 = (((u64)x23 * x7) + ((u64)x25 * x5)); -+ { u64 x42 = ((((u64)(0x2 * x25) * x7) + ((u64)x23 * x9)) + ((u64)x27 * x5)); -+ { u64 x43 = (((((u64)x25 * x9) + ((u64)x27 * x7)) + ((u64)x23 * x11)) + ((u64)x29 * x5)); -+ { u64 x44 = (((((u64)x27 * x9) + (0x2 * (((u64)x25 * x11) + ((u64)x29 * x7)))) + ((u64)x23 * x13)) + ((u64)x31 * x5)); -+ { u64 x45 = (((((((u64)x27 * x11) + ((u64)x29 * x9)) + ((u64)x25 * x13)) + ((u64)x31 * x7)) + ((u64)x23 * x15)) + ((u64)x33 * x5)); -+ { u64 x46 = (((((0x2 * ((((u64)x29 * x11) + ((u64)x25 * x15)) + ((u64)x33 * x7))) + ((u64)x27 * x13)) + ((u64)x31 * x9)) + ((u64)x23 * x17)) + ((u64)x35 * x5)); -+ { u64 x47 = (((((((((u64)x29 * x13) + ((u64)x31 * x11)) + ((u64)x27 * x15)) + ((u64)x33 * x9)) + ((u64)x25 * x17)) + ((u64)x35 * x7)) + ((u64)x23 * x19)) + ((u64)x37 * x5)); -+ { u64 x48 = (((((((u64)x31 * x13) + (0x2 * (((((u64)x29 * x15) + ((u64)x33 * x11)) + ((u64)x25 * x19)) + ((u64)x37 * x7)))) + ((u64)x27 * x17)) + ((u64)x35 * x9)) + ((u64)x23 * x21)) + 
((u64)x39 * x5)); -+ { u64 x49 = (((((((((((u64)x31 * x15) + ((u64)x33 * x13)) + ((u64)x29 * x17)) + ((u64)x35 * x11)) + ((u64)x27 * x19)) + ((u64)x37 * x9)) + ((u64)x25 * x21)) + ((u64)x39 * x7)) + ((u64)x23 * x20)) + ((u64)x38 * x5)); -+ { u64 x50 = (((((0x2 * ((((((u64)x33 * x15) + ((u64)x29 * x19)) + ((u64)x37 * x11)) + ((u64)x25 * x20)) + ((u64)x38 * x7))) + ((u64)x31 * x17)) + ((u64)x35 * x13)) + ((u64)x27 * x21)) + ((u64)x39 * x9)); -+ { u64 x51 = (((((((((u64)x33 * x17) + ((u64)x35 * x15)) + ((u64)x31 * x19)) + ((u64)x37 * x13)) + ((u64)x29 * x21)) + ((u64)x39 * x11)) + ((u64)x27 * x20)) + ((u64)x38 * x9)); -+ { u64 x52 = (((((u64)x35 * x17) + (0x2 * (((((u64)x33 * x19) + ((u64)x37 * x15)) + ((u64)x29 * x20)) + ((u64)x38 * x11)))) + ((u64)x31 * x21)) + ((u64)x39 * x13)); -+ { u64 x53 = (((((((u64)x35 * x19) + ((u64)x37 * x17)) + ((u64)x33 * x21)) + ((u64)x39 * x15)) + ((u64)x31 * x20)) + ((u64)x38 * x13)); -+ { u64 x54 = (((0x2 * ((((u64)x37 * x19) + ((u64)x33 * x20)) + ((u64)x38 * x15))) + ((u64)x35 * x21)) + ((u64)x39 * x17)); -+ { u64 x55 = (((((u64)x37 * x21) + ((u64)x39 * x19)) + ((u64)x35 * x20)) + ((u64)x38 * x17)); -+ { u64 x56 = (((u64)x39 * x21) + (0x2 * (((u64)x37 * x20) + ((u64)x38 * x19)))); -+ { u64 x57 = (((u64)x39 * x20) + ((u64)x38 * x21)); -+ { u64 x58 = ((u64)(0x2 * x38) * x20); -+ { u64 x59 = (x48 + (x58 << 0x4)); -+ { u64 x60 = (x59 + (x58 << 0x1)); -+ { u64 x61 = (x60 + x58); -+ { u64 x62 = (x47 + (x57 << 0x4)); -+ { u64 x63 = (x62 + (x57 << 0x1)); -+ { u64 x64 = (x63 + x57); -+ { u64 x65 = (x46 + (x56 << 0x4)); -+ { u64 x66 = (x65 + (x56 << 0x1)); -+ { u64 x67 = (x66 + x56); -+ { u64 x68 = (x45 + (x55 << 0x4)); -+ { u64 x69 = (x68 + (x55 << 0x1)); -+ { u64 x70 = (x69 + x55); -+ { u64 x71 = (x44 + (x54 << 0x4)); -+ { u64 x72 = (x71 + (x54 << 0x1)); -+ { u64 x73 = (x72 + x54); -+ { u64 x74 = (x43 + (x53 << 0x4)); -+ { u64 x75 = (x74 + (x53 << 0x1)); -+ { u64 x76 = (x75 + x53); -+ { u64 x77 = (x42 + (x52 << 0x4)); -+ { u64 x78 = (x77 + 
(x52 << 0x1)); -+ { u64 x79 = (x78 + x52); -+ { u64 x80 = (x41 + (x51 << 0x4)); -+ { u64 x81 = (x80 + (x51 << 0x1)); -+ { u64 x82 = (x81 + x51); -+ { u64 x83 = (x40 + (x50 << 0x4)); -+ { u64 x84 = (x83 + (x50 << 0x1)); -+ { u64 x85 = (x84 + x50); -+ { u64 x86 = (x85 >> 0x1a); -+ { u32 x87 = ((u32)x85 & 0x3ffffff); -+ { u64 x88 = (x86 + x82); -+ { u64 x89 = (x88 >> 0x19); -+ { u32 x90 = ((u32)x88 & 0x1ffffff); -+ { u64 x91 = (x89 + x79); -+ { u64 x92 = (x91 >> 0x1a); -+ { u32 x93 = ((u32)x91 & 0x3ffffff); -+ { u64 x94 = (x92 + x76); -+ { u64 x95 = (x94 >> 0x19); -+ { u32 x96 = ((u32)x94 & 0x1ffffff); -+ { u64 x97 = (x95 + x73); -+ { u64 x98 = (x97 >> 0x1a); -+ { u32 x99 = ((u32)x97 & 0x3ffffff); -+ { u64 x100 = (x98 + x70); -+ { u64 x101 = (x100 >> 0x19); -+ { u32 x102 = ((u32)x100 & 0x1ffffff); -+ { u64 x103 = (x101 + x67); -+ { u64 x104 = (x103 >> 0x1a); -+ { u32 x105 = ((u32)x103 & 0x3ffffff); -+ { u64 x106 = (x104 + x64); -+ { u64 x107 = (x106 >> 0x19); -+ { u32 x108 = ((u32)x106 & 0x1ffffff); -+ { u64 x109 = (x107 + x61); -+ { u64 x110 = (x109 >> 0x1a); -+ { u32 x111 = ((u32)x109 & 0x3ffffff); -+ { u64 x112 = (x110 + x49); -+ { u64 x113 = (x112 >> 0x19); -+ { u32 x114 = ((u32)x112 & 0x1ffffff); -+ { u64 x115 = (x87 + (0x13 * x113)); -+ { u32 x116 = (u32) (x115 >> 0x1a); -+ { u32 x117 = ((u32)x115 & 0x3ffffff); -+ { u32 x118 = (x116 + x90); -+ { u32 x119 = (x118 >> 0x19); -+ { u32 x120 = (x118 & 0x1ffffff); -+ out[0] = x117; -+ out[1] = x120; -+ out[2] = (x119 + x93); -+ out[3] = x96; -+ out[4] = x99; -+ out[5] = x102; -+ out[6] = x105; -+ out[7] = x108; -+ out[8] = x111; -+ out[9] = x114; -+ }}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}}} -+} -+ -+static __always_inline void fe_mul121666(fe *h, const fe_loose *f) -+{ -+ fe_mul_121666_impl(h->v, f->v); -+} -+ -+void curve25519_generic(u8 out[CURVE25519_KEY_SIZE], -+ const u8 scalar[CURVE25519_KEY_SIZE], -+ const u8 point[CURVE25519_KEY_SIZE]) -+{ -+ fe x1, 
x2, z2, x3, z3; -+ fe_loose x2l, z2l, x3l; -+ unsigned swap = 0; -+ int pos; -+ u8 e[32]; -+ -+ memcpy(e, scalar, 32); -+ curve25519_clamp_secret(e); -+ -+ /* The following implementation was transcribed to Coq and proven to -+ * correspond to unary scalar multiplication in affine coordinates given -+ * that x1 != 0 is the x coordinate of some point on the curve. It was -+ * also checked in Coq that doing a ladderstep with x1 = x3 = 0 gives -+ * z2' = z3' = 0, and z2 = z3 = 0 gives z2' = z3' = 0. The statement was -+ * quantified over the underlying field, so it applies to Curve25519 -+ * itself and the quadratic twist of Curve25519. It was not proven in -+ * Coq that prime-field arithmetic correctly simulates extension-field -+ * arithmetic on prime-field values. The decoding of the byte array -+ * representation of e was not considered. -+ * -+ * Specification of Montgomery curves in affine coordinates: -+ * -+ * -+ * Proof that these form a group that is isomorphic to a Weierstrass -+ * curve: -+ * -+ * -+ * Coq transcription and correctness proof of the loop -+ * (where scalarbits=255): -+ * -+ * -+ * preconditions: 0 <= e < 2^255 (not necessarily e < order), -+ * fe_invert(0) = 0 -+ */ -+ fe_frombytes(&x1, point); -+ fe_1(&x2); -+ fe_0(&z2); -+ fe_copy(&x3, &x1); -+ fe_1(&z3); -+ -+ for (pos = 254; pos >= 0; --pos) { -+ fe tmp0, tmp1; -+ fe_loose tmp0l, tmp1l; -+ /* loop invariant as of right before the test, for the case -+ * where x1 != 0: -+ * pos >= -1; if z2 = 0 then x2 is nonzero; if z3 = 0 then x3 -+ * is nonzero -+ * let r := e >> (pos+1) in the following equalities of -+ * projective points: -+ * to_xz (r*P) === if swap then (x3, z3) else (x2, z2) -+ * to_xz ((r+1)*P) === if swap then (x2, z2) else (x3, z3) -+ * x1 is the nonzero x coordinate of the nonzero -+ * point (r*P-(r+1)*P) -+ */ -+ unsigned b = 1 & (e[pos / 8] >> (pos & 7)); -+ swap ^= b; -+ fe_cswap(&x2, &x3, swap); -+ fe_cswap(&z2, &z3, swap); -+ swap = b; -+ /* Coq transcription of 
ladderstep formula (called from -+ * transcribed loop): -+ * -+ * -+ * x1 != 0 -+ * x1 = 0 -+ */ -+ fe_sub(&tmp0l, &x3, &z3); -+ fe_sub(&tmp1l, &x2, &z2); -+ fe_add(&x2l, &x2, &z2); -+ fe_add(&z2l, &x3, &z3); -+ fe_mul_tll(&z3, &tmp0l, &x2l); -+ fe_mul_tll(&z2, &z2l, &tmp1l); -+ fe_sq_tl(&tmp0, &tmp1l); -+ fe_sq_tl(&tmp1, &x2l); -+ fe_add(&x3l, &z3, &z2); -+ fe_sub(&z2l, &z3, &z2); -+ fe_mul_ttt(&x2, &tmp1, &tmp0); -+ fe_sub(&tmp1l, &tmp1, &tmp0); -+ fe_sq_tl(&z2, &z2l); -+ fe_mul121666(&z3, &tmp1l); -+ fe_sq_tl(&x3, &x3l); -+ fe_add(&tmp0l, &tmp0, &z3); -+ fe_mul_ttt(&z3, &x1, &z2); -+ fe_mul_tll(&z2, &tmp1l, &tmp0l); -+ } -+ /* here pos=-1, so r=e, so to_xz (e*P) === if swap then (x3, z3) -+ * else (x2, z2) -+ */ -+ fe_cswap(&x2, &x3, swap); -+ fe_cswap(&z2, &z3, swap); -+ -+ fe_invert(&z2, &z2); -+ fe_mul_ttt(&x2, &x2, &z2); -+ fe_tobytes(out, &x2); -+ -+ memzero_explicit(&x1, sizeof(x1)); -+ memzero_explicit(&x2, sizeof(x2)); -+ memzero_explicit(&z2, sizeof(z2)); -+ memzero_explicit(&x3, sizeof(x3)); -+ memzero_explicit(&z3, sizeof(z3)); -+ memzero_explicit(&x2l, sizeof(x2l)); -+ memzero_explicit(&z2l, sizeof(z2l)); -+ memzero_explicit(&x3l, sizeof(x3l)); -+ memzero_explicit(&e, sizeof(e)); -+} ---- /dev/null -+++ b/lib/crypto/curve25519-hacl64.c -@@ -0,0 +1,788 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2016-2017 INRIA and Microsoft Corporation. -+ * Copyright (C) 2018-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * This is a machine-generated formally verified implementation of Curve25519 -+ * ECDH from: . Though originally machine -+ * generated, it has been tweaked to be suitable for use in the kernel. It is -+ * optimized for 64-bit machines that can efficiently work with 128-bit -+ * integer types. 
-+ */ -+ -+#include -+#include -+#include -+ -+typedef __uint128_t u128; -+ -+static __always_inline u64 u64_eq_mask(u64 a, u64 b) -+{ -+ u64 x = a ^ b; -+ u64 minus_x = ~x + (u64)1U; -+ u64 x_or_minus_x = x | minus_x; -+ u64 xnx = x_or_minus_x >> (u32)63U; -+ u64 c = xnx - (u64)1U; -+ return c; -+} -+ -+static __always_inline u64 u64_gte_mask(u64 a, u64 b) -+{ -+ u64 x = a; -+ u64 y = b; -+ u64 x_xor_y = x ^ y; -+ u64 x_sub_y = x - y; -+ u64 x_sub_y_xor_y = x_sub_y ^ y; -+ u64 q = x_xor_y | x_sub_y_xor_y; -+ u64 x_xor_q = x ^ q; -+ u64 x_xor_q_ = x_xor_q >> (u32)63U; -+ u64 c = x_xor_q_ - (u64)1U; -+ return c; -+} -+ -+static __always_inline void modulo_carry_top(u64 *b) -+{ -+ u64 b4 = b[4]; -+ u64 b0 = b[0]; -+ u64 b4_ = b4 & 0x7ffffffffffffLLU; -+ u64 b0_ = b0 + 19 * (b4 >> 51); -+ b[4] = b4_; -+ b[0] = b0_; -+} -+ -+static __always_inline void fproduct_copy_from_wide_(u64 *output, u128 *input) -+{ -+ { -+ u128 xi = input[0]; -+ output[0] = ((u64)(xi)); -+ } -+ { -+ u128 xi = input[1]; -+ output[1] = ((u64)(xi)); -+ } -+ { -+ u128 xi = input[2]; -+ output[2] = ((u64)(xi)); -+ } -+ { -+ u128 xi = input[3]; -+ output[3] = ((u64)(xi)); -+ } -+ { -+ u128 xi = input[4]; -+ output[4] = ((u64)(xi)); -+ } -+} -+ -+static __always_inline void -+fproduct_sum_scalar_multiplication_(u128 *output, u64 *input, u64 s) -+{ -+ output[0] += (u128)input[0] * s; -+ output[1] += (u128)input[1] * s; -+ output[2] += (u128)input[2] * s; -+ output[3] += (u128)input[3] * s; -+ output[4] += (u128)input[4] * s; -+} -+ -+static __always_inline void fproduct_carry_wide_(u128 *tmp) -+{ -+ { -+ u32 ctr = 0; -+ u128 tctr = tmp[ctr]; -+ u128 tctrp1 = tmp[ctr + 1]; -+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; -+ u128 c = ((tctr) >> (51)); -+ tmp[ctr] = ((u128)(r0)); -+ tmp[ctr + 1] = ((tctrp1) + (c)); -+ } -+ { -+ u32 ctr = 1; -+ u128 tctr = tmp[ctr]; -+ u128 tctrp1 = tmp[ctr + 1]; -+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; -+ u128 c = ((tctr) >> (51)); -+ tmp[ctr] = ((u128)(r0)); -+ 
tmp[ctr + 1] = ((tctrp1) + (c)); -+ } -+ -+ { -+ u32 ctr = 2; -+ u128 tctr = tmp[ctr]; -+ u128 tctrp1 = tmp[ctr + 1]; -+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; -+ u128 c = ((tctr) >> (51)); -+ tmp[ctr] = ((u128)(r0)); -+ tmp[ctr + 1] = ((tctrp1) + (c)); -+ } -+ { -+ u32 ctr = 3; -+ u128 tctr = tmp[ctr]; -+ u128 tctrp1 = tmp[ctr + 1]; -+ u64 r0 = ((u64)(tctr)) & 0x7ffffffffffffLLU; -+ u128 c = ((tctr) >> (51)); -+ tmp[ctr] = ((u128)(r0)); -+ tmp[ctr + 1] = ((tctrp1) + (c)); -+ } -+} -+ -+static __always_inline void fmul_shift_reduce(u64 *output) -+{ -+ u64 tmp = output[4]; -+ u64 b0; -+ { -+ u32 ctr = 5 - 0 - 1; -+ u64 z = output[ctr - 1]; -+ output[ctr] = z; -+ } -+ { -+ u32 ctr = 5 - 1 - 1; -+ u64 z = output[ctr - 1]; -+ output[ctr] = z; -+ } -+ { -+ u32 ctr = 5 - 2 - 1; -+ u64 z = output[ctr - 1]; -+ output[ctr] = z; -+ } -+ { -+ u32 ctr = 5 - 3 - 1; -+ u64 z = output[ctr - 1]; -+ output[ctr] = z; -+ } -+ output[0] = tmp; -+ b0 = output[0]; -+ output[0] = 19 * b0; -+} -+ -+static __always_inline void fmul_mul_shift_reduce_(u128 *output, u64 *input, -+ u64 *input21) -+{ -+ u32 i; -+ u64 input2i; -+ { -+ u64 input2i = input21[0]; -+ fproduct_sum_scalar_multiplication_(output, input, input2i); -+ fmul_shift_reduce(input); -+ } -+ { -+ u64 input2i = input21[1]; -+ fproduct_sum_scalar_multiplication_(output, input, input2i); -+ fmul_shift_reduce(input); -+ } -+ { -+ u64 input2i = input21[2]; -+ fproduct_sum_scalar_multiplication_(output, input, input2i); -+ fmul_shift_reduce(input); -+ } -+ { -+ u64 input2i = input21[3]; -+ fproduct_sum_scalar_multiplication_(output, input, input2i); -+ fmul_shift_reduce(input); -+ } -+ i = 4; -+ input2i = input21[i]; -+ fproduct_sum_scalar_multiplication_(output, input, input2i); -+} -+ -+static __always_inline void fmul_fmul(u64 *output, u64 *input, u64 *input21) -+{ -+ u64 tmp[5] = { input[0], input[1], input[2], input[3], input[4] }; -+ { -+ u128 b4; -+ u128 b0; -+ u128 b4_; -+ u128 b0_; -+ u64 i0; -+ u64 i1; -+ u64 i0_; -+ 
u64 i1_; -+ u128 t[5] = { 0 }; -+ fmul_mul_shift_reduce_(t, tmp, input21); -+ fproduct_carry_wide_(t); -+ b4 = t[4]; -+ b0 = t[0]; -+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU)))); -+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); -+ t[4] = b4_; -+ t[0] = b0_; -+ fproduct_copy_from_wide_(output, t); -+ i0 = output[0]; -+ i1 = output[1]; -+ i0_ = i0 & 0x7ffffffffffffLLU; -+ i1_ = i1 + (i0 >> 51); -+ output[0] = i0_; -+ output[1] = i1_; -+ } -+} -+ -+static __always_inline void fsquare_fsquare__(u128 *tmp, u64 *output) -+{ -+ u64 r0 = output[0]; -+ u64 r1 = output[1]; -+ u64 r2 = output[2]; -+ u64 r3 = output[3]; -+ u64 r4 = output[4]; -+ u64 d0 = r0 * 2; -+ u64 d1 = r1 * 2; -+ u64 d2 = r2 * 2 * 19; -+ u64 d419 = r4 * 19; -+ u64 d4 = d419 * 2; -+ u128 s0 = ((((((u128)(r0) * (r0))) + (((u128)(d4) * (r1))))) + -+ (((u128)(d2) * (r3)))); -+ u128 s1 = ((((((u128)(d0) * (r1))) + (((u128)(d4) * (r2))))) + -+ (((u128)(r3 * 19) * (r3)))); -+ u128 s2 = ((((((u128)(d0) * (r2))) + (((u128)(r1) * (r1))))) + -+ (((u128)(d4) * (r3)))); -+ u128 s3 = ((((((u128)(d0) * (r3))) + (((u128)(d1) * (r2))))) + -+ (((u128)(r4) * (d419)))); -+ u128 s4 = ((((((u128)(d0) * (r4))) + (((u128)(d1) * (r3))))) + -+ (((u128)(r2) * (r2)))); -+ tmp[0] = s0; -+ tmp[1] = s1; -+ tmp[2] = s2; -+ tmp[3] = s3; -+ tmp[4] = s4; -+} -+ -+static __always_inline void fsquare_fsquare_(u128 *tmp, u64 *output) -+{ -+ u128 b4; -+ u128 b0; -+ u128 b4_; -+ u128 b0_; -+ u64 i0; -+ u64 i1; -+ u64 i0_; -+ u64 i1_; -+ fsquare_fsquare__(tmp, output); -+ fproduct_carry_wide_(tmp); -+ b4 = tmp[4]; -+ b0 = tmp[0]; -+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU)))); -+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); -+ tmp[4] = b4_; -+ tmp[0] = b0_; -+ fproduct_copy_from_wide_(output, tmp); -+ i0 = output[0]; -+ i1 = output[1]; -+ i0_ = i0 & 0x7ffffffffffffLLU; -+ i1_ = i1 + (i0 >> 51); -+ output[0] = i0_; -+ output[1] = i1_; -+} -+ -+static __always_inline void fsquare_fsquare_times_(u64 *output, u128 
*tmp, -+ u32 count1) -+{ -+ u32 i; -+ fsquare_fsquare_(tmp, output); -+ for (i = 1; i < count1; ++i) -+ fsquare_fsquare_(tmp, output); -+} -+ -+static __always_inline void fsquare_fsquare_times(u64 *output, u64 *input, -+ u32 count1) -+{ -+ u128 t[5]; -+ memcpy(output, input, 5 * sizeof(*input)); -+ fsquare_fsquare_times_(output, t, count1); -+} -+ -+static __always_inline void fsquare_fsquare_times_inplace(u64 *output, -+ u32 count1) -+{ -+ u128 t[5]; -+ fsquare_fsquare_times_(output, t, count1); -+} -+ -+static __always_inline void crecip_crecip(u64 *out, u64 *z) -+{ -+ u64 buf[20] = { 0 }; -+ u64 *a0 = buf; -+ u64 *t00 = buf + 5; -+ u64 *b0 = buf + 10; -+ u64 *t01; -+ u64 *b1; -+ u64 *c0; -+ u64 *a; -+ u64 *t0; -+ u64 *b; -+ u64 *c; -+ fsquare_fsquare_times(a0, z, 1); -+ fsquare_fsquare_times(t00, a0, 2); -+ fmul_fmul(b0, t00, z); -+ fmul_fmul(a0, b0, a0); -+ fsquare_fsquare_times(t00, a0, 1); -+ fmul_fmul(b0, t00, b0); -+ fsquare_fsquare_times(t00, b0, 5); -+ t01 = buf + 5; -+ b1 = buf + 10; -+ c0 = buf + 15; -+ fmul_fmul(b1, t01, b1); -+ fsquare_fsquare_times(t01, b1, 10); -+ fmul_fmul(c0, t01, b1); -+ fsquare_fsquare_times(t01, c0, 20); -+ fmul_fmul(t01, t01, c0); -+ fsquare_fsquare_times_inplace(t01, 10); -+ fmul_fmul(b1, t01, b1); -+ fsquare_fsquare_times(t01, b1, 50); -+ a = buf; -+ t0 = buf + 5; -+ b = buf + 10; -+ c = buf + 15; -+ fmul_fmul(c, t0, b); -+ fsquare_fsquare_times(t0, c, 100); -+ fmul_fmul(t0, t0, c); -+ fsquare_fsquare_times_inplace(t0, 50); -+ fmul_fmul(t0, t0, b); -+ fsquare_fsquare_times_inplace(t0, 5); -+ fmul_fmul(out, t0, a); -+} -+ -+static __always_inline void fsum(u64 *a, u64 *b) -+{ -+ a[0] += b[0]; -+ a[1] += b[1]; -+ a[2] += b[2]; -+ a[3] += b[3]; -+ a[4] += b[4]; -+} -+ -+static __always_inline void fdifference(u64 *a, u64 *b) -+{ -+ u64 tmp[5] = { 0 }; -+ u64 b0; -+ u64 b1; -+ u64 b2; -+ u64 b3; -+ u64 b4; -+ memcpy(tmp, b, 5 * sizeof(*b)); -+ b0 = tmp[0]; -+ b1 = tmp[1]; -+ b2 = tmp[2]; -+ b3 = tmp[3]; -+ b4 = tmp[4]; -+ 
tmp[0] = b0 + 0x3fffffffffff68LLU; -+ tmp[1] = b1 + 0x3ffffffffffff8LLU; -+ tmp[2] = b2 + 0x3ffffffffffff8LLU; -+ tmp[3] = b3 + 0x3ffffffffffff8LLU; -+ tmp[4] = b4 + 0x3ffffffffffff8LLU; -+ { -+ u64 xi = a[0]; -+ u64 yi = tmp[0]; -+ a[0] = yi - xi; -+ } -+ { -+ u64 xi = a[1]; -+ u64 yi = tmp[1]; -+ a[1] = yi - xi; -+ } -+ { -+ u64 xi = a[2]; -+ u64 yi = tmp[2]; -+ a[2] = yi - xi; -+ } -+ { -+ u64 xi = a[3]; -+ u64 yi = tmp[3]; -+ a[3] = yi - xi; -+ } -+ { -+ u64 xi = a[4]; -+ u64 yi = tmp[4]; -+ a[4] = yi - xi; -+ } -+} -+ -+static __always_inline void fscalar(u64 *output, u64 *b, u64 s) -+{ -+ u128 tmp[5]; -+ u128 b4; -+ u128 b0; -+ u128 b4_; -+ u128 b0_; -+ { -+ u64 xi = b[0]; -+ tmp[0] = ((u128)(xi) * (s)); -+ } -+ { -+ u64 xi = b[1]; -+ tmp[1] = ((u128)(xi) * (s)); -+ } -+ { -+ u64 xi = b[2]; -+ tmp[2] = ((u128)(xi) * (s)); -+ } -+ { -+ u64 xi = b[3]; -+ tmp[3] = ((u128)(xi) * (s)); -+ } -+ { -+ u64 xi = b[4]; -+ tmp[4] = ((u128)(xi) * (s)); -+ } -+ fproduct_carry_wide_(tmp); -+ b4 = tmp[4]; -+ b0 = tmp[0]; -+ b4_ = ((b4) & (((u128)(0x7ffffffffffffLLU)))); -+ b0_ = ((b0) + (((u128)(19) * (((u64)(((b4) >> (51)))))))); -+ tmp[4] = b4_; -+ tmp[0] = b0_; -+ fproduct_copy_from_wide_(output, tmp); -+} -+ -+static __always_inline void fmul(u64 *output, u64 *a, u64 *b) -+{ -+ fmul_fmul(output, a, b); -+} -+ -+static __always_inline void crecip(u64 *output, u64 *input) -+{ -+ crecip_crecip(output, input); -+} -+ -+static __always_inline void point_swap_conditional_step(u64 *a, u64 *b, -+ u64 swap1, u32 ctr) -+{ -+ u32 i = ctr - 1; -+ u64 ai = a[i]; -+ u64 bi = b[i]; -+ u64 x = swap1 & (ai ^ bi); -+ u64 ai1 = ai ^ x; -+ u64 bi1 = bi ^ x; -+ a[i] = ai1; -+ b[i] = bi1; -+} -+ -+static __always_inline void point_swap_conditional5(u64 *a, u64 *b, u64 swap1) -+{ -+ point_swap_conditional_step(a, b, swap1, 5); -+ point_swap_conditional_step(a, b, swap1, 4); -+ point_swap_conditional_step(a, b, swap1, 3); -+ point_swap_conditional_step(a, b, swap1, 2); -+ 
point_swap_conditional_step(a, b, swap1, 1); -+} -+ -+static __always_inline void point_swap_conditional(u64 *a, u64 *b, u64 iswap) -+{ -+ u64 swap1 = 0 - iswap; -+ point_swap_conditional5(a, b, swap1); -+ point_swap_conditional5(a + 5, b + 5, swap1); -+} -+ -+static __always_inline void point_copy(u64 *output, u64 *input) -+{ -+ memcpy(output, input, 5 * sizeof(*input)); -+ memcpy(output + 5, input + 5, 5 * sizeof(*input)); -+} -+ -+static __always_inline void addanddouble_fmonty(u64 *pp, u64 *ppq, u64 *p, -+ u64 *pq, u64 *qmqp) -+{ -+ u64 *qx = qmqp; -+ u64 *x2 = pp; -+ u64 *z2 = pp + 5; -+ u64 *x3 = ppq; -+ u64 *z3 = ppq + 5; -+ u64 *x = p; -+ u64 *z = p + 5; -+ u64 *xprime = pq; -+ u64 *zprime = pq + 5; -+ u64 buf[40] = { 0 }; -+ u64 *origx = buf; -+ u64 *origxprime0 = buf + 5; -+ u64 *xxprime0; -+ u64 *zzprime0; -+ u64 *origxprime; -+ xxprime0 = buf + 25; -+ zzprime0 = buf + 30; -+ memcpy(origx, x, 5 * sizeof(*x)); -+ fsum(x, z); -+ fdifference(z, origx); -+ memcpy(origxprime0, xprime, 5 * sizeof(*xprime)); -+ fsum(xprime, zprime); -+ fdifference(zprime, origxprime0); -+ fmul(xxprime0, xprime, z); -+ fmul(zzprime0, x, zprime); -+ origxprime = buf + 5; -+ { -+ u64 *xx0; -+ u64 *zz0; -+ u64 *xxprime; -+ u64 *zzprime; -+ u64 *zzzprime; -+ xx0 = buf + 15; -+ zz0 = buf + 20; -+ xxprime = buf + 25; -+ zzprime = buf + 30; -+ zzzprime = buf + 35; -+ memcpy(origxprime, xxprime, 5 * sizeof(*xxprime)); -+ fsum(xxprime, zzprime); -+ fdifference(zzprime, origxprime); -+ fsquare_fsquare_times(x3, xxprime, 1); -+ fsquare_fsquare_times(zzzprime, zzprime, 1); -+ fmul(z3, zzzprime, qx); -+ fsquare_fsquare_times(xx0, x, 1); -+ fsquare_fsquare_times(zz0, z, 1); -+ { -+ u64 *zzz; -+ u64 *xx; -+ u64 *zz; -+ u64 scalar; -+ zzz = buf + 10; -+ xx = buf + 15; -+ zz = buf + 20; -+ fmul(x2, xx, zz); -+ fdifference(zz, xx); -+ scalar = 121665; -+ fscalar(zzz, zz, scalar); -+ fsum(zzz, xx); -+ fmul(z2, zzz, zz); -+ } -+ } -+} -+ -+static __always_inline void 
-+ladder_smallloop_cmult_small_loop_step(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, -+ u64 *q, u8 byt) -+{ -+ u64 bit0 = (u64)(byt >> 7); -+ u64 bit; -+ point_swap_conditional(nq, nqpq, bit0); -+ addanddouble_fmonty(nq2, nqpq2, nq, nqpq, q); -+ bit = (u64)(byt >> 7); -+ point_swap_conditional(nq2, nqpq2, bit); -+} -+ -+static __always_inline void -+ladder_smallloop_cmult_small_loop_double_step(u64 *nq, u64 *nqpq, u64 *nq2, -+ u64 *nqpq2, u64 *q, u8 byt) -+{ -+ u8 byt1; -+ ladder_smallloop_cmult_small_loop_step(nq, nqpq, nq2, nqpq2, q, byt); -+ byt1 = byt << 1; -+ ladder_smallloop_cmult_small_loop_step(nq2, nqpq2, nq, nqpq, q, byt1); -+} -+ -+static __always_inline void -+ladder_smallloop_cmult_small_loop(u64 *nq, u64 *nqpq, u64 *nq2, u64 *nqpq2, -+ u64 *q, u8 byt, u32 i) -+{ -+ while (i--) { -+ ladder_smallloop_cmult_small_loop_double_step(nq, nqpq, nq2, -+ nqpq2, q, byt); -+ byt <<= 2; -+ } -+} -+ -+static __always_inline void ladder_bigloop_cmult_big_loop(u8 *n1, u64 *nq, -+ u64 *nqpq, u64 *nq2, -+ u64 *nqpq2, u64 *q, -+ u32 i) -+{ -+ while (i--) { -+ u8 byte = n1[i]; -+ ladder_smallloop_cmult_small_loop(nq, nqpq, nq2, nqpq2, q, -+ byte, 4); -+ } -+} -+ -+static void ladder_cmult(u64 *result, u8 *n1, u64 *q) -+{ -+ u64 point_buf[40] = { 0 }; -+ u64 *nq = point_buf; -+ u64 *nqpq = point_buf + 10; -+ u64 *nq2 = point_buf + 20; -+ u64 *nqpq2 = point_buf + 30; -+ point_copy(nqpq, q); -+ nq[0] = 1; -+ ladder_bigloop_cmult_big_loop(n1, nq, nqpq, nq2, nqpq2, q, 32); -+ point_copy(result, nq); -+} -+ -+static __always_inline void format_fexpand(u64 *output, const u8 *input) -+{ -+ const u8 *x00 = input + 6; -+ const u8 *x01 = input + 12; -+ const u8 *x02 = input + 19; -+ const u8 *x0 = input + 24; -+ u64 i0, i1, i2, i3, i4, output0, output1, output2, output3, output4; -+ i0 = get_unaligned_le64(input); -+ i1 = get_unaligned_le64(x00); -+ i2 = get_unaligned_le64(x01); -+ i3 = get_unaligned_le64(x02); -+ i4 = get_unaligned_le64(x0); -+ output0 = i0 & 0x7ffffffffffffLLU; -+ 
output1 = i1 >> 3 & 0x7ffffffffffffLLU; -+ output2 = i2 >> 6 & 0x7ffffffffffffLLU; -+ output3 = i3 >> 1 & 0x7ffffffffffffLLU; -+ output4 = i4 >> 12 & 0x7ffffffffffffLLU; -+ output[0] = output0; -+ output[1] = output1; -+ output[2] = output2; -+ output[3] = output3; -+ output[4] = output4; -+} -+ -+static __always_inline void format_fcontract_first_carry_pass(u64 *input) -+{ -+ u64 t0 = input[0]; -+ u64 t1 = input[1]; -+ u64 t2 = input[2]; -+ u64 t3 = input[3]; -+ u64 t4 = input[4]; -+ u64 t1_ = t1 + (t0 >> 51); -+ u64 t0_ = t0 & 0x7ffffffffffffLLU; -+ u64 t2_ = t2 + (t1_ >> 51); -+ u64 t1__ = t1_ & 0x7ffffffffffffLLU; -+ u64 t3_ = t3 + (t2_ >> 51); -+ u64 t2__ = t2_ & 0x7ffffffffffffLLU; -+ u64 t4_ = t4 + (t3_ >> 51); -+ u64 t3__ = t3_ & 0x7ffffffffffffLLU; -+ input[0] = t0_; -+ input[1] = t1__; -+ input[2] = t2__; -+ input[3] = t3__; -+ input[4] = t4_; -+} -+ -+static __always_inline void format_fcontract_first_carry_full(u64 *input) -+{ -+ format_fcontract_first_carry_pass(input); -+ modulo_carry_top(input); -+} -+ -+static __always_inline void format_fcontract_second_carry_pass(u64 *input) -+{ -+ u64 t0 = input[0]; -+ u64 t1 = input[1]; -+ u64 t2 = input[2]; -+ u64 t3 = input[3]; -+ u64 t4 = input[4]; -+ u64 t1_ = t1 + (t0 >> 51); -+ u64 t0_ = t0 & 0x7ffffffffffffLLU; -+ u64 t2_ = t2 + (t1_ >> 51); -+ u64 t1__ = t1_ & 0x7ffffffffffffLLU; -+ u64 t3_ = t3 + (t2_ >> 51); -+ u64 t2__ = t2_ & 0x7ffffffffffffLLU; -+ u64 t4_ = t4 + (t3_ >> 51); -+ u64 t3__ = t3_ & 0x7ffffffffffffLLU; -+ input[0] = t0_; -+ input[1] = t1__; -+ input[2] = t2__; -+ input[3] = t3__; -+ input[4] = t4_; -+} -+ -+static __always_inline void format_fcontract_second_carry_full(u64 *input) -+{ -+ u64 i0; -+ u64 i1; -+ u64 i0_; -+ u64 i1_; -+ format_fcontract_second_carry_pass(input); -+ modulo_carry_top(input); -+ i0 = input[0]; -+ i1 = input[1]; -+ i0_ = i0 & 0x7ffffffffffffLLU; -+ i1_ = i1 + (i0 >> 51); -+ input[0] = i0_; -+ input[1] = i1_; -+} -+ -+static __always_inline void 
format_fcontract_trim(u64 *input) -+{ -+ u64 a0 = input[0]; -+ u64 a1 = input[1]; -+ u64 a2 = input[2]; -+ u64 a3 = input[3]; -+ u64 a4 = input[4]; -+ u64 mask0 = u64_gte_mask(a0, 0x7ffffffffffedLLU); -+ u64 mask1 = u64_eq_mask(a1, 0x7ffffffffffffLLU); -+ u64 mask2 = u64_eq_mask(a2, 0x7ffffffffffffLLU); -+ u64 mask3 = u64_eq_mask(a3, 0x7ffffffffffffLLU); -+ u64 mask4 = u64_eq_mask(a4, 0x7ffffffffffffLLU); -+ u64 mask = (((mask0 & mask1) & mask2) & mask3) & mask4; -+ u64 a0_ = a0 - (0x7ffffffffffedLLU & mask); -+ u64 a1_ = a1 - (0x7ffffffffffffLLU & mask); -+ u64 a2_ = a2 - (0x7ffffffffffffLLU & mask); -+ u64 a3_ = a3 - (0x7ffffffffffffLLU & mask); -+ u64 a4_ = a4 - (0x7ffffffffffffLLU & mask); -+ input[0] = a0_; -+ input[1] = a1_; -+ input[2] = a2_; -+ input[3] = a3_; -+ input[4] = a4_; -+} -+ -+static __always_inline void format_fcontract_store(u8 *output, u64 *input) -+{ -+ u64 t0 = input[0]; -+ u64 t1 = input[1]; -+ u64 t2 = input[2]; -+ u64 t3 = input[3]; -+ u64 t4 = input[4]; -+ u64 o0 = t1 << 51 | t0; -+ u64 o1 = t2 << 38 | t1 >> 13; -+ u64 o2 = t3 << 25 | t2 >> 26; -+ u64 o3 = t4 << 12 | t3 >> 39; -+ u8 *b0 = output; -+ u8 *b1 = output + 8; -+ u8 *b2 = output + 16; -+ u8 *b3 = output + 24; -+ put_unaligned_le64(o0, b0); -+ put_unaligned_le64(o1, b1); -+ put_unaligned_le64(o2, b2); -+ put_unaligned_le64(o3, b3); -+} -+ -+static __always_inline void format_fcontract(u8 *output, u64 *input) -+{ -+ format_fcontract_first_carry_full(input); -+ format_fcontract_second_carry_full(input); -+ format_fcontract_trim(input); -+ format_fcontract_store(output, input); -+} -+ -+static __always_inline void format_scalar_of_point(u8 *scalar, u64 *point) -+{ -+ u64 *x = point; -+ u64 *z = point + 5; -+ u64 buf[10] __aligned(32) = { 0 }; -+ u64 *zmone = buf; -+ u64 *sc = buf + 5; -+ crecip(zmone, z); -+ fmul(sc, x, zmone); -+ format_fcontract(scalar, sc); -+} -+ -+void curve25519_generic(u8 mypublic[CURVE25519_KEY_SIZE], -+ const u8 secret[CURVE25519_KEY_SIZE], -+ const u8 
basepoint[CURVE25519_KEY_SIZE]) -+{ -+ u64 buf0[10] __aligned(32) = { 0 }; -+ u64 *x0 = buf0; -+ u64 *z = buf0 + 5; -+ u64 *q; -+ format_fexpand(x0, basepoint); -+ z[0] = 1; -+ q = buf0; -+ { -+ u8 e[32] __aligned(32) = { 0 }; -+ u8 *scalar; -+ memcpy(e, secret, 32); -+ curve25519_clamp_secret(e); -+ scalar = e; -+ { -+ u64 buf[15] = { 0 }; -+ u64 *nq = buf; -+ u64 *x = nq; -+ x[0] = 1; -+ ladder_cmult(nq, scalar, q); -+ format_scalar_of_point(mypublic, nq); -+ memzero_explicit(buf, sizeof(buf)); -+ } -+ memzero_explicit(e, sizeof(e)); -+ } -+ memzero_explicit(buf0, sizeof(buf0)); -+} ---- /dev/null -+++ b/lib/crypto/curve25519.c -@@ -0,0 +1,25 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * This is an implementation of the Curve25519 ECDH algorithm, using either -+ * a 32-bit implementation or a 64-bit implementation with 128-bit integers, -+ * depending on what is supported by the target compiler. -+ * -+ * Information: https://cr.yp.to/ecdh.html -+ */ -+ -+#include -+#include -+#include -+ -+const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 }; -+const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 }; -+ -+EXPORT_SYMBOL(curve25519_null_point); -+EXPORT_SYMBOL(curve25519_base_point); -+EXPORT_SYMBOL(curve25519_generic); -+ -+MODULE_LICENSE("GPL v2"); -+MODULE_DESCRIPTION("Curve25519 scalar multiplication"); -+MODULE_AUTHOR("Jason A. 
Donenfeld "); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0026-crypto-curve25519-add-kpp-selftest.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0026-crypto-curve25519-add-kpp-selftest.patch deleted file mode 100644 index b2813aeb6..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0026-crypto-curve25519-add-kpp-selftest.patch +++ /dev/null @@ -1,1268 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:33 +0100 -Subject: [PATCH] crypto: curve25519 - add kpp selftest - -commit f613457a7af085728297bef71233c37faf3c01b1 upstream. - -In preparation of introducing KPP implementations of Curve25519, import -the set of test cases proposed by the Zinc patch set, but converted to -the KPP format. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - crypto/testmgr.c | 6 + - crypto/testmgr.h | 1225 ++++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 1231 insertions(+) - ---- a/crypto/testmgr.c -+++ b/crypto/testmgr.c -@@ -4296,6 +4296,12 @@ static const struct alg_test_desc alg_te - .test = alg_test_null, - .fips_allowed = 1, - }, { -+ .alg = "curve25519", -+ .test = alg_test_kpp, -+ .suite = { -+ .kpp = __VECS(curve25519_tv_template) -+ } -+ }, { - .alg = "deflate", - .test = alg_test_comp, - .fips_allowed = 1, ---- a/crypto/testmgr.h -+++ b/crypto/testmgr.h -@@ -1030,6 +1030,1231 @@ static const struct kpp_testvec dh_tv_te - } - }; - -+static const struct kpp_testvec curve25519_tv_template[] = { -+{ -+ .secret = (u8[32]){ 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d, -+ 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45, -+ 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a, -+ 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a }, -+ .b_public = (u8[32]){ 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4, -+ 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37, -+ 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d, -+ 0xad, 0xfc, 
0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f }, -+ .expected_ss = (u8[32]){ 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, -+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, -+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, -+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+{ -+ .secret = (u8[32]){ 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b, -+ 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6, -+ 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd, -+ 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb }, -+ .b_public = (u8[32]){ 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54, -+ 0x74, 0x8b, 0x7d, 0xdc, 0xb4, 0x3e, 0xf7, 0x5a, -+ 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4, -+ 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a }, -+ .expected_ss = (u8[32]){ 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, -+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, -+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, -+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+{ -+ .secret = (u8[32]){ 1 }, -+ .b_public = (u8[32]){ 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .expected_ss = (u8[32]){ 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64, -+ 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d, -+ 0x0b, 0x95, 0x48, 0xdc, 0x0c, 0xd8, 0x19, 0x98, -+ 0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+{ -+ .secret = (u8[32]){ 1 }, -+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .expected_ss = (u8[32]){ 0xb3, 0x2d, 0x13, 
0x62, 0xc2, 0x48, 0xd6, 0x2f, -+ 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d, -+ 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x08, 0xed, 0xe3, -+ 0x0b, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+{ -+ .secret = (u8[32]){ 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, -+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, -+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, -+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 }, -+ .b_public = (u8[32]){ 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, -+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, -+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, -+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c }, -+ .expected_ss = (u8[32]){ 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, -+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, -+ 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, -+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+{ -+ .secret = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0x0a, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0xfb, 0x9f }, -+ .expected_ss = (u8[32]){ 0x77, 0x52, 0xb6, 0x18, 0xc1, 0x2d, 0x48, 0xd2, -+ 0xc6, 0x93, 0x46, 0x83, 0x81, 0x7c, 0xc6, 0x57, -+ 0xf3, 0x31, 0x03, 0x19, 0x49, 0x48, 0x20, 0x05, -+ 0x42, 0x2b, 0x4e, 0xae, 0x8d, 0x1d, 0x43, 0x23 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+{ -+ .secret = (u8[32]){ 0x8e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x06 }, -+ .expected_ss = (u8[32]){ 0x5a, 0xdf, 0xaa, 0x25, 0x86, 0x8e, 0x32, 0x3d, -+ 0xae, 0x49, 0x62, 0xc1, 0x01, 0x5c, 0xb3, 0x12, -+ 0xe1, 0xc5, 0xc7, 0x9e, 0x95, 0x3f, 0x03, 0x99, -+ 0xb0, 0xba, 0x16, 0x22, 0xf3, 0xb6, 0xf7, 0x0c }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - normal case */ -+{ -+ .secret = (u8[32]){ 0x48, 0x52, 0x83, 0x4d, 0x9d, 0x6b, 0x77, 0xda, -+ 0xde, 0xab, 0xaa, 0xf2, 0xe1, 0x1d, 0xca, 0x66, -+ 0xd1, 0x9f, 0xe7, 0x49, 0x93, 0xa7, 0xbe, 0xc3, -+ 0x6c, 0x6e, 0x16, 0xa0, 0x98, 0x3f, 0xea, 0xba }, -+ .b_public = (u8[32]){ 0x9c, 0x64, 0x7d, 0x9a, 0xe5, 0x89, 0xb9, 0xf5, -+ 0x8f, 0xdc, 0x3c, 0xa4, 0x94, 0x7e, 0xfb, 0xc9, -+ 0x15, 0xc4, 0xb2, 0xe0, 0x8e, 0x74, 0x4a, 0x0e, -+ 0xdf, 0x46, 0x9d, 0xac, 0x59, 0xc8, 0xf8, 0x5a }, -+ .expected_ss = (u8[32]){ 0x87, 0xb7, 0xf2, 0x12, 0xb6, 0x27, 0xf7, 0xa5, -+ 0x4c, 0xa5, 0xe0, 0xbc, 0xda, 0xdd, 0xd5, 0x38, -+ 0x9d, 0x9d, 0xe6, 0x15, 0x6c, 0xdb, 0xcf, 0x8e, -+ 0xbe, 0x14, 0xff, 0xbc, 0xfb, 0x43, 0x65, 0x51 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key on twist */ -+{ -+ .secret = (u8[32]){ 0x58, 0x8c, 0x06, 0x1a, 0x50, 0x80, 0x4a, 0xc4, -+ 0x88, 0xad, 0x77, 0x4a, 0xc7, 0x16, 0xc3, 0xf5, -+ 0xba, 0x71, 0x4b, 0x27, 0x12, 0xe0, 0x48, 0x49, -+ 0x13, 0x79, 0xa5, 0x00, 0x21, 0x19, 0x98, 0xa8 }, -+ .b_public = (u8[32]){ 0x63, 0xaa, 0x40, 0xc6, 0xe3, 0x83, 0x46, 0xc5, -+ 0xca, 0xf2, 0x3a, 0x6d, 0xf0, 0xa5, 0xe6, 0xc8, -+ 0x08, 0x89, 0xa0, 0x86, 0x47, 0xe5, 0x51, 0xb3, -+ 0x56, 0x34, 0x49, 0xbe, 0xfc, 0xfc, 0x97, 0x33 }, -+ .expected_ss = (u8[32]){ 0xb1, 0xa7, 0x07, 0x51, 0x94, 0x95, 0xff, 0xff, -+ 0xb2, 0x98, 0xff, 0x94, 0x17, 0x16, 
0xb0, 0x6d, -+ 0xfa, 0xb8, 0x7c, 0xf8, 0xd9, 0x11, 0x23, 0xfe, -+ 0x2b, 0xe9, 0xa2, 0x33, 0xdd, 0xa2, 0x22, 0x12 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key on twist */ -+{ -+ .secret = (u8[32]){ 0xb0, 0x5b, 0xfd, 0x32, 0xe5, 0x53, 0x25, 0xd9, -+ 0xfd, 0x64, 0x8c, 0xb3, 0x02, 0x84, 0x80, 0x39, -+ 0x00, 0x0b, 0x39, 0x0e, 0x44, 0xd5, 0x21, 0xe5, -+ 0x8a, 0xab, 0x3b, 0x29, 0xa6, 0x96, 0x0b, 0xa8 }, -+ .b_public = (u8[32]){ 0x0f, 0x83, 0xc3, 0x6f, 0xde, 0xd9, 0xd3, 0x2f, -+ 0xad, 0xf4, 0xef, 0xa3, 0xae, 0x93, 0xa9, 0x0b, -+ 0xb5, 0xcf, 0xa6, 0x68, 0x93, 0xbc, 0x41, 0x2c, -+ 0x43, 0xfa, 0x72, 0x87, 0xdb, 0xb9, 0x97, 0x79 }, -+ .expected_ss = (u8[32]){ 0x67, 0xdd, 0x4a, 0x6e, 0x16, 0x55, 0x33, 0x53, -+ 0x4c, 0x0e, 0x3f, 0x17, 0x2e, 0x4a, 0xb8, 0x57, -+ 0x6b, 0xca, 0x92, 0x3a, 0x5f, 0x07, 0xb2, 0xc0, -+ 0x69, 0xb4, 0xc3, 0x10, 0xff, 0x2e, 0x93, 0x5b }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key on twist */ -+{ -+ .secret = (u8[32]){ 0x70, 0xe3, 0x4b, 0xcb, 0xe1, 0xf4, 0x7f, 0xbc, -+ 0x0f, 0xdd, 0xfd, 0x7c, 0x1e, 0x1a, 0xa5, 0x3d, -+ 0x57, 0xbf, 0xe0, 0xf6, 0x6d, 0x24, 0x30, 0x67, -+ 0xb4, 0x24, 0xbb, 0x62, 0x10, 0xbe, 0xd1, 0x9c }, -+ .b_public = (u8[32]){ 0x0b, 0x82, 0x11, 0xa2, 0xb6, 0x04, 0x90, 0x97, -+ 0xf6, 0x87, 0x1c, 0x6c, 0x05, 0x2d, 0x3c, 0x5f, -+ 0xc1, 0xba, 0x17, 0xda, 0x9e, 0x32, 0xae, 0x45, -+ 0x84, 0x03, 0xb0, 0x5b, 0xb2, 0x83, 0x09, 0x2a }, -+ .expected_ss = (u8[32]){ 0x4a, 0x06, 0x38, 0xcf, 0xaa, 0x9e, 0xf1, 0x93, -+ 0x3b, 0x47, 0xf8, 0x93, 0x92, 0x96, 0xa6, 0xb2, -+ 0x5b, 0xe5, 0x41, 0xef, 0x7f, 0x70, 0xe8, 0x44, -+ 0xc0, 0xbc, 0xc0, 0x0b, 0x13, 0x4d, 0xe6, 0x4a }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key on twist */ -+{ -+ .secret = (u8[32]){ 0x68, 0xc1, 0xf3, 0xa6, 0x53, 0xa4, 0xcd, 0xb1, -+ 0xd3, 0x7b, 0xba, 0x94, 0x73, 0x8f, 0x8b, 0x95, -+ 0x7a, 
0x57, 0xbe, 0xb2, 0x4d, 0x64, 0x6e, 0x99, -+ 0x4d, 0xc2, 0x9a, 0x27, 0x6a, 0xad, 0x45, 0x8d }, -+ .b_public = (u8[32]){ 0x34, 0x3a, 0xc2, 0x0a, 0x3b, 0x9c, 0x6a, 0x27, -+ 0xb1, 0x00, 0x81, 0x76, 0x50, 0x9a, 0xd3, 0x07, -+ 0x35, 0x85, 0x6e, 0xc1, 0xc8, 0xd8, 0xfc, 0xae, -+ 0x13, 0x91, 0x2d, 0x08, 0xd1, 0x52, 0xf4, 0x6c }, -+ .expected_ss = (u8[32]){ 0x39, 0x94, 0x91, 0xfc, 0xe8, 0xdf, 0xab, 0x73, -+ 0xb4, 0xf9, 0xf6, 0x11, 0xde, 0x8e, 0xa0, 0xb2, -+ 0x7b, 0x28, 0xf8, 0x59, 0x94, 0x25, 0x0b, 0x0f, -+ 0x47, 0x5d, 0x58, 0x5d, 0x04, 0x2a, 0xc2, 0x07 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key on twist */ -+{ -+ .secret = (u8[32]){ 0xd8, 0x77, 0xb2, 0x6d, 0x06, 0xdf, 0xf9, 0xd9, -+ 0xf7, 0xfd, 0x4c, 0x5b, 0x37, 0x69, 0xf8, 0xcd, -+ 0xd5, 0xb3, 0x05, 0x16, 0xa5, 0xab, 0x80, 0x6b, -+ 0xe3, 0x24, 0xff, 0x3e, 0xb6, 0x9e, 0xa0, 0xb2 }, -+ .b_public = (u8[32]){ 0xfa, 0x69, 0x5f, 0xc7, 0xbe, 0x8d, 0x1b, 0xe5, -+ 0xbf, 0x70, 0x48, 0x98, 0xf3, 0x88, 0xc4, 0x52, -+ 0xba, 0xfd, 0xd3, 0xb8, 0xea, 0xe8, 0x05, 0xf8, -+ 0x68, 0x1a, 0x8d, 0x15, 0xc2, 0xd4, 0xe1, 0x42 }, -+ .expected_ss = (u8[32]){ 0x2c, 0x4f, 0xe1, 0x1d, 0x49, 0x0a, 0x53, 0x86, -+ 0x17, 0x76, 0xb1, 0x3b, 0x43, 0x54, 0xab, 0xd4, -+ 0xcf, 0x5a, 0x97, 0x69, 0x9d, 0xb6, 0xe6, 0xc6, -+ 0x8c, 0x16, 0x26, 0xd0, 0x76, 0x62, 0xf7, 0x58 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case on twist */ -+{ -+ .secret = (u8[32]){ 0x38, 0xdd, 0xe9, 0xf3, 0xe7, 0xb7, 0x99, 0x04, -+ 0x5f, 0x9a, 0xc3, 0x79, 0x3d, 0x4a, 0x92, 0x77, -+ 0xda, 0xde, 0xad, 0xc4, 0x1b, 0xec, 0x02, 0x90, -+ 0xf8, 0x1f, 0x74, 0x4f, 0x73, 0x77, 0x5f, 0x84 }, -+ .b_public = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .expected_ss = (u8[32]){ 0x9a, 0x2c, 0xfe, 0x84, 
0xff, 0x9c, 0x4a, 0x97, -+ 0x39, 0x62, 0x5c, 0xae, 0x4a, 0x3b, 0x82, 0xa9, -+ 0x06, 0x87, 0x7a, 0x44, 0x19, 0x46, 0xf8, 0xd7, -+ 0xb3, 0xd7, 0x95, 0xfe, 0x8f, 0x5d, 0x16, 0x39 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case on twist */ -+{ -+ .secret = (u8[32]){ 0x98, 0x57, 0xa9, 0x14, 0xe3, 0xc2, 0x90, 0x36, -+ 0xfd, 0x9a, 0x44, 0x2b, 0xa5, 0x26, 0xb5, 0xcd, -+ 0xcd, 0xf2, 0x82, 0x16, 0x15, 0x3e, 0x63, 0x6c, -+ 0x10, 0x67, 0x7a, 0xca, 0xb6, 0xbd, 0x6a, 0xa5 }, -+ .b_public = (u8[32]){ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .expected_ss = (u8[32]){ 0x4d, 0xa4, 0xe0, 0xaa, 0x07, 0x2c, 0x23, 0x2e, -+ 0xe2, 0xf0, 0xfa, 0x4e, 0x51, 0x9a, 0xe5, 0x0b, -+ 0x52, 0xc1, 0xed, 0xd0, 0x8a, 0x53, 0x4d, 0x4e, -+ 0xf3, 0x46, 0xc2, 0xe1, 0x06, 0xd2, 0x1d, 0x60 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case on twist */ -+{ -+ .secret = (u8[32]){ 0x48, 0xe2, 0x13, 0x0d, 0x72, 0x33, 0x05, 0xed, -+ 0x05, 0xe6, 0xe5, 0x89, 0x4d, 0x39, 0x8a, 0x5e, -+ 0x33, 0x36, 0x7a, 0x8c, 0x6a, 0xac, 0x8f, 0xcd, -+ 0xf0, 0xa8, 0x8e, 0x4b, 0x42, 0x82, 0x0d, 0xb7 }, -+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xf8, 0xff, -+ 0xff, 0x1f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0x07, 0x00, -+ 0x00, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00 }, -+ .expected_ss = (u8[32]){ 0x9e, 0xd1, 0x0c, 0x53, 0x74, 0x7f, 0x64, 0x7f, -+ 0x82, 0xf4, 0x51, 0x25, 0xd3, 0xde, 0x15, 0xa1, -+ 0xe6, 0xb8, 0x24, 0x49, 0x6a, 0xb4, 0x04, 0x10, -+ 0xff, 0xcc, 0x3c, 0xfe, 0x95, 0x76, 0x0f, 0x3b }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case on twist */ -+{ -+ .secret = (u8[32]){ 0x28, 0xf4, 0x10, 0x11, 0x69, 0x18, 0x51, 0xb3, -+ 
0xa6, 0x2b, 0x64, 0x15, 0x53, 0xb3, 0x0d, 0x0d, -+ 0xfd, 0xdc, 0xb8, 0xff, 0xfc, 0xf5, 0x37, 0x00, -+ 0xa7, 0xbe, 0x2f, 0x6a, 0x87, 0x2e, 0x9f, 0xb0 }, -+ .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x07, 0x00, -+ 0x00, 0xe0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0xf8, 0xff, -+ 0xff, 0x0f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x7f }, -+ .expected_ss = (u8[32]){ 0xcf, 0x72, 0xb4, 0xaa, 0x6a, 0xa1, 0xc9, 0xf8, -+ 0x94, 0xf4, 0x16, 0x5b, 0x86, 0x10, 0x9a, 0xa4, -+ 0x68, 0x51, 0x76, 0x48, 0xe1, 0xf0, 0xcc, 0x70, -+ 0xe1, 0xab, 0x08, 0x46, 0x01, 0x76, 0x50, 0x6b }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case on twist */ -+{ -+ .secret = (u8[32]){ 0x18, 0xa9, 0x3b, 0x64, 0x99, 0xb9, 0xf6, 0xb3, -+ 0x22, 0x5c, 0xa0, 0x2f, 0xef, 0x41, 0x0e, 0x0a, -+ 0xde, 0xc2, 0x35, 0x32, 0x32, 0x1d, 0x2d, 0x8e, -+ 0xf1, 0xa6, 0xd6, 0x02, 0xa8, 0xc6, 0x5b, 0x83 }, -+ .b_public = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f }, -+ .expected_ss = (u8[32]){ 0x5d, 0x50, 0xb6, 0x28, 0x36, 0xbb, 0x69, 0x57, -+ 0x94, 0x10, 0x38, 0x6c, 0xf7, 0xbb, 0x81, 0x1c, -+ 0x14, 0xbf, 0x85, 0xb1, 0xc7, 0xb1, 0x7e, 0x59, -+ 0x24, 0xc7, 0xff, 0xea, 0x91, 0xef, 0x9e, 0x12 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case on twist */ -+{ -+ .secret = (u8[32]){ 0xc0, 0x1d, 0x13, 0x05, 0xa1, 0x33, 0x8a, 0x1f, -+ 0xca, 0xc2, 0xba, 0x7e, 0x2e, 0x03, 0x2b, 0x42, -+ 0x7e, 0x0b, 0x04, 0x90, 0x31, 0x65, 0xac, 0xa9, -+ 0x57, 0xd8, 0xd0, 0x55, 0x3d, 0x87, 0x17, 0xb0 }, -+ .b_public = (u8[32]){ 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, 
-+ .expected_ss = (u8[32]){ 0x19, 0x23, 0x0e, 0xb1, 0x48, 0xd5, 0xd6, 0x7c, -+ 0x3c, 0x22, 0xab, 0x1d, 0xae, 0xff, 0x80, 0xa5, -+ 0x7e, 0xae, 0x42, 0x65, 0xce, 0x28, 0x72, 0x65, -+ 0x7b, 0x2c, 0x80, 0x99, 0xfc, 0x69, 0x8e, 0x50 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for public key */ -+{ -+ .secret = (u8[32]){ 0x38, 0x6f, 0x7f, 0x16, 0xc5, 0x07, 0x31, 0xd6, -+ 0x4f, 0x82, 0xe6, 0xa1, 0x70, 0xb1, 0x42, 0xa4, -+ 0xe3, 0x4f, 0x31, 0xfd, 0x77, 0x68, 0xfc, 0xb8, -+ 0x90, 0x29, 0x25, 0xe7, 0xd1, 0xe2, 0x1a, 0xbe }, -+ .b_public = (u8[32]){ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .expected_ss = (u8[32]){ 0x0f, 0xca, 0xb5, 0xd8, 0x42, 0xa0, 0x78, 0xd7, -+ 0xa7, 0x1f, 0xc5, 0x9b, 0x57, 0xbf, 0xb4, 0xca, -+ 0x0b, 0xe6, 0x87, 0x3b, 0x49, 0xdc, 0xdb, 0x9f, -+ 0x44, 0xe1, 0x4a, 0xe8, 0xfb, 0xdf, 0xa5, 0x42 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for public key */ -+{ -+ .secret = (u8[32]){ 0xe0, 0x23, 0xa2, 0x89, 0xbd, 0x5e, 0x90, 0xfa, -+ 0x28, 0x04, 0xdd, 0xc0, 0x19, 0xa0, 0x5e, 0xf3, -+ 0xe7, 0x9d, 0x43, 0x4b, 0xb6, 0xea, 0x2f, 0x52, -+ 0x2e, 0xcb, 0x64, 0x3a, 0x75, 0x29, 0x6e, 0x95 }, -+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }, -+ .expected_ss = (u8[32]){ 0x54, 0xce, 0x8f, 0x22, 0x75, 0xc0, 0x77, 0xe3, -+ 0xb1, 0x30, 0x6a, 0x39, 0x39, 0xc5, 0xe0, 0x3e, -+ 0xef, 0x6b, 0xbb, 0x88, 0x06, 0x05, 0x44, 0x75, -+ 0x8d, 0x9f, 0xef, 0x59, 0xb0, 0xbc, 0x3e, 0x4f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for public key */ -+{ -+ 
.secret = (u8[32]){ 0x68, 0xf0, 0x10, 0xd6, 0x2e, 0xe8, 0xd9, 0x26, -+ 0x05, 0x3a, 0x36, 0x1c, 0x3a, 0x75, 0xc6, 0xea, -+ 0x4e, 0xbd, 0xc8, 0x60, 0x6a, 0xb2, 0x85, 0x00, -+ 0x3a, 0x6f, 0x8f, 0x40, 0x76, 0xb0, 0x1e, 0x83 }, -+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 }, -+ .expected_ss = (u8[32]){ 0xf1, 0x36, 0x77, 0x5c, 0x5b, 0xeb, 0x0a, 0xf8, -+ 0x11, 0x0a, 0xf1, 0x0b, 0x20, 0x37, 0x23, 0x32, -+ 0x04, 0x3c, 0xab, 0x75, 0x24, 0x19, 0x67, 0x87, -+ 0x75, 0xa2, 0x23, 0xdf, 0x57, 0xc9, 0xd3, 0x0d }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for public key */ -+{ -+ .secret = (u8[32]){ 0x58, 0xeb, 0xcb, 0x35, 0xb0, 0xf8, 0x84, 0x5c, -+ 0xaf, 0x1e, 0xc6, 0x30, 0xf9, 0x65, 0x76, 0xb6, -+ 0x2c, 0x4b, 0x7b, 0x6c, 0x36, 0xb2, 0x9d, 0xeb, -+ 0x2c, 0xb0, 0x08, 0x46, 0x51, 0x75, 0x5c, 0x96 }, -+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff, -+ 0xff, 0xdf, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff, -+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xff, -+ 0xff, 0xf7, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x3f }, -+ .expected_ss = (u8[32]){ 0xbf, 0x9a, 0xff, 0xd0, 0x6b, 0x84, 0x40, 0x85, -+ 0x58, 0x64, 0x60, 0x96, 0x2e, 0xf2, 0x14, 0x6f, -+ 0xf3, 0xd4, 0x53, 0x3d, 0x94, 0x44, 0xaa, 0xb0, -+ 0x06, 0xeb, 0x88, 0xcc, 0x30, 0x54, 0x40, 0x7d }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for public key */ -+{ -+ .secret = (u8[32]){ 0x18, 0x8c, 0x4b, 0xc5, 0xb9, 0xc4, 0x4b, 0x38, -+ 0xbb, 0x65, 0x8b, 0x9b, 0x2a, 0xe8, 0x2d, 0x5b, -+ 0x01, 0x01, 0x5e, 0x09, 0x31, 0x84, 0xb1, 0x7c, -+ 0xb7, 0x86, 0x35, 0x03, 0xa7, 0x83, 0xe1, 0xbb }, -+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, -+ .expected_ss = (u8[32]){ 0xd4, 0x80, 0xde, 0x04, 0xf6, 0x99, 0xcb, 0x3b, -+ 0xe0, 0x68, 0x4a, 0x9c, 0xc2, 0xe3, 0x12, 0x81, -+ 0xea, 0x0b, 0xc5, 0xa9, 0xdc, 0xc1, 0x57, 0xd3, -+ 0xd2, 0x01, 0x58, 0xd4, 0x6c, 0xa5, 0x24, 0x6d }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for public key */ -+{ -+ .secret = (u8[32]){ 0xe0, 0x6c, 0x11, 0xbb, 0x2e, 0x13, 0xce, 0x3d, -+ 0xc7, 0x67, 0x3f, 0x67, 0xf5, 0x48, 0x22, 0x42, -+ 0x90, 0x94, 0x23, 0xa9, 0xae, 0x95, 0xee, 0x98, -+ 0x6a, 0x98, 0x8d, 0x98, 0xfa, 0xee, 0x23, 0xa2 }, -+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f }, -+ .expected_ss = (u8[32]){ 0x4c, 0x44, 0x01, 0xcc, 0xe6, 0xb5, 0x1e, 0x4c, -+ 0xb1, 0x8f, 0x27, 0x90, 0x24, 0x6c, 0x9b, 0xf9, -+ 0x14, 0xdb, 0x66, 0x77, 0x50, 0xa1, 0xcb, 0x89, -+ 0x06, 0x90, 0x92, 0xaf, 0x07, 0x29, 0x22, 0x76 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for public key */ -+{ -+ .secret = (u8[32]){ 0xc0, 0x65, 0x8c, 0x46, 0xdd, 0xe1, 0x81, 0x29, -+ 0x29, 0x38, 0x77, 0x53, 0x5b, 0x11, 0x62, 0xb6, -+ 0xf9, 0xf5, 0x41, 0x4a, 0x23, 0xcf, 0x4d, 0x2c, -+ 0xbc, 0x14, 0x0a, 0x4d, 0x99, 0xda, 0x2b, 0x8f }, -+ .b_public = (u8[32]){ 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .expected_ss = (u8[32]){ 0x57, 0x8b, 0xa8, 0xcc, 0x2d, 0xbd, 0xc5, 0x75, -+ 0xaf, 0xcf, 0x9d, 0xf2, 0xb3, 0xee, 0x61, 0x89, -+ 0xf5, 0x33, 0x7d, 0x68, 0x54, 0xc7, 0x9b, 0x4c, -+ 0xe1, 0x65, 0xea, 0x12, 0x29, 0x3b, 0x3a, 0x0f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ 
.expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0xf0, 0x1e, 0x48, 0xda, 0xfa, 0xc9, 0xd7, 0xbc, -+ 0xf5, 0x89, 0xcb, 0xc3, 0x82, 0xc8, 0x78, 0xd1, -+ 0x8b, 0xda, 0x35, 0x50, 0x58, 0x9f, 0xfb, 0x5d, -+ 0x50, 0xb5, 0x23, 0xbe, 0xbe, 0x32, 0x9d, 0xae }, -+ .b_public = (u8[32]){ 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .expected_ss = (u8[32]){ 0xbd, 0x36, 0xa0, 0x79, 0x0e, 0xb8, 0x83, 0x09, -+ 0x8c, 0x98, 0x8b, 0x21, 0x78, 0x67, 0x73, 0xde, -+ 0x0b, 0x3a, 0x4d, 0xf1, 0x62, 0x28, 0x2c, 0xf1, -+ 0x10, 0xde, 0x18, 0xdd, 0x48, 0x4c, 0xe7, 0x4b }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0x28, 0x87, 0x96, 0xbc, 0x5a, 0xff, 0x4b, 0x81, -+ 0xa3, 0x75, 0x01, 0x75, 0x7b, 0xc0, 0x75, 0x3a, -+ 0x3c, 0x21, 0x96, 0x47, 0x90, 0xd3, 0x86, 0x99, -+ 0x30, 0x8d, 0xeb, 0xc1, 0x7a, 0x6e, 0xaf, 0x8d }, -+ .b_public = (u8[32]){ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .expected_ss = (u8[32]){ 0xb4, 0xe0, 0xdd, 0x76, 0xda, 0x7b, 0x07, 0x17, -+ 0x28, 0xb6, 0x1f, 0x85, 0x67, 0x71, 0xaa, 0x35, -+ 0x6e, 0x57, 0xed, 0xa7, 0x8a, 0x5b, 0x16, 0x55, -+ 0xcc, 0x38, 0x20, 0xfb, 0x5f, 0x85, 0x4c, 0x5c }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0x98, 0xdf, 0x84, 0x5f, 0x66, 0x51, 0xbf, 0x11, -+ 0x38, 0x22, 0x1f, 0x11, 0x90, 0x41, 0xf7, 0x2b, -+ 0x6d, 0xbc, 0x3c, 0x4a, 0xce, 0x71, 0x43, 0xd9, -+ 0x9f, 0xd5, 0x5a, 0xd8, 0x67, 0x48, 0x0d, 0xa8 }, -+ .b_public = (u8[32]){ 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .expected_ss = (u8[32]){ 0x6f, 0xdf, 0x6c, 0x37, 0x61, 0x1d, 0xbd, 0x53, -+ 0x04, 0xdc, 0x0f, 0x2e, 0xb7, 0xc9, 0x51, 0x7e, -+ 0xb3, 0xc5, 0x0e, 0x12, 0xfd, 0x05, 0x0a, 0xc6, -+ 0xde, 0xc2, 0x70, 0x71, 0xd4, 0xbf, 0xc0, 0x34 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0xf0, 0x94, 0x98, 0xe4, 0x6f, 0x02, 0xf8, 0x78, -+ 0x82, 0x9e, 0x78, 0xb8, 0x03, 0xd3, 0x16, 0xa2, -+ 0xed, 0x69, 0x5d, 0x04, 0x98, 0xa0, 0x8a, 0xbd, -+ 0xf8, 0x27, 0x69, 0x30, 0xe2, 0x4e, 0xdc, 0xb0 }, -+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .expected_ss = (u8[32]){ 0x4c, 0x8f, 0xc4, 0xb1, 0xc6, 0xab, 0x88, 0xfb, -+ 0x21, 0xf1, 0x8f, 0x6d, 0x4c, 0x81, 0x02, 0x40, -+ 0xd4, 0xe9, 0x46, 0x51, 0xba, 0x44, 0xf7, 0xa2, -+ 0xc8, 0x63, 0xce, 0xc7, 0xdc, 0x56, 0x60, 0x2d }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0x18, 0x13, 0xc1, 0x0a, 0x5c, 0x7f, 0x21, 0xf9, -+ 0x6e, 0x17, 0xf2, 0x88, 0xc0, 0xcc, 0x37, 0x60, -+ 0x7c, 0x04, 0xc5, 0xf5, 0xae, 0xa2, 0xdb, 0x13, -+ 0x4f, 0x9e, 0x2f, 0xfc, 0x66, 0xbd, 0x9d, 0xb8 }, -+ .b_public = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, -+ .expected_ss = (u8[32]){ 0x1c, 0xd0, 0xb2, 0x82, 0x67, 0xdc, 0x54, 0x1c, -+ 0x64, 0x2d, 0x6d, 0x7d, 0xca, 0x44, 0xa8, 0xb3, -+ 0x8a, 0x63, 0x73, 0x6e, 0xef, 0x5c, 0x4e, 0x65, -+ 0x01, 0xff, 0xbb, 0xb1, 0x78, 0x0c, 0x03, 0x3c }, -+ 
.secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0x78, 0x57, 0xfb, 0x80, 0x86, 0x53, 0x64, 0x5a, -+ 0x0b, 0xeb, 0x13, 0x8a, 0x64, 0xf5, 0xf4, 0xd7, -+ 0x33, 0xa4, 0x5e, 0xa8, 0x4c, 0x3c, 0xda, 0x11, -+ 0xa9, 0xc0, 0x6f, 0x7e, 0x71, 0x39, 0x14, 0x9e }, -+ .b_public = (u8[32]){ 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, -+ .expected_ss = (u8[32]){ 0x87, 0x55, 0xbe, 0x01, 0xc6, 0x0a, 0x7e, 0x82, -+ 0x5c, 0xff, 0x3e, 0x0e, 0x78, 0xcb, 0x3a, 0xa4, -+ 0x33, 0x38, 0x61, 0x51, 0x6a, 0xa5, 0x9b, 0x1c, -+ 0x51, 0xa8, 0xb2, 0xa5, 0x43, 0xdf, 0xa8, 0x22 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0xe0, 0x3a, 0xa8, 0x42, 0xe2, 0xab, 0xc5, 0x6e, -+ 0x81, 0xe8, 0x7b, 0x8b, 0x9f, 0x41, 0x7b, 0x2a, -+ 0x1e, 0x59, 0x13, 0xc7, 0x23, 0xee, 0xd2, 0x8d, -+ 0x75, 0x2f, 0x8d, 0x47, 0xa5, 0x9f, 0x49, 0x8f }, -+ .b_public = (u8[32]){ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, -+ .expected_ss = (u8[32]){ 0x54, 0xc9, 0xa1, 0xed, 0x95, 0xe5, 0x46, 0xd2, -+ 0x78, 0x22, 0xa3, 0x60, 0x93, 0x1d, 0xda, 0x60, -+ 0xa1, 0xdf, 0x04, 0x9d, 0xa6, 0xf9, 0x04, 0x25, -+ 0x3c, 0x06, 0x12, 0xbb, 0xdc, 0x08, 0x74, 0x76 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0xf8, 0xf7, 0x07, 0xb7, 0x99, 0x9b, 0x18, 0xcb, -+ 0x0d, 0x6b, 0x96, 0x12, 0x4f, 0x20, 0x45, 0x97, -+ 0x2c, 0xa2, 0x74, 0xbf, 0xc1, 0x54, 0xad, 0x0c, -+ 0x87, 0x03, 0x8c, 0x24, 0xc6, 0xd0, 0xd4, 0xb2 }, -+ .b_public = (u8[32]){ 0xda, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .expected_ss = (u8[32]){ 0xcc, 0x1f, 0x40, 0xd7, 0x43, 0xcd, 0xc2, 0x23, -+ 0x0e, 0x10, 0x43, 0xda, 0xba, 0x8b, 0x75, 0xe8, -+ 0x10, 0xf1, 0xfb, 0xab, 0x7f, 0x25, 0x52, 0x69, -+ 0xbd, 0x9e, 0xbb, 0x29, 0xe6, 0xbf, 0x49, 0x4f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0xa0, 0x34, 0xf6, 0x84, 0xfa, 0x63, 0x1e, 0x1a, -+ 0x34, 0x81, 0x18, 0xc1, 0xce, 0x4c, 0x98, 0x23, -+ 0x1f, 0x2d, 0x9e, 0xec, 0x9b, 0xa5, 0x36, 0x5b, -+ 0x4a, 0x05, 0xd6, 0x9a, 0x78, 0x5b, 0x07, 0x96 }, -+ .b_public = (u8[32]){ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .expected_ss = (u8[32]){ 0x54, 0x99, 0x8e, 0xe4, 0x3a, 0x5b, 0x00, 0x7b, -+ 0xf4, 0x99, 0xf0, 0x78, 0xe7, 0x36, 0x52, 0x44, -+ 0x00, 0xa8, 0xb5, 0xc7, 0xe9, 0xb9, 0xb4, 0x37, -+ 0x71, 0x74, 0x8c, 0x7c, 0xdf, 0x88, 0x04, 0x12 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0x30, 0xb6, 0xc6, 0xa0, 0xf2, 0xff, 0xa6, 0x80, -+ 0x76, 0x8f, 0x99, 0x2b, 0xa8, 0x9e, 0x15, 0x2d, -+ 0x5b, 0xc9, 0x89, 0x3d, 0x38, 0xc9, 0x11, 0x9b, -+ 0xe4, 0xf7, 0x67, 0xbf, 0xab, 0x6e, 0x0c, 0xa5 }, -+ .b_public = (u8[32]){ 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .expected_ss = (u8[32]){ 0xea, 0xd9, 0xb3, 0x8e, 0xfd, 0xd7, 0x23, 0x63, -+ 0x79, 0x34, 0xe5, 0x5a, 0xb7, 0x17, 0xa7, 0xae, -+ 0x09, 0xeb, 0x86, 0xa2, 0x1d, 0xc3, 0x6a, 0x3f, -+ 0xee, 0xb8, 
0x8b, 0x75, 0x9e, 0x39, 0x1e, 0x09 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0x90, 0x1b, 0x9d, 0xcf, 0x88, 0x1e, 0x01, 0xe0, -+ 0x27, 0x57, 0x50, 0x35, 0xd4, 0x0b, 0x43, 0xbd, -+ 0xc1, 0xc5, 0x24, 0x2e, 0x03, 0x08, 0x47, 0x49, -+ 0x5b, 0x0c, 0x72, 0x86, 0x46, 0x9b, 0x65, 0x91 }, -+ .b_public = (u8[32]){ 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .expected_ss = (u8[32]){ 0x60, 0x2f, 0xf4, 0x07, 0x89, 0xb5, 0x4b, 0x41, -+ 0x80, 0x59, 0x15, 0xfe, 0x2a, 0x62, 0x21, 0xf0, -+ 0x7a, 0x50, 0xff, 0xc2, 0xc3, 0xfc, 0x94, 0xcf, -+ 0x61, 0xf1, 0x3d, 0x79, 0x04, 0xe8, 0x8e, 0x0e }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0x80, 0x46, 0x67, 0x7c, 0x28, 0xfd, 0x82, 0xc9, -+ 0xa1, 0xbd, 0xb7, 0x1a, 0x1a, 0x1a, 0x34, 0xfa, -+ 0xba, 0x12, 0x25, 0xe2, 0x50, 0x7f, 0xe3, 0xf5, -+ 0x4d, 0x10, 0xbd, 0x5b, 0x0d, 0x86, 0x5f, 0x8e }, -+ .b_public = (u8[32]){ 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .expected_ss = (u8[32]){ 0xe0, 0x0a, 0xe8, 0xb1, 0x43, 0x47, 0x12, 0x47, -+ 0xba, 0x24, 0xf1, 0x2c, 0x88, 0x55, 0x36, 0xc3, -+ 0xcb, 0x98, 0x1b, 0x58, 0xe1, 0xe5, 0x6b, 0x2b, -+ 0xaf, 0x35, 0xc1, 0x2a, 0xe1, 0xf7, 0x9c, 0x26 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0x60, 0x2f, 0x7e, 0x2f, 0x68, 0xa8, 0x46, 0xb8, -+ 0x2c, 0xc2, 0x69, 0xb1, 0xd4, 0x8e, 0x93, 0x98, -+ 0x86, 0xae, 0x54, 0xfd, 0x63, 0x6c, 0x1f, 0xe0, -+ 0x74, 0xd7, 0x10, 0x12, 0x7d, 0x47, 0x24, 0x91 }, 
-+ .b_public = (u8[32]){ 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .expected_ss = (u8[32]){ 0x98, 0xcb, 0x9b, 0x50, 0xdd, 0x3f, 0xc2, 0xb0, -+ 0xd4, 0xf2, 0xd2, 0xbf, 0x7c, 0x5c, 0xfd, 0xd1, -+ 0x0c, 0x8f, 0xcd, 0x31, 0xfc, 0x40, 0xaf, 0x1a, -+ 0xd4, 0x4f, 0x47, 0xc1, 0x31, 0x37, 0x63, 0x62 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0x60, 0x88, 0x7b, 0x3d, 0xc7, 0x24, 0x43, 0x02, -+ 0x6e, 0xbe, 0xdb, 0xbb, 0xb7, 0x06, 0x65, 0xf4, -+ 0x2b, 0x87, 0xad, 0xd1, 0x44, 0x0e, 0x77, 0x68, -+ 0xfb, 0xd7, 0xe8, 0xe2, 0xce, 0x5f, 0x63, 0x9d }, -+ .b_public = (u8[32]){ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .expected_ss = (u8[32]){ 0x38, 0xd6, 0x30, 0x4c, 0x4a, 0x7e, 0x6d, 0x9f, -+ 0x79, 0x59, 0x33, 0x4f, 0xb5, 0x24, 0x5b, 0xd2, -+ 0xc7, 0x54, 0x52, 0x5d, 0x4c, 0x91, 0xdb, 0x95, -+ 0x02, 0x06, 0x92, 0x62, 0x34, 0xc1, 0xf6, 0x33 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0x78, 0xd3, 0x1d, 0xfa, 0x85, 0x44, 0x97, 0xd7, -+ 0x2d, 0x8d, 0xef, 0x8a, 0x1b, 0x7f, 0xb0, 0x06, -+ 0xce, 0xc2, 0xd8, 0xc4, 0x92, 0x46, 0x47, 0xc9, -+ 0x38, 0x14, 0xae, 0x56, 0xfa, 0xed, 0xa4, 0x95 }, -+ .b_public = (u8[32]){ 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .expected_ss = (u8[32]){ 0x78, 0x6c, 0xd5, 0x49, 0x96, 0xf0, 0x14, 0xa5, -+ 0xa0, 0x31, 0xec, 0x14, 0xdb, 0x81, 0x2e, 0xd0, -+ 0x83, 0x55, 0x06, 0x1f, 
0xdb, 0x5d, 0xe6, 0x80, -+ 0xa8, 0x00, 0xac, 0x52, 0x1f, 0x31, 0x8e, 0x23 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - public key >= p */ -+{ -+ .secret = (u8[32]){ 0xc0, 0x4c, 0x5b, 0xae, 0xfa, 0x83, 0x02, 0xdd, -+ 0xde, 0xd6, 0xa4, 0xbb, 0x95, 0x77, 0x61, 0xb4, -+ 0xeb, 0x97, 0xae, 0xfa, 0x4f, 0xc3, 0xb8, 0x04, -+ 0x30, 0x85, 0xf9, 0x6a, 0x56, 0x59, 0xb3, 0xa5 }, -+ .b_public = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .expected_ss = (u8[32]){ 0x29, 0xae, 0x8b, 0xc7, 0x3e, 0x9b, 0x10, 0xa0, -+ 0x8b, 0x4f, 0x68, 0x1c, 0x43, 0xc3, 0xe0, 0xac, -+ 0x1a, 0x17, 0x1d, 0x31, 0xb3, 0x8f, 0x1a, 0x48, -+ 0xef, 0xba, 0x29, 0xae, 0x63, 0x9e, 0xa1, 0x34 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - RFC 7748 */ -+{ -+ .secret = (u8[32]){ 0xa0, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, -+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, -+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, -+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0x44 }, -+ .b_public = (u8[32]){ 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, -+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, -+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, -+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c }, -+ .expected_ss = (u8[32]){ 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, -+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, -+ 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, -+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - RFC 7748 */ -+{ -+ .secret = (u8[32]){ 0x48, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c, -+ 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5, -+ 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4, -+ 0x2c, 0xa4, 0x16, 0x9e, 
0x79, 0x18, 0xba, 0x4d }, -+ .b_public = (u8[32]){ 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3, -+ 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c, -+ 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e, -+ 0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x13 }, -+ .expected_ss = (u8[32]){ 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d, -+ 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8, -+ 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52, -+ 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0x0a, 0xb4, 0xe7, 0x63, 0x80, 0xd8, 0x4d, 0xde, -+ 0x4f, 0x68, 0x33, 0xc5, 0x8f, 0x2a, 0x9f, 0xb8, -+ 0xf8, 0x3b, 0xb0, 0x16, 0x9b, 0x17, 0x2b, 0xe4, -+ 0xb6, 0xe0, 0x59, 0x28, 0x87, 0x74, 0x1a, 0x36 }, -+ .expected_ss = (u8[32]){ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0x89, 0xe1, 0x0d, 0x57, 0x01, 0xb4, 0x33, 0x7d, -+ 0x2d, 0x03, 0x21, 0x81, 0x53, 0x8b, 0x10, 0x64, -+ 0xbd, 0x40, 0x84, 0x40, 0x1c, 0xec, 0xa1, 0xfd, -+ 0x12, 0x66, 0x3a, 0x19, 0x59, 0x38, 0x80, 0x00 }, -+ .expected_ss = (u8[32]){ 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0x2b, 0x55, 0xd3, 0xaa, 0x4a, 0x8f, 0x80, 0xc8, -+ 0xc0, 0xb2, 0xae, 0x5f, 0x93, 0x3e, 0x85, 0xaf, -+ 0x49, 0xbe, 0xac, 0x36, 0xc2, 0xfa, 0x73, 0x94, -+ 0xba, 0xb7, 0x6c, 0x89, 0x33, 0xf8, 0xf8, 0x1d }, -+ .expected_ss = (u8[32]){ 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0x63, 0xe5, 0xb1, 0xfe, 0x96, 0x01, 0xfe, 0x84, -+ 0x38, 0x5d, 0x88, 0x66, 0xb0, 0x42, 0x12, 0x62, -+ 0xf7, 0x8f, 0xbf, 0xa5, 0xaf, 0xf9, 0x58, 0x5e, -+ 0x62, 0x66, 0x79, 0xb1, 0x85, 0x47, 0xd9, 0x59 }, -+ .expected_ss = (u8[32]){ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 
0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0xe4, 0x28, 0xf3, 0xda, 0xc1, 0x78, 0x09, 0xf8, -+ 0x27, 0xa5, 0x22, 0xce, 0x32, 0x35, 0x50, 0x58, -+ 0xd0, 0x73, 0x69, 0x36, 0x4a, 0xa7, 0x89, 0x02, -+ 0xee, 0x10, 0x13, 0x9b, 0x9f, 0x9d, 0xd6, 0x53 }, -+ .expected_ss = (u8[32]){ 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0xb3, 0xb5, 0x0e, 0x3e, 0xd3, 0xa4, 0x07, 0xb9, -+ 0x5d, 0xe9, 0x42, 0xef, 0x74, 0x57, 0x5b, 0x5a, -+ 0xb8, 0xa1, 0x0c, 0x09, 0xee, 0x10, 0x35, 0x44, -+ 0xd6, 0x0b, 0xdf, 0xed, 0x81, 0x38, 0xab, 0x2b }, -+ .expected_ss = (u8[32]){ 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0x21, 0x3f, 0xff, 0xe9, 0x3d, 0x5e, 0xa8, 0xcd, -+ 0x24, 0x2e, 0x46, 0x28, 0x44, 0x02, 0x99, 0x22, -+ 0xc4, 0x3c, 0x77, 0xc9, 0xe3, 0xe4, 0x2f, 0x56, -+ 0x2f, 0x48, 0x5d, 0x24, 0xc5, 0x01, 0xa2, 0x0b }, -+ 
.expected_ss = (u8[32]){ 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0x91, 0xb2, 0x32, 0xa1, 0x78, 0xb3, 0xcd, 0x53, -+ 0x09, 0x32, 0x44, 0x1e, 0x61, 0x39, 0x41, 0x8f, -+ 0x72, 0x17, 0x22, 0x92, 0xf1, 0xda, 0x4c, 0x18, -+ 0x34, 0xfc, 0x5e, 0xbf, 0xef, 0xb5, 0x1e, 0x3f }, -+ .expected_ss = (u8[32]){ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0x04, 0x5c, 0x6e, 0x11, 0xc5, 0xd3, 0x32, 0x55, -+ 0x6c, 0x78, 0x22, 0xfe, 0x94, 0xeb, 0xf8, 0x9b, -+ 0x56, 0xa3, 0x87, 0x8d, 0xc2, 0x7c, 0xa0, 0x79, -+ 0x10, 0x30, 0x58, 0x84, 0x9f, 0xab, 0xcb, 0x4f }, -+ .expected_ss = (u8[32]){ 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ 
-+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0x1c, 0xa2, 0x19, 0x0b, 0x71, 0x16, 0x35, 0x39, -+ 0x06, 0x3c, 0x35, 0x77, 0x3b, 0xda, 0x0c, 0x9c, -+ 0x92, 0x8e, 0x91, 0x36, 0xf0, 0x62, 0x0a, 0xeb, -+ 0x09, 0x3f, 0x09, 0x91, 0x97, 0xb7, 0xf7, 0x4e }, -+ .expected_ss = (u8[32]){ 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0xf7, 0x6e, 0x90, 0x10, 0xac, 0x33, 0xc5, 0x04, -+ 0x3b, 0x2d, 0x3b, 0x76, 0xa8, 0x42, 0x17, 0x10, -+ 0x00, 0xc4, 0x91, 0x62, 0x22, 0xe9, 0xe8, 0x58, -+ 0x97, 0xa0, 0xae, 0xc7, 0xf6, 0x35, 0x0b, 0x3c }, -+ .expected_ss = (u8[32]){ 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0xbb, 0x72, 0x68, 0x8d, 0x8f, 0x8a, 0xa7, 0xa3, -+ 0x9c, 0xd6, 0x06, 0x0c, 0xd5, 0xc8, 0x09, 0x3c, -+ 0xde, 
0xc6, 0xfe, 0x34, 0x19, 0x37, 0xc3, 0x88, -+ 0x6a, 0x99, 0x34, 0x6c, 0xd0, 0x7f, 0xaa, 0x55 }, -+ .expected_ss = (u8[32]){ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0x88, 0xfd, 0xde, 0xa1, 0x93, 0x39, 0x1c, 0x6a, -+ 0x59, 0x33, 0xef, 0x9b, 0x71, 0x90, 0x15, 0x49, -+ 0x44, 0x72, 0x05, 0xaa, 0xe9, 0xda, 0x92, 0x8a, -+ 0x6b, 0x91, 0xa3, 0x52, 0xba, 0x10, 0xf4, 0x1f }, -+ .expected_ss = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - edge case for shared secret */ -+{ -+ .secret = (u8[32]){ 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .b_public = (u8[32]){ 0x30, 0x3b, 0x39, 0x2f, 0x15, 0x31, 0x16, 0xca, -+ 0xd9, 0xcc, 0x68, 0x2a, 0x00, 0xcc, 0xc4, 0x4c, -+ 0x95, 0xff, 0x0d, 0x3b, 0xbe, 0x56, 0x8b, 0xeb, -+ 0x6c, 0x4e, 0x73, 0x9b, 0xaf, 0xdc, 0x2c, 0x68 }, -+ .expected_ss = (u8[32]){ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 }, -+ .secret_size = 32, -+ 
.b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - checking for overflow */ -+{ -+ .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, -+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, -+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, -+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, -+ .b_public = (u8[32]){ 0xfd, 0x30, 0x0a, 0xeb, 0x40, 0xe1, 0xfa, 0x58, -+ 0x25, 0x18, 0x41, 0x2b, 0x49, 0xb2, 0x08, 0xa7, -+ 0x84, 0x2b, 0x1e, 0x1f, 0x05, 0x6a, 0x04, 0x01, -+ 0x78, 0xea, 0x41, 0x41, 0x53, 0x4f, 0x65, 0x2d }, -+ .expected_ss = (u8[32]){ 0xb7, 0x34, 0x10, 0x5d, 0xc2, 0x57, 0x58, 0x5d, -+ 0x73, 0xb5, 0x66, 0xcc, 0xb7, 0x6f, 0x06, 0x27, -+ 0x95, 0xcc, 0xbe, 0xc8, 0x91, 0x28, 0xe5, 0x2b, -+ 0x02, 0xf3, 0xe5, 0x96, 0x39, 0xf1, 0x3c, 0x46 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - checking for overflow */ -+{ -+ .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, -+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, -+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, -+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, -+ .b_public = (u8[32]){ 0xc8, 0xef, 0x79, 0xb5, 0x14, 0xd7, 0x68, 0x26, -+ 0x77, 0xbc, 0x79, 0x31, 0xe0, 0x6e, 0xe5, 0xc2, -+ 0x7c, 0x9b, 0x39, 0x2b, 0x4a, 0xe9, 0x48, 0x44, -+ 0x73, 0xf5, 0x54, 0xe6, 0x67, 0x8e, 0xcc, 0x2e }, -+ .expected_ss = (u8[32]){ 0x64, 0x7a, 0x46, 0xb6, 0xfc, 0x3f, 0x40, 0xd6, -+ 0x21, 0x41, 0xee, 0x3c, 0xee, 0x70, 0x6b, 0x4d, -+ 0x7a, 0x92, 0x71, 0x59, 0x3a, 0x7b, 0x14, 0x3e, -+ 0x8e, 0x2e, 0x22, 0x79, 0x88, 0x3e, 0x45, 0x50 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - checking for overflow */ -+{ -+ .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, -+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, -+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, -+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, -+ .b_public = (u8[32]){ 0x64, 0xae, 0xac, 
0x25, 0x04, 0x14, 0x48, 0x61, -+ 0x53, 0x2b, 0x7b, 0xbc, 0xb6, 0xc8, 0x7d, 0x67, -+ 0xdd, 0x4c, 0x1f, 0x07, 0xeb, 0xc2, 0xe0, 0x6e, -+ 0xff, 0xb9, 0x5a, 0xec, 0xc6, 0x17, 0x0b, 0x2c }, -+ .expected_ss = (u8[32]){ 0x4f, 0xf0, 0x3d, 0x5f, 0xb4, 0x3c, 0xd8, 0x65, -+ 0x7a, 0x3c, 0xf3, 0x7c, 0x13, 0x8c, 0xad, 0xce, -+ 0xcc, 0xe5, 0x09, 0xe4, 0xeb, 0xa0, 0x89, 0xd0, -+ 0xef, 0x40, 0xb4, 0xe4, 0xfb, 0x94, 0x61, 0x55 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - checking for overflow */ -+{ -+ .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, -+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, -+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, -+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, -+ .b_public = (u8[32]){ 0xbf, 0x68, 0xe3, 0x5e, 0x9b, 0xdb, 0x7e, 0xee, -+ 0x1b, 0x50, 0x57, 0x02, 0x21, 0x86, 0x0f, 0x5d, -+ 0xcd, 0xad, 0x8a, 0xcb, 0xab, 0x03, 0x1b, 0x14, -+ 0x97, 0x4c, 0xc4, 0x90, 0x13, 0xc4, 0x98, 0x31 }, -+ .expected_ss = (u8[32]){ 0x21, 0xce, 0xe5, 0x2e, 0xfd, 0xbc, 0x81, 0x2e, -+ 0x1d, 0x02, 0x1a, 0x4a, 0xf1, 0xe1, 0xd8, 0xbc, -+ 0x4d, 0xb3, 0xc4, 0x00, 0xe4, 0xd2, 0xa2, 0xc5, -+ 0x6a, 0x39, 0x26, 0xdb, 0x4d, 0x99, 0xc6, 0x5b }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - checking for overflow */ -+{ -+ .secret = (u8[32]){ 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, -+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, -+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, -+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, -+ .b_public = (u8[32]){ 0x53, 0x47, 0xc4, 0x91, 0x33, 0x1a, 0x64, 0xb4, -+ 0x3d, 0xdc, 0x68, 0x30, 0x34, 0xe6, 0x77, 0xf5, -+ 0x3d, 0xc3, 0x2b, 0x52, 0xa5, 0x2a, 0x57, 0x7c, -+ 0x15, 0xa8, 0x3b, 0xf2, 0x98, 0xe9, 0x9f, 0x19 }, -+ .expected_ss = (u8[32]){ 0x18, 0xcb, 0x89, 0xe4, 0xe2, 0x0c, 0x0c, 0x2b, -+ 0xd3, 0x24, 0x30, 0x52, 0x45, 0x26, 0x6c, 0x93, -+ 0x27, 0x69, 0x0b, 0xbe, 0x79, 0xac, 0xb8, 0x8f, -+ 0x5b, 
0x8f, 0xb3, 0xf7, 0x4e, 0xca, 0x3e, 0x52 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - private key == -1 (mod order) */ -+{ -+ .secret = (u8[32]){ 0xa0, 0x23, 0xcd, 0xd0, 0x83, 0xef, 0x5b, 0xb8, -+ 0x2f, 0x10, 0xd6, 0x2e, 0x59, 0xe1, 0x5a, 0x68, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50 }, -+ .b_public = (u8[32]){ 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e, -+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57, -+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f, -+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 }, -+ .expected_ss = (u8[32]){ 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e, -+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57, -+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f, -+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+}, -+/* wycheproof - private key == 1 (mod order) on twist */ -+{ -+ .secret = (u8[32]){ 0x58, 0x08, 0x3d, 0xd2, 0x61, 0xad, 0x91, 0xef, -+ 0xf9, 0x52, 0x32, 0x2e, 0xc8, 0x24, 0xc6, 0x82, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f }, -+ .b_public = (u8[32]){ 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f, -+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6, -+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64, -+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 }, -+ .expected_ss = (u8[32]){ 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f, -+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6, -+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64, -+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 }, -+ .secret_size = 32, -+ .b_public_size = 32, -+ .expected_ss_size = 32, -+ -+} -+}; -+ - static const struct kpp_testvec ecdh_tv_template[] = { - { - #ifndef CONFIG_CRYPTO_FIPS diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0027-crypto-curve25519-implement-generic-KPP-driver.patch 
b/feeds/ipq807x/ipq807x/patches/080-wireguard-0027-crypto-curve25519-implement-generic-KPP-driver.patch deleted file mode 100644 index d90956169..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0027-crypto-curve25519-implement-generic-KPP-driver.patch +++ /dev/null @@ -1,136 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:34 +0100 -Subject: [PATCH] crypto: curve25519 - implement generic KPP driver - -commit ee772cb641135739c1530647391d5a04c39db192 upstream. - -Expose the generic Curve25519 library via the crypto API KPP interface. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - crypto/Kconfig | 5 +++ - crypto/Makefile | 1 + - crypto/curve25519-generic.c | 90 +++++++++++++++++++++++++++++++++++++ - 3 files changed, 96 insertions(+) - create mode 100644 crypto/curve25519-generic.c - ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -264,6 +264,11 @@ config CRYPTO_ECRDSA - standard algorithms (called GOST algorithms). Only signature verification - is implemented. 
- -+config CRYPTO_CURVE25519 -+ tristate "Curve25519 algorithm" -+ select CRYPTO_KPP -+ select CRYPTO_LIB_CURVE25519_GENERIC -+ - comment "Authenticated Encryption with Associated Data" - - config CRYPTO_CCM ---- a/crypto/Makefile -+++ b/crypto/Makefile -@@ -167,6 +167,7 @@ obj-$(CONFIG_CRYPTO_ZSTD) += zstd.o - obj-$(CONFIG_CRYPTO_OFB) += ofb.o - obj-$(CONFIG_CRYPTO_ECC) += ecc.o - obj-$(CONFIG_CRYPTO_ESSIV) += essiv.o -+obj-$(CONFIG_CRYPTO_CURVE25519) += curve25519-generic.o - - ecdh_generic-y += ecdh.o - ecdh_generic-y += ecdh_helper.o ---- /dev/null -+++ b/crypto/curve25519-generic.c -@@ -0,0 +1,90 @@ -+// SPDX-License-Identifier: GPL-2.0-or-later -+ -+#include -+#include -+#include -+#include -+#include -+ -+static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, -+ unsigned int len) -+{ -+ u8 *secret = kpp_tfm_ctx(tfm); -+ -+ if (!len) -+ curve25519_generate_secret(secret); -+ else if (len == CURVE25519_KEY_SIZE && -+ crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) -+ memcpy(secret, buf, CURVE25519_KEY_SIZE); -+ else -+ return -EINVAL; -+ return 0; -+} -+ -+static int curve25519_compute_value(struct kpp_request *req) -+{ -+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); -+ const u8 *secret = kpp_tfm_ctx(tfm); -+ u8 public_key[CURVE25519_KEY_SIZE]; -+ u8 buf[CURVE25519_KEY_SIZE]; -+ int copied, nbytes; -+ u8 const *bp; -+ -+ if (req->src) { -+ copied = sg_copy_to_buffer(req->src, -+ sg_nents_for_len(req->src, -+ CURVE25519_KEY_SIZE), -+ public_key, CURVE25519_KEY_SIZE); -+ if (copied != CURVE25519_KEY_SIZE) -+ return -EINVAL; -+ bp = public_key; -+ } else { -+ bp = curve25519_base_point; -+ } -+ -+ curve25519_generic(buf, secret, bp); -+ -+ /* might want less than we've got */ -+ nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len); -+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, -+ nbytes), -+ buf, nbytes); -+ if (copied != nbytes) -+ return -EINVAL; -+ return 0; -+} -+ -+static unsigned int 
curve25519_max_size(struct crypto_kpp *tfm) -+{ -+ return CURVE25519_KEY_SIZE; -+} -+ -+static struct kpp_alg curve25519_alg = { -+ .base.cra_name = "curve25519", -+ .base.cra_driver_name = "curve25519-generic", -+ .base.cra_priority = 100, -+ .base.cra_module = THIS_MODULE, -+ .base.cra_ctxsize = CURVE25519_KEY_SIZE, -+ -+ .set_secret = curve25519_set_secret, -+ .generate_public_key = curve25519_compute_value, -+ .compute_shared_secret = curve25519_compute_value, -+ .max_size = curve25519_max_size, -+}; -+ -+static int curve25519_init(void) -+{ -+ return crypto_register_kpp(&curve25519_alg); -+} -+ -+static void curve25519_exit(void) -+{ -+ crypto_unregister_kpp(&curve25519_alg); -+} -+ -+subsys_initcall(curve25519_init); -+module_exit(curve25519_exit); -+ -+MODULE_ALIAS_CRYPTO("curve25519"); -+MODULE_ALIAS_CRYPTO("curve25519-generic"); -+MODULE_LICENSE("GPL"); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0028-crypto-lib-curve25519-work-around-Clang-stack-spilli.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0028-crypto-lib-curve25519-work-around-Clang-stack-spilli.patch deleted file mode 100644 index 36b59c9aa..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0028-crypto-lib-curve25519-work-around-Clang-stack-spilli.patch +++ /dev/null @@ -1,75 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:35 +0100 -Subject: [PATCH] crypto: lib/curve25519 - work around Clang stack spilling - issue - -commit 660bb8e1f833ea63185fe80fde847e3e42f18e3b upstream. 
- -Arnd reports that the 32-bit generic library code for Curve25119 ends -up using an excessive amount of stack space when built with Clang: - - lib/crypto/curve25519-fiat32.c:756:6: error: stack frame size - of 1384 bytes in function 'curve25519_generic' - [-Werror,-Wframe-larger-than=] - -Let's give some hints to the compiler regarding which routines should -not be inlined, to prevent it from running out of registers and spilling -to the stack. The resulting code performs identically under both GCC -and Clang, and makes the warning go away. - -Suggested-by: Arnd Bergmann -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - lib/crypto/curve25519-fiat32.c | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - ---- a/lib/crypto/curve25519-fiat32.c -+++ b/lib/crypto/curve25519-fiat32.c -@@ -223,7 +223,7 @@ static __always_inline void fe_1(fe *h) - h->v[0] = 1; - } - --static void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) -+static noinline void fe_add_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) - { - { const u32 x20 = in1[9]; - { const u32 x21 = in1[8]; -@@ -266,7 +266,7 @@ static __always_inline void fe_add(fe_lo - fe_add_impl(h->v, f->v, g->v); - } - --static void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) -+static noinline void fe_sub_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) - { - { const u32 x20 = in1[9]; - { const u32 x21 = in1[8]; -@@ -309,7 +309,7 @@ static __always_inline void fe_sub(fe_lo - fe_sub_impl(h->v, f->v, g->v); - } - --static void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) -+static noinline void fe_mul_impl(u32 out[10], const u32 in1[10], const u32 in2[10]) - { - { const u32 x20 = in1[9]; - { const u32 x21 = in1[8]; -@@ -441,7 +441,7 @@ fe_mul_tll(fe *h, const fe_loose *f, con - fe_mul_impl(h->v, f->v, g->v); - } - --static void fe_sqr_impl(u32 out[10], const u32 in1[10]) -+static noinline void 
fe_sqr_impl(u32 out[10], const u32 in1[10]) - { - { const u32 x17 = in1[9]; - { const u32 x18 = in1[8]; -@@ -619,7 +619,7 @@ static __always_inline void fe_invert(fe - * - * Preconditions: b in {0,1} - */ --static __always_inline void fe_cswap(fe *f, fe *g, unsigned int b) -+static noinline void fe_cswap(fe *f, fe *g, unsigned int b) - { - unsigned i; - b = 0 - b; diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0029-crypto-curve25519-x86_64-library-and-KPP-implementat.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0029-crypto-curve25519-x86_64-library-and-KPP-implementat.patch deleted file mode 100644 index 49fd97076..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0029-crypto-curve25519-x86_64-library-and-KPP-implementat.patch +++ /dev/null @@ -1,2536 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 8 Nov 2019 13:22:36 +0100 -Subject: [PATCH] crypto: curve25519 - x86_64 library and KPP implementations -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit bb611bdfd6be34d9f822c73305fcc83720499d38 upstream. - -This implementation is the fastest available x86_64 implementation, and -unlike Sandy2x, it doesn't requie use of the floating point registers at -all. Instead it makes use of BMI2 and ADX, available on recent -microarchitectures. The implementation was written by Armando -Faz-Hernández with contributions (upstream) from Samuel Neves and me, -in addition to further changes in the kernel implementation from us. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Samuel Neves -Co-developed-by: Samuel Neves -[ardb: - move to arch/x86/crypto - - wire into lib/crypto framework - - implement crypto API KPP hooks ] -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/x86/crypto/Makefile | 1 + - arch/x86/crypto/curve25519-x86_64.c | 2475 +++++++++++++++++++++++++++ - crypto/Kconfig | 6 + - 3 files changed, 2482 insertions(+) - create mode 100644 arch/x86/crypto/curve25519-x86_64.c - ---- a/arch/x86/crypto/Makefile -+++ b/arch/x86/crypto/Makefile -@@ -39,6 +39,7 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) - - obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o - obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o -+obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o - - # These modules require assembler to support AVX. - ifeq ($(avx_supported),yes) ---- /dev/null -+++ b/arch/x86/crypto/curve25519-x86_64.c -@@ -0,0 +1,2475 @@ -+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause -+/* -+ * Copyright (c) 2017 Armando Faz . All Rights Reserved. -+ * Copyright (C) 2018-2019 Jason A. Donenfeld . All Rights Reserved. -+ * Copyright (C) 2018 Samuel Neves . All Rights Reserved. -+ */ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(curve25519_use_bmi2); -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(curve25519_use_adx); -+ -+enum { NUM_WORDS_ELTFP25519 = 4 }; -+typedef __aligned(32) u64 eltfp25519_1w[NUM_WORDS_ELTFP25519]; -+typedef __aligned(32) u64 eltfp25519_1w_buffer[2 * NUM_WORDS_ELTFP25519]; -+ -+#define mul_eltfp25519_1w_adx(c, a, b) do { \ -+ mul_256x256_integer_adx(m.buffer, a, b); \ -+ red_eltfp25519_1w_adx(c, m.buffer); \ -+} while (0) -+ -+#define mul_eltfp25519_1w_bmi2(c, a, b) do { \ -+ mul_256x256_integer_bmi2(m.buffer, a, b); \ -+ red_eltfp25519_1w_bmi2(c, m.buffer); \ -+} while (0) -+ -+#define sqr_eltfp25519_1w_adx(a) do { \ -+ sqr_256x256_integer_adx(m.buffer, a); \ -+ red_eltfp25519_1w_adx(a, m.buffer); \ -+} while (0) -+ -+#define sqr_eltfp25519_1w_bmi2(a) do { \ -+ sqr_256x256_integer_bmi2(m.buffer, a); \ -+ red_eltfp25519_1w_bmi2(a, m.buffer); \ -+} while (0) -+ -+#define 
mul_eltfp25519_2w_adx(c, a, b) do { \ -+ mul2_256x256_integer_adx(m.buffer, a, b); \ -+ red_eltfp25519_2w_adx(c, m.buffer); \ -+} while (0) -+ -+#define mul_eltfp25519_2w_bmi2(c, a, b) do { \ -+ mul2_256x256_integer_bmi2(m.buffer, a, b); \ -+ red_eltfp25519_2w_bmi2(c, m.buffer); \ -+} while (0) -+ -+#define sqr_eltfp25519_2w_adx(a) do { \ -+ sqr2_256x256_integer_adx(m.buffer, a); \ -+ red_eltfp25519_2w_adx(a, m.buffer); \ -+} while (0) -+ -+#define sqr_eltfp25519_2w_bmi2(a) do { \ -+ sqr2_256x256_integer_bmi2(m.buffer, a); \ -+ red_eltfp25519_2w_bmi2(a, m.buffer); \ -+} while (0) -+ -+#define sqrn_eltfp25519_1w_adx(a, times) do { \ -+ int ____counter = (times); \ -+ while (____counter-- > 0) \ -+ sqr_eltfp25519_1w_adx(a); \ -+} while (0) -+ -+#define sqrn_eltfp25519_1w_bmi2(a, times) do { \ -+ int ____counter = (times); \ -+ while (____counter-- > 0) \ -+ sqr_eltfp25519_1w_bmi2(a); \ -+} while (0) -+ -+#define copy_eltfp25519_1w(C, A) do { \ -+ (C)[0] = (A)[0]; \ -+ (C)[1] = (A)[1]; \ -+ (C)[2] = (A)[2]; \ -+ (C)[3] = (A)[3]; \ -+} while (0) -+ -+#define setzero_eltfp25519_1w(C) do { \ -+ (C)[0] = 0; \ -+ (C)[1] = 0; \ -+ (C)[2] = 0; \ -+ (C)[3] = 0; \ -+} while (0) -+ -+__aligned(32) static const u64 table_ladder_8k[252 * NUM_WORDS_ELTFP25519] = { -+ /* 1 */ 0xfffffffffffffff3UL, 0xffffffffffffffffUL, -+ 0xffffffffffffffffUL, 0x5fffffffffffffffUL, -+ /* 2 */ 0x6b8220f416aafe96UL, 0x82ebeb2b4f566a34UL, -+ 0xd5a9a5b075a5950fUL, 0x5142b2cf4b2488f4UL, -+ /* 3 */ 0x6aaebc750069680cUL, 0x89cf7820a0f99c41UL, -+ 0x2a58d9183b56d0f4UL, 0x4b5aca80e36011a4UL, -+ /* 4 */ 0x329132348c29745dUL, 0xf4a2e616e1642fd7UL, -+ 0x1e45bb03ff67bc34UL, 0x306912d0f42a9b4aUL, -+ /* 5 */ 0xff886507e6af7154UL, 0x04f50e13dfeec82fUL, -+ 0xaa512fe82abab5ceUL, 0x174e251a68d5f222UL, -+ /* 6 */ 0xcf96700d82028898UL, 0x1743e3370a2c02c5UL, -+ 0x379eec98b4e86eaaUL, 0x0c59888a51e0482eUL, -+ /* 7 */ 0xfbcbf1d699b5d189UL, 0xacaef0d58e9fdc84UL, -+ 0xc1c20d06231f7614UL, 0x2938218da274f972UL, -+ /* 8 */ 
0xf6af49beff1d7f18UL, 0xcc541c22387ac9c2UL, -+ 0x96fcc9ef4015c56bUL, 0x69c1627c690913a9UL, -+ /* 9 */ 0x7a86fd2f4733db0eUL, 0xfdb8c4f29e087de9UL, -+ 0x095e4b1a8ea2a229UL, 0x1ad7a7c829b37a79UL, -+ /* 10 */ 0x342d89cad17ea0c0UL, 0x67bedda6cced2051UL, -+ 0x19ca31bf2bb42f74UL, 0x3df7b4c84980acbbUL, -+ /* 11 */ 0xa8c6444dc80ad883UL, 0xb91e440366e3ab85UL, -+ 0xc215cda00164f6d8UL, 0x3d867c6ef247e668UL, -+ /* 12 */ 0xc7dd582bcc3e658cUL, 0xfd2c4748ee0e5528UL, -+ 0xa0fd9b95cc9f4f71UL, 0x7529d871b0675ddfUL, -+ /* 13 */ 0xb8f568b42d3cbd78UL, 0x1233011b91f3da82UL, -+ 0x2dce6ccd4a7c3b62UL, 0x75e7fc8e9e498603UL, -+ /* 14 */ 0x2f4f13f1fcd0b6ecUL, 0xf1a8ca1f29ff7a45UL, -+ 0xc249c1a72981e29bUL, 0x6ebe0dbb8c83b56aUL, -+ /* 15 */ 0x7114fa8d170bb222UL, 0x65a2dcd5bf93935fUL, -+ 0xbdc41f68b59c979aUL, 0x2f0eef79a2ce9289UL, -+ /* 16 */ 0x42ecbf0c083c37ceUL, 0x2930bc09ec496322UL, -+ 0xf294b0c19cfeac0dUL, 0x3780aa4bedfabb80UL, -+ /* 17 */ 0x56c17d3e7cead929UL, 0xe7cb4beb2e5722c5UL, -+ 0x0ce931732dbfe15aUL, 0x41b883c7621052f8UL, -+ /* 18 */ 0xdbf75ca0c3d25350UL, 0x2936be086eb1e351UL, -+ 0xc936e03cb4a9b212UL, 0x1d45bf82322225aaUL, -+ /* 19 */ 0xe81ab1036a024cc5UL, 0xe212201c304c9a72UL, -+ 0xc5d73fba6832b1fcUL, 0x20ffdb5a4d839581UL, -+ /* 20 */ 0xa283d367be5d0fadUL, 0x6c2b25ca8b164475UL, -+ 0x9d4935467caaf22eUL, 0x5166408eee85ff49UL, -+ /* 21 */ 0x3c67baa2fab4e361UL, 0xb3e433c67ef35cefUL, -+ 0x5259729241159b1cUL, 0x6a621892d5b0ab33UL, -+ /* 22 */ 0x20b74a387555cdcbUL, 0x532aa10e1208923fUL, -+ 0xeaa17b7762281dd1UL, 0x61ab3443f05c44bfUL, -+ /* 23 */ 0x257a6c422324def8UL, 0x131c6c1017e3cf7fUL, -+ 0x23758739f630a257UL, 0x295a407a01a78580UL, -+ /* 24 */ 0xf8c443246d5da8d9UL, 0x19d775450c52fa5dUL, -+ 0x2afcfc92731bf83dUL, 0x7d10c8e81b2b4700UL, -+ /* 25 */ 0xc8e0271f70baa20bUL, 0x993748867ca63957UL, -+ 0x5412efb3cb7ed4bbUL, 0x3196d36173e62975UL, -+ /* 26 */ 0xde5bcad141c7dffcUL, 0x47cc8cd2b395c848UL, -+ 0xa34cd942e11af3cbUL, 0x0256dbf2d04ecec2UL, -+ /* 27 */ 0x875ab7e94b0e667fUL, 0xcad4dd83c0850d10UL, 
-+ 0x47f12e8f4e72c79fUL, 0x5f1a87bb8c85b19bUL, -+ /* 28 */ 0x7ae9d0b6437f51b8UL, 0x12c7ce5518879065UL, -+ 0x2ade09fe5cf77aeeUL, 0x23a05a2f7d2c5627UL, -+ /* 29 */ 0x5908e128f17c169aUL, 0xf77498dd8ad0852dUL, -+ 0x74b4c4ceab102f64UL, 0x183abadd10139845UL, -+ /* 30 */ 0xb165ba8daa92aaacUL, 0xd5c5ef9599386705UL, -+ 0xbe2f8f0cf8fc40d1UL, 0x2701e635ee204514UL, -+ /* 31 */ 0x629fa80020156514UL, 0xf223868764a8c1ceUL, -+ 0x5b894fff0b3f060eUL, 0x60d9944cf708a3faUL, -+ /* 32 */ 0xaeea001a1c7a201fUL, 0xebf16a633ee2ce63UL, -+ 0x6f7709594c7a07e1UL, 0x79b958150d0208cbUL, -+ /* 33 */ 0x24b55e5301d410e7UL, 0xe3a34edff3fdc84dUL, -+ 0xd88768e4904032d8UL, 0x131384427b3aaeecUL, -+ /* 34 */ 0x8405e51286234f14UL, 0x14dc4739adb4c529UL, -+ 0xb8a2b5b250634ffdUL, 0x2fe2a94ad8a7ff93UL, -+ /* 35 */ 0xec5c57efe843faddUL, 0x2843ce40f0bb9918UL, -+ 0xa4b561d6cf3d6305UL, 0x743629bde8fb777eUL, -+ /* 36 */ 0x343edd46bbaf738fUL, 0xed981828b101a651UL, -+ 0xa401760b882c797aUL, 0x1fc223e28dc88730UL, -+ /* 37 */ 0x48604e91fc0fba0eUL, 0xb637f78f052c6fa4UL, -+ 0x91ccac3d09e9239cUL, 0x23f7eed4437a687cUL, -+ /* 38 */ 0x5173b1118d9bd800UL, 0x29d641b63189d4a7UL, -+ 0xfdbf177988bbc586UL, 0x2959894fcad81df5UL, -+ /* 39 */ 0xaebc8ef3b4bbc899UL, 0x4148995ab26992b9UL, -+ 0x24e20b0134f92cfbUL, 0x40d158894a05dee8UL, -+ /* 40 */ 0x46b00b1185af76f6UL, 0x26bac77873187a79UL, -+ 0x3dc0bf95ab8fff5fUL, 0x2a608bd8945524d7UL, -+ /* 41 */ 0x26449588bd446302UL, 0x7c4bc21c0388439cUL, -+ 0x8e98a4f383bd11b2UL, 0x26218d7bc9d876b9UL, -+ /* 42 */ 0xe3081542997c178aUL, 0x3c2d29a86fb6606fUL, -+ 0x5c217736fa279374UL, 0x7dde05734afeb1faUL, -+ /* 43 */ 0x3bf10e3906d42babUL, 0xe4f7803e1980649cUL, -+ 0xe6053bf89595bf7aUL, 0x394faf38da245530UL, -+ /* 44 */ 0x7a8efb58896928f4UL, 0xfbc778e9cc6a113cUL, -+ 0x72670ce330af596fUL, 0x48f222a81d3d6cf7UL, -+ /* 45 */ 0xf01fce410d72caa7UL, 0x5a20ecc7213b5595UL, -+ 0x7bc21165c1fa1483UL, 0x07f89ae31da8a741UL, -+ /* 46 */ 0x05d2c2b4c6830ff9UL, 0xd43e330fc6316293UL, -+ 0xa5a5590a96d3a904UL, 
0x705edb91a65333b6UL, -+ /* 47 */ 0x048ee15e0bb9a5f7UL, 0x3240cfca9e0aaf5dUL, -+ 0x8f4b71ceedc4a40bUL, 0x621c0da3de544a6dUL, -+ /* 48 */ 0x92872836a08c4091UL, 0xce8375b010c91445UL, -+ 0x8a72eb524f276394UL, 0x2667fcfa7ec83635UL, -+ /* 49 */ 0x7f4c173345e8752aUL, 0x061b47feee7079a5UL, -+ 0x25dd9afa9f86ff34UL, 0x3780cef5425dc89cUL, -+ /* 50 */ 0x1a46035a513bb4e9UL, 0x3e1ef379ac575adaUL, -+ 0xc78c5f1c5fa24b50UL, 0x321a967634fd9f22UL, -+ /* 51 */ 0x946707b8826e27faUL, 0x3dca84d64c506fd0UL, -+ 0xc189218075e91436UL, 0x6d9284169b3b8484UL, -+ /* 52 */ 0x3a67e840383f2ddfUL, 0x33eec9a30c4f9b75UL, -+ 0x3ec7c86fa783ef47UL, 0x26ec449fbac9fbc4UL, -+ /* 53 */ 0x5c0f38cba09b9e7dUL, 0x81168cc762a3478cUL, -+ 0x3e23b0d306fc121cUL, 0x5a238aa0a5efdcddUL, -+ /* 54 */ 0x1ba26121c4ea43ffUL, 0x36f8c77f7c8832b5UL, -+ 0x88fbea0b0adcf99aUL, 0x5ca9938ec25bebf9UL, -+ /* 55 */ 0xd5436a5e51fccda0UL, 0x1dbc4797c2cd893bUL, -+ 0x19346a65d3224a08UL, 0x0f5034e49b9af466UL, -+ /* 56 */ 0xf23c3967a1e0b96eUL, 0xe58b08fa867a4d88UL, -+ 0xfb2fabc6a7341679UL, 0x2a75381eb6026946UL, -+ /* 57 */ 0xc80a3be4c19420acUL, 0x66b1f6c681f2b6dcUL, -+ 0x7cf7036761e93388UL, 0x25abbbd8a660a4c4UL, -+ /* 58 */ 0x91ea12ba14fd5198UL, 0x684950fc4a3cffa9UL, -+ 0xf826842130f5ad28UL, 0x3ea988f75301a441UL, -+ /* 59 */ 0xc978109a695f8c6fUL, 0x1746eb4a0530c3f3UL, -+ 0x444d6d77b4459995UL, 0x75952b8c054e5cc7UL, -+ /* 60 */ 0xa3703f7915f4d6aaUL, 0x66c346202f2647d8UL, -+ 0xd01469df811d644bUL, 0x77fea47d81a5d71fUL, -+ /* 61 */ 0xc5e9529ef57ca381UL, 0x6eeeb4b9ce2f881aUL, -+ 0xb6e91a28e8009bd6UL, 0x4b80be3e9afc3fecUL, -+ /* 62 */ 0x7e3773c526aed2c5UL, 0x1b4afcb453c9a49dUL, -+ 0xa920bdd7baffb24dUL, 0x7c54699f122d400eUL, -+ /* 63 */ 0xef46c8e14fa94bc8UL, 0xe0b074ce2952ed5eUL, -+ 0xbea450e1dbd885d5UL, 0x61b68649320f712cUL, -+ /* 64 */ 0x8a485f7309ccbdd1UL, 0xbd06320d7d4d1a2dUL, -+ 0x25232973322dbef4UL, 0x445dc4758c17f770UL, -+ /* 65 */ 0xdb0434177cc8933cUL, 0xed6fe82175ea059fUL, -+ 0x1efebefdc053db34UL, 0x4adbe867c65daf99UL, -+ /* 66 */ 
0x3acd71a2a90609dfUL, 0xe5e991856dd04050UL, -+ 0x1ec69b688157c23cUL, 0x697427f6885cfe4dUL, -+ /* 67 */ 0xd7be7b9b65e1a851UL, 0xa03d28d522c536ddUL, -+ 0x28399d658fd2b645UL, 0x49e5b7e17c2641e1UL, -+ /* 68 */ 0x6f8c3a98700457a4UL, 0x5078f0a25ebb6778UL, -+ 0xd13c3ccbc382960fUL, 0x2e003258a7df84b1UL, -+ /* 69 */ 0x8ad1f39be6296a1cUL, 0xc1eeaa652a5fbfb2UL, -+ 0x33ee0673fd26f3cbUL, 0x59256173a69d2cccUL, -+ /* 70 */ 0x41ea07aa4e18fc41UL, 0xd9fc19527c87a51eUL, -+ 0xbdaacb805831ca6fUL, 0x445b652dc916694fUL, -+ /* 71 */ 0xce92a3a7f2172315UL, 0x1edc282de11b9964UL, -+ 0xa1823aafe04c314aUL, 0x790a2d94437cf586UL, -+ /* 72 */ 0x71c447fb93f6e009UL, 0x8922a56722845276UL, -+ 0xbf70903b204f5169UL, 0x2f7a89891ba319feUL, -+ /* 73 */ 0x02a08eb577e2140cUL, 0xed9a4ed4427bdcf4UL, -+ 0x5253ec44e4323cd1UL, 0x3e88363c14e9355bUL, -+ /* 74 */ 0xaa66c14277110b8cUL, 0x1ae0391610a23390UL, -+ 0x2030bd12c93fc2a2UL, 0x3ee141579555c7abUL, -+ /* 75 */ 0x9214de3a6d6e7d41UL, 0x3ccdd88607f17efeUL, -+ 0x674f1288f8e11217UL, 0x5682250f329f93d0UL, -+ /* 76 */ 0x6cf00b136d2e396eUL, 0x6e4cf86f1014debfUL, -+ 0x5930b1b5bfcc4e83UL, 0x047069b48aba16b6UL, -+ /* 77 */ 0x0d4ce4ab69b20793UL, 0xb24db91a97d0fb9eUL, -+ 0xcdfa50f54e00d01dUL, 0x221b1085368bddb5UL, -+ /* 78 */ 0xe7e59468b1e3d8d2UL, 0x53c56563bd122f93UL, -+ 0xeee8a903e0663f09UL, 0x61efa662cbbe3d42UL, -+ /* 79 */ 0x2cf8ddddde6eab2aUL, 0x9bf80ad51435f231UL, -+ 0x5deadacec9f04973UL, 0x29275b5d41d29b27UL, -+ /* 80 */ 0xcfde0f0895ebf14fUL, 0xb9aab96b054905a7UL, -+ 0xcae80dd9a1c420fdUL, 0x0a63bf2f1673bbc7UL, -+ /* 81 */ 0x092f6e11958fbc8cUL, 0x672a81e804822fadUL, -+ 0xcac8351560d52517UL, 0x6f3f7722c8f192f8UL, -+ /* 82 */ 0xf8ba90ccc2e894b7UL, 0x2c7557a438ff9f0dUL, -+ 0x894d1d855ae52359UL, 0x68e122157b743d69UL, -+ /* 83 */ 0xd87e5570cfb919f3UL, 0x3f2cdecd95798db9UL, -+ 0x2121154710c0a2ceUL, 0x3c66a115246dc5b2UL, -+ /* 84 */ 0xcbedc562294ecb72UL, 0xba7143c36a280b16UL, -+ 0x9610c2efd4078b67UL, 0x6144735d946a4b1eUL, -+ /* 85 */ 0x536f111ed75b3350UL, 
0x0211db8c2041d81bUL, -+ 0xf93cb1000e10413cUL, 0x149dfd3c039e8876UL, -+ /* 86 */ 0xd479dde46b63155bUL, 0xb66e15e93c837976UL, -+ 0xdafde43b1f13e038UL, 0x5fafda1a2e4b0b35UL, -+ /* 87 */ 0x3600bbdf17197581UL, 0x3972050bbe3cd2c2UL, -+ 0x5938906dbdd5be86UL, 0x34fce5e43f9b860fUL, -+ /* 88 */ 0x75a8a4cd42d14d02UL, 0x828dabc53441df65UL, -+ 0x33dcabedd2e131d3UL, 0x3ebad76fb814d25fUL, -+ /* 89 */ 0xd4906f566f70e10fUL, 0x5d12f7aa51690f5aUL, -+ 0x45adb16e76cefcf2UL, 0x01f768aead232999UL, -+ /* 90 */ 0x2b6cc77b6248febdUL, 0x3cd30628ec3aaffdUL, -+ 0xce1c0b80d4ef486aUL, 0x4c3bff2ea6f66c23UL, -+ /* 91 */ 0x3f2ec4094aeaeb5fUL, 0x61b19b286e372ca7UL, -+ 0x5eefa966de2a701dUL, 0x23b20565de55e3efUL, -+ /* 92 */ 0xe301ca5279d58557UL, 0x07b2d4ce27c2874fUL, -+ 0xa532cd8a9dcf1d67UL, 0x2a52fee23f2bff56UL, -+ /* 93 */ 0x8624efb37cd8663dUL, 0xbbc7ac20ffbd7594UL, -+ 0x57b85e9c82d37445UL, 0x7b3052cb86a6ec66UL, -+ /* 94 */ 0x3482f0ad2525e91eUL, 0x2cb68043d28edca0UL, -+ 0xaf4f6d052e1b003aUL, 0x185f8c2529781b0aUL, -+ /* 95 */ 0xaa41de5bd80ce0d6UL, 0x9407b2416853e9d6UL, -+ 0x563ec36e357f4c3aUL, 0x4cc4b8dd0e297bceUL, -+ /* 96 */ 0xa2fc1a52ffb8730eUL, 0x1811f16e67058e37UL, -+ 0x10f9a366cddf4ee1UL, 0x72f4a0c4a0b9f099UL, -+ /* 97 */ 0x8c16c06f663f4ea7UL, 0x693b3af74e970fbaUL, -+ 0x2102e7f1d69ec345UL, 0x0ba53cbc968a8089UL, -+ /* 98 */ 0xca3d9dc7fea15537UL, 0x4c6824bb51536493UL, -+ 0xb9886314844006b1UL, 0x40d2a72ab454cc60UL, -+ /* 99 */ 0x5936a1b712570975UL, 0x91b9d648debda657UL, -+ 0x3344094bb64330eaUL, 0x006ba10d12ee51d0UL, -+ /* 100 */ 0x19228468f5de5d58UL, 0x0eb12f4c38cc05b0UL, -+ 0xa1039f9dd5601990UL, 0x4502d4ce4fff0e0bUL, -+ /* 101 */ 0xeb2054106837c189UL, 0xd0f6544c6dd3b93cUL, -+ 0x40727064c416d74fUL, 0x6e15c6114b502ef0UL, -+ /* 102 */ 0x4df2a398cfb1a76bUL, 0x11256c7419f2f6b1UL, -+ 0x4a497962066e6043UL, 0x705b3aab41355b44UL, -+ /* 103 */ 0x365ef536d797b1d8UL, 0x00076bd622ddf0dbUL, -+ 0x3bbf33b0e0575a88UL, 0x3777aa05c8e4ca4dUL, -+ /* 104 */ 0x392745c85578db5fUL, 0x6fda4149dbae5ae2UL, -+ 
0xb1f0b00b8adc9867UL, 0x09963437d36f1da3UL, -+ /* 105 */ 0x7e824e90a5dc3853UL, 0xccb5f6641f135cbdUL, -+ 0x6736d86c87ce8fccUL, 0x625f3ce26604249fUL, -+ /* 106 */ 0xaf8ac8059502f63fUL, 0x0c05e70a2e351469UL, -+ 0x35292e9c764b6305UL, 0x1a394360c7e23ac3UL, -+ /* 107 */ 0xd5c6d53251183264UL, 0x62065abd43c2b74fUL, -+ 0xb5fbf5d03b973f9bUL, 0x13a3da3661206e5eUL, -+ /* 108 */ 0xc6bd5837725d94e5UL, 0x18e30912205016c5UL, -+ 0x2088ce1570033c68UL, 0x7fba1f495c837987UL, -+ /* 109 */ 0x5a8c7423f2f9079dUL, 0x1735157b34023fc5UL, -+ 0xe4f9b49ad2fab351UL, 0x6691ff72c878e33cUL, -+ /* 110 */ 0x122c2adedc5eff3eUL, 0xf8dd4bf1d8956cf4UL, -+ 0xeb86205d9e9e5bdaUL, 0x049b92b9d975c743UL, -+ /* 111 */ 0xa5379730b0f6c05aUL, 0x72a0ffacc6f3a553UL, -+ 0xb0032c34b20dcd6dUL, 0x470e9dbc88d5164aUL, -+ /* 112 */ 0xb19cf10ca237c047UL, 0xb65466711f6c81a2UL, -+ 0xb3321bd16dd80b43UL, 0x48c14f600c5fbe8eUL, -+ /* 113 */ 0x66451c264aa6c803UL, 0xb66e3904a4fa7da6UL, -+ 0xd45f19b0b3128395UL, 0x31602627c3c9bc10UL, -+ /* 114 */ 0x3120dc4832e4e10dUL, 0xeb20c46756c717f7UL, -+ 0x00f52e3f67280294UL, 0x566d4fc14730c509UL, -+ /* 115 */ 0x7e3a5d40fd837206UL, 0xc1e926dc7159547aUL, -+ 0x216730fba68d6095UL, 0x22e8c3843f69cea7UL, -+ /* 116 */ 0x33d074e8930e4b2bUL, 0xb6e4350e84d15816UL, -+ 0x5534c26ad6ba2365UL, 0x7773c12f89f1f3f3UL, -+ /* 117 */ 0x8cba404da57962aaUL, 0x5b9897a81999ce56UL, -+ 0x508e862f121692fcUL, 0x3a81907fa093c291UL, -+ /* 118 */ 0x0dded0ff4725a510UL, 0x10d8cc10673fc503UL, -+ 0x5b9d151c9f1f4e89UL, 0x32a5c1d5cb09a44cUL, -+ /* 119 */ 0x1e0aa442b90541fbUL, 0x5f85eb7cc1b485dbUL, -+ 0xbee595ce8a9df2e5UL, 0x25e496c722422236UL, -+ /* 120 */ 0x5edf3c46cd0fe5b9UL, 0x34e75a7ed2a43388UL, -+ 0xe488de11d761e352UL, 0x0e878a01a085545cUL, -+ /* 121 */ 0xba493c77e021bb04UL, 0x2b4d1843c7df899aUL, -+ 0x9ea37a487ae80d67UL, 0x67a9958011e41794UL, -+ /* 122 */ 0x4b58051a6697b065UL, 0x47e33f7d8d6ba6d4UL, -+ 0xbb4da8d483ca46c1UL, 0x68becaa181c2db0dUL, -+ /* 123 */ 0x8d8980e90b989aa5UL, 0xf95eb14a2c93c99bUL, -+ 0x51c6c7c4796e73a2UL, 
0x6e228363b5efb569UL, -+ /* 124 */ 0xc6bbc0b02dd624c8UL, 0x777eb47dec8170eeUL, -+ 0x3cde15a004cfafa9UL, 0x1dc6bc087160bf9bUL, -+ /* 125 */ 0x2e07e043eec34002UL, 0x18e9fc677a68dc7fUL, -+ 0xd8da03188bd15b9aUL, 0x48fbc3bb00568253UL, -+ /* 126 */ 0x57547d4cfb654ce1UL, 0xd3565b82a058e2adUL, -+ 0xf63eaf0bbf154478UL, 0x47531ef114dfbb18UL, -+ /* 127 */ 0xe1ec630a4278c587UL, 0x5507d546ca8e83f3UL, -+ 0x85e135c63adc0c2bUL, 0x0aa7efa85682844eUL, -+ /* 128 */ 0x72691ba8b3e1f615UL, 0x32b4e9701fbe3ffaUL, -+ 0x97b6d92e39bb7868UL, 0x2cfe53dea02e39e8UL, -+ /* 129 */ 0x687392cd85cd52b0UL, 0x27ff66c910e29831UL, -+ 0x97134556a9832d06UL, 0x269bb0360a84f8a0UL, -+ /* 130 */ 0x706e55457643f85cUL, 0x3734a48c9b597d1bUL, -+ 0x7aee91e8c6efa472UL, 0x5cd6abc198a9d9e0UL, -+ /* 131 */ 0x0e04de06cb3ce41aUL, 0xd8c6eb893402e138UL, -+ 0x904659bb686e3772UL, 0x7215c371746ba8c8UL, -+ /* 132 */ 0xfd12a97eeae4a2d9UL, 0x9514b7516394f2c5UL, -+ 0x266fd5809208f294UL, 0x5c847085619a26b9UL, -+ /* 133 */ 0x52985410fed694eaUL, 0x3c905b934a2ed254UL, -+ 0x10bb47692d3be467UL, 0x063b3d2d69e5e9e1UL, -+ /* 134 */ 0x472726eedda57debUL, 0xefb6c4ae10f41891UL, -+ 0x2b1641917b307614UL, 0x117c554fc4f45b7cUL, -+ /* 135 */ 0xc07cf3118f9d8812UL, 0x01dbd82050017939UL, -+ 0xd7e803f4171b2827UL, 0x1015e87487d225eaUL, -+ /* 136 */ 0xc58de3fed23acc4dUL, 0x50db91c294a7be2dUL, -+ 0x0b94d43d1c9cf457UL, 0x6b1640fa6e37524aUL, -+ /* 137 */ 0x692f346c5fda0d09UL, 0x200b1c59fa4d3151UL, -+ 0xb8c46f760777a296UL, 0x4b38395f3ffdfbcfUL, -+ /* 138 */ 0x18d25e00be54d671UL, 0x60d50582bec8aba6UL, -+ 0x87ad8f263b78b982UL, 0x50fdf64e9cda0432UL, -+ /* 139 */ 0x90f567aac578dcf0UL, 0xef1e9b0ef2a3133bUL, -+ 0x0eebba9242d9de71UL, 0x15473c9bf03101c7UL, -+ /* 140 */ 0x7c77e8ae56b78095UL, 0xb678e7666e6f078eUL, -+ 0x2da0b9615348ba1fUL, 0x7cf931c1ff733f0bUL, -+ /* 141 */ 0x26b357f50a0a366cUL, 0xe9708cf42b87d732UL, -+ 0xc13aeea5f91cb2c0UL, 0x35d90c991143bb4cUL, -+ /* 142 */ 0x47c1c404a9a0d9dcUL, 0x659e58451972d251UL, -+ 0x3875a8c473b38c31UL, 0x1fbd9ed379561f24UL, 
-+ /* 143 */ 0x11fabc6fd41ec28dUL, 0x7ef8dfe3cd2a2dcaUL, -+ 0x72e73b5d8c404595UL, 0x6135fa4954b72f27UL, -+ /* 144 */ 0xccfc32a2de24b69cUL, 0x3f55698c1f095d88UL, -+ 0xbe3350ed5ac3f929UL, 0x5e9bf806ca477eebUL, -+ /* 145 */ 0xe9ce8fb63c309f68UL, 0x5376f63565e1f9f4UL, -+ 0xd1afcfb35a6393f1UL, 0x6632a1ede5623506UL, -+ /* 146 */ 0x0b7d6c390c2ded4cUL, 0x56cb3281df04cb1fUL, -+ 0x66305a1249ecc3c7UL, 0x5d588b60a38ca72aUL, -+ /* 147 */ 0xa6ecbf78e8e5f42dUL, 0x86eeb44b3c8a3eecUL, -+ 0xec219c48fbd21604UL, 0x1aaf1af517c36731UL, -+ /* 148 */ 0xc306a2836769bde7UL, 0x208280622b1e2adbUL, -+ 0x8027f51ffbff94a6UL, 0x76cfa1ce1124f26bUL, -+ /* 149 */ 0x18eb00562422abb6UL, 0xf377c4d58f8c29c3UL, -+ 0x4dbbc207f531561aUL, 0x0253b7f082128a27UL, -+ /* 150 */ 0x3d1f091cb62c17e0UL, 0x4860e1abd64628a9UL, -+ 0x52d17436309d4253UL, 0x356f97e13efae576UL, -+ /* 151 */ 0xd351e11aa150535bUL, 0x3e6b45bb1dd878ccUL, -+ 0x0c776128bed92c98UL, 0x1d34ae93032885b8UL, -+ /* 152 */ 0x4ba0488ca85ba4c3UL, 0x985348c33c9ce6ceUL, -+ 0x66124c6f97bda770UL, 0x0f81a0290654124aUL, -+ /* 153 */ 0x9ed09ca6569b86fdUL, 0x811009fd18af9a2dUL, -+ 0xff08d03f93d8c20aUL, 0x52a148199faef26bUL, -+ /* 154 */ 0x3e03f9dc2d8d1b73UL, 0x4205801873961a70UL, -+ 0xc0d987f041a35970UL, 0x07aa1f15a1c0d549UL, -+ /* 155 */ 0xdfd46ce08cd27224UL, 0x6d0a024f934e4239UL, -+ 0x808a7a6399897b59UL, 0x0a4556e9e13d95a2UL, -+ /* 156 */ 0xd21a991fe9c13045UL, 0x9b0e8548fe7751b8UL, -+ 0x5da643cb4bf30035UL, 0x77db28d63940f721UL, -+ /* 157 */ 0xfc5eeb614adc9011UL, 0x5229419ae8c411ebUL, -+ 0x9ec3e7787d1dcf74UL, 0x340d053e216e4cb5UL, -+ /* 158 */ 0xcac7af39b48df2b4UL, 0xc0faec2871a10a94UL, -+ 0x140a69245ca575edUL, 0x0cf1c37134273a4cUL, -+ /* 159 */ 0xc8ee306ac224b8a5UL, 0x57eaee7ccb4930b0UL, -+ 0xa1e806bdaacbe74fUL, 0x7d9a62742eeb657dUL, -+ /* 160 */ 0x9eb6b6ef546c4830UL, 0x885cca1fddb36e2eUL, -+ 0xe6b9f383ef0d7105UL, 0x58654fef9d2e0412UL, -+ /* 161 */ 0xa905c4ffbe0e8e26UL, 0x942de5df9b31816eUL, -+ 0x497d723f802e88e1UL, 0x30684dea602f408dUL, -+ /* 162 */ 
0x21e5a278a3e6cb34UL, 0xaefb6e6f5b151dc4UL, -+ 0xb30b8e049d77ca15UL, 0x28c3c9cf53b98981UL, -+ /* 163 */ 0x287fb721556cdd2aUL, 0x0d317ca897022274UL, -+ 0x7468c7423a543258UL, 0x4a7f11464eb5642fUL, -+ /* 164 */ 0xa237a4774d193aa6UL, 0xd865986ea92129a1UL, -+ 0x24c515ecf87c1a88UL, 0x604003575f39f5ebUL, -+ /* 165 */ 0x47b9f189570a9b27UL, 0x2b98cede465e4b78UL, -+ 0x026df551dbb85c20UL, 0x74fcd91047e21901UL, -+ /* 166 */ 0x13e2a90a23c1bfa3UL, 0x0cb0074e478519f6UL, -+ 0x5ff1cbbe3af6cf44UL, 0x67fe5438be812dbeUL, -+ /* 167 */ 0xd13cf64fa40f05b0UL, 0x054dfb2f32283787UL, -+ 0x4173915b7f0d2aeaUL, 0x482f144f1f610d4eUL, -+ /* 168 */ 0xf6210201b47f8234UL, 0x5d0ae1929e70b990UL, -+ 0xdcd7f455b049567cUL, 0x7e93d0f1f0916f01UL, -+ /* 169 */ 0xdd79cbf18a7db4faUL, 0xbe8391bf6f74c62fUL, -+ 0x027145d14b8291bdUL, 0x585a73ea2cbf1705UL, -+ /* 170 */ 0x485ca03e928a0db2UL, 0x10fc01a5742857e7UL, -+ 0x2f482edbd6d551a7UL, 0x0f0433b5048fdb8aUL, -+ /* 171 */ 0x60da2e8dd7dc6247UL, 0x88b4c9d38cd4819aUL, -+ 0x13033ac001f66697UL, 0x273b24fe3b367d75UL, -+ /* 172 */ 0xc6e8f66a31b3b9d4UL, 0x281514a494df49d5UL, -+ 0xd1726fdfc8b23da7UL, 0x4b3ae7d103dee548UL, -+ /* 173 */ 0xc6256e19ce4b9d7eUL, 0xff5c5cf186e3c61cUL, -+ 0xacc63ca34b8ec145UL, 0x74621888fee66574UL, -+ /* 174 */ 0x956f409645290a1eUL, 0xef0bf8e3263a962eUL, -+ 0xed6a50eb5ec2647bUL, 0x0694283a9dca7502UL, -+ /* 175 */ 0x769b963643a2dcd1UL, 0x42b7c8ea09fc5353UL, -+ 0x4f002aee13397eabUL, 0x63005e2c19b7d63aUL, -+ /* 176 */ 0xca6736da63023beaUL, 0x966c7f6db12a99b7UL, -+ 0xace09390c537c5e1UL, 0x0b696063a1aa89eeUL, -+ /* 177 */ 0xebb03e97288c56e5UL, 0x432a9f9f938c8be8UL, -+ 0xa6a5a93d5b717f71UL, 0x1a5fb4c3e18f9d97UL, -+ /* 178 */ 0x1c94e7ad1c60cdceUL, 0xee202a43fc02c4a0UL, -+ 0x8dafe4d867c46a20UL, 0x0a10263c8ac27b58UL, -+ /* 179 */ 0xd0dea9dfe4432a4aUL, 0x856af87bbe9277c5UL, -+ 0xce8472acc212c71aUL, 0x6f151b6d9bbb1e91UL, -+ /* 180 */ 0x26776c527ceed56aUL, 0x7d211cb7fbf8faecUL, -+ 0x37ae66a6fd4609ccUL, 0x1f81b702d2770c42UL, -+ /* 181 */ 0x2fb0b057eac58392UL, 
0xe1dd89fe29744e9dUL, -+ 0xc964f8eb17beb4f8UL, 0x29571073c9a2d41eUL, -+ /* 182 */ 0xa948a18981c0e254UL, 0x2df6369b65b22830UL, -+ 0xa33eb2d75fcfd3c6UL, 0x078cd6ec4199a01fUL, -+ /* 183 */ 0x4a584a41ad900d2fUL, 0x32142b78e2c74c52UL, -+ 0x68c4e8338431c978UL, 0x7f69ea9008689fc2UL, -+ /* 184 */ 0x52f2c81e46a38265UL, 0xfd78072d04a832fdUL, -+ 0x8cd7d5fa25359e94UL, 0x4de71b7454cc29d2UL, -+ /* 185 */ 0x42eb60ad1eda6ac9UL, 0x0aad37dfdbc09c3aUL, -+ 0x81004b71e33cc191UL, 0x44e6be345122803cUL, -+ /* 186 */ 0x03fe8388ba1920dbUL, 0xf5d57c32150db008UL, -+ 0x49c8c4281af60c29UL, 0x21edb518de701aeeUL, -+ /* 187 */ 0x7fb63e418f06dc99UL, 0xa4460d99c166d7b8UL, -+ 0x24dd5248ce520a83UL, 0x5ec3ad712b928358UL, -+ /* 188 */ 0x15022a5fbd17930fUL, 0xa4f64a77d82570e3UL, -+ 0x12bc8d6915783712UL, 0x498194c0fc620abbUL, -+ /* 189 */ 0x38a2d9d255686c82UL, 0x785c6bd9193e21f0UL, -+ 0xe4d5c81ab24a5484UL, 0x56307860b2e20989UL, -+ /* 190 */ 0x429d55f78b4d74c4UL, 0x22f1834643350131UL, -+ 0x1e60c24598c71fffUL, 0x59f2f014979983efUL, -+ /* 191 */ 0x46a47d56eb494a44UL, 0x3e22a854d636a18eUL, -+ 0xb346e15274491c3bUL, 0x2ceafd4e5390cde7UL, -+ /* 192 */ 0xba8a8538be0d6675UL, 0x4b9074bb50818e23UL, -+ 0xcbdab89085d304c3UL, 0x61a24fe0e56192c4UL, -+ /* 193 */ 0xcb7615e6db525bcbUL, 0xdd7d8c35a567e4caUL, -+ 0xe6b4153acafcdd69UL, 0x2d668e097f3c9766UL, -+ /* 194 */ 0xa57e7e265ce55ef0UL, 0x5d9f4e527cd4b967UL, -+ 0xfbc83606492fd1e5UL, 0x090d52beb7c3f7aeUL, -+ /* 195 */ 0x09b9515a1e7b4d7cUL, 0x1f266a2599da44c0UL, -+ 0xa1c49548e2c55504UL, 0x7ef04287126f15ccUL, -+ /* 196 */ 0xfed1659dbd30ef15UL, 0x8b4ab9eec4e0277bUL, -+ 0x884d6236a5df3291UL, 0x1fd96ea6bf5cf788UL, -+ /* 197 */ 0x42a161981f190d9aUL, 0x61d849507e6052c1UL, -+ 0x9fe113bf285a2cd5UL, 0x7c22d676dbad85d8UL, -+ /* 198 */ 0x82e770ed2bfbd27dUL, 0x4c05b2ece996f5a5UL, -+ 0xcd40a9c2b0900150UL, 0x5895319213d9bf64UL, -+ /* 199 */ 0xe7cc5d703fea2e08UL, 0xb50c491258e2188cUL, -+ 0xcce30baa48205bf0UL, 0x537c659ccfa32d62UL, -+ /* 200 */ 0x37b6623a98cfc088UL, 0xfe9bed1fa4d6aca4UL, 
-+ 0x04d29b8e56a8d1b0UL, 0x725f71c40b519575UL, -+ /* 201 */ 0x28c7f89cd0339ce6UL, 0x8367b14469ddc18bUL, -+ 0x883ada83a6a1652cUL, 0x585f1974034d6c17UL, -+ /* 202 */ 0x89cfb266f1b19188UL, 0xe63b4863e7c35217UL, -+ 0xd88c9da6b4c0526aUL, 0x3e035c9df0954635UL, -+ /* 203 */ 0xdd9d5412fb45de9dUL, 0xdd684532e4cff40dUL, -+ 0x4b5c999b151d671cUL, 0x2d8c2cc811e7f690UL, -+ /* 204 */ 0x7f54be1d90055d40UL, 0xa464c5df464aaf40UL, -+ 0x33979624f0e917beUL, 0x2c018dc527356b30UL, -+ /* 205 */ 0xa5415024e330b3d4UL, 0x73ff3d96691652d3UL, -+ 0x94ec42c4ef9b59f1UL, 0x0747201618d08e5aUL, -+ /* 206 */ 0x4d6ca48aca411c53UL, 0x66415f2fcfa66119UL, -+ 0x9c4dd40051e227ffUL, 0x59810bc09a02f7ebUL, -+ /* 207 */ 0x2a7eb171b3dc101dUL, 0x441c5ab99ffef68eUL, -+ 0x32025c9b93b359eaUL, 0x5e8ce0a71e9d112fUL, -+ /* 208 */ 0xbfcccb92429503fdUL, 0xd271ba752f095d55UL, -+ 0x345ead5e972d091eUL, 0x18c8df11a83103baUL, -+ /* 209 */ 0x90cd949a9aed0f4cUL, 0xc5d1f4cb6660e37eUL, -+ 0xb8cac52d56c52e0bUL, 0x6e42e400c5808e0dUL, -+ /* 210 */ 0xa3b46966eeaefd23UL, 0x0c4f1f0be39ecdcaUL, -+ 0x189dc8c9d683a51dUL, 0x51f27f054c09351bUL, -+ /* 211 */ 0x4c487ccd2a320682UL, 0x587ea95bb3df1c96UL, -+ 0xc8ccf79e555cb8e8UL, 0x547dc829a206d73dUL, -+ /* 212 */ 0xb822a6cd80c39b06UL, 0xe96d54732000d4c6UL, -+ 0x28535b6f91463b4dUL, 0x228f4660e2486e1dUL, -+ /* 213 */ 0x98799538de8d3abfUL, 0x8cd8330045ebca6eUL, -+ 0x79952a008221e738UL, 0x4322e1a7535cd2bbUL, -+ /* 214 */ 0xb114c11819d1801cUL, 0x2016e4d84f3f5ec7UL, -+ 0xdd0e2df409260f4cUL, 0x5ec362c0ae5f7266UL, -+ /* 215 */ 0xc0462b18b8b2b4eeUL, 0x7cc8d950274d1afbUL, -+ 0xf25f7105436b02d2UL, 0x43bbf8dcbff9ccd3UL, -+ /* 216 */ 0xb6ad1767a039e9dfUL, 0xb0714da8f69d3583UL, -+ 0x5e55fa18b42931f5UL, 0x4ed5558f33c60961UL, -+ /* 217 */ 0x1fe37901c647a5ddUL, 0x593ddf1f8081d357UL, -+ 0x0249a4fd813fd7a6UL, 0x69acca274e9caf61UL, -+ /* 218 */ 0x047ba3ea330721c9UL, 0x83423fc20e7e1ea0UL, -+ 0x1df4c0af01314a60UL, 0x09a62dab89289527UL, -+ /* 219 */ 0xa5b325a49cc6cb00UL, 0xe94b5dc654b56cb6UL, -+ 
0x3be28779adc994a0UL, 0x4296e8f8ba3a4aadUL, -+ /* 220 */ 0x328689761e451eabUL, 0x2e4d598bff59594aUL, -+ 0x49b96853d7a7084aUL, 0x4980a319601420a8UL, -+ /* 221 */ 0x9565b9e12f552c42UL, 0x8a5318db7100fe96UL, -+ 0x05c90b4d43add0d7UL, 0x538b4cd66a5d4edaUL, -+ /* 222 */ 0xf4e94fc3e89f039fUL, 0x592c9af26f618045UL, -+ 0x08a36eb5fd4b9550UL, 0x25fffaf6c2ed1419UL, -+ /* 223 */ 0x34434459cc79d354UL, 0xeeecbfb4b1d5476bUL, -+ 0xddeb34a061615d99UL, 0x5129cecceb64b773UL, -+ /* 224 */ 0xee43215894993520UL, 0x772f9c7cf14c0b3bUL, -+ 0xd2e2fce306bedad5UL, 0x715f42b546f06a97UL, -+ /* 225 */ 0x434ecdceda5b5f1aUL, 0x0da17115a49741a9UL, -+ 0x680bd77c73edad2eUL, 0x487c02354edd9041UL, -+ /* 226 */ 0xb8efeff3a70ed9c4UL, 0x56a32aa3e857e302UL, -+ 0xdf3a68bd48a2a5a0UL, 0x07f650b73176c444UL, -+ /* 227 */ 0xe38b9b1626e0ccb1UL, 0x79e053c18b09fb36UL, -+ 0x56d90319c9f94964UL, 0x1ca941e7ac9ff5c4UL, -+ /* 228 */ 0x49c4df29162fa0bbUL, 0x8488cf3282b33305UL, -+ 0x95dfda14cabb437dUL, 0x3391f78264d5ad86UL, -+ /* 229 */ 0x729ae06ae2b5095dUL, 0xd58a58d73259a946UL, -+ 0xe9834262d13921edUL, 0x27fedafaa54bb592UL, -+ /* 230 */ 0xa99dc5b829ad48bbUL, 0x5f025742499ee260UL, -+ 0x802c8ecd5d7513fdUL, 0x78ceb3ef3f6dd938UL, -+ /* 231 */ 0xc342f44f8a135d94UL, 0x7b9edb44828cdda3UL, -+ 0x9436d11a0537cfe7UL, 0x5064b164ec1ab4c8UL, -+ /* 232 */ 0x7020eccfd37eb2fcUL, 0x1f31ea3ed90d25fcUL, -+ 0x1b930d7bdfa1bb34UL, 0x5344467a48113044UL, -+ /* 233 */ 0x70073170f25e6dfbUL, 0xe385dc1a50114cc8UL, -+ 0x2348698ac8fc4f00UL, 0x2a77a55284dd40d8UL, -+ /* 234 */ 0xfe06afe0c98c6ce4UL, 0xc235df96dddfd6e4UL, -+ 0x1428d01e33bf1ed3UL, 0x785768ec9300bdafUL, -+ /* 235 */ 0x9702e57a91deb63bUL, 0x61bdb8bfe5ce8b80UL, -+ 0x645b426f3d1d58acUL, 0x4804a82227a557bcUL, -+ /* 236 */ 0x8e57048ab44d2601UL, 0x68d6501a4b3a6935UL, -+ 0xc39c9ec3f9e1c293UL, 0x4172f257d4de63e2UL, -+ /* 237 */ 0xd368b450330c6401UL, 0x040d3017418f2391UL, -+ 0x2c34bb6090b7d90dUL, 0x16f649228fdfd51fUL, -+ /* 238 */ 0xbea6818e2b928ef5UL, 0xe28ccf91cdc11e72UL, -+ 0x594aaa68e77a36cdUL, 
0x313034806c7ffd0fUL, -+ /* 239 */ 0x8a9d27ac2249bd65UL, 0x19a3b464018e9512UL, -+ 0xc26ccff352b37ec7UL, 0x056f68341d797b21UL, -+ /* 240 */ 0x5e79d6757efd2327UL, 0xfabdbcb6553afe15UL, -+ 0xd3e7222c6eaf5a60UL, 0x7046c76d4dae743bUL, -+ /* 241 */ 0x660be872b18d4a55UL, 0x19992518574e1496UL, -+ 0xc103053a302bdcbbUL, 0x3ed8e9800b218e8eUL, -+ /* 242 */ 0x7b0b9239fa75e03eUL, 0xefe9fb684633c083UL, -+ 0x98a35fbe391a7793UL, 0x6065510fe2d0fe34UL, -+ /* 243 */ 0x55cb668548abad0cUL, 0xb4584548da87e527UL, -+ 0x2c43ecea0107c1ddUL, 0x526028809372de35UL, -+ /* 244 */ 0x3415c56af9213b1fUL, 0x5bee1a4d017e98dbUL, -+ 0x13f6b105b5cf709bUL, 0x5ff20e3482b29ab6UL, -+ /* 245 */ 0x0aa29c75cc2e6c90UL, 0xfc7d73ca3a70e206UL, -+ 0x899fc38fc4b5c515UL, 0x250386b124ffc207UL, -+ /* 246 */ 0x54ea28d5ae3d2b56UL, 0x9913149dd6de60ceUL, -+ 0x16694fc58f06d6c1UL, 0x46b23975eb018fc7UL, -+ /* 247 */ 0x470a6a0fb4b7b4e2UL, 0x5d92475a8f7253deUL, -+ 0xabeee5b52fbd3adbUL, 0x7fa20801a0806968UL, -+ /* 248 */ 0x76f3faf19f7714d2UL, 0xb3e840c12f4660c3UL, -+ 0x0fb4cd8df212744eUL, 0x4b065a251d3a2dd2UL, -+ /* 249 */ 0x5cebde383d77cd4aUL, 0x6adf39df882c9cb1UL, -+ 0xa2dd242eb09af759UL, 0x3147c0e50e5f6422UL, -+ /* 250 */ 0x164ca5101d1350dbUL, 0xf8d13479c33fc962UL, -+ 0xe640ce4d13e5da08UL, 0x4bdee0c45061f8baUL, -+ /* 251 */ 0xd7c46dc1a4edb1c9UL, 0x5514d7b6437fd98aUL, -+ 0x58942f6bb2a1c00bUL, 0x2dffb2ab1d70710eUL, -+ /* 252 */ 0xccdfcf2fc18b6d68UL, 0xa8ebcba8b7806167UL, -+ 0x980697f95e2937e3UL, 0x02fbba1cd0126e8cUL -+}; -+ -+/* c is two 512-bit products: c0[0:7]=a0[0:3]*b0[0:3] and c1[8:15]=a1[4:7]*b1[4:7] -+ * a is two 256-bit integers: a0[0:3] and a1[4:7] -+ * b is two 256-bit integers: b0[0:3] and b1[4:7] -+ */ -+static void mul2_256x256_integer_adx(u64 *const c, const u64 *const a, -+ const u64 *const b) -+{ -+ asm volatile( -+ "xorl %%r14d, %%r14d ;" -+ "movq (%1), %%rdx; " /* A[0] */ -+ "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */ -+ "xorl %%r10d, %%r10d ;" -+ "movq %%r8, (%0) ;" -+ "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] 
*/ -+ "adox %%r10, %%r15 ;" -+ "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */ -+ "adox %%r8, %%rax ;" -+ "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */ -+ "adox %%r10, %%rbx ;" -+ /******************************************/ -+ "adox %%r14, %%rcx ;" -+ -+ "movq 8(%1), %%rdx; " /* A[1] */ -+ "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */ -+ "adox %%r15, %%r8 ;" -+ "movq %%r8, 8(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */ -+ "adox %%r10, %%r9 ;" -+ "adcx %%r9, %%rax ;" -+ "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */ -+ "adox %%r8, %%r11 ;" -+ "adcx %%r11, %%rbx ;" -+ "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */ -+ "adox %%r10, %%r13 ;" -+ "adcx %%r13, %%rcx ;" -+ /******************************************/ -+ "adox %%r14, %%r15 ;" -+ "adcx %%r14, %%r15 ;" -+ -+ "movq 16(%1), %%rdx; " /* A[2] */ -+ "xorl %%r10d, %%r10d ;" -+ "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */ -+ "adox %%rax, %%r8 ;" -+ "movq %%r8, 16(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */ -+ "adox %%r10, %%r9 ;" -+ "adcx %%r9, %%rbx ;" -+ "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */ -+ "adox %%r8, %%r11 ;" -+ "adcx %%r11, %%rcx ;" -+ "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */ -+ "adox %%r10, %%r13 ;" -+ "adcx %%r13, %%r15 ;" -+ /******************************************/ -+ "adox %%r14, %%rax ;" -+ "adcx %%r14, %%rax ;" -+ -+ "movq 24(%1), %%rdx; " /* A[3] */ -+ "xorl %%r10d, %%r10d ;" -+ "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */ -+ "adox %%rbx, %%r8 ;" -+ "movq %%r8, 24(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */ -+ "adox %%r10, %%r9 ;" -+ "adcx %%r9, %%rcx ;" -+ "movq %%rcx, 32(%0) ;" -+ "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */ -+ "adox %%r8, %%r11 ;" -+ "adcx %%r11, %%r15 ;" -+ "movq %%r15, 40(%0) ;" -+ "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */ -+ "adox %%r10, %%r13 ;" -+ "adcx %%r13, %%rax ;" -+ "movq %%rax, 48(%0) ;" -+ /******************************************/ -+ "adox %%r14, %%rbx ;" -+ "adcx %%r14, %%rbx ;" -+ "movq %%rbx, 56(%0) ;" -+ -+ "movq 32(%1), 
%%rdx; " /* C[0] */ -+ "mulx 32(%2), %%r8, %%r15; " /* C[0]*D[0] */ -+ "xorl %%r10d, %%r10d ;" -+ "movq %%r8, 64(%0);" -+ "mulx 40(%2), %%r10, %%rax; " /* C[0]*D[1] */ -+ "adox %%r10, %%r15 ;" -+ "mulx 48(%2), %%r8, %%rbx; " /* C[0]*D[2] */ -+ "adox %%r8, %%rax ;" -+ "mulx 56(%2), %%r10, %%rcx; " /* C[0]*D[3] */ -+ "adox %%r10, %%rbx ;" -+ /******************************************/ -+ "adox %%r14, %%rcx ;" -+ -+ "movq 40(%1), %%rdx; " /* C[1] */ -+ "xorl %%r10d, %%r10d ;" -+ "mulx 32(%2), %%r8, %%r9; " /* C[1]*D[0] */ -+ "adox %%r15, %%r8 ;" -+ "movq %%r8, 72(%0);" -+ "mulx 40(%2), %%r10, %%r11; " /* C[1]*D[1] */ -+ "adox %%r10, %%r9 ;" -+ "adcx %%r9, %%rax ;" -+ "mulx 48(%2), %%r8, %%r13; " /* C[1]*D[2] */ -+ "adox %%r8, %%r11 ;" -+ "adcx %%r11, %%rbx ;" -+ "mulx 56(%2), %%r10, %%r15; " /* C[1]*D[3] */ -+ "adox %%r10, %%r13 ;" -+ "adcx %%r13, %%rcx ;" -+ /******************************************/ -+ "adox %%r14, %%r15 ;" -+ "adcx %%r14, %%r15 ;" -+ -+ "movq 48(%1), %%rdx; " /* C[2] */ -+ "xorl %%r10d, %%r10d ;" -+ "mulx 32(%2), %%r8, %%r9; " /* C[2]*D[0] */ -+ "adox %%rax, %%r8 ;" -+ "movq %%r8, 80(%0);" -+ "mulx 40(%2), %%r10, %%r11; " /* C[2]*D[1] */ -+ "adox %%r10, %%r9 ;" -+ "adcx %%r9, %%rbx ;" -+ "mulx 48(%2), %%r8, %%r13; " /* C[2]*D[2] */ -+ "adox %%r8, %%r11 ;" -+ "adcx %%r11, %%rcx ;" -+ "mulx 56(%2), %%r10, %%rax; " /* C[2]*D[3] */ -+ "adox %%r10, %%r13 ;" -+ "adcx %%r13, %%r15 ;" -+ /******************************************/ -+ "adox %%r14, %%rax ;" -+ "adcx %%r14, %%rax ;" -+ -+ "movq 56(%1), %%rdx; " /* C[3] */ -+ "xorl %%r10d, %%r10d ;" -+ "mulx 32(%2), %%r8, %%r9; " /* C[3]*D[0] */ -+ "adox %%rbx, %%r8 ;" -+ "movq %%r8, 88(%0);" -+ "mulx 40(%2), %%r10, %%r11; " /* C[3]*D[1] */ -+ "adox %%r10, %%r9 ;" -+ "adcx %%r9, %%rcx ;" -+ "movq %%rcx, 96(%0) ;" -+ "mulx 48(%2), %%r8, %%r13; " /* C[3]*D[2] */ -+ "adox %%r8, %%r11 ;" -+ "adcx %%r11, %%r15 ;" -+ "movq %%r15, 104(%0) ;" -+ "mulx 56(%2), %%r10, %%rbx; " /* C[3]*D[3] */ -+ "adox %%r10, %%r13 
;" -+ "adcx %%r13, %%rax ;" -+ "movq %%rax, 112(%0) ;" -+ /******************************************/ -+ "adox %%r14, %%rbx ;" -+ "adcx %%r14, %%rbx ;" -+ "movq %%rbx, 120(%0) ;" -+ : -+ : "r"(c), "r"(a), "r"(b) -+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -+ "%r10", "%r11", "%r13", "%r14", "%r15"); -+} -+ -+static void mul2_256x256_integer_bmi2(u64 *const c, const u64 *const a, -+ const u64 *const b) -+{ -+ asm volatile( -+ "movq (%1), %%rdx; " /* A[0] */ -+ "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */ -+ "movq %%r8, (%0) ;" -+ "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] */ -+ "addq %%r10, %%r15 ;" -+ "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */ -+ "adcq %%r8, %%rax ;" -+ "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */ -+ "adcq %%r10, %%rbx ;" -+ /******************************************/ -+ "adcq $0, %%rcx ;" -+ -+ "movq 8(%1), %%rdx; " /* A[1] */ -+ "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */ -+ "addq %%r15, %%r8 ;" -+ "movq %%r8, 8(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */ -+ "adcq %%r10, %%r9 ;" -+ "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */ -+ "adcq %%r8, %%r11 ;" -+ "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */ -+ "adcq %%r10, %%r13 ;" -+ /******************************************/ -+ "adcq $0, %%r15 ;" -+ -+ "addq %%r9, %%rax ;" -+ "adcq %%r11, %%rbx ;" -+ "adcq %%r13, %%rcx ;" -+ "adcq $0, %%r15 ;" -+ -+ "movq 16(%1), %%rdx; " /* A[2] */ -+ "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */ -+ "addq %%rax, %%r8 ;" -+ "movq %%r8, 16(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */ -+ "adcq %%r10, %%r9 ;" -+ "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */ -+ "adcq %%r8, %%r11 ;" -+ "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */ -+ "adcq %%r10, %%r13 ;" -+ /******************************************/ -+ "adcq $0, %%rax ;" -+ -+ "addq %%r9, %%rbx ;" -+ "adcq %%r11, %%rcx ;" -+ "adcq %%r13, %%r15 ;" -+ "adcq $0, %%rax ;" -+ -+ "movq 24(%1), %%rdx; " /* A[3] */ -+ "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */ -+ "addq %%rbx, %%r8 ;" -+ "movq 
%%r8, 24(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */ -+ "adcq %%r10, %%r9 ;" -+ "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */ -+ "adcq %%r8, %%r11 ;" -+ "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */ -+ "adcq %%r10, %%r13 ;" -+ /******************************************/ -+ "adcq $0, %%rbx ;" -+ -+ "addq %%r9, %%rcx ;" -+ "movq %%rcx, 32(%0) ;" -+ "adcq %%r11, %%r15 ;" -+ "movq %%r15, 40(%0) ;" -+ "adcq %%r13, %%rax ;" -+ "movq %%rax, 48(%0) ;" -+ "adcq $0, %%rbx ;" -+ "movq %%rbx, 56(%0) ;" -+ -+ "movq 32(%1), %%rdx; " /* C[0] */ -+ "mulx 32(%2), %%r8, %%r15; " /* C[0]*D[0] */ -+ "movq %%r8, 64(%0) ;" -+ "mulx 40(%2), %%r10, %%rax; " /* C[0]*D[1] */ -+ "addq %%r10, %%r15 ;" -+ "mulx 48(%2), %%r8, %%rbx; " /* C[0]*D[2] */ -+ "adcq %%r8, %%rax ;" -+ "mulx 56(%2), %%r10, %%rcx; " /* C[0]*D[3] */ -+ "adcq %%r10, %%rbx ;" -+ /******************************************/ -+ "adcq $0, %%rcx ;" -+ -+ "movq 40(%1), %%rdx; " /* C[1] */ -+ "mulx 32(%2), %%r8, %%r9; " /* C[1]*D[0] */ -+ "addq %%r15, %%r8 ;" -+ "movq %%r8, 72(%0) ;" -+ "mulx 40(%2), %%r10, %%r11; " /* C[1]*D[1] */ -+ "adcq %%r10, %%r9 ;" -+ "mulx 48(%2), %%r8, %%r13; " /* C[1]*D[2] */ -+ "adcq %%r8, %%r11 ;" -+ "mulx 56(%2), %%r10, %%r15; " /* C[1]*D[3] */ -+ "adcq %%r10, %%r13 ;" -+ /******************************************/ -+ "adcq $0, %%r15 ;" -+ -+ "addq %%r9, %%rax ;" -+ "adcq %%r11, %%rbx ;" -+ "adcq %%r13, %%rcx ;" -+ "adcq $0, %%r15 ;" -+ -+ "movq 48(%1), %%rdx; " /* C[2] */ -+ "mulx 32(%2), %%r8, %%r9; " /* C[2]*D[0] */ -+ "addq %%rax, %%r8 ;" -+ "movq %%r8, 80(%0) ;" -+ "mulx 40(%2), %%r10, %%r11; " /* C[2]*D[1] */ -+ "adcq %%r10, %%r9 ;" -+ "mulx 48(%2), %%r8, %%r13; " /* C[2]*D[2] */ -+ "adcq %%r8, %%r11 ;" -+ "mulx 56(%2), %%r10, %%rax; " /* C[2]*D[3] */ -+ "adcq %%r10, %%r13 ;" -+ /******************************************/ -+ "adcq $0, %%rax ;" -+ -+ "addq %%r9, %%rbx ;" -+ "adcq %%r11, %%rcx ;" -+ "adcq %%r13, %%r15 ;" -+ "adcq $0, %%rax ;" -+ -+ "movq 56(%1), %%rdx; " /* C[3] */ 
-+ "mulx 32(%2), %%r8, %%r9; " /* C[3]*D[0] */ -+ "addq %%rbx, %%r8 ;" -+ "movq %%r8, 88(%0) ;" -+ "mulx 40(%2), %%r10, %%r11; " /* C[3]*D[1] */ -+ "adcq %%r10, %%r9 ;" -+ "mulx 48(%2), %%r8, %%r13; " /* C[3]*D[2] */ -+ "adcq %%r8, %%r11 ;" -+ "mulx 56(%2), %%r10, %%rbx; " /* C[3]*D[3] */ -+ "adcq %%r10, %%r13 ;" -+ /******************************************/ -+ "adcq $0, %%rbx ;" -+ -+ "addq %%r9, %%rcx ;" -+ "movq %%rcx, 96(%0) ;" -+ "adcq %%r11, %%r15 ;" -+ "movq %%r15, 104(%0) ;" -+ "adcq %%r13, %%rax ;" -+ "movq %%rax, 112(%0) ;" -+ "adcq $0, %%rbx ;" -+ "movq %%rbx, 120(%0) ;" -+ : -+ : "r"(c), "r"(a), "r"(b) -+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -+ "%r10", "%r11", "%r13", "%r15"); -+} -+ -+static void sqr2_256x256_integer_adx(u64 *const c, const u64 *const a) -+{ -+ asm volatile( -+ "movq (%1), %%rdx ;" /* A[0] */ -+ "mulx 8(%1), %%r8, %%r14 ;" /* A[1]*A[0] */ -+ "xorl %%r15d, %%r15d;" -+ "mulx 16(%1), %%r9, %%r10 ;" /* A[2]*A[0] */ -+ "adcx %%r14, %%r9 ;" -+ "mulx 24(%1), %%rax, %%rcx ;" /* A[3]*A[0] */ -+ "adcx %%rax, %%r10 ;" -+ "movq 24(%1), %%rdx ;" /* A[3] */ -+ "mulx 8(%1), %%r11, %%rbx ;" /* A[1]*A[3] */ -+ "adcx %%rcx, %%r11 ;" -+ "mulx 16(%1), %%rax, %%r13 ;" /* A[2]*A[3] */ -+ "adcx %%rax, %%rbx ;" -+ "movq 8(%1), %%rdx ;" /* A[1] */ -+ "adcx %%r15, %%r13 ;" -+ "mulx 16(%1), %%rax, %%rcx ;" /* A[2]*A[1] */ -+ "movq $0, %%r14 ;" -+ /******************************************/ -+ "adcx %%r15, %%r14 ;" -+ -+ "xorl %%r15d, %%r15d;" -+ "adox %%rax, %%r10 ;" -+ "adcx %%r8, %%r8 ;" -+ "adox %%rcx, %%r11 ;" -+ "adcx %%r9, %%r9 ;" -+ "adox %%r15, %%rbx ;" -+ "adcx %%r10, %%r10 ;" -+ "adox %%r15, %%r13 ;" -+ "adcx %%r11, %%r11 ;" -+ "adox %%r15, %%r14 ;" -+ "adcx %%rbx, %%rbx ;" -+ "adcx %%r13, %%r13 ;" -+ "adcx %%r14, %%r14 ;" -+ -+ "movq (%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */ -+ /*******************/ -+ "movq %%rax, 0(%0) ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, 8(%0) ;" -+ "movq 8(%1), %%rdx ;" -+ "mulx 
%%rdx, %%rax, %%rcx ;" /* A[1]^2 */ -+ "adcq %%rax, %%r9 ;" -+ "movq %%r9, 16(%0) ;" -+ "adcq %%rcx, %%r10 ;" -+ "movq %%r10, 24(%0) ;" -+ "movq 16(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */ -+ "adcq %%rax, %%r11 ;" -+ "movq %%r11, 32(%0) ;" -+ "adcq %%rcx, %%rbx ;" -+ "movq %%rbx, 40(%0) ;" -+ "movq 24(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */ -+ "adcq %%rax, %%r13 ;" -+ "movq %%r13, 48(%0) ;" -+ "adcq %%rcx, %%r14 ;" -+ "movq %%r14, 56(%0) ;" -+ -+ -+ "movq 32(%1), %%rdx ;" /* B[0] */ -+ "mulx 40(%1), %%r8, %%r14 ;" /* B[1]*B[0] */ -+ "xorl %%r15d, %%r15d;" -+ "mulx 48(%1), %%r9, %%r10 ;" /* B[2]*B[0] */ -+ "adcx %%r14, %%r9 ;" -+ "mulx 56(%1), %%rax, %%rcx ;" /* B[3]*B[0] */ -+ "adcx %%rax, %%r10 ;" -+ "movq 56(%1), %%rdx ;" /* B[3] */ -+ "mulx 40(%1), %%r11, %%rbx ;" /* B[1]*B[3] */ -+ "adcx %%rcx, %%r11 ;" -+ "mulx 48(%1), %%rax, %%r13 ;" /* B[2]*B[3] */ -+ "adcx %%rax, %%rbx ;" -+ "movq 40(%1), %%rdx ;" /* B[1] */ -+ "adcx %%r15, %%r13 ;" -+ "mulx 48(%1), %%rax, %%rcx ;" /* B[2]*B[1] */ -+ "movq $0, %%r14 ;" -+ /******************************************/ -+ "adcx %%r15, %%r14 ;" -+ -+ "xorl %%r15d, %%r15d;" -+ "adox %%rax, %%r10 ;" -+ "adcx %%r8, %%r8 ;" -+ "adox %%rcx, %%r11 ;" -+ "adcx %%r9, %%r9 ;" -+ "adox %%r15, %%rbx ;" -+ "adcx %%r10, %%r10 ;" -+ "adox %%r15, %%r13 ;" -+ "adcx %%r11, %%r11 ;" -+ "adox %%r15, %%r14 ;" -+ "adcx %%rbx, %%rbx ;" -+ "adcx %%r13, %%r13 ;" -+ "adcx %%r14, %%r14 ;" -+ -+ "movq 32(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* B[0]^2 */ -+ /*******************/ -+ "movq %%rax, 64(%0) ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, 72(%0) ;" -+ "movq 40(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* B[1]^2 */ -+ "adcq %%rax, %%r9 ;" -+ "movq %%r9, 80(%0) ;" -+ "adcq %%rcx, %%r10 ;" -+ "movq %%r10, 88(%0) ;" -+ "movq 48(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* B[2]^2 */ -+ "adcq %%rax, %%r11 ;" -+ "movq %%r11, 96(%0) ;" -+ "adcq %%rcx, %%rbx ;" -+ "movq %%rbx, 104(%0) ;" -+ "movq 56(%1), 
%%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* B[3]^2 */ -+ "adcq %%rax, %%r13 ;" -+ "movq %%r13, 112(%0) ;" -+ "adcq %%rcx, %%r14 ;" -+ "movq %%r14, 120(%0) ;" -+ : -+ : "r"(c), "r"(a) -+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -+ "%r10", "%r11", "%r13", "%r14", "%r15"); -+} -+ -+static void sqr2_256x256_integer_bmi2(u64 *const c, const u64 *const a) -+{ -+ asm volatile( -+ "movq 8(%1), %%rdx ;" /* A[1] */ -+ "mulx (%1), %%r8, %%r9 ;" /* A[0]*A[1] */ -+ "mulx 16(%1), %%r10, %%r11 ;" /* A[2]*A[1] */ -+ "mulx 24(%1), %%rcx, %%r14 ;" /* A[3]*A[1] */ -+ -+ "movq 16(%1), %%rdx ;" /* A[2] */ -+ "mulx 24(%1), %%r15, %%r13 ;" /* A[3]*A[2] */ -+ "mulx (%1), %%rax, %%rdx ;" /* A[0]*A[2] */ -+ -+ "addq %%rax, %%r9 ;" -+ "adcq %%rdx, %%r10 ;" -+ "adcq %%rcx, %%r11 ;" -+ "adcq %%r14, %%r15 ;" -+ "adcq $0, %%r13 ;" -+ "movq $0, %%r14 ;" -+ "adcq $0, %%r14 ;" -+ -+ "movq (%1), %%rdx ;" /* A[0] */ -+ "mulx 24(%1), %%rax, %%rcx ;" /* A[0]*A[3] */ -+ -+ "addq %%rax, %%r10 ;" -+ "adcq %%rcx, %%r11 ;" -+ "adcq $0, %%r15 ;" -+ "adcq $0, %%r13 ;" -+ "adcq $0, %%r14 ;" -+ -+ "shldq $1, %%r13, %%r14 ;" -+ "shldq $1, %%r15, %%r13 ;" -+ "shldq $1, %%r11, %%r15 ;" -+ "shldq $1, %%r10, %%r11 ;" -+ "shldq $1, %%r9, %%r10 ;" -+ "shldq $1, %%r8, %%r9 ;" -+ "shlq $1, %%r8 ;" -+ -+ /*******************/ -+ "mulx %%rdx, %%rax, %%rcx ; " /* A[0]^2 */ -+ /*******************/ -+ "movq %%rax, 0(%0) ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, 8(%0) ;" -+ "movq 8(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ; " /* A[1]^2 */ -+ "adcq %%rax, %%r9 ;" -+ "movq %%r9, 16(%0) ;" -+ "adcq %%rcx, %%r10 ;" -+ "movq %%r10, 24(%0) ;" -+ "movq 16(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ; " /* A[2]^2 */ -+ "adcq %%rax, %%r11 ;" -+ "movq %%r11, 32(%0) ;" -+ "adcq %%rcx, %%r15 ;" -+ "movq %%r15, 40(%0) ;" -+ "movq 24(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ; " /* A[3]^2 */ -+ "adcq %%rax, %%r13 ;" -+ "movq %%r13, 48(%0) ;" -+ "adcq %%rcx, %%r14 ;" -+ "movq %%r14, 56(%0) ;" -+ -+ "movq 40(%1), 
%%rdx ;" /* B[1] */ -+ "mulx 32(%1), %%r8, %%r9 ;" /* B[0]*B[1] */ -+ "mulx 48(%1), %%r10, %%r11 ;" /* B[2]*B[1] */ -+ "mulx 56(%1), %%rcx, %%r14 ;" /* B[3]*B[1] */ -+ -+ "movq 48(%1), %%rdx ;" /* B[2] */ -+ "mulx 56(%1), %%r15, %%r13 ;" /* B[3]*B[2] */ -+ "mulx 32(%1), %%rax, %%rdx ;" /* B[0]*B[2] */ -+ -+ "addq %%rax, %%r9 ;" -+ "adcq %%rdx, %%r10 ;" -+ "adcq %%rcx, %%r11 ;" -+ "adcq %%r14, %%r15 ;" -+ "adcq $0, %%r13 ;" -+ "movq $0, %%r14 ;" -+ "adcq $0, %%r14 ;" -+ -+ "movq 32(%1), %%rdx ;" /* B[0] */ -+ "mulx 56(%1), %%rax, %%rcx ;" /* B[0]*B[3] */ -+ -+ "addq %%rax, %%r10 ;" -+ "adcq %%rcx, %%r11 ;" -+ "adcq $0, %%r15 ;" -+ "adcq $0, %%r13 ;" -+ "adcq $0, %%r14 ;" -+ -+ "shldq $1, %%r13, %%r14 ;" -+ "shldq $1, %%r15, %%r13 ;" -+ "shldq $1, %%r11, %%r15 ;" -+ "shldq $1, %%r10, %%r11 ;" -+ "shldq $1, %%r9, %%r10 ;" -+ "shldq $1, %%r8, %%r9 ;" -+ "shlq $1, %%r8 ;" -+ -+ /*******************/ -+ "mulx %%rdx, %%rax, %%rcx ; " /* B[0]^2 */ -+ /*******************/ -+ "movq %%rax, 64(%0) ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, 72(%0) ;" -+ "movq 40(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ; " /* B[1]^2 */ -+ "adcq %%rax, %%r9 ;" -+ "movq %%r9, 80(%0) ;" -+ "adcq %%rcx, %%r10 ;" -+ "movq %%r10, 88(%0) ;" -+ "movq 48(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ; " /* B[2]^2 */ -+ "adcq %%rax, %%r11 ;" -+ "movq %%r11, 96(%0) ;" -+ "adcq %%rcx, %%r15 ;" -+ "movq %%r15, 104(%0) ;" -+ "movq 56(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ; " /* B[3]^2 */ -+ "adcq %%rax, %%r13 ;" -+ "movq %%r13, 112(%0) ;" -+ "adcq %%rcx, %%r14 ;" -+ "movq %%r14, 120(%0) ;" -+ : -+ : "r"(c), "r"(a) -+ : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", -+ "%r11", "%r13", "%r14", "%r15"); -+} -+ -+static void red_eltfp25519_2w_adx(u64 *const c, const u64 *const a) -+{ -+ asm volatile( -+ "movl $38, %%edx; " /* 2*c = 38 = 2^256 */ -+ "mulx 32(%1), %%r8, %%r10; " /* c*C[4] */ -+ "xorl %%ebx, %%ebx ;" -+ "adox (%1), %%r8 ;" -+ "mulx 40(%1), %%r9, %%r11; " /* c*C[5] */ -+ "adcx 
%%r10, %%r9 ;" -+ "adox 8(%1), %%r9 ;" -+ "mulx 48(%1), %%r10, %%rax; " /* c*C[6] */ -+ "adcx %%r11, %%r10 ;" -+ "adox 16(%1), %%r10 ;" -+ "mulx 56(%1), %%r11, %%rcx; " /* c*C[7] */ -+ "adcx %%rax, %%r11 ;" -+ "adox 24(%1), %%r11 ;" -+ /***************************************/ -+ "adcx %%rbx, %%rcx ;" -+ "adox %%rbx, %%rcx ;" -+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */ -+ "adcx %%rcx, %%r8 ;" -+ "adcx %%rbx, %%r9 ;" -+ "movq %%r9, 8(%0) ;" -+ "adcx %%rbx, %%r10 ;" -+ "movq %%r10, 16(%0) ;" -+ "adcx %%rbx, %%r11 ;" -+ "movq %%r11, 24(%0) ;" -+ "mov $0, %%ecx ;" -+ "cmovc %%edx, %%ecx ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, (%0) ;" -+ -+ "mulx 96(%1), %%r8, %%r10; " /* c*C[4] */ -+ "xorl %%ebx, %%ebx ;" -+ "adox 64(%1), %%r8 ;" -+ "mulx 104(%1), %%r9, %%r11; " /* c*C[5] */ -+ "adcx %%r10, %%r9 ;" -+ "adox 72(%1), %%r9 ;" -+ "mulx 112(%1), %%r10, %%rax; " /* c*C[6] */ -+ "adcx %%r11, %%r10 ;" -+ "adox 80(%1), %%r10 ;" -+ "mulx 120(%1), %%r11, %%rcx; " /* c*C[7] */ -+ "adcx %%rax, %%r11 ;" -+ "adox 88(%1), %%r11 ;" -+ /****************************************/ -+ "adcx %%rbx, %%rcx ;" -+ "adox %%rbx, %%rcx ;" -+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */ -+ "adcx %%rcx, %%r8 ;" -+ "adcx %%rbx, %%r9 ;" -+ "movq %%r9, 40(%0) ;" -+ "adcx %%rbx, %%r10 ;" -+ "movq %%r10, 48(%0) ;" -+ "adcx %%rbx, %%r11 ;" -+ "movq %%r11, 56(%0) ;" -+ "mov $0, %%ecx ;" -+ "cmovc %%edx, %%ecx ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, 32(%0) ;" -+ : -+ : "r"(c), "r"(a) -+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -+ "%r10", "%r11"); -+} -+ -+static void red_eltfp25519_2w_bmi2(u64 *const c, const u64 *const a) -+{ -+ asm volatile( -+ "movl $38, %%edx ; " /* 2*c = 38 = 2^256 */ -+ "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */ -+ "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */ -+ "addq %%r10, %%r9 ;" -+ "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */ -+ "adcq %%r11, %%r10 ;" -+ "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */ -+ "adcq %%rax, %%r11 ;" -+ 
/***************************************/ -+ "adcq $0, %%rcx ;" -+ "addq (%1), %%r8 ;" -+ "adcq 8(%1), %%r9 ;" -+ "adcq 16(%1), %%r10 ;" -+ "adcq 24(%1), %%r11 ;" -+ "adcq $0, %%rcx ;" -+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */ -+ "addq %%rcx, %%r8 ;" -+ "adcq $0, %%r9 ;" -+ "movq %%r9, 8(%0) ;" -+ "adcq $0, %%r10 ;" -+ "movq %%r10, 16(%0) ;" -+ "adcq $0, %%r11 ;" -+ "movq %%r11, 24(%0) ;" -+ "mov $0, %%ecx ;" -+ "cmovc %%edx, %%ecx ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, (%0) ;" -+ -+ "mulx 96(%1), %%r8, %%r10 ;" /* c*C[4] */ -+ "mulx 104(%1), %%r9, %%r11 ;" /* c*C[5] */ -+ "addq %%r10, %%r9 ;" -+ "mulx 112(%1), %%r10, %%rax ;" /* c*C[6] */ -+ "adcq %%r11, %%r10 ;" -+ "mulx 120(%1), %%r11, %%rcx ;" /* c*C[7] */ -+ "adcq %%rax, %%r11 ;" -+ /****************************************/ -+ "adcq $0, %%rcx ;" -+ "addq 64(%1), %%r8 ;" -+ "adcq 72(%1), %%r9 ;" -+ "adcq 80(%1), %%r10 ;" -+ "adcq 88(%1), %%r11 ;" -+ "adcq $0, %%rcx ;" -+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */ -+ "addq %%rcx, %%r8 ;" -+ "adcq $0, %%r9 ;" -+ "movq %%r9, 40(%0) ;" -+ "adcq $0, %%r10 ;" -+ "movq %%r10, 48(%0) ;" -+ "adcq $0, %%r11 ;" -+ "movq %%r11, 56(%0) ;" -+ "mov $0, %%ecx ;" -+ "cmovc %%edx, %%ecx ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, 32(%0) ;" -+ : -+ : "r"(c), "r"(a) -+ : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", -+ "%r11"); -+} -+ -+static void mul_256x256_integer_adx(u64 *const c, const u64 *const a, -+ const u64 *const b) -+{ -+ asm volatile( -+ "movq (%1), %%rdx; " /* A[0] */ -+ "mulx (%2), %%r8, %%r9; " /* A[0]*B[0] */ -+ "xorl %%r10d, %%r10d ;" -+ "movq %%r8, (%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[0]*B[1] */ -+ "adox %%r9, %%r10 ;" -+ "movq %%r10, 8(%0) ;" -+ "mulx 16(%2), %%r15, %%r13; " /* A[0]*B[2] */ -+ "adox %%r11, %%r15 ;" -+ "mulx 24(%2), %%r14, %%rdx; " /* A[0]*B[3] */ -+ "adox %%r13, %%r14 ;" -+ "movq $0, %%rax ;" -+ /******************************************/ -+ "adox %%rdx, %%rax ;" -+ -+ "movq 8(%1), %%rdx; " /* A[1] */ -+ "mulx (%2), 
%%r8, %%r9; " /* A[1]*B[0] */ -+ "xorl %%r10d, %%r10d ;" -+ "adcx 8(%0), %%r8 ;" -+ "movq %%r8, 8(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */ -+ "adox %%r9, %%r10 ;" -+ "adcx %%r15, %%r10 ;" -+ "movq %%r10, 16(%0) ;" -+ "mulx 16(%2), %%r15, %%r13; " /* A[1]*B[2] */ -+ "adox %%r11, %%r15 ;" -+ "adcx %%r14, %%r15 ;" -+ "movq $0, %%r8 ;" -+ "mulx 24(%2), %%r14, %%rdx; " /* A[1]*B[3] */ -+ "adox %%r13, %%r14 ;" -+ "adcx %%rax, %%r14 ;" -+ "movq $0, %%rax ;" -+ /******************************************/ -+ "adox %%rdx, %%rax ;" -+ "adcx %%r8, %%rax ;" -+ -+ "movq 16(%1), %%rdx; " /* A[2] */ -+ "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */ -+ "xorl %%r10d, %%r10d ;" -+ "adcx 16(%0), %%r8 ;" -+ "movq %%r8, 16(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */ -+ "adox %%r9, %%r10 ;" -+ "adcx %%r15, %%r10 ;" -+ "movq %%r10, 24(%0) ;" -+ "mulx 16(%2), %%r15, %%r13; " /* A[2]*B[2] */ -+ "adox %%r11, %%r15 ;" -+ "adcx %%r14, %%r15 ;" -+ "movq $0, %%r8 ;" -+ "mulx 24(%2), %%r14, %%rdx; " /* A[2]*B[3] */ -+ "adox %%r13, %%r14 ;" -+ "adcx %%rax, %%r14 ;" -+ "movq $0, %%rax ;" -+ /******************************************/ -+ "adox %%rdx, %%rax ;" -+ "adcx %%r8, %%rax ;" -+ -+ "movq 24(%1), %%rdx; " /* A[3] */ -+ "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */ -+ "xorl %%r10d, %%r10d ;" -+ "adcx 24(%0), %%r8 ;" -+ "movq %%r8, 24(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */ -+ "adox %%r9, %%r10 ;" -+ "adcx %%r15, %%r10 ;" -+ "movq %%r10, 32(%0) ;" -+ "mulx 16(%2), %%r15, %%r13; " /* A[3]*B[2] */ -+ "adox %%r11, %%r15 ;" -+ "adcx %%r14, %%r15 ;" -+ "movq %%r15, 40(%0) ;" -+ "movq $0, %%r8 ;" -+ "mulx 24(%2), %%r14, %%rdx; " /* A[3]*B[3] */ -+ "adox %%r13, %%r14 ;" -+ "adcx %%rax, %%r14 ;" -+ "movq %%r14, 48(%0) ;" -+ "movq $0, %%rax ;" -+ /******************************************/ -+ "adox %%rdx, %%rax ;" -+ "adcx %%r8, %%rax ;" -+ "movq %%rax, 56(%0) ;" -+ : -+ : "r"(c), "r"(a), "r"(b) -+ : "memory", "cc", "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", -+ 
"%r13", "%r14", "%r15"); -+} -+ -+static void mul_256x256_integer_bmi2(u64 *const c, const u64 *const a, -+ const u64 *const b) -+{ -+ asm volatile( -+ "movq (%1), %%rdx; " /* A[0] */ -+ "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */ -+ "movq %%r8, (%0) ;" -+ "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] */ -+ "addq %%r10, %%r15 ;" -+ "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */ -+ "adcq %%r8, %%rax ;" -+ "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */ -+ "adcq %%r10, %%rbx ;" -+ /******************************************/ -+ "adcq $0, %%rcx ;" -+ -+ "movq 8(%1), %%rdx; " /* A[1] */ -+ "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */ -+ "addq %%r15, %%r8 ;" -+ "movq %%r8, 8(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */ -+ "adcq %%r10, %%r9 ;" -+ "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */ -+ "adcq %%r8, %%r11 ;" -+ "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */ -+ "adcq %%r10, %%r13 ;" -+ /******************************************/ -+ "adcq $0, %%r15 ;" -+ -+ "addq %%r9, %%rax ;" -+ "adcq %%r11, %%rbx ;" -+ "adcq %%r13, %%rcx ;" -+ "adcq $0, %%r15 ;" -+ -+ "movq 16(%1), %%rdx; " /* A[2] */ -+ "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */ -+ "addq %%rax, %%r8 ;" -+ "movq %%r8, 16(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */ -+ "adcq %%r10, %%r9 ;" -+ "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */ -+ "adcq %%r8, %%r11 ;" -+ "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */ -+ "adcq %%r10, %%r13 ;" -+ /******************************************/ -+ "adcq $0, %%rax ;" -+ -+ "addq %%r9, %%rbx ;" -+ "adcq %%r11, %%rcx ;" -+ "adcq %%r13, %%r15 ;" -+ "adcq $0, %%rax ;" -+ -+ "movq 24(%1), %%rdx; " /* A[3] */ -+ "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */ -+ "addq %%rbx, %%r8 ;" -+ "movq %%r8, 24(%0) ;" -+ "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */ -+ "adcq %%r10, %%r9 ;" -+ "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */ -+ "adcq %%r8, %%r11 ;" -+ "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */ -+ "adcq %%r10, %%r13 ;" -+ /******************************************/ -+ "adcq $0, 
%%rbx ;" -+ -+ "addq %%r9, %%rcx ;" -+ "movq %%rcx, 32(%0) ;" -+ "adcq %%r11, %%r15 ;" -+ "movq %%r15, 40(%0) ;" -+ "adcq %%r13, %%rax ;" -+ "movq %%rax, 48(%0) ;" -+ "adcq $0, %%rbx ;" -+ "movq %%rbx, 56(%0) ;" -+ : -+ : "r"(c), "r"(a), "r"(b) -+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -+ "%r10", "%r11", "%r13", "%r15"); -+} -+ -+static void sqr_256x256_integer_adx(u64 *const c, const u64 *const a) -+{ -+ asm volatile( -+ "movq (%1), %%rdx ;" /* A[0] */ -+ "mulx 8(%1), %%r8, %%r14 ;" /* A[1]*A[0] */ -+ "xorl %%r15d, %%r15d;" -+ "mulx 16(%1), %%r9, %%r10 ;" /* A[2]*A[0] */ -+ "adcx %%r14, %%r9 ;" -+ "mulx 24(%1), %%rax, %%rcx ;" /* A[3]*A[0] */ -+ "adcx %%rax, %%r10 ;" -+ "movq 24(%1), %%rdx ;" /* A[3] */ -+ "mulx 8(%1), %%r11, %%rbx ;" /* A[1]*A[3] */ -+ "adcx %%rcx, %%r11 ;" -+ "mulx 16(%1), %%rax, %%r13 ;" /* A[2]*A[3] */ -+ "adcx %%rax, %%rbx ;" -+ "movq 8(%1), %%rdx ;" /* A[1] */ -+ "adcx %%r15, %%r13 ;" -+ "mulx 16(%1), %%rax, %%rcx ;" /* A[2]*A[1] */ -+ "movq $0, %%r14 ;" -+ /******************************************/ -+ "adcx %%r15, %%r14 ;" -+ -+ "xorl %%r15d, %%r15d;" -+ "adox %%rax, %%r10 ;" -+ "adcx %%r8, %%r8 ;" -+ "adox %%rcx, %%r11 ;" -+ "adcx %%r9, %%r9 ;" -+ "adox %%r15, %%rbx ;" -+ "adcx %%r10, %%r10 ;" -+ "adox %%r15, %%r13 ;" -+ "adcx %%r11, %%r11 ;" -+ "adox %%r15, %%r14 ;" -+ "adcx %%rbx, %%rbx ;" -+ "adcx %%r13, %%r13 ;" -+ "adcx %%r14, %%r14 ;" -+ -+ "movq (%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */ -+ /*******************/ -+ "movq %%rax, 0(%0) ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, 8(%0) ;" -+ "movq 8(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* A[1]^2 */ -+ "adcq %%rax, %%r9 ;" -+ "movq %%r9, 16(%0) ;" -+ "adcq %%rcx, %%r10 ;" -+ "movq %%r10, 24(%0) ;" -+ "movq 16(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */ -+ "adcq %%rax, %%r11 ;" -+ "movq %%r11, 32(%0) ;" -+ "adcq %%rcx, %%rbx ;" -+ "movq %%rbx, 40(%0) ;" -+ "movq 24(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */ 
-+ "adcq %%rax, %%r13 ;" -+ "movq %%r13, 48(%0) ;" -+ "adcq %%rcx, %%r14 ;" -+ "movq %%r14, 56(%0) ;" -+ : -+ : "r"(c), "r"(a) -+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -+ "%r10", "%r11", "%r13", "%r14", "%r15"); -+} -+ -+static void sqr_256x256_integer_bmi2(u64 *const c, const u64 *const a) -+{ -+ asm volatile( -+ "movq 8(%1), %%rdx ;" /* A[1] */ -+ "mulx (%1), %%r8, %%r9 ;" /* A[0]*A[1] */ -+ "mulx 16(%1), %%r10, %%r11 ;" /* A[2]*A[1] */ -+ "mulx 24(%1), %%rcx, %%r14 ;" /* A[3]*A[1] */ -+ -+ "movq 16(%1), %%rdx ;" /* A[2] */ -+ "mulx 24(%1), %%r15, %%r13 ;" /* A[3]*A[2] */ -+ "mulx (%1), %%rax, %%rdx ;" /* A[0]*A[2] */ -+ -+ "addq %%rax, %%r9 ;" -+ "adcq %%rdx, %%r10 ;" -+ "adcq %%rcx, %%r11 ;" -+ "adcq %%r14, %%r15 ;" -+ "adcq $0, %%r13 ;" -+ "movq $0, %%r14 ;" -+ "adcq $0, %%r14 ;" -+ -+ "movq (%1), %%rdx ;" /* A[0] */ -+ "mulx 24(%1), %%rax, %%rcx ;" /* A[0]*A[3] */ -+ -+ "addq %%rax, %%r10 ;" -+ "adcq %%rcx, %%r11 ;" -+ "adcq $0, %%r15 ;" -+ "adcq $0, %%r13 ;" -+ "adcq $0, %%r14 ;" -+ -+ "shldq $1, %%r13, %%r14 ;" -+ "shldq $1, %%r15, %%r13 ;" -+ "shldq $1, %%r11, %%r15 ;" -+ "shldq $1, %%r10, %%r11 ;" -+ "shldq $1, %%r9, %%r10 ;" -+ "shldq $1, %%r8, %%r9 ;" -+ "shlq $1, %%r8 ;" -+ -+ /*******************/ -+ "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */ -+ /*******************/ -+ "movq %%rax, 0(%0) ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, 8(%0) ;" -+ "movq 8(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* A[1]^2 */ -+ "adcq %%rax, %%r9 ;" -+ "movq %%r9, 16(%0) ;" -+ "adcq %%rcx, %%r10 ;" -+ "movq %%r10, 24(%0) ;" -+ "movq 16(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */ -+ "adcq %%rax, %%r11 ;" -+ "movq %%r11, 32(%0) ;" -+ "adcq %%rcx, %%r15 ;" -+ "movq %%r15, 40(%0) ;" -+ "movq 24(%1), %%rdx ;" -+ "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */ -+ "adcq %%rax, %%r13 ;" -+ "movq %%r13, 48(%0) ;" -+ "adcq %%rcx, %%r14 ;" -+ "movq %%r14, 56(%0) ;" -+ : -+ : "r"(c), "r"(a) -+ : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", 
"%r10", -+ "%r11", "%r13", "%r14", "%r15"); -+} -+ -+static void red_eltfp25519_1w_adx(u64 *const c, const u64 *const a) -+{ -+ asm volatile( -+ "movl $38, %%edx ;" /* 2*c = 38 = 2^256 */ -+ "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */ -+ "xorl %%ebx, %%ebx ;" -+ "adox (%1), %%r8 ;" -+ "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */ -+ "adcx %%r10, %%r9 ;" -+ "adox 8(%1), %%r9 ;" -+ "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */ -+ "adcx %%r11, %%r10 ;" -+ "adox 16(%1), %%r10 ;" -+ "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */ -+ "adcx %%rax, %%r11 ;" -+ "adox 24(%1), %%r11 ;" -+ /***************************************/ -+ "adcx %%rbx, %%rcx ;" -+ "adox %%rbx, %%rcx ;" -+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */ -+ "adcx %%rcx, %%r8 ;" -+ "adcx %%rbx, %%r9 ;" -+ "movq %%r9, 8(%0) ;" -+ "adcx %%rbx, %%r10 ;" -+ "movq %%r10, 16(%0) ;" -+ "adcx %%rbx, %%r11 ;" -+ "movq %%r11, 24(%0) ;" -+ "mov $0, %%ecx ;" -+ "cmovc %%edx, %%ecx ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, (%0) ;" -+ : -+ : "r"(c), "r"(a) -+ : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -+ "%r10", "%r11"); -+} -+ -+static void red_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a) -+{ -+ asm volatile( -+ "movl $38, %%edx ;" /* 2*c = 38 = 2^256 */ -+ "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */ -+ "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */ -+ "addq %%r10, %%r9 ;" -+ "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */ -+ "adcq %%r11, %%r10 ;" -+ "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */ -+ "adcq %%rax, %%r11 ;" -+ /***************************************/ -+ "adcq $0, %%rcx ;" -+ "addq (%1), %%r8 ;" -+ "adcq 8(%1), %%r9 ;" -+ "adcq 16(%1), %%r10 ;" -+ "adcq 24(%1), %%r11 ;" -+ "adcq $0, %%rcx ;" -+ "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */ -+ "addq %%rcx, %%r8 ;" -+ "adcq $0, %%r9 ;" -+ "movq %%r9, 8(%0) ;" -+ "adcq $0, %%r10 ;" -+ "movq %%r10, 16(%0) ;" -+ "adcq $0, %%r11 ;" -+ "movq %%r11, 24(%0) ;" -+ "mov $0, %%ecx ;" -+ "cmovc %%edx, %%ecx ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, (%0) ;" -+ : -+ : 
"r"(c), "r"(a) -+ : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", -+ "%r11"); -+} -+ -+static __always_inline void -+add_eltfp25519_1w_adx(u64 *const c, const u64 *const a, const u64 *const b) -+{ -+ asm volatile( -+ "mov $38, %%eax ;" -+ "xorl %%ecx, %%ecx ;" -+ "movq (%2), %%r8 ;" -+ "adcx (%1), %%r8 ;" -+ "movq 8(%2), %%r9 ;" -+ "adcx 8(%1), %%r9 ;" -+ "movq 16(%2), %%r10 ;" -+ "adcx 16(%1), %%r10 ;" -+ "movq 24(%2), %%r11 ;" -+ "adcx 24(%1), %%r11 ;" -+ "cmovc %%eax, %%ecx ;" -+ "xorl %%eax, %%eax ;" -+ "adcx %%rcx, %%r8 ;" -+ "adcx %%rax, %%r9 ;" -+ "movq %%r9, 8(%0) ;" -+ "adcx %%rax, %%r10 ;" -+ "movq %%r10, 16(%0) ;" -+ "adcx %%rax, %%r11 ;" -+ "movq %%r11, 24(%0) ;" -+ "mov $38, %%ecx ;" -+ "cmovc %%ecx, %%eax ;" -+ "addq %%rax, %%r8 ;" -+ "movq %%r8, (%0) ;" -+ : -+ : "r"(c), "r"(a), "r"(b) -+ : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11"); -+} -+ -+static __always_inline void -+add_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a, const u64 *const b) -+{ -+ asm volatile( -+ "mov $38, %%eax ;" -+ "movq (%2), %%r8 ;" -+ "addq (%1), %%r8 ;" -+ "movq 8(%2), %%r9 ;" -+ "adcq 8(%1), %%r9 ;" -+ "movq 16(%2), %%r10 ;" -+ "adcq 16(%1), %%r10 ;" -+ "movq 24(%2), %%r11 ;" -+ "adcq 24(%1), %%r11 ;" -+ "mov $0, %%ecx ;" -+ "cmovc %%eax, %%ecx ;" -+ "addq %%rcx, %%r8 ;" -+ "adcq $0, %%r9 ;" -+ "movq %%r9, 8(%0) ;" -+ "adcq $0, %%r10 ;" -+ "movq %%r10, 16(%0) ;" -+ "adcq $0, %%r11 ;" -+ "movq %%r11, 24(%0) ;" -+ "mov $0, %%ecx ;" -+ "cmovc %%eax, %%ecx ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, (%0) ;" -+ : -+ : "r"(c), "r"(a), "r"(b) -+ : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11"); -+} -+ -+static __always_inline void -+sub_eltfp25519_1w(u64 *const c, const u64 *const a, const u64 *const b) -+{ -+ asm volatile( -+ "mov $38, %%eax ;" -+ "movq (%1), %%r8 ;" -+ "subq (%2), %%r8 ;" -+ "movq 8(%1), %%r9 ;" -+ "sbbq 8(%2), %%r9 ;" -+ "movq 16(%1), %%r10 ;" -+ "sbbq 16(%2), %%r10 ;" -+ "movq 24(%1), %%r11 ;" -+ "sbbq 
24(%2), %%r11 ;" -+ "mov $0, %%ecx ;" -+ "cmovc %%eax, %%ecx ;" -+ "subq %%rcx, %%r8 ;" -+ "sbbq $0, %%r9 ;" -+ "movq %%r9, 8(%0) ;" -+ "sbbq $0, %%r10 ;" -+ "movq %%r10, 16(%0) ;" -+ "sbbq $0, %%r11 ;" -+ "movq %%r11, 24(%0) ;" -+ "mov $0, %%ecx ;" -+ "cmovc %%eax, %%ecx ;" -+ "subq %%rcx, %%r8 ;" -+ "movq %%r8, (%0) ;" -+ : -+ : "r"(c), "r"(a), "r"(b) -+ : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11"); -+} -+ -+/* Multiplication by a24 = (A+2)/4 = (486662+2)/4 = 121666 */ -+static __always_inline void -+mul_a24_eltfp25519_1w(u64 *const c, const u64 *const a) -+{ -+ const u64 a24 = 121666; -+ asm volatile( -+ "movq %2, %%rdx ;" -+ "mulx (%1), %%r8, %%r10 ;" -+ "mulx 8(%1), %%r9, %%r11 ;" -+ "addq %%r10, %%r9 ;" -+ "mulx 16(%1), %%r10, %%rax ;" -+ "adcq %%r11, %%r10 ;" -+ "mulx 24(%1), %%r11, %%rcx ;" -+ "adcq %%rax, %%r11 ;" -+ /**************************/ -+ "adcq $0, %%rcx ;" -+ "movl $38, %%edx ;" /* 2*c = 38 = 2^256 mod 2^255-19*/ -+ "imul %%rdx, %%rcx ;" -+ "addq %%rcx, %%r8 ;" -+ "adcq $0, %%r9 ;" -+ "movq %%r9, 8(%0) ;" -+ "adcq $0, %%r10 ;" -+ "movq %%r10, 16(%0) ;" -+ "adcq $0, %%r11 ;" -+ "movq %%r11, 24(%0) ;" -+ "mov $0, %%ecx ;" -+ "cmovc %%edx, %%ecx ;" -+ "addq %%rcx, %%r8 ;" -+ "movq %%r8, (%0) ;" -+ : -+ : "r"(c), "r"(a), "r"(a24) -+ : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", -+ "%r11"); -+} -+ -+static void inv_eltfp25519_1w_adx(u64 *const c, const u64 *const a) -+{ -+ struct { -+ eltfp25519_1w_buffer buffer; -+ eltfp25519_1w x0, x1, x2; -+ } __aligned(32) m; -+ u64 *T[4]; -+ -+ T[0] = m.x0; -+ T[1] = c; /* x^(-1) */ -+ T[2] = m.x1; -+ T[3] = m.x2; -+ -+ copy_eltfp25519_1w(T[1], a); -+ sqrn_eltfp25519_1w_adx(T[1], 1); -+ copy_eltfp25519_1w(T[2], T[1]); -+ sqrn_eltfp25519_1w_adx(T[2], 2); -+ mul_eltfp25519_1w_adx(T[0], a, T[2]); -+ mul_eltfp25519_1w_adx(T[1], T[1], T[0]); -+ copy_eltfp25519_1w(T[2], T[1]); -+ sqrn_eltfp25519_1w_adx(T[2], 1); -+ mul_eltfp25519_1w_adx(T[0], T[0], T[2]); -+ 
copy_eltfp25519_1w(T[2], T[0]); -+ sqrn_eltfp25519_1w_adx(T[2], 5); -+ mul_eltfp25519_1w_adx(T[0], T[0], T[2]); -+ copy_eltfp25519_1w(T[2], T[0]); -+ sqrn_eltfp25519_1w_adx(T[2], 10); -+ mul_eltfp25519_1w_adx(T[2], T[2], T[0]); -+ copy_eltfp25519_1w(T[3], T[2]); -+ sqrn_eltfp25519_1w_adx(T[3], 20); -+ mul_eltfp25519_1w_adx(T[3], T[3], T[2]); -+ sqrn_eltfp25519_1w_adx(T[3], 10); -+ mul_eltfp25519_1w_adx(T[3], T[3], T[0]); -+ copy_eltfp25519_1w(T[0], T[3]); -+ sqrn_eltfp25519_1w_adx(T[0], 50); -+ mul_eltfp25519_1w_adx(T[0], T[0], T[3]); -+ copy_eltfp25519_1w(T[2], T[0]); -+ sqrn_eltfp25519_1w_adx(T[2], 100); -+ mul_eltfp25519_1w_adx(T[2], T[2], T[0]); -+ sqrn_eltfp25519_1w_adx(T[2], 50); -+ mul_eltfp25519_1w_adx(T[2], T[2], T[3]); -+ sqrn_eltfp25519_1w_adx(T[2], 5); -+ mul_eltfp25519_1w_adx(T[1], T[1], T[2]); -+ -+ memzero_explicit(&m, sizeof(m)); -+} -+ -+static void inv_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a) -+{ -+ struct { -+ eltfp25519_1w_buffer buffer; -+ eltfp25519_1w x0, x1, x2; -+ } __aligned(32) m; -+ u64 *T[5]; -+ -+ T[0] = m.x0; -+ T[1] = c; /* x^(-1) */ -+ T[2] = m.x1; -+ T[3] = m.x2; -+ -+ copy_eltfp25519_1w(T[1], a); -+ sqrn_eltfp25519_1w_bmi2(T[1], 1); -+ copy_eltfp25519_1w(T[2], T[1]); -+ sqrn_eltfp25519_1w_bmi2(T[2], 2); -+ mul_eltfp25519_1w_bmi2(T[0], a, T[2]); -+ mul_eltfp25519_1w_bmi2(T[1], T[1], T[0]); -+ copy_eltfp25519_1w(T[2], T[1]); -+ sqrn_eltfp25519_1w_bmi2(T[2], 1); -+ mul_eltfp25519_1w_bmi2(T[0], T[0], T[2]); -+ copy_eltfp25519_1w(T[2], T[0]); -+ sqrn_eltfp25519_1w_bmi2(T[2], 5); -+ mul_eltfp25519_1w_bmi2(T[0], T[0], T[2]); -+ copy_eltfp25519_1w(T[2], T[0]); -+ sqrn_eltfp25519_1w_bmi2(T[2], 10); -+ mul_eltfp25519_1w_bmi2(T[2], T[2], T[0]); -+ copy_eltfp25519_1w(T[3], T[2]); -+ sqrn_eltfp25519_1w_bmi2(T[3], 20); -+ mul_eltfp25519_1w_bmi2(T[3], T[3], T[2]); -+ sqrn_eltfp25519_1w_bmi2(T[3], 10); -+ mul_eltfp25519_1w_bmi2(T[3], T[3], T[0]); -+ copy_eltfp25519_1w(T[0], T[3]); -+ sqrn_eltfp25519_1w_bmi2(T[0], 50); -+ 
mul_eltfp25519_1w_bmi2(T[0], T[0], T[3]); -+ copy_eltfp25519_1w(T[2], T[0]); -+ sqrn_eltfp25519_1w_bmi2(T[2], 100); -+ mul_eltfp25519_1w_bmi2(T[2], T[2], T[0]); -+ sqrn_eltfp25519_1w_bmi2(T[2], 50); -+ mul_eltfp25519_1w_bmi2(T[2], T[2], T[3]); -+ sqrn_eltfp25519_1w_bmi2(T[2], 5); -+ mul_eltfp25519_1w_bmi2(T[1], T[1], T[2]); -+ -+ memzero_explicit(&m, sizeof(m)); -+} -+ -+/* Given c, a 256-bit number, fred_eltfp25519_1w updates c -+ * with a number such that 0 <= C < 2**255-19. -+ */ -+static __always_inline void fred_eltfp25519_1w(u64 *const c) -+{ -+ u64 tmp0 = 38, tmp1 = 19; -+ asm volatile( -+ "btrq $63, %3 ;" /* Put bit 255 in carry flag and clear */ -+ "cmovncl %k5, %k4 ;" /* c[255] ? 38 : 19 */ -+ -+ /* Add either 19 or 38 to c */ -+ "addq %4, %0 ;" -+ "adcq $0, %1 ;" -+ "adcq $0, %2 ;" -+ "adcq $0, %3 ;" -+ -+ /* Test for bit 255 again; only triggered on overflow modulo 2^255-19 */ -+ "movl $0, %k4 ;" -+ "cmovnsl %k5, %k4 ;" /* c[255] ? 0 : 19 */ -+ "btrq $63, %3 ;" /* Clear bit 255 */ -+ -+ /* Subtract 19 if necessary */ -+ "subq %4, %0 ;" -+ "sbbq $0, %1 ;" -+ "sbbq $0, %2 ;" -+ "sbbq $0, %3 ;" -+ -+ : "+r"(c[0]), "+r"(c[1]), "+r"(c[2]), "+r"(c[3]), "+r"(tmp0), -+ "+r"(tmp1) -+ : -+ : "memory", "cc"); -+} -+ -+static __always_inline void cswap(u8 bit, u64 *const px, u64 *const py) -+{ -+ u64 temp; -+ asm volatile( -+ "test %9, %9 ;" -+ "movq %0, %8 ;" -+ "cmovnzq %4, %0 ;" -+ "cmovnzq %8, %4 ;" -+ "movq %1, %8 ;" -+ "cmovnzq %5, %1 ;" -+ "cmovnzq %8, %5 ;" -+ "movq %2, %8 ;" -+ "cmovnzq %6, %2 ;" -+ "cmovnzq %8, %6 ;" -+ "movq %3, %8 ;" -+ "cmovnzq %7, %3 ;" -+ "cmovnzq %8, %7 ;" -+ : "+r"(px[0]), "+r"(px[1]), "+r"(px[2]), "+r"(px[3]), -+ "+r"(py[0]), "+r"(py[1]), "+r"(py[2]), "+r"(py[3]), -+ "=r"(temp) -+ : "r"(bit) -+ : "cc" -+ ); -+} -+ -+static __always_inline void cselect(u8 bit, u64 *const px, const u64 *const py) -+{ -+ asm volatile( -+ "test %4, %4 ;" -+ "cmovnzq %5, %0 ;" -+ "cmovnzq %6, %1 ;" -+ "cmovnzq %7, %2 ;" -+ "cmovnzq %8, %3 ;" -+ : 
"+r"(px[0]), "+r"(px[1]), "+r"(px[2]), "+r"(px[3]) -+ : "r"(bit), "rm"(py[0]), "rm"(py[1]), "rm"(py[2]), "rm"(py[3]) -+ : "cc" -+ ); -+} -+ -+static void curve25519_adx(u8 shared[CURVE25519_KEY_SIZE], -+ const u8 private_key[CURVE25519_KEY_SIZE], -+ const u8 session_key[CURVE25519_KEY_SIZE]) -+{ -+ struct { -+ u64 buffer[4 * NUM_WORDS_ELTFP25519]; -+ u64 coordinates[4 * NUM_WORDS_ELTFP25519]; -+ u64 workspace[6 * NUM_WORDS_ELTFP25519]; -+ u8 session[CURVE25519_KEY_SIZE]; -+ u8 private[CURVE25519_KEY_SIZE]; -+ } __aligned(32) m; -+ -+ int i = 0, j = 0; -+ u64 prev = 0; -+ u64 *const X1 = (u64 *)m.session; -+ u64 *const key = (u64 *)m.private; -+ u64 *const Px = m.coordinates + 0; -+ u64 *const Pz = m.coordinates + 4; -+ u64 *const Qx = m.coordinates + 8; -+ u64 *const Qz = m.coordinates + 12; -+ u64 *const X2 = Qx; -+ u64 *const Z2 = Qz; -+ u64 *const X3 = Px; -+ u64 *const Z3 = Pz; -+ u64 *const X2Z2 = Qx; -+ u64 *const X3Z3 = Px; -+ -+ u64 *const A = m.workspace + 0; -+ u64 *const B = m.workspace + 4; -+ u64 *const D = m.workspace + 8; -+ u64 *const C = m.workspace + 12; -+ u64 *const DA = m.workspace + 16; -+ u64 *const CB = m.workspace + 20; -+ u64 *const AB = A; -+ u64 *const DC = D; -+ u64 *const DACB = DA; -+ -+ memcpy(m.private, private_key, sizeof(m.private)); -+ memcpy(m.session, session_key, sizeof(m.session)); -+ -+ curve25519_clamp_secret(m.private); -+ -+ /* As in the draft: -+ * When receiving such an array, implementations of curve25519 -+ * MUST mask the most-significant bit in the final byte. 
This -+ * is done to preserve compatibility with point formats which -+ * reserve the sign bit for use in other protocols and to -+ * increase resistance to implementation fingerprinting -+ */ -+ m.session[CURVE25519_KEY_SIZE - 1] &= (1 << (255 % 8)) - 1; -+ -+ copy_eltfp25519_1w(Px, X1); -+ setzero_eltfp25519_1w(Pz); -+ setzero_eltfp25519_1w(Qx); -+ setzero_eltfp25519_1w(Qz); -+ -+ Pz[0] = 1; -+ Qx[0] = 1; -+ -+ /* main-loop */ -+ prev = 0; -+ j = 62; -+ for (i = 3; i >= 0; --i) { -+ while (j >= 0) { -+ u64 bit = (key[i] >> j) & 0x1; -+ u64 swap = bit ^ prev; -+ prev = bit; -+ -+ add_eltfp25519_1w_adx(A, X2, Z2); /* A = (X2+Z2) */ -+ sub_eltfp25519_1w(B, X2, Z2); /* B = (X2-Z2) */ -+ add_eltfp25519_1w_adx(C, X3, Z3); /* C = (X3+Z3) */ -+ sub_eltfp25519_1w(D, X3, Z3); /* D = (X3-Z3) */ -+ mul_eltfp25519_2w_adx(DACB, AB, DC); /* [DA|CB] = [A|B]*[D|C] */ -+ -+ cselect(swap, A, C); -+ cselect(swap, B, D); -+ -+ sqr_eltfp25519_2w_adx(AB); /* [AA|BB] = [A^2|B^2] */ -+ add_eltfp25519_1w_adx(X3, DA, CB); /* X3 = (DA+CB) */ -+ sub_eltfp25519_1w(Z3, DA, CB); /* Z3 = (DA-CB) */ -+ sqr_eltfp25519_2w_adx(X3Z3); /* [X3|Z3] = [(DA+CB)|(DA+CB)]^2 */ -+ -+ copy_eltfp25519_1w(X2, B); /* X2 = B^2 */ -+ sub_eltfp25519_1w(Z2, A, B); /* Z2 = E = AA-BB */ -+ -+ mul_a24_eltfp25519_1w(B, Z2); /* B = a24*E */ -+ add_eltfp25519_1w_adx(B, B, X2); /* B = a24*E+B */ -+ mul_eltfp25519_2w_adx(X2Z2, X2Z2, AB); /* [X2|Z2] = [B|E]*[A|a24*E+B] */ -+ mul_eltfp25519_1w_adx(Z3, Z3, X1); /* Z3 = Z3*X1 */ -+ --j; -+ } -+ j = 63; -+ } -+ -+ inv_eltfp25519_1w_adx(A, Qz); -+ mul_eltfp25519_1w_adx((u64 *)shared, Qx, A); -+ fred_eltfp25519_1w((u64 *)shared); -+ -+ memzero_explicit(&m, sizeof(m)); -+} -+ -+static void curve25519_adx_base(u8 session_key[CURVE25519_KEY_SIZE], -+ const u8 private_key[CURVE25519_KEY_SIZE]) -+{ -+ struct { -+ u64 buffer[4 * NUM_WORDS_ELTFP25519]; -+ u64 coordinates[4 * NUM_WORDS_ELTFP25519]; -+ u64 workspace[4 * NUM_WORDS_ELTFP25519]; -+ u8 private[CURVE25519_KEY_SIZE]; -+ } 
__aligned(32) m; -+ -+ const int ite[4] = { 64, 64, 64, 63 }; -+ const int q = 3; -+ u64 swap = 1; -+ -+ int i = 0, j = 0, k = 0; -+ u64 *const key = (u64 *)m.private; -+ u64 *const Ur1 = m.coordinates + 0; -+ u64 *const Zr1 = m.coordinates + 4; -+ u64 *const Ur2 = m.coordinates + 8; -+ u64 *const Zr2 = m.coordinates + 12; -+ -+ u64 *const UZr1 = m.coordinates + 0; -+ u64 *const ZUr2 = m.coordinates + 8; -+ -+ u64 *const A = m.workspace + 0; -+ u64 *const B = m.workspace + 4; -+ u64 *const C = m.workspace + 8; -+ u64 *const D = m.workspace + 12; -+ -+ u64 *const AB = m.workspace + 0; -+ u64 *const CD = m.workspace + 8; -+ -+ const u64 *const P = table_ladder_8k; -+ -+ memcpy(m.private, private_key, sizeof(m.private)); -+ -+ curve25519_clamp_secret(m.private); -+ -+ setzero_eltfp25519_1w(Ur1); -+ setzero_eltfp25519_1w(Zr1); -+ setzero_eltfp25519_1w(Zr2); -+ Ur1[0] = 1; -+ Zr1[0] = 1; -+ Zr2[0] = 1; -+ -+ /* G-S */ -+ Ur2[3] = 0x1eaecdeee27cab34UL; -+ Ur2[2] = 0xadc7a0b9235d48e2UL; -+ Ur2[1] = 0xbbf095ae14b2edf8UL; -+ Ur2[0] = 0x7e94e1fec82faabdUL; -+ -+ /* main-loop */ -+ j = q; -+ for (i = 0; i < NUM_WORDS_ELTFP25519; ++i) { -+ while (j < ite[i]) { -+ u64 bit = (key[i] >> j) & 0x1; -+ k = (64 * i + j - q); -+ swap = swap ^ bit; -+ cswap(swap, Ur1, Ur2); -+ cswap(swap, Zr1, Zr2); -+ swap = bit; -+ /* Addition */ -+ sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */ -+ add_eltfp25519_1w_adx(A, Ur1, Zr1); /* A = Ur1+Zr1 */ -+ mul_eltfp25519_1w_adx(C, &P[4 * k], B); /* C = M0-B */ -+ sub_eltfp25519_1w(B, A, C); /* B = (Ur1+Zr1) - M*(Ur1-Zr1) */ -+ add_eltfp25519_1w_adx(A, A, C); /* A = (Ur1+Zr1) + M*(Ur1-Zr1) */ -+ sqr_eltfp25519_2w_adx(AB); /* A = A^2 | B = B^2 */ -+ mul_eltfp25519_2w_adx(UZr1, ZUr2, AB); /* Ur1 = Zr2*A | Zr1 = Ur2*B */ -+ ++j; -+ } -+ j = 0; -+ } -+ -+ /* Doubling */ -+ for (i = 0; i < q; ++i) { -+ add_eltfp25519_1w_adx(A, Ur1, Zr1); /* A = Ur1+Zr1 */ -+ sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */ -+ sqr_eltfp25519_2w_adx(AB); /* A = A**2 B = 
B**2 */ -+ copy_eltfp25519_1w(C, B); /* C = B */ -+ sub_eltfp25519_1w(B, A, B); /* B = A-B */ -+ mul_a24_eltfp25519_1w(D, B); /* D = my_a24*B */ -+ add_eltfp25519_1w_adx(D, D, C); /* D = D+C */ -+ mul_eltfp25519_2w_adx(UZr1, AB, CD); /* Ur1 = A*B Zr1 = Zr1*A */ -+ } -+ -+ /* Convert to affine coordinates */ -+ inv_eltfp25519_1w_adx(A, Zr1); -+ mul_eltfp25519_1w_adx((u64 *)session_key, Ur1, A); -+ fred_eltfp25519_1w((u64 *)session_key); -+ -+ memzero_explicit(&m, sizeof(m)); -+} -+ -+static void curve25519_bmi2(u8 shared[CURVE25519_KEY_SIZE], -+ const u8 private_key[CURVE25519_KEY_SIZE], -+ const u8 session_key[CURVE25519_KEY_SIZE]) -+{ -+ struct { -+ u64 buffer[4 * NUM_WORDS_ELTFP25519]; -+ u64 coordinates[4 * NUM_WORDS_ELTFP25519]; -+ u64 workspace[6 * NUM_WORDS_ELTFP25519]; -+ u8 session[CURVE25519_KEY_SIZE]; -+ u8 private[CURVE25519_KEY_SIZE]; -+ } __aligned(32) m; -+ -+ int i = 0, j = 0; -+ u64 prev = 0; -+ u64 *const X1 = (u64 *)m.session; -+ u64 *const key = (u64 *)m.private; -+ u64 *const Px = m.coordinates + 0; -+ u64 *const Pz = m.coordinates + 4; -+ u64 *const Qx = m.coordinates + 8; -+ u64 *const Qz = m.coordinates + 12; -+ u64 *const X2 = Qx; -+ u64 *const Z2 = Qz; -+ u64 *const X3 = Px; -+ u64 *const Z3 = Pz; -+ u64 *const X2Z2 = Qx; -+ u64 *const X3Z3 = Px; -+ -+ u64 *const A = m.workspace + 0; -+ u64 *const B = m.workspace + 4; -+ u64 *const D = m.workspace + 8; -+ u64 *const C = m.workspace + 12; -+ u64 *const DA = m.workspace + 16; -+ u64 *const CB = m.workspace + 20; -+ u64 *const AB = A; -+ u64 *const DC = D; -+ u64 *const DACB = DA; -+ -+ memcpy(m.private, private_key, sizeof(m.private)); -+ memcpy(m.session, session_key, sizeof(m.session)); -+ -+ curve25519_clamp_secret(m.private); -+ -+ /* As in the draft: -+ * When receiving such an array, implementations of curve25519 -+ * MUST mask the most-significant bit in the final byte. 
This -+ * is done to preserve compatibility with point formats which -+ * reserve the sign bit for use in other protocols and to -+ * increase resistance to implementation fingerprinting -+ */ -+ m.session[CURVE25519_KEY_SIZE - 1] &= (1 << (255 % 8)) - 1; -+ -+ copy_eltfp25519_1w(Px, X1); -+ setzero_eltfp25519_1w(Pz); -+ setzero_eltfp25519_1w(Qx); -+ setzero_eltfp25519_1w(Qz); -+ -+ Pz[0] = 1; -+ Qx[0] = 1; -+ -+ /* main-loop */ -+ prev = 0; -+ j = 62; -+ for (i = 3; i >= 0; --i) { -+ while (j >= 0) { -+ u64 bit = (key[i] >> j) & 0x1; -+ u64 swap = bit ^ prev; -+ prev = bit; -+ -+ add_eltfp25519_1w_bmi2(A, X2, Z2); /* A = (X2+Z2) */ -+ sub_eltfp25519_1w(B, X2, Z2); /* B = (X2-Z2) */ -+ add_eltfp25519_1w_bmi2(C, X3, Z3); /* C = (X3+Z3) */ -+ sub_eltfp25519_1w(D, X3, Z3); /* D = (X3-Z3) */ -+ mul_eltfp25519_2w_bmi2(DACB, AB, DC); /* [DA|CB] = [A|B]*[D|C] */ -+ -+ cselect(swap, A, C); -+ cselect(swap, B, D); -+ -+ sqr_eltfp25519_2w_bmi2(AB); /* [AA|BB] = [A^2|B^2] */ -+ add_eltfp25519_1w_bmi2(X3, DA, CB); /* X3 = (DA+CB) */ -+ sub_eltfp25519_1w(Z3, DA, CB); /* Z3 = (DA-CB) */ -+ sqr_eltfp25519_2w_bmi2(X3Z3); /* [X3|Z3] = [(DA+CB)|(DA+CB)]^2 */ -+ -+ copy_eltfp25519_1w(X2, B); /* X2 = B^2 */ -+ sub_eltfp25519_1w(Z2, A, B); /* Z2 = E = AA-BB */ -+ -+ mul_a24_eltfp25519_1w(B, Z2); /* B = a24*E */ -+ add_eltfp25519_1w_bmi2(B, B, X2); /* B = a24*E+B */ -+ mul_eltfp25519_2w_bmi2(X2Z2, X2Z2, AB); /* [X2|Z2] = [B|E]*[A|a24*E+B] */ -+ mul_eltfp25519_1w_bmi2(Z3, Z3, X1); /* Z3 = Z3*X1 */ -+ --j; -+ } -+ j = 63; -+ } -+ -+ inv_eltfp25519_1w_bmi2(A, Qz); -+ mul_eltfp25519_1w_bmi2((u64 *)shared, Qx, A); -+ fred_eltfp25519_1w((u64 *)shared); -+ -+ memzero_explicit(&m, sizeof(m)); -+} -+ -+static void curve25519_bmi2_base(u8 session_key[CURVE25519_KEY_SIZE], -+ const u8 private_key[CURVE25519_KEY_SIZE]) -+{ -+ struct { -+ u64 buffer[4 * NUM_WORDS_ELTFP25519]; -+ u64 coordinates[4 * NUM_WORDS_ELTFP25519]; -+ u64 workspace[4 * NUM_WORDS_ELTFP25519]; -+ u8 private[CURVE25519_KEY_SIZE]; 
-+ } __aligned(32) m; -+ -+ const int ite[4] = { 64, 64, 64, 63 }; -+ const int q = 3; -+ u64 swap = 1; -+ -+ int i = 0, j = 0, k = 0; -+ u64 *const key = (u64 *)m.private; -+ u64 *const Ur1 = m.coordinates + 0; -+ u64 *const Zr1 = m.coordinates + 4; -+ u64 *const Ur2 = m.coordinates + 8; -+ u64 *const Zr2 = m.coordinates + 12; -+ -+ u64 *const UZr1 = m.coordinates + 0; -+ u64 *const ZUr2 = m.coordinates + 8; -+ -+ u64 *const A = m.workspace + 0; -+ u64 *const B = m.workspace + 4; -+ u64 *const C = m.workspace + 8; -+ u64 *const D = m.workspace + 12; -+ -+ u64 *const AB = m.workspace + 0; -+ u64 *const CD = m.workspace + 8; -+ -+ const u64 *const P = table_ladder_8k; -+ -+ memcpy(m.private, private_key, sizeof(m.private)); -+ -+ curve25519_clamp_secret(m.private); -+ -+ setzero_eltfp25519_1w(Ur1); -+ setzero_eltfp25519_1w(Zr1); -+ setzero_eltfp25519_1w(Zr2); -+ Ur1[0] = 1; -+ Zr1[0] = 1; -+ Zr2[0] = 1; -+ -+ /* G-S */ -+ Ur2[3] = 0x1eaecdeee27cab34UL; -+ Ur2[2] = 0xadc7a0b9235d48e2UL; -+ Ur2[1] = 0xbbf095ae14b2edf8UL; -+ Ur2[0] = 0x7e94e1fec82faabdUL; -+ -+ /* main-loop */ -+ j = q; -+ for (i = 0; i < NUM_WORDS_ELTFP25519; ++i) { -+ while (j < ite[i]) { -+ u64 bit = (key[i] >> j) & 0x1; -+ k = (64 * i + j - q); -+ swap = swap ^ bit; -+ cswap(swap, Ur1, Ur2); -+ cswap(swap, Zr1, Zr2); -+ swap = bit; -+ /* Addition */ -+ sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */ -+ add_eltfp25519_1w_bmi2(A, Ur1, Zr1); /* A = Ur1+Zr1 */ -+ mul_eltfp25519_1w_bmi2(C, &P[4 * k], B);/* C = M0-B */ -+ sub_eltfp25519_1w(B, A, C); /* B = (Ur1+Zr1) - M*(Ur1-Zr1) */ -+ add_eltfp25519_1w_bmi2(A, A, C); /* A = (Ur1+Zr1) + M*(Ur1-Zr1) */ -+ sqr_eltfp25519_2w_bmi2(AB); /* A = A^2 | B = B^2 */ -+ mul_eltfp25519_2w_bmi2(UZr1, ZUr2, AB); /* Ur1 = Zr2*A | Zr1 = Ur2*B */ -+ ++j; -+ } -+ j = 0; -+ } -+ -+ /* Doubling */ -+ for (i = 0; i < q; ++i) { -+ add_eltfp25519_1w_bmi2(A, Ur1, Zr1); /* A = Ur1+Zr1 */ -+ sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */ -+ sqr_eltfp25519_2w_bmi2(AB); /* A = 
A**2 B = B**2 */ -+ copy_eltfp25519_1w(C, B); /* C = B */ -+ sub_eltfp25519_1w(B, A, B); /* B = A-B */ -+ mul_a24_eltfp25519_1w(D, B); /* D = my_a24*B */ -+ add_eltfp25519_1w_bmi2(D, D, C); /* D = D+C */ -+ mul_eltfp25519_2w_bmi2(UZr1, AB, CD); /* Ur1 = A*B Zr1 = Zr1*A */ -+ } -+ -+ /* Convert to affine coordinates */ -+ inv_eltfp25519_1w_bmi2(A, Zr1); -+ mul_eltfp25519_1w_bmi2((u64 *)session_key, Ur1, A); -+ fred_eltfp25519_1w((u64 *)session_key); -+ -+ memzero_explicit(&m, sizeof(m)); -+} -+ -+void curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE], -+ const u8 secret[CURVE25519_KEY_SIZE], -+ const u8 basepoint[CURVE25519_KEY_SIZE]) -+{ -+ if (static_branch_likely(&curve25519_use_adx)) -+ curve25519_adx(mypublic, secret, basepoint); -+ else if (static_branch_likely(&curve25519_use_bmi2)) -+ curve25519_bmi2(mypublic, secret, basepoint); -+ else -+ curve25519_generic(mypublic, secret, basepoint); -+} -+EXPORT_SYMBOL(curve25519_arch); -+ -+void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], -+ const u8 secret[CURVE25519_KEY_SIZE]) -+{ -+ if (static_branch_likely(&curve25519_use_adx)) -+ curve25519_adx_base(pub, secret); -+ else if (static_branch_likely(&curve25519_use_bmi2)) -+ curve25519_bmi2_base(pub, secret); -+ else -+ curve25519_generic(pub, secret, curve25519_base_point); -+} -+EXPORT_SYMBOL(curve25519_base_arch); -+ -+static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, -+ unsigned int len) -+{ -+ u8 *secret = kpp_tfm_ctx(tfm); -+ -+ if (!len) -+ curve25519_generate_secret(secret); -+ else if (len == CURVE25519_KEY_SIZE && -+ crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) -+ memcpy(secret, buf, CURVE25519_KEY_SIZE); -+ else -+ return -EINVAL; -+ return 0; -+} -+ -+static int curve25519_generate_public_key(struct kpp_request *req) -+{ -+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); -+ const u8 *secret = kpp_tfm_ctx(tfm); -+ u8 buf[CURVE25519_KEY_SIZE]; -+ int copied, nbytes; -+ -+ if (req->src) -+ return -EINVAL; -+ 
-+ curve25519_base_arch(buf, secret); -+ -+ /* might want less than we've got */ -+ nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len); -+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, -+ nbytes), -+ buf, nbytes); -+ if (copied != nbytes) -+ return -EINVAL; -+ return 0; -+} -+ -+static int curve25519_compute_shared_secret(struct kpp_request *req) -+{ -+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); -+ const u8 *secret = kpp_tfm_ctx(tfm); -+ u8 public_key[CURVE25519_KEY_SIZE]; -+ u8 buf[CURVE25519_KEY_SIZE]; -+ int copied, nbytes; -+ -+ if (!req->src) -+ return -EINVAL; -+ -+ copied = sg_copy_to_buffer(req->src, -+ sg_nents_for_len(req->src, -+ CURVE25519_KEY_SIZE), -+ public_key, CURVE25519_KEY_SIZE); -+ if (copied != CURVE25519_KEY_SIZE) -+ return -EINVAL; -+ -+ curve25519_arch(buf, secret, public_key); -+ -+ /* might want less than we've got */ -+ nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len); -+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, -+ nbytes), -+ buf, nbytes); -+ if (copied != nbytes) -+ return -EINVAL; -+ return 0; -+} -+ -+static unsigned int curve25519_max_size(struct crypto_kpp *tfm) -+{ -+ return CURVE25519_KEY_SIZE; -+} -+ -+static struct kpp_alg curve25519_alg = { -+ .base.cra_name = "curve25519", -+ .base.cra_driver_name = "curve25519-x86", -+ .base.cra_priority = 200, -+ .base.cra_module = THIS_MODULE, -+ .base.cra_ctxsize = CURVE25519_KEY_SIZE, -+ -+ .set_secret = curve25519_set_secret, -+ .generate_public_key = curve25519_generate_public_key, -+ .compute_shared_secret = curve25519_compute_shared_secret, -+ .max_size = curve25519_max_size, -+}; -+ -+static int __init curve25519_mod_init(void) -+{ -+ if (boot_cpu_has(X86_FEATURE_BMI2)) -+ static_branch_enable(&curve25519_use_bmi2); -+ else if (boot_cpu_has(X86_FEATURE_ADX)) -+ static_branch_enable(&curve25519_use_adx); -+ else -+ return 0; -+ return crypto_register_kpp(&curve25519_alg); -+} -+ -+static void __exit 
curve25519_mod_exit(void) -+{ -+ if (boot_cpu_has(X86_FEATURE_BMI2) || -+ boot_cpu_has(X86_FEATURE_ADX)) -+ crypto_unregister_kpp(&curve25519_alg); -+} -+ -+module_init(curve25519_mod_init); -+module_exit(curve25519_mod_exit); -+ -+MODULE_ALIAS_CRYPTO("curve25519"); -+MODULE_ALIAS_CRYPTO("curve25519-x86"); -+MODULE_LICENSE("GPL v2"); ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -269,6 +269,12 @@ config CRYPTO_CURVE25519 - select CRYPTO_KPP - select CRYPTO_LIB_CURVE25519_GENERIC - -+config CRYPTO_CURVE25519_X86 -+ tristate "x86_64 accelerated Curve25519 scalar multiplication library" -+ depends on X86 && 64BIT -+ select CRYPTO_LIB_CURVE25519_GENERIC -+ select CRYPTO_ARCH_HAVE_LIB_CURVE25519 -+ - comment "Authenticated Encryption with Associated Data" - - config CRYPTO_CCM diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0030-crypto-arm-curve25519-import-Bernstein-and-Schwabe-s.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0030-crypto-arm-curve25519-import-Bernstein-and-Schwabe-s.patch deleted file mode 100644 index 8fda25d60..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0030-crypto-arm-curve25519-import-Bernstein-and-Schwabe-s.patch +++ /dev/null @@ -1,2135 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 8 Nov 2019 13:22:37 +0100 -Subject: [PATCH] crypto: arm/curve25519 - import Bernstein and Schwabe's - Curve25519 ARM implementation - -commit f0fb006b604f98e2309a30f34ef455ac734f7c1c upstream. - -This comes from Dan Bernstein and Peter Schwabe's public domain NEON -code, and is included here in raw form so that subsequent commits that -fix these up for the kernel can see how it has changed. This code does -have some entirely cosmetic formatting differences, adding indentation -and so forth, so that when we actually port it for use in the kernel in -the subsequent commit, it's obvious what's changed in the process. 
- -This code originates from SUPERCOP 20180818, available at -. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm/crypto/curve25519-core.S | 2105 +++++++++++++++++++++++++++++ - 1 file changed, 2105 insertions(+) - create mode 100644 arch/arm/crypto/curve25519-core.S - ---- /dev/null -+++ b/arch/arm/crypto/curve25519-core.S -@@ -0,0 +1,2105 @@ -+/* -+ * Public domain code from Daniel J. Bernstein and Peter Schwabe, from -+ * SUPERCOP's curve25519/neon2/scalarmult.s. -+ */ -+ -+.fpu neon -+.text -+.align 4 -+.global _crypto_scalarmult_curve25519_neon2 -+.global crypto_scalarmult_curve25519_neon2 -+.type _crypto_scalarmult_curve25519_neon2 STT_FUNC -+.type crypto_scalarmult_curve25519_neon2 STT_FUNC -+ _crypto_scalarmult_curve25519_neon2: -+ crypto_scalarmult_curve25519_neon2: -+ vpush {q4, q5, q6, q7} -+ mov r12, sp -+ sub sp, sp, #736 -+ and sp, sp, #0xffffffe0 -+ strd r4, [sp, #0] -+ strd r6, [sp, #8] -+ strd r8, [sp, #16] -+ strd r10, [sp, #24] -+ str r12, [sp, #480] -+ str r14, [sp, #484] -+ mov r0, r0 -+ mov r1, r1 -+ mov r2, r2 -+ add r3, sp, #32 -+ ldr r4, =0 -+ ldr r5, =254 -+ vmov.i32 q0, #1 -+ vshr.u64 q1, q0, #7 -+ vshr.u64 q0, q0, #8 -+ vmov.i32 d4, #19 -+ vmov.i32 d5, #38 -+ add r6, sp, #512 -+ vst1.8 {d2-d3}, [r6, : 128] -+ add r6, sp, #528 -+ vst1.8 {d0-d1}, [r6, : 128] -+ add r6, sp, #544 -+ vst1.8 {d4-d5}, [r6, : 128] -+ add r6, r3, #0 -+ vmov.i32 q2, #0 -+ vst1.8 {d4-d5}, [r6, : 128]! -+ vst1.8 {d4-d5}, [r6, : 128]! -+ vst1.8 d4, [r6, : 64] -+ add r6, r3, #0 -+ ldr r7, =960 -+ sub r7, r7, #2 -+ neg r7, r7 -+ sub r7, r7, r7, LSL #7 -+ str r7, [r6] -+ add r6, sp, #704 -+ vld1.8 {d4-d5}, [r1]! -+ vld1.8 {d6-d7}, [r1] -+ vst1.8 {d4-d5}, [r6, : 128]! 
-+ vst1.8 {d6-d7}, [r6, : 128] -+ sub r1, r6, #16 -+ ldrb r6, [r1] -+ and r6, r6, #248 -+ strb r6, [r1] -+ ldrb r6, [r1, #31] -+ and r6, r6, #127 -+ orr r6, r6, #64 -+ strb r6, [r1, #31] -+ vmov.i64 q2, #0xffffffff -+ vshr.u64 q3, q2, #7 -+ vshr.u64 q2, q2, #6 -+ vld1.8 {d8}, [r2] -+ vld1.8 {d10}, [r2] -+ add r2, r2, #6 -+ vld1.8 {d12}, [r2] -+ vld1.8 {d14}, [r2] -+ add r2, r2, #6 -+ vld1.8 {d16}, [r2] -+ add r2, r2, #4 -+ vld1.8 {d18}, [r2] -+ vld1.8 {d20}, [r2] -+ add r2, r2, #6 -+ vld1.8 {d22}, [r2] -+ add r2, r2, #2 -+ vld1.8 {d24}, [r2] -+ vld1.8 {d26}, [r2] -+ vshr.u64 q5, q5, #26 -+ vshr.u64 q6, q6, #3 -+ vshr.u64 q7, q7, #29 -+ vshr.u64 q8, q8, #6 -+ vshr.u64 q10, q10, #25 -+ vshr.u64 q11, q11, #3 -+ vshr.u64 q12, q12, #12 -+ vshr.u64 q13, q13, #38 -+ vand q4, q4, q2 -+ vand q6, q6, q2 -+ vand q8, q8, q2 -+ vand q10, q10, q2 -+ vand q2, q12, q2 -+ vand q5, q5, q3 -+ vand q7, q7, q3 -+ vand q9, q9, q3 -+ vand q11, q11, q3 -+ vand q3, q13, q3 -+ add r2, r3, #48 -+ vadd.i64 q12, q4, q1 -+ vadd.i64 q13, q10, q1 -+ vshr.s64 q12, q12, #26 -+ vshr.s64 q13, q13, #26 -+ vadd.i64 q5, q5, q12 -+ vshl.i64 q12, q12, #26 -+ vadd.i64 q14, q5, q0 -+ vadd.i64 q11, q11, q13 -+ vshl.i64 q13, q13, #26 -+ vadd.i64 q15, q11, q0 -+ vsub.i64 q4, q4, q12 -+ vshr.s64 q12, q14, #25 -+ vsub.i64 q10, q10, q13 -+ vshr.s64 q13, q15, #25 -+ vadd.i64 q6, q6, q12 -+ vshl.i64 q12, q12, #25 -+ vadd.i64 q14, q6, q1 -+ vadd.i64 q2, q2, q13 -+ vsub.i64 q5, q5, q12 -+ vshr.s64 q12, q14, #26 -+ vshl.i64 q13, q13, #25 -+ vadd.i64 q14, q2, q1 -+ vadd.i64 q7, q7, q12 -+ vshl.i64 q12, q12, #26 -+ vadd.i64 q15, q7, q0 -+ vsub.i64 q11, q11, q13 -+ vshr.s64 q13, q14, #26 -+ vsub.i64 q6, q6, q12 -+ vshr.s64 q12, q15, #25 -+ vadd.i64 q3, q3, q13 -+ vshl.i64 q13, q13, #26 -+ vadd.i64 q14, q3, q0 -+ vadd.i64 q8, q8, q12 -+ vshl.i64 q12, q12, #25 -+ vadd.i64 q15, q8, q1 -+ add r2, r2, #8 -+ vsub.i64 q2, q2, q13 -+ vshr.s64 q13, q14, #25 -+ vsub.i64 q7, q7, q12 -+ vshr.s64 q12, q15, #26 -+ vadd.i64 q14, q13, 
q13 -+ vadd.i64 q9, q9, q12 -+ vtrn.32 d12, d14 -+ vshl.i64 q12, q12, #26 -+ vtrn.32 d13, d15 -+ vadd.i64 q0, q9, q0 -+ vadd.i64 q4, q4, q14 -+ vst1.8 d12, [r2, : 64]! -+ vshl.i64 q6, q13, #4 -+ vsub.i64 q7, q8, q12 -+ vshr.s64 q0, q0, #25 -+ vadd.i64 q4, q4, q6 -+ vadd.i64 q6, q10, q0 -+ vshl.i64 q0, q0, #25 -+ vadd.i64 q8, q6, q1 -+ vadd.i64 q4, q4, q13 -+ vshl.i64 q10, q13, #25 -+ vadd.i64 q1, q4, q1 -+ vsub.i64 q0, q9, q0 -+ vshr.s64 q8, q8, #26 -+ vsub.i64 q3, q3, q10 -+ vtrn.32 d14, d0 -+ vshr.s64 q1, q1, #26 -+ vtrn.32 d15, d1 -+ vadd.i64 q0, q11, q8 -+ vst1.8 d14, [r2, : 64] -+ vshl.i64 q7, q8, #26 -+ vadd.i64 q5, q5, q1 -+ vtrn.32 d4, d6 -+ vshl.i64 q1, q1, #26 -+ vtrn.32 d5, d7 -+ vsub.i64 q3, q6, q7 -+ add r2, r2, #16 -+ vsub.i64 q1, q4, q1 -+ vst1.8 d4, [r2, : 64] -+ vtrn.32 d6, d0 -+ vtrn.32 d7, d1 -+ sub r2, r2, #8 -+ vtrn.32 d2, d10 -+ vtrn.32 d3, d11 -+ vst1.8 d6, [r2, : 64] -+ sub r2, r2, #24 -+ vst1.8 d2, [r2, : 64] -+ add r2, r3, #96 -+ vmov.i32 q0, #0 -+ vmov.i64 d2, #0xff -+ vmov.i64 d3, #0 -+ vshr.u32 q1, q1, #7 -+ vst1.8 {d2-d3}, [r2, : 128]! -+ vst1.8 {d0-d1}, [r2, : 128]! -+ vst1.8 d0, [r2, : 64] -+ add r2, r3, #144 -+ vmov.i32 q0, #0 -+ vst1.8 {d0-d1}, [r2, : 128]! -+ vst1.8 {d0-d1}, [r2, : 128]! -+ vst1.8 d0, [r2, : 64] -+ add r2, r3, #240 -+ vmov.i32 q0, #0 -+ vmov.i64 d2, #0xff -+ vmov.i64 d3, #0 -+ vshr.u32 q1, q1, #7 -+ vst1.8 {d2-d3}, [r2, : 128]! -+ vst1.8 {d0-d1}, [r2, : 128]! -+ vst1.8 d0, [r2, : 64] -+ add r2, r3, #48 -+ add r6, r3, #192 -+ vld1.8 {d0-d1}, [r2, : 128]! -+ vld1.8 {d2-d3}, [r2, : 128]! -+ vld1.8 {d4}, [r2, : 64] -+ vst1.8 {d0-d1}, [r6, : 128]! -+ vst1.8 {d2-d3}, [r6, : 128]! -+ vst1.8 d4, [r6, : 64] -+._mainloop: -+ mov r2, r5, LSR #3 -+ and r6, r5, #7 -+ ldrb r2, [r1, r2] -+ mov r2, r2, LSR r6 -+ and r2, r2, #1 -+ str r5, [sp, #488] -+ eor r4, r4, r2 -+ str r2, [sp, #492] -+ neg r2, r4 -+ add r4, r3, #96 -+ add r5, r3, #192 -+ add r6, r3, #144 -+ vld1.8 {d8-d9}, [r4, : 128]! 
-+ add r7, r3, #240 -+ vld1.8 {d10-d11}, [r5, : 128]! -+ veor q6, q4, q5 -+ vld1.8 {d14-d15}, [r6, : 128]! -+ vdup.i32 q8, r2 -+ vld1.8 {d18-d19}, [r7, : 128]! -+ veor q10, q7, q9 -+ vld1.8 {d22-d23}, [r4, : 128]! -+ vand q6, q6, q8 -+ vld1.8 {d24-d25}, [r5, : 128]! -+ vand q10, q10, q8 -+ vld1.8 {d26-d27}, [r6, : 128]! -+ veor q4, q4, q6 -+ vld1.8 {d28-d29}, [r7, : 128]! -+ veor q5, q5, q6 -+ vld1.8 {d0}, [r4, : 64] -+ veor q6, q7, q10 -+ vld1.8 {d2}, [r5, : 64] -+ veor q7, q9, q10 -+ vld1.8 {d4}, [r6, : 64] -+ veor q9, q11, q12 -+ vld1.8 {d6}, [r7, : 64] -+ veor q10, q0, q1 -+ sub r2, r4, #32 -+ vand q9, q9, q8 -+ sub r4, r5, #32 -+ vand q10, q10, q8 -+ sub r5, r6, #32 -+ veor q11, q11, q9 -+ sub r6, r7, #32 -+ veor q0, q0, q10 -+ veor q9, q12, q9 -+ veor q1, q1, q10 -+ veor q10, q13, q14 -+ veor q12, q2, q3 -+ vand q10, q10, q8 -+ vand q8, q12, q8 -+ veor q12, q13, q10 -+ veor q2, q2, q8 -+ veor q10, q14, q10 -+ veor q3, q3, q8 -+ vadd.i32 q8, q4, q6 -+ vsub.i32 q4, q4, q6 -+ vst1.8 {d16-d17}, [r2, : 128]! -+ vadd.i32 q6, q11, q12 -+ vst1.8 {d8-d9}, [r5, : 128]! -+ vsub.i32 q4, q11, q12 -+ vst1.8 {d12-d13}, [r2, : 128]! -+ vadd.i32 q6, q0, q2 -+ vst1.8 {d8-d9}, [r5, : 128]! -+ vsub.i32 q0, q0, q2 -+ vst1.8 d12, [r2, : 64] -+ vadd.i32 q2, q5, q7 -+ vst1.8 d0, [r5, : 64] -+ vsub.i32 q0, q5, q7 -+ vst1.8 {d4-d5}, [r4, : 128]! -+ vadd.i32 q2, q9, q10 -+ vst1.8 {d0-d1}, [r6, : 128]! -+ vsub.i32 q0, q9, q10 -+ vst1.8 {d4-d5}, [r4, : 128]! -+ vadd.i32 q2, q1, q3 -+ vst1.8 {d0-d1}, [r6, : 128]! -+ vsub.i32 q0, q1, q3 -+ vst1.8 d4, [r4, : 64] -+ vst1.8 d0, [r6, : 64] -+ add r2, sp, #544 -+ add r4, r3, #96 -+ add r5, r3, #144 -+ vld1.8 {d0-d1}, [r2, : 128] -+ vld1.8 {d2-d3}, [r4, : 128]! -+ vld1.8 {d4-d5}, [r5, : 128]! -+ vzip.i32 q1, q2 -+ vld1.8 {d6-d7}, [r4, : 128]! -+ vld1.8 {d8-d9}, [r5, : 128]! 
-+ vshl.i32 q5, q1, #1 -+ vzip.i32 q3, q4 -+ vshl.i32 q6, q2, #1 -+ vld1.8 {d14}, [r4, : 64] -+ vshl.i32 q8, q3, #1 -+ vld1.8 {d15}, [r5, : 64] -+ vshl.i32 q9, q4, #1 -+ vmul.i32 d21, d7, d1 -+ vtrn.32 d14, d15 -+ vmul.i32 q11, q4, q0 -+ vmul.i32 q0, q7, q0 -+ vmull.s32 q12, d2, d2 -+ vmlal.s32 q12, d11, d1 -+ vmlal.s32 q12, d12, d0 -+ vmlal.s32 q12, d13, d23 -+ vmlal.s32 q12, d16, d22 -+ vmlal.s32 q12, d7, d21 -+ vmull.s32 q10, d2, d11 -+ vmlal.s32 q10, d4, d1 -+ vmlal.s32 q10, d13, d0 -+ vmlal.s32 q10, d6, d23 -+ vmlal.s32 q10, d17, d22 -+ vmull.s32 q13, d10, d4 -+ vmlal.s32 q13, d11, d3 -+ vmlal.s32 q13, d13, d1 -+ vmlal.s32 q13, d16, d0 -+ vmlal.s32 q13, d17, d23 -+ vmlal.s32 q13, d8, d22 -+ vmull.s32 q1, d10, d5 -+ vmlal.s32 q1, d11, d4 -+ vmlal.s32 q1, d6, d1 -+ vmlal.s32 q1, d17, d0 -+ vmlal.s32 q1, d8, d23 -+ vmull.s32 q14, d10, d6 -+ vmlal.s32 q14, d11, d13 -+ vmlal.s32 q14, d4, d4 -+ vmlal.s32 q14, d17, d1 -+ vmlal.s32 q14, d18, d0 -+ vmlal.s32 q14, d9, d23 -+ vmull.s32 q11, d10, d7 -+ vmlal.s32 q11, d11, d6 -+ vmlal.s32 q11, d12, d5 -+ vmlal.s32 q11, d8, d1 -+ vmlal.s32 q11, d19, d0 -+ vmull.s32 q15, d10, d8 -+ vmlal.s32 q15, d11, d17 -+ vmlal.s32 q15, d12, d6 -+ vmlal.s32 q15, d13, d5 -+ vmlal.s32 q15, d19, d1 -+ vmlal.s32 q15, d14, d0 -+ vmull.s32 q2, d10, d9 -+ vmlal.s32 q2, d11, d8 -+ vmlal.s32 q2, d12, d7 -+ vmlal.s32 q2, d13, d6 -+ vmlal.s32 q2, d14, d1 -+ vmull.s32 q0, d15, d1 -+ vmlal.s32 q0, d10, d14 -+ vmlal.s32 q0, d11, d19 -+ vmlal.s32 q0, d12, d8 -+ vmlal.s32 q0, d13, d17 -+ vmlal.s32 q0, d6, d6 -+ add r2, sp, #512 -+ vld1.8 {d18-d19}, [r2, : 128] -+ vmull.s32 q3, d16, d7 -+ vmlal.s32 q3, d10, d15 -+ vmlal.s32 q3, d11, d14 -+ vmlal.s32 q3, d12, d9 -+ vmlal.s32 q3, d13, d8 -+ add r2, sp, #528 -+ vld1.8 {d8-d9}, [r2, : 128] -+ vadd.i64 q5, q12, q9 -+ vadd.i64 q6, q15, q9 -+ vshr.s64 q5, q5, #26 -+ vshr.s64 q6, q6, #26 -+ vadd.i64 q7, q10, q5 -+ vshl.i64 q5, q5, #26 -+ vadd.i64 q8, q7, q4 -+ vadd.i64 q2, q2, q6 -+ vshl.i64 q6, q6, #26 -+ 
vadd.i64 q10, q2, q4 -+ vsub.i64 q5, q12, q5 -+ vshr.s64 q8, q8, #25 -+ vsub.i64 q6, q15, q6 -+ vshr.s64 q10, q10, #25 -+ vadd.i64 q12, q13, q8 -+ vshl.i64 q8, q8, #25 -+ vadd.i64 q13, q12, q9 -+ vadd.i64 q0, q0, q10 -+ vsub.i64 q7, q7, q8 -+ vshr.s64 q8, q13, #26 -+ vshl.i64 q10, q10, #25 -+ vadd.i64 q13, q0, q9 -+ vadd.i64 q1, q1, q8 -+ vshl.i64 q8, q8, #26 -+ vadd.i64 q15, q1, q4 -+ vsub.i64 q2, q2, q10 -+ vshr.s64 q10, q13, #26 -+ vsub.i64 q8, q12, q8 -+ vshr.s64 q12, q15, #25 -+ vadd.i64 q3, q3, q10 -+ vshl.i64 q10, q10, #26 -+ vadd.i64 q13, q3, q4 -+ vadd.i64 q14, q14, q12 -+ add r2, r3, #288 -+ vshl.i64 q12, q12, #25 -+ add r4, r3, #336 -+ vadd.i64 q15, q14, q9 -+ add r2, r2, #8 -+ vsub.i64 q0, q0, q10 -+ add r4, r4, #8 -+ vshr.s64 q10, q13, #25 -+ vsub.i64 q1, q1, q12 -+ vshr.s64 q12, q15, #26 -+ vadd.i64 q13, q10, q10 -+ vadd.i64 q11, q11, q12 -+ vtrn.32 d16, d2 -+ vshl.i64 q12, q12, #26 -+ vtrn.32 d17, d3 -+ vadd.i64 q1, q11, q4 -+ vadd.i64 q4, q5, q13 -+ vst1.8 d16, [r2, : 64]! -+ vshl.i64 q5, q10, #4 -+ vst1.8 d17, [r4, : 64]! 
-+ vsub.i64 q8, q14, q12 -+ vshr.s64 q1, q1, #25 -+ vadd.i64 q4, q4, q5 -+ vadd.i64 q5, q6, q1 -+ vshl.i64 q1, q1, #25 -+ vadd.i64 q6, q5, q9 -+ vadd.i64 q4, q4, q10 -+ vshl.i64 q10, q10, #25 -+ vadd.i64 q9, q4, q9 -+ vsub.i64 q1, q11, q1 -+ vshr.s64 q6, q6, #26 -+ vsub.i64 q3, q3, q10 -+ vtrn.32 d16, d2 -+ vshr.s64 q9, q9, #26 -+ vtrn.32 d17, d3 -+ vadd.i64 q1, q2, q6 -+ vst1.8 d16, [r2, : 64] -+ vshl.i64 q2, q6, #26 -+ vst1.8 d17, [r4, : 64] -+ vadd.i64 q6, q7, q9 -+ vtrn.32 d0, d6 -+ vshl.i64 q7, q9, #26 -+ vtrn.32 d1, d7 -+ vsub.i64 q2, q5, q2 -+ add r2, r2, #16 -+ vsub.i64 q3, q4, q7 -+ vst1.8 d0, [r2, : 64] -+ add r4, r4, #16 -+ vst1.8 d1, [r4, : 64] -+ vtrn.32 d4, d2 -+ vtrn.32 d5, d3 -+ sub r2, r2, #8 -+ sub r4, r4, #8 -+ vtrn.32 d6, d12 -+ vtrn.32 d7, d13 -+ vst1.8 d4, [r2, : 64] -+ vst1.8 d5, [r4, : 64] -+ sub r2, r2, #24 -+ sub r4, r4, #24 -+ vst1.8 d6, [r2, : 64] -+ vst1.8 d7, [r4, : 64] -+ add r2, r3, #240 -+ add r4, r3, #96 -+ vld1.8 {d0-d1}, [r4, : 128]! -+ vld1.8 {d2-d3}, [r4, : 128]! -+ vld1.8 {d4}, [r4, : 64] -+ add r4, r3, #144 -+ vld1.8 {d6-d7}, [r4, : 128]! -+ vtrn.32 q0, q3 -+ vld1.8 {d8-d9}, [r4, : 128]! -+ vshl.i32 q5, q0, #4 -+ vtrn.32 q1, q4 -+ vshl.i32 q6, q3, #4 -+ vadd.i32 q5, q5, q0 -+ vadd.i32 q6, q6, q3 -+ vshl.i32 q7, q1, #4 -+ vld1.8 {d5}, [r4, : 64] -+ vshl.i32 q8, q4, #4 -+ vtrn.32 d4, d5 -+ vadd.i32 q7, q7, q1 -+ vadd.i32 q8, q8, q4 -+ vld1.8 {d18-d19}, [r2, : 128]! -+ vshl.i32 q10, q2, #4 -+ vld1.8 {d22-d23}, [r2, : 128]! -+ vadd.i32 q10, q10, q2 -+ vld1.8 {d24}, [r2, : 64] -+ vadd.i32 q5, q5, q0 -+ add r2, r3, #192 -+ vld1.8 {d26-d27}, [r2, : 128]! -+ vadd.i32 q6, q6, q3 -+ vld1.8 {d28-d29}, [r2, : 128]! 
-+ vadd.i32 q8, q8, q4 -+ vld1.8 {d25}, [r2, : 64] -+ vadd.i32 q10, q10, q2 -+ vtrn.32 q9, q13 -+ vadd.i32 q7, q7, q1 -+ vadd.i32 q5, q5, q0 -+ vtrn.32 q11, q14 -+ vadd.i32 q6, q6, q3 -+ add r2, sp, #560 -+ vadd.i32 q10, q10, q2 -+ vtrn.32 d24, d25 -+ vst1.8 {d12-d13}, [r2, : 128] -+ vshl.i32 q6, q13, #1 -+ add r2, sp, #576 -+ vst1.8 {d20-d21}, [r2, : 128] -+ vshl.i32 q10, q14, #1 -+ add r2, sp, #592 -+ vst1.8 {d12-d13}, [r2, : 128] -+ vshl.i32 q15, q12, #1 -+ vadd.i32 q8, q8, q4 -+ vext.32 d10, d31, d30, #0 -+ vadd.i32 q7, q7, q1 -+ add r2, sp, #608 -+ vst1.8 {d16-d17}, [r2, : 128] -+ vmull.s32 q8, d18, d5 -+ vmlal.s32 q8, d26, d4 -+ vmlal.s32 q8, d19, d9 -+ vmlal.s32 q8, d27, d3 -+ vmlal.s32 q8, d22, d8 -+ vmlal.s32 q8, d28, d2 -+ vmlal.s32 q8, d23, d7 -+ vmlal.s32 q8, d29, d1 -+ vmlal.s32 q8, d24, d6 -+ vmlal.s32 q8, d25, d0 -+ add r2, sp, #624 -+ vst1.8 {d14-d15}, [r2, : 128] -+ vmull.s32 q2, d18, d4 -+ vmlal.s32 q2, d12, d9 -+ vmlal.s32 q2, d13, d8 -+ vmlal.s32 q2, d19, d3 -+ vmlal.s32 q2, d22, d2 -+ vmlal.s32 q2, d23, d1 -+ vmlal.s32 q2, d24, d0 -+ add r2, sp, #640 -+ vst1.8 {d20-d21}, [r2, : 128] -+ vmull.s32 q7, d18, d9 -+ vmlal.s32 q7, d26, d3 -+ vmlal.s32 q7, d19, d8 -+ vmlal.s32 q7, d27, d2 -+ vmlal.s32 q7, d22, d7 -+ vmlal.s32 q7, d28, d1 -+ vmlal.s32 q7, d23, d6 -+ vmlal.s32 q7, d29, d0 -+ add r2, sp, #656 -+ vst1.8 {d10-d11}, [r2, : 128] -+ vmull.s32 q5, d18, d3 -+ vmlal.s32 q5, d19, d2 -+ vmlal.s32 q5, d22, d1 -+ vmlal.s32 q5, d23, d0 -+ vmlal.s32 q5, d12, d8 -+ add r2, sp, #672 -+ vst1.8 {d16-d17}, [r2, : 128] -+ vmull.s32 q4, d18, d8 -+ vmlal.s32 q4, d26, d2 -+ vmlal.s32 q4, d19, d7 -+ vmlal.s32 q4, d27, d1 -+ vmlal.s32 q4, d22, d6 -+ vmlal.s32 q4, d28, d0 -+ vmull.s32 q8, d18, d7 -+ vmlal.s32 q8, d26, d1 -+ vmlal.s32 q8, d19, d6 -+ vmlal.s32 q8, d27, d0 -+ add r2, sp, #576 -+ vld1.8 {d20-d21}, [r2, : 128] -+ vmlal.s32 q7, d24, d21 -+ vmlal.s32 q7, d25, d20 -+ vmlal.s32 q4, d23, d21 -+ vmlal.s32 q4, d29, d20 -+ vmlal.s32 q8, d22, d21 -+ vmlal.s32 
q8, d28, d20 -+ vmlal.s32 q5, d24, d20 -+ add r2, sp, #576 -+ vst1.8 {d14-d15}, [r2, : 128] -+ vmull.s32 q7, d18, d6 -+ vmlal.s32 q7, d26, d0 -+ add r2, sp, #656 -+ vld1.8 {d30-d31}, [r2, : 128] -+ vmlal.s32 q2, d30, d21 -+ vmlal.s32 q7, d19, d21 -+ vmlal.s32 q7, d27, d20 -+ add r2, sp, #624 -+ vld1.8 {d26-d27}, [r2, : 128] -+ vmlal.s32 q4, d25, d27 -+ vmlal.s32 q8, d29, d27 -+ vmlal.s32 q8, d25, d26 -+ vmlal.s32 q7, d28, d27 -+ vmlal.s32 q7, d29, d26 -+ add r2, sp, #608 -+ vld1.8 {d28-d29}, [r2, : 128] -+ vmlal.s32 q4, d24, d29 -+ vmlal.s32 q8, d23, d29 -+ vmlal.s32 q8, d24, d28 -+ vmlal.s32 q7, d22, d29 -+ vmlal.s32 q7, d23, d28 -+ add r2, sp, #608 -+ vst1.8 {d8-d9}, [r2, : 128] -+ add r2, sp, #560 -+ vld1.8 {d8-d9}, [r2, : 128] -+ vmlal.s32 q7, d24, d9 -+ vmlal.s32 q7, d25, d31 -+ vmull.s32 q1, d18, d2 -+ vmlal.s32 q1, d19, d1 -+ vmlal.s32 q1, d22, d0 -+ vmlal.s32 q1, d24, d27 -+ vmlal.s32 q1, d23, d20 -+ vmlal.s32 q1, d12, d7 -+ vmlal.s32 q1, d13, d6 -+ vmull.s32 q6, d18, d1 -+ vmlal.s32 q6, d19, d0 -+ vmlal.s32 q6, d23, d27 -+ vmlal.s32 q6, d22, d20 -+ vmlal.s32 q6, d24, d26 -+ vmull.s32 q0, d18, d0 -+ vmlal.s32 q0, d22, d27 -+ vmlal.s32 q0, d23, d26 -+ vmlal.s32 q0, d24, d31 -+ vmlal.s32 q0, d19, d20 -+ add r2, sp, #640 -+ vld1.8 {d18-d19}, [r2, : 128] -+ vmlal.s32 q2, d18, d7 -+ vmlal.s32 q2, d19, d6 -+ vmlal.s32 q5, d18, d6 -+ vmlal.s32 q5, d19, d21 -+ vmlal.s32 q1, d18, d21 -+ vmlal.s32 q1, d19, d29 -+ vmlal.s32 q0, d18, d28 -+ vmlal.s32 q0, d19, d9 -+ vmlal.s32 q6, d18, d29 -+ vmlal.s32 q6, d19, d28 -+ add r2, sp, #592 -+ vld1.8 {d18-d19}, [r2, : 128] -+ add r2, sp, #512 -+ vld1.8 {d22-d23}, [r2, : 128] -+ vmlal.s32 q5, d19, d7 -+ vmlal.s32 q0, d18, d21 -+ vmlal.s32 q0, d19, d29 -+ vmlal.s32 q6, d18, d6 -+ add r2, sp, #528 -+ vld1.8 {d6-d7}, [r2, : 128] -+ vmlal.s32 q6, d19, d21 -+ add r2, sp, #576 -+ vld1.8 {d18-d19}, [r2, : 128] -+ vmlal.s32 q0, d30, d8 -+ add r2, sp, #672 -+ vld1.8 {d20-d21}, [r2, : 128] -+ vmlal.s32 q5, d30, d29 -+ add r2, sp, #608 -+ 
vld1.8 {d24-d25}, [r2, : 128] -+ vmlal.s32 q1, d30, d28 -+ vadd.i64 q13, q0, q11 -+ vadd.i64 q14, q5, q11 -+ vmlal.s32 q6, d30, d9 -+ vshr.s64 q4, q13, #26 -+ vshr.s64 q13, q14, #26 -+ vadd.i64 q7, q7, q4 -+ vshl.i64 q4, q4, #26 -+ vadd.i64 q14, q7, q3 -+ vadd.i64 q9, q9, q13 -+ vshl.i64 q13, q13, #26 -+ vadd.i64 q15, q9, q3 -+ vsub.i64 q0, q0, q4 -+ vshr.s64 q4, q14, #25 -+ vsub.i64 q5, q5, q13 -+ vshr.s64 q13, q15, #25 -+ vadd.i64 q6, q6, q4 -+ vshl.i64 q4, q4, #25 -+ vadd.i64 q14, q6, q11 -+ vadd.i64 q2, q2, q13 -+ vsub.i64 q4, q7, q4 -+ vshr.s64 q7, q14, #26 -+ vshl.i64 q13, q13, #25 -+ vadd.i64 q14, q2, q11 -+ vadd.i64 q8, q8, q7 -+ vshl.i64 q7, q7, #26 -+ vadd.i64 q15, q8, q3 -+ vsub.i64 q9, q9, q13 -+ vshr.s64 q13, q14, #26 -+ vsub.i64 q6, q6, q7 -+ vshr.s64 q7, q15, #25 -+ vadd.i64 q10, q10, q13 -+ vshl.i64 q13, q13, #26 -+ vadd.i64 q14, q10, q3 -+ vadd.i64 q1, q1, q7 -+ add r2, r3, #144 -+ vshl.i64 q7, q7, #25 -+ add r4, r3, #96 -+ vadd.i64 q15, q1, q11 -+ add r2, r2, #8 -+ vsub.i64 q2, q2, q13 -+ add r4, r4, #8 -+ vshr.s64 q13, q14, #25 -+ vsub.i64 q7, q8, q7 -+ vshr.s64 q8, q15, #26 -+ vadd.i64 q14, q13, q13 -+ vadd.i64 q12, q12, q8 -+ vtrn.32 d12, d14 -+ vshl.i64 q8, q8, #26 -+ vtrn.32 d13, d15 -+ vadd.i64 q3, q12, q3 -+ vadd.i64 q0, q0, q14 -+ vst1.8 d12, [r2, : 64]! -+ vshl.i64 q7, q13, #4 -+ vst1.8 d13, [r4, : 64]! 
-+ vsub.i64 q1, q1, q8 -+ vshr.s64 q3, q3, #25 -+ vadd.i64 q0, q0, q7 -+ vadd.i64 q5, q5, q3 -+ vshl.i64 q3, q3, #25 -+ vadd.i64 q6, q5, q11 -+ vadd.i64 q0, q0, q13 -+ vshl.i64 q7, q13, #25 -+ vadd.i64 q8, q0, q11 -+ vsub.i64 q3, q12, q3 -+ vshr.s64 q6, q6, #26 -+ vsub.i64 q7, q10, q7 -+ vtrn.32 d2, d6 -+ vshr.s64 q8, q8, #26 -+ vtrn.32 d3, d7 -+ vadd.i64 q3, q9, q6 -+ vst1.8 d2, [r2, : 64] -+ vshl.i64 q6, q6, #26 -+ vst1.8 d3, [r4, : 64] -+ vadd.i64 q1, q4, q8 -+ vtrn.32 d4, d14 -+ vshl.i64 q4, q8, #26 -+ vtrn.32 d5, d15 -+ vsub.i64 q5, q5, q6 -+ add r2, r2, #16 -+ vsub.i64 q0, q0, q4 -+ vst1.8 d4, [r2, : 64] -+ add r4, r4, #16 -+ vst1.8 d5, [r4, : 64] -+ vtrn.32 d10, d6 -+ vtrn.32 d11, d7 -+ sub r2, r2, #8 -+ sub r4, r4, #8 -+ vtrn.32 d0, d2 -+ vtrn.32 d1, d3 -+ vst1.8 d10, [r2, : 64] -+ vst1.8 d11, [r4, : 64] -+ sub r2, r2, #24 -+ sub r4, r4, #24 -+ vst1.8 d0, [r2, : 64] -+ vst1.8 d1, [r4, : 64] -+ add r2, r3, #288 -+ add r4, r3, #336 -+ vld1.8 {d0-d1}, [r2, : 128]! -+ vld1.8 {d2-d3}, [r4, : 128]! -+ vsub.i32 q0, q0, q1 -+ vld1.8 {d2-d3}, [r2, : 128]! -+ vld1.8 {d4-d5}, [r4, : 128]! -+ vsub.i32 q1, q1, q2 -+ add r5, r3, #240 -+ vld1.8 {d4}, [r2, : 64] -+ vld1.8 {d6}, [r4, : 64] -+ vsub.i32 q2, q2, q3 -+ vst1.8 {d0-d1}, [r5, : 128]! -+ vst1.8 {d2-d3}, [r5, : 128]! -+ vst1.8 d4, [r5, : 64] -+ add r2, r3, #144 -+ add r4, r3, #96 -+ add r5, r3, #144 -+ add r6, r3, #192 -+ vld1.8 {d0-d1}, [r2, : 128]! -+ vld1.8 {d2-d3}, [r4, : 128]! -+ vsub.i32 q2, q0, q1 -+ vadd.i32 q0, q0, q1 -+ vld1.8 {d2-d3}, [r2, : 128]! -+ vld1.8 {d6-d7}, [r4, : 128]! -+ vsub.i32 q4, q1, q3 -+ vadd.i32 q1, q1, q3 -+ vld1.8 {d6}, [r2, : 64] -+ vld1.8 {d10}, [r4, : 64] -+ vsub.i32 q6, q3, q5 -+ vadd.i32 q3, q3, q5 -+ vst1.8 {d4-d5}, [r5, : 128]! -+ vst1.8 {d0-d1}, [r6, : 128]! -+ vst1.8 {d8-d9}, [r5, : 128]! -+ vst1.8 {d2-d3}, [r6, : 128]! -+ vst1.8 d12, [r5, : 64] -+ vst1.8 d6, [r6, : 64] -+ add r2, r3, #0 -+ add r4, r3, #240 -+ vld1.8 {d0-d1}, [r4, : 128]! -+ vld1.8 {d2-d3}, [r4, : 128]! 
-+ vld1.8 {d4}, [r4, : 64] -+ add r4, r3, #336 -+ vld1.8 {d6-d7}, [r4, : 128]! -+ vtrn.32 q0, q3 -+ vld1.8 {d8-d9}, [r4, : 128]! -+ vshl.i32 q5, q0, #4 -+ vtrn.32 q1, q4 -+ vshl.i32 q6, q3, #4 -+ vadd.i32 q5, q5, q0 -+ vadd.i32 q6, q6, q3 -+ vshl.i32 q7, q1, #4 -+ vld1.8 {d5}, [r4, : 64] -+ vshl.i32 q8, q4, #4 -+ vtrn.32 d4, d5 -+ vadd.i32 q7, q7, q1 -+ vadd.i32 q8, q8, q4 -+ vld1.8 {d18-d19}, [r2, : 128]! -+ vshl.i32 q10, q2, #4 -+ vld1.8 {d22-d23}, [r2, : 128]! -+ vadd.i32 q10, q10, q2 -+ vld1.8 {d24}, [r2, : 64] -+ vadd.i32 q5, q5, q0 -+ add r2, r3, #288 -+ vld1.8 {d26-d27}, [r2, : 128]! -+ vadd.i32 q6, q6, q3 -+ vld1.8 {d28-d29}, [r2, : 128]! -+ vadd.i32 q8, q8, q4 -+ vld1.8 {d25}, [r2, : 64] -+ vadd.i32 q10, q10, q2 -+ vtrn.32 q9, q13 -+ vadd.i32 q7, q7, q1 -+ vadd.i32 q5, q5, q0 -+ vtrn.32 q11, q14 -+ vadd.i32 q6, q6, q3 -+ add r2, sp, #560 -+ vadd.i32 q10, q10, q2 -+ vtrn.32 d24, d25 -+ vst1.8 {d12-d13}, [r2, : 128] -+ vshl.i32 q6, q13, #1 -+ add r2, sp, #576 -+ vst1.8 {d20-d21}, [r2, : 128] -+ vshl.i32 q10, q14, #1 -+ add r2, sp, #592 -+ vst1.8 {d12-d13}, [r2, : 128] -+ vshl.i32 q15, q12, #1 -+ vadd.i32 q8, q8, q4 -+ vext.32 d10, d31, d30, #0 -+ vadd.i32 q7, q7, q1 -+ add r2, sp, #608 -+ vst1.8 {d16-d17}, [r2, : 128] -+ vmull.s32 q8, d18, d5 -+ vmlal.s32 q8, d26, d4 -+ vmlal.s32 q8, d19, d9 -+ vmlal.s32 q8, d27, d3 -+ vmlal.s32 q8, d22, d8 -+ vmlal.s32 q8, d28, d2 -+ vmlal.s32 q8, d23, d7 -+ vmlal.s32 q8, d29, d1 -+ vmlal.s32 q8, d24, d6 -+ vmlal.s32 q8, d25, d0 -+ add r2, sp, #624 -+ vst1.8 {d14-d15}, [r2, : 128] -+ vmull.s32 q2, d18, d4 -+ vmlal.s32 q2, d12, d9 -+ vmlal.s32 q2, d13, d8 -+ vmlal.s32 q2, d19, d3 -+ vmlal.s32 q2, d22, d2 -+ vmlal.s32 q2, d23, d1 -+ vmlal.s32 q2, d24, d0 -+ add r2, sp, #640 -+ vst1.8 {d20-d21}, [r2, : 128] -+ vmull.s32 q7, d18, d9 -+ vmlal.s32 q7, d26, d3 -+ vmlal.s32 q7, d19, d8 -+ vmlal.s32 q7, d27, d2 -+ vmlal.s32 q7, d22, d7 -+ vmlal.s32 q7, d28, d1 -+ vmlal.s32 q7, d23, d6 -+ vmlal.s32 q7, d29, d0 -+ add r2, sp, #656 -+ 
vst1.8 {d10-d11}, [r2, : 128] -+ vmull.s32 q5, d18, d3 -+ vmlal.s32 q5, d19, d2 -+ vmlal.s32 q5, d22, d1 -+ vmlal.s32 q5, d23, d0 -+ vmlal.s32 q5, d12, d8 -+ add r2, sp, #672 -+ vst1.8 {d16-d17}, [r2, : 128] -+ vmull.s32 q4, d18, d8 -+ vmlal.s32 q4, d26, d2 -+ vmlal.s32 q4, d19, d7 -+ vmlal.s32 q4, d27, d1 -+ vmlal.s32 q4, d22, d6 -+ vmlal.s32 q4, d28, d0 -+ vmull.s32 q8, d18, d7 -+ vmlal.s32 q8, d26, d1 -+ vmlal.s32 q8, d19, d6 -+ vmlal.s32 q8, d27, d0 -+ add r2, sp, #576 -+ vld1.8 {d20-d21}, [r2, : 128] -+ vmlal.s32 q7, d24, d21 -+ vmlal.s32 q7, d25, d20 -+ vmlal.s32 q4, d23, d21 -+ vmlal.s32 q4, d29, d20 -+ vmlal.s32 q8, d22, d21 -+ vmlal.s32 q8, d28, d20 -+ vmlal.s32 q5, d24, d20 -+ add r2, sp, #576 -+ vst1.8 {d14-d15}, [r2, : 128] -+ vmull.s32 q7, d18, d6 -+ vmlal.s32 q7, d26, d0 -+ add r2, sp, #656 -+ vld1.8 {d30-d31}, [r2, : 128] -+ vmlal.s32 q2, d30, d21 -+ vmlal.s32 q7, d19, d21 -+ vmlal.s32 q7, d27, d20 -+ add r2, sp, #624 -+ vld1.8 {d26-d27}, [r2, : 128] -+ vmlal.s32 q4, d25, d27 -+ vmlal.s32 q8, d29, d27 -+ vmlal.s32 q8, d25, d26 -+ vmlal.s32 q7, d28, d27 -+ vmlal.s32 q7, d29, d26 -+ add r2, sp, #608 -+ vld1.8 {d28-d29}, [r2, : 128] -+ vmlal.s32 q4, d24, d29 -+ vmlal.s32 q8, d23, d29 -+ vmlal.s32 q8, d24, d28 -+ vmlal.s32 q7, d22, d29 -+ vmlal.s32 q7, d23, d28 -+ add r2, sp, #608 -+ vst1.8 {d8-d9}, [r2, : 128] -+ add r2, sp, #560 -+ vld1.8 {d8-d9}, [r2, : 128] -+ vmlal.s32 q7, d24, d9 -+ vmlal.s32 q7, d25, d31 -+ vmull.s32 q1, d18, d2 -+ vmlal.s32 q1, d19, d1 -+ vmlal.s32 q1, d22, d0 -+ vmlal.s32 q1, d24, d27 -+ vmlal.s32 q1, d23, d20 -+ vmlal.s32 q1, d12, d7 -+ vmlal.s32 q1, d13, d6 -+ vmull.s32 q6, d18, d1 -+ vmlal.s32 q6, d19, d0 -+ vmlal.s32 q6, d23, d27 -+ vmlal.s32 q6, d22, d20 -+ vmlal.s32 q6, d24, d26 -+ vmull.s32 q0, d18, d0 -+ vmlal.s32 q0, d22, d27 -+ vmlal.s32 q0, d23, d26 -+ vmlal.s32 q0, d24, d31 -+ vmlal.s32 q0, d19, d20 -+ add r2, sp, #640 -+ vld1.8 {d18-d19}, [r2, : 128] -+ vmlal.s32 q2, d18, d7 -+ vmlal.s32 q2, d19, d6 -+ vmlal.s32 q5, 
d18, d6 -+ vmlal.s32 q5, d19, d21 -+ vmlal.s32 q1, d18, d21 -+ vmlal.s32 q1, d19, d29 -+ vmlal.s32 q0, d18, d28 -+ vmlal.s32 q0, d19, d9 -+ vmlal.s32 q6, d18, d29 -+ vmlal.s32 q6, d19, d28 -+ add r2, sp, #592 -+ vld1.8 {d18-d19}, [r2, : 128] -+ add r2, sp, #512 -+ vld1.8 {d22-d23}, [r2, : 128] -+ vmlal.s32 q5, d19, d7 -+ vmlal.s32 q0, d18, d21 -+ vmlal.s32 q0, d19, d29 -+ vmlal.s32 q6, d18, d6 -+ add r2, sp, #528 -+ vld1.8 {d6-d7}, [r2, : 128] -+ vmlal.s32 q6, d19, d21 -+ add r2, sp, #576 -+ vld1.8 {d18-d19}, [r2, : 128] -+ vmlal.s32 q0, d30, d8 -+ add r2, sp, #672 -+ vld1.8 {d20-d21}, [r2, : 128] -+ vmlal.s32 q5, d30, d29 -+ add r2, sp, #608 -+ vld1.8 {d24-d25}, [r2, : 128] -+ vmlal.s32 q1, d30, d28 -+ vadd.i64 q13, q0, q11 -+ vadd.i64 q14, q5, q11 -+ vmlal.s32 q6, d30, d9 -+ vshr.s64 q4, q13, #26 -+ vshr.s64 q13, q14, #26 -+ vadd.i64 q7, q7, q4 -+ vshl.i64 q4, q4, #26 -+ vadd.i64 q14, q7, q3 -+ vadd.i64 q9, q9, q13 -+ vshl.i64 q13, q13, #26 -+ vadd.i64 q15, q9, q3 -+ vsub.i64 q0, q0, q4 -+ vshr.s64 q4, q14, #25 -+ vsub.i64 q5, q5, q13 -+ vshr.s64 q13, q15, #25 -+ vadd.i64 q6, q6, q4 -+ vshl.i64 q4, q4, #25 -+ vadd.i64 q14, q6, q11 -+ vadd.i64 q2, q2, q13 -+ vsub.i64 q4, q7, q4 -+ vshr.s64 q7, q14, #26 -+ vshl.i64 q13, q13, #25 -+ vadd.i64 q14, q2, q11 -+ vadd.i64 q8, q8, q7 -+ vshl.i64 q7, q7, #26 -+ vadd.i64 q15, q8, q3 -+ vsub.i64 q9, q9, q13 -+ vshr.s64 q13, q14, #26 -+ vsub.i64 q6, q6, q7 -+ vshr.s64 q7, q15, #25 -+ vadd.i64 q10, q10, q13 -+ vshl.i64 q13, q13, #26 -+ vadd.i64 q14, q10, q3 -+ vadd.i64 q1, q1, q7 -+ add r2, r3, #288 -+ vshl.i64 q7, q7, #25 -+ add r4, r3, #96 -+ vadd.i64 q15, q1, q11 -+ add r2, r2, #8 -+ vsub.i64 q2, q2, q13 -+ add r4, r4, #8 -+ vshr.s64 q13, q14, #25 -+ vsub.i64 q7, q8, q7 -+ vshr.s64 q8, q15, #26 -+ vadd.i64 q14, q13, q13 -+ vadd.i64 q12, q12, q8 -+ vtrn.32 d12, d14 -+ vshl.i64 q8, q8, #26 -+ vtrn.32 d13, d15 -+ vadd.i64 q3, q12, q3 -+ vadd.i64 q0, q0, q14 -+ vst1.8 d12, [r2, : 64]! 
-+ vshl.i64 q7, q13, #4 -+ vst1.8 d13, [r4, : 64]! -+ vsub.i64 q1, q1, q8 -+ vshr.s64 q3, q3, #25 -+ vadd.i64 q0, q0, q7 -+ vadd.i64 q5, q5, q3 -+ vshl.i64 q3, q3, #25 -+ vadd.i64 q6, q5, q11 -+ vadd.i64 q0, q0, q13 -+ vshl.i64 q7, q13, #25 -+ vadd.i64 q8, q0, q11 -+ vsub.i64 q3, q12, q3 -+ vshr.s64 q6, q6, #26 -+ vsub.i64 q7, q10, q7 -+ vtrn.32 d2, d6 -+ vshr.s64 q8, q8, #26 -+ vtrn.32 d3, d7 -+ vadd.i64 q3, q9, q6 -+ vst1.8 d2, [r2, : 64] -+ vshl.i64 q6, q6, #26 -+ vst1.8 d3, [r4, : 64] -+ vadd.i64 q1, q4, q8 -+ vtrn.32 d4, d14 -+ vshl.i64 q4, q8, #26 -+ vtrn.32 d5, d15 -+ vsub.i64 q5, q5, q6 -+ add r2, r2, #16 -+ vsub.i64 q0, q0, q4 -+ vst1.8 d4, [r2, : 64] -+ add r4, r4, #16 -+ vst1.8 d5, [r4, : 64] -+ vtrn.32 d10, d6 -+ vtrn.32 d11, d7 -+ sub r2, r2, #8 -+ sub r4, r4, #8 -+ vtrn.32 d0, d2 -+ vtrn.32 d1, d3 -+ vst1.8 d10, [r2, : 64] -+ vst1.8 d11, [r4, : 64] -+ sub r2, r2, #24 -+ sub r4, r4, #24 -+ vst1.8 d0, [r2, : 64] -+ vst1.8 d1, [r4, : 64] -+ add r2, sp, #544 -+ add r4, r3, #144 -+ add r5, r3, #192 -+ vld1.8 {d0-d1}, [r2, : 128] -+ vld1.8 {d2-d3}, [r4, : 128]! -+ vld1.8 {d4-d5}, [r5, : 128]! -+ vzip.i32 q1, q2 -+ vld1.8 {d6-d7}, [r4, : 128]! -+ vld1.8 {d8-d9}, [r5, : 128]! 
-+ vshl.i32 q5, q1, #1 -+ vzip.i32 q3, q4 -+ vshl.i32 q6, q2, #1 -+ vld1.8 {d14}, [r4, : 64] -+ vshl.i32 q8, q3, #1 -+ vld1.8 {d15}, [r5, : 64] -+ vshl.i32 q9, q4, #1 -+ vmul.i32 d21, d7, d1 -+ vtrn.32 d14, d15 -+ vmul.i32 q11, q4, q0 -+ vmul.i32 q0, q7, q0 -+ vmull.s32 q12, d2, d2 -+ vmlal.s32 q12, d11, d1 -+ vmlal.s32 q12, d12, d0 -+ vmlal.s32 q12, d13, d23 -+ vmlal.s32 q12, d16, d22 -+ vmlal.s32 q12, d7, d21 -+ vmull.s32 q10, d2, d11 -+ vmlal.s32 q10, d4, d1 -+ vmlal.s32 q10, d13, d0 -+ vmlal.s32 q10, d6, d23 -+ vmlal.s32 q10, d17, d22 -+ vmull.s32 q13, d10, d4 -+ vmlal.s32 q13, d11, d3 -+ vmlal.s32 q13, d13, d1 -+ vmlal.s32 q13, d16, d0 -+ vmlal.s32 q13, d17, d23 -+ vmlal.s32 q13, d8, d22 -+ vmull.s32 q1, d10, d5 -+ vmlal.s32 q1, d11, d4 -+ vmlal.s32 q1, d6, d1 -+ vmlal.s32 q1, d17, d0 -+ vmlal.s32 q1, d8, d23 -+ vmull.s32 q14, d10, d6 -+ vmlal.s32 q14, d11, d13 -+ vmlal.s32 q14, d4, d4 -+ vmlal.s32 q14, d17, d1 -+ vmlal.s32 q14, d18, d0 -+ vmlal.s32 q14, d9, d23 -+ vmull.s32 q11, d10, d7 -+ vmlal.s32 q11, d11, d6 -+ vmlal.s32 q11, d12, d5 -+ vmlal.s32 q11, d8, d1 -+ vmlal.s32 q11, d19, d0 -+ vmull.s32 q15, d10, d8 -+ vmlal.s32 q15, d11, d17 -+ vmlal.s32 q15, d12, d6 -+ vmlal.s32 q15, d13, d5 -+ vmlal.s32 q15, d19, d1 -+ vmlal.s32 q15, d14, d0 -+ vmull.s32 q2, d10, d9 -+ vmlal.s32 q2, d11, d8 -+ vmlal.s32 q2, d12, d7 -+ vmlal.s32 q2, d13, d6 -+ vmlal.s32 q2, d14, d1 -+ vmull.s32 q0, d15, d1 -+ vmlal.s32 q0, d10, d14 -+ vmlal.s32 q0, d11, d19 -+ vmlal.s32 q0, d12, d8 -+ vmlal.s32 q0, d13, d17 -+ vmlal.s32 q0, d6, d6 -+ add r2, sp, #512 -+ vld1.8 {d18-d19}, [r2, : 128] -+ vmull.s32 q3, d16, d7 -+ vmlal.s32 q3, d10, d15 -+ vmlal.s32 q3, d11, d14 -+ vmlal.s32 q3, d12, d9 -+ vmlal.s32 q3, d13, d8 -+ add r2, sp, #528 -+ vld1.8 {d8-d9}, [r2, : 128] -+ vadd.i64 q5, q12, q9 -+ vadd.i64 q6, q15, q9 -+ vshr.s64 q5, q5, #26 -+ vshr.s64 q6, q6, #26 -+ vadd.i64 q7, q10, q5 -+ vshl.i64 q5, q5, #26 -+ vadd.i64 q8, q7, q4 -+ vadd.i64 q2, q2, q6 -+ vshl.i64 q6, q6, #26 -+ 
vadd.i64 q10, q2, q4 -+ vsub.i64 q5, q12, q5 -+ vshr.s64 q8, q8, #25 -+ vsub.i64 q6, q15, q6 -+ vshr.s64 q10, q10, #25 -+ vadd.i64 q12, q13, q8 -+ vshl.i64 q8, q8, #25 -+ vadd.i64 q13, q12, q9 -+ vadd.i64 q0, q0, q10 -+ vsub.i64 q7, q7, q8 -+ vshr.s64 q8, q13, #26 -+ vshl.i64 q10, q10, #25 -+ vadd.i64 q13, q0, q9 -+ vadd.i64 q1, q1, q8 -+ vshl.i64 q8, q8, #26 -+ vadd.i64 q15, q1, q4 -+ vsub.i64 q2, q2, q10 -+ vshr.s64 q10, q13, #26 -+ vsub.i64 q8, q12, q8 -+ vshr.s64 q12, q15, #25 -+ vadd.i64 q3, q3, q10 -+ vshl.i64 q10, q10, #26 -+ vadd.i64 q13, q3, q4 -+ vadd.i64 q14, q14, q12 -+ add r2, r3, #144 -+ vshl.i64 q12, q12, #25 -+ add r4, r3, #192 -+ vadd.i64 q15, q14, q9 -+ add r2, r2, #8 -+ vsub.i64 q0, q0, q10 -+ add r4, r4, #8 -+ vshr.s64 q10, q13, #25 -+ vsub.i64 q1, q1, q12 -+ vshr.s64 q12, q15, #26 -+ vadd.i64 q13, q10, q10 -+ vadd.i64 q11, q11, q12 -+ vtrn.32 d16, d2 -+ vshl.i64 q12, q12, #26 -+ vtrn.32 d17, d3 -+ vadd.i64 q1, q11, q4 -+ vadd.i64 q4, q5, q13 -+ vst1.8 d16, [r2, : 64]! -+ vshl.i64 q5, q10, #4 -+ vst1.8 d17, [r4, : 64]! 
-+ vsub.i64 q8, q14, q12 -+ vshr.s64 q1, q1, #25 -+ vadd.i64 q4, q4, q5 -+ vadd.i64 q5, q6, q1 -+ vshl.i64 q1, q1, #25 -+ vadd.i64 q6, q5, q9 -+ vadd.i64 q4, q4, q10 -+ vshl.i64 q10, q10, #25 -+ vadd.i64 q9, q4, q9 -+ vsub.i64 q1, q11, q1 -+ vshr.s64 q6, q6, #26 -+ vsub.i64 q3, q3, q10 -+ vtrn.32 d16, d2 -+ vshr.s64 q9, q9, #26 -+ vtrn.32 d17, d3 -+ vadd.i64 q1, q2, q6 -+ vst1.8 d16, [r2, : 64] -+ vshl.i64 q2, q6, #26 -+ vst1.8 d17, [r4, : 64] -+ vadd.i64 q6, q7, q9 -+ vtrn.32 d0, d6 -+ vshl.i64 q7, q9, #26 -+ vtrn.32 d1, d7 -+ vsub.i64 q2, q5, q2 -+ add r2, r2, #16 -+ vsub.i64 q3, q4, q7 -+ vst1.8 d0, [r2, : 64] -+ add r4, r4, #16 -+ vst1.8 d1, [r4, : 64] -+ vtrn.32 d4, d2 -+ vtrn.32 d5, d3 -+ sub r2, r2, #8 -+ sub r4, r4, #8 -+ vtrn.32 d6, d12 -+ vtrn.32 d7, d13 -+ vst1.8 d4, [r2, : 64] -+ vst1.8 d5, [r4, : 64] -+ sub r2, r2, #24 -+ sub r4, r4, #24 -+ vst1.8 d6, [r2, : 64] -+ vst1.8 d7, [r4, : 64] -+ add r2, r3, #336 -+ add r4, r3, #288 -+ vld1.8 {d0-d1}, [r2, : 128]! -+ vld1.8 {d2-d3}, [r4, : 128]! -+ vadd.i32 q0, q0, q1 -+ vld1.8 {d2-d3}, [r2, : 128]! -+ vld1.8 {d4-d5}, [r4, : 128]! -+ vadd.i32 q1, q1, q2 -+ add r5, r3, #288 -+ vld1.8 {d4}, [r2, : 64] -+ vld1.8 {d6}, [r4, : 64] -+ vadd.i32 q2, q2, q3 -+ vst1.8 {d0-d1}, [r5, : 128]! -+ vst1.8 {d2-d3}, [r5, : 128]! -+ vst1.8 d4, [r5, : 64] -+ add r2, r3, #48 -+ add r4, r3, #144 -+ vld1.8 {d0-d1}, [r4, : 128]! -+ vld1.8 {d2-d3}, [r4, : 128]! -+ vld1.8 {d4}, [r4, : 64] -+ add r4, r3, #288 -+ vld1.8 {d6-d7}, [r4, : 128]! -+ vtrn.32 q0, q3 -+ vld1.8 {d8-d9}, [r4, : 128]! -+ vshl.i32 q5, q0, #4 -+ vtrn.32 q1, q4 -+ vshl.i32 q6, q3, #4 -+ vadd.i32 q5, q5, q0 -+ vadd.i32 q6, q6, q3 -+ vshl.i32 q7, q1, #4 -+ vld1.8 {d5}, [r4, : 64] -+ vshl.i32 q8, q4, #4 -+ vtrn.32 d4, d5 -+ vadd.i32 q7, q7, q1 -+ vadd.i32 q8, q8, q4 -+ vld1.8 {d18-d19}, [r2, : 128]! -+ vshl.i32 q10, q2, #4 -+ vld1.8 {d22-d23}, [r2, : 128]! 
-+ vadd.i32 q10, q10, q2 -+ vld1.8 {d24}, [r2, : 64] -+ vadd.i32 q5, q5, q0 -+ add r2, r3, #240 -+ vld1.8 {d26-d27}, [r2, : 128]! -+ vadd.i32 q6, q6, q3 -+ vld1.8 {d28-d29}, [r2, : 128]! -+ vadd.i32 q8, q8, q4 -+ vld1.8 {d25}, [r2, : 64] -+ vadd.i32 q10, q10, q2 -+ vtrn.32 q9, q13 -+ vadd.i32 q7, q7, q1 -+ vadd.i32 q5, q5, q0 -+ vtrn.32 q11, q14 -+ vadd.i32 q6, q6, q3 -+ add r2, sp, #560 -+ vadd.i32 q10, q10, q2 -+ vtrn.32 d24, d25 -+ vst1.8 {d12-d13}, [r2, : 128] -+ vshl.i32 q6, q13, #1 -+ add r2, sp, #576 -+ vst1.8 {d20-d21}, [r2, : 128] -+ vshl.i32 q10, q14, #1 -+ add r2, sp, #592 -+ vst1.8 {d12-d13}, [r2, : 128] -+ vshl.i32 q15, q12, #1 -+ vadd.i32 q8, q8, q4 -+ vext.32 d10, d31, d30, #0 -+ vadd.i32 q7, q7, q1 -+ add r2, sp, #608 -+ vst1.8 {d16-d17}, [r2, : 128] -+ vmull.s32 q8, d18, d5 -+ vmlal.s32 q8, d26, d4 -+ vmlal.s32 q8, d19, d9 -+ vmlal.s32 q8, d27, d3 -+ vmlal.s32 q8, d22, d8 -+ vmlal.s32 q8, d28, d2 -+ vmlal.s32 q8, d23, d7 -+ vmlal.s32 q8, d29, d1 -+ vmlal.s32 q8, d24, d6 -+ vmlal.s32 q8, d25, d0 -+ add r2, sp, #624 -+ vst1.8 {d14-d15}, [r2, : 128] -+ vmull.s32 q2, d18, d4 -+ vmlal.s32 q2, d12, d9 -+ vmlal.s32 q2, d13, d8 -+ vmlal.s32 q2, d19, d3 -+ vmlal.s32 q2, d22, d2 -+ vmlal.s32 q2, d23, d1 -+ vmlal.s32 q2, d24, d0 -+ add r2, sp, #640 -+ vst1.8 {d20-d21}, [r2, : 128] -+ vmull.s32 q7, d18, d9 -+ vmlal.s32 q7, d26, d3 -+ vmlal.s32 q7, d19, d8 -+ vmlal.s32 q7, d27, d2 -+ vmlal.s32 q7, d22, d7 -+ vmlal.s32 q7, d28, d1 -+ vmlal.s32 q7, d23, d6 -+ vmlal.s32 q7, d29, d0 -+ add r2, sp, #656 -+ vst1.8 {d10-d11}, [r2, : 128] -+ vmull.s32 q5, d18, d3 -+ vmlal.s32 q5, d19, d2 -+ vmlal.s32 q5, d22, d1 -+ vmlal.s32 q5, d23, d0 -+ vmlal.s32 q5, d12, d8 -+ add r2, sp, #672 -+ vst1.8 {d16-d17}, [r2, : 128] -+ vmull.s32 q4, d18, d8 -+ vmlal.s32 q4, d26, d2 -+ vmlal.s32 q4, d19, d7 -+ vmlal.s32 q4, d27, d1 -+ vmlal.s32 q4, d22, d6 -+ vmlal.s32 q4, d28, d0 -+ vmull.s32 q8, d18, d7 -+ vmlal.s32 q8, d26, d1 -+ vmlal.s32 q8, d19, d6 -+ vmlal.s32 q8, d27, d0 -+ add r2, 
sp, #576 -+ vld1.8 {d20-d21}, [r2, : 128] -+ vmlal.s32 q7, d24, d21 -+ vmlal.s32 q7, d25, d20 -+ vmlal.s32 q4, d23, d21 -+ vmlal.s32 q4, d29, d20 -+ vmlal.s32 q8, d22, d21 -+ vmlal.s32 q8, d28, d20 -+ vmlal.s32 q5, d24, d20 -+ add r2, sp, #576 -+ vst1.8 {d14-d15}, [r2, : 128] -+ vmull.s32 q7, d18, d6 -+ vmlal.s32 q7, d26, d0 -+ add r2, sp, #656 -+ vld1.8 {d30-d31}, [r2, : 128] -+ vmlal.s32 q2, d30, d21 -+ vmlal.s32 q7, d19, d21 -+ vmlal.s32 q7, d27, d20 -+ add r2, sp, #624 -+ vld1.8 {d26-d27}, [r2, : 128] -+ vmlal.s32 q4, d25, d27 -+ vmlal.s32 q8, d29, d27 -+ vmlal.s32 q8, d25, d26 -+ vmlal.s32 q7, d28, d27 -+ vmlal.s32 q7, d29, d26 -+ add r2, sp, #608 -+ vld1.8 {d28-d29}, [r2, : 128] -+ vmlal.s32 q4, d24, d29 -+ vmlal.s32 q8, d23, d29 -+ vmlal.s32 q8, d24, d28 -+ vmlal.s32 q7, d22, d29 -+ vmlal.s32 q7, d23, d28 -+ add r2, sp, #608 -+ vst1.8 {d8-d9}, [r2, : 128] -+ add r2, sp, #560 -+ vld1.8 {d8-d9}, [r2, : 128] -+ vmlal.s32 q7, d24, d9 -+ vmlal.s32 q7, d25, d31 -+ vmull.s32 q1, d18, d2 -+ vmlal.s32 q1, d19, d1 -+ vmlal.s32 q1, d22, d0 -+ vmlal.s32 q1, d24, d27 -+ vmlal.s32 q1, d23, d20 -+ vmlal.s32 q1, d12, d7 -+ vmlal.s32 q1, d13, d6 -+ vmull.s32 q6, d18, d1 -+ vmlal.s32 q6, d19, d0 -+ vmlal.s32 q6, d23, d27 -+ vmlal.s32 q6, d22, d20 -+ vmlal.s32 q6, d24, d26 -+ vmull.s32 q0, d18, d0 -+ vmlal.s32 q0, d22, d27 -+ vmlal.s32 q0, d23, d26 -+ vmlal.s32 q0, d24, d31 -+ vmlal.s32 q0, d19, d20 -+ add r2, sp, #640 -+ vld1.8 {d18-d19}, [r2, : 128] -+ vmlal.s32 q2, d18, d7 -+ vmlal.s32 q2, d19, d6 -+ vmlal.s32 q5, d18, d6 -+ vmlal.s32 q5, d19, d21 -+ vmlal.s32 q1, d18, d21 -+ vmlal.s32 q1, d19, d29 -+ vmlal.s32 q0, d18, d28 -+ vmlal.s32 q0, d19, d9 -+ vmlal.s32 q6, d18, d29 -+ vmlal.s32 q6, d19, d28 -+ add r2, sp, #592 -+ vld1.8 {d18-d19}, [r2, : 128] -+ add r2, sp, #512 -+ vld1.8 {d22-d23}, [r2, : 128] -+ vmlal.s32 q5, d19, d7 -+ vmlal.s32 q0, d18, d21 -+ vmlal.s32 q0, d19, d29 -+ vmlal.s32 q6, d18, d6 -+ add r2, sp, #528 -+ vld1.8 {d6-d7}, [r2, : 128] -+ vmlal.s32 q6, 
d19, d21 -+ add r2, sp, #576 -+ vld1.8 {d18-d19}, [r2, : 128] -+ vmlal.s32 q0, d30, d8 -+ add r2, sp, #672 -+ vld1.8 {d20-d21}, [r2, : 128] -+ vmlal.s32 q5, d30, d29 -+ add r2, sp, #608 -+ vld1.8 {d24-d25}, [r2, : 128] -+ vmlal.s32 q1, d30, d28 -+ vadd.i64 q13, q0, q11 -+ vadd.i64 q14, q5, q11 -+ vmlal.s32 q6, d30, d9 -+ vshr.s64 q4, q13, #26 -+ vshr.s64 q13, q14, #26 -+ vadd.i64 q7, q7, q4 -+ vshl.i64 q4, q4, #26 -+ vadd.i64 q14, q7, q3 -+ vadd.i64 q9, q9, q13 -+ vshl.i64 q13, q13, #26 -+ vadd.i64 q15, q9, q3 -+ vsub.i64 q0, q0, q4 -+ vshr.s64 q4, q14, #25 -+ vsub.i64 q5, q5, q13 -+ vshr.s64 q13, q15, #25 -+ vadd.i64 q6, q6, q4 -+ vshl.i64 q4, q4, #25 -+ vadd.i64 q14, q6, q11 -+ vadd.i64 q2, q2, q13 -+ vsub.i64 q4, q7, q4 -+ vshr.s64 q7, q14, #26 -+ vshl.i64 q13, q13, #25 -+ vadd.i64 q14, q2, q11 -+ vadd.i64 q8, q8, q7 -+ vshl.i64 q7, q7, #26 -+ vadd.i64 q15, q8, q3 -+ vsub.i64 q9, q9, q13 -+ vshr.s64 q13, q14, #26 -+ vsub.i64 q6, q6, q7 -+ vshr.s64 q7, q15, #25 -+ vadd.i64 q10, q10, q13 -+ vshl.i64 q13, q13, #26 -+ vadd.i64 q14, q10, q3 -+ vadd.i64 q1, q1, q7 -+ add r2, r3, #240 -+ vshl.i64 q7, q7, #25 -+ add r4, r3, #144 -+ vadd.i64 q15, q1, q11 -+ add r2, r2, #8 -+ vsub.i64 q2, q2, q13 -+ add r4, r4, #8 -+ vshr.s64 q13, q14, #25 -+ vsub.i64 q7, q8, q7 -+ vshr.s64 q8, q15, #26 -+ vadd.i64 q14, q13, q13 -+ vadd.i64 q12, q12, q8 -+ vtrn.32 d12, d14 -+ vshl.i64 q8, q8, #26 -+ vtrn.32 d13, d15 -+ vadd.i64 q3, q12, q3 -+ vadd.i64 q0, q0, q14 -+ vst1.8 d12, [r2, : 64]! -+ vshl.i64 q7, q13, #4 -+ vst1.8 d13, [r4, : 64]! 
-+ vsub.i64 q1, q1, q8 -+ vshr.s64 q3, q3, #25 -+ vadd.i64 q0, q0, q7 -+ vadd.i64 q5, q5, q3 -+ vshl.i64 q3, q3, #25 -+ vadd.i64 q6, q5, q11 -+ vadd.i64 q0, q0, q13 -+ vshl.i64 q7, q13, #25 -+ vadd.i64 q8, q0, q11 -+ vsub.i64 q3, q12, q3 -+ vshr.s64 q6, q6, #26 -+ vsub.i64 q7, q10, q7 -+ vtrn.32 d2, d6 -+ vshr.s64 q8, q8, #26 -+ vtrn.32 d3, d7 -+ vadd.i64 q3, q9, q6 -+ vst1.8 d2, [r2, : 64] -+ vshl.i64 q6, q6, #26 -+ vst1.8 d3, [r4, : 64] -+ vadd.i64 q1, q4, q8 -+ vtrn.32 d4, d14 -+ vshl.i64 q4, q8, #26 -+ vtrn.32 d5, d15 -+ vsub.i64 q5, q5, q6 -+ add r2, r2, #16 -+ vsub.i64 q0, q0, q4 -+ vst1.8 d4, [r2, : 64] -+ add r4, r4, #16 -+ vst1.8 d5, [r4, : 64] -+ vtrn.32 d10, d6 -+ vtrn.32 d11, d7 -+ sub r2, r2, #8 -+ sub r4, r4, #8 -+ vtrn.32 d0, d2 -+ vtrn.32 d1, d3 -+ vst1.8 d10, [r2, : 64] -+ vst1.8 d11, [r4, : 64] -+ sub r2, r2, #24 -+ sub r4, r4, #24 -+ vst1.8 d0, [r2, : 64] -+ vst1.8 d1, [r4, : 64] -+ ldr r2, [sp, #488] -+ ldr r4, [sp, #492] -+ subs r5, r2, #1 -+ bge ._mainloop -+ add r1, r3, #144 -+ add r2, r3, #336 -+ vld1.8 {d0-d1}, [r1, : 128]! -+ vld1.8 {d2-d3}, [r1, : 128]! -+ vld1.8 {d4}, [r1, : 64] -+ vst1.8 {d0-d1}, [r2, : 128]! -+ vst1.8 {d2-d3}, [r2, : 128]! -+ vst1.8 d4, [r2, : 64] -+ ldr r1, =0 -+._invertloop: -+ add r2, r3, #144 -+ ldr r4, =0 -+ ldr r5, =2 -+ cmp r1, #1 -+ ldreq r5, =1 -+ addeq r2, r3, #336 -+ addeq r4, r3, #48 -+ cmp r1, #2 -+ ldreq r5, =1 -+ addeq r2, r3, #48 -+ cmp r1, #3 -+ ldreq r5, =5 -+ addeq r4, r3, #336 -+ cmp r1, #4 -+ ldreq r5, =10 -+ cmp r1, #5 -+ ldreq r5, =20 -+ cmp r1, #6 -+ ldreq r5, =10 -+ addeq r2, r3, #336 -+ addeq r4, r3, #336 -+ cmp r1, #7 -+ ldreq r5, =50 -+ cmp r1, #8 -+ ldreq r5, =100 -+ cmp r1, #9 -+ ldreq r5, =50 -+ addeq r2, r3, #336 -+ cmp r1, #10 -+ ldreq r5, =5 -+ addeq r2, r3, #48 -+ cmp r1, #11 -+ ldreq r5, =0 -+ addeq r2, r3, #96 -+ add r6, r3, #144 -+ add r7, r3, #288 -+ vld1.8 {d0-d1}, [r6, : 128]! -+ vld1.8 {d2-d3}, [r6, : 128]! -+ vld1.8 {d4}, [r6, : 64] -+ vst1.8 {d0-d1}, [r7, : 128]! 
-+ vst1.8 {d2-d3}, [r7, : 128]! -+ vst1.8 d4, [r7, : 64] -+ cmp r5, #0 -+ beq ._skipsquaringloop -+._squaringloop: -+ add r6, r3, #288 -+ add r7, r3, #288 -+ add r8, r3, #288 -+ vmov.i32 q0, #19 -+ vmov.i32 q1, #0 -+ vmov.i32 q2, #1 -+ vzip.i32 q1, q2 -+ vld1.8 {d4-d5}, [r7, : 128]! -+ vld1.8 {d6-d7}, [r7, : 128]! -+ vld1.8 {d9}, [r7, : 64] -+ vld1.8 {d10-d11}, [r6, : 128]! -+ add r7, sp, #416 -+ vld1.8 {d12-d13}, [r6, : 128]! -+ vmul.i32 q7, q2, q0 -+ vld1.8 {d8}, [r6, : 64] -+ vext.32 d17, d11, d10, #1 -+ vmul.i32 q9, q3, q0 -+ vext.32 d16, d10, d8, #1 -+ vshl.u32 q10, q5, q1 -+ vext.32 d22, d14, d4, #1 -+ vext.32 d24, d18, d6, #1 -+ vshl.u32 q13, q6, q1 -+ vshl.u32 d28, d8, d2 -+ vrev64.i32 d22, d22 -+ vmul.i32 d1, d9, d1 -+ vrev64.i32 d24, d24 -+ vext.32 d29, d8, d13, #1 -+ vext.32 d0, d1, d9, #1 -+ vrev64.i32 d0, d0 -+ vext.32 d2, d9, d1, #1 -+ vext.32 d23, d15, d5, #1 -+ vmull.s32 q4, d20, d4 -+ vrev64.i32 d23, d23 -+ vmlal.s32 q4, d21, d1 -+ vrev64.i32 d2, d2 -+ vmlal.s32 q4, d26, d19 -+ vext.32 d3, d5, d15, #1 -+ vmlal.s32 q4, d27, d18 -+ vrev64.i32 d3, d3 -+ vmlal.s32 q4, d28, d15 -+ vext.32 d14, d12, d11, #1 -+ vmull.s32 q5, d16, d23 -+ vext.32 d15, d13, d12, #1 -+ vmlal.s32 q5, d17, d4 -+ vst1.8 d8, [r7, : 64]! -+ vmlal.s32 q5, d14, d1 -+ vext.32 d12, d9, d8, #0 -+ vmlal.s32 q5, d15, d19 -+ vmov.i64 d13, #0 -+ vmlal.s32 q5, d29, d18 -+ vext.32 d25, d19, d7, #1 -+ vmlal.s32 q6, d20, d5 -+ vrev64.i32 d25, d25 -+ vmlal.s32 q6, d21, d4 -+ vst1.8 d11, [r7, : 64]! -+ vmlal.s32 q6, d26, d1 -+ vext.32 d9, d10, d10, #0 -+ vmlal.s32 q6, d27, d19 -+ vmov.i64 d8, #0 -+ vmlal.s32 q6, d28, d18 -+ vmlal.s32 q4, d16, d24 -+ vmlal.s32 q4, d17, d5 -+ vmlal.s32 q4, d14, d4 -+ vst1.8 d12, [r7, : 64]! 
-+ vmlal.s32 q4, d15, d1 -+ vext.32 d10, d13, d12, #0 -+ vmlal.s32 q4, d29, d19 -+ vmov.i64 d11, #0 -+ vmlal.s32 q5, d20, d6 -+ vmlal.s32 q5, d21, d5 -+ vmlal.s32 q5, d26, d4 -+ vext.32 d13, d8, d8, #0 -+ vmlal.s32 q5, d27, d1 -+ vmov.i64 d12, #0 -+ vmlal.s32 q5, d28, d19 -+ vst1.8 d9, [r7, : 64]! -+ vmlal.s32 q6, d16, d25 -+ vmlal.s32 q6, d17, d6 -+ vst1.8 d10, [r7, : 64] -+ vmlal.s32 q6, d14, d5 -+ vext.32 d8, d11, d10, #0 -+ vmlal.s32 q6, d15, d4 -+ vmov.i64 d9, #0 -+ vmlal.s32 q6, d29, d1 -+ vmlal.s32 q4, d20, d7 -+ vmlal.s32 q4, d21, d6 -+ vmlal.s32 q4, d26, d5 -+ vext.32 d11, d12, d12, #0 -+ vmlal.s32 q4, d27, d4 -+ vmov.i64 d10, #0 -+ vmlal.s32 q4, d28, d1 -+ vmlal.s32 q5, d16, d0 -+ sub r6, r7, #32 -+ vmlal.s32 q5, d17, d7 -+ vmlal.s32 q5, d14, d6 -+ vext.32 d30, d9, d8, #0 -+ vmlal.s32 q5, d15, d5 -+ vld1.8 {d31}, [r6, : 64]! -+ vmlal.s32 q5, d29, d4 -+ vmlal.s32 q15, d20, d0 -+ vext.32 d0, d6, d18, #1 -+ vmlal.s32 q15, d21, d25 -+ vrev64.i32 d0, d0 -+ vmlal.s32 q15, d26, d24 -+ vext.32 d1, d7, d19, #1 -+ vext.32 d7, d10, d10, #0 -+ vmlal.s32 q15, d27, d23 -+ vrev64.i32 d1, d1 -+ vld1.8 {d6}, [r6, : 64] -+ vmlal.s32 q15, d28, d22 -+ vmlal.s32 q3, d16, d4 -+ add r6, r6, #24 -+ vmlal.s32 q3, d17, d2 -+ vext.32 d4, d31, d30, #0 -+ vmov d17, d11 -+ vmlal.s32 q3, d14, d1 -+ vext.32 d11, d13, d13, #0 -+ vext.32 d13, d30, d30, #0 -+ vmlal.s32 q3, d15, d0 -+ vext.32 d1, d8, d8, #0 -+ vmlal.s32 q3, d29, d3 -+ vld1.8 {d5}, [r6, : 64] -+ sub r6, r6, #16 -+ vext.32 d10, d6, d6, #0 -+ vmov.i32 q1, #0xffffffff -+ vshl.i64 q4, q1, #25 -+ add r7, sp, #512 -+ vld1.8 {d14-d15}, [r7, : 128] -+ vadd.i64 q9, q2, q7 -+ vshl.i64 q1, q1, #26 -+ vshr.s64 q10, q9, #26 -+ vld1.8 {d0}, [r6, : 64]! -+ vadd.i64 q5, q5, q10 -+ vand q9, q9, q1 -+ vld1.8 {d16}, [r6, : 64]! 
-+ add r6, sp, #528 -+ vld1.8 {d20-d21}, [r6, : 128] -+ vadd.i64 q11, q5, q10 -+ vsub.i64 q2, q2, q9 -+ vshr.s64 q9, q11, #25 -+ vext.32 d12, d5, d4, #0 -+ vand q11, q11, q4 -+ vadd.i64 q0, q0, q9 -+ vmov d19, d7 -+ vadd.i64 q3, q0, q7 -+ vsub.i64 q5, q5, q11 -+ vshr.s64 q11, q3, #26 -+ vext.32 d18, d11, d10, #0 -+ vand q3, q3, q1 -+ vadd.i64 q8, q8, q11 -+ vadd.i64 q11, q8, q10 -+ vsub.i64 q0, q0, q3 -+ vshr.s64 q3, q11, #25 -+ vand q11, q11, q4 -+ vadd.i64 q3, q6, q3 -+ vadd.i64 q6, q3, q7 -+ vsub.i64 q8, q8, q11 -+ vshr.s64 q11, q6, #26 -+ vand q6, q6, q1 -+ vadd.i64 q9, q9, q11 -+ vadd.i64 d25, d19, d21 -+ vsub.i64 q3, q3, q6 -+ vshr.s64 d23, d25, #25 -+ vand q4, q12, q4 -+ vadd.i64 d21, d23, d23 -+ vshl.i64 d25, d23, #4 -+ vadd.i64 d21, d21, d23 -+ vadd.i64 d25, d25, d21 -+ vadd.i64 d4, d4, d25 -+ vzip.i32 q0, q8 -+ vadd.i64 d12, d4, d14 -+ add r6, r8, #8 -+ vst1.8 d0, [r6, : 64] -+ vsub.i64 d19, d19, d9 -+ add r6, r6, #16 -+ vst1.8 d16, [r6, : 64] -+ vshr.s64 d22, d12, #26 -+ vand q0, q6, q1 -+ vadd.i64 d10, d10, d22 -+ vzip.i32 q3, q9 -+ vsub.i64 d4, d4, d0 -+ sub r6, r6, #8 -+ vst1.8 d6, [r6, : 64] -+ add r6, r6, #16 -+ vst1.8 d18, [r6, : 64] -+ vzip.i32 q2, q5 -+ sub r6, r6, #32 -+ vst1.8 d4, [r6, : 64] -+ subs r5, r5, #1 -+ bhi ._squaringloop -+._skipsquaringloop: -+ mov r2, r2 -+ add r5, r3, #288 -+ add r6, r3, #144 -+ vmov.i32 q0, #19 -+ vmov.i32 q1, #0 -+ vmov.i32 q2, #1 -+ vzip.i32 q1, q2 -+ vld1.8 {d4-d5}, [r5, : 128]! -+ vld1.8 {d6-d7}, [r5, : 128]! -+ vld1.8 {d9}, [r5, : 64] -+ vld1.8 {d10-d11}, [r2, : 128]! -+ add r5, sp, #416 -+ vld1.8 {d12-d13}, [r2, : 128]! 
-+ vmul.i32 q7, q2, q0 -+ vld1.8 {d8}, [r2, : 64] -+ vext.32 d17, d11, d10, #1 -+ vmul.i32 q9, q3, q0 -+ vext.32 d16, d10, d8, #1 -+ vshl.u32 q10, q5, q1 -+ vext.32 d22, d14, d4, #1 -+ vext.32 d24, d18, d6, #1 -+ vshl.u32 q13, q6, q1 -+ vshl.u32 d28, d8, d2 -+ vrev64.i32 d22, d22 -+ vmul.i32 d1, d9, d1 -+ vrev64.i32 d24, d24 -+ vext.32 d29, d8, d13, #1 -+ vext.32 d0, d1, d9, #1 -+ vrev64.i32 d0, d0 -+ vext.32 d2, d9, d1, #1 -+ vext.32 d23, d15, d5, #1 -+ vmull.s32 q4, d20, d4 -+ vrev64.i32 d23, d23 -+ vmlal.s32 q4, d21, d1 -+ vrev64.i32 d2, d2 -+ vmlal.s32 q4, d26, d19 -+ vext.32 d3, d5, d15, #1 -+ vmlal.s32 q4, d27, d18 -+ vrev64.i32 d3, d3 -+ vmlal.s32 q4, d28, d15 -+ vext.32 d14, d12, d11, #1 -+ vmull.s32 q5, d16, d23 -+ vext.32 d15, d13, d12, #1 -+ vmlal.s32 q5, d17, d4 -+ vst1.8 d8, [r5, : 64]! -+ vmlal.s32 q5, d14, d1 -+ vext.32 d12, d9, d8, #0 -+ vmlal.s32 q5, d15, d19 -+ vmov.i64 d13, #0 -+ vmlal.s32 q5, d29, d18 -+ vext.32 d25, d19, d7, #1 -+ vmlal.s32 q6, d20, d5 -+ vrev64.i32 d25, d25 -+ vmlal.s32 q6, d21, d4 -+ vst1.8 d11, [r5, : 64]! -+ vmlal.s32 q6, d26, d1 -+ vext.32 d9, d10, d10, #0 -+ vmlal.s32 q6, d27, d19 -+ vmov.i64 d8, #0 -+ vmlal.s32 q6, d28, d18 -+ vmlal.s32 q4, d16, d24 -+ vmlal.s32 q4, d17, d5 -+ vmlal.s32 q4, d14, d4 -+ vst1.8 d12, [r5, : 64]! -+ vmlal.s32 q4, d15, d1 -+ vext.32 d10, d13, d12, #0 -+ vmlal.s32 q4, d29, d19 -+ vmov.i64 d11, #0 -+ vmlal.s32 q5, d20, d6 -+ vmlal.s32 q5, d21, d5 -+ vmlal.s32 q5, d26, d4 -+ vext.32 d13, d8, d8, #0 -+ vmlal.s32 q5, d27, d1 -+ vmov.i64 d12, #0 -+ vmlal.s32 q5, d28, d19 -+ vst1.8 d9, [r5, : 64]! 
-+ vmlal.s32 q6, d16, d25 -+ vmlal.s32 q6, d17, d6 -+ vst1.8 d10, [r5, : 64] -+ vmlal.s32 q6, d14, d5 -+ vext.32 d8, d11, d10, #0 -+ vmlal.s32 q6, d15, d4 -+ vmov.i64 d9, #0 -+ vmlal.s32 q6, d29, d1 -+ vmlal.s32 q4, d20, d7 -+ vmlal.s32 q4, d21, d6 -+ vmlal.s32 q4, d26, d5 -+ vext.32 d11, d12, d12, #0 -+ vmlal.s32 q4, d27, d4 -+ vmov.i64 d10, #0 -+ vmlal.s32 q4, d28, d1 -+ vmlal.s32 q5, d16, d0 -+ sub r2, r5, #32 -+ vmlal.s32 q5, d17, d7 -+ vmlal.s32 q5, d14, d6 -+ vext.32 d30, d9, d8, #0 -+ vmlal.s32 q5, d15, d5 -+ vld1.8 {d31}, [r2, : 64]! -+ vmlal.s32 q5, d29, d4 -+ vmlal.s32 q15, d20, d0 -+ vext.32 d0, d6, d18, #1 -+ vmlal.s32 q15, d21, d25 -+ vrev64.i32 d0, d0 -+ vmlal.s32 q15, d26, d24 -+ vext.32 d1, d7, d19, #1 -+ vext.32 d7, d10, d10, #0 -+ vmlal.s32 q15, d27, d23 -+ vrev64.i32 d1, d1 -+ vld1.8 {d6}, [r2, : 64] -+ vmlal.s32 q15, d28, d22 -+ vmlal.s32 q3, d16, d4 -+ add r2, r2, #24 -+ vmlal.s32 q3, d17, d2 -+ vext.32 d4, d31, d30, #0 -+ vmov d17, d11 -+ vmlal.s32 q3, d14, d1 -+ vext.32 d11, d13, d13, #0 -+ vext.32 d13, d30, d30, #0 -+ vmlal.s32 q3, d15, d0 -+ vext.32 d1, d8, d8, #0 -+ vmlal.s32 q3, d29, d3 -+ vld1.8 {d5}, [r2, : 64] -+ sub r2, r2, #16 -+ vext.32 d10, d6, d6, #0 -+ vmov.i32 q1, #0xffffffff -+ vshl.i64 q4, q1, #25 -+ add r5, sp, #512 -+ vld1.8 {d14-d15}, [r5, : 128] -+ vadd.i64 q9, q2, q7 -+ vshl.i64 q1, q1, #26 -+ vshr.s64 q10, q9, #26 -+ vld1.8 {d0}, [r2, : 64]! -+ vadd.i64 q5, q5, q10 -+ vand q9, q9, q1 -+ vld1.8 {d16}, [r2, : 64]! 
-+ add r2, sp, #528 -+ vld1.8 {d20-d21}, [r2, : 128] -+ vadd.i64 q11, q5, q10 -+ vsub.i64 q2, q2, q9 -+ vshr.s64 q9, q11, #25 -+ vext.32 d12, d5, d4, #0 -+ vand q11, q11, q4 -+ vadd.i64 q0, q0, q9 -+ vmov d19, d7 -+ vadd.i64 q3, q0, q7 -+ vsub.i64 q5, q5, q11 -+ vshr.s64 q11, q3, #26 -+ vext.32 d18, d11, d10, #0 -+ vand q3, q3, q1 -+ vadd.i64 q8, q8, q11 -+ vadd.i64 q11, q8, q10 -+ vsub.i64 q0, q0, q3 -+ vshr.s64 q3, q11, #25 -+ vand q11, q11, q4 -+ vadd.i64 q3, q6, q3 -+ vadd.i64 q6, q3, q7 -+ vsub.i64 q8, q8, q11 -+ vshr.s64 q11, q6, #26 -+ vand q6, q6, q1 -+ vadd.i64 q9, q9, q11 -+ vadd.i64 d25, d19, d21 -+ vsub.i64 q3, q3, q6 -+ vshr.s64 d23, d25, #25 -+ vand q4, q12, q4 -+ vadd.i64 d21, d23, d23 -+ vshl.i64 d25, d23, #4 -+ vadd.i64 d21, d21, d23 -+ vadd.i64 d25, d25, d21 -+ vadd.i64 d4, d4, d25 -+ vzip.i32 q0, q8 -+ vadd.i64 d12, d4, d14 -+ add r2, r6, #8 -+ vst1.8 d0, [r2, : 64] -+ vsub.i64 d19, d19, d9 -+ add r2, r2, #16 -+ vst1.8 d16, [r2, : 64] -+ vshr.s64 d22, d12, #26 -+ vand q0, q6, q1 -+ vadd.i64 d10, d10, d22 -+ vzip.i32 q3, q9 -+ vsub.i64 d4, d4, d0 -+ sub r2, r2, #8 -+ vst1.8 d6, [r2, : 64] -+ add r2, r2, #16 -+ vst1.8 d18, [r2, : 64] -+ vzip.i32 q2, q5 -+ sub r2, r2, #32 -+ vst1.8 d4, [r2, : 64] -+ cmp r4, #0 -+ beq ._skippostcopy -+ add r2, r3, #144 -+ mov r4, r4 -+ vld1.8 {d0-d1}, [r2, : 128]! -+ vld1.8 {d2-d3}, [r2, : 128]! -+ vld1.8 {d4}, [r2, : 64] -+ vst1.8 {d0-d1}, [r4, : 128]! -+ vst1.8 {d2-d3}, [r4, : 128]! -+ vst1.8 d4, [r4, : 64] -+._skippostcopy: -+ cmp r1, #1 -+ bne ._skipfinalcopy -+ add r2, r3, #288 -+ add r4, r3, #144 -+ vld1.8 {d0-d1}, [r2, : 128]! -+ vld1.8 {d2-d3}, [r2, : 128]! -+ vld1.8 {d4}, [r2, : 64] -+ vst1.8 {d0-d1}, [r4, : 128]! -+ vst1.8 {d2-d3}, [r4, : 128]! 
-+ vst1.8 d4, [r4, : 64] -+._skipfinalcopy: -+ add r1, r1, #1 -+ cmp r1, #12 -+ blo ._invertloop -+ add r1, r3, #144 -+ ldr r2, [r1], #4 -+ ldr r3, [r1], #4 -+ ldr r4, [r1], #4 -+ ldr r5, [r1], #4 -+ ldr r6, [r1], #4 -+ ldr r7, [r1], #4 -+ ldr r8, [r1], #4 -+ ldr r9, [r1], #4 -+ ldr r10, [r1], #4 -+ ldr r1, [r1] -+ add r11, r1, r1, LSL #4 -+ add r11, r11, r1, LSL #1 -+ add r11, r11, #16777216 -+ mov r11, r11, ASR #25 -+ add r11, r11, r2 -+ mov r11, r11, ASR #26 -+ add r11, r11, r3 -+ mov r11, r11, ASR #25 -+ add r11, r11, r4 -+ mov r11, r11, ASR #26 -+ add r11, r11, r5 -+ mov r11, r11, ASR #25 -+ add r11, r11, r6 -+ mov r11, r11, ASR #26 -+ add r11, r11, r7 -+ mov r11, r11, ASR #25 -+ add r11, r11, r8 -+ mov r11, r11, ASR #26 -+ add r11, r11, r9 -+ mov r11, r11, ASR #25 -+ add r11, r11, r10 -+ mov r11, r11, ASR #26 -+ add r11, r11, r1 -+ mov r11, r11, ASR #25 -+ add r2, r2, r11 -+ add r2, r2, r11, LSL #1 -+ add r2, r2, r11, LSL #4 -+ mov r11, r2, ASR #26 -+ add r3, r3, r11 -+ sub r2, r2, r11, LSL #26 -+ mov r11, r3, ASR #25 -+ add r4, r4, r11 -+ sub r3, r3, r11, LSL #25 -+ mov r11, r4, ASR #26 -+ add r5, r5, r11 -+ sub r4, r4, r11, LSL #26 -+ mov r11, r5, ASR #25 -+ add r6, r6, r11 -+ sub r5, r5, r11, LSL #25 -+ mov r11, r6, ASR #26 -+ add r7, r7, r11 -+ sub r6, r6, r11, LSL #26 -+ mov r11, r7, ASR #25 -+ add r8, r8, r11 -+ sub r7, r7, r11, LSL #25 -+ mov r11, r8, ASR #26 -+ add r9, r9, r11 -+ sub r8, r8, r11, LSL #26 -+ mov r11, r9, ASR #25 -+ add r10, r10, r11 -+ sub r9, r9, r11, LSL #25 -+ mov r11, r10, ASR #26 -+ add r1, r1, r11 -+ sub r10, r10, r11, LSL #26 -+ mov r11, r1, ASR #25 -+ sub r1, r1, r11, LSL #25 -+ add r2, r2, r3, LSL #26 -+ mov r3, r3, LSR #6 -+ add r3, r3, r4, LSL #19 -+ mov r4, r4, LSR #13 -+ add r4, r4, r5, LSL #13 -+ mov r5, r5, LSR #19 -+ add r5, r5, r6, LSL #6 -+ add r6, r7, r8, LSL #25 -+ mov r7, r8, LSR #7 -+ add r7, r7, r9, LSL #19 -+ mov r8, r9, LSR #13 -+ add r8, r8, r10, LSL #12 -+ mov r9, r10, LSR #20 -+ add r1, r9, r1, LSL #6 -+ str 
r2, [r0], #4 -+ str r3, [r0], #4 -+ str r4, [r0], #4 -+ str r5, [r0], #4 -+ str r6, [r0], #4 -+ str r7, [r0], #4 -+ str r8, [r0], #4 -+ str r1, [r0] -+ ldrd r4, [sp, #0] -+ ldrd r6, [sp, #8] -+ ldrd r8, [sp, #16] -+ ldrd r10, [sp, #24] -+ ldr r12, [sp, #480] -+ ldr r14, [sp, #484] -+ ldr r0, =0 -+ mov sp, r12 -+ vpop {q4, q5, q6, q7} -+ bx lr diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0031-crypto-arm-curve25519-wire-up-NEON-implementation.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0031-crypto-arm-curve25519-wire-up-NEON-implementation.patch deleted file mode 100644 index d84726b61..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0031-crypto-arm-curve25519-wire-up-NEON-implementation.patch +++ /dev/null @@ -1,1058 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 8 Nov 2019 13:22:38 +0100 -Subject: [PATCH] crypto: arm/curve25519 - wire up NEON implementation - -commit d8f1308a025fc7e00414194ed742d5f05a21e13c upstream. - -This ports the SUPERCOP implementation for usage in kernel space. In -addition to the usual header, macro, and style changes required for -kernel space, it makes a few small changes to the code: - - - The stack alignment is relaxed to 16 bytes. - - Superfluous mov statements have been removed. - - ldr for constants has been replaced with movw. - - ldreq has been replaced with moveq. - - The str epilogue has been made more idiomatic. - - SIMD registers are not pushed and popped at the beginning and end. - - The prologue and epilogue have been made idiomatic. - - A hole has been removed from the stack, saving 32 bytes. - - We write-back the base register whenever possible for vld1.8. - - Some multiplications have been reordered for better A7 performance. - -There are more opportunities for cleanup, since this code is from qhasm, -which doesn't always do the most opportune thing. 
But even prior to -extensive hand optimizations, this code delivers significant performance -improvements (given in get_cycles() per call): - - ----------- ------------- - | generic C | this commit | - ------------ ----------- ------------- - | Cortex-A7 | 49136 | 22395 | - ------------ ----------- ------------- - | Cortex-A17 | 17326 | 4983 | - ------------ ----------- ------------- - -Signed-off-by: Jason A. Donenfeld -[ardb: - move to arch/arm/crypto - - wire into lib/crypto framework - - implement crypto API KPP hooks ] -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm/crypto/Kconfig | 6 + - arch/arm/crypto/Makefile | 2 + - arch/arm/crypto/curve25519-core.S | 347 +++++++++++++----------------- - arch/arm/crypto/curve25519-glue.c | 127 +++++++++++ - 4 files changed, 287 insertions(+), 195 deletions(-) - create mode 100644 arch/arm/crypto/curve25519-glue.c - ---- a/arch/arm/crypto/Kconfig -+++ b/arch/arm/crypto/Kconfig -@@ -141,4 +141,10 @@ config CRYPTO_NHPOLY1305_NEON - depends on KERNEL_MODE_NEON - select CRYPTO_NHPOLY1305 - -+config CRYPTO_CURVE25519_NEON -+ tristate "NEON accelerated Curve25519 scalar multiplication library" -+ depends on KERNEL_MODE_NEON -+ select CRYPTO_LIB_CURVE25519_GENERIC -+ select CRYPTO_ARCH_HAVE_LIB_CURVE25519 -+ - endif ---- a/arch/arm/crypto/Makefile -+++ b/arch/arm/crypto/Makefile -@@ -12,6 +12,7 @@ obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha51 - obj-$(CONFIG_CRYPTO_CHACHA20_NEON) += chacha-neon.o - obj-$(CONFIG_CRYPTO_POLY1305_ARM) += poly1305-arm.o - obj-$(CONFIG_CRYPTO_NHPOLY1305_NEON) += nhpoly1305-neon.o -+obj-$(CONFIG_CRYPTO_CURVE25519_NEON) += curve25519-neon.o - - ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o - ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o -@@ -58,6 +59,7 @@ chacha-neon-y := chacha-scalar-core.o ch - chacha-neon-$(CONFIG_KERNEL_MODE_NEON) += chacha-neon-core.o - poly1305-arm-y := poly1305-core.o poly1305-glue.o - nhpoly1305-neon-y := 
nh-neon-core.o nhpoly1305-neon-glue.o -+curve25519-neon-y := curve25519-core.o curve25519-glue.o - - ifdef REGENERATE_ARM_CRYPTO - quiet_cmd_perl = PERL $@ ---- a/arch/arm/crypto/curve25519-core.S -+++ b/arch/arm/crypto/curve25519-core.S -@@ -1,43 +1,35 @@ -+/* SPDX-License-Identifier: GPL-2.0 OR MIT */ - /* -- * Public domain code from Daniel J. Bernstein and Peter Schwabe, from -- * SUPERCOP's curve25519/neon2/scalarmult.s. -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * Based on public domain code from Daniel J. Bernstein and Peter Schwabe. This -+ * began from SUPERCOP's curve25519/neon2/scalarmult.s, but has subsequently been -+ * manually reworked for use in kernel space. - */ - --.fpu neon -+#include -+ - .text -+.fpu neon -+.arch armv7-a - .align 4 --.global _crypto_scalarmult_curve25519_neon2 --.global crypto_scalarmult_curve25519_neon2 --.type _crypto_scalarmult_curve25519_neon2 STT_FUNC --.type crypto_scalarmult_curve25519_neon2 STT_FUNC -- _crypto_scalarmult_curve25519_neon2: -- crypto_scalarmult_curve25519_neon2: -- vpush {q4, q5, q6, q7} -- mov r12, sp -- sub sp, sp, #736 -- and sp, sp, #0xffffffe0 -- strd r4, [sp, #0] -- strd r6, [sp, #8] -- strd r8, [sp, #16] -- strd r10, [sp, #24] -- str r12, [sp, #480] -- str r14, [sp, #484] -- mov r0, r0 -- mov r1, r1 -- mov r2, r2 -- add r3, sp, #32 -- ldr r4, =0 -- ldr r5, =254 -+ -+ENTRY(curve25519_neon) -+ push {r4-r11, lr} -+ mov ip, sp -+ sub r3, sp, #704 -+ and r3, r3, #0xfffffff0 -+ mov sp, r3 -+ movw r4, #0 -+ movw r5, #254 - vmov.i32 q0, #1 - vshr.u64 q1, q0, #7 - vshr.u64 q0, q0, #8 - vmov.i32 d4, #19 - vmov.i32 d5, #38 -- add r6, sp, #512 -- vst1.8 {d2-d3}, [r6, : 128] -- add r6, sp, #528 -- vst1.8 {d0-d1}, [r6, : 128] -- add r6, sp, #544 -+ add r6, sp, #480 -+ vst1.8 {d2-d3}, [r6, : 128]! -+ vst1.8 {d0-d1}, [r6, : 128]! - vst1.8 {d4-d5}, [r6, : 128] - add r6, r3, #0 - vmov.i32 q2, #0 -@@ -45,12 +37,12 @@ - vst1.8 {d4-d5}, [r6, : 128]! 
- vst1.8 d4, [r6, : 64] - add r6, r3, #0 -- ldr r7, =960 -+ movw r7, #960 - sub r7, r7, #2 - neg r7, r7 - sub r7, r7, r7, LSL #7 - str r7, [r6] -- add r6, sp, #704 -+ add r6, sp, #672 - vld1.8 {d4-d5}, [r1]! - vld1.8 {d6-d7}, [r1] - vst1.8 {d4-d5}, [r6, : 128]! -@@ -212,15 +204,15 @@ - vst1.8 {d0-d1}, [r6, : 128]! - vst1.8 {d2-d3}, [r6, : 128]! - vst1.8 d4, [r6, : 64] --._mainloop: -+.Lmainloop: - mov r2, r5, LSR #3 - and r6, r5, #7 - ldrb r2, [r1, r2] - mov r2, r2, LSR r6 - and r2, r2, #1 -- str r5, [sp, #488] -+ str r5, [sp, #456] - eor r4, r4, r2 -- str r2, [sp, #492] -+ str r2, [sp, #460] - neg r2, r4 - add r4, r3, #96 - add r5, r3, #192 -@@ -291,7 +283,7 @@ - vsub.i32 q0, q1, q3 - vst1.8 d4, [r4, : 64] - vst1.8 d0, [r6, : 64] -- add r2, sp, #544 -+ add r2, sp, #512 - add r4, r3, #96 - add r5, r3, #144 - vld1.8 {d0-d1}, [r2, : 128] -@@ -361,14 +353,13 @@ - vmlal.s32 q0, d12, d8 - vmlal.s32 q0, d13, d17 - vmlal.s32 q0, d6, d6 -- add r2, sp, #512 -- vld1.8 {d18-d19}, [r2, : 128] -+ add r2, sp, #480 -+ vld1.8 {d18-d19}, [r2, : 128]! - vmull.s32 q3, d16, d7 - vmlal.s32 q3, d10, d15 - vmlal.s32 q3, d11, d14 - vmlal.s32 q3, d12, d9 - vmlal.s32 q3, d13, d8 -- add r2, sp, #528 - vld1.8 {d8-d9}, [r2, : 128] - vadd.i64 q5, q12, q9 - vadd.i64 q6, q15, q9 -@@ -502,22 +493,19 @@ - vadd.i32 q5, q5, q0 - vtrn.32 q11, q14 - vadd.i32 q6, q6, q3 -- add r2, sp, #560 -+ add r2, sp, #528 - vadd.i32 q10, q10, q2 - vtrn.32 d24, d25 -- vst1.8 {d12-d13}, [r2, : 128] -+ vst1.8 {d12-d13}, [r2, : 128]! - vshl.i32 q6, q13, #1 -- add r2, sp, #576 -- vst1.8 {d20-d21}, [r2, : 128] -+ vst1.8 {d20-d21}, [r2, : 128]! - vshl.i32 q10, q14, #1 -- add r2, sp, #592 -- vst1.8 {d12-d13}, [r2, : 128] -+ vst1.8 {d12-d13}, [r2, : 128]! - vshl.i32 q15, q12, #1 - vadd.i32 q8, q8, q4 - vext.32 d10, d31, d30, #0 - vadd.i32 q7, q7, q1 -- add r2, sp, #608 -- vst1.8 {d16-d17}, [r2, : 128] -+ vst1.8 {d16-d17}, [r2, : 128]! 
- vmull.s32 q8, d18, d5 - vmlal.s32 q8, d26, d4 - vmlal.s32 q8, d19, d9 -@@ -528,8 +516,7 @@ - vmlal.s32 q8, d29, d1 - vmlal.s32 q8, d24, d6 - vmlal.s32 q8, d25, d0 -- add r2, sp, #624 -- vst1.8 {d14-d15}, [r2, : 128] -+ vst1.8 {d14-d15}, [r2, : 128]! - vmull.s32 q2, d18, d4 - vmlal.s32 q2, d12, d9 - vmlal.s32 q2, d13, d8 -@@ -537,8 +524,7 @@ - vmlal.s32 q2, d22, d2 - vmlal.s32 q2, d23, d1 - vmlal.s32 q2, d24, d0 -- add r2, sp, #640 -- vst1.8 {d20-d21}, [r2, : 128] -+ vst1.8 {d20-d21}, [r2, : 128]! - vmull.s32 q7, d18, d9 - vmlal.s32 q7, d26, d3 - vmlal.s32 q7, d19, d8 -@@ -547,14 +533,12 @@ - vmlal.s32 q7, d28, d1 - vmlal.s32 q7, d23, d6 - vmlal.s32 q7, d29, d0 -- add r2, sp, #656 -- vst1.8 {d10-d11}, [r2, : 128] -+ vst1.8 {d10-d11}, [r2, : 128]! - vmull.s32 q5, d18, d3 - vmlal.s32 q5, d19, d2 - vmlal.s32 q5, d22, d1 - vmlal.s32 q5, d23, d0 - vmlal.s32 q5, d12, d8 -- add r2, sp, #672 - vst1.8 {d16-d17}, [r2, : 128] - vmull.s32 q4, d18, d8 - vmlal.s32 q4, d26, d2 -@@ -566,7 +550,7 @@ - vmlal.s32 q8, d26, d1 - vmlal.s32 q8, d19, d6 - vmlal.s32 q8, d27, d0 -- add r2, sp, #576 -+ add r2, sp, #544 - vld1.8 {d20-d21}, [r2, : 128] - vmlal.s32 q7, d24, d21 - vmlal.s32 q7, d25, d20 -@@ -575,32 +559,30 @@ - vmlal.s32 q8, d22, d21 - vmlal.s32 q8, d28, d20 - vmlal.s32 q5, d24, d20 -- add r2, sp, #576 - vst1.8 {d14-d15}, [r2, : 128] - vmull.s32 q7, d18, d6 - vmlal.s32 q7, d26, d0 -- add r2, sp, #656 -+ add r2, sp, #624 - vld1.8 {d30-d31}, [r2, : 128] - vmlal.s32 q2, d30, d21 - vmlal.s32 q7, d19, d21 - vmlal.s32 q7, d27, d20 -- add r2, sp, #624 -+ add r2, sp, #592 - vld1.8 {d26-d27}, [r2, : 128] - vmlal.s32 q4, d25, d27 - vmlal.s32 q8, d29, d27 - vmlal.s32 q8, d25, d26 - vmlal.s32 q7, d28, d27 - vmlal.s32 q7, d29, d26 -- add r2, sp, #608 -+ add r2, sp, #576 - vld1.8 {d28-d29}, [r2, : 128] - vmlal.s32 q4, d24, d29 - vmlal.s32 q8, d23, d29 - vmlal.s32 q8, d24, d28 - vmlal.s32 q7, d22, d29 - vmlal.s32 q7, d23, d28 -- add r2, sp, #608 - vst1.8 {d8-d9}, [r2, : 128] -- add r2, sp, 
#560 -+ add r2, sp, #528 - vld1.8 {d8-d9}, [r2, : 128] - vmlal.s32 q7, d24, d9 - vmlal.s32 q7, d25, d31 -@@ -621,36 +603,36 @@ - vmlal.s32 q0, d23, d26 - vmlal.s32 q0, d24, d31 - vmlal.s32 q0, d19, d20 -- add r2, sp, #640 -+ add r2, sp, #608 - vld1.8 {d18-d19}, [r2, : 128] - vmlal.s32 q2, d18, d7 -- vmlal.s32 q2, d19, d6 - vmlal.s32 q5, d18, d6 -- vmlal.s32 q5, d19, d21 - vmlal.s32 q1, d18, d21 -- vmlal.s32 q1, d19, d29 - vmlal.s32 q0, d18, d28 -- vmlal.s32 q0, d19, d9 - vmlal.s32 q6, d18, d29 -+ vmlal.s32 q2, d19, d6 -+ vmlal.s32 q5, d19, d21 -+ vmlal.s32 q1, d19, d29 -+ vmlal.s32 q0, d19, d9 - vmlal.s32 q6, d19, d28 -- add r2, sp, #592 -+ add r2, sp, #560 - vld1.8 {d18-d19}, [r2, : 128] -- add r2, sp, #512 -+ add r2, sp, #480 - vld1.8 {d22-d23}, [r2, : 128] - vmlal.s32 q5, d19, d7 - vmlal.s32 q0, d18, d21 - vmlal.s32 q0, d19, d29 - vmlal.s32 q6, d18, d6 -- add r2, sp, #528 -+ add r2, sp, #496 - vld1.8 {d6-d7}, [r2, : 128] - vmlal.s32 q6, d19, d21 -- add r2, sp, #576 -+ add r2, sp, #544 - vld1.8 {d18-d19}, [r2, : 128] - vmlal.s32 q0, d30, d8 -- add r2, sp, #672 -+ add r2, sp, #640 - vld1.8 {d20-d21}, [r2, : 128] - vmlal.s32 q5, d30, d29 -- add r2, sp, #608 -+ add r2, sp, #576 - vld1.8 {d24-d25}, [r2, : 128] - vmlal.s32 q1, d30, d28 - vadd.i64 q13, q0, q11 -@@ -823,22 +805,19 @@ - vadd.i32 q5, q5, q0 - vtrn.32 q11, q14 - vadd.i32 q6, q6, q3 -- add r2, sp, #560 -+ add r2, sp, #528 - vadd.i32 q10, q10, q2 - vtrn.32 d24, d25 -- vst1.8 {d12-d13}, [r2, : 128] -+ vst1.8 {d12-d13}, [r2, : 128]! - vshl.i32 q6, q13, #1 -- add r2, sp, #576 -- vst1.8 {d20-d21}, [r2, : 128] -+ vst1.8 {d20-d21}, [r2, : 128]! - vshl.i32 q10, q14, #1 -- add r2, sp, #592 -- vst1.8 {d12-d13}, [r2, : 128] -+ vst1.8 {d12-d13}, [r2, : 128]! - vshl.i32 q15, q12, #1 - vadd.i32 q8, q8, q4 - vext.32 d10, d31, d30, #0 - vadd.i32 q7, q7, q1 -- add r2, sp, #608 -- vst1.8 {d16-d17}, [r2, : 128] -+ vst1.8 {d16-d17}, [r2, : 128]! 
- vmull.s32 q8, d18, d5 - vmlal.s32 q8, d26, d4 - vmlal.s32 q8, d19, d9 -@@ -849,8 +828,7 @@ - vmlal.s32 q8, d29, d1 - vmlal.s32 q8, d24, d6 - vmlal.s32 q8, d25, d0 -- add r2, sp, #624 -- vst1.8 {d14-d15}, [r2, : 128] -+ vst1.8 {d14-d15}, [r2, : 128]! - vmull.s32 q2, d18, d4 - vmlal.s32 q2, d12, d9 - vmlal.s32 q2, d13, d8 -@@ -858,8 +836,7 @@ - vmlal.s32 q2, d22, d2 - vmlal.s32 q2, d23, d1 - vmlal.s32 q2, d24, d0 -- add r2, sp, #640 -- vst1.8 {d20-d21}, [r2, : 128] -+ vst1.8 {d20-d21}, [r2, : 128]! - vmull.s32 q7, d18, d9 - vmlal.s32 q7, d26, d3 - vmlal.s32 q7, d19, d8 -@@ -868,15 +845,13 @@ - vmlal.s32 q7, d28, d1 - vmlal.s32 q7, d23, d6 - vmlal.s32 q7, d29, d0 -- add r2, sp, #656 -- vst1.8 {d10-d11}, [r2, : 128] -+ vst1.8 {d10-d11}, [r2, : 128]! - vmull.s32 q5, d18, d3 - vmlal.s32 q5, d19, d2 - vmlal.s32 q5, d22, d1 - vmlal.s32 q5, d23, d0 - vmlal.s32 q5, d12, d8 -- add r2, sp, #672 -- vst1.8 {d16-d17}, [r2, : 128] -+ vst1.8 {d16-d17}, [r2, : 128]! - vmull.s32 q4, d18, d8 - vmlal.s32 q4, d26, d2 - vmlal.s32 q4, d19, d7 -@@ -887,7 +862,7 @@ - vmlal.s32 q8, d26, d1 - vmlal.s32 q8, d19, d6 - vmlal.s32 q8, d27, d0 -- add r2, sp, #576 -+ add r2, sp, #544 - vld1.8 {d20-d21}, [r2, : 128] - vmlal.s32 q7, d24, d21 - vmlal.s32 q7, d25, d20 -@@ -896,32 +871,30 @@ - vmlal.s32 q8, d22, d21 - vmlal.s32 q8, d28, d20 - vmlal.s32 q5, d24, d20 -- add r2, sp, #576 - vst1.8 {d14-d15}, [r2, : 128] - vmull.s32 q7, d18, d6 - vmlal.s32 q7, d26, d0 -- add r2, sp, #656 -+ add r2, sp, #624 - vld1.8 {d30-d31}, [r2, : 128] - vmlal.s32 q2, d30, d21 - vmlal.s32 q7, d19, d21 - vmlal.s32 q7, d27, d20 -- add r2, sp, #624 -+ add r2, sp, #592 - vld1.8 {d26-d27}, [r2, : 128] - vmlal.s32 q4, d25, d27 - vmlal.s32 q8, d29, d27 - vmlal.s32 q8, d25, d26 - vmlal.s32 q7, d28, d27 - vmlal.s32 q7, d29, d26 -- add r2, sp, #608 -+ add r2, sp, #576 - vld1.8 {d28-d29}, [r2, : 128] - vmlal.s32 q4, d24, d29 - vmlal.s32 q8, d23, d29 - vmlal.s32 q8, d24, d28 - vmlal.s32 q7, d22, d29 - vmlal.s32 q7, d23, d28 -- add 
r2, sp, #608 - vst1.8 {d8-d9}, [r2, : 128] -- add r2, sp, #560 -+ add r2, sp, #528 - vld1.8 {d8-d9}, [r2, : 128] - vmlal.s32 q7, d24, d9 - vmlal.s32 q7, d25, d31 -@@ -942,36 +915,36 @@ - vmlal.s32 q0, d23, d26 - vmlal.s32 q0, d24, d31 - vmlal.s32 q0, d19, d20 -- add r2, sp, #640 -+ add r2, sp, #608 - vld1.8 {d18-d19}, [r2, : 128] - vmlal.s32 q2, d18, d7 -- vmlal.s32 q2, d19, d6 - vmlal.s32 q5, d18, d6 -- vmlal.s32 q5, d19, d21 - vmlal.s32 q1, d18, d21 -- vmlal.s32 q1, d19, d29 - vmlal.s32 q0, d18, d28 -- vmlal.s32 q0, d19, d9 - vmlal.s32 q6, d18, d29 -+ vmlal.s32 q2, d19, d6 -+ vmlal.s32 q5, d19, d21 -+ vmlal.s32 q1, d19, d29 -+ vmlal.s32 q0, d19, d9 - vmlal.s32 q6, d19, d28 -- add r2, sp, #592 -+ add r2, sp, #560 - vld1.8 {d18-d19}, [r2, : 128] -- add r2, sp, #512 -+ add r2, sp, #480 - vld1.8 {d22-d23}, [r2, : 128] - vmlal.s32 q5, d19, d7 - vmlal.s32 q0, d18, d21 - vmlal.s32 q0, d19, d29 - vmlal.s32 q6, d18, d6 -- add r2, sp, #528 -+ add r2, sp, #496 - vld1.8 {d6-d7}, [r2, : 128] - vmlal.s32 q6, d19, d21 -- add r2, sp, #576 -+ add r2, sp, #544 - vld1.8 {d18-d19}, [r2, : 128] - vmlal.s32 q0, d30, d8 -- add r2, sp, #672 -+ add r2, sp, #640 - vld1.8 {d20-d21}, [r2, : 128] - vmlal.s32 q5, d30, d29 -- add r2, sp, #608 -+ add r2, sp, #576 - vld1.8 {d24-d25}, [r2, : 128] - vmlal.s32 q1, d30, d28 - vadd.i64 q13, q0, q11 -@@ -1069,7 +1042,7 @@ - sub r4, r4, #24 - vst1.8 d0, [r2, : 64] - vst1.8 d1, [r4, : 64] -- add r2, sp, #544 -+ add r2, sp, #512 - add r4, r3, #144 - add r5, r3, #192 - vld1.8 {d0-d1}, [r2, : 128] -@@ -1139,14 +1112,13 @@ - vmlal.s32 q0, d12, d8 - vmlal.s32 q0, d13, d17 - vmlal.s32 q0, d6, d6 -- add r2, sp, #512 -- vld1.8 {d18-d19}, [r2, : 128] -+ add r2, sp, #480 -+ vld1.8 {d18-d19}, [r2, : 128]! 
- vmull.s32 q3, d16, d7 - vmlal.s32 q3, d10, d15 - vmlal.s32 q3, d11, d14 - vmlal.s32 q3, d12, d9 - vmlal.s32 q3, d13, d8 -- add r2, sp, #528 - vld1.8 {d8-d9}, [r2, : 128] - vadd.i64 q5, q12, q9 - vadd.i64 q6, q15, q9 -@@ -1295,22 +1267,19 @@ - vadd.i32 q5, q5, q0 - vtrn.32 q11, q14 - vadd.i32 q6, q6, q3 -- add r2, sp, #560 -+ add r2, sp, #528 - vadd.i32 q10, q10, q2 - vtrn.32 d24, d25 -- vst1.8 {d12-d13}, [r2, : 128] -+ vst1.8 {d12-d13}, [r2, : 128]! - vshl.i32 q6, q13, #1 -- add r2, sp, #576 -- vst1.8 {d20-d21}, [r2, : 128] -+ vst1.8 {d20-d21}, [r2, : 128]! - vshl.i32 q10, q14, #1 -- add r2, sp, #592 -- vst1.8 {d12-d13}, [r2, : 128] -+ vst1.8 {d12-d13}, [r2, : 128]! - vshl.i32 q15, q12, #1 - vadd.i32 q8, q8, q4 - vext.32 d10, d31, d30, #0 - vadd.i32 q7, q7, q1 -- add r2, sp, #608 -- vst1.8 {d16-d17}, [r2, : 128] -+ vst1.8 {d16-d17}, [r2, : 128]! - vmull.s32 q8, d18, d5 - vmlal.s32 q8, d26, d4 - vmlal.s32 q8, d19, d9 -@@ -1321,8 +1290,7 @@ - vmlal.s32 q8, d29, d1 - vmlal.s32 q8, d24, d6 - vmlal.s32 q8, d25, d0 -- add r2, sp, #624 -- vst1.8 {d14-d15}, [r2, : 128] -+ vst1.8 {d14-d15}, [r2, : 128]! - vmull.s32 q2, d18, d4 - vmlal.s32 q2, d12, d9 - vmlal.s32 q2, d13, d8 -@@ -1330,8 +1298,7 @@ - vmlal.s32 q2, d22, d2 - vmlal.s32 q2, d23, d1 - vmlal.s32 q2, d24, d0 -- add r2, sp, #640 -- vst1.8 {d20-d21}, [r2, : 128] -+ vst1.8 {d20-d21}, [r2, : 128]! - vmull.s32 q7, d18, d9 - vmlal.s32 q7, d26, d3 - vmlal.s32 q7, d19, d8 -@@ -1340,15 +1307,13 @@ - vmlal.s32 q7, d28, d1 - vmlal.s32 q7, d23, d6 - vmlal.s32 q7, d29, d0 -- add r2, sp, #656 -- vst1.8 {d10-d11}, [r2, : 128] -+ vst1.8 {d10-d11}, [r2, : 128]! - vmull.s32 q5, d18, d3 - vmlal.s32 q5, d19, d2 - vmlal.s32 q5, d22, d1 - vmlal.s32 q5, d23, d0 - vmlal.s32 q5, d12, d8 -- add r2, sp, #672 -- vst1.8 {d16-d17}, [r2, : 128] -+ vst1.8 {d16-d17}, [r2, : 128]! 
- vmull.s32 q4, d18, d8 - vmlal.s32 q4, d26, d2 - vmlal.s32 q4, d19, d7 -@@ -1359,7 +1324,7 @@ - vmlal.s32 q8, d26, d1 - vmlal.s32 q8, d19, d6 - vmlal.s32 q8, d27, d0 -- add r2, sp, #576 -+ add r2, sp, #544 - vld1.8 {d20-d21}, [r2, : 128] - vmlal.s32 q7, d24, d21 - vmlal.s32 q7, d25, d20 -@@ -1368,32 +1333,30 @@ - vmlal.s32 q8, d22, d21 - vmlal.s32 q8, d28, d20 - vmlal.s32 q5, d24, d20 -- add r2, sp, #576 - vst1.8 {d14-d15}, [r2, : 128] - vmull.s32 q7, d18, d6 - vmlal.s32 q7, d26, d0 -- add r2, sp, #656 -+ add r2, sp, #624 - vld1.8 {d30-d31}, [r2, : 128] - vmlal.s32 q2, d30, d21 - vmlal.s32 q7, d19, d21 - vmlal.s32 q7, d27, d20 -- add r2, sp, #624 -+ add r2, sp, #592 - vld1.8 {d26-d27}, [r2, : 128] - vmlal.s32 q4, d25, d27 - vmlal.s32 q8, d29, d27 - vmlal.s32 q8, d25, d26 - vmlal.s32 q7, d28, d27 - vmlal.s32 q7, d29, d26 -- add r2, sp, #608 -+ add r2, sp, #576 - vld1.8 {d28-d29}, [r2, : 128] - vmlal.s32 q4, d24, d29 - vmlal.s32 q8, d23, d29 - vmlal.s32 q8, d24, d28 - vmlal.s32 q7, d22, d29 - vmlal.s32 q7, d23, d28 -- add r2, sp, #608 - vst1.8 {d8-d9}, [r2, : 128] -- add r2, sp, #560 -+ add r2, sp, #528 - vld1.8 {d8-d9}, [r2, : 128] - vmlal.s32 q7, d24, d9 - vmlal.s32 q7, d25, d31 -@@ -1414,36 +1377,36 @@ - vmlal.s32 q0, d23, d26 - vmlal.s32 q0, d24, d31 - vmlal.s32 q0, d19, d20 -- add r2, sp, #640 -+ add r2, sp, #608 - vld1.8 {d18-d19}, [r2, : 128] - vmlal.s32 q2, d18, d7 -- vmlal.s32 q2, d19, d6 - vmlal.s32 q5, d18, d6 -- vmlal.s32 q5, d19, d21 - vmlal.s32 q1, d18, d21 -- vmlal.s32 q1, d19, d29 - vmlal.s32 q0, d18, d28 -- vmlal.s32 q0, d19, d9 - vmlal.s32 q6, d18, d29 -+ vmlal.s32 q2, d19, d6 -+ vmlal.s32 q5, d19, d21 -+ vmlal.s32 q1, d19, d29 -+ vmlal.s32 q0, d19, d9 - vmlal.s32 q6, d19, d28 -- add r2, sp, #592 -+ add r2, sp, #560 - vld1.8 {d18-d19}, [r2, : 128] -- add r2, sp, #512 -+ add r2, sp, #480 - vld1.8 {d22-d23}, [r2, : 128] - vmlal.s32 q5, d19, d7 - vmlal.s32 q0, d18, d21 - vmlal.s32 q0, d19, d29 - vmlal.s32 q6, d18, d6 -- add r2, sp, #528 -+ add r2, sp, 
#496 - vld1.8 {d6-d7}, [r2, : 128] - vmlal.s32 q6, d19, d21 -- add r2, sp, #576 -+ add r2, sp, #544 - vld1.8 {d18-d19}, [r2, : 128] - vmlal.s32 q0, d30, d8 -- add r2, sp, #672 -+ add r2, sp, #640 - vld1.8 {d20-d21}, [r2, : 128] - vmlal.s32 q5, d30, d29 -- add r2, sp, #608 -+ add r2, sp, #576 - vld1.8 {d24-d25}, [r2, : 128] - vmlal.s32 q1, d30, d28 - vadd.i64 q13, q0, q11 -@@ -1541,10 +1504,10 @@ - sub r4, r4, #24 - vst1.8 d0, [r2, : 64] - vst1.8 d1, [r4, : 64] -- ldr r2, [sp, #488] -- ldr r4, [sp, #492] -+ ldr r2, [sp, #456] -+ ldr r4, [sp, #460] - subs r5, r2, #1 -- bge ._mainloop -+ bge .Lmainloop - add r1, r3, #144 - add r2, r3, #336 - vld1.8 {d0-d1}, [r1, : 128]! -@@ -1553,41 +1516,41 @@ - vst1.8 {d0-d1}, [r2, : 128]! - vst1.8 {d2-d3}, [r2, : 128]! - vst1.8 d4, [r2, : 64] -- ldr r1, =0 --._invertloop: -+ movw r1, #0 -+.Linvertloop: - add r2, r3, #144 -- ldr r4, =0 -- ldr r5, =2 -+ movw r4, #0 -+ movw r5, #2 - cmp r1, #1 -- ldreq r5, =1 -+ moveq r5, #1 - addeq r2, r3, #336 - addeq r4, r3, #48 - cmp r1, #2 -- ldreq r5, =1 -+ moveq r5, #1 - addeq r2, r3, #48 - cmp r1, #3 -- ldreq r5, =5 -+ moveq r5, #5 - addeq r4, r3, #336 - cmp r1, #4 -- ldreq r5, =10 -+ moveq r5, #10 - cmp r1, #5 -- ldreq r5, =20 -+ moveq r5, #20 - cmp r1, #6 -- ldreq r5, =10 -+ moveq r5, #10 - addeq r2, r3, #336 - addeq r4, r3, #336 - cmp r1, #7 -- ldreq r5, =50 -+ moveq r5, #50 - cmp r1, #8 -- ldreq r5, =100 -+ moveq r5, #100 - cmp r1, #9 -- ldreq r5, =50 -+ moveq r5, #50 - addeq r2, r3, #336 - cmp r1, #10 -- ldreq r5, =5 -+ moveq r5, #5 - addeq r2, r3, #48 - cmp r1, #11 -- ldreq r5, =0 -+ moveq r5, #0 - addeq r2, r3, #96 - add r6, r3, #144 - add r7, r3, #288 -@@ -1598,8 +1561,8 @@ - vst1.8 {d2-d3}, [r7, : 128]! - vst1.8 d4, [r7, : 64] - cmp r5, #0 -- beq ._skipsquaringloop --._squaringloop: -+ beq .Lskipsquaringloop -+.Lsquaringloop: - add r6, r3, #288 - add r7, r3, #288 - add r8, r3, #288 -@@ -1611,7 +1574,7 @@ - vld1.8 {d6-d7}, [r7, : 128]! 
- vld1.8 {d9}, [r7, : 64] - vld1.8 {d10-d11}, [r6, : 128]! -- add r7, sp, #416 -+ add r7, sp, #384 - vld1.8 {d12-d13}, [r6, : 128]! - vmul.i32 q7, q2, q0 - vld1.8 {d8}, [r6, : 64] -@@ -1726,7 +1689,7 @@ - vext.32 d10, d6, d6, #0 - vmov.i32 q1, #0xffffffff - vshl.i64 q4, q1, #25 -- add r7, sp, #512 -+ add r7, sp, #480 - vld1.8 {d14-d15}, [r7, : 128] - vadd.i64 q9, q2, q7 - vshl.i64 q1, q1, #26 -@@ -1735,7 +1698,7 @@ - vadd.i64 q5, q5, q10 - vand q9, q9, q1 - vld1.8 {d16}, [r6, : 64]! -- add r6, sp, #528 -+ add r6, sp, #496 - vld1.8 {d20-d21}, [r6, : 128] - vadd.i64 q11, q5, q10 - vsub.i64 q2, q2, q9 -@@ -1789,8 +1752,8 @@ - sub r6, r6, #32 - vst1.8 d4, [r6, : 64] - subs r5, r5, #1 -- bhi ._squaringloop --._skipsquaringloop: -+ bhi .Lsquaringloop -+.Lskipsquaringloop: - mov r2, r2 - add r5, r3, #288 - add r6, r3, #144 -@@ -1802,7 +1765,7 @@ - vld1.8 {d6-d7}, [r5, : 128]! - vld1.8 {d9}, [r5, : 64] - vld1.8 {d10-d11}, [r2, : 128]! -- add r5, sp, #416 -+ add r5, sp, #384 - vld1.8 {d12-d13}, [r2, : 128]! - vmul.i32 q7, q2, q0 - vld1.8 {d8}, [r2, : 64] -@@ -1917,7 +1880,7 @@ - vext.32 d10, d6, d6, #0 - vmov.i32 q1, #0xffffffff - vshl.i64 q4, q1, #25 -- add r5, sp, #512 -+ add r5, sp, #480 - vld1.8 {d14-d15}, [r5, : 128] - vadd.i64 q9, q2, q7 - vshl.i64 q1, q1, #26 -@@ -1926,7 +1889,7 @@ - vadd.i64 q5, q5, q10 - vand q9, q9, q1 - vld1.8 {d16}, [r2, : 64]! -- add r2, sp, #528 -+ add r2, sp, #496 - vld1.8 {d20-d21}, [r2, : 128] - vadd.i64 q11, q5, q10 - vsub.i64 q2, q2, q9 -@@ -1980,7 +1943,7 @@ - sub r2, r2, #32 - vst1.8 d4, [r2, : 64] - cmp r4, #0 -- beq ._skippostcopy -+ beq .Lskippostcopy - add r2, r3, #144 - mov r4, r4 - vld1.8 {d0-d1}, [r2, : 128]! -@@ -1989,9 +1952,9 @@ - vst1.8 {d0-d1}, [r4, : 128]! - vst1.8 {d2-d3}, [r4, : 128]! - vst1.8 d4, [r4, : 64] --._skippostcopy: -+.Lskippostcopy: - cmp r1, #1 -- bne ._skipfinalcopy -+ bne .Lskipfinalcopy - add r2, r3, #288 - add r4, r3, #144 - vld1.8 {d0-d1}, [r2, : 128]! 
-@@ -2000,10 +1963,10 @@ - vst1.8 {d0-d1}, [r4, : 128]! - vst1.8 {d2-d3}, [r4, : 128]! - vst1.8 d4, [r4, : 64] --._skipfinalcopy: -+.Lskipfinalcopy: - add r1, r1, #1 - cmp r1, #12 -- blo ._invertloop -+ blo .Linvertloop - add r1, r3, #144 - ldr r2, [r1], #4 - ldr r3, [r1], #4 -@@ -2085,21 +2048,15 @@ - add r8, r8, r10, LSL #12 - mov r9, r10, LSR #20 - add r1, r9, r1, LSL #6 -- str r2, [r0], #4 -- str r3, [r0], #4 -- str r4, [r0], #4 -- str r5, [r0], #4 -- str r6, [r0], #4 -- str r7, [r0], #4 -- str r8, [r0], #4 -- str r1, [r0] -- ldrd r4, [sp, #0] -- ldrd r6, [sp, #8] -- ldrd r8, [sp, #16] -- ldrd r10, [sp, #24] -- ldr r12, [sp, #480] -- ldr r14, [sp, #484] -- ldr r0, =0 -- mov sp, r12 -- vpop {q4, q5, q6, q7} -- bx lr -+ str r2, [r0] -+ str r3, [r0, #4] -+ str r4, [r0, #8] -+ str r5, [r0, #12] -+ str r6, [r0, #16] -+ str r7, [r0, #20] -+ str r8, [r0, #24] -+ str r1, [r0, #28] -+ movw r0, #0 -+ mov sp, ip -+ pop {r4-r11, pc} -+ENDPROC(curve25519_neon) ---- /dev/null -+++ b/arch/arm/crypto/curve25519-glue.c -@@ -0,0 +1,127 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * Based on public domain code from Daniel J. Bernstein and Peter Schwabe. This -+ * began from SUPERCOP's curve25519/neon2/scalarmult.s, but has subsequently been -+ * manually reworked for use in kernel space. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+asmlinkage void curve25519_neon(u8 mypublic[CURVE25519_KEY_SIZE], -+ const u8 secret[CURVE25519_KEY_SIZE], -+ const u8 basepoint[CURVE25519_KEY_SIZE]); -+ -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); -+ -+void curve25519_arch(u8 out[CURVE25519_KEY_SIZE], -+ const u8 scalar[CURVE25519_KEY_SIZE], -+ const u8 point[CURVE25519_KEY_SIZE]) -+{ -+ if (static_branch_likely(&have_neon) && crypto_simd_usable()) { -+ kernel_neon_begin(); -+ curve25519_neon(out, scalar, point); -+ kernel_neon_end(); -+ } else { -+ curve25519_generic(out, scalar, point); -+ } -+} -+EXPORT_SYMBOL(curve25519_arch); -+ -+static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, -+ unsigned int len) -+{ -+ u8 *secret = kpp_tfm_ctx(tfm); -+ -+ if (!len) -+ curve25519_generate_secret(secret); -+ else if (len == CURVE25519_KEY_SIZE && -+ crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) -+ memcpy(secret, buf, CURVE25519_KEY_SIZE); -+ else -+ return -EINVAL; -+ return 0; -+} -+ -+static int curve25519_compute_value(struct kpp_request *req) -+{ -+ struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); -+ const u8 *secret = kpp_tfm_ctx(tfm); -+ u8 public_key[CURVE25519_KEY_SIZE]; -+ u8 buf[CURVE25519_KEY_SIZE]; -+ int copied, nbytes; -+ u8 const *bp; -+ -+ if (req->src) { -+ copied = sg_copy_to_buffer(req->src, -+ sg_nents_for_len(req->src, -+ CURVE25519_KEY_SIZE), -+ public_key, CURVE25519_KEY_SIZE); -+ if (copied != CURVE25519_KEY_SIZE) -+ return -EINVAL; -+ bp = public_key; -+ } else { -+ bp = curve25519_base_point; -+ } -+ -+ curve25519_arch(buf, secret, bp); -+ -+ /* might want less than we've got */ -+ nbytes = min_t(size_t, CURVE25519_KEY_SIZE, req->dst_len); -+ copied = sg_copy_from_buffer(req->dst, sg_nents_for_len(req->dst, -+ nbytes), -+ buf, nbytes); -+ if (copied != nbytes) -+ return -EINVAL; -+ return 0; -+} -+ -+static unsigned 
int curve25519_max_size(struct crypto_kpp *tfm) -+{ -+ return CURVE25519_KEY_SIZE; -+} -+ -+static struct kpp_alg curve25519_alg = { -+ .base.cra_name = "curve25519", -+ .base.cra_driver_name = "curve25519-neon", -+ .base.cra_priority = 200, -+ .base.cra_module = THIS_MODULE, -+ .base.cra_ctxsize = CURVE25519_KEY_SIZE, -+ -+ .set_secret = curve25519_set_secret, -+ .generate_public_key = curve25519_compute_value, -+ .compute_shared_secret = curve25519_compute_value, -+ .max_size = curve25519_max_size, -+}; -+ -+static int __init mod_init(void) -+{ -+ if (elf_hwcap & HWCAP_NEON) { -+ static_branch_enable(&have_neon); -+ return crypto_register_kpp(&curve25519_alg); -+ } -+ return 0; -+} -+ -+static void __exit mod_exit(void) -+{ -+ if (elf_hwcap & HWCAP_NEON) -+ crypto_unregister_kpp(&curve25519_alg); -+} -+ -+module_init(mod_init); -+module_exit(mod_exit); -+ -+MODULE_ALIAS_CRYPTO("curve25519"); -+MODULE_ALIAS_CRYPTO("curve25519-neon"); -+MODULE_LICENSE("GPL v2"); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0032-crypto-chacha20poly1305-import-construction-and-self.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0032-crypto-chacha20poly1305-import-construction-and-self.patch deleted file mode 100644 index 2d5601d7a..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0032-crypto-chacha20poly1305-import-construction-and-self.patch +++ /dev/null @@ -1,7677 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:39 +0100 -Subject: [PATCH] crypto: chacha20poly1305 - import construction and selftest - from Zinc - -commit ed20078b7e3331e82828be357147af6a3282e4ce upstream. - -This incorporates the chacha20poly1305 from the Zinc library, retaining -the library interface, but replacing the implementation with calls into -the code that already existed in the kernel's crypto API. 
- -Note that this library API does not implement RFC7539 fully, given that -it is limited to 64-bit nonces. (The 96-bit nonce version that was part -of the selftest only has been removed, along with the 96-bit nonce test -vectors that only tested the selftest but not the actual library itself) - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - include/crypto/chacha20poly1305.h | 37 + - lib/crypto/Kconfig | 7 + - lib/crypto/Makefile | 4 + - lib/crypto/chacha20poly1305-selftest.c | 7348 ++++++++++++++++++++++++ - lib/crypto/chacha20poly1305.c | 219 + - 5 files changed, 7615 insertions(+) - create mode 100644 include/crypto/chacha20poly1305.h - create mode 100644 lib/crypto/chacha20poly1305-selftest.c - create mode 100644 lib/crypto/chacha20poly1305.c - ---- /dev/null -+++ b/include/crypto/chacha20poly1305.h -@@ -0,0 +1,37 @@ -+/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef __CHACHA20POLY1305_H -+#define __CHACHA20POLY1305_H -+ -+#include -+ -+enum chacha20poly1305_lengths { -+ XCHACHA20POLY1305_NONCE_SIZE = 24, -+ CHACHA20POLY1305_KEY_SIZE = 32, -+ CHACHA20POLY1305_AUTHTAG_SIZE = 16 -+}; -+ -+void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u64 nonce, -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]); -+ -+bool __must_check -+chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, -+ const u8 *ad, const size_t ad_len, const u64 nonce, -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]); -+ -+void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]); -+ -+bool __must_check xchacha20poly1305_decrypt( -+ u8 *dst, const u8 *src, const size_t src_len, const u8 *ad, -+ const size_t ad_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]); -+ -+#endif /* __CHACHA20POLY1305_H */ ---- a/lib/crypto/Kconfig -+++ b/lib/crypto/Kconfig -@@ -119,5 +119,12 @@ config CRYPTO_LIB_POLY1305 - by either the generic implementation or an arch-specific one, if one - is available and enabled. 
- -+config CRYPTO_LIB_CHACHA20POLY1305 -+ tristate "ChaCha20-Poly1305 AEAD support (8-byte nonce library version)" -+ depends on CRYPTO_ARCH_HAVE_LIB_CHACHA || !CRYPTO_ARCH_HAVE_LIB_CHACHA -+ depends on CRYPTO_ARCH_HAVE_LIB_POLY1305 || !CRYPTO_ARCH_HAVE_LIB_POLY1305 -+ select CRYPTO_LIB_CHACHA -+ select CRYPTO_LIB_POLY1305 -+ - config CRYPTO_LIB_SHA256 - tristate ---- a/lib/crypto/Makefile -+++ b/lib/crypto/Makefile -@@ -16,6 +16,9 @@ libblake2s-generic-y += blake2s-gener - obj-$(CONFIG_CRYPTO_LIB_BLAKE2S) += libblake2s.o - libblake2s-y += blake2s.o - -+obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o -+libchacha20poly1305-y += chacha20poly1305.o -+ - obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519.o - libcurve25519-y := curve25519-fiat32.o - libcurve25519-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o -@@ -32,4 +35,5 @@ libsha256-y := sha256.o - - ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y) - libblake2s-y += blake2s-selftest.o -+libchacha20poly1305-y += chacha20poly1305-selftest.o - endif ---- /dev/null -+++ b/lib/crypto/chacha20poly1305-selftest.c -@@ -0,0 +1,7348 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct chacha20poly1305_testvec { -+ const u8 *input, *output, *assoc, *nonce, *key; -+ size_t ilen, alen, nlen; -+ bool failure; -+}; -+ -+/* The first of these are the ChaCha20-Poly1305 AEAD test vectors from RFC7539 -+ * 2.8.2. After they are generated by reference implementations. And the final -+ * marked ones are taken from wycheproof, but we only do these for the encrypt -+ * side, because mostly we're stressing the primitives rather than the actual -+ * chapoly construction. 
-+ */ -+ -+static const u8 enc_input001[] __initconst = { -+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, -+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20, -+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, -+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, -+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, -+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, -+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, -+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d, -+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, -+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, -+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, -+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, -+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, -+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64, -+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, -+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, -+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, -+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, -+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, -+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, -+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, -+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, -+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, -+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, -+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, -+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, -+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, -+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, -+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, -+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, -+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, -+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, -+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80, -+ 0x9d -+}; -+static const u8 enc_output001[] __initconst = { -+ 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4, -+ 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd, -+ 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89, -+ 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 
0x8c, 0xb2, -+ 0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee, -+ 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0, -+ 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00, -+ 0xd4, 0xf0, 0x3b, 0x7f, 0x35, 0x58, 0x94, 0xcf, -+ 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce, -+ 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81, -+ 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd, -+ 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55, -+ 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61, -+ 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38, -+ 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0, -+ 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4, -+ 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46, -+ 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9, -+ 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e, -+ 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e, -+ 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15, -+ 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a, -+ 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea, -+ 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a, -+ 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99, -+ 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e, -+ 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10, -+ 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10, -+ 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94, -+ 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30, -+ 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf, -+ 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29, -+ 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70, -+ 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb, -+ 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f, -+ 0x38 -+}; -+static const u8 enc_assoc001[] __initconst = { -+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x4e, 0x91 -+}; -+static const u8 enc_nonce001[] __initconst = { -+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 -+}; -+static const u8 enc_key001[] __initconst = { -+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, -+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, -+ 0x47, 
0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, -+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0 -+}; -+ -+static const u8 enc_input002[] __initconst = { }; -+static const u8 enc_output002[] __initconst = { -+ 0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1, -+ 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92 -+}; -+static const u8 enc_assoc002[] __initconst = { }; -+static const u8 enc_nonce002[] __initconst = { -+ 0xca, 0xbf, 0x33, 0x71, 0x32, 0x45, 0x77, 0x8e -+}; -+static const u8 enc_key002[] __initconst = { -+ 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f, -+ 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86, -+ 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef, -+ 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68 -+}; -+ -+static const u8 enc_input003[] __initconst = { }; -+static const u8 enc_output003[] __initconst = { -+ 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6, -+ 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77 -+}; -+static const u8 enc_assoc003[] __initconst = { -+ 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b -+}; -+static const u8 enc_nonce003[] __initconst = { -+ 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d -+}; -+static const u8 enc_key003[] __initconst = { -+ 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88, -+ 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a, -+ 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08, -+ 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d -+}; -+ -+static const u8 enc_input004[] __initconst = { -+ 0xa4 -+}; -+static const u8 enc_output004[] __initconst = { -+ 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2, -+ 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac, -+ 0x89 -+}; -+static const u8 enc_assoc004[] __initconst = { -+ 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40 -+}; -+static const u8 enc_nonce004[] __initconst = { -+ 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4 -+}; -+static const u8 enc_key004[] __initconst = { -+ 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8, -+ 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1, -+ 0xe2, 0x18, 
0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d, -+ 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e -+}; -+ -+static const u8 enc_input005[] __initconst = { -+ 0x2d -+}; -+static const u8 enc_output005[] __initconst = { -+ 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e, -+ 0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c, -+ 0xac -+}; -+static const u8 enc_assoc005[] __initconst = { }; -+static const u8 enc_nonce005[] __initconst = { -+ 0x20, 0x1c, 0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30 -+}; -+static const u8 enc_key005[] __initconst = { -+ 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31, -+ 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87, -+ 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01, -+ 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87 -+}; -+ -+static const u8 enc_input006[] __initconst = { -+ 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a, -+ 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92, -+ 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37, -+ 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50, -+ 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec, -+ 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb, -+ 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66, -+ 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb, -+ 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b, -+ 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e, -+ 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3, -+ 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0, -+ 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb, -+ 0x01, 0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41, -+ 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc, -+ 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde, -+ 0x8f -+}; -+static const u8 enc_output006[] __initconst = { -+ 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1, -+ 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15, -+ 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c, -+ 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda, -+ 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11, -+ 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8, -+ 0xf7, 0x99, 0x2e, 0x4a, 
0xd0, 0xb8, 0x2c, 0xdc, -+ 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3, -+ 0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5, -+ 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02, -+ 0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93, -+ 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78, -+ 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1, -+ 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66, -+ 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc, -+ 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0, -+ 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d, -+ 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a, -+ 0xeb -+}; -+static const u8 enc_assoc006[] __initconst = { -+ 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b -+}; -+static const u8 enc_nonce006[] __initconst = { -+ 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c -+}; -+static const u8 enc_key006[] __initconst = { -+ 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae, -+ 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78, -+ 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9, -+ 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01 -+}; -+ -+static const u8 enc_input007[] __initconst = { -+ 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5, -+ 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a, -+ 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1, -+ 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17, -+ 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c, -+ 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1, -+ 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51, -+ 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1, -+ 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86, -+ 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a, -+ 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a, -+ 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98, -+ 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36, -+ 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34, -+ 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57, -+ 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84, -+ 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4, -+ 0xc9, 0x2a, 0xdb, 
0x4a, 0x53, 0xc4, 0xbe, 0x80, -+ 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82, -+ 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5, -+ 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d, -+ 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c, -+ 0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf, -+ 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc, -+ 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3, -+ 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14, -+ 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81, -+ 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77, -+ 0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3, -+ 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2, -+ 0x40, 0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b, -+ 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3 -+}; -+static const u8 enc_output007[] __initconst = { -+ 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c, -+ 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8, -+ 0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c, -+ 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb, -+ 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0, -+ 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21, -+ 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70, -+ 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac, -+ 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99, -+ 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9, -+ 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f, -+ 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7, -+ 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53, -+ 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12, -+ 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6, -+ 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0, -+ 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54, -+ 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6, -+ 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e, -+ 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb, -+ 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30, -+ 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f, -+ 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2, -+ 0xea, 0xce, 0x78, 0x00, 
0xd8, 0x8b, 0xd5, 0x2e, -+ 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34, -+ 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39, -+ 0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7, -+ 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9, -+ 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82, -+ 0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04, -+ 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34, -+ 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef, -+ 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42, -+ 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53 -+}; -+static const u8 enc_assoc007[] __initconst = { }; -+static const u8 enc_nonce007[] __initconst = { -+ 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0 -+}; -+static const u8 enc_key007[] __initconst = { -+ 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd, -+ 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c, -+ 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80, -+ 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01 -+}; -+ -+static const u8 enc_input008[] __initconst = { -+ 0xc3, 0x09, 0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10, -+ 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2, -+ 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c, -+ 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb, -+ 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12, -+ 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa, -+ 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6, -+ 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4, -+ 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91, -+ 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb, -+ 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47, -+ 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15, -+ 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f, -+ 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a, -+ 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3, -+ 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97, -+ 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80, -+ 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e, -+ 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f, -+ 0x60, 0x3a, 0x08, 0x47, 
0xaf, 0xe1, 0xf6, 0x10, -+ 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a, -+ 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0, -+ 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35, -+ 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d, -+ 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d, -+ 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57, -+ 0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4, -+ 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f, -+ 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39, -+ 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda, -+ 0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17, -+ 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43, -+ 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19, -+ 0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09, -+ 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21, -+ 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07, -+ 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f, -+ 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b, -+ 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a, -+ 0x41, 0xdd, 0xd1, 0x85, 0x5e, 0x37, 0x19, 0xed, -+ 0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2, -+ 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca, -+ 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff, -+ 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b, -+ 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b, -+ 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b, -+ 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6, -+ 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04, -+ 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48, -+ 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b, -+ 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13, -+ 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8, -+ 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f, -+ 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0, -+ 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92, -+ 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a, -+ 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41, -+ 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17, -+ 0x5c, 0xb5, 0x33, 0x17, 0x68, 
0x2b, 0x08, 0x30, -+ 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20, -+ 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49, -+ 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a, -+ 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b, -+ 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3 -+}; -+static const u8 enc_output008[] __initconst = { -+ 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd, -+ 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1, -+ 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93, -+ 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d, -+ 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c, -+ 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6, -+ 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4, -+ 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5, -+ 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84, -+ 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd, -+ 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed, -+ 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab, -+ 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13, -+ 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49, -+ 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6, -+ 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8, -+ 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2, -+ 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94, -+ 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18, -+ 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60, -+ 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8, -+ 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b, -+ 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f, -+ 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c, -+ 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20, -+ 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff, -+ 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9, -+ 0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c, -+ 0x16, 0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9, -+ 0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6, -+ 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea, -+ 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e, -+ 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 
0x55, 0x82, -+ 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1, -+ 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70, -+ 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1, -+ 0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c, -+ 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7, -+ 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc, -+ 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc, -+ 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3, -+ 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb, -+ 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97, -+ 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f, -+ 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39, -+ 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f, -+ 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d, -+ 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2, -+ 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d, -+ 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96, -+ 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b, -+ 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20, -+ 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95, -+ 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb, -+ 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35, -+ 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62, -+ 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9, -+ 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6, -+ 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8, -+ 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a, -+ 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93, -+ 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14, -+ 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99, -+ 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86, -+ 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f, -+ 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54 -+}; -+static const u8 enc_assoc008[] __initconst = { }; -+static const u8 enc_nonce008[] __initconst = { -+ 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02 -+}; -+static const u8 enc_key008[] __initconst = { -+ 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53, -+ 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 
0xf0, -+ 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86, -+ 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba -+}; -+ -+static const u8 enc_input009[] __initconst = { -+ 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b, -+ 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8, -+ 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca, -+ 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09, -+ 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5, -+ 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85, -+ 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44, -+ 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97, -+ 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77, -+ 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41, -+ 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c, -+ 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00, -+ 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82, -+ 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f, -+ 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e, -+ 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55, -+ 0x10, 0x9a, 0xdf, 0x67, 0x22, 0x8b, 0x43, 0xab, -+ 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17, -+ 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e, -+ 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f, -+ 0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82, -+ 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3, -+ 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f, -+ 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0, -+ 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08, -+ 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b, -+ 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85, -+ 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28, -+ 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c, -+ 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62, -+ 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2, -+ 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3, -+ 0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62, -+ 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40, -+ 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f, -+ 0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b, 
-+ 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91, -+ 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5, -+ 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c, -+ 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4, -+ 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49, -+ 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04, -+ 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03, -+ 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa, -+ 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec, -+ 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6, -+ 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69, -+ 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36, -+ 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8, -+ 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf, -+ 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe, -+ 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82, -+ 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab, -+ 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d, -+ 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3, -+ 0x7b, 0xfc, 0xd1, 0x2b, 0xc3, 0x21, 0xeb, 0xe5, -+ 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34, -+ 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49, -+ 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f, -+ 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d, -+ 0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42, -+ 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef, -+ 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27, -+ 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52, -+ 0x65 -+}; -+static const u8 enc_output009[] __initconst = { -+ 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf, -+ 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66, -+ 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72, -+ 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd, -+ 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28, -+ 0x48, 0xc5, 0x3d, 0x98, 0xa3, 0xbc, 0xe0, 0xbe, -+ 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06, -+ 0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5, -+ 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7, -+ 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 
0x09, -+ 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a, -+ 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00, -+ 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62, -+ 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb, -+ 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2, -+ 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28, -+ 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e, -+ 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a, -+ 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6, -+ 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83, -+ 0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9, -+ 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a, -+ 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79, -+ 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a, -+ 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea, -+ 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b, -+ 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52, -+ 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb, -+ 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89, -+ 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad, -+ 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19, -+ 0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71, -+ 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d, -+ 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54, -+ 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a, -+ 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d, -+ 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95, -+ 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42, -+ 0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16, -+ 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6, -+ 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf, -+ 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d, -+ 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f, -+ 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b, -+ 0x53, 0xae, 0xaf, 0x1c, 0x33, 0x5f, 0x55, 0x6e, -+ 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4, -+ 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c, -+ 0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4, -+ 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1, -+ 
0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb, -+ 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff, -+ 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2, -+ 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06, -+ 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66, -+ 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90, -+ 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55, -+ 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc, -+ 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8, -+ 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62, -+ 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba, -+ 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2, -+ 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89, -+ 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06, -+ 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90, -+ 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf, -+ 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8, -+ 0xae -+}; -+static const u8 enc_assoc009[] __initconst = { -+ 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e, -+ 0xef -+}; -+static const u8 enc_nonce009[] __initconst = { -+ 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78 -+}; -+static const u8 enc_key009[] __initconst = { -+ 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5, -+ 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86, -+ 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2, -+ 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b -+}; -+ -+static const u8 enc_input010[] __initconst = { -+ 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf, -+ 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c, -+ 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22, -+ 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc, -+ 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16, -+ 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7, -+ 0xbe, 0xbe, 0x8a, 0x36, 0x04, 0xc0, 0x10, 0xf4, -+ 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d, -+ 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5, -+ 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46, -+ 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82, -+ 0x58, 0x5e, 0x3b, 0x65, 0x2f, 
0x0e, 0xfd, 0x2b, -+ 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a, -+ 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf, -+ 0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca, -+ 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95, -+ 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09, -+ 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3, -+ 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3, -+ 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f, -+ 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58, -+ 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad, -+ 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde, -+ 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44, -+ 0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a, -+ 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9, -+ 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26, -+ 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc, -+ 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74, -+ 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b, -+ 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93, -+ 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37, -+ 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f, -+ 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d, -+ 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca, -+ 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73, -+ 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f, -+ 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1, -+ 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9, -+ 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76, -+ 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac, -+ 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7, -+ 0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce, -+ 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30, -+ 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb, -+ 0x1e, 0x16, 0x42, 0xc4, 0xba, 0xae, 0x58, 0xaa, -+ 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd, -+ 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f, -+ 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb, -+ 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34, -+ 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 
0xbc, 0x5e, -+ 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f, -+ 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53, -+ 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41, -+ 0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e, -+ 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d, -+ 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27, -+ 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e, -+ 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8, -+ 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a, -+ 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12, -+ 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3, -+ 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66, -+ 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0, -+ 0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c, -+ 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4, -+ 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49, -+ 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90, -+ 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11, -+ 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c, -+ 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b, -+ 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74, -+ 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c, -+ 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27, -+ 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1, -+ 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27, -+ 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88, -+ 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27, -+ 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b, -+ 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39, -+ 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7, -+ 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc, -+ 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe, -+ 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5, -+ 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 0x87, 0xdf, -+ 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05, -+ 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73, -+ 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda, -+ 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe, -+ 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 
0x71, -+ 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed, -+ 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d, -+ 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33, -+ 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f, -+ 0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a, -+ 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa, -+ 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e, -+ 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e, -+ 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87, -+ 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5, -+ 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4, -+ 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38, -+ 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34, -+ 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f, -+ 0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36, -+ 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69, -+ 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44, -+ 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5, -+ 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce, -+ 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd, -+ 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27, -+ 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f, -+ 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8, -+ 0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a, -+ 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5, -+ 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca, -+ 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e, -+ 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92, -+ 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13, -+ 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf, -+ 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6, -+ 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3, -+ 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b, -+ 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 0x9d, -+ 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f, -+ 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40, -+ 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c, -+ 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f -+}; -+static const u8 enc_output010[] __initconst = { 
-+ 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b, -+ 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74, -+ 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1, -+ 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd, -+ 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6, -+ 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5, -+ 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96, -+ 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02, -+ 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30, -+ 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57, -+ 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53, -+ 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65, -+ 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71, -+ 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9, -+ 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18, -+ 0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce, -+ 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a, -+ 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69, -+ 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2, -+ 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95, -+ 0x06, 0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49, -+ 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e, -+ 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a, -+ 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a, -+ 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e, -+ 0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19, -+ 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b, -+ 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75, -+ 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d, -+ 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d, -+ 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f, -+ 0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a, -+ 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d, -+ 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5, -+ 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c, -+ 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77, -+ 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46, -+ 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43, -+ 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe, -+ 0x72, 
0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8, -+ 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76, -+ 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47, -+ 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8, -+ 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32, -+ 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59, -+ 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae, -+ 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a, -+ 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3, -+ 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74, -+ 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75, -+ 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2, -+ 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e, -+ 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2, -+ 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9, -+ 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1, -+ 0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07, -+ 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79, -+ 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71, -+ 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad, -+ 0xc2, 0xdd, 0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a, -+ 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c, -+ 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9, -+ 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79, -+ 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27, -+ 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90, -+ 0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe, -+ 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99, -+ 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1, -+ 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9, -+ 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0, -+ 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28, -+ 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e, -+ 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20, -+ 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60, -+ 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47, -+ 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68, -+ 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe, -+ 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33, -+ 0x3a, 0x61, 
0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8, -+ 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38, -+ 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7, -+ 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04, -+ 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c, -+ 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f, -+ 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c, -+ 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77, -+ 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54, -+ 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5, -+ 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4, -+ 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2, -+ 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e, -+ 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27, -+ 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f, -+ 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92, -+ 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55, -+ 0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe, -+ 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04, -+ 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4, -+ 0x4c, 0x31, 0x43, 0x7a, 0x93, 0xe2, 0x9b, 0x56, -+ 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02, -+ 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2, -+ 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8, -+ 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27, -+ 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47, -+ 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10, -+ 0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43, -+ 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0, -+ 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee, -+ 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47, -+ 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6, -+ 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d, -+ 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c, -+ 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3, -+ 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b, -+ 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09, -+ 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d, -+ 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1, -+ 0x2d, 0xb2, 0x66, 
0xc5, 0x1d, 0xed, 0xb7, 0xcd, -+ 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4, -+ 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63, -+ 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87, -+ 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd, -+ 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e, -+ 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a, -+ 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c, -+ 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38, -+ 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a, -+ 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5, -+ 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9, -+ 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0 -+}; -+static const u8 enc_assoc010[] __initconst = { -+ 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27, -+ 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2 -+}; -+static const u8 enc_nonce010[] __initconst = { -+ 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30 -+}; -+static const u8 enc_key010[] __initconst = { -+ 0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44, -+ 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf, -+ 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74, -+ 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7 -+}; -+ -+static const u8 enc_input011[] __initconst = { -+ 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b, -+ 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b, -+ 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d, -+ 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee, -+ 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30, -+ 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20, -+ 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f, -+ 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e, -+ 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66, -+ 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46, -+ 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35, -+ 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6, -+ 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0, -+ 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15, -+ 0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13, -+ 0x2a, 0x03, 0x27, 
0x09, 0xb4, 0xfb, 0xe7, 0xb7, -+ 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3, -+ 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37, -+ 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc, -+ 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95, -+ 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8, -+ 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac, -+ 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45, -+ 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf, -+ 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d, -+ 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc, -+ 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45, -+ 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a, -+ 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec, -+ 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e, -+ 0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10, -+ 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8, -+ 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66, -+ 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0, -+ 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62, -+ 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b, -+ 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4, -+ 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96, -+ 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7, -+ 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74, -+ 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8, -+ 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b, -+ 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70, -+ 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95, -+ 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3, -+ 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9, -+ 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d, -+ 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e, -+ 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32, -+ 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5, -+ 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80, -+ 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3, -+ 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad, -+ 0x1d, 0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d, -+ 0xa5, 0x7f, 0x21, 0x10, 
0xf1, 0x1f, 0x13, 0x20, -+ 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17, -+ 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6, -+ 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d, -+ 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82, -+ 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c, -+ 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9, -+ 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb, -+ 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96, -+ 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9, -+ 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f, -+ 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40, -+ 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc, -+ 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce, -+ 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71, -+ 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f, -+ 0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35, -+ 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90, -+ 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8, -+ 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01, -+ 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1, -+ 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe, -+ 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4, -+ 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf, -+ 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9, -+ 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f, -+ 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04, -+ 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7, -+ 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15, -+ 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc, -+ 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0, -+ 0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae, -+ 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb, -+ 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed, -+ 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51, -+ 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52, -+ 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84, -+ 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5, -+ 0x85, 0x1c, 0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4, -+ 0xef, 0xf4, 0xea, 0xd7, 0x59, 
0xac, 0x2a, 0x9e, -+ 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74, -+ 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f, -+ 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13, -+ 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea, -+ 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b, -+ 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef, -+ 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09, -+ 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe, -+ 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1, -+ 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9, -+ 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15, -+ 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a, -+ 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab, -+ 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36, -+ 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd, -+ 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde, -+ 0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd, -+ 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47, -+ 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5, -+ 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69, -+ 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21, -+ 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98, -+ 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07, -+ 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57, -+ 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd, -+ 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03, -+ 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11, -+ 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96, -+ 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91, -+ 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d, -+ 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0, -+ 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9, -+ 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42, -+ 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a, -+ 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18, -+ 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc, -+ 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce, -+ 0xdb, 0xa0, 0x0a, 0x52, 0x2a, 0x4e, 0xdd, 0xcc, -+ 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 
0xaa, 0xf0, -+ 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf, -+ 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7, -+ 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80, -+ 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c, -+ 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82, -+ 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9, -+ 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20, -+ 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58, -+ 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6, -+ 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc, -+ 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50, -+ 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86, -+ 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a, -+ 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80, -+ 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec, -+ 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08, -+ 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c, -+ 0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde, -+ 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d, -+ 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17, -+ 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f, -+ 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26, -+ 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96, -+ 0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97, -+ 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6, -+ 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55, -+ 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e, -+ 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88, -+ 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5, -+ 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b, -+ 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15, -+ 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1, -+ 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4, -+ 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3, -+ 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf, -+ 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e, -+ 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb, -+ 0x70, 0xa8, 0x4c, 0x52, 0x81, 0xa9, 0xff, 0x76, -+ 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 
0xc5, -+ 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c, -+ 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde, -+ 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f, -+ 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51, -+ 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9, -+ 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99, -+ 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6, -+ 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04, -+ 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31, -+ 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a, -+ 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56, -+ 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e, -+ 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78, -+ 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a, -+ 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7, -+ 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb, -+ 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6, -+ 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8, -+ 0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc, -+ 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84, -+ 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86, -+ 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76, -+ 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a, -+ 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73, -+ 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8, -+ 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6, -+ 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2, -+ 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56, -+ 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb, -+ 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab, -+ 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76, -+ 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69, -+ 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d, -+ 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc, -+ 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22, -+ 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39, -+ 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6, -+ 0x02, 0x76, 0x76, 0x93, 0xce, 0x2f, 0x19, 0xe9, -+ 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f, -+ 
0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1, -+ 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83, -+ 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc, -+ 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4, -+ 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59, -+ 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68, -+ 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef, -+ 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1, -+ 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3, -+ 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44, -+ 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09, -+ 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8, -+ 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a, -+ 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d, -+ 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae, -+ 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2, -+ 0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10, -+ 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a, -+ 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34, -+ 0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f, -+ 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9, -+ 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b, -+ 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d, -+ 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57, -+ 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03, -+ 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87, -+ 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca, -+ 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53, -+ 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f, -+ 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61, -+ 0x10, 0x1e, 0xbf, 0xec, 0xa8 -+}; -+static const u8 enc_output011[] __initconst = { -+ 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8, -+ 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc, -+ 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74, -+ 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73, -+ 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e, -+ 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9, -+ 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e, -+ 0xcb, 0xab, 0x8f, 0x58, 
0x24, 0x9b, 0x01, 0xfd, -+ 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57, -+ 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19, -+ 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f, -+ 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45, -+ 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e, -+ 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39, -+ 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03, -+ 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f, -+ 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0, -+ 0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce, -+ 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb, -+ 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52, -+ 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21, -+ 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a, -+ 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35, -+ 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 0x91, -+ 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b, -+ 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e, -+ 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19, -+ 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07, -+ 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18, -+ 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96, -+ 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68, -+ 0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4, -+ 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57, -+ 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c, -+ 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23, -+ 0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8, -+ 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6, -+ 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40, -+ 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab, -+ 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb, -+ 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea, -+ 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8, -+ 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31, -+ 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0, -+ 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc, -+ 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94, -+ 0x69, 0xa0, 0xef, 0xd1, 0x85, 
0xb3, 0xa6, 0xb1, -+ 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46, -+ 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6, -+ 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7, -+ 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71, -+ 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a, -+ 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33, -+ 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38, -+ 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23, -+ 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb, -+ 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65, -+ 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73, -+ 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8, -+ 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb, -+ 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a, -+ 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca, -+ 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5, -+ 0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71, -+ 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8, -+ 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d, -+ 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6, -+ 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d, -+ 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7, -+ 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5, -+ 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8, -+ 0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd, -+ 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29, -+ 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22, -+ 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5, -+ 0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67, -+ 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11, -+ 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e, -+ 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09, -+ 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4, -+ 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f, -+ 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa, -+ 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec, -+ 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b, -+ 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d, -+ 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 
0xe5, 0x1b, -+ 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48, -+ 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3, -+ 0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63, -+ 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd, -+ 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78, -+ 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed, -+ 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82, -+ 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f, -+ 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3, -+ 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9, -+ 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72, -+ 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74, -+ 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40, -+ 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b, -+ 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a, -+ 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5, -+ 0x1d, 0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98, -+ 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71, -+ 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e, -+ 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4, -+ 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46, -+ 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e, -+ 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f, -+ 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93, -+ 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0, -+ 0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5, -+ 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61, -+ 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64, -+ 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85, -+ 0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20, -+ 0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6, -+ 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc, -+ 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8, -+ 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50, -+ 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4, -+ 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80, -+ 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0, -+ 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a, -+ 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 
0x35, -+ 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43, -+ 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12, -+ 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7, -+ 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34, -+ 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42, -+ 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0, -+ 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95, -+ 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74, -+ 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5, -+ 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12, -+ 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6, -+ 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86, -+ 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97, -+ 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45, -+ 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19, -+ 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86, -+ 0x06, 0xe2, 0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c, -+ 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba, -+ 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29, -+ 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6, -+ 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6, -+ 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09, -+ 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31, -+ 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99, -+ 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b, -+ 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca, -+ 0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00, -+ 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93, -+ 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3, -+ 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07, -+ 0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda, -+ 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90, -+ 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b, -+ 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a, -+ 0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6, -+ 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c, -+ 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57, -+ 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15, -+ 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e, -+ 
0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51, -+ 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75, -+ 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19, -+ 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08, -+ 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14, -+ 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba, -+ 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff, -+ 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90, -+ 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e, -+ 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93, -+ 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad, -+ 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2, -+ 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac, -+ 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d, -+ 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06, -+ 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c, -+ 0x36, 0x5d, 0x23, 0xa2, 0xcc, 0x57, 0x40, 0x91, -+ 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17, -+ 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20, -+ 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7, -+ 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf, -+ 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c, -+ 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2, -+ 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e, -+ 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a, -+ 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05, -+ 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58, -+ 0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8, -+ 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d, -+ 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71, -+ 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3, -+ 0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe, -+ 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62, -+ 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16, -+ 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66, -+ 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4, -+ 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2, -+ 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35, -+ 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3, -+ 0x05, 
0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4, -+ 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f, -+ 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe, -+ 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56, -+ 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b, -+ 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37, -+ 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3, -+ 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f, -+ 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f, -+ 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0, -+ 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70, -+ 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd, -+ 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f, -+ 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e, -+ 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67, -+ 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51, -+ 0x79, 0xf1, 0x61, 0x70, 0x15, 0x74, 0x9d, 0x23, -+ 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3, -+ 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5, -+ 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09, -+ 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7, -+ 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed, -+ 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb, -+ 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6, -+ 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5, -+ 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96, -+ 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe, -+ 0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44, -+ 0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6, -+ 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e, -+ 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0, -+ 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79, -+ 0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f, -+ 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d, -+ 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82, -+ 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47, -+ 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93, -+ 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6, -+ 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69, -+ 0x86, 0xc4, 
0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e, -+ 0x2b, 0xdf, 0xcd, 0xf9, 0x3c -+}; -+static const u8 enc_assoc011[] __initconst = { -+ 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7 -+}; -+static const u8 enc_nonce011[] __initconst = { -+ 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa -+}; -+static const u8 enc_key011[] __initconst = { -+ 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85, -+ 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca, -+ 0xfa, 0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52, -+ 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38 -+}; -+ -+static const u8 enc_input012[] __initconst = { -+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0, -+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5, -+ 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57, -+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff, -+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5, -+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b, -+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46, -+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b, -+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71, -+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0, -+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b, -+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d, -+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f, -+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24, -+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23, -+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e, -+ 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14, -+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d, -+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb, -+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4, -+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf, -+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e, -+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6, -+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33, -+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb, -+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0, -+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe, -+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 
0x23, 0x00, -+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d, -+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b, -+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50, -+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e, -+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4, -+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28, -+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8, -+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b, -+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86, -+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67, -+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff, -+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59, -+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe, -+ 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6, -+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e, -+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b, -+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50, -+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39, -+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02, -+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9, -+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a, -+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38, -+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9, -+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65, -+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb, -+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2, -+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae, -+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee, -+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00, -+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c, -+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8, -+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31, -+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68, -+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4, -+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0, -+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11, -+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7, -+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39, -+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 
0xe1, -+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1, -+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2, -+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66, -+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49, -+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2, -+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5, -+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3, -+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c, -+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa, -+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00, -+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54, -+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87, -+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03, -+ 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39, -+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40, -+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6, -+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22, -+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5, -+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e, -+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32, -+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53, -+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42, -+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c, -+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68, -+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48, -+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c, -+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce, -+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd, -+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa, -+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69, -+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8, -+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58, -+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0, -+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45, -+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb, -+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33, -+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c, -+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23, -+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80, -+ 
0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1, -+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff, -+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24, -+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9, -+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46, -+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8, -+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20, -+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35, -+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63, -+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb, -+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36, -+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a, -+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c, -+ 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f, -+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02, -+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03, -+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa, -+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16, -+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d, -+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5, -+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7, -+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac, -+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47, -+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3, -+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35, -+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e, -+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6, -+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74, -+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e, -+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a, -+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0, -+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4, -+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8, -+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16, -+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32, -+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65, -+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06, -+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a, -+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7, -+ 0x01, 
0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85, -+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb, -+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46, -+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e, -+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61, -+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb, -+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d, -+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00, -+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5, -+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6, -+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1, -+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a, -+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7, -+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63, -+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38, -+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3, -+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed, -+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49, -+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42, -+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0, -+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f, -+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1, -+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd, -+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d, -+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88, -+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1, -+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25, -+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22, -+ 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28, -+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f, -+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53, -+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28, -+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8, -+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc, -+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8, -+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb, -+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3, -+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3, -+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac, -+ 0xf0, 0xaa, 
0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2, -+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a, -+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad, -+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e, -+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd, -+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf, -+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba, -+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41, -+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91, -+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d, -+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6, -+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf, -+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92, -+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e, -+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72, -+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04, -+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46, -+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55, -+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84, -+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61, -+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d, -+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8, -+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d, -+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87, -+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70, -+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94, -+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f, -+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb, -+ 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90, -+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31, -+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06, -+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05, -+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7, -+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e, -+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae, -+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2, -+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21, -+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0, -+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d, -+ 0x50, 0xc1, 0x30, 
0x6c, 0xb1, 0xcd, 0xa0, 0xf0, -+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6, -+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5, -+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9, -+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8, -+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57, -+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1, -+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c, -+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b, -+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69, -+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d, -+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d, -+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19, -+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82, -+ 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20, -+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f, -+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e, -+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f, -+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47, -+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b, -+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4, -+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b, -+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4, -+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9, -+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3, -+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0, -+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16, -+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d, -+ 0x78, 0xec, 0x00 -+}; -+static const u8 enc_output012[] __initconst = { -+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3, -+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf, -+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1, -+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f, -+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e, -+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5, -+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b, -+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b, -+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2, -+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1, -+ 
0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74, -+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e, -+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae, -+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd, -+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04, -+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55, -+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef, -+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b, -+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74, -+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26, -+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f, -+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64, -+ 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd, -+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad, -+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b, -+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e, -+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e, -+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0, -+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f, -+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50, -+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97, -+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03, -+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a, -+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15, -+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb, -+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34, -+ 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47, -+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86, -+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24, -+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c, -+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9, -+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7, -+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48, -+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b, -+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e, -+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61, -+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75, -+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26, -+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74, -+ 0x37, 
0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43, -+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1, -+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79, -+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3, -+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5, -+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9, -+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d, -+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8, -+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26, -+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5, -+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d, -+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29, -+ 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57, -+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92, -+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9, -+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc, -+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd, -+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57, -+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3, -+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4, -+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c, -+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27, -+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c, -+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5, -+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14, -+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94, -+ 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b, -+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99, -+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84, -+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a, -+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa, -+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75, -+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74, -+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40, -+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72, -+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f, -+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92, -+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8, -+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c, -+ 0x2d, 0xaf, 
0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f, -+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb, -+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a, -+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b, -+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d, -+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c, -+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4, -+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00, -+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b, -+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4, -+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84, -+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba, -+ 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47, -+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4, -+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88, -+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81, -+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1, -+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a, -+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e, -+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1, -+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07, -+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24, -+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f, -+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a, -+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9, -+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9, -+ 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51, -+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1, -+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c, -+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53, -+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40, -+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a, -+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2, -+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2, -+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8, -+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07, -+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9, -+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d, -+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde, -+ 0xd3, 0x99, 0x17, 
0xaa, 0x71, 0xaa, 0x8f, 0x0f, -+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d, -+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d, -+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56, -+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c, -+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3, -+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d, -+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26, -+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10, -+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c, -+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11, -+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf, -+ 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c, -+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb, -+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79, -+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa, -+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80, -+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08, -+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c, -+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc, -+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab, -+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6, -+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9, -+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7, -+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2, -+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33, -+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2, -+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e, -+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c, -+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b, -+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66, -+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6, -+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44, -+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74, -+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6, -+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f, -+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24, -+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1, -+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2, -+ 0xa7, 0xfa, 0xfa, 0x17, 
0xf7, 0x78, 0xd6, 0xb5, -+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d, -+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0, -+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b, -+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3, -+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0, -+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3, -+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c, -+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b, -+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5, -+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51, -+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71, -+ 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68, -+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb, -+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e, -+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b, -+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8, -+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb, -+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54, -+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7, -+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff, -+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd, -+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde, -+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c, -+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1, -+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8, -+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14, -+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c, -+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4, -+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06, -+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52, -+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d, -+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c, -+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6, -+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5, -+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f, -+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e, -+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98, -+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8, -+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 
0x0d, 0xe1, 0xdb, -+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b, -+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79, -+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11, -+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d, -+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10, -+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23, -+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23, -+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90, -+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4, -+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1, -+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7, -+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11, -+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50, -+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8, -+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97, -+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38, -+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f, -+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33, -+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f, -+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75, -+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21, -+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90, -+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8, -+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91, -+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1, -+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f, -+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3, -+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc, -+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a, -+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62, -+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55, -+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23, -+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6, -+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac, -+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12, -+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a, -+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7, -+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec, -+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 
0xce, 0x28, -+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88, -+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4, -+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17, -+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2, -+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33, -+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a, -+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28, -+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62, -+ 0x70, 0xcf, 0xd6 -+}; -+static const u8 enc_assoc012[] __initconst = { -+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8, -+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce, -+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c, -+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc, -+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e, -+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f, -+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b, -+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9 -+}; -+static const u8 enc_nonce012[] __initconst = { -+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06 -+}; -+static const u8 enc_key012[] __initconst = { -+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e, -+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d, -+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e, -+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input053[] __initconst = { -+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83, -+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8, -+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b, -+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe -+}; -+static const u8 enc_output053[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0xe6, 0xd3, 0xd7, 0x32, 0x4a, 0x1c, 0xbb, 0xa7, -+ 0x77, 0xbb, 0xb0, 0xec, 0xdd, 0xa3, 0x78, 0x07 -+}; -+static const u8 enc_assoc053[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 -+}; -+static const u8 enc_nonce053[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key053[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input054[] __initconst = { -+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83, -+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8, -+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b, -+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe, -+ 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe, -+ 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b, -+ 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5, -+ 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd -+}; -+static const u8 enc_output054[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x06, 0x2d, 0xe6, 0x79, 0x5f, 0x27, 0x4f, 0xd2, -+ 0xa3, 0x05, 0xd7, 0x69, 0x80, 0xbc, 0x9c, 0xce -+}; -+static const u8 enc_assoc054[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 -+}; -+static const u8 enc_nonce054[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key054[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* 
wycheproof - misc */ -+static const u8 enc_input055[] __initconst = { -+ 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83, -+ 0x55, 0xd3, 0x04, 0x84, 0x64, 0x43, 0xfe, 0xe8, -+ 0xdf, 0x99, 0x47, 0x03, 0x03, 0xfb, 0x3b, 0x7b, -+ 0x80, 0xe0, 0x30, 0xbe, 0xeb, 0xd3, 0x29, 0xbe, -+ 0xe3, 0xbc, 0xdb, 0x5b, 0x1e, 0xde, 0xfc, 0xfe, -+ 0x8b, 0xcd, 0xa1, 0xb6, 0xa1, 0x5c, 0x8c, 0x2b, -+ 0x08, 0x69, 0xff, 0xd2, 0xec, 0x5e, 0x26, 0xe5, -+ 0x53, 0xb7, 0xb2, 0x27, 0xfe, 0x87, 0xfd, 0xbd, -+ 0x7a, 0xda, 0x44, 0x42, 0x42, 0x69, 0xbf, 0xfa, -+ 0x55, 0x27, 0xf2, 0x70, 0xac, 0xf6, 0x85, 0x02, -+ 0xb7, 0x4c, 0x5a, 0xe2, 0xe6, 0x0c, 0x05, 0x80, -+ 0x98, 0x1a, 0x49, 0x38, 0x45, 0x93, 0x92, 0xc4, -+ 0x9b, 0xb2, 0xf2, 0x84, 0xb6, 0x46, 0xef, 0xc7, -+ 0xf3, 0xf0, 0xb1, 0x36, 0x1d, 0xc3, 0x48, 0xed, -+ 0x77, 0xd3, 0x0b, 0xc5, 0x76, 0x92, 0xed, 0x38, -+ 0xfb, 0xac, 0x01, 0x88, 0x38, 0x04, 0x88, 0xc7 -+}; -+static const u8 enc_output055[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0xd8, 0xb4, 0x79, 0x02, 0xba, 0xae, 0xaf, 0xb3, -+ 0x42, 0x03, 0x05, 0x15, 0x29, 0xaf, 0x28, 0x2e -+}; -+static const u8 enc_assoc055[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00 -+}; -+static const u8 enc_nonce055[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key055[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input056[] __initconst = { -+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c, -+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17, -+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84, -+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41 -+}; -+static const u8 enc_output056[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xb3, 0x89, 0x1c, 0x84, 0x9c, 0xb5, 0x2c, 0x27, -+ 0x74, 0x7e, 0xdf, 0xcf, 0x31, 0x21, 0x3b, 0xb6 -+}; -+static const u8 enc_assoc056[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce056[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key056[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input057[] __initconst = { -+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c, -+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17, -+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84, -+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41, -+ 0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01, -+ 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4, -+ 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a, -+ 0xac, 0x48, 0x4d, 
0xd8, 0x01, 0x78, 0x02, 0x42 -+}; -+static const u8 enc_output057[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xf0, 0xc1, 0x2d, 0x26, 0xef, 0x03, 0x02, 0x9b, -+ 0x62, 0xc0, 0x08, 0xda, 0x27, 0xc5, 0xdc, 0x68 -+}; -+static const u8 enc_assoc057[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce057[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key057[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input058[] __initconst = { -+ 0xda, 0x92, 0xbf, 0x77, 0x7f, 0x6b, 0xe8, 0x7c, -+ 0xaa, 0x2c, 0xfb, 0x7b, 0x9b, 0xbc, 0x01, 0x17, -+ 0x20, 0x66, 0xb8, 0xfc, 0xfc, 0x04, 0xc4, 0x84, -+ 0x7f, 0x1f, 0xcf, 0x41, 0x14, 0x2c, 0xd6, 0x41, -+ 0x1c, 0x43, 0x24, 0xa4, 0xe1, 0x21, 0x03, 0x01, -+ 0x74, 0x32, 0x5e, 0x49, 0x5e, 0xa3, 0x73, 0xd4, -+ 0xf7, 0x96, 0x00, 0x2d, 0x13, 0xa1, 0xd9, 0x1a, -+ 0xac, 0x48, 0x4d, 0xd8, 0x01, 0x78, 0x02, 0x42, -+ 0x85, 0x25, 0xbb, 0xbd, 0xbd, 0x96, 0x40, 0x05, -+ 0xaa, 0xd8, 0x0d, 0x8f, 0x53, 0x09, 0x7a, 0xfd, -+ 0x48, 0xb3, 0xa5, 0x1d, 0x19, 0xf3, 0xfa, 0x7f, -+ 0x67, 0xe5, 0xb6, 0xc7, 0xba, 0x6c, 0x6d, 0x3b, -+ 0x64, 0x4d, 0x0d, 0x7b, 0x49, 0xb9, 0x10, 0x38, -+ 0x0c, 0x0f, 0x4e, 0xc9, 0xe2, 0x3c, 0xb7, 0x12, -+ 0x88, 0x2c, 0xf4, 0x3a, 0x89, 0x6d, 0x12, 0xc7, -+ 0x04, 0x53, 0xfe, 0x77, 0xc7, 0xfb, 0x77, 
0x38 -+}; -+static const u8 enc_output058[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xee, 0x65, 0x78, 0x30, 0x01, 0xc2, 0x56, 0x91, -+ 0xfa, 0x28, 0xd0, 0xf5, 0xf1, 0xc1, 0xd7, 0x62 -+}; -+static const u8 enc_assoc058[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce058[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key058[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input059[] __initconst = { -+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03, -+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68, -+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb, -+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e -+}; -+static const u8 enc_output059[] __initconst = { -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 
0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x79, 0xba, 0x7a, 0x29, 0xf5, 0xa7, 0xbb, 0x75, -+ 0x79, 0x7a, 0xf8, 0x7a, 0x61, 0x01, 0x29, 0xa4 -+}; -+static const u8 enc_assoc059[] __initconst = { -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 -+}; -+static const u8 enc_nonce059[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key059[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input060[] __initconst = { -+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03, -+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68, -+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb, -+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e, -+ 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e, -+ 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab, -+ 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65, -+ 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d -+}; -+static const u8 enc_output060[] __initconst = { -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x36, 0xb1, 0x74, 0x38, 0x19, 0xe1, 0xb9, 0xba, -+ 0x15, 0x51, 0xe8, 0xed, 0x92, 0x2a, 0x95, 0x9a -+}; -+static const u8 enc_assoc060[] __initconst = { -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 -+}; -+static const u8 enc_nonce060[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key060[] 
__initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input061[] __initconst = { -+ 0x25, 0x6d, 0x40, 0x08, 0x80, 0x94, 0x17, 0x03, -+ 0x55, 0xd3, 0x04, 0x04, 0x64, 0x43, 0xfe, 0x68, -+ 0xdf, 0x99, 0x47, 0x83, 0x03, 0xfb, 0x3b, 0xfb, -+ 0x80, 0xe0, 0x30, 0x3e, 0xeb, 0xd3, 0x29, 0x3e, -+ 0xe3, 0xbc, 0xdb, 0xdb, 0x1e, 0xde, 0xfc, 0x7e, -+ 0x8b, 0xcd, 0xa1, 0x36, 0xa1, 0x5c, 0x8c, 0xab, -+ 0x08, 0x69, 0xff, 0x52, 0xec, 0x5e, 0x26, 0x65, -+ 0x53, 0xb7, 0xb2, 0xa7, 0xfe, 0x87, 0xfd, 0x3d, -+ 0x7a, 0xda, 0x44, 0xc2, 0x42, 0x69, 0xbf, 0x7a, -+ 0x55, 0x27, 0xf2, 0xf0, 0xac, 0xf6, 0x85, 0x82, -+ 0xb7, 0x4c, 0x5a, 0x62, 0xe6, 0x0c, 0x05, 0x00, -+ 0x98, 0x1a, 0x49, 0xb8, 0x45, 0x93, 0x92, 0x44, -+ 0x9b, 0xb2, 0xf2, 0x04, 0xb6, 0x46, 0xef, 0x47, -+ 0xf3, 0xf0, 0xb1, 0xb6, 0x1d, 0xc3, 0x48, 0x6d, -+ 0x77, 0xd3, 0x0b, 0x45, 0x76, 0x92, 0xed, 0xb8, -+ 0xfb, 0xac, 0x01, 0x08, 0x38, 0x04, 0x88, 0x47 -+}; -+static const u8 enc_output061[] __initconst = { -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0xfe, 
0xac, 0x49, 0x55, 0x55, 0x4e, 0x80, 0x6f, -+ 0x3a, 0x19, 0x02, 0xe2, 0x44, 0x32, 0xc0, 0x8a -+}; -+static const u8 enc_assoc061[] __initconst = { -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 -+}; -+static const u8 enc_nonce061[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key061[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input062[] __initconst = { -+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc, -+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97, -+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04, -+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1 -+}; -+static const u8 enc_output062[] __initconst = { -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0x20, 0xa3, 0x79, 0x8d, 0xf1, 0x29, 0x2c, 0x59, -+ 0x72, 0xbf, 0x97, 0x41, 0xae, 0xc3, 0x8a, 0x19 -+}; -+static const u8 enc_assoc062[] __initconst = { -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f -+}; -+static const u8 enc_nonce062[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key062[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input063[] __initconst = { -+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc, -+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97, -+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 
0x04, -+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1, -+ 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81, -+ 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54, -+ 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a, -+ 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2 -+}; -+static const u8 enc_output063[] __initconst = { -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xc0, 0x3d, 0x9f, 0x67, 0x35, 0x4a, 0x97, 0xb2, -+ 0xf0, 0x74, 0xf7, 0x55, 0x15, 0x57, 0xe4, 0x9c -+}; -+static const u8 enc_assoc063[] __initconst = { -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f -+}; -+static const u8 enc_nonce063[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key063[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input064[] __initconst = { -+ 0xda, 0x92, 0xbf, 0xf7, 0x7f, 0x6b, 0xe8, 0xfc, -+ 0xaa, 0x2c, 0xfb, 0xfb, 0x9b, 0xbc, 0x01, 0x97, -+ 0x20, 0x66, 0xb8, 0x7c, 0xfc, 0x04, 0xc4, 0x04, -+ 0x7f, 0x1f, 0xcf, 0xc1, 0x14, 0x2c, 0xd6, 0xc1, -+ 0x1c, 0x43, 0x24, 0x24, 0xe1, 0x21, 0x03, 0x81, -+ 0x74, 0x32, 0x5e, 0xc9, 0x5e, 0xa3, 0x73, 0x54, -+ 0xf7, 0x96, 0x00, 0xad, 0x13, 0xa1, 0xd9, 0x9a, -+ 0xac, 0x48, 0x4d, 0x58, 0x01, 0x78, 0x02, 0xc2, -+ 0x85, 0x25, 0xbb, 0x3d, 0xbd, 0x96, 0x40, 0x85, -+ 0xaa, 0xd8, 0x0d, 0x0f, 0x53, 0x09, 0x7a, 0x7d, -+ 0x48, 0xb3, 0xa5, 0x9d, 0x19, 0xf3, 0xfa, 0xff, -+ 0x67, 0xe5, 
0xb6, 0x47, 0xba, 0x6c, 0x6d, 0xbb, -+ 0x64, 0x4d, 0x0d, 0xfb, 0x49, 0xb9, 0x10, 0xb8, -+ 0x0c, 0x0f, 0x4e, 0x49, 0xe2, 0x3c, 0xb7, 0x92, -+ 0x88, 0x2c, 0xf4, 0xba, 0x89, 0x6d, 0x12, 0x47, -+ 0x04, 0x53, 0xfe, 0xf7, 0xc7, 0xfb, 0x77, 0xb8 -+}; -+static const u8 enc_output064[] __initconst = { -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xc8, 0x6d, 0xa8, 0xdd, 0x65, 0x22, 0x86, 0xd5, -+ 0x02, 0x13, 0xd3, 0x28, 0xd6, 0x3e, 0x40, 0x06 -+}; -+static const u8 enc_assoc064[] __initconst = { -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f -+}; -+static const u8 enc_nonce064[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key064[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input065[] __initconst = { -+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c, -+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17, -+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84, -+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 
0xd6, 0x41 -+}; -+static const u8 enc_output065[] __initconst = { -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0xbe, 0xde, 0x90, 0x83, 0xce, 0xb3, 0x6d, 0xdf, -+ 0xe5, 0xfa, 0x81, 0x1f, 0x95, 0x47, 0x1c, 0x67 -+}; -+static const u8 enc_assoc065[] __initconst = { -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce065[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key065[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input066[] __initconst = { -+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c, -+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17, -+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84, -+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41, -+ 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01, -+ 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4, -+ 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a, -+ 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42 -+}; -+static const u8 enc_output066[] __initconst = { -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x30, 0x08, 0x74, 0xbb, 0x06, 0x92, 0xb6, 0x89, -+ 0xde, 0xad, 0x9a, 0xe1, 0x5b, 0x06, 0x73, 0x90 -+}; -+static const u8 enc_assoc066[] __initconst = { -+ 
0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce066[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key066[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input067[] __initconst = { -+ 0x5a, 0x92, 0xbf, 0x77, 0xff, 0x6b, 0xe8, 0x7c, -+ 0x2a, 0x2c, 0xfb, 0x7b, 0x1b, 0xbc, 0x01, 0x17, -+ 0xa0, 0x66, 0xb8, 0xfc, 0x7c, 0x04, 0xc4, 0x84, -+ 0xff, 0x1f, 0xcf, 0x41, 0x94, 0x2c, 0xd6, 0x41, -+ 0x9c, 0x43, 0x24, 0xa4, 0x61, 0x21, 0x03, 0x01, -+ 0xf4, 0x32, 0x5e, 0x49, 0xde, 0xa3, 0x73, 0xd4, -+ 0x77, 0x96, 0x00, 0x2d, 0x93, 0xa1, 0xd9, 0x1a, -+ 0x2c, 0x48, 0x4d, 0xd8, 0x81, 0x78, 0x02, 0x42, -+ 0x05, 0x25, 0xbb, 0xbd, 0x3d, 0x96, 0x40, 0x05, -+ 0x2a, 0xd8, 0x0d, 0x8f, 0xd3, 0x09, 0x7a, 0xfd, -+ 0xc8, 0xb3, 0xa5, 0x1d, 0x99, 0xf3, 0xfa, 0x7f, -+ 0xe7, 0xe5, 0xb6, 0xc7, 0x3a, 0x6c, 0x6d, 0x3b, -+ 0xe4, 0x4d, 0x0d, 0x7b, 0xc9, 0xb9, 0x10, 0x38, -+ 0x8c, 0x0f, 0x4e, 0xc9, 0x62, 0x3c, 0xb7, 0x12, -+ 0x08, 0x2c, 0xf4, 0x3a, 0x09, 0x6d, 0x12, 0xc7, -+ 0x84, 0x53, 0xfe, 0x77, 0x47, 0xfb, 0x77, 0x38 -+}; -+static const u8 enc_output067[] __initconst = { -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 
0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x99, 0xca, 0xd8, 0x5f, 0x45, 0xca, 0x40, 0x94, -+ 0x2d, 0x0d, 0x4d, 0x5e, 0x95, 0x0a, 0xde, 0x22 -+}; -+static const u8 enc_assoc067[] __initconst = { -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, -+ 0x7f, 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce067[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key067[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input068[] __initconst = { -+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c, -+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17, -+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84, -+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41 -+}; -+static const u8 enc_output068[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x8b, 0xbe, 0x14, 0x52, 0x72, 0xe7, 0xc2, 0xd9, -+ 0xa1, 0x89, 0x1a, 0x3a, 0xb0, 0x98, 0x3d, 0x9d -+}; -+static const u8 enc_assoc068[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce068[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key068[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 
0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input069[] __initconst = { -+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c, -+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17, -+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84, -+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41, -+ 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01, -+ 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4, -+ 0x08, 0x69, 0xff, 0xd2, 0x13, 0xa1, 0xd9, 0x1a, -+ 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42 -+}; -+static const u8 enc_output069[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x3b, 0x41, 0x86, 0x19, 0x13, 0xa8, 0xf6, 0xde, -+ 0x7f, 0x61, 0xe2, 0x25, 0x63, 0x1b, 0xc3, 0x82 -+}; -+static const u8 enc_assoc069[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce069[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key069[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input070[] __initconst = { -+ 0x25, 0x6d, 0x40, 0x88, 0x7f, 0x6b, 0xe8, 0x7c, -+ 0x55, 0xd3, 0x04, 0x84, 0x9b, 0xbc, 0x01, 0x17, -+ 0xdf, 0x99, 0x47, 0x03, 0xfc, 0x04, 0xc4, 0x84, -+ 0x80, 0xe0, 0x30, 0xbe, 0x14, 0x2c, 0xd6, 0x41, -+ 0xe3, 0xbc, 0xdb, 0x5b, 0xe1, 0x21, 0x03, 0x01, -+ 0x8b, 0xcd, 0xa1, 0xb6, 0x5e, 0xa3, 0x73, 0xd4, -+ 0x08, 0x69, 0xff, 0xd2, 0x13, 
0xa1, 0xd9, 0x1a, -+ 0x53, 0xb7, 0xb2, 0x27, 0x01, 0x78, 0x02, 0x42, -+ 0x7a, 0xda, 0x44, 0x42, 0xbd, 0x96, 0x40, 0x05, -+ 0x55, 0x27, 0xf2, 0x70, 0x53, 0x09, 0x7a, 0xfd, -+ 0xb7, 0x4c, 0x5a, 0xe2, 0x19, 0xf3, 0xfa, 0x7f, -+ 0x98, 0x1a, 0x49, 0x38, 0xba, 0x6c, 0x6d, 0x3b, -+ 0x9b, 0xb2, 0xf2, 0x84, 0x49, 0xb9, 0x10, 0x38, -+ 0xf3, 0xf0, 0xb1, 0x36, 0xe2, 0x3c, 0xb7, 0x12, -+ 0x77, 0xd3, 0x0b, 0xc5, 0x89, 0x6d, 0x12, 0xc7, -+ 0xfb, 0xac, 0x01, 0x88, 0xc7, 0xfb, 0x77, 0x38 -+}; -+static const u8 enc_output070[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x84, 0x28, 0xbc, 0xf0, 0x23, 0xec, 0x6b, 0xf3, -+ 0x1f, 0xd9, 0xef, 0xb2, 0x03, 0xff, 0x08, 0x71 -+}; -+static const u8 enc_assoc070[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce070[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key070[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ 
-+static const u8 enc_input071[] __initconst = { -+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83, -+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8, -+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b, -+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe -+}; -+static const u8 enc_output071[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0x13, 0x9f, 0xdf, 0x64, 0x74, 0xea, 0x24, 0xf5, -+ 0x49, 0xb0, 0x75, 0x82, 0x5f, 0x2c, 0x76, 0x20 -+}; -+static const u8 enc_assoc071[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 -+}; -+static const u8 enc_nonce071[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key071[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input072[] __initconst = { -+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83, -+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8, -+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b, -+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe, -+ 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe, -+ 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b, -+ 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5, -+ 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd -+}; -+static const u8 enc_output072[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 
0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xbb, 0xad, 0x8d, 0x86, 0x3b, 0x83, 0x5a, 0x8e, -+ 0x86, 0x64, 0xfd, 0x1d, 0x45, 0x66, 0xb6, 0xb4 -+}; -+static const u8 enc_assoc072[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 -+}; -+static const u8 enc_nonce072[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key072[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input073[] __initconst = { -+ 0xda, 0x92, 0xbf, 0x77, 0x80, 0x94, 0x17, 0x83, -+ 0xaa, 0x2c, 0xfb, 0x7b, 0x64, 0x43, 0xfe, 0xe8, -+ 0x20, 0x66, 0xb8, 0xfc, 0x03, 0xfb, 0x3b, 0x7b, -+ 0x7f, 0x1f, 0xcf, 0x41, 0xeb, 0xd3, 0x29, 0xbe, -+ 0x1c, 0x43, 0x24, 0xa4, 0x1e, 0xde, 0xfc, 0xfe, -+ 0x74, 0x32, 0x5e, 0x49, 0xa1, 0x5c, 0x8c, 0x2b, -+ 0xf7, 0x96, 0x00, 0x2d, 0xec, 0x5e, 0x26, 0xe5, -+ 0xac, 0x48, 0x4d, 0xd8, 0xfe, 0x87, 0xfd, 0xbd, -+ 0x85, 0x25, 0xbb, 0xbd, 0x42, 0x69, 0xbf, 0xfa, -+ 0xaa, 0xd8, 0x0d, 0x8f, 0xac, 0xf6, 0x85, 0x02, -+ 0x48, 0xb3, 0xa5, 0x1d, 0xe6, 0x0c, 0x05, 0x80, -+ 0x67, 0xe5, 0xb6, 0xc7, 0x45, 0x93, 0x92, 0xc4, -+ 0x64, 0x4d, 0x0d, 0x7b, 0xb6, 0x46, 0xef, 0xc7, -+ 0x0c, 0x0f, 0x4e, 0xc9, 0x1d, 0xc3, 0x48, 0xed, -+ 0x88, 0x2c, 0xf4, 0x3a, 0x76, 0x92, 0xed, 0x38, -+ 0x04, 0x53, 0xfe, 0x77, 0x38, 0x04, 0x88, 0xc7 -+}; -+static const u8 enc_output073[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 
0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0x42, 0xf2, 0x35, 0x42, 0x97, 0x84, 0x9a, 0x51, -+ 0x1d, 0x53, 0xe5, 0x57, 0x17, 0x72, 0xf7, 0x1f -+}; -+static const u8 enc_assoc073[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 -+}; -+static const u8 enc_nonce073[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0xee, 0x32, 0x00 -+}; -+static const u8 enc_key073[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - checking for int overflows */ -+static const u8 enc_input076[] __initconst = { -+ 0x1b, 0x99, 0x6f, 0x9a, 0x3c, 0xcc, 0x67, 0x85, -+ 0xde, 0x22, 0xff, 0x5b, 0x8a, 0xdd, 0x95, 0x02, -+ 0xce, 0x03, 0xa0, 0xfa, 0xf5, 0x99, 0x2a, 0x09, -+ 0x52, 0x2c, 0xdd, 0x12, 0x06, 0xd2, 0x20, 0xb8, -+ 0xf8, 0xbd, 0x07, 0xd1, 0xf1, 0xf5, 0xa1, 0xbd, -+ 0x9a, 0x71, 0xd1, 0x1c, 0x7f, 0x57, 0x9b, 0x85, -+ 0x58, 0x18, 0xc0, 0x8d, 0x4d, 0xe0, 0x36, 0x39, -+ 0x31, 0x83, 0xb7, 0xf5, 0x90, 0xb3, 0x35, 0xae, -+ 0xd8, 0xde, 0x5b, 0x57, 0xb1, 0x3c, 0x5f, 0xed, -+ 0xe2, 0x44, 0x1c, 0x3e, 0x18, 0x4a, 0xa9, 0xd4, -+ 0x6e, 0x61, 0x59, 0x85, 0x06, 0xb3, 0xe1, 0x1c, -+ 0x43, 0xc6, 0x2c, 0xbc, 0xac, 0xec, 0xed, 0x33, -+ 0x19, 0x08, 0x75, 0xb0, 0x12, 0x21, 0x8b, 0x19, -+ 0x30, 0xfb, 0x7c, 0x38, 0xec, 0x45, 0xac, 0x11, -+ 0xc3, 0x53, 0xd0, 0xcf, 0x93, 0x8d, 0xcc, 0xb9, -+ 0xef, 0xad, 0x8f, 0xed, 0xbe, 0x46, 0xda, 0xa5 
-+}; -+static const u8 enc_output076[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x4b, 0x0b, 0xda, 0x8a, 0xd0, 0x43, 0x83, 0x0d, -+ 0x83, 0x19, 0xab, 0x82, 0xc5, 0x0c, 0x76, 0x63 -+}; -+static const u8 enc_assoc076[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce076[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x07, 0xb4, 0xf0 -+}; -+static const u8 enc_key076[] __initconst = { -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 -+}; -+ -+/* wycheproof - checking for int overflows */ -+static const u8 enc_input077[] __initconst = { -+ 0x86, 0xcb, 0xac, 0xae, 0x4d, 0x3f, 0x74, 0xae, -+ 0x01, 0x21, 0x3e, 0x05, 0x51, 0xcc, 0x15, 0x16, 
-+ 0x0e, 0xa1, 0xbe, 0x84, 0x08, 0xe3, 0xd5, 0xd7, -+ 0x4f, 0x01, 0x46, 0x49, 0x95, 0xa6, 0x9e, 0x61, -+ 0x76, 0xcb, 0x9e, 0x02, 0xb2, 0x24, 0x7e, 0xd2, -+ 0x99, 0x89, 0x2f, 0x91, 0x82, 0xa4, 0x5c, 0xaf, -+ 0x4c, 0x69, 0x40, 0x56, 0x11, 0x76, 0x6e, 0xdf, -+ 0xaf, 0xdc, 0x28, 0x55, 0x19, 0xea, 0x30, 0x48, -+ 0x0c, 0x44, 0xf0, 0x5e, 0x78, 0x1e, 0xac, 0xf8, -+ 0xfc, 0xec, 0xc7, 0x09, 0x0a, 0xbb, 0x28, 0xfa, -+ 0x5f, 0xd5, 0x85, 0xac, 0x8c, 0xda, 0x7e, 0x87, -+ 0x72, 0xe5, 0x94, 0xe4, 0xce, 0x6c, 0x88, 0x32, -+ 0x81, 0x93, 0x2e, 0x0f, 0x89, 0xf8, 0x77, 0xa1, -+ 0xf0, 0x4d, 0x9c, 0x32, 0xb0, 0x6c, 0xf9, 0x0b, -+ 0x0e, 0x76, 0x2b, 0x43, 0x0c, 0x4d, 0x51, 0x7c, -+ 0x97, 0x10, 0x70, 0x68, 0xf4, 0x98, 0xef, 0x7f -+}; -+static const u8 enc_output077[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x4b, 0xc9, 0x8f, 0x72, 0xc4, 0x94, 0xc2, 0xa4, -+ 0x3c, 0x2b, 0x15, 0xa1, 0x04, 0x3f, 0x1c, 0xfa -+}; -+static const u8 enc_assoc077[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce077[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x20, 0xfb, 0x66 -+}; -+static const u8 enc_key077[] __initconst = { -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 -+}; -+ -+/* wycheproof - checking for int overflows */ -+static const u8 enc_input078[] __initconst = { -+ 0xfa, 0xb1, 0xcd, 0xdf, 0x4f, 0xe1, 0x98, 0xef, -+ 0x63, 0xad, 0xd8, 0x81, 0xd6, 0xea, 0xd6, 0xc5, -+ 0x76, 0x37, 0xbb, 0xe9, 0x20, 0x18, 0xca, 0x7c, -+ 0x0b, 0x96, 0xfb, 0xa0, 0x87, 0x1e, 0x93, 0x2d, -+ 0xb1, 0xfb, 0xf9, 0x07, 0x61, 0xbe, 0x25, 0xdf, -+ 0x8d, 0xfa, 0xf9, 0x31, 0xce, 0x57, 0x57, 0xe6, -+ 0x17, 0xb3, 0xd7, 0xa9, 0xf0, 0xbf, 0x0f, 0xfe, -+ 0x5d, 0x59, 0x1a, 0x33, 0xc1, 0x43, 0xb8, 0xf5, -+ 0x3f, 0xd0, 0xb5, 0xa1, 0x96, 0x09, 0xfd, 0x62, -+ 0xe5, 0xc2, 0x51, 0xa4, 0x28, 0x1a, 0x20, 0x0c, -+ 0xfd, 0xc3, 0x4f, 0x28, 0x17, 0x10, 0x40, 0x6f, -+ 0x4e, 0x37, 0x62, 0x54, 0x46, 0xff, 0x6e, 0xf2, -+ 0x24, 0x91, 0x3d, 0xeb, 0x0d, 0x89, 0xaf, 0x33, -+ 0x71, 0x28, 0xe3, 0xd1, 0x55, 0xd1, 0x6d, 0x3e, -+ 0xc3, 0x24, 0x60, 0x41, 0x43, 0x21, 0x43, 0xe9, -+ 0xab, 0x3a, 0x6d, 0x2c, 0xcc, 0x2f, 0x4d, 0x62 -+}; -+static const u8 enc_output078[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xf7, 0xe9, 0xe1, 0x51, 0xb0, 0x25, 0x33, 0xc7, -+ 0x46, 0x58, 0xbf, 0xc7, 0x73, 0x7c, 0x68, 0x0d -+}; -+static const u8 enc_assoc078[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce078[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0xbb, 0x90 -+}; -+static const u8 enc_key078[] __initconst = { -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 -+}; -+ -+/* wycheproof - checking for int overflows */ -+static const u8 enc_input079[] __initconst = { -+ 0x22, 0x72, 0x02, 0xbe, 0x7f, 0x35, 0x15, 0xe9, -+ 0xd1, 0xc0, 0x2e, 0xea, 0x2f, 0x19, 0x50, 0xb6, -+ 0x48, 0x1b, 0x04, 0x8a, 0x4c, 0x91, 0x50, 0x6c, -+ 0xb4, 0x0d, 0x50, 0x4e, 0x6c, 0x94, 0x9f, 0x82, -+ 0xd1, 0x97, 0xc2, 0x5a, 0xd1, 0x7d, 0xc7, 0x21, -+ 0x65, 0x11, 0x25, 0x78, 0x2a, 0xc7, 0xa7, 0x12, -+ 0x47, 0xfe, 0xae, 0xf3, 0x2f, 0x1f, 0x25, 0x0c, -+ 0xe4, 0xbb, 0x8f, 0x79, 0xac, 0xaa, 0x17, 0x9d, -+ 0x45, 0xa7, 0xb0, 0x54, 0x5f, 0x09, 0x24, 0x32, -+ 0x5e, 0xfa, 0x87, 0xd5, 0xe4, 0x41, 0xd2, 0x84, -+ 0x78, 0xc6, 0x1f, 0x22, 0x23, 0xee, 0x67, 0xc3, -+ 0xb4, 0x1f, 0x43, 0x94, 0x53, 0x5e, 0x2a, 0x24, -+ 0x36, 0x9a, 0x2e, 0x16, 0x61, 0x3c, 0x45, 0x94, -+ 
0x90, 0xc1, 0x4f, 0xb1, 0xd7, 0x55, 0xfe, 0x53, -+ 0xfb, 0xe1, 0xee, 0x45, 0xb1, 0xb2, 0x1f, 0x71, -+ 0x62, 0xe2, 0xfc, 0xaa, 0x74, 0x2a, 0xbe, 0xfd -+}; -+static const u8 enc_output079[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x79, 0x5b, 0xcf, 0xf6, 0x47, 0xc5, 0x53, 0xc2, -+ 0xe4, 0xeb, 0x6e, 0x0e, 0xaf, 0xd9, 0xe0, 0x4e -+}; -+static const u8 enc_assoc079[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce079[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x48, 0x4a -+}; -+static const u8 enc_key079[] __initconst = { -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 -+}; -+ -+/* wycheproof - checking for int overflows */ 
-+static const u8 enc_input080[] __initconst = { -+ 0xfa, 0xe5, 0x83, 0x45, 0xc1, 0x6c, 0xb0, 0xf5, -+ 0xcc, 0x53, 0x7f, 0x2b, 0x1b, 0x34, 0x69, 0xc9, -+ 0x69, 0x46, 0x3b, 0x3e, 0xa7, 0x1b, 0xcf, 0x6b, -+ 0x98, 0xd6, 0x69, 0xa8, 0xe6, 0x0e, 0x04, 0xfc, -+ 0x08, 0xd5, 0xfd, 0x06, 0x9c, 0x36, 0x26, 0x38, -+ 0xe3, 0x40, 0x0e, 0xf4, 0xcb, 0x24, 0x2e, 0x27, -+ 0xe2, 0x24, 0x5e, 0x68, 0xcb, 0x9e, 0xc5, 0x83, -+ 0xda, 0x53, 0x40, 0xb1, 0x2e, 0xdf, 0x42, 0x3b, -+ 0x73, 0x26, 0xad, 0x20, 0xfe, 0xeb, 0x57, 0xda, -+ 0xca, 0x2e, 0x04, 0x67, 0xa3, 0x28, 0x99, 0xb4, -+ 0x2d, 0xf8, 0xe5, 0x6d, 0x84, 0xe0, 0x06, 0xbc, -+ 0x8a, 0x7a, 0xcc, 0x73, 0x1e, 0x7c, 0x1f, 0x6b, -+ 0xec, 0xb5, 0x71, 0x9f, 0x70, 0x77, 0xf0, 0xd4, -+ 0xf4, 0xc6, 0x1a, 0xb1, 0x1e, 0xba, 0xc1, 0x00, -+ 0x18, 0x01, 0xce, 0x33, 0xc4, 0xe4, 0xa7, 0x7d, -+ 0x83, 0x1d, 0x3c, 0xe3, 0x4e, 0x84, 0x10, 0xe1 -+}; -+static const u8 enc_output080[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x19, 0x46, 0xd6, 0x53, 0x96, 0x0f, 0x94, 0x7a, -+ 0x74, 0xd3, 0xe8, 0x09, 0x3c, 0xf4, 0x85, 0x02 -+}; -+static const u8 enc_assoc080[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce080[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x2f, 0x40 -+}; -+static const u8 enc_key080[] __initconst = { -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 -+}; -+ -+/* wycheproof - checking for int overflows */ -+static const u8 enc_input081[] __initconst = { -+ 0xeb, 0xb2, 0x16, 0xdd, 0xd7, 0xca, 0x70, 0x92, -+ 0x15, 0xf5, 0x03, 0xdf, 0x9c, 0xe6, 0x3c, 0x5c, -+ 0xd2, 0x19, 0x4e, 0x7d, 0x90, 0x99, 0xe8, 0xa9, -+ 0x0b, 0x2a, 0xfa, 0xad, 0x5e, 0xba, 0x35, 0x06, -+ 0x99, 0x25, 0xa6, 0x03, 0xfd, 0xbc, 0x34, 0x1a, -+ 0xae, 0xd4, 0x15, 0x05, 0xb1, 0x09, 0x41, 0xfa, -+ 0x38, 0x56, 0xa7, 0xe2, 0x47, 0xb1, 0x04, 0x07, -+ 0x09, 0x74, 0x6c, 0xfc, 0x20, 0x96, 0xca, 0xa6, -+ 0x31, 0xb2, 0xff, 0xf4, 0x1c, 0x25, 0x05, 0x06, -+ 0xd8, 0x89, 0xc1, 0xc9, 0x06, 0x71, 0xad, 0xe8, -+ 0x53, 0xee, 0x63, 0x94, 0xc1, 0x91, 0x92, 0xa5, -+ 0xcf, 0x37, 0x10, 0xd1, 0x07, 0x30, 0x99, 0xe5, -+ 0xbc, 0x94, 0x65, 0x82, 0xfc, 0x0f, 0xab, 0x9f, -+ 0x54, 0x3c, 0x71, 0x6a, 0xe2, 0x48, 0x6a, 0x86, -+ 0x83, 0xfd, 0xca, 0x39, 0xd2, 0xe1, 0x4f, 0x23, -+ 0xd0, 0x0a, 0x58, 0x26, 0x64, 0xf4, 0xec, 0xb1 -+}; -+static const u8 enc_output081[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x36, 0xc3, 0x00, 0x29, 0x85, 0xdd, 0x21, 0xba, -+ 0xf8, 0x95, 0xd6, 0x33, 0x57, 0x3f, 0x12, 0xc0 -+}; -+static const u8 enc_assoc081[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce081[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, 0x93, 0x35 -+}; -+static const u8 enc_key081[] __initconst = { -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 -+}; -+ -+/* wycheproof - checking for int overflows */ -+static const u8 enc_input082[] __initconst = { -+ 0x40, 0x8a, 0xe6, 0xef, 0x1c, 0x7e, 0xf0, 0xfb, -+ 0x2c, 0x2d, 0x61, 0x08, 0x16, 0xfc, 0x78, 0x49, -+ 0xef, 0xa5, 0x8f, 0x78, 0x27, 0x3f, 0x5f, 0x16, -+ 0x6e, 0xa6, 0x5f, 0x81, 0xb5, 0x75, 0x74, 0x7d, -+ 0x03, 0x5b, 0x30, 0x40, 0xfe, 0xde, 0x1e, 0xb9, -+ 0x45, 0x97, 0x88, 0x66, 0x97, 0x88, 0x40, 0x8e, -+ 0x00, 0x41, 0x3b, 0x3e, 0x37, 0x6d, 0x15, 0x2d, -+ 0x20, 0x4a, 0xa2, 0xb7, 0xa8, 0x35, 0x58, 0xfc, -+ 0xd4, 0x8a, 0x0e, 0xf7, 0xa2, 0x6b, 0x1c, 0xd6, -+ 0xd3, 0x5d, 0x23, 0xb3, 0xf5, 0xdf, 0xe0, 0xca, -+ 
0x77, 0xa4, 0xce, 0x32, 0xb9, 0x4a, 0xbf, 0x83, -+ 0xda, 0x2a, 0xef, 0xca, 0xf0, 0x68, 0x38, 0x08, -+ 0x79, 0xe8, 0x9f, 0xb0, 0xa3, 0x82, 0x95, 0x95, -+ 0xcf, 0x44, 0xc3, 0x85, 0x2a, 0xe2, 0xcc, 0x66, -+ 0x2b, 0x68, 0x9f, 0x93, 0x55, 0xd9, 0xc1, 0x83, -+ 0x80, 0x1f, 0x6a, 0xcc, 0x31, 0x3f, 0x89, 0x07 -+}; -+static const u8 enc_output082[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x65, 0x14, 0x51, 0x8e, 0x0a, 0x26, 0x41, 0x42, -+ 0xe0, 0xb7, 0x35, 0x1f, 0x96, 0x7f, 0xc2, 0xae -+}; -+static const u8 enc_assoc082[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce082[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0xf7, 0xd5 -+}; -+static const u8 enc_key082[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 
0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - checking for int overflows */ -+static const u8 enc_input083[] __initconst = { -+ 0x0a, 0x0a, 0x24, 0x49, 0x9b, 0xca, 0xde, 0x58, -+ 0xcf, 0x15, 0x76, 0xc3, 0x12, 0xac, 0xa9, 0x84, -+ 0x71, 0x8c, 0xb4, 0xcc, 0x7e, 0x01, 0x53, 0xf5, -+ 0xa9, 0x01, 0x58, 0x10, 0x85, 0x96, 0x44, 0xdf, -+ 0xc0, 0x21, 0x17, 0x4e, 0x0b, 0x06, 0x0a, 0x39, -+ 0x74, 0x48, 0xde, 0x8b, 0x48, 0x4a, 0x86, 0x03, -+ 0xbe, 0x68, 0x0a, 0x69, 0x34, 0xc0, 0x90, 0x6f, -+ 0x30, 0xdd, 0x17, 0xea, 0xe2, 0xd4, 0xc5, 0xfa, -+ 0xa7, 0x77, 0xf8, 0xca, 0x53, 0x37, 0x0e, 0x08, -+ 0x33, 0x1b, 0x88, 0xc3, 0x42, 0xba, 0xc9, 0x59, -+ 0x78, 0x7b, 0xbb, 0x33, 0x93, 0x0e, 0x3b, 0x56, -+ 0xbe, 0x86, 0xda, 0x7f, 0x2a, 0x6e, 0xb1, 0xf9, -+ 0x40, 0x89, 0xd1, 0xd1, 0x81, 0x07, 0x4d, 0x43, -+ 0x02, 0xf8, 0xe0, 0x55, 0x2d, 0x0d, 0xe1, 0xfa, -+ 0xb3, 0x06, 0xa2, 0x1b, 0x42, 0xd4, 0xc3, 0xba, -+ 0x6e, 0x6f, 0x0c, 0xbc, 0xc8, 0x1e, 0x87, 0x7a -+}; -+static const u8 enc_output083[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x4c, 0x19, 0x4d, 0xa6, 0xa9, 0x9f, 0xd6, 0x5b, -+ 0x40, 0xe9, 0xca, 0xd7, 0x98, 0xf4, 0x4b, 0x19 -+}; 
-+static const u8 enc_assoc083[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce083[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x3d, 0xfc, 0xe4 -+}; -+static const u8 enc_key083[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - checking for int overflows */ -+static const u8 enc_input084[] __initconst = { -+ 0x4a, 0x0a, 0xaf, 0xf8, 0x49, 0x47, 0x29, 0x18, -+ 0x86, 0x91, 0x70, 0x13, 0x40, 0xf3, 0xce, 0x2b, -+ 0x8a, 0x78, 0xee, 0xd3, 0xa0, 0xf0, 0x65, 0x99, -+ 0x4b, 0x72, 0x48, 0x4e, 0x79, 0x91, 0xd2, 0x5c, -+ 0x29, 0xaa, 0x07, 0x5e, 0xb1, 0xfc, 0x16, 0xde, -+ 0x93, 0xfe, 0x06, 0x90, 0x58, 0x11, 0x2a, 0xb2, -+ 0x84, 0xa3, 0xed, 0x18, 0x78, 0x03, 0x26, 0xd1, -+ 0x25, 0x8a, 0x47, 0x22, 0x2f, 0xa6, 0x33, 0xd8, -+ 0xb2, 0x9f, 0x3b, 0xd9, 0x15, 0x0b, 0x23, 0x9b, -+ 0x15, 0x46, 0xc2, 0xbb, 0x9b, 0x9f, 0x41, 0x0f, -+ 0xeb, 0xea, 0xd3, 0x96, 0x00, 0x0e, 0xe4, 0x77, -+ 0x70, 0x15, 0x32, 0xc3, 0xd0, 0xf5, 0xfb, 0xf8, -+ 0x95, 0xd2, 0x80, 0x19, 0x6d, 0x2f, 0x73, 0x7c, -+ 0x5e, 0x9f, 0xec, 0x50, 0xd9, 0x2b, 0xb0, 0xdf, -+ 0x5d, 0x7e, 0x51, 0x3b, 0xe5, 0xb8, 0xea, 0x97, -+ 0x13, 0x10, 0xd5, 0xbf, 0x16, 0xba, 0x7a, 0xee -+}; -+static const u8 enc_output084[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xc8, 0xae, 0x77, 0x88, 0xcd, 0x28, 0x74, 0xab, -+ 0xc1, 0x38, 0x54, 0x1e, 0x11, 0xfd, 0x05, 0x87 -+}; -+static const u8 enc_assoc084[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce084[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x01, 0x84, 0x86, 0xa8 -+}; -+static const u8 enc_key084[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - checking for int overflows */ -+static const u8 enc_input085[] __initconst = { -+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0x78, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, -+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, -+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0x9c, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, -+ 
0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, -+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0xd4, 0xd2, 0x06, 0x61, 0x6f, 0x92, 0x93, 0xf6, -+ 0x5b, 0x45, 0xdb, 0xbc, 0x74, 0xe7, 0xc2, 0xed, -+ 0xfb, 0xcb, 0xbf, 0x1c, 0xfb, 0x67, 0x9b, 0xb7, -+ 0x39, 0xa5, 0x86, 0x2d, 0xe2, 0xbc, 0xb9, 0x37, -+ 0xf7, 0x4d, 0x5b, 0xf8, 0x67, 0x1c, 0x5a, 0x8a, -+ 0x50, 0x92, 0xf6, 0x1d, 0x54, 0xc9, 0xaa, 0x5b -+}; -+static const u8 enc_output085[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x93, 0x3a, 0x51, 0x63, 0xc7, 0xf6, 0x23, 0x68, -+ 0x32, 0x7b, 0x3f, 0xbc, 0x10, 0x36, 0xc9, 0x43 -+}; -+static const u8 enc_assoc085[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce085[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; 
-+static const u8 enc_key085[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input093[] __initconst = { -+ 0x00, 0x52, 0x35, 0xd2, 0xa9, 0x19, 0xf2, 0x8d, -+ 0x3d, 0xb7, 0x66, 0x4a, 0x34, 0xae, 0x6b, 0x44, -+ 0x4d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, -+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, -+ 0x5b, 0x8b, 0x94, 0x50, 0x9e, 0x2b, 0x74, 0xa3, -+ 0x6d, 0x34, 0x6e, 0x33, 0xd5, 0x72, 0x65, 0x9b, -+ 0xa9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, -+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, -+ 0x83, 0xdc, 0xe9, 0xf3, 0x07, 0x3e, 0xfa, 0xdb, -+ 0x7d, 0x23, 0xb8, 0x7a, 0xce, 0x35, 0x16, 0x8c -+}; -+static const u8 enc_output093[] __initconst = { -+ 0x00, 0x39, 0xe2, 0xfd, 0x2f, 0xd3, 0x12, 0x14, -+ 0x9e, 0x98, 0x98, 0x80, 0x88, 0x48, 0x13, 0xe7, -+ 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96, -+ 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00, -+ 0xca, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x3b, 0x0e, 0x86, 0x9a, 0xaa, 0x8e, 0xa4, 0x96, -+ 0x32, 0xff, 0xff, 0x37, 0xb9, 0xe8, 0xce, 0x00, -+ 0xa5, 0x19, 0xac, 0x1a, 0x35, 0xb4, 0xa5, 0x77, -+ 0x87, 0x51, 0x0a, 0xf7, 0x8d, 0x8d, 0x20, 0x0a -+}; -+static const u8 enc_assoc093[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce093[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key093[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* 
wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input094[] __initconst = { -+ 0xd3, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0xe5, 0xda, 0x78, 0x76, 0x6f, 0xa1, 0x92, 0x90, -+ 0xc0, 0x31, 0xf7, 0x52, 0x08, 0x50, 0x67, 0x45, -+ 0xae, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0x49, 0x6d, 0xde, 0xb0, 0x55, 0x09, 0xc6, 0xef, -+ 0xff, 0xab, 0x75, 0xeb, 0x2d, 0xf4, 0xab, 0x09, -+ 0x76, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0x01, 0x49, 0xef, 0x50, 0x4b, 0x71, 0xb1, 0x20, -+ 0xca, 0x4f, 0xf3, 0x95, 0x19, 0xc2, 0xc2, 0x10 -+}; -+static const u8 enc_output094[] __initconst = { -+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x62, 0x18, 0xb2, 0x7f, 0x83, 0xb8, 0xb4, 0x66, -+ 0x02, 0xf6, 0xe1, 0xd8, 0x34, 0x20, 0x7b, 0x02, -+ 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29, -+ 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02, -+ 0xce, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x2a, 0x64, 0x16, 0xce, 0xdb, 0x1c, 0xdd, 0x29, -+ 0x6e, 0xf5, 0xd7, 0xd6, 0x92, 0xda, 0xff, 0x02, -+ 0x30, 0x2f, 0xe8, 0x2a, 0xb0, 0xa0, 0x9a, 0xf6, -+ 0x44, 0x00, 0xd0, 0x15, 0xae, 0x83, 0xd9, 0xcc -+}; -+static const u8 enc_assoc094[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce094[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key094[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ 
-+static const u8 enc_input095[] __initconst = { -+ 0xe9, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0x6d, 0xf1, 0x39, 0x4e, 0xdc, 0x53, 0x9b, 0x5b, -+ 0x3a, 0x09, 0x57, 0xbe, 0x0f, 0xb8, 0x59, 0x46, -+ 0x80, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0xd1, 0x76, 0x9f, 0xe8, 0x06, 0xbb, 0xfe, 0xb6, -+ 0xf5, 0x90, 0x95, 0x0f, 0x2e, 0xac, 0x9e, 0x0a, -+ 0x58, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0x99, 0x52, 0xae, 0x08, 0x18, 0xc3, 0x89, 0x79, -+ 0xc0, 0x74, 0x13, 0x71, 0x1a, 0x9a, 0xf7, 0x13 -+}; -+static const u8 enc_output095[] __initconst = { -+ 0xe9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xea, 0x33, 0xf3, 0x47, 0x30, 0x4a, 0xbd, 0xad, -+ 0xf8, 0xce, 0x41, 0x34, 0x33, 0xc8, 0x45, 0x01, -+ 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70, -+ 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01, -+ 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xb2, 0x7f, 0x57, 0x96, 0x88, 0xae, 0xe5, 0x70, -+ 0x64, 0xce, 0x37, 0x32, 0x91, 0x82, 0xca, 0x01, -+ 0x98, 0xa7, 0xe8, 0x36, 0xe0, 0xee, 0x4d, 0x02, -+ 0x35, 0x00, 0xd0, 0x55, 0x7e, 0xc2, 0xcb, 0xe0 -+}; -+static const u8 enc_assoc095[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce095[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key095[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input096[] __initconst = { -+ 0xff, 0x94, 
0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0x64, 0xf9, 0x0f, 0x5b, 0x26, 0x92, 0xb8, 0x60, -+ 0xd4, 0x59, 0x6f, 0xf4, 0xb3, 0x40, 0x2c, 0x5c, -+ 0x00, 0xb9, 0xbb, 0x53, 0x70, 0x7a, 0xa6, 0x67, -+ 0xd3, 0x56, 0xfe, 0x50, 0xc7, 0x19, 0x96, 0x94, -+ 0x03, 0x35, 0x61, 0xe7, 0xca, 0xca, 0x6d, 0x94, -+ 0x1d, 0xc3, 0xcd, 0x69, 0x14, 0xad, 0x69, 0x04 -+}; -+static const u8 enc_output096[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xe3, 0x3b, 0xc5, 0x52, 0xca, 0x8b, 0x9e, 0x96, -+ 0x16, 0x9e, 0x79, 0x7e, 0x8f, 0x30, 0x30, 0x1b, -+ 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52, -+ 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f, -+ 0x60, 0x3c, 0xa9, 0x99, 0x44, 0xdf, 0x76, 0x52, -+ 0x8c, 0x9d, 0x6f, 0x54, 0xab, 0x83, 0x3d, 0x0f, -+ 0x6a, 0xb8, 0xdc, 0xe2, 0xc5, 0x9d, 0xa4, 0x73, -+ 0x71, 0x30, 0xb0, 0x25, 0x2f, 0x68, 0xa8, 0xd8 -+}; -+static const u8 enc_assoc096[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce096[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key096[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input097[] __initconst = { -+ 0x68, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0xb0, 0x8f, 0x25, 0x67, 0x5b, 0x9b, 0xcb, 0xf6, -+ 0xe3, 0x84, 0x07, 0xde, 0x2e, 0xc7, 0x5a, 0x47, -+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0x2d, 0x2a, 0xf7, 0xcd, 0x6b, 0x08, 0x05, 0x01, -+ 0xd3, 0x1b, 0xa5, 0x4f, 0xb2, 0xeb, 0x75, 0x96, -+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 
0x88, 0x22, 0x27, 0x73, -+ 0x65, 0x0e, 0xc6, 0x2d, 0x75, 0x70, 0x72, 0xce, -+ 0xe6, 0xff, 0x23, 0x31, 0x86, 0xdd, 0x1c, 0x8f -+}; -+static const u8 enc_output097[] __initconst = { -+ 0x68, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x37, 0x4d, 0xef, 0x6e, 0xb7, 0x82, 0xed, 0x00, -+ 0x21, 0x43, 0x11, 0x54, 0x12, 0xb7, 0x46, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7, -+ 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x4e, 0x23, 0x3f, 0xb3, 0xe5, 0x1d, 0x1e, 0xc7, -+ 0x42, 0x45, 0x07, 0x72, 0x0d, 0xc5, 0x21, 0x9d, -+ 0x04, 0x4d, 0xea, 0x60, 0x88, 0x80, 0x41, 0x2b, -+ 0xfd, 0xff, 0xcf, 0x35, 0x57, 0x9e, 0x9b, 0x26 -+}; -+static const u8 enc_assoc097[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce097[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key097[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input098[] __initconst = { -+ 0x6d, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0xa1, 0x61, 0xb5, 0xab, 0x04, 0x09, 0x00, 0x62, -+ 0x9e, 0xfe, 0xff, 0x78, 0xd7, 0xd8, 0x6b, 0x45, -+ 0x9f, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0xc6, 0xf8, 0x07, 0x8c, 0xc8, 0xef, 0x12, 0xa0, -+ 0xff, 0x65, 0x7d, 0x6d, 0x08, 0xdb, 0x10, 0xb8, -+ 0x47, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0x8e, 0xdc, 0x36, 0x6c, 0xd6, 0x97, 
0x65, 0x6f, -+ 0xca, 0x81, 0xfb, 0x13, 0x3c, 0xed, 0x79, 0xa1 -+}; -+static const u8 enc_output098[] __initconst = { -+ 0x6d, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x26, 0xa3, 0x7f, 0xa2, 0xe8, 0x10, 0x26, 0x94, -+ 0x5c, 0x39, 0xe9, 0xf2, 0xeb, 0xa8, 0x77, 0x02, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66, -+ 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xa5, 0xf1, 0xcf, 0xf2, 0x46, 0xfa, 0x09, 0x66, -+ 0x6e, 0x3b, 0xdf, 0x50, 0xb7, 0xf5, 0x44, 0xb3, -+ 0x1e, 0x6b, 0xea, 0x63, 0x14, 0x54, 0x2e, 0x2e, -+ 0xf9, 0xff, 0xcf, 0x45, 0x0b, 0x2e, 0x98, 0x2b -+}; -+static const u8 enc_assoc098[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce098[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key098[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input099[] __initconst = { -+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0xfc, 0x01, 0xb8, 0x91, 0xe5, 0xf0, 0xf9, 0x12, -+ 0x8d, 0x7d, 0x1c, 0x57, 0x91, 0x92, 0xb6, 0x98, -+ 0x63, 0x41, 0x44, 0x15, 0xb6, 0x99, 0x68, 0x95, -+ 0x9a, 0x72, 0x91, 0xb7, 0xa5, 0xaf, 0x13, 0x48, -+ 0x60, 0xcd, 0x9e, 0xa1, 0x0c, 0x29, 0xa3, 0x66, -+ 0x54, 0xe7, 0xa2, 0x8e, 0x76, 0x1b, 0xec, 0xd8 -+}; -+static const u8 enc_output099[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x7b, 0xc3, 0x72, 0x98, 0x09, 0xe9, 0xdf, 
0xe4, -+ 0x4f, 0xba, 0x0a, 0xdd, 0xad, 0xe2, 0xaa, 0xdf, -+ 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0, -+ 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3, -+ 0x03, 0xc4, 0x56, 0xdf, 0x82, 0x3c, 0xb8, 0xa0, -+ 0xc5, 0xb9, 0x00, 0xb3, 0xc9, 0x35, 0xb8, 0xd3, -+ 0xed, 0x20, 0x17, 0xc8, 0xdb, 0xa4, 0x77, 0x56, -+ 0x29, 0x04, 0x9d, 0x78, 0x6e, 0x3b, 0xce, 0xb1 -+}; -+static const u8 enc_assoc099[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce099[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key099[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input100[] __initconst = { -+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0x6b, 0x6d, 0xc9, 0xd2, 0x1a, 0x81, 0x9e, 0x70, -+ 0xb5, 0x77, 0xf4, 0x41, 0x37, 0xd3, 0xd6, 0xbd, -+ 0x13, 0x35, 0xf5, 0xeb, 0x44, 0x49, 0x40, 0x77, -+ 0xb2, 0x64, 0x49, 0xa5, 0x4b, 0x6c, 0x7c, 0x75, -+ 0x10, 0xb9, 0x2f, 0x5f, 0xfe, 0xf9, 0x8b, 0x84, -+ 0x7c, 0xf1, 0x7a, 0x9c, 0x98, 0xd8, 0x83, 0xe5 -+}; -+static const u8 enc_output100[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xec, 0xaf, 0x03, 0xdb, 0xf6, 0x98, 0xb8, 0x86, -+ 0x77, 0xb0, 0xe2, 0xcb, 0x0b, 0xa3, 0xca, 0xfa, -+ 0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42, -+ 0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee, -+ 0x73, 0xb0, 0xe7, 0x21, 0x70, 0xec, 0x90, 0x42, -+ 0xed, 0xaf, 0xd8, 0xa1, 0x27, 0xf6, 0xd7, 0xee, -+ 0x07, 0x3f, 0x17, 0xcb, 0x67, 0x78, 0x64, 0x59, -+ 0x25, 0x04, 0x9d, 0x88, 0x22, 0xcb, 0xca, 0xb6 -+}; -+static const u8 enc_assoc100[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 
enc_nonce100[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key100[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input101[] __initconst = { -+ 0xff, 0xcb, 0x2b, 0x11, 0x06, 0xf8, 0x23, 0x4c, -+ 0x5e, 0x99, 0xd4, 0xdb, 0x4c, 0x70, 0x48, 0xde, -+ 0x32, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, -+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, -+ 0x16, 0xe9, 0x88, 0x4a, 0x11, 0x4f, 0x0e, 0x92, -+ 0x66, 0xce, 0xa3, 0x88, 0x5f, 0xe3, 0x6b, 0x9f, -+ 0xd6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, -+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, -+ 0xce, 0xbe, 0xf5, 0xe9, 0x88, 0x5a, 0x80, 0xea, -+ 0x76, 0xd9, 0x75, 0xc1, 0x44, 0xa4, 0x18, 0x88 -+}; -+static const u8 enc_output101[] __initconst = { -+ 0xff, 0xa0, 0xfc, 0x3e, 0x80, 0x32, 0xc3, 0xd5, -+ 0xfd, 0xb6, 0x2a, 0x11, 0xf0, 0x96, 0x30, 0x7d, -+ 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7, -+ 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04, -+ 0xb5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x76, 0x6c, 0x9a, 0x80, 0x25, 0xea, 0xde, 0xa7, -+ 0x39, 0x05, 0x32, 0x8c, 0x33, 0x79, 0xc0, 0x04, -+ 0x8b, 0x9b, 0xb4, 0xb4, 0x86, 0x12, 0x89, 0x65, -+ 0x8c, 0x69, 0x6a, 0x83, 0x40, 0x15, 0x04, 0x05 -+}; -+static const u8 enc_assoc101[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce101[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key101[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 
0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input102[] __initconst = { -+ 0x6f, 0x9e, 0x70, 0xed, 0x3b, 0x8b, 0xac, 0xa0, -+ 0x26, 0xe4, 0x6a, 0x5a, 0x09, 0x43, 0x15, 0x8d, -+ 0x21, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, -+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, -+ 0x0c, 0x61, 0x2c, 0x5e, 0x8d, 0x89, 0xa8, 0x73, -+ 0xdb, 0xca, 0xad, 0x5b, 0x73, 0x46, 0x42, 0x9b, -+ 0xc5, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, -+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, -+ 0xd4, 0x36, 0x51, 0xfd, 0x14, 0x9c, 0x26, 0x0b, -+ 0xcb, 0xdd, 0x7b, 0x12, 0x68, 0x01, 0x31, 0x8c -+}; -+static const u8 enc_output102[] __initconst = { -+ 0x6f, 0xf5, 0xa7, 0xc2, 0xbd, 0x41, 0x4c, 0x39, -+ 0x85, 0xcb, 0x94, 0x90, 0xb5, 0xa5, 0x6d, 0x2e, -+ 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46, -+ 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00, -+ 0xa6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x6c, 0xe4, 0x3e, 0x94, 0xb9, 0x2c, 0x78, 0x46, -+ 0x84, 0x01, 0x3c, 0x5f, 0x1f, 0xdc, 0xe9, 0x00, -+ 0x8b, 0x3b, 0xbd, 0x51, 0x64, 0x44, 0x59, 0x56, -+ 0x8d, 0x81, 0xca, 0x1f, 0xa7, 0x2c, 0xe4, 0x04 -+}; -+static const u8 enc_assoc102[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce102[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key102[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input103[] __initconst = { -+ 0x41, 0x2b, 0x08, 0x0a, 0x3e, 0x19, 0xc1, 0x0d, -+ 0x44, 0xa1, 0xaf, 0x1e, 
0xab, 0xde, 0xb4, 0xce, -+ 0x35, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, -+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, -+ 0x6b, 0x83, 0x94, 0x33, 0x09, 0x21, 0x48, 0x6c, -+ 0xa1, 0x1d, 0x29, 0x1c, 0x3e, 0x97, 0xee, 0x9a, -+ 0xd1, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, -+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, -+ 0xb3, 0xd4, 0xe9, 0x90, 0x90, 0x34, 0xc6, 0x14, -+ 0xb1, 0x0a, 0xff, 0x55, 0x25, 0xd0, 0x9d, 0x8d -+}; -+static const u8 enc_output103[] __initconst = { -+ 0x41, 0x40, 0xdf, 0x25, 0xb8, 0xd3, 0x21, 0x94, -+ 0xe7, 0x8e, 0x51, 0xd4, 0x17, 0x38, 0xcc, 0x6d, -+ 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59, -+ 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01, -+ 0xb2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x0b, 0x06, 0x86, 0xf9, 0x3d, 0x84, 0x98, 0x59, -+ 0xfe, 0xd6, 0xb8, 0x18, 0x52, 0x0d, 0x45, 0x01, -+ 0x86, 0xfb, 0xab, 0x2b, 0x4a, 0x94, 0xf4, 0x7a, -+ 0xa5, 0x6f, 0x0a, 0xea, 0x65, 0xd1, 0x10, 0x08 -+}; -+static const u8 enc_assoc103[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce103[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key103[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input104[] __initconst = { -+ 0xb2, 0x47, 0xa7, 0x47, 0x23, 0x49, 0x1a, 0xac, -+ 0xac, 0xaa, 0xd7, 0x09, 0xc9, 0x1e, 0x93, 0x2b, -+ 0x31, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, -+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, -+ 0x9a, 0xde, 0x04, 0xe7, 0x5b, 0xb7, 0x01, 0xd9, -+ 0x66, 0x06, 0x01, 0xb3, 0x47, 0x65, 0xde, 0x98, -+ 0xd5, 0xf6, 0x37, 0x81, 0x71, 0xea, 
0xe4, 0x39, -+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, -+ 0x42, 0x89, 0x79, 0x44, 0xc2, 0xa2, 0x8f, 0xa1, -+ 0x76, 0x11, 0xd7, 0xfa, 0x5c, 0x22, 0xad, 0x8f -+}; -+static const u8 enc_output104[] __initconst = { -+ 0xb2, 0x2c, 0x70, 0x68, 0xa5, 0x83, 0xfa, 0x35, -+ 0x0f, 0x85, 0x29, 0xc3, 0x75, 0xf8, 0xeb, 0x88, -+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec, -+ 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03, -+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xfa, 0x5b, 0x16, 0x2d, 0x6f, 0x12, 0xd1, 0xec, -+ 0x39, 0xcd, 0x90, 0xb7, 0x2b, 0xff, 0x75, 0x03, -+ 0xa0, 0x19, 0xac, 0x2e, 0xd6, 0x67, 0xe1, 0x7d, -+ 0xa1, 0x6f, 0x0a, 0xfa, 0x19, 0x61, 0x0d, 0x0d -+}; -+static const u8 enc_assoc104[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce104[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key104[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input105[] __initconst = { -+ 0x74, 0x0f, 0x9e, 0x49, 0xf6, 0x10, 0xef, 0xa5, -+ 0x85, 0xb6, 0x59, 0xca, 0x6e, 0xd8, 0xb4, 0x99, -+ 0x2d, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, -+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, -+ 0x41, 0x2d, 0x96, 0xaf, 0xbe, 0x80, 0xec, 0x3e, -+ 0x79, 0xd4, 0x51, 0xb0, 0x0a, 0x2d, 0xb2, 0x9a, -+ 0xc9, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, -+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, -+ 0x99, 0x7a, 0xeb, 0x0c, 0x27, 0x95, 0x62, 0x46, -+ 0x69, 0xc3, 0x87, 0xf9, 0x11, 0x6a, 0xc1, 0x8d -+}; -+static const u8 enc_output105[] __initconst = { -+ 0x74, 0x64, 0x49, 0x66, 0x70, 0xda, 0x0f, 
0x3c, -+ 0x26, 0x99, 0xa7, 0x00, 0xd2, 0x3e, 0xcc, 0x3a, -+ 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b, -+ 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01, -+ 0xaa, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x21, 0xa8, 0x84, 0x65, 0x8a, 0x25, 0x3c, 0x0b, -+ 0x26, 0x1f, 0xc0, 0xb4, 0x66, 0xb7, 0x19, 0x01, -+ 0x73, 0x6e, 0x18, 0x18, 0x16, 0x96, 0xa5, 0x88, -+ 0x9c, 0x31, 0x59, 0xfa, 0xab, 0xab, 0x20, 0xfd -+}; -+static const u8 enc_assoc105[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce105[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key105[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input106[] __initconst = { -+ 0xad, 0xba, 0x5d, 0x10, 0x5b, 0xc8, 0xaa, 0x06, -+ 0x2c, 0x23, 0x36, 0xcb, 0x88, 0x9d, 0xdb, 0xd5, -+ 0x37, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, -+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, -+ 0x17, 0x7c, 0x5f, 0xfe, 0x28, 0x75, 0xf4, 0x68, -+ 0xf6, 0xc2, 0x96, 0x57, 0x48, 0xf3, 0x59, 0x9a, -+ 0xd3, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, -+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, -+ 0xcf, 0x2b, 0x22, 0x5d, 0xb1, 0x60, 0x7a, 0x10, -+ 0xe6, 0xd5, 0x40, 0x1e, 0x53, 0xb4, 0x2a, 0x8d -+}; -+static const u8 enc_output106[] __initconst = { -+ 0xad, 0xd1, 0x8a, 0x3f, 0xdd, 0x02, 0x4a, 0x9f, -+ 0x8f, 0x0c, 0xc8, 0x01, 0x34, 0x7b, 0xa3, 0x76, -+ 0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d, -+ 0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01, -+ 
0xb0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x77, 0xf9, 0x4d, 0x34, 0x1c, 0xd0, 0x24, 0x5d, -+ 0xa9, 0x09, 0x07, 0x53, 0x24, 0x69, 0xf2, 0x01, -+ 0xba, 0xd5, 0x8f, 0x10, 0xa9, 0x1e, 0x6a, 0x88, -+ 0x9a, 0xba, 0x32, 0xfd, 0x17, 0xd8, 0x33, 0x1a -+}; -+static const u8 enc_assoc106[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce106[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key106[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input107[] __initconst = { -+ 0xfe, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0xc0, 0x01, 0xed, 0xc5, 0xda, 0x44, 0x2e, 0x71, -+ 0x9b, 0xce, 0x9a, 0xbe, 0x27, 0x3a, 0xf1, 0x44, -+ 0xb4, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0x48, 0x02, 0x5f, 0x41, 0xfa, 0x4e, 0x33, 0x6c, -+ 0x78, 0x69, 0x57, 0xa2, 0xa7, 0xc4, 0x93, 0x0a, -+ 0x6c, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0x00, 0x26, 0x6e, 0xa1, 0xe4, 0x36, 0x44, 0xa3, -+ 0x4d, 0x8d, 0xd1, 0xdc, 0x93, 0xf2, 0xfa, 0x13 -+}; -+static const u8 enc_output107[] __initconst = { -+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x47, 0xc3, 0x27, 0xcc, 0x36, 0x5d, 0x08, 0x87, -+ 0x59, 0x09, 0x8c, 0x34, 0x1b, 0x4a, 0xed, 0x03, -+ 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa, -+ 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01, -+ 0xd4, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x2b, 0x0b, 0x97, 0x3f, 0x74, 0x5b, 0x28, 0xaa, -+ 0xe9, 0x37, 0xf5, 0x9f, 0x18, 0xea, 0xc7, 0x01, -+ 0xd6, 0x8c, 0xe1, 0x74, 0x07, 0x9a, 0xdd, 0x02, -+ 0x8d, 0xd0, 0x5c, 0xf8, 0x14, 0x63, 0x04, 0x88 -+}; -+static const u8 enc_assoc107[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce107[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key107[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input108[] __initconst = { -+ 0xb5, 0x13, 0xb0, 0x6a, 0xb9, 0xac, 0x14, 0x43, -+ 0x5a, 0xcb, 0x8a, 0xa3, 0xa3, 0x7a, 0xfd, 0xb6, -+ 0x54, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, -+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, -+ 0x61, 0x95, 0x01, 0x93, 0xb1, 0xbf, 0x03, 0x11, -+ 0xff, 0x11, 0x79, 0x89, 0xae, 0xd9, 0xa9, 0x99, -+ 0xb0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, -+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, -+ 0xb9, 0xc2, 0x7c, 0x30, 0x28, 0xaa, 0x8d, 0x69, -+ 0xef, 0x06, 0xaf, 0xc0, 0xb5, 0x9e, 0xda, 0x8e -+}; -+static const u8 enc_output108[] __initconst = { -+ 0xb5, 0x78, 0x67, 0x45, 0x3f, 0x66, 0xf4, 0xda, -+ 0xf9, 0xe4, 0x74, 0x69, 0x1f, 0x9c, 0x85, 0x15, -+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24, -+ 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02, -+ 0xd3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x01, 0x10, 0x13, 0x59, 0x85, 0x1a, 0xd3, 0x24, -+ 0xa0, 0xda, 0xe8, 0x8d, 0xc2, 0x43, 0x02, 0x02, -+ 0xaa, 0x48, 0xa3, 0x88, 0x7d, 0x4b, 0x05, 0x96, -+ 0x99, 0xc2, 0xfd, 0xf9, 0xc6, 0x78, 0x7e, 0x0a -+}; -+static const u8 
enc_assoc108[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce108[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key108[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input109[] __initconst = { -+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0xd4, 0xf1, 0x09, 0xe8, 0x14, 0xce, 0xa8, 0x5a, -+ 0x08, 0xc0, 0x11, 0xd8, 0x50, 0xdd, 0x1d, 0xcb, -+ 0xcf, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0x53, 0x40, 0xb8, 0x5a, 0x9a, 0xa0, 0x82, 0x96, -+ 0xb7, 0x7a, 0x5f, 0xc3, 0x96, 0x1f, 0x66, 0x0f, -+ 0x17, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0x1b, 0x64, 0x89, 0xba, 0x84, 0xd8, 0xf5, 0x59, -+ 0x82, 0x9e, 0xd9, 0xbd, 0xa2, 0x29, 0x0f, 0x16 -+}; -+static const u8 enc_output109[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x53, 0x33, 0xc3, 0xe1, 0xf8, 0xd7, 0x8e, 0xac, -+ 0xca, 0x07, 0x07, 0x52, 0x6c, 0xad, 0x01, 0x8c, -+ 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50, -+ 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04, -+ 0xaf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x30, 0x49, 0x70, 0x24, 0x14, 0xb5, 0x99, 0x50, -+ 0x26, 0x24, 0xfd, 0xfe, 0x29, 0x31, 0x32, 0x04, -+ 0xb9, 0x36, 0xa8, 0x17, 0xf2, 0x21, 0x1a, 0xf1, -+ 0x29, 0xe2, 0xcf, 0x16, 0x0f, 0xd4, 0x2b, 0xcb -+}; -+static const u8 enc_assoc109[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; 
-+static const u8 enc_nonce109[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key109[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input110[] __initconst = { -+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0xdf, 0x4c, 0x62, 0x03, 0x2d, 0x41, 0x19, 0xb5, -+ 0x88, 0x47, 0x7e, 0x99, 0x92, 0x5a, 0x56, 0xd9, -+ 0xd6, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0xfa, 0x84, 0xf0, 0x64, 0x55, 0x36, 0x42, 0x1b, -+ 0x2b, 0xb9, 0x24, 0x6e, 0xc2, 0x19, 0xed, 0x0b, -+ 0x0e, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0xb2, 0xa0, 0xc1, 0x84, 0x4b, 0x4e, 0x35, 0xd4, -+ 0x1e, 0x5d, 0xa2, 0x10, 0xf6, 0x2f, 0x84, 0x12 -+}; -+static const u8 enc_output110[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x58, 0x8e, 0xa8, 0x0a, 0xc1, 0x58, 0x3f, 0x43, -+ 0x4a, 0x80, 0x68, 0x13, 0xae, 0x2a, 0x4a, 0x9e, -+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd, -+ 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00, -+ 0xb6, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x99, 0x8d, 0x38, 0x1a, 0xdb, 0x23, 0x59, 0xdd, -+ 0xba, 0xe7, 0x86, 0x53, 0x7d, 0x37, 0xb9, 0x00, -+ 0x9f, 0x7a, 0xc4, 0x35, 0x1f, 0x6b, 0x91, 0xe6, -+ 0x30, 0x97, 0xa7, 0x13, 0x11, 0x5d, 0x05, 0xbe -+}; -+static const u8 enc_assoc110[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce110[] __initconst = { -+ 0x00, 0x00, 
0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key110[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input111[] __initconst = { -+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0x13, 0xf8, 0x0a, 0x00, 0x6d, 0xc1, 0xbb, 0xda, -+ 0xd6, 0x39, 0xa9, 0x2f, 0xc7, 0xec, 0xa6, 0x55, -+ 0xf7, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0x63, 0x48, 0xb8, 0xfd, 0x29, 0xbf, 0x96, 0xd5, -+ 0x63, 0xa5, 0x17, 0xe2, 0x7d, 0x7b, 0xfc, 0x0f, -+ 0x2f, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0x2b, 0x6c, 0x89, 0x1d, 0x37, 0xc7, 0xe1, 0x1a, -+ 0x56, 0x41, 0x91, 0x9c, 0x49, 0x4d, 0x95, 0x16 -+}; -+static const u8 enc_output111[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x94, 0x3a, 0xc0, 0x09, 0x81, 0xd8, 0x9d, 0x2c, -+ 0x14, 0xfe, 0xbf, 0xa5, 0xfb, 0x9c, 0xba, 0x12, -+ 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13, -+ 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04, -+ 0x97, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x41, 0x70, 0x83, 0xa7, 0xaa, 0x8d, 0x13, -+ 0xf2, 0xfb, 0xb5, 0xdf, 0xc2, 0x55, 0xa8, 0x04, -+ 0x9a, 0x18, 0xa8, 0x28, 0x07, 0x02, 0x69, 0xf4, -+ 0x47, 0x00, 0xd0, 0x09, 0xe7, 0x17, 0x1c, 0xc9 -+}; -+static const u8 enc_assoc111[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce111[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 
enc_key111[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input112[] __initconst = { -+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0x82, 0xe5, 0x9b, 0x45, 0x82, 0x91, 0x50, 0x38, -+ 0xf9, 0x33, 0x81, 0x1e, 0x65, 0x2d, 0xc6, 0x6a, -+ 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0xb6, 0x71, 0xc8, 0xca, 0xc2, 0x70, 0xc2, 0x65, -+ 0xa0, 0xac, 0x2f, 0x53, 0x57, 0x99, 0x88, 0x0a, -+ 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0xfe, 0x55, 0xf9, 0x2a, 0xdc, 0x08, 0xb5, 0xaa, -+ 0x95, 0x48, 0xa9, 0x2d, 0x63, 0xaf, 0xe1, 0x13 -+}; -+static const u8 enc_output112[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x05, 0x27, 0x51, 0x4c, 0x6e, 0x88, 0x76, 0xce, -+ 0x3b, 0xf4, 0x97, 0x94, 0x59, 0x5d, 0xda, 0x2d, -+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3, -+ 0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01, -+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xd5, 0x78, 0x00, 0xb4, 0x4c, 0x65, 0xd9, 0xa3, -+ 0x31, 0xf2, 0x8d, 0x6e, 0xe8, 0xb7, 0xdc, 0x01, -+ 0xb4, 0x36, 0xa8, 0x2b, 0x93, 0xd5, 0x55, 0xf7, -+ 0x43, 0x00, 0xd0, 0x19, 0x9b, 0xa7, 0x18, 0xce -+}; -+static const u8 enc_assoc112[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce112[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key112[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 
0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input113[] __initconst = { -+ 0xff, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0xf1, 0xd1, 0x28, 0x87, 0xb7, 0x21, 0x69, 0x86, -+ 0xa1, 0x2d, 0x79, 0x09, 0x8b, 0x6d, 0xe6, 0x0f, -+ 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0xa7, 0xc7, 0x58, 0x99, 0xf3, 0xe6, 0x0a, 0xf1, -+ 0xfc, 0xb6, 0xc7, 0x30, 0x7d, 0x87, 0x59, 0x0f, -+ 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0xef, 0xe3, 0x69, 0x79, 0xed, 0x9e, 0x7d, 0x3e, -+ 0xc9, 0x52, 0x41, 0x4e, 0x49, 0xb1, 0x30, 0x16 -+}; -+static const u8 enc_output113[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x76, 0x13, 0xe2, 0x8e, 0x5b, 0x38, 0x4f, 0x70, -+ 0x63, 0xea, 0x6f, 0x83, 0xb7, 0x1d, 0xfa, 0x48, -+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37, -+ 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04, -+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xc4, 0xce, 0x90, 0xe7, 0x7d, 0xf3, 0x11, 0x37, -+ 0x6d, 0xe8, 0x65, 0x0d, 0xc2, 0xa9, 0x0d, 0x04, -+ 0xce, 0x54, 0xa8, 0x2e, 0x1f, 0xa9, 0x42, 0xfa, -+ 0x3f, 0x00, 0xd0, 0x29, 0x4f, 0x37, 0x15, 0xd3 -+}; -+static const u8 enc_assoc113[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce113[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key113[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 
0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input114[] __initconst = { -+ 0xcb, 0xf1, 0xda, 0x9e, 0x0b, 0xa9, 0x37, 0x73, -+ 0x74, 0xe6, 0x9e, 0x1c, 0x0e, 0x60, 0x0c, 0xfc, -+ 0x34, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, -+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, -+ 0xbe, 0x3f, 0xa6, 0x6b, 0x6c, 0xe7, 0x80, 0x8a, -+ 0xa3, 0xe4, 0x59, 0x49, 0xf9, 0x44, 0x64, 0x9f, -+ 0xd0, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, -+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, -+ 0x66, 0x68, 0xdb, 0xc8, 0xf5, 0xf2, 0x0e, 0xf2, -+ 0xb3, 0xf3, 0x8f, 0x00, 0xe2, 0x03, 0x17, 0x88 -+}; -+static const u8 enc_output114[] __initconst = { -+ 0xcb, 0x9a, 0x0d, 0xb1, 0x8d, 0x63, 0xd7, 0xea, -+ 0xd7, 0xc9, 0x60, 0xd6, 0xb2, 0x86, 0x74, 0x5f, -+ 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf, -+ 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04, -+ 0xb3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xde, 0xba, 0xb4, 0xa1, 0x58, 0x42, 0x50, 0xbf, -+ 0xfc, 0x2f, 0xc8, 0x4d, 0x95, 0xde, 0xcf, 0x04, -+ 0x23, 0x83, 0xab, 0x0b, 0x79, 0x92, 0x05, 0x69, -+ 0x9b, 0x51, 0x0a, 0xa7, 0x09, 0xbf, 0x31, 0xf1 -+}; -+static const u8 enc_assoc114[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce114[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key114[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input115[] __initconst = { -+ 0x8f, 0x27, 0x86, 0x94, 0xc4, 0xe9, 
0xda, 0xeb, -+ 0xd5, 0x8d, 0x3e, 0x5b, 0x96, 0x6e, 0x8b, 0x68, -+ 0x42, 0x3d, 0x35, 0xf6, 0x13, 0xe6, 0xd9, 0x09, -+ 0x3d, 0x38, 0xe9, 0x75, 0xc3, 0x8f, 0xe3, 0xb8, -+ 0x06, 0x53, 0xe7, 0xa3, 0x31, 0x71, 0x88, 0x33, -+ 0xac, 0xc3, 0xb9, 0xad, 0xff, 0x1c, 0x31, 0x98, -+ 0xa6, 0xf6, 0x37, 0x81, 0x71, 0xea, 0xe4, 0x39, -+ 0x6e, 0xa1, 0x5d, 0xc2, 0x40, 0xd1, 0xab, 0xf4, -+ 0xde, 0x04, 0x9a, 0x00, 0xa8, 0x64, 0x06, 0x4b, -+ 0xbc, 0xd4, 0x6f, 0xe4, 0xe4, 0x5b, 0x42, 0x8f -+}; -+static const u8 enc_output115[] __initconst = { -+ 0x8f, 0x4c, 0x51, 0xbb, 0x42, 0x23, 0x3a, 0x72, -+ 0x76, 0xa2, 0xc0, 0x91, 0x2a, 0x88, 0xf3, 0xcb, -+ 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06, -+ 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03, -+ 0xc5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x66, 0xd6, 0xf5, 0x69, 0x05, 0xd4, 0x58, 0x06, -+ 0xf3, 0x08, 0x28, 0xa9, 0x93, 0x86, 0x9a, 0x03, -+ 0x8b, 0xfb, 0xab, 0x17, 0xa9, 0xe0, 0xb8, 0x74, -+ 0x8b, 0x51, 0x0a, 0xe7, 0xd9, 0xfd, 0x23, 0x05 -+}; -+static const u8 enc_assoc115[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce115[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key115[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input116[] __initconst = { -+ 0xd5, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0x9a, 0x22, 0xd7, 0x0a, 0x48, 0xe2, 0x4f, 0xdd, -+ 0xcd, 0xd4, 0x41, 0x9d, 0xe6, 0x4c, 0x8f, 0x44, -+ 0xfc, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, 
-+ 0x77, 0xb5, 0xc9, 0x07, 0xd9, 0xc9, 0xe1, 0xea, -+ 0x51, 0x85, 0x1a, 0x20, 0x4a, 0xad, 0x9f, 0x0a, -+ 0x24, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0x3f, 0x91, 0xf8, 0xe7, 0xc7, 0xb1, 0x96, 0x25, -+ 0x64, 0x61, 0x9c, 0x5e, 0x7e, 0x9b, 0xf6, 0x13 -+}; -+static const u8 enc_output116[] __initconst = { -+ 0xd5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x1d, 0xe0, 0x1d, 0x03, 0xa4, 0xfb, 0x69, 0x2b, -+ 0x0f, 0x13, 0x57, 0x17, 0xda, 0x3c, 0x93, 0x03, -+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c, -+ 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01, -+ 0x9c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x14, 0xbc, 0x01, 0x79, 0x57, 0xdc, 0xfa, 0x2c, -+ 0xc0, 0xdb, 0xb8, 0x1d, 0xf5, 0x83, 0xcb, 0x01, -+ 0x49, 0xbc, 0x6e, 0x9f, 0xc5, 0x1c, 0x4d, 0x50, -+ 0x30, 0x36, 0x64, 0x4d, 0x84, 0x27, 0x73, 0xd2 -+}; -+static const u8 enc_assoc116[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce116[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key116[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input117[] __initconst = { -+ 0xdb, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0x75, 0xd5, 0x64, 0x3a, 0xa5, 0xaf, 0x93, 0x4d, -+ 0x8c, 0xce, 0x39, 0x2c, 0xc3, 0xee, 0xdb, 0x47, -+ 0xc0, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0x60, 0x1b, 0x5a, 0xd2, 0x06, 0x7f, 0x28, 0x06, -+ 0x6a, 
0x8f, 0x32, 0x81, 0x71, 0x5b, 0xa8, 0x08, -+ 0x18, 0x2d, 0x90, 0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0x28, 0x3f, 0x6b, 0x32, 0x18, 0x07, 0x5f, 0xc9, -+ 0x5f, 0x6b, 0xb4, 0xff, 0x45, 0x6d, 0xc1, 0x11 -+}; -+static const u8 enc_output117[] __initconst = { -+ 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xf2, 0x17, 0xae, 0x33, 0x49, 0xb6, 0xb5, 0xbb, -+ 0x4e, 0x09, 0x2f, 0xa6, 0xff, 0x9e, 0xc7, 0x00, -+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0, -+ 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03, -+ 0xa0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x03, 0x12, 0x92, 0xac, 0x88, 0x6a, 0x33, 0xc0, -+ 0xfb, 0xd1, 0x90, 0xbc, 0xce, 0x75, 0xfc, 0x03, -+ 0x63, 0xda, 0x6e, 0xa2, 0x51, 0xf0, 0x39, 0x53, -+ 0x2c, 0x36, 0x64, 0x5d, 0x38, 0xb7, 0x6f, 0xd7 -+}; -+static const u8 enc_assoc117[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce117[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key117[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - edge case intermediate sums in poly1305 */ -+static const u8 enc_input118[] __initconst = { -+ 0x93, 0x94, 0x28, 0xd0, 0x79, 0x35, 0x1f, 0x66, -+ 0x5c, 0xd0, 0x01, 0x35, 0x43, 0x19, 0x87, 0x5c, -+ 0x62, 0x48, 0x39, 0x60, 0x42, 0x16, 0xe4, 0x03, -+ 0xeb, 0xcc, 0x6a, 0xf5, 0x59, 0xec, 0x8b, 0x43, -+ 0x97, 0x7a, 0xed, 0x35, 0xcb, 0x5a, 0x2f, 0xca, -+ 0xa0, 0x34, 0x6e, 0xfb, 0x93, 0x65, 0x54, 0x64, -+ 0xd8, 0xc8, 0xc3, 0xfa, 0x1a, 0x9e, 0x47, 0x4a, -+ 0xbe, 0x52, 0xd0, 0x2c, 0x81, 0x87, 0xe9, 0x0f, -+ 0x4f, 0x2d, 0x90, 
0x96, 0x52, 0x4f, 0xa1, 0xb2, -+ 0xb0, 0x23, 0xb8, 0xb2, 0x88, 0x22, 0x27, 0x73, -+ 0x90, 0xec, 0xf2, 0x1a, 0x04, 0xe6, 0x30, 0x85, -+ 0x8b, 0xb6, 0x56, 0x52, 0xb5, 0xb1, 0x80, 0x16 -+}; -+static const u8 enc_output118[] __initconst = { -+ 0x93, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xe5, 0x8a, 0xf3, 0x69, 0xae, 0x0f, 0xc2, 0xf5, -+ 0x29, 0x0b, 0x7c, 0x7f, 0x65, 0x9c, 0x97, 0x04, -+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c, -+ 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04, -+ 0xf7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xbb, 0xc1, 0x0b, 0x84, 0x94, 0x8b, 0x5c, 0x8c, -+ 0x2f, 0x0c, 0x72, 0x11, 0x3e, 0xa9, 0xbd, 0x04, -+ 0x73, 0xeb, 0x27, 0x24, 0xb5, 0xc4, 0x05, 0xf0, -+ 0x4d, 0x00, 0xd0, 0xf1, 0x58, 0x40, 0xa1, 0xc1 -+}; -+static const u8 enc_assoc118[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce118[] __initconst = { -+ 0x00, 0x00, 0x00, 0x00, 0x06, 0x4c, 0x2d, 0x52 -+}; -+static const u8 enc_key118[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+static const struct chacha20poly1305_testvec -+chacha20poly1305_enc_vectors[] __initconst = { -+ { enc_input001, enc_output001, enc_assoc001, enc_nonce001, enc_key001, -+ sizeof(enc_input001), sizeof(enc_assoc001), sizeof(enc_nonce001) }, -+ { enc_input002, enc_output002, enc_assoc002, enc_nonce002, enc_key002, -+ sizeof(enc_input002), sizeof(enc_assoc002), sizeof(enc_nonce002) }, -+ { enc_input003, enc_output003, enc_assoc003, enc_nonce003, enc_key003, -+ sizeof(enc_input003), sizeof(enc_assoc003), sizeof(enc_nonce003) }, -+ { enc_input004, enc_output004, enc_assoc004, enc_nonce004, 
enc_key004, -+ sizeof(enc_input004), sizeof(enc_assoc004), sizeof(enc_nonce004) }, -+ { enc_input005, enc_output005, enc_assoc005, enc_nonce005, enc_key005, -+ sizeof(enc_input005), sizeof(enc_assoc005), sizeof(enc_nonce005) }, -+ { enc_input006, enc_output006, enc_assoc006, enc_nonce006, enc_key006, -+ sizeof(enc_input006), sizeof(enc_assoc006), sizeof(enc_nonce006) }, -+ { enc_input007, enc_output007, enc_assoc007, enc_nonce007, enc_key007, -+ sizeof(enc_input007), sizeof(enc_assoc007), sizeof(enc_nonce007) }, -+ { enc_input008, enc_output008, enc_assoc008, enc_nonce008, enc_key008, -+ sizeof(enc_input008), sizeof(enc_assoc008), sizeof(enc_nonce008) }, -+ { enc_input009, enc_output009, enc_assoc009, enc_nonce009, enc_key009, -+ sizeof(enc_input009), sizeof(enc_assoc009), sizeof(enc_nonce009) }, -+ { enc_input010, enc_output010, enc_assoc010, enc_nonce010, enc_key010, -+ sizeof(enc_input010), sizeof(enc_assoc010), sizeof(enc_nonce010) }, -+ { enc_input011, enc_output011, enc_assoc011, enc_nonce011, enc_key011, -+ sizeof(enc_input011), sizeof(enc_assoc011), sizeof(enc_nonce011) }, -+ { enc_input012, enc_output012, enc_assoc012, enc_nonce012, enc_key012, -+ sizeof(enc_input012), sizeof(enc_assoc012), sizeof(enc_nonce012) }, -+ { enc_input053, enc_output053, enc_assoc053, enc_nonce053, enc_key053, -+ sizeof(enc_input053), sizeof(enc_assoc053), sizeof(enc_nonce053) }, -+ { enc_input054, enc_output054, enc_assoc054, enc_nonce054, enc_key054, -+ sizeof(enc_input054), sizeof(enc_assoc054), sizeof(enc_nonce054) }, -+ { enc_input055, enc_output055, enc_assoc055, enc_nonce055, enc_key055, -+ sizeof(enc_input055), sizeof(enc_assoc055), sizeof(enc_nonce055) }, -+ { enc_input056, enc_output056, enc_assoc056, enc_nonce056, enc_key056, -+ sizeof(enc_input056), sizeof(enc_assoc056), sizeof(enc_nonce056) }, -+ { enc_input057, enc_output057, enc_assoc057, enc_nonce057, enc_key057, -+ sizeof(enc_input057), sizeof(enc_assoc057), sizeof(enc_nonce057) }, -+ { enc_input058, 
enc_output058, enc_assoc058, enc_nonce058, enc_key058, -+ sizeof(enc_input058), sizeof(enc_assoc058), sizeof(enc_nonce058) }, -+ { enc_input059, enc_output059, enc_assoc059, enc_nonce059, enc_key059, -+ sizeof(enc_input059), sizeof(enc_assoc059), sizeof(enc_nonce059) }, -+ { enc_input060, enc_output060, enc_assoc060, enc_nonce060, enc_key060, -+ sizeof(enc_input060), sizeof(enc_assoc060), sizeof(enc_nonce060) }, -+ { enc_input061, enc_output061, enc_assoc061, enc_nonce061, enc_key061, -+ sizeof(enc_input061), sizeof(enc_assoc061), sizeof(enc_nonce061) }, -+ { enc_input062, enc_output062, enc_assoc062, enc_nonce062, enc_key062, -+ sizeof(enc_input062), sizeof(enc_assoc062), sizeof(enc_nonce062) }, -+ { enc_input063, enc_output063, enc_assoc063, enc_nonce063, enc_key063, -+ sizeof(enc_input063), sizeof(enc_assoc063), sizeof(enc_nonce063) }, -+ { enc_input064, enc_output064, enc_assoc064, enc_nonce064, enc_key064, -+ sizeof(enc_input064), sizeof(enc_assoc064), sizeof(enc_nonce064) }, -+ { enc_input065, enc_output065, enc_assoc065, enc_nonce065, enc_key065, -+ sizeof(enc_input065), sizeof(enc_assoc065), sizeof(enc_nonce065) }, -+ { enc_input066, enc_output066, enc_assoc066, enc_nonce066, enc_key066, -+ sizeof(enc_input066), sizeof(enc_assoc066), sizeof(enc_nonce066) }, -+ { enc_input067, enc_output067, enc_assoc067, enc_nonce067, enc_key067, -+ sizeof(enc_input067), sizeof(enc_assoc067), sizeof(enc_nonce067) }, -+ { enc_input068, enc_output068, enc_assoc068, enc_nonce068, enc_key068, -+ sizeof(enc_input068), sizeof(enc_assoc068), sizeof(enc_nonce068) }, -+ { enc_input069, enc_output069, enc_assoc069, enc_nonce069, enc_key069, -+ sizeof(enc_input069), sizeof(enc_assoc069), sizeof(enc_nonce069) }, -+ { enc_input070, enc_output070, enc_assoc070, enc_nonce070, enc_key070, -+ sizeof(enc_input070), sizeof(enc_assoc070), sizeof(enc_nonce070) }, -+ { enc_input071, enc_output071, enc_assoc071, enc_nonce071, enc_key071, -+ sizeof(enc_input071), sizeof(enc_assoc071), 
sizeof(enc_nonce071) }, -+ { enc_input072, enc_output072, enc_assoc072, enc_nonce072, enc_key072, -+ sizeof(enc_input072), sizeof(enc_assoc072), sizeof(enc_nonce072) }, -+ { enc_input073, enc_output073, enc_assoc073, enc_nonce073, enc_key073, -+ sizeof(enc_input073), sizeof(enc_assoc073), sizeof(enc_nonce073) }, -+ { enc_input076, enc_output076, enc_assoc076, enc_nonce076, enc_key076, -+ sizeof(enc_input076), sizeof(enc_assoc076), sizeof(enc_nonce076) }, -+ { enc_input077, enc_output077, enc_assoc077, enc_nonce077, enc_key077, -+ sizeof(enc_input077), sizeof(enc_assoc077), sizeof(enc_nonce077) }, -+ { enc_input078, enc_output078, enc_assoc078, enc_nonce078, enc_key078, -+ sizeof(enc_input078), sizeof(enc_assoc078), sizeof(enc_nonce078) }, -+ { enc_input079, enc_output079, enc_assoc079, enc_nonce079, enc_key079, -+ sizeof(enc_input079), sizeof(enc_assoc079), sizeof(enc_nonce079) }, -+ { enc_input080, enc_output080, enc_assoc080, enc_nonce080, enc_key080, -+ sizeof(enc_input080), sizeof(enc_assoc080), sizeof(enc_nonce080) }, -+ { enc_input081, enc_output081, enc_assoc081, enc_nonce081, enc_key081, -+ sizeof(enc_input081), sizeof(enc_assoc081), sizeof(enc_nonce081) }, -+ { enc_input082, enc_output082, enc_assoc082, enc_nonce082, enc_key082, -+ sizeof(enc_input082), sizeof(enc_assoc082), sizeof(enc_nonce082) }, -+ { enc_input083, enc_output083, enc_assoc083, enc_nonce083, enc_key083, -+ sizeof(enc_input083), sizeof(enc_assoc083), sizeof(enc_nonce083) }, -+ { enc_input084, enc_output084, enc_assoc084, enc_nonce084, enc_key084, -+ sizeof(enc_input084), sizeof(enc_assoc084), sizeof(enc_nonce084) }, -+ { enc_input085, enc_output085, enc_assoc085, enc_nonce085, enc_key085, -+ sizeof(enc_input085), sizeof(enc_assoc085), sizeof(enc_nonce085) }, -+ { enc_input093, enc_output093, enc_assoc093, enc_nonce093, enc_key093, -+ sizeof(enc_input093), sizeof(enc_assoc093), sizeof(enc_nonce093) }, -+ { enc_input094, enc_output094, enc_assoc094, enc_nonce094, enc_key094, -+ 
sizeof(enc_input094), sizeof(enc_assoc094), sizeof(enc_nonce094) }, -+ { enc_input095, enc_output095, enc_assoc095, enc_nonce095, enc_key095, -+ sizeof(enc_input095), sizeof(enc_assoc095), sizeof(enc_nonce095) }, -+ { enc_input096, enc_output096, enc_assoc096, enc_nonce096, enc_key096, -+ sizeof(enc_input096), sizeof(enc_assoc096), sizeof(enc_nonce096) }, -+ { enc_input097, enc_output097, enc_assoc097, enc_nonce097, enc_key097, -+ sizeof(enc_input097), sizeof(enc_assoc097), sizeof(enc_nonce097) }, -+ { enc_input098, enc_output098, enc_assoc098, enc_nonce098, enc_key098, -+ sizeof(enc_input098), sizeof(enc_assoc098), sizeof(enc_nonce098) }, -+ { enc_input099, enc_output099, enc_assoc099, enc_nonce099, enc_key099, -+ sizeof(enc_input099), sizeof(enc_assoc099), sizeof(enc_nonce099) }, -+ { enc_input100, enc_output100, enc_assoc100, enc_nonce100, enc_key100, -+ sizeof(enc_input100), sizeof(enc_assoc100), sizeof(enc_nonce100) }, -+ { enc_input101, enc_output101, enc_assoc101, enc_nonce101, enc_key101, -+ sizeof(enc_input101), sizeof(enc_assoc101), sizeof(enc_nonce101) }, -+ { enc_input102, enc_output102, enc_assoc102, enc_nonce102, enc_key102, -+ sizeof(enc_input102), sizeof(enc_assoc102), sizeof(enc_nonce102) }, -+ { enc_input103, enc_output103, enc_assoc103, enc_nonce103, enc_key103, -+ sizeof(enc_input103), sizeof(enc_assoc103), sizeof(enc_nonce103) }, -+ { enc_input104, enc_output104, enc_assoc104, enc_nonce104, enc_key104, -+ sizeof(enc_input104), sizeof(enc_assoc104), sizeof(enc_nonce104) }, -+ { enc_input105, enc_output105, enc_assoc105, enc_nonce105, enc_key105, -+ sizeof(enc_input105), sizeof(enc_assoc105), sizeof(enc_nonce105) }, -+ { enc_input106, enc_output106, enc_assoc106, enc_nonce106, enc_key106, -+ sizeof(enc_input106), sizeof(enc_assoc106), sizeof(enc_nonce106) }, -+ { enc_input107, enc_output107, enc_assoc107, enc_nonce107, enc_key107, -+ sizeof(enc_input107), sizeof(enc_assoc107), sizeof(enc_nonce107) }, -+ { enc_input108, enc_output108, 
enc_assoc108, enc_nonce108, enc_key108, -+ sizeof(enc_input108), sizeof(enc_assoc108), sizeof(enc_nonce108) }, -+ { enc_input109, enc_output109, enc_assoc109, enc_nonce109, enc_key109, -+ sizeof(enc_input109), sizeof(enc_assoc109), sizeof(enc_nonce109) }, -+ { enc_input110, enc_output110, enc_assoc110, enc_nonce110, enc_key110, -+ sizeof(enc_input110), sizeof(enc_assoc110), sizeof(enc_nonce110) }, -+ { enc_input111, enc_output111, enc_assoc111, enc_nonce111, enc_key111, -+ sizeof(enc_input111), sizeof(enc_assoc111), sizeof(enc_nonce111) }, -+ { enc_input112, enc_output112, enc_assoc112, enc_nonce112, enc_key112, -+ sizeof(enc_input112), sizeof(enc_assoc112), sizeof(enc_nonce112) }, -+ { enc_input113, enc_output113, enc_assoc113, enc_nonce113, enc_key113, -+ sizeof(enc_input113), sizeof(enc_assoc113), sizeof(enc_nonce113) }, -+ { enc_input114, enc_output114, enc_assoc114, enc_nonce114, enc_key114, -+ sizeof(enc_input114), sizeof(enc_assoc114), sizeof(enc_nonce114) }, -+ { enc_input115, enc_output115, enc_assoc115, enc_nonce115, enc_key115, -+ sizeof(enc_input115), sizeof(enc_assoc115), sizeof(enc_nonce115) }, -+ { enc_input116, enc_output116, enc_assoc116, enc_nonce116, enc_key116, -+ sizeof(enc_input116), sizeof(enc_assoc116), sizeof(enc_nonce116) }, -+ { enc_input117, enc_output117, enc_assoc117, enc_nonce117, enc_key117, -+ sizeof(enc_input117), sizeof(enc_assoc117), sizeof(enc_nonce117) }, -+ { enc_input118, enc_output118, enc_assoc118, enc_nonce118, enc_key118, -+ sizeof(enc_input118), sizeof(enc_assoc118), sizeof(enc_nonce118) } -+}; -+ -+static const u8 dec_input001[] __initconst = { -+ 0x64, 0xa0, 0x86, 0x15, 0x75, 0x86, 0x1a, 0xf4, -+ 0x60, 0xf0, 0x62, 0xc7, 0x9b, 0xe6, 0x43, 0xbd, -+ 0x5e, 0x80, 0x5c, 0xfd, 0x34, 0x5c, 0xf3, 0x89, -+ 0xf1, 0x08, 0x67, 0x0a, 0xc7, 0x6c, 0x8c, 0xb2, -+ 0x4c, 0x6c, 0xfc, 0x18, 0x75, 0x5d, 0x43, 0xee, -+ 0xa0, 0x9e, 0xe9, 0x4e, 0x38, 0x2d, 0x26, 0xb0, -+ 0xbd, 0xb7, 0xb7, 0x3c, 0x32, 0x1b, 0x01, 0x00, -+ 0xd4, 0xf0, 0x3b, 
0x7f, 0x35, 0x58, 0x94, 0xcf, -+ 0x33, 0x2f, 0x83, 0x0e, 0x71, 0x0b, 0x97, 0xce, -+ 0x98, 0xc8, 0xa8, 0x4a, 0xbd, 0x0b, 0x94, 0x81, -+ 0x14, 0xad, 0x17, 0x6e, 0x00, 0x8d, 0x33, 0xbd, -+ 0x60, 0xf9, 0x82, 0xb1, 0xff, 0x37, 0xc8, 0x55, -+ 0x97, 0x97, 0xa0, 0x6e, 0xf4, 0xf0, 0xef, 0x61, -+ 0xc1, 0x86, 0x32, 0x4e, 0x2b, 0x35, 0x06, 0x38, -+ 0x36, 0x06, 0x90, 0x7b, 0x6a, 0x7c, 0x02, 0xb0, -+ 0xf9, 0xf6, 0x15, 0x7b, 0x53, 0xc8, 0x67, 0xe4, -+ 0xb9, 0x16, 0x6c, 0x76, 0x7b, 0x80, 0x4d, 0x46, -+ 0xa5, 0x9b, 0x52, 0x16, 0xcd, 0xe7, 0xa4, 0xe9, -+ 0x90, 0x40, 0xc5, 0xa4, 0x04, 0x33, 0x22, 0x5e, -+ 0xe2, 0x82, 0xa1, 0xb0, 0xa0, 0x6c, 0x52, 0x3e, -+ 0xaf, 0x45, 0x34, 0xd7, 0xf8, 0x3f, 0xa1, 0x15, -+ 0x5b, 0x00, 0x47, 0x71, 0x8c, 0xbc, 0x54, 0x6a, -+ 0x0d, 0x07, 0x2b, 0x04, 0xb3, 0x56, 0x4e, 0xea, -+ 0x1b, 0x42, 0x22, 0x73, 0xf5, 0x48, 0x27, 0x1a, -+ 0x0b, 0xb2, 0x31, 0x60, 0x53, 0xfa, 0x76, 0x99, -+ 0x19, 0x55, 0xeb, 0xd6, 0x31, 0x59, 0x43, 0x4e, -+ 0xce, 0xbb, 0x4e, 0x46, 0x6d, 0xae, 0x5a, 0x10, -+ 0x73, 0xa6, 0x72, 0x76, 0x27, 0x09, 0x7a, 0x10, -+ 0x49, 0xe6, 0x17, 0xd9, 0x1d, 0x36, 0x10, 0x94, -+ 0xfa, 0x68, 0xf0, 0xff, 0x77, 0x98, 0x71, 0x30, -+ 0x30, 0x5b, 0xea, 0xba, 0x2e, 0xda, 0x04, 0xdf, -+ 0x99, 0x7b, 0x71, 0x4d, 0x6c, 0x6f, 0x2c, 0x29, -+ 0xa6, 0xad, 0x5c, 0xb4, 0x02, 0x2b, 0x02, 0x70, -+ 0x9b, 0xee, 0xad, 0x9d, 0x67, 0x89, 0x0c, 0xbb, -+ 0x22, 0x39, 0x23, 0x36, 0xfe, 0xa1, 0x85, 0x1f, -+ 0x38 -+}; -+static const u8 dec_output001[] __initconst = { -+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, -+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20, -+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, -+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, -+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, -+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, -+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, -+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d, -+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, -+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, -+ 0x20, 0x75, 
0x70, 0x64, 0x61, 0x74, 0x65, 0x64, -+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, -+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, -+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64, -+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, -+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, -+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, -+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, -+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, -+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, -+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, -+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, -+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, -+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, -+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, -+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, -+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, -+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, -+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, -+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, -+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, -+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, -+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80, -+ 0x9d -+}; -+static const u8 dec_assoc001[] __initconst = { -+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x4e, 0x91 -+}; -+static const u8 dec_nonce001[] __initconst = { -+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 -+}; -+static const u8 dec_key001[] __initconst = { -+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, -+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, -+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, -+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0 -+}; -+ -+static const u8 dec_input002[] __initconst = { -+ 0xea, 0xe0, 0x1e, 0x9e, 0x2c, 0x91, 0xaa, 0xe1, -+ 0xdb, 0x5d, 0x99, 0x3f, 0x8a, 0xf7, 0x69, 0x92 -+}; -+static const u8 dec_output002[] __initconst = { }; -+static const u8 dec_assoc002[] __initconst = { }; -+static const u8 dec_nonce002[] __initconst = { -+ 0xca, 0xbf, 0x33, 0x71, 
0x32, 0x45, 0x77, 0x8e -+}; -+static const u8 dec_key002[] __initconst = { -+ 0x4c, 0xf5, 0x96, 0x83, 0x38, 0xe6, 0xae, 0x7f, -+ 0x2d, 0x29, 0x25, 0x76, 0xd5, 0x75, 0x27, 0x86, -+ 0x91, 0x9a, 0x27, 0x7a, 0xfb, 0x46, 0xc5, 0xef, -+ 0x94, 0x81, 0x79, 0x57, 0x14, 0x59, 0x40, 0x68 -+}; -+ -+static const u8 dec_input003[] __initconst = { -+ 0xdd, 0x6b, 0x3b, 0x82, 0xce, 0x5a, 0xbd, 0xd6, -+ 0xa9, 0x35, 0x83, 0xd8, 0x8c, 0x3d, 0x85, 0x77 -+}; -+static const u8 dec_output003[] __initconst = { }; -+static const u8 dec_assoc003[] __initconst = { -+ 0x33, 0x10, 0x41, 0x12, 0x1f, 0xf3, 0xd2, 0x6b -+}; -+static const u8 dec_nonce003[] __initconst = { -+ 0x3d, 0x86, 0xb5, 0x6b, 0xc8, 0xa3, 0x1f, 0x1d -+}; -+static const u8 dec_key003[] __initconst = { -+ 0x2d, 0xb0, 0x5d, 0x40, 0xc8, 0xed, 0x44, 0x88, -+ 0x34, 0xd1, 0x13, 0xaf, 0x57, 0xa1, 0xeb, 0x3a, -+ 0x2a, 0x80, 0x51, 0x36, 0xec, 0x5b, 0xbc, 0x08, -+ 0x93, 0x84, 0x21, 0xb5, 0x13, 0x88, 0x3c, 0x0d -+}; -+ -+static const u8 dec_input004[] __initconst = { -+ 0xb7, 0x1b, 0xb0, 0x73, 0x59, 0xb0, 0x84, 0xb2, -+ 0x6d, 0x8e, 0xab, 0x94, 0x31, 0xa1, 0xae, 0xac, -+ 0x89 -+}; -+static const u8 dec_output004[] __initconst = { -+ 0xa4 -+}; -+static const u8 dec_assoc004[] __initconst = { -+ 0x6a, 0xe2, 0xad, 0x3f, 0x88, 0x39, 0x5a, 0x40 -+}; -+static const u8 dec_nonce004[] __initconst = { -+ 0xd2, 0x32, 0x1f, 0x29, 0x28, 0xc6, 0xc4, 0xc4 -+}; -+static const u8 dec_key004[] __initconst = { -+ 0x4b, 0x28, 0x4b, 0xa3, 0x7b, 0xbe, 0xe9, 0xf8, -+ 0x31, 0x80, 0x82, 0xd7, 0xd8, 0xe8, 0xb5, 0xa1, -+ 0xe2, 0x18, 0x18, 0x8a, 0x9c, 0xfa, 0xa3, 0x3d, -+ 0x25, 0x71, 0x3e, 0x40, 0xbc, 0x54, 0x7a, 0x3e -+}; -+ -+static const u8 dec_input005[] __initconst = { -+ 0xbf, 0xe1, 0x5b, 0x0b, 0xdb, 0x6b, 0xf5, 0x5e, -+ 0x6c, 0x5d, 0x84, 0x44, 0x39, 0x81, 0xc1, 0x9c, -+ 0xac -+}; -+static const u8 dec_output005[] __initconst = { -+ 0x2d -+}; -+static const u8 dec_assoc005[] __initconst = { }; -+static const u8 dec_nonce005[] __initconst = { -+ 0x20, 0x1c, 
0xaa, 0x5f, 0x9c, 0xbf, 0x92, 0x30 -+}; -+static const u8 dec_key005[] __initconst = { -+ 0x66, 0xca, 0x9c, 0x23, 0x2a, 0x4b, 0x4b, 0x31, -+ 0x0e, 0x92, 0x89, 0x8b, 0xf4, 0x93, 0xc7, 0x87, -+ 0x98, 0xa3, 0xd8, 0x39, 0xf8, 0xf4, 0xa7, 0x01, -+ 0xc0, 0x2e, 0x0a, 0xa6, 0x7e, 0x5a, 0x78, 0x87 -+}; -+ -+static const u8 dec_input006[] __initconst = { -+ 0x8b, 0x06, 0xd3, 0x31, 0xb0, 0x93, 0x45, 0xb1, -+ 0x75, 0x6e, 0x26, 0xf9, 0x67, 0xbc, 0x90, 0x15, -+ 0x81, 0x2c, 0xb5, 0xf0, 0xc6, 0x2b, 0xc7, 0x8c, -+ 0x56, 0xd1, 0xbf, 0x69, 0x6c, 0x07, 0xa0, 0xda, -+ 0x65, 0x27, 0xc9, 0x90, 0x3d, 0xef, 0x4b, 0x11, -+ 0x0f, 0x19, 0x07, 0xfd, 0x29, 0x92, 0xd9, 0xc8, -+ 0xf7, 0x99, 0x2e, 0x4a, 0xd0, 0xb8, 0x2c, 0xdc, -+ 0x93, 0xf5, 0x9e, 0x33, 0x78, 0xd1, 0x37, 0xc3, -+ 0x66, 0xd7, 0x5e, 0xbc, 0x44, 0xbf, 0x53, 0xa5, -+ 0xbc, 0xc4, 0xcb, 0x7b, 0x3a, 0x8e, 0x7f, 0x02, -+ 0xbd, 0xbb, 0xe7, 0xca, 0xa6, 0x6c, 0x6b, 0x93, -+ 0x21, 0x93, 0x10, 0x61, 0xe7, 0x69, 0xd0, 0x78, -+ 0xf3, 0x07, 0x5a, 0x1a, 0x8f, 0x73, 0xaa, 0xb1, -+ 0x4e, 0xd3, 0xda, 0x4f, 0xf3, 0x32, 0xe1, 0x66, -+ 0x3e, 0x6c, 0xc6, 0x13, 0xba, 0x06, 0x5b, 0xfc, -+ 0x6a, 0xe5, 0x6f, 0x60, 0xfb, 0x07, 0x40, 0xb0, -+ 0x8c, 0x9d, 0x84, 0x43, 0x6b, 0xc1, 0xf7, 0x8d, -+ 0x8d, 0x31, 0xf7, 0x7a, 0x39, 0x4d, 0x8f, 0x9a, -+ 0xeb -+}; -+static const u8 dec_output006[] __initconst = { -+ 0x33, 0x2f, 0x94, 0xc1, 0xa4, 0xef, 0xcc, 0x2a, -+ 0x5b, 0xa6, 0xe5, 0x8f, 0x1d, 0x40, 0xf0, 0x92, -+ 0x3c, 0xd9, 0x24, 0x11, 0xa9, 0x71, 0xf9, 0x37, -+ 0x14, 0x99, 0xfa, 0xbe, 0xe6, 0x80, 0xde, 0x50, -+ 0xc9, 0x96, 0xd4, 0xb0, 0xec, 0x9e, 0x17, 0xec, -+ 0xd2, 0x5e, 0x72, 0x99, 0xfc, 0x0a, 0xe1, 0xcb, -+ 0x48, 0xd2, 0x85, 0xdd, 0x2f, 0x90, 0xe0, 0x66, -+ 0x3b, 0xe6, 0x20, 0x74, 0xbe, 0x23, 0x8f, 0xcb, -+ 0xb4, 0xe4, 0xda, 0x48, 0x40, 0xa6, 0xd1, 0x1b, -+ 0xc7, 0x42, 0xce, 0x2f, 0x0c, 0xa6, 0x85, 0x6e, -+ 0x87, 0x37, 0x03, 0xb1, 0x7c, 0x25, 0x96, 0xa3, -+ 0x05, 0xd8, 0xb0, 0xf4, 0xed, 0xea, 0xc2, 0xf0, -+ 0x31, 0x98, 0x6c, 0xd1, 0x14, 0x25, 0xc0, 0xcb, -+ 0x01, 
0x74, 0xd0, 0x82, 0xf4, 0x36, 0xf5, 0x41, -+ 0xd5, 0xdc, 0xca, 0xc5, 0xbb, 0x98, 0xfe, 0xfc, -+ 0x69, 0x21, 0x70, 0xd8, 0xa4, 0x4b, 0xc8, 0xde, -+ 0x8f -+}; -+static const u8 dec_assoc006[] __initconst = { -+ 0x70, 0xd3, 0x33, 0xf3, 0x8b, 0x18, 0x0b -+}; -+static const u8 dec_nonce006[] __initconst = { -+ 0xdf, 0x51, 0x84, 0x82, 0x42, 0x0c, 0x75, 0x9c -+}; -+static const u8 dec_key006[] __initconst = { -+ 0x68, 0x7b, 0x8d, 0x8e, 0xe3, 0xc4, 0xdd, 0xae, -+ 0xdf, 0x72, 0x7f, 0x53, 0x72, 0x25, 0x1e, 0x78, -+ 0x91, 0xcb, 0x69, 0x76, 0x1f, 0x49, 0x93, 0xf9, -+ 0x6f, 0x21, 0xcc, 0x39, 0x9c, 0xad, 0xb1, 0x01 -+}; -+ -+static const u8 dec_input007[] __initconst = { -+ 0x85, 0x04, 0xc2, 0xed, 0x8d, 0xfd, 0x97, 0x5c, -+ 0xd2, 0xb7, 0xe2, 0xc1, 0x6b, 0xa3, 0xba, 0xf8, -+ 0xc9, 0x50, 0xc3, 0xc6, 0xa5, 0xe3, 0xa4, 0x7c, -+ 0xc3, 0x23, 0x49, 0x5e, 0xa9, 0xb9, 0x32, 0xeb, -+ 0x8a, 0x7c, 0xca, 0xe5, 0xec, 0xfb, 0x7c, 0xc0, -+ 0xcb, 0x7d, 0xdc, 0x2c, 0x9d, 0x92, 0x55, 0x21, -+ 0x0a, 0xc8, 0x43, 0x63, 0x59, 0x0a, 0x31, 0x70, -+ 0x82, 0x67, 0x41, 0x03, 0xf8, 0xdf, 0xf2, 0xac, -+ 0xa7, 0x02, 0xd4, 0xd5, 0x8a, 0x2d, 0xc8, 0x99, -+ 0x19, 0x66, 0xd0, 0xf6, 0x88, 0x2c, 0x77, 0xd9, -+ 0xd4, 0x0d, 0x6c, 0xbd, 0x98, 0xde, 0xe7, 0x7f, -+ 0xad, 0x7e, 0x8a, 0xfb, 0xe9, 0x4b, 0xe5, 0xf7, -+ 0xe5, 0x50, 0xa0, 0x90, 0x3f, 0xd6, 0x22, 0x53, -+ 0xe3, 0xfe, 0x1b, 0xcc, 0x79, 0x3b, 0xec, 0x12, -+ 0x47, 0x52, 0xa7, 0xd6, 0x04, 0xe3, 0x52, 0xe6, -+ 0x93, 0x90, 0x91, 0x32, 0x73, 0x79, 0xb8, 0xd0, -+ 0x31, 0xde, 0x1f, 0x9f, 0x2f, 0x05, 0x38, 0x54, -+ 0x2f, 0x35, 0x04, 0x39, 0xe0, 0xa7, 0xba, 0xc6, -+ 0x52, 0xf6, 0x37, 0x65, 0x4c, 0x07, 0xa9, 0x7e, -+ 0xb3, 0x21, 0x6f, 0x74, 0x8c, 0xc9, 0xde, 0xdb, -+ 0x65, 0x1b, 0x9b, 0xaa, 0x60, 0xb1, 0x03, 0x30, -+ 0x6b, 0xb2, 0x03, 0xc4, 0x1c, 0x04, 0xf8, 0x0f, -+ 0x64, 0xaf, 0x46, 0xe4, 0x65, 0x99, 0x49, 0xe2, -+ 0xea, 0xce, 0x78, 0x00, 0xd8, 0x8b, 0xd5, 0x2e, -+ 0xcf, 0xfc, 0x40, 0x49, 0xe8, 0x58, 0xdc, 0x34, -+ 0x9c, 0x8c, 0x61, 0xbf, 0x0a, 0x8e, 0xec, 0x39, -+ 
0xa9, 0x30, 0x05, 0x5a, 0xd2, 0x56, 0x01, 0xc7, -+ 0xda, 0x8f, 0x4e, 0xbb, 0x43, 0xa3, 0x3a, 0xf9, -+ 0x15, 0x2a, 0xd0, 0xa0, 0x7a, 0x87, 0x34, 0x82, -+ 0xfe, 0x8a, 0xd1, 0x2d, 0x5e, 0xc7, 0xbf, 0x04, -+ 0x53, 0x5f, 0x3b, 0x36, 0xd4, 0x25, 0x5c, 0x34, -+ 0x7a, 0x8d, 0xd5, 0x05, 0xce, 0x72, 0xca, 0xef, -+ 0x7a, 0x4b, 0xbc, 0xb0, 0x10, 0x5c, 0x96, 0x42, -+ 0x3a, 0x00, 0x98, 0xcd, 0x15, 0xe8, 0xb7, 0x53 -+}; -+static const u8 dec_output007[] __initconst = { -+ 0x9b, 0x18, 0xdb, 0xdd, 0x9a, 0x0f, 0x3e, 0xa5, -+ 0x15, 0x17, 0xde, 0xdf, 0x08, 0x9d, 0x65, 0x0a, -+ 0x67, 0x30, 0x12, 0xe2, 0x34, 0x77, 0x4b, 0xc1, -+ 0xd9, 0xc6, 0x1f, 0xab, 0xc6, 0x18, 0x50, 0x17, -+ 0xa7, 0x9d, 0x3c, 0xa6, 0xc5, 0x35, 0x8c, 0x1c, -+ 0xc0, 0xa1, 0x7c, 0x9f, 0x03, 0x89, 0xca, 0xe1, -+ 0xe6, 0xe9, 0xd4, 0xd3, 0x88, 0xdb, 0xb4, 0x51, -+ 0x9d, 0xec, 0xb4, 0xfc, 0x52, 0xee, 0x6d, 0xf1, -+ 0x75, 0x42, 0xc6, 0xfd, 0xbd, 0x7a, 0x8e, 0x86, -+ 0xfc, 0x44, 0xb3, 0x4f, 0xf3, 0xea, 0x67, 0x5a, -+ 0x41, 0x13, 0xba, 0xb0, 0xdc, 0xe1, 0xd3, 0x2a, -+ 0x7c, 0x22, 0xb3, 0xca, 0xac, 0x6a, 0x37, 0x98, -+ 0x3e, 0x1d, 0x40, 0x97, 0xf7, 0x9b, 0x1d, 0x36, -+ 0x6b, 0xb3, 0x28, 0xbd, 0x60, 0x82, 0x47, 0x34, -+ 0xaa, 0x2f, 0x7d, 0xe9, 0xa8, 0x70, 0x81, 0x57, -+ 0xd4, 0xb9, 0x77, 0x0a, 0x9d, 0x29, 0xa7, 0x84, -+ 0x52, 0x4f, 0xc2, 0x4a, 0x40, 0x3b, 0x3c, 0xd4, -+ 0xc9, 0x2a, 0xdb, 0x4a, 0x53, 0xc4, 0xbe, 0x80, -+ 0xe9, 0x51, 0x7f, 0x8f, 0xc7, 0xa2, 0xce, 0x82, -+ 0x5c, 0x91, 0x1e, 0x74, 0xd9, 0xd0, 0xbd, 0xd5, -+ 0xf3, 0xfd, 0xda, 0x4d, 0x25, 0xb4, 0xbb, 0x2d, -+ 0xac, 0x2f, 0x3d, 0x71, 0x85, 0x7b, 0xcf, 0x3c, -+ 0x7b, 0x3e, 0x0e, 0x22, 0x78, 0x0c, 0x29, 0xbf, -+ 0xe4, 0xf4, 0x57, 0xb3, 0xcb, 0x49, 0xa0, 0xfc, -+ 0x1e, 0x05, 0x4e, 0x16, 0xbc, 0xd5, 0xa8, 0xa3, -+ 0xee, 0x05, 0x35, 0xc6, 0x7c, 0xab, 0x60, 0x14, -+ 0x55, 0x1a, 0x8e, 0xc5, 0x88, 0x5d, 0xd5, 0x81, -+ 0xc2, 0x81, 0xa5, 0xc4, 0x60, 0xdb, 0xaf, 0x77, -+ 0x91, 0xe1, 0xce, 0xa2, 0x7e, 0x7f, 0x42, 0xe3, -+ 0xb0, 0x13, 0x1c, 0x1f, 0x25, 0x60, 0x21, 0xe2, -+ 0x40, 
0x5f, 0x99, 0xb7, 0x73, 0xec, 0x9b, 0x2b, -+ 0xf0, 0x65, 0x11, 0xc8, 0xd0, 0x0a, 0x9f, 0xd3 -+}; -+static const u8 dec_assoc007[] __initconst = { }; -+static const u8 dec_nonce007[] __initconst = { -+ 0xde, 0x7b, 0xef, 0xc3, 0x65, 0x1b, 0x68, 0xb0 -+}; -+static const u8 dec_key007[] __initconst = { -+ 0x8d, 0xb8, 0x91, 0x48, 0xf0, 0xe7, 0x0a, 0xbd, -+ 0xf9, 0x3f, 0xcd, 0xd9, 0xa0, 0x1e, 0x42, 0x4c, -+ 0xe7, 0xde, 0x25, 0x3d, 0xa3, 0xd7, 0x05, 0x80, -+ 0x8d, 0xf2, 0x82, 0xac, 0x44, 0x16, 0x51, 0x01 -+}; -+ -+static const u8 dec_input008[] __initconst = { -+ 0x14, 0xf6, 0x41, 0x37, 0xa6, 0xd4, 0x27, 0xcd, -+ 0xdb, 0x06, 0x3e, 0x9a, 0x4e, 0xab, 0xd5, 0xb1, -+ 0x1e, 0x6b, 0xd2, 0xbc, 0x11, 0xf4, 0x28, 0x93, -+ 0x63, 0x54, 0xef, 0xbb, 0x5e, 0x1d, 0x3a, 0x1d, -+ 0x37, 0x3c, 0x0a, 0x6c, 0x1e, 0xc2, 0xd1, 0x2c, -+ 0xb5, 0xa3, 0xb5, 0x7b, 0xb8, 0x8f, 0x25, 0xa6, -+ 0x1b, 0x61, 0x1c, 0xec, 0x28, 0x58, 0x26, 0xa4, -+ 0xa8, 0x33, 0x28, 0x25, 0x5c, 0x45, 0x05, 0xe5, -+ 0x6c, 0x99, 0xe5, 0x45, 0xc4, 0xa2, 0x03, 0x84, -+ 0x03, 0x73, 0x1e, 0x8c, 0x49, 0xac, 0x20, 0xdd, -+ 0x8d, 0xb3, 0xc4, 0xf5, 0xe7, 0x4f, 0xf1, 0xed, -+ 0xa1, 0x98, 0xde, 0xa4, 0x96, 0xdd, 0x2f, 0xab, -+ 0xab, 0x97, 0xcf, 0x3e, 0xd2, 0x9e, 0xb8, 0x13, -+ 0x07, 0x28, 0x29, 0x19, 0xaf, 0xfd, 0xf2, 0x49, -+ 0x43, 0xea, 0x49, 0x26, 0x91, 0xc1, 0x07, 0xd6, -+ 0xbb, 0x81, 0x75, 0x35, 0x0d, 0x24, 0x7f, 0xc8, -+ 0xda, 0xd4, 0xb7, 0xeb, 0xe8, 0x5c, 0x09, 0xa2, -+ 0x2f, 0xdc, 0x28, 0x7d, 0x3a, 0x03, 0xfa, 0x94, -+ 0xb5, 0x1d, 0x17, 0x99, 0x36, 0xc3, 0x1c, 0x18, -+ 0x34, 0xe3, 0x9f, 0xf5, 0x55, 0x7c, 0xb0, 0x60, -+ 0x9d, 0xff, 0xac, 0xd4, 0x61, 0xf2, 0xad, 0xf8, -+ 0xce, 0xc7, 0xbe, 0x5c, 0xd2, 0x95, 0xa8, 0x4b, -+ 0x77, 0x13, 0x19, 0x59, 0x26, 0xc9, 0xb7, 0x8f, -+ 0x6a, 0xcb, 0x2d, 0x37, 0x91, 0xea, 0x92, 0x9c, -+ 0x94, 0x5b, 0xda, 0x0b, 0xce, 0xfe, 0x30, 0x20, -+ 0xf8, 0x51, 0xad, 0xf2, 0xbe, 0xe7, 0xc7, 0xff, -+ 0xb3, 0x33, 0x91, 0x6a, 0xc9, 0x1a, 0x41, 0xc9, -+ 0x0f, 0xf3, 0x10, 0x0e, 0xfd, 0x53, 0xff, 0x6c, -+ 0x16, 
0x52, 0xd9, 0xf3, 0xf7, 0x98, 0x2e, 0xc9, -+ 0x07, 0x31, 0x2c, 0x0c, 0x72, 0xd7, 0xc5, 0xc6, -+ 0x08, 0x2a, 0x7b, 0xda, 0xbd, 0x7e, 0x02, 0xea, -+ 0x1a, 0xbb, 0xf2, 0x04, 0x27, 0x61, 0x28, 0x8e, -+ 0xf5, 0x04, 0x03, 0x1f, 0x4c, 0x07, 0x55, 0x82, -+ 0xec, 0x1e, 0xd7, 0x8b, 0x2f, 0x65, 0x56, 0xd1, -+ 0xd9, 0x1e, 0x3c, 0xe9, 0x1f, 0x5e, 0x98, 0x70, -+ 0x38, 0x4a, 0x8c, 0x49, 0xc5, 0x43, 0xa0, 0xa1, -+ 0x8b, 0x74, 0x9d, 0x4c, 0x62, 0x0d, 0x10, 0x0c, -+ 0xf4, 0x6c, 0x8f, 0xe0, 0xaa, 0x9a, 0x8d, 0xb7, -+ 0xe0, 0xbe, 0x4c, 0x87, 0xf1, 0x98, 0x2f, 0xcc, -+ 0xed, 0xc0, 0x52, 0x29, 0xdc, 0x83, 0xf8, 0xfc, -+ 0x2c, 0x0e, 0xa8, 0x51, 0x4d, 0x80, 0x0d, 0xa3, -+ 0xfe, 0xd8, 0x37, 0xe7, 0x41, 0x24, 0xfc, 0xfb, -+ 0x75, 0xe3, 0x71, 0x7b, 0x57, 0x45, 0xf5, 0x97, -+ 0x73, 0x65, 0x63, 0x14, 0x74, 0xb8, 0x82, 0x9f, -+ 0xf8, 0x60, 0x2f, 0x8a, 0xf2, 0x4e, 0xf1, 0x39, -+ 0xda, 0x33, 0x91, 0xf8, 0x36, 0xe0, 0x8d, 0x3f, -+ 0x1f, 0x3b, 0x56, 0xdc, 0xa0, 0x8f, 0x3c, 0x9d, -+ 0x71, 0x52, 0xa7, 0xb8, 0xc0, 0xa5, 0xc6, 0xa2, -+ 0x73, 0xda, 0xf4, 0x4b, 0x74, 0x5b, 0x00, 0x3d, -+ 0x99, 0xd7, 0x96, 0xba, 0xe6, 0xe1, 0xa6, 0x96, -+ 0x38, 0xad, 0xb3, 0xc0, 0xd2, 0xba, 0x91, 0x6b, -+ 0xf9, 0x19, 0xdd, 0x3b, 0xbe, 0xbe, 0x9c, 0x20, -+ 0x50, 0xba, 0xa1, 0xd0, 0xce, 0x11, 0xbd, 0x95, -+ 0xd8, 0xd1, 0xdd, 0x33, 0x85, 0x74, 0xdc, 0xdb, -+ 0x66, 0x76, 0x44, 0xdc, 0x03, 0x74, 0x48, 0x35, -+ 0x98, 0xb1, 0x18, 0x47, 0x94, 0x7d, 0xff, 0x62, -+ 0xe4, 0x58, 0x78, 0xab, 0xed, 0x95, 0x36, 0xd9, -+ 0x84, 0x91, 0x82, 0x64, 0x41, 0xbb, 0x58, 0xe6, -+ 0x1c, 0x20, 0x6d, 0x15, 0x6b, 0x13, 0x96, 0xe8, -+ 0x35, 0x7f, 0xdc, 0x40, 0x2c, 0xe9, 0xbc, 0x8a, -+ 0x4f, 0x92, 0xec, 0x06, 0x2d, 0x50, 0xdf, 0x93, -+ 0x5d, 0x65, 0x5a, 0xa8, 0xfc, 0x20, 0x50, 0x14, -+ 0xa9, 0x8a, 0x7e, 0x1d, 0x08, 0x1f, 0xe2, 0x99, -+ 0xd0, 0xbe, 0xfb, 0x3a, 0x21, 0x9d, 0xad, 0x86, -+ 0x54, 0xfd, 0x0d, 0x98, 0x1c, 0x5a, 0x6f, 0x1f, -+ 0x9a, 0x40, 0xcd, 0xa2, 0xff, 0x6a, 0xf1, 0x54 -+}; -+static const u8 dec_output008[] __initconst = { -+ 0xc3, 0x09, 
0x94, 0x62, 0xe6, 0x46, 0x2e, 0x10, -+ 0xbe, 0x00, 0xe4, 0xfc, 0xf3, 0x40, 0xa3, 0xe2, -+ 0x0f, 0xc2, 0x8b, 0x28, 0xdc, 0xba, 0xb4, 0x3c, -+ 0xe4, 0x21, 0x58, 0x61, 0xcd, 0x8b, 0xcd, 0xfb, -+ 0xac, 0x94, 0xa1, 0x45, 0xf5, 0x1c, 0xe1, 0x12, -+ 0xe0, 0x3b, 0x67, 0x21, 0x54, 0x5e, 0x8c, 0xaa, -+ 0xcf, 0xdb, 0xb4, 0x51, 0xd4, 0x13, 0xda, 0xe6, -+ 0x83, 0x89, 0xb6, 0x92, 0xe9, 0x21, 0x76, 0xa4, -+ 0x93, 0x7d, 0x0e, 0xfd, 0x96, 0x36, 0x03, 0x91, -+ 0x43, 0x5c, 0x92, 0x49, 0x62, 0x61, 0x7b, 0xeb, -+ 0x43, 0x89, 0xb8, 0x12, 0x20, 0x43, 0xd4, 0x47, -+ 0x06, 0x84, 0xee, 0x47, 0xe9, 0x8a, 0x73, 0x15, -+ 0x0f, 0x72, 0xcf, 0xed, 0xce, 0x96, 0xb2, 0x7f, -+ 0x21, 0x45, 0x76, 0xeb, 0x26, 0x28, 0x83, 0x6a, -+ 0xad, 0xaa, 0xa6, 0x81, 0xd8, 0x55, 0xb1, 0xa3, -+ 0x85, 0xb3, 0x0c, 0xdf, 0xf1, 0x69, 0x2d, 0x97, -+ 0x05, 0x2a, 0xbc, 0x7c, 0x7b, 0x25, 0xf8, 0x80, -+ 0x9d, 0x39, 0x25, 0xf3, 0x62, 0xf0, 0x66, 0x5e, -+ 0xf4, 0xa0, 0xcf, 0xd8, 0xfd, 0x4f, 0xb1, 0x1f, -+ 0x60, 0x3a, 0x08, 0x47, 0xaf, 0xe1, 0xf6, 0x10, -+ 0x77, 0x09, 0xa7, 0x27, 0x8f, 0x9a, 0x97, 0x5a, -+ 0x26, 0xfa, 0xfe, 0x41, 0x32, 0x83, 0x10, 0xe0, -+ 0x1d, 0xbf, 0x64, 0x0d, 0xf4, 0x1c, 0x32, 0x35, -+ 0xe5, 0x1b, 0x36, 0xef, 0xd4, 0x4a, 0x93, 0x4d, -+ 0x00, 0x7c, 0xec, 0x02, 0x07, 0x8b, 0x5d, 0x7d, -+ 0x1b, 0x0e, 0xd1, 0xa6, 0xa5, 0x5d, 0x7d, 0x57, -+ 0x88, 0xa8, 0xcc, 0x81, 0xb4, 0x86, 0x4e, 0xb4, -+ 0x40, 0xe9, 0x1d, 0xc3, 0xb1, 0x24, 0x3e, 0x7f, -+ 0xcc, 0x8a, 0x24, 0x9b, 0xdf, 0x6d, 0xf0, 0x39, -+ 0x69, 0x3e, 0x4c, 0xc0, 0x96, 0xe4, 0x13, 0xda, -+ 0x90, 0xda, 0xf4, 0x95, 0x66, 0x8b, 0x17, 0x17, -+ 0xfe, 0x39, 0x43, 0x25, 0xaa, 0xda, 0xa0, 0x43, -+ 0x3c, 0xb1, 0x41, 0x02, 0xa3, 0xf0, 0xa7, 0x19, -+ 0x59, 0xbc, 0x1d, 0x7d, 0x6c, 0x6d, 0x91, 0x09, -+ 0x5c, 0xb7, 0x5b, 0x01, 0xd1, 0x6f, 0x17, 0x21, -+ 0x97, 0xbf, 0x89, 0x71, 0xa5, 0xb0, 0x6e, 0x07, -+ 0x45, 0xfd, 0x9d, 0xea, 0x07, 0xf6, 0x7a, 0x9f, -+ 0x10, 0x18, 0x22, 0x30, 0x73, 0xac, 0xd4, 0x6b, -+ 0x72, 0x44, 0xed, 0xd9, 0x19, 0x9b, 0x2d, 0x4a, -+ 0x41, 0xdd, 0xd1, 
0x85, 0x5e, 0x37, 0x19, 0xed, -+ 0xd2, 0x15, 0x8f, 0x5e, 0x91, 0xdb, 0x33, 0xf2, -+ 0xe4, 0xdb, 0xff, 0x98, 0xfb, 0xa3, 0xb5, 0xca, -+ 0x21, 0x69, 0x08, 0xe7, 0x8a, 0xdf, 0x90, 0xff, -+ 0x3e, 0xe9, 0x20, 0x86, 0x3c, 0xe9, 0xfc, 0x0b, -+ 0xfe, 0x5c, 0x61, 0xaa, 0x13, 0x92, 0x7f, 0x7b, -+ 0xec, 0xe0, 0x6d, 0xa8, 0x23, 0x22, 0xf6, 0x6b, -+ 0x77, 0xc4, 0xfe, 0x40, 0x07, 0x3b, 0xb6, 0xf6, -+ 0x8e, 0x5f, 0xd4, 0xb9, 0xb7, 0x0f, 0x21, 0x04, -+ 0xef, 0x83, 0x63, 0x91, 0x69, 0x40, 0xa3, 0x48, -+ 0x5c, 0xd2, 0x60, 0xf9, 0x4f, 0x6c, 0x47, 0x8b, -+ 0x3b, 0xb1, 0x9f, 0x8e, 0xee, 0x16, 0x8a, 0x13, -+ 0xfc, 0x46, 0x17, 0xc3, 0xc3, 0x32, 0x56, 0xf8, -+ 0x3c, 0x85, 0x3a, 0xb6, 0x3e, 0xaa, 0x89, 0x4f, -+ 0xb3, 0xdf, 0x38, 0xfd, 0xf1, 0xe4, 0x3a, 0xc0, -+ 0xe6, 0x58, 0xb5, 0x8f, 0xc5, 0x29, 0xa2, 0x92, -+ 0x4a, 0xb6, 0xa0, 0x34, 0x7f, 0xab, 0xb5, 0x8a, -+ 0x90, 0xa1, 0xdb, 0x4d, 0xca, 0xb6, 0x2c, 0x41, -+ 0x3c, 0xf7, 0x2b, 0x21, 0xc3, 0xfd, 0xf4, 0x17, -+ 0x5c, 0xb5, 0x33, 0x17, 0x68, 0x2b, 0x08, 0x30, -+ 0xf3, 0xf7, 0x30, 0x3c, 0x96, 0xe6, 0x6a, 0x20, -+ 0x97, 0xe7, 0x4d, 0x10, 0x5f, 0x47, 0x5f, 0x49, -+ 0x96, 0x09, 0xf0, 0x27, 0x91, 0xc8, 0xf8, 0x5a, -+ 0x2e, 0x79, 0xb5, 0xe2, 0xb8, 0xe8, 0xb9, 0x7b, -+ 0xd5, 0x10, 0xcb, 0xff, 0x5d, 0x14, 0x73, 0xf3 -+}; -+static const u8 dec_assoc008[] __initconst = { }; -+static const u8 dec_nonce008[] __initconst = { -+ 0x0e, 0x0d, 0x57, 0xbb, 0x7b, 0x40, 0x54, 0x02 -+}; -+static const u8 dec_key008[] __initconst = { -+ 0xf2, 0xaa, 0x4f, 0x99, 0xfd, 0x3e, 0xa8, 0x53, -+ 0xc1, 0x44, 0xe9, 0x81, 0x18, 0xdc, 0xf5, 0xf0, -+ 0x3e, 0x44, 0x15, 0x59, 0xe0, 0xc5, 0x44, 0x86, -+ 0xc3, 0x91, 0xa8, 0x75, 0xc0, 0x12, 0x46, 0xba -+}; -+ -+static const u8 dec_input009[] __initconst = { -+ 0xfd, 0x81, 0x8d, 0xd0, 0x3d, 0xb4, 0xd5, 0xdf, -+ 0xd3, 0x42, 0x47, 0x5a, 0x6d, 0x19, 0x27, 0x66, -+ 0x4b, 0x2e, 0x0c, 0x27, 0x9c, 0x96, 0x4c, 0x72, -+ 0x02, 0xa3, 0x65, 0xc3, 0xb3, 0x6f, 0x2e, 0xbd, -+ 0x63, 0x8a, 0x4a, 0x5d, 0x29, 0xa2, 0xd0, 0x28, -+ 0x48, 0xc5, 0x3d, 
0x98, 0xa3, 0xbc, 0xe0, 0xbe, -+ 0x3b, 0x3f, 0xe6, 0x8a, 0xa4, 0x7f, 0x53, 0x06, -+ 0xfa, 0x7f, 0x27, 0x76, 0x72, 0x31, 0xa1, 0xf5, -+ 0xd6, 0x0c, 0x52, 0x47, 0xba, 0xcd, 0x4f, 0xd7, -+ 0xeb, 0x05, 0x48, 0x0d, 0x7c, 0x35, 0x4a, 0x09, -+ 0xc9, 0x76, 0x71, 0x02, 0xa3, 0xfb, 0xb7, 0x1a, -+ 0x65, 0xb7, 0xed, 0x98, 0xc6, 0x30, 0x8a, 0x00, -+ 0xae, 0xa1, 0x31, 0xe5, 0xb5, 0x9e, 0x6d, 0x62, -+ 0xda, 0xda, 0x07, 0x0f, 0x38, 0x38, 0xd3, 0xcb, -+ 0xc1, 0xb0, 0xad, 0xec, 0x72, 0xec, 0xb1, 0xa2, -+ 0x7b, 0x59, 0xf3, 0x3d, 0x2b, 0xef, 0xcd, 0x28, -+ 0x5b, 0x83, 0xcc, 0x18, 0x91, 0x88, 0xb0, 0x2e, -+ 0xf9, 0x29, 0x31, 0x18, 0xf9, 0x4e, 0xe9, 0x0a, -+ 0x91, 0x92, 0x9f, 0xae, 0x2d, 0xad, 0xf4, 0xe6, -+ 0x1a, 0xe2, 0xa4, 0xee, 0x47, 0x15, 0xbf, 0x83, -+ 0x6e, 0xd7, 0x72, 0x12, 0x3b, 0x2d, 0x24, 0xe9, -+ 0xb2, 0x55, 0xcb, 0x3c, 0x10, 0xf0, 0x24, 0x8a, -+ 0x4a, 0x02, 0xea, 0x90, 0x25, 0xf0, 0xb4, 0x79, -+ 0x3a, 0xef, 0x6e, 0xf5, 0x52, 0xdf, 0xb0, 0x0a, -+ 0xcd, 0x24, 0x1c, 0xd3, 0x2e, 0x22, 0x74, 0xea, -+ 0x21, 0x6f, 0xe9, 0xbd, 0xc8, 0x3e, 0x36, 0x5b, -+ 0x19, 0xf1, 0xca, 0x99, 0x0a, 0xb4, 0xa7, 0x52, -+ 0x1a, 0x4e, 0xf2, 0xad, 0x8d, 0x56, 0x85, 0xbb, -+ 0x64, 0x89, 0xba, 0x26, 0xf9, 0xc7, 0xe1, 0x89, -+ 0x19, 0x22, 0x77, 0xc3, 0xa8, 0xfc, 0xff, 0xad, -+ 0xfe, 0xb9, 0x48, 0xae, 0x12, 0x30, 0x9f, 0x19, -+ 0xfb, 0x1b, 0xef, 0x14, 0x87, 0x8a, 0x78, 0x71, -+ 0xf3, 0xf4, 0xb7, 0x00, 0x9c, 0x1d, 0xb5, 0x3d, -+ 0x49, 0x00, 0x0c, 0x06, 0xd4, 0x50, 0xf9, 0x54, -+ 0x45, 0xb2, 0x5b, 0x43, 0xdb, 0x6d, 0xcf, 0x1a, -+ 0xe9, 0x7a, 0x7a, 0xcf, 0xfc, 0x8a, 0x4e, 0x4d, -+ 0x0b, 0x07, 0x63, 0x28, 0xd8, 0xe7, 0x08, 0x95, -+ 0xdf, 0xa6, 0x72, 0x93, 0x2e, 0xbb, 0xa0, 0x42, -+ 0x89, 0x16, 0xf1, 0xd9, 0x0c, 0xf9, 0xa1, 0x16, -+ 0xfd, 0xd9, 0x03, 0xb4, 0x3b, 0x8a, 0xf5, 0xf6, -+ 0xe7, 0x6b, 0x2e, 0x8e, 0x4c, 0x3d, 0xe2, 0xaf, -+ 0x08, 0x45, 0x03, 0xff, 0x09, 0xb6, 0xeb, 0x2d, -+ 0xc6, 0x1b, 0x88, 0x94, 0xac, 0x3e, 0xf1, 0x9f, -+ 0x0e, 0x0e, 0x2b, 0xd5, 0x00, 0x4d, 0x3f, 0x3b, -+ 0x53, 0xae, 0xaf, 0x1c, 
0x33, 0x5f, 0x55, 0x6e, -+ 0x8d, 0xaf, 0x05, 0x7a, 0x10, 0x34, 0xc9, 0xf4, -+ 0x66, 0xcb, 0x62, 0x12, 0xa6, 0xee, 0xe8, 0x1c, -+ 0x5d, 0x12, 0x86, 0xdb, 0x6f, 0x1c, 0x33, 0xc4, -+ 0x1c, 0xda, 0x82, 0x2d, 0x3b, 0x59, 0xfe, 0xb1, -+ 0xa4, 0x59, 0x41, 0x86, 0xd0, 0xef, 0xae, 0xfb, -+ 0xda, 0x6d, 0x11, 0xb8, 0xca, 0xe9, 0x6e, 0xff, -+ 0xf7, 0xa9, 0xd9, 0x70, 0x30, 0xfc, 0x53, 0xe2, -+ 0xd7, 0xa2, 0x4e, 0xc7, 0x91, 0xd9, 0x07, 0x06, -+ 0xaa, 0xdd, 0xb0, 0x59, 0x28, 0x1d, 0x00, 0x66, -+ 0xc5, 0x54, 0xc2, 0xfc, 0x06, 0xda, 0x05, 0x90, -+ 0x52, 0x1d, 0x37, 0x66, 0xee, 0xf0, 0xb2, 0x55, -+ 0x8a, 0x5d, 0xd2, 0x38, 0x86, 0x94, 0x9b, 0xfc, -+ 0x10, 0x4c, 0xa1, 0xb9, 0x64, 0x3e, 0x44, 0xb8, -+ 0x5f, 0xb0, 0x0c, 0xec, 0xe0, 0xc9, 0xe5, 0x62, -+ 0x75, 0x3f, 0x09, 0xd5, 0xf5, 0xd9, 0x26, 0xba, -+ 0x9e, 0xd2, 0xf4, 0xb9, 0x48, 0x0a, 0xbc, 0xa2, -+ 0xd6, 0x7c, 0x36, 0x11, 0x7d, 0x26, 0x81, 0x89, -+ 0xcf, 0xa4, 0xad, 0x73, 0x0e, 0xee, 0xcc, 0x06, -+ 0xa9, 0xdb, 0xb1, 0xfd, 0xfb, 0x09, 0x7f, 0x90, -+ 0x42, 0x37, 0x2f, 0xe1, 0x9c, 0x0f, 0x6f, 0xcf, -+ 0x43, 0xb5, 0xd9, 0x90, 0xe1, 0x85, 0xf5, 0xa8, -+ 0xae -+}; -+static const u8 dec_output009[] __initconst = { -+ 0xe6, 0xc3, 0xdb, 0x63, 0x55, 0x15, 0xe3, 0x5b, -+ 0xb7, 0x4b, 0x27, 0x8b, 0x5a, 0xdd, 0xc2, 0xe8, -+ 0x3a, 0x6b, 0xd7, 0x81, 0x96, 0x35, 0x97, 0xca, -+ 0xd7, 0x68, 0xe8, 0xef, 0xce, 0xab, 0xda, 0x09, -+ 0x6e, 0xd6, 0x8e, 0xcb, 0x55, 0xb5, 0xe1, 0xe5, -+ 0x57, 0xfd, 0xc4, 0xe3, 0xe0, 0x18, 0x4f, 0x85, -+ 0xf5, 0x3f, 0x7e, 0x4b, 0x88, 0xc9, 0x52, 0x44, -+ 0x0f, 0xea, 0xaf, 0x1f, 0x71, 0x48, 0x9f, 0x97, -+ 0x6d, 0xb9, 0x6f, 0x00, 0xa6, 0xde, 0x2b, 0x77, -+ 0x8b, 0x15, 0xad, 0x10, 0xa0, 0x2b, 0x7b, 0x41, -+ 0x90, 0x03, 0x2d, 0x69, 0xae, 0xcc, 0x77, 0x7c, -+ 0xa5, 0x9d, 0x29, 0x22, 0xc2, 0xea, 0xb4, 0x00, -+ 0x1a, 0xd2, 0x7a, 0x98, 0x8a, 0xf9, 0xf7, 0x82, -+ 0xb0, 0xab, 0xd8, 0xa6, 0x94, 0x8d, 0x58, 0x2f, -+ 0x01, 0x9e, 0x00, 0x20, 0xfc, 0x49, 0xdc, 0x0e, -+ 0x03, 0xe8, 0x45, 0x10, 0xd6, 0xa8, 0xda, 0x55, -+ 0x10, 0x9a, 0xdf, 
0x67, 0x22, 0x8b, 0x43, 0xab, -+ 0x00, 0xbb, 0x02, 0xc8, 0xdd, 0x7b, 0x97, 0x17, -+ 0xd7, 0x1d, 0x9e, 0x02, 0x5e, 0x48, 0xde, 0x8e, -+ 0xcf, 0x99, 0x07, 0x95, 0x92, 0x3c, 0x5f, 0x9f, -+ 0xc5, 0x8a, 0xc0, 0x23, 0xaa, 0xd5, 0x8c, 0x82, -+ 0x6e, 0x16, 0x92, 0xb1, 0x12, 0x17, 0x07, 0xc3, -+ 0xfb, 0x36, 0xf5, 0x6c, 0x35, 0xd6, 0x06, 0x1f, -+ 0x9f, 0xa7, 0x94, 0xa2, 0x38, 0x63, 0x9c, 0xb0, -+ 0x71, 0xb3, 0xa5, 0xd2, 0xd8, 0xba, 0x9f, 0x08, -+ 0x01, 0xb3, 0xff, 0x04, 0x97, 0x73, 0x45, 0x1b, -+ 0xd5, 0xa9, 0x9c, 0x80, 0xaf, 0x04, 0x9a, 0x85, -+ 0xdb, 0x32, 0x5b, 0x5d, 0x1a, 0xc1, 0x36, 0x28, -+ 0x10, 0x79, 0xf1, 0x3c, 0xbf, 0x1a, 0x41, 0x5c, -+ 0x4e, 0xdf, 0xb2, 0x7c, 0x79, 0x3b, 0x7a, 0x62, -+ 0x3d, 0x4b, 0xc9, 0x9b, 0x2a, 0x2e, 0x7c, 0xa2, -+ 0xb1, 0x11, 0x98, 0xa7, 0x34, 0x1a, 0x00, 0xf3, -+ 0xd1, 0xbc, 0x18, 0x22, 0xba, 0x02, 0x56, 0x62, -+ 0x31, 0x10, 0x11, 0x6d, 0xe0, 0x54, 0x9d, 0x40, -+ 0x1f, 0x26, 0x80, 0x41, 0xca, 0x3f, 0x68, 0x0f, -+ 0x32, 0x1d, 0x0a, 0x8e, 0x79, 0xd8, 0xa4, 0x1b, -+ 0x29, 0x1c, 0x90, 0x8e, 0xc5, 0xe3, 0xb4, 0x91, -+ 0x37, 0x9a, 0x97, 0x86, 0x99, 0xd5, 0x09, 0xc5, -+ 0xbb, 0xa3, 0x3f, 0x21, 0x29, 0x82, 0x14, 0x5c, -+ 0xab, 0x25, 0xfb, 0xf2, 0x4f, 0x58, 0x26, 0xd4, -+ 0x83, 0xaa, 0x66, 0x89, 0x67, 0x7e, 0xc0, 0x49, -+ 0xe1, 0x11, 0x10, 0x7f, 0x7a, 0xda, 0x29, 0x04, -+ 0xff, 0xf0, 0xcb, 0x09, 0x7c, 0x9d, 0xfa, 0x03, -+ 0x6f, 0x81, 0x09, 0x31, 0x60, 0xfb, 0x08, 0xfa, -+ 0x74, 0xd3, 0x64, 0x44, 0x7c, 0x55, 0x85, 0xec, -+ 0x9c, 0x6e, 0x25, 0xb7, 0x6c, 0xc5, 0x37, 0xb6, -+ 0x83, 0x87, 0x72, 0x95, 0x8b, 0x9d, 0xe1, 0x69, -+ 0x5c, 0x31, 0x95, 0x42, 0xa6, 0x2c, 0xd1, 0x36, -+ 0x47, 0x1f, 0xec, 0x54, 0xab, 0xa2, 0x1c, 0xd8, -+ 0x00, 0xcc, 0xbc, 0x0d, 0x65, 0xe2, 0x67, 0xbf, -+ 0xbc, 0xea, 0xee, 0x9e, 0xe4, 0x36, 0x95, 0xbe, -+ 0x73, 0xd9, 0xa6, 0xd9, 0x0f, 0xa0, 0xcc, 0x82, -+ 0x76, 0x26, 0xad, 0x5b, 0x58, 0x6c, 0x4e, 0xab, -+ 0x29, 0x64, 0xd3, 0xd9, 0xa9, 0x08, 0x8c, 0x1d, -+ 0xa1, 0x4f, 0x80, 0xd8, 0x3f, 0x94, 0xfb, 0xd3, -+ 0x7b, 0xfc, 0xd1, 0x2b, 
0xc3, 0x21, 0xeb, 0xe5, -+ 0x1c, 0x84, 0x23, 0x7f, 0x4b, 0xfa, 0xdb, 0x34, -+ 0x18, 0xa2, 0xc2, 0xe5, 0x13, 0xfe, 0x6c, 0x49, -+ 0x81, 0xd2, 0x73, 0xe7, 0xe2, 0xd7, 0xe4, 0x4f, -+ 0x4b, 0x08, 0x6e, 0xb1, 0x12, 0x22, 0x10, 0x9d, -+ 0xac, 0x51, 0x1e, 0x17, 0xd9, 0x8a, 0x0b, 0x42, -+ 0x88, 0x16, 0x81, 0x37, 0x7c, 0x6a, 0xf7, 0xef, -+ 0x2d, 0xe3, 0xd9, 0xf8, 0x5f, 0xe0, 0x53, 0x27, -+ 0x74, 0xb9, 0xe2, 0xd6, 0x1c, 0x80, 0x2c, 0x52, -+ 0x65 -+}; -+static const u8 dec_assoc009[] __initconst = { -+ 0x5a, 0x27, 0xff, 0xeb, 0xdf, 0x84, 0xb2, 0x9e, -+ 0xef -+}; -+static const u8 dec_nonce009[] __initconst = { -+ 0xef, 0x2d, 0x63, 0xee, 0x6b, 0x80, 0x8b, 0x78 -+}; -+static const u8 dec_key009[] __initconst = { -+ 0xea, 0xbc, 0x56, 0x99, 0xe3, 0x50, 0xff, 0xc5, -+ 0xcc, 0x1a, 0xd7, 0xc1, 0x57, 0x72, 0xea, 0x86, -+ 0x5b, 0x89, 0x88, 0x61, 0x3d, 0x2f, 0x9b, 0xb2, -+ 0xe7, 0x9c, 0xec, 0x74, 0x6e, 0x3e, 0xf4, 0x3b -+}; -+ -+static const u8 dec_input010[] __initconst = { -+ 0xe5, 0x26, 0xa4, 0x3d, 0xbd, 0x33, 0xd0, 0x4b, -+ 0x6f, 0x05, 0xa7, 0x6e, 0x12, 0x7a, 0xd2, 0x74, -+ 0xa6, 0xdd, 0xbd, 0x95, 0xeb, 0xf9, 0xa4, 0xf1, -+ 0x59, 0x93, 0x91, 0x70, 0xd9, 0xfe, 0x9a, 0xcd, -+ 0x53, 0x1f, 0x3a, 0xab, 0xa6, 0x7c, 0x9f, 0xa6, -+ 0x9e, 0xbd, 0x99, 0xd9, 0xb5, 0x97, 0x44, 0xd5, -+ 0x14, 0x48, 0x4d, 0x9d, 0xc0, 0xd0, 0x05, 0x96, -+ 0xeb, 0x4c, 0x78, 0x55, 0x09, 0x08, 0x01, 0x02, -+ 0x30, 0x90, 0x7b, 0x96, 0x7a, 0x7b, 0x5f, 0x30, -+ 0x41, 0x24, 0xce, 0x68, 0x61, 0x49, 0x86, 0x57, -+ 0x82, 0xdd, 0x53, 0x1c, 0x51, 0x28, 0x2b, 0x53, -+ 0x6e, 0x2d, 0xc2, 0x20, 0x4c, 0xdd, 0x8f, 0x65, -+ 0x10, 0x20, 0x50, 0xdd, 0x9d, 0x50, 0xe5, 0x71, -+ 0x40, 0x53, 0x69, 0xfc, 0x77, 0x48, 0x11, 0xb9, -+ 0xde, 0xa4, 0x8d, 0x58, 0xe4, 0xa6, 0x1a, 0x18, -+ 0x47, 0x81, 0x7e, 0xfc, 0xdd, 0xf6, 0xef, 0xce, -+ 0x2f, 0x43, 0x68, 0xd6, 0x06, 0xe2, 0x74, 0x6a, -+ 0xad, 0x90, 0xf5, 0x37, 0xf3, 0x3d, 0x82, 0x69, -+ 0x40, 0xe9, 0x6b, 0xa7, 0x3d, 0xa8, 0x1e, 0xd2, -+ 0x02, 0x7c, 0xb7, 0x9b, 0xe4, 0xda, 0x8f, 0x95, -+ 0x06, 
0xc5, 0xdf, 0x73, 0xa3, 0x20, 0x9a, 0x49, -+ 0xde, 0x9c, 0xbc, 0xee, 0x14, 0x3f, 0x81, 0x5e, -+ 0xf8, 0x3b, 0x59, 0x3c, 0xe1, 0x68, 0x12, 0x5a, -+ 0x3a, 0x76, 0x3a, 0x3f, 0xf7, 0x87, 0x33, 0x0a, -+ 0x01, 0xb8, 0xd4, 0xed, 0xb6, 0xbe, 0x94, 0x5e, -+ 0x70, 0x40, 0x56, 0x67, 0x1f, 0x50, 0x44, 0x19, -+ 0xce, 0x82, 0x70, 0x10, 0x87, 0x13, 0x20, 0x0b, -+ 0x4c, 0x5a, 0xb6, 0xf6, 0xa7, 0xae, 0x81, 0x75, -+ 0x01, 0x81, 0xe6, 0x4b, 0x57, 0x7c, 0xdd, 0x6d, -+ 0xf8, 0x1c, 0x29, 0x32, 0xf7, 0xda, 0x3c, 0x2d, -+ 0xf8, 0x9b, 0x25, 0x6e, 0x00, 0xb4, 0xf7, 0x2f, -+ 0xf7, 0x04, 0xf7, 0xa1, 0x56, 0xac, 0x4f, 0x1a, -+ 0x64, 0xb8, 0x47, 0x55, 0x18, 0x7b, 0x07, 0x4d, -+ 0xbd, 0x47, 0x24, 0x80, 0x5d, 0xa2, 0x70, 0xc5, -+ 0xdd, 0x8e, 0x82, 0xd4, 0xeb, 0xec, 0xb2, 0x0c, -+ 0x39, 0xd2, 0x97, 0xc1, 0xcb, 0xeb, 0xf4, 0x77, -+ 0x59, 0xb4, 0x87, 0xef, 0xcb, 0x43, 0x2d, 0x46, -+ 0x54, 0xd1, 0xa7, 0xd7, 0x15, 0x99, 0x0a, 0x43, -+ 0xa1, 0xe0, 0x99, 0x33, 0x71, 0xc1, 0xed, 0xfe, -+ 0x72, 0x46, 0x33, 0x8e, 0x91, 0x08, 0x9f, 0xc8, -+ 0x2e, 0xca, 0xfa, 0xdc, 0x59, 0xd5, 0xc3, 0x76, -+ 0x84, 0x9f, 0xa3, 0x37, 0x68, 0xc3, 0xf0, 0x47, -+ 0x2c, 0x68, 0xdb, 0x5e, 0xc3, 0x49, 0x4c, 0xe8, -+ 0x92, 0x85, 0xe2, 0x23, 0xd3, 0x3f, 0xad, 0x32, -+ 0xe5, 0x2b, 0x82, 0xd7, 0x8f, 0x99, 0x0a, 0x59, -+ 0x5c, 0x45, 0xd9, 0xb4, 0x51, 0x52, 0xc2, 0xae, -+ 0xbf, 0x80, 0xcf, 0xc9, 0xc9, 0x51, 0x24, 0x2a, -+ 0x3b, 0x3a, 0x4d, 0xae, 0xeb, 0xbd, 0x22, 0xc3, -+ 0x0e, 0x0f, 0x59, 0x25, 0x92, 0x17, 0xe9, 0x74, -+ 0xc7, 0x8b, 0x70, 0x70, 0x36, 0x55, 0x95, 0x75, -+ 0x4b, 0xad, 0x61, 0x2b, 0x09, 0xbc, 0x82, 0xf2, -+ 0x6e, 0x94, 0x43, 0xae, 0xc3, 0xd5, 0xcd, 0x8e, -+ 0xfe, 0x5b, 0x9a, 0x88, 0x43, 0x01, 0x75, 0xb2, -+ 0x23, 0x09, 0xf7, 0x89, 0x83, 0xe7, 0xfa, 0xf9, -+ 0xb4, 0x9b, 0xf8, 0xef, 0xbd, 0x1c, 0x92, 0xc1, -+ 0xda, 0x7e, 0xfe, 0x05, 0xba, 0x5a, 0xcd, 0x07, -+ 0x6a, 0x78, 0x9e, 0x5d, 0xfb, 0x11, 0x2f, 0x79, -+ 0x38, 0xb6, 0xc2, 0x5b, 0x6b, 0x51, 0xb4, 0x71, -+ 0xdd, 0xf7, 0x2a, 0xe4, 0xf4, 0x72, 0x76, 0xad, -+ 0xc2, 0xdd, 
0x64, 0x5d, 0x79, 0xb6, 0xf5, 0x7a, -+ 0x77, 0x20, 0x05, 0x3d, 0x30, 0x06, 0xd4, 0x4c, -+ 0x0a, 0x2c, 0x98, 0x5a, 0xb9, 0xd4, 0x98, 0xa9, -+ 0x3f, 0xc6, 0x12, 0xea, 0x3b, 0x4b, 0xc5, 0x79, -+ 0x64, 0x63, 0x6b, 0x09, 0x54, 0x3b, 0x14, 0x27, -+ 0xba, 0x99, 0x80, 0xc8, 0x72, 0xa8, 0x12, 0x90, -+ 0x29, 0xba, 0x40, 0x54, 0x97, 0x2b, 0x7b, 0xfe, -+ 0xeb, 0xcd, 0x01, 0x05, 0x44, 0x72, 0xdb, 0x99, -+ 0xe4, 0x61, 0xc9, 0x69, 0xd6, 0xb9, 0x28, 0xd1, -+ 0x05, 0x3e, 0xf9, 0x0b, 0x49, 0x0a, 0x49, 0xe9, -+ 0x8d, 0x0e, 0xa7, 0x4a, 0x0f, 0xaf, 0x32, 0xd0, -+ 0xe0, 0xb2, 0x3a, 0x55, 0x58, 0xfe, 0x5c, 0x28, -+ 0x70, 0x51, 0x23, 0xb0, 0x7b, 0x6a, 0x5f, 0x1e, -+ 0xb8, 0x17, 0xd7, 0x94, 0x15, 0x8f, 0xee, 0x20, -+ 0xc7, 0x42, 0x25, 0x3e, 0x9a, 0x14, 0xd7, 0x60, -+ 0x72, 0x39, 0x47, 0x48, 0xa9, 0xfe, 0xdd, 0x47, -+ 0x0a, 0xb1, 0xe6, 0x60, 0x28, 0x8c, 0x11, 0x68, -+ 0xe1, 0xff, 0xd7, 0xce, 0xc8, 0xbe, 0xb3, 0xfe, -+ 0x27, 0x30, 0x09, 0x70, 0xd7, 0xfa, 0x02, 0x33, -+ 0x3a, 0x61, 0x2e, 0xc7, 0xff, 0xa4, 0x2a, 0xa8, -+ 0x6e, 0xb4, 0x79, 0x35, 0x6d, 0x4c, 0x1e, 0x38, -+ 0xf8, 0xee, 0xd4, 0x84, 0x4e, 0x6e, 0x28, 0xa7, -+ 0xce, 0xc8, 0xc1, 0xcf, 0x80, 0x05, 0xf3, 0x04, -+ 0xef, 0xc8, 0x18, 0x28, 0x2e, 0x8d, 0x5e, 0x0c, -+ 0xdf, 0xb8, 0x5f, 0x96, 0xe8, 0xc6, 0x9c, 0x2f, -+ 0xe5, 0xa6, 0x44, 0xd7, 0xe7, 0x99, 0x44, 0x0c, -+ 0xec, 0xd7, 0x05, 0x60, 0x97, 0xbb, 0x74, 0x77, -+ 0x58, 0xd5, 0xbb, 0x48, 0xde, 0x5a, 0xb2, 0x54, -+ 0x7f, 0x0e, 0x46, 0x70, 0x6a, 0x6f, 0x78, 0xa5, -+ 0x08, 0x89, 0x05, 0x4e, 0x7e, 0xa0, 0x69, 0xb4, -+ 0x40, 0x60, 0x55, 0x77, 0x75, 0x9b, 0x19, 0xf2, -+ 0xd5, 0x13, 0x80, 0x77, 0xf9, 0x4b, 0x3f, 0x1e, -+ 0xee, 0xe6, 0x76, 0x84, 0x7b, 0x8c, 0xe5, 0x27, -+ 0xa8, 0x0a, 0x91, 0x01, 0x68, 0x71, 0x8a, 0x3f, -+ 0x06, 0xab, 0xf6, 0xa9, 0xa5, 0xe6, 0x72, 0x92, -+ 0xe4, 0x67, 0xe2, 0xa2, 0x46, 0x35, 0x84, 0x55, -+ 0x7d, 0xca, 0xa8, 0x85, 0xd0, 0xf1, 0x3f, 0xbe, -+ 0xd7, 0x34, 0x64, 0xfc, 0xae, 0xe3, 0xe4, 0x04, -+ 0x9f, 0x66, 0x02, 0xb9, 0x88, 0x10, 0xd9, 0xc4, -+ 0x4c, 0x31, 0x43, 
0x7a, 0x93, 0xe2, 0x9b, 0x56, -+ 0x43, 0x84, 0xdc, 0xdc, 0xde, 0x1d, 0xa4, 0x02, -+ 0x0e, 0xc2, 0xef, 0xc3, 0xf8, 0x78, 0xd1, 0xb2, -+ 0x6b, 0x63, 0x18, 0xc9, 0xa9, 0xe5, 0x72, 0xd8, -+ 0xf3, 0xb9, 0xd1, 0x8a, 0xc7, 0x1a, 0x02, 0x27, -+ 0x20, 0x77, 0x10, 0xe5, 0xc8, 0xd4, 0x4a, 0x47, -+ 0xe5, 0xdf, 0x5f, 0x01, 0xaa, 0xb0, 0xd4, 0x10, -+ 0xbb, 0x69, 0xe3, 0x36, 0xc8, 0xe1, 0x3d, 0x43, -+ 0xfb, 0x86, 0xcd, 0xcc, 0xbf, 0xf4, 0x88, 0xe0, -+ 0x20, 0xca, 0xb7, 0x1b, 0xf1, 0x2f, 0x5c, 0xee, -+ 0xd4, 0xd3, 0xa3, 0xcc, 0xa4, 0x1e, 0x1c, 0x47, -+ 0xfb, 0xbf, 0xfc, 0xa2, 0x41, 0x55, 0x9d, 0xf6, -+ 0x5a, 0x5e, 0x65, 0x32, 0x34, 0x7b, 0x52, 0x8d, -+ 0xd5, 0xd0, 0x20, 0x60, 0x03, 0xab, 0x3f, 0x8c, -+ 0xd4, 0x21, 0xea, 0x2a, 0xd9, 0xc4, 0xd0, 0xd3, -+ 0x65, 0xd8, 0x7a, 0x13, 0x28, 0x62, 0x32, 0x4b, -+ 0x2c, 0x87, 0x93, 0xa8, 0xb4, 0x52, 0x45, 0x09, -+ 0x44, 0xec, 0xec, 0xc3, 0x17, 0xdb, 0x9a, 0x4d, -+ 0x5c, 0xa9, 0x11, 0xd4, 0x7d, 0xaf, 0x9e, 0xf1, -+ 0x2d, 0xb2, 0x66, 0xc5, 0x1d, 0xed, 0xb7, 0xcd, -+ 0x0b, 0x25, 0x5e, 0x30, 0x47, 0x3f, 0x40, 0xf4, -+ 0xa1, 0xa0, 0x00, 0x94, 0x10, 0xc5, 0x6a, 0x63, -+ 0x1a, 0xd5, 0x88, 0x92, 0x8e, 0x82, 0x39, 0x87, -+ 0x3c, 0x78, 0x65, 0x58, 0x42, 0x75, 0x5b, 0xdd, -+ 0x77, 0x3e, 0x09, 0x4e, 0x76, 0x5b, 0xe6, 0x0e, -+ 0x4d, 0x38, 0xb2, 0xc0, 0xb8, 0x95, 0x01, 0x7a, -+ 0x10, 0xe0, 0xfb, 0x07, 0xf2, 0xab, 0x2d, 0x8c, -+ 0x32, 0xed, 0x2b, 0xc0, 0x46, 0xc2, 0xf5, 0x38, -+ 0x83, 0xf0, 0x17, 0xec, 0xc1, 0x20, 0x6a, 0x9a, -+ 0x0b, 0x00, 0xa0, 0x98, 0x22, 0x50, 0x23, 0xd5, -+ 0x80, 0x6b, 0xf6, 0x1f, 0xc3, 0xcc, 0x97, 0xc9, -+ 0x24, 0x9f, 0xf3, 0xaf, 0x43, 0x14, 0xd5, 0xa0 -+}; -+static const u8 dec_output010[] __initconst = { -+ 0x42, 0x93, 0xe4, 0xeb, 0x97, 0xb0, 0x57, 0xbf, -+ 0x1a, 0x8b, 0x1f, 0xe4, 0x5f, 0x36, 0x20, 0x3c, -+ 0xef, 0x0a, 0xa9, 0x48, 0x5f, 0x5f, 0x37, 0x22, -+ 0x3a, 0xde, 0xe3, 0xae, 0xbe, 0xad, 0x07, 0xcc, -+ 0xb1, 0xf6, 0xf5, 0xf9, 0x56, 0xdd, 0xe7, 0x16, -+ 0x1e, 0x7f, 0xdf, 0x7a, 0x9e, 0x75, 0xb7, 0xc7, -+ 0xbe, 0xbe, 0x8a, 0x36, 
0x04, 0xc0, 0x10, 0xf4, -+ 0x95, 0x20, 0x03, 0xec, 0xdc, 0x05, 0xa1, 0x7d, -+ 0xc4, 0xa9, 0x2c, 0x82, 0xd0, 0xbc, 0x8b, 0xc5, -+ 0xc7, 0x45, 0x50, 0xf6, 0xa2, 0x1a, 0xb5, 0x46, -+ 0x3b, 0x73, 0x02, 0xa6, 0x83, 0x4b, 0x73, 0x82, -+ 0x58, 0x5e, 0x3b, 0x65, 0x2f, 0x0e, 0xfd, 0x2b, -+ 0x59, 0x16, 0xce, 0xa1, 0x60, 0x9c, 0xe8, 0x3a, -+ 0x99, 0xed, 0x8d, 0x5a, 0xcf, 0xf6, 0x83, 0xaf, -+ 0xba, 0xd7, 0x73, 0x73, 0x40, 0x97, 0x3d, 0xca, -+ 0xef, 0x07, 0x57, 0xe6, 0xd9, 0x70, 0x0e, 0x95, -+ 0xae, 0xa6, 0x8d, 0x04, 0xcc, 0xee, 0xf7, 0x09, -+ 0x31, 0x77, 0x12, 0xa3, 0x23, 0x97, 0x62, 0xb3, -+ 0x7b, 0x32, 0xfb, 0x80, 0x14, 0x48, 0x81, 0xc3, -+ 0xe5, 0xea, 0x91, 0x39, 0x52, 0x81, 0xa2, 0x4f, -+ 0xe4, 0xb3, 0x09, 0xff, 0xde, 0x5e, 0xe9, 0x58, -+ 0x84, 0x6e, 0xf9, 0x3d, 0xdf, 0x25, 0xea, 0xad, -+ 0xae, 0xe6, 0x9a, 0xd1, 0x89, 0x55, 0xd3, 0xde, -+ 0x6c, 0x52, 0xdb, 0x70, 0xfe, 0x37, 0xce, 0x44, -+ 0x0a, 0xa8, 0x25, 0x5f, 0x92, 0xc1, 0x33, 0x4a, -+ 0x4f, 0x9b, 0x62, 0x35, 0xff, 0xce, 0xc0, 0xa9, -+ 0x60, 0xce, 0x52, 0x00, 0x97, 0x51, 0x35, 0x26, -+ 0x2e, 0xb9, 0x36, 0xa9, 0x87, 0x6e, 0x1e, 0xcc, -+ 0x91, 0x78, 0x53, 0x98, 0x86, 0x5b, 0x9c, 0x74, -+ 0x7d, 0x88, 0x33, 0xe1, 0xdf, 0x37, 0x69, 0x2b, -+ 0xbb, 0xf1, 0x4d, 0xf4, 0xd1, 0xf1, 0x39, 0x93, -+ 0x17, 0x51, 0x19, 0xe3, 0x19, 0x1e, 0x76, 0x37, -+ 0x25, 0xfb, 0x09, 0x27, 0x6a, 0xab, 0x67, 0x6f, -+ 0x14, 0x12, 0x64, 0xe7, 0xc4, 0x07, 0xdf, 0x4d, -+ 0x17, 0xbb, 0x6d, 0xe0, 0xe9, 0xb9, 0xab, 0xca, -+ 0x10, 0x68, 0xaf, 0x7e, 0xb7, 0x33, 0x54, 0x73, -+ 0x07, 0x6e, 0xf7, 0x81, 0x97, 0x9c, 0x05, 0x6f, -+ 0x84, 0x5f, 0xd2, 0x42, 0xfb, 0x38, 0xcf, 0xd1, -+ 0x2f, 0x14, 0x30, 0x88, 0x98, 0x4d, 0x5a, 0xa9, -+ 0x76, 0xd5, 0x4f, 0x3e, 0x70, 0x6c, 0x85, 0x76, -+ 0xd7, 0x01, 0xa0, 0x1a, 0xc8, 0x4e, 0xaa, 0xac, -+ 0x78, 0xfe, 0x46, 0xde, 0x6a, 0x05, 0x46, 0xa7, -+ 0x43, 0x0c, 0xb9, 0xde, 0xb9, 0x68, 0xfb, 0xce, -+ 0x42, 0x99, 0x07, 0x4d, 0x0b, 0x3b, 0x5a, 0x30, -+ 0x35, 0xa8, 0xf9, 0x3a, 0x73, 0xef, 0x0f, 0xdb, -+ 0x1e, 0x16, 0x42, 0xc4, 0xba, 
0xae, 0x58, 0xaa, -+ 0xf8, 0xe5, 0x75, 0x2f, 0x1b, 0x15, 0x5c, 0xfd, -+ 0x0a, 0x97, 0xd0, 0xe4, 0x37, 0x83, 0x61, 0x5f, -+ 0x43, 0xa6, 0xc7, 0x3f, 0x38, 0x59, 0xe6, 0xeb, -+ 0xa3, 0x90, 0xc3, 0xaa, 0xaa, 0x5a, 0xd3, 0x34, -+ 0xd4, 0x17, 0xc8, 0x65, 0x3e, 0x57, 0xbc, 0x5e, -+ 0xdd, 0x9e, 0xb7, 0xf0, 0x2e, 0x5b, 0xb2, 0x1f, -+ 0x8a, 0x08, 0x0d, 0x45, 0x91, 0x0b, 0x29, 0x53, -+ 0x4f, 0x4c, 0x5a, 0x73, 0x56, 0xfe, 0xaf, 0x41, -+ 0x01, 0x39, 0x0a, 0x24, 0x3c, 0x7e, 0xbe, 0x4e, -+ 0x53, 0xf3, 0xeb, 0x06, 0x66, 0x51, 0x28, 0x1d, -+ 0xbd, 0x41, 0x0a, 0x01, 0xab, 0x16, 0x47, 0x27, -+ 0x47, 0x47, 0xf7, 0xcb, 0x46, 0x0a, 0x70, 0x9e, -+ 0x01, 0x9c, 0x09, 0xe1, 0x2a, 0x00, 0x1a, 0xd8, -+ 0xd4, 0x79, 0x9d, 0x80, 0x15, 0x8e, 0x53, 0x2a, -+ 0x65, 0x83, 0x78, 0x3e, 0x03, 0x00, 0x07, 0x12, -+ 0x1f, 0x33, 0x3e, 0x7b, 0x13, 0x37, 0xf1, 0xc3, -+ 0xef, 0xb7, 0xc1, 0x20, 0x3c, 0x3e, 0x67, 0x66, -+ 0x5d, 0x88, 0xa7, 0x7d, 0x33, 0x50, 0x77, 0xb0, -+ 0x28, 0x8e, 0xe7, 0x2c, 0x2e, 0x7a, 0xf4, 0x3c, -+ 0x8d, 0x74, 0x83, 0xaf, 0x8e, 0x87, 0x0f, 0xe4, -+ 0x50, 0xff, 0x84, 0x5c, 0x47, 0x0c, 0x6a, 0x49, -+ 0xbf, 0x42, 0x86, 0x77, 0x15, 0x48, 0xa5, 0x90, -+ 0x5d, 0x93, 0xd6, 0x2a, 0x11, 0xd5, 0xd5, 0x11, -+ 0xaa, 0xce, 0xe7, 0x6f, 0xa5, 0xb0, 0x09, 0x2c, -+ 0x8d, 0xd3, 0x92, 0xf0, 0x5a, 0x2a, 0xda, 0x5b, -+ 0x1e, 0xd5, 0x9a, 0xc4, 0xc4, 0xf3, 0x49, 0x74, -+ 0x41, 0xca, 0xe8, 0xc1, 0xf8, 0x44, 0xd6, 0x3c, -+ 0xae, 0x6c, 0x1d, 0x9a, 0x30, 0x04, 0x4d, 0x27, -+ 0x0e, 0xb1, 0x5f, 0x59, 0xa2, 0x24, 0xe8, 0xe1, -+ 0x98, 0xc5, 0x6a, 0x4c, 0xfe, 0x41, 0xd2, 0x27, -+ 0x42, 0x52, 0xe1, 0xe9, 0x7d, 0x62, 0xe4, 0x88, -+ 0x0f, 0xad, 0xb2, 0x70, 0xcb, 0x9d, 0x4c, 0x27, -+ 0x2e, 0x76, 0x1e, 0x1a, 0x63, 0x65, 0xf5, 0x3b, -+ 0xf8, 0x57, 0x69, 0xeb, 0x5b, 0x38, 0x26, 0x39, -+ 0x33, 0x25, 0x45, 0x3e, 0x91, 0xb8, 0xd8, 0xc7, -+ 0xd5, 0x42, 0xc0, 0x22, 0x31, 0x74, 0xf4, 0xbc, -+ 0x0c, 0x23, 0xf1, 0xca, 0xc1, 0x8d, 0xd7, 0xbe, -+ 0xc9, 0x62, 0xe4, 0x08, 0x1a, 0xcf, 0x36, 0xd5, -+ 0xfe, 0x55, 0x21, 0x59, 0x91, 0x87, 
0x87, 0xdf, -+ 0x06, 0xdb, 0xdf, 0x96, 0x45, 0x58, 0xda, 0x05, -+ 0xcd, 0x50, 0x4d, 0xd2, 0x7d, 0x05, 0x18, 0x73, -+ 0x6a, 0x8d, 0x11, 0x85, 0xa6, 0x88, 0xe8, 0xda, -+ 0xe6, 0x30, 0x33, 0xa4, 0x89, 0x31, 0x75, 0xbe, -+ 0x69, 0x43, 0x84, 0x43, 0x50, 0x87, 0xdd, 0x71, -+ 0x36, 0x83, 0xc3, 0x78, 0x74, 0x24, 0x0a, 0xed, -+ 0x7b, 0xdb, 0xa4, 0x24, 0x0b, 0xb9, 0x7e, 0x5d, -+ 0xff, 0xde, 0xb1, 0xef, 0x61, 0x5a, 0x45, 0x33, -+ 0xf6, 0x17, 0x07, 0x08, 0x98, 0x83, 0x92, 0x0f, -+ 0x23, 0x6d, 0xe6, 0xaa, 0x17, 0x54, 0xad, 0x6a, -+ 0xc8, 0xdb, 0x26, 0xbe, 0xb8, 0xb6, 0x08, 0xfa, -+ 0x68, 0xf1, 0xd7, 0x79, 0x6f, 0x18, 0xb4, 0x9e, -+ 0x2d, 0x3f, 0x1b, 0x64, 0xaf, 0x8d, 0x06, 0x0e, -+ 0x49, 0x28, 0xe0, 0x5d, 0x45, 0x68, 0x13, 0x87, -+ 0xfa, 0xde, 0x40, 0x7b, 0xd2, 0xc3, 0x94, 0xd5, -+ 0xe1, 0xd9, 0xc2, 0xaf, 0x55, 0x89, 0xeb, 0xb4, -+ 0x12, 0x59, 0xa8, 0xd4, 0xc5, 0x29, 0x66, 0x38, -+ 0xe6, 0xac, 0x22, 0x22, 0xd9, 0x64, 0x9b, 0x34, -+ 0x0a, 0x32, 0x9f, 0xc2, 0xbf, 0x17, 0x6c, 0x3f, -+ 0x71, 0x7a, 0x38, 0x6b, 0x98, 0xfb, 0x49, 0x36, -+ 0x89, 0xc9, 0xe2, 0xd6, 0xc7, 0x5d, 0xd0, 0x69, -+ 0x5f, 0x23, 0x35, 0xc9, 0x30, 0xe2, 0xfd, 0x44, -+ 0x58, 0x39, 0xd7, 0x97, 0xfb, 0x5c, 0x00, 0xd5, -+ 0x4f, 0x7a, 0x1a, 0x95, 0x8b, 0x62, 0x4b, 0xce, -+ 0xe5, 0x91, 0x21, 0x7b, 0x30, 0x00, 0xd6, 0xdd, -+ 0x6d, 0x02, 0x86, 0x49, 0x0f, 0x3c, 0x1a, 0x27, -+ 0x3c, 0xd3, 0x0e, 0x71, 0xf2, 0xff, 0xf5, 0x2f, -+ 0x87, 0xac, 0x67, 0x59, 0x81, 0xa3, 0xf7, 0xf8, -+ 0xd6, 0x11, 0x0c, 0x84, 0xa9, 0x03, 0xee, 0x2a, -+ 0xc4, 0xf3, 0x22, 0xab, 0x7c, 0xe2, 0x25, 0xf5, -+ 0x67, 0xa3, 0xe4, 0x11, 0xe0, 0x59, 0xb3, 0xca, -+ 0x87, 0xa0, 0xae, 0xc9, 0xa6, 0x62, 0x1b, 0x6e, -+ 0x4d, 0x02, 0x6b, 0x07, 0x9d, 0xfd, 0xd0, 0x92, -+ 0x06, 0xe1, 0xb2, 0x9a, 0x4a, 0x1f, 0x1f, 0x13, -+ 0x49, 0x99, 0x97, 0x08, 0xde, 0x7f, 0x98, 0xaf, -+ 0x51, 0x98, 0xee, 0x2c, 0xcb, 0xf0, 0x0b, 0xc6, -+ 0xb6, 0xb7, 0x2d, 0x9a, 0xb1, 0xac, 0xa6, 0xe3, -+ 0x15, 0x77, 0x9d, 0x6b, 0x1a, 0xe4, 0xfc, 0x8b, -+ 0xf2, 0x17, 0x59, 0x08, 0x04, 0x58, 0x81, 
0x9d, -+ 0x1b, 0x1b, 0x69, 0x55, 0xc2, 0xb4, 0x3c, 0x1f, -+ 0x50, 0xf1, 0x7f, 0x77, 0x90, 0x4c, 0x66, 0x40, -+ 0x5a, 0xc0, 0x33, 0x1f, 0xcb, 0x05, 0x6d, 0x5c, -+ 0x06, 0x87, 0x52, 0xa2, 0x8f, 0x26, 0xd5, 0x4f -+}; -+static const u8 dec_assoc010[] __initconst = { -+ 0xd2, 0xa1, 0x70, 0xdb, 0x7a, 0xf8, 0xfa, 0x27, -+ 0xba, 0x73, 0x0f, 0xbf, 0x3d, 0x1e, 0x82, 0xb2 -+}; -+static const u8 dec_nonce010[] __initconst = { -+ 0xdb, 0x92, 0x0f, 0x7f, 0x17, 0x54, 0x0c, 0x30 -+}; -+static const u8 dec_key010[] __initconst = { -+ 0x47, 0x11, 0xeb, 0x86, 0x2b, 0x2c, 0xab, 0x44, -+ 0x34, 0xda, 0x7f, 0x57, 0x03, 0x39, 0x0c, 0xaf, -+ 0x2c, 0x14, 0xfd, 0x65, 0x23, 0xe9, 0x8e, 0x74, -+ 0xd5, 0x08, 0x68, 0x08, 0xe7, 0xb4, 0x72, 0xd7 -+}; -+ -+static const u8 dec_input011[] __initconst = { -+ 0x6a, 0xfc, 0x4b, 0x25, 0xdf, 0xc0, 0xe4, 0xe8, -+ 0x17, 0x4d, 0x4c, 0xc9, 0x7e, 0xde, 0x3a, 0xcc, -+ 0x3c, 0xba, 0x6a, 0x77, 0x47, 0xdb, 0xe3, 0x74, -+ 0x7a, 0x4d, 0x5f, 0x8d, 0x37, 0x55, 0x80, 0x73, -+ 0x90, 0x66, 0x5d, 0x3a, 0x7d, 0x5d, 0x86, 0x5e, -+ 0x8d, 0xfd, 0x83, 0xff, 0x4e, 0x74, 0x6f, 0xf9, -+ 0xe6, 0x70, 0x17, 0x70, 0x3e, 0x96, 0xa7, 0x7e, -+ 0xcb, 0xab, 0x8f, 0x58, 0x24, 0x9b, 0x01, 0xfd, -+ 0xcb, 0xe6, 0x4d, 0x9b, 0xf0, 0x88, 0x94, 0x57, -+ 0x66, 0xef, 0x72, 0x4c, 0x42, 0x6e, 0x16, 0x19, -+ 0x15, 0xea, 0x70, 0x5b, 0xac, 0x13, 0xdb, 0x9f, -+ 0x18, 0xe2, 0x3c, 0x26, 0x97, 0xbc, 0xdc, 0x45, -+ 0x8c, 0x6c, 0x24, 0x69, 0x9c, 0xf7, 0x65, 0x1e, -+ 0x18, 0x59, 0x31, 0x7c, 0xe4, 0x73, 0xbc, 0x39, -+ 0x62, 0xc6, 0x5c, 0x9f, 0xbf, 0xfa, 0x90, 0x03, -+ 0xc9, 0x72, 0x26, 0xb6, 0x1b, 0xc2, 0xb7, 0x3f, -+ 0xf2, 0x13, 0x77, 0xf2, 0x8d, 0xb9, 0x47, 0xd0, -+ 0x53, 0xdd, 0xc8, 0x91, 0x83, 0x8b, 0xb1, 0xce, -+ 0xa3, 0xfe, 0xcd, 0xd9, 0xdd, 0x92, 0x7b, 0xdb, -+ 0xb8, 0xfb, 0xc9, 0x2d, 0x01, 0x59, 0x39, 0x52, -+ 0xad, 0x1b, 0xec, 0xcf, 0xd7, 0x70, 0x13, 0x21, -+ 0xf5, 0x47, 0xaa, 0x18, 0x21, 0x5c, 0xc9, 0x9a, -+ 0xd2, 0x6b, 0x05, 0x9c, 0x01, 0xa1, 0xda, 0x35, -+ 0x5d, 0xb3, 0x70, 0xe6, 0xa9, 0x80, 0x8b, 
0x91, -+ 0xb7, 0xb3, 0x5f, 0x24, 0x9a, 0xb7, 0xd1, 0x6b, -+ 0xa1, 0x1c, 0x50, 0xba, 0x49, 0xe0, 0xee, 0x2e, -+ 0x75, 0xac, 0x69, 0xc0, 0xeb, 0x03, 0xdd, 0x19, -+ 0xe5, 0xf6, 0x06, 0xdd, 0xc3, 0xd7, 0x2b, 0x07, -+ 0x07, 0x30, 0xa7, 0x19, 0x0c, 0xbf, 0xe6, 0x18, -+ 0xcc, 0xb1, 0x01, 0x11, 0x85, 0x77, 0x1d, 0x96, -+ 0xa7, 0xa3, 0x00, 0x84, 0x02, 0xa2, 0x83, 0x68, -+ 0xda, 0x17, 0x27, 0xc8, 0x7f, 0x23, 0xb7, 0xf4, -+ 0x13, 0x85, 0xcf, 0xdd, 0x7a, 0x7d, 0x24, 0x57, -+ 0xfe, 0x05, 0x93, 0xf5, 0x74, 0xce, 0xed, 0x0c, -+ 0x20, 0x98, 0x8d, 0x92, 0x30, 0xa1, 0x29, 0x23, -+ 0x1a, 0xa0, 0x4f, 0x69, 0x56, 0x4c, 0xe1, 0xc8, -+ 0xce, 0xf6, 0x9a, 0x0c, 0xa4, 0xfa, 0x04, 0xf6, -+ 0x62, 0x95, 0xf2, 0xfa, 0xc7, 0x40, 0x68, 0x40, -+ 0x8f, 0x41, 0xda, 0xb4, 0x26, 0x6f, 0x70, 0xab, -+ 0x40, 0x61, 0xa4, 0x0e, 0x75, 0xfb, 0x86, 0xeb, -+ 0x9d, 0x9a, 0x1f, 0xec, 0x76, 0x99, 0xe7, 0xea, -+ 0xaa, 0x1e, 0x2d, 0xb5, 0xd4, 0xa6, 0x1a, 0xb8, -+ 0x61, 0x0a, 0x1d, 0x16, 0x5b, 0x98, 0xc2, 0x31, -+ 0x40, 0xe7, 0x23, 0x1d, 0x66, 0x99, 0xc8, 0xc0, -+ 0xd7, 0xce, 0xf3, 0x57, 0x40, 0x04, 0x3f, 0xfc, -+ 0xea, 0xb3, 0xfc, 0xd2, 0xd3, 0x99, 0xa4, 0x94, -+ 0x69, 0xa0, 0xef, 0xd1, 0x85, 0xb3, 0xa6, 0xb1, -+ 0x28, 0xbf, 0x94, 0x67, 0x22, 0xc3, 0x36, 0x46, -+ 0xf8, 0xd2, 0x0f, 0x5f, 0xf4, 0x59, 0x80, 0xe6, -+ 0x2d, 0x43, 0x08, 0x7d, 0x19, 0x09, 0x97, 0xa7, -+ 0x4c, 0x3d, 0x8d, 0xba, 0x65, 0x62, 0xa3, 0x71, -+ 0x33, 0x29, 0x62, 0xdb, 0xc1, 0x33, 0x34, 0x1a, -+ 0x63, 0x33, 0x16, 0xb6, 0x64, 0x7e, 0xab, 0x33, -+ 0xf0, 0xe6, 0x26, 0x68, 0xba, 0x1d, 0x2e, 0x38, -+ 0x08, 0xe6, 0x02, 0xd3, 0x25, 0x2c, 0x47, 0x23, -+ 0x58, 0x34, 0x0f, 0x9d, 0x63, 0x4f, 0x63, 0xbb, -+ 0x7f, 0x3b, 0x34, 0x38, 0xa7, 0xb5, 0x8d, 0x65, -+ 0xd9, 0x9f, 0x79, 0x55, 0x3e, 0x4d, 0xe7, 0x73, -+ 0xd8, 0xf6, 0x98, 0x97, 0x84, 0x60, 0x9c, 0xc8, -+ 0xa9, 0x3c, 0xf6, 0xdc, 0x12, 0x5c, 0xe1, 0xbb, -+ 0x0b, 0x8b, 0x98, 0x9c, 0x9d, 0x26, 0x7c, 0x4a, -+ 0xe6, 0x46, 0x36, 0x58, 0x21, 0x4a, 0xee, 0xca, -+ 0xd7, 0x3b, 0xc2, 0x6c, 0x49, 0x2f, 0xe5, 0xd5, -+ 
0x03, 0x59, 0x84, 0x53, 0xcb, 0xfe, 0x92, 0x71, -+ 0x2e, 0x7c, 0x21, 0xcc, 0x99, 0x85, 0x7f, 0xb8, -+ 0x74, 0x90, 0x13, 0x42, 0x3f, 0xe0, 0x6b, 0x1d, -+ 0xf2, 0x4d, 0x54, 0xd4, 0xfc, 0x3a, 0x05, 0xe6, -+ 0x74, 0xaf, 0xa6, 0xa0, 0x2a, 0x20, 0x23, 0x5d, -+ 0x34, 0x5c, 0xd9, 0x3e, 0x4e, 0xfa, 0x93, 0xe7, -+ 0xaa, 0xe9, 0x6f, 0x08, 0x43, 0x67, 0x41, 0xc5, -+ 0xad, 0xfb, 0x31, 0x95, 0x82, 0x73, 0x32, 0xd8, -+ 0xa6, 0xa3, 0xed, 0x0e, 0x2d, 0xf6, 0x5f, 0xfd, -+ 0x80, 0xa6, 0x7a, 0xe0, 0xdf, 0x78, 0x15, 0x29, -+ 0x74, 0x33, 0xd0, 0x9e, 0x83, 0x86, 0x72, 0x22, -+ 0x57, 0x29, 0xb9, 0x9e, 0x5d, 0xd3, 0x1a, 0xb5, -+ 0x96, 0x72, 0x41, 0x3d, 0xf1, 0x64, 0x43, 0x67, -+ 0xee, 0xaa, 0x5c, 0xd3, 0x9a, 0x96, 0x13, 0x11, -+ 0x5d, 0xf3, 0x0c, 0x87, 0x82, 0x1e, 0x41, 0x9e, -+ 0xd0, 0x27, 0xd7, 0x54, 0x3b, 0x67, 0x73, 0x09, -+ 0x91, 0xe9, 0xd5, 0x36, 0xa7, 0xb5, 0x55, 0xe4, -+ 0xf3, 0x21, 0x51, 0x49, 0x22, 0x07, 0x55, 0x4f, -+ 0x44, 0x4b, 0xd2, 0x15, 0x93, 0x17, 0x2a, 0xfa, -+ 0x4d, 0x4a, 0x57, 0xdb, 0x4c, 0xa6, 0xeb, 0xec, -+ 0x53, 0x25, 0x6c, 0x21, 0xed, 0x00, 0x4c, 0x3b, -+ 0xca, 0x14, 0x57, 0xa9, 0xd6, 0x6a, 0xcd, 0x8d, -+ 0x5e, 0x74, 0xac, 0x72, 0xc1, 0x97, 0xe5, 0x1b, -+ 0x45, 0x4e, 0xda, 0xfc, 0xcc, 0x40, 0xe8, 0x48, -+ 0x88, 0x0b, 0xa3, 0xe3, 0x8d, 0x83, 0x42, 0xc3, -+ 0x23, 0xfd, 0x68, 0xb5, 0x8e, 0xf1, 0x9d, 0x63, -+ 0x77, 0xe9, 0xa3, 0x8e, 0x8c, 0x26, 0x6b, 0xbd, -+ 0x72, 0x73, 0x35, 0x0c, 0x03, 0xf8, 0x43, 0x78, -+ 0x52, 0x71, 0x15, 0x1f, 0x71, 0x5d, 0x6e, 0xed, -+ 0xb9, 0xcc, 0x86, 0x30, 0xdb, 0x2b, 0xd3, 0x82, -+ 0x88, 0x23, 0x71, 0x90, 0x53, 0x5c, 0xa9, 0x2f, -+ 0x76, 0x01, 0xb7, 0x9a, 0xfe, 0x43, 0x55, 0xa3, -+ 0x04, 0x9b, 0x0e, 0xe4, 0x59, 0xdf, 0xc9, 0xe9, -+ 0xb1, 0xea, 0x29, 0x28, 0x3c, 0x5c, 0xae, 0x72, -+ 0x84, 0xb6, 0xc6, 0xeb, 0x0c, 0x27, 0x07, 0x74, -+ 0x90, 0x0d, 0x31, 0xb0, 0x00, 0x77, 0xe9, 0x40, -+ 0x70, 0x6f, 0x68, 0xa7, 0xfd, 0x06, 0xec, 0x4b, -+ 0xc0, 0xb7, 0xac, 0xbc, 0x33, 0xb7, 0x6d, 0x0a, -+ 0xbd, 0x12, 0x1b, 0x59, 0xcb, 0xdd, 0x32, 0xf5, -+ 0x1d, 
0x94, 0x57, 0x76, 0x9e, 0x0c, 0x18, 0x98, -+ 0x71, 0xd7, 0x2a, 0xdb, 0x0b, 0x7b, 0xa7, 0x71, -+ 0xb7, 0x67, 0x81, 0x23, 0x96, 0xae, 0xb9, 0x7e, -+ 0x32, 0x43, 0x92, 0x8a, 0x19, 0xa0, 0xc4, 0xd4, -+ 0x3b, 0x57, 0xf9, 0x4a, 0x2c, 0xfb, 0x51, 0x46, -+ 0xbb, 0xcb, 0x5d, 0xb3, 0xef, 0x13, 0x93, 0x6e, -+ 0x68, 0x42, 0x54, 0x57, 0xd3, 0x6a, 0x3a, 0x8f, -+ 0x9d, 0x66, 0xbf, 0xbd, 0x36, 0x23, 0xf5, 0x93, -+ 0x83, 0x7b, 0x9c, 0xc0, 0xdd, 0xc5, 0x49, 0xc0, -+ 0x64, 0xed, 0x07, 0x12, 0xb3, 0xe6, 0xe4, 0xe5, -+ 0x38, 0x95, 0x23, 0xb1, 0xa0, 0x3b, 0x1a, 0x61, -+ 0xda, 0x17, 0xac, 0xc3, 0x58, 0xdd, 0x74, 0x64, -+ 0x22, 0x11, 0xe8, 0x32, 0x1d, 0x16, 0x93, 0x85, -+ 0x99, 0xa5, 0x9c, 0x34, 0x55, 0xb1, 0xe9, 0x20, -+ 0x72, 0xc9, 0x28, 0x7b, 0x79, 0x00, 0xa1, 0xa6, -+ 0xa3, 0x27, 0x40, 0x18, 0x8a, 0x54, 0xe0, 0xcc, -+ 0xe8, 0x4e, 0x8e, 0x43, 0x96, 0xe7, 0x3f, 0xc8, -+ 0xe9, 0xb2, 0xf9, 0xc9, 0xda, 0x04, 0x71, 0x50, -+ 0x47, 0xe4, 0xaa, 0xce, 0xa2, 0x30, 0xc8, 0xe4, -+ 0xac, 0xc7, 0x0d, 0x06, 0x2e, 0xe6, 0xe8, 0x80, -+ 0x36, 0x29, 0x9e, 0x01, 0xb8, 0xc3, 0xf0, 0xa0, -+ 0x5d, 0x7a, 0xca, 0x4d, 0xa0, 0x57, 0xbd, 0x2a, -+ 0x45, 0xa7, 0x7f, 0x9c, 0x93, 0x07, 0x8f, 0x35, -+ 0x67, 0x92, 0xe3, 0xe9, 0x7f, 0xa8, 0x61, 0x43, -+ 0x9e, 0x25, 0x4f, 0x33, 0x76, 0x13, 0x6e, 0x12, -+ 0xb9, 0xdd, 0xa4, 0x7c, 0x08, 0x9f, 0x7c, 0xe7, -+ 0x0a, 0x8d, 0x84, 0x06, 0xa4, 0x33, 0x17, 0x34, -+ 0x5e, 0x10, 0x7c, 0xc0, 0xa8, 0x3d, 0x1f, 0x42, -+ 0x20, 0x51, 0x65, 0x5d, 0x09, 0xc3, 0xaa, 0xc0, -+ 0xc8, 0x0d, 0xf0, 0x79, 0xbc, 0x20, 0x1b, 0x95, -+ 0xe7, 0x06, 0x7d, 0x47, 0x20, 0x03, 0x1a, 0x74, -+ 0xdd, 0xe2, 0xd4, 0xae, 0x38, 0x71, 0x9b, 0xf5, -+ 0x80, 0xec, 0x08, 0x4e, 0x56, 0xba, 0x76, 0x12, -+ 0x1a, 0xdf, 0x48, 0xf3, 0xae, 0xb3, 0xe6, 0xe6, -+ 0xbe, 0xc0, 0x91, 0x2e, 0x01, 0xb3, 0x01, 0x86, -+ 0xa2, 0xb9, 0x52, 0xd1, 0x21, 0xae, 0xd4, 0x97, -+ 0x1d, 0xef, 0x41, 0x12, 0x95, 0x3d, 0x48, 0x45, -+ 0x1c, 0x56, 0x32, 0x8f, 0xb8, 0x43, 0xbb, 0x19, -+ 0xf3, 0xca, 0xe9, 0xeb, 0x6d, 0x84, 0xbe, 0x86, -+ 0x06, 0xe2, 
0x36, 0xb2, 0x62, 0x9d, 0xd3, 0x4c, -+ 0x48, 0x18, 0x54, 0x13, 0x4e, 0xcf, 0xfd, 0xba, -+ 0x84, 0xb9, 0x30, 0x53, 0xcf, 0xfb, 0xb9, 0x29, -+ 0x8f, 0xdc, 0x9f, 0xef, 0x60, 0x0b, 0x64, 0xf6, -+ 0x8b, 0xee, 0xa6, 0x91, 0xc2, 0x41, 0x6c, 0xf6, -+ 0xfa, 0x79, 0x67, 0x4b, 0xc1, 0x3f, 0xaf, 0x09, -+ 0x81, 0xd4, 0x5d, 0xcb, 0x09, 0xdf, 0x36, 0x31, -+ 0xc0, 0x14, 0x3c, 0x7c, 0x0e, 0x65, 0x95, 0x99, -+ 0x6d, 0xa3, 0xf4, 0xd7, 0x38, 0xee, 0x1a, 0x2b, -+ 0x37, 0xe2, 0xa4, 0x3b, 0x4b, 0xd0, 0x65, 0xca, -+ 0xf8, 0xc3, 0xe8, 0x15, 0x20, 0xef, 0xf2, 0x00, -+ 0xfd, 0x01, 0x09, 0xc5, 0xc8, 0x17, 0x04, 0x93, -+ 0xd0, 0x93, 0x03, 0x55, 0xc5, 0xfe, 0x32, 0xa3, -+ 0x3e, 0x28, 0x2d, 0x3b, 0x93, 0x8a, 0xcc, 0x07, -+ 0x72, 0x80, 0x8b, 0x74, 0x16, 0x24, 0xbb, 0xda, -+ 0x94, 0x39, 0x30, 0x8f, 0xb1, 0xcd, 0x4a, 0x90, -+ 0x92, 0x7c, 0x14, 0x8f, 0x95, 0x4e, 0xac, 0x9b, -+ 0xd8, 0x8f, 0x1a, 0x87, 0xa4, 0x32, 0x27, 0x8a, -+ 0xba, 0xf7, 0x41, 0xcf, 0x84, 0x37, 0x19, 0xe6, -+ 0x06, 0xf5, 0x0e, 0xcf, 0x36, 0xf5, 0x9e, 0x6c, -+ 0xde, 0xbc, 0xff, 0x64, 0x7e, 0x4e, 0x59, 0x57, -+ 0x48, 0xfe, 0x14, 0xf7, 0x9c, 0x93, 0x5d, 0x15, -+ 0xad, 0xcc, 0x11, 0xb1, 0x17, 0x18, 0xb2, 0x7e, -+ 0xcc, 0xab, 0xe9, 0xce, 0x7d, 0x77, 0x5b, 0x51, -+ 0x1b, 0x1e, 0x20, 0xa8, 0x32, 0x06, 0x0e, 0x75, -+ 0x93, 0xac, 0xdb, 0x35, 0x37, 0x1f, 0xe9, 0x19, -+ 0x1d, 0xb4, 0x71, 0x97, 0xd6, 0x4e, 0x2c, 0x08, -+ 0xa5, 0x13, 0xf9, 0x0e, 0x7e, 0x78, 0x6e, 0x14, -+ 0xe0, 0xa9, 0xb9, 0x96, 0x4c, 0x80, 0x82, 0xba, -+ 0x17, 0xb3, 0x9d, 0x69, 0xb0, 0x84, 0x46, 0xff, -+ 0xf9, 0x52, 0x79, 0x94, 0x58, 0x3a, 0x62, 0x90, -+ 0x15, 0x35, 0x71, 0x10, 0x37, 0xed, 0xa1, 0x8e, -+ 0x53, 0x6e, 0xf4, 0x26, 0x57, 0x93, 0x15, 0x93, -+ 0xf6, 0x81, 0x2c, 0x5a, 0x10, 0xda, 0x92, 0xad, -+ 0x2f, 0xdb, 0x28, 0x31, 0x2d, 0x55, 0x04, 0xd2, -+ 0x06, 0x28, 0x8c, 0x1e, 0xdc, 0xea, 0x54, 0xac, -+ 0xff, 0xb7, 0x6c, 0x30, 0x15, 0xd4, 0xb4, 0x0d, -+ 0x00, 0x93, 0x57, 0xdd, 0xd2, 0x07, 0x07, 0x06, -+ 0xd9, 0x43, 0x9b, 0xcd, 0x3a, 0xf4, 0x7d, 0x4c, -+ 0x36, 0x5d, 0x23, 
0xa2, 0xcc, 0x57, 0x40, 0x91, -+ 0xe9, 0x2c, 0x2f, 0x2c, 0xd5, 0x30, 0x9b, 0x17, -+ 0xb0, 0xc9, 0xf7, 0xa7, 0x2f, 0xd1, 0x93, 0x20, -+ 0x6b, 0xc6, 0xc1, 0xe4, 0x6f, 0xcb, 0xd1, 0xe7, -+ 0x09, 0x0f, 0x9e, 0xdc, 0xaa, 0x9f, 0x2f, 0xdf, -+ 0x56, 0x9f, 0xd4, 0x33, 0x04, 0xaf, 0xd3, 0x6c, -+ 0x58, 0x61, 0xf0, 0x30, 0xec, 0xf2, 0x7f, 0xf2, -+ 0x9c, 0xdf, 0x39, 0xbb, 0x6f, 0xa2, 0x8c, 0x7e, -+ 0xc4, 0x22, 0x51, 0x71, 0xc0, 0x4d, 0x14, 0x1a, -+ 0xc4, 0xcd, 0x04, 0xd9, 0x87, 0x08, 0x50, 0x05, -+ 0xcc, 0xaf, 0xf6, 0xf0, 0x8f, 0x92, 0x54, 0x58, -+ 0xc2, 0xc7, 0x09, 0x7a, 0x59, 0x02, 0x05, 0xe8, -+ 0xb0, 0x86, 0xd9, 0xbf, 0x7b, 0x35, 0x51, 0x4d, -+ 0xaf, 0x08, 0x97, 0x2c, 0x65, 0xda, 0x2a, 0x71, -+ 0x3a, 0xa8, 0x51, 0xcc, 0xf2, 0x73, 0x27, 0xc3, -+ 0xfd, 0x62, 0xcf, 0xe3, 0xb2, 0xca, 0xcb, 0xbe, -+ 0x1a, 0x0a, 0xa1, 0x34, 0x7b, 0x77, 0xc4, 0x62, -+ 0x68, 0x78, 0x5f, 0x94, 0x07, 0x04, 0x65, 0x16, -+ 0x4b, 0x61, 0xcb, 0xff, 0x75, 0x26, 0x50, 0x66, -+ 0x1f, 0x6e, 0x93, 0xf8, 0xc5, 0x51, 0xeb, 0xa4, -+ 0x4a, 0x48, 0x68, 0x6b, 0xe2, 0x5e, 0x44, 0xb2, -+ 0x50, 0x2c, 0x6c, 0xae, 0x79, 0x4e, 0x66, 0x35, -+ 0x81, 0x50, 0xac, 0xbc, 0x3f, 0xb1, 0x0c, 0xf3, -+ 0x05, 0x3c, 0x4a, 0xa3, 0x6c, 0x2a, 0x79, 0xb4, -+ 0xb7, 0xab, 0xca, 0xc7, 0x9b, 0x8e, 0xcd, 0x5f, -+ 0x11, 0x03, 0xcb, 0x30, 0xa3, 0xab, 0xda, 0xfe, -+ 0x64, 0xb9, 0xbb, 0xd8, 0x5e, 0x3a, 0x1a, 0x56, -+ 0xe5, 0x05, 0x48, 0x90, 0x1e, 0x61, 0x69, 0x1b, -+ 0x22, 0xe6, 0x1a, 0x3c, 0x75, 0xad, 0x1f, 0x37, -+ 0x28, 0xdc, 0xe4, 0x6d, 0xbd, 0x42, 0xdc, 0xd3, -+ 0xc8, 0xb6, 0x1c, 0x48, 0xfe, 0x94, 0x77, 0x7f, -+ 0xbd, 0x62, 0xac, 0xa3, 0x47, 0x27, 0xcf, 0x5f, -+ 0xd9, 0xdb, 0xaf, 0xec, 0xf7, 0x5e, 0xc1, 0xb0, -+ 0x9d, 0x01, 0x26, 0x99, 0x7e, 0x8f, 0x03, 0x70, -+ 0xb5, 0x42, 0xbe, 0x67, 0x28, 0x1b, 0x7c, 0xbd, -+ 0x61, 0x21, 0x97, 0xcc, 0x5c, 0xe1, 0x97, 0x8f, -+ 0x8d, 0xde, 0x2b, 0xaa, 0xa7, 0x71, 0x1d, 0x1e, -+ 0x02, 0x73, 0x70, 0x58, 0x32, 0x5b, 0x1d, 0x67, -+ 0x3d, 0xe0, 0x74, 0x4f, 0x03, 0xf2, 0x70, 0x51, -+ 0x79, 0xf1, 0x61, 0x70, 
0x15, 0x74, 0x9d, 0x23, -+ 0x89, 0xde, 0xac, 0xfd, 0xde, 0xd0, 0x1f, 0xc3, -+ 0x87, 0x44, 0x35, 0x4b, 0xe5, 0xb0, 0x60, 0xc5, -+ 0x22, 0xe4, 0x9e, 0xca, 0xeb, 0xd5, 0x3a, 0x09, -+ 0x45, 0xa4, 0xdb, 0xfa, 0x3f, 0xeb, 0x1b, 0xc7, -+ 0xc8, 0x14, 0x99, 0x51, 0x92, 0x10, 0xed, 0xed, -+ 0x28, 0xe0, 0xa1, 0xf8, 0x26, 0xcf, 0xcd, 0xcb, -+ 0x63, 0xa1, 0x3b, 0xe3, 0xdf, 0x7e, 0xfe, 0xa6, -+ 0xf0, 0x81, 0x9a, 0xbf, 0x55, 0xde, 0x54, 0xd5, -+ 0x56, 0x60, 0x98, 0x10, 0x68, 0xf4, 0x38, 0x96, -+ 0x8e, 0x6f, 0x1d, 0x44, 0x7f, 0xd6, 0x2f, 0xfe, -+ 0x55, 0xfb, 0x0c, 0x7e, 0x67, 0xe2, 0x61, 0x44, -+ 0xed, 0xf2, 0x35, 0x30, 0x5d, 0xe9, 0xc7, 0xd6, -+ 0x6d, 0xe0, 0xa0, 0xed, 0xf3, 0xfc, 0xd8, 0x3e, -+ 0x0a, 0x7b, 0xcd, 0xaf, 0x65, 0x68, 0x18, 0xc0, -+ 0xec, 0x04, 0x1c, 0x74, 0x6d, 0xe2, 0x6e, 0x79, -+ 0xd4, 0x11, 0x2b, 0x62, 0xd5, 0x27, 0xad, 0x4f, -+ 0x01, 0x59, 0x73, 0xcc, 0x6a, 0x53, 0xfb, 0x2d, -+ 0xd5, 0x4e, 0x99, 0x21, 0x65, 0x4d, 0xf5, 0x82, -+ 0xf7, 0xd8, 0x42, 0xce, 0x6f, 0x3d, 0x36, 0x47, -+ 0xf1, 0x05, 0x16, 0xe8, 0x1b, 0x6a, 0x8f, 0x93, -+ 0xf2, 0x8f, 0x37, 0x40, 0x12, 0x28, 0xa3, 0xe6, -+ 0xb9, 0x17, 0x4a, 0x1f, 0xb1, 0xd1, 0x66, 0x69, -+ 0x86, 0xc4, 0xfc, 0x97, 0xae, 0x3f, 0x8f, 0x1e, -+ 0x2b, 0xdf, 0xcd, 0xf9, 0x3c -+}; -+static const u8 dec_output011[] __initconst = { -+ 0x7a, 0x57, 0xf2, 0xc7, 0x06, 0x3f, 0x50, 0x7b, -+ 0x36, 0x1a, 0x66, 0x5c, 0xb9, 0x0e, 0x5e, 0x3b, -+ 0x45, 0x60, 0xbe, 0x9a, 0x31, 0x9f, 0xff, 0x5d, -+ 0x66, 0x34, 0xb4, 0xdc, 0xfb, 0x9d, 0x8e, 0xee, -+ 0x6a, 0x33, 0xa4, 0x07, 0x3c, 0xf9, 0x4c, 0x30, -+ 0xa1, 0x24, 0x52, 0xf9, 0x50, 0x46, 0x88, 0x20, -+ 0x02, 0x32, 0x3a, 0x0e, 0x99, 0x63, 0xaf, 0x1f, -+ 0x15, 0x28, 0x2a, 0x05, 0xff, 0x57, 0x59, 0x5e, -+ 0x18, 0xa1, 0x1f, 0xd0, 0x92, 0x5c, 0x88, 0x66, -+ 0x1b, 0x00, 0x64, 0xa5, 0x93, 0x8d, 0x06, 0x46, -+ 0xb0, 0x64, 0x8b, 0x8b, 0xef, 0x99, 0x05, 0x35, -+ 0x85, 0xb3, 0xf3, 0x33, 0xbb, 0xec, 0x66, 0xb6, -+ 0x3d, 0x57, 0x42, 0xe3, 0xb4, 0xc6, 0xaa, 0xb0, -+ 0x41, 0x2a, 0xb9, 0x59, 0xa9, 0xf6, 0x3e, 0x15, 
-+ 0x26, 0x12, 0x03, 0x21, 0x4c, 0x74, 0x43, 0x13, -+ 0x2a, 0x03, 0x27, 0x09, 0xb4, 0xfb, 0xe7, 0xb7, -+ 0x40, 0xff, 0x5e, 0xce, 0x48, 0x9a, 0x60, 0xe3, -+ 0x8b, 0x80, 0x8c, 0x38, 0x2d, 0xcb, 0x93, 0x37, -+ 0x74, 0x05, 0x52, 0x6f, 0x73, 0x3e, 0xc3, 0xbc, -+ 0xca, 0x72, 0x0a, 0xeb, 0xf1, 0x3b, 0xa0, 0x95, -+ 0xdc, 0x8a, 0xc4, 0xa9, 0xdc, 0xca, 0x44, 0xd8, -+ 0x08, 0x63, 0x6a, 0x36, 0xd3, 0x3c, 0xb8, 0xac, -+ 0x46, 0x7d, 0xfd, 0xaa, 0xeb, 0x3e, 0x0f, 0x45, -+ 0x8f, 0x49, 0xda, 0x2b, 0xf2, 0x12, 0xbd, 0xaf, -+ 0x67, 0x8a, 0x63, 0x48, 0x4b, 0x55, 0x5f, 0x6d, -+ 0x8c, 0xb9, 0x76, 0x34, 0x84, 0xae, 0xc2, 0xfc, -+ 0x52, 0x64, 0x82, 0xf7, 0xb0, 0x06, 0xf0, 0x45, -+ 0x73, 0x12, 0x50, 0x30, 0x72, 0xea, 0x78, 0x9a, -+ 0xa8, 0xaf, 0xb5, 0xe3, 0xbb, 0x77, 0x52, 0xec, -+ 0x59, 0x84, 0xbf, 0x6b, 0x8f, 0xce, 0x86, 0x5e, -+ 0x1f, 0x23, 0xe9, 0xfb, 0x08, 0x86, 0xf7, 0x10, -+ 0xb9, 0xf2, 0x44, 0x96, 0x44, 0x63, 0xa9, 0xa8, -+ 0x78, 0x00, 0x23, 0xd6, 0xc7, 0xe7, 0x6e, 0x66, -+ 0x4f, 0xcc, 0xee, 0x15, 0xb3, 0xbd, 0x1d, 0xa0, -+ 0xe5, 0x9c, 0x1b, 0x24, 0x2c, 0x4d, 0x3c, 0x62, -+ 0x35, 0x9c, 0x88, 0x59, 0x09, 0xdd, 0x82, 0x1b, -+ 0xcf, 0x0a, 0x83, 0x6b, 0x3f, 0xae, 0x03, 0xc4, -+ 0xb4, 0xdd, 0x7e, 0x5b, 0x28, 0x76, 0x25, 0x96, -+ 0xd9, 0xc9, 0x9d, 0x5f, 0x86, 0xfa, 0xf6, 0xd7, -+ 0xd2, 0xe6, 0x76, 0x1d, 0x0f, 0xa1, 0xdc, 0x74, -+ 0x05, 0x1b, 0x1d, 0xe0, 0xcd, 0x16, 0xb0, 0xa8, -+ 0x8a, 0x34, 0x7b, 0x15, 0x11, 0x77, 0xe5, 0x7b, -+ 0x7e, 0x20, 0xf7, 0xda, 0x38, 0xda, 0xce, 0x70, -+ 0xe9, 0xf5, 0x6c, 0xd9, 0xbe, 0x0c, 0x4c, 0x95, -+ 0x4c, 0xc2, 0x9b, 0x34, 0x55, 0x55, 0xe1, 0xf3, -+ 0x46, 0x8e, 0x48, 0x74, 0x14, 0x4f, 0x9d, 0xc9, -+ 0xf5, 0xe8, 0x1a, 0xf0, 0x11, 0x4a, 0xc1, 0x8d, -+ 0xe0, 0x93, 0xa0, 0xbe, 0x09, 0x1c, 0x2b, 0x4e, -+ 0x0f, 0xb2, 0x87, 0x8b, 0x84, 0xfe, 0x92, 0x32, -+ 0x14, 0xd7, 0x93, 0xdf, 0xe7, 0x44, 0xbc, 0xc5, -+ 0xae, 0x53, 0x69, 0xd8, 0xb3, 0x79, 0x37, 0x80, -+ 0xe3, 0x17, 0x5c, 0xec, 0x53, 0x00, 0x9a, 0xe3, -+ 0x8e, 0xdc, 0x38, 0xb8, 0x66, 0xf0, 0xd3, 0xad, -+ 0x1d, 
0x02, 0x96, 0x86, 0x3e, 0x9d, 0x3b, 0x5d, -+ 0xa5, 0x7f, 0x21, 0x10, 0xf1, 0x1f, 0x13, 0x20, -+ 0xf9, 0x57, 0x87, 0x20, 0xf5, 0x5f, 0xf1, 0x17, -+ 0x48, 0x0a, 0x51, 0x5a, 0xcd, 0x19, 0x03, 0xa6, -+ 0x5a, 0xd1, 0x12, 0x97, 0xe9, 0x48, 0xe2, 0x1d, -+ 0x83, 0x75, 0x50, 0xd9, 0x75, 0x7d, 0x6a, 0x82, -+ 0xa1, 0xf9, 0x4e, 0x54, 0x87, 0x89, 0xc9, 0x0c, -+ 0xb7, 0x5b, 0x6a, 0x91, 0xc1, 0x9c, 0xb2, 0xa9, -+ 0xdc, 0x9a, 0xa4, 0x49, 0x0a, 0x6d, 0x0d, 0xbb, -+ 0xde, 0x86, 0x44, 0xdd, 0x5d, 0x89, 0x2b, 0x96, -+ 0x0f, 0x23, 0x95, 0xad, 0xcc, 0xa2, 0xb3, 0xb9, -+ 0x7e, 0x74, 0x38, 0xba, 0x9f, 0x73, 0xae, 0x5f, -+ 0xf8, 0x68, 0xa2, 0xe0, 0xa9, 0xce, 0xbd, 0x40, -+ 0xd4, 0x4c, 0x6b, 0xd2, 0x56, 0x62, 0xb0, 0xcc, -+ 0x63, 0x7e, 0x5b, 0xd3, 0xae, 0xd1, 0x75, 0xce, -+ 0xbb, 0xb4, 0x5b, 0xa8, 0xf8, 0xb4, 0xac, 0x71, -+ 0x75, 0xaa, 0xc9, 0x9f, 0xbb, 0x6c, 0xad, 0x0f, -+ 0x55, 0x5d, 0xe8, 0x85, 0x7d, 0xf9, 0x21, 0x35, -+ 0xea, 0x92, 0x85, 0x2b, 0x00, 0xec, 0x84, 0x90, -+ 0x0a, 0x63, 0x96, 0xe4, 0x6b, 0xa9, 0x77, 0xb8, -+ 0x91, 0xf8, 0x46, 0x15, 0x72, 0x63, 0x70, 0x01, -+ 0x40, 0xa3, 0xa5, 0x76, 0x62, 0x2b, 0xbf, 0xf1, -+ 0xe5, 0x8d, 0x9f, 0xa3, 0xfa, 0x9b, 0x03, 0xbe, -+ 0xfe, 0x65, 0x6f, 0xa2, 0x29, 0x0d, 0x54, 0xb4, -+ 0x71, 0xce, 0xa9, 0xd6, 0x3d, 0x88, 0xf9, 0xaf, -+ 0x6b, 0xa8, 0x9e, 0xf4, 0x16, 0x96, 0x36, 0xb9, -+ 0x00, 0xdc, 0x10, 0xab, 0xb5, 0x08, 0x31, 0x1f, -+ 0x00, 0xb1, 0x3c, 0xd9, 0x38, 0x3e, 0xc6, 0x04, -+ 0xa7, 0x4e, 0xe8, 0xae, 0xed, 0x98, 0xc2, 0xf7, -+ 0xb9, 0x00, 0x5f, 0x8c, 0x60, 0xd1, 0xe5, 0x15, -+ 0xf7, 0xae, 0x1e, 0x84, 0x88, 0xd1, 0xf6, 0xbc, -+ 0x3a, 0x89, 0x35, 0x22, 0x83, 0x7c, 0xca, 0xf0, -+ 0x33, 0x82, 0x4c, 0x79, 0x3c, 0xfd, 0xb1, 0xae, -+ 0x52, 0x62, 0x55, 0xd2, 0x41, 0x60, 0xc6, 0xbb, -+ 0xfa, 0x0e, 0x59, 0xd6, 0xa8, 0xfe, 0x5d, 0xed, -+ 0x47, 0x3d, 0xe0, 0xea, 0x1f, 0x6e, 0x43, 0x51, -+ 0xec, 0x10, 0x52, 0x56, 0x77, 0x42, 0x6b, 0x52, -+ 0x87, 0xd8, 0xec, 0xe0, 0xaa, 0x76, 0xa5, 0x84, -+ 0x2a, 0x22, 0x24, 0xfd, 0x92, 0x40, 0x88, 0xd5, -+ 0x85, 0x1c, 
0x1f, 0x6b, 0x47, 0xa0, 0xc4, 0xe4, -+ 0xef, 0xf4, 0xea, 0xd7, 0x59, 0xac, 0x2a, 0x9e, -+ 0x8c, 0xfa, 0x1f, 0x42, 0x08, 0xfe, 0x4f, 0x74, -+ 0xa0, 0x26, 0xf5, 0xb3, 0x84, 0xf6, 0x58, 0x5f, -+ 0x26, 0x66, 0x3e, 0xd7, 0xe4, 0x22, 0x91, 0x13, -+ 0xc8, 0xac, 0x25, 0x96, 0x23, 0xd8, 0x09, 0xea, -+ 0x45, 0x75, 0x23, 0xb8, 0x5f, 0xc2, 0x90, 0x8b, -+ 0x09, 0xc4, 0xfc, 0x47, 0x6c, 0x6d, 0x0a, 0xef, -+ 0x69, 0xa4, 0x38, 0x19, 0xcf, 0x7d, 0xf9, 0x09, -+ 0x73, 0x9b, 0x60, 0x5a, 0xf7, 0x37, 0xb5, 0xfe, -+ 0x9f, 0xe3, 0x2b, 0x4c, 0x0d, 0x6e, 0x19, 0xf1, -+ 0xd6, 0xc0, 0x70, 0xf3, 0x9d, 0x22, 0x3c, 0xf9, -+ 0x49, 0xce, 0x30, 0x8e, 0x44, 0xb5, 0x76, 0x15, -+ 0x8f, 0x52, 0xfd, 0xa5, 0x04, 0xb8, 0x55, 0x6a, -+ 0x36, 0x59, 0x7c, 0xc4, 0x48, 0xb8, 0xd7, 0xab, -+ 0x05, 0x66, 0xe9, 0x5e, 0x21, 0x6f, 0x6b, 0x36, -+ 0x29, 0xbb, 0xe9, 0xe3, 0xa2, 0x9a, 0xa8, 0xcd, -+ 0x55, 0x25, 0x11, 0xba, 0x5a, 0x58, 0xa0, 0xde, -+ 0xae, 0x19, 0x2a, 0x48, 0x5a, 0xff, 0x36, 0xcd, -+ 0x6d, 0x16, 0x7a, 0x73, 0x38, 0x46, 0xe5, 0x47, -+ 0x59, 0xc8, 0xa2, 0xf6, 0xe2, 0x6c, 0x83, 0xc5, -+ 0x36, 0x2c, 0x83, 0x7d, 0xb4, 0x01, 0x05, 0x69, -+ 0xe7, 0xaf, 0x5c, 0xc4, 0x64, 0x82, 0x12, 0x21, -+ 0xef, 0xf7, 0xd1, 0x7d, 0xb8, 0x8d, 0x8c, 0x98, -+ 0x7c, 0x5f, 0x7d, 0x92, 0x88, 0xb9, 0x94, 0x07, -+ 0x9c, 0xd8, 0xe9, 0x9c, 0x17, 0x38, 0xe3, 0x57, -+ 0x6c, 0xe0, 0xdc, 0xa5, 0x92, 0x42, 0xb3, 0xbd, -+ 0x50, 0xa2, 0x7e, 0xb5, 0xb1, 0x52, 0x72, 0x03, -+ 0x97, 0xd8, 0xaa, 0x9a, 0x1e, 0x75, 0x41, 0x11, -+ 0xa3, 0x4f, 0xcc, 0xd4, 0xe3, 0x73, 0xad, 0x96, -+ 0xdc, 0x47, 0x41, 0x9f, 0xb0, 0xbe, 0x79, 0x91, -+ 0xf5, 0xb6, 0x18, 0xfe, 0xc2, 0x83, 0x18, 0x7d, -+ 0x73, 0xd9, 0x4f, 0x83, 0x84, 0x03, 0xb3, 0xf0, -+ 0x77, 0x66, 0x3d, 0x83, 0x63, 0x2e, 0x2c, 0xf9, -+ 0xdd, 0xa6, 0x1f, 0x89, 0x82, 0xb8, 0x23, 0x42, -+ 0xeb, 0xe2, 0xca, 0x70, 0x82, 0x61, 0x41, 0x0a, -+ 0x6d, 0x5f, 0x75, 0xc5, 0xe2, 0xc4, 0x91, 0x18, -+ 0x44, 0x22, 0xfa, 0x34, 0x10, 0xf5, 0x20, 0xdc, -+ 0xb7, 0xdd, 0x2a, 0x20, 0x77, 0xf5, 0xf9, 0xce, -+ 0xdb, 0xa0, 0x0a, 
0x52, 0x2a, 0x4e, 0xdd, 0xcc, -+ 0x97, 0xdf, 0x05, 0xe4, 0x5e, 0xb7, 0xaa, 0xf0, -+ 0xe2, 0x80, 0xff, 0xba, 0x1a, 0x0f, 0xac, 0xdf, -+ 0x02, 0x32, 0xe6, 0xf7, 0xc7, 0x17, 0x13, 0xb7, -+ 0xfc, 0x98, 0x48, 0x8c, 0x0d, 0x82, 0xc9, 0x80, -+ 0x7a, 0xe2, 0x0a, 0xc5, 0xb4, 0xde, 0x7c, 0x3c, -+ 0x79, 0x81, 0x0e, 0x28, 0x65, 0x79, 0x67, 0x82, -+ 0x69, 0x44, 0x66, 0x09, 0xf7, 0x16, 0x1a, 0xf9, -+ 0x7d, 0x80, 0xa1, 0x79, 0x14, 0xa9, 0xc8, 0x20, -+ 0xfb, 0xa2, 0x46, 0xbe, 0x08, 0x35, 0x17, 0x58, -+ 0xc1, 0x1a, 0xda, 0x2a, 0x6b, 0x2e, 0x1e, 0xe6, -+ 0x27, 0x55, 0x7b, 0x19, 0xe2, 0xfb, 0x64, 0xfc, -+ 0x5e, 0x15, 0x54, 0x3c, 0xe7, 0xc2, 0x11, 0x50, -+ 0x30, 0xb8, 0x72, 0x03, 0x0b, 0x1a, 0x9f, 0x86, -+ 0x27, 0x11, 0x5c, 0x06, 0x2b, 0xbd, 0x75, 0x1a, -+ 0x0a, 0xda, 0x01, 0xfa, 0x5c, 0x4a, 0xc1, 0x80, -+ 0x3a, 0x6e, 0x30, 0xc8, 0x2c, 0xeb, 0x56, 0xec, -+ 0x89, 0xfa, 0x35, 0x7b, 0xb2, 0xf0, 0x97, 0x08, -+ 0x86, 0x53, 0xbe, 0xbd, 0x40, 0x41, 0x38, 0x1c, -+ 0xb4, 0x8b, 0x79, 0x2e, 0x18, 0x96, 0x94, 0xde, -+ 0xe8, 0xca, 0xe5, 0x9f, 0x92, 0x9f, 0x15, 0x5d, -+ 0x56, 0x60, 0x5c, 0x09, 0xf9, 0x16, 0xf4, 0x17, -+ 0x0f, 0xf6, 0x4c, 0xda, 0xe6, 0x67, 0x89, 0x9f, -+ 0xca, 0x6c, 0xe7, 0x9b, 0x04, 0x62, 0x0e, 0x26, -+ 0xa6, 0x52, 0xbd, 0x29, 0xff, 0xc7, 0xa4, 0x96, -+ 0xe6, 0x6a, 0x02, 0xa5, 0x2e, 0x7b, 0xfe, 0x97, -+ 0x68, 0x3e, 0x2e, 0x5f, 0x3b, 0x0f, 0x36, 0xd6, -+ 0x98, 0x19, 0x59, 0x48, 0xd2, 0xc6, 0xe1, 0x55, -+ 0x1a, 0x6e, 0xd6, 0xed, 0x2c, 0xba, 0xc3, 0x9e, -+ 0x64, 0xc9, 0x95, 0x86, 0x35, 0x5e, 0x3e, 0x88, -+ 0x69, 0x99, 0x4b, 0xee, 0xbe, 0x9a, 0x99, 0xb5, -+ 0x6e, 0x58, 0xae, 0xdd, 0x22, 0xdb, 0xdd, 0x6b, -+ 0xfc, 0xaf, 0x90, 0xa3, 0x3d, 0xa4, 0xc1, 0x15, -+ 0x92, 0x18, 0x8d, 0xd2, 0x4b, 0x7b, 0x06, 0xd1, -+ 0x37, 0xb5, 0xe2, 0x7c, 0x2c, 0xf0, 0x25, 0xe4, -+ 0x94, 0x2a, 0xbd, 0xe3, 0x82, 0x70, 0x78, 0xa3, -+ 0x82, 0x10, 0x5a, 0x90, 0xd7, 0xa4, 0xfa, 0xaf, -+ 0x1a, 0x88, 0x59, 0xdc, 0x74, 0x12, 0xb4, 0x8e, -+ 0xd7, 0x19, 0x46, 0xf4, 0x84, 0x69, 0x9f, 0xbb, -+ 0x70, 0xa8, 0x4c, 0x52, 
0x81, 0xa9, 0xff, 0x76, -+ 0x1c, 0xae, 0xd8, 0x11, 0x3d, 0x7f, 0x7d, 0xc5, -+ 0x12, 0x59, 0x28, 0x18, 0xc2, 0xa2, 0xb7, 0x1c, -+ 0x88, 0xf8, 0xd6, 0x1b, 0xa6, 0x7d, 0x9e, 0xde, -+ 0x29, 0xf8, 0xed, 0xff, 0xeb, 0x92, 0x24, 0x4f, -+ 0x05, 0xaa, 0xd9, 0x49, 0xba, 0x87, 0x59, 0x51, -+ 0xc9, 0x20, 0x5c, 0x9b, 0x74, 0xcf, 0x03, 0xd9, -+ 0x2d, 0x34, 0xc7, 0x5b, 0xa5, 0x40, 0xb2, 0x99, -+ 0xf5, 0xcb, 0xb4, 0xf6, 0xb7, 0x72, 0x4a, 0xd6, -+ 0xbd, 0xb0, 0xf3, 0x93, 0xe0, 0x1b, 0xa8, 0x04, -+ 0x1e, 0x35, 0xd4, 0x80, 0x20, 0xf4, 0x9c, 0x31, -+ 0x6b, 0x45, 0xb9, 0x15, 0xb0, 0x5e, 0xdd, 0x0a, -+ 0x33, 0x9c, 0x83, 0xcd, 0x58, 0x89, 0x50, 0x56, -+ 0xbb, 0x81, 0x00, 0x91, 0x32, 0xf3, 0x1b, 0x3e, -+ 0xcf, 0x45, 0xe1, 0xf9, 0xe1, 0x2c, 0x26, 0x78, -+ 0x93, 0x9a, 0x60, 0x46, 0xc9, 0xb5, 0x5e, 0x6a, -+ 0x28, 0x92, 0x87, 0x3f, 0x63, 0x7b, 0xdb, 0xf7, -+ 0xd0, 0x13, 0x9d, 0x32, 0x40, 0x5e, 0xcf, 0xfb, -+ 0x79, 0x68, 0x47, 0x4c, 0xfd, 0x01, 0x17, 0xe6, -+ 0x97, 0x93, 0x78, 0xbb, 0xa6, 0x27, 0xa3, 0xe8, -+ 0x1a, 0xe8, 0x94, 0x55, 0x7d, 0x08, 0xe5, 0xdc, -+ 0x66, 0xa3, 0x69, 0xc8, 0xca, 0xc5, 0xa1, 0x84, -+ 0x55, 0xde, 0x08, 0x91, 0x16, 0x3a, 0x0c, 0x86, -+ 0xab, 0x27, 0x2b, 0x64, 0x34, 0x02, 0x6c, 0x76, -+ 0x8b, 0xc6, 0xaf, 0xcc, 0xe1, 0xd6, 0x8c, 0x2a, -+ 0x18, 0x3d, 0xa6, 0x1b, 0x37, 0x75, 0x45, 0x73, -+ 0xc2, 0x75, 0xd7, 0x53, 0x78, 0x3a, 0xd6, 0xe8, -+ 0x29, 0xd2, 0x4a, 0xa8, 0x1e, 0x82, 0xf6, 0xb6, -+ 0x81, 0xde, 0x21, 0xed, 0x2b, 0x56, 0xbb, 0xf2, -+ 0xd0, 0x57, 0xc1, 0x7c, 0xd2, 0x6a, 0xd2, 0x56, -+ 0xf5, 0x13, 0x5f, 0x1c, 0x6a, 0x0b, 0x74, 0xfb, -+ 0xe9, 0xfe, 0x9e, 0xea, 0x95, 0xb2, 0x46, 0xab, -+ 0x0a, 0xfc, 0xfd, 0xf3, 0xbb, 0x04, 0x2b, 0x76, -+ 0x1b, 0xa4, 0x74, 0xb0, 0xc1, 0x78, 0xc3, 0x69, -+ 0xe2, 0xb0, 0x01, 0xe1, 0xde, 0x32, 0x4c, 0x8d, -+ 0x1a, 0xb3, 0x38, 0x08, 0xd5, 0xfc, 0x1f, 0xdc, -+ 0x0e, 0x2c, 0x9c, 0xb1, 0xa1, 0x63, 0x17, 0x22, -+ 0xf5, 0x6c, 0x93, 0x70, 0x74, 0x00, 0xf8, 0x39, -+ 0x01, 0x94, 0xd1, 0x32, 0x23, 0x56, 0x5d, 0xa6, -+ 0x02, 0x76, 0x76, 0x93, 0xce, 
0x2f, 0x19, 0xe9, -+ 0x17, 0x52, 0xae, 0x6e, 0x2c, 0x6d, 0x61, 0x7f, -+ 0x3b, 0xaa, 0xe0, 0x52, 0x85, 0xc5, 0x65, 0xc1, -+ 0xbb, 0x8e, 0x5b, 0x21, 0xd5, 0xc9, 0x78, 0x83, -+ 0x07, 0x97, 0x4c, 0x62, 0x61, 0x41, 0xd4, 0xfc, -+ 0xc9, 0x39, 0xe3, 0x9b, 0xd0, 0xcc, 0x75, 0xc4, -+ 0x97, 0xe6, 0xdd, 0x2a, 0x5f, 0xa6, 0xe8, 0x59, -+ 0x6c, 0x98, 0xb9, 0x02, 0xe2, 0xa2, 0xd6, 0x68, -+ 0xee, 0x3b, 0x1d, 0xe3, 0x4d, 0x5b, 0x30, 0xef, -+ 0x03, 0xf2, 0xeb, 0x18, 0x57, 0x36, 0xe8, 0xa1, -+ 0xf4, 0x47, 0xfb, 0xcb, 0x8f, 0xcb, 0xc8, 0xf3, -+ 0x4f, 0x74, 0x9d, 0x9d, 0xb1, 0x8d, 0x14, 0x44, -+ 0xd9, 0x19, 0xb4, 0x54, 0x4f, 0x75, 0x19, 0x09, -+ 0xa0, 0x75, 0xbc, 0x3b, 0x82, 0xc6, 0x3f, 0xb8, -+ 0x83, 0x19, 0x6e, 0xd6, 0x37, 0xfe, 0x6e, 0x8a, -+ 0x4e, 0xe0, 0x4a, 0xab, 0x7b, 0xc8, 0xb4, 0x1d, -+ 0xf4, 0xed, 0x27, 0x03, 0x65, 0xa2, 0xa1, 0xae, -+ 0x11, 0xe7, 0x98, 0x78, 0x48, 0x91, 0xd2, 0xd2, -+ 0xd4, 0x23, 0x78, 0x50, 0xb1, 0x5b, 0x85, 0x10, -+ 0x8d, 0xca, 0x5f, 0x0f, 0x71, 0xae, 0x72, 0x9a, -+ 0xf6, 0x25, 0x19, 0x60, 0x06, 0xf7, 0x10, 0x34, -+ 0x18, 0x0d, 0xc9, 0x9f, 0x7b, 0x0c, 0x9b, 0x8f, -+ 0x91, 0x1b, 0x9f, 0xcd, 0x10, 0xee, 0x75, 0xf9, -+ 0x97, 0x66, 0xfc, 0x4d, 0x33, 0x6e, 0x28, 0x2b, -+ 0x92, 0x85, 0x4f, 0xab, 0x43, 0x8d, 0x8f, 0x7d, -+ 0x86, 0xa7, 0xc7, 0xd8, 0xd3, 0x0b, 0x8b, 0x57, -+ 0xb6, 0x1d, 0x95, 0x0d, 0xe9, 0xbc, 0xd9, 0x03, -+ 0xd9, 0x10, 0x19, 0xc3, 0x46, 0x63, 0x55, 0x87, -+ 0x61, 0x79, 0x6c, 0x95, 0x0e, 0x9c, 0xdd, 0xca, -+ 0xc3, 0xf3, 0x64, 0xf0, 0x7d, 0x76, 0xb7, 0x53, -+ 0x67, 0x2b, 0x1e, 0x44, 0x56, 0x81, 0xea, 0x8f, -+ 0x5c, 0x42, 0x16, 0xb8, 0x28, 0xeb, 0x1b, 0x61, -+ 0x10, 0x1e, 0xbf, 0xec, 0xa8 -+}; -+static const u8 dec_assoc011[] __initconst = { -+ 0xd6, 0x31, 0xda, 0x5d, 0x42, 0x5e, 0xd7 -+}; -+static const u8 dec_nonce011[] __initconst = { -+ 0xfd, 0x87, 0xd4, 0xd8, 0x62, 0xfd, 0xec, 0xaa -+}; -+static const u8 dec_key011[] __initconst = { -+ 0x35, 0x4e, 0xb5, 0x70, 0x50, 0x42, 0x8a, 0x85, -+ 0xf2, 0xfb, 0xed, 0x7b, 0xd0, 0x9e, 0x97, 0xca, -+ 0xfa, 
0x98, 0x66, 0x63, 0xee, 0x37, 0xcc, 0x52, -+ 0xfe, 0xd1, 0xdf, 0x95, 0x15, 0x34, 0x29, 0x38 -+}; -+ -+static const u8 dec_input012[] __initconst = { -+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3, -+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf, -+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1, -+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f, -+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e, -+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5, -+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b, -+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b, -+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2, -+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1, -+ 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74, -+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e, -+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae, -+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd, -+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04, -+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55, -+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef, -+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b, -+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74, -+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26, -+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f, -+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64, -+ 0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd, -+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad, -+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b, -+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e, -+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e, -+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0, -+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f, -+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50, -+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97, -+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03, -+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a, -+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15, -+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb, -+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34, -+ 0x6a, 0xe8, 
0x61, 0x88, 0xda, 0xed, 0x01, 0x47, -+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86, -+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24, -+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c, -+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9, -+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7, -+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48, -+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b, -+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e, -+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61, -+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75, -+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26, -+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74, -+ 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43, -+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1, -+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79, -+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3, -+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5, -+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9, -+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d, -+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8, -+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26, -+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5, -+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d, -+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29, -+ 0x23, 0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57, -+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92, -+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9, -+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc, -+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd, -+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57, -+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3, -+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4, -+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c, -+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27, -+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c, -+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5, -+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14, -+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94, -+ 0xe2, 0x7d, 0x79, 
0x82, 0xd0, 0x65, 0x3b, 0x6b, -+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99, -+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84, -+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a, -+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa, -+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75, -+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74, -+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40, -+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72, -+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f, -+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92, -+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8, -+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c, -+ 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f, -+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb, -+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a, -+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b, -+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d, -+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c, -+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4, -+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00, -+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b, -+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4, -+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84, -+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba, -+ 0x72, 0xaa, 0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47, -+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4, -+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88, -+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81, -+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1, -+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a, -+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e, -+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1, -+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07, -+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24, -+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f, -+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a, -+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9, -+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9, -+ 0x73, 0x8b, 0x3b, 0x49, 
0x2f, 0xf0, 0xaf, 0x51, -+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1, -+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c, -+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53, -+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40, -+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a, -+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2, -+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2, -+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8, -+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07, -+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9, -+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d, -+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde, -+ 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f, -+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d, -+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d, -+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56, -+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c, -+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3, -+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d, -+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26, -+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10, -+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c, -+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11, -+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf, -+ 0x0d, 0xc4, 0x8f, 0x79, 0x6b, 0xca, 0x16, 0x7c, -+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb, -+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79, -+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa, -+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80, -+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08, -+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c, -+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc, -+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab, -+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6, -+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9, -+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7, -+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2, -+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33, -+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 
0x17, 0x77, 0xd2, -+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e, -+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c, -+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b, -+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66, -+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6, -+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44, -+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74, -+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6, -+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f, -+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24, -+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1, -+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2, -+ 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5, -+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d, -+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0, -+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b, -+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3, -+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0, -+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3, -+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c, -+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b, -+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5, -+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51, -+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71, -+ 0xae, 0x9c, 0xb7, 0x7b, 0x34, 0xd6, 0x4e, 0x68, -+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb, -+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e, -+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b, -+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8, -+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb, -+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54, -+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7, -+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff, -+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd, -+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde, -+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c, -+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1, -+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8, -+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 
0x53, 0x14, -+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c, -+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4, -+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06, -+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52, -+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d, -+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c, -+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6, -+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5, -+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f, -+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e, -+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98, -+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8, -+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb, -+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b, -+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79, -+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11, -+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d, -+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10, -+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23, -+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23, -+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90, -+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4, -+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1, -+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7, -+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 0xe2, 0x12, 0x11, -+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50, -+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8, -+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97, -+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38, -+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f, -+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33, -+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f, -+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75, -+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21, -+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90, -+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8, -+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91, -+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1, -+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 
0x3f, -+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3, -+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc, -+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a, -+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62, -+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55, -+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23, -+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6, -+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac, -+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12, -+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a, -+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7, -+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec, -+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28, -+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88, -+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4, -+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17, -+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2, -+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33, -+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a, -+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28, -+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62, -+ 0x70, 0xcf, 0xd6 -+}; -+static const u8 dec_output012[] __initconst = { -+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0, -+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5, -+ 0x42, 0x34, 0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57, -+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff, -+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5, -+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b, -+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46, -+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b, -+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71, -+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0, -+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b, -+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d, -+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f, -+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24, -+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23, -+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e, -+ 0xc6, 0x89, 0xf9, 0x52, 
0x09, 0x37, 0x64, 0x14, -+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d, -+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb, -+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4, -+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf, -+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e, -+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6, -+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33, -+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb, -+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0, -+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe, -+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00, -+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d, -+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b, -+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50, -+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e, -+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4, -+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28, -+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8, -+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b, -+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86, -+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67, -+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff, -+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59, -+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe, -+ 0xa0, 0x3b, 0xd8, 0x3f, 0xd1, 0x69, 0xa6, 0xd6, -+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e, -+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b, -+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50, -+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39, -+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02, -+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9, -+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a, -+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38, -+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9, -+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65, -+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb, -+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2, -+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae, -+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 
0x7e, 0x4e, 0xee, -+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00, -+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c, -+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8, -+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31, -+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68, -+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4, -+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0, -+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11, -+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7, -+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39, -+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1, -+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1, -+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2, -+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66, -+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49, -+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2, -+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5, -+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3, -+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c, -+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa, -+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00, -+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54, -+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87, -+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03, -+ 0xa5, 0xf3, 0x05, 0x28, 0xe1, 0xe1, 0xee, 0x39, -+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40, -+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6, -+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22, -+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5, -+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e, -+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32, -+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53, -+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42, -+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c, -+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68, -+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48, -+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c, -+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce, -+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 
0xfd, 0xdd, -+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa, -+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69, -+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8, -+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58, -+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0, -+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45, -+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb, -+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33, -+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c, -+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23, -+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80, -+ 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1, -+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff, -+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24, -+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9, -+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46, -+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8, -+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20, -+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35, -+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63, -+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb, -+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36, -+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a, -+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c, -+ 0xba, 0x92, 0x93, 0x63, 0xb8, 0xe2, 0x95, 0x0f, -+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02, -+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03, -+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa, -+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16, -+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d, -+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5, -+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7, -+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac, -+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47, -+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3, -+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35, -+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e, -+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6, -+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 
0x74, -+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e, -+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a, -+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0, -+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4, -+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8, -+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16, -+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32, -+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65, -+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06, -+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a, -+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7, -+ 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85, -+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb, -+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46, -+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e, -+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61, -+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb, -+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d, -+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00, -+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5, -+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6, -+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1, -+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a, -+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7, -+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 0xe6, 0x63, -+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38, -+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3, -+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed, -+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49, -+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42, -+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0, -+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f, -+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1, -+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd, -+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d, -+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88, -+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1, -+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25, -+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22, -+ 
0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28, -+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f, -+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53, -+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28, -+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8, -+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc, -+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8, -+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb, -+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3, -+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3, -+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac, -+ 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2, -+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a, -+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad, -+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e, -+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd, -+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf, -+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba, -+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41, -+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91, -+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d, -+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6, -+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf, -+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92, -+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 0x8e, -+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72, -+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04, -+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46, -+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55, -+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84, -+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61, -+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d, -+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8, -+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d, -+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87, -+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70, -+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94, -+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f, -+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb, -+ 0xbd, 
0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90, -+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31, -+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06, -+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05, -+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7, -+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e, -+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae, -+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2, -+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21, -+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0, -+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d, -+ 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0, -+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6, -+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5, -+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9, -+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8, -+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57, -+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1, -+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c, -+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b, -+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69, -+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d, -+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d, -+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19, -+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82, -+ 0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20, -+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f, -+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e, -+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f, -+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47, -+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b, -+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4, -+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b, -+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4, -+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9, -+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3, -+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0, -+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16, -+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d, -+ 0x78, 0xec, 0x00 
-+}; -+static const u8 dec_assoc012[] __initconst = { -+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8, -+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce, -+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c, -+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc, -+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e, -+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f, -+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b, -+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9 -+}; -+static const u8 dec_nonce012[] __initconst = { -+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06 -+}; -+static const u8 dec_key012[] __initconst = { -+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e, -+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d, -+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e, -+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64 -+}; -+ -+static const u8 dec_input013[] __initconst = { -+ 0x52, 0x34, 0xb3, 0x65, 0x3b, 0xb7, 0xe5, 0xd3, -+ 0xab, 0x49, 0x17, 0x60, 0xd2, 0x52, 0x56, 0xdf, -+ 0xdf, 0x34, 0x56, 0x82, 0xe2, 0xbe, 0xe5, 0xe1, -+ 0x28, 0xd1, 0x4e, 0x5f, 0x4f, 0x01, 0x7d, 0x3f, -+ 0x99, 0x6b, 0x30, 0x6e, 0x1a, 0x7c, 0x4c, 0x8e, -+ 0x62, 0x81, 0xae, 0x86, 0x3f, 0x6b, 0xd0, 0xb5, -+ 0xa9, 0xcf, 0x50, 0xf1, 0x02, 0x12, 0xa0, 0x0b, -+ 0x24, 0xe9, 0xe6, 0x72, 0x89, 0x2c, 0x52, 0x1b, -+ 0x34, 0x38, 0xf8, 0x75, 0x5f, 0xa0, 0x74, 0xe2, -+ 0x99, 0xdd, 0xa6, 0x4b, 0x14, 0x50, 0x4e, 0xf1, -+ 0xbe, 0xd6, 0x9e, 0xdb, 0xb2, 0x24, 0x27, 0x74, -+ 0x12, 0x4a, 0x78, 0x78, 0x17, 0xa5, 0x58, 0x8e, -+ 0x2f, 0xf9, 0xf4, 0x8d, 0xee, 0x03, 0x88, 0xae, -+ 0xb8, 0x29, 0xa1, 0x2f, 0x4b, 0xee, 0x92, 0xbd, -+ 0x87, 0xb3, 0xce, 0x34, 0x21, 0x57, 0x46, 0x04, -+ 0x49, 0x0c, 0x80, 0xf2, 0x01, 0x13, 0xa1, 0x55, -+ 0xb3, 0xff, 0x44, 0x30, 0x3c, 0x1c, 0xd0, 0xef, -+ 0xbc, 0x18, 0x74, 0x26, 0xad, 0x41, 0x5b, 0x5b, -+ 0x3e, 0x9a, 0x7a, 0x46, 0x4f, 0x16, 0xd6, 0x74, -+ 0x5a, 0xb7, 0x3a, 0x28, 0x31, 0xd8, 0xae, 0x26, -+ 0xac, 0x50, 0x53, 0x86, 0xf2, 0x56, 0xd7, 0x3f, -+ 0x29, 0xbc, 0x45, 0x68, 0x8e, 0xcb, 0x98, 0x64, -+ 
0xdd, 0xc9, 0xba, 0xb8, 0x4b, 0x7b, 0x82, 0xdd, -+ 0x14, 0xa7, 0xcb, 0x71, 0x72, 0x00, 0x5c, 0xad, -+ 0x7b, 0x6a, 0x89, 0xa4, 0x3d, 0xbf, 0xb5, 0x4b, -+ 0x3e, 0x7c, 0x5a, 0xcf, 0xb8, 0xa1, 0xc5, 0x6e, -+ 0xc8, 0xb6, 0x31, 0x57, 0x7b, 0xdf, 0xa5, 0x7e, -+ 0xb1, 0xd6, 0x42, 0x2a, 0x31, 0x36, 0xd1, 0xd0, -+ 0x3f, 0x7a, 0xe5, 0x94, 0xd6, 0x36, 0xa0, 0x6f, -+ 0xb7, 0x40, 0x7d, 0x37, 0xc6, 0x55, 0x7c, 0x50, -+ 0x40, 0x6d, 0x29, 0x89, 0xe3, 0x5a, 0xae, 0x97, -+ 0xe7, 0x44, 0x49, 0x6e, 0xbd, 0x81, 0x3d, 0x03, -+ 0x93, 0x06, 0x12, 0x06, 0xe2, 0x41, 0x12, 0x4a, -+ 0xf1, 0x6a, 0xa4, 0x58, 0xa2, 0xfb, 0xd2, 0x15, -+ 0xba, 0xc9, 0x79, 0xc9, 0xce, 0x5e, 0x13, 0xbb, -+ 0xf1, 0x09, 0x04, 0xcc, 0xfd, 0xe8, 0x51, 0x34, -+ 0x6a, 0xe8, 0x61, 0x88, 0xda, 0xed, 0x01, 0x47, -+ 0x84, 0xf5, 0x73, 0x25, 0xf9, 0x1c, 0x42, 0x86, -+ 0x07, 0xf3, 0x5b, 0x1a, 0x01, 0xb3, 0xeb, 0x24, -+ 0x32, 0x8d, 0xf6, 0xed, 0x7c, 0x4b, 0xeb, 0x3c, -+ 0x36, 0x42, 0x28, 0xdf, 0xdf, 0xb6, 0xbe, 0xd9, -+ 0x8c, 0x52, 0xd3, 0x2b, 0x08, 0x90, 0x8c, 0xe7, -+ 0x98, 0x31, 0xe2, 0x32, 0x8e, 0xfc, 0x11, 0x48, -+ 0x00, 0xa8, 0x6a, 0x42, 0x4a, 0x02, 0xc6, 0x4b, -+ 0x09, 0xf1, 0xe3, 0x49, 0xf3, 0x45, 0x1f, 0x0e, -+ 0xbc, 0x56, 0xe2, 0xe4, 0xdf, 0xfb, 0xeb, 0x61, -+ 0xfa, 0x24, 0xc1, 0x63, 0x75, 0xbb, 0x47, 0x75, -+ 0xaf, 0xe1, 0x53, 0x16, 0x96, 0x21, 0x85, 0x26, -+ 0x11, 0xb3, 0x76, 0xe3, 0x23, 0xa1, 0x6b, 0x74, -+ 0x37, 0xd0, 0xde, 0x06, 0x90, 0x71, 0x5d, 0x43, -+ 0x88, 0x9b, 0x00, 0x54, 0xa6, 0x75, 0x2f, 0xa1, -+ 0xc2, 0x0b, 0x73, 0x20, 0x1d, 0xb6, 0x21, 0x79, -+ 0x57, 0x3f, 0xfa, 0x09, 0xbe, 0x8a, 0x33, 0xc3, -+ 0x52, 0xf0, 0x1d, 0x82, 0x31, 0xd1, 0x55, 0xb5, -+ 0x6c, 0x99, 0x25, 0xcf, 0x5c, 0x32, 0xce, 0xe9, -+ 0x0d, 0xfa, 0x69, 0x2c, 0xd5, 0x0d, 0xc5, 0x6d, -+ 0x86, 0xd0, 0x0c, 0x3b, 0x06, 0x50, 0x79, 0xe8, -+ 0xc3, 0xae, 0x04, 0xe6, 0xcd, 0x51, 0xe4, 0x26, -+ 0x9b, 0x4f, 0x7e, 0xa6, 0x0f, 0xab, 0xd8, 0xe5, -+ 0xde, 0xa9, 0x00, 0x95, 0xbe, 0xa3, 0x9d, 0x5d, -+ 0xb2, 0x09, 0x70, 0x18, 0x1c, 0xf0, 0xac, 0x29, -+ 0x23, 
0x02, 0x29, 0x28, 0xd2, 0x74, 0x35, 0x57, -+ 0x62, 0x0f, 0x24, 0xea, 0x5e, 0x33, 0xc2, 0x92, -+ 0xf3, 0x78, 0x4d, 0x30, 0x1e, 0xa1, 0x99, 0xa9, -+ 0x82, 0xb0, 0x42, 0x31, 0x8d, 0xad, 0x8a, 0xbc, -+ 0xfc, 0xd4, 0x57, 0x47, 0x3e, 0xb4, 0x50, 0xdd, -+ 0x6e, 0x2c, 0x80, 0x4d, 0x22, 0xf1, 0xfb, 0x57, -+ 0xc4, 0xdd, 0x17, 0xe1, 0x8a, 0x36, 0x4a, 0xb3, -+ 0x37, 0xca, 0xc9, 0x4e, 0xab, 0xd5, 0x69, 0xc4, -+ 0xf4, 0xbc, 0x0b, 0x3b, 0x44, 0x4b, 0x29, 0x9c, -+ 0xee, 0xd4, 0x35, 0x22, 0x21, 0xb0, 0x1f, 0x27, -+ 0x64, 0xa8, 0x51, 0x1b, 0xf0, 0x9f, 0x19, 0x5c, -+ 0xfb, 0x5a, 0x64, 0x74, 0x70, 0x45, 0x09, 0xf5, -+ 0x64, 0xfe, 0x1a, 0x2d, 0xc9, 0x14, 0x04, 0x14, -+ 0xcf, 0xd5, 0x7d, 0x60, 0xaf, 0x94, 0x39, 0x94, -+ 0xe2, 0x7d, 0x79, 0x82, 0xd0, 0x65, 0x3b, 0x6b, -+ 0x9c, 0x19, 0x84, 0xb4, 0x6d, 0xb3, 0x0c, 0x99, -+ 0xc0, 0x56, 0xa8, 0xbd, 0x73, 0xce, 0x05, 0x84, -+ 0x3e, 0x30, 0xaa, 0xc4, 0x9b, 0x1b, 0x04, 0x2a, -+ 0x9f, 0xd7, 0x43, 0x2b, 0x23, 0xdf, 0xbf, 0xaa, -+ 0xd5, 0xc2, 0x43, 0x2d, 0x70, 0xab, 0xdc, 0x75, -+ 0xad, 0xac, 0xf7, 0xc0, 0xbe, 0x67, 0xb2, 0x74, -+ 0xed, 0x67, 0x10, 0x4a, 0x92, 0x60, 0xc1, 0x40, -+ 0x50, 0x19, 0x8a, 0x8a, 0x8c, 0x09, 0x0e, 0x72, -+ 0xe1, 0x73, 0x5e, 0xe8, 0x41, 0x85, 0x63, 0x9f, -+ 0x3f, 0xd7, 0x7d, 0xc4, 0xfb, 0x22, 0x5d, 0x92, -+ 0x6c, 0xb3, 0x1e, 0xe2, 0x50, 0x2f, 0x82, 0xa8, -+ 0x28, 0xc0, 0xb5, 0xd7, 0x5f, 0x68, 0x0d, 0x2c, -+ 0x2d, 0xaf, 0x7e, 0xfa, 0x2e, 0x08, 0x0f, 0x1f, -+ 0x70, 0x9f, 0xe9, 0x19, 0x72, 0x55, 0xf8, 0xfb, -+ 0x51, 0xd2, 0x33, 0x5d, 0xa0, 0xd3, 0x2b, 0x0a, -+ 0x6c, 0xbc, 0x4e, 0xcf, 0x36, 0x4d, 0xdc, 0x3b, -+ 0xe9, 0x3e, 0x81, 0x7c, 0x61, 0xdb, 0x20, 0x2d, -+ 0x3a, 0xc3, 0xb3, 0x0c, 0x1e, 0x00, 0xb9, 0x7c, -+ 0xf5, 0xca, 0x10, 0x5f, 0x3a, 0x71, 0xb3, 0xe4, -+ 0x20, 0xdb, 0x0c, 0x2a, 0x98, 0x63, 0x45, 0x00, -+ 0x58, 0xf6, 0x68, 0xe4, 0x0b, 0xda, 0x13, 0x3b, -+ 0x60, 0x5c, 0x76, 0xdb, 0xb9, 0x97, 0x71, 0xe4, -+ 0xd9, 0xb7, 0xdb, 0xbd, 0x68, 0xc7, 0x84, 0x84, -+ 0xaa, 0x7c, 0x68, 0x62, 0x5e, 0x16, 0xfc, 0xba, -+ 0x72, 0xaa, 
0x9a, 0xa9, 0xeb, 0x7c, 0x75, 0x47, -+ 0x97, 0x7e, 0xad, 0xe2, 0xd9, 0x91, 0xe8, 0xe4, -+ 0xa5, 0x31, 0xd7, 0x01, 0x8e, 0xa2, 0x11, 0x88, -+ 0x95, 0xb9, 0xf2, 0x9b, 0xd3, 0x7f, 0x1b, 0x81, -+ 0x22, 0xf7, 0x98, 0x60, 0x0a, 0x64, 0xa6, 0xc1, -+ 0xf6, 0x49, 0xc7, 0xe3, 0x07, 0x4d, 0x94, 0x7a, -+ 0xcf, 0x6e, 0x68, 0x0c, 0x1b, 0x3f, 0x6e, 0x2e, -+ 0xee, 0x92, 0xfa, 0x52, 0xb3, 0x59, 0xf8, 0xf1, -+ 0x8f, 0x6a, 0x66, 0xa3, 0x82, 0x76, 0x4a, 0x07, -+ 0x1a, 0xc7, 0xdd, 0xf5, 0xda, 0x9c, 0x3c, 0x24, -+ 0xbf, 0xfd, 0x42, 0xa1, 0x10, 0x64, 0x6a, 0x0f, -+ 0x89, 0xee, 0x36, 0xa5, 0xce, 0x99, 0x48, 0x6a, -+ 0xf0, 0x9f, 0x9e, 0x69, 0xa4, 0x40, 0x20, 0xe9, -+ 0x16, 0x15, 0xf7, 0xdb, 0x75, 0x02, 0xcb, 0xe9, -+ 0x73, 0x8b, 0x3b, 0x49, 0x2f, 0xf0, 0xaf, 0x51, -+ 0x06, 0x5c, 0xdf, 0x27, 0x27, 0x49, 0x6a, 0xd1, -+ 0xcc, 0xc7, 0xb5, 0x63, 0xb5, 0xfc, 0xb8, 0x5c, -+ 0x87, 0x7f, 0x84, 0xb4, 0xcc, 0x14, 0xa9, 0x53, -+ 0xda, 0xa4, 0x56, 0xf8, 0xb6, 0x1b, 0xcc, 0x40, -+ 0x27, 0x52, 0x06, 0x5a, 0x13, 0x81, 0xd7, 0x3a, -+ 0xd4, 0x3b, 0xfb, 0x49, 0x65, 0x31, 0x33, 0xb2, -+ 0xfa, 0xcd, 0xad, 0x58, 0x4e, 0x2b, 0xae, 0xd2, -+ 0x20, 0xfb, 0x1a, 0x48, 0xb4, 0x3f, 0x9a, 0xd8, -+ 0x7a, 0x35, 0x4a, 0xc8, 0xee, 0x88, 0x5e, 0x07, -+ 0x66, 0x54, 0xb9, 0xec, 0x9f, 0xa3, 0xe3, 0xb9, -+ 0x37, 0xaa, 0x49, 0x76, 0x31, 0xda, 0x74, 0x2d, -+ 0x3c, 0xa4, 0x65, 0x10, 0x32, 0x38, 0xf0, 0xde, -+ 0xd3, 0x99, 0x17, 0xaa, 0x71, 0xaa, 0x8f, 0x0f, -+ 0x8c, 0xaf, 0xa2, 0xf8, 0x5d, 0x64, 0xba, 0x1d, -+ 0xa3, 0xef, 0x96, 0x73, 0xe8, 0xa1, 0x02, 0x8d, -+ 0x0c, 0x6d, 0xb8, 0x06, 0x90, 0xb8, 0x08, 0x56, -+ 0x2c, 0xa7, 0x06, 0xc9, 0xc2, 0x38, 0xdb, 0x7c, -+ 0x63, 0xb1, 0x57, 0x8e, 0xea, 0x7c, 0x79, 0xf3, -+ 0x49, 0x1d, 0xfe, 0x9f, 0xf3, 0x6e, 0xb1, 0x1d, -+ 0xba, 0x19, 0x80, 0x1a, 0x0a, 0xd3, 0xb0, 0x26, -+ 0x21, 0x40, 0xb1, 0x7c, 0xf9, 0x4d, 0x8d, 0x10, -+ 0xc1, 0x7e, 0xf4, 0xf6, 0x3c, 0xa8, 0xfd, 0x7c, -+ 0xa3, 0x92, 0xb2, 0x0f, 0xaa, 0xcc, 0xa6, 0x11, -+ 0xfe, 0x04, 0xe3, 0xd1, 0x7a, 0x32, 0x89, 0xdf, -+ 0x0d, 0xc4, 0x8f, 
0x79, 0x6b, 0xca, 0x16, 0x7c, -+ 0x6e, 0xf9, 0xad, 0x0f, 0xf6, 0xfe, 0x27, 0xdb, -+ 0xc4, 0x13, 0x70, 0xf1, 0x62, 0x1a, 0x4f, 0x79, -+ 0x40, 0xc9, 0x9b, 0x8b, 0x21, 0xea, 0x84, 0xfa, -+ 0xf5, 0xf1, 0x89, 0xce, 0xb7, 0x55, 0x0a, 0x80, -+ 0x39, 0x2f, 0x55, 0x36, 0x16, 0x9c, 0x7b, 0x08, -+ 0xbd, 0x87, 0x0d, 0xa5, 0x32, 0xf1, 0x52, 0x7c, -+ 0xe8, 0x55, 0x60, 0x5b, 0xd7, 0x69, 0xe4, 0xfc, -+ 0xfa, 0x12, 0x85, 0x96, 0xea, 0x50, 0x28, 0xab, -+ 0x8a, 0xf7, 0xbb, 0x0e, 0x53, 0x74, 0xca, 0xa6, -+ 0x27, 0x09, 0xc2, 0xb5, 0xde, 0x18, 0x14, 0xd9, -+ 0xea, 0xe5, 0x29, 0x1c, 0x40, 0x56, 0xcf, 0xd7, -+ 0xae, 0x05, 0x3f, 0x65, 0xaf, 0x05, 0x73, 0xe2, -+ 0x35, 0x96, 0x27, 0x07, 0x14, 0xc0, 0xad, 0x33, -+ 0xf1, 0xdc, 0x44, 0x7a, 0x89, 0x17, 0x77, 0xd2, -+ 0x9c, 0x58, 0x60, 0xf0, 0x3f, 0x7b, 0x2d, 0x2e, -+ 0x57, 0x95, 0x54, 0x87, 0xed, 0xf2, 0xc7, 0x4c, -+ 0xf0, 0xae, 0x56, 0x29, 0x19, 0x7d, 0x66, 0x4b, -+ 0x9b, 0x83, 0x84, 0x42, 0x3b, 0x01, 0x25, 0x66, -+ 0x8e, 0x02, 0xde, 0xb9, 0x83, 0x54, 0x19, 0xf6, -+ 0x9f, 0x79, 0x0d, 0x67, 0xc5, 0x1d, 0x7a, 0x44, -+ 0x02, 0x98, 0xa7, 0x16, 0x1c, 0x29, 0x0d, 0x74, -+ 0xff, 0x85, 0x40, 0x06, 0xef, 0x2c, 0xa9, 0xc6, -+ 0xf5, 0x53, 0x07, 0x06, 0xae, 0xe4, 0xfa, 0x5f, -+ 0xd8, 0x39, 0x4d, 0xf1, 0x9b, 0x6b, 0xd9, 0x24, -+ 0x84, 0xfe, 0x03, 0x4c, 0xb2, 0x3f, 0xdf, 0xa1, -+ 0x05, 0x9e, 0x50, 0x14, 0x5a, 0xd9, 0x1a, 0xa2, -+ 0xa7, 0xfa, 0xfa, 0x17, 0xf7, 0x78, 0xd6, 0xb5, -+ 0x92, 0x61, 0x91, 0xac, 0x36, 0xfa, 0x56, 0x0d, -+ 0x38, 0x32, 0x18, 0x85, 0x08, 0x58, 0x37, 0xf0, -+ 0x4b, 0xdb, 0x59, 0xe7, 0xa4, 0x34, 0xc0, 0x1b, -+ 0x01, 0xaf, 0x2d, 0xde, 0xa1, 0xaa, 0x5d, 0xd3, -+ 0xec, 0xe1, 0xd4, 0xf7, 0xe6, 0x54, 0x68, 0xf0, -+ 0x51, 0x97, 0xa7, 0x89, 0xea, 0x24, 0xad, 0xd3, -+ 0x6e, 0x47, 0x93, 0x8b, 0x4b, 0xb4, 0xf7, 0x1c, -+ 0x42, 0x06, 0x67, 0xe8, 0x99, 0xf6, 0xf5, 0x7b, -+ 0x85, 0xb5, 0x65, 0xb5, 0xb5, 0xd2, 0x37, 0xf5, -+ 0xf3, 0x02, 0xa6, 0x4d, 0x11, 0xa7, 0xdc, 0x51, -+ 0x09, 0x7f, 0xa0, 0xd8, 0x88, 0x1c, 0x13, 0x71, -+ 0xae, 0x9c, 0xb7, 0x7b, 
0x34, 0xd6, 0x4e, 0x68, -+ 0x26, 0x83, 0x51, 0xaf, 0x1d, 0xee, 0x8b, 0xbb, -+ 0x69, 0x43, 0x2b, 0x9e, 0x8a, 0xbc, 0x02, 0x0e, -+ 0xa0, 0x1b, 0xe0, 0xa8, 0x5f, 0x6f, 0xaf, 0x1b, -+ 0x8f, 0xe7, 0x64, 0x71, 0x74, 0x11, 0x7e, 0xa8, -+ 0xd8, 0xf9, 0x97, 0x06, 0xc3, 0xb6, 0xfb, 0xfb, -+ 0xb7, 0x3d, 0x35, 0x9d, 0x3b, 0x52, 0xed, 0x54, -+ 0xca, 0xf4, 0x81, 0x01, 0x2d, 0x1b, 0xc3, 0xa7, -+ 0x00, 0x3d, 0x1a, 0x39, 0x54, 0xe1, 0xf6, 0xff, -+ 0xed, 0x6f, 0x0b, 0x5a, 0x68, 0xda, 0x58, 0xdd, -+ 0xa9, 0xcf, 0x5c, 0x4a, 0xe5, 0x09, 0x4e, 0xde, -+ 0x9d, 0xbc, 0x3e, 0xee, 0x5a, 0x00, 0x3b, 0x2c, -+ 0x87, 0x10, 0x65, 0x60, 0xdd, 0xd7, 0x56, 0xd1, -+ 0x4c, 0x64, 0x45, 0xe4, 0x21, 0xec, 0x78, 0xf8, -+ 0x25, 0x7a, 0x3e, 0x16, 0x5d, 0x09, 0x53, 0x14, -+ 0xbe, 0x4f, 0xae, 0x87, 0xd8, 0xd1, 0xaa, 0x3c, -+ 0xf6, 0x3e, 0xa4, 0x70, 0x8c, 0x5e, 0x70, 0xa4, -+ 0xb3, 0x6b, 0x66, 0x73, 0xd3, 0xbf, 0x31, 0x06, -+ 0x19, 0x62, 0x93, 0x15, 0xf2, 0x86, 0xe4, 0x52, -+ 0x7e, 0x53, 0x4c, 0x12, 0x38, 0xcc, 0x34, 0x7d, -+ 0x57, 0xf6, 0x42, 0x93, 0x8a, 0xc4, 0xee, 0x5c, -+ 0x8a, 0xe1, 0x52, 0x8f, 0x56, 0x64, 0xf6, 0xa6, -+ 0xd1, 0x91, 0x57, 0x70, 0xcd, 0x11, 0x76, 0xf5, -+ 0x59, 0x60, 0x60, 0x3c, 0xc1, 0xc3, 0x0b, 0x7f, -+ 0x58, 0x1a, 0x50, 0x91, 0xf1, 0x68, 0x8f, 0x6e, -+ 0x74, 0x74, 0xa8, 0x51, 0x0b, 0xf7, 0x7a, 0x98, -+ 0x37, 0xf2, 0x0a, 0x0e, 0xa4, 0x97, 0x04, 0xb8, -+ 0x9b, 0xfd, 0xa0, 0xea, 0xf7, 0x0d, 0xe1, 0xdb, -+ 0x03, 0xf0, 0x31, 0x29, 0xf8, 0xdd, 0x6b, 0x8b, -+ 0x5d, 0xd8, 0x59, 0xa9, 0x29, 0xcf, 0x9a, 0x79, -+ 0x89, 0x19, 0x63, 0x46, 0x09, 0x79, 0x6a, 0x11, -+ 0xda, 0x63, 0x68, 0x48, 0x77, 0x23, 0xfb, 0x7d, -+ 0x3a, 0x43, 0xcb, 0x02, 0x3b, 0x7a, 0x6d, 0x10, -+ 0x2a, 0x9e, 0xac, 0xf1, 0xd4, 0x19, 0xf8, 0x23, -+ 0x64, 0x1d, 0x2c, 0x5f, 0xf2, 0xb0, 0x5c, 0x23, -+ 0x27, 0xf7, 0x27, 0x30, 0x16, 0x37, 0xb1, 0x90, -+ 0xab, 0x38, 0xfb, 0x55, 0xcd, 0x78, 0x58, 0xd4, -+ 0x7d, 0x43, 0xf6, 0x45, 0x5e, 0x55, 0x8d, 0xb1, -+ 0x02, 0x65, 0x58, 0xb4, 0x13, 0x4b, 0x36, 0xf7, -+ 0xcc, 0xfe, 0x3d, 0x0b, 0x82, 
0xe2, 0x12, 0x11, -+ 0xbb, 0xe6, 0xb8, 0x3a, 0x48, 0x71, 0xc7, 0x50, -+ 0x06, 0x16, 0x3a, 0xe6, 0x7c, 0x05, 0xc7, 0xc8, -+ 0x4d, 0x2f, 0x08, 0x6a, 0x17, 0x9a, 0x95, 0x97, -+ 0x50, 0x68, 0xdc, 0x28, 0x18, 0xc4, 0x61, 0x38, -+ 0xb9, 0xe0, 0x3e, 0x78, 0xdb, 0x29, 0xe0, 0x9f, -+ 0x52, 0xdd, 0xf8, 0x4f, 0x91, 0xc1, 0xd0, 0x33, -+ 0xa1, 0x7a, 0x8e, 0x30, 0x13, 0x82, 0x07, 0x9f, -+ 0xd3, 0x31, 0x0f, 0x23, 0xbe, 0x32, 0x5a, 0x75, -+ 0xcf, 0x96, 0xb2, 0xec, 0xb5, 0x32, 0xac, 0x21, -+ 0xd1, 0x82, 0x33, 0xd3, 0x15, 0x74, 0xbd, 0x90, -+ 0xf1, 0x2c, 0xe6, 0x5f, 0x8d, 0xe3, 0x02, 0xe8, -+ 0xe9, 0xc4, 0xca, 0x96, 0xeb, 0x0e, 0xbc, 0x91, -+ 0xf4, 0xb9, 0xea, 0xd9, 0x1b, 0x75, 0xbd, 0xe1, -+ 0xac, 0x2a, 0x05, 0x37, 0x52, 0x9b, 0x1b, 0x3f, -+ 0x5a, 0xdc, 0x21, 0xc3, 0x98, 0xbb, 0xaf, 0xa3, -+ 0xf2, 0x00, 0xbf, 0x0d, 0x30, 0x89, 0x05, 0xcc, -+ 0xa5, 0x76, 0xf5, 0x06, 0xf0, 0xc6, 0x54, 0x8a, -+ 0x5d, 0xd4, 0x1e, 0xc1, 0xf2, 0xce, 0xb0, 0x62, -+ 0xc8, 0xfc, 0x59, 0x42, 0x9a, 0x90, 0x60, 0x55, -+ 0xfe, 0x88, 0xa5, 0x8b, 0xb8, 0x33, 0x0c, 0x23, -+ 0x24, 0x0d, 0x15, 0x70, 0x37, 0x1e, 0x3d, 0xf6, -+ 0xd2, 0xea, 0x92, 0x10, 0xb2, 0xc4, 0x51, 0xac, -+ 0xf2, 0xac, 0xf3, 0x6b, 0x6c, 0xaa, 0xcf, 0x12, -+ 0xc5, 0x6c, 0x90, 0x50, 0xb5, 0x0c, 0xfc, 0x1a, -+ 0x15, 0x52, 0xe9, 0x26, 0xc6, 0x52, 0xa4, 0xe7, -+ 0x81, 0x69, 0xe1, 0xe7, 0x9e, 0x30, 0x01, 0xec, -+ 0x84, 0x89, 0xb2, 0x0d, 0x66, 0xdd, 0xce, 0x28, -+ 0x5c, 0xec, 0x98, 0x46, 0x68, 0x21, 0x9f, 0x88, -+ 0x3f, 0x1f, 0x42, 0x77, 0xce, 0xd0, 0x61, 0xd4, -+ 0x20, 0xa7, 0xff, 0x53, 0xad, 0x37, 0xd0, 0x17, -+ 0x35, 0xc9, 0xfc, 0xba, 0x0a, 0x78, 0x3f, 0xf2, -+ 0xcc, 0x86, 0x89, 0xe8, 0x4b, 0x3c, 0x48, 0x33, -+ 0x09, 0x7f, 0xc6, 0xc0, 0xdd, 0xb8, 0xfd, 0x7a, -+ 0x66, 0x66, 0x65, 0xeb, 0x47, 0xa7, 0x04, 0x28, -+ 0xa3, 0x19, 0x8e, 0xa9, 0xb1, 0x13, 0x67, 0x62, -+ 0x70, 0xcf, 0xd7 -+}; -+static const u8 dec_output013[] __initconst = { -+ 0x74, 0xa6, 0x3e, 0xe4, 0xb1, 0xcb, 0xaf, 0xb0, -+ 0x40, 0xe5, 0x0f, 0x9e, 0xf1, 0xf2, 0x89, 0xb5, -+ 0x42, 0x34, 
0x8a, 0xa1, 0x03, 0xb7, 0xe9, 0x57, -+ 0x46, 0xbe, 0x20, 0xe4, 0x6e, 0xb0, 0xeb, 0xff, -+ 0xea, 0x07, 0x7e, 0xef, 0xe2, 0x55, 0x9f, 0xe5, -+ 0x78, 0x3a, 0xb7, 0x83, 0xc2, 0x18, 0x40, 0x7b, -+ 0xeb, 0xcd, 0x81, 0xfb, 0x90, 0x12, 0x9e, 0x46, -+ 0xa9, 0xd6, 0x4a, 0xba, 0xb0, 0x62, 0xdb, 0x6b, -+ 0x99, 0xc4, 0xdb, 0x54, 0x4b, 0xb8, 0xa5, 0x71, -+ 0xcb, 0xcd, 0x63, 0x32, 0x55, 0xfb, 0x31, 0xf0, -+ 0x38, 0xf5, 0xbe, 0x78, 0xe4, 0x45, 0xce, 0x1b, -+ 0x6a, 0x5b, 0x0e, 0xf4, 0x16, 0xe4, 0xb1, 0x3d, -+ 0xf6, 0x63, 0x7b, 0xa7, 0x0c, 0xde, 0x6f, 0x8f, -+ 0x74, 0xdf, 0xe0, 0x1e, 0x9d, 0xce, 0x8f, 0x24, -+ 0xef, 0x23, 0x35, 0x33, 0x7b, 0x83, 0x34, 0x23, -+ 0x58, 0x74, 0x14, 0x77, 0x1f, 0xc2, 0x4f, 0x4e, -+ 0xc6, 0x89, 0xf9, 0x52, 0x09, 0x37, 0x64, 0x14, -+ 0xc4, 0x01, 0x6b, 0x9d, 0x77, 0xe8, 0x90, 0x5d, -+ 0xa8, 0x4a, 0x2a, 0xef, 0x5c, 0x7f, 0xeb, 0xbb, -+ 0xb2, 0xc6, 0x93, 0x99, 0x66, 0xdc, 0x7f, 0xd4, -+ 0x9e, 0x2a, 0xca, 0x8d, 0xdb, 0xe7, 0x20, 0xcf, -+ 0xe4, 0x73, 0xae, 0x49, 0x7d, 0x64, 0x0f, 0x0e, -+ 0x28, 0x46, 0xa9, 0xa8, 0x32, 0xe4, 0x0e, 0xf6, -+ 0x51, 0x53, 0xb8, 0x3c, 0xb1, 0xff, 0xa3, 0x33, -+ 0x41, 0x75, 0xff, 0xf1, 0x6f, 0xf1, 0xfb, 0xbb, -+ 0x83, 0x7f, 0x06, 0x9b, 0xe7, 0x1b, 0x0a, 0xe0, -+ 0x5c, 0x33, 0x60, 0x5b, 0xdb, 0x5b, 0xed, 0xfe, -+ 0xa5, 0x16, 0x19, 0x72, 0xa3, 0x64, 0x23, 0x00, -+ 0x02, 0xc7, 0xf3, 0x6a, 0x81, 0x3e, 0x44, 0x1d, -+ 0x79, 0x15, 0x5f, 0x9a, 0xde, 0xe2, 0xfd, 0x1b, -+ 0x73, 0xc1, 0xbc, 0x23, 0xba, 0x31, 0xd2, 0x50, -+ 0xd5, 0xad, 0x7f, 0x74, 0xa7, 0xc9, 0xf8, 0x3e, -+ 0x2b, 0x26, 0x10, 0xf6, 0x03, 0x36, 0x74, 0xe4, -+ 0x0e, 0x6a, 0x72, 0xb7, 0x73, 0x0a, 0x42, 0x28, -+ 0xc2, 0xad, 0x5e, 0x03, 0xbe, 0xb8, 0x0b, 0xa8, -+ 0x5b, 0xd4, 0xb8, 0xba, 0x52, 0x89, 0xb1, 0x9b, -+ 0xc1, 0xc3, 0x65, 0x87, 0xed, 0xa5, 0xf4, 0x86, -+ 0xfd, 0x41, 0x80, 0x91, 0x27, 0x59, 0x53, 0x67, -+ 0x15, 0x78, 0x54, 0x8b, 0x2d, 0x3d, 0xc7, 0xff, -+ 0x02, 0x92, 0x07, 0x5f, 0x7a, 0x4b, 0x60, 0x59, -+ 0x3c, 0x6f, 0x5c, 0xd8, 0xec, 0x95, 0xd2, 0xfe, -+ 0xa0, 0x3b, 0xd8, 
0x3f, 0xd1, 0x69, 0xa6, 0xd6, -+ 0x41, 0xb2, 0xf4, 0x4d, 0x12, 0xf4, 0x58, 0x3e, -+ 0x66, 0x64, 0x80, 0x31, 0x9b, 0xa8, 0x4c, 0x8b, -+ 0x07, 0xb2, 0xec, 0x66, 0x94, 0x66, 0x47, 0x50, -+ 0x50, 0x5f, 0x18, 0x0b, 0x0e, 0xd6, 0xc0, 0x39, -+ 0x21, 0x13, 0x9e, 0x33, 0xbc, 0x79, 0x36, 0x02, -+ 0x96, 0x70, 0xf0, 0x48, 0x67, 0x2f, 0x26, 0xe9, -+ 0x6d, 0x10, 0xbb, 0xd6, 0x3f, 0xd1, 0x64, 0x7a, -+ 0x2e, 0xbe, 0x0c, 0x61, 0xf0, 0x75, 0x42, 0x38, -+ 0x23, 0xb1, 0x9e, 0x9f, 0x7c, 0x67, 0x66, 0xd9, -+ 0x58, 0x9a, 0xf1, 0xbb, 0x41, 0x2a, 0x8d, 0x65, -+ 0x84, 0x94, 0xfc, 0xdc, 0x6a, 0x50, 0x64, 0xdb, -+ 0x56, 0x33, 0x76, 0x00, 0x10, 0xed, 0xbe, 0xd2, -+ 0x12, 0xf6, 0xf6, 0x1b, 0xa2, 0x16, 0xde, 0xae, -+ 0x31, 0x95, 0xdd, 0xb1, 0x08, 0x7e, 0x4e, 0xee, -+ 0xe7, 0xf9, 0xa5, 0xfb, 0x5b, 0x61, 0x43, 0x00, -+ 0x40, 0xf6, 0x7e, 0x02, 0x04, 0x32, 0x4e, 0x0c, -+ 0xe2, 0x66, 0x0d, 0xd7, 0x07, 0x98, 0x0e, 0xf8, -+ 0x72, 0x34, 0x6d, 0x95, 0x86, 0xd7, 0xcb, 0x31, -+ 0x54, 0x47, 0xd0, 0x38, 0x29, 0x9c, 0x5a, 0x68, -+ 0xd4, 0x87, 0x76, 0xc9, 0xe7, 0x7e, 0xe3, 0xf4, -+ 0x81, 0x6d, 0x18, 0xcb, 0xc9, 0x05, 0xaf, 0xa0, -+ 0xfb, 0x66, 0xf7, 0xf1, 0x1c, 0xc6, 0x14, 0x11, -+ 0x4f, 0x2b, 0x79, 0x42, 0x8b, 0xbc, 0xac, 0xe7, -+ 0x6c, 0xfe, 0x0f, 0x58, 0xe7, 0x7c, 0x78, 0x39, -+ 0x30, 0xb0, 0x66, 0x2c, 0x9b, 0x6d, 0x3a, 0xe1, -+ 0xcf, 0xc9, 0xa4, 0x0e, 0x6d, 0x6d, 0x8a, 0xa1, -+ 0x3a, 0xe7, 0x28, 0xd4, 0x78, 0x4c, 0xa6, 0xa2, -+ 0x2a, 0xa6, 0x03, 0x30, 0xd7, 0xa8, 0x25, 0x66, -+ 0x87, 0x2f, 0x69, 0x5c, 0x4e, 0xdd, 0xa5, 0x49, -+ 0x5d, 0x37, 0x4a, 0x59, 0xc4, 0xaf, 0x1f, 0xa2, -+ 0xe4, 0xf8, 0xa6, 0x12, 0x97, 0xd5, 0x79, 0xf5, -+ 0xe2, 0x4a, 0x2b, 0x5f, 0x61, 0xe4, 0x9e, 0xe3, -+ 0xee, 0xb8, 0xa7, 0x5b, 0x2f, 0xf4, 0x9e, 0x6c, -+ 0xfb, 0xd1, 0xc6, 0x56, 0x77, 0xba, 0x75, 0xaa, -+ 0x3d, 0x1a, 0xa8, 0x0b, 0xb3, 0x68, 0x24, 0x00, -+ 0x10, 0x7f, 0xfd, 0xd7, 0xa1, 0x8d, 0x83, 0x54, -+ 0x4f, 0x1f, 0xd8, 0x2a, 0xbe, 0x8a, 0x0c, 0x87, -+ 0xab, 0xa2, 0xde, 0xc3, 0x39, 0xbf, 0x09, 0x03, -+ 0xa5, 0xf3, 0x05, 0x28, 
0xe1, 0xe1, 0xee, 0x39, -+ 0x70, 0x9c, 0xd8, 0x81, 0x12, 0x1e, 0x02, 0x40, -+ 0xd2, 0x6e, 0xf0, 0xeb, 0x1b, 0x3d, 0x22, 0xc6, -+ 0xe5, 0xe3, 0xb4, 0x5a, 0x98, 0xbb, 0xf0, 0x22, -+ 0x28, 0x8d, 0xe5, 0xd3, 0x16, 0x48, 0x24, 0xa5, -+ 0xe6, 0x66, 0x0c, 0xf9, 0x08, 0xf9, 0x7e, 0x1e, -+ 0xe1, 0x28, 0x26, 0x22, 0xc7, 0xc7, 0x0a, 0x32, -+ 0x47, 0xfa, 0xa3, 0xbe, 0x3c, 0xc4, 0xc5, 0x53, -+ 0x0a, 0xd5, 0x94, 0x4a, 0xd7, 0x93, 0xd8, 0x42, -+ 0x99, 0xb9, 0x0a, 0xdb, 0x56, 0xf7, 0xb9, 0x1c, -+ 0x53, 0x4f, 0xfa, 0xd3, 0x74, 0xad, 0xd9, 0x68, -+ 0xf1, 0x1b, 0xdf, 0x61, 0xc6, 0x5e, 0xa8, 0x48, -+ 0xfc, 0xd4, 0x4a, 0x4c, 0x3c, 0x32, 0xf7, 0x1c, -+ 0x96, 0x21, 0x9b, 0xf9, 0xa3, 0xcc, 0x5a, 0xce, -+ 0xd5, 0xd7, 0x08, 0x24, 0xf6, 0x1c, 0xfd, 0xdd, -+ 0x38, 0xc2, 0x32, 0xe9, 0xb8, 0xe7, 0xb6, 0xfa, -+ 0x9d, 0x45, 0x13, 0x2c, 0x83, 0xfd, 0x4a, 0x69, -+ 0x82, 0xcd, 0xdc, 0xb3, 0x76, 0x0c, 0x9e, 0xd8, -+ 0xf4, 0x1b, 0x45, 0x15, 0xb4, 0x97, 0xe7, 0x58, -+ 0x34, 0xe2, 0x03, 0x29, 0x5a, 0xbf, 0xb6, 0xe0, -+ 0x5d, 0x13, 0xd9, 0x2b, 0xb4, 0x80, 0xb2, 0x45, -+ 0x81, 0x6a, 0x2e, 0x6c, 0x89, 0x7d, 0xee, 0xbb, -+ 0x52, 0xdd, 0x1f, 0x18, 0xe7, 0x13, 0x6b, 0x33, -+ 0x0e, 0xea, 0x36, 0x92, 0x77, 0x7b, 0x6d, 0x9c, -+ 0x5a, 0x5f, 0x45, 0x7b, 0x7b, 0x35, 0x62, 0x23, -+ 0xd1, 0xbf, 0x0f, 0xd0, 0x08, 0x1b, 0x2b, 0x80, -+ 0x6b, 0x7e, 0xf1, 0x21, 0x47, 0xb0, 0x57, 0xd1, -+ 0x98, 0x72, 0x90, 0x34, 0x1c, 0x20, 0x04, 0xff, -+ 0x3d, 0x5c, 0xee, 0x0e, 0x57, 0x5f, 0x6f, 0x24, -+ 0x4e, 0x3c, 0xea, 0xfc, 0xa5, 0xa9, 0x83, 0xc9, -+ 0x61, 0xb4, 0x51, 0x24, 0xf8, 0x27, 0x5e, 0x46, -+ 0x8c, 0xb1, 0x53, 0x02, 0x96, 0x35, 0xba, 0xb8, -+ 0x4c, 0x71, 0xd3, 0x15, 0x59, 0x35, 0x22, 0x20, -+ 0xad, 0x03, 0x9f, 0x66, 0x44, 0x3b, 0x9c, 0x35, -+ 0x37, 0x1f, 0x9b, 0xbb, 0xf3, 0xdb, 0x35, 0x63, -+ 0x30, 0x64, 0xaa, 0xa2, 0x06, 0xa8, 0x5d, 0xbb, -+ 0xe1, 0x9f, 0x70, 0xec, 0x82, 0x11, 0x06, 0x36, -+ 0xec, 0x8b, 0x69, 0x66, 0x24, 0x44, 0xc9, 0x4a, -+ 0x57, 0xbb, 0x9b, 0x78, 0x13, 0xce, 0x9c, 0x0c, -+ 0xba, 0x92, 0x93, 0x63, 0xb8, 
0xe2, 0x95, 0x0f, -+ 0x0f, 0x16, 0x39, 0x52, 0xfd, 0x3a, 0x6d, 0x02, -+ 0x4b, 0xdf, 0x13, 0xd3, 0x2a, 0x22, 0xb4, 0x03, -+ 0x7c, 0x54, 0x49, 0x96, 0x68, 0x54, 0x10, 0xfa, -+ 0xef, 0xaa, 0x6c, 0xe8, 0x22, 0xdc, 0x71, 0x16, -+ 0x13, 0x1a, 0xf6, 0x28, 0xe5, 0x6d, 0x77, 0x3d, -+ 0xcd, 0x30, 0x63, 0xb1, 0x70, 0x52, 0xa1, 0xc5, -+ 0x94, 0x5f, 0xcf, 0xe8, 0xb8, 0x26, 0x98, 0xf7, -+ 0x06, 0xa0, 0x0a, 0x70, 0xfa, 0x03, 0x80, 0xac, -+ 0xc1, 0xec, 0xd6, 0x4c, 0x54, 0xd7, 0xfe, 0x47, -+ 0xb6, 0x88, 0x4a, 0xf7, 0x71, 0x24, 0xee, 0xf3, -+ 0xd2, 0xc2, 0x4a, 0x7f, 0xfe, 0x61, 0xc7, 0x35, -+ 0xc9, 0x37, 0x67, 0xcb, 0x24, 0x35, 0xda, 0x7e, -+ 0xca, 0x5f, 0xf3, 0x8d, 0xd4, 0x13, 0x8e, 0xd6, -+ 0xcb, 0x4d, 0x53, 0x8f, 0x53, 0x1f, 0xc0, 0x74, -+ 0xf7, 0x53, 0xb9, 0x5e, 0x23, 0x37, 0xba, 0x6e, -+ 0xe3, 0x9d, 0x07, 0x55, 0x25, 0x7b, 0xe6, 0x2a, -+ 0x64, 0xd1, 0x32, 0xdd, 0x54, 0x1b, 0x4b, 0xc0, -+ 0xe1, 0xd7, 0x69, 0x58, 0xf8, 0x93, 0x29, 0xc4, -+ 0xdd, 0x23, 0x2f, 0xa5, 0xfc, 0x9d, 0x7e, 0xf8, -+ 0xd4, 0x90, 0xcd, 0x82, 0x55, 0xdc, 0x16, 0x16, -+ 0x9f, 0x07, 0x52, 0x9b, 0x9d, 0x25, 0xed, 0x32, -+ 0xc5, 0x7b, 0xdf, 0xf6, 0x83, 0x46, 0x3d, 0x65, -+ 0xb7, 0xef, 0x87, 0x7a, 0x12, 0x69, 0x8f, 0x06, -+ 0x7c, 0x51, 0x15, 0x4a, 0x08, 0xe8, 0xac, 0x9a, -+ 0x0c, 0x24, 0xa7, 0x27, 0xd8, 0x46, 0x2f, 0xe7, -+ 0x01, 0x0e, 0x1c, 0xc6, 0x91, 0xb0, 0x6e, 0x85, -+ 0x65, 0xf0, 0x29, 0x0d, 0x2e, 0x6b, 0x3b, 0xfb, -+ 0x4b, 0xdf, 0xe4, 0x80, 0x93, 0x03, 0x66, 0x46, -+ 0x3e, 0x8a, 0x6e, 0xf3, 0x5e, 0x4d, 0x62, 0x0e, -+ 0x49, 0x05, 0xaf, 0xd4, 0xf8, 0x21, 0x20, 0x61, -+ 0x1d, 0x39, 0x17, 0xf4, 0x61, 0x47, 0x95, 0xfb, -+ 0x15, 0x2e, 0xb3, 0x4f, 0xd0, 0x5d, 0xf5, 0x7d, -+ 0x40, 0xda, 0x90, 0x3c, 0x6b, 0xcb, 0x17, 0x00, -+ 0x13, 0x3b, 0x64, 0x34, 0x1b, 0xf0, 0xf2, 0xe5, -+ 0x3b, 0xb2, 0xc7, 0xd3, 0x5f, 0x3a, 0x44, 0xa6, -+ 0x9b, 0xb7, 0x78, 0x0e, 0x42, 0x5d, 0x4c, 0xc1, -+ 0xe9, 0xd2, 0xcb, 0xb7, 0x78, 0xd1, 0xfe, 0x9a, -+ 0xb5, 0x07, 0xe9, 0xe0, 0xbe, 0xe2, 0x8a, 0xa7, -+ 0x01, 0x83, 0x00, 0x8c, 0x5c, 0x08, 
0xe6, 0x63, -+ 0x12, 0x92, 0xb7, 0xb7, 0xa6, 0x19, 0x7d, 0x38, -+ 0x13, 0x38, 0x92, 0x87, 0x24, 0xf9, 0x48, 0xb3, -+ 0x5e, 0x87, 0x6a, 0x40, 0x39, 0x5c, 0x3f, 0xed, -+ 0x8f, 0xee, 0xdb, 0x15, 0x82, 0x06, 0xda, 0x49, -+ 0x21, 0x2b, 0xb5, 0xbf, 0x32, 0x7c, 0x9f, 0x42, -+ 0x28, 0x63, 0xcf, 0xaf, 0x1e, 0xf8, 0xc6, 0xa0, -+ 0xd1, 0x02, 0x43, 0x57, 0x62, 0xec, 0x9b, 0x0f, -+ 0x01, 0x9e, 0x71, 0xd8, 0x87, 0x9d, 0x01, 0xc1, -+ 0x58, 0x77, 0xd9, 0xaf, 0xb1, 0x10, 0x7e, 0xdd, -+ 0xa6, 0x50, 0x96, 0xe5, 0xf0, 0x72, 0x00, 0x6d, -+ 0x4b, 0xf8, 0x2a, 0x8f, 0x19, 0xf3, 0x22, 0x88, -+ 0x11, 0x4a, 0x8b, 0x7c, 0xfd, 0xb7, 0xed, 0xe1, -+ 0xf6, 0x40, 0x39, 0xe0, 0xe9, 0xf6, 0x3d, 0x25, -+ 0xe6, 0x74, 0x3c, 0x58, 0x57, 0x7f, 0xe1, 0x22, -+ 0x96, 0x47, 0x31, 0x91, 0xba, 0x70, 0x85, 0x28, -+ 0x6b, 0x9f, 0x6e, 0x25, 0xac, 0x23, 0x66, 0x2f, -+ 0x29, 0x88, 0x28, 0xce, 0x8c, 0x5c, 0x88, 0x53, -+ 0xd1, 0x3b, 0xcc, 0x6a, 0x51, 0xb2, 0xe1, 0x28, -+ 0x3f, 0x91, 0xb4, 0x0d, 0x00, 0x3a, 0xe3, 0xf8, -+ 0xc3, 0x8f, 0xd7, 0x96, 0x62, 0x0e, 0x2e, 0xfc, -+ 0xc8, 0x6c, 0x77, 0xa6, 0x1d, 0x22, 0xc1, 0xb8, -+ 0xe6, 0x61, 0xd7, 0x67, 0x36, 0x13, 0x7b, 0xbb, -+ 0x9b, 0x59, 0x09, 0xa6, 0xdf, 0xf7, 0x6b, 0xa3, -+ 0x40, 0x1a, 0xf5, 0x4f, 0xb4, 0xda, 0xd3, 0xf3, -+ 0x81, 0x93, 0xc6, 0x18, 0xd9, 0x26, 0xee, 0xac, -+ 0xf0, 0xaa, 0xdf, 0xc5, 0x9c, 0xca, 0xc2, 0xa2, -+ 0xcc, 0x7b, 0x5c, 0x24, 0xb0, 0xbc, 0xd0, 0x6a, -+ 0x4d, 0x89, 0x09, 0xb8, 0x07, 0xfe, 0x87, 0xad, -+ 0x0a, 0xea, 0xb8, 0x42, 0xf9, 0x5e, 0xb3, 0x3e, -+ 0x36, 0x4c, 0xaf, 0x75, 0x9e, 0x1c, 0xeb, 0xbd, -+ 0xbc, 0xbb, 0x80, 0x40, 0xa7, 0x3a, 0x30, 0xbf, -+ 0xa8, 0x44, 0xf4, 0xeb, 0x38, 0xad, 0x29, 0xba, -+ 0x23, 0xed, 0x41, 0x0c, 0xea, 0xd2, 0xbb, 0x41, -+ 0x18, 0xd6, 0xb9, 0xba, 0x65, 0x2b, 0xa3, 0x91, -+ 0x6d, 0x1f, 0xa9, 0xf4, 0xd1, 0x25, 0x8d, 0x4d, -+ 0x38, 0xff, 0x64, 0xa0, 0xec, 0xde, 0xa6, 0xb6, -+ 0x79, 0xab, 0x8e, 0x33, 0x6c, 0x47, 0xde, 0xaf, -+ 0x94, 0xa4, 0xa5, 0x86, 0x77, 0x55, 0x09, 0x92, -+ 0x81, 0x31, 0x76, 0xc7, 0x34, 0x22, 0x89, 
0x8e, -+ 0x3d, 0x26, 0x26, 0xd7, 0xfc, 0x1e, 0x16, 0x72, -+ 0x13, 0x33, 0x63, 0xd5, 0x22, 0xbe, 0xb8, 0x04, -+ 0x34, 0x84, 0x41, 0xbb, 0x80, 0xd0, 0x9f, 0x46, -+ 0x48, 0x07, 0xa7, 0xfc, 0x2b, 0x3a, 0x75, 0x55, -+ 0x8c, 0xc7, 0x6a, 0xbd, 0x7e, 0x46, 0x08, 0x84, -+ 0x0f, 0xd5, 0x74, 0xc0, 0x82, 0x8e, 0xaa, 0x61, -+ 0x05, 0x01, 0xb2, 0x47, 0x6e, 0x20, 0x6a, 0x2d, -+ 0x58, 0x70, 0x48, 0x32, 0xa7, 0x37, 0xd2, 0xb8, -+ 0x82, 0x1a, 0x51, 0xb9, 0x61, 0xdd, 0xfd, 0x9d, -+ 0x6b, 0x0e, 0x18, 0x97, 0xf8, 0x45, 0x5f, 0x87, -+ 0x10, 0xcf, 0x34, 0x72, 0x45, 0x26, 0x49, 0x70, -+ 0xe7, 0xa3, 0x78, 0xe0, 0x52, 0x89, 0x84, 0x94, -+ 0x83, 0x82, 0xc2, 0x69, 0x8f, 0xe3, 0xe1, 0x3f, -+ 0x60, 0x74, 0x88, 0xc4, 0xf7, 0x75, 0x2c, 0xfb, -+ 0xbd, 0xb6, 0xc4, 0x7e, 0x10, 0x0a, 0x6c, 0x90, -+ 0x04, 0x9e, 0xc3, 0x3f, 0x59, 0x7c, 0xce, 0x31, -+ 0x18, 0x60, 0x57, 0x73, 0x46, 0x94, 0x7d, 0x06, -+ 0xa0, 0x6d, 0x44, 0xec, 0xa2, 0x0a, 0x9e, 0x05, -+ 0x15, 0xef, 0xca, 0x5c, 0xbf, 0x00, 0xeb, 0xf7, -+ 0x3d, 0x32, 0xd4, 0xa5, 0xef, 0x49, 0x89, 0x5e, -+ 0x46, 0xb0, 0xa6, 0x63, 0x5b, 0x8a, 0x73, 0xae, -+ 0x6f, 0xd5, 0x9d, 0xf8, 0x4f, 0x40, 0xb5, 0xb2, -+ 0x6e, 0xd3, 0xb6, 0x01, 0xa9, 0x26, 0xa2, 0x21, -+ 0xcf, 0x33, 0x7a, 0x3a, 0xa4, 0x23, 0x13, 0xb0, -+ 0x69, 0x6a, 0xee, 0xce, 0xd8, 0x9d, 0x01, 0x1d, -+ 0x50, 0xc1, 0x30, 0x6c, 0xb1, 0xcd, 0xa0, 0xf0, -+ 0xf0, 0xa2, 0x64, 0x6f, 0xbb, 0xbf, 0x5e, 0xe6, -+ 0xab, 0x87, 0xb4, 0x0f, 0x4f, 0x15, 0xaf, 0xb5, -+ 0x25, 0xa1, 0xb2, 0xd0, 0x80, 0x2c, 0xfb, 0xf9, -+ 0xfe, 0xd2, 0x33, 0xbb, 0x76, 0xfe, 0x7c, 0xa8, -+ 0x66, 0xf7, 0xe7, 0x85, 0x9f, 0x1f, 0x85, 0x57, -+ 0x88, 0xe1, 0xe9, 0x63, 0xe4, 0xd8, 0x1c, 0xa1, -+ 0xfb, 0xda, 0x44, 0x05, 0x2e, 0x1d, 0x3a, 0x1c, -+ 0xff, 0xc8, 0x3b, 0xc0, 0xfe, 0xda, 0x22, 0x0b, -+ 0x43, 0xd6, 0x88, 0x39, 0x4c, 0x4a, 0xa6, 0x69, -+ 0x18, 0x93, 0x42, 0x4e, 0xb5, 0xcc, 0x66, 0x0d, -+ 0x09, 0xf8, 0x1e, 0x7c, 0xd3, 0x3c, 0x99, 0x0d, -+ 0x50, 0x1d, 0x62, 0xe9, 0x57, 0x06, 0xbf, 0x19, -+ 0x88, 0xdd, 0xad, 0x7b, 0x4f, 0xf9, 0xc7, 0x82, -+ 
0x6d, 0x8d, 0xc8, 0xc4, 0xc5, 0x78, 0x17, 0x20, -+ 0x15, 0xc5, 0x52, 0x41, 0xcf, 0x5b, 0xd6, 0x7f, -+ 0x94, 0x02, 0x41, 0xe0, 0x40, 0x22, 0x03, 0x5e, -+ 0xd1, 0x53, 0xd4, 0x86, 0xd3, 0x2c, 0x9f, 0x0f, -+ 0x96, 0xe3, 0x6b, 0x9a, 0x76, 0x32, 0x06, 0x47, -+ 0x4b, 0x11, 0xb3, 0xdd, 0x03, 0x65, 0xbd, 0x9b, -+ 0x01, 0xda, 0x9c, 0xb9, 0x7e, 0x3f, 0x6a, 0xc4, -+ 0x7b, 0xea, 0xd4, 0x3c, 0xb9, 0xfb, 0x5c, 0x6b, -+ 0x64, 0x33, 0x52, 0xba, 0x64, 0x78, 0x8f, 0xa4, -+ 0xaf, 0x7a, 0x61, 0x8d, 0xbc, 0xc5, 0x73, 0xe9, -+ 0x6b, 0x58, 0x97, 0x4b, 0xbf, 0x63, 0x22, 0xd3, -+ 0x37, 0x02, 0x54, 0xc5, 0xb9, 0x16, 0x4a, 0xf0, -+ 0x19, 0xd8, 0x94, 0x57, 0xb8, 0x8a, 0xb3, 0x16, -+ 0x3b, 0xd0, 0x84, 0x8e, 0x67, 0xa6, 0xa3, 0x7d, -+ 0x78, 0xec, 0x00 -+}; -+static const u8 dec_assoc013[] __initconst = { -+ 0xb1, 0x69, 0x83, 0x87, 0x30, 0xaa, 0x5d, 0xb8, -+ 0x77, 0xe8, 0x21, 0xff, 0x06, 0x59, 0x35, 0xce, -+ 0x75, 0xfe, 0x38, 0xef, 0xb8, 0x91, 0x43, 0x8c, -+ 0xcf, 0x70, 0xdd, 0x0a, 0x68, 0xbf, 0xd4, 0xbc, -+ 0x16, 0x76, 0x99, 0x36, 0x1e, 0x58, 0x79, 0x5e, -+ 0xd4, 0x29, 0xf7, 0x33, 0x93, 0x48, 0xdb, 0x5f, -+ 0x01, 0xae, 0x9c, 0xb6, 0xe4, 0x88, 0x6d, 0x2b, -+ 0x76, 0x75, 0xe0, 0xf3, 0x74, 0xe2, 0xc9 -+}; -+static const u8 dec_nonce013[] __initconst = { -+ 0x05, 0xa3, 0x93, 0xed, 0x30, 0xc5, 0xa2, 0x06 -+}; -+static const u8 dec_key013[] __initconst = { -+ 0xb3, 0x35, 0x50, 0x03, 0x54, 0x2e, 0x40, 0x5e, -+ 0x8f, 0x59, 0x8e, 0xc5, 0x90, 0xd5, 0x27, 0x2d, -+ 0xba, 0x29, 0x2e, 0xcb, 0x1b, 0x70, 0x44, 0x1e, -+ 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64 -+}; -+ -+static const struct chacha20poly1305_testvec -+chacha20poly1305_dec_vectors[] __initconst = { -+ { dec_input001, dec_output001, dec_assoc001, dec_nonce001, dec_key001, -+ sizeof(dec_input001), sizeof(dec_assoc001), sizeof(dec_nonce001) }, -+ { dec_input002, dec_output002, dec_assoc002, dec_nonce002, dec_key002, -+ sizeof(dec_input002), sizeof(dec_assoc002), sizeof(dec_nonce002) }, -+ { dec_input003, dec_output003, dec_assoc003, 
dec_nonce003, dec_key003, -+ sizeof(dec_input003), sizeof(dec_assoc003), sizeof(dec_nonce003) }, -+ { dec_input004, dec_output004, dec_assoc004, dec_nonce004, dec_key004, -+ sizeof(dec_input004), sizeof(dec_assoc004), sizeof(dec_nonce004) }, -+ { dec_input005, dec_output005, dec_assoc005, dec_nonce005, dec_key005, -+ sizeof(dec_input005), sizeof(dec_assoc005), sizeof(dec_nonce005) }, -+ { dec_input006, dec_output006, dec_assoc006, dec_nonce006, dec_key006, -+ sizeof(dec_input006), sizeof(dec_assoc006), sizeof(dec_nonce006) }, -+ { dec_input007, dec_output007, dec_assoc007, dec_nonce007, dec_key007, -+ sizeof(dec_input007), sizeof(dec_assoc007), sizeof(dec_nonce007) }, -+ { dec_input008, dec_output008, dec_assoc008, dec_nonce008, dec_key008, -+ sizeof(dec_input008), sizeof(dec_assoc008), sizeof(dec_nonce008) }, -+ { dec_input009, dec_output009, dec_assoc009, dec_nonce009, dec_key009, -+ sizeof(dec_input009), sizeof(dec_assoc009), sizeof(dec_nonce009) }, -+ { dec_input010, dec_output010, dec_assoc010, dec_nonce010, dec_key010, -+ sizeof(dec_input010), sizeof(dec_assoc010), sizeof(dec_nonce010) }, -+ { dec_input011, dec_output011, dec_assoc011, dec_nonce011, dec_key011, -+ sizeof(dec_input011), sizeof(dec_assoc011), sizeof(dec_nonce011) }, -+ { dec_input012, dec_output012, dec_assoc012, dec_nonce012, dec_key012, -+ sizeof(dec_input012), sizeof(dec_assoc012), sizeof(dec_nonce012) }, -+ { dec_input013, dec_output013, dec_assoc013, dec_nonce013, dec_key013, -+ sizeof(dec_input013), sizeof(dec_assoc013), sizeof(dec_nonce013), -+ true } -+}; -+ -+static const u8 xenc_input001[] __initconst = { -+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, -+ 0x2d, 0x44, 0x72, 0x61, 0x66, 0x74, 0x73, 0x20, -+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, -+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, -+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, -+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, -+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, -+ 0x6f, 0x66, 0x20, 0x73, 
0x69, 0x78, 0x20, 0x6d, -+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, -+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, -+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, -+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, -+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, -+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64, -+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, -+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, -+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, -+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, -+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, -+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, -+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, -+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, -+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, -+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, -+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, -+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, -+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, -+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, -+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, -+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, -+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, -+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, -+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80, -+ 0x9d -+}; -+static const u8 xenc_output001[] __initconst = { -+ 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77, -+ 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92, -+ 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18, -+ 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d, -+ 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e, -+ 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86, -+ 0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2, -+ 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85, -+ 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09, -+ 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49, -+ 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd, -+ 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8, -+ 0xa7, 0x63, 0x60, 
0xce, 0x10, 0xcd, 0xc6, 0x7f, -+ 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79, -+ 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8, -+ 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0, -+ 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88, -+ 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71, -+ 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91, -+ 0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf, -+ 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89, -+ 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46, -+ 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e, -+ 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90, -+ 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b, -+ 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58, -+ 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54, -+ 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1, -+ 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73, -+ 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69, -+ 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05, -+ 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83, -+ 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13, -+ 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8, -+ 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5, -+ 0x9c -+}; -+static const u8 xenc_assoc001[] __initconst = { -+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x4e, 0x91 -+}; -+static const u8 xenc_nonce001[] __initconst = { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, -+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 -+}; -+static const u8 xenc_key001[] __initconst = { -+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, -+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, -+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, -+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0 -+}; -+ -+static const struct chacha20poly1305_testvec -+xchacha20poly1305_enc_vectors[] __initconst = { -+ { xenc_input001, xenc_output001, xenc_assoc001, xenc_nonce001, xenc_key001, -+ sizeof(xenc_input001), sizeof(xenc_assoc001), 
sizeof(xenc_nonce001) } -+}; -+ -+static const u8 xdec_input001[] __initconst = { -+ 0x1a, 0x6e, 0x3a, 0xd9, 0xfd, 0x41, 0x3f, 0x77, -+ 0x54, 0x72, 0x0a, 0x70, 0x9a, 0xa0, 0x29, 0x92, -+ 0x2e, 0xed, 0x93, 0xcf, 0x0f, 0x71, 0x88, 0x18, -+ 0x7a, 0x9d, 0x2d, 0x24, 0xe0, 0xf5, 0xea, 0x3d, -+ 0x55, 0x64, 0xd7, 0xad, 0x2a, 0x1a, 0x1f, 0x7e, -+ 0x86, 0x6d, 0xb0, 0xce, 0x80, 0x41, 0x72, 0x86, -+ 0x26, 0xee, 0x84, 0xd7, 0xef, 0x82, 0x9e, 0xe2, -+ 0x60, 0x9d, 0x5a, 0xfc, 0xf0, 0xe4, 0x19, 0x85, -+ 0xea, 0x09, 0xc6, 0xfb, 0xb3, 0xa9, 0x50, 0x09, -+ 0xec, 0x5e, 0x11, 0x90, 0xa1, 0xc5, 0x4e, 0x49, -+ 0xef, 0x50, 0xd8, 0x8f, 0xe0, 0x78, 0xd7, 0xfd, -+ 0xb9, 0x3b, 0xc9, 0xf2, 0x91, 0xc8, 0x25, 0xc8, -+ 0xa7, 0x63, 0x60, 0xce, 0x10, 0xcd, 0xc6, 0x7f, -+ 0xf8, 0x16, 0xf8, 0xe1, 0x0a, 0xd9, 0xde, 0x79, -+ 0x50, 0x33, 0xf2, 0x16, 0x0f, 0x17, 0xba, 0xb8, -+ 0x5d, 0xd8, 0xdf, 0x4e, 0x51, 0xa8, 0x39, 0xd0, -+ 0x85, 0xca, 0x46, 0x6a, 0x10, 0xa7, 0xa3, 0x88, -+ 0xef, 0x79, 0xb9, 0xf8, 0x24, 0xf3, 0xe0, 0x71, -+ 0x7b, 0x76, 0x28, 0x46, 0x3a, 0x3a, 0x1b, 0x91, -+ 0xb6, 0xd4, 0x3e, 0x23, 0xe5, 0x44, 0x15, 0xbf, -+ 0x60, 0x43, 0x9d, 0xa4, 0xbb, 0xd5, 0x5f, 0x89, -+ 0xeb, 0xef, 0x8e, 0xfd, 0xdd, 0xb4, 0x0d, 0x46, -+ 0xf0, 0x69, 0x23, 0x63, 0xae, 0x94, 0xf5, 0x5e, -+ 0xa5, 0xad, 0x13, 0x1c, 0x41, 0x76, 0xe6, 0x90, -+ 0xd6, 0x6d, 0xa2, 0x8f, 0x97, 0x4c, 0xa8, 0x0b, -+ 0xcf, 0x8d, 0x43, 0x2b, 0x9c, 0x9b, 0xc5, 0x58, -+ 0xa5, 0xb6, 0x95, 0x9a, 0xbf, 0x81, 0xc6, 0x54, -+ 0xc9, 0x66, 0x0c, 0xe5, 0x4f, 0x6a, 0x53, 0xa1, -+ 0xe5, 0x0c, 0xba, 0x31, 0xde, 0x34, 0x64, 0x73, -+ 0x8a, 0x3b, 0xbd, 0x92, 0x01, 0xdb, 0x71, 0x69, -+ 0xf3, 0x58, 0x99, 0xbc, 0xd1, 0xcb, 0x4a, 0x05, -+ 0xe2, 0x58, 0x9c, 0x25, 0x17, 0xcd, 0xdc, 0x83, -+ 0xb7, 0xff, 0xfb, 0x09, 0x61, 0xad, 0xbf, 0x13, -+ 0x5b, 0x5e, 0xed, 0x46, 0x82, 0x6f, 0x22, 0xd8, -+ 0x93, 0xa6, 0x85, 0x5b, 0x40, 0x39, 0x5c, 0xc5, -+ 0x9c -+}; -+static const u8 xdec_output001[] __initconst = { -+ 0x49, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x65, 0x74, -+ 0x2d, 0x44, 
0x72, 0x61, 0x66, 0x74, 0x73, 0x20, -+ 0x61, 0x72, 0x65, 0x20, 0x64, 0x72, 0x61, 0x66, -+ 0x74, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, -+ 0x6e, 0x74, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x69, -+ 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, -+ 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, -+ 0x6f, 0x66, 0x20, 0x73, 0x69, 0x78, 0x20, 0x6d, -+ 0x6f, 0x6e, 0x74, 0x68, 0x73, 0x20, 0x61, 0x6e, -+ 0x64, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, -+ 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, -+ 0x2c, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, -+ 0x65, 0x64, 0x2c, 0x20, 0x6f, 0x72, 0x20, 0x6f, -+ 0x62, 0x73, 0x6f, 0x6c, 0x65, 0x74, 0x65, 0x64, -+ 0x20, 0x62, 0x79, 0x20, 0x6f, 0x74, 0x68, 0x65, -+ 0x72, 0x20, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, -+ 0x6e, 0x74, 0x73, 0x20, 0x61, 0x74, 0x20, 0x61, -+ 0x6e, 0x79, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, -+ 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x69, -+ 0x6e, 0x61, 0x70, 0x70, 0x72, 0x6f, 0x70, 0x72, -+ 0x69, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, -+ 0x75, 0x73, 0x65, 0x20, 0x49, 0x6e, 0x74, 0x65, -+ 0x72, 0x6e, 0x65, 0x74, 0x2d, 0x44, 0x72, 0x61, -+ 0x66, 0x74, 0x73, 0x20, 0x61, 0x73, 0x20, 0x72, -+ 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, -+ 0x20, 0x6d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, -+ 0x6c, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x20, -+ 0x63, 0x69, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, -+ 0x6d, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, -+ 0x74, 0x68, 0x61, 0x6e, 0x20, 0x61, 0x73, 0x20, -+ 0x2f, 0xe2, 0x80, 0x9c, 0x77, 0x6f, 0x72, 0x6b, -+ 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x67, -+ 0x72, 0x65, 0x73, 0x73, 0x2e, 0x2f, 0xe2, 0x80, -+ 0x9d -+}; -+static const u8 xdec_assoc001[] __initconst = { -+ 0xf3, 0x33, 0x88, 0x86, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x4e, 0x91 -+}; -+static const u8 xdec_nonce001[] __initconst = { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, -+ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 -+}; -+static const u8 
xdec_key001[] __initconst = { -+ 0x1c, 0x92, 0x40, 0xa5, 0xeb, 0x55, 0xd3, 0x8a, -+ 0xf3, 0x33, 0x88, 0x86, 0x04, 0xf6, 0xb5, 0xf0, -+ 0x47, 0x39, 0x17, 0xc1, 0x40, 0x2b, 0x80, 0x09, -+ 0x9d, 0xca, 0x5c, 0xbc, 0x20, 0x70, 0x75, 0xc0 -+}; -+ -+static const struct chacha20poly1305_testvec -+xchacha20poly1305_dec_vectors[] __initconst = { -+ { xdec_input001, xdec_output001, xdec_assoc001, xdec_nonce001, xdec_key001, -+ sizeof(xdec_input001), sizeof(xdec_assoc001), sizeof(xdec_nonce001) } -+}; -+ -+static void __init -+chacha20poly1305_selftest_encrypt(u8 *dst, const u8 *src, const size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u8 *nonce, const size_t nonce_len, -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]) -+{ -+ if (nonce_len == 8) -+ chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, -+ get_unaligned_le64(nonce), key); -+ else -+ BUG(); -+} -+ -+static bool __init -+decryption_success(bool func_ret, bool expect_failure, int memcmp_result) -+{ -+ if (expect_failure) -+ return !func_ret; -+ return func_ret && !memcmp_result; -+} -+ -+bool __init chacha20poly1305_selftest(void) -+{ -+ enum { MAXIMUM_TEST_BUFFER_LEN = 1UL << 12 }; -+ size_t i; -+ u8 *computed_output = NULL, *heap_src = NULL; -+ bool success = true, ret; -+ -+ heap_src = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL); -+ computed_output = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL); -+ if (!heap_src || !computed_output) { -+ pr_err("chacha20poly1305 self-test malloc: FAIL\n"); -+ success = false; -+ goto out; -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) { -+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); -+ chacha20poly1305_selftest_encrypt(computed_output, -+ chacha20poly1305_enc_vectors[i].input, -+ chacha20poly1305_enc_vectors[i].ilen, -+ chacha20poly1305_enc_vectors[i].assoc, -+ chacha20poly1305_enc_vectors[i].alen, -+ chacha20poly1305_enc_vectors[i].nonce, -+ chacha20poly1305_enc_vectors[i].nlen, -+ chacha20poly1305_enc_vectors[i].key); -+ if 
(memcmp(computed_output, -+ chacha20poly1305_enc_vectors[i].output, -+ chacha20poly1305_enc_vectors[i].ilen + -+ POLY1305_DIGEST_SIZE)) { -+ pr_err("chacha20poly1305 encryption self-test %zu: FAIL\n", -+ i + 1); -+ success = false; -+ } -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) { -+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); -+ ret = chacha20poly1305_decrypt(computed_output, -+ chacha20poly1305_dec_vectors[i].input, -+ chacha20poly1305_dec_vectors[i].ilen, -+ chacha20poly1305_dec_vectors[i].assoc, -+ chacha20poly1305_dec_vectors[i].alen, -+ get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce), -+ chacha20poly1305_dec_vectors[i].key); -+ if (!decryption_success(ret, -+ chacha20poly1305_dec_vectors[i].failure, -+ memcmp(computed_output, -+ chacha20poly1305_dec_vectors[i].output, -+ chacha20poly1305_dec_vectors[i].ilen - -+ POLY1305_DIGEST_SIZE))) { -+ pr_err("chacha20poly1305 decryption self-test %zu: FAIL\n", -+ i + 1); -+ success = false; -+ } -+ } -+ -+ -+ for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_enc_vectors); ++i) { -+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); -+ xchacha20poly1305_encrypt(computed_output, -+ xchacha20poly1305_enc_vectors[i].input, -+ xchacha20poly1305_enc_vectors[i].ilen, -+ xchacha20poly1305_enc_vectors[i].assoc, -+ xchacha20poly1305_enc_vectors[i].alen, -+ xchacha20poly1305_enc_vectors[i].nonce, -+ xchacha20poly1305_enc_vectors[i].key); -+ if (memcmp(computed_output, -+ xchacha20poly1305_enc_vectors[i].output, -+ xchacha20poly1305_enc_vectors[i].ilen + -+ POLY1305_DIGEST_SIZE)) { -+ pr_err("xchacha20poly1305 encryption self-test %zu: FAIL\n", -+ i + 1); -+ success = false; -+ } -+ } -+ for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_dec_vectors); ++i) { -+ memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); -+ ret = xchacha20poly1305_decrypt(computed_output, -+ xchacha20poly1305_dec_vectors[i].input, -+ xchacha20poly1305_dec_vectors[i].ilen, -+ xchacha20poly1305_dec_vectors[i].assoc, 
-+ xchacha20poly1305_dec_vectors[i].alen, -+ xchacha20poly1305_dec_vectors[i].nonce, -+ xchacha20poly1305_dec_vectors[i].key); -+ if (!decryption_success(ret, -+ xchacha20poly1305_dec_vectors[i].failure, -+ memcmp(computed_output, -+ xchacha20poly1305_dec_vectors[i].output, -+ xchacha20poly1305_dec_vectors[i].ilen - -+ POLY1305_DIGEST_SIZE))) { -+ pr_err("xchacha20poly1305 decryption self-test %zu: FAIL\n", -+ i + 1); -+ success = false; -+ } -+ } -+ -+out: -+ kfree(heap_src); -+ kfree(computed_output); -+ return success; -+} ---- /dev/null -+++ b/lib/crypto/chacha20poly1305.c -@@ -0,0 +1,219 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * This is an implementation of the ChaCha20Poly1305 AEAD construction. -+ * -+ * Information: https://tools.ietf.org/html/rfc8439 -+ */ -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32)) -+ -+bool __init chacha20poly1305_selftest(void); -+ -+static void chacha_load_key(u32 *k, const u8 *in) -+{ -+ k[0] = get_unaligned_le32(in); -+ k[1] = get_unaligned_le32(in + 4); -+ k[2] = get_unaligned_le32(in + 8); -+ k[3] = get_unaligned_le32(in + 12); -+ k[4] = get_unaligned_le32(in + 16); -+ k[5] = get_unaligned_le32(in + 20); -+ k[6] = get_unaligned_le32(in + 24); -+ k[7] = get_unaligned_le32(in + 28); -+} -+ -+static void xchacha_init(u32 *chacha_state, const u8 *key, const u8 *nonce) -+{ -+ u32 k[CHACHA_KEY_WORDS]; -+ u8 iv[CHACHA_IV_SIZE]; -+ -+ memset(iv, 0, 8); -+ memcpy(iv + 8, nonce + 16, 8); -+ -+ chacha_load_key(k, key); -+ -+ /* Compute the subkey given the original key and first 128 nonce bits */ -+ chacha_init(chacha_state, k, nonce); -+ hchacha_block(chacha_state, k, 20); -+ -+ chacha_init(chacha_state, k, iv); -+ -+ memzero_explicit(k, sizeof(k)); -+ memzero_explicit(iv, sizeof(iv)); -+} -+ -+static void 
-+__chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, -+ const u8 *ad, const size_t ad_len, u32 *chacha_state) -+{ -+ const u8 *pad0 = page_address(ZERO_PAGE(0)); -+ struct poly1305_desc_ctx poly1305_state; -+ union { -+ u8 block0[POLY1305_KEY_SIZE]; -+ __le64 lens[2]; -+ } b; -+ -+ chacha_crypt(chacha_state, b.block0, pad0, sizeof(b.block0), 20); -+ poly1305_init(&poly1305_state, b.block0); -+ -+ poly1305_update(&poly1305_state, ad, ad_len); -+ if (ad_len & 0xf) -+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf)); -+ -+ chacha_crypt(chacha_state, dst, src, src_len, 20); -+ -+ poly1305_update(&poly1305_state, dst, src_len); -+ if (src_len & 0xf) -+ poly1305_update(&poly1305_state, pad0, 0x10 - (src_len & 0xf)); -+ -+ b.lens[0] = cpu_to_le64(ad_len); -+ b.lens[1] = cpu_to_le64(src_len); -+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens)); -+ -+ poly1305_final(&poly1305_state, dst + src_len); -+ -+ memzero_explicit(chacha_state, CHACHA_STATE_WORDS * sizeof(u32)); -+ memzero_explicit(&b, sizeof(b)); -+} -+ -+void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u64 nonce, -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]) -+{ -+ u32 chacha_state[CHACHA_STATE_WORDS]; -+ u32 k[CHACHA_KEY_WORDS]; -+ __le64 iv[2]; -+ -+ chacha_load_key(k, key); -+ -+ iv[0] = 0; -+ iv[1] = cpu_to_le64(nonce); -+ -+ chacha_init(chacha_state, k, (u8 *)iv); -+ __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state); -+ -+ memzero_explicit(iv, sizeof(iv)); -+ memzero_explicit(k, sizeof(k)); -+} -+EXPORT_SYMBOL(chacha20poly1305_encrypt); -+ -+void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]) -+{ -+ u32 chacha_state[CHACHA_STATE_WORDS]; -+ -+ xchacha_init(chacha_state, key, nonce); -+ __chacha20poly1305_encrypt(dst, 
src, src_len, ad, ad_len, chacha_state); -+} -+EXPORT_SYMBOL(xchacha20poly1305_encrypt); -+ -+static bool -+__chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, -+ const u8 *ad, const size_t ad_len, u32 *chacha_state) -+{ -+ const u8 *pad0 = page_address(ZERO_PAGE(0)); -+ struct poly1305_desc_ctx poly1305_state; -+ size_t dst_len; -+ int ret; -+ union { -+ u8 block0[POLY1305_KEY_SIZE]; -+ u8 mac[POLY1305_DIGEST_SIZE]; -+ __le64 lens[2]; -+ } b; -+ -+ if (unlikely(src_len < POLY1305_DIGEST_SIZE)) -+ return false; -+ -+ chacha_crypt(chacha_state, b.block0, pad0, sizeof(b.block0), 20); -+ poly1305_init(&poly1305_state, b.block0); -+ -+ poly1305_update(&poly1305_state, ad, ad_len); -+ if (ad_len & 0xf) -+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf)); -+ -+ dst_len = src_len - POLY1305_DIGEST_SIZE; -+ poly1305_update(&poly1305_state, src, dst_len); -+ if (dst_len & 0xf) -+ poly1305_update(&poly1305_state, pad0, 0x10 - (dst_len & 0xf)); -+ -+ b.lens[0] = cpu_to_le64(ad_len); -+ b.lens[1] = cpu_to_le64(dst_len); -+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens)); -+ -+ poly1305_final(&poly1305_state, b.mac); -+ -+ ret = crypto_memneq(b.mac, src + dst_len, POLY1305_DIGEST_SIZE); -+ if (likely(!ret)) -+ chacha_crypt(chacha_state, dst, src, dst_len, 20); -+ -+ memzero_explicit(&b, sizeof(b)); -+ -+ return !ret; -+} -+ -+bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u64 nonce, -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]) -+{ -+ u32 chacha_state[CHACHA_STATE_WORDS]; -+ u32 k[CHACHA_KEY_WORDS]; -+ __le64 iv[2]; -+ bool ret; -+ -+ chacha_load_key(k, key); -+ -+ iv[0] = 0; -+ iv[1] = cpu_to_le64(nonce); -+ -+ chacha_init(chacha_state, k, (u8 *)iv); -+ ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, -+ chacha_state); -+ -+ memzero_explicit(chacha_state, sizeof(chacha_state)); -+ memzero_explicit(iv, sizeof(iv)); -+ memzero_explicit(k, 
sizeof(k)); -+ return ret; -+} -+EXPORT_SYMBOL(chacha20poly1305_decrypt); -+ -+bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]) -+{ -+ u32 chacha_state[CHACHA_STATE_WORDS]; -+ -+ xchacha_init(chacha_state, key, nonce); -+ return __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, -+ chacha_state); -+} -+EXPORT_SYMBOL(xchacha20poly1305_decrypt); -+ -+static int __init mod_init(void) -+{ -+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && -+ WARN_ON(!chacha20poly1305_selftest())) -+ return -ENODEV; -+ return 0; -+} -+ -+module_init(mod_init); -+MODULE_LICENSE("GPL v2"); -+MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction"); -+MODULE_AUTHOR("Jason A. Donenfeld "); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0033-crypto-lib-chacha20poly1305-reimplement-crypt_from_s.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0033-crypto-lib-chacha20poly1305-reimplement-crypt_from_s.patch deleted file mode 100644 index e4b2b58b8..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0033-crypto-lib-chacha20poly1305-reimplement-crypt_from_s.patch +++ /dev/null @@ -1,295 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 8 Nov 2019 13:22:40 +0100 -Subject: [PATCH] crypto: lib/chacha20poly1305 - reimplement crypt_from_sg() - routine - -commit d95312a3ccc0cd544d374be2fc45aeaa803e5fd9 upstream. - -Reimplement the library routines to perform chacha20poly1305 en/decryption -on scatterlists, without [ab]using the [deprecated] blkcipher interface, -which is rather heavyweight and does things we don't really need. - -Instead, we use the sg_miter API in a novel and clever way, to iterate -over the scatterlist in-place (i.e., source == destination, which is the -only way this library is expected to be used). 
That way, we don't have to -iterate over two scatterlists in parallel. - -Another optimization is that, instead of relying on the blkcipher walker -to present the input in suitable chunks, we recognize that ChaCha is a -streamcipher, and so we can simply deal with partial blocks by keeping a -block of cipherstream on the stack and use crypto_xor() to mix it with -the in/output. - -Finally, we omit the scatterwalk_and_copy() call if the last element of -the scatterlist covers the MAC as well (which is the common case), -avoiding the need to walk the scatterlist and kmap() the page twice. - -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - include/crypto/chacha20poly1305.h | 11 ++ - lib/crypto/chacha20poly1305-selftest.c | 45 ++++++++ - lib/crypto/chacha20poly1305.c | 150 +++++++++++++++++++++++++ - 3 files changed, 206 insertions(+) - ---- a/include/crypto/chacha20poly1305.h -+++ b/include/crypto/chacha20poly1305.h -@@ -7,6 +7,7 @@ - #define __CHACHA20POLY1305_H - - #include -+#include - - enum chacha20poly1305_lengths { - XCHACHA20POLY1305_NONCE_SIZE = 24, -@@ -34,4 +35,14 @@ bool __must_check xchacha20poly1305_decr - const size_t ad_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], - const u8 key[CHACHA20POLY1305_KEY_SIZE]); - -+bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u64 nonce, -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]); -+ -+bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u64 nonce, -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]); -+ - #endif /* __CHACHA20POLY1305_H */ ---- a/lib/crypto/chacha20poly1305-selftest.c -+++ b/lib/crypto/chacha20poly1305-selftest.c -@@ -7250,6 +7250,7 @@ bool __init chacha20poly1305_selftest(vo - enum { MAXIMUM_TEST_BUFFER_LEN = 1UL << 12 }; - size_t i; - u8 *computed_output = NULL, *heap_src = NULL; -+ struct 
scatterlist sg_src; - bool success = true, ret; - - heap_src = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL); -@@ -7280,6 +7281,29 @@ bool __init chacha20poly1305_selftest(vo - } - } - -+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) { -+ if (chacha20poly1305_enc_vectors[i].nlen != 8) -+ continue; -+ memcpy(heap_src, chacha20poly1305_enc_vectors[i].input, -+ chacha20poly1305_enc_vectors[i].ilen); -+ sg_init_one(&sg_src, heap_src, -+ chacha20poly1305_enc_vectors[i].ilen + POLY1305_DIGEST_SIZE); -+ chacha20poly1305_encrypt_sg_inplace(&sg_src, -+ chacha20poly1305_enc_vectors[i].ilen, -+ chacha20poly1305_enc_vectors[i].assoc, -+ chacha20poly1305_enc_vectors[i].alen, -+ get_unaligned_le64(chacha20poly1305_enc_vectors[i].nonce), -+ chacha20poly1305_enc_vectors[i].key); -+ if (memcmp(heap_src, -+ chacha20poly1305_enc_vectors[i].output, -+ chacha20poly1305_enc_vectors[i].ilen + -+ POLY1305_DIGEST_SIZE)) { -+ pr_err("chacha20poly1305 sg encryption self-test %zu: FAIL\n", -+ i + 1); -+ success = false; -+ } -+ } -+ - for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) { - memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); - ret = chacha20poly1305_decrypt(computed_output, -@@ -7301,6 +7325,27 @@ bool __init chacha20poly1305_selftest(vo - } - } - -+ for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) { -+ memcpy(heap_src, chacha20poly1305_dec_vectors[i].input, -+ chacha20poly1305_dec_vectors[i].ilen); -+ sg_init_one(&sg_src, heap_src, -+ chacha20poly1305_dec_vectors[i].ilen); -+ ret = chacha20poly1305_decrypt_sg_inplace(&sg_src, -+ chacha20poly1305_dec_vectors[i].ilen, -+ chacha20poly1305_dec_vectors[i].assoc, -+ chacha20poly1305_dec_vectors[i].alen, -+ get_unaligned_le64(chacha20poly1305_dec_vectors[i].nonce), -+ chacha20poly1305_dec_vectors[i].key); -+ if (!decryption_success(ret, -+ chacha20poly1305_dec_vectors[i].failure, -+ memcmp(heap_src, chacha20poly1305_dec_vectors[i].output, -+ chacha20poly1305_dec_vectors[i].ilen - -+ 
POLY1305_DIGEST_SIZE))) { -+ pr_err("chacha20poly1305 sg decryption self-test %zu: FAIL\n", -+ i + 1); -+ success = false; -+ } -+ } - - for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_enc_vectors); ++i) { - memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); ---- a/lib/crypto/chacha20poly1305.c -+++ b/lib/crypto/chacha20poly1305.c -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -205,6 +206,155 @@ bool xchacha20poly1305_decrypt(u8 *dst, - } - EXPORT_SYMBOL(xchacha20poly1305_decrypt); - -+static -+bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src, -+ const size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u64 nonce, -+ const u8 key[CHACHA20POLY1305_KEY_SIZE], -+ int encrypt) -+{ -+ const u8 *pad0 = page_address(ZERO_PAGE(0)); -+ struct poly1305_desc_ctx poly1305_state; -+ u32 chacha_state[CHACHA_STATE_WORDS]; -+ struct sg_mapping_iter miter; -+ size_t partial = 0; -+ unsigned int flags; -+ bool ret = true; -+ int sl; -+ union { -+ struct { -+ u32 k[CHACHA_KEY_WORDS]; -+ __le64 iv[2]; -+ }; -+ u8 block0[POLY1305_KEY_SIZE]; -+ u8 chacha_stream[CHACHA_BLOCK_SIZE]; -+ struct { -+ u8 mac[2][POLY1305_DIGEST_SIZE]; -+ }; -+ __le64 lens[2]; -+ } b __aligned(16); -+ -+ chacha_load_key(b.k, key); -+ -+ b.iv[0] = 0; -+ b.iv[1] = cpu_to_le64(nonce); -+ -+ chacha_init(chacha_state, b.k, (u8 *)b.iv); -+ chacha_crypt(chacha_state, b.block0, pad0, sizeof(b.block0), 20); -+ poly1305_init(&poly1305_state, b.block0); -+ -+ if (unlikely(ad_len)) { -+ poly1305_update(&poly1305_state, ad, ad_len); -+ if (ad_len & 0xf) -+ poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf)); -+ } -+ -+ flags = SG_MITER_TO_SG; -+ if (!preemptible()) -+ flags |= SG_MITER_ATOMIC; -+ -+ sg_miter_start(&miter, src, sg_nents(src), flags); -+ -+ for (sl = src_len; sl > 0 && sg_miter_next(&miter); sl -= miter.length) { -+ u8 *addr = miter.addr; -+ size_t length = min_t(size_t, sl, miter.length); -+ -+ if (!encrypt) -+ 
poly1305_update(&poly1305_state, addr, length); -+ -+ if (unlikely(partial)) { -+ size_t l = min(length, CHACHA_BLOCK_SIZE - partial); -+ -+ crypto_xor(addr, b.chacha_stream + partial, l); -+ partial = (partial + l) & (CHACHA_BLOCK_SIZE - 1); -+ -+ addr += l; -+ length -= l; -+ } -+ -+ if (likely(length >= CHACHA_BLOCK_SIZE || length == sl)) { -+ size_t l = length; -+ -+ if (unlikely(length < sl)) -+ l &= ~(CHACHA_BLOCK_SIZE - 1); -+ chacha_crypt(chacha_state, addr, addr, l, 20); -+ addr += l; -+ length -= l; -+ } -+ -+ if (unlikely(length > 0)) { -+ chacha_crypt(chacha_state, b.chacha_stream, pad0, -+ CHACHA_BLOCK_SIZE, 20); -+ crypto_xor(addr, b.chacha_stream, length); -+ partial = length; -+ } -+ -+ if (encrypt) -+ poly1305_update(&poly1305_state, miter.addr, -+ min_t(size_t, sl, miter.length)); -+ } -+ -+ if (src_len & 0xf) -+ poly1305_update(&poly1305_state, pad0, 0x10 - (src_len & 0xf)); -+ -+ b.lens[0] = cpu_to_le64(ad_len); -+ b.lens[1] = cpu_to_le64(src_len); -+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens)); -+ -+ if (likely(sl <= -POLY1305_DIGEST_SIZE)) { -+ if (encrypt) { -+ poly1305_final(&poly1305_state, -+ miter.addr + miter.length + sl); -+ ret = true; -+ } else { -+ poly1305_final(&poly1305_state, b.mac[0]); -+ ret = !crypto_memneq(b.mac[0], -+ miter.addr + miter.length + sl, -+ POLY1305_DIGEST_SIZE); -+ } -+ } -+ -+ sg_miter_stop(&miter); -+ -+ if (unlikely(sl > -POLY1305_DIGEST_SIZE)) { -+ poly1305_final(&poly1305_state, b.mac[1]); -+ scatterwalk_map_and_copy(b.mac[encrypt], src, src_len, -+ sizeof(b.mac[1]), encrypt); -+ ret = encrypt || -+ !crypto_memneq(b.mac[0], b.mac[1], POLY1305_DIGEST_SIZE); -+ } -+ -+ memzero_explicit(chacha_state, sizeof(chacha_state)); -+ memzero_explicit(&b, sizeof(b)); -+ -+ return ret; -+} -+ -+bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u64 nonce, -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]) -+{ -+ return 
chacha20poly1305_crypt_sg_inplace(src, src_len, ad, ad_len, -+ nonce, key, 1); -+} -+EXPORT_SYMBOL(chacha20poly1305_encrypt_sg_inplace); -+ -+bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u64 nonce, -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]) -+{ -+ if (unlikely(src_len < POLY1305_DIGEST_SIZE)) -+ return false; -+ -+ return chacha20poly1305_crypt_sg_inplace(src, -+ src_len - POLY1305_DIGEST_SIZE, -+ ad, ad_len, nonce, key, 0); -+} -+EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace); -+ - static int __init mod_init(void) - { - if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0034-crypto-chacha_generic-remove-unnecessary-setkey-func.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0034-crypto-chacha_generic-remove-unnecessary-setkey-func.patch deleted file mode 100644 index 709b1fbcf..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0034-crypto-chacha_generic-remove-unnecessary-setkey-func.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Eric Biggers -Date: Sun, 17 Nov 2019 23:21:29 -0800 -Subject: [PATCH] crypto: chacha_generic - remove unnecessary setkey() - functions - -commit 2043323a799a660bc84bbee404cf7a2617ec6157 upstream. - -Use chacha20_setkey() and chacha12_setkey() from - instead of defining them again in -chacha_generic.c. - -Signed-off-by: Eric Biggers -Acked-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - crypto/chacha_generic.c | 18 +++--------------- - 1 file changed, 3 insertions(+), 15 deletions(-) - ---- a/crypto/chacha_generic.c -+++ b/crypto/chacha_generic.c -@@ -37,18 +37,6 @@ static int chacha_stream_xor(struct skci - return err; - } - --static int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key, -- unsigned int keysize) --{ -- return chacha_setkey(tfm, key, keysize, 20); --} -- --static int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, -- unsigned int keysize) --{ -- return chacha_setkey(tfm, key, keysize, 12); --} -- - static int crypto_chacha_crypt(struct skcipher_request *req) - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); -@@ -91,7 +79,7 @@ static struct skcipher_alg algs[] = { - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = CHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha20_setkey, -+ .setkey = chacha20_setkey, - .encrypt = crypto_chacha_crypt, - .decrypt = crypto_chacha_crypt, - }, { -@@ -106,7 +94,7 @@ static struct skcipher_alg algs[] = { - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha20_setkey, -+ .setkey = chacha20_setkey, - .encrypt = crypto_xchacha_crypt, - .decrypt = crypto_xchacha_crypt, - }, { -@@ -121,7 +109,7 @@ static struct skcipher_alg algs[] = { - .max_keysize = CHACHA_KEY_SIZE, - .ivsize = XCHACHA_IV_SIZE, - .chunksize = CHACHA_BLOCK_SIZE, -- .setkey = crypto_chacha12_setkey, -+ .setkey = chacha12_setkey, - .encrypt = crypto_xchacha_crypt, - .decrypt = crypto_xchacha_crypt, - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0035-crypto-x86-chacha-only-unregister-algorithms-if-regi.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0035-crypto-x86-chacha-only-unregister-algorithms-if-regi.patch deleted file mode 100644 index 4554ea898..000000000 --- 
a/feeds/ipq807x/ipq807x/patches/080-wireguard-0035-crypto-x86-chacha-only-unregister-algorithms-if-regi.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Eric Biggers -Date: Sun, 17 Nov 2019 23:21:58 -0800 -Subject: [PATCH] crypto: x86/chacha - only unregister algorithms if registered - -commit b62755aed3a3f5ca9edd2718339ccea3b6bbbe57 upstream. - -It's not valid to call crypto_unregister_skciphers() without a prior -call to crypto_register_skciphers(). - -Fixes: 84e03fa39fbe ("crypto: x86/chacha - expose SIMD ChaCha routine as library function") -Signed-off-by: Eric Biggers -Acked-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/chacha_glue.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/arch/x86/crypto/chacha_glue.c -+++ b/arch/x86/crypto/chacha_glue.c -@@ -304,7 +304,8 @@ static int __init chacha_simd_mod_init(v - - static void __exit chacha_simd_mod_fini(void) - { -- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -+ if (boot_cpu_has(X86_FEATURE_SSSE3)) -+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); - } - - module_init(chacha_simd_mod_init); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0036-crypto-lib-chacha20poly1305-use-chacha20_crypt.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0036-crypto-lib-chacha20poly1305-use-chacha20_crypt.patch deleted file mode 100644 index 6ad20b999..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0036-crypto-lib-chacha20poly1305-use-chacha20_crypt.patch +++ /dev/null @@ -1,83 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Eric Biggers -Date: Sun, 17 Nov 2019 23:22:16 -0800 -Subject: [PATCH] crypto: lib/chacha20poly1305 - use chacha20_crypt() - -commit 413808b71e6204b0cc1eeaa77960f7c3cd381d33 upstream. 
- -Use chacha20_crypt() instead of chacha_crypt(), since it's not really -appropriate for users of the ChaCha library API to be passing the number -of rounds as an argument. - -Signed-off-by: Eric Biggers -Acked-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - lib/crypto/chacha20poly1305.c | 16 ++++++++-------- - 1 file changed, 8 insertions(+), 8 deletions(-) - ---- a/lib/crypto/chacha20poly1305.c -+++ b/lib/crypto/chacha20poly1305.c -@@ -66,14 +66,14 @@ __chacha20poly1305_encrypt(u8 *dst, cons - __le64 lens[2]; - } b; - -- chacha_crypt(chacha_state, b.block0, pad0, sizeof(b.block0), 20); -+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0)); - poly1305_init(&poly1305_state, b.block0); - - poly1305_update(&poly1305_state, ad, ad_len); - if (ad_len & 0xf) - poly1305_update(&poly1305_state, pad0, 0x10 - (ad_len & 0xf)); - -- chacha_crypt(chacha_state, dst, src, src_len, 20); -+ chacha20_crypt(chacha_state, dst, src, src_len); - - poly1305_update(&poly1305_state, dst, src_len); - if (src_len & 0xf) -@@ -140,7 +140,7 @@ __chacha20poly1305_decrypt(u8 *dst, cons - if (unlikely(src_len < POLY1305_DIGEST_SIZE)) - return false; - -- chacha_crypt(chacha_state, b.block0, pad0, sizeof(b.block0), 20); -+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0)); - poly1305_init(&poly1305_state, b.block0); - - poly1305_update(&poly1305_state, ad, ad_len); -@@ -160,7 +160,7 @@ __chacha20poly1305_decrypt(u8 *dst, cons - - ret = crypto_memneq(b.mac, src + dst_len, POLY1305_DIGEST_SIZE); - if (likely(!ret)) -- chacha_crypt(chacha_state, dst, src, dst_len, 20); -+ chacha20_crypt(chacha_state, dst, src, dst_len); - - memzero_explicit(&b, sizeof(b)); - -@@ -241,7 +241,7 @@ bool chacha20poly1305_crypt_sg_inplace(s - b.iv[1] = cpu_to_le64(nonce); - - chacha_init(chacha_state, b.k, (u8 *)b.iv); -- chacha_crypt(chacha_state, b.block0, pad0, sizeof(b.block0), 20); -+ chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0)); - 
poly1305_init(&poly1305_state, b.block0); - - if (unlikely(ad_len)) { -@@ -278,14 +278,14 @@ bool chacha20poly1305_crypt_sg_inplace(s - - if (unlikely(length < sl)) - l &= ~(CHACHA_BLOCK_SIZE - 1); -- chacha_crypt(chacha_state, addr, addr, l, 20); -+ chacha20_crypt(chacha_state, addr, addr, l); - addr += l; - length -= l; - } - - if (unlikely(length > 0)) { -- chacha_crypt(chacha_state, b.chacha_stream, pad0, -- CHACHA_BLOCK_SIZE, 20); -+ chacha20_crypt(chacha_state, b.chacha_stream, pad0, -+ CHACHA_BLOCK_SIZE); - crypto_xor(addr, b.chacha_stream, length); - partial = length; - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0037-crypto-arch-conditionalize-crypto-api-in-arch-glue-f.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0037-crypto-arch-conditionalize-crypto-api-in-arch-glue-f.patch deleted file mode 100644 index d510438f1..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0037-crypto-arch-conditionalize-crypto-api-in-arch-glue-f.patch +++ /dev/null @@ -1,275 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 25 Nov 2019 11:31:12 +0100 -Subject: [PATCH] crypto: arch - conditionalize crypto api in arch glue for lib - code - -commit 8394bfec51e0e565556101bcc4e2fe7551104cd8 upstream. - -For glue code that's used by Zinc, the actual Crypto API functions might -not necessarily exist, and don't need to exist either. Before this -patch, there are valid build configurations that lead to a unbuildable -kernel. This fixes it to conditionalize those symbols on the existence -of the proper config entry. - -Signed-off-by: Jason A. Donenfeld -Acked-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/arm/crypto/chacha-glue.c | 26 ++++++++++++++++---------- - arch/arm/crypto/curve25519-glue.c | 5 +++-- - arch/arm/crypto/poly1305-glue.c | 9 ++++++--- - arch/arm64/crypto/chacha-neon-glue.c | 5 +++-- - arch/arm64/crypto/poly1305-glue.c | 5 +++-- - arch/mips/crypto/chacha-glue.c | 6 ++++-- - arch/mips/crypto/poly1305-glue.c | 6 ++++-- - arch/x86/crypto/blake2s-glue.c | 6 ++++-- - arch/x86/crypto/chacha_glue.c | 5 +++-- - arch/x86/crypto/curve25519-x86_64.c | 7 ++++--- - arch/x86/crypto/poly1305_glue.c | 5 +++-- - 11 files changed, 53 insertions(+), 32 deletions(-) - ---- a/arch/arm/crypto/chacha-glue.c -+++ b/arch/arm/crypto/chacha-glue.c -@@ -286,11 +286,13 @@ static struct skcipher_alg neon_algs[] = - - static int __init chacha_simd_mod_init(void) - { -- int err; -+ int err = 0; - -- err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); -- if (err) -- return err; -+ if (IS_REACHABLE(CONFIG_CRYPTO_BLKCIPHER)) { -+ err = crypto_register_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); -+ if (err) -+ return err; -+ } - - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) { - int i; -@@ -310,18 +312,22 @@ static int __init chacha_simd_mod_init(v - static_branch_enable(&use_neon); - } - -- err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs)); -- if (err) -- crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); -+ if (IS_REACHABLE(CONFIG_CRYPTO_BLKCIPHER)) { -+ err = crypto_register_skciphers(neon_algs, ARRAY_SIZE(neon_algs)); -+ if (err) -+ crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); -+ } - } - return err; - } - - static void __exit chacha_simd_mod_fini(void) - { -- crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); -- if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) -- crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs)); -+ if (IS_REACHABLE(CONFIG_CRYPTO_BLKCIPHER)) { -+ crypto_unregister_skciphers(arm_algs, ARRAY_SIZE(arm_algs)); -+ if 
(IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && (elf_hwcap & HWCAP_NEON)) -+ crypto_unregister_skciphers(neon_algs, ARRAY_SIZE(neon_algs)); -+ } - } - - module_init(chacha_simd_mod_init); ---- a/arch/arm/crypto/curve25519-glue.c -+++ b/arch/arm/crypto/curve25519-glue.c -@@ -108,14 +108,15 @@ static int __init mod_init(void) - { - if (elf_hwcap & HWCAP_NEON) { - static_branch_enable(&have_neon); -- return crypto_register_kpp(&curve25519_alg); -+ return IS_REACHABLE(CONFIG_CRYPTO_KPP) ? -+ crypto_register_kpp(&curve25519_alg) : 0; - } - return 0; - } - - static void __exit mod_exit(void) - { -- if (elf_hwcap & HWCAP_NEON) -+ if (IS_REACHABLE(CONFIG_CRYPTO_KPP) && elf_hwcap & HWCAP_NEON) - crypto_unregister_kpp(&curve25519_alg); - } - ---- a/arch/arm/crypto/poly1305-glue.c -+++ b/arch/arm/crypto/poly1305-glue.c -@@ -249,16 +249,19 @@ static int __init arm_poly1305_mod_init( - if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && - (elf_hwcap & HWCAP_NEON)) - static_branch_enable(&have_neon); -- else -+ else if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) - /* register only the first entry */ - return crypto_register_shash(&arm_poly1305_algs[0]); - -- return crypto_register_shashes(arm_poly1305_algs, -- ARRAY_SIZE(arm_poly1305_algs)); -+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? -+ crypto_register_shashes(arm_poly1305_algs, -+ ARRAY_SIZE(arm_poly1305_algs)) : 0; - } - - static void __exit arm_poly1305_mod_exit(void) - { -+ if (!IS_REACHABLE(CONFIG_CRYPTO_HASH)) -+ return; - if (!static_branch_likely(&have_neon)) { - crypto_unregister_shash(&arm_poly1305_algs[0]); - return; ---- a/arch/arm64/crypto/chacha-neon-glue.c -+++ b/arch/arm64/crypto/chacha-neon-glue.c -@@ -211,12 +211,13 @@ static int __init chacha_simd_mod_init(v - - static_branch_enable(&have_neon); - -- return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); -+ return IS_REACHABLE(CONFIG_CRYPTO_BLKCIPHER) ? 
-+ crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0; - } - - static void __exit chacha_simd_mod_fini(void) - { -- if (cpu_have_named_feature(ASIMD)) -+ if (IS_REACHABLE(CONFIG_CRYPTO_BLKCIPHER) && cpu_have_named_feature(ASIMD)) - crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); - } - ---- a/arch/arm64/crypto/poly1305-glue.c -+++ b/arch/arm64/crypto/poly1305-glue.c -@@ -220,12 +220,13 @@ static int __init neon_poly1305_mod_init - - static_branch_enable(&have_neon); - -- return crypto_register_shash(&neon_poly1305_alg); -+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? -+ crypto_register_shash(&neon_poly1305_alg) : 0; - } - - static void __exit neon_poly1305_mod_exit(void) - { -- if (cpu_have_named_feature(ASIMD)) -+ if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && cpu_have_named_feature(ASIMD)) - crypto_unregister_shash(&neon_poly1305_alg); - } - ---- a/arch/mips/crypto/chacha-glue.c -+++ b/arch/mips/crypto/chacha-glue.c -@@ -128,12 +128,14 @@ static struct skcipher_alg algs[] = { - - static int __init chacha_simd_mod_init(void) - { -- return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); -+ return IS_REACHABLE(CONFIG_CRYPTO_BLKCIPHER) ? -+ crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0; - } - - static void __exit chacha_simd_mod_fini(void) - { -- crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); -+ if (IS_REACHABLE(CONFIG_CRYPTO_BLKCIPHER)) -+ crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); - } - - module_init(chacha_simd_mod_init); ---- a/arch/mips/crypto/poly1305-glue.c -+++ b/arch/mips/crypto/poly1305-glue.c -@@ -187,12 +187,14 @@ static struct shash_alg mips_poly1305_al - - static int __init mips_poly1305_mod_init(void) - { -- return crypto_register_shash(&mips_poly1305_alg); -+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? 
-+ crypto_register_shash(&mips_poly1305_alg) : 0; - } - - static void __exit mips_poly1305_mod_exit(void) - { -- crypto_unregister_shash(&mips_poly1305_alg); -+ if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) -+ crypto_unregister_shash(&mips_poly1305_alg); - } - - module_init(mips_poly1305_mod_init); ---- a/arch/x86/crypto/blake2s-glue.c -+++ b/arch/x86/crypto/blake2s-glue.c -@@ -210,12 +210,14 @@ static int __init blake2s_mod_init(void) - XFEATURE_MASK_AVX512, NULL)) - static_branch_enable(&blake2s_use_avx512); - -- return crypto_register_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); -+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? -+ crypto_register_shashes(blake2s_algs, -+ ARRAY_SIZE(blake2s_algs)) : 0; - } - - static void __exit blake2s_mod_exit(void) - { -- if (boot_cpu_has(X86_FEATURE_SSSE3)) -+ if (IS_REACHABLE(CONFIG_CRYPTO_HASH) && boot_cpu_has(X86_FEATURE_SSSE3)) - crypto_unregister_shashes(blake2s_algs, ARRAY_SIZE(blake2s_algs)); - } - ---- a/arch/x86/crypto/chacha_glue.c -+++ b/arch/x86/crypto/chacha_glue.c -@@ -299,12 +299,13 @@ static int __init chacha_simd_mod_init(v - boot_cpu_has(X86_FEATURE_AVX512BW)) /* kmovq */ - static_branch_enable(&chacha_use_avx512vl); - } -- return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); -+ return IS_REACHABLE(CONFIG_CRYPTO_BLKCIPHER) ? -+ crypto_register_skciphers(algs, ARRAY_SIZE(algs)) : 0; - } - - static void __exit chacha_simd_mod_fini(void) - { -- if (boot_cpu_has(X86_FEATURE_SSSE3)) -+ if (IS_REACHABLE(CONFIG_CRYPTO_BLKCIPHER) && boot_cpu_has(X86_FEATURE_SSSE3)) - crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); - } - ---- a/arch/x86/crypto/curve25519-x86_64.c -+++ b/arch/x86/crypto/curve25519-x86_64.c -@@ -2457,13 +2457,14 @@ static int __init curve25519_mod_init(vo - static_branch_enable(&curve25519_use_adx); - else - return 0; -- return crypto_register_kpp(&curve25519_alg); -+ return IS_REACHABLE(CONFIG_CRYPTO_KPP) ? 
-+ crypto_register_kpp(&curve25519_alg) : 0; - } - - static void __exit curve25519_mod_exit(void) - { -- if (boot_cpu_has(X86_FEATURE_BMI2) || -- boot_cpu_has(X86_FEATURE_ADX)) -+ if (IS_REACHABLE(CONFIG_CRYPTO_KPP) && -+ (boot_cpu_has(X86_FEATURE_BMI2) || boot_cpu_has(X86_FEATURE_ADX))) - crypto_unregister_kpp(&curve25519_alg); - } - ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -224,12 +224,13 @@ static int __init poly1305_simd_mod_init - cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) - static_branch_enable(&poly1305_use_avx2); - -- return crypto_register_shash(&alg); -+ return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? crypto_register_shash(&alg) : 0; - } - - static void __exit poly1305_simd_mod_exit(void) - { -- crypto_unregister_shash(&alg); -+ if (IS_REACHABLE(CONFIG_CRYPTO_HASH)) -+ crypto_unregister_shash(&alg); - } - - module_init(poly1305_simd_mod_init); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0038-crypto-chacha-fix-warning-message-in-header-file.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0038-crypto-chacha-fix-warning-message-in-header-file.patch deleted file mode 100644 index ccd03e352..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0038-crypto-chacha-fix-warning-message-in-header-file.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Valdis=20Kl=C4=93tnieks?= -Date: Thu, 5 Dec 2019 20:58:36 -0500 -Subject: [PATCH] crypto: chacha - fix warning message in header file - -commit 579d705cd64e44f3fcda1a6cfd5f37468a5ddf63 upstream. 
- -Building with W=1 causes a warning: - - CC [M] arch/x86/crypto/chacha_glue.o -In file included from arch/x86/crypto/chacha_glue.c:10: -./include/crypto/internal/chacha.h:37:1: warning: 'inline' is not at beginning of declaration [-Wold-style-declaration] - 37 | static int inline chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, - | ^~~~~~ - -Straighten out the order to match the rest of the header file. - -Signed-off-by: Valdis Kletnieks -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - include/crypto/internal/chacha.h | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/include/crypto/internal/chacha.h -+++ b/include/crypto/internal/chacha.h -@@ -34,7 +34,7 @@ static inline int chacha20_setkey(struct - return chacha_setkey(tfm, key, keysize, 20); - } - --static int inline chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, -+static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key, - unsigned int keysize) - { - return chacha_setkey(tfm, key, keysize, 12); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0039-crypto-arm-curve25519-add-arch-specific-key-generati.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0039-crypto-arm-curve25519-add-arch-specific-key-generati.patch deleted file mode 100644 index 67de22deb..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0039-crypto-arm-curve25519-add-arch-specific-key-generati.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 11 Dec 2019 10:26:39 +0100 -Subject: [PATCH] crypto: arm/curve25519 - add arch-specific key generation - function - -commit 84faa307249b341f6ad8de3e1869d77a65e26669 upstream. - -Somehow this was forgotten when Zinc was being split into oddly shaped -pieces, resulting in linker errors. The x86_64 glue has a specific key -generation implementation, but the Arm one does not. 
However, it can -still receive the NEON speedups by calling the ordinary DH function -using the base point. - -Signed-off-by: Jason A. Donenfeld -Acked-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm/crypto/curve25519-glue.c | 7 +++++++ - 1 file changed, 7 insertions(+) - ---- a/arch/arm/crypto/curve25519-glue.c -+++ b/arch/arm/crypto/curve25519-glue.c -@@ -38,6 +38,13 @@ void curve25519_arch(u8 out[CURVE25519_K - } - EXPORT_SYMBOL(curve25519_arch); - -+void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], -+ const u8 secret[CURVE25519_KEY_SIZE]) -+{ -+ return curve25519_arch(pub, secret, curve25519_base_point); -+} -+EXPORT_SYMBOL(curve25519_base_arch); -+ - static int curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, - unsigned int len) - { diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0040-crypto-lib-curve25519-re-add-selftests.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0040-crypto-lib-curve25519-re-add-selftests.patch deleted file mode 100644 index e43d196a3..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0040-crypto-lib-curve25519-re-add-selftests.patch +++ /dev/null @@ -1,1387 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 16 Dec 2019 19:53:26 +0100 -Subject: [PATCH] crypto: lib/curve25519 - re-add selftests - -commit aa127963f1cab2b93c74c9b128a84610203fb674 upstream. - -Somehow these were dropped when Zinc was being integrated, which is -problematic, because testing the library interface for Curve25519 is -important.. This commit simply adds them back and wires them in in the -same way that the blake2s selftests are wired in. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - lib/crypto/Makefile | 1 + - lib/crypto/curve25519-selftest.c | 1321 ++++++++++++++++++++++++++++++ - lib/crypto/curve25519.c | 17 + - 3 files changed, 1339 insertions(+) - create mode 100644 lib/crypto/curve25519-selftest.c - ---- a/lib/crypto/Makefile -+++ b/lib/crypto/Makefile -@@ -36,4 +36,5 @@ libsha256-y := sha256.o - ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y) - libblake2s-y += blake2s-selftest.o - libchacha20poly1305-y += chacha20poly1305-selftest.o -+libcurve25519-y += curve25519-selftest.o - endif ---- /dev/null -+++ b/lib/crypto/curve25519-selftest.c -@@ -0,0 +1,1321 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include -+ -+struct curve25519_test_vector { -+ u8 private[CURVE25519_KEY_SIZE]; -+ u8 public[CURVE25519_KEY_SIZE]; -+ u8 result[CURVE25519_KEY_SIZE]; -+ bool valid; -+}; -+static const struct curve25519_test_vector curve25519_test_vectors[] __initconst = { -+ { -+ .private = { 0x77, 0x07, 0x6d, 0x0a, 0x73, 0x18, 0xa5, 0x7d, -+ 0x3c, 0x16, 0xc1, 0x72, 0x51, 0xb2, 0x66, 0x45, -+ 0xdf, 0x4c, 0x2f, 0x87, 0xeb, 0xc0, 0x99, 0x2a, -+ 0xb1, 0x77, 0xfb, 0xa5, 0x1d, 0xb9, 0x2c, 0x2a }, -+ .public = { 0xde, 0x9e, 0xdb, 0x7d, 0x7b, 0x7d, 0xc1, 0xb4, -+ 0xd3, 0x5b, 0x61, 0xc2, 0xec, 0xe4, 0x35, 0x37, -+ 0x3f, 0x83, 0x43, 0xc8, 0x5b, 0x78, 0x67, 0x4d, -+ 0xad, 0xfc, 0x7e, 0x14, 0x6f, 0x88, 0x2b, 0x4f }, -+ .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, -+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, -+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, -+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 }, -+ .valid = true -+ }, -+ { -+ .private = { 0x5d, 0xab, 0x08, 0x7e, 0x62, 0x4a, 0x8a, 0x4b, -+ 0x79, 0xe1, 0x7f, 0x8b, 0x83, 0x80, 0x0e, 0xe6, -+ 0x6f, 0x3b, 0xb1, 0x29, 0x26, 0x18, 0xb6, 0xfd, -+ 0x1c, 0x2f, 0x8b, 0x27, 0xff, 0x88, 0xe0, 0xeb }, -+ .public = { 0x85, 0x20, 0xf0, 0x09, 0x89, 0x30, 0xa7, 0x54, -+ 0x74, 0x8b, 0x7d, 0xdc, 
0xb4, 0x3e, 0xf7, 0x5a, -+ 0x0d, 0xbf, 0x3a, 0x0d, 0x26, 0x38, 0x1a, 0xf4, -+ 0xeb, 0xa4, 0xa9, 0x8e, 0xaa, 0x9b, 0x4e, 0x6a }, -+ .result = { 0x4a, 0x5d, 0x9d, 0x5b, 0xa4, 0xce, 0x2d, 0xe1, -+ 0x72, 0x8e, 0x3b, 0xf4, 0x80, 0x35, 0x0f, 0x25, -+ 0xe0, 0x7e, 0x21, 0xc9, 0x47, 0xd1, 0x9e, 0x33, -+ 0x76, 0xf0, 0x9b, 0x3c, 0x1e, 0x16, 0x17, 0x42 }, -+ .valid = true -+ }, -+ { -+ .private = { 1 }, -+ .public = { 0x25, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .result = { 0x3c, 0x77, 0x77, 0xca, 0xf9, 0x97, 0xb2, 0x64, -+ 0x41, 0x60, 0x77, 0x66, 0x5b, 0x4e, 0x22, 0x9d, -+ 0x0b, 0x95, 0x48, 0xdc, 0x0c, 0xd8, 0x19, 0x98, -+ 0xdd, 0xcd, 0xc5, 0xc8, 0x53, 0x3c, 0x79, 0x7f }, -+ .valid = true -+ }, -+ { -+ .private = { 1 }, -+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0xb3, 0x2d, 0x13, 0x62, 0xc2, 0x48, 0xd6, 0x2f, -+ 0xe6, 0x26, 0x19, 0xcf, 0xf0, 0x4d, 0xd4, 0x3d, -+ 0xb7, 0x3f, 0xfc, 0x1b, 0x63, 0x08, 0xed, 0xe3, -+ 0x0b, 0x78, 0xd8, 0x73, 0x80, 0xf1, 0xe8, 0x34 }, -+ .valid = true -+ }, -+ { -+ .private = { 0xa5, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, -+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, -+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, -+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0xc4 }, -+ .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, -+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, -+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, -+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c }, -+ .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, -+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, -+ 0x32, 0xec, 0xcf, 0x03, 0x49, 0x1c, 0x71, 0xf7, -+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 }, 
-+ .valid = true -+ }, -+ { -+ .private = { 1, 2, 3, 4 }, -+ .public = { 0 }, -+ .result = { 0 }, -+ .valid = false -+ }, -+ { -+ .private = { 2, 4, 6, 8 }, -+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, -+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, -+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, -+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8 }, -+ .result = { 0 }, -+ .valid = false -+ }, -+ { -+ .private = { 0xff, 0xff, 0xff, 0xff, 0x0a, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0x0a, 0x00, 0xfb, 0x9f }, -+ .result = { 0x77, 0x52, 0xb6, 0x18, 0xc1, 0x2d, 0x48, 0xd2, -+ 0xc6, 0x93, 0x46, 0x83, 0x81, 0x7c, 0xc6, 0x57, -+ 0xf3, 0x31, 0x03, 0x19, 0x49, 0x48, 0x20, 0x05, -+ 0x42, 0x2b, 0x4e, 0xae, 0x8d, 0x1d, 0x43, 0x23 }, -+ .valid = true -+ }, -+ { -+ .private = { 0x8e, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x8e, 0x06 }, -+ .result = { 0x5a, 0xdf, 0xaa, 0x25, 0x86, 0x8e, 0x32, 0x3d, -+ 0xae, 0x49, 0x62, 0xc1, 0x01, 0x5c, 0xb3, 0x12, -+ 0xe1, 0xc5, 0xc7, 0x9e, 0x95, 0x3f, 0x03, 0x99, -+ 0xb0, 0xba, 0x16, 0x22, 0xf3, 0xb6, 0xf7, 0x0c }, -+ .valid = true -+ }, -+ /* wycheproof - normal case */ -+ { -+ .private = { 0x48, 0x52, 0x83, 0x4d, 0x9d, 0x6b, 0x77, 0xda, -+ 0xde, 0xab, 0xaa, 0xf2, 0xe1, 0x1d, 0xca, 0x66, -+ 0xd1, 0x9f, 0xe7, 0x49, 0x93, 0xa7, 0xbe, 0xc3, -+ 0x6c, 0x6e, 
0x16, 0xa0, 0x98, 0x3f, 0xea, 0xba }, -+ .public = { 0x9c, 0x64, 0x7d, 0x9a, 0xe5, 0x89, 0xb9, 0xf5, -+ 0x8f, 0xdc, 0x3c, 0xa4, 0x94, 0x7e, 0xfb, 0xc9, -+ 0x15, 0xc4, 0xb2, 0xe0, 0x8e, 0x74, 0x4a, 0x0e, -+ 0xdf, 0x46, 0x9d, 0xac, 0x59, 0xc8, 0xf8, 0x5a }, -+ .result = { 0x87, 0xb7, 0xf2, 0x12, 0xb6, 0x27, 0xf7, 0xa5, -+ 0x4c, 0xa5, 0xe0, 0xbc, 0xda, 0xdd, 0xd5, 0x38, -+ 0x9d, 0x9d, 0xe6, 0x15, 0x6c, 0xdb, 0xcf, 0x8e, -+ 0xbe, 0x14, 0xff, 0xbc, 0xfb, 0x43, 0x65, 0x51 }, -+ .valid = true -+ }, -+ /* wycheproof - public key on twist */ -+ { -+ .private = { 0x58, 0x8c, 0x06, 0x1a, 0x50, 0x80, 0x4a, 0xc4, -+ 0x88, 0xad, 0x77, 0x4a, 0xc7, 0x16, 0xc3, 0xf5, -+ 0xba, 0x71, 0x4b, 0x27, 0x12, 0xe0, 0x48, 0x49, -+ 0x13, 0x79, 0xa5, 0x00, 0x21, 0x19, 0x98, 0xa8 }, -+ .public = { 0x63, 0xaa, 0x40, 0xc6, 0xe3, 0x83, 0x46, 0xc5, -+ 0xca, 0xf2, 0x3a, 0x6d, 0xf0, 0xa5, 0xe6, 0xc8, -+ 0x08, 0x89, 0xa0, 0x86, 0x47, 0xe5, 0x51, 0xb3, -+ 0x56, 0x34, 0x49, 0xbe, 0xfc, 0xfc, 0x97, 0x33 }, -+ .result = { 0xb1, 0xa7, 0x07, 0x51, 0x94, 0x95, 0xff, 0xff, -+ 0xb2, 0x98, 0xff, 0x94, 0x17, 0x16, 0xb0, 0x6d, -+ 0xfa, 0xb8, 0x7c, 0xf8, 0xd9, 0x11, 0x23, 0xfe, -+ 0x2b, 0xe9, 0xa2, 0x33, 0xdd, 0xa2, 0x22, 0x12 }, -+ .valid = true -+ }, -+ /* wycheproof - public key on twist */ -+ { -+ .private = { 0xb0, 0x5b, 0xfd, 0x32, 0xe5, 0x53, 0x25, 0xd9, -+ 0xfd, 0x64, 0x8c, 0xb3, 0x02, 0x84, 0x80, 0x39, -+ 0x00, 0x0b, 0x39, 0x0e, 0x44, 0xd5, 0x21, 0xe5, -+ 0x8a, 0xab, 0x3b, 0x29, 0xa6, 0x96, 0x0b, 0xa8 }, -+ .public = { 0x0f, 0x83, 0xc3, 0x6f, 0xde, 0xd9, 0xd3, 0x2f, -+ 0xad, 0xf4, 0xef, 0xa3, 0xae, 0x93, 0xa9, 0x0b, -+ 0xb5, 0xcf, 0xa6, 0x68, 0x93, 0xbc, 0x41, 0x2c, -+ 0x43, 0xfa, 0x72, 0x87, 0xdb, 0xb9, 0x97, 0x79 }, -+ .result = { 0x67, 0xdd, 0x4a, 0x6e, 0x16, 0x55, 0x33, 0x53, -+ 0x4c, 0x0e, 0x3f, 0x17, 0x2e, 0x4a, 0xb8, 0x57, -+ 0x6b, 0xca, 0x92, 0x3a, 0x5f, 0x07, 0xb2, 0xc0, -+ 0x69, 0xb4, 0xc3, 0x10, 0xff, 0x2e, 0x93, 0x5b }, -+ .valid = true -+ }, -+ /* wycheproof - public key on twist */ -+ { -+ 
.private = { 0x70, 0xe3, 0x4b, 0xcb, 0xe1, 0xf4, 0x7f, 0xbc, -+ 0x0f, 0xdd, 0xfd, 0x7c, 0x1e, 0x1a, 0xa5, 0x3d, -+ 0x57, 0xbf, 0xe0, 0xf6, 0x6d, 0x24, 0x30, 0x67, -+ 0xb4, 0x24, 0xbb, 0x62, 0x10, 0xbe, 0xd1, 0x9c }, -+ .public = { 0x0b, 0x82, 0x11, 0xa2, 0xb6, 0x04, 0x90, 0x97, -+ 0xf6, 0x87, 0x1c, 0x6c, 0x05, 0x2d, 0x3c, 0x5f, -+ 0xc1, 0xba, 0x17, 0xda, 0x9e, 0x32, 0xae, 0x45, -+ 0x84, 0x03, 0xb0, 0x5b, 0xb2, 0x83, 0x09, 0x2a }, -+ .result = { 0x4a, 0x06, 0x38, 0xcf, 0xaa, 0x9e, 0xf1, 0x93, -+ 0x3b, 0x47, 0xf8, 0x93, 0x92, 0x96, 0xa6, 0xb2, -+ 0x5b, 0xe5, 0x41, 0xef, 0x7f, 0x70, 0xe8, 0x44, -+ 0xc0, 0xbc, 0xc0, 0x0b, 0x13, 0x4d, 0xe6, 0x4a }, -+ .valid = true -+ }, -+ /* wycheproof - public key on twist */ -+ { -+ .private = { 0x68, 0xc1, 0xf3, 0xa6, 0x53, 0xa4, 0xcd, 0xb1, -+ 0xd3, 0x7b, 0xba, 0x94, 0x73, 0x8f, 0x8b, 0x95, -+ 0x7a, 0x57, 0xbe, 0xb2, 0x4d, 0x64, 0x6e, 0x99, -+ 0x4d, 0xc2, 0x9a, 0x27, 0x6a, 0xad, 0x45, 0x8d }, -+ .public = { 0x34, 0x3a, 0xc2, 0x0a, 0x3b, 0x9c, 0x6a, 0x27, -+ 0xb1, 0x00, 0x81, 0x76, 0x50, 0x9a, 0xd3, 0x07, -+ 0x35, 0x85, 0x6e, 0xc1, 0xc8, 0xd8, 0xfc, 0xae, -+ 0x13, 0x91, 0x2d, 0x08, 0xd1, 0x52, 0xf4, 0x6c }, -+ .result = { 0x39, 0x94, 0x91, 0xfc, 0xe8, 0xdf, 0xab, 0x73, -+ 0xb4, 0xf9, 0xf6, 0x11, 0xde, 0x8e, 0xa0, 0xb2, -+ 0x7b, 0x28, 0xf8, 0x59, 0x94, 0x25, 0x0b, 0x0f, -+ 0x47, 0x5d, 0x58, 0x5d, 0x04, 0x2a, 0xc2, 0x07 }, -+ .valid = true -+ }, -+ /* wycheproof - public key on twist */ -+ { -+ .private = { 0xd8, 0x77, 0xb2, 0x6d, 0x06, 0xdf, 0xf9, 0xd9, -+ 0xf7, 0xfd, 0x4c, 0x5b, 0x37, 0x69, 0xf8, 0xcd, -+ 0xd5, 0xb3, 0x05, 0x16, 0xa5, 0xab, 0x80, 0x6b, -+ 0xe3, 0x24, 0xff, 0x3e, 0xb6, 0x9e, 0xa0, 0xb2 }, -+ .public = { 0xfa, 0x69, 0x5f, 0xc7, 0xbe, 0x8d, 0x1b, 0xe5, -+ 0xbf, 0x70, 0x48, 0x98, 0xf3, 0x88, 0xc4, 0x52, -+ 0xba, 0xfd, 0xd3, 0xb8, 0xea, 0xe8, 0x05, 0xf8, -+ 0x68, 0x1a, 0x8d, 0x15, 0xc2, 0xd4, 0xe1, 0x42 }, -+ .result = { 0x2c, 0x4f, 0xe1, 0x1d, 0x49, 0x0a, 0x53, 0x86, -+ 0x17, 0x76, 0xb1, 0x3b, 0x43, 0x54, 0xab, 0xd4, 
-+ 0xcf, 0x5a, 0x97, 0x69, 0x9d, 0xb6, 0xe6, 0xc6, -+ 0x8c, 0x16, 0x26, 0xd0, 0x76, 0x62, 0xf7, 0x58 }, -+ .valid = true -+ }, -+ /* wycheproof - public key = 0 */ -+ { -+ .private = { 0x20, 0x74, 0x94, 0x03, 0x8f, 0x2b, 0xb8, 0x11, -+ 0xd4, 0x78, 0x05, 0xbc, 0xdf, 0x04, 0xa2, 0xac, -+ 0x58, 0x5a, 0xda, 0x7f, 0x2f, 0x23, 0x38, 0x9b, -+ 0xfd, 0x46, 0x58, 0xf9, 0xdd, 0xd4, 0xde, 0xbc }, -+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key = 1 */ -+ { -+ .private = { 0x20, 0x2e, 0x89, 0x72, 0xb6, 0x1c, 0x7e, 0x61, -+ 0x93, 0x0e, 0xb9, 0x45, 0x0b, 0x50, 0x70, 0xea, -+ 0xe1, 0xc6, 0x70, 0x47, 0x56, 0x85, 0x54, 0x1f, -+ 0x04, 0x76, 0x21, 0x7e, 0x48, 0x18, 0xcf, 0xab }, -+ .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - edge case on twist */ -+ { -+ .private = { 0x38, 0xdd, 0xe9, 0xf3, 0xe7, 0xb7, 0x99, 0x04, -+ 0x5f, 0x9a, 0xc3, 0x79, 0x3d, 0x4a, 0x92, 0x77, -+ 0xda, 0xde, 0xad, 0xc4, 0x1b, 0xec, 0x02, 0x90, -+ 0xf8, 0x1f, 0x74, 0x4f, 0x73, 0x77, 0x5f, 0x84 }, -+ .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
-+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .result = { 0x9a, 0x2c, 0xfe, 0x84, 0xff, 0x9c, 0x4a, 0x97, -+ 0x39, 0x62, 0x5c, 0xae, 0x4a, 0x3b, 0x82, 0xa9, -+ 0x06, 0x87, 0x7a, 0x44, 0x19, 0x46, 0xf8, 0xd7, -+ 0xb3, 0xd7, 0x95, 0xfe, 0x8f, 0x5d, 0x16, 0x39 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case on twist */ -+ { -+ .private = { 0x98, 0x57, 0xa9, 0x14, 0xe3, 0xc2, 0x90, 0x36, -+ 0xfd, 0x9a, 0x44, 0x2b, 0xa5, 0x26, 0xb5, 0xcd, -+ 0xcd, 0xf2, 0x82, 0x16, 0x15, 0x3e, 0x63, 0x6c, -+ 0x10, 0x67, 0x7a, 0xca, 0xb6, 0xbd, 0x6a, 0xa5 }, -+ .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .result = { 0x4d, 0xa4, 0xe0, 0xaa, 0x07, 0x2c, 0x23, 0x2e, -+ 0xe2, 0xf0, 0xfa, 0x4e, 0x51, 0x9a, 0xe5, 0x0b, -+ 0x52, 0xc1, 0xed, 0xd0, 0x8a, 0x53, 0x4d, 0x4e, -+ 0xf3, 0x46, 0xc2, 0xe1, 0x06, 0xd2, 0x1d, 0x60 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case on twist */ -+ { -+ .private = { 0x48, 0xe2, 0x13, 0x0d, 0x72, 0x33, 0x05, 0xed, -+ 0x05, 0xe6, 0xe5, 0x89, 0x4d, 0x39, 0x8a, 0x5e, -+ 0x33, 0x36, 0x7a, 0x8c, 0x6a, 0xac, 0x8f, 0xcd, -+ 0xf0, 0xa8, 0x8e, 0x4b, 0x42, 0x82, 0x0d, 0xb7 }, -+ .public = { 0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0xf8, 0xff, -+ 0xff, 0x1f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0x07, 0x00, -+ 0x00, 0xf0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00 }, -+ .result = { 0x9e, 0xd1, 0x0c, 0x53, 0x74, 0x7f, 0x64, 0x7f, -+ 0x82, 0xf4, 0x51, 0x25, 0xd3, 0xde, 0x15, 0xa1, -+ 0xe6, 0xb8, 0x24, 0x49, 0x6a, 0xb4, 0x04, 0x10, -+ 0xff, 0xcc, 0x3c, 0xfe, 0x95, 0x76, 0x0f, 0x3b }, -+ .valid = true -+ }, -+ /* wycheproof - edge case on twist */ -+ { -+ .private = { 0x28, 0xf4, 0x10, 0x11, 0x69, 0x18, 0x51, 0xb3, -+ 0xa6, 0x2b, 0x64, 0x15, 0x53, 0xb3, 0x0d, 0x0d, -+ 0xfd, 0xdc, 0xb8, 0xff, 0xfc, 0xf5, 0x37, 0x00, -+ 0xa7, 0xbe, 0x2f, 0x6a, 0x87, 0x2e, 0x9f, 
0xb0 }, -+ .public = { 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0x07, 0x00, -+ 0x00, 0xe0, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0xf8, 0xff, -+ 0xff, 0x0f, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x7f }, -+ .result = { 0xcf, 0x72, 0xb4, 0xaa, 0x6a, 0xa1, 0xc9, 0xf8, -+ 0x94, 0xf4, 0x16, 0x5b, 0x86, 0x10, 0x9a, 0xa4, -+ 0x68, 0x51, 0x76, 0x48, 0xe1, 0xf0, 0xcc, 0x70, -+ 0xe1, 0xab, 0x08, 0x46, 0x01, 0x76, 0x50, 0x6b }, -+ .valid = true -+ }, -+ /* wycheproof - edge case on twist */ -+ { -+ .private = { 0x18, 0xa9, 0x3b, 0x64, 0x99, 0xb9, 0xf6, 0xb3, -+ 0x22, 0x5c, 0xa0, 0x2f, 0xef, 0x41, 0x0e, 0x0a, -+ 0xde, 0xc2, 0x35, 0x32, 0x32, 0x1d, 0x2d, 0x8e, -+ 0xf1, 0xa6, 0xd6, 0x02, 0xa8, 0xc6, 0x5b, 0x83 }, -+ .public = { 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0x7f }, -+ .result = { 0x5d, 0x50, 0xb6, 0x28, 0x36, 0xbb, 0x69, 0x57, -+ 0x94, 0x10, 0x38, 0x6c, 0xf7, 0xbb, 0x81, 0x1c, -+ 0x14, 0xbf, 0x85, 0xb1, 0xc7, 0xb1, 0x7e, 0x59, -+ 0x24, 0xc7, 0xff, 0xea, 0x91, 0xef, 0x9e, 0x12 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case on twist */ -+ { -+ .private = { 0xc0, 0x1d, 0x13, 0x05, 0xa1, 0x33, 0x8a, 0x1f, -+ 0xca, 0xc2, 0xba, 0x7e, 0x2e, 0x03, 0x2b, 0x42, -+ 0x7e, 0x0b, 0x04, 0x90, 0x31, 0x65, 0xac, 0xa9, -+ 0x57, 0xd8, 0xd0, 0x55, 0x3d, 0x87, 0x17, 0xb0 }, -+ .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .result = { 0x19, 0x23, 0x0e, 0xb1, 0x48, 0xd5, 0xd6, 0x7c, -+ 0x3c, 0x22, 0xab, 0x1d, 0xae, 0xff, 0x80, 0xa5, -+ 0x7e, 0xae, 0x42, 0x65, 0xce, 0x28, 0x72, 0x65, -+ 0x7b, 0x2c, 0x80, 0x99, 0xfc, 0x69, 0x8e, 0x50 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for public key */ -+ { -+ .private = { 0x38, 0x6f, 
0x7f, 0x16, 0xc5, 0x07, 0x31, 0xd6, -+ 0x4f, 0x82, 0xe6, 0xa1, 0x70, 0xb1, 0x42, 0xa4, -+ 0xe3, 0x4f, 0x31, 0xfd, 0x77, 0x68, 0xfc, 0xb8, -+ 0x90, 0x29, 0x25, 0xe7, 0xd1, 0xe2, 0x1a, 0xbe }, -+ .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .result = { 0x0f, 0xca, 0xb5, 0xd8, 0x42, 0xa0, 0x78, 0xd7, -+ 0xa7, 0x1f, 0xc5, 0x9b, 0x57, 0xbf, 0xb4, 0xca, -+ 0x0b, 0xe6, 0x87, 0x3b, 0x49, 0xdc, 0xdb, 0x9f, -+ 0x44, 0xe1, 0x4a, 0xe8, 0xfb, 0xdf, 0xa5, 0x42 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for public key */ -+ { -+ .private = { 0xe0, 0x23, 0xa2, 0x89, 0xbd, 0x5e, 0x90, 0xfa, -+ 0x28, 0x04, 0xdd, 0xc0, 0x19, 0xa0, 0x5e, 0xf3, -+ 0xe7, 0x9d, 0x43, 0x4b, 0xb6, 0xea, 0x2f, 0x52, -+ 0x2e, 0xcb, 0x64, 0x3a, 0x75, 0x29, 0x6e, 0x95 }, -+ .public = { 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 }, -+ .result = { 0x54, 0xce, 0x8f, 0x22, 0x75, 0xc0, 0x77, 0xe3, -+ 0xb1, 0x30, 0x6a, 0x39, 0x39, 0xc5, 0xe0, 0x3e, -+ 0xef, 0x6b, 0xbb, 0x88, 0x06, 0x05, 0x44, 0x75, -+ 0x8d, 0x9f, 0xef, 0x59, 0xb0, 0xbc, 0x3e, 0x4f }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for public key */ -+ { -+ .private = { 0x68, 0xf0, 0x10, 0xd6, 0x2e, 0xe8, 0xd9, 0x26, -+ 0x05, 0x3a, 0x36, 0x1c, 0x3a, 0x75, 0xc6, 0xea, -+ 0x4e, 0xbd, 0xc8, 0x60, 0x6a, 0xb2, 0x85, 0x00, -+ 0x3a, 0x6f, 0x8f, 0x40, 0x76, 0xb0, 0x1e, 0x83 }, -+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 }, -+ .result = { 0xf1, 0x36, 0x77, 0x5c, 0x5b, 0xeb, 0x0a, 0xf8, -+ 0x11, 0x0a, 0xf1, 0x0b, 0x20, 0x37, 0x23, 0x32, -+ 0x04, 0x3c, 
0xab, 0x75, 0x24, 0x19, 0x67, 0x87, -+ 0x75, 0xa2, 0x23, 0xdf, 0x57, 0xc9, 0xd3, 0x0d }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for public key */ -+ { -+ .private = { 0x58, 0xeb, 0xcb, 0x35, 0xb0, 0xf8, 0x84, 0x5c, -+ 0xaf, 0x1e, 0xc6, 0x30, 0xf9, 0x65, 0x76, 0xb6, -+ 0x2c, 0x4b, 0x7b, 0x6c, 0x36, 0xb2, 0x9d, 0xeb, -+ 0x2c, 0xb0, 0x08, 0x46, 0x51, 0x75, 0x5c, 0x96 }, -+ .public = { 0xff, 0xff, 0xff, 0xfb, 0xff, 0xff, 0xfb, 0xff, -+ 0xff, 0xdf, 0xff, 0xff, 0xdf, 0xff, 0xff, 0xff, -+ 0xfe, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xf7, 0xff, -+ 0xff, 0xf7, 0xff, 0xff, 0xbf, 0xff, 0xff, 0x3f }, -+ .result = { 0xbf, 0x9a, 0xff, 0xd0, 0x6b, 0x84, 0x40, 0x85, -+ 0x58, 0x64, 0x60, 0x96, 0x2e, 0xf2, 0x14, 0x6f, -+ 0xf3, 0xd4, 0x53, 0x3d, 0x94, 0x44, 0xaa, 0xb0, -+ 0x06, 0xeb, 0x88, 0xcc, 0x30, 0x54, 0x40, 0x7d }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for public key */ -+ { -+ .private = { 0x18, 0x8c, 0x4b, 0xc5, 0xb9, 0xc4, 0x4b, 0x38, -+ 0xbb, 0x65, 0x8b, 0x9b, 0x2a, 0xe8, 0x2d, 0x5b, -+ 0x01, 0x01, 0x5e, 0x09, 0x31, 0x84, 0xb1, 0x7c, -+ 0xb7, 0x86, 0x35, 0x03, 0xa7, 0x83, 0xe1, 0xbb }, -+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, -+ .result = { 0xd4, 0x80, 0xde, 0x04, 0xf6, 0x99, 0xcb, 0x3b, -+ 0xe0, 0x68, 0x4a, 0x9c, 0xc2, 0xe3, 0x12, 0x81, -+ 0xea, 0x0b, 0xc5, 0xa9, 0xdc, 0xc1, 0x57, 0xd3, -+ 0xd2, 0x01, 0x58, 0xd4, 0x6c, 0xa5, 0x24, 0x6d }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for public key */ -+ { -+ .private = { 0xe0, 0x6c, 0x11, 0xbb, 0x2e, 0x13, 0xce, 0x3d, -+ 0xc7, 0x67, 0x3f, 0x67, 0xf5, 0x48, 0x22, 0x42, -+ 0x90, 0x94, 0x23, 0xa9, 0xae, 0x95, 0xee, 0x98, -+ 0x6a, 0x98, 0x8d, 0x98, 0xfa, 0xee, 0x23, 0xa2 }, -+ .public = { 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 
0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0x7f }, -+ .result = { 0x4c, 0x44, 0x01, 0xcc, 0xe6, 0xb5, 0x1e, 0x4c, -+ 0xb1, 0x8f, 0x27, 0x90, 0x24, 0x6c, 0x9b, 0xf9, -+ 0x14, 0xdb, 0x66, 0x77, 0x50, 0xa1, 0xcb, 0x89, -+ 0x06, 0x90, 0x92, 0xaf, 0x07, 0x29, 0x22, 0x76 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for public key */ -+ { -+ .private = { 0xc0, 0x65, 0x8c, 0x46, 0xdd, 0xe1, 0x81, 0x29, -+ 0x29, 0x38, 0x77, 0x53, 0x5b, 0x11, 0x62, 0xb6, -+ 0xf9, 0xf5, 0x41, 0x4a, 0x23, 0xcf, 0x4d, 0x2c, -+ 0xbc, 0x14, 0x0a, 0x4d, 0x99, 0xda, 0x2b, 0x8f }, -+ .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .result = { 0x57, 0x8b, 0xa8, 0xcc, 0x2d, 0xbd, 0xc5, 0x75, -+ 0xaf, 0xcf, 0x9d, 0xf2, 0xb3, 0xee, 0x61, 0x89, -+ 0xf5, 0x33, 0x7d, 0x68, 0x54, 0xc7, 0x9b, 0x4c, -+ 0xe1, 0x65, 0xea, 0x12, 0x29, 0x3b, 0x3a, 0x0f }, -+ .valid = true -+ }, -+ /* wycheproof - public key with low order */ -+ { -+ .private = { 0x10, 0x25, 0x5c, 0x92, 0x30, 0xa9, 0x7a, 0x30, -+ 0xa4, 0x58, 0xca, 0x28, 0x4a, 0x62, 0x96, 0x69, -+ 0x29, 0x3a, 0x31, 0x89, 0x0c, 0xda, 0x9d, 0x14, -+ 0x7f, 0xeb, 0xc7, 0xd1, 0xe2, 0x2d, 0x6b, 0xb1 }, -+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, -+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, -+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, -+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x00 }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key with low order */ -+ { -+ .private = { 0x78, 0xf1, 0xe8, 0xed, 0xf1, 0x44, 0x81, 0xb3, -+ 0x89, 0x44, 0x8d, 0xac, 0x8f, 0x59, 0xc7, 0x0b, -+ 0x03, 0x8e, 0x7c, 0xf9, 0x2e, 0xf2, 0xc7, 0xef, -+ 0xf5, 
0x7a, 0x72, 0x46, 0x6e, 0x11, 0x52, 0x96 }, -+ .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, -+ 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b, -+ 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86, -+ 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0x57 }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key with low order */ -+ { -+ .private = { 0xa0, 0xa0, 0x5a, 0x3e, 0x8f, 0x9f, 0x44, 0x20, -+ 0x4d, 0x5f, 0x80, 0x59, 0xa9, 0x4a, 0xc7, 0xdf, -+ 0xc3, 0x9a, 0x49, 0xac, 0x01, 0x6d, 0xd7, 0x43, -+ 0xdb, 0xfa, 0x43, 0xc5, 0xd6, 0x71, 0xfd, 0x88 }, -+ .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key with low order */ -+ { -+ .private = { 0xd0, 0xdb, 0xb3, 0xed, 0x19, 0x06, 0x66, 0x3f, -+ 0x15, 0x42, 0x0a, 0xf3, 0x1f, 0x4e, 0xaf, 0x65, -+ 0x09, 0xd9, 0xa9, 0x94, 0x97, 0x23, 0x50, 0x06, -+ 0x05, 0xad, 0x7c, 0x1c, 0x6e, 0x74, 0x50, 0xa9 }, -+ .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key 
with low order */ -+ { -+ .private = { 0xc0, 0xb1, 0xd0, 0xeb, 0x22, 0xb2, 0x44, 0xfe, -+ 0x32, 0x91, 0x14, 0x00, 0x72, 0xcd, 0xd9, 0xd9, -+ 0x89, 0xb5, 0xf0, 0xec, 0xd9, 0x6c, 0x10, 0x0f, -+ 0xeb, 0x5b, 0xca, 0x24, 0x1c, 0x1d, 0x9f, 0x8f }, -+ .public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key with low order */ -+ { -+ .private = { 0x48, 0x0b, 0xf4, 0x5f, 0x59, 0x49, 0x42, 0xa8, -+ 0xbc, 0x0f, 0x33, 0x53, 0xc6, 0xe8, 0xb8, 0x85, -+ 0x3d, 0x77, 0xf3, 0x51, 0xf1, 0xc2, 0xca, 0x6c, -+ 0x2d, 0x1a, 0xbf, 0x8a, 0x00, 0xb4, 0x22, 0x9c }, -+ .public = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key with low order */ -+ { -+ .private = { 0x30, 0xf9, 0x93, 0xfc, 0xf8, 0x51, 0x4f, 0xc8, -+ 0x9b, 0xd8, 0xdb, 0x14, 0xcd, 0x43, 0xba, 0x0d, -+ 0x4b, 0x25, 0x30, 0xe7, 0x3c, 0x42, 0x76, 0xa0, -+ 0x5e, 0x1b, 0x14, 0x5d, 0x42, 0x0c, 0xed, 0xb4 }, -+ .public = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key with low order */ -+ { -+ .private = { 0xc0, 0x49, 0x74, 0xb7, 0x58, 0x38, 0x0e, 0x2a, -+ 0x5b, 0x5d, 0xf6, 0xeb, 0x09, 0xbb, 0x2f, 0x6b, -+ 0x34, 0x34, 0xf9, 0x82, 0x72, 0x2a, 0x8e, 0x67, -+ 0x6d, 0x3d, 0xa2, 0x51, 0xd1, 0xb3, 0xde, 0x83 }, -+ .public = { 0xe0, 0xeb, 0x7a, 0x7c, 0x3b, 0x41, 0xb8, 0xae, -+ 0x16, 0x56, 0xe3, 0xfa, 0xf1, 0x9f, 0xc4, 0x6a, -+ 0xda, 0x09, 0x8d, 0xeb, 0x9c, 0x32, 0xb1, 0xfd, -+ 0x86, 0x62, 0x05, 0x16, 0x5f, 0x49, 0xb8, 0x80 }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key with low order */ -+ { -+ .private = { 0x50, 0x2a, 0x31, 0x37, 0x3d, 0xb3, 0x24, 0x46, -+ 0x84, 0x2f, 0xe5, 0xad, 0xd3, 0xe0, 0x24, 0x02, -+ 0x2e, 0xa5, 0x4f, 0x27, 0x41, 0x82, 0xaf, 0xc3, -+ 0xd9, 0xf1, 0xbb, 0x3d, 0x39, 0x53, 0x4e, 0xb5 }, -+ .public = { 0x5f, 0x9c, 0x95, 0xbc, 0xa3, 0x50, 0x8c, 0x24, -+ 0xb1, 0xd0, 0xb1, 0x55, 0x9c, 0x83, 0xef, 0x5b, -+ 0x04, 0x44, 0x5c, 0xc4, 0x58, 0x1c, 0x8e, 0x86, -+ 0xd8, 0x22, 0x4e, 0xdd, 0xd0, 0x9f, 0x11, 0xd7 }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key with low order */ -+ { -+ .private = { 0x90, 0xfa, 0x64, 0x17, 0xb0, 0xe3, 0x70, 0x30, -+ 0xfd, 0x6e, 0x43, 0xef, 0xf2, 0xab, 0xae, 0xf1, -+ 0x4c, 0x67, 0x93, 0x11, 0x7a, 0x03, 0x9c, 0xf6, -+ 0x21, 0x31, 0x8b, 0xa9, 0x0f, 0x4e, 0x98, 0xbe }, -+ .public = { 0xec, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key with low order */ -+ { -+ .private = { 0x78, 0xad, 0x3f, 0x26, 0x02, 0x7f, 0x1c, 0x9f, -+ 0xdd, 0x97, 0x5a, 0x16, 0x13, 0xb9, 0x47, 0x77, -+ 0x9b, 0xad, 0x2c, 0xf2, 0xb7, 0x41, 0xad, 0xe0, -+ 0x18, 0x40, 0x88, 0x5a, 0x30, 0xbb, 0x97, 0x9c }, -+ .public = { 0xed, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key with low order */ -+ { -+ .private = { 0x98, 0xe2, 0x3d, 0xe7, 0xb1, 0xe0, 0x92, 0x6e, -+ 0xd9, 0xc8, 0x7e, 0x7b, 0x14, 0xba, 0xf5, 0x5f, -+ 0x49, 0x7a, 0x1d, 0x70, 0x96, 0xf9, 0x39, 0x77, -+ 0x68, 0x0e, 0x44, 0xdc, 0x1c, 0x7b, 0x7b, 0x8b }, -+ .public = { 0xee, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = false -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0xf0, 0x1e, 0x48, 0xda, 0xfa, 0xc9, 0xd7, 0xbc, -+ 0xf5, 0x89, 0xcb, 0xc3, 0x82, 0xc8, 0x78, 0xd1, -+ 
0x8b, 0xda, 0x35, 0x50, 0x58, 0x9f, 0xfb, 0x5d, -+ 0x50, 0xb5, 0x23, 0xbe, 0xbe, 0x32, 0x9d, 0xae }, -+ .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .result = { 0xbd, 0x36, 0xa0, 0x79, 0x0e, 0xb8, 0x83, 0x09, -+ 0x8c, 0x98, 0x8b, 0x21, 0x78, 0x67, 0x73, 0xde, -+ 0x0b, 0x3a, 0x4d, 0xf1, 0x62, 0x28, 0x2c, 0xf1, -+ 0x10, 0xde, 0x18, 0xdd, 0x48, 0x4c, 0xe7, 0x4b }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0x28, 0x87, 0x96, 0xbc, 0x5a, 0xff, 0x4b, 0x81, -+ 0xa3, 0x75, 0x01, 0x75, 0x7b, 0xc0, 0x75, 0x3a, -+ 0x3c, 0x21, 0x96, 0x47, 0x90, 0xd3, 0x86, 0x99, -+ 0x30, 0x8d, 0xeb, 0xc1, 0x7a, 0x6e, 0xaf, 0x8d }, -+ .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .result = { 0xb4, 0xe0, 0xdd, 0x76, 0xda, 0x7b, 0x07, 0x17, -+ 0x28, 0xb6, 0x1f, 0x85, 0x67, 0x71, 0xaa, 0x35, -+ 0x6e, 0x57, 0xed, 0xa7, 0x8a, 0x5b, 0x16, 0x55, -+ 0xcc, 0x38, 0x20, 0xfb, 0x5f, 0x85, 0x4c, 0x5c }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0x98, 0xdf, 0x84, 0x5f, 0x66, 0x51, 0xbf, 0x11, -+ 0x38, 0x22, 0x1f, 0x11, 0x90, 0x41, 0xf7, 0x2b, -+ 0x6d, 0xbc, 0x3c, 0x4a, 0xce, 0x71, 0x43, 0xd9, -+ 0x9f, 0xd5, 0x5a, 0xd8, 0x67, 0x48, 0x0d, 0xa8 }, -+ .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .result = { 0x6f, 0xdf, 0x6c, 0x37, 0x61, 0x1d, 0xbd, 0x53, -+ 0x04, 0xdc, 0x0f, 0x2e, 0xb7, 0xc9, 0x51, 0x7e, -+ 0xb3, 0xc5, 0x0e, 0x12, 0xfd, 0x05, 0x0a, 0xc6, -+ 0xde, 0xc2, 0x70, 0x71, 0xd4, 0xbf, 0xc0, 0x34 }, -+ .valid = true -+ 
}, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0xf0, 0x94, 0x98, 0xe4, 0x6f, 0x02, 0xf8, 0x78, -+ 0x82, 0x9e, 0x78, 0xb8, 0x03, 0xd3, 0x16, 0xa2, -+ 0xed, 0x69, 0x5d, 0x04, 0x98, 0xa0, 0x8a, 0xbd, -+ 0xf8, 0x27, 0x69, 0x30, 0xe2, 0x4e, 0xdc, 0xb0 }, -+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .result = { 0x4c, 0x8f, 0xc4, 0xb1, 0xc6, 0xab, 0x88, 0xfb, -+ 0x21, 0xf1, 0x8f, 0x6d, 0x4c, 0x81, 0x02, 0x40, -+ 0xd4, 0xe9, 0x46, 0x51, 0xba, 0x44, 0xf7, 0xa2, -+ 0xc8, 0x63, 0xce, 0xc7, 0xdc, 0x56, 0x60, 0x2d }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0x18, 0x13, 0xc1, 0x0a, 0x5c, 0x7f, 0x21, 0xf9, -+ 0x6e, 0x17, 0xf2, 0x88, 0xc0, 0xcc, 0x37, 0x60, -+ 0x7c, 0x04, 0xc5, 0xf5, 0xae, 0xa2, 0xdb, 0x13, -+ 0x4f, 0x9e, 0x2f, 0xfc, 0x66, 0xbd, 0x9d, 0xb8 }, -+ .public = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, -+ .result = { 0x1c, 0xd0, 0xb2, 0x82, 0x67, 0xdc, 0x54, 0x1c, -+ 0x64, 0x2d, 0x6d, 0x7d, 0xca, 0x44, 0xa8, 0xb3, -+ 0x8a, 0x63, 0x73, 0x6e, 0xef, 0x5c, 0x4e, 0x65, -+ 0x01, 0xff, 0xbb, 0xb1, 0x78, 0x0c, 0x03, 0x3c }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0x78, 0x57, 0xfb, 0x80, 0x86, 0x53, 0x64, 0x5a, -+ 0x0b, 0xeb, 0x13, 0x8a, 0x64, 0xf5, 0xf4, 0xd7, -+ 0x33, 0xa4, 0x5e, 0xa8, 0x4c, 0x3c, 0xda, 0x11, -+ 0xa9, 0xc0, 0x6f, 0x7e, 0x71, 0x39, 0x14, 0x9e }, -+ .public = { 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, -+ .result = { 0x87, 0x55, 0xbe, 0x01, 0xc6, 0x0a, 0x7e, 0x82, -+ 0x5c, 
0xff, 0x3e, 0x0e, 0x78, 0xcb, 0x3a, 0xa4, -+ 0x33, 0x38, 0x61, 0x51, 0x6a, 0xa5, 0x9b, 0x1c, -+ 0x51, 0xa8, 0xb2, 0xa5, 0x43, 0xdf, 0xa8, 0x22 }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0xe0, 0x3a, 0xa8, 0x42, 0xe2, 0xab, 0xc5, 0x6e, -+ 0x81, 0xe8, 0x7b, 0x8b, 0x9f, 0x41, 0x7b, 0x2a, -+ 0x1e, 0x59, 0x13, 0xc7, 0x23, 0xee, 0xd2, 0x8d, -+ 0x75, 0x2f, 0x8d, 0x47, 0xa5, 0x9f, 0x49, 0x8f }, -+ .public = { 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80 }, -+ .result = { 0x54, 0xc9, 0xa1, 0xed, 0x95, 0xe5, 0x46, 0xd2, -+ 0x78, 0x22, 0xa3, 0x60, 0x93, 0x1d, 0xda, 0x60, -+ 0xa1, 0xdf, 0x04, 0x9d, 0xa6, 0xf9, 0x04, 0x25, -+ 0x3c, 0x06, 0x12, 0xbb, 0xdc, 0x08, 0x74, 0x76 }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0xf8, 0xf7, 0x07, 0xb7, 0x99, 0x9b, 0x18, 0xcb, -+ 0x0d, 0x6b, 0x96, 0x12, 0x4f, 0x20, 0x45, 0x97, -+ 0x2c, 0xa2, 0x74, 0xbf, 0xc1, 0x54, 0xad, 0x0c, -+ 0x87, 0x03, 0x8c, 0x24, 0xc6, 0xd0, 0xd4, 0xb2 }, -+ .public = { 0xda, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0xcc, 0x1f, 0x40, 0xd7, 0x43, 0xcd, 0xc2, 0x23, -+ 0x0e, 0x10, 0x43, 0xda, 0xba, 0x8b, 0x75, 0xe8, -+ 0x10, 0xf1, 0xfb, 0xab, 0x7f, 0x25, 0x52, 0x69, -+ 0xbd, 0x9e, 0xbb, 0x29, 0xe6, 0xbf, 0x49, 0x4f }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0xa0, 0x34, 0xf6, 0x84, 0xfa, 0x63, 0x1e, 0x1a, -+ 0x34, 0x81, 0x18, 0xc1, 0xce, 0x4c, 0x98, 0x23, -+ 0x1f, 0x2d, 0x9e, 0xec, 0x9b, 0xa5, 0x36, 0x5b, -+ 0x4a, 0x05, 0xd6, 0x9a, 0x78, 0x5b, 0x07, 0x96 }, -+ .public = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0x54, 0x99, 0x8e, 0xe4, 0x3a, 0x5b, 0x00, 0x7b, -+ 0xf4, 0x99, 0xf0, 0x78, 0xe7, 0x36, 0x52, 0x44, -+ 0x00, 0xa8, 0xb5, 0xc7, 0xe9, 0xb9, 0xb4, 0x37, -+ 0x71, 0x74, 0x8c, 0x7c, 0xdf, 0x88, 0x04, 0x12 }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0x30, 0xb6, 0xc6, 0xa0, 0xf2, 0xff, 0xa6, 0x80, -+ 0x76, 0x8f, 0x99, 0x2b, 0xa8, 0x9e, 0x15, 0x2d, -+ 0x5b, 0xc9, 0x89, 0x3d, 0x38, 0xc9, 0x11, 0x9b, -+ 0xe4, 0xf7, 0x67, 0xbf, 0xab, 0x6e, 0x0c, 0xa5 }, -+ .public = { 0xdc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0xea, 0xd9, 0xb3, 0x8e, 0xfd, 0xd7, 0x23, 0x63, -+ 0x79, 0x34, 0xe5, 0x5a, 0xb7, 0x17, 0xa7, 0xae, -+ 0x09, 0xeb, 0x86, 0xa2, 0x1d, 0xc3, 0x6a, 0x3f, -+ 0xee, 0xb8, 0x8b, 0x75, 0x9e, 0x39, 0x1e, 0x09 }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0x90, 0x1b, 0x9d, 0xcf, 0x88, 0x1e, 0x01, 0xe0, -+ 0x27, 0x57, 0x50, 0x35, 0xd4, 0x0b, 0x43, 0xbd, -+ 0xc1, 0xc5, 0x24, 0x2e, 0x03, 0x08, 0x47, 0x49, -+ 0x5b, 0x0c, 0x72, 0x86, 0x46, 0x9b, 0x65, 0x91 }, -+ .public = { 0xea, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0x60, 0x2f, 0xf4, 0x07, 0x89, 0xb5, 0x4b, 0x41, -+ 0x80, 0x59, 0x15, 0xfe, 0x2a, 0x62, 0x21, 0xf0, -+ 0x7a, 0x50, 0xff, 0xc2, 0xc3, 0xfc, 0x94, 0xcf, -+ 0x61, 0xf1, 0x3d, 0x79, 0x04, 0xe8, 0x8e, 0x0e }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0x80, 0x46, 0x67, 0x7c, 0x28, 0xfd, 0x82, 0xc9, -+ 0xa1, 0xbd, 0xb7, 0x1a, 0x1a, 0x1a, 0x34, 0xfa, -+ 0xba, 0x12, 0x25, 0xe2, 0x50, 0x7f, 0xe3, 0xf5, -+ 0x4d, 
0x10, 0xbd, 0x5b, 0x0d, 0x86, 0x5f, 0x8e }, -+ .public = { 0xeb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0xe0, 0x0a, 0xe8, 0xb1, 0x43, 0x47, 0x12, 0x47, -+ 0xba, 0x24, 0xf1, 0x2c, 0x88, 0x55, 0x36, 0xc3, -+ 0xcb, 0x98, 0x1b, 0x58, 0xe1, 0xe5, 0x6b, 0x2b, -+ 0xaf, 0x35, 0xc1, 0x2a, 0xe1, 0xf7, 0x9c, 0x26 }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0x60, 0x2f, 0x7e, 0x2f, 0x68, 0xa8, 0x46, 0xb8, -+ 0x2c, 0xc2, 0x69, 0xb1, 0xd4, 0x8e, 0x93, 0x98, -+ 0x86, 0xae, 0x54, 0xfd, 0x63, 0x6c, 0x1f, 0xe0, -+ 0x74, 0xd7, 0x10, 0x12, 0x7d, 0x47, 0x24, 0x91 }, -+ .public = { 0xef, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0x98, 0xcb, 0x9b, 0x50, 0xdd, 0x3f, 0xc2, 0xb0, -+ 0xd4, 0xf2, 0xd2, 0xbf, 0x7c, 0x5c, 0xfd, 0xd1, -+ 0x0c, 0x8f, 0xcd, 0x31, 0xfc, 0x40, 0xaf, 0x1a, -+ 0xd4, 0x4f, 0x47, 0xc1, 0x31, 0x37, 0x63, 0x62 }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0x60, 0x88, 0x7b, 0x3d, 0xc7, 0x24, 0x43, 0x02, -+ 0x6e, 0xbe, 0xdb, 0xbb, 0xb7, 0x06, 0x65, 0xf4, -+ 0x2b, 0x87, 0xad, 0xd1, 0x44, 0x0e, 0x77, 0x68, -+ 0xfb, 0xd7, 0xe8, 0xe2, 0xce, 0x5f, 0x63, 0x9d }, -+ .public = { 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0x38, 0xd6, 0x30, 0x4c, 0x4a, 0x7e, 0x6d, 0x9f, -+ 0x79, 0x59, 0x33, 0x4f, 0xb5, 0x24, 0x5b, 0xd2, -+ 0xc7, 0x54, 0x52, 0x5d, 0x4c, 0x91, 0xdb, 0x95, -+ 0x02, 0x06, 0x92, 0x62, 0x34, 0xc1, 0xf6, 0x33 }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private 
= { 0x78, 0xd3, 0x1d, 0xfa, 0x85, 0x44, 0x97, 0xd7, -+ 0x2d, 0x8d, 0xef, 0x8a, 0x1b, 0x7f, 0xb0, 0x06, -+ 0xce, 0xc2, 0xd8, 0xc4, 0x92, 0x46, 0x47, 0xc9, -+ 0x38, 0x14, 0xae, 0x56, 0xfa, 0xed, 0xa4, 0x95 }, -+ .public = { 0xf1, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0x78, 0x6c, 0xd5, 0x49, 0x96, 0xf0, 0x14, 0xa5, -+ 0xa0, 0x31, 0xec, 0x14, 0xdb, 0x81, 0x2e, 0xd0, -+ 0x83, 0x55, 0x06, 0x1f, 0xdb, 0x5d, 0xe6, 0x80, -+ 0xa8, 0x00, 0xac, 0x52, 0x1f, 0x31, 0x8e, 0x23 }, -+ .valid = true -+ }, -+ /* wycheproof - public key >= p */ -+ { -+ .private = { 0xc0, 0x4c, 0x5b, 0xae, 0xfa, 0x83, 0x02, 0xdd, -+ 0xde, 0xd6, 0xa4, 0xbb, 0x95, 0x77, 0x61, 0xb4, -+ 0xeb, 0x97, 0xae, 0xfa, 0x4f, 0xc3, 0xb8, 0x04, -+ 0x30, 0x85, 0xf9, 0x6a, 0x56, 0x59, 0xb3, 0xa5 }, -+ .public = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, -+ .result = { 0x29, 0xae, 0x8b, 0xc7, 0x3e, 0x9b, 0x10, 0xa0, -+ 0x8b, 0x4f, 0x68, 0x1c, 0x43, 0xc3, 0xe0, 0xac, -+ 0x1a, 0x17, 0x1d, 0x31, 0xb3, 0x8f, 0x1a, 0x48, -+ 0xef, 0xba, 0x29, 0xae, 0x63, 0x9e, 0xa1, 0x34 }, -+ .valid = true -+ }, -+ /* wycheproof - RFC 7748 */ -+ { -+ .private = { 0xa0, 0x46, 0xe3, 0x6b, 0xf0, 0x52, 0x7c, 0x9d, -+ 0x3b, 0x16, 0x15, 0x4b, 0x82, 0x46, 0x5e, 0xdd, -+ 0x62, 0x14, 0x4c, 0x0a, 0xc1, 0xfc, 0x5a, 0x18, -+ 0x50, 0x6a, 0x22, 0x44, 0xba, 0x44, 0x9a, 0x44 }, -+ .public = { 0xe6, 0xdb, 0x68, 0x67, 0x58, 0x30, 0x30, 0xdb, -+ 0x35, 0x94, 0xc1, 0xa4, 0x24, 0xb1, 0x5f, 0x7c, -+ 0x72, 0x66, 0x24, 0xec, 0x26, 0xb3, 0x35, 0x3b, -+ 0x10, 0xa9, 0x03, 0xa6, 0xd0, 0xab, 0x1c, 0x4c }, -+ .result = { 0xc3, 0xda, 0x55, 0x37, 0x9d, 0xe9, 0xc6, 0x90, -+ 0x8e, 0x94, 0xea, 0x4d, 0xf2, 0x8d, 0x08, 0x4f, -+ 0x32, 0xec, 0xcf, 
0x03, 0x49, 0x1c, 0x71, 0xf7, -+ 0x54, 0xb4, 0x07, 0x55, 0x77, 0xa2, 0x85, 0x52 }, -+ .valid = true -+ }, -+ /* wycheproof - RFC 7748 */ -+ { -+ .private = { 0x48, 0x66, 0xe9, 0xd4, 0xd1, 0xb4, 0x67, 0x3c, -+ 0x5a, 0xd2, 0x26, 0x91, 0x95, 0x7d, 0x6a, 0xf5, -+ 0xc1, 0x1b, 0x64, 0x21, 0xe0, 0xea, 0x01, 0xd4, -+ 0x2c, 0xa4, 0x16, 0x9e, 0x79, 0x18, 0xba, 0x4d }, -+ .public = { 0xe5, 0x21, 0x0f, 0x12, 0x78, 0x68, 0x11, 0xd3, -+ 0xf4, 0xb7, 0x95, 0x9d, 0x05, 0x38, 0xae, 0x2c, -+ 0x31, 0xdb, 0xe7, 0x10, 0x6f, 0xc0, 0x3c, 0x3e, -+ 0xfc, 0x4c, 0xd5, 0x49, 0xc7, 0x15, 0xa4, 0x13 }, -+ .result = { 0x95, 0xcb, 0xde, 0x94, 0x76, 0xe8, 0x90, 0x7d, -+ 0x7a, 0xad, 0xe4, 0x5c, 0xb4, 0xb8, 0x73, 0xf8, -+ 0x8b, 0x59, 0x5a, 0x68, 0x79, 0x9f, 0xa1, 0x52, -+ 0xe6, 0xf8, 0xf7, 0x64, 0x7a, 0xac, 0x79, 0x57 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0x0a, 0xb4, 0xe7, 0x63, 0x80, 0xd8, 0x4d, 0xde, -+ 0x4f, 0x68, 0x33, 0xc5, 0x8f, 0x2a, 0x9f, 0xb8, -+ 0xf8, 0x3b, 0xb0, 0x16, 0x9b, 0x17, 0x2b, 0xe4, -+ 0xb6, 0xe0, 0x59, 0x28, 0x87, 0x74, 0x1a, 0x36 }, -+ .result = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0x89, 0xe1, 0x0d, 0x57, 0x01, 0xb4, 0x33, 0x7d, -+ 0x2d, 0x03, 0x21, 0x81, 0x53, 0x8b, 0x10, 0x64, -+ 0xbd, 0x40, 0x84, 0x40, 0x1c, 0xec, 0xa1, 0xfd, -+ 
0x12, 0x66, 0x3a, 0x19, 0x59, 0x38, 0x80, 0x00 }, -+ .result = { 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0x2b, 0x55, 0xd3, 0xaa, 0x4a, 0x8f, 0x80, 0xc8, -+ 0xc0, 0xb2, 0xae, 0x5f, 0x93, 0x3e, 0x85, 0xaf, -+ 0x49, 0xbe, 0xac, 0x36, 0xc2, 0xfa, 0x73, 0x94, -+ 0xba, 0xb7, 0x6c, 0x89, 0x33, 0xf8, 0xf8, 0x1d }, -+ .result = { 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0x63, 0xe5, 0xb1, 0xfe, 0x96, 0x01, 0xfe, 0x84, -+ 0x38, 0x5d, 0x88, 0x66, 0xb0, 0x42, 0x12, 0x62, -+ 0xf7, 0x8f, 0xbf, 0xa5, 0xaf, 0xf9, 0x58, 0x5e, -+ 0x62, 0x66, 0x79, 0xb1, 0x85, 0x47, 0xd9, 0x59 }, -+ .result = { 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 
0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0xe4, 0x28, 0xf3, 0xda, 0xc1, 0x78, 0x09, 0xf8, -+ 0x27, 0xa5, 0x22, 0xce, 0x32, 0x35, 0x50, 0x58, -+ 0xd0, 0x73, 0x69, 0x36, 0x4a, 0xa7, 0x89, 0x02, -+ 0xee, 0x10, 0x13, 0x9b, 0x9f, 0x9d, 0xd6, 0x53 }, -+ .result = { 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0xb3, 0xb5, 0x0e, 0x3e, 0xd3, 0xa4, 0x07, 0xb9, -+ 0x5d, 0xe9, 0x42, 0xef, 0x74, 0x57, 0x5b, 0x5a, -+ 0xb8, 0xa1, 0x0c, 0x09, 0xee, 0x10, 0x35, 0x44, -+ 0xd6, 0x0b, 0xdf, 0xed, 0x81, 0x38, 0xab, 0x2b }, -+ .result = { 0xf9, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0x21, 0x3f, 0xff, 0xe9, 0x3d, 0x5e, 0xa8, 0xcd, -+ 0x24, 0x2e, 0x46, 0x28, 0x44, 0x02, 0x99, 0x22, -+ 0xc4, 0x3c, 0x77, 0xc9, 0xe3, 0xe4, 0x2f, 0x56, -+ 0x2f, 0x48, 0x5d, 0x24, 0xc5, 0x01, 0xa2, 0x0b }, -+ .result = { 0xf3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared 
secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0x91, 0xb2, 0x32, 0xa1, 0x78, 0xb3, 0xcd, 0x53, -+ 0x09, 0x32, 0x44, 0x1e, 0x61, 0x39, 0x41, 0x8f, -+ 0x72, 0x17, 0x22, 0x92, 0xf1, 0xda, 0x4c, 0x18, -+ 0x34, 0xfc, 0x5e, 0xbf, 0xef, 0xb5, 0x1e, 0x3f }, -+ .result = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0x04, 0x5c, 0x6e, 0x11, 0xc5, 0xd3, 0x32, 0x55, -+ 0x6c, 0x78, 0x22, 0xfe, 0x94, 0xeb, 0xf8, 0x9b, -+ 0x56, 0xa3, 0x87, 0x8d, 0xc2, 0x7c, 0xa0, 0x79, -+ 0x10, 0x30, 0x58, 0x84, 0x9f, 0xab, 0xcb, 0x4f }, -+ .result = { 0xe5, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0x1c, 0xa2, 0x19, 0x0b, 0x71, 0x16, 0x35, 0x39, -+ 0x06, 0x3c, 0x35, 0x77, 0x3b, 0xda, 0x0c, 0x9c, -+ 0x92, 0x8e, 0x91, 0x36, 0xf0, 0x62, 0x0a, 0xeb, -+ 0x09, 0x3f, 0x09, 0x91, 0x97, 0xb7, 0xf7, 0x4e }, -+ .result = { 0xe3, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0xf7, 0x6e, 0x90, 0x10, 0xac, 0x33, 0xc5, 0x04, -+ 0x3b, 0x2d, 0x3b, 0x76, 0xa8, 0x42, 0x17, 0x10, -+ 0x00, 0xc4, 0x91, 0x62, 0x22, 0xe9, 0xe8, 0x58, -+ 0x97, 0xa0, 0xae, 0xc7, 0xf6, 0x35, 0x0b, 0x3c }, -+ .result = { 0xdd, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0xbb, 0x72, 0x68, 0x8d, 0x8f, 0x8a, 0xa7, 0xa3, -+ 0x9c, 0xd6, 0x06, 0x0c, 0xd5, 0xc8, 0x09, 0x3c, -+ 0xde, 0xc6, 0xfe, 0x34, 0x19, 0x37, 0xc3, 0x88, -+ 0x6a, 0x99, 0x34, 0x6c, 0xd0, 0x7f, 0xaa, 0x55 }, -+ .result = { 0xdb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0x88, 0xfd, 0xde, 0xa1, 0x93, 0x39, 0x1c, 0x6a, -+ 0x59, 0x33, 0xef, 0x9b, 0x71, 
0x90, 0x15, 0x49, -+ 0x44, 0x72, 0x05, 0xaa, 0xe9, 0xda, 0x92, 0x8a, -+ 0x6b, 0x91, 0xa3, 0x52, 0xba, 0x10, 0xf4, 0x1f }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02 }, -+ .valid = true -+ }, -+ /* wycheproof - edge case for shared secret */ -+ { -+ .private = { 0xa0, 0xa4, 0xf1, 0x30, 0xb9, 0x8a, 0x5b, 0xe4, -+ 0xb1, 0xce, 0xdb, 0x7c, 0xb8, 0x55, 0x84, 0xa3, -+ 0x52, 0x0e, 0x14, 0x2d, 0x47, 0x4d, 0xc9, 0xcc, -+ 0xb9, 0x09, 0xa0, 0x73, 0xa9, 0x76, 0xbf, 0x63 }, -+ .public = { 0x30, 0x3b, 0x39, 0x2f, 0x15, 0x31, 0x16, 0xca, -+ 0xd9, 0xcc, 0x68, 0x2a, 0x00, 0xcc, 0xc4, 0x4c, -+ 0x95, 0xff, 0x0d, 0x3b, 0xbe, 0x56, 0x8b, 0xeb, -+ 0x6c, 0x4e, 0x73, 0x9b, 0xaf, 0xdc, 0x2c, 0x68 }, -+ .result = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00 }, -+ .valid = true -+ }, -+ /* wycheproof - checking for overflow */ -+ { -+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, -+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, -+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, -+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, -+ .public = { 0xfd, 0x30, 0x0a, 0xeb, 0x40, 0xe1, 0xfa, 0x58, -+ 0x25, 0x18, 0x41, 0x2b, 0x49, 0xb2, 0x08, 0xa7, -+ 0x84, 0x2b, 0x1e, 0x1f, 0x05, 0x6a, 0x04, 0x01, -+ 0x78, 0xea, 0x41, 0x41, 0x53, 0x4f, 0x65, 0x2d }, -+ .result = { 0xb7, 0x34, 0x10, 0x5d, 0xc2, 0x57, 0x58, 0x5d, -+ 0x73, 0xb5, 0x66, 0xcc, 0xb7, 0x6f, 0x06, 0x27, -+ 0x95, 0xcc, 0xbe, 0xc8, 0x91, 0x28, 0xe5, 0x2b, -+ 0x02, 0xf3, 0xe5, 0x96, 0x39, 0xf1, 0x3c, 0x46 }, -+ .valid = true -+ }, -+ /* wycheproof - checking for overflow */ -+ { -+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, -+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, -+ 0x56, 
0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, -+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, -+ .public = { 0xc8, 0xef, 0x79, 0xb5, 0x14, 0xd7, 0x68, 0x26, -+ 0x77, 0xbc, 0x79, 0x31, 0xe0, 0x6e, 0xe5, 0xc2, -+ 0x7c, 0x9b, 0x39, 0x2b, 0x4a, 0xe9, 0x48, 0x44, -+ 0x73, 0xf5, 0x54, 0xe6, 0x67, 0x8e, 0xcc, 0x2e }, -+ .result = { 0x64, 0x7a, 0x46, 0xb6, 0xfc, 0x3f, 0x40, 0xd6, -+ 0x21, 0x41, 0xee, 0x3c, 0xee, 0x70, 0x6b, 0x4d, -+ 0x7a, 0x92, 0x71, 0x59, 0x3a, 0x7b, 0x14, 0x3e, -+ 0x8e, 0x2e, 0x22, 0x79, 0x88, 0x3e, 0x45, 0x50 }, -+ .valid = true -+ }, -+ /* wycheproof - checking for overflow */ -+ { -+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, -+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, -+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, -+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, -+ .public = { 0x64, 0xae, 0xac, 0x25, 0x04, 0x14, 0x48, 0x61, -+ 0x53, 0x2b, 0x7b, 0xbc, 0xb6, 0xc8, 0x7d, 0x67, -+ 0xdd, 0x4c, 0x1f, 0x07, 0xeb, 0xc2, 0xe0, 0x6e, -+ 0xff, 0xb9, 0x5a, 0xec, 0xc6, 0x17, 0x0b, 0x2c }, -+ .result = { 0x4f, 0xf0, 0x3d, 0x5f, 0xb4, 0x3c, 0xd8, 0x65, -+ 0x7a, 0x3c, 0xf3, 0x7c, 0x13, 0x8c, 0xad, 0xce, -+ 0xcc, 0xe5, 0x09, 0xe4, 0xeb, 0xa0, 0x89, 0xd0, -+ 0xef, 0x40, 0xb4, 0xe4, 0xfb, 0x94, 0x61, 0x55 }, -+ .valid = true -+ }, -+ /* wycheproof - checking for overflow */ -+ { -+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, -+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, -+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, -+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, -+ .public = { 0xbf, 0x68, 0xe3, 0x5e, 0x9b, 0xdb, 0x7e, 0xee, -+ 0x1b, 0x50, 0x57, 0x02, 0x21, 0x86, 0x0f, 0x5d, -+ 0xcd, 0xad, 0x8a, 0xcb, 0xab, 0x03, 0x1b, 0x14, -+ 0x97, 0x4c, 0xc4, 0x90, 0x13, 0xc4, 0x98, 0x31 }, -+ .result = { 0x21, 0xce, 0xe5, 0x2e, 0xfd, 0xbc, 0x81, 0x2e, -+ 0x1d, 0x02, 0x1a, 0x4a, 0xf1, 0xe1, 0xd8, 0xbc, -+ 0x4d, 0xb3, 0xc4, 0x00, 0xe4, 0xd2, 0xa2, 0xc5, -+ 0x6a, 0x39, 0x26, 0xdb, 0x4d, 0x99, 0xc6, 0x5b }, -+ .valid = 
true -+ }, -+ /* wycheproof - checking for overflow */ -+ { -+ .private = { 0xc8, 0x17, 0x24, 0x70, 0x40, 0x00, 0xb2, 0x6d, -+ 0x31, 0x70, 0x3c, 0xc9, 0x7e, 0x3a, 0x37, 0x8d, -+ 0x56, 0xfa, 0xd8, 0x21, 0x93, 0x61, 0xc8, 0x8c, -+ 0xca, 0x8b, 0xd7, 0xc5, 0x71, 0x9b, 0x12, 0xb2 }, -+ .public = { 0x53, 0x47, 0xc4, 0x91, 0x33, 0x1a, 0x64, 0xb4, -+ 0x3d, 0xdc, 0x68, 0x30, 0x34, 0xe6, 0x77, 0xf5, -+ 0x3d, 0xc3, 0x2b, 0x52, 0xa5, 0x2a, 0x57, 0x7c, -+ 0x15, 0xa8, 0x3b, 0xf2, 0x98, 0xe9, 0x9f, 0x19 }, -+ .result = { 0x18, 0xcb, 0x89, 0xe4, 0xe2, 0x0c, 0x0c, 0x2b, -+ 0xd3, 0x24, 0x30, 0x52, 0x45, 0x26, 0x6c, 0x93, -+ 0x27, 0x69, 0x0b, 0xbe, 0x79, 0xac, 0xb8, 0x8f, -+ 0x5b, 0x8f, 0xb3, 0xf7, 0x4e, 0xca, 0x3e, 0x52 }, -+ .valid = true -+ }, -+ /* wycheproof - private key == -1 (mod order) */ -+ { -+ .private = { 0xa0, 0x23, 0xcd, 0xd0, 0x83, 0xef, 0x5b, 0xb8, -+ 0x2f, 0x10, 0xd6, 0x2e, 0x59, 0xe1, 0x5a, 0x68, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50 }, -+ .public = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e, -+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57, -+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f, -+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 }, -+ .result = { 0x25, 0x8e, 0x04, 0x52, 0x3b, 0x8d, 0x25, 0x3e, -+ 0xe6, 0x57, 0x19, 0xfc, 0x69, 0x06, 0xc6, 0x57, -+ 0x19, 0x2d, 0x80, 0x71, 0x7e, 0xdc, 0x82, 0x8f, -+ 0xa0, 0xaf, 0x21, 0x68, 0x6e, 0x2f, 0xaa, 0x75 }, -+ .valid = true -+ }, -+ /* wycheproof - private key == 1 (mod order) on twist */ -+ { -+ .private = { 0x58, 0x08, 0x3d, 0xd2, 0x61, 0xad, 0x91, 0xef, -+ 0xf9, 0x52, 0x32, 0x2e, 0xc8, 0x24, 0xc6, 0x82, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x5f }, -+ .public = { 0x2e, 0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f, -+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6, -+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64, -+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 }, -+ .result = { 0x2e, 
0xae, 0x5e, 0xc3, 0xdd, 0x49, 0x4e, 0x9f, -+ 0x2d, 0x37, 0xd2, 0x58, 0xf8, 0x73, 0xa8, 0xe6, -+ 0xe9, 0xd0, 0xdb, 0xd1, 0xe3, 0x83, 0xef, 0x64, -+ 0xd9, 0x8b, 0xb9, 0x1b, 0x3e, 0x0b, 0xe0, 0x35 }, -+ .valid = true -+ } -+}; -+ -+bool __init curve25519_selftest(void) -+{ -+ bool success = true, ret, ret2; -+ size_t i = 0, j; -+ u8 in[CURVE25519_KEY_SIZE]; -+ u8 out[CURVE25519_KEY_SIZE], out2[CURVE25519_KEY_SIZE], -+ out3[CURVE25519_KEY_SIZE]; -+ -+ for (i = 0; i < ARRAY_SIZE(curve25519_test_vectors); ++i) { -+ memset(out, 0, CURVE25519_KEY_SIZE); -+ ret = curve25519(out, curve25519_test_vectors[i].private, -+ curve25519_test_vectors[i].public); -+ if (ret != curve25519_test_vectors[i].valid || -+ memcmp(out, curve25519_test_vectors[i].result, -+ CURVE25519_KEY_SIZE)) { -+ pr_err("curve25519 self-test %zu: FAIL\n", i + 1); -+ success = false; -+ } -+ } -+ -+ for (i = 0; i < 5; ++i) { -+ get_random_bytes(in, sizeof(in)); -+ ret = curve25519_generate_public(out, in); -+ ret2 = curve25519(out2, in, (u8[CURVE25519_KEY_SIZE]){ 9 }); -+ curve25519_generic(out3, in, (u8[CURVE25519_KEY_SIZE]){ 9 }); -+ if (ret != ret2 || -+ memcmp(out, out2, CURVE25519_KEY_SIZE) || -+ memcmp(out, out3, CURVE25519_KEY_SIZE)) { -+ pr_err("curve25519 basepoint self-test %zu: FAIL: input - 0x", -+ i + 1); -+ for (j = CURVE25519_KEY_SIZE; j-- > 0;) -+ printk(KERN_CONT "%02x", in[j]); -+ printk(KERN_CONT "\n"); -+ success = false; -+ } -+ } -+ -+ return success; -+} ---- a/lib/crypto/curve25519.c -+++ b/lib/crypto/curve25519.c -@@ -13,6 +13,8 @@ - #include - #include - -+bool curve25519_selftest(void); -+ - const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 }; - const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 }; - -@@ -20,6 +22,21 @@ EXPORT_SYMBOL(curve25519_null_point); - EXPORT_SYMBOL(curve25519_base_point); - EXPORT_SYMBOL(curve25519_generic); - -+static int __init mod_init(void) -+{ -+ if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && -+ 
WARN_ON(!curve25519_selftest())) -+ return -ENODEV; -+ return 0; -+} -+ -+static void __exit mod_exit(void) -+{ -+} -+ -+module_init(mod_init); -+module_exit(mod_exit); -+ - MODULE_LICENSE("GPL v2"); - MODULE_DESCRIPTION("Curve25519 scalar multiplication"); - MODULE_AUTHOR("Jason A. Donenfeld "); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0041-crypto-poly1305-add-new-32-and-64-bit-generic-versio.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0041-crypto-poly1305-add-new-32-and-64-bit-generic-versio.patch deleted file mode 100644 index c41ef55b1..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0041-crypto-poly1305-add-new-32-and-64-bit-generic-versio.patch +++ /dev/null @@ -1,1164 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Sun, 5 Jan 2020 22:40:46 -0500 -Subject: [PATCH] crypto: poly1305 - add new 32 and 64-bit generic versions - -commit 1c08a104360f3e18f4ee6346c21cc3923efb952e upstream. - -These two C implementations from Zinc -- a 32x32 one and a 64x64 one, -depending on the platform -- come from Andrew Moon's public domain -poly1305-donna portable code, modified for usage in the kernel. The -precomputation in the 32-bit version and the use of 64x64 multiplies in -the 64-bit version make these perform better than the code it replaces. -Moon's code is also very widespread and has received many eyeballs of -scrutiny. - -There's a bit of interference between the x86 implementation, which -relies on internal details of the old scalar implementation. In the next -commit, the x86 implementation will be replaced with a faster one that -doesn't rely on this, so none of this matters much. But for now, to keep -this passing the tests, we inline the bits of the old implementation -that the x86 implementation relied on. Also, since we now support a -slightly larger key space, via the union, some offsets had to be fixed -up. 
- -Nonce calculation was folded in with the emit function, to take -advantage of 64x64 arithmetic. However, Adiantum appeared to rely on no -nonce handling in emit, so this path was conditionalized. We also -introduced a new struct, poly1305_core_key, to represent the precise -amount of space that particular implementation uses. - -Testing with kbench9000, depending on the CPU, the update function for -the 32x32 version has been improved by 4%-7%, and for the 64x64 by -19%-30%. The 32x32 gains are small, but I think there's great value in -having a parallel implementation to the 64x64 one so that the two can be -compared side-by-side as nice stand-alone units. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/poly1305-avx2-x86_64.S | 20 +-- - arch/x86/crypto/poly1305_glue.c | 215 +++++++++++++++++++++++-- - crypto/adiantum.c | 4 +- - crypto/nhpoly1305.c | 2 +- - crypto/poly1305_generic.c | 25 ++- - include/crypto/internal/poly1305.h | 45 ++---- - include/crypto/nhpoly1305.h | 4 +- - include/crypto/poly1305.h | 26 ++- - lib/crypto/Makefile | 4 +- - lib/crypto/poly1305-donna32.c | 204 +++++++++++++++++++++++ - lib/crypto/poly1305-donna64.c | 185 +++++++++++++++++++++ - lib/crypto/poly1305.c | 169 +------------------ - 12 files changed, 675 insertions(+), 228 deletions(-) - create mode 100644 lib/crypto/poly1305-donna32.c - create mode 100644 lib/crypto/poly1305-donna64.c - ---- a/arch/x86/crypto/poly1305-avx2-x86_64.S -+++ b/arch/x86/crypto/poly1305-avx2-x86_64.S -@@ -34,16 +34,16 @@ ORMASK: .octa 0x000000000100000000000000 - #define u2 0x08(%r8) - #define u3 0x0c(%r8) - #define u4 0x10(%r8) --#define w0 0x14(%r8) --#define w1 0x18(%r8) --#define w2 0x1c(%r8) --#define w3 0x20(%r8) --#define w4 0x24(%r8) --#define y0 0x28(%r8) --#define y1 0x2c(%r8) --#define y2 0x30(%r8) --#define y3 0x34(%r8) --#define y4 0x38(%r8) -+#define w0 0x18(%r8) -+#define w1 0x1c(%r8) -+#define w2 0x20(%r8) -+#define 
w3 0x24(%r8) -+#define w4 0x28(%r8) -+#define y0 0x30(%r8) -+#define y1 0x34(%r8) -+#define y2 0x38(%r8) -+#define y3 0x3c(%r8) -+#define y4 0x40(%r8) - #define m %rsi - #define hc0 %ymm0 - #define hc1 %ymm1 ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -25,6 +25,21 @@ asmlinkage void poly1305_4block_avx2(u32 - static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_simd); - static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2); - -+static inline u64 mlt(u64 a, u64 b) -+{ -+ return a * b; -+} -+ -+static inline u32 sr(u64 v, u_char n) -+{ -+ return v >> n; -+} -+ -+static inline u32 and(u32 v, u32 mask) -+{ -+ return v & mask; -+} -+ - static void poly1305_simd_mult(u32 *a, const u32 *b) - { - u8 m[POLY1305_BLOCK_SIZE]; -@@ -36,6 +51,168 @@ static void poly1305_simd_mult(u32 *a, c - poly1305_block_sse2(a, m, b, 1); - } - -+static void poly1305_integer_setkey(struct poly1305_key *key, const u8 *raw_key) -+{ -+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ -+ key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff; -+ key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03; -+ key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff; -+ key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff; -+ key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff; -+} -+ -+static void poly1305_integer_blocks(struct poly1305_state *state, -+ const struct poly1305_key *key, -+ const void *src, -+ unsigned int nblocks, u32 hibit) -+{ -+ u32 r0, r1, r2, r3, r4; -+ u32 s1, s2, s3, s4; -+ u32 h0, h1, h2, h3, h4; -+ u64 d0, d1, d2, d3, d4; -+ -+ if (!nblocks) -+ return; -+ -+ r0 = key->r[0]; -+ r1 = key->r[1]; -+ r2 = key->r[2]; -+ r3 = key->r[3]; -+ r4 = key->r[4]; -+ -+ s1 = r1 * 5; -+ s2 = r2 * 5; -+ s3 = r3 * 5; -+ s4 = r4 * 5; -+ -+ h0 = state->h[0]; -+ h1 = state->h[1]; -+ h2 = state->h[2]; -+ h3 = state->h[3]; -+ h4 = state->h[4]; -+ -+ do { -+ /* h += m[i] */ -+ h0 += (get_unaligned_le32(src + 
0) >> 0) & 0x3ffffff; -+ h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff; -+ h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff; -+ h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff; -+ h4 += (get_unaligned_le32(src + 12) >> 8) | (hibit << 24); -+ -+ /* h *= r */ -+ d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + -+ mlt(h3, s2) + mlt(h4, s1); -+ d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) + -+ mlt(h3, s3) + mlt(h4, s2); -+ d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) + -+ mlt(h3, s4) + mlt(h4, s3); -+ d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) + -+ mlt(h3, r0) + mlt(h4, s4); -+ d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) + -+ mlt(h3, r1) + mlt(h4, r0); -+ -+ /* (partial) h %= p */ -+ d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff); -+ d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff); -+ d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff); -+ d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff); -+ h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff); -+ h1 += h0 >> 26; h0 = h0 & 0x3ffffff; -+ -+ src += POLY1305_BLOCK_SIZE; -+ } while (--nblocks); -+ -+ state->h[0] = h0; -+ state->h[1] = h1; -+ state->h[2] = h2; -+ state->h[3] = h3; -+ state->h[4] = h4; -+} -+ -+static void poly1305_integer_emit(const struct poly1305_state *state, void *dst) -+{ -+ u32 h0, h1, h2, h3, h4; -+ u32 g0, g1, g2, g3, g4; -+ u32 mask; -+ -+ /* fully carry h */ -+ h0 = state->h[0]; -+ h1 = state->h[1]; -+ h2 = state->h[2]; -+ h3 = state->h[3]; -+ h4 = state->h[4]; -+ -+ h2 += (h1 >> 26); h1 = h1 & 0x3ffffff; -+ h3 += (h2 >> 26); h2 = h2 & 0x3ffffff; -+ h4 += (h3 >> 26); h3 = h3 & 0x3ffffff; -+ h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff; -+ h1 += (h0 >> 26); h0 = h0 & 0x3ffffff; -+ -+ /* compute h + -p */ -+ g0 = h0 + 5; -+ g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff; -+ g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff; -+ g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff; -+ g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff; -+ -+ /* select h if h < p, or h + -p if h >= p */ -+ mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; -+ g0 &= mask; -+ g1 &= 
mask; -+ g2 &= mask; -+ g3 &= mask; -+ g4 &= mask; -+ mask = ~mask; -+ h0 = (h0 & mask) | g0; -+ h1 = (h1 & mask) | g1; -+ h2 = (h2 & mask) | g2; -+ h3 = (h3 & mask) | g3; -+ h4 = (h4 & mask) | g4; -+ -+ /* h = h % (2^128) */ -+ put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0); -+ put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4); -+ put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8); -+ put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12); -+} -+ -+void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key) -+{ -+ poly1305_integer_setkey(desc->opaque_r, key); -+ desc->s[0] = get_unaligned_le32(key + 16); -+ desc->s[1] = get_unaligned_le32(key + 20); -+ desc->s[2] = get_unaligned_le32(key + 24); -+ desc->s[3] = get_unaligned_le32(key + 28); -+ poly1305_core_init(&desc->h); -+ desc->buflen = 0; -+ desc->sset = true; -+ desc->rset = 1; -+} -+EXPORT_SYMBOL_GPL(poly1305_init_arch); -+ -+static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, -+ const u8 *src, unsigned int srclen) -+{ -+ if (!dctx->sset) { -+ if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { -+ poly1305_integer_setkey(dctx->r, src); -+ src += POLY1305_BLOCK_SIZE; -+ srclen -= POLY1305_BLOCK_SIZE; -+ dctx->rset = 1; -+ } -+ if (srclen >= POLY1305_BLOCK_SIZE) { -+ dctx->s[0] = get_unaligned_le32(src + 0); -+ dctx->s[1] = get_unaligned_le32(src + 4); -+ dctx->s[2] = get_unaligned_le32(src + 8); -+ dctx->s[3] = get_unaligned_le32(src + 12); -+ src += POLY1305_BLOCK_SIZE; -+ srclen -= POLY1305_BLOCK_SIZE; -+ dctx->sset = true; -+ } -+ } -+ return srclen; -+} -+ - static unsigned int poly1305_scalar_blocks(struct poly1305_desc_ctx *dctx, - const u8 *src, unsigned int srclen) - { -@@ -47,8 +224,8 @@ static unsigned int poly1305_scalar_bloc - srclen = datalen; - } - if (srclen >= POLY1305_BLOCK_SIZE) { -- poly1305_core_blocks(&dctx->h, dctx->r, src, -- srclen / POLY1305_BLOCK_SIZE, 1); -+ poly1305_integer_blocks(&dctx->h, dctx->opaque_r, src, -+ srclen / 
POLY1305_BLOCK_SIZE, 1); - srclen %= POLY1305_BLOCK_SIZE; - } - return srclen; -@@ -105,12 +282,6 @@ static unsigned int poly1305_simd_blocks - return srclen; - } - --void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key) --{ -- poly1305_init_generic(desc, key); --} --EXPORT_SYMBOL(poly1305_init_arch); -- - void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int srclen) - { -@@ -158,9 +329,31 @@ void poly1305_update_arch(struct poly130 - } - EXPORT_SYMBOL(poly1305_update_arch); - --void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest) -+void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *dst) - { -- poly1305_final_generic(desc, digest); -+ __le32 digest[4]; -+ u64 f = 0; -+ -+ if (unlikely(desc->buflen)) { -+ desc->buf[desc->buflen++] = 1; -+ memset(desc->buf + desc->buflen, 0, -+ POLY1305_BLOCK_SIZE - desc->buflen); -+ poly1305_integer_blocks(&desc->h, desc->opaque_r, desc->buf, 1, 0); -+ } -+ -+ poly1305_integer_emit(&desc->h, digest); -+ -+ /* mac = (h + s) % (2^128) */ -+ f = (f >> 32) + le32_to_cpu(digest[0]) + desc->s[0]; -+ put_unaligned_le32(f, dst + 0); -+ f = (f >> 32) + le32_to_cpu(digest[1]) + desc->s[1]; -+ put_unaligned_le32(f, dst + 4); -+ f = (f >> 32) + le32_to_cpu(digest[2]) + desc->s[2]; -+ put_unaligned_le32(f, dst + 8); -+ f = (f >> 32) + le32_to_cpu(digest[3]) + desc->s[3]; -+ put_unaligned_le32(f, dst + 12); -+ -+ *desc = (struct poly1305_desc_ctx){}; - } - EXPORT_SYMBOL(poly1305_final_arch); - -@@ -183,7 +376,7 @@ static int crypto_poly1305_final(struct - if (unlikely(!dctx->sset)) - return -ENOKEY; - -- poly1305_final_generic(dctx, dst); -+ poly1305_final_arch(dctx, dst); - return 0; - } - ---- a/crypto/adiantum.c -+++ b/crypto/adiantum.c -@@ -72,7 +72,7 @@ struct adiantum_tfm_ctx { - struct crypto_skcipher *streamcipher; - struct crypto_cipher *blockcipher; - struct crypto_shash *hash; -- struct poly1305_key header_hash_key; -+ struct poly1305_core_key header_hash_key; 
- }; - - struct adiantum_request_ctx { -@@ -249,7 +249,7 @@ static void adiantum_hash_header(struct - poly1305_core_blocks(&state, &tctx->header_hash_key, req->iv, - TWEAK_SIZE / POLY1305_BLOCK_SIZE, 1); - -- poly1305_core_emit(&state, &rctx->header_hash); -+ poly1305_core_emit(&state, NULL, &rctx->header_hash); - } - - /* Hash the left-hand part (the "bulk") of the message using NHPoly1305 */ ---- a/crypto/nhpoly1305.c -+++ b/crypto/nhpoly1305.c -@@ -210,7 +210,7 @@ int crypto_nhpoly1305_final_helper(struc - if (state->nh_remaining) - process_nh_hash_value(state, key); - -- poly1305_core_emit(&state->poly_state, dst); -+ poly1305_core_emit(&state->poly_state, NULL, dst); - return 0; - } - EXPORT_SYMBOL(crypto_nhpoly1305_final_helper); ---- a/crypto/poly1305_generic.c -+++ b/crypto/poly1305_generic.c -@@ -31,6 +31,29 @@ static int crypto_poly1305_init(struct s - return 0; - } - -+static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, -+ const u8 *src, unsigned int srclen) -+{ -+ if (!dctx->sset) { -+ if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { -+ poly1305_core_setkey(&dctx->core_r, src); -+ src += POLY1305_BLOCK_SIZE; -+ srclen -= POLY1305_BLOCK_SIZE; -+ dctx->rset = 2; -+ } -+ if (srclen >= POLY1305_BLOCK_SIZE) { -+ dctx->s[0] = get_unaligned_le32(src + 0); -+ dctx->s[1] = get_unaligned_le32(src + 4); -+ dctx->s[2] = get_unaligned_le32(src + 8); -+ dctx->s[3] = get_unaligned_le32(src + 12); -+ src += POLY1305_BLOCK_SIZE; -+ srclen -= POLY1305_BLOCK_SIZE; -+ dctx->sset = true; -+ } -+ } -+ return srclen; -+} -+ - static void poly1305_blocks(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int srclen) - { -@@ -42,7 +65,7 @@ static void poly1305_blocks(struct poly1 - srclen = datalen; - } - -- poly1305_core_blocks(&dctx->h, dctx->r, src, -+ poly1305_core_blocks(&dctx->h, &dctx->core_r, src, - srclen / POLY1305_BLOCK_SIZE, 1); - } - ---- a/include/crypto/internal/poly1305.h -+++ b/include/crypto/internal/poly1305.h -@@ -11,48 
+11,23 @@ - #include - - /* -- * Poly1305 core functions. These implement the ε-almost-∆-universal hash -- * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce -- * ("s key") at the end. They also only support block-aligned inputs. -+ * Poly1305 core functions. These only accept whole blocks; the caller must -+ * handle any needed block buffering and padding. 'hibit' must be 1 for any -+ * full blocks, or 0 for the final block if it had to be padded. If 'nonce' is -+ * non-NULL, then it's added at the end to compute the Poly1305 MAC. Otherwise, -+ * only the ε-almost-∆-universal hash function (not the full MAC) is computed. - */ --void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key); -+ -+void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key); - static inline void poly1305_core_init(struct poly1305_state *state) - { - *state = (struct poly1305_state){}; - } - - void poly1305_core_blocks(struct poly1305_state *state, -- const struct poly1305_key *key, const void *src, -+ const struct poly1305_core_key *key, const void *src, - unsigned int nblocks, u32 hibit); --void poly1305_core_emit(const struct poly1305_state *state, void *dst); -- --/* -- * Poly1305 requires a unique key for each tag, which implies that we can't set -- * it on the tfm that gets accessed by multiple users simultaneously. Instead we -- * expect the key as the first 32 bytes in the update() call. 
-- */ --static inline --unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, -- const u8 *src, unsigned int srclen) --{ -- if (!dctx->sset) { -- if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { -- poly1305_core_setkey(dctx->r, src); -- src += POLY1305_BLOCK_SIZE; -- srclen -= POLY1305_BLOCK_SIZE; -- dctx->rset = 1; -- } -- if (srclen >= POLY1305_BLOCK_SIZE) { -- dctx->s[0] = get_unaligned_le32(src + 0); -- dctx->s[1] = get_unaligned_le32(src + 4); -- dctx->s[2] = get_unaligned_le32(src + 8); -- dctx->s[3] = get_unaligned_le32(src + 12); -- src += POLY1305_BLOCK_SIZE; -- srclen -= POLY1305_BLOCK_SIZE; -- dctx->sset = true; -- } -- } -- return srclen; --} -+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4], -+ void *dst); - - #endif ---- a/include/crypto/nhpoly1305.h -+++ b/include/crypto/nhpoly1305.h -@@ -7,7 +7,7 @@ - #define _NHPOLY1305_H - - #include --#include -+#include - - /* NH parameterization: */ - -@@ -33,7 +33,7 @@ - #define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES) - - struct nhpoly1305_key { -- struct poly1305_key poly_key; -+ struct poly1305_core_key poly_key; - u32 nh_key[NH_KEY_WORDS]; - }; - ---- a/include/crypto/poly1305.h -+++ b/include/crypto/poly1305.h -@@ -13,12 +13,29 @@ - #define POLY1305_KEY_SIZE 32 - #define POLY1305_DIGEST_SIZE 16 - -+/* The poly1305_key and poly1305_state types are mostly opaque and -+ * implementation-defined. Limbs might be in base 2^64 or base 2^26, or -+ * different yet. The union type provided keeps these 64-bit aligned for the -+ * case in which this is implemented using 64x64 multiplies. 
-+ */ -+ - struct poly1305_key { -- u32 r[5]; /* key, base 2^26 */ -+ union { -+ u32 r[5]; -+ u64 r64[3]; -+ }; -+}; -+ -+struct poly1305_core_key { -+ struct poly1305_key key; -+ struct poly1305_key precomputed_s; - }; - - struct poly1305_state { -- u32 h[5]; /* accumulator, base 2^26 */ -+ union { -+ u32 h[5]; -+ u64 h64[3]; -+ }; - }; - - struct poly1305_desc_ctx { -@@ -35,7 +52,10 @@ struct poly1305_desc_ctx { - /* accumulator */ - struct poly1305_state h; - /* key */ -- struct poly1305_key r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE]; -+ union { -+ struct poly1305_key opaque_r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE]; -+ struct poly1305_core_key core_r; -+ }; - }; - - void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key); ---- a/lib/crypto/Makefile -+++ b/lib/crypto/Makefile -@@ -28,7 +28,9 @@ obj-$(CONFIG_CRYPTO_LIB_DES) += libdes - libdes-y := des.o - - obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305.o --libpoly1305-y := poly1305.o -+libpoly1305-y := poly1305-donna32.o -+libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o -+libpoly1305-y += poly1305.o - - obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o - libsha256-y := sha256.o ---- /dev/null -+++ b/lib/crypto/poly1305-donna32.c -@@ -0,0 +1,204 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * This is based in part on Andrew Moon's poly1305-donna, which is in the -+ * public domain. 
-+ */ -+ -+#include -+#include -+#include -+ -+void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) -+{ -+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ -+ key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff; -+ key->key.r[1] = (get_unaligned_le32(&raw_key[3]) >> 2) & 0x3ffff03; -+ key->key.r[2] = (get_unaligned_le32(&raw_key[6]) >> 4) & 0x3ffc0ff; -+ key->key.r[3] = (get_unaligned_le32(&raw_key[9]) >> 6) & 0x3f03fff; -+ key->key.r[4] = (get_unaligned_le32(&raw_key[12]) >> 8) & 0x00fffff; -+ -+ /* s = 5*r */ -+ key->precomputed_s.r[0] = key->key.r[1] * 5; -+ key->precomputed_s.r[1] = key->key.r[2] * 5; -+ key->precomputed_s.r[2] = key->key.r[3] * 5; -+ key->precomputed_s.r[3] = key->key.r[4] * 5; -+} -+EXPORT_SYMBOL(poly1305_core_setkey); -+ -+void poly1305_core_blocks(struct poly1305_state *state, -+ const struct poly1305_core_key *key, const void *src, -+ unsigned int nblocks, u32 hibit) -+{ -+ const u8 *input = src; -+ u32 r0, r1, r2, r3, r4; -+ u32 s1, s2, s3, s4; -+ u32 h0, h1, h2, h3, h4; -+ u64 d0, d1, d2, d3, d4; -+ u32 c; -+ -+ if (!nblocks) -+ return; -+ -+ hibit <<= 24; -+ -+ r0 = key->key.r[0]; -+ r1 = key->key.r[1]; -+ r2 = key->key.r[2]; -+ r3 = key->key.r[3]; -+ r4 = key->key.r[4]; -+ -+ s1 = key->precomputed_s.r[0]; -+ s2 = key->precomputed_s.r[1]; -+ s3 = key->precomputed_s.r[2]; -+ s4 = key->precomputed_s.r[3]; -+ -+ h0 = state->h[0]; -+ h1 = state->h[1]; -+ h2 = state->h[2]; -+ h3 = state->h[3]; -+ h4 = state->h[4]; -+ -+ do { -+ /* h += m[i] */ -+ h0 += (get_unaligned_le32(&input[0])) & 0x3ffffff; -+ h1 += (get_unaligned_le32(&input[3]) >> 2) & 0x3ffffff; -+ h2 += (get_unaligned_le32(&input[6]) >> 4) & 0x3ffffff; -+ h3 += (get_unaligned_le32(&input[9]) >> 6) & 0x3ffffff; -+ h4 += (get_unaligned_le32(&input[12]) >> 8) | hibit; -+ -+ /* h *= r */ -+ d0 = ((u64)h0 * r0) + ((u64)h1 * s4) + -+ ((u64)h2 * s3) + ((u64)h3 * s2) + -+ ((u64)h4 * s1); -+ d1 = ((u64)h0 * r1) + ((u64)h1 * r0) + -+ ((u64)h2 * s4) + ((u64)h3 
* s3) + -+ ((u64)h4 * s2); -+ d2 = ((u64)h0 * r2) + ((u64)h1 * r1) + -+ ((u64)h2 * r0) + ((u64)h3 * s4) + -+ ((u64)h4 * s3); -+ d3 = ((u64)h0 * r3) + ((u64)h1 * r2) + -+ ((u64)h2 * r1) + ((u64)h3 * r0) + -+ ((u64)h4 * s4); -+ d4 = ((u64)h0 * r4) + ((u64)h1 * r3) + -+ ((u64)h2 * r2) + ((u64)h3 * r1) + -+ ((u64)h4 * r0); -+ -+ /* (partial) h %= p */ -+ c = (u32)(d0 >> 26); -+ h0 = (u32)d0 & 0x3ffffff; -+ d1 += c; -+ c = (u32)(d1 >> 26); -+ h1 = (u32)d1 & 0x3ffffff; -+ d2 += c; -+ c = (u32)(d2 >> 26); -+ h2 = (u32)d2 & 0x3ffffff; -+ d3 += c; -+ c = (u32)(d3 >> 26); -+ h3 = (u32)d3 & 0x3ffffff; -+ d4 += c; -+ c = (u32)(d4 >> 26); -+ h4 = (u32)d4 & 0x3ffffff; -+ h0 += c * 5; -+ c = (h0 >> 26); -+ h0 = h0 & 0x3ffffff; -+ h1 += c; -+ -+ input += POLY1305_BLOCK_SIZE; -+ } while (--nblocks); -+ -+ state->h[0] = h0; -+ state->h[1] = h1; -+ state->h[2] = h2; -+ state->h[3] = h3; -+ state->h[4] = h4; -+} -+EXPORT_SYMBOL(poly1305_core_blocks); -+ -+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4], -+ void *dst) -+{ -+ u8 *mac = dst; -+ u32 h0, h1, h2, h3, h4, c; -+ u32 g0, g1, g2, g3, g4; -+ u64 f; -+ u32 mask; -+ -+ /* fully carry h */ -+ h0 = state->h[0]; -+ h1 = state->h[1]; -+ h2 = state->h[2]; -+ h3 = state->h[3]; -+ h4 = state->h[4]; -+ -+ c = h1 >> 26; -+ h1 = h1 & 0x3ffffff; -+ h2 += c; -+ c = h2 >> 26; -+ h2 = h2 & 0x3ffffff; -+ h3 += c; -+ c = h3 >> 26; -+ h3 = h3 & 0x3ffffff; -+ h4 += c; -+ c = h4 >> 26; -+ h4 = h4 & 0x3ffffff; -+ h0 += c * 5; -+ c = h0 >> 26; -+ h0 = h0 & 0x3ffffff; -+ h1 += c; -+ -+ /* compute h + -p */ -+ g0 = h0 + 5; -+ c = g0 >> 26; -+ g0 &= 0x3ffffff; -+ g1 = h1 + c; -+ c = g1 >> 26; -+ g1 &= 0x3ffffff; -+ g2 = h2 + c; -+ c = g2 >> 26; -+ g2 &= 0x3ffffff; -+ g3 = h3 + c; -+ c = g3 >> 26; -+ g3 &= 0x3ffffff; -+ g4 = h4 + c - (1UL << 26); -+ -+ /* select h if h < p, or h + -p if h >= p */ -+ mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; -+ g0 &= mask; -+ g1 &= mask; -+ g2 &= mask; -+ g3 &= mask; -+ g4 &= mask; -+ mask 
= ~mask; -+ -+ h0 = (h0 & mask) | g0; -+ h1 = (h1 & mask) | g1; -+ h2 = (h2 & mask) | g2; -+ h3 = (h3 & mask) | g3; -+ h4 = (h4 & mask) | g4; -+ -+ /* h = h % (2^128) */ -+ h0 = ((h0) | (h1 << 26)) & 0xffffffff; -+ h1 = ((h1 >> 6) | (h2 << 20)) & 0xffffffff; -+ h2 = ((h2 >> 12) | (h3 << 14)) & 0xffffffff; -+ h3 = ((h3 >> 18) | (h4 << 8)) & 0xffffffff; -+ -+ if (likely(nonce)) { -+ /* mac = (h + nonce) % (2^128) */ -+ f = (u64)h0 + nonce[0]; -+ h0 = (u32)f; -+ f = (u64)h1 + nonce[1] + (f >> 32); -+ h1 = (u32)f; -+ f = (u64)h2 + nonce[2] + (f >> 32); -+ h2 = (u32)f; -+ f = (u64)h3 + nonce[3] + (f >> 32); -+ h3 = (u32)f; -+ } -+ -+ put_unaligned_le32(h0, &mac[0]); -+ put_unaligned_le32(h1, &mac[4]); -+ put_unaligned_le32(h2, &mac[8]); -+ put_unaligned_le32(h3, &mac[12]); -+} -+EXPORT_SYMBOL(poly1305_core_emit); ---- /dev/null -+++ b/lib/crypto/poly1305-donna64.c -@@ -0,0 +1,185 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * This is based in part on Andrew Moon's poly1305-donna, which is in the -+ * public domain. 
-+ */ -+ -+#include -+#include -+#include -+ -+typedef __uint128_t u128; -+ -+void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) -+{ -+ u64 t0, t1; -+ -+ /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ -+ t0 = get_unaligned_le64(&raw_key[0]); -+ t1 = get_unaligned_le64(&raw_key[8]); -+ -+ key->key.r64[0] = t0 & 0xffc0fffffffULL; -+ key->key.r64[1] = ((t0 >> 44) | (t1 << 20)) & 0xfffffc0ffffULL; -+ key->key.r64[2] = ((t1 >> 24)) & 0x00ffffffc0fULL; -+ -+ /* s = 20*r */ -+ key->precomputed_s.r64[0] = key->key.r64[1] * 20; -+ key->precomputed_s.r64[1] = key->key.r64[2] * 20; -+} -+EXPORT_SYMBOL(poly1305_core_setkey); -+ -+void poly1305_core_blocks(struct poly1305_state *state, -+ const struct poly1305_core_key *key, const void *src, -+ unsigned int nblocks, u32 hibit) -+{ -+ const u8 *input = src; -+ u64 hibit64; -+ u64 r0, r1, r2; -+ u64 s1, s2; -+ u64 h0, h1, h2; -+ u64 c; -+ u128 d0, d1, d2, d; -+ -+ if (!nblocks) -+ return; -+ -+ hibit64 = ((u64)hibit) << 40; -+ -+ r0 = key->key.r64[0]; -+ r1 = key->key.r64[1]; -+ r2 = key->key.r64[2]; -+ -+ h0 = state->h64[0]; -+ h1 = state->h64[1]; -+ h2 = state->h64[2]; -+ -+ s1 = key->precomputed_s.r64[0]; -+ s2 = key->precomputed_s.r64[1]; -+ -+ do { -+ u64 t0, t1; -+ -+ /* h += m[i] */ -+ t0 = get_unaligned_le64(&input[0]); -+ t1 = get_unaligned_le64(&input[8]); -+ -+ h0 += t0 & 0xfffffffffffULL; -+ h1 += ((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL; -+ h2 += (((t1 >> 24)) & 0x3ffffffffffULL) | hibit64; -+ -+ /* h *= r */ -+ d0 = (u128)h0 * r0; -+ d = (u128)h1 * s2; -+ d0 += d; -+ d = (u128)h2 * s1; -+ d0 += d; -+ d1 = (u128)h0 * r1; -+ d = (u128)h1 * r0; -+ d1 += d; -+ d = (u128)h2 * s2; -+ d1 += d; -+ d2 = (u128)h0 * r2; -+ d = (u128)h1 * r1; -+ d2 += d; -+ d = (u128)h2 * r0; -+ d2 += d; -+ -+ /* (partial) h %= p */ -+ c = (u64)(d0 >> 44); -+ h0 = (u64)d0 & 0xfffffffffffULL; -+ d1 += c; -+ c = (u64)(d1 >> 44); -+ h1 = (u64)d1 & 0xfffffffffffULL; -+ d2 += c; -+ c = (u64)(d2 >> 42); -+ h2 = (u64)d2 & 
0x3ffffffffffULL; -+ h0 += c * 5; -+ c = h0 >> 44; -+ h0 = h0 & 0xfffffffffffULL; -+ h1 += c; -+ -+ input += POLY1305_BLOCK_SIZE; -+ } while (--nblocks); -+ -+ state->h64[0] = h0; -+ state->h64[1] = h1; -+ state->h64[2] = h2; -+} -+EXPORT_SYMBOL(poly1305_core_blocks); -+ -+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4], -+ void *dst) -+{ -+ u8 *mac = dst; -+ u64 h0, h1, h2, c; -+ u64 g0, g1, g2; -+ u64 t0, t1; -+ -+ /* fully carry h */ -+ h0 = state->h64[0]; -+ h1 = state->h64[1]; -+ h2 = state->h64[2]; -+ -+ c = h1 >> 44; -+ h1 &= 0xfffffffffffULL; -+ h2 += c; -+ c = h2 >> 42; -+ h2 &= 0x3ffffffffffULL; -+ h0 += c * 5; -+ c = h0 >> 44; -+ h0 &= 0xfffffffffffULL; -+ h1 += c; -+ c = h1 >> 44; -+ h1 &= 0xfffffffffffULL; -+ h2 += c; -+ c = h2 >> 42; -+ h2 &= 0x3ffffffffffULL; -+ h0 += c * 5; -+ c = h0 >> 44; -+ h0 &= 0xfffffffffffULL; -+ h1 += c; -+ -+ /* compute h + -p */ -+ g0 = h0 + 5; -+ c = g0 >> 44; -+ g0 &= 0xfffffffffffULL; -+ g1 = h1 + c; -+ c = g1 >> 44; -+ g1 &= 0xfffffffffffULL; -+ g2 = h2 + c - (1ULL << 42); -+ -+ /* select h if h < p, or h + -p if h >= p */ -+ c = (g2 >> ((sizeof(u64) * 8) - 1)) - 1; -+ g0 &= c; -+ g1 &= c; -+ g2 &= c; -+ c = ~c; -+ h0 = (h0 & c) | g0; -+ h1 = (h1 & c) | g1; -+ h2 = (h2 & c) | g2; -+ -+ if (likely(nonce)) { -+ /* h = (h + nonce) */ -+ t0 = ((u64)nonce[1] << 32) | nonce[0]; -+ t1 = ((u64)nonce[3] << 32) | nonce[2]; -+ -+ h0 += t0 & 0xfffffffffffULL; -+ c = h0 >> 44; -+ h0 &= 0xfffffffffffULL; -+ h1 += (((t0 >> 44) | (t1 << 20)) & 0xfffffffffffULL) + c; -+ c = h1 >> 44; -+ h1 &= 0xfffffffffffULL; -+ h2 += (((t1 >> 24)) & 0x3ffffffffffULL) + c; -+ h2 &= 0x3ffffffffffULL; -+ } -+ -+ /* mac = h % (2^128) */ -+ h0 = h0 | (h1 << 44); -+ h1 = (h1 >> 20) | (h2 << 24); -+ -+ put_unaligned_le64(h0, &mac[0]); -+ put_unaligned_le64(h1, &mac[8]); -+} -+EXPORT_SYMBOL(poly1305_core_emit); ---- a/lib/crypto/poly1305.c -+++ b/lib/crypto/poly1305.c -@@ -12,151 +12,9 @@ - #include - #include - --static inline 
u64 mlt(u64 a, u64 b) --{ -- return a * b; --} -- --static inline u32 sr(u64 v, u_char n) --{ -- return v >> n; --} -- --static inline u32 and(u32 v, u32 mask) --{ -- return v & mask; --} -- --void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key) --{ -- /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ -- key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff; -- key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03; -- key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff; -- key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff; -- key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff; --} --EXPORT_SYMBOL_GPL(poly1305_core_setkey); -- --void poly1305_core_blocks(struct poly1305_state *state, -- const struct poly1305_key *key, const void *src, -- unsigned int nblocks, u32 hibit) --{ -- u32 r0, r1, r2, r3, r4; -- u32 s1, s2, s3, s4; -- u32 h0, h1, h2, h3, h4; -- u64 d0, d1, d2, d3, d4; -- -- if (!nblocks) -- return; -- -- r0 = key->r[0]; -- r1 = key->r[1]; -- r2 = key->r[2]; -- r3 = key->r[3]; -- r4 = key->r[4]; -- -- s1 = r1 * 5; -- s2 = r2 * 5; -- s3 = r3 * 5; -- s4 = r4 * 5; -- -- h0 = state->h[0]; -- h1 = state->h[1]; -- h2 = state->h[2]; -- h3 = state->h[3]; -- h4 = state->h[4]; -- -- do { -- /* h += m[i] */ -- h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff; -- h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff; -- h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff; -- h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff; -- h4 += (get_unaligned_le32(src + 12) >> 8) | (hibit << 24); -- -- /* h *= r */ -- d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + -- mlt(h3, s2) + mlt(h4, s1); -- d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) + -- mlt(h3, s3) + mlt(h4, s2); -- d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) + -- mlt(h3, s4) + mlt(h4, s3); -- d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) + -- mlt(h3, r0) + mlt(h4, s4); -- d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) + -- mlt(h3, r1) + mlt(h4, r0); 
-- -- /* (partial) h %= p */ -- d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff); -- d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff); -- d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff); -- d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff); -- h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff); -- h1 += h0 >> 26; h0 = h0 & 0x3ffffff; -- -- src += POLY1305_BLOCK_SIZE; -- } while (--nblocks); -- -- state->h[0] = h0; -- state->h[1] = h1; -- state->h[2] = h2; -- state->h[3] = h3; -- state->h[4] = h4; --} --EXPORT_SYMBOL_GPL(poly1305_core_blocks); -- --void poly1305_core_emit(const struct poly1305_state *state, void *dst) --{ -- u32 h0, h1, h2, h3, h4; -- u32 g0, g1, g2, g3, g4; -- u32 mask; -- -- /* fully carry h */ -- h0 = state->h[0]; -- h1 = state->h[1]; -- h2 = state->h[2]; -- h3 = state->h[3]; -- h4 = state->h[4]; -- -- h2 += (h1 >> 26); h1 = h1 & 0x3ffffff; -- h3 += (h2 >> 26); h2 = h2 & 0x3ffffff; -- h4 += (h3 >> 26); h3 = h3 & 0x3ffffff; -- h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff; -- h1 += (h0 >> 26); h0 = h0 & 0x3ffffff; -- -- /* compute h + -p */ -- g0 = h0 + 5; -- g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff; -- g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff; -- g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff; -- g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff; -- -- /* select h if h < p, or h + -p if h >= p */ -- mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; -- g0 &= mask; -- g1 &= mask; -- g2 &= mask; -- g3 &= mask; -- g4 &= mask; -- mask = ~mask; -- h0 = (h0 & mask) | g0; -- h1 = (h1 & mask) | g1; -- h2 = (h2 & mask) | g2; -- h3 = (h3 & mask) | g3; -- h4 = (h4 & mask) | g4; -- -- /* h = h % (2^128) */ -- put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0); -- put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4); -- put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8); -- put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12); --} --EXPORT_SYMBOL_GPL(poly1305_core_emit); -- - void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key) - { -- poly1305_core_setkey(desc->r, key); -+ 
poly1305_core_setkey(&desc->core_r, key); - desc->s[0] = get_unaligned_le32(key + 16); - desc->s[1] = get_unaligned_le32(key + 20); - desc->s[2] = get_unaligned_le32(key + 24); -@@ -164,7 +22,7 @@ void poly1305_init_generic(struct poly13 - poly1305_core_init(&desc->h); - desc->buflen = 0; - desc->sset = true; -- desc->rset = 1; -+ desc->rset = 2; - } - EXPORT_SYMBOL_GPL(poly1305_init_generic); - -@@ -181,13 +39,14 @@ void poly1305_update_generic(struct poly - desc->buflen += bytes; - - if (desc->buflen == POLY1305_BLOCK_SIZE) { -- poly1305_core_blocks(&desc->h, desc->r, desc->buf, 1, 1); -+ poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, -+ 1, 1); - desc->buflen = 0; - } - } - - if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { -- poly1305_core_blocks(&desc->h, desc->r, src, -+ poly1305_core_blocks(&desc->h, &desc->core_r, src, - nbytes / POLY1305_BLOCK_SIZE, 1); - src += nbytes - (nbytes % POLY1305_BLOCK_SIZE); - nbytes %= POLY1305_BLOCK_SIZE; -@@ -202,28 +61,14 @@ EXPORT_SYMBOL_GPL(poly1305_update_generi - - void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *dst) - { -- __le32 digest[4]; -- u64 f = 0; -- - if (unlikely(desc->buflen)) { - desc->buf[desc->buflen++] = 1; - memset(desc->buf + desc->buflen, 0, - POLY1305_BLOCK_SIZE - desc->buflen); -- poly1305_core_blocks(&desc->h, desc->r, desc->buf, 1, 0); -+ poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, 1, 0); - } - -- poly1305_core_emit(&desc->h, digest); -- -- /* mac = (h + s) % (2^128) */ -- f = (f >> 32) + le32_to_cpu(digest[0]) + desc->s[0]; -- put_unaligned_le32(f, dst + 0); -- f = (f >> 32) + le32_to_cpu(digest[1]) + desc->s[1]; -- put_unaligned_le32(f, dst + 4); -- f = (f >> 32) + le32_to_cpu(digest[2]) + desc->s[2]; -- put_unaligned_le32(f, dst + 8); -- f = (f >> 32) + le32_to_cpu(digest[3]) + desc->s[3]; -- put_unaligned_le32(f, dst + 12); -- -+ poly1305_core_emit(&desc->h, desc->s, dst); - *desc = (struct poly1305_desc_ctx){}; - } - EXPORT_SYMBOL_GPL(poly1305_final_generic); 
diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0042-crypto-x86-poly1305-import-unmodified-cryptogams-imp.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0042-crypto-x86-poly1305-import-unmodified-cryptogams-imp.patch deleted file mode 100644 index 8e52383ae..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0042-crypto-x86-poly1305-import-unmodified-cryptogams-imp.patch +++ /dev/null @@ -1,4183 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Sun, 5 Jan 2020 22:40:47 -0500 -Subject: [PATCH] crypto: x86/poly1305 - import unmodified cryptogams - implementation - -commit 0896ca2a0cb6127e8a129f1f2a680d49b6b0f65c upstream. - -These x86_64 vectorized implementations come from Andy Polyakov's -CRYPTOGAMS implementation, and are included here in raw form without -modification, so that subsequent commits that fix these up for the -kernel can see how it has changed. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/poly1305-x86_64-cryptogams.pl | 4159 +++++++++++++++++ - 1 file changed, 4159 insertions(+) - create mode 100644 arch/x86/crypto/poly1305-x86_64-cryptogams.pl - ---- /dev/null -+++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl -@@ -0,0 +1,4159 @@ -+#! /usr/bin/env perl -+# Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved. -+# -+# Licensed under the OpenSSL license (the "License"). You may not use -+# this file except in compliance with the License. You can obtain a copy -+# in the file LICENSE in the source distribution or at -+# https://www.openssl.org/source/license.html -+ -+# -+# ==================================================================== -+# Written by Andy Polyakov for the OpenSSL -+# project. The module is, however, dual licensed under OpenSSL and -+# CRYPTOGAMS licenses depending on where you obtain it. 
For further -+# details see http://www.openssl.org/~appro/cryptogams/. -+# ==================================================================== -+# -+# This module implements Poly1305 hash for x86_64. -+# -+# March 2015 -+# -+# Initial release. -+# -+# December 2016 -+# -+# Add AVX512F+VL+BW code path. -+# -+# November 2017 -+# -+# Convert AVX512F+VL+BW code path to pure AVX512F, so that it can be -+# executed even on Knights Landing. Trigger for modification was -+# observation that AVX512 code paths can negatively affect overall -+# Skylake-X system performance. Since we are likely to suppress -+# AVX512F capability flag [at least on Skylake-X], conversion serves -+# as kind of "investment protection". Note that next *lake processor, -+# Cannolake, has AVX512IFMA code path to execute... -+# -+# Numbers are cycles per processed byte with poly1305_blocks alone, -+# measured with rdtsc at fixed clock frequency. -+# -+# IALU/gcc-4.8(*) AVX(**) AVX2 AVX-512 -+# P4 4.46/+120% - -+# Core 2 2.41/+90% - -+# Westmere 1.88/+120% - -+# Sandy Bridge 1.39/+140% 1.10 -+# Haswell 1.14/+175% 1.11 0.65 -+# Skylake[-X] 1.13/+120% 0.96 0.51 [0.35] -+# Silvermont 2.83/+95% - -+# Knights L 3.60/? 
1.65 1.10 0.41(***) -+# Goldmont 1.70/+180% - -+# VIA Nano 1.82/+150% - -+# Sledgehammer 1.38/+160% - -+# Bulldozer 2.30/+130% 0.97 -+# Ryzen 1.15/+200% 1.08 1.18 -+# -+# (*) improvement coefficients relative to clang are more modest and -+# are ~50% on most processors, in both cases we are comparing to -+# __int128 code; -+# (**) SSE2 implementation was attempted, but among non-AVX processors -+# it was faster than integer-only code only on older Intel P4 and -+# Core processors, 50-30%, less newer processor is, but slower on -+# contemporary ones, for example almost 2x slower on Atom, and as -+# former are naturally disappearing, SSE2 is deemed unnecessary; -+# (***) strangely enough performance seems to vary from core to core, -+# listed result is best case; -+ -+$flavour = shift; -+$output = shift; -+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } -+ -+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); -+ -+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; -+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or -+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or -+die "can't locate x86_64-xlate.pl"; -+ -+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` -+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) { -+ $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25) + ($1>=2.26); -+} -+ -+if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) && -+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) { -+ $avx = ($1>=2.09) + ($1>=2.10) + 2 * ($1>=2.12); -+ $avx += 2 if ($1==2.11 && $2>=8); -+} -+ -+if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && -+ `ml64 2>&1` =~ /Version ([0-9]+)\./) { -+ $avx = ($1>=10) + ($1>=12); -+} -+ -+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) { -+ $avx = ($2>=3.0) + ($2>3.0); -+} -+ -+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""; -+*STDOUT=*OUT; -+ -+my 
($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx"); -+my ($mac,$nonce)=($inp,$len); # *_emit arguments -+my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13)); -+my ($h0,$h1,$h2)=("%r14","%rbx","%rbp"); -+ -+sub poly1305_iteration { -+# input: copy of $r1 in %rax, $h0-$h2, $r0-$r1 -+# output: $h0-$h2 *= $r0-$r1 -+$code.=<<___; -+ mulq $h0 # h0*r1 -+ mov %rax,$d2 -+ mov $r0,%rax -+ mov %rdx,$d3 -+ -+ mulq $h0 # h0*r0 -+ mov %rax,$h0 # future $h0 -+ mov $r0,%rax -+ mov %rdx,$d1 -+ -+ mulq $h1 # h1*r0 -+ add %rax,$d2 -+ mov $s1,%rax -+ adc %rdx,$d3 -+ -+ mulq $h1 # h1*s1 -+ mov $h2,$h1 # borrow $h1 -+ add %rax,$h0 -+ adc %rdx,$d1 -+ -+ imulq $s1,$h1 # h2*s1 -+ add $h1,$d2 -+ mov $d1,$h1 -+ adc \$0,$d3 -+ -+ imulq $r0,$h2 # h2*r0 -+ add $d2,$h1 -+ mov \$-4,%rax # mask value -+ adc $h2,$d3 -+ -+ and $d3,%rax # last reduction step -+ mov $d3,$h2 -+ shr \$2,$d3 -+ and \$3,$h2 -+ add $d3,%rax -+ add %rax,$h0 -+ adc \$0,$h1 -+ adc \$0,$h2 -+___ -+} -+ -+######################################################################## -+# Layout of opaque area is following. -+# -+# unsigned __int64 h[3]; # current hash value base 2^64 -+# unsigned __int64 r[2]; # key value base 2^64 -+ -+$code.=<<___; -+.text -+ -+.extern OPENSSL_ia32cap_P -+ -+.globl poly1305_init -+.hidden poly1305_init -+.globl poly1305_blocks -+.hidden poly1305_blocks -+.globl poly1305_emit -+.hidden poly1305_emit -+ -+.type poly1305_init,\@function,3 -+.align 32 -+poly1305_init: -+ xor %rax,%rax -+ mov %rax,0($ctx) # initialize hash value -+ mov %rax,8($ctx) -+ mov %rax,16($ctx) -+ -+ cmp \$0,$inp -+ je .Lno_key -+ -+ lea poly1305_blocks(%rip),%r10 -+ lea poly1305_emit(%rip),%r11 -+___ -+$code.=<<___ if ($avx); -+ mov OPENSSL_ia32cap_P+4(%rip),%r9 -+ lea poly1305_blocks_avx(%rip),%rax -+ lea poly1305_emit_avx(%rip),%rcx -+ bt \$`60-32`,%r9 # AVX? -+ cmovc %rax,%r10 -+ cmovc %rcx,%r11 -+___ -+$code.=<<___ if ($avx>1); -+ lea poly1305_blocks_avx2(%rip),%rax -+ bt \$`5+32`,%r9 # AVX2? 
-+ cmovc %rax,%r10 -+___ -+$code.=<<___ if ($avx>3); -+ mov \$`(1<<31|1<<21|1<<16)`,%rax -+ shr \$32,%r9 -+ and %rax,%r9 -+ cmp %rax,%r9 -+ je .Linit_base2_44 -+___ -+$code.=<<___; -+ mov \$0x0ffffffc0fffffff,%rax -+ mov \$0x0ffffffc0ffffffc,%rcx -+ and 0($inp),%rax -+ and 8($inp),%rcx -+ mov %rax,24($ctx) -+ mov %rcx,32($ctx) -+___ -+$code.=<<___ if ($flavour !~ /elf32/); -+ mov %r10,0(%rdx) -+ mov %r11,8(%rdx) -+___ -+$code.=<<___ if ($flavour =~ /elf32/); -+ mov %r10d,0(%rdx) -+ mov %r11d,4(%rdx) -+___ -+$code.=<<___; -+ mov \$1,%eax -+.Lno_key: -+ ret -+.size poly1305_init,.-poly1305_init -+ -+.type poly1305_blocks,\@function,4 -+.align 32 -+poly1305_blocks: -+.cfi_startproc -+.Lblocks: -+ shr \$4,$len -+ jz .Lno_data # too short -+ -+ push %rbx -+.cfi_push %rbx -+ push %rbp -+.cfi_push %rbp -+ push %r12 -+.cfi_push %r12 -+ push %r13 -+.cfi_push %r13 -+ push %r14 -+.cfi_push %r14 -+ push %r15 -+.cfi_push %r15 -+.Lblocks_body: -+ -+ mov $len,%r15 # reassign $len -+ -+ mov 24($ctx),$r0 # load r -+ mov 32($ctx),$s1 -+ -+ mov 0($ctx),$h0 # load hash value -+ mov 8($ctx),$h1 -+ mov 16($ctx),$h2 -+ -+ mov $s1,$r1 -+ shr \$2,$s1 -+ mov $r1,%rax -+ add $r1,$s1 # s1 = r1 + (r1 >> 2) -+ jmp .Loop -+ -+.align 32 -+.Loop: -+ add 0($inp),$h0 # accumulate input -+ adc 8($inp),$h1 -+ lea 16($inp),$inp -+ adc $padbit,$h2 -+___ -+ &poly1305_iteration(); -+$code.=<<___; -+ mov $r1,%rax -+ dec %r15 # len-=16 -+ jnz .Loop -+ -+ mov $h0,0($ctx) # store hash value -+ mov $h1,8($ctx) -+ mov $h2,16($ctx) -+ -+ mov 0(%rsp),%r15 -+.cfi_restore %r15 -+ mov 8(%rsp),%r14 -+.cfi_restore %r14 -+ mov 16(%rsp),%r13 -+.cfi_restore %r13 -+ mov 24(%rsp),%r12 -+.cfi_restore %r12 -+ mov 32(%rsp),%rbp -+.cfi_restore %rbp -+ mov 40(%rsp),%rbx -+.cfi_restore %rbx -+ lea 48(%rsp),%rsp -+.cfi_adjust_cfa_offset -48 -+.Lno_data: -+.Lblocks_epilogue: -+ ret -+.cfi_endproc -+.size poly1305_blocks,.-poly1305_blocks -+ -+.type poly1305_emit,\@function,3 -+.align 32 -+poly1305_emit: -+.Lemit: -+ mov 
0($ctx),%r8 # load hash value -+ mov 8($ctx),%r9 -+ mov 16($ctx),%r10 -+ -+ mov %r8,%rax -+ add \$5,%r8 # compare to modulus -+ mov %r9,%rcx -+ adc \$0,%r9 -+ adc \$0,%r10 -+ shr \$2,%r10 # did 130-bit value overflow? -+ cmovnz %r8,%rax -+ cmovnz %r9,%rcx -+ -+ add 0($nonce),%rax # accumulate nonce -+ adc 8($nonce),%rcx -+ mov %rax,0($mac) # write result -+ mov %rcx,8($mac) -+ -+ ret -+.size poly1305_emit,.-poly1305_emit -+___ -+if ($avx) { -+ -+######################################################################## -+# Layout of opaque area is following. -+# -+# unsigned __int32 h[5]; # current hash value base 2^26 -+# unsigned __int32 is_base2_26; -+# unsigned __int64 r[2]; # key value base 2^64 -+# unsigned __int64 pad; -+# struct { unsigned __int32 r^2, r^1, r^4, r^3; } r[9]; -+# -+# where r^n are base 2^26 digits of degrees of multiplier key. There are -+# 5 digits, but last four are interleaved with multiples of 5, totalling -+# in 9 elements: r0, r1, 5*r1, r2, 5*r2, r3, 5*r3, r4, 5*r4. 
-+ -+my ($H0,$H1,$H2,$H3,$H4, $T0,$T1,$T2,$T3,$T4, $D0,$D1,$D2,$D3,$D4, $MASK) = -+ map("%xmm$_",(0..15)); -+ -+$code.=<<___; -+.type __poly1305_block,\@abi-omnipotent -+.align 32 -+__poly1305_block: -+___ -+ &poly1305_iteration(); -+$code.=<<___; -+ ret -+.size __poly1305_block,.-__poly1305_block -+ -+.type __poly1305_init_avx,\@abi-omnipotent -+.align 32 -+__poly1305_init_avx: -+ mov $r0,$h0 -+ mov $r1,$h1 -+ xor $h2,$h2 -+ -+ lea 48+64($ctx),$ctx # size optimization -+ -+ mov $r1,%rax -+ call __poly1305_block # r^2 -+ -+ mov \$0x3ffffff,%eax # save interleaved r^2 and r base 2^26 -+ mov \$0x3ffffff,%edx -+ mov $h0,$d1 -+ and $h0#d,%eax -+ mov $r0,$d2 -+ and $r0#d,%edx -+ mov %eax,`16*0+0-64`($ctx) -+ shr \$26,$d1 -+ mov %edx,`16*0+4-64`($ctx) -+ shr \$26,$d2 -+ -+ mov \$0x3ffffff,%eax -+ mov \$0x3ffffff,%edx -+ and $d1#d,%eax -+ and $d2#d,%edx -+ mov %eax,`16*1+0-64`($ctx) -+ lea (%rax,%rax,4),%eax # *5 -+ mov %edx,`16*1+4-64`($ctx) -+ lea (%rdx,%rdx,4),%edx # *5 -+ mov %eax,`16*2+0-64`($ctx) -+ shr \$26,$d1 -+ mov %edx,`16*2+4-64`($ctx) -+ shr \$26,$d2 -+ -+ mov $h1,%rax -+ mov $r1,%rdx -+ shl \$12,%rax -+ shl \$12,%rdx -+ or $d1,%rax -+ or $d2,%rdx -+ and \$0x3ffffff,%eax -+ and \$0x3ffffff,%edx -+ mov %eax,`16*3+0-64`($ctx) -+ lea (%rax,%rax,4),%eax # *5 -+ mov %edx,`16*3+4-64`($ctx) -+ lea (%rdx,%rdx,4),%edx # *5 -+ mov %eax,`16*4+0-64`($ctx) -+ mov $h1,$d1 -+ mov %edx,`16*4+4-64`($ctx) -+ mov $r1,$d2 -+ -+ mov \$0x3ffffff,%eax -+ mov \$0x3ffffff,%edx -+ shr \$14,$d1 -+ shr \$14,$d2 -+ and $d1#d,%eax -+ and $d2#d,%edx -+ mov %eax,`16*5+0-64`($ctx) -+ lea (%rax,%rax,4),%eax # *5 -+ mov %edx,`16*5+4-64`($ctx) -+ lea (%rdx,%rdx,4),%edx # *5 -+ mov %eax,`16*6+0-64`($ctx) -+ shr \$26,$d1 -+ mov %edx,`16*6+4-64`($ctx) -+ shr \$26,$d2 -+ -+ mov $h2,%rax -+ shl \$24,%rax -+ or %rax,$d1 -+ mov $d1#d,`16*7+0-64`($ctx) -+ lea ($d1,$d1,4),$d1 # *5 -+ mov $d2#d,`16*7+4-64`($ctx) -+ lea ($d2,$d2,4),$d2 # *5 -+ mov $d1#d,`16*8+0-64`($ctx) -+ mov $d2#d,`16*8+4-64`($ctx) -+ 
-+ mov $r1,%rax -+ call __poly1305_block # r^3 -+ -+ mov \$0x3ffffff,%eax # save r^3 base 2^26 -+ mov $h0,$d1 -+ and $h0#d,%eax -+ shr \$26,$d1 -+ mov %eax,`16*0+12-64`($ctx) -+ -+ mov \$0x3ffffff,%edx -+ and $d1#d,%edx -+ mov %edx,`16*1+12-64`($ctx) -+ lea (%rdx,%rdx,4),%edx # *5 -+ shr \$26,$d1 -+ mov %edx,`16*2+12-64`($ctx) -+ -+ mov $h1,%rax -+ shl \$12,%rax -+ or $d1,%rax -+ and \$0x3ffffff,%eax -+ mov %eax,`16*3+12-64`($ctx) -+ lea (%rax,%rax,4),%eax # *5 -+ mov $h1,$d1 -+ mov %eax,`16*4+12-64`($ctx) -+ -+ mov \$0x3ffffff,%edx -+ shr \$14,$d1 -+ and $d1#d,%edx -+ mov %edx,`16*5+12-64`($ctx) -+ lea (%rdx,%rdx,4),%edx # *5 -+ shr \$26,$d1 -+ mov %edx,`16*6+12-64`($ctx) -+ -+ mov $h2,%rax -+ shl \$24,%rax -+ or %rax,$d1 -+ mov $d1#d,`16*7+12-64`($ctx) -+ lea ($d1,$d1,4),$d1 # *5 -+ mov $d1#d,`16*8+12-64`($ctx) -+ -+ mov $r1,%rax -+ call __poly1305_block # r^4 -+ -+ mov \$0x3ffffff,%eax # save r^4 base 2^26 -+ mov $h0,$d1 -+ and $h0#d,%eax -+ shr \$26,$d1 -+ mov %eax,`16*0+8-64`($ctx) -+ -+ mov \$0x3ffffff,%edx -+ and $d1#d,%edx -+ mov %edx,`16*1+8-64`($ctx) -+ lea (%rdx,%rdx,4),%edx # *5 -+ shr \$26,$d1 -+ mov %edx,`16*2+8-64`($ctx) -+ -+ mov $h1,%rax -+ shl \$12,%rax -+ or $d1,%rax -+ and \$0x3ffffff,%eax -+ mov %eax,`16*3+8-64`($ctx) -+ lea (%rax,%rax,4),%eax # *5 -+ mov $h1,$d1 -+ mov %eax,`16*4+8-64`($ctx) -+ -+ mov \$0x3ffffff,%edx -+ shr \$14,$d1 -+ and $d1#d,%edx -+ mov %edx,`16*5+8-64`($ctx) -+ lea (%rdx,%rdx,4),%edx # *5 -+ shr \$26,$d1 -+ mov %edx,`16*6+8-64`($ctx) -+ -+ mov $h2,%rax -+ shl \$24,%rax -+ or %rax,$d1 -+ mov $d1#d,`16*7+8-64`($ctx) -+ lea ($d1,$d1,4),$d1 # *5 -+ mov $d1#d,`16*8+8-64`($ctx) -+ -+ lea -48-64($ctx),$ctx # size [de-]optimization -+ ret -+.size __poly1305_init_avx,.-__poly1305_init_avx -+ -+.type poly1305_blocks_avx,\@function,4 -+.align 32 -+poly1305_blocks_avx: -+.cfi_startproc -+ mov 20($ctx),%r8d # is_base2_26 -+ cmp \$128,$len -+ jae .Lblocks_avx -+ test %r8d,%r8d -+ jz .Lblocks -+ -+.Lblocks_avx: -+ and \$-16,$len -+ jz 
.Lno_data_avx -+ -+ vzeroupper -+ -+ test %r8d,%r8d -+ jz .Lbase2_64_avx -+ -+ test \$31,$len -+ jz .Leven_avx -+ -+ push %rbx -+.cfi_push %rbx -+ push %rbp -+.cfi_push %rbp -+ push %r12 -+.cfi_push %r12 -+ push %r13 -+.cfi_push %r13 -+ push %r14 -+.cfi_push %r14 -+ push %r15 -+.cfi_push %r15 -+.Lblocks_avx_body: -+ -+ mov $len,%r15 # reassign $len -+ -+ mov 0($ctx),$d1 # load hash value -+ mov 8($ctx),$d2 -+ mov 16($ctx),$h2#d -+ -+ mov 24($ctx),$r0 # load r -+ mov 32($ctx),$s1 -+ -+ ################################# base 2^26 -> base 2^64 -+ mov $d1#d,$h0#d -+ and \$`-1*(1<<31)`,$d1 -+ mov $d2,$r1 # borrow $r1 -+ mov $d2#d,$h1#d -+ and \$`-1*(1<<31)`,$d2 -+ -+ shr \$6,$d1 -+ shl \$52,$r1 -+ add $d1,$h0 -+ shr \$12,$h1 -+ shr \$18,$d2 -+ add $r1,$h0 -+ adc $d2,$h1 -+ -+ mov $h2,$d1 -+ shl \$40,$d1 -+ shr \$24,$h2 -+ add $d1,$h1 -+ adc \$0,$h2 # can be partially reduced... -+ -+ mov \$-4,$d2 # ... so reduce -+ mov $h2,$d1 -+ and $h2,$d2 -+ shr \$2,$d1 -+ and \$3,$h2 -+ add $d2,$d1 # =*5 -+ add $d1,$h0 -+ adc \$0,$h1 -+ adc \$0,$h2 -+ -+ mov $s1,$r1 -+ mov $s1,%rax -+ shr \$2,$s1 -+ add $r1,$s1 # s1 = r1 + (r1 >> 2) -+ -+ add 0($inp),$h0 # accumulate input -+ adc 8($inp),$h1 -+ lea 16($inp),$inp -+ adc $padbit,$h2 -+ -+ call __poly1305_block -+ -+ test $padbit,$padbit # if $padbit is zero, -+ jz .Lstore_base2_64_avx # store hash in base 2^64 format -+ -+ ################################# base 2^64 -> base 2^26 -+ mov $h0,%rax -+ mov $h0,%rdx -+ shr \$52,$h0 -+ mov $h1,$r0 -+ mov $h1,$r1 -+ shr \$26,%rdx -+ and \$0x3ffffff,%rax # h[0] -+ shl \$12,$r0 -+ and \$0x3ffffff,%rdx # h[1] -+ shr \$14,$h1 -+ or $r0,$h0 -+ shl \$24,$h2 -+ and \$0x3ffffff,$h0 # h[2] -+ shr \$40,$r1 -+ and \$0x3ffffff,$h1 # h[3] -+ or $r1,$h2 # h[4] -+ -+ sub \$16,%r15 -+ jz .Lstore_base2_26_avx -+ -+ vmovd %rax#d,$H0 -+ vmovd %rdx#d,$H1 -+ vmovd $h0#d,$H2 -+ vmovd $h1#d,$H3 -+ vmovd $h2#d,$H4 -+ jmp .Lproceed_avx -+ -+.align 32 -+.Lstore_base2_64_avx: -+ mov $h0,0($ctx) -+ mov $h1,8($ctx) -+ 
mov $h2,16($ctx) # note that is_base2_26 is zeroed -+ jmp .Ldone_avx -+ -+.align 16 -+.Lstore_base2_26_avx: -+ mov %rax#d,0($ctx) # store hash value base 2^26 -+ mov %rdx#d,4($ctx) -+ mov $h0#d,8($ctx) -+ mov $h1#d,12($ctx) -+ mov $h2#d,16($ctx) -+.align 16 -+.Ldone_avx: -+ mov 0(%rsp),%r15 -+.cfi_restore %r15 -+ mov 8(%rsp),%r14 -+.cfi_restore %r14 -+ mov 16(%rsp),%r13 -+.cfi_restore %r13 -+ mov 24(%rsp),%r12 -+.cfi_restore %r12 -+ mov 32(%rsp),%rbp -+.cfi_restore %rbp -+ mov 40(%rsp),%rbx -+.cfi_restore %rbx -+ lea 48(%rsp),%rsp -+.cfi_adjust_cfa_offset -48 -+.Lno_data_avx: -+.Lblocks_avx_epilogue: -+ ret -+.cfi_endproc -+ -+.align 32 -+.Lbase2_64_avx: -+.cfi_startproc -+ push %rbx -+.cfi_push %rbx -+ push %rbp -+.cfi_push %rbp -+ push %r12 -+.cfi_push %r12 -+ push %r13 -+.cfi_push %r13 -+ push %r14 -+.cfi_push %r14 -+ push %r15 -+.cfi_push %r15 -+.Lbase2_64_avx_body: -+ -+ mov $len,%r15 # reassign $len -+ -+ mov 24($ctx),$r0 # load r -+ mov 32($ctx),$s1 -+ -+ mov 0($ctx),$h0 # load hash value -+ mov 8($ctx),$h1 -+ mov 16($ctx),$h2#d -+ -+ mov $s1,$r1 -+ mov $s1,%rax -+ shr \$2,$s1 -+ add $r1,$s1 # s1 = r1 + (r1 >> 2) -+ -+ test \$31,$len -+ jz .Linit_avx -+ -+ add 0($inp),$h0 # accumulate input -+ adc 8($inp),$h1 -+ lea 16($inp),$inp -+ adc $padbit,$h2 -+ sub \$16,%r15 -+ -+ call __poly1305_block -+ -+.Linit_avx: -+ ################################# base 2^64 -> base 2^26 -+ mov $h0,%rax -+ mov $h0,%rdx -+ shr \$52,$h0 -+ mov $h1,$d1 -+ mov $h1,$d2 -+ shr \$26,%rdx -+ and \$0x3ffffff,%rax # h[0] -+ shl \$12,$d1 -+ and \$0x3ffffff,%rdx # h[1] -+ shr \$14,$h1 -+ or $d1,$h0 -+ shl \$24,$h2 -+ and \$0x3ffffff,$h0 # h[2] -+ shr \$40,$d2 -+ and \$0x3ffffff,$h1 # h[3] -+ or $d2,$h2 # h[4] -+ -+ vmovd %rax#d,$H0 -+ vmovd %rdx#d,$H1 -+ vmovd $h0#d,$H2 -+ vmovd $h1#d,$H3 -+ vmovd $h2#d,$H4 -+ movl \$1,20($ctx) # set is_base2_26 -+ -+ call __poly1305_init_avx -+ -+.Lproceed_avx: -+ mov %r15,$len -+ -+ mov 0(%rsp),%r15 -+.cfi_restore %r15 -+ mov 8(%rsp),%r14 -+.cfi_restore 
%r14 -+ mov 16(%rsp),%r13 -+.cfi_restore %r13 -+ mov 24(%rsp),%r12 -+.cfi_restore %r12 -+ mov 32(%rsp),%rbp -+.cfi_restore %rbp -+ mov 40(%rsp),%rbx -+.cfi_restore %rbx -+ lea 48(%rsp),%rax -+ lea 48(%rsp),%rsp -+.cfi_adjust_cfa_offset -48 -+.Lbase2_64_avx_epilogue: -+ jmp .Ldo_avx -+.cfi_endproc -+ -+.align 32 -+.Leven_avx: -+.cfi_startproc -+ vmovd 4*0($ctx),$H0 # load hash value -+ vmovd 4*1($ctx),$H1 -+ vmovd 4*2($ctx),$H2 -+ vmovd 4*3($ctx),$H3 -+ vmovd 4*4($ctx),$H4 -+ -+.Ldo_avx: -+___ -+$code.=<<___ if (!$win64); -+ lea -0x58(%rsp),%r11 -+.cfi_def_cfa %r11,0x60 -+ sub \$0x178,%rsp -+___ -+$code.=<<___ if ($win64); -+ lea -0xf8(%rsp),%r11 -+ sub \$0x218,%rsp -+ vmovdqa %xmm6,0x50(%r11) -+ vmovdqa %xmm7,0x60(%r11) -+ vmovdqa %xmm8,0x70(%r11) -+ vmovdqa %xmm9,0x80(%r11) -+ vmovdqa %xmm10,0x90(%r11) -+ vmovdqa %xmm11,0xa0(%r11) -+ vmovdqa %xmm12,0xb0(%r11) -+ vmovdqa %xmm13,0xc0(%r11) -+ vmovdqa %xmm14,0xd0(%r11) -+ vmovdqa %xmm15,0xe0(%r11) -+.Ldo_avx_body: -+___ -+$code.=<<___; -+ sub \$64,$len -+ lea -32($inp),%rax -+ cmovc %rax,$inp -+ -+ vmovdqu `16*3`($ctx),$D4 # preload r0^2 -+ lea `16*3+64`($ctx),$ctx # size optimization -+ lea .Lconst(%rip),%rcx -+ -+ ################################################################ -+ # load input -+ vmovdqu 16*2($inp),$T0 -+ vmovdqu 16*3($inp),$T1 -+ vmovdqa 64(%rcx),$MASK # .Lmask26 -+ -+ vpsrldq \$6,$T0,$T2 # splat input -+ vpsrldq \$6,$T1,$T3 -+ vpunpckhqdq $T1,$T0,$T4 # 4 -+ vpunpcklqdq $T1,$T0,$T0 # 0:1 -+ vpunpcklqdq $T3,$T2,$T3 # 2:3 -+ -+ vpsrlq \$40,$T4,$T4 # 4 -+ vpsrlq \$26,$T0,$T1 -+ vpand $MASK,$T0,$T0 # 0 -+ vpsrlq \$4,$T3,$T2 -+ vpand $MASK,$T1,$T1 # 1 -+ vpsrlq \$30,$T3,$T3 -+ vpand $MASK,$T2,$T2 # 2 -+ vpand $MASK,$T3,$T3 # 3 -+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always -+ -+ jbe .Lskip_loop_avx -+ -+ # expand and copy pre-calculated table to stack -+ vmovdqu `16*1-64`($ctx),$D1 -+ vmovdqu `16*2-64`($ctx),$D2 -+ vpshufd \$0xEE,$D4,$D3 # 34xx -> 3434 -+ vpshufd \$0x44,$D4,$D0 # xx12 -> 1212 -+ 
vmovdqa $D3,-0x90(%r11) -+ vmovdqa $D0,0x00(%rsp) -+ vpshufd \$0xEE,$D1,$D4 -+ vmovdqu `16*3-64`($ctx),$D0 -+ vpshufd \$0x44,$D1,$D1 -+ vmovdqa $D4,-0x80(%r11) -+ vmovdqa $D1,0x10(%rsp) -+ vpshufd \$0xEE,$D2,$D3 -+ vmovdqu `16*4-64`($ctx),$D1 -+ vpshufd \$0x44,$D2,$D2 -+ vmovdqa $D3,-0x70(%r11) -+ vmovdqa $D2,0x20(%rsp) -+ vpshufd \$0xEE,$D0,$D4 -+ vmovdqu `16*5-64`($ctx),$D2 -+ vpshufd \$0x44,$D0,$D0 -+ vmovdqa $D4,-0x60(%r11) -+ vmovdqa $D0,0x30(%rsp) -+ vpshufd \$0xEE,$D1,$D3 -+ vmovdqu `16*6-64`($ctx),$D0 -+ vpshufd \$0x44,$D1,$D1 -+ vmovdqa $D3,-0x50(%r11) -+ vmovdqa $D1,0x40(%rsp) -+ vpshufd \$0xEE,$D2,$D4 -+ vmovdqu `16*7-64`($ctx),$D1 -+ vpshufd \$0x44,$D2,$D2 -+ vmovdqa $D4,-0x40(%r11) -+ vmovdqa $D2,0x50(%rsp) -+ vpshufd \$0xEE,$D0,$D3 -+ vmovdqu `16*8-64`($ctx),$D2 -+ vpshufd \$0x44,$D0,$D0 -+ vmovdqa $D3,-0x30(%r11) -+ vmovdqa $D0,0x60(%rsp) -+ vpshufd \$0xEE,$D1,$D4 -+ vpshufd \$0x44,$D1,$D1 -+ vmovdqa $D4,-0x20(%r11) -+ vmovdqa $D1,0x70(%rsp) -+ vpshufd \$0xEE,$D2,$D3 -+ vmovdqa 0x00(%rsp),$D4 # preload r0^2 -+ vpshufd \$0x44,$D2,$D2 -+ vmovdqa $D3,-0x10(%r11) -+ vmovdqa $D2,0x80(%rsp) -+ -+ jmp .Loop_avx -+ -+.align 32 -+.Loop_avx: -+ ################################################################ -+ # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2 -+ # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^3+inp[7]*r -+ # \___________________/ -+ # ((inp[0]*r^4+inp[2]*r^2+inp[4])*r^4+inp[6]*r^2+inp[8])*r^2 -+ # ((inp[1]*r^4+inp[3]*r^2+inp[5])*r^4+inp[7]*r^2+inp[9])*r -+ # \___________________/ \____________________/ -+ # -+ # Note that we start with inp[2:3]*r^2. This is because it -+ # doesn't depend on reduction in previous iteration. 
-+ ################################################################ -+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 -+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 -+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 -+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 -+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 -+ # -+ # though note that $Tx and $Hx are "reversed" in this section, -+ # and $D4 is preloaded with r0^2... -+ -+ vpmuludq $T0,$D4,$D0 # d0 = h0*r0 -+ vpmuludq $T1,$D4,$D1 # d1 = h1*r0 -+ vmovdqa $H2,0x20(%r11) # offload hash -+ vpmuludq $T2,$D4,$D2 # d3 = h2*r0 -+ vmovdqa 0x10(%rsp),$H2 # r1^2 -+ vpmuludq $T3,$D4,$D3 # d3 = h3*r0 -+ vpmuludq $T4,$D4,$D4 # d4 = h4*r0 -+ -+ vmovdqa $H0,0x00(%r11) # -+ vpmuludq 0x20(%rsp),$T4,$H0 # h4*s1 -+ vmovdqa $H1,0x10(%r11) # -+ vpmuludq $T3,$H2,$H1 # h3*r1 -+ vpaddq $H0,$D0,$D0 # d0 += h4*s1 -+ vpaddq $H1,$D4,$D4 # d4 += h3*r1 -+ vmovdqa $H3,0x30(%r11) # -+ vpmuludq $T2,$H2,$H0 # h2*r1 -+ vpmuludq $T1,$H2,$H1 # h1*r1 -+ vpaddq $H0,$D3,$D3 # d3 += h2*r1 -+ vmovdqa 0x30(%rsp),$H3 # r2^2 -+ vpaddq $H1,$D2,$D2 # d2 += h1*r1 -+ vmovdqa $H4,0x40(%r11) # -+ vpmuludq $T0,$H2,$H2 # h0*r1 -+ vpmuludq $T2,$H3,$H0 # h2*r2 -+ vpaddq $H2,$D1,$D1 # d1 += h0*r1 -+ -+ vmovdqa 0x40(%rsp),$H4 # s2^2 -+ vpaddq $H0,$D4,$D4 # d4 += h2*r2 -+ vpmuludq $T1,$H3,$H1 # h1*r2 -+ vpmuludq $T0,$H3,$H3 # h0*r2 -+ vpaddq $H1,$D3,$D3 # d3 += h1*r2 -+ vmovdqa 0x50(%rsp),$H2 # r3^2 -+ vpaddq $H3,$D2,$D2 # d2 += h0*r2 -+ vpmuludq $T4,$H4,$H0 # h4*s2 -+ vpmuludq $T3,$H4,$H4 # h3*s2 -+ vpaddq $H0,$D1,$D1 # d1 += h4*s2 -+ vmovdqa 0x60(%rsp),$H3 # s3^2 -+ vpaddq $H4,$D0,$D0 # d0 += h3*s2 -+ -+ vmovdqa 0x80(%rsp),$H4 # s4^2 -+ vpmuludq $T1,$H2,$H1 # h1*r3 -+ vpmuludq $T0,$H2,$H2 # h0*r3 -+ vpaddq $H1,$D4,$D4 # d4 += h1*r3 -+ vpaddq $H2,$D3,$D3 # d3 += h0*r3 -+ vpmuludq $T4,$H3,$H0 # h4*s3 -+ vpmuludq $T3,$H3,$H1 # h3*s3 -+ vpaddq $H0,$D2,$D2 # d2 += h4*s3 -+ vmovdqu 16*0($inp),$H0 # load input -+ vpaddq $H1,$D1,$D1 # d1 += h3*s3 -+ vpmuludq 
$T2,$H3,$H3 # h2*s3 -+ vpmuludq $T2,$H4,$T2 # h2*s4 -+ vpaddq $H3,$D0,$D0 # d0 += h2*s3 -+ -+ vmovdqu 16*1($inp),$H1 # -+ vpaddq $T2,$D1,$D1 # d1 += h2*s4 -+ vpmuludq $T3,$H4,$T3 # h3*s4 -+ vpmuludq $T4,$H4,$T4 # h4*s4 -+ vpsrldq \$6,$H0,$H2 # splat input -+ vpaddq $T3,$D2,$D2 # d2 += h3*s4 -+ vpaddq $T4,$D3,$D3 # d3 += h4*s4 -+ vpsrldq \$6,$H1,$H3 # -+ vpmuludq 0x70(%rsp),$T0,$T4 # h0*r4 -+ vpmuludq $T1,$H4,$T0 # h1*s4 -+ vpunpckhqdq $H1,$H0,$H4 # 4 -+ vpaddq $T4,$D4,$D4 # d4 += h0*r4 -+ vmovdqa -0x90(%r11),$T4 # r0^4 -+ vpaddq $T0,$D0,$D0 # d0 += h1*s4 -+ -+ vpunpcklqdq $H1,$H0,$H0 # 0:1 -+ vpunpcklqdq $H3,$H2,$H3 # 2:3 -+ -+ #vpsrlq \$40,$H4,$H4 # 4 -+ vpsrldq \$`40/8`,$H4,$H4 # 4 -+ vpsrlq \$26,$H0,$H1 -+ vpand $MASK,$H0,$H0 # 0 -+ vpsrlq \$4,$H3,$H2 -+ vpand $MASK,$H1,$H1 # 1 -+ vpand 0(%rcx),$H4,$H4 # .Lmask24 -+ vpsrlq \$30,$H3,$H3 -+ vpand $MASK,$H2,$H2 # 2 -+ vpand $MASK,$H3,$H3 # 3 -+ vpor 32(%rcx),$H4,$H4 # padbit, yes, always -+ -+ vpaddq 0x00(%r11),$H0,$H0 # add hash value -+ vpaddq 0x10(%r11),$H1,$H1 -+ vpaddq 0x20(%r11),$H2,$H2 -+ vpaddq 0x30(%r11),$H3,$H3 -+ vpaddq 0x40(%r11),$H4,$H4 -+ -+ lea 16*2($inp),%rax -+ lea 16*4($inp),$inp -+ sub \$64,$len -+ cmovc %rax,$inp -+ -+ ################################################################ -+ # Now we accumulate (inp[0:1]+hash)*r^4 -+ ################################################################ -+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 -+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 -+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 -+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 -+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 -+ -+ vpmuludq $H0,$T4,$T0 # h0*r0 -+ vpmuludq $H1,$T4,$T1 # h1*r0 -+ vpaddq $T0,$D0,$D0 -+ vpaddq $T1,$D1,$D1 -+ vmovdqa -0x80(%r11),$T2 # r1^4 -+ vpmuludq $H2,$T4,$T0 # h2*r0 -+ vpmuludq $H3,$T4,$T1 # h3*r0 -+ vpaddq $T0,$D2,$D2 -+ vpaddq $T1,$D3,$D3 -+ vpmuludq $H4,$T4,$T4 # h4*r0 -+ vpmuludq -0x70(%r11),$H4,$T0 # h4*s1 -+ vpaddq $T4,$D4,$D4 -+ -+ 
vpaddq $T0,$D0,$D0 # d0 += h4*s1 -+ vpmuludq $H2,$T2,$T1 # h2*r1 -+ vpmuludq $H3,$T2,$T0 # h3*r1 -+ vpaddq $T1,$D3,$D3 # d3 += h2*r1 -+ vmovdqa -0x60(%r11),$T3 # r2^4 -+ vpaddq $T0,$D4,$D4 # d4 += h3*r1 -+ vpmuludq $H1,$T2,$T1 # h1*r1 -+ vpmuludq $H0,$T2,$T2 # h0*r1 -+ vpaddq $T1,$D2,$D2 # d2 += h1*r1 -+ vpaddq $T2,$D1,$D1 # d1 += h0*r1 -+ -+ vmovdqa -0x50(%r11),$T4 # s2^4 -+ vpmuludq $H2,$T3,$T0 # h2*r2 -+ vpmuludq $H1,$T3,$T1 # h1*r2 -+ vpaddq $T0,$D4,$D4 # d4 += h2*r2 -+ vpaddq $T1,$D3,$D3 # d3 += h1*r2 -+ vmovdqa -0x40(%r11),$T2 # r3^4 -+ vpmuludq $H0,$T3,$T3 # h0*r2 -+ vpmuludq $H4,$T4,$T0 # h4*s2 -+ vpaddq $T3,$D2,$D2 # d2 += h0*r2 -+ vpaddq $T0,$D1,$D1 # d1 += h4*s2 -+ vmovdqa -0x30(%r11),$T3 # s3^4 -+ vpmuludq $H3,$T4,$T4 # h3*s2 -+ vpmuludq $H1,$T2,$T1 # h1*r3 -+ vpaddq $T4,$D0,$D0 # d0 += h3*s2 -+ -+ vmovdqa -0x10(%r11),$T4 # s4^4 -+ vpaddq $T1,$D4,$D4 # d4 += h1*r3 -+ vpmuludq $H0,$T2,$T2 # h0*r3 -+ vpmuludq $H4,$T3,$T0 # h4*s3 -+ vpaddq $T2,$D3,$D3 # d3 += h0*r3 -+ vpaddq $T0,$D2,$D2 # d2 += h4*s3 -+ vmovdqu 16*2($inp),$T0 # load input -+ vpmuludq $H3,$T3,$T2 # h3*s3 -+ vpmuludq $H2,$T3,$T3 # h2*s3 -+ vpaddq $T2,$D1,$D1 # d1 += h3*s3 -+ vmovdqu 16*3($inp),$T1 # -+ vpaddq $T3,$D0,$D0 # d0 += h2*s3 -+ -+ vpmuludq $H2,$T4,$H2 # h2*s4 -+ vpmuludq $H3,$T4,$H3 # h3*s4 -+ vpsrldq \$6,$T0,$T2 # splat input -+ vpaddq $H2,$D1,$D1 # d1 += h2*s4 -+ vpmuludq $H4,$T4,$H4 # h4*s4 -+ vpsrldq \$6,$T1,$T3 # -+ vpaddq $H3,$D2,$H2 # h2 = d2 + h3*s4 -+ vpaddq $H4,$D3,$H3 # h3 = d3 + h4*s4 -+ vpmuludq -0x20(%r11),$H0,$H4 # h0*r4 -+ vpmuludq $H1,$T4,$H0 -+ vpunpckhqdq $T1,$T0,$T4 # 4 -+ vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4 -+ vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4 -+ -+ vpunpcklqdq $T1,$T0,$T0 # 0:1 -+ vpunpcklqdq $T3,$T2,$T3 # 2:3 -+ -+ #vpsrlq \$40,$T4,$T4 # 4 -+ vpsrldq \$`40/8`,$T4,$T4 # 4 -+ vpsrlq \$26,$T0,$T1 -+ vmovdqa 0x00(%rsp),$D4 # preload r0^2 -+ vpand $MASK,$T0,$T0 # 0 -+ vpsrlq \$4,$T3,$T2 -+ vpand $MASK,$T1,$T1 # 1 -+ vpand 0(%rcx),$T4,$T4 # .Lmask24 -+ vpsrlq 
\$30,$T3,$T3 -+ vpand $MASK,$T2,$T2 # 2 -+ vpand $MASK,$T3,$T3 # 3 -+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always -+ -+ ################################################################ -+ # lazy reduction as discussed in "NEON crypto" by D.J. Bernstein -+ # and P. Schwabe -+ -+ vpsrlq \$26,$H3,$D3 -+ vpand $MASK,$H3,$H3 -+ vpaddq $D3,$H4,$H4 # h3 -> h4 -+ -+ vpsrlq \$26,$H0,$D0 -+ vpand $MASK,$H0,$H0 -+ vpaddq $D0,$D1,$H1 # h0 -> h1 -+ -+ vpsrlq \$26,$H4,$D0 -+ vpand $MASK,$H4,$H4 -+ -+ vpsrlq \$26,$H1,$D1 -+ vpand $MASK,$H1,$H1 -+ vpaddq $D1,$H2,$H2 # h1 -> h2 -+ -+ vpaddq $D0,$H0,$H0 -+ vpsllq \$2,$D0,$D0 -+ vpaddq $D0,$H0,$H0 # h4 -> h0 -+ -+ vpsrlq \$26,$H2,$D2 -+ vpand $MASK,$H2,$H2 -+ vpaddq $D2,$H3,$H3 # h2 -> h3 -+ -+ vpsrlq \$26,$H0,$D0 -+ vpand $MASK,$H0,$H0 -+ vpaddq $D0,$H1,$H1 # h0 -> h1 -+ -+ vpsrlq \$26,$H3,$D3 -+ vpand $MASK,$H3,$H3 -+ vpaddq $D3,$H4,$H4 # h3 -> h4 -+ -+ ja .Loop_avx -+ -+.Lskip_loop_avx: -+ ################################################################ -+ # multiply (inp[0:1]+hash) or inp[2:3] by r^2:r^1 -+ -+ vpshufd \$0x10,$D4,$D4 # r0^n, xx12 -> x1x2 -+ add \$32,$len -+ jnz .Long_tail_avx -+ -+ vpaddq $H2,$T2,$T2 -+ vpaddq $H0,$T0,$T0 -+ vpaddq $H1,$T1,$T1 -+ vpaddq $H3,$T3,$T3 -+ vpaddq $H4,$T4,$T4 -+ -+.Long_tail_avx: -+ vmovdqa $H2,0x20(%r11) -+ vmovdqa $H0,0x00(%r11) -+ vmovdqa $H1,0x10(%r11) -+ vmovdqa $H3,0x30(%r11) -+ vmovdqa $H4,0x40(%r11) -+ -+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 -+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 -+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 -+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 -+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 -+ -+ vpmuludq $T2,$D4,$D2 # d2 = h2*r0 -+ vpmuludq $T0,$D4,$D0 # d0 = h0*r0 -+ vpshufd \$0x10,`16*1-64`($ctx),$H2 # r1^n -+ vpmuludq $T1,$D4,$D1 # d1 = h1*r0 -+ vpmuludq $T3,$D4,$D3 # d3 = h3*r0 -+ vpmuludq $T4,$D4,$D4 # d4 = h4*r0 -+ -+ vpmuludq $T3,$H2,$H0 # h3*r1 -+ vpaddq $H0,$D4,$D4 # d4 += h3*r1 -+ vpshufd 
\$0x10,`16*2-64`($ctx),$H3 # s1^n -+ vpmuludq $T2,$H2,$H1 # h2*r1 -+ vpaddq $H1,$D3,$D3 # d3 += h2*r1 -+ vpshufd \$0x10,`16*3-64`($ctx),$H4 # r2^n -+ vpmuludq $T1,$H2,$H0 # h1*r1 -+ vpaddq $H0,$D2,$D2 # d2 += h1*r1 -+ vpmuludq $T0,$H2,$H2 # h0*r1 -+ vpaddq $H2,$D1,$D1 # d1 += h0*r1 -+ vpmuludq $T4,$H3,$H3 # h4*s1 -+ vpaddq $H3,$D0,$D0 # d0 += h4*s1 -+ -+ vpshufd \$0x10,`16*4-64`($ctx),$H2 # s2^n -+ vpmuludq $T2,$H4,$H1 # h2*r2 -+ vpaddq $H1,$D4,$D4 # d4 += h2*r2 -+ vpmuludq $T1,$H4,$H0 # h1*r2 -+ vpaddq $H0,$D3,$D3 # d3 += h1*r2 -+ vpshufd \$0x10,`16*5-64`($ctx),$H3 # r3^n -+ vpmuludq $T0,$H4,$H4 # h0*r2 -+ vpaddq $H4,$D2,$D2 # d2 += h0*r2 -+ vpmuludq $T4,$H2,$H1 # h4*s2 -+ vpaddq $H1,$D1,$D1 # d1 += h4*s2 -+ vpshufd \$0x10,`16*6-64`($ctx),$H4 # s3^n -+ vpmuludq $T3,$H2,$H2 # h3*s2 -+ vpaddq $H2,$D0,$D0 # d0 += h3*s2 -+ -+ vpmuludq $T1,$H3,$H0 # h1*r3 -+ vpaddq $H0,$D4,$D4 # d4 += h1*r3 -+ vpmuludq $T0,$H3,$H3 # h0*r3 -+ vpaddq $H3,$D3,$D3 # d3 += h0*r3 -+ vpshufd \$0x10,`16*7-64`($ctx),$H2 # r4^n -+ vpmuludq $T4,$H4,$H1 # h4*s3 -+ vpaddq $H1,$D2,$D2 # d2 += h4*s3 -+ vpshufd \$0x10,`16*8-64`($ctx),$H3 # s4^n -+ vpmuludq $T3,$H4,$H0 # h3*s3 -+ vpaddq $H0,$D1,$D1 # d1 += h3*s3 -+ vpmuludq $T2,$H4,$H4 # h2*s3 -+ vpaddq $H4,$D0,$D0 # d0 += h2*s3 -+ -+ vpmuludq $T0,$H2,$H2 # h0*r4 -+ vpaddq $H2,$D4,$D4 # h4 = d4 + h0*r4 -+ vpmuludq $T4,$H3,$H1 # h4*s4 -+ vpaddq $H1,$D3,$D3 # h3 = d3 + h4*s4 -+ vpmuludq $T3,$H3,$H0 # h3*s4 -+ vpaddq $H0,$D2,$D2 # h2 = d2 + h3*s4 -+ vpmuludq $T2,$H3,$H1 # h2*s4 -+ vpaddq $H1,$D1,$D1 # h1 = d1 + h2*s4 -+ vpmuludq $T1,$H3,$H3 # h1*s4 -+ vpaddq $H3,$D0,$D0 # h0 = d0 + h1*s4 -+ -+ jz .Lshort_tail_avx -+ -+ vmovdqu 16*0($inp),$H0 # load input -+ vmovdqu 16*1($inp),$H1 -+ -+ vpsrldq \$6,$H0,$H2 # splat input -+ vpsrldq \$6,$H1,$H3 -+ vpunpckhqdq $H1,$H0,$H4 # 4 -+ vpunpcklqdq $H1,$H0,$H0 # 0:1 -+ vpunpcklqdq $H3,$H2,$H3 # 2:3 -+ -+ vpsrlq \$40,$H4,$H4 # 4 -+ vpsrlq \$26,$H0,$H1 -+ vpand $MASK,$H0,$H0 # 0 -+ vpsrlq \$4,$H3,$H2 -+ vpand 
$MASK,$H1,$H1 # 1 -+ vpsrlq \$30,$H3,$H3 -+ vpand $MASK,$H2,$H2 # 2 -+ vpand $MASK,$H3,$H3 # 3 -+ vpor 32(%rcx),$H4,$H4 # padbit, yes, always -+ -+ vpshufd \$0x32,`16*0-64`($ctx),$T4 # r0^n, 34xx -> x3x4 -+ vpaddq 0x00(%r11),$H0,$H0 -+ vpaddq 0x10(%r11),$H1,$H1 -+ vpaddq 0x20(%r11),$H2,$H2 -+ vpaddq 0x30(%r11),$H3,$H3 -+ vpaddq 0x40(%r11),$H4,$H4 -+ -+ ################################################################ -+ # multiply (inp[0:1]+hash) by r^4:r^3 and accumulate -+ -+ vpmuludq $H0,$T4,$T0 # h0*r0 -+ vpaddq $T0,$D0,$D0 # d0 += h0*r0 -+ vpmuludq $H1,$T4,$T1 # h1*r0 -+ vpaddq $T1,$D1,$D1 # d1 += h1*r0 -+ vpmuludq $H2,$T4,$T0 # h2*r0 -+ vpaddq $T0,$D2,$D2 # d2 += h2*r0 -+ vpshufd \$0x32,`16*1-64`($ctx),$T2 # r1^n -+ vpmuludq $H3,$T4,$T1 # h3*r0 -+ vpaddq $T1,$D3,$D3 # d3 += h3*r0 -+ vpmuludq $H4,$T4,$T4 # h4*r0 -+ vpaddq $T4,$D4,$D4 # d4 += h4*r0 -+ -+ vpmuludq $H3,$T2,$T0 # h3*r1 -+ vpaddq $T0,$D4,$D4 # d4 += h3*r1 -+ vpshufd \$0x32,`16*2-64`($ctx),$T3 # s1 -+ vpmuludq $H2,$T2,$T1 # h2*r1 -+ vpaddq $T1,$D3,$D3 # d3 += h2*r1 -+ vpshufd \$0x32,`16*3-64`($ctx),$T4 # r2 -+ vpmuludq $H1,$T2,$T0 # h1*r1 -+ vpaddq $T0,$D2,$D2 # d2 += h1*r1 -+ vpmuludq $H0,$T2,$T2 # h0*r1 -+ vpaddq $T2,$D1,$D1 # d1 += h0*r1 -+ vpmuludq $H4,$T3,$T3 # h4*s1 -+ vpaddq $T3,$D0,$D0 # d0 += h4*s1 -+ -+ vpshufd \$0x32,`16*4-64`($ctx),$T2 # s2 -+ vpmuludq $H2,$T4,$T1 # h2*r2 -+ vpaddq $T1,$D4,$D4 # d4 += h2*r2 -+ vpmuludq $H1,$T4,$T0 # h1*r2 -+ vpaddq $T0,$D3,$D3 # d3 += h1*r2 -+ vpshufd \$0x32,`16*5-64`($ctx),$T3 # r3 -+ vpmuludq $H0,$T4,$T4 # h0*r2 -+ vpaddq $T4,$D2,$D2 # d2 += h0*r2 -+ vpmuludq $H4,$T2,$T1 # h4*s2 -+ vpaddq $T1,$D1,$D1 # d1 += h4*s2 -+ vpshufd \$0x32,`16*6-64`($ctx),$T4 # s3 -+ vpmuludq $H3,$T2,$T2 # h3*s2 -+ vpaddq $T2,$D0,$D0 # d0 += h3*s2 -+ -+ vpmuludq $H1,$T3,$T0 # h1*r3 -+ vpaddq $T0,$D4,$D4 # d4 += h1*r3 -+ vpmuludq $H0,$T3,$T3 # h0*r3 -+ vpaddq $T3,$D3,$D3 # d3 += h0*r3 -+ vpshufd \$0x32,`16*7-64`($ctx),$T2 # r4 -+ vpmuludq $H4,$T4,$T1 # h4*s3 -+ vpaddq 
$T1,$D2,$D2 # d2 += h4*s3 -+ vpshufd \$0x32,`16*8-64`($ctx),$T3 # s4 -+ vpmuludq $H3,$T4,$T0 # h3*s3 -+ vpaddq $T0,$D1,$D1 # d1 += h3*s3 -+ vpmuludq $H2,$T4,$T4 # h2*s3 -+ vpaddq $T4,$D0,$D0 # d0 += h2*s3 -+ -+ vpmuludq $H0,$T2,$T2 # h0*r4 -+ vpaddq $T2,$D4,$D4 # d4 += h0*r4 -+ vpmuludq $H4,$T3,$T1 # h4*s4 -+ vpaddq $T1,$D3,$D3 # d3 += h4*s4 -+ vpmuludq $H3,$T3,$T0 # h3*s4 -+ vpaddq $T0,$D2,$D2 # d2 += h3*s4 -+ vpmuludq $H2,$T3,$T1 # h2*s4 -+ vpaddq $T1,$D1,$D1 # d1 += h2*s4 -+ vpmuludq $H1,$T3,$T3 # h1*s4 -+ vpaddq $T3,$D0,$D0 # d0 += h1*s4 -+ -+.Lshort_tail_avx: -+ ################################################################ -+ # horizontal addition -+ -+ vpsrldq \$8,$D4,$T4 -+ vpsrldq \$8,$D3,$T3 -+ vpsrldq \$8,$D1,$T1 -+ vpsrldq \$8,$D0,$T0 -+ vpsrldq \$8,$D2,$T2 -+ vpaddq $T3,$D3,$D3 -+ vpaddq $T4,$D4,$D4 -+ vpaddq $T0,$D0,$D0 -+ vpaddq $T1,$D1,$D1 -+ vpaddq $T2,$D2,$D2 -+ -+ ################################################################ -+ # lazy reduction -+ -+ vpsrlq \$26,$D3,$H3 -+ vpand $MASK,$D3,$D3 -+ vpaddq $H3,$D4,$D4 # h3 -> h4 -+ -+ vpsrlq \$26,$D0,$H0 -+ vpand $MASK,$D0,$D0 -+ vpaddq $H0,$D1,$D1 # h0 -> h1 -+ -+ vpsrlq \$26,$D4,$H4 -+ vpand $MASK,$D4,$D4 -+ -+ vpsrlq \$26,$D1,$H1 -+ vpand $MASK,$D1,$D1 -+ vpaddq $H1,$D2,$D2 # h1 -> h2 -+ -+ vpaddq $H4,$D0,$D0 -+ vpsllq \$2,$H4,$H4 -+ vpaddq $H4,$D0,$D0 # h4 -> h0 -+ -+ vpsrlq \$26,$D2,$H2 -+ vpand $MASK,$D2,$D2 -+ vpaddq $H2,$D3,$D3 # h2 -> h3 -+ -+ vpsrlq \$26,$D0,$H0 -+ vpand $MASK,$D0,$D0 -+ vpaddq $H0,$D1,$D1 # h0 -> h1 -+ -+ vpsrlq \$26,$D3,$H3 -+ vpand $MASK,$D3,$D3 -+ vpaddq $H3,$D4,$D4 # h3 -> h4 -+ -+ vmovd $D0,`4*0-48-64`($ctx) # save partially reduced -+ vmovd $D1,`4*1-48-64`($ctx) -+ vmovd $D2,`4*2-48-64`($ctx) -+ vmovd $D3,`4*3-48-64`($ctx) -+ vmovd $D4,`4*4-48-64`($ctx) -+___ -+$code.=<<___ if ($win64); -+ vmovdqa 0x50(%r11),%xmm6 -+ vmovdqa 0x60(%r11),%xmm7 -+ vmovdqa 0x70(%r11),%xmm8 -+ vmovdqa 0x80(%r11),%xmm9 -+ vmovdqa 0x90(%r11),%xmm10 -+ vmovdqa 0xa0(%r11),%xmm11 -+ 
vmovdqa 0xb0(%r11),%xmm12 -+ vmovdqa 0xc0(%r11),%xmm13 -+ vmovdqa 0xd0(%r11),%xmm14 -+ vmovdqa 0xe0(%r11),%xmm15 -+ lea 0xf8(%r11),%rsp -+.Ldo_avx_epilogue: -+___ -+$code.=<<___ if (!$win64); -+ lea 0x58(%r11),%rsp -+.cfi_def_cfa %rsp,8 -+___ -+$code.=<<___; -+ vzeroupper -+ ret -+.cfi_endproc -+.size poly1305_blocks_avx,.-poly1305_blocks_avx -+ -+.type poly1305_emit_avx,\@function,3 -+.align 32 -+poly1305_emit_avx: -+ cmpl \$0,20($ctx) # is_base2_26? -+ je .Lemit -+ -+ mov 0($ctx),%eax # load hash value base 2^26 -+ mov 4($ctx),%ecx -+ mov 8($ctx),%r8d -+ mov 12($ctx),%r11d -+ mov 16($ctx),%r10d -+ -+ shl \$26,%rcx # base 2^26 -> base 2^64 -+ mov %r8,%r9 -+ shl \$52,%r8 -+ add %rcx,%rax -+ shr \$12,%r9 -+ add %rax,%r8 # h0 -+ adc \$0,%r9 -+ -+ shl \$14,%r11 -+ mov %r10,%rax -+ shr \$24,%r10 -+ add %r11,%r9 -+ shl \$40,%rax -+ add %rax,%r9 # h1 -+ adc \$0,%r10 # h2 -+ -+ mov %r10,%rax # could be partially reduced, so reduce -+ mov %r10,%rcx -+ and \$3,%r10 -+ shr \$2,%rax -+ and \$-4,%rcx -+ add %rcx,%rax -+ add %rax,%r8 -+ adc \$0,%r9 -+ adc \$0,%r10 -+ -+ mov %r8,%rax -+ add \$5,%r8 # compare to modulus -+ mov %r9,%rcx -+ adc \$0,%r9 -+ adc \$0,%r10 -+ shr \$2,%r10 # did 130-bit value overflow? 
-+ cmovnz %r8,%rax -+ cmovnz %r9,%rcx -+ -+ add 0($nonce),%rax # accumulate nonce -+ adc 8($nonce),%rcx -+ mov %rax,0($mac) # write result -+ mov %rcx,8($mac) -+ -+ ret -+.size poly1305_emit_avx,.-poly1305_emit_avx -+___ -+ -+if ($avx>1) { -+my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) = -+ map("%ymm$_",(0..15)); -+my $S4=$MASK; -+ -+$code.=<<___; -+.type poly1305_blocks_avx2,\@function,4 -+.align 32 -+poly1305_blocks_avx2: -+.cfi_startproc -+ mov 20($ctx),%r8d # is_base2_26 -+ cmp \$128,$len -+ jae .Lblocks_avx2 -+ test %r8d,%r8d -+ jz .Lblocks -+ -+.Lblocks_avx2: -+ and \$-16,$len -+ jz .Lno_data_avx2 -+ -+ vzeroupper -+ -+ test %r8d,%r8d -+ jz .Lbase2_64_avx2 -+ -+ test \$63,$len -+ jz .Leven_avx2 -+ -+ push %rbx -+.cfi_push %rbx -+ push %rbp -+.cfi_push %rbp -+ push %r12 -+.cfi_push %r12 -+ push %r13 -+.cfi_push %r13 -+ push %r14 -+.cfi_push %r14 -+ push %r15 -+.cfi_push %r15 -+.Lblocks_avx2_body: -+ -+ mov $len,%r15 # reassign $len -+ -+ mov 0($ctx),$d1 # load hash value -+ mov 8($ctx),$d2 -+ mov 16($ctx),$h2#d -+ -+ mov 24($ctx),$r0 # load r -+ mov 32($ctx),$s1 -+ -+ ################################# base 2^26 -> base 2^64 -+ mov $d1#d,$h0#d -+ and \$`-1*(1<<31)`,$d1 -+ mov $d2,$r1 # borrow $r1 -+ mov $d2#d,$h1#d -+ and \$`-1*(1<<31)`,$d2 -+ -+ shr \$6,$d1 -+ shl \$52,$r1 -+ add $d1,$h0 -+ shr \$12,$h1 -+ shr \$18,$d2 -+ add $r1,$h0 -+ adc $d2,$h1 -+ -+ mov $h2,$d1 -+ shl \$40,$d1 -+ shr \$24,$h2 -+ add $d1,$h1 -+ adc \$0,$h2 # can be partially reduced... -+ -+ mov \$-4,$d2 # ... 
so reduce -+ mov $h2,$d1 -+ and $h2,$d2 -+ shr \$2,$d1 -+ and \$3,$h2 -+ add $d2,$d1 # =*5 -+ add $d1,$h0 -+ adc \$0,$h1 -+ adc \$0,$h2 -+ -+ mov $s1,$r1 -+ mov $s1,%rax -+ shr \$2,$s1 -+ add $r1,$s1 # s1 = r1 + (r1 >> 2) -+ -+.Lbase2_26_pre_avx2: -+ add 0($inp),$h0 # accumulate input -+ adc 8($inp),$h1 -+ lea 16($inp),$inp -+ adc $padbit,$h2 -+ sub \$16,%r15 -+ -+ call __poly1305_block -+ mov $r1,%rax -+ -+ test \$63,%r15 -+ jnz .Lbase2_26_pre_avx2 -+ -+ test $padbit,$padbit # if $padbit is zero, -+ jz .Lstore_base2_64_avx2 # store hash in base 2^64 format -+ -+ ################################# base 2^64 -> base 2^26 -+ mov $h0,%rax -+ mov $h0,%rdx -+ shr \$52,$h0 -+ mov $h1,$r0 -+ mov $h1,$r1 -+ shr \$26,%rdx -+ and \$0x3ffffff,%rax # h[0] -+ shl \$12,$r0 -+ and \$0x3ffffff,%rdx # h[1] -+ shr \$14,$h1 -+ or $r0,$h0 -+ shl \$24,$h2 -+ and \$0x3ffffff,$h0 # h[2] -+ shr \$40,$r1 -+ and \$0x3ffffff,$h1 # h[3] -+ or $r1,$h2 # h[4] -+ -+ test %r15,%r15 -+ jz .Lstore_base2_26_avx2 -+ -+ vmovd %rax#d,%x#$H0 -+ vmovd %rdx#d,%x#$H1 -+ vmovd $h0#d,%x#$H2 -+ vmovd $h1#d,%x#$H3 -+ vmovd $h2#d,%x#$H4 -+ jmp .Lproceed_avx2 -+ -+.align 32 -+.Lstore_base2_64_avx2: -+ mov $h0,0($ctx) -+ mov $h1,8($ctx) -+ mov $h2,16($ctx) # note that is_base2_26 is zeroed -+ jmp .Ldone_avx2 -+ -+.align 16 -+.Lstore_base2_26_avx2: -+ mov %rax#d,0($ctx) # store hash value base 2^26 -+ mov %rdx#d,4($ctx) -+ mov $h0#d,8($ctx) -+ mov $h1#d,12($ctx) -+ mov $h2#d,16($ctx) -+.align 16 -+.Ldone_avx2: -+ mov 0(%rsp),%r15 -+.cfi_restore %r15 -+ mov 8(%rsp),%r14 -+.cfi_restore %r14 -+ mov 16(%rsp),%r13 -+.cfi_restore %r13 -+ mov 24(%rsp),%r12 -+.cfi_restore %r12 -+ mov 32(%rsp),%rbp -+.cfi_restore %rbp -+ mov 40(%rsp),%rbx -+.cfi_restore %rbx -+ lea 48(%rsp),%rsp -+.cfi_adjust_cfa_offset -48 -+.Lno_data_avx2: -+.Lblocks_avx2_epilogue: -+ ret -+.cfi_endproc -+ -+.align 32 -+.Lbase2_64_avx2: -+.cfi_startproc -+ push %rbx -+.cfi_push %rbx -+ push %rbp -+.cfi_push %rbp -+ push %r12 -+.cfi_push %r12 -+ push %r13 
-+.cfi_push %r13 -+ push %r14 -+.cfi_push %r14 -+ push %r15 -+.cfi_push %r15 -+.Lbase2_64_avx2_body: -+ -+ mov $len,%r15 # reassign $len -+ -+ mov 24($ctx),$r0 # load r -+ mov 32($ctx),$s1 -+ -+ mov 0($ctx),$h0 # load hash value -+ mov 8($ctx),$h1 -+ mov 16($ctx),$h2#d -+ -+ mov $s1,$r1 -+ mov $s1,%rax -+ shr \$2,$s1 -+ add $r1,$s1 # s1 = r1 + (r1 >> 2) -+ -+ test \$63,$len -+ jz .Linit_avx2 -+ -+.Lbase2_64_pre_avx2: -+ add 0($inp),$h0 # accumulate input -+ adc 8($inp),$h1 -+ lea 16($inp),$inp -+ adc $padbit,$h2 -+ sub \$16,%r15 -+ -+ call __poly1305_block -+ mov $r1,%rax -+ -+ test \$63,%r15 -+ jnz .Lbase2_64_pre_avx2 -+ -+.Linit_avx2: -+ ################################# base 2^64 -> base 2^26 -+ mov $h0,%rax -+ mov $h0,%rdx -+ shr \$52,$h0 -+ mov $h1,$d1 -+ mov $h1,$d2 -+ shr \$26,%rdx -+ and \$0x3ffffff,%rax # h[0] -+ shl \$12,$d1 -+ and \$0x3ffffff,%rdx # h[1] -+ shr \$14,$h1 -+ or $d1,$h0 -+ shl \$24,$h2 -+ and \$0x3ffffff,$h0 # h[2] -+ shr \$40,$d2 -+ and \$0x3ffffff,$h1 # h[3] -+ or $d2,$h2 # h[4] -+ -+ vmovd %rax#d,%x#$H0 -+ vmovd %rdx#d,%x#$H1 -+ vmovd $h0#d,%x#$H2 -+ vmovd $h1#d,%x#$H3 -+ vmovd $h2#d,%x#$H4 -+ movl \$1,20($ctx) # set is_base2_26 -+ -+ call __poly1305_init_avx -+ -+.Lproceed_avx2: -+ mov %r15,$len # restore $len -+ mov OPENSSL_ia32cap_P+8(%rip),%r10d -+ mov \$`(1<<31|1<<30|1<<16)`,%r11d -+ -+ mov 0(%rsp),%r15 -+.cfi_restore %r15 -+ mov 8(%rsp),%r14 -+.cfi_restore %r14 -+ mov 16(%rsp),%r13 -+.cfi_restore %r13 -+ mov 24(%rsp),%r12 -+.cfi_restore %r12 -+ mov 32(%rsp),%rbp -+.cfi_restore %rbp -+ mov 40(%rsp),%rbx -+.cfi_restore %rbx -+ lea 48(%rsp),%rax -+ lea 48(%rsp),%rsp -+.cfi_adjust_cfa_offset -48 -+.Lbase2_64_avx2_epilogue: -+ jmp .Ldo_avx2 -+.cfi_endproc -+ -+.align 32 -+.Leven_avx2: -+.cfi_startproc -+ mov OPENSSL_ia32cap_P+8(%rip),%r10d -+ vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26 -+ vmovd 4*1($ctx),%x#$H1 -+ vmovd 4*2($ctx),%x#$H2 -+ vmovd 4*3($ctx),%x#$H3 -+ vmovd 4*4($ctx),%x#$H4 -+ -+.Ldo_avx2: -+___ -+$code.=<<___ if 
($avx>2); -+ cmp \$512,$len -+ jb .Lskip_avx512 -+ and %r11d,%r10d -+ test \$`1<<16`,%r10d # check for AVX512F -+ jnz .Lblocks_avx512 -+.Lskip_avx512: -+___ -+$code.=<<___ if (!$win64); -+ lea -8(%rsp),%r11 -+.cfi_def_cfa %r11,16 -+ sub \$0x128,%rsp -+___ -+$code.=<<___ if ($win64); -+ lea -0xf8(%rsp),%r11 -+ sub \$0x1c8,%rsp -+ vmovdqa %xmm6,0x50(%r11) -+ vmovdqa %xmm7,0x60(%r11) -+ vmovdqa %xmm8,0x70(%r11) -+ vmovdqa %xmm9,0x80(%r11) -+ vmovdqa %xmm10,0x90(%r11) -+ vmovdqa %xmm11,0xa0(%r11) -+ vmovdqa %xmm12,0xb0(%r11) -+ vmovdqa %xmm13,0xc0(%r11) -+ vmovdqa %xmm14,0xd0(%r11) -+ vmovdqa %xmm15,0xe0(%r11) -+.Ldo_avx2_body: -+___ -+$code.=<<___; -+ lea .Lconst(%rip),%rcx -+ lea 48+64($ctx),$ctx # size optimization -+ vmovdqa 96(%rcx),$T0 # .Lpermd_avx2 -+ -+ # expand and copy pre-calculated table to stack -+ vmovdqu `16*0-64`($ctx),%x#$T2 -+ and \$-512,%rsp -+ vmovdqu `16*1-64`($ctx),%x#$T3 -+ vmovdqu `16*2-64`($ctx),%x#$T4 -+ vmovdqu `16*3-64`($ctx),%x#$D0 -+ vmovdqu `16*4-64`($ctx),%x#$D1 -+ vmovdqu `16*5-64`($ctx),%x#$D2 -+ lea 0x90(%rsp),%rax # size optimization -+ vmovdqu `16*6-64`($ctx),%x#$D3 -+ vpermd $T2,$T0,$T2 # 00003412 -> 14243444 -+ vmovdqu `16*7-64`($ctx),%x#$D4 -+ vpermd $T3,$T0,$T3 -+ vmovdqu `16*8-64`($ctx),%x#$MASK -+ vpermd $T4,$T0,$T4 -+ vmovdqa $T2,0x00(%rsp) -+ vpermd $D0,$T0,$D0 -+ vmovdqa $T3,0x20-0x90(%rax) -+ vpermd $D1,$T0,$D1 -+ vmovdqa $T4,0x40-0x90(%rax) -+ vpermd $D2,$T0,$D2 -+ vmovdqa $D0,0x60-0x90(%rax) -+ vpermd $D3,$T0,$D3 -+ vmovdqa $D1,0x80-0x90(%rax) -+ vpermd $D4,$T0,$D4 -+ vmovdqa $D2,0xa0-0x90(%rax) -+ vpermd $MASK,$T0,$MASK -+ vmovdqa $D3,0xc0-0x90(%rax) -+ vmovdqa $D4,0xe0-0x90(%rax) -+ vmovdqa $MASK,0x100-0x90(%rax) -+ vmovdqa 64(%rcx),$MASK # .Lmask26 -+ -+ ################################################################ -+ # load input -+ vmovdqu 16*0($inp),%x#$T0 -+ vmovdqu 16*1($inp),%x#$T1 -+ vinserti128 \$1,16*2($inp),$T0,$T0 -+ vinserti128 \$1,16*3($inp),$T1,$T1 -+ lea 16*4($inp),$inp -+ -+ vpsrldq \$6,$T0,$T2 # 
splat input -+ vpsrldq \$6,$T1,$T3 -+ vpunpckhqdq $T1,$T0,$T4 # 4 -+ vpunpcklqdq $T3,$T2,$T2 # 2:3 -+ vpunpcklqdq $T1,$T0,$T0 # 0:1 -+ -+ vpsrlq \$30,$T2,$T3 -+ vpsrlq \$4,$T2,$T2 -+ vpsrlq \$26,$T0,$T1 -+ vpsrlq \$40,$T4,$T4 # 4 -+ vpand $MASK,$T2,$T2 # 2 -+ vpand $MASK,$T0,$T0 # 0 -+ vpand $MASK,$T1,$T1 # 1 -+ vpand $MASK,$T3,$T3 # 3 -+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always -+ -+ vpaddq $H2,$T2,$H2 # accumulate input -+ sub \$64,$len -+ jz .Ltail_avx2 -+ jmp .Loop_avx2 -+ -+.align 32 -+.Loop_avx2: -+ ################################################################ -+ # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4 -+ # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3 -+ # ((inp[2]*r^4+inp[6])*r^4+inp[10])*r^2 -+ # ((inp[3]*r^4+inp[7])*r^4+inp[11])*r^1 -+ # \________/\__________/ -+ ################################################################ -+ #vpaddq $H2,$T2,$H2 # accumulate input -+ vpaddq $H0,$T0,$H0 -+ vmovdqa `32*0`(%rsp),$T0 # r0^4 -+ vpaddq $H1,$T1,$H1 -+ vmovdqa `32*1`(%rsp),$T1 # r1^4 -+ vpaddq $H3,$T3,$H3 -+ vmovdqa `32*3`(%rsp),$T2 # r2^4 -+ vpaddq $H4,$T4,$H4 -+ vmovdqa `32*6-0x90`(%rax),$T3 # s3^4 -+ vmovdqa `32*8-0x90`(%rax),$S4 # s4^4 -+ -+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 -+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 -+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 -+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 -+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 -+ # -+ # however, as h2 is "chronologically" first one available pull -+ # corresponding operations up, so it's -+ # -+ # d4 = h2*r2 + h4*r0 + h3*r1 + h1*r3 + h0*r4 -+ # d3 = h2*r1 + h3*r0 + h1*r2 + h0*r3 + h4*5*r4 -+ # d2 = h2*r0 + h1*r1 + h0*r2 + h4*5*r3 + h3*5*r4 -+ # d1 = h2*5*r4 + h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 -+ # d0 = h2*5*r3 + h0*r0 + h4*5*r1 + h3*5*r2 + h1*5*r4 -+ -+ vpmuludq $H2,$T0,$D2 # d2 = h2*r0 -+ vpmuludq $H2,$T1,$D3 # d3 = h2*r1 -+ vpmuludq $H2,$T2,$D4 # d4 = h2*r2 -+ vpmuludq $H2,$T3,$D0 # d0 = h2*s3 -+ vpmuludq $H2,$S4,$D1 # d1 = h2*s4 -+ -+ 
vpmuludq $H0,$T1,$T4 # h0*r1 -+ vpmuludq $H1,$T1,$H2 # h1*r1, borrow $H2 as temp -+ vpaddq $T4,$D1,$D1 # d1 += h0*r1 -+ vpaddq $H2,$D2,$D2 # d2 += h1*r1 -+ vpmuludq $H3,$T1,$T4 # h3*r1 -+ vpmuludq `32*2`(%rsp),$H4,$H2 # h4*s1 -+ vpaddq $T4,$D4,$D4 # d4 += h3*r1 -+ vpaddq $H2,$D0,$D0 # d0 += h4*s1 -+ vmovdqa `32*4-0x90`(%rax),$T1 # s2 -+ -+ vpmuludq $H0,$T0,$T4 # h0*r0 -+ vpmuludq $H1,$T0,$H2 # h1*r0 -+ vpaddq $T4,$D0,$D0 # d0 += h0*r0 -+ vpaddq $H2,$D1,$D1 # d1 += h1*r0 -+ vpmuludq $H3,$T0,$T4 # h3*r0 -+ vpmuludq $H4,$T0,$H2 # h4*r0 -+ vmovdqu 16*0($inp),%x#$T0 # load input -+ vpaddq $T4,$D3,$D3 # d3 += h3*r0 -+ vpaddq $H2,$D4,$D4 # d4 += h4*r0 -+ vinserti128 \$1,16*2($inp),$T0,$T0 -+ -+ vpmuludq $H3,$T1,$T4 # h3*s2 -+ vpmuludq $H4,$T1,$H2 # h4*s2 -+ vmovdqu 16*1($inp),%x#$T1 -+ vpaddq $T4,$D0,$D0 # d0 += h3*s2 -+ vpaddq $H2,$D1,$D1 # d1 += h4*s2 -+ vmovdqa `32*5-0x90`(%rax),$H2 # r3 -+ vpmuludq $H1,$T2,$T4 # h1*r2 -+ vpmuludq $H0,$T2,$T2 # h0*r2 -+ vpaddq $T4,$D3,$D3 # d3 += h1*r2 -+ vpaddq $T2,$D2,$D2 # d2 += h0*r2 -+ vinserti128 \$1,16*3($inp),$T1,$T1 -+ lea 16*4($inp),$inp -+ -+ vpmuludq $H1,$H2,$T4 # h1*r3 -+ vpmuludq $H0,$H2,$H2 # h0*r3 -+ vpsrldq \$6,$T0,$T2 # splat input -+ vpaddq $T4,$D4,$D4 # d4 += h1*r3 -+ vpaddq $H2,$D3,$D3 # d3 += h0*r3 -+ vpmuludq $H3,$T3,$T4 # h3*s3 -+ vpmuludq $H4,$T3,$H2 # h4*s3 -+ vpsrldq \$6,$T1,$T3 -+ vpaddq $T4,$D1,$D1 # d1 += h3*s3 -+ vpaddq $H2,$D2,$D2 # d2 += h4*s3 -+ vpunpckhqdq $T1,$T0,$T4 # 4 -+ -+ vpmuludq $H3,$S4,$H3 # h3*s4 -+ vpmuludq $H4,$S4,$H4 # h4*s4 -+ vpunpcklqdq $T1,$T0,$T0 # 0:1 -+ vpaddq $H3,$D2,$H2 # h2 = d2 + h3*r4 -+ vpaddq $H4,$D3,$H3 # h3 = d3 + h4*r4 -+ vpunpcklqdq $T3,$T2,$T3 # 2:3 -+ vpmuludq `32*7-0x90`(%rax),$H0,$H4 # h0*r4 -+ vpmuludq $H1,$S4,$H0 # h1*s4 -+ vmovdqa 64(%rcx),$MASK # .Lmask26 -+ vpaddq $H4,$D4,$H4 # h4 = d4 + h0*r4 -+ vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4 -+ -+ ################################################################ -+ # lazy reduction (interleaved with tail of input splat) 
-+ -+ vpsrlq \$26,$H3,$D3 -+ vpand $MASK,$H3,$H3 -+ vpaddq $D3,$H4,$H4 # h3 -> h4 -+ -+ vpsrlq \$26,$H0,$D0 -+ vpand $MASK,$H0,$H0 -+ vpaddq $D0,$D1,$H1 # h0 -> h1 -+ -+ vpsrlq \$26,$H4,$D4 -+ vpand $MASK,$H4,$H4 -+ -+ vpsrlq \$4,$T3,$T2 -+ -+ vpsrlq \$26,$H1,$D1 -+ vpand $MASK,$H1,$H1 -+ vpaddq $D1,$H2,$H2 # h1 -> h2 -+ -+ vpaddq $D4,$H0,$H0 -+ vpsllq \$2,$D4,$D4 -+ vpaddq $D4,$H0,$H0 # h4 -> h0 -+ -+ vpand $MASK,$T2,$T2 # 2 -+ vpsrlq \$26,$T0,$T1 -+ -+ vpsrlq \$26,$H2,$D2 -+ vpand $MASK,$H2,$H2 -+ vpaddq $D2,$H3,$H3 # h2 -> h3 -+ -+ vpaddq $T2,$H2,$H2 # modulo-scheduled -+ vpsrlq \$30,$T3,$T3 -+ -+ vpsrlq \$26,$H0,$D0 -+ vpand $MASK,$H0,$H0 -+ vpaddq $D0,$H1,$H1 # h0 -> h1 -+ -+ vpsrlq \$40,$T4,$T4 # 4 -+ -+ vpsrlq \$26,$H3,$D3 -+ vpand $MASK,$H3,$H3 -+ vpaddq $D3,$H4,$H4 # h3 -> h4 -+ -+ vpand $MASK,$T0,$T0 # 0 -+ vpand $MASK,$T1,$T1 # 1 -+ vpand $MASK,$T3,$T3 # 3 -+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always -+ -+ sub \$64,$len -+ jnz .Loop_avx2 -+ -+ .byte 0x66,0x90 -+.Ltail_avx2: -+ ################################################################ -+ # while above multiplications were by r^4 in all lanes, in last -+ # iteration we multiply least significant lane by r^4 and most -+ # significant one by r, so copy of above except that references -+ # to the precomputed table are displaced by 4... 
-+ -+ #vpaddq $H2,$T2,$H2 # accumulate input -+ vpaddq $H0,$T0,$H0 -+ vmovdqu `32*0+4`(%rsp),$T0 # r0^4 -+ vpaddq $H1,$T1,$H1 -+ vmovdqu `32*1+4`(%rsp),$T1 # r1^4 -+ vpaddq $H3,$T3,$H3 -+ vmovdqu `32*3+4`(%rsp),$T2 # r2^4 -+ vpaddq $H4,$T4,$H4 -+ vmovdqu `32*6+4-0x90`(%rax),$T3 # s3^4 -+ vmovdqu `32*8+4-0x90`(%rax),$S4 # s4^4 -+ -+ vpmuludq $H2,$T0,$D2 # d2 = h2*r0 -+ vpmuludq $H2,$T1,$D3 # d3 = h2*r1 -+ vpmuludq $H2,$T2,$D4 # d4 = h2*r2 -+ vpmuludq $H2,$T3,$D0 # d0 = h2*s3 -+ vpmuludq $H2,$S4,$D1 # d1 = h2*s4 -+ -+ vpmuludq $H0,$T1,$T4 # h0*r1 -+ vpmuludq $H1,$T1,$H2 # h1*r1 -+ vpaddq $T4,$D1,$D1 # d1 += h0*r1 -+ vpaddq $H2,$D2,$D2 # d2 += h1*r1 -+ vpmuludq $H3,$T1,$T4 # h3*r1 -+ vpmuludq `32*2+4`(%rsp),$H4,$H2 # h4*s1 -+ vpaddq $T4,$D4,$D4 # d4 += h3*r1 -+ vpaddq $H2,$D0,$D0 # d0 += h4*s1 -+ -+ vpmuludq $H0,$T0,$T4 # h0*r0 -+ vpmuludq $H1,$T0,$H2 # h1*r0 -+ vpaddq $T4,$D0,$D0 # d0 += h0*r0 -+ vmovdqu `32*4+4-0x90`(%rax),$T1 # s2 -+ vpaddq $H2,$D1,$D1 # d1 += h1*r0 -+ vpmuludq $H3,$T0,$T4 # h3*r0 -+ vpmuludq $H4,$T0,$H2 # h4*r0 -+ vpaddq $T4,$D3,$D3 # d3 += h3*r0 -+ vpaddq $H2,$D4,$D4 # d4 += h4*r0 -+ -+ vpmuludq $H3,$T1,$T4 # h3*s2 -+ vpmuludq $H4,$T1,$H2 # h4*s2 -+ vpaddq $T4,$D0,$D0 # d0 += h3*s2 -+ vpaddq $H2,$D1,$D1 # d1 += h4*s2 -+ vmovdqu `32*5+4-0x90`(%rax),$H2 # r3 -+ vpmuludq $H1,$T2,$T4 # h1*r2 -+ vpmuludq $H0,$T2,$T2 # h0*r2 -+ vpaddq $T4,$D3,$D3 # d3 += h1*r2 -+ vpaddq $T2,$D2,$D2 # d2 += h0*r2 -+ -+ vpmuludq $H1,$H2,$T4 # h1*r3 -+ vpmuludq $H0,$H2,$H2 # h0*r3 -+ vpaddq $T4,$D4,$D4 # d4 += h1*r3 -+ vpaddq $H2,$D3,$D3 # d3 += h0*r3 -+ vpmuludq $H3,$T3,$T4 # h3*s3 -+ vpmuludq $H4,$T3,$H2 # h4*s3 -+ vpaddq $T4,$D1,$D1 # d1 += h3*s3 -+ vpaddq $H2,$D2,$D2 # d2 += h4*s3 -+ -+ vpmuludq $H3,$S4,$H3 # h3*s4 -+ vpmuludq $H4,$S4,$H4 # h4*s4 -+ vpaddq $H3,$D2,$H2 # h2 = d2 + h3*r4 -+ vpaddq $H4,$D3,$H3 # h3 = d3 + h4*r4 -+ vpmuludq `32*7+4-0x90`(%rax),$H0,$H4 # h0*r4 -+ vpmuludq $H1,$S4,$H0 # h1*s4 -+ vmovdqa 64(%rcx),$MASK # .Lmask26 -+ vpaddq $H4,$D4,$H4 # h4 = 
d4 + h0*r4 -+ vpaddq $H0,$D0,$H0 # h0 = d0 + h1*s4 -+ -+ ################################################################ -+ # horizontal addition -+ -+ vpsrldq \$8,$D1,$T1 -+ vpsrldq \$8,$H2,$T2 -+ vpsrldq \$8,$H3,$T3 -+ vpsrldq \$8,$H4,$T4 -+ vpsrldq \$8,$H0,$T0 -+ vpaddq $T1,$D1,$D1 -+ vpaddq $T2,$H2,$H2 -+ vpaddq $T3,$H3,$H3 -+ vpaddq $T4,$H4,$H4 -+ vpaddq $T0,$H0,$H0 -+ -+ vpermq \$0x2,$H3,$T3 -+ vpermq \$0x2,$H4,$T4 -+ vpermq \$0x2,$H0,$T0 -+ vpermq \$0x2,$D1,$T1 -+ vpermq \$0x2,$H2,$T2 -+ vpaddq $T3,$H3,$H3 -+ vpaddq $T4,$H4,$H4 -+ vpaddq $T0,$H0,$H0 -+ vpaddq $T1,$D1,$D1 -+ vpaddq $T2,$H2,$H2 -+ -+ ################################################################ -+ # lazy reduction -+ -+ vpsrlq \$26,$H3,$D3 -+ vpand $MASK,$H3,$H3 -+ vpaddq $D3,$H4,$H4 # h3 -> h4 -+ -+ vpsrlq \$26,$H0,$D0 -+ vpand $MASK,$H0,$H0 -+ vpaddq $D0,$D1,$H1 # h0 -> h1 -+ -+ vpsrlq \$26,$H4,$D4 -+ vpand $MASK,$H4,$H4 -+ -+ vpsrlq \$26,$H1,$D1 -+ vpand $MASK,$H1,$H1 -+ vpaddq $D1,$H2,$H2 # h1 -> h2 -+ -+ vpaddq $D4,$H0,$H0 -+ vpsllq \$2,$D4,$D4 -+ vpaddq $D4,$H0,$H0 # h4 -> h0 -+ -+ vpsrlq \$26,$H2,$D2 -+ vpand $MASK,$H2,$H2 -+ vpaddq $D2,$H3,$H3 # h2 -> h3 -+ -+ vpsrlq \$26,$H0,$D0 -+ vpand $MASK,$H0,$H0 -+ vpaddq $D0,$H1,$H1 # h0 -> h1 -+ -+ vpsrlq \$26,$H3,$D3 -+ vpand $MASK,$H3,$H3 -+ vpaddq $D3,$H4,$H4 # h3 -> h4 -+ -+ vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced -+ vmovd %x#$H1,`4*1-48-64`($ctx) -+ vmovd %x#$H2,`4*2-48-64`($ctx) -+ vmovd %x#$H3,`4*3-48-64`($ctx) -+ vmovd %x#$H4,`4*4-48-64`($ctx) -+___ -+$code.=<<___ if ($win64); -+ vmovdqa 0x50(%r11),%xmm6 -+ vmovdqa 0x60(%r11),%xmm7 -+ vmovdqa 0x70(%r11),%xmm8 -+ vmovdqa 0x80(%r11),%xmm9 -+ vmovdqa 0x90(%r11),%xmm10 -+ vmovdqa 0xa0(%r11),%xmm11 -+ vmovdqa 0xb0(%r11),%xmm12 -+ vmovdqa 0xc0(%r11),%xmm13 -+ vmovdqa 0xd0(%r11),%xmm14 -+ vmovdqa 0xe0(%r11),%xmm15 -+ lea 0xf8(%r11),%rsp -+.Ldo_avx2_epilogue: -+___ -+$code.=<<___ if (!$win64); -+ lea 8(%r11),%rsp -+.cfi_def_cfa %rsp,8 -+___ -+$code.=<<___; -+ vzeroupper -+ 
ret -+.cfi_endproc -+.size poly1305_blocks_avx2,.-poly1305_blocks_avx2 -+___ -+####################################################################### -+if ($avx>2) { -+# On entry we have input length divisible by 64. But since inner loop -+# processes 128 bytes per iteration, cases when length is not divisible -+# by 128 are handled by passing tail 64 bytes to .Ltail_avx2. For this -+# reason stack layout is kept identical to poly1305_blocks_avx2. If not -+# for this tail, we wouldn't have to even allocate stack frame... -+ -+my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24)); -+my ($M0,$M1,$M2,$M3,$M4) = map("%zmm$_",(25..29)); -+my $PADBIT="%zmm30"; -+ -+map(s/%y/%z/,($T4,$T0,$T1,$T2,$T3)); # switch to %zmm domain -+map(s/%y/%z/,($D0,$D1,$D2,$D3,$D4)); -+map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4)); -+map(s/%y/%z/,($MASK)); -+ -+$code.=<<___; -+.type poly1305_blocks_avx512,\@function,4 -+.align 32 -+poly1305_blocks_avx512: -+.cfi_startproc -+.Lblocks_avx512: -+ mov \$15,%eax -+ kmovw %eax,%k2 -+___ -+$code.=<<___ if (!$win64); -+ lea -8(%rsp),%r11 -+.cfi_def_cfa %r11,16 -+ sub \$0x128,%rsp -+___ -+$code.=<<___ if ($win64); -+ lea -0xf8(%rsp),%r11 -+ sub \$0x1c8,%rsp -+ vmovdqa %xmm6,0x50(%r11) -+ vmovdqa %xmm7,0x60(%r11) -+ vmovdqa %xmm8,0x70(%r11) -+ vmovdqa %xmm9,0x80(%r11) -+ vmovdqa %xmm10,0x90(%r11) -+ vmovdqa %xmm11,0xa0(%r11) -+ vmovdqa %xmm12,0xb0(%r11) -+ vmovdqa %xmm13,0xc0(%r11) -+ vmovdqa %xmm14,0xd0(%r11) -+ vmovdqa %xmm15,0xe0(%r11) -+.Ldo_avx512_body: -+___ -+$code.=<<___; -+ lea .Lconst(%rip),%rcx -+ lea 48+64($ctx),$ctx # size optimization -+ vmovdqa 96(%rcx),%y#$T2 # .Lpermd_avx2 -+ -+ # expand pre-calculated table -+ vmovdqu `16*0-64`($ctx),%x#$D0 # will become expanded ${R0} -+ and \$-512,%rsp -+ vmovdqu `16*1-64`($ctx),%x#$D1 # will become ... ${R1} -+ mov \$0x20,%rax -+ vmovdqu `16*2-64`($ctx),%x#$T0 # ... ${S1} -+ vmovdqu `16*3-64`($ctx),%x#$D2 # ... ${R2} -+ vmovdqu `16*4-64`($ctx),%x#$T1 # ... 
${S2} -+ vmovdqu `16*5-64`($ctx),%x#$D3 # ... ${R3} -+ vmovdqu `16*6-64`($ctx),%x#$T3 # ... ${S3} -+ vmovdqu `16*7-64`($ctx),%x#$D4 # ... ${R4} -+ vmovdqu `16*8-64`($ctx),%x#$T4 # ... ${S4} -+ vpermd $D0,$T2,$R0 # 00003412 -> 14243444 -+ vpbroadcastq 64(%rcx),$MASK # .Lmask26 -+ vpermd $D1,$T2,$R1 -+ vpermd $T0,$T2,$S1 -+ vpermd $D2,$T2,$R2 -+ vmovdqa64 $R0,0x00(%rsp){%k2} # save in case $len%128 != 0 -+ vpsrlq \$32,$R0,$T0 # 14243444 -> 01020304 -+ vpermd $T1,$T2,$S2 -+ vmovdqu64 $R1,0x00(%rsp,%rax){%k2} -+ vpsrlq \$32,$R1,$T1 -+ vpermd $D3,$T2,$R3 -+ vmovdqa64 $S1,0x40(%rsp){%k2} -+ vpermd $T3,$T2,$S3 -+ vpermd $D4,$T2,$R4 -+ vmovdqu64 $R2,0x40(%rsp,%rax){%k2} -+ vpermd $T4,$T2,$S4 -+ vmovdqa64 $S2,0x80(%rsp){%k2} -+ vmovdqu64 $R3,0x80(%rsp,%rax){%k2} -+ vmovdqa64 $S3,0xc0(%rsp){%k2} -+ vmovdqu64 $R4,0xc0(%rsp,%rax){%k2} -+ vmovdqa64 $S4,0x100(%rsp){%k2} -+ -+ ################################################################ -+ # calculate 5th through 8th powers of the key -+ # -+ # d0 = r0'*r0 + r1'*5*r4 + r2'*5*r3 + r3'*5*r2 + r4'*5*r1 -+ # d1 = r0'*r1 + r1'*r0 + r2'*5*r4 + r3'*5*r3 + r4'*5*r2 -+ # d2 = r0'*r2 + r1'*r1 + r2'*r0 + r3'*5*r4 + r4'*5*r3 -+ # d3 = r0'*r3 + r1'*r2 + r2'*r1 + r3'*r0 + r4'*5*r4 -+ # d4 = r0'*r4 + r1'*r3 + r2'*r2 + r3'*r1 + r4'*r0 -+ -+ vpmuludq $T0,$R0,$D0 # d0 = r0'*r0 -+ vpmuludq $T0,$R1,$D1 # d1 = r0'*r1 -+ vpmuludq $T0,$R2,$D2 # d2 = r0'*r2 -+ vpmuludq $T0,$R3,$D3 # d3 = r0'*r3 -+ vpmuludq $T0,$R4,$D4 # d4 = r0'*r4 -+ vpsrlq \$32,$R2,$T2 -+ -+ vpmuludq $T1,$S4,$M0 -+ vpmuludq $T1,$R0,$M1 -+ vpmuludq $T1,$R1,$M2 -+ vpmuludq $T1,$R2,$M3 -+ vpmuludq $T1,$R3,$M4 -+ vpsrlq \$32,$R3,$T3 -+ vpaddq $M0,$D0,$D0 # d0 += r1'*5*r4 -+ vpaddq $M1,$D1,$D1 # d1 += r1'*r0 -+ vpaddq $M2,$D2,$D2 # d2 += r1'*r1 -+ vpaddq $M3,$D3,$D3 # d3 += r1'*r2 -+ vpaddq $M4,$D4,$D4 # d4 += r1'*r3 -+ -+ vpmuludq $T2,$S3,$M0 -+ vpmuludq $T2,$S4,$M1 -+ vpmuludq $T2,$R1,$M3 -+ vpmuludq $T2,$R2,$M4 -+ vpmuludq $T2,$R0,$M2 -+ vpsrlq \$32,$R4,$T4 -+ vpaddq $M0,$D0,$D0 # 
d0 += r2'*5*r3 -+ vpaddq $M1,$D1,$D1 # d1 += r2'*5*r4 -+ vpaddq $M3,$D3,$D3 # d3 += r2'*r1 -+ vpaddq $M4,$D4,$D4 # d4 += r2'*r2 -+ vpaddq $M2,$D2,$D2 # d2 += r2'*r0 -+ -+ vpmuludq $T3,$S2,$M0 -+ vpmuludq $T3,$R0,$M3 -+ vpmuludq $T3,$R1,$M4 -+ vpmuludq $T3,$S3,$M1 -+ vpmuludq $T3,$S4,$M2 -+ vpaddq $M0,$D0,$D0 # d0 += r3'*5*r2 -+ vpaddq $M3,$D3,$D3 # d3 += r3'*r0 -+ vpaddq $M4,$D4,$D4 # d4 += r3'*r1 -+ vpaddq $M1,$D1,$D1 # d1 += r3'*5*r3 -+ vpaddq $M2,$D2,$D2 # d2 += r3'*5*r4 -+ -+ vpmuludq $T4,$S4,$M3 -+ vpmuludq $T4,$R0,$M4 -+ vpmuludq $T4,$S1,$M0 -+ vpmuludq $T4,$S2,$M1 -+ vpmuludq $T4,$S3,$M2 -+ vpaddq $M3,$D3,$D3 # d3 += r2'*5*r4 -+ vpaddq $M4,$D4,$D4 # d4 += r2'*r0 -+ vpaddq $M0,$D0,$D0 # d0 += r2'*5*r1 -+ vpaddq $M1,$D1,$D1 # d1 += r2'*5*r2 -+ vpaddq $M2,$D2,$D2 # d2 += r2'*5*r3 -+ -+ ################################################################ -+ # load input -+ vmovdqu64 16*0($inp),%z#$T3 -+ vmovdqu64 16*4($inp),%z#$T4 -+ lea 16*8($inp),$inp -+ -+ ################################################################ -+ # lazy reduction -+ -+ vpsrlq \$26,$D3,$M3 -+ vpandq $MASK,$D3,$D3 -+ vpaddq $M3,$D4,$D4 # d3 -> d4 -+ -+ vpsrlq \$26,$D0,$M0 -+ vpandq $MASK,$D0,$D0 -+ vpaddq $M0,$D1,$D1 # d0 -> d1 -+ -+ vpsrlq \$26,$D4,$M4 -+ vpandq $MASK,$D4,$D4 -+ -+ vpsrlq \$26,$D1,$M1 -+ vpandq $MASK,$D1,$D1 -+ vpaddq $M1,$D2,$D2 # d1 -> d2 -+ -+ vpaddq $M4,$D0,$D0 -+ vpsllq \$2,$M4,$M4 -+ vpaddq $M4,$D0,$D0 # d4 -> d0 -+ -+ vpsrlq \$26,$D2,$M2 -+ vpandq $MASK,$D2,$D2 -+ vpaddq $M2,$D3,$D3 # d2 -> d3 -+ -+ vpsrlq \$26,$D0,$M0 -+ vpandq $MASK,$D0,$D0 -+ vpaddq $M0,$D1,$D1 # d0 -> d1 -+ -+ vpsrlq \$26,$D3,$M3 -+ vpandq $MASK,$D3,$D3 -+ vpaddq $M3,$D4,$D4 # d3 -> d4 -+ -+ ################################################################ -+ # at this point we have 14243444 in $R0-$S4 and 05060708 in -+ # $D0-$D4, ... -+ -+ vpunpcklqdq $T4,$T3,$T0 # transpose input -+ vpunpckhqdq $T4,$T3,$T4 -+ -+ # ... 
since input 64-bit lanes are ordered as 73625140, we could -+ # "vperm" it to 76543210 (here and in each loop iteration), *or* -+ # we could just flow along, hence the goal for $R0-$S4 is -+ # 1858286838784888 ... -+ -+ vmovdqa32 128(%rcx),$M0 # .Lpermd_avx512: -+ mov \$0x7777,%eax -+ kmovw %eax,%k1 -+ -+ vpermd $R0,$M0,$R0 # 14243444 -> 1---2---3---4--- -+ vpermd $R1,$M0,$R1 -+ vpermd $R2,$M0,$R2 -+ vpermd $R3,$M0,$R3 -+ vpermd $R4,$M0,$R4 -+ -+ vpermd $D0,$M0,${R0}{%k1} # 05060708 -> 1858286838784888 -+ vpermd $D1,$M0,${R1}{%k1} -+ vpermd $D2,$M0,${R2}{%k1} -+ vpermd $D3,$M0,${R3}{%k1} -+ vpermd $D4,$M0,${R4}{%k1} -+ -+ vpslld \$2,$R1,$S1 # *5 -+ vpslld \$2,$R2,$S2 -+ vpslld \$2,$R3,$S3 -+ vpslld \$2,$R4,$S4 -+ vpaddd $R1,$S1,$S1 -+ vpaddd $R2,$S2,$S2 -+ vpaddd $R3,$S3,$S3 -+ vpaddd $R4,$S4,$S4 -+ -+ vpbroadcastq 32(%rcx),$PADBIT # .L129 -+ -+ vpsrlq \$52,$T0,$T2 # splat input -+ vpsllq \$12,$T4,$T3 -+ vporq $T3,$T2,$T2 -+ vpsrlq \$26,$T0,$T1 -+ vpsrlq \$14,$T4,$T3 -+ vpsrlq \$40,$T4,$T4 # 4 -+ vpandq $MASK,$T2,$T2 # 2 -+ vpandq $MASK,$T0,$T0 # 0 -+ #vpandq $MASK,$T1,$T1 # 1 -+ #vpandq $MASK,$T3,$T3 # 3 -+ #vporq $PADBIT,$T4,$T4 # padbit, yes, always -+ -+ vpaddq $H2,$T2,$H2 # accumulate input -+ sub \$192,$len -+ jbe .Ltail_avx512 -+ jmp .Loop_avx512 -+ -+.align 32 -+.Loop_avx512: -+ ################################################################ -+ # ((inp[0]*r^8+inp[ 8])*r^8+inp[16])*r^8 -+ # ((inp[1]*r^8+inp[ 9])*r^8+inp[17])*r^7 -+ # ((inp[2]*r^8+inp[10])*r^8+inp[18])*r^6 -+ # ((inp[3]*r^8+inp[11])*r^8+inp[19])*r^5 -+ # ((inp[4]*r^8+inp[12])*r^8+inp[20])*r^4 -+ # ((inp[5]*r^8+inp[13])*r^8+inp[21])*r^3 -+ # ((inp[6]*r^8+inp[14])*r^8+inp[22])*r^2 -+ # ((inp[7]*r^8+inp[15])*r^8+inp[23])*r^1 -+ # \________/\___________/ -+ ################################################################ -+ #vpaddq $H2,$T2,$H2 # accumulate input -+ -+ # d4 = h4*r0 + h3*r1 + h2*r2 + h1*r3 + h0*r4 -+ # d3 = h3*r0 + h2*r1 + h1*r2 + h0*r3 + h4*5*r4 -+ # d2 = h2*r0 + h1*r1 + h0*r2 + 
h4*5*r3 + h3*5*r4 -+ # d1 = h1*r0 + h0*r1 + h4*5*r2 + h3*5*r3 + h2*5*r4 -+ # d0 = h0*r0 + h4*5*r1 + h3*5*r2 + h2*5*r3 + h1*5*r4 -+ # -+ # however, as h2 is "chronologically" first one available pull -+ # corresponding operations up, so it's -+ # -+ # d3 = h2*r1 + h0*r3 + h1*r2 + h3*r0 + h4*5*r4 -+ # d4 = h2*r2 + h0*r4 + h1*r3 + h3*r1 + h4*r0 -+ # d0 = h2*5*r3 + h0*r0 + h1*5*r4 + h3*5*r2 + h4*5*r1 -+ # d1 = h2*5*r4 + h0*r1 + h1*r0 + h3*5*r3 + h4*5*r2 -+ # d2 = h2*r0 + h0*r2 + h1*r1 + h3*5*r4 + h4*5*r3 -+ -+ vpmuludq $H2,$R1,$D3 # d3 = h2*r1 -+ vpaddq $H0,$T0,$H0 -+ vpmuludq $H2,$R2,$D4 # d4 = h2*r2 -+ vpandq $MASK,$T1,$T1 # 1 -+ vpmuludq $H2,$S3,$D0 # d0 = h2*s3 -+ vpandq $MASK,$T3,$T3 # 3 -+ vpmuludq $H2,$S4,$D1 # d1 = h2*s4 -+ vporq $PADBIT,$T4,$T4 # padbit, yes, always -+ vpmuludq $H2,$R0,$D2 # d2 = h2*r0 -+ vpaddq $H1,$T1,$H1 # accumulate input -+ vpaddq $H3,$T3,$H3 -+ vpaddq $H4,$T4,$H4 -+ -+ vmovdqu64 16*0($inp),$T3 # load input -+ vmovdqu64 16*4($inp),$T4 -+ lea 16*8($inp),$inp -+ vpmuludq $H0,$R3,$M3 -+ vpmuludq $H0,$R4,$M4 -+ vpmuludq $H0,$R0,$M0 -+ vpmuludq $H0,$R1,$M1 -+ vpaddq $M3,$D3,$D3 # d3 += h0*r3 -+ vpaddq $M4,$D4,$D4 # d4 += h0*r4 -+ vpaddq $M0,$D0,$D0 # d0 += h0*r0 -+ vpaddq $M1,$D1,$D1 # d1 += h0*r1 -+ -+ vpmuludq $H1,$R2,$M3 -+ vpmuludq $H1,$R3,$M4 -+ vpmuludq $H1,$S4,$M0 -+ vpmuludq $H0,$R2,$M2 -+ vpaddq $M3,$D3,$D3 # d3 += h1*r2 -+ vpaddq $M4,$D4,$D4 # d4 += h1*r3 -+ vpaddq $M0,$D0,$D0 # d0 += h1*s4 -+ vpaddq $M2,$D2,$D2 # d2 += h0*r2 -+ -+ vpunpcklqdq $T4,$T3,$T0 # transpose input -+ vpunpckhqdq $T4,$T3,$T4 -+ -+ vpmuludq $H3,$R0,$M3 -+ vpmuludq $H3,$R1,$M4 -+ vpmuludq $H1,$R0,$M1 -+ vpmuludq $H1,$R1,$M2 -+ vpaddq $M3,$D3,$D3 # d3 += h3*r0 -+ vpaddq $M4,$D4,$D4 # d4 += h3*r1 -+ vpaddq $M1,$D1,$D1 # d1 += h1*r0 -+ vpaddq $M2,$D2,$D2 # d2 += h1*r1 -+ -+ vpmuludq $H4,$S4,$M3 -+ vpmuludq $H4,$R0,$M4 -+ vpmuludq $H3,$S2,$M0 -+ vpmuludq $H3,$S3,$M1 -+ vpaddq $M3,$D3,$D3 # d3 += h4*s4 -+ vpmuludq $H3,$S4,$M2 -+ vpaddq $M4,$D4,$D4 # d4 += h4*r0 -+ 
vpaddq $M0,$D0,$D0 # d0 += h3*s2 -+ vpaddq $M1,$D1,$D1 # d1 += h3*s3 -+ vpaddq $M2,$D2,$D2 # d2 += h3*s4 -+ -+ vpmuludq $H4,$S1,$M0 -+ vpmuludq $H4,$S2,$M1 -+ vpmuludq $H4,$S3,$M2 -+ vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1 -+ vpaddq $M1,$D1,$H1 # h1 = d2 + h4*s2 -+ vpaddq $M2,$D2,$H2 # h2 = d3 + h4*s3 -+ -+ ################################################################ -+ # lazy reduction (interleaved with input splat) -+ -+ vpsrlq \$52,$T0,$T2 # splat input -+ vpsllq \$12,$T4,$T3 -+ -+ vpsrlq \$26,$D3,$H3 -+ vpandq $MASK,$D3,$D3 -+ vpaddq $H3,$D4,$H4 # h3 -> h4 -+ -+ vporq $T3,$T2,$T2 -+ -+ vpsrlq \$26,$H0,$D0 -+ vpandq $MASK,$H0,$H0 -+ vpaddq $D0,$H1,$H1 # h0 -> h1 -+ -+ vpandq $MASK,$T2,$T2 # 2 -+ -+ vpsrlq \$26,$H4,$D4 -+ vpandq $MASK,$H4,$H4 -+ -+ vpsrlq \$26,$H1,$D1 -+ vpandq $MASK,$H1,$H1 -+ vpaddq $D1,$H2,$H2 # h1 -> h2 -+ -+ vpaddq $D4,$H0,$H0 -+ vpsllq \$2,$D4,$D4 -+ vpaddq $D4,$H0,$H0 # h4 -> h0 -+ -+ vpaddq $T2,$H2,$H2 # modulo-scheduled -+ vpsrlq \$26,$T0,$T1 -+ -+ vpsrlq \$26,$H2,$D2 -+ vpandq $MASK,$H2,$H2 -+ vpaddq $D2,$D3,$H3 # h2 -> h3 -+ -+ vpsrlq \$14,$T4,$T3 -+ -+ vpsrlq \$26,$H0,$D0 -+ vpandq $MASK,$H0,$H0 -+ vpaddq $D0,$H1,$H1 # h0 -> h1 -+ -+ vpsrlq \$40,$T4,$T4 # 4 -+ -+ vpsrlq \$26,$H3,$D3 -+ vpandq $MASK,$H3,$H3 -+ vpaddq $D3,$H4,$H4 # h3 -> h4 -+ -+ vpandq $MASK,$T0,$T0 # 0 -+ #vpandq $MASK,$T1,$T1 # 1 -+ #vpandq $MASK,$T3,$T3 # 3 -+ #vporq $PADBIT,$T4,$T4 # padbit, yes, always -+ -+ sub \$128,$len -+ ja .Loop_avx512 -+ -+.Ltail_avx512: -+ ################################################################ -+ # while above multiplications were by r^8 in all lanes, in last -+ # iteration we multiply least significant lane by r^8 and most -+ # significant one by r, that's why table gets shifted... 
-+ -+ vpsrlq \$32,$R0,$R0 # 0105020603070408 -+ vpsrlq \$32,$R1,$R1 -+ vpsrlq \$32,$R2,$R2 -+ vpsrlq \$32,$S3,$S3 -+ vpsrlq \$32,$S4,$S4 -+ vpsrlq \$32,$R3,$R3 -+ vpsrlq \$32,$R4,$R4 -+ vpsrlq \$32,$S1,$S1 -+ vpsrlq \$32,$S2,$S2 -+ -+ ################################################################ -+ # load either next or last 64 byte of input -+ lea ($inp,$len),$inp -+ -+ #vpaddq $H2,$T2,$H2 # accumulate input -+ vpaddq $H0,$T0,$H0 -+ -+ vpmuludq $H2,$R1,$D3 # d3 = h2*r1 -+ vpmuludq $H2,$R2,$D4 # d4 = h2*r2 -+ vpmuludq $H2,$S3,$D0 # d0 = h2*s3 -+ vpandq $MASK,$T1,$T1 # 1 -+ vpmuludq $H2,$S4,$D1 # d1 = h2*s4 -+ vpandq $MASK,$T3,$T3 # 3 -+ vpmuludq $H2,$R0,$D2 # d2 = h2*r0 -+ vporq $PADBIT,$T4,$T4 # padbit, yes, always -+ vpaddq $H1,$T1,$H1 # accumulate input -+ vpaddq $H3,$T3,$H3 -+ vpaddq $H4,$T4,$H4 -+ -+ vmovdqu 16*0($inp),%x#$T0 -+ vpmuludq $H0,$R3,$M3 -+ vpmuludq $H0,$R4,$M4 -+ vpmuludq $H0,$R0,$M0 -+ vpmuludq $H0,$R1,$M1 -+ vpaddq $M3,$D3,$D3 # d3 += h0*r3 -+ vpaddq $M4,$D4,$D4 # d4 += h0*r4 -+ vpaddq $M0,$D0,$D0 # d0 += h0*r0 -+ vpaddq $M1,$D1,$D1 # d1 += h0*r1 -+ -+ vmovdqu 16*1($inp),%x#$T1 -+ vpmuludq $H1,$R2,$M3 -+ vpmuludq $H1,$R3,$M4 -+ vpmuludq $H1,$S4,$M0 -+ vpmuludq $H0,$R2,$M2 -+ vpaddq $M3,$D3,$D3 # d3 += h1*r2 -+ vpaddq $M4,$D4,$D4 # d4 += h1*r3 -+ vpaddq $M0,$D0,$D0 # d0 += h1*s4 -+ vpaddq $M2,$D2,$D2 # d2 += h0*r2 -+ -+ vinserti128 \$1,16*2($inp),%y#$T0,%y#$T0 -+ vpmuludq $H3,$R0,$M3 -+ vpmuludq $H3,$R1,$M4 -+ vpmuludq $H1,$R0,$M1 -+ vpmuludq $H1,$R1,$M2 -+ vpaddq $M3,$D3,$D3 # d3 += h3*r0 -+ vpaddq $M4,$D4,$D4 # d4 += h3*r1 -+ vpaddq $M1,$D1,$D1 # d1 += h1*r0 -+ vpaddq $M2,$D2,$D2 # d2 += h1*r1 -+ -+ vinserti128 \$1,16*3($inp),%y#$T1,%y#$T1 -+ vpmuludq $H4,$S4,$M3 -+ vpmuludq $H4,$R0,$M4 -+ vpmuludq $H3,$S2,$M0 -+ vpmuludq $H3,$S3,$M1 -+ vpmuludq $H3,$S4,$M2 -+ vpaddq $M3,$D3,$H3 # h3 = d3 + h4*s4 -+ vpaddq $M4,$D4,$D4 # d4 += h4*r0 -+ vpaddq $M0,$D0,$D0 # d0 += h3*s2 -+ vpaddq $M1,$D1,$D1 # d1 += h3*s3 -+ vpaddq $M2,$D2,$D2 # d2 += h3*s4 -+ 
-+ vpmuludq $H4,$S1,$M0 -+ vpmuludq $H4,$S2,$M1 -+ vpmuludq $H4,$S3,$M2 -+ vpaddq $M0,$D0,$H0 # h0 = d0 + h4*s1 -+ vpaddq $M1,$D1,$H1 # h1 = d2 + h4*s2 -+ vpaddq $M2,$D2,$H2 # h2 = d3 + h4*s3 -+ -+ ################################################################ -+ # horizontal addition -+ -+ mov \$1,%eax -+ vpermq \$0xb1,$H3,$D3 -+ vpermq \$0xb1,$D4,$H4 -+ vpermq \$0xb1,$H0,$D0 -+ vpermq \$0xb1,$H1,$D1 -+ vpermq \$0xb1,$H2,$D2 -+ vpaddq $D3,$H3,$H3 -+ vpaddq $D4,$H4,$H4 -+ vpaddq $D0,$H0,$H0 -+ vpaddq $D1,$H1,$H1 -+ vpaddq $D2,$H2,$H2 -+ -+ kmovw %eax,%k3 -+ vpermq \$0x2,$H3,$D3 -+ vpermq \$0x2,$H4,$D4 -+ vpermq \$0x2,$H0,$D0 -+ vpermq \$0x2,$H1,$D1 -+ vpermq \$0x2,$H2,$D2 -+ vpaddq $D3,$H3,$H3 -+ vpaddq $D4,$H4,$H4 -+ vpaddq $D0,$H0,$H0 -+ vpaddq $D1,$H1,$H1 -+ vpaddq $D2,$H2,$H2 -+ -+ vextracti64x4 \$0x1,$H3,%y#$D3 -+ vextracti64x4 \$0x1,$H4,%y#$D4 -+ vextracti64x4 \$0x1,$H0,%y#$D0 -+ vextracti64x4 \$0x1,$H1,%y#$D1 -+ vextracti64x4 \$0x1,$H2,%y#$D2 -+ vpaddq $D3,$H3,${H3}{%k3}{z} # keep single qword in case -+ vpaddq $D4,$H4,${H4}{%k3}{z} # it's passed to .Ltail_avx2 -+ vpaddq $D0,$H0,${H0}{%k3}{z} -+ vpaddq $D1,$H1,${H1}{%k3}{z} -+ vpaddq $D2,$H2,${H2}{%k3}{z} -+___ -+map(s/%z/%y/,($T0,$T1,$T2,$T3,$T4, $PADBIT)); -+map(s/%z/%y/,($H0,$H1,$H2,$H3,$H4, $D0,$D1,$D2,$D3,$D4, $MASK)); -+$code.=<<___; -+ ################################################################ -+ # lazy reduction (interleaved with input splat) -+ -+ vpsrlq \$26,$H3,$D3 -+ vpand $MASK,$H3,$H3 -+ vpsrldq \$6,$T0,$T2 # splat input -+ vpsrldq \$6,$T1,$T3 -+ vpunpckhqdq $T1,$T0,$T4 # 4 -+ vpaddq $D3,$H4,$H4 # h3 -> h4 -+ -+ vpsrlq \$26,$H0,$D0 -+ vpand $MASK,$H0,$H0 -+ vpunpcklqdq $T3,$T2,$T2 # 2:3 -+ vpunpcklqdq $T1,$T0,$T0 # 0:1 -+ vpaddq $D0,$H1,$H1 # h0 -> h1 -+ -+ vpsrlq \$26,$H4,$D4 -+ vpand $MASK,$H4,$H4 -+ -+ vpsrlq \$26,$H1,$D1 -+ vpand $MASK,$H1,$H1 -+ vpsrlq \$30,$T2,$T3 -+ vpsrlq \$4,$T2,$T2 -+ vpaddq $D1,$H2,$H2 # h1 -> h2 -+ -+ vpaddq $D4,$H0,$H0 -+ vpsllq \$2,$D4,$D4 -+ vpsrlq 
\$26,$T0,$T1 -+ vpsrlq \$40,$T4,$T4 # 4 -+ vpaddq $D4,$H0,$H0 # h4 -> h0 -+ -+ vpsrlq \$26,$H2,$D2 -+ vpand $MASK,$H2,$H2 -+ vpand $MASK,$T2,$T2 # 2 -+ vpand $MASK,$T0,$T0 # 0 -+ vpaddq $D2,$H3,$H3 # h2 -> h3 -+ -+ vpsrlq \$26,$H0,$D0 -+ vpand $MASK,$H0,$H0 -+ vpaddq $H2,$T2,$H2 # accumulate input for .Ltail_avx2 -+ vpand $MASK,$T1,$T1 # 1 -+ vpaddq $D0,$H1,$H1 # h0 -> h1 -+ -+ vpsrlq \$26,$H3,$D3 -+ vpand $MASK,$H3,$H3 -+ vpand $MASK,$T3,$T3 # 3 -+ vpor 32(%rcx),$T4,$T4 # padbit, yes, always -+ vpaddq $D3,$H4,$H4 # h3 -> h4 -+ -+ lea 0x90(%rsp),%rax # size optimization for .Ltail_avx2 -+ add \$64,$len -+ jnz .Ltail_avx2 -+ -+ vpsubq $T2,$H2,$H2 # undo input accumulation -+ vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced -+ vmovd %x#$H1,`4*1-48-64`($ctx) -+ vmovd %x#$H2,`4*2-48-64`($ctx) -+ vmovd %x#$H3,`4*3-48-64`($ctx) -+ vmovd %x#$H4,`4*4-48-64`($ctx) -+ vzeroall -+___ -+$code.=<<___ if ($win64); -+ movdqa 0x50(%r11),%xmm6 -+ movdqa 0x60(%r11),%xmm7 -+ movdqa 0x70(%r11),%xmm8 -+ movdqa 0x80(%r11),%xmm9 -+ movdqa 0x90(%r11),%xmm10 -+ movdqa 0xa0(%r11),%xmm11 -+ movdqa 0xb0(%r11),%xmm12 -+ movdqa 0xc0(%r11),%xmm13 -+ movdqa 0xd0(%r11),%xmm14 -+ movdqa 0xe0(%r11),%xmm15 -+ lea 0xf8(%r11),%rsp -+.Ldo_avx512_epilogue: -+___ -+$code.=<<___ if (!$win64); -+ lea 8(%r11),%rsp -+.cfi_def_cfa %rsp,8 -+___ -+$code.=<<___; -+ ret -+.cfi_endproc -+.size poly1305_blocks_avx512,.-poly1305_blocks_avx512 -+___ -+if ($avx>3) { -+######################################################################## -+# VPMADD52 version using 2^44 radix. -+# -+# One can argue that base 2^52 would be more natural. Well, even though -+# some operations would be more natural, one has to recognize couple of -+# things. Base 2^52 doesn't provide advantage over base 2^44 if you look -+# at amount of multiply-n-accumulate operations. 
Secondly, it makes it -+# impossible to pre-compute multiples of 5 [referred to as s[]/sN in -+# reference implementations], which means that more such operations -+# would have to be performed in inner loop, which in turn makes critical -+# path longer. In other words, even though base 2^44 reduction might -+# look less elegant, overall critical path is actually shorter... -+ -+######################################################################## -+# Layout of opaque area is following. -+# -+# unsigned __int64 h[3]; # current hash value base 2^44 -+# unsigned __int64 s[2]; # key value*20 base 2^44 -+# unsigned __int64 r[3]; # key value base 2^44 -+# struct { unsigned __int64 r^1, r^3, r^2, r^4; } R[4]; -+# # r^n positions reflect -+# # placement in register, not -+# # memory, R[3] is R[1]*20 -+ -+$code.=<<___; -+.type poly1305_init_base2_44,\@function,3 -+.align 32 -+poly1305_init_base2_44: -+ xor %rax,%rax -+ mov %rax,0($ctx) # initialize hash value -+ mov %rax,8($ctx) -+ mov %rax,16($ctx) -+ -+.Linit_base2_44: -+ lea poly1305_blocks_vpmadd52(%rip),%r10 -+ lea poly1305_emit_base2_44(%rip),%r11 -+ -+ mov \$0x0ffffffc0fffffff,%rax -+ mov \$0x0ffffffc0ffffffc,%rcx -+ and 0($inp),%rax -+ mov \$0x00000fffffffffff,%r8 -+ and 8($inp),%rcx -+ mov \$0x00000fffffffffff,%r9 -+ and %rax,%r8 -+ shrd \$44,%rcx,%rax -+ mov %r8,40($ctx) # r0 -+ and %r9,%rax -+ shr \$24,%rcx -+ mov %rax,48($ctx) # r1 -+ lea (%rax,%rax,4),%rax # *5 -+ mov %rcx,56($ctx) # r2 -+ shl \$2,%rax # magic <<2 -+ lea (%rcx,%rcx,4),%rcx # *5 -+ shl \$2,%rcx # magic <<2 -+ mov %rax,24($ctx) # s1 -+ mov %rcx,32($ctx) # s2 -+ movq \$-1,64($ctx) # write impossible value -+___ -+$code.=<<___ if ($flavour !~ /elf32/); -+ mov %r10,0(%rdx) -+ mov %r11,8(%rdx) -+___ -+$code.=<<___ if ($flavour =~ /elf32/); -+ mov %r10d,0(%rdx) -+ mov %r11d,4(%rdx) -+___ -+$code.=<<___; -+ mov \$1,%eax -+ ret -+.size poly1305_init_base2_44,.-poly1305_init_base2_44 -+___ -+{ -+my ($H0,$H1,$H2,$r2r1r0,$r1r0s2,$r0s2s1,$Dlo,$Dhi) = 
map("%ymm$_",(0..5,16,17)); -+my ($T0,$inp_permd,$inp_shift,$PAD) = map("%ymm$_",(18..21)); -+my ($reduc_mask,$reduc_rght,$reduc_left) = map("%ymm$_",(22..25)); -+ -+$code.=<<___; -+.type poly1305_blocks_vpmadd52,\@function,4 -+.align 32 -+poly1305_blocks_vpmadd52: -+ shr \$4,$len -+ jz .Lno_data_vpmadd52 # too short -+ -+ shl \$40,$padbit -+ mov 64($ctx),%r8 # peek on power of the key -+ -+ # if powers of the key are not calculated yet, process up to 3 -+ # blocks with this single-block subroutine, otherwise ensure that -+ # length is divisible by 2 blocks and pass the rest down to next -+ # subroutine... -+ -+ mov \$3,%rax -+ mov \$1,%r10 -+ cmp \$4,$len # is input long -+ cmovae %r10,%rax -+ test %r8,%r8 # is power value impossible? -+ cmovns %r10,%rax -+ -+ and $len,%rax # is input of favourable length? -+ jz .Lblocks_vpmadd52_4x -+ -+ sub %rax,$len -+ mov \$7,%r10d -+ mov \$1,%r11d -+ kmovw %r10d,%k7 -+ lea .L2_44_inp_permd(%rip),%r10 -+ kmovw %r11d,%k1 -+ -+ vmovq $padbit,%x#$PAD -+ vmovdqa64 0(%r10),$inp_permd # .L2_44_inp_permd -+ vmovdqa64 32(%r10),$inp_shift # .L2_44_inp_shift -+ vpermq \$0xcf,$PAD,$PAD -+ vmovdqa64 64(%r10),$reduc_mask # .L2_44_mask -+ -+ vmovdqu64 0($ctx),${Dlo}{%k7}{z} # load hash value -+ vmovdqu64 40($ctx),${r2r1r0}{%k7}{z} # load keys -+ vmovdqu64 32($ctx),${r1r0s2}{%k7}{z} -+ vmovdqu64 24($ctx),${r0s2s1}{%k7}{z} -+ -+ vmovdqa64 96(%r10),$reduc_rght # .L2_44_shift_rgt -+ vmovdqa64 128(%r10),$reduc_left # .L2_44_shift_lft -+ -+ jmp .Loop_vpmadd52 -+ -+.align 32 -+.Loop_vpmadd52: -+ vmovdqu32 0($inp),%x#$T0 # load input as ----3210 -+ lea 16($inp),$inp -+ -+ vpermd $T0,$inp_permd,$T0 # ----3210 -> --322110 -+ vpsrlvq $inp_shift,$T0,$T0 -+ vpandq $reduc_mask,$T0,$T0 -+ vporq $PAD,$T0,$T0 -+ -+ vpaddq $T0,$Dlo,$Dlo # accumulate input -+ -+ vpermq \$0,$Dlo,${H0}{%k7}{z} # smash hash value -+ vpermq \$0b01010101,$Dlo,${H1}{%k7}{z} -+ vpermq \$0b10101010,$Dlo,${H2}{%k7}{z} -+ -+ vpxord $Dlo,$Dlo,$Dlo -+ vpxord $Dhi,$Dhi,$Dhi -+ -+ 
vpmadd52luq $r2r1r0,$H0,$Dlo -+ vpmadd52huq $r2r1r0,$H0,$Dhi -+ -+ vpmadd52luq $r1r0s2,$H1,$Dlo -+ vpmadd52huq $r1r0s2,$H1,$Dhi -+ -+ vpmadd52luq $r0s2s1,$H2,$Dlo -+ vpmadd52huq $r0s2s1,$H2,$Dhi -+ -+ vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost qword -+ vpsllvq $reduc_left,$Dhi,$Dhi # 0 in topmost qword -+ vpandq $reduc_mask,$Dlo,$Dlo -+ -+ vpaddq $T0,$Dhi,$Dhi -+ -+ vpermq \$0b10010011,$Dhi,$Dhi # 0 in lowest qword -+ -+ vpaddq $Dhi,$Dlo,$Dlo # note topmost qword :-) -+ -+ vpsrlvq $reduc_rght,$Dlo,$T0 # 0 in topmost word -+ vpandq $reduc_mask,$Dlo,$Dlo -+ -+ vpermq \$0b10010011,$T0,$T0 -+ -+ vpaddq $T0,$Dlo,$Dlo -+ -+ vpermq \$0b10010011,$Dlo,${T0}{%k1}{z} -+ -+ vpaddq $T0,$Dlo,$Dlo -+ vpsllq \$2,$T0,$T0 -+ -+ vpaddq $T0,$Dlo,$Dlo -+ -+ dec %rax # len-=16 -+ jnz .Loop_vpmadd52 -+ -+ vmovdqu64 $Dlo,0($ctx){%k7} # store hash value -+ -+ test $len,$len -+ jnz .Lblocks_vpmadd52_4x -+ -+.Lno_data_vpmadd52: -+ ret -+.size poly1305_blocks_vpmadd52,.-poly1305_blocks_vpmadd52 -+___ -+} -+{ -+######################################################################## -+# As implied by its name 4x subroutine processes 4 blocks in parallel -+# (but handles even 4*n+2 blocks lengths). It takes up to 4th key power -+# and is handled in 256-bit %ymm registers. -+ -+my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17)); -+my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23)); -+my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31)); -+ -+$code.=<<___; -+.type poly1305_blocks_vpmadd52_4x,\@function,4 -+.align 32 -+poly1305_blocks_vpmadd52_4x: -+ shr \$4,$len -+ jz .Lno_data_vpmadd52_4x # too short -+ -+ shl \$40,$padbit -+ mov 64($ctx),%r8 # peek on power of the key -+ -+.Lblocks_vpmadd52_4x: -+ vpbroadcastq $padbit,$PAD -+ -+ vmovdqa64 .Lx_mask44(%rip),$mask44 -+ mov \$5,%eax -+ vmovdqa64 .Lx_mask42(%rip),$mask42 -+ kmovw %eax,%k1 # used in 2x path -+ -+ test %r8,%r8 # is power value impossible? 
-+ js .Linit_vpmadd52 # if it is, then init R[4] -+ -+ vmovq 0($ctx),%x#$H0 # load current hash value -+ vmovq 8($ctx),%x#$H1 -+ vmovq 16($ctx),%x#$H2 -+ -+ test \$3,$len # is length 4*n+2? -+ jnz .Lblocks_vpmadd52_2x_do -+ -+.Lblocks_vpmadd52_4x_do: -+ vpbroadcastq 64($ctx),$R0 # load 4th power of the key -+ vpbroadcastq 96($ctx),$R1 -+ vpbroadcastq 128($ctx),$R2 -+ vpbroadcastq 160($ctx),$S1 -+ -+.Lblocks_vpmadd52_4x_key_loaded: -+ vpsllq \$2,$R2,$S2 # S2 = R2*5*4 -+ vpaddq $R2,$S2,$S2 -+ vpsllq \$2,$S2,$S2 -+ -+ test \$7,$len # is len 8*n? -+ jz .Lblocks_vpmadd52_8x -+ -+ vmovdqu64 16*0($inp),$T2 # load data -+ vmovdqu64 16*2($inp),$T3 -+ lea 16*4($inp),$inp -+ -+ vpunpcklqdq $T3,$T2,$T1 # transpose data -+ vpunpckhqdq $T3,$T2,$T3 -+ -+ # at this point 64-bit lanes are ordered as 3-1-2-0 -+ -+ vpsrlq \$24,$T3,$T2 # splat the data -+ vporq $PAD,$T2,$T2 -+ vpaddq $T2,$H2,$H2 # accumulate input -+ vpandq $mask44,$T1,$T0 -+ vpsrlq \$44,$T1,$T1 -+ vpsllq \$20,$T3,$T3 -+ vporq $T3,$T1,$T1 -+ vpandq $mask44,$T1,$T1 -+ -+ sub \$4,$len -+ jz .Ltail_vpmadd52_4x -+ jmp .Loop_vpmadd52_4x -+ ud2 -+ -+.align 32 -+.Linit_vpmadd52: -+ vmovq 24($ctx),%x#$S1 # load key -+ vmovq 56($ctx),%x#$H2 -+ vmovq 32($ctx),%x#$S2 -+ vmovq 40($ctx),%x#$R0 -+ vmovq 48($ctx),%x#$R1 -+ -+ vmovdqa $R0,$H0 -+ vmovdqa $R1,$H1 -+ vmovdqa $H2,$R2 -+ -+ mov \$2,%eax -+ -+.Lmul_init_vpmadd52: -+ vpxorq $D0lo,$D0lo,$D0lo -+ vpmadd52luq $H2,$S1,$D0lo -+ vpxorq $D0hi,$D0hi,$D0hi -+ vpmadd52huq $H2,$S1,$D0hi -+ vpxorq $D1lo,$D1lo,$D1lo -+ vpmadd52luq $H2,$S2,$D1lo -+ vpxorq $D1hi,$D1hi,$D1hi -+ vpmadd52huq $H2,$S2,$D1hi -+ vpxorq $D2lo,$D2lo,$D2lo -+ vpmadd52luq $H2,$R0,$D2lo -+ vpxorq $D2hi,$D2hi,$D2hi -+ vpmadd52huq $H2,$R0,$D2hi -+ -+ vpmadd52luq $H0,$R0,$D0lo -+ vpmadd52huq $H0,$R0,$D0hi -+ vpmadd52luq $H0,$R1,$D1lo -+ vpmadd52huq $H0,$R1,$D1hi -+ vpmadd52luq $H0,$R2,$D2lo -+ vpmadd52huq $H0,$R2,$D2hi -+ -+ vpmadd52luq $H1,$S2,$D0lo -+ vpmadd52huq $H1,$S2,$D0hi -+ vpmadd52luq $H1,$R0,$D1lo -+ 
vpmadd52huq $H1,$R0,$D1hi -+ vpmadd52luq $H1,$R1,$D2lo -+ vpmadd52huq $H1,$R1,$D2hi -+ -+ ################################################################ -+ # partial reduction -+ vpsrlq \$44,$D0lo,$tmp -+ vpsllq \$8,$D0hi,$D0hi -+ vpandq $mask44,$D0lo,$H0 -+ vpaddq $tmp,$D0hi,$D0hi -+ -+ vpaddq $D0hi,$D1lo,$D1lo -+ -+ vpsrlq \$44,$D1lo,$tmp -+ vpsllq \$8,$D1hi,$D1hi -+ vpandq $mask44,$D1lo,$H1 -+ vpaddq $tmp,$D1hi,$D1hi -+ -+ vpaddq $D1hi,$D2lo,$D2lo -+ -+ vpsrlq \$42,$D2lo,$tmp -+ vpsllq \$10,$D2hi,$D2hi -+ vpandq $mask42,$D2lo,$H2 -+ vpaddq $tmp,$D2hi,$D2hi -+ -+ vpaddq $D2hi,$H0,$H0 -+ vpsllq \$2,$D2hi,$D2hi -+ -+ vpaddq $D2hi,$H0,$H0 -+ -+ vpsrlq \$44,$H0,$tmp # additional step -+ vpandq $mask44,$H0,$H0 -+ -+ vpaddq $tmp,$H1,$H1 -+ -+ dec %eax -+ jz .Ldone_init_vpmadd52 -+ -+ vpunpcklqdq $R1,$H1,$R1 # 1,2 -+ vpbroadcastq %x#$H1,%x#$H1 # 2,2 -+ vpunpcklqdq $R2,$H2,$R2 -+ vpbroadcastq %x#$H2,%x#$H2 -+ vpunpcklqdq $R0,$H0,$R0 -+ vpbroadcastq %x#$H0,%x#$H0 -+ -+ vpsllq \$2,$R1,$S1 # S1 = R1*5*4 -+ vpsllq \$2,$R2,$S2 # S2 = R2*5*4 -+ vpaddq $R1,$S1,$S1 -+ vpaddq $R2,$S2,$S2 -+ vpsllq \$2,$S1,$S1 -+ vpsllq \$2,$S2,$S2 -+ -+ jmp .Lmul_init_vpmadd52 -+ ud2 -+ -+.align 32 -+.Ldone_init_vpmadd52: -+ vinserti128 \$1,%x#$R1,$H1,$R1 # 1,2,3,4 -+ vinserti128 \$1,%x#$R2,$H2,$R2 -+ vinserti128 \$1,%x#$R0,$H0,$R0 -+ -+ vpermq \$0b11011000,$R1,$R1 # 1,3,2,4 -+ vpermq \$0b11011000,$R2,$R2 -+ vpermq \$0b11011000,$R0,$R0 -+ -+ vpsllq \$2,$R1,$S1 # S1 = R1*5*4 -+ vpaddq $R1,$S1,$S1 -+ vpsllq \$2,$S1,$S1 -+ -+ vmovq 0($ctx),%x#$H0 # load current hash value -+ vmovq 8($ctx),%x#$H1 -+ vmovq 16($ctx),%x#$H2 -+ -+ test \$3,$len # is length 4*n+2? 
-+ jnz .Ldone_init_vpmadd52_2x -+ -+ vmovdqu64 $R0,64($ctx) # save key powers -+ vpbroadcastq %x#$R0,$R0 # broadcast 4th power -+ vmovdqu64 $R1,96($ctx) -+ vpbroadcastq %x#$R1,$R1 -+ vmovdqu64 $R2,128($ctx) -+ vpbroadcastq %x#$R2,$R2 -+ vmovdqu64 $S1,160($ctx) -+ vpbroadcastq %x#$S1,$S1 -+ -+ jmp .Lblocks_vpmadd52_4x_key_loaded -+ ud2 -+ -+.align 32 -+.Ldone_init_vpmadd52_2x: -+ vmovdqu64 $R0,64($ctx) # save key powers -+ vpsrldq \$8,$R0,$R0 # 0-1-0-2 -+ vmovdqu64 $R1,96($ctx) -+ vpsrldq \$8,$R1,$R1 -+ vmovdqu64 $R2,128($ctx) -+ vpsrldq \$8,$R2,$R2 -+ vmovdqu64 $S1,160($ctx) -+ vpsrldq \$8,$S1,$S1 -+ jmp .Lblocks_vpmadd52_2x_key_loaded -+ ud2 -+ -+.align 32 -+.Lblocks_vpmadd52_2x_do: -+ vmovdqu64 128+8($ctx),${R2}{%k1}{z}# load 2nd and 1st key powers -+ vmovdqu64 160+8($ctx),${S1}{%k1}{z} -+ vmovdqu64 64+8($ctx),${R0}{%k1}{z} -+ vmovdqu64 96+8($ctx),${R1}{%k1}{z} -+ -+.Lblocks_vpmadd52_2x_key_loaded: -+ vmovdqu64 16*0($inp),$T2 # load data -+ vpxorq $T3,$T3,$T3 -+ lea 16*2($inp),$inp -+ -+ vpunpcklqdq $T3,$T2,$T1 # transpose data -+ vpunpckhqdq $T3,$T2,$T3 -+ -+ # at this point 64-bit lanes are ordered as x-1-x-0 -+ -+ vpsrlq \$24,$T3,$T2 # splat the data -+ vporq $PAD,$T2,$T2 -+ vpaddq $T2,$H2,$H2 # accumulate input -+ vpandq $mask44,$T1,$T0 -+ vpsrlq \$44,$T1,$T1 -+ vpsllq \$20,$T3,$T3 -+ vporq $T3,$T1,$T1 -+ vpandq $mask44,$T1,$T1 -+ -+ jmp .Ltail_vpmadd52_2x -+ ud2 -+ -+.align 32 -+.Loop_vpmadd52_4x: -+ #vpaddq $T2,$H2,$H2 # accumulate input -+ vpaddq $T0,$H0,$H0 -+ vpaddq $T1,$H1,$H1 -+ -+ vpxorq $D0lo,$D0lo,$D0lo -+ vpmadd52luq $H2,$S1,$D0lo -+ vpxorq $D0hi,$D0hi,$D0hi -+ vpmadd52huq $H2,$S1,$D0hi -+ vpxorq $D1lo,$D1lo,$D1lo -+ vpmadd52luq $H2,$S2,$D1lo -+ vpxorq $D1hi,$D1hi,$D1hi -+ vpmadd52huq $H2,$S2,$D1hi -+ vpxorq $D2lo,$D2lo,$D2lo -+ vpmadd52luq $H2,$R0,$D2lo -+ vpxorq $D2hi,$D2hi,$D2hi -+ vpmadd52huq $H2,$R0,$D2hi -+ -+ vmovdqu64 16*0($inp),$T2 # load data -+ vmovdqu64 16*2($inp),$T3 -+ lea 16*4($inp),$inp -+ vpmadd52luq $H0,$R0,$D0lo -+ vpmadd52huq 
$H0,$R0,$D0hi -+ vpmadd52luq $H0,$R1,$D1lo -+ vpmadd52huq $H0,$R1,$D1hi -+ vpmadd52luq $H0,$R2,$D2lo -+ vpmadd52huq $H0,$R2,$D2hi -+ -+ vpunpcklqdq $T3,$T2,$T1 # transpose data -+ vpunpckhqdq $T3,$T2,$T3 -+ vpmadd52luq $H1,$S2,$D0lo -+ vpmadd52huq $H1,$S2,$D0hi -+ vpmadd52luq $H1,$R0,$D1lo -+ vpmadd52huq $H1,$R0,$D1hi -+ vpmadd52luq $H1,$R1,$D2lo -+ vpmadd52huq $H1,$R1,$D2hi -+ -+ ################################################################ -+ # partial reduction (interleaved with data splat) -+ vpsrlq \$44,$D0lo,$tmp -+ vpsllq \$8,$D0hi,$D0hi -+ vpandq $mask44,$D0lo,$H0 -+ vpaddq $tmp,$D0hi,$D0hi -+ -+ vpsrlq \$24,$T3,$T2 -+ vporq $PAD,$T2,$T2 -+ vpaddq $D0hi,$D1lo,$D1lo -+ -+ vpsrlq \$44,$D1lo,$tmp -+ vpsllq \$8,$D1hi,$D1hi -+ vpandq $mask44,$D1lo,$H1 -+ vpaddq $tmp,$D1hi,$D1hi -+ -+ vpandq $mask44,$T1,$T0 -+ vpsrlq \$44,$T1,$T1 -+ vpsllq \$20,$T3,$T3 -+ vpaddq $D1hi,$D2lo,$D2lo -+ -+ vpsrlq \$42,$D2lo,$tmp -+ vpsllq \$10,$D2hi,$D2hi -+ vpandq $mask42,$D2lo,$H2 -+ vpaddq $tmp,$D2hi,$D2hi -+ -+ vpaddq $T2,$H2,$H2 # accumulate input -+ vpaddq $D2hi,$H0,$H0 -+ vpsllq \$2,$D2hi,$D2hi -+ -+ vpaddq $D2hi,$H0,$H0 -+ vporq $T3,$T1,$T1 -+ vpandq $mask44,$T1,$T1 -+ -+ vpsrlq \$44,$H0,$tmp # additional step -+ vpandq $mask44,$H0,$H0 -+ -+ vpaddq $tmp,$H1,$H1 -+ -+ sub \$4,$len # len-=64 -+ jnz .Loop_vpmadd52_4x -+ -+.Ltail_vpmadd52_4x: -+ vmovdqu64 128($ctx),$R2 # load all key powers -+ vmovdqu64 160($ctx),$S1 -+ vmovdqu64 64($ctx),$R0 -+ vmovdqu64 96($ctx),$R1 -+ -+.Ltail_vpmadd52_2x: -+ vpsllq \$2,$R2,$S2 # S2 = R2*5*4 -+ vpaddq $R2,$S2,$S2 -+ vpsllq \$2,$S2,$S2 -+ -+ #vpaddq $T2,$H2,$H2 # accumulate input -+ vpaddq $T0,$H0,$H0 -+ vpaddq $T1,$H1,$H1 -+ -+ vpxorq $D0lo,$D0lo,$D0lo -+ vpmadd52luq $H2,$S1,$D0lo -+ vpxorq $D0hi,$D0hi,$D0hi -+ vpmadd52huq $H2,$S1,$D0hi -+ vpxorq $D1lo,$D1lo,$D1lo -+ vpmadd52luq $H2,$S2,$D1lo -+ vpxorq $D1hi,$D1hi,$D1hi -+ vpmadd52huq $H2,$S2,$D1hi -+ vpxorq $D2lo,$D2lo,$D2lo -+ vpmadd52luq $H2,$R0,$D2lo -+ vpxorq $D2hi,$D2hi,$D2hi -+ 
vpmadd52huq $H2,$R0,$D2hi -+ -+ vpmadd52luq $H0,$R0,$D0lo -+ vpmadd52huq $H0,$R0,$D0hi -+ vpmadd52luq $H0,$R1,$D1lo -+ vpmadd52huq $H0,$R1,$D1hi -+ vpmadd52luq $H0,$R2,$D2lo -+ vpmadd52huq $H0,$R2,$D2hi -+ -+ vpmadd52luq $H1,$S2,$D0lo -+ vpmadd52huq $H1,$S2,$D0hi -+ vpmadd52luq $H1,$R0,$D1lo -+ vpmadd52huq $H1,$R0,$D1hi -+ vpmadd52luq $H1,$R1,$D2lo -+ vpmadd52huq $H1,$R1,$D2hi -+ -+ ################################################################ -+ # horizontal addition -+ -+ mov \$1,%eax -+ kmovw %eax,%k1 -+ vpsrldq \$8,$D0lo,$T0 -+ vpsrldq \$8,$D0hi,$H0 -+ vpsrldq \$8,$D1lo,$T1 -+ vpsrldq \$8,$D1hi,$H1 -+ vpaddq $T0,$D0lo,$D0lo -+ vpaddq $H0,$D0hi,$D0hi -+ vpsrldq \$8,$D2lo,$T2 -+ vpsrldq \$8,$D2hi,$H2 -+ vpaddq $T1,$D1lo,$D1lo -+ vpaddq $H1,$D1hi,$D1hi -+ vpermq \$0x2,$D0lo,$T0 -+ vpermq \$0x2,$D0hi,$H0 -+ vpaddq $T2,$D2lo,$D2lo -+ vpaddq $H2,$D2hi,$D2hi -+ -+ vpermq \$0x2,$D1lo,$T1 -+ vpermq \$0x2,$D1hi,$H1 -+ vpaddq $T0,$D0lo,${D0lo}{%k1}{z} -+ vpaddq $H0,$D0hi,${D0hi}{%k1}{z} -+ vpermq \$0x2,$D2lo,$T2 -+ vpermq \$0x2,$D2hi,$H2 -+ vpaddq $T1,$D1lo,${D1lo}{%k1}{z} -+ vpaddq $H1,$D1hi,${D1hi}{%k1}{z} -+ vpaddq $T2,$D2lo,${D2lo}{%k1}{z} -+ vpaddq $H2,$D2hi,${D2hi}{%k1}{z} -+ -+ ################################################################ -+ # partial reduction -+ vpsrlq \$44,$D0lo,$tmp -+ vpsllq \$8,$D0hi,$D0hi -+ vpandq $mask44,$D0lo,$H0 -+ vpaddq $tmp,$D0hi,$D0hi -+ -+ vpaddq $D0hi,$D1lo,$D1lo -+ -+ vpsrlq \$44,$D1lo,$tmp -+ vpsllq \$8,$D1hi,$D1hi -+ vpandq $mask44,$D1lo,$H1 -+ vpaddq $tmp,$D1hi,$D1hi -+ -+ vpaddq $D1hi,$D2lo,$D2lo -+ -+ vpsrlq \$42,$D2lo,$tmp -+ vpsllq \$10,$D2hi,$D2hi -+ vpandq $mask42,$D2lo,$H2 -+ vpaddq $tmp,$D2hi,$D2hi -+ -+ vpaddq $D2hi,$H0,$H0 -+ vpsllq \$2,$D2hi,$D2hi -+ -+ vpaddq $D2hi,$H0,$H0 -+ -+ vpsrlq \$44,$H0,$tmp # additional step -+ vpandq $mask44,$H0,$H0 -+ -+ vpaddq $tmp,$H1,$H1 -+ # at this point $len is -+ # either 4*n+2 or 0... 
-+ sub \$2,$len # len-=32 -+ ja .Lblocks_vpmadd52_4x_do -+ -+ vmovq %x#$H0,0($ctx) -+ vmovq %x#$H1,8($ctx) -+ vmovq %x#$H2,16($ctx) -+ vzeroall -+ -+.Lno_data_vpmadd52_4x: -+ ret -+.size poly1305_blocks_vpmadd52_4x,.-poly1305_blocks_vpmadd52_4x -+___ -+} -+{ -+######################################################################## -+# As implied by its name 8x subroutine processes 8 blocks in parallel... -+# This is intermediate version, as it's used only in cases when input -+# length is either 8*n, 8*n+1 or 8*n+2... -+ -+my ($H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2) = map("%ymm$_",(0..5,16,17)); -+my ($D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi) = map("%ymm$_",(18..23)); -+my ($T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD) = map("%ymm$_",(24..31)); -+my ($RR0,$RR1,$RR2,$SS1,$SS2) = map("%ymm$_",(6..10)); -+ -+$code.=<<___; -+.type poly1305_blocks_vpmadd52_8x,\@function,4 -+.align 32 -+poly1305_blocks_vpmadd52_8x: -+ shr \$4,$len -+ jz .Lno_data_vpmadd52_8x # too short -+ -+ shl \$40,$padbit -+ mov 64($ctx),%r8 # peek on power of the key -+ -+ vmovdqa64 .Lx_mask44(%rip),$mask44 -+ vmovdqa64 .Lx_mask42(%rip),$mask42 -+ -+ test %r8,%r8 # is power value impossible? 
-+ js .Linit_vpmadd52 # if it is, then init R[4] -+ -+ vmovq 0($ctx),%x#$H0 # load current hash value -+ vmovq 8($ctx),%x#$H1 -+ vmovq 16($ctx),%x#$H2 -+ -+.Lblocks_vpmadd52_8x: -+ ################################################################ -+ # fist we calculate more key powers -+ -+ vmovdqu64 128($ctx),$R2 # load 1-3-2-4 powers -+ vmovdqu64 160($ctx),$S1 -+ vmovdqu64 64($ctx),$R0 -+ vmovdqu64 96($ctx),$R1 -+ -+ vpsllq \$2,$R2,$S2 # S2 = R2*5*4 -+ vpaddq $R2,$S2,$S2 -+ vpsllq \$2,$S2,$S2 -+ -+ vpbroadcastq %x#$R2,$RR2 # broadcast 4th power -+ vpbroadcastq %x#$R0,$RR0 -+ vpbroadcastq %x#$R1,$RR1 -+ -+ vpxorq $D0lo,$D0lo,$D0lo -+ vpmadd52luq $RR2,$S1,$D0lo -+ vpxorq $D0hi,$D0hi,$D0hi -+ vpmadd52huq $RR2,$S1,$D0hi -+ vpxorq $D1lo,$D1lo,$D1lo -+ vpmadd52luq $RR2,$S2,$D1lo -+ vpxorq $D1hi,$D1hi,$D1hi -+ vpmadd52huq $RR2,$S2,$D1hi -+ vpxorq $D2lo,$D2lo,$D2lo -+ vpmadd52luq $RR2,$R0,$D2lo -+ vpxorq $D2hi,$D2hi,$D2hi -+ vpmadd52huq $RR2,$R0,$D2hi -+ -+ vpmadd52luq $RR0,$R0,$D0lo -+ vpmadd52huq $RR0,$R0,$D0hi -+ vpmadd52luq $RR0,$R1,$D1lo -+ vpmadd52huq $RR0,$R1,$D1hi -+ vpmadd52luq $RR0,$R2,$D2lo -+ vpmadd52huq $RR0,$R2,$D2hi -+ -+ vpmadd52luq $RR1,$S2,$D0lo -+ vpmadd52huq $RR1,$S2,$D0hi -+ vpmadd52luq $RR1,$R0,$D1lo -+ vpmadd52huq $RR1,$R0,$D1hi -+ vpmadd52luq $RR1,$R1,$D2lo -+ vpmadd52huq $RR1,$R1,$D2hi -+ -+ ################################################################ -+ # partial reduction -+ vpsrlq \$44,$D0lo,$tmp -+ vpsllq \$8,$D0hi,$D0hi -+ vpandq $mask44,$D0lo,$RR0 -+ vpaddq $tmp,$D0hi,$D0hi -+ -+ vpaddq $D0hi,$D1lo,$D1lo -+ -+ vpsrlq \$44,$D1lo,$tmp -+ vpsllq \$8,$D1hi,$D1hi -+ vpandq $mask44,$D1lo,$RR1 -+ vpaddq $tmp,$D1hi,$D1hi -+ -+ vpaddq $D1hi,$D2lo,$D2lo -+ -+ vpsrlq \$42,$D2lo,$tmp -+ vpsllq \$10,$D2hi,$D2hi -+ vpandq $mask42,$D2lo,$RR2 -+ vpaddq $tmp,$D2hi,$D2hi -+ -+ vpaddq $D2hi,$RR0,$RR0 -+ vpsllq \$2,$D2hi,$D2hi -+ -+ vpaddq $D2hi,$RR0,$RR0 -+ -+ vpsrlq \$44,$RR0,$tmp # additional step -+ vpandq $mask44,$RR0,$RR0 -+ -+ vpaddq $tmp,$RR1,$RR1 
-+ -+ ################################################################ -+ # At this point Rx holds 1324 powers, RRx - 5768, and the goal -+ # is 15263748, which reflects how data is loaded... -+ -+ vpunpcklqdq $R2,$RR2,$T2 # 3748 -+ vpunpckhqdq $R2,$RR2,$R2 # 1526 -+ vpunpcklqdq $R0,$RR0,$T0 -+ vpunpckhqdq $R0,$RR0,$R0 -+ vpunpcklqdq $R1,$RR1,$T1 -+ vpunpckhqdq $R1,$RR1,$R1 -+___ -+######## switch to %zmm -+map(s/%y/%z/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2); -+map(s/%y/%z/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi); -+map(s/%y/%z/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD); -+map(s/%y/%z/, $RR0,$RR1,$RR2,$SS1,$SS2); -+ -+$code.=<<___; -+ vshufi64x2 \$0x44,$R2,$T2,$RR2 # 15263748 -+ vshufi64x2 \$0x44,$R0,$T0,$RR0 -+ vshufi64x2 \$0x44,$R1,$T1,$RR1 -+ -+ vmovdqu64 16*0($inp),$T2 # load data -+ vmovdqu64 16*4($inp),$T3 -+ lea 16*8($inp),$inp -+ -+ vpsllq \$2,$RR2,$SS2 # S2 = R2*5*4 -+ vpsllq \$2,$RR1,$SS1 # S1 = R1*5*4 -+ vpaddq $RR2,$SS2,$SS2 -+ vpaddq $RR1,$SS1,$SS1 -+ vpsllq \$2,$SS2,$SS2 -+ vpsllq \$2,$SS1,$SS1 -+ -+ vpbroadcastq $padbit,$PAD -+ vpbroadcastq %x#$mask44,$mask44 -+ vpbroadcastq %x#$mask42,$mask42 -+ -+ vpbroadcastq %x#$SS1,$S1 # broadcast 8th power -+ vpbroadcastq %x#$SS2,$S2 -+ vpbroadcastq %x#$RR0,$R0 -+ vpbroadcastq %x#$RR1,$R1 -+ vpbroadcastq %x#$RR2,$R2 -+ -+ vpunpcklqdq $T3,$T2,$T1 # transpose data -+ vpunpckhqdq $T3,$T2,$T3 -+ -+ # at this point 64-bit lanes are ordered as 73625140 -+ -+ vpsrlq \$24,$T3,$T2 # splat the data -+ vporq $PAD,$T2,$T2 -+ vpaddq $T2,$H2,$H2 # accumulate input -+ vpandq $mask44,$T1,$T0 -+ vpsrlq \$44,$T1,$T1 -+ vpsllq \$20,$T3,$T3 -+ vporq $T3,$T1,$T1 -+ vpandq $mask44,$T1,$T1 -+ -+ sub \$8,$len -+ jz .Ltail_vpmadd52_8x -+ jmp .Loop_vpmadd52_8x -+ -+.align 32 -+.Loop_vpmadd52_8x: -+ #vpaddq $T2,$H2,$H2 # accumulate input -+ vpaddq $T0,$H0,$H0 -+ vpaddq $T1,$H1,$H1 -+ -+ vpxorq $D0lo,$D0lo,$D0lo -+ vpmadd52luq $H2,$S1,$D0lo -+ vpxorq $D0hi,$D0hi,$D0hi -+ vpmadd52huq $H2,$S1,$D0hi -+ vpxorq $D1lo,$D1lo,$D1lo -+ vpmadd52luq 
$H2,$S2,$D1lo -+ vpxorq $D1hi,$D1hi,$D1hi -+ vpmadd52huq $H2,$S2,$D1hi -+ vpxorq $D2lo,$D2lo,$D2lo -+ vpmadd52luq $H2,$R0,$D2lo -+ vpxorq $D2hi,$D2hi,$D2hi -+ vpmadd52huq $H2,$R0,$D2hi -+ -+ vmovdqu64 16*0($inp),$T2 # load data -+ vmovdqu64 16*4($inp),$T3 -+ lea 16*8($inp),$inp -+ vpmadd52luq $H0,$R0,$D0lo -+ vpmadd52huq $H0,$R0,$D0hi -+ vpmadd52luq $H0,$R1,$D1lo -+ vpmadd52huq $H0,$R1,$D1hi -+ vpmadd52luq $H0,$R2,$D2lo -+ vpmadd52huq $H0,$R2,$D2hi -+ -+ vpunpcklqdq $T3,$T2,$T1 # transpose data -+ vpunpckhqdq $T3,$T2,$T3 -+ vpmadd52luq $H1,$S2,$D0lo -+ vpmadd52huq $H1,$S2,$D0hi -+ vpmadd52luq $H1,$R0,$D1lo -+ vpmadd52huq $H1,$R0,$D1hi -+ vpmadd52luq $H1,$R1,$D2lo -+ vpmadd52huq $H1,$R1,$D2hi -+ -+ ################################################################ -+ # partial reduction (interleaved with data splat) -+ vpsrlq \$44,$D0lo,$tmp -+ vpsllq \$8,$D0hi,$D0hi -+ vpandq $mask44,$D0lo,$H0 -+ vpaddq $tmp,$D0hi,$D0hi -+ -+ vpsrlq \$24,$T3,$T2 -+ vporq $PAD,$T2,$T2 -+ vpaddq $D0hi,$D1lo,$D1lo -+ -+ vpsrlq \$44,$D1lo,$tmp -+ vpsllq \$8,$D1hi,$D1hi -+ vpandq $mask44,$D1lo,$H1 -+ vpaddq $tmp,$D1hi,$D1hi -+ -+ vpandq $mask44,$T1,$T0 -+ vpsrlq \$44,$T1,$T1 -+ vpsllq \$20,$T3,$T3 -+ vpaddq $D1hi,$D2lo,$D2lo -+ -+ vpsrlq \$42,$D2lo,$tmp -+ vpsllq \$10,$D2hi,$D2hi -+ vpandq $mask42,$D2lo,$H2 -+ vpaddq $tmp,$D2hi,$D2hi -+ -+ vpaddq $T2,$H2,$H2 # accumulate input -+ vpaddq $D2hi,$H0,$H0 -+ vpsllq \$2,$D2hi,$D2hi -+ -+ vpaddq $D2hi,$H0,$H0 -+ vporq $T3,$T1,$T1 -+ vpandq $mask44,$T1,$T1 -+ -+ vpsrlq \$44,$H0,$tmp # additional step -+ vpandq $mask44,$H0,$H0 -+ -+ vpaddq $tmp,$H1,$H1 -+ -+ sub \$8,$len # len-=128 -+ jnz .Loop_vpmadd52_8x -+ -+.Ltail_vpmadd52_8x: -+ #vpaddq $T2,$H2,$H2 # accumulate input -+ vpaddq $T0,$H0,$H0 -+ vpaddq $T1,$H1,$H1 -+ -+ vpxorq $D0lo,$D0lo,$D0lo -+ vpmadd52luq $H2,$SS1,$D0lo -+ vpxorq $D0hi,$D0hi,$D0hi -+ vpmadd52huq $H2,$SS1,$D0hi -+ vpxorq $D1lo,$D1lo,$D1lo -+ vpmadd52luq $H2,$SS2,$D1lo -+ vpxorq $D1hi,$D1hi,$D1hi -+ vpmadd52huq $H2,$SS2,$D1hi 
-+ vpxorq $D2lo,$D2lo,$D2lo -+ vpmadd52luq $H2,$RR0,$D2lo -+ vpxorq $D2hi,$D2hi,$D2hi -+ vpmadd52huq $H2,$RR0,$D2hi -+ -+ vpmadd52luq $H0,$RR0,$D0lo -+ vpmadd52huq $H0,$RR0,$D0hi -+ vpmadd52luq $H0,$RR1,$D1lo -+ vpmadd52huq $H0,$RR1,$D1hi -+ vpmadd52luq $H0,$RR2,$D2lo -+ vpmadd52huq $H0,$RR2,$D2hi -+ -+ vpmadd52luq $H1,$SS2,$D0lo -+ vpmadd52huq $H1,$SS2,$D0hi -+ vpmadd52luq $H1,$RR0,$D1lo -+ vpmadd52huq $H1,$RR0,$D1hi -+ vpmadd52luq $H1,$RR1,$D2lo -+ vpmadd52huq $H1,$RR1,$D2hi -+ -+ ################################################################ -+ # horizontal addition -+ -+ mov \$1,%eax -+ kmovw %eax,%k1 -+ vpsrldq \$8,$D0lo,$T0 -+ vpsrldq \$8,$D0hi,$H0 -+ vpsrldq \$8,$D1lo,$T1 -+ vpsrldq \$8,$D1hi,$H1 -+ vpaddq $T0,$D0lo,$D0lo -+ vpaddq $H0,$D0hi,$D0hi -+ vpsrldq \$8,$D2lo,$T2 -+ vpsrldq \$8,$D2hi,$H2 -+ vpaddq $T1,$D1lo,$D1lo -+ vpaddq $H1,$D1hi,$D1hi -+ vpermq \$0x2,$D0lo,$T0 -+ vpermq \$0x2,$D0hi,$H0 -+ vpaddq $T2,$D2lo,$D2lo -+ vpaddq $H2,$D2hi,$D2hi -+ -+ vpermq \$0x2,$D1lo,$T1 -+ vpermq \$0x2,$D1hi,$H1 -+ vpaddq $T0,$D0lo,$D0lo -+ vpaddq $H0,$D0hi,$D0hi -+ vpermq \$0x2,$D2lo,$T2 -+ vpermq \$0x2,$D2hi,$H2 -+ vpaddq $T1,$D1lo,$D1lo -+ vpaddq $H1,$D1hi,$D1hi -+ vextracti64x4 \$1,$D0lo,%y#$T0 -+ vextracti64x4 \$1,$D0hi,%y#$H0 -+ vpaddq $T2,$D2lo,$D2lo -+ vpaddq $H2,$D2hi,$D2hi -+ -+ vextracti64x4 \$1,$D1lo,%y#$T1 -+ vextracti64x4 \$1,$D1hi,%y#$H1 -+ vextracti64x4 \$1,$D2lo,%y#$T2 -+ vextracti64x4 \$1,$D2hi,%y#$H2 -+___ -+######## switch back to %ymm -+map(s/%z/%y/, $H0,$H1,$H2,$R0,$R1,$R2,$S1,$S2); -+map(s/%z/%y/, $D0lo,$D0hi,$D1lo,$D1hi,$D2lo,$D2hi); -+map(s/%z/%y/, $T0,$T1,$T2,$T3,$mask44,$mask42,$tmp,$PAD); -+ -+$code.=<<___; -+ vpaddq $T0,$D0lo,${D0lo}{%k1}{z} -+ vpaddq $H0,$D0hi,${D0hi}{%k1}{z} -+ vpaddq $T1,$D1lo,${D1lo}{%k1}{z} -+ vpaddq $H1,$D1hi,${D1hi}{%k1}{z} -+ vpaddq $T2,$D2lo,${D2lo}{%k1}{z} -+ vpaddq $H2,$D2hi,${D2hi}{%k1}{z} -+ -+ ################################################################ -+ # partial reduction -+ vpsrlq \$44,$D0lo,$tmp 
-+ vpsllq \$8,$D0hi,$D0hi -+ vpandq $mask44,$D0lo,$H0 -+ vpaddq $tmp,$D0hi,$D0hi -+ -+ vpaddq $D0hi,$D1lo,$D1lo -+ -+ vpsrlq \$44,$D1lo,$tmp -+ vpsllq \$8,$D1hi,$D1hi -+ vpandq $mask44,$D1lo,$H1 -+ vpaddq $tmp,$D1hi,$D1hi -+ -+ vpaddq $D1hi,$D2lo,$D2lo -+ -+ vpsrlq \$42,$D2lo,$tmp -+ vpsllq \$10,$D2hi,$D2hi -+ vpandq $mask42,$D2lo,$H2 -+ vpaddq $tmp,$D2hi,$D2hi -+ -+ vpaddq $D2hi,$H0,$H0 -+ vpsllq \$2,$D2hi,$D2hi -+ -+ vpaddq $D2hi,$H0,$H0 -+ -+ vpsrlq \$44,$H0,$tmp # additional step -+ vpandq $mask44,$H0,$H0 -+ -+ vpaddq $tmp,$H1,$H1 -+ -+ ################################################################ -+ -+ vmovq %x#$H0,0($ctx) -+ vmovq %x#$H1,8($ctx) -+ vmovq %x#$H2,16($ctx) -+ vzeroall -+ -+.Lno_data_vpmadd52_8x: -+ ret -+.size poly1305_blocks_vpmadd52_8x,.-poly1305_blocks_vpmadd52_8x -+___ -+} -+$code.=<<___; -+.type poly1305_emit_base2_44,\@function,3 -+.align 32 -+poly1305_emit_base2_44: -+ mov 0($ctx),%r8 # load hash value -+ mov 8($ctx),%r9 -+ mov 16($ctx),%r10 -+ -+ mov %r9,%rax -+ shr \$20,%r9 -+ shl \$44,%rax -+ mov %r10,%rcx -+ shr \$40,%r10 -+ shl \$24,%rcx -+ -+ add %rax,%r8 -+ adc %rcx,%r9 -+ adc \$0,%r10 -+ -+ mov %r8,%rax -+ add \$5,%r8 # compare to modulus -+ mov %r9,%rcx -+ adc \$0,%r9 -+ adc \$0,%r10 -+ shr \$2,%r10 # did 130-bit value overflow? 
-+ cmovnz %r8,%rax -+ cmovnz %r9,%rcx -+ -+ add 0($nonce),%rax # accumulate nonce -+ adc 8($nonce),%rcx -+ mov %rax,0($mac) # write result -+ mov %rcx,8($mac) -+ -+ ret -+.size poly1305_emit_base2_44,.-poly1305_emit_base2_44 -+___ -+} } } -+$code.=<<___; -+.align 64 -+.Lconst: -+.Lmask24: -+.long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0 -+.L129: -+.long `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0 -+.Lmask26: -+.long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0 -+.Lpermd_avx2: -+.long 2,2,2,3,2,0,2,1 -+.Lpermd_avx512: -+.long 0,0,0,1, 0,2,0,3, 0,4,0,5, 0,6,0,7 -+ -+.L2_44_inp_permd: -+.long 0,1,1,2,2,3,7,7 -+.L2_44_inp_shift: -+.quad 0,12,24,64 -+.L2_44_mask: -+.quad 0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff -+.L2_44_shift_rgt: -+.quad 44,44,42,64 -+.L2_44_shift_lft: -+.quad 8,8,10,64 -+ -+.align 64 -+.Lx_mask44: -+.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff -+.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff -+.Lx_mask42: -+.quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff -+.quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff -+___ -+} -+$code.=<<___; -+.asciz "Poly1305 for x86_64, CRYPTOGAMS by " -+.align 16 -+___ -+ -+{ # chacha20-poly1305 helpers -+my ($out,$inp,$otp,$len)=$win64 ? 
("%rcx","%rdx","%r8", "%r9") : # Win64 order -+ ("%rdi","%rsi","%rdx","%rcx"); # Unix order -+$code.=<<___; -+.globl xor128_encrypt_n_pad -+.type xor128_encrypt_n_pad,\@abi-omnipotent -+.align 16 -+xor128_encrypt_n_pad: -+ sub $otp,$inp -+ sub $otp,$out -+ mov $len,%r10 # put len aside -+ shr \$4,$len # len / 16 -+ jz .Ltail_enc -+ nop -+.Loop_enc_xmm: -+ movdqu ($inp,$otp),%xmm0 -+ pxor ($otp),%xmm0 -+ movdqu %xmm0,($out,$otp) -+ movdqa %xmm0,($otp) -+ lea 16($otp),$otp -+ dec $len -+ jnz .Loop_enc_xmm -+ -+ and \$15,%r10 # len % 16 -+ jz .Ldone_enc -+ -+.Ltail_enc: -+ mov \$16,$len -+ sub %r10,$len -+ xor %eax,%eax -+.Loop_enc_byte: -+ mov ($inp,$otp),%al -+ xor ($otp),%al -+ mov %al,($out,$otp) -+ mov %al,($otp) -+ lea 1($otp),$otp -+ dec %r10 -+ jnz .Loop_enc_byte -+ -+ xor %eax,%eax -+.Loop_enc_pad: -+ mov %al,($otp) -+ lea 1($otp),$otp -+ dec $len -+ jnz .Loop_enc_pad -+ -+.Ldone_enc: -+ mov $otp,%rax -+ ret -+.size xor128_encrypt_n_pad,.-xor128_encrypt_n_pad -+ -+.globl xor128_decrypt_n_pad -+.type xor128_decrypt_n_pad,\@abi-omnipotent -+.align 16 -+xor128_decrypt_n_pad: -+ sub $otp,$inp -+ sub $otp,$out -+ mov $len,%r10 # put len aside -+ shr \$4,$len # len / 16 -+ jz .Ltail_dec -+ nop -+.Loop_dec_xmm: -+ movdqu ($inp,$otp),%xmm0 -+ movdqa ($otp),%xmm1 -+ pxor %xmm0,%xmm1 -+ movdqu %xmm1,($out,$otp) -+ movdqa %xmm0,($otp) -+ lea 16($otp),$otp -+ dec $len -+ jnz .Loop_dec_xmm -+ -+ pxor %xmm1,%xmm1 -+ and \$15,%r10 # len % 16 -+ jz .Ldone_dec -+ -+.Ltail_dec: -+ mov \$16,$len -+ sub %r10,$len -+ xor %eax,%eax -+ xor %r11,%r11 -+.Loop_dec_byte: -+ mov ($inp,$otp),%r11b -+ mov ($otp),%al -+ xor %r11b,%al -+ mov %al,($out,$otp) -+ mov %r11b,($otp) -+ lea 1($otp),$otp -+ dec %r10 -+ jnz .Loop_dec_byte -+ -+ xor %eax,%eax -+.Loop_dec_pad: -+ mov %al,($otp) -+ lea 1($otp),$otp -+ dec $len -+ jnz .Loop_dec_pad -+ -+.Ldone_dec: -+ mov $otp,%rax -+ ret -+.size xor128_decrypt_n_pad,.-xor128_decrypt_n_pad -+___ -+} -+ -+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD 
*rec,ULONG64 frame, -+# CONTEXT *context,DISPATCHER_CONTEXT *disp) -+if ($win64) { -+$rec="%rcx"; -+$frame="%rdx"; -+$context="%r8"; -+$disp="%r9"; -+ -+$code.=<<___; -+.extern __imp_RtlVirtualUnwind -+.type se_handler,\@abi-omnipotent -+.align 16 -+se_handler: -+ push %rsi -+ push %rdi -+ push %rbx -+ push %rbp -+ push %r12 -+ push %r13 -+ push %r14 -+ push %r15 -+ pushfq -+ sub \$64,%rsp -+ -+ mov 120($context),%rax # pull context->Rax -+ mov 248($context),%rbx # pull context->Rip -+ -+ mov 8($disp),%rsi # disp->ImageBase -+ mov 56($disp),%r11 # disp->HandlerData -+ -+ mov 0(%r11),%r10d # HandlerData[0] -+ lea (%rsi,%r10),%r10 # prologue label -+ cmp %r10,%rbx # context->Rip<.Lprologue -+ jb .Lcommon_seh_tail -+ -+ mov 152($context),%rax # pull context->Rsp -+ -+ mov 4(%r11),%r10d # HandlerData[1] -+ lea (%rsi,%r10),%r10 # epilogue label -+ cmp %r10,%rbx # context->Rip>=.Lepilogue -+ jae .Lcommon_seh_tail -+ -+ lea 48(%rax),%rax -+ -+ mov -8(%rax),%rbx -+ mov -16(%rax),%rbp -+ mov -24(%rax),%r12 -+ mov -32(%rax),%r13 -+ mov -40(%rax),%r14 -+ mov -48(%rax),%r15 -+ mov %rbx,144($context) # restore context->Rbx -+ mov %rbp,160($context) # restore context->Rbp -+ mov %r12,216($context) # restore context->R12 -+ mov %r13,224($context) # restore context->R13 -+ mov %r14,232($context) # restore context->R14 -+ mov %r15,240($context) # restore context->R14 -+ -+ jmp .Lcommon_seh_tail -+.size se_handler,.-se_handler -+ -+.type avx_handler,\@abi-omnipotent -+.align 16 -+avx_handler: -+ push %rsi -+ push %rdi -+ push %rbx -+ push %rbp -+ push %r12 -+ push %r13 -+ push %r14 -+ push %r15 -+ pushfq -+ sub \$64,%rsp -+ -+ mov 120($context),%rax # pull context->Rax -+ mov 248($context),%rbx # pull context->Rip -+ -+ mov 8($disp),%rsi # disp->ImageBase -+ mov 56($disp),%r11 # disp->HandlerData -+ -+ mov 0(%r11),%r10d # HandlerData[0] -+ lea (%rsi,%r10),%r10 # prologue label -+ cmp %r10,%rbx # context->RipRsp -+ -+ mov 4(%r11),%r10d # HandlerData[1] -+ lea (%rsi,%r10),%r10 # 
epilogue label -+ cmp %r10,%rbx # context->Rip>=epilogue label -+ jae .Lcommon_seh_tail -+ -+ mov 208($context),%rax # pull context->R11 -+ -+ lea 0x50(%rax),%rsi -+ lea 0xf8(%rax),%rax -+ lea 512($context),%rdi # &context.Xmm6 -+ mov \$20,%ecx -+ .long 0xa548f3fc # cld; rep movsq -+ -+.Lcommon_seh_tail: -+ mov 8(%rax),%rdi -+ mov 16(%rax),%rsi -+ mov %rax,152($context) # restore context->Rsp -+ mov %rsi,168($context) # restore context->Rsi -+ mov %rdi,176($context) # restore context->Rdi -+ -+ mov 40($disp),%rdi # disp->ContextRecord -+ mov $context,%rsi # context -+ mov \$154,%ecx # sizeof(CONTEXT) -+ .long 0xa548f3fc # cld; rep movsq -+ -+ mov $disp,%rsi -+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER -+ mov 8(%rsi),%rdx # arg2, disp->ImageBase -+ mov 0(%rsi),%r8 # arg3, disp->ControlPc -+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry -+ mov 40(%rsi),%r10 # disp->ContextRecord -+ lea 56(%rsi),%r11 # &disp->HandlerData -+ lea 24(%rsi),%r12 # &disp->EstablisherFrame -+ mov %r10,32(%rsp) # arg5 -+ mov %r11,40(%rsp) # arg6 -+ mov %r12,48(%rsp) # arg7 -+ mov %rcx,56(%rsp) # arg8, (NULL) -+ call *__imp_RtlVirtualUnwind(%rip) -+ -+ mov \$1,%eax # ExceptionContinueSearch -+ add \$64,%rsp -+ popfq -+ pop %r15 -+ pop %r14 -+ pop %r13 -+ pop %r12 -+ pop %rbp -+ pop %rbx -+ pop %rdi -+ pop %rsi -+ ret -+.size avx_handler,.-avx_handler -+ -+.section .pdata -+.align 4 -+ .rva .LSEH_begin_poly1305_init -+ .rva .LSEH_end_poly1305_init -+ .rva .LSEH_info_poly1305_init -+ -+ .rva .LSEH_begin_poly1305_blocks -+ .rva .LSEH_end_poly1305_blocks -+ .rva .LSEH_info_poly1305_blocks -+ -+ .rva .LSEH_begin_poly1305_emit -+ .rva .LSEH_end_poly1305_emit -+ .rva .LSEH_info_poly1305_emit -+___ -+$code.=<<___ if ($avx); -+ .rva .LSEH_begin_poly1305_blocks_avx -+ .rva .Lbase2_64_avx -+ .rva .LSEH_info_poly1305_blocks_avx_1 -+ -+ .rva .Lbase2_64_avx -+ .rva .Leven_avx -+ .rva .LSEH_info_poly1305_blocks_avx_2 -+ -+ .rva .Leven_avx -+ .rva .LSEH_end_poly1305_blocks_avx -+ .rva 
.LSEH_info_poly1305_blocks_avx_3 -+ -+ .rva .LSEH_begin_poly1305_emit_avx -+ .rva .LSEH_end_poly1305_emit_avx -+ .rva .LSEH_info_poly1305_emit_avx -+___ -+$code.=<<___ if ($avx>1); -+ .rva .LSEH_begin_poly1305_blocks_avx2 -+ .rva .Lbase2_64_avx2 -+ .rva .LSEH_info_poly1305_blocks_avx2_1 -+ -+ .rva .Lbase2_64_avx2 -+ .rva .Leven_avx2 -+ .rva .LSEH_info_poly1305_blocks_avx2_2 -+ -+ .rva .Leven_avx2 -+ .rva .LSEH_end_poly1305_blocks_avx2 -+ .rva .LSEH_info_poly1305_blocks_avx2_3 -+___ -+$code.=<<___ if ($avx>2); -+ .rva .LSEH_begin_poly1305_blocks_avx512 -+ .rva .LSEH_end_poly1305_blocks_avx512 -+ .rva .LSEH_info_poly1305_blocks_avx512 -+___ -+$code.=<<___; -+.section .xdata -+.align 8 -+.LSEH_info_poly1305_init: -+ .byte 9,0,0,0 -+ .rva se_handler -+ .rva .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init -+ -+.LSEH_info_poly1305_blocks: -+ .byte 9,0,0,0 -+ .rva se_handler -+ .rva .Lblocks_body,.Lblocks_epilogue -+ -+.LSEH_info_poly1305_emit: -+ .byte 9,0,0,0 -+ .rva se_handler -+ .rva .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit -+___ -+$code.=<<___ if ($avx); -+.LSEH_info_poly1305_blocks_avx_1: -+ .byte 9,0,0,0 -+ .rva se_handler -+ .rva .Lblocks_avx_body,.Lblocks_avx_epilogue # HandlerData[] -+ -+.LSEH_info_poly1305_blocks_avx_2: -+ .byte 9,0,0,0 -+ .rva se_handler -+ .rva .Lbase2_64_avx_body,.Lbase2_64_avx_epilogue # HandlerData[] -+ -+.LSEH_info_poly1305_blocks_avx_3: -+ .byte 9,0,0,0 -+ .rva avx_handler -+ .rva .Ldo_avx_body,.Ldo_avx_epilogue # HandlerData[] -+ -+.LSEH_info_poly1305_emit_avx: -+ .byte 9,0,0,0 -+ .rva se_handler -+ .rva .LSEH_begin_poly1305_emit_avx,.LSEH_begin_poly1305_emit_avx -+___ -+$code.=<<___ if ($avx>1); -+.LSEH_info_poly1305_blocks_avx2_1: -+ .byte 9,0,0,0 -+ .rva se_handler -+ .rva .Lblocks_avx2_body,.Lblocks_avx2_epilogue # HandlerData[] -+ -+.LSEH_info_poly1305_blocks_avx2_2: -+ .byte 9,0,0,0 -+ .rva se_handler -+ .rva .Lbase2_64_avx2_body,.Lbase2_64_avx2_epilogue # HandlerData[] -+ -+.LSEH_info_poly1305_blocks_avx2_3: -+ 
.byte 9,0,0,0 -+ .rva avx_handler -+ .rva .Ldo_avx2_body,.Ldo_avx2_epilogue # HandlerData[] -+___ -+$code.=<<___ if ($avx>2); -+.LSEH_info_poly1305_blocks_avx512: -+ .byte 9,0,0,0 -+ .rva avx_handler -+ .rva .Ldo_avx512_body,.Ldo_avx512_epilogue # HandlerData[] -+___ -+} -+ -+foreach (split('\n',$code)) { -+ s/\`([^\`]*)\`/eval($1)/ge; -+ s/%r([a-z]+)#d/%e$1/g; -+ s/%r([0-9]+)#d/%r$1d/g; -+ s/%x#%[yz]/%x/g or s/%y#%z/%y/g or s/%z#%[yz]/%z/g; -+ -+ print $_,"\n"; -+} -+close STDOUT; diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0043-crypto-x86-poly1305-wire-up-faster-implementations-f.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0043-crypto-x86-poly1305-wire-up-faster-implementations-f.patch deleted file mode 100644 index 0fc834858..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0043-crypto-x86-poly1305-wire-up-faster-implementations-f.patch +++ /dev/null @@ -1,2927 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Sun, 5 Jan 2020 22:40:48 -0500 -Subject: [PATCH] crypto: x86/poly1305 - wire up faster implementations for - kernel - -commit d7d7b853566254648df59f7ea27ea05952a6cfa8 upstream. - -These x86_64 vectorized implementations support AVX, AVX-2, and AVX512F. -The AVX-512F implementation is disabled on Skylake, due to throttling, -but it is quite fast on >= Cannonlake. - -On the left is cycle counts on a Core i7 6700HQ using the AVX-2 -codepath, comparing this implementation ("new") to the implementation in -the current crypto api ("old"). On the right are benchmarks on a Xeon -Gold 5120 using the AVX-512 codepath. The new implementation is faster -on all benchmarks. 
- - AVX-2 AVX-512 - --------- ----------- - - size old new size old new - ---- ---- ---- ---- ---- ---- - 0 70 68 0 74 70 - 16 92 90 16 96 92 - 32 134 104 32 136 106 - 48 172 120 48 184 124 - 64 218 136 64 218 138 - 80 254 158 80 260 160 - 96 298 174 96 300 176 - 112 342 192 112 342 194 - 128 388 212 128 384 212 - 144 428 228 144 420 226 - 160 466 246 160 464 248 - 176 510 264 176 504 264 - 192 550 282 192 544 282 - 208 594 302 208 582 300 - 224 628 316 224 624 318 - 240 676 334 240 662 338 - 256 716 354 256 708 358 - 272 764 374 272 748 372 - 288 802 352 288 788 358 - 304 420 366 304 422 370 - 320 428 360 320 432 364 - 336 484 378 336 486 380 - 352 426 384 352 434 390 - 368 478 400 368 480 408 - 384 488 394 384 490 398 - 400 542 408 400 542 412 - 416 486 416 416 492 426 - 432 534 430 432 538 436 - 448 544 422 448 546 432 - 464 600 438 464 600 448 - 480 540 448 480 548 456 - 496 594 464 496 594 476 - 512 602 456 512 606 470 - 528 656 476 528 656 480 - 544 600 480 544 606 498 - 560 650 494 560 652 512 - 576 664 490 576 662 508 - 592 714 508 592 716 522 - 608 656 514 608 664 538 - 624 708 532 624 710 552 - 640 716 524 640 720 516 - 656 770 536 656 772 526 - 672 716 548 672 722 544 - 688 770 562 688 768 556 - 704 774 552 704 778 556 - 720 826 568 720 832 568 - 736 768 574 736 780 584 - 752 822 592 752 826 600 - 768 830 584 768 836 560 - 784 884 602 784 888 572 - 800 828 610 800 838 588 - 816 884 628 816 884 604 - 832 888 618 832 894 598 - 848 942 632 848 946 612 - 864 884 644 864 896 628 - 880 936 660 880 942 644 - 896 948 652 896 952 608 - 912 1000 664 912 1004 616 - 928 942 676 928 954 634 - 944 994 690 944 1000 646 - 960 1002 680 960 1008 646 - 976 1054 694 976 1062 658 - 992 1002 706 992 1012 674 - 1008 1052 720 1008 1058 690 - -This commit wires in the prior implementation from Andy, and makes the -following changes to be suitable for kernel land. 
- - - Some cosmetic and structural changes, like renaming labels to - .Lname, constants, and other Linux conventions, as well as making - the code easy for us to maintain moving forward. - - - CPU feature checking is done in C by the glue code. - - - We avoid jumping into the middle of functions, to appease objtool, - and instead parameterize shared code. - - - We maintain frame pointers so that stack traces make sense. - - - We remove the dependency on the perl xlate code, which transforms - the output into things that assemblers we don't care about use. - -Importantly, none of our changes affect the arithmetic or core code, but -just involve the differing environment of kernel space. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Samuel Neves -Co-developed-by: Samuel Neves -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/.gitignore | 1 + - arch/x86/crypto/Makefile | 11 +- - arch/x86/crypto/poly1305-avx2-x86_64.S | 390 ---------- - arch/x86/crypto/poly1305-sse2-x86_64.S | 590 --------------- - arch/x86/crypto/poly1305-x86_64-cryptogams.pl | 682 ++++++++++-------- - arch/x86/crypto/poly1305_glue.c | 473 +++++------- - lib/crypto/Kconfig | 2 +- - 7 files changed, 572 insertions(+), 1577 deletions(-) - create mode 100644 arch/x86/crypto/.gitignore - delete mode 100644 arch/x86/crypto/poly1305-avx2-x86_64.S - delete mode 100644 arch/x86/crypto/poly1305-sse2-x86_64.S - ---- /dev/null -+++ b/arch/x86/crypto/.gitignore -@@ -0,0 +1 @@ -+poly1305-x86_64.S ---- a/arch/x86/crypto/Makefile -+++ b/arch/x86/crypto/Makefile -@@ -73,6 +73,10 @@ aegis128-aesni-y := aegis128-aesni-asm.o - - nhpoly1305-sse2-y := nh-sse2-x86_64.o nhpoly1305-sse2-glue.o - blake2s-x86_64-y := blake2s-core.o blake2s-glue.o -+poly1305-x86_64-y := poly1305-x86_64-cryptogams.o poly1305_glue.o -+ifneq ($(CONFIG_CRYPTO_POLY1305_X86_64),) -+targets += poly1305-x86_64-cryptogams.S -+endif - - ifeq ($(avx_supported),yes) - camellia-aesni-avx-x86_64-y := 
camellia-aesni-avx-asm_64.o \ -@@ -101,10 +105,8 @@ aesni-intel-y := aesni-intel_asm.o aesni - aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o - ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o - sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o --poly1305-x86_64-y := poly1305-sse2-x86_64.o poly1305_glue.o - ifeq ($(avx2_supported),yes) - sha1-ssse3-y += sha1_avx2_x86_64_asm.o --poly1305-x86_64-y += poly1305-avx2-x86_64.o - endif - ifeq ($(sha1_ni_supported),yes) - sha1-ssse3-y += sha1_ni_asm.o -@@ -118,3 +120,8 @@ sha256-ssse3-y += sha256_ni_asm.o - endif - sha512-ssse3-y := sha512-ssse3-asm.o sha512-avx-asm.o sha512-avx2-asm.o sha512_ssse3_glue.o - crct10dif-pclmul-y := crct10dif-pcl-asm_64.o crct10dif-pclmul_glue.o -+ -+quiet_cmd_perlasm = PERLASM $@ -+ cmd_perlasm = $(PERL) $< > $@ -+$(obj)/%.S: $(src)/%.pl FORCE -+ $(call if_changed,perlasm) ---- a/arch/x86/crypto/poly1305-avx2-x86_64.S -+++ /dev/null -@@ -1,390 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0-or-later */ --/* -- * Poly1305 authenticator algorithm, RFC7539, x64 AVX2 functions -- * -- * Copyright (C) 2015 Martin Willi -- */ -- --#include -- --.section .rodata.cst32.ANMASK, "aM", @progbits, 32 --.align 32 --ANMASK: .octa 0x0000000003ffffff0000000003ffffff -- .octa 0x0000000003ffffff0000000003ffffff -- --.section .rodata.cst32.ORMASK, "aM", @progbits, 32 --.align 32 --ORMASK: .octa 0x00000000010000000000000001000000 -- .octa 0x00000000010000000000000001000000 -- --.text -- --#define h0 0x00(%rdi) --#define h1 0x04(%rdi) --#define h2 0x08(%rdi) --#define h3 0x0c(%rdi) --#define h4 0x10(%rdi) --#define r0 0x00(%rdx) --#define r1 0x04(%rdx) --#define r2 0x08(%rdx) --#define r3 0x0c(%rdx) --#define r4 0x10(%rdx) --#define u0 0x00(%r8) --#define u1 0x04(%r8) --#define u2 0x08(%r8) --#define u3 0x0c(%r8) --#define u4 0x10(%r8) --#define w0 0x18(%r8) --#define w1 0x1c(%r8) --#define w2 0x20(%r8) --#define w3 0x24(%r8) --#define w4 0x28(%r8) 
--#define y0 0x30(%r8) --#define y1 0x34(%r8) --#define y2 0x38(%r8) --#define y3 0x3c(%r8) --#define y4 0x40(%r8) --#define m %rsi --#define hc0 %ymm0 --#define hc1 %ymm1 --#define hc2 %ymm2 --#define hc3 %ymm3 --#define hc4 %ymm4 --#define hc0x %xmm0 --#define hc1x %xmm1 --#define hc2x %xmm2 --#define hc3x %xmm3 --#define hc4x %xmm4 --#define t1 %ymm5 --#define t2 %ymm6 --#define t1x %xmm5 --#define t2x %xmm6 --#define ruwy0 %ymm7 --#define ruwy1 %ymm8 --#define ruwy2 %ymm9 --#define ruwy3 %ymm10 --#define ruwy4 %ymm11 --#define ruwy0x %xmm7 --#define ruwy1x %xmm8 --#define ruwy2x %xmm9 --#define ruwy3x %xmm10 --#define ruwy4x %xmm11 --#define svxz1 %ymm12 --#define svxz2 %ymm13 --#define svxz3 %ymm14 --#define svxz4 %ymm15 --#define d0 %r9 --#define d1 %r10 --#define d2 %r11 --#define d3 %r12 --#define d4 %r13 -- --ENTRY(poly1305_4block_avx2) -- # %rdi: Accumulator h[5] -- # %rsi: 64 byte input block m -- # %rdx: Poly1305 key r[5] -- # %rcx: Quadblock count -- # %r8: Poly1305 derived key r^2 u[5], r^3 w[5], r^4 y[5], -- -- # This four-block variant uses loop unrolled block processing. 
It -- # requires 4 Poly1305 keys: r, r^2, r^3 and r^4: -- # h = (h + m) * r => h = (h + m1) * r^4 + m2 * r^3 + m3 * r^2 + m4 * r -- -- vzeroupper -- push %rbx -- push %r12 -- push %r13 -- -- # combine r0,u0,w0,y0 -- vmovd y0,ruwy0x -- vmovd w0,t1x -- vpunpcklqdq t1,ruwy0,ruwy0 -- vmovd u0,t1x -- vmovd r0,t2x -- vpunpcklqdq t2,t1,t1 -- vperm2i128 $0x20,t1,ruwy0,ruwy0 -- -- # combine r1,u1,w1,y1 and s1=r1*5,v1=u1*5,x1=w1*5,z1=y1*5 -- vmovd y1,ruwy1x -- vmovd w1,t1x -- vpunpcklqdq t1,ruwy1,ruwy1 -- vmovd u1,t1x -- vmovd r1,t2x -- vpunpcklqdq t2,t1,t1 -- vperm2i128 $0x20,t1,ruwy1,ruwy1 -- vpslld $2,ruwy1,svxz1 -- vpaddd ruwy1,svxz1,svxz1 -- -- # combine r2,u2,w2,y2 and s2=r2*5,v2=u2*5,x2=w2*5,z2=y2*5 -- vmovd y2,ruwy2x -- vmovd w2,t1x -- vpunpcklqdq t1,ruwy2,ruwy2 -- vmovd u2,t1x -- vmovd r2,t2x -- vpunpcklqdq t2,t1,t1 -- vperm2i128 $0x20,t1,ruwy2,ruwy2 -- vpslld $2,ruwy2,svxz2 -- vpaddd ruwy2,svxz2,svxz2 -- -- # combine r3,u3,w3,y3 and s3=r3*5,v3=u3*5,x3=w3*5,z3=y3*5 -- vmovd y3,ruwy3x -- vmovd w3,t1x -- vpunpcklqdq t1,ruwy3,ruwy3 -- vmovd u3,t1x -- vmovd r3,t2x -- vpunpcklqdq t2,t1,t1 -- vperm2i128 $0x20,t1,ruwy3,ruwy3 -- vpslld $2,ruwy3,svxz3 -- vpaddd ruwy3,svxz3,svxz3 -- -- # combine r4,u4,w4,y4 and s4=r4*5,v4=u4*5,x4=w4*5,z4=y4*5 -- vmovd y4,ruwy4x -- vmovd w4,t1x -- vpunpcklqdq t1,ruwy4,ruwy4 -- vmovd u4,t1x -- vmovd r4,t2x -- vpunpcklqdq t2,t1,t1 -- vperm2i128 $0x20,t1,ruwy4,ruwy4 -- vpslld $2,ruwy4,svxz4 -- vpaddd ruwy4,svxz4,svxz4 -- --.Ldoblock4: -- # hc0 = [m[48-51] & 0x3ffffff, m[32-35] & 0x3ffffff, -- # m[16-19] & 0x3ffffff, m[ 0- 3] & 0x3ffffff + h0] -- vmovd 0x00(m),hc0x -- vmovd 0x10(m),t1x -- vpunpcklqdq t1,hc0,hc0 -- vmovd 0x20(m),t1x -- vmovd 0x30(m),t2x -- vpunpcklqdq t2,t1,t1 -- vperm2i128 $0x20,t1,hc0,hc0 -- vpand ANMASK(%rip),hc0,hc0 -- vmovd h0,t1x -- vpaddd t1,hc0,hc0 -- # hc1 = [(m[51-54] >> 2) & 0x3ffffff, (m[35-38] >> 2) & 0x3ffffff, -- # (m[19-22] >> 2) & 0x3ffffff, (m[ 3- 6] >> 2) & 0x3ffffff + h1] -- vmovd 0x03(m),hc1x -- vmovd 
0x13(m),t1x -- vpunpcklqdq t1,hc1,hc1 -- vmovd 0x23(m),t1x -- vmovd 0x33(m),t2x -- vpunpcklqdq t2,t1,t1 -- vperm2i128 $0x20,t1,hc1,hc1 -- vpsrld $2,hc1,hc1 -- vpand ANMASK(%rip),hc1,hc1 -- vmovd h1,t1x -- vpaddd t1,hc1,hc1 -- # hc2 = [(m[54-57] >> 4) & 0x3ffffff, (m[38-41] >> 4) & 0x3ffffff, -- # (m[22-25] >> 4) & 0x3ffffff, (m[ 6- 9] >> 4) & 0x3ffffff + h2] -- vmovd 0x06(m),hc2x -- vmovd 0x16(m),t1x -- vpunpcklqdq t1,hc2,hc2 -- vmovd 0x26(m),t1x -- vmovd 0x36(m),t2x -- vpunpcklqdq t2,t1,t1 -- vperm2i128 $0x20,t1,hc2,hc2 -- vpsrld $4,hc2,hc2 -- vpand ANMASK(%rip),hc2,hc2 -- vmovd h2,t1x -- vpaddd t1,hc2,hc2 -- # hc3 = [(m[57-60] >> 6) & 0x3ffffff, (m[41-44] >> 6) & 0x3ffffff, -- # (m[25-28] >> 6) & 0x3ffffff, (m[ 9-12] >> 6) & 0x3ffffff + h3] -- vmovd 0x09(m),hc3x -- vmovd 0x19(m),t1x -- vpunpcklqdq t1,hc3,hc3 -- vmovd 0x29(m),t1x -- vmovd 0x39(m),t2x -- vpunpcklqdq t2,t1,t1 -- vperm2i128 $0x20,t1,hc3,hc3 -- vpsrld $6,hc3,hc3 -- vpand ANMASK(%rip),hc3,hc3 -- vmovd h3,t1x -- vpaddd t1,hc3,hc3 -- # hc4 = [(m[60-63] >> 8) | (1<<24), (m[44-47] >> 8) | (1<<24), -- # (m[28-31] >> 8) | (1<<24), (m[12-15] >> 8) | (1<<24) + h4] -- vmovd 0x0c(m),hc4x -- vmovd 0x1c(m),t1x -- vpunpcklqdq t1,hc4,hc4 -- vmovd 0x2c(m),t1x -- vmovd 0x3c(m),t2x -- vpunpcklqdq t2,t1,t1 -- vperm2i128 $0x20,t1,hc4,hc4 -- vpsrld $8,hc4,hc4 -- vpor ORMASK(%rip),hc4,hc4 -- vmovd h4,t1x -- vpaddd t1,hc4,hc4 -- -- # t1 = [ hc0[3] * r0, hc0[2] * u0, hc0[1] * w0, hc0[0] * y0 ] -- vpmuludq hc0,ruwy0,t1 -- # t1 += [ hc1[3] * s4, hc1[2] * v4, hc1[1] * x4, hc1[0] * z4 ] -- vpmuludq hc1,svxz4,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc2[3] * s3, hc2[2] * v3, hc2[1] * x3, hc2[0] * z3 ] -- vpmuludq hc2,svxz3,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc3[3] * s2, hc3[2] * v2, hc3[1] * x2, hc3[0] * z2 ] -- vpmuludq hc3,svxz2,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc4[3] * s1, hc4[2] * v1, hc4[1] * x1, hc4[0] * z1 ] -- vpmuludq hc4,svxz1,t2 -- vpaddq t2,t1,t1 -- # d0 = t1[0] + t1[1] + t[2] + t[3] -- vpermq $0xee,t1,t2 -- vpaddq 
t2,t1,t1 -- vpsrldq $8,t1,t2 -- vpaddq t2,t1,t1 -- vmovq t1x,d0 -- -- # t1 = [ hc0[3] * r1, hc0[2] * u1,hc0[1] * w1, hc0[0] * y1 ] -- vpmuludq hc0,ruwy1,t1 -- # t1 += [ hc1[3] * r0, hc1[2] * u0, hc1[1] * w0, hc1[0] * y0 ] -- vpmuludq hc1,ruwy0,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc2[3] * s4, hc2[2] * v4, hc2[1] * x4, hc2[0] * z4 ] -- vpmuludq hc2,svxz4,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc3[3] * s3, hc3[2] * v3, hc3[1] * x3, hc3[0] * z3 ] -- vpmuludq hc3,svxz3,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc4[3] * s2, hc4[2] * v2, hc4[1] * x2, hc4[0] * z2 ] -- vpmuludq hc4,svxz2,t2 -- vpaddq t2,t1,t1 -- # d1 = t1[0] + t1[1] + t1[3] + t1[4] -- vpermq $0xee,t1,t2 -- vpaddq t2,t1,t1 -- vpsrldq $8,t1,t2 -- vpaddq t2,t1,t1 -- vmovq t1x,d1 -- -- # t1 = [ hc0[3] * r2, hc0[2] * u2, hc0[1] * w2, hc0[0] * y2 ] -- vpmuludq hc0,ruwy2,t1 -- # t1 += [ hc1[3] * r1, hc1[2] * u1, hc1[1] * w1, hc1[0] * y1 ] -- vpmuludq hc1,ruwy1,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc2[3] * r0, hc2[2] * u0, hc2[1] * w0, hc2[0] * y0 ] -- vpmuludq hc2,ruwy0,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc3[3] * s4, hc3[2] * v4, hc3[1] * x4, hc3[0] * z4 ] -- vpmuludq hc3,svxz4,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc4[3] * s3, hc4[2] * v3, hc4[1] * x3, hc4[0] * z3 ] -- vpmuludq hc4,svxz3,t2 -- vpaddq t2,t1,t1 -- # d2 = t1[0] + t1[1] + t1[2] + t1[3] -- vpermq $0xee,t1,t2 -- vpaddq t2,t1,t1 -- vpsrldq $8,t1,t2 -- vpaddq t2,t1,t1 -- vmovq t1x,d2 -- -- # t1 = [ hc0[3] * r3, hc0[2] * u3, hc0[1] * w3, hc0[0] * y3 ] -- vpmuludq hc0,ruwy3,t1 -- # t1 += [ hc1[3] * r2, hc1[2] * u2, hc1[1] * w2, hc1[0] * y2 ] -- vpmuludq hc1,ruwy2,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc2[3] * r1, hc2[2] * u1, hc2[1] * w1, hc2[0] * y1 ] -- vpmuludq hc2,ruwy1,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc3[3] * r0, hc3[2] * u0, hc3[1] * w0, hc3[0] * y0 ] -- vpmuludq hc3,ruwy0,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc4[3] * s4, hc4[2] * v4, hc4[1] * x4, hc4[0] * z4 ] -- vpmuludq hc4,svxz4,t2 -- vpaddq t2,t1,t1 -- # d3 = t1[0] + t1[1] + t1[2] + t1[3] -- vpermq $0xee,t1,t2 -- 
vpaddq t2,t1,t1 -- vpsrldq $8,t1,t2 -- vpaddq t2,t1,t1 -- vmovq t1x,d3 -- -- # t1 = [ hc0[3] * r4, hc0[2] * u4, hc0[1] * w4, hc0[0] * y4 ] -- vpmuludq hc0,ruwy4,t1 -- # t1 += [ hc1[3] * r3, hc1[2] * u3, hc1[1] * w3, hc1[0] * y3 ] -- vpmuludq hc1,ruwy3,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc2[3] * r2, hc2[2] * u2, hc2[1] * w2, hc2[0] * y2 ] -- vpmuludq hc2,ruwy2,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc3[3] * r1, hc3[2] * u1, hc3[1] * w1, hc3[0] * y1 ] -- vpmuludq hc3,ruwy1,t2 -- vpaddq t2,t1,t1 -- # t1 += [ hc4[3] * r0, hc4[2] * u0, hc4[1] * w0, hc4[0] * y0 ] -- vpmuludq hc4,ruwy0,t2 -- vpaddq t2,t1,t1 -- # d4 = t1[0] + t1[1] + t1[2] + t1[3] -- vpermq $0xee,t1,t2 -- vpaddq t2,t1,t1 -- vpsrldq $8,t1,t2 -- vpaddq t2,t1,t1 -- vmovq t1x,d4 -- -- # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 -> -- # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small -- # amount. Careful: we must not assume the carry bits 'd0 >> 26', -- # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit -- # integers. It's true in a single-block implementation, but not here. 
-- -- # d1 += d0 >> 26 -- mov d0,%rax -- shr $26,%rax -- add %rax,d1 -- # h0 = d0 & 0x3ffffff -- mov d0,%rbx -- and $0x3ffffff,%ebx -- -- # d2 += d1 >> 26 -- mov d1,%rax -- shr $26,%rax -- add %rax,d2 -- # h1 = d1 & 0x3ffffff -- mov d1,%rax -- and $0x3ffffff,%eax -- mov %eax,h1 -- -- # d3 += d2 >> 26 -- mov d2,%rax -- shr $26,%rax -- add %rax,d3 -- # h2 = d2 & 0x3ffffff -- mov d2,%rax -- and $0x3ffffff,%eax -- mov %eax,h2 -- -- # d4 += d3 >> 26 -- mov d3,%rax -- shr $26,%rax -- add %rax,d4 -- # h3 = d3 & 0x3ffffff -- mov d3,%rax -- and $0x3ffffff,%eax -- mov %eax,h3 -- -- # h0 += (d4 >> 26) * 5 -- mov d4,%rax -- shr $26,%rax -- lea (%rax,%rax,4),%rax -- add %rax,%rbx -- # h4 = d4 & 0x3ffffff -- mov d4,%rax -- and $0x3ffffff,%eax -- mov %eax,h4 -- -- # h1 += h0 >> 26 -- mov %rbx,%rax -- shr $26,%rax -- add %eax,h1 -- # h0 = h0 & 0x3ffffff -- andl $0x3ffffff,%ebx -- mov %ebx,h0 -- -- add $0x40,m -- dec %rcx -- jnz .Ldoblock4 -- -- vzeroupper -- pop %r13 -- pop %r12 -- pop %rbx -- ret --ENDPROC(poly1305_4block_avx2) ---- a/arch/x86/crypto/poly1305-sse2-x86_64.S -+++ /dev/null -@@ -1,590 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0-or-later */ --/* -- * Poly1305 authenticator algorithm, RFC7539, x64 SSE2 functions -- * -- * Copyright (C) 2015 Martin Willi -- */ -- --#include -- --.section .rodata.cst16.ANMASK, "aM", @progbits, 16 --.align 16 --ANMASK: .octa 0x0000000003ffffff0000000003ffffff -- --.section .rodata.cst16.ORMASK, "aM", @progbits, 16 --.align 16 --ORMASK: .octa 0x00000000010000000000000001000000 -- --.text -- --#define h0 0x00(%rdi) --#define h1 0x04(%rdi) --#define h2 0x08(%rdi) --#define h3 0x0c(%rdi) --#define h4 0x10(%rdi) --#define r0 0x00(%rdx) --#define r1 0x04(%rdx) --#define r2 0x08(%rdx) --#define r3 0x0c(%rdx) --#define r4 0x10(%rdx) --#define s1 0x00(%rsp) --#define s2 0x04(%rsp) --#define s3 0x08(%rsp) --#define s4 0x0c(%rsp) --#define m %rsi --#define h01 %xmm0 --#define h23 %xmm1 --#define h44 %xmm2 --#define t1 %xmm3 --#define t2 %xmm4 
--#define t3 %xmm5 --#define t4 %xmm6 --#define mask %xmm7 --#define d0 %r8 --#define d1 %r9 --#define d2 %r10 --#define d3 %r11 --#define d4 %r12 -- --ENTRY(poly1305_block_sse2) -- # %rdi: Accumulator h[5] -- # %rsi: 16 byte input block m -- # %rdx: Poly1305 key r[5] -- # %rcx: Block count -- -- # This single block variant tries to improve performance by doing two -- # multiplications in parallel using SSE instructions. There is quite -- # some quardword packing involved, hence the speedup is marginal. -- -- push %rbx -- push %r12 -- sub $0x10,%rsp -- -- # s1..s4 = r1..r4 * 5 -- mov r1,%eax -- lea (%eax,%eax,4),%eax -- mov %eax,s1 -- mov r2,%eax -- lea (%eax,%eax,4),%eax -- mov %eax,s2 -- mov r3,%eax -- lea (%eax,%eax,4),%eax -- mov %eax,s3 -- mov r4,%eax -- lea (%eax,%eax,4),%eax -- mov %eax,s4 -- -- movdqa ANMASK(%rip),mask -- --.Ldoblock: -- # h01 = [0, h1, 0, h0] -- # h23 = [0, h3, 0, h2] -- # h44 = [0, h4, 0, h4] -- movd h0,h01 -- movd h1,t1 -- movd h2,h23 -- movd h3,t2 -- movd h4,h44 -- punpcklqdq t1,h01 -- punpcklqdq t2,h23 -- punpcklqdq h44,h44 -- -- # h01 += [ (m[3-6] >> 2) & 0x3ffffff, m[0-3] & 0x3ffffff ] -- movd 0x00(m),t1 -- movd 0x03(m),t2 -- psrld $2,t2 -- punpcklqdq t2,t1 -- pand mask,t1 -- paddd t1,h01 -- # h23 += [ (m[9-12] >> 6) & 0x3ffffff, (m[6-9] >> 4) & 0x3ffffff ] -- movd 0x06(m),t1 -- movd 0x09(m),t2 -- psrld $4,t1 -- psrld $6,t2 -- punpcklqdq t2,t1 -- pand mask,t1 -- paddd t1,h23 -- # h44 += [ (m[12-15] >> 8) | (1 << 24), (m[12-15] >> 8) | (1 << 24) ] -- mov 0x0c(m),%eax -- shr $8,%eax -- or $0x01000000,%eax -- movd %eax,t1 -- pshufd $0xc4,t1,t1 -- paddd t1,h44 -- -- # t1[0] = h0 * r0 + h2 * s3 -- # t1[1] = h1 * s4 + h3 * s2 -- movd r0,t1 -- movd s4,t2 -- punpcklqdq t2,t1 -- pmuludq h01,t1 -- movd s3,t2 -- movd s2,t3 -- punpcklqdq t3,t2 -- pmuludq h23,t2 -- paddq t2,t1 -- # t2[0] = h0 * r1 + h2 * s4 -- # t2[1] = h1 * r0 + h3 * s3 -- movd r1,t2 -- movd r0,t3 -- punpcklqdq t3,t2 -- pmuludq h01,t2 -- movd s4,t3 -- movd s3,t4 -- punpcklqdq 
t4,t3 -- pmuludq h23,t3 -- paddq t3,t2 -- # t3[0] = h4 * s1 -- # t3[1] = h4 * s2 -- movd s1,t3 -- movd s2,t4 -- punpcklqdq t4,t3 -- pmuludq h44,t3 -- # d0 = t1[0] + t1[1] + t3[0] -- # d1 = t2[0] + t2[1] + t3[1] -- movdqa t1,t4 -- punpcklqdq t2,t4 -- punpckhqdq t2,t1 -- paddq t4,t1 -- paddq t3,t1 -- movq t1,d0 -- psrldq $8,t1 -- movq t1,d1 -- -- # t1[0] = h0 * r2 + h2 * r0 -- # t1[1] = h1 * r1 + h3 * s4 -- movd r2,t1 -- movd r1,t2 -- punpcklqdq t2,t1 -- pmuludq h01,t1 -- movd r0,t2 -- movd s4,t3 -- punpcklqdq t3,t2 -- pmuludq h23,t2 -- paddq t2,t1 -- # t2[0] = h0 * r3 + h2 * r1 -- # t2[1] = h1 * r2 + h3 * r0 -- movd r3,t2 -- movd r2,t3 -- punpcklqdq t3,t2 -- pmuludq h01,t2 -- movd r1,t3 -- movd r0,t4 -- punpcklqdq t4,t3 -- pmuludq h23,t3 -- paddq t3,t2 -- # t3[0] = h4 * s3 -- # t3[1] = h4 * s4 -- movd s3,t3 -- movd s4,t4 -- punpcklqdq t4,t3 -- pmuludq h44,t3 -- # d2 = t1[0] + t1[1] + t3[0] -- # d3 = t2[0] + t2[1] + t3[1] -- movdqa t1,t4 -- punpcklqdq t2,t4 -- punpckhqdq t2,t1 -- paddq t4,t1 -- paddq t3,t1 -- movq t1,d2 -- psrldq $8,t1 -- movq t1,d3 -- -- # t1[0] = h0 * r4 + h2 * r2 -- # t1[1] = h1 * r3 + h3 * r1 -- movd r4,t1 -- movd r3,t2 -- punpcklqdq t2,t1 -- pmuludq h01,t1 -- movd r2,t2 -- movd r1,t3 -- punpcklqdq t3,t2 -- pmuludq h23,t2 -- paddq t2,t1 -- # t3[0] = h4 * r0 -- movd r0,t3 -- pmuludq h44,t3 -- # d4 = t1[0] + t1[1] + t3[0] -- movdqa t1,t4 -- psrldq $8,t4 -- paddq t4,t1 -- paddq t3,t1 -- movq t1,d4 -- -- # d1 += d0 >> 26 -- mov d0,%rax -- shr $26,%rax -- add %rax,d1 -- # h0 = d0 & 0x3ffffff -- mov d0,%rbx -- and $0x3ffffff,%ebx -- -- # d2 += d1 >> 26 -- mov d1,%rax -- shr $26,%rax -- add %rax,d2 -- # h1 = d1 & 0x3ffffff -- mov d1,%rax -- and $0x3ffffff,%eax -- mov %eax,h1 -- -- # d3 += d2 >> 26 -- mov d2,%rax -- shr $26,%rax -- add %rax,d3 -- # h2 = d2 & 0x3ffffff -- mov d2,%rax -- and $0x3ffffff,%eax -- mov %eax,h2 -- -- # d4 += d3 >> 26 -- mov d3,%rax -- shr $26,%rax -- add %rax,d4 -- # h3 = d3 & 0x3ffffff -- mov d3,%rax -- and $0x3ffffff,%eax -- 
mov %eax,h3 -- -- # h0 += (d4 >> 26) * 5 -- mov d4,%rax -- shr $26,%rax -- lea (%rax,%rax,4),%rax -- add %rax,%rbx -- # h4 = d4 & 0x3ffffff -- mov d4,%rax -- and $0x3ffffff,%eax -- mov %eax,h4 -- -- # h1 += h0 >> 26 -- mov %rbx,%rax -- shr $26,%rax -- add %eax,h1 -- # h0 = h0 & 0x3ffffff -- andl $0x3ffffff,%ebx -- mov %ebx,h0 -- -- add $0x10,m -- dec %rcx -- jnz .Ldoblock -- -- # Zeroing of key material -- mov %rcx,0x00(%rsp) -- mov %rcx,0x08(%rsp) -- -- add $0x10,%rsp -- pop %r12 -- pop %rbx -- ret --ENDPROC(poly1305_block_sse2) -- -- --#define u0 0x00(%r8) --#define u1 0x04(%r8) --#define u2 0x08(%r8) --#define u3 0x0c(%r8) --#define u4 0x10(%r8) --#define hc0 %xmm0 --#define hc1 %xmm1 --#define hc2 %xmm2 --#define hc3 %xmm5 --#define hc4 %xmm6 --#define ru0 %xmm7 --#define ru1 %xmm8 --#define ru2 %xmm9 --#define ru3 %xmm10 --#define ru4 %xmm11 --#define sv1 %xmm12 --#define sv2 %xmm13 --#define sv3 %xmm14 --#define sv4 %xmm15 --#undef d0 --#define d0 %r13 -- --ENTRY(poly1305_2block_sse2) -- # %rdi: Accumulator h[5] -- # %rsi: 16 byte input block m -- # %rdx: Poly1305 key r[5] -- # %rcx: Doubleblock count -- # %r8: Poly1305 derived key r^2 u[5] -- -- # This two-block variant further improves performance by using loop -- # unrolled block processing. 
This is more straight forward and does -- # less byte shuffling, but requires a second Poly1305 key r^2: -- # h = (h + m) * r => h = (h + m1) * r^2 + m2 * r -- -- push %rbx -- push %r12 -- push %r13 -- -- # combine r0,u0 -- movd u0,ru0 -- movd r0,t1 -- punpcklqdq t1,ru0 -- -- # combine r1,u1 and s1=r1*5,v1=u1*5 -- movd u1,ru1 -- movd r1,t1 -- punpcklqdq t1,ru1 -- movdqa ru1,sv1 -- pslld $2,sv1 -- paddd ru1,sv1 -- -- # combine r2,u2 and s2=r2*5,v2=u2*5 -- movd u2,ru2 -- movd r2,t1 -- punpcklqdq t1,ru2 -- movdqa ru2,sv2 -- pslld $2,sv2 -- paddd ru2,sv2 -- -- # combine r3,u3 and s3=r3*5,v3=u3*5 -- movd u3,ru3 -- movd r3,t1 -- punpcklqdq t1,ru3 -- movdqa ru3,sv3 -- pslld $2,sv3 -- paddd ru3,sv3 -- -- # combine r4,u4 and s4=r4*5,v4=u4*5 -- movd u4,ru4 -- movd r4,t1 -- punpcklqdq t1,ru4 -- movdqa ru4,sv4 -- pslld $2,sv4 -- paddd ru4,sv4 -- --.Ldoblock2: -- # hc0 = [ m[16-19] & 0x3ffffff, h0 + m[0-3] & 0x3ffffff ] -- movd 0x00(m),hc0 -- movd 0x10(m),t1 -- punpcklqdq t1,hc0 -- pand ANMASK(%rip),hc0 -- movd h0,t1 -- paddd t1,hc0 -- # hc1 = [ (m[19-22] >> 2) & 0x3ffffff, h1 + (m[3-6] >> 2) & 0x3ffffff ] -- movd 0x03(m),hc1 -- movd 0x13(m),t1 -- punpcklqdq t1,hc1 -- psrld $2,hc1 -- pand ANMASK(%rip),hc1 -- movd h1,t1 -- paddd t1,hc1 -- # hc2 = [ (m[22-25] >> 4) & 0x3ffffff, h2 + (m[6-9] >> 4) & 0x3ffffff ] -- movd 0x06(m),hc2 -- movd 0x16(m),t1 -- punpcklqdq t1,hc2 -- psrld $4,hc2 -- pand ANMASK(%rip),hc2 -- movd h2,t1 -- paddd t1,hc2 -- # hc3 = [ (m[25-28] >> 6) & 0x3ffffff, h3 + (m[9-12] >> 6) & 0x3ffffff ] -- movd 0x09(m),hc3 -- movd 0x19(m),t1 -- punpcklqdq t1,hc3 -- psrld $6,hc3 -- pand ANMASK(%rip),hc3 -- movd h3,t1 -- paddd t1,hc3 -- # hc4 = [ (m[28-31] >> 8) | (1<<24), h4 + (m[12-15] >> 8) | (1<<24) ] -- movd 0x0c(m),hc4 -- movd 0x1c(m),t1 -- punpcklqdq t1,hc4 -- psrld $8,hc4 -- por ORMASK(%rip),hc4 -- movd h4,t1 -- paddd t1,hc4 -- -- # t1 = [ hc0[1] * r0, hc0[0] * u0 ] -- movdqa ru0,t1 -- pmuludq hc0,t1 -- # t1 += [ hc1[1] * s4, hc1[0] * v4 ] -- movdqa sv4,t2 -- 
pmuludq hc1,t2 -- paddq t2,t1 -- # t1 += [ hc2[1] * s3, hc2[0] * v3 ] -- movdqa sv3,t2 -- pmuludq hc2,t2 -- paddq t2,t1 -- # t1 += [ hc3[1] * s2, hc3[0] * v2 ] -- movdqa sv2,t2 -- pmuludq hc3,t2 -- paddq t2,t1 -- # t1 += [ hc4[1] * s1, hc4[0] * v1 ] -- movdqa sv1,t2 -- pmuludq hc4,t2 -- paddq t2,t1 -- # d0 = t1[0] + t1[1] -- movdqa t1,t2 -- psrldq $8,t2 -- paddq t2,t1 -- movq t1,d0 -- -- # t1 = [ hc0[1] * r1, hc0[0] * u1 ] -- movdqa ru1,t1 -- pmuludq hc0,t1 -- # t1 += [ hc1[1] * r0, hc1[0] * u0 ] -- movdqa ru0,t2 -- pmuludq hc1,t2 -- paddq t2,t1 -- # t1 += [ hc2[1] * s4, hc2[0] * v4 ] -- movdqa sv4,t2 -- pmuludq hc2,t2 -- paddq t2,t1 -- # t1 += [ hc3[1] * s3, hc3[0] * v3 ] -- movdqa sv3,t2 -- pmuludq hc3,t2 -- paddq t2,t1 -- # t1 += [ hc4[1] * s2, hc4[0] * v2 ] -- movdqa sv2,t2 -- pmuludq hc4,t2 -- paddq t2,t1 -- # d1 = t1[0] + t1[1] -- movdqa t1,t2 -- psrldq $8,t2 -- paddq t2,t1 -- movq t1,d1 -- -- # t1 = [ hc0[1] * r2, hc0[0] * u2 ] -- movdqa ru2,t1 -- pmuludq hc0,t1 -- # t1 += [ hc1[1] * r1, hc1[0] * u1 ] -- movdqa ru1,t2 -- pmuludq hc1,t2 -- paddq t2,t1 -- # t1 += [ hc2[1] * r0, hc2[0] * u0 ] -- movdqa ru0,t2 -- pmuludq hc2,t2 -- paddq t2,t1 -- # t1 += [ hc3[1] * s4, hc3[0] * v4 ] -- movdqa sv4,t2 -- pmuludq hc3,t2 -- paddq t2,t1 -- # t1 += [ hc4[1] * s3, hc4[0] * v3 ] -- movdqa sv3,t2 -- pmuludq hc4,t2 -- paddq t2,t1 -- # d2 = t1[0] + t1[1] -- movdqa t1,t2 -- psrldq $8,t2 -- paddq t2,t1 -- movq t1,d2 -- -- # t1 = [ hc0[1] * r3, hc0[0] * u3 ] -- movdqa ru3,t1 -- pmuludq hc0,t1 -- # t1 += [ hc1[1] * r2, hc1[0] * u2 ] -- movdqa ru2,t2 -- pmuludq hc1,t2 -- paddq t2,t1 -- # t1 += [ hc2[1] * r1, hc2[0] * u1 ] -- movdqa ru1,t2 -- pmuludq hc2,t2 -- paddq t2,t1 -- # t1 += [ hc3[1] * r0, hc3[0] * u0 ] -- movdqa ru0,t2 -- pmuludq hc3,t2 -- paddq t2,t1 -- # t1 += [ hc4[1] * s4, hc4[0] * v4 ] -- movdqa sv4,t2 -- pmuludq hc4,t2 -- paddq t2,t1 -- # d3 = t1[0] + t1[1] -- movdqa t1,t2 -- psrldq $8,t2 -- paddq t2,t1 -- movq t1,d3 -- -- # t1 = [ hc0[1] * r4, hc0[0] * u4 ] -- 
movdqa ru4,t1 -- pmuludq hc0,t1 -- # t1 += [ hc1[1] * r3, hc1[0] * u3 ] -- movdqa ru3,t2 -- pmuludq hc1,t2 -- paddq t2,t1 -- # t1 += [ hc2[1] * r2, hc2[0] * u2 ] -- movdqa ru2,t2 -- pmuludq hc2,t2 -- paddq t2,t1 -- # t1 += [ hc3[1] * r1, hc3[0] * u1 ] -- movdqa ru1,t2 -- pmuludq hc3,t2 -- paddq t2,t1 -- # t1 += [ hc4[1] * r0, hc4[0] * u0 ] -- movdqa ru0,t2 -- pmuludq hc4,t2 -- paddq t2,t1 -- # d4 = t1[0] + t1[1] -- movdqa t1,t2 -- psrldq $8,t2 -- paddq t2,t1 -- movq t1,d4 -- -- # Now do a partial reduction mod (2^130)-5, carrying h0 -> h1 -> h2 -> -- # h3 -> h4 -> h0 -> h1 to get h0,h2,h3,h4 < 2^26 and h1 < 2^26 + a small -- # amount. Careful: we must not assume the carry bits 'd0 >> 26', -- # 'd1 >> 26', 'd2 >> 26', 'd3 >> 26', and '(d4 >> 26) * 5' fit in 32-bit -- # integers. It's true in a single-block implementation, but not here. -- -- # d1 += d0 >> 26 -- mov d0,%rax -- shr $26,%rax -- add %rax,d1 -- # h0 = d0 & 0x3ffffff -- mov d0,%rbx -- and $0x3ffffff,%ebx -- -- # d2 += d1 >> 26 -- mov d1,%rax -- shr $26,%rax -- add %rax,d2 -- # h1 = d1 & 0x3ffffff -- mov d1,%rax -- and $0x3ffffff,%eax -- mov %eax,h1 -- -- # d3 += d2 >> 26 -- mov d2,%rax -- shr $26,%rax -- add %rax,d3 -- # h2 = d2 & 0x3ffffff -- mov d2,%rax -- and $0x3ffffff,%eax -- mov %eax,h2 -- -- # d4 += d3 >> 26 -- mov d3,%rax -- shr $26,%rax -- add %rax,d4 -- # h3 = d3 & 0x3ffffff -- mov d3,%rax -- and $0x3ffffff,%eax -- mov %eax,h3 -- -- # h0 += (d4 >> 26) * 5 -- mov d4,%rax -- shr $26,%rax -- lea (%rax,%rax,4),%rax -- add %rax,%rbx -- # h4 = d4 & 0x3ffffff -- mov d4,%rax -- and $0x3ffffff,%eax -- mov %eax,h4 -- -- # h1 += h0 >> 26 -- mov %rbx,%rax -- shr $26,%rax -- add %eax,h1 -- # h0 = h0 & 0x3ffffff -- andl $0x3ffffff,%ebx -- mov %ebx,h0 -- -- add $0x20,m -- dec %rcx -- jnz .Ldoblock2 -- -- pop %r13 -- pop %r12 -- pop %rbx -- ret --ENDPROC(poly1305_2block_sse2) ---- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl -+++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl -@@ -1,11 +1,14 @@ --#! 
/usr/bin/env perl --# Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved. -+#!/usr/bin/env perl -+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause - # --# Licensed under the OpenSSL license (the "License"). You may not use --# this file except in compliance with the License. You can obtain a copy --# in the file LICENSE in the source distribution or at --# https://www.openssl.org/source/license.html -- -+# Copyright (C) 2017-2018 Samuel Neves . All Rights Reserved. -+# Copyright (C) 2017-2019 Jason A. Donenfeld . All Rights Reserved. -+# Copyright (C) 2006-2017 CRYPTOGAMS by . All Rights Reserved. -+# -+# This code is taken from the OpenSSL project but the author, Andy Polyakov, -+# has relicensed it under the licenses specified in the SPDX header above. -+# The original headers, including the original license headers, are -+# included below for completeness. - # - # ==================================================================== - # Written by Andy Polyakov for the OpenSSL -@@ -32,7 +35,7 @@ - # Skylake-X system performance. Since we are likely to suppress - # AVX512F capability flag [at least on Skylake-X], conversion serves - # as kind of "investment protection". Note that next *lake processor, --# Cannolake, has AVX512IFMA code path to execute... -+# Cannonlake, has AVX512IFMA code path to execute... - # - # Numbers are cycles per processed byte with poly1305_blocks alone, - # measured with rdtsc at fixed clock frequency. 
-@@ -68,39 +71,114 @@ $output = shift; - if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } - - $win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); -+$kernel=0; $kernel=1 if (!$flavour && !$output); - --$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; --( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or --( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or --die "can't locate x86_64-xlate.pl"; -- --if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` -- =~ /GNU assembler version ([2-9]\.[0-9]+)/) { -- $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25) + ($1>=2.26); -+if (!$kernel) { -+ $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; -+ ( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or -+ ( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or -+ die "can't locate x86_64-xlate.pl"; -+ -+ open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""; -+ *STDOUT=*OUT; -+ -+ if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` -+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) { -+ $avx = ($1>=2.19) + ($1>=2.22) + ($1>=2.25); -+ } -+ -+ if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) && -+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) { -+ $avx = ($1>=2.09) + ($1>=2.10) + ($1>=2.12); -+ $avx += 1 if ($1==2.11 && $2>=8); -+ } -+ -+ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && -+ `ml64 2>&1` =~ /Version ([0-9]+)\./) { -+ $avx = ($1>=10) + ($1>=11); -+ } -+ -+ if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) { -+ $avx = ($2>=3.0) + ($2>3.0); -+ } -+} else { -+ $avx = 4; # The kernel uses ifdefs for this. 
- } - --if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) && -- `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)(?:\.([0-9]+))?/) { -- $avx = ($1>=2.09) + ($1>=2.10) + 2 * ($1>=2.12); -- $avx += 2 if ($1==2.11 && $2>=8); -+sub declare_function() { -+ my ($name, $align, $nargs) = @_; -+ if($kernel) { -+ $code .= ".align $align\n"; -+ $code .= "ENTRY($name)\n"; -+ $code .= ".L$name:\n"; -+ } else { -+ $code .= ".globl $name\n"; -+ $code .= ".type $name,\@function,$nargs\n"; -+ $code .= ".align $align\n"; -+ $code .= "$name:\n"; -+ } - } - --if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && -- `ml64 2>&1` =~ /Version ([0-9]+)\./) { -- $avx = ($1>=10) + ($1>=12); -+sub end_function() { -+ my ($name) = @_; -+ if($kernel) { -+ $code .= "ENDPROC($name)\n"; -+ } else { -+ $code .= ".size $name,.-$name\n"; -+ } - } - --if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) { -- $avx = ($2>=3.0) + ($2>3.0); --} -+$code.=<<___ if $kernel; -+#include -+___ - --open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\""; --*STDOUT=*OUT; -+if ($avx) { -+$code.=<<___ if $kernel; -+.section .rodata -+___ -+$code.=<<___; -+.align 64 -+.Lconst: -+.Lmask24: -+.long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0 -+.L129: -+.long `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0 -+.Lmask26: -+.long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0 -+.Lpermd_avx2: -+.long 2,2,2,3,2,0,2,1 -+.Lpermd_avx512: -+.long 0,0,0,1, 0,2,0,3, 0,4,0,5, 0,6,0,7 -+ -+.L2_44_inp_permd: -+.long 0,1,1,2,2,3,7,7 -+.L2_44_inp_shift: -+.quad 0,12,24,64 -+.L2_44_mask: -+.quad 0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff -+.L2_44_shift_rgt: -+.quad 44,44,42,64 -+.L2_44_shift_lft: -+.quad 8,8,10,64 -+ -+.align 64 -+.Lx_mask44: -+.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff -+.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff -+.Lx_mask42: -+.quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff -+.quad 
0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff -+___ -+} -+$code.=<<___ if (!$kernel); -+.asciz "Poly1305 for x86_64, CRYPTOGAMS by " -+.align 16 -+___ - - my ($ctx,$inp,$len,$padbit)=("%rdi","%rsi","%rdx","%rcx"); - my ($mac,$nonce)=($inp,$len); # *_emit arguments --my ($d1,$d2,$d3, $r0,$r1,$s1)=map("%r$_",(8..13)); --my ($h0,$h1,$h2)=("%r14","%rbx","%rbp"); -+my ($d1,$d2,$d3, $r0,$r1,$s1)=("%r8","%r9","%rdi","%r11","%r12","%r13"); -+my ($h0,$h1,$h2)=("%r14","%rbx","%r10"); - - sub poly1305_iteration { - # input: copy of $r1 in %rax, $h0-$h2, $r0-$r1 -@@ -155,19 +233,19 @@ ___ - - $code.=<<___; - .text -- -+___ -+$code.=<<___ if (!$kernel); - .extern OPENSSL_ia32cap_P - --.globl poly1305_init --.hidden poly1305_init --.globl poly1305_blocks --.hidden poly1305_blocks --.globl poly1305_emit --.hidden poly1305_emit -- --.type poly1305_init,\@function,3 --.align 32 --poly1305_init: -+.globl poly1305_init_x86_64 -+.hidden poly1305_init_x86_64 -+.globl poly1305_blocks_x86_64 -+.hidden poly1305_blocks_x86_64 -+.globl poly1305_emit_x86_64 -+.hidden poly1305_emit_x86_64 -+___ -+&declare_function("poly1305_init_x86_64", 32, 3); -+$code.=<<___; - xor %rax,%rax - mov %rax,0($ctx) # initialize hash value - mov %rax,8($ctx) -@@ -175,11 +253,12 @@ poly1305_init: - - cmp \$0,$inp - je .Lno_key -- -- lea poly1305_blocks(%rip),%r10 -- lea poly1305_emit(%rip),%r11 - ___ --$code.=<<___ if ($avx); -+$code.=<<___ if (!$kernel); -+ lea poly1305_blocks_x86_64(%rip),%r10 -+ lea poly1305_emit_x86_64(%rip),%r11 -+___ -+$code.=<<___ if (!$kernel && $avx); - mov OPENSSL_ia32cap_P+4(%rip),%r9 - lea poly1305_blocks_avx(%rip),%rax - lea poly1305_emit_avx(%rip),%rcx -@@ -187,12 +266,12 @@ $code.=<<___ if ($avx); - cmovc %rax,%r10 - cmovc %rcx,%r11 - ___ --$code.=<<___ if ($avx>1); -+$code.=<<___ if (!$kernel && $avx>1); - lea poly1305_blocks_avx2(%rip),%rax - bt \$`5+32`,%r9 # AVX2? 
- cmovc %rax,%r10 - ___ --$code.=<<___ if ($avx>3); -+$code.=<<___ if (!$kernel && $avx>3); - mov \$`(1<<31|1<<21|1<<16)`,%rax - shr \$32,%r9 - and %rax,%r9 -@@ -207,11 +286,11 @@ $code.=<<___; - mov %rax,24($ctx) - mov %rcx,32($ctx) - ___ --$code.=<<___ if ($flavour !~ /elf32/); -+$code.=<<___ if (!$kernel && $flavour !~ /elf32/); - mov %r10,0(%rdx) - mov %r11,8(%rdx) - ___ --$code.=<<___ if ($flavour =~ /elf32/); -+$code.=<<___ if (!$kernel && $flavour =~ /elf32/); - mov %r10d,0(%rdx) - mov %r11d,4(%rdx) - ___ -@@ -219,11 +298,11 @@ $code.=<<___; - mov \$1,%eax - .Lno_key: - ret --.size poly1305_init,.-poly1305_init -+___ -+&end_function("poly1305_init_x86_64"); - --.type poly1305_blocks,\@function,4 --.align 32 --poly1305_blocks: -+&declare_function("poly1305_blocks_x86_64", 32, 4); -+$code.=<<___; - .cfi_startproc - .Lblocks: - shr \$4,$len -@@ -231,8 +310,6 @@ poly1305_blocks: - - push %rbx - .cfi_push %rbx -- push %rbp --.cfi_push %rbp - push %r12 - .cfi_push %r12 - push %r13 -@@ -241,6 +318,8 @@ poly1305_blocks: - .cfi_push %r14 - push %r15 - .cfi_push %r15 -+ push $ctx -+.cfi_push $ctx - .Lblocks_body: - - mov $len,%r15 # reassign $len -@@ -265,26 +344,29 @@ poly1305_blocks: - lea 16($inp),$inp - adc $padbit,$h2 - ___ -+ - &poly1305_iteration(); -+ - $code.=<<___; - mov $r1,%rax - dec %r15 # len-=16 - jnz .Loop - -+ mov 0(%rsp),$ctx -+.cfi_restore $ctx -+ - mov $h0,0($ctx) # store hash value - mov $h1,8($ctx) - mov $h2,16($ctx) - -- mov 0(%rsp),%r15 -+ mov 8(%rsp),%r15 - .cfi_restore %r15 -- mov 8(%rsp),%r14 -+ mov 16(%rsp),%r14 - .cfi_restore %r14 -- mov 16(%rsp),%r13 -+ mov 24(%rsp),%r13 - .cfi_restore %r13 -- mov 24(%rsp),%r12 -+ mov 32(%rsp),%r12 - .cfi_restore %r12 -- mov 32(%rsp),%rbp --.cfi_restore %rbp - mov 40(%rsp),%rbx - .cfi_restore %rbx - lea 48(%rsp),%rsp -@@ -293,11 +375,11 @@ $code.=<<___; - .Lblocks_epilogue: - ret - .cfi_endproc --.size poly1305_blocks,.-poly1305_blocks -+___ -+&end_function("poly1305_blocks_x86_64"); - --.type 
poly1305_emit,\@function,3 --.align 32 --poly1305_emit: -+&declare_function("poly1305_emit_x86_64", 32, 3); -+$code.=<<___; - .Lemit: - mov 0($ctx),%r8 # load hash value - mov 8($ctx),%r9 -@@ -318,10 +400,14 @@ poly1305_emit: - mov %rcx,8($mac) - - ret --.size poly1305_emit,.-poly1305_emit - ___ -+&end_function("poly1305_emit_x86_64"); - if ($avx) { - -+if($kernel) { -+ $code .= "#ifdef CONFIG_AS_AVX\n"; -+} -+ - ######################################################################## - # Layout of opaque area is following. - # -@@ -342,15 +428,19 @@ $code.=<<___; - .type __poly1305_block,\@abi-omnipotent - .align 32 - __poly1305_block: -+ push $ctx - ___ - &poly1305_iteration(); - $code.=<<___; -+ pop $ctx - ret - .size __poly1305_block,.-__poly1305_block - - .type __poly1305_init_avx,\@abi-omnipotent - .align 32 - __poly1305_init_avx: -+ push %rbp -+ mov %rsp,%rbp - mov $r0,$h0 - mov $r1,$h1 - xor $h2,$h2 -@@ -507,12 +597,13 @@ __poly1305_init_avx: - mov $d1#d,`16*8+8-64`($ctx) - - lea -48-64($ctx),$ctx # size [de-]optimization -+ pop %rbp - ret - .size __poly1305_init_avx,.-__poly1305_init_avx -+___ - --.type poly1305_blocks_avx,\@function,4 --.align 32 --poly1305_blocks_avx: -+&declare_function("poly1305_blocks_avx", 32, 4); -+$code.=<<___; - .cfi_startproc - mov 20($ctx),%r8d # is_base2_26 - cmp \$128,$len -@@ -532,10 +623,11 @@ poly1305_blocks_avx: - test \$31,$len - jz .Leven_avx - -- push %rbx --.cfi_push %rbx - push %rbp - .cfi_push %rbp -+ mov %rsp,%rbp -+ push %rbx -+.cfi_push %rbx - push %r12 - .cfi_push %r12 - push %r13 -@@ -645,20 +737,18 @@ poly1305_blocks_avx: - mov $h2#d,16($ctx) - .align 16 - .Ldone_avx: -- mov 0(%rsp),%r15 -+ pop %r15 - .cfi_restore %r15 -- mov 8(%rsp),%r14 -+ pop %r14 - .cfi_restore %r14 -- mov 16(%rsp),%r13 -+ pop %r13 - .cfi_restore %r13 -- mov 24(%rsp),%r12 -+ pop %r12 - .cfi_restore %r12 -- mov 32(%rsp),%rbp --.cfi_restore %rbp -- mov 40(%rsp),%rbx -+ pop %rbx - .cfi_restore %rbx -- lea 48(%rsp),%rsp --.cfi_adjust_cfa_offset 
-48 -+ pop %rbp -+.cfi_restore %rbp - .Lno_data_avx: - .Lblocks_avx_epilogue: - ret -@@ -667,10 +757,11 @@ poly1305_blocks_avx: - .align 32 - .Lbase2_64_avx: - .cfi_startproc -- push %rbx --.cfi_push %rbx - push %rbp - .cfi_push %rbp -+ mov %rsp,%rbp -+ push %rbx -+.cfi_push %rbx - push %r12 - .cfi_push %r12 - push %r13 -@@ -736,22 +827,18 @@ poly1305_blocks_avx: - - .Lproceed_avx: - mov %r15,$len -- -- mov 0(%rsp),%r15 -+ pop %r15 - .cfi_restore %r15 -- mov 8(%rsp),%r14 -+ pop %r14 - .cfi_restore %r14 -- mov 16(%rsp),%r13 -+ pop %r13 - .cfi_restore %r13 -- mov 24(%rsp),%r12 -+ pop %r12 - .cfi_restore %r12 -- mov 32(%rsp),%rbp --.cfi_restore %rbp -- mov 40(%rsp),%rbx -+ pop %rbx - .cfi_restore %rbx -- lea 48(%rsp),%rax -- lea 48(%rsp),%rsp --.cfi_adjust_cfa_offset -48 -+ pop %rbp -+.cfi_restore %rbp - .Lbase2_64_avx_epilogue: - jmp .Ldo_avx - .cfi_endproc -@@ -768,8 +855,11 @@ poly1305_blocks_avx: - .Ldo_avx: - ___ - $code.=<<___ if (!$win64); -+ lea 8(%rsp),%r10 -+.cfi_def_cfa_register %r10 -+ and \$-32,%rsp -+ sub \$-8,%rsp - lea -0x58(%rsp),%r11 --.cfi_def_cfa %r11,0x60 - sub \$0x178,%rsp - ___ - $code.=<<___ if ($win64); -@@ -1361,18 +1451,18 @@ $code.=<<___ if ($win64); - .Ldo_avx_epilogue: - ___ - $code.=<<___ if (!$win64); -- lea 0x58(%r11),%rsp --.cfi_def_cfa %rsp,8 -+ lea -8(%r10),%rsp -+.cfi_def_cfa_register %rsp - ___ - $code.=<<___; - vzeroupper - ret - .cfi_endproc --.size poly1305_blocks_avx,.-poly1305_blocks_avx -+___ -+&end_function("poly1305_blocks_avx"); - --.type poly1305_emit_avx,\@function,3 --.align 32 --poly1305_emit_avx: -+&declare_function("poly1305_emit_avx", 32, 3); -+$code.=<<___; - cmpl \$0,20($ctx) # is_base2_26? 
- je .Lemit - -@@ -1423,41 +1513,51 @@ poly1305_emit_avx: - mov %rcx,8($mac) - - ret --.size poly1305_emit_avx,.-poly1305_emit_avx - ___ -+&end_function("poly1305_emit_avx"); -+ -+if ($kernel) { -+ $code .= "#endif\n"; -+} - - if ($avx>1) { -+ -+if ($kernel) { -+ $code .= "#ifdef CONFIG_AS_AVX2\n"; -+} -+ - my ($H0,$H1,$H2,$H3,$H4, $MASK, $T4,$T0,$T1,$T2,$T3, $D0,$D1,$D2,$D3,$D4) = - map("%ymm$_",(0..15)); - my $S4=$MASK; - -+sub poly1305_blocks_avxN { -+ my ($avx512) = @_; -+ my $suffix = $avx512 ? "_avx512" : ""; - $code.=<<___; --.type poly1305_blocks_avx2,\@function,4 --.align 32 --poly1305_blocks_avx2: - .cfi_startproc - mov 20($ctx),%r8d # is_base2_26 - cmp \$128,$len -- jae .Lblocks_avx2 -+ jae .Lblocks_avx2$suffix - test %r8d,%r8d - jz .Lblocks - --.Lblocks_avx2: -+.Lblocks_avx2$suffix: - and \$-16,$len -- jz .Lno_data_avx2 -+ jz .Lno_data_avx2$suffix - - vzeroupper - - test %r8d,%r8d -- jz .Lbase2_64_avx2 -+ jz .Lbase2_64_avx2$suffix - - test \$63,$len -- jz .Leven_avx2 -+ jz .Leven_avx2$suffix - -- push %rbx --.cfi_push %rbx - push %rbp - .cfi_push %rbp -+ mov %rsp,%rbp -+ push %rbx -+.cfi_push %rbx - push %r12 - .cfi_push %r12 - push %r13 -@@ -1466,7 +1566,7 @@ poly1305_blocks_avx2: - .cfi_push %r14 - push %r15 - .cfi_push %r15 --.Lblocks_avx2_body: -+.Lblocks_avx2_body$suffix: - - mov $len,%r15 # reassign $len - -@@ -1513,7 +1613,7 @@ poly1305_blocks_avx2: - shr \$2,$s1 - add $r1,$s1 # s1 = r1 + (r1 >> 2) - --.Lbase2_26_pre_avx2: -+.Lbase2_26_pre_avx2$suffix: - add 0($inp),$h0 # accumulate input - adc 8($inp),$h1 - lea 16($inp),$inp -@@ -1524,10 +1624,10 @@ poly1305_blocks_avx2: - mov $r1,%rax - - test \$63,%r15 -- jnz .Lbase2_26_pre_avx2 -+ jnz .Lbase2_26_pre_avx2$suffix - - test $padbit,$padbit # if $padbit is zero, -- jz .Lstore_base2_64_avx2 # store hash in base 2^64 format -+ jz .Lstore_base2_64_avx2$suffix # store hash in base 2^64 format - - ################################# base 2^64 -> base 2^26 - mov $h0,%rax -@@ -1548,57 +1648,56 @@ 
poly1305_blocks_avx2: - or $r1,$h2 # h[4] - - test %r15,%r15 -- jz .Lstore_base2_26_avx2 -+ jz .Lstore_base2_26_avx2$suffix - - vmovd %rax#d,%x#$H0 - vmovd %rdx#d,%x#$H1 - vmovd $h0#d,%x#$H2 - vmovd $h1#d,%x#$H3 - vmovd $h2#d,%x#$H4 -- jmp .Lproceed_avx2 -+ jmp .Lproceed_avx2$suffix - - .align 32 --.Lstore_base2_64_avx2: -+.Lstore_base2_64_avx2$suffix: - mov $h0,0($ctx) - mov $h1,8($ctx) - mov $h2,16($ctx) # note that is_base2_26 is zeroed -- jmp .Ldone_avx2 -+ jmp .Ldone_avx2$suffix - - .align 16 --.Lstore_base2_26_avx2: -+.Lstore_base2_26_avx2$suffix: - mov %rax#d,0($ctx) # store hash value base 2^26 - mov %rdx#d,4($ctx) - mov $h0#d,8($ctx) - mov $h1#d,12($ctx) - mov $h2#d,16($ctx) - .align 16 --.Ldone_avx2: -- mov 0(%rsp),%r15 -+.Ldone_avx2$suffix: -+ pop %r15 - .cfi_restore %r15 -- mov 8(%rsp),%r14 -+ pop %r14 - .cfi_restore %r14 -- mov 16(%rsp),%r13 -+ pop %r13 - .cfi_restore %r13 -- mov 24(%rsp),%r12 -+ pop %r12 - .cfi_restore %r12 -- mov 32(%rsp),%rbp --.cfi_restore %rbp -- mov 40(%rsp),%rbx -+ pop %rbx - .cfi_restore %rbx -- lea 48(%rsp),%rsp --.cfi_adjust_cfa_offset -48 --.Lno_data_avx2: --.Lblocks_avx2_epilogue: -+ pop %rbp -+.cfi_restore %rbp -+.Lno_data_avx2$suffix: -+.Lblocks_avx2_epilogue$suffix: - ret - .cfi_endproc - - .align 32 --.Lbase2_64_avx2: -+.Lbase2_64_avx2$suffix: - .cfi_startproc -- push %rbx --.cfi_push %rbx - push %rbp - .cfi_push %rbp -+ mov %rsp,%rbp -+ push %rbx -+.cfi_push %rbx - push %r12 - .cfi_push %r12 - push %r13 -@@ -1607,7 +1706,7 @@ poly1305_blocks_avx2: - .cfi_push %r14 - push %r15 - .cfi_push %r15 --.Lbase2_64_avx2_body: -+.Lbase2_64_avx2_body$suffix: - - mov $len,%r15 # reassign $len - -@@ -1624,9 +1723,9 @@ poly1305_blocks_avx2: - add $r1,$s1 # s1 = r1 + (r1 >> 2) - - test \$63,$len -- jz .Linit_avx2 -+ jz .Linit_avx2$suffix - --.Lbase2_64_pre_avx2: -+.Lbase2_64_pre_avx2$suffix: - add 0($inp),$h0 # accumulate input - adc 8($inp),$h1 - lea 16($inp),$inp -@@ -1637,9 +1736,9 @@ poly1305_blocks_avx2: - mov $r1,%rax - - test 
\$63,%r15 -- jnz .Lbase2_64_pre_avx2 -+ jnz .Lbase2_64_pre_avx2$suffix - --.Linit_avx2: -+.Linit_avx2$suffix: - ################################# base 2^64 -> base 2^26 - mov $h0,%rax - mov $h0,%rdx -@@ -1667,69 +1766,77 @@ poly1305_blocks_avx2: - - call __poly1305_init_avx - --.Lproceed_avx2: -+.Lproceed_avx2$suffix: - mov %r15,$len # restore $len -- mov OPENSSL_ia32cap_P+8(%rip),%r10d -+___ -+$code.=<<___ if (!$kernel); -+ mov OPENSSL_ia32cap_P+8(%rip),%r9d - mov \$`(1<<31|1<<30|1<<16)`,%r11d -- -- mov 0(%rsp),%r15 -+___ -+$code.=<<___; -+ pop %r15 - .cfi_restore %r15 -- mov 8(%rsp),%r14 -+ pop %r14 - .cfi_restore %r14 -- mov 16(%rsp),%r13 -+ pop %r13 - .cfi_restore %r13 -- mov 24(%rsp),%r12 -+ pop %r12 - .cfi_restore %r12 -- mov 32(%rsp),%rbp --.cfi_restore %rbp -- mov 40(%rsp),%rbx -+ pop %rbx - .cfi_restore %rbx -- lea 48(%rsp),%rax -- lea 48(%rsp),%rsp --.cfi_adjust_cfa_offset -48 --.Lbase2_64_avx2_epilogue: -- jmp .Ldo_avx2 -+ pop %rbp -+.cfi_restore %rbp -+.Lbase2_64_avx2_epilogue$suffix: -+ jmp .Ldo_avx2$suffix - .cfi_endproc - - .align 32 --.Leven_avx2: -+.Leven_avx2$suffix: - .cfi_startproc -- mov OPENSSL_ia32cap_P+8(%rip),%r10d -+___ -+$code.=<<___ if (!$kernel); -+ mov OPENSSL_ia32cap_P+8(%rip),%r9d -+___ -+$code.=<<___; - vmovd 4*0($ctx),%x#$H0 # load hash value base 2^26 - vmovd 4*1($ctx),%x#$H1 - vmovd 4*2($ctx),%x#$H2 - vmovd 4*3($ctx),%x#$H3 - vmovd 4*4($ctx),%x#$H4 - --.Ldo_avx2: -+.Ldo_avx2$suffix: - ___ --$code.=<<___ if ($avx>2); -+$code.=<<___ if (!$kernel && $avx>2); - cmp \$512,$len - jb .Lskip_avx512 -- and %r11d,%r10d -- test \$`1<<16`,%r10d # check for AVX512F -+ and %r11d,%r9d -+ test \$`1<<16`,%r9d # check for AVX512F - jnz .Lblocks_avx512 --.Lskip_avx512: -+.Lskip_avx512$suffix: -+___ -+$code.=<<___ if ($avx > 2 && $avx512 && $kernel); -+ cmp \$512,$len -+ jae .Lblocks_avx512 - ___ - $code.=<<___ if (!$win64); -- lea -8(%rsp),%r11 --.cfi_def_cfa %r11,16 -+ lea 8(%rsp),%r10 -+.cfi_def_cfa_register %r10 - sub \$0x128,%rsp - ___ - 
$code.=<<___ if ($win64); -- lea -0xf8(%rsp),%r11 -+ lea 8(%rsp),%r10 - sub \$0x1c8,%rsp -- vmovdqa %xmm6,0x50(%r11) -- vmovdqa %xmm7,0x60(%r11) -- vmovdqa %xmm8,0x70(%r11) -- vmovdqa %xmm9,0x80(%r11) -- vmovdqa %xmm10,0x90(%r11) -- vmovdqa %xmm11,0xa0(%r11) -- vmovdqa %xmm12,0xb0(%r11) -- vmovdqa %xmm13,0xc0(%r11) -- vmovdqa %xmm14,0xd0(%r11) -- vmovdqa %xmm15,0xe0(%r11) --.Ldo_avx2_body: -+ vmovdqa %xmm6,-0xb0(%r10) -+ vmovdqa %xmm7,-0xa0(%r10) -+ vmovdqa %xmm8,-0x90(%r10) -+ vmovdqa %xmm9,-0x80(%r10) -+ vmovdqa %xmm10,-0x70(%r10) -+ vmovdqa %xmm11,-0x60(%r10) -+ vmovdqa %xmm12,-0x50(%r10) -+ vmovdqa %xmm13,-0x40(%r10) -+ vmovdqa %xmm14,-0x30(%r10) -+ vmovdqa %xmm15,-0x20(%r10) -+.Ldo_avx2_body$suffix: - ___ - $code.=<<___; - lea .Lconst(%rip),%rcx -@@ -1794,11 +1901,11 @@ $code.=<<___; - - vpaddq $H2,$T2,$H2 # accumulate input - sub \$64,$len -- jz .Ltail_avx2 -- jmp .Loop_avx2 -+ jz .Ltail_avx2$suffix -+ jmp .Loop_avx2$suffix - - .align 32 --.Loop_avx2: -+.Loop_avx2$suffix: - ################################################################ - # ((inp[0]*r^4+inp[4])*r^4+inp[ 8])*r^4 - # ((inp[1]*r^4+inp[5])*r^4+inp[ 9])*r^3 -@@ -1946,10 +2053,10 @@ $code.=<<___; - vpor 32(%rcx),$T4,$T4 # padbit, yes, always - - sub \$64,$len -- jnz .Loop_avx2 -+ jnz .Loop_avx2$suffix - - .byte 0x66,0x90 --.Ltail_avx2: -+.Ltail_avx2$suffix: - ################################################################ - # while above multiplications were by r^4 in all lanes, in last - # iteration we multiply least significant lane by r^4 and most -@@ -2087,37 +2194,29 @@ $code.=<<___; - vmovd %x#$H4,`4*4-48-64`($ctx) - ___ - $code.=<<___ if ($win64); -- vmovdqa 0x50(%r11),%xmm6 -- vmovdqa 0x60(%r11),%xmm7 -- vmovdqa 0x70(%r11),%xmm8 -- vmovdqa 0x80(%r11),%xmm9 -- vmovdqa 0x90(%r11),%xmm10 -- vmovdqa 0xa0(%r11),%xmm11 -- vmovdqa 0xb0(%r11),%xmm12 -- vmovdqa 0xc0(%r11),%xmm13 -- vmovdqa 0xd0(%r11),%xmm14 -- vmovdqa 0xe0(%r11),%xmm15 -- lea 0xf8(%r11),%rsp --.Ldo_avx2_epilogue: -+ vmovdqa 
-0xb0(%r10),%xmm6 -+ vmovdqa -0xa0(%r10),%xmm7 -+ vmovdqa -0x90(%r10),%xmm8 -+ vmovdqa -0x80(%r10),%xmm9 -+ vmovdqa -0x70(%r10),%xmm10 -+ vmovdqa -0x60(%r10),%xmm11 -+ vmovdqa -0x50(%r10),%xmm12 -+ vmovdqa -0x40(%r10),%xmm13 -+ vmovdqa -0x30(%r10),%xmm14 -+ vmovdqa -0x20(%r10),%xmm15 -+ lea -8(%r10),%rsp -+.Ldo_avx2_epilogue$suffix: - ___ - $code.=<<___ if (!$win64); -- lea 8(%r11),%rsp --.cfi_def_cfa %rsp,8 -+ lea -8(%r10),%rsp -+.cfi_def_cfa_register %rsp - ___ - $code.=<<___; - vzeroupper - ret - .cfi_endproc --.size poly1305_blocks_avx2,.-poly1305_blocks_avx2 - ___ --####################################################################### --if ($avx>2) { --# On entry we have input length divisible by 64. But since inner loop --# processes 128 bytes per iteration, cases when length is not divisible --# by 128 are handled by passing tail 64 bytes to .Ltail_avx2. For this --# reason stack layout is kept identical to poly1305_blocks_avx2. If not --# for this tail, we wouldn't have to even allocate stack frame... 
-- -+if($avx > 2 && $avx512) { - my ($R0,$R1,$R2,$R3,$R4, $S1,$S2,$S3,$S4) = map("%zmm$_",(16..24)); - my ($M0,$M1,$M2,$M3,$M4) = map("%zmm$_",(25..29)); - my $PADBIT="%zmm30"; -@@ -2128,32 +2227,29 @@ map(s/%y/%z/,($H0,$H1,$H2,$H3,$H4)); - map(s/%y/%z/,($MASK)); - - $code.=<<___; --.type poly1305_blocks_avx512,\@function,4 --.align 32 --poly1305_blocks_avx512: - .cfi_startproc - .Lblocks_avx512: - mov \$15,%eax - kmovw %eax,%k2 - ___ - $code.=<<___ if (!$win64); -- lea -8(%rsp),%r11 --.cfi_def_cfa %r11,16 -+ lea 8(%rsp),%r10 -+.cfi_def_cfa_register %r10 - sub \$0x128,%rsp - ___ - $code.=<<___ if ($win64); -- lea -0xf8(%rsp),%r11 -+ lea 8(%rsp),%r10 - sub \$0x1c8,%rsp -- vmovdqa %xmm6,0x50(%r11) -- vmovdqa %xmm7,0x60(%r11) -- vmovdqa %xmm8,0x70(%r11) -- vmovdqa %xmm9,0x80(%r11) -- vmovdqa %xmm10,0x90(%r11) -- vmovdqa %xmm11,0xa0(%r11) -- vmovdqa %xmm12,0xb0(%r11) -- vmovdqa %xmm13,0xc0(%r11) -- vmovdqa %xmm14,0xd0(%r11) -- vmovdqa %xmm15,0xe0(%r11) -+ vmovdqa %xmm6,-0xb0(%r10) -+ vmovdqa %xmm7,-0xa0(%r10) -+ vmovdqa %xmm8,-0x90(%r10) -+ vmovdqa %xmm9,-0x80(%r10) -+ vmovdqa %xmm10,-0x70(%r10) -+ vmovdqa %xmm11,-0x60(%r10) -+ vmovdqa %xmm12,-0x50(%r10) -+ vmovdqa %xmm13,-0x40(%r10) -+ vmovdqa %xmm14,-0x30(%r10) -+ vmovdqa %xmm15,-0x20(%r10) - .Ldo_avx512_body: - ___ - $code.=<<___; -@@ -2679,7 +2775,7 @@ $code.=<<___; - - lea 0x90(%rsp),%rax # size optimization for .Ltail_avx2 - add \$64,$len -- jnz .Ltail_avx2 -+ jnz .Ltail_avx2$suffix - - vpsubq $T2,$H2,$H2 # undo input accumulation - vmovd %x#$H0,`4*0-48-64`($ctx)# save partially reduced -@@ -2690,29 +2786,61 @@ $code.=<<___; - vzeroall - ___ - $code.=<<___ if ($win64); -- movdqa 0x50(%r11),%xmm6 -- movdqa 0x60(%r11),%xmm7 -- movdqa 0x70(%r11),%xmm8 -- movdqa 0x80(%r11),%xmm9 -- movdqa 0x90(%r11),%xmm10 -- movdqa 0xa0(%r11),%xmm11 -- movdqa 0xb0(%r11),%xmm12 -- movdqa 0xc0(%r11),%xmm13 -- movdqa 0xd0(%r11),%xmm14 -- movdqa 0xe0(%r11),%xmm15 -- lea 0xf8(%r11),%rsp -+ movdqa -0xb0(%r10),%xmm6 -+ movdqa 
-0xa0(%r10),%xmm7 -+ movdqa -0x90(%r10),%xmm8 -+ movdqa -0x80(%r10),%xmm9 -+ movdqa -0x70(%r10),%xmm10 -+ movdqa -0x60(%r10),%xmm11 -+ movdqa -0x50(%r10),%xmm12 -+ movdqa -0x40(%r10),%xmm13 -+ movdqa -0x30(%r10),%xmm14 -+ movdqa -0x20(%r10),%xmm15 -+ lea -8(%r10),%rsp - .Ldo_avx512_epilogue: - ___ - $code.=<<___ if (!$win64); -- lea 8(%r11),%rsp --.cfi_def_cfa %rsp,8 -+ lea -8(%r10),%rsp -+.cfi_def_cfa_register %rsp - ___ - $code.=<<___; - ret - .cfi_endproc --.size poly1305_blocks_avx512,.-poly1305_blocks_avx512 - ___ --if ($avx>3) { -+ -+} -+ -+} -+ -+&declare_function("poly1305_blocks_avx2", 32, 4); -+poly1305_blocks_avxN(0); -+&end_function("poly1305_blocks_avx2"); -+ -+if($kernel) { -+ $code .= "#endif\n"; -+} -+ -+####################################################################### -+if ($avx>2) { -+# On entry we have input length divisible by 64. But since inner loop -+# processes 128 bytes per iteration, cases when length is not divisible -+# by 128 are handled by passing tail 64 bytes to .Ltail_avx2. For this -+# reason stack layout is kept identical to poly1305_blocks_avx2. If not -+# for this tail, we wouldn't have to even allocate stack frame... -+ -+if($kernel) { -+ $code .= "#ifdef CONFIG_AS_AVX512\n"; -+} -+ -+&declare_function("poly1305_blocks_avx512", 32, 4); -+poly1305_blocks_avxN(1); -+&end_function("poly1305_blocks_avx512"); -+ -+if ($kernel) { -+ $code .= "#endif\n"; -+} -+ -+if (!$kernel && $avx>3) { - ######################################################################## - # VPMADD52 version using 2^44 radix. 
- # -@@ -3753,45 +3881,9 @@ poly1305_emit_base2_44: - .size poly1305_emit_base2_44,.-poly1305_emit_base2_44 - ___ - } } } --$code.=<<___; --.align 64 --.Lconst: --.Lmask24: --.long 0x0ffffff,0,0x0ffffff,0,0x0ffffff,0,0x0ffffff,0 --.L129: --.long `1<<24`,0,`1<<24`,0,`1<<24`,0,`1<<24`,0 --.Lmask26: --.long 0x3ffffff,0,0x3ffffff,0,0x3ffffff,0,0x3ffffff,0 --.Lpermd_avx2: --.long 2,2,2,3,2,0,2,1 --.Lpermd_avx512: --.long 0,0,0,1, 0,2,0,3, 0,4,0,5, 0,6,0,7 -- --.L2_44_inp_permd: --.long 0,1,1,2,2,3,7,7 --.L2_44_inp_shift: --.quad 0,12,24,64 --.L2_44_mask: --.quad 0xfffffffffff,0xfffffffffff,0x3ffffffffff,0xffffffffffffffff --.L2_44_shift_rgt: --.quad 44,44,42,64 --.L2_44_shift_lft: --.quad 8,8,10,64 -- --.align 64 --.Lx_mask44: --.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff --.quad 0xfffffffffff,0xfffffffffff,0xfffffffffff,0xfffffffffff --.Lx_mask42: --.quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff --.quad 0x3ffffffffff,0x3ffffffffff,0x3ffffffffff,0x3ffffffffff --___ - } --$code.=<<___; --.asciz "Poly1305 for x86_64, CRYPTOGAMS by " --.align 16 --___ - -+if (!$kernel) - { # chacha20-poly1305 helpers - my ($out,$inp,$otp,$len)=$win64 ? 
("%rcx","%rdx","%r8", "%r9") : # Win64 order - ("%rdi","%rsi","%rdx","%rcx"); # Unix order -@@ -4038,17 +4130,17 @@ avx_handler: - - .section .pdata - .align 4 -- .rva .LSEH_begin_poly1305_init -- .rva .LSEH_end_poly1305_init -- .rva .LSEH_info_poly1305_init -- -- .rva .LSEH_begin_poly1305_blocks -- .rva .LSEH_end_poly1305_blocks -- .rva .LSEH_info_poly1305_blocks -- -- .rva .LSEH_begin_poly1305_emit -- .rva .LSEH_end_poly1305_emit -- .rva .LSEH_info_poly1305_emit -+ .rva .LSEH_begin_poly1305_init_x86_64 -+ .rva .LSEH_end_poly1305_init_x86_64 -+ .rva .LSEH_info_poly1305_init_x86_64 -+ -+ .rva .LSEH_begin_poly1305_blocks_x86_64 -+ .rva .LSEH_end_poly1305_blocks_x86_64 -+ .rva .LSEH_info_poly1305_blocks_x86_64 -+ -+ .rva .LSEH_begin_poly1305_emit_x86_64 -+ .rva .LSEH_end_poly1305_emit_x86_64 -+ .rva .LSEH_info_poly1305_emit_x86_64 - ___ - $code.=<<___ if ($avx); - .rva .LSEH_begin_poly1305_blocks_avx -@@ -4088,20 +4180,20 @@ ___ - $code.=<<___; - .section .xdata - .align 8 --.LSEH_info_poly1305_init: -+.LSEH_info_poly1305_init_x86_64: - .byte 9,0,0,0 - .rva se_handler -- .rva .LSEH_begin_poly1305_init,.LSEH_begin_poly1305_init -+ .rva .LSEH_begin_poly1305_init_x86_64,.LSEH_begin_poly1305_init_x86_64 - --.LSEH_info_poly1305_blocks: -+.LSEH_info_poly1305_blocks_x86_64: - .byte 9,0,0,0 - .rva se_handler - .rva .Lblocks_body,.Lblocks_epilogue - --.LSEH_info_poly1305_emit: -+.LSEH_info_poly1305_emit_x86_64: - .byte 9,0,0,0 - .rva se_handler -- .rva .LSEH_begin_poly1305_emit,.LSEH_begin_poly1305_emit -+ .rva .LSEH_begin_poly1305_emit_x86_64,.LSEH_begin_poly1305_emit_x86_64 - ___ - $code.=<<___ if ($avx); - .LSEH_info_poly1305_blocks_avx_1: -@@ -4148,12 +4240,26 @@ $code.=<<___ if ($avx>2); - ___ - } - -+open SELF,$0; -+while() { -+ next if (/^#!/); -+ last if (!s/^#/\/\// and !/^$/); -+ print; -+} -+close SELF; -+ - foreach (split('\n',$code)) { - s/\`([^\`]*)\`/eval($1)/ge; - s/%r([a-z]+)#d/%e$1/g; - s/%r([0-9]+)#d/%r$1d/g; - s/%x#%[yz]/%x/g or s/%y#%z/%y/g or 
s/%z#%[yz]/%z/g; - -+ if ($kernel) { -+ s/(^\.type.*),[0-9]+$/\1/; -+ s/(^\.type.*),\@abi-omnipotent+$/\1,\@function/; -+ next if /^\.cfi.*/; -+ } -+ - print $_,"\n"; - } - close STDOUT; ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -1,8 +1,6 @@ --// SPDX-License-Identifier: GPL-2.0-or-later -+// SPDX-License-Identifier: GPL-2.0 OR MIT - /* -- * Poly1305 authenticator algorithm, RFC7539, SIMD glue code -- * -- * Copyright (C) 2015 Martin Willi -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. - */ - - #include -@@ -13,279 +11,170 @@ - #include - #include - #include -+#include - #include - --asmlinkage void poly1305_block_sse2(u32 *h, const u8 *src, -- const u32 *r, unsigned int blocks); --asmlinkage void poly1305_2block_sse2(u32 *h, const u8 *src, const u32 *r, -- unsigned int blocks, const u32 *u); --asmlinkage void poly1305_4block_avx2(u32 *h, const u8 *src, const u32 *r, -- unsigned int blocks, const u32 *u); -+asmlinkage void poly1305_init_x86_64(void *ctx, -+ const u8 key[POLY1305_KEY_SIZE]); -+asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, -+ const size_t len, const u32 padbit); -+asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], -+ const u32 nonce[4]); -+asmlinkage void poly1305_emit_avx(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], -+ const u32 nonce[4]); -+asmlinkage void poly1305_blocks_avx(void *ctx, const u8 *inp, const size_t len, -+ const u32 padbit); -+asmlinkage void poly1305_blocks_avx2(void *ctx, const u8 *inp, const size_t len, -+ const u32 padbit); -+asmlinkage void poly1305_blocks_avx512(void *ctx, const u8 *inp, -+ const size_t len, const u32 padbit); - --static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_simd); -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx); - static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx2); -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(poly1305_use_avx512); - --static inline u64 
mlt(u64 a, u64 b) --{ -- return a * b; --} -- --static inline u32 sr(u64 v, u_char n) --{ -- return v >> n; --} -- --static inline u32 and(u32 v, u32 mask) --{ -- return v & mask; --} -- --static void poly1305_simd_mult(u32 *a, const u32 *b) --{ -- u8 m[POLY1305_BLOCK_SIZE]; -- -- memset(m, 0, sizeof(m)); -- /* The poly1305 block function adds a hi-bit to the accumulator which -- * we don't need for key multiplication; compensate for it. */ -- a[4] -= 1 << 24; -- poly1305_block_sse2(a, m, b, 1); --} -- --static void poly1305_integer_setkey(struct poly1305_key *key, const u8 *raw_key) --{ -- /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ -- key->r[0] = (get_unaligned_le32(raw_key + 0) >> 0) & 0x3ffffff; -- key->r[1] = (get_unaligned_le32(raw_key + 3) >> 2) & 0x3ffff03; -- key->r[2] = (get_unaligned_le32(raw_key + 6) >> 4) & 0x3ffc0ff; -- key->r[3] = (get_unaligned_le32(raw_key + 9) >> 6) & 0x3f03fff; -- key->r[4] = (get_unaligned_le32(raw_key + 12) >> 8) & 0x00fffff; --} -+struct poly1305_arch_internal { -+ union { -+ struct { -+ u32 h[5]; -+ u32 is_base2_26; -+ }; -+ u64 hs[3]; -+ }; -+ u64 r[2]; -+ u64 pad; -+ struct { u32 r2, r1, r4, r3; } rn[9]; -+}; - --static void poly1305_integer_blocks(struct poly1305_state *state, -- const struct poly1305_key *key, -- const void *src, -- unsigned int nblocks, u32 hibit) -+/* The AVX code uses base 2^26, while the scalar code uses base 2^64. If we hit -+ * the unfortunate situation of using AVX and then having to go back to scalar -+ * -- because the user is silly and has called the update function from two -+ * separate contexts -- then we need to convert back to the original base before -+ * proceeding. It is possible to reason that the initial reduction below is -+ * sufficient given the implementation invariants. However, for an avoidance of -+ * doubt and because this is not performance critical, we do the full reduction -+ * anyway. 
Z3 proof of below function: https://xn--4db.cc/ltPtHCKN/py -+ */ -+static void convert_to_base2_64(void *ctx) - { -- u32 r0, r1, r2, r3, r4; -- u32 s1, s2, s3, s4; -- u32 h0, h1, h2, h3, h4; -- u64 d0, d1, d2, d3, d4; -+ struct poly1305_arch_internal *state = ctx; -+ u32 cy; - -- if (!nblocks) -+ if (!state->is_base2_26) - return; - -- r0 = key->r[0]; -- r1 = key->r[1]; -- r2 = key->r[2]; -- r3 = key->r[3]; -- r4 = key->r[4]; -- -- s1 = r1 * 5; -- s2 = r2 * 5; -- s3 = r3 * 5; -- s4 = r4 * 5; -- -- h0 = state->h[0]; -- h1 = state->h[1]; -- h2 = state->h[2]; -- h3 = state->h[3]; -- h4 = state->h[4]; -- -- do { -- /* h += m[i] */ -- h0 += (get_unaligned_le32(src + 0) >> 0) & 0x3ffffff; -- h1 += (get_unaligned_le32(src + 3) >> 2) & 0x3ffffff; -- h2 += (get_unaligned_le32(src + 6) >> 4) & 0x3ffffff; -- h3 += (get_unaligned_le32(src + 9) >> 6) & 0x3ffffff; -- h4 += (get_unaligned_le32(src + 12) >> 8) | (hibit << 24); -- -- /* h *= r */ -- d0 = mlt(h0, r0) + mlt(h1, s4) + mlt(h2, s3) + -- mlt(h3, s2) + mlt(h4, s1); -- d1 = mlt(h0, r1) + mlt(h1, r0) + mlt(h2, s4) + -- mlt(h3, s3) + mlt(h4, s2); -- d2 = mlt(h0, r2) + mlt(h1, r1) + mlt(h2, r0) + -- mlt(h3, s4) + mlt(h4, s3); -- d3 = mlt(h0, r3) + mlt(h1, r2) + mlt(h2, r1) + -- mlt(h3, r0) + mlt(h4, s4); -- d4 = mlt(h0, r4) + mlt(h1, r3) + mlt(h2, r2) + -- mlt(h3, r1) + mlt(h4, r0); -- -- /* (partial) h %= p */ -- d1 += sr(d0, 26); h0 = and(d0, 0x3ffffff); -- d2 += sr(d1, 26); h1 = and(d1, 0x3ffffff); -- d3 += sr(d2, 26); h2 = and(d2, 0x3ffffff); -- d4 += sr(d3, 26); h3 = and(d3, 0x3ffffff); -- h0 += sr(d4, 26) * 5; h4 = and(d4, 0x3ffffff); -- h1 += h0 >> 26; h0 = h0 & 0x3ffffff; -- -- src += POLY1305_BLOCK_SIZE; -- } while (--nblocks); -- -- state->h[0] = h0; -- state->h[1] = h1; -- state->h[2] = h2; -- state->h[3] = h3; -- state->h[4] = h4; --} -- --static void poly1305_integer_emit(const struct poly1305_state *state, void *dst) --{ -- u32 h0, h1, h2, h3, h4; -- u32 g0, g1, g2, g3, g4; -- u32 mask; -- -- /* fully carry h */ 
-- h0 = state->h[0]; -- h1 = state->h[1]; -- h2 = state->h[2]; -- h3 = state->h[3]; -- h4 = state->h[4]; -- -- h2 += (h1 >> 26); h1 = h1 & 0x3ffffff; -- h3 += (h2 >> 26); h2 = h2 & 0x3ffffff; -- h4 += (h3 >> 26); h3 = h3 & 0x3ffffff; -- h0 += (h4 >> 26) * 5; h4 = h4 & 0x3ffffff; -- h1 += (h0 >> 26); h0 = h0 & 0x3ffffff; -- -- /* compute h + -p */ -- g0 = h0 + 5; -- g1 = h1 + (g0 >> 26); g0 &= 0x3ffffff; -- g2 = h2 + (g1 >> 26); g1 &= 0x3ffffff; -- g3 = h3 + (g2 >> 26); g2 &= 0x3ffffff; -- g4 = h4 + (g3 >> 26) - (1 << 26); g3 &= 0x3ffffff; -- -- /* select h if h < p, or h + -p if h >= p */ -- mask = (g4 >> ((sizeof(u32) * 8) - 1)) - 1; -- g0 &= mask; -- g1 &= mask; -- g2 &= mask; -- g3 &= mask; -- g4 &= mask; -- mask = ~mask; -- h0 = (h0 & mask) | g0; -- h1 = (h1 & mask) | g1; -- h2 = (h2 & mask) | g2; -- h3 = (h3 & mask) | g3; -- h4 = (h4 & mask) | g4; -- -- /* h = h % (2^128) */ -- put_unaligned_le32((h0 >> 0) | (h1 << 26), dst + 0); -- put_unaligned_le32((h1 >> 6) | (h2 << 20), dst + 4); -- put_unaligned_le32((h2 >> 12) | (h3 << 14), dst + 8); -- put_unaligned_le32((h3 >> 18) | (h4 << 8), dst + 12); --} -- --void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key) --{ -- poly1305_integer_setkey(desc->opaque_r, key); -- desc->s[0] = get_unaligned_le32(key + 16); -- desc->s[1] = get_unaligned_le32(key + 20); -- desc->s[2] = get_unaligned_le32(key + 24); -- desc->s[3] = get_unaligned_le32(key + 28); -- poly1305_core_init(&desc->h); -- desc->buflen = 0; -- desc->sset = true; -- desc->rset = 1; --} --EXPORT_SYMBOL_GPL(poly1305_init_arch); -- --static unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx, -- const u8 *src, unsigned int srclen) --{ -- if (!dctx->sset) { -- if (!dctx->rset && srclen >= POLY1305_BLOCK_SIZE) { -- poly1305_integer_setkey(dctx->r, src); -- src += POLY1305_BLOCK_SIZE; -- srclen -= POLY1305_BLOCK_SIZE; -- dctx->rset = 1; -- } -- if (srclen >= POLY1305_BLOCK_SIZE) { -- dctx->s[0] = get_unaligned_le32(src + 0); -- 
dctx->s[1] = get_unaligned_le32(src + 4); -- dctx->s[2] = get_unaligned_le32(src + 8); -- dctx->s[3] = get_unaligned_le32(src + 12); -- src += POLY1305_BLOCK_SIZE; -- srclen -= POLY1305_BLOCK_SIZE; -- dctx->sset = true; -- } -+ cy = state->h[0] >> 26; state->h[0] &= 0x3ffffff; state->h[1] += cy; -+ cy = state->h[1] >> 26; state->h[1] &= 0x3ffffff; state->h[2] += cy; -+ cy = state->h[2] >> 26; state->h[2] &= 0x3ffffff; state->h[3] += cy; -+ cy = state->h[3] >> 26; state->h[3] &= 0x3ffffff; state->h[4] += cy; -+ state->hs[0] = ((u64)state->h[2] << 52) | ((u64)state->h[1] << 26) | state->h[0]; -+ state->hs[1] = ((u64)state->h[4] << 40) | ((u64)state->h[3] << 14) | (state->h[2] >> 12); -+ state->hs[2] = state->h[4] >> 24; -+#define ULT(a, b) ((a ^ ((a ^ b) | ((a - b) ^ b))) >> (sizeof(a) * 8 - 1)) -+ cy = (state->hs[2] >> 2) + (state->hs[2] & ~3ULL); -+ state->hs[2] &= 3; -+ state->hs[0] += cy; -+ state->hs[1] += (cy = ULT(state->hs[0], cy)); -+ state->hs[2] += ULT(state->hs[1], cy); -+#undef ULT -+ state->is_base2_26 = 0; -+} -+ -+static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE]) -+{ -+ poly1305_init_x86_64(ctx, key); -+} -+ -+static void poly1305_simd_blocks(void *ctx, const u8 *inp, size_t len, -+ const u32 padbit) -+{ -+ struct poly1305_arch_internal *state = ctx; -+ -+ /* SIMD disables preemption, so relax after processing each page. 
*/ -+ BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE || -+ PAGE_SIZE % POLY1305_BLOCK_SIZE); -+ -+ if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx) || -+ (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) || -+ !crypto_simd_usable()) { -+ convert_to_base2_64(ctx); -+ poly1305_blocks_x86_64(ctx, inp, len, padbit); -+ return; - } -- return srclen; --} - --static unsigned int poly1305_scalar_blocks(struct poly1305_desc_ctx *dctx, -- const u8 *src, unsigned int srclen) --{ -- unsigned int datalen; -+ for (;;) { -+ const size_t bytes = min_t(size_t, len, PAGE_SIZE); - -- if (unlikely(!dctx->sset)) { -- datalen = crypto_poly1305_setdesckey(dctx, src, srclen); -- src += srclen - datalen; -- srclen = datalen; -- } -- if (srclen >= POLY1305_BLOCK_SIZE) { -- poly1305_integer_blocks(&dctx->h, dctx->opaque_r, src, -- srclen / POLY1305_BLOCK_SIZE, 1); -- srclen %= POLY1305_BLOCK_SIZE; -+ kernel_fpu_begin(); -+ if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512)) -+ poly1305_blocks_avx512(ctx, inp, bytes, padbit); -+ else if (IS_ENABLED(CONFIG_AS_AVX2) && static_branch_likely(&poly1305_use_avx2)) -+ poly1305_blocks_avx2(ctx, inp, bytes, padbit); -+ else -+ poly1305_blocks_avx(ctx, inp, bytes, padbit); -+ kernel_fpu_end(); -+ len -= bytes; -+ if (!len) -+ break; -+ inp += bytes; - } -- return srclen; - } - --static unsigned int poly1305_simd_blocks(struct poly1305_desc_ctx *dctx, -- const u8 *src, unsigned int srclen) --{ -- unsigned int blocks, datalen; -+static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], -+ const u32 nonce[4]) -+{ -+ struct poly1305_arch_internal *state = ctx; -+ -+ if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx) || -+ !state->is_base2_26 || !crypto_simd_usable()) { -+ convert_to_base2_64(ctx); -+ poly1305_emit_x86_64(ctx, mac, nonce); -+ } else -+ poly1305_emit_avx(ctx, mac, nonce); -+} -+ -+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 
*key) -+{ -+ poly1305_simd_init(&dctx->h, key); -+ dctx->s[0] = get_unaligned_le32(&key[16]); -+ dctx->s[1] = get_unaligned_le32(&key[20]); -+ dctx->s[2] = get_unaligned_le32(&key[24]); -+ dctx->s[3] = get_unaligned_le32(&key[28]); -+ dctx->buflen = 0; -+ dctx->sset = true; -+} -+EXPORT_SYMBOL(poly1305_init_arch); - -+static unsigned int crypto_poly1305_setdctxkey(struct poly1305_desc_ctx *dctx, -+ const u8 *inp, unsigned int len) -+{ -+ unsigned int acc = 0; - if (unlikely(!dctx->sset)) { -- datalen = crypto_poly1305_setdesckey(dctx, src, srclen); -- src += srclen - datalen; -- srclen = datalen; -- } -- -- if (IS_ENABLED(CONFIG_AS_AVX2) && -- static_branch_likely(&poly1305_use_avx2) && -- srclen >= POLY1305_BLOCK_SIZE * 4) { -- if (unlikely(dctx->rset < 4)) { -- if (dctx->rset < 2) { -- dctx->r[1] = dctx->r[0]; -- poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r); -- } -- dctx->r[2] = dctx->r[1]; -- poly1305_simd_mult(dctx->r[2].r, dctx->r[0].r); -- dctx->r[3] = dctx->r[2]; -- poly1305_simd_mult(dctx->r[3].r, dctx->r[0].r); -- dctx->rset = 4; -+ if (!dctx->rset && len >= POLY1305_BLOCK_SIZE) { -+ poly1305_simd_init(&dctx->h, inp); -+ inp += POLY1305_BLOCK_SIZE; -+ len -= POLY1305_BLOCK_SIZE; -+ acc += POLY1305_BLOCK_SIZE; -+ dctx->rset = 1; - } -- blocks = srclen / (POLY1305_BLOCK_SIZE * 4); -- poly1305_4block_avx2(dctx->h.h, src, dctx->r[0].r, blocks, -- dctx->r[1].r); -- src += POLY1305_BLOCK_SIZE * 4 * blocks; -- srclen -= POLY1305_BLOCK_SIZE * 4 * blocks; -- } -- -- if (likely(srclen >= POLY1305_BLOCK_SIZE * 2)) { -- if (unlikely(dctx->rset < 2)) { -- dctx->r[1] = dctx->r[0]; -- poly1305_simd_mult(dctx->r[1].r, dctx->r[0].r); -- dctx->rset = 2; -+ if (len >= POLY1305_BLOCK_SIZE) { -+ dctx->s[0] = get_unaligned_le32(&inp[0]); -+ dctx->s[1] = get_unaligned_le32(&inp[4]); -+ dctx->s[2] = get_unaligned_le32(&inp[8]); -+ dctx->s[3] = get_unaligned_le32(&inp[12]); -+ inp += POLY1305_BLOCK_SIZE; -+ len -= POLY1305_BLOCK_SIZE; -+ acc += POLY1305_BLOCK_SIZE; -+ dctx->sset 
= true; - } -- blocks = srclen / (POLY1305_BLOCK_SIZE * 2); -- poly1305_2block_sse2(dctx->h.h, src, dctx->r[0].r, -- blocks, dctx->r[1].r); -- src += POLY1305_BLOCK_SIZE * 2 * blocks; -- srclen -= POLY1305_BLOCK_SIZE * 2 * blocks; -- } -- if (srclen >= POLY1305_BLOCK_SIZE) { -- poly1305_block_sse2(dctx->h.h, src, dctx->r[0].r, 1); -- srclen -= POLY1305_BLOCK_SIZE; - } -- return srclen; -+ return acc; - } - - void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src, - unsigned int srclen) - { -- unsigned int bytes; -+ unsigned int bytes, used; - - if (unlikely(dctx->buflen)) { - bytes = min(srclen, POLY1305_BLOCK_SIZE - dctx->buflen); -@@ -295,31 +184,19 @@ void poly1305_update_arch(struct poly130 - dctx->buflen += bytes; - - if (dctx->buflen == POLY1305_BLOCK_SIZE) { -- if (static_branch_likely(&poly1305_use_simd) && -- likely(crypto_simd_usable())) { -- kernel_fpu_begin(); -- poly1305_simd_blocks(dctx, dctx->buf, -- POLY1305_BLOCK_SIZE); -- kernel_fpu_end(); -- } else { -- poly1305_scalar_blocks(dctx, dctx->buf, -- POLY1305_BLOCK_SIZE); -- } -+ if (likely(!crypto_poly1305_setdctxkey(dctx, dctx->buf, POLY1305_BLOCK_SIZE))) -+ poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 1); - dctx->buflen = 0; - } - } - - if (likely(srclen >= POLY1305_BLOCK_SIZE)) { -- if (static_branch_likely(&poly1305_use_simd) && -- likely(crypto_simd_usable())) { -- kernel_fpu_begin(); -- bytes = poly1305_simd_blocks(dctx, src, srclen); -- kernel_fpu_end(); -- } else { -- bytes = poly1305_scalar_blocks(dctx, src, srclen); -- } -- src += srclen - bytes; -- srclen = bytes; -+ bytes = round_down(srclen, POLY1305_BLOCK_SIZE); -+ srclen -= bytes; -+ used = crypto_poly1305_setdctxkey(dctx, src, bytes); -+ if (likely(bytes - used)) -+ poly1305_simd_blocks(&dctx->h, src + used, bytes - used, 1); -+ src += bytes; - } - - if (unlikely(srclen)) { -@@ -329,31 +206,17 @@ void poly1305_update_arch(struct poly130 - } - EXPORT_SYMBOL(poly1305_update_arch); - --void 
poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *dst) -+void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) - { -- __le32 digest[4]; -- u64 f = 0; -- -- if (unlikely(desc->buflen)) { -- desc->buf[desc->buflen++] = 1; -- memset(desc->buf + desc->buflen, 0, -- POLY1305_BLOCK_SIZE - desc->buflen); -- poly1305_integer_blocks(&desc->h, desc->opaque_r, desc->buf, 1, 0); -+ if (unlikely(dctx->buflen)) { -+ dctx->buf[dctx->buflen++] = 1; -+ memset(dctx->buf + dctx->buflen, 0, -+ POLY1305_BLOCK_SIZE - dctx->buflen); -+ poly1305_simd_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); - } - -- poly1305_integer_emit(&desc->h, digest); -- -- /* mac = (h + s) % (2^128) */ -- f = (f >> 32) + le32_to_cpu(digest[0]) + desc->s[0]; -- put_unaligned_le32(f, dst + 0); -- f = (f >> 32) + le32_to_cpu(digest[1]) + desc->s[1]; -- put_unaligned_le32(f, dst + 4); -- f = (f >> 32) + le32_to_cpu(digest[2]) + desc->s[2]; -- put_unaligned_le32(f, dst + 8); -- f = (f >> 32) + le32_to_cpu(digest[3]) + desc->s[3]; -- put_unaligned_le32(f, dst + 12); -- -- *desc = (struct poly1305_desc_ctx){}; -+ poly1305_simd_emit(&dctx->h, dst, dctx->s); -+ *dctx = (struct poly1305_desc_ctx){}; - } - EXPORT_SYMBOL(poly1305_final_arch); - -@@ -361,38 +224,34 @@ static int crypto_poly1305_init(struct s - { - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - -- poly1305_core_init(&dctx->h); -- dctx->buflen = 0; -- dctx->rset = 0; -- dctx->sset = false; -- -+ *dctx = (struct poly1305_desc_ctx){}; - return 0; - } - --static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) -+static int crypto_poly1305_update(struct shash_desc *desc, -+ const u8 *src, unsigned int srclen) - { - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - -- if (unlikely(!dctx->sset)) -- return -ENOKEY; -- -- poly1305_final_arch(dctx, dst); -+ poly1305_update_arch(dctx, src, srclen); - return 0; - } - --static int poly1305_simd_update(struct shash_desc *desc, -- const u8 *src, unsigned int srclen) 
-+static int crypto_poly1305_final(struct shash_desc *desc, u8 *dst) - { - struct poly1305_desc_ctx *dctx = shash_desc_ctx(desc); - -- poly1305_update_arch(dctx, src, srclen); -+ if (unlikely(!dctx->sset)) -+ return -ENOKEY; -+ -+ poly1305_final_arch(dctx, dst); - return 0; - } - - static struct shash_alg alg = { - .digestsize = POLY1305_DIGEST_SIZE, - .init = crypto_poly1305_init, -- .update = poly1305_simd_update, -+ .update = crypto_poly1305_update, - .final = crypto_poly1305_final, - .descsize = sizeof(struct poly1305_desc_ctx), - .base = { -@@ -406,17 +265,19 @@ static struct shash_alg alg = { - - static int __init poly1305_simd_mod_init(void) - { -- if (!boot_cpu_has(X86_FEATURE_XMM2)) -- return 0; -- -- static_branch_enable(&poly1305_use_simd); -- -- if (IS_ENABLED(CONFIG_AS_AVX2) && -- boot_cpu_has(X86_FEATURE_AVX) && -+ if (IS_ENABLED(CONFIG_AS_AVX) && boot_cpu_has(X86_FEATURE_AVX) && -+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) -+ static_branch_enable(&poly1305_use_avx); -+ if (IS_ENABLED(CONFIG_AS_AVX2) && boot_cpu_has(X86_FEATURE_AVX) && - boot_cpu_has(X86_FEATURE_AVX2) && - cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, NULL)) - static_branch_enable(&poly1305_use_avx2); -- -+ if (IS_ENABLED(CONFIG_AS_AVX512) && boot_cpu_has(X86_FEATURE_AVX) && -+ boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX512F) && -+ cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM | XFEATURE_MASK_AVX512, NULL) && -+ /* Skylake downclocks unacceptably much when using zmm, but later generations are fast. */ -+ boot_cpu_data.x86_model != INTEL_FAM6_SKYLAKE_X) -+ static_branch_enable(&poly1305_use_avx512); - return IS_REACHABLE(CONFIG_CRYPTO_HASH) ? crypto_register_shash(&alg) : 0; - } - -@@ -430,7 +291,7 @@ module_init(poly1305_simd_mod_init); - module_exit(poly1305_simd_mod_exit); - - MODULE_LICENSE("GPL"); --MODULE_AUTHOR("Martin Willi "); -+MODULE_AUTHOR("Jason A. 
Donenfeld "); - MODULE_DESCRIPTION("Poly1305 authenticator"); - MODULE_ALIAS_CRYPTO("poly1305"); - MODULE_ALIAS_CRYPTO("poly1305-simd"); ---- a/lib/crypto/Kconfig -+++ b/lib/crypto/Kconfig -@@ -90,7 +90,7 @@ config CRYPTO_LIB_DES - config CRYPTO_LIB_POLY1305_RSIZE - int - default 2 if MIPS -- default 4 if X86_64 -+ default 11 if X86_64 - default 9 if ARM || ARM64 - default 1 - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0044-crypto-arm-arm64-mips-poly1305-remove-redundant-non-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0044-crypto-arm-arm64-mips-poly1305-remove-redundant-non-.patch deleted file mode 100644 index b95b99888..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0044-crypto-arm-arm64-mips-poly1305-remove-redundant-non-.patch +++ /dev/null @@ -1,171 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Sun, 5 Jan 2020 22:40:49 -0500 -Subject: [PATCH] crypto: {arm,arm64,mips}/poly1305 - remove redundant - non-reduction from emit -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit 31899908a0d248b030b4464425b86c717e0007d4 upstream. - -This appears to be some kind of copy and paste error, and is actually -dead code. - -Pre: f = 0 ⇒ (f >> 32) = 0 - f = (f >> 32) + le32_to_cpu(digest[0]); -Post: 0 ≤ f < 2³² - put_unaligned_le32(f, dst); - -Pre: 0 ≤ f < 2³² ⇒ (f >> 32) = 0 - f = (f >> 32) + le32_to_cpu(digest[1]); -Post: 0 ≤ f < 2³² - put_unaligned_le32(f, dst + 4); - -Pre: 0 ≤ f < 2³² ⇒ (f >> 32) = 0 - f = (f >> 32) + le32_to_cpu(digest[2]); -Post: 0 ≤ f < 2³² - put_unaligned_le32(f, dst + 8); - -Pre: 0 ≤ f < 2³² ⇒ (f >> 32) = 0 - f = (f >> 32) + le32_to_cpu(digest[3]); -Post: 0 ≤ f < 2³² - put_unaligned_le32(f, dst + 12); - -Therefore this sequence is redundant. And Andy's code appears to handle -misalignment acceptably. - -Signed-off-by: Jason A. 
Donenfeld -Tested-by: Ard Biesheuvel -Reviewed-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm/crypto/poly1305-glue.c | 18 ++---------------- - arch/arm64/crypto/poly1305-glue.c | 18 ++---------------- - arch/mips/crypto/poly1305-glue.c | 18 ++---------------- - 3 files changed, 6 insertions(+), 48 deletions(-) - ---- a/arch/arm/crypto/poly1305-glue.c -+++ b/arch/arm/crypto/poly1305-glue.c -@@ -20,7 +20,7 @@ - - void poly1305_init_arm(void *state, const u8 *key); - void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit); --void poly1305_emit_arm(void *state, __le32 *digest, const u32 *nonce); -+void poly1305_emit_arm(void *state, u8 *digest, const u32 *nonce); - - void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit) - { -@@ -179,9 +179,6 @@ EXPORT_SYMBOL(poly1305_update_arch); - - void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) - { -- __le32 digest[4]; -- u64 f = 0; -- - if (unlikely(dctx->buflen)) { - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + dctx->buflen, 0, -@@ -189,18 +186,7 @@ void poly1305_final_arch(struct poly1305 - poly1305_blocks_arm(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); - } - -- poly1305_emit_arm(&dctx->h, digest, dctx->s); -- -- /* mac = (h + s) % (2^128) */ -- f = (f >> 32) + le32_to_cpu(digest[0]); -- put_unaligned_le32(f, dst); -- f = (f >> 32) + le32_to_cpu(digest[1]); -- put_unaligned_le32(f, dst + 4); -- f = (f >> 32) + le32_to_cpu(digest[2]); -- put_unaligned_le32(f, dst + 8); -- f = (f >> 32) + le32_to_cpu(digest[3]); -- put_unaligned_le32(f, dst + 12); -- -+ poly1305_emit_arm(&dctx->h, dst, dctx->s); - *dctx = (struct poly1305_desc_ctx){}; - } - EXPORT_SYMBOL(poly1305_final_arch); ---- a/arch/arm64/crypto/poly1305-glue.c -+++ b/arch/arm64/crypto/poly1305-glue.c -@@ -21,7 +21,7 @@ - asmlinkage void poly1305_init_arm64(void *state, const u8 *key); - asmlinkage void poly1305_blocks(void *state, const u8 *src, u32 len, 
u32 hibit); - asmlinkage void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit); --asmlinkage void poly1305_emit(void *state, __le32 *digest, const u32 *nonce); -+asmlinkage void poly1305_emit(void *state, u8 *digest, const u32 *nonce); - - static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); - -@@ -162,9 +162,6 @@ EXPORT_SYMBOL(poly1305_update_arch); - - void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) - { -- __le32 digest[4]; -- u64 f = 0; -- - if (unlikely(dctx->buflen)) { - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + dctx->buflen, 0, -@@ -172,18 +169,7 @@ void poly1305_final_arch(struct poly1305 - poly1305_blocks(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); - } - -- poly1305_emit(&dctx->h, digest, dctx->s); -- -- /* mac = (h + s) % (2^128) */ -- f = (f >> 32) + le32_to_cpu(digest[0]); -- put_unaligned_le32(f, dst); -- f = (f >> 32) + le32_to_cpu(digest[1]); -- put_unaligned_le32(f, dst + 4); -- f = (f >> 32) + le32_to_cpu(digest[2]); -- put_unaligned_le32(f, dst + 8); -- f = (f >> 32) + le32_to_cpu(digest[3]); -- put_unaligned_le32(f, dst + 12); -- -+ poly1305_emit(&dctx->h, dst, dctx->s); - *dctx = (struct poly1305_desc_ctx){}; - } - EXPORT_SYMBOL(poly1305_final_arch); ---- a/arch/mips/crypto/poly1305-glue.c -+++ b/arch/mips/crypto/poly1305-glue.c -@@ -15,7 +15,7 @@ - - asmlinkage void poly1305_init_mips(void *state, const u8 *key); - asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit); --asmlinkage void poly1305_emit_mips(void *state, __le32 *digest, const u32 *nonce); -+asmlinkage void poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce); - - void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) - { -@@ -134,9 +134,6 @@ EXPORT_SYMBOL(poly1305_update_arch); - - void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst) - { -- __le32 digest[4]; -- u64 f = 0; -- - if (unlikely(dctx->buflen)) { - dctx->buf[dctx->buflen++] = 1; - memset(dctx->buf + 
dctx->buflen, 0, -@@ -144,18 +141,7 @@ void poly1305_final_arch(struct poly1305 - poly1305_blocks_mips(&dctx->h, dctx->buf, POLY1305_BLOCK_SIZE, 0); - } - -- poly1305_emit_mips(&dctx->h, digest, dctx->s); -- -- /* mac = (h + s) % (2^128) */ -- f = (f >> 32) + le32_to_cpu(digest[0]); -- put_unaligned_le32(f, dst); -- f = (f >> 32) + le32_to_cpu(digest[1]); -- put_unaligned_le32(f, dst + 4); -- f = (f >> 32) + le32_to_cpu(digest[2]); -- put_unaligned_le32(f, dst + 8); -- f = (f >> 32) + le32_to_cpu(digest[3]); -- put_unaligned_le32(f, dst + 12); -- -+ poly1305_emit_mips(&dctx->h, dst, dctx->s); - *dctx = (struct poly1305_desc_ctx){}; - } - EXPORT_SYMBOL(poly1305_final_arch); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0045-crypto-curve25519-Fix-selftest-build-error.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0045-crypto-curve25519-Fix-selftest-build-error.patch deleted file mode 100644 index fa8d8fd6a..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0045-crypto-curve25519-Fix-selftest-build-error.patch +++ /dev/null @@ -1,102 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Herbert Xu -Date: Wed, 8 Jan 2020 12:37:35 +0800 -Subject: [PATCH] crypto: curve25519 - Fix selftest build error - -commit a8bdf2c42ee4d1ee42af1f3601f85de94e70a421 upstream. - -If CRYPTO_CURVE25519 is y, CRYPTO_LIB_CURVE25519_GENERIC will be -y, but CRYPTO_LIB_CURVE25519 may be set to m, this causes build -errors: - -lib/crypto/curve25519-selftest.o: In function `curve25519': -curve25519-selftest.c:(.text.unlikely+0xc): undefined reference to `curve25519_arch' -lib/crypto/curve25519-selftest.o: In function `curve25519_selftest': -curve25519-selftest.c:(.init.text+0x17e): undefined reference to `curve25519_base_arch' - -This is because the curve25519 self-test code is being controlled -by the GENERIC option rather than the overall CURVE25519 option, -as is the case with blake2s. 
To recap, the GENERIC and ARCH options -for CURVE25519 are internal only and selected by users such as -the Crypto API, or the externally visible CURVE25519 option which -in turn is selected by wireguard. The self-test is specific to the -the external CURVE25519 option and should not be enabled by the -Crypto API. - -This patch fixes this by splitting the GENERIC module from the -CURVE25519 module with the latter now containing just the self-test. - -Reported-by: Hulk Robot -Fixes: aa127963f1ca ("crypto: lib/curve25519 - re-add selftests") -Signed-off-by: Herbert Xu -Reviewed-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - lib/crypto/Makefile | 9 ++++++--- - lib/crypto/curve25519-generic.c | 24 ++++++++++++++++++++++++ - lib/crypto/curve25519.c | 7 ------- - 3 files changed, 30 insertions(+), 10 deletions(-) - create mode 100644 lib/crypto/curve25519-generic.c - ---- a/lib/crypto/Makefile -+++ b/lib/crypto/Makefile -@@ -19,9 +19,12 @@ libblake2s-y += blake2s.o - obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o - libchacha20poly1305-y += chacha20poly1305.o - --obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519.o --libcurve25519-y := curve25519-fiat32.o --libcurve25519-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o -+obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519-generic.o -+libcurve25519-generic-y := curve25519-fiat32.o -+libcurve25519-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := curve25519-hacl64.o -+libcurve25519-generic-y += curve25519-generic.o -+ -+obj-$(CONFIG_CRYPTO_LIB_CURVE25519) += libcurve25519.o - libcurve25519-y += curve25519.o - - obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o ---- /dev/null -+++ b/lib/crypto/curve25519-generic.c -@@ -0,0 +1,24 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ * -+ * This is an implementation of the Curve25519 ECDH algorithm, using either -+ * a 32-bit implementation or a 64-bit implementation with 128-bit integers, -+ * depending on what is supported by the target compiler. -+ * -+ * Information: https://cr.yp.to/ecdh.html -+ */ -+ -+#include -+#include -+ -+const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 }; -+const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 }; -+ -+EXPORT_SYMBOL(curve25519_null_point); -+EXPORT_SYMBOL(curve25519_base_point); -+EXPORT_SYMBOL(curve25519_generic); -+ -+MODULE_LICENSE("GPL v2"); -+MODULE_DESCRIPTION("Curve25519 scalar multiplication"); -+MODULE_AUTHOR("Jason A. Donenfeld "); ---- a/lib/crypto/curve25519.c -+++ b/lib/crypto/curve25519.c -@@ -15,13 +15,6 @@ - - bool curve25519_selftest(void); - --const u8 curve25519_null_point[CURVE25519_KEY_SIZE] __aligned(32) = { 0 }; --const u8 curve25519_base_point[CURVE25519_KEY_SIZE] __aligned(32) = { 9 }; -- --EXPORT_SYMBOL(curve25519_null_point); --EXPORT_SYMBOL(curve25519_base_point); --EXPORT_SYMBOL(curve25519_generic); -- - static int __init mod_init(void) - { - if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0046-crypto-x86-poly1305-fix-.gitignore-typo.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0046-crypto-x86-poly1305-fix-.gitignore-typo.patch deleted file mode 100644 index 27f0417ac..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0046-crypto-x86-poly1305-fix-.gitignore-typo.patch +++ /dev/null @@ -1,23 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 16 Jan 2020 18:23:55 +0100 -Subject: [PATCH] crypto: x86/poly1305 - fix .gitignore typo - -commit 1f6868995326cc82102049e349d8dbd116bdb656 upstream. - -Admist the kbuild robot induced changes, the .gitignore file for the -generated file wasn't updated with the non-clashing filename. 
This -commit adjusts that. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/.gitignore | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/x86/crypto/.gitignore -+++ b/arch/x86/crypto/.gitignore -@@ -1 +1 @@ --poly1305-x86_64.S -+poly1305-x86_64-cryptogams.S diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0047-crypto-chacha20poly1305-add-back-missing-test-vector.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0047-crypto-chacha20poly1305-add-back-missing-test-vector.patch deleted file mode 100644 index eda969577..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0047-crypto-chacha20poly1305-add-back-missing-test-vector.patch +++ /dev/null @@ -1,1858 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 16 Jan 2020 21:26:34 +0100 -Subject: [PATCH] crypto: chacha20poly1305 - add back missing test vectors and - test chunking - -commit 72c7943792c9e7788ddd182337bcf8f650cf56f5 upstream. - -When this was originally ported, the 12-byte nonce vectors were left out -to keep things simple. I agree that we don't need nor want a library -interface for 12-byte nonces. But these test vectors were specially -crafted to look at issues in the underlying primitives and related -interactions. Therefore, we actually want to keep around all of the -test vectors, and simply have a helper function to test them with. - -Secondly, the sglist-based chunking code in the library interface is -rather complicated, so this adds a developer-only test for ensuring that -all the book keeping is correct, across a wide array of possibilities. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - lib/crypto/chacha20poly1305-selftest.c | 1712 +++++++++++++++++++++++- - 1 file changed, 1698 insertions(+), 14 deletions(-) - ---- a/lib/crypto/chacha20poly1305-selftest.c -+++ b/lib/crypto/chacha20poly1305-selftest.c -@@ -4,6 +4,7 @@ - */ - - #include -+#include - #include - - #include -@@ -1926,6 +1927,1104 @@ static const u8 enc_key012[] __initconst - 0x65, 0x91, 0x6e, 0x2a, 0x79, 0x22, 0xda, 0x64 - }; - -+/* wycheproof - rfc7539 */ -+static const u8 enc_input013[] __initconst = { -+ 0x4c, 0x61, 0x64, 0x69, 0x65, 0x73, 0x20, 0x61, -+ 0x6e, 0x64, 0x20, 0x47, 0x65, 0x6e, 0x74, 0x6c, -+ 0x65, 0x6d, 0x65, 0x6e, 0x20, 0x6f, 0x66, 0x20, -+ 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73, -+ 0x73, 0x20, 0x6f, 0x66, 0x20, 0x27, 0x39, 0x39, -+ 0x3a, 0x20, 0x49, 0x66, 0x20, 0x49, 0x20, 0x63, -+ 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6f, 0x66, 0x66, -+ 0x65, 0x72, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x6f, -+ 0x6e, 0x6c, 0x79, 0x20, 0x6f, 0x6e, 0x65, 0x20, -+ 0x74, 0x69, 0x70, 0x20, 0x66, 0x6f, 0x72, 0x20, -+ 0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75, -+ 0x72, 0x65, 0x2c, 0x20, 0x73, 0x75, 0x6e, 0x73, -+ 0x63, 0x72, 0x65, 0x65, 0x6e, 0x20, 0x77, 0x6f, -+ 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x69, -+ 0x74, 0x2e -+}; -+static const u8 enc_output013[] __initconst = { -+ 0xd3, 0x1a, 0x8d, 0x34, 0x64, 0x8e, 0x60, 0xdb, -+ 0x7b, 0x86, 0xaf, 0xbc, 0x53, 0xef, 0x7e, 0xc2, -+ 0xa4, 0xad, 0xed, 0x51, 0x29, 0x6e, 0x08, 0xfe, -+ 0xa9, 0xe2, 0xb5, 0xa7, 0x36, 0xee, 0x62, 0xd6, -+ 0x3d, 0xbe, 0xa4, 0x5e, 0x8c, 0xa9, 0x67, 0x12, -+ 0x82, 0xfa, 0xfb, 0x69, 0xda, 0x92, 0x72, 0x8b, -+ 0x1a, 0x71, 0xde, 0x0a, 0x9e, 0x06, 0x0b, 0x29, -+ 0x05, 0xd6, 0xa5, 0xb6, 0x7e, 0xcd, 0x3b, 0x36, -+ 0x92, 0xdd, 0xbd, 0x7f, 0x2d, 0x77, 0x8b, 0x8c, -+ 0x98, 0x03, 0xae, 0xe3, 0x28, 0x09, 0x1b, 0x58, -+ 0xfa, 0xb3, 0x24, 0xe4, 0xfa, 0xd6, 0x75, 0x94, -+ 0x55, 0x85, 0x80, 0x8b, 0x48, 0x31, 0xd7, 0xbc, -+ 0x3f, 0xf4, 0xde, 0xf0, 0x8e, 0x4b, 0x7a, 0x9d, -+ 0xe5, 0x76, 0xd2, 0x65, 0x86, 0xce, 0xc6, 
0x4b, -+ 0x61, 0x16, 0x1a, 0xe1, 0x0b, 0x59, 0x4f, 0x09, -+ 0xe2, 0x6a, 0x7e, 0x90, 0x2e, 0xcb, 0xd0, 0x60, -+ 0x06, 0x91 -+}; -+static const u8 enc_assoc013[] __initconst = { -+ 0x50, 0x51, 0x52, 0x53, 0xc0, 0xc1, 0xc2, 0xc3, -+ 0xc4, 0xc5, 0xc6, 0xc7 -+}; -+static const u8 enc_nonce013[] __initconst = { -+ 0x07, 0x00, 0x00, 0x00, 0x40, 0x41, 0x42, 0x43, -+ 0x44, 0x45, 0x46, 0x47 -+}; -+static const u8 enc_key013[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input014[] __initconst = { }; -+static const u8 enc_output014[] __initconst = { -+ 0x76, 0xac, 0xb3, 0x42, 0xcf, 0x31, 0x66, 0xa5, -+ 0xb6, 0x3c, 0x0c, 0x0e, 0xa1, 0x38, 0x3c, 0x8d -+}; -+static const u8 enc_assoc014[] __initconst = { }; -+static const u8 enc_nonce014[] __initconst = { -+ 0x4d, 0xa5, 0xbf, 0x8d, 0xfd, 0x58, 0x52, 0xc1, -+ 0xea, 0x12, 0x37, 0x9d -+}; -+static const u8 enc_key014[] __initconst = { -+ 0x80, 0xba, 0x31, 0x92, 0xc8, 0x03, 0xce, 0x96, -+ 0x5e, 0xa3, 0x71, 0xd5, 0xff, 0x07, 0x3c, 0xf0, -+ 0xf4, 0x3b, 0x6a, 0x2a, 0xb5, 0x76, 0xb2, 0x08, -+ 0x42, 0x6e, 0x11, 0x40, 0x9c, 0x09, 0xb9, 0xb0 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input015[] __initconst = { }; -+static const u8 enc_output015[] __initconst = { -+ 0x90, 0x6f, 0xa6, 0x28, 0x4b, 0x52, 0xf8, 0x7b, -+ 0x73, 0x59, 0xcb, 0xaa, 0x75, 0x63, 0xc7, 0x09 -+}; -+static const u8 enc_assoc015[] __initconst = { -+ 0xbd, 0x50, 0x67, 0x64, 0xf2, 0xd2, 0xc4, 0x10 -+}; -+static const u8 enc_nonce015[] __initconst = { -+ 0xa9, 0x2e, 0xf0, 0xac, 0x99, 0x1d, 0xd5, 0x16, -+ 0xa3, 0xc6, 0xf6, 0x89 -+}; -+static const u8 enc_key015[] __initconst = { -+ 0x7a, 0x4c, 0xd7, 0x59, 0x17, 0x2e, 0x02, 0xeb, -+ 0x20, 0x4d, 0xb2, 0xc3, 0xf5, 0xc7, 0x46, 0x22, -+ 0x7d, 0xf5, 0x84, 0xfc, 0x13, 0x45, 0x19, 0x63, -+ 0x91, 
0xdb, 0xb9, 0x57, 0x7a, 0x25, 0x07, 0x42 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input016[] __initconst = { -+ 0x2a -+}; -+static const u8 enc_output016[] __initconst = { -+ 0x3a, 0xca, 0xc2, 0x7d, 0xec, 0x09, 0x68, 0x80, -+ 0x1e, 0x9f, 0x6e, 0xde, 0xd6, 0x9d, 0x80, 0x75, -+ 0x22 -+}; -+static const u8 enc_assoc016[] __initconst = { }; -+static const u8 enc_nonce016[] __initconst = { -+ 0x99, 0xe2, 0x3e, 0xc4, 0x89, 0x85, 0xbc, 0xcd, -+ 0xee, 0xab, 0x60, 0xf1 -+}; -+static const u8 enc_key016[] __initconst = { -+ 0xcc, 0x56, 0xb6, 0x80, 0x55, 0x2e, 0xb7, 0x50, -+ 0x08, 0xf5, 0x48, 0x4b, 0x4c, 0xb8, 0x03, 0xfa, -+ 0x50, 0x63, 0xeb, 0xd6, 0xea, 0xb9, 0x1f, 0x6a, -+ 0xb6, 0xae, 0xf4, 0x91, 0x6a, 0x76, 0x62, 0x73 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input017[] __initconst = { -+ 0x51 -+}; -+static const u8 enc_output017[] __initconst = { -+ 0xc4, 0x16, 0x83, 0x10, 0xca, 0x45, 0xb1, 0xf7, -+ 0xc6, 0x6c, 0xad, 0x4e, 0x99, 0xe4, 0x3f, 0x72, -+ 0xb9 -+}; -+static const u8 enc_assoc017[] __initconst = { -+ 0x91, 0xca, 0x6c, 0x59, 0x2c, 0xbc, 0xca, 0x53 -+}; -+static const u8 enc_nonce017[] __initconst = { -+ 0xab, 0x0d, 0xca, 0x71, 0x6e, 0xe0, 0x51, 0xd2, -+ 0x78, 0x2f, 0x44, 0x03 -+}; -+static const u8 enc_key017[] __initconst = { -+ 0x46, 0xf0, 0x25, 0x49, 0x65, 0xf7, 0x69, 0xd5, -+ 0x2b, 0xdb, 0x4a, 0x70, 0xb4, 0x43, 0x19, 0x9f, -+ 0x8e, 0xf2, 0x07, 0x52, 0x0d, 0x12, 0x20, 0xc5, -+ 0x5e, 0x4b, 0x70, 0xf0, 0xfd, 0xa6, 0x20, 0xee -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input018[] __initconst = { -+ 0x5c, 0x60 -+}; -+static const u8 enc_output018[] __initconst = { -+ 0x4d, 0x13, 0x91, 0xe8, 0xb6, 0x1e, 0xfb, 0x39, -+ 0xc1, 0x22, 0x19, 0x54, 0x53, 0x07, 0x7b, 0x22, -+ 0xe5, 0xe2 -+}; -+static const u8 enc_assoc018[] __initconst = { }; -+static const u8 enc_nonce018[] __initconst = { -+ 0x46, 0x1a, 0xf1, 0x22, 0xe9, 0xf2, 0xe0, 0x34, -+ 0x7e, 0x03, 0xf2, 0xdb -+}; -+static const u8 enc_key018[] __initconst = { -+ 0x2f, 0x7f, 
0x7e, 0x4f, 0x59, 0x2b, 0xb3, 0x89, -+ 0x19, 0x49, 0x89, 0x74, 0x35, 0x07, 0xbf, 0x3e, -+ 0xe9, 0xcb, 0xde, 0x17, 0x86, 0xb6, 0x69, 0x5f, -+ 0xe6, 0xc0, 0x25, 0xfd, 0x9b, 0xa4, 0xc1, 0x00 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input019[] __initconst = { -+ 0xdd, 0xf2 -+}; -+static const u8 enc_output019[] __initconst = { -+ 0xb6, 0x0d, 0xea, 0xd0, 0xfd, 0x46, 0x97, 0xec, -+ 0x2e, 0x55, 0x58, 0x23, 0x77, 0x19, 0xd0, 0x24, -+ 0x37, 0xa2 -+}; -+static const u8 enc_assoc019[] __initconst = { -+ 0x88, 0x36, 0x4f, 0xc8, 0x06, 0x05, 0x18, 0xbf -+}; -+static const u8 enc_nonce019[] __initconst = { -+ 0x61, 0x54, 0x6b, 0xa5, 0xf1, 0x72, 0x05, 0x90, -+ 0xb6, 0x04, 0x0a, 0xc6 -+}; -+static const u8 enc_key019[] __initconst = { -+ 0xc8, 0x83, 0x3d, 0xce, 0x5e, 0xa9, 0xf2, 0x48, -+ 0xaa, 0x20, 0x30, 0xea, 0xcf, 0xe7, 0x2b, 0xff, -+ 0xe6, 0x9a, 0x62, 0x0c, 0xaf, 0x79, 0x33, 0x44, -+ 0xe5, 0x71, 0x8f, 0xe0, 0xd7, 0xab, 0x1a, 0x58 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input020[] __initconst = { -+ 0xab, 0x85, 0xe9, 0xc1, 0x57, 0x17, 0x31 -+}; -+static const u8 enc_output020[] __initconst = { -+ 0x5d, 0xfe, 0x34, 0x40, 0xdb, 0xb3, 0xc3, 0xed, -+ 0x7a, 0x43, 0x4e, 0x26, 0x02, 0xd3, 0x94, 0x28, -+ 0x1e, 0x0a, 0xfa, 0x9f, 0xb7, 0xaa, 0x42 -+}; -+static const u8 enc_assoc020[] __initconst = { }; -+static const u8 enc_nonce020[] __initconst = { -+ 0x3c, 0x4e, 0x65, 0x4d, 0x66, 0x3f, 0xa4, 0x59, -+ 0x6d, 0xc5, 0x5b, 0xb7 -+}; -+static const u8 enc_key020[] __initconst = { -+ 0x55, 0x56, 0x81, 0x58, 0xd3, 0xa6, 0x48, 0x3f, -+ 0x1f, 0x70, 0x21, 0xea, 0xb6, 0x9b, 0x70, 0x3f, -+ 0x61, 0x42, 0x51, 0xca, 0xdc, 0x1a, 0xf5, 0xd3, -+ 0x4a, 0x37, 0x4f, 0xdb, 0xfc, 0x5a, 0xda, 0xc7 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input021[] __initconst = { -+ 0x4e, 0xe5, 0xcd, 0xa2, 0x0d, 0x42, 0x90 -+}; -+static const u8 enc_output021[] __initconst = { -+ 0x4b, 0xd4, 0x72, 0x12, 0x94, 0x1c, 0xe3, 0x18, -+ 0x5f, 0x14, 0x08, 0xee, 0x7f, 0xbf, 0x18, 0xf5, -+ 
0xab, 0xad, 0x6e, 0x22, 0x53, 0xa1, 0xba -+}; -+static const u8 enc_assoc021[] __initconst = { -+ 0x84, 0xe4, 0x6b, 0xe8, 0xc0, 0x91, 0x90, 0x53 -+}; -+static const u8 enc_nonce021[] __initconst = { -+ 0x58, 0x38, 0x93, 0x75, 0xc6, 0x9e, 0xe3, 0x98, -+ 0xde, 0x94, 0x83, 0x96 -+}; -+static const u8 enc_key021[] __initconst = { -+ 0xe3, 0xc0, 0x9e, 0x7f, 0xab, 0x1a, 0xef, 0xb5, -+ 0x16, 0xda, 0x6a, 0x33, 0x02, 0x2a, 0x1d, 0xd4, -+ 0xeb, 0x27, 0x2c, 0x80, 0xd5, 0x40, 0xc5, 0xda, -+ 0x52, 0xa7, 0x30, 0xf3, 0x4d, 0x84, 0x0d, 0x7f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input022[] __initconst = { -+ 0xbe, 0x33, 0x08, 0xf7, 0x2a, 0x2c, 0x6a, 0xed -+}; -+static const u8 enc_output022[] __initconst = { -+ 0x8e, 0x94, 0x39, 0xa5, 0x6e, 0xee, 0xc8, 0x17, -+ 0xfb, 0xe8, 0xa6, 0xed, 0x8f, 0xab, 0xb1, 0x93, -+ 0x75, 0x39, 0xdd, 0x6c, 0x00, 0xe9, 0x00, 0x21 -+}; -+static const u8 enc_assoc022[] __initconst = { }; -+static const u8 enc_nonce022[] __initconst = { -+ 0x4f, 0x07, 0xaf, 0xed, 0xfd, 0xc3, 0xb6, 0xc2, -+ 0x36, 0x18, 0x23, 0xd3 -+}; -+static const u8 enc_key022[] __initconst = { -+ 0x51, 0xe4, 0xbf, 0x2b, 0xad, 0x92, 0xb7, 0xaf, -+ 0xf1, 0xa4, 0xbc, 0x05, 0x55, 0x0b, 0xa8, 0x1d, -+ 0xf4, 0xb9, 0x6f, 0xab, 0xf4, 0x1c, 0x12, 0xc7, -+ 0xb0, 0x0e, 0x60, 0xe4, 0x8d, 0xb7, 0xe1, 0x52 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input023[] __initconst = { -+ 0xa4, 0xc9, 0xc2, 0x80, 0x1b, 0x71, 0xf7, 0xdf -+}; -+static const u8 enc_output023[] __initconst = { -+ 0xb9, 0xb9, 0x10, 0x43, 0x3a, 0xf0, 0x52, 0xb0, -+ 0x45, 0x30, 0xf5, 0x1a, 0xee, 0xe0, 0x24, 0xe0, -+ 0xa4, 0x45, 0xa6, 0x32, 0x8f, 0xa6, 0x7a, 0x18 -+}; -+static const u8 enc_assoc023[] __initconst = { -+ 0x66, 0xc0, 0xae, 0x70, 0x07, 0x6c, 0xb1, 0x4d -+}; -+static const u8 enc_nonce023[] __initconst = { -+ 0xb4, 0xea, 0x66, 0x6e, 0xe1, 0x19, 0x56, 0x33, -+ 0x66, 0x48, 0x4a, 0x78 -+}; -+static const u8 enc_key023[] __initconst = { -+ 0x11, 0x31, 0xc1, 0x41, 0x85, 0x77, 0xa0, 0x54, -+ 0xde, 
0x7a, 0x4a, 0xc5, 0x51, 0x95, 0x0f, 0x1a, -+ 0x05, 0x3f, 0x9a, 0xe4, 0x6e, 0x5b, 0x75, 0xfe, -+ 0x4a, 0xbd, 0x56, 0x08, 0xd7, 0xcd, 0xda, 0xdd -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input024[] __initconst = { -+ 0x42, 0xba, 0xae, 0x59, 0x78, 0xfe, 0xaf, 0x5c, -+ 0x36, 0x8d, 0x14, 0xe0 -+}; -+static const u8 enc_output024[] __initconst = { -+ 0xff, 0x7d, 0xc2, 0x03, 0xb2, 0x6c, 0x46, 0x7a, -+ 0x6b, 0x50, 0xdb, 0x33, 0x57, 0x8c, 0x0f, 0x27, -+ 0x58, 0xc2, 0xe1, 0x4e, 0x36, 0xd4, 0xfc, 0x10, -+ 0x6d, 0xcb, 0x29, 0xb4 -+}; -+static const u8 enc_assoc024[] __initconst = { }; -+static const u8 enc_nonce024[] __initconst = { -+ 0x9a, 0x59, 0xfc, 0xe2, 0x6d, 0xf0, 0x00, 0x5e, -+ 0x07, 0x53, 0x86, 0x56 -+}; -+static const u8 enc_key024[] __initconst = { -+ 0x99, 0xb6, 0x2b, 0xd5, 0xaf, 0xbe, 0x3f, 0xb0, -+ 0x15, 0xbd, 0xe9, 0x3f, 0x0a, 0xbf, 0x48, 0x39, -+ 0x57, 0xa1, 0xc3, 0xeb, 0x3c, 0xa5, 0x9c, 0xb5, -+ 0x0b, 0x39, 0xf7, 0xf8, 0xa9, 0xcc, 0x51, 0xbe -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input025[] __initconst = { -+ 0xfd, 0xc8, 0x5b, 0x94, 0xa4, 0xb2, 0xa6, 0xb7, -+ 0x59, 0xb1, 0xa0, 0xda -+}; -+static const u8 enc_output025[] __initconst = { -+ 0x9f, 0x88, 0x16, 0xde, 0x09, 0x94, 0xe9, 0x38, -+ 0xd9, 0xe5, 0x3f, 0x95, 0xd0, 0x86, 0xfc, 0x6c, -+ 0x9d, 0x8f, 0xa9, 0x15, 0xfd, 0x84, 0x23, 0xa7, -+ 0xcf, 0x05, 0x07, 0x2f -+}; -+static const u8 enc_assoc025[] __initconst = { -+ 0xa5, 0x06, 0xe1, 0xa5, 0xc6, 0x90, 0x93, 0xf9 -+}; -+static const u8 enc_nonce025[] __initconst = { -+ 0x58, 0xdb, 0xd4, 0xad, 0x2c, 0x4a, 0xd3, 0x5d, -+ 0xd9, 0x06, 0xe9, 0xce -+}; -+static const u8 enc_key025[] __initconst = { -+ 0x85, 0xf3, 0x5b, 0x62, 0x82, 0xcf, 0xf4, 0x40, -+ 0xbc, 0x10, 0x20, 0xc8, 0x13, 0x6f, 0xf2, 0x70, -+ 0x31, 0x11, 0x0f, 0xa6, 0x3e, 0xc1, 0x6f, 0x1e, -+ 0x82, 0x51, 0x18, 0xb0, 0x06, 0xb9, 0x12, 0x57 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input026[] __initconst = { -+ 0x51, 0xf8, 0xc1, 0xf7, 0x31, 0xea, 0x14, 0xac, -+ 
0xdb, 0x21, 0x0a, 0x6d, 0x97, 0x3e, 0x07 -+}; -+static const u8 enc_output026[] __initconst = { -+ 0x0b, 0x29, 0x63, 0x8e, 0x1f, 0xbd, 0xd6, 0xdf, -+ 0x53, 0x97, 0x0b, 0xe2, 0x21, 0x00, 0x42, 0x2a, -+ 0x91, 0x34, 0x08, 0x7d, 0x67, 0xa4, 0x6e, 0x79, -+ 0x17, 0x8d, 0x0a, 0x93, 0xf5, 0xe1, 0xd2 -+}; -+static const u8 enc_assoc026[] __initconst = { }; -+static const u8 enc_nonce026[] __initconst = { -+ 0x68, 0xab, 0x7f, 0xdb, 0xf6, 0x19, 0x01, 0xda, -+ 0xd4, 0x61, 0xd2, 0x3c -+}; -+static const u8 enc_key026[] __initconst = { -+ 0x67, 0x11, 0x96, 0x27, 0xbd, 0x98, 0x8e, 0xda, -+ 0x90, 0x62, 0x19, 0xe0, 0x8c, 0x0d, 0x0d, 0x77, -+ 0x9a, 0x07, 0xd2, 0x08, 0xce, 0x8a, 0x4f, 0xe0, -+ 0x70, 0x9a, 0xf7, 0x55, 0xee, 0xec, 0x6d, 0xcb -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input027[] __initconst = { -+ 0x97, 0x46, 0x9d, 0xa6, 0x67, 0xd6, 0x11, 0x0f, -+ 0x9c, 0xbd, 0xa1, 0xd1, 0xa2, 0x06, 0x73 -+}; -+static const u8 enc_output027[] __initconst = { -+ 0x32, 0xdb, 0x66, 0xc4, 0xa3, 0x81, 0x9d, 0x81, -+ 0x55, 0x74, 0x55, 0xe5, 0x98, 0x0f, 0xed, 0xfe, -+ 0xae, 0x30, 0xde, 0xc9, 0x4e, 0x6a, 0xd3, 0xa9, -+ 0xee, 0xa0, 0x6a, 0x0d, 0x70, 0x39, 0x17 -+}; -+static const u8 enc_assoc027[] __initconst = { -+ 0x64, 0x53, 0xa5, 0x33, 0x84, 0x63, 0x22, 0x12 -+}; -+static const u8 enc_nonce027[] __initconst = { -+ 0xd9, 0x5b, 0x32, 0x43, 0xaf, 0xae, 0xf7, 0x14, -+ 0xc5, 0x03, 0x5b, 0x6a -+}; -+static const u8 enc_key027[] __initconst = { -+ 0xe6, 0xf1, 0x11, 0x8d, 0x41, 0xe4, 0xb4, 0x3f, -+ 0xb5, 0x82, 0x21, 0xb7, 0xed, 0x79, 0x67, 0x38, -+ 0x34, 0xe0, 0xd8, 0xac, 0x5c, 0x4f, 0xa6, 0x0b, -+ 0xbc, 0x8b, 0xc4, 0x89, 0x3a, 0x58, 0x89, 0x4d -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input028[] __initconst = { -+ 0x54, 0x9b, 0x36, 0x5a, 0xf9, 0x13, 0xf3, 0xb0, -+ 0x81, 0x13, 0x1c, 0xcb, 0x6b, 0x82, 0x55, 0x88 -+}; -+static const u8 enc_output028[] __initconst = { -+ 0xe9, 0x11, 0x0e, 0x9f, 0x56, 0xab, 0x3c, 0xa4, -+ 0x83, 0x50, 0x0c, 0xea, 0xba, 0xb6, 0x7a, 0x13, -+ 
0x83, 0x6c, 0xca, 0xbf, 0x15, 0xa6, 0xa2, 0x2a, -+ 0x51, 0xc1, 0x07, 0x1c, 0xfa, 0x68, 0xfa, 0x0c -+}; -+static const u8 enc_assoc028[] __initconst = { }; -+static const u8 enc_nonce028[] __initconst = { -+ 0x2f, 0xcb, 0x1b, 0x38, 0xa9, 0x9e, 0x71, 0xb8, -+ 0x47, 0x40, 0xad, 0x9b -+}; -+static const u8 enc_key028[] __initconst = { -+ 0x59, 0xd4, 0xea, 0xfb, 0x4d, 0xe0, 0xcf, 0xc7, -+ 0xd3, 0xdb, 0x99, 0xa8, 0xf5, 0x4b, 0x15, 0xd7, -+ 0xb3, 0x9f, 0x0a, 0xcc, 0x8d, 0xa6, 0x97, 0x63, -+ 0xb0, 0x19, 0xc1, 0x69, 0x9f, 0x87, 0x67, 0x4a -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input029[] __initconst = { -+ 0x55, 0xa4, 0x65, 0x64, 0x4f, 0x5b, 0x65, 0x09, -+ 0x28, 0xcb, 0xee, 0x7c, 0x06, 0x32, 0x14, 0xd6 -+}; -+static const u8 enc_output029[] __initconst = { -+ 0xe4, 0xb1, 0x13, 0xcb, 0x77, 0x59, 0x45, 0xf3, -+ 0xd3, 0xa8, 0xae, 0x9e, 0xc1, 0x41, 0xc0, 0x0c, -+ 0x7c, 0x43, 0xf1, 0x6c, 0xe0, 0x96, 0xd0, 0xdc, -+ 0x27, 0xc9, 0x58, 0x49, 0xdc, 0x38, 0x3b, 0x7d -+}; -+static const u8 enc_assoc029[] __initconst = { -+ 0x03, 0x45, 0x85, 0x62, 0x1a, 0xf8, 0xd7, 0xff -+}; -+static const u8 enc_nonce029[] __initconst = { -+ 0x11, 0x8a, 0x69, 0x64, 0xc2, 0xd3, 0xe3, 0x80, -+ 0x07, 0x1f, 0x52, 0x66 -+}; -+static const u8 enc_key029[] __initconst = { -+ 0xb9, 0x07, 0xa4, 0x50, 0x75, 0x51, 0x3f, 0xe8, -+ 0xa8, 0x01, 0x9e, 0xde, 0xe3, 0xf2, 0x59, 0x14, -+ 0x87, 0xb2, 0xa0, 0x30, 0xb0, 0x3c, 0x6e, 0x1d, -+ 0x77, 0x1c, 0x86, 0x25, 0x71, 0xd2, 0xea, 0x1e -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input030[] __initconst = { -+ 0x3f, 0xf1, 0x51, 0x4b, 0x1c, 0x50, 0x39, 0x15, -+ 0x91, 0x8f, 0x0c, 0x0c, 0x31, 0x09, 0x4a, 0x6e, -+ 0x1f -+}; -+static const u8 enc_output030[] __initconst = { -+ 0x02, 0xcc, 0x3a, 0xcb, 0x5e, 0xe1, 0xfc, 0xdd, -+ 0x12, 0xa0, 0x3b, 0xb8, 0x57, 0x97, 0x64, 0x74, -+ 0xd3, 0xd8, 0x3b, 0x74, 0x63, 0xa2, 0xc3, 0x80, -+ 0x0f, 0xe9, 0x58, 0xc2, 0x8e, 0xaa, 0x29, 0x08, -+ 0x13 -+}; -+static const u8 enc_assoc030[] __initconst = { }; -+static const 
u8 enc_nonce030[] __initconst = { -+ 0x45, 0xaa, 0xa3, 0xe5, 0xd1, 0x6d, 0x2d, 0x42, -+ 0xdc, 0x03, 0x44, 0x5d -+}; -+static const u8 enc_key030[] __initconst = { -+ 0x3b, 0x24, 0x58, 0xd8, 0x17, 0x6e, 0x16, 0x21, -+ 0xc0, 0xcc, 0x24, 0xc0, 0xc0, 0xe2, 0x4c, 0x1e, -+ 0x80, 0xd7, 0x2f, 0x7e, 0xe9, 0x14, 0x9a, 0x4b, -+ 0x16, 0x61, 0x76, 0x62, 0x96, 0x16, 0xd0, 0x11 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input031[] __initconst = { -+ 0x63, 0x85, 0x8c, 0xa3, 0xe2, 0xce, 0x69, 0x88, -+ 0x7b, 0x57, 0x8a, 0x3c, 0x16, 0x7b, 0x42, 0x1c, -+ 0x9c -+}; -+static const u8 enc_output031[] __initconst = { -+ 0x35, 0x76, 0x64, 0x88, 0xd2, 0xbc, 0x7c, 0x2b, -+ 0x8d, 0x17, 0xcb, 0xbb, 0x9a, 0xbf, 0xad, 0x9e, -+ 0x6d, 0x1f, 0x39, 0x1e, 0x65, 0x7b, 0x27, 0x38, -+ 0xdd, 0xa0, 0x84, 0x48, 0xcb, 0xa2, 0x81, 0x1c, -+ 0xeb -+}; -+static const u8 enc_assoc031[] __initconst = { -+ 0x9a, 0xaf, 0x29, 0x9e, 0xee, 0xa7, 0x8f, 0x79 -+}; -+static const u8 enc_nonce031[] __initconst = { -+ 0xf0, 0x38, 0x4f, 0xb8, 0x76, 0x12, 0x14, 0x10, -+ 0x63, 0x3d, 0x99, 0x3d -+}; -+static const u8 enc_key031[] __initconst = { -+ 0xf6, 0x0c, 0x6a, 0x1b, 0x62, 0x57, 0x25, 0xf7, -+ 0x6c, 0x70, 0x37, 0xb4, 0x8f, 0xe3, 0x57, 0x7f, -+ 0xa7, 0xf7, 0xb8, 0x7b, 0x1b, 0xd5, 0xa9, 0x82, -+ 0x17, 0x6d, 0x18, 0x23, 0x06, 0xff, 0xb8, 0x70 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input032[] __initconst = { -+ 0x10, 0xf1, 0xec, 0xf9, 0xc6, 0x05, 0x84, 0x66, -+ 0x5d, 0x9a, 0xe5, 0xef, 0xe2, 0x79, 0xe7, 0xf7, -+ 0x37, 0x7e, 0xea, 0x69, 0x16, 0xd2, 0xb1, 0x11 -+}; -+static const u8 enc_output032[] __initconst = { -+ 0x42, 0xf2, 0x6c, 0x56, 0xcb, 0x4b, 0xe2, 0x1d, -+ 0x9d, 0x8d, 0x0c, 0x80, 0xfc, 0x99, 0xdd, 0xe0, -+ 0x0d, 0x75, 0xf3, 0x80, 0x74, 0xbf, 0xe7, 0x64, -+ 0x54, 0xaa, 0x7e, 0x13, 0xd4, 0x8f, 0xff, 0x7d, -+ 0x75, 0x57, 0x03, 0x94, 0x57, 0x04, 0x0a, 0x3a -+}; -+static const u8 enc_assoc032[] __initconst = { }; -+static const u8 enc_nonce032[] __initconst = { -+ 0xe6, 0xb1, 0xad, 0xf2, 0xfd, 
0x58, 0xa8, 0x76, -+ 0x2c, 0x65, 0xf3, 0x1b -+}; -+static const u8 enc_key032[] __initconst = { -+ 0x02, 0x12, 0xa8, 0xde, 0x50, 0x07, 0xed, 0x87, -+ 0xb3, 0x3f, 0x1a, 0x70, 0x90, 0xb6, 0x11, 0x4f, -+ 0x9e, 0x08, 0xce, 0xfd, 0x96, 0x07, 0xf2, 0xc2, -+ 0x76, 0xbd, 0xcf, 0xdb, 0xc5, 0xce, 0x9c, 0xd7 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input033[] __initconst = { -+ 0x92, 0x22, 0xf9, 0x01, 0x8e, 0x54, 0xfd, 0x6d, -+ 0xe1, 0x20, 0x08, 0x06, 0xa9, 0xee, 0x8e, 0x4c, -+ 0xc9, 0x04, 0xd2, 0x9f, 0x25, 0xcb, 0xa1, 0x93 -+}; -+static const u8 enc_output033[] __initconst = { -+ 0x12, 0x30, 0x32, 0x43, 0x7b, 0x4b, 0xfd, 0x69, -+ 0x20, 0xe8, 0xf7, 0xe7, 0xe0, 0x08, 0x7a, 0xe4, -+ 0x88, 0x9e, 0xbe, 0x7a, 0x0a, 0xd0, 0xe9, 0x00, -+ 0x3c, 0xf6, 0x8f, 0x17, 0x95, 0x50, 0xda, 0x63, -+ 0xd3, 0xb9, 0x6c, 0x2d, 0x55, 0x41, 0x18, 0x65 -+}; -+static const u8 enc_assoc033[] __initconst = { -+ 0x3e, 0x8b, 0xc5, 0xad, 0xe1, 0x82, 0xff, 0x08 -+}; -+static const u8 enc_nonce033[] __initconst = { -+ 0x6b, 0x28, 0x2e, 0xbe, 0xcc, 0x54, 0x1b, 0xcd, -+ 0x78, 0x34, 0xed, 0x55 -+}; -+static const u8 enc_key033[] __initconst = { -+ 0xc5, 0xbc, 0x09, 0x56, 0x56, 0x46, 0xe7, 0xed, -+ 0xda, 0x95, 0x4f, 0x1f, 0x73, 0x92, 0x23, 0xda, -+ 0xda, 0x20, 0xb9, 0x5c, 0x44, 0xab, 0x03, 0x3d, -+ 0x0f, 0xae, 0x4b, 0x02, 0x83, 0xd1, 0x8b, 0xe3 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input034[] __initconst = { -+ 0xb0, 0x53, 0x99, 0x92, 0x86, 0xa2, 0x82, 0x4f, -+ 0x42, 0xcc, 0x8c, 0x20, 0x3a, 0xb2, 0x4e, 0x2c, -+ 0x97, 0xa6, 0x85, 0xad, 0xcc, 0x2a, 0xd3, 0x26, -+ 0x62, 0x55, 0x8e, 0x55, 0xa5, 0xc7, 0x29 -+}; -+static const u8 enc_output034[] __initconst = { -+ 0x45, 0xc7, 0xd6, 0xb5, 0x3a, 0xca, 0xd4, 0xab, -+ 0xb6, 0x88, 0x76, 0xa6, 0xe9, 0x6a, 0x48, 0xfb, -+ 0x59, 0x52, 0x4d, 0x2c, 0x92, 0xc9, 0xd8, 0xa1, -+ 0x89, 0xc9, 0xfd, 0x2d, 0xb9, 0x17, 0x46, 0x56, -+ 0x6d, 0x3c, 0xa1, 0x0e, 0x31, 0x1b, 0x69, 0x5f, -+ 0x3e, 0xae, 0x15, 0x51, 0x65, 0x24, 0x93 -+}; -+static const u8 
enc_assoc034[] __initconst = { }; -+static const u8 enc_nonce034[] __initconst = { -+ 0x04, 0xa9, 0xbe, 0x03, 0x50, 0x8a, 0x5f, 0x31, -+ 0x37, 0x1a, 0x6f, 0xd2 -+}; -+static const u8 enc_key034[] __initconst = { -+ 0x2e, 0xb5, 0x1c, 0x46, 0x9a, 0xa8, 0xeb, 0x9e, -+ 0x6c, 0x54, 0xa8, 0x34, 0x9b, 0xae, 0x50, 0xa2, -+ 0x0f, 0x0e, 0x38, 0x27, 0x11, 0xbb, 0xa1, 0x15, -+ 0x2c, 0x42, 0x4f, 0x03, 0xb6, 0x67, 0x1d, 0x71 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input035[] __initconst = { -+ 0xf4, 0x52, 0x06, 0xab, 0xc2, 0x55, 0x52, 0xb2, -+ 0xab, 0xc9, 0xab, 0x7f, 0xa2, 0x43, 0x03, 0x5f, -+ 0xed, 0xaa, 0xdd, 0xc3, 0xb2, 0x29, 0x39, 0x56, -+ 0xf1, 0xea, 0x6e, 0x71, 0x56, 0xe7, 0xeb -+}; -+static const u8 enc_output035[] __initconst = { -+ 0x46, 0xa8, 0x0c, 0x41, 0x87, 0x02, 0x47, 0x20, -+ 0x08, 0x46, 0x27, 0x58, 0x00, 0x80, 0xdd, 0xe5, -+ 0xa3, 0xf4, 0xa1, 0x10, 0x93, 0xa7, 0x07, 0x6e, -+ 0xd6, 0xf3, 0xd3, 0x26, 0xbc, 0x7b, 0x70, 0x53, -+ 0x4d, 0x4a, 0xa2, 0x83, 0x5a, 0x52, 0xe7, 0x2d, -+ 0x14, 0xdf, 0x0e, 0x4f, 0x47, 0xf2, 0x5f -+}; -+static const u8 enc_assoc035[] __initconst = { -+ 0x37, 0x46, 0x18, 0xa0, 0x6e, 0xa9, 0x8a, 0x48 -+}; -+static const u8 enc_nonce035[] __initconst = { -+ 0x47, 0x0a, 0x33, 0x9e, 0xcb, 0x32, 0x19, 0xb8, -+ 0xb8, 0x1a, 0x1f, 0x8b -+}; -+static const u8 enc_key035[] __initconst = { -+ 0x7f, 0x5b, 0x74, 0xc0, 0x7e, 0xd1, 0xb4, 0x0f, -+ 0xd1, 0x43, 0x58, 0xfe, 0x2f, 0xf2, 0xa7, 0x40, -+ 0xc1, 0x16, 0xc7, 0x70, 0x65, 0x10, 0xe6, 0xa4, -+ 0x37, 0xf1, 0x9e, 0xa4, 0x99, 0x11, 0xce, 0xc4 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input036[] __initconst = { -+ 0xb9, 0xc5, 0x54, 0xcb, 0xc3, 0x6a, 0xc1, 0x8a, -+ 0xe8, 0x97, 0xdf, 0x7b, 0xee, 0xca, 0xc1, 0xdb, -+ 0xeb, 0x4e, 0xaf, 0xa1, 0x56, 0xbb, 0x60, 0xce, -+ 0x2e, 0x5d, 0x48, 0xf0, 0x57, 0x15, 0xe6, 0x78 -+}; -+static const u8 enc_output036[] __initconst = { -+ 0xea, 0x29, 0xaf, 0xa4, 0x9d, 0x36, 0xe8, 0x76, -+ 0x0f, 0x5f, 0xe1, 0x97, 0x23, 0xb9, 0x81, 0x1e, -+ 0xd5, 0xd5, 
0x19, 0x93, 0x4a, 0x44, 0x0f, 0x50, -+ 0x81, 0xac, 0x43, 0x0b, 0x95, 0x3b, 0x0e, 0x21, -+ 0x22, 0x25, 0x41, 0xaf, 0x46, 0xb8, 0x65, 0x33, -+ 0xc6, 0xb6, 0x8d, 0x2f, 0xf1, 0x08, 0xa7, 0xea -+}; -+static const u8 enc_assoc036[] __initconst = { }; -+static const u8 enc_nonce036[] __initconst = { -+ 0x72, 0xcf, 0xd9, 0x0e, 0xf3, 0x02, 0x6c, 0xa2, -+ 0x2b, 0x7e, 0x6e, 0x6a -+}; -+static const u8 enc_key036[] __initconst = { -+ 0xe1, 0x73, 0x1d, 0x58, 0x54, 0xe1, 0xb7, 0x0c, -+ 0xb3, 0xff, 0xe8, 0xb7, 0x86, 0xa2, 0xb3, 0xeb, -+ 0xf0, 0x99, 0x43, 0x70, 0x95, 0x47, 0x57, 0xb9, -+ 0xdc, 0x8c, 0x7b, 0xc5, 0x35, 0x46, 0x34, 0xa3 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input037[] __initconst = { -+ 0x6b, 0x26, 0x04, 0x99, 0x6c, 0xd3, 0x0c, 0x14, -+ 0xa1, 0x3a, 0x52, 0x57, 0xed, 0x6c, 0xff, 0xd3, -+ 0xbc, 0x5e, 0x29, 0xd6, 0xb9, 0x7e, 0xb1, 0x79, -+ 0x9e, 0xb3, 0x35, 0xe2, 0x81, 0xea, 0x45, 0x1e -+}; -+static const u8 enc_output037[] __initconst = { -+ 0x6d, 0xad, 0x63, 0x78, 0x97, 0x54, 0x4d, 0x8b, -+ 0xf6, 0xbe, 0x95, 0x07, 0xed, 0x4d, 0x1b, 0xb2, -+ 0xe9, 0x54, 0xbc, 0x42, 0x7e, 0x5d, 0xe7, 0x29, -+ 0xda, 0xf5, 0x07, 0x62, 0x84, 0x6f, 0xf2, 0xf4, -+ 0x7b, 0x99, 0x7d, 0x93, 0xc9, 0x82, 0x18, 0x9d, -+ 0x70, 0x95, 0xdc, 0x79, 0x4c, 0x74, 0x62, 0x32 -+}; -+static const u8 enc_assoc037[] __initconst = { -+ 0x23, 0x33, 0xe5, 0xce, 0x0f, 0x93, 0xb0, 0x59 -+}; -+static const u8 enc_nonce037[] __initconst = { -+ 0x26, 0x28, 0x80, 0xd4, 0x75, 0xf3, 0xda, 0xc5, -+ 0x34, 0x0d, 0xd1, 0xb8 -+}; -+static const u8 enc_key037[] __initconst = { -+ 0x27, 0xd8, 0x60, 0x63, 0x1b, 0x04, 0x85, 0xa4, -+ 0x10, 0x70, 0x2f, 0xea, 0x61, 0xbc, 0x87, 0x3f, -+ 0x34, 0x42, 0x26, 0x0c, 0xad, 0xed, 0x4a, 0xbd, -+ 0xe2, 0x5b, 0x78, 0x6a, 0x2d, 0x97, 0xf1, 0x45 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input038[] __initconst = { -+ 0x97, 0x3d, 0x0c, 0x75, 0x38, 0x26, 0xba, 0xe4, -+ 0x66, 0xcf, 0x9a, 0xbb, 0x34, 0x93, 0x15, 0x2e, -+ 0x9d, 0xe7, 0x81, 0x9e, 0x2b, 0xd0, 0xc7, 0x11, 
-+ 0x71, 0x34, 0x6b, 0x4d, 0x2c, 0xeb, 0xf8, 0x04, -+ 0x1a, 0xa3, 0xce, 0xdc, 0x0d, 0xfd, 0x7b, 0x46, -+ 0x7e, 0x26, 0x22, 0x8b, 0xc8, 0x6c, 0x9a -+}; -+static const u8 enc_output038[] __initconst = { -+ 0xfb, 0xa7, 0x8a, 0xe4, 0xf9, 0xd8, 0x08, 0xa6, -+ 0x2e, 0x3d, 0xa4, 0x0b, 0xe2, 0xcb, 0x77, 0x00, -+ 0xc3, 0x61, 0x3d, 0x9e, 0xb2, 0xc5, 0x29, 0xc6, -+ 0x52, 0xe7, 0x6a, 0x43, 0x2c, 0x65, 0x8d, 0x27, -+ 0x09, 0x5f, 0x0e, 0xb8, 0xf9, 0x40, 0xc3, 0x24, -+ 0x98, 0x1e, 0xa9, 0x35, 0xe5, 0x07, 0xf9, 0x8f, -+ 0x04, 0x69, 0x56, 0xdb, 0x3a, 0x51, 0x29, 0x08, -+ 0xbd, 0x7a, 0xfc, 0x8f, 0x2a, 0xb0, 0xa9 -+}; -+static const u8 enc_assoc038[] __initconst = { }; -+static const u8 enc_nonce038[] __initconst = { -+ 0xe7, 0x4a, 0x51, 0x5e, 0x7e, 0x21, 0x02, 0xb9, -+ 0x0b, 0xef, 0x55, 0xd2 -+}; -+static const u8 enc_key038[] __initconst = { -+ 0xcf, 0x0d, 0x40, 0xa4, 0x64, 0x4e, 0x5f, 0x51, -+ 0x81, 0x51, 0x65, 0xd5, 0x30, 0x1b, 0x22, 0x63, -+ 0x1f, 0x45, 0x44, 0xc4, 0x9a, 0x18, 0x78, 0xe3, -+ 0xa0, 0xa5, 0xe8, 0xe1, 0xaa, 0xe0, 0xf2, 0x64 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input039[] __initconst = { -+ 0xa9, 0x89, 0x95, 0x50, 0x4d, 0xf1, 0x6f, 0x74, -+ 0x8b, 0xfb, 0x77, 0x85, 0xff, 0x91, 0xee, 0xb3, -+ 0xb6, 0x60, 0xea, 0x9e, 0xd3, 0x45, 0x0c, 0x3d, -+ 0x5e, 0x7b, 0x0e, 0x79, 0xef, 0x65, 0x36, 0x59, -+ 0xa9, 0x97, 0x8d, 0x75, 0x54, 0x2e, 0xf9, 0x1c, -+ 0x45, 0x67, 0x62, 0x21, 0x56, 0x40, 0xb9 -+}; -+static const u8 enc_output039[] __initconst = { -+ 0xa1, 0xff, 0xed, 0x80, 0x76, 0x18, 0x29, 0xec, -+ 0xce, 0x24, 0x2e, 0x0e, 0x88, 0xb1, 0x38, 0x04, -+ 0x90, 0x16, 0xbc, 0xa0, 0x18, 0xda, 0x2b, 0x6e, -+ 0x19, 0x98, 0x6b, 0x3e, 0x31, 0x8c, 0xae, 0x8d, -+ 0x80, 0x61, 0x98, 0xfb, 0x4c, 0x52, 0x7c, 0xc3, -+ 0x93, 0x50, 0xeb, 0xdd, 0xea, 0xc5, 0x73, 0xc4, -+ 0xcb, 0xf0, 0xbe, 0xfd, 0xa0, 0xb7, 0x02, 0x42, -+ 0xc6, 0x40, 0xd7, 0xcd, 0x02, 0xd7, 0xa3 -+}; -+static const u8 enc_assoc039[] __initconst = { -+ 0xb3, 0xe4, 0x06, 0x46, 0x83, 0xb0, 0x2d, 0x84 -+}; -+static 
const u8 enc_nonce039[] __initconst = { -+ 0xd4, 0xd8, 0x07, 0x34, 0x16, 0x83, 0x82, 0x5b, -+ 0x31, 0xcd, 0x4d, 0x95 -+}; -+static const u8 enc_key039[] __initconst = { -+ 0x6c, 0xbf, 0xd7, 0x1c, 0x64, 0x5d, 0x18, 0x4c, -+ 0xf5, 0xd2, 0x3c, 0x40, 0x2b, 0xdb, 0x0d, 0x25, -+ 0xec, 0x54, 0x89, 0x8c, 0x8a, 0x02, 0x73, 0xd4, -+ 0x2e, 0xb5, 0xbe, 0x10, 0x9f, 0xdc, 0xb2, 0xac -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input040[] __initconst = { -+ 0xd0, 0x96, 0x80, 0x31, 0x81, 0xbe, 0xef, 0x9e, -+ 0x00, 0x8f, 0xf8, 0x5d, 0x5d, 0xdc, 0x38, 0xdd, -+ 0xac, 0xf0, 0xf0, 0x9e, 0xe5, 0xf7, 0xe0, 0x7f, -+ 0x1e, 0x40, 0x79, 0xcb, 0x64, 0xd0, 0xdc, 0x8f, -+ 0x5e, 0x67, 0x11, 0xcd, 0x49, 0x21, 0xa7, 0x88, -+ 0x7d, 0xe7, 0x6e, 0x26, 0x78, 0xfd, 0xc6, 0x76, -+ 0x18, 0xf1, 0x18, 0x55, 0x86, 0xbf, 0xea, 0x9d, -+ 0x4c, 0x68, 0x5d, 0x50, 0xe4, 0xbb, 0x9a, 0x82 -+}; -+static const u8 enc_output040[] __initconst = { -+ 0x9a, 0x4e, 0xf2, 0x2b, 0x18, 0x16, 0x77, 0xb5, -+ 0x75, 0x5c, 0x08, 0xf7, 0x47, 0xc0, 0xf8, 0xd8, -+ 0xe8, 0xd4, 0xc1, 0x8a, 0x9c, 0xc2, 0x40, 0x5c, -+ 0x12, 0xbb, 0x51, 0xbb, 0x18, 0x72, 0xc8, 0xe8, -+ 0xb8, 0x77, 0x67, 0x8b, 0xec, 0x44, 0x2c, 0xfc, -+ 0xbb, 0x0f, 0xf4, 0x64, 0xa6, 0x4b, 0x74, 0x33, -+ 0x2c, 0xf0, 0x72, 0x89, 0x8c, 0x7e, 0x0e, 0xdd, -+ 0xf6, 0x23, 0x2e, 0xa6, 0xe2, 0x7e, 0xfe, 0x50, -+ 0x9f, 0xf3, 0x42, 0x7a, 0x0f, 0x32, 0xfa, 0x56, -+ 0x6d, 0x9c, 0xa0, 0xa7, 0x8a, 0xef, 0xc0, 0x13 -+}; -+static const u8 enc_assoc040[] __initconst = { }; -+static const u8 enc_nonce040[] __initconst = { -+ 0xd6, 0x10, 0x40, 0xa3, 0x13, 0xed, 0x49, 0x28, -+ 0x23, 0xcc, 0x06, 0x5b -+}; -+static const u8 enc_key040[] __initconst = { -+ 0x5b, 0x1d, 0x10, 0x35, 0xc0, 0xb1, 0x7e, 0xe0, -+ 0xb0, 0x44, 0x47, 0x67, 0xf8, 0x0a, 0x25, 0xb8, -+ 0xc1, 0xb7, 0x41, 0xf4, 0xb5, 0x0a, 0x4d, 0x30, -+ 0x52, 0x22, 0x6b, 0xaa, 0x1c, 0x6f, 0xb7, 0x01 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input041[] __initconst = { -+ 0x94, 0xee, 0x16, 0x6d, 0x6d, 0x6e, 0xcf, 0x88, 
-+ 0x32, 0x43, 0x71, 0x36, 0xb4, 0xae, 0x80, 0x5d, -+ 0x42, 0x88, 0x64, 0x35, 0x95, 0x86, 0xd9, 0x19, -+ 0x3a, 0x25, 0x01, 0x62, 0x93, 0xed, 0xba, 0x44, -+ 0x3c, 0x58, 0xe0, 0x7e, 0x7b, 0x71, 0x95, 0xec, -+ 0x5b, 0xd8, 0x45, 0x82, 0xa9, 0xd5, 0x6c, 0x8d, -+ 0x4a, 0x10, 0x8c, 0x7d, 0x7c, 0xe3, 0x4e, 0x6c, -+ 0x6f, 0x8e, 0xa1, 0xbe, 0xc0, 0x56, 0x73, 0x17 -+}; -+static const u8 enc_output041[] __initconst = { -+ 0x5f, 0xbb, 0xde, 0xcc, 0x34, 0xbe, 0x20, 0x16, -+ 0x14, 0xf6, 0x36, 0x03, 0x1e, 0xeb, 0x42, 0xf1, -+ 0xca, 0xce, 0x3c, 0x79, 0xa1, 0x2c, 0xff, 0xd8, -+ 0x71, 0xee, 0x8e, 0x73, 0x82, 0x0c, 0x82, 0x97, -+ 0x49, 0xf1, 0xab, 0xb4, 0x29, 0x43, 0x67, 0x84, -+ 0x9f, 0xb6, 0xc2, 0xaa, 0x56, 0xbd, 0xa8, 0xa3, -+ 0x07, 0x8f, 0x72, 0x3d, 0x7c, 0x1c, 0x85, 0x20, -+ 0x24, 0xb0, 0x17, 0xb5, 0x89, 0x73, 0xfb, 0x1e, -+ 0x09, 0x26, 0x3d, 0xa7, 0xb4, 0xcb, 0x92, 0x14, -+ 0x52, 0xf9, 0x7d, 0xca, 0x40, 0xf5, 0x80, 0xec -+}; -+static const u8 enc_assoc041[] __initconst = { -+ 0x71, 0x93, 0xf6, 0x23, 0x66, 0x33, 0x21, 0xa2 -+}; -+static const u8 enc_nonce041[] __initconst = { -+ 0xd3, 0x1c, 0x21, 0xab, 0xa1, 0x75, 0xb7, 0x0d, -+ 0xe4, 0xeb, 0xb1, 0x9c -+}; -+static const u8 enc_key041[] __initconst = { -+ 0x97, 0xd6, 0x35, 0xc4, 0xf4, 0x75, 0x74, 0xd9, -+ 0x99, 0x8a, 0x90, 0x87, 0x5d, 0xa1, 0xd3, 0xa2, -+ 0x84, 0xb7, 0x55, 0xb2, 0xd3, 0x92, 0x97, 0xa5, -+ 0x72, 0x52, 0x35, 0x19, 0x0e, 0x10, 0xa9, 0x7e -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input042[] __initconst = { -+ 0xb4, 0x29, 0xeb, 0x80, 0xfb, 0x8f, 0xe8, 0xba, -+ 0xed, 0xa0, 0xc8, 0x5b, 0x9c, 0x33, 0x34, 0x58, -+ 0xe7, 0xc2, 0x99, 0x2e, 0x55, 0x84, 0x75, 0x06, -+ 0x9d, 0x12, 0xd4, 0x5c, 0x22, 0x21, 0x75, 0x64, -+ 0x12, 0x15, 0x88, 0x03, 0x22, 0x97, 0xef, 0xf5, -+ 0x67, 0x83, 0x74, 0x2a, 0x5f, 0xc2, 0x2d, 0x74, -+ 0x10, 0xff, 0xb2, 0x9d, 0x66, 0x09, 0x86, 0x61, -+ 0xd7, 0x6f, 0x12, 0x6c, 0x3c, 0x27, 0x68, 0x9e, -+ 0x43, 0xb3, 0x72, 0x67, 0xca, 0xc5, 0xa3, 0xa6, -+ 0xd3, 0xab, 0x49, 0xe3, 0x91, 0xda, 0x29, 
0xcd, -+ 0x30, 0x54, 0xa5, 0x69, 0x2e, 0x28, 0x07, 0xe4, -+ 0xc3, 0xea, 0x46, 0xc8, 0x76, 0x1d, 0x50, 0xf5, -+ 0x92 -+}; -+static const u8 enc_output042[] __initconst = { -+ 0xd0, 0x10, 0x2f, 0x6c, 0x25, 0x8b, 0xf4, 0x97, -+ 0x42, 0xce, 0xc3, 0x4c, 0xf2, 0xd0, 0xfe, 0xdf, -+ 0x23, 0xd1, 0x05, 0xfb, 0x4c, 0x84, 0xcf, 0x98, -+ 0x51, 0x5e, 0x1b, 0xc9, 0xa6, 0x4f, 0x8a, 0xd5, -+ 0xbe, 0x8f, 0x07, 0x21, 0xbd, 0xe5, 0x06, 0x45, -+ 0xd0, 0x00, 0x83, 0xc3, 0xa2, 0x63, 0xa3, 0x10, -+ 0x53, 0xb7, 0x60, 0x24, 0x5f, 0x52, 0xae, 0x28, -+ 0x66, 0xa5, 0xec, 0x83, 0xb1, 0x9f, 0x61, 0xbe, -+ 0x1d, 0x30, 0xd5, 0xc5, 0xd9, 0xfe, 0xcc, 0x4c, -+ 0xbb, 0xe0, 0x8f, 0xd3, 0x85, 0x81, 0x3a, 0x2a, -+ 0xa3, 0x9a, 0x00, 0xff, 0x9c, 0x10, 0xf7, 0xf2, -+ 0x37, 0x02, 0xad, 0xd1, 0xe4, 0xb2, 0xff, 0xa3, -+ 0x1c, 0x41, 0x86, 0x5f, 0xc7, 0x1d, 0xe1, 0x2b, -+ 0x19, 0x61, 0x21, 0x27, 0xce, 0x49, 0x99, 0x3b, -+ 0xb0 -+}; -+static const u8 enc_assoc042[] __initconst = { }; -+static const u8 enc_nonce042[] __initconst = { -+ 0x17, 0xc8, 0x6a, 0x8a, 0xbb, 0xb7, 0xe0, 0x03, -+ 0xac, 0xde, 0x27, 0x99 -+}; -+static const u8 enc_key042[] __initconst = { -+ 0xfe, 0x6e, 0x55, 0xbd, 0xae, 0xd1, 0xf7, 0x28, -+ 0x4c, 0xa5, 0xfc, 0x0f, 0x8c, 0x5f, 0x2b, 0x8d, -+ 0xf5, 0x6d, 0xc0, 0xf4, 0x9e, 0x8c, 0xa6, 0x6a, -+ 0x41, 0x99, 0x5e, 0x78, 0x33, 0x51, 0xf9, 0x01 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input043[] __initconst = { -+ 0xce, 0xb5, 0x34, 0xce, 0x50, 0xdc, 0x23, 0xff, -+ 0x63, 0x8a, 0xce, 0x3e, 0xf6, 0x3a, 0xb2, 0xcc, -+ 0x29, 0x73, 0xee, 0xad, 0xa8, 0x07, 0x85, 0xfc, -+ 0x16, 0x5d, 0x06, 0xc2, 0xf5, 0x10, 0x0f, 0xf5, -+ 0xe8, 0xab, 0x28, 0x82, 0xc4, 0x75, 0xaf, 0xcd, -+ 0x05, 0xcc, 0xd4, 0x9f, 0x2e, 0x7d, 0x8f, 0x55, -+ 0xef, 0x3a, 0x72, 0xe3, 0xdc, 0x51, 0xd6, 0x85, -+ 0x2b, 0x8e, 0x6b, 0x9e, 0x7a, 0xec, 0xe5, 0x7b, -+ 0xe6, 0x55, 0x6b, 0x0b, 0x6d, 0x94, 0x13, 0xe3, -+ 0x3f, 0xc5, 0xfc, 0x24, 0xa9, 0xa2, 0x05, 0xad, -+ 0x59, 0x57, 0x4b, 0xb3, 0x9d, 0x94, 0x4a, 0x92, -+ 0xdc, 0x47, 0x97, 
0x0d, 0x84, 0xa6, 0xad, 0x31, -+ 0x76 -+}; -+static const u8 enc_output043[] __initconst = { -+ 0x75, 0x45, 0x39, 0x1b, 0x51, 0xde, 0x01, 0xd5, -+ 0xc5, 0x3d, 0xfa, 0xca, 0x77, 0x79, 0x09, 0x06, -+ 0x3e, 0x58, 0xed, 0xee, 0x4b, 0xb1, 0x22, 0x7e, -+ 0x71, 0x10, 0xac, 0x4d, 0x26, 0x20, 0xc2, 0xae, -+ 0xc2, 0xf8, 0x48, 0xf5, 0x6d, 0xee, 0xb0, 0x37, -+ 0xa8, 0xdc, 0xed, 0x75, 0xaf, 0xa8, 0xa6, 0xc8, -+ 0x90, 0xe2, 0xde, 0xe4, 0x2f, 0x95, 0x0b, 0xb3, -+ 0x3d, 0x9e, 0x24, 0x24, 0xd0, 0x8a, 0x50, 0x5d, -+ 0x89, 0x95, 0x63, 0x97, 0x3e, 0xd3, 0x88, 0x70, -+ 0xf3, 0xde, 0x6e, 0xe2, 0xad, 0xc7, 0xfe, 0x07, -+ 0x2c, 0x36, 0x6c, 0x14, 0xe2, 0xcf, 0x7c, 0xa6, -+ 0x2f, 0xb3, 0xd3, 0x6b, 0xee, 0x11, 0x68, 0x54, -+ 0x61, 0xb7, 0x0d, 0x44, 0xef, 0x8c, 0x66, 0xc5, -+ 0xc7, 0xbb, 0xf1, 0x0d, 0xca, 0xdd, 0x7f, 0xac, -+ 0xf6 -+}; -+static const u8 enc_assoc043[] __initconst = { -+ 0xa1, 0x1c, 0x40, 0xb6, 0x03, 0x76, 0x73, 0x30 -+}; -+static const u8 enc_nonce043[] __initconst = { -+ 0x46, 0x36, 0x2f, 0x45, 0xd6, 0x37, 0x9e, 0x63, -+ 0xe5, 0x22, 0x94, 0x60 -+}; -+static const u8 enc_key043[] __initconst = { -+ 0xaa, 0xbc, 0x06, 0x34, 0x74, 0xe6, 0x5c, 0x4c, -+ 0x3e, 0x9b, 0xdc, 0x48, 0x0d, 0xea, 0x97, 0xb4, -+ 0x51, 0x10, 0xc8, 0x61, 0x88, 0x46, 0xff, 0x6b, -+ 0x15, 0xbd, 0xd2, 0xa4, 0xa5, 0x68, 0x2c, 0x4e -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input044[] __initconst = { -+ 0xe5, 0xcc, 0xaa, 0x44, 0x1b, 0xc8, 0x14, 0x68, -+ 0x8f, 0x8f, 0x6e, 0x8f, 0x28, 0xb5, 0x00, 0xb2 -+}; -+static const u8 enc_output044[] __initconst = { -+ 0x7e, 0x72, 0xf5, 0xa1, 0x85, 0xaf, 0x16, 0xa6, -+ 0x11, 0x92, 0x1b, 0x43, 0x8f, 0x74, 0x9f, 0x0b, -+ 0x12, 0x42, 0xc6, 0x70, 0x73, 0x23, 0x34, 0x02, -+ 0x9a, 0xdf, 0xe1, 0xc5, 0x00, 0x16, 0x51, 0xe4 -+}; -+static const u8 enc_assoc044[] __initconst = { -+ 0x02 -+}; -+static const u8 enc_nonce044[] __initconst = { -+ 0x87, 0x34, 0x5f, 0x10, 0x55, 0xfd, 0x9e, 0x21, -+ 0x02, 0xd5, 0x06, 0x56 -+}; -+static const u8 enc_key044[] __initconst = { -+ 
0x7d, 0x00, 0xb4, 0x80, 0x95, 0xad, 0xfa, 0x32, -+ 0x72, 0x05, 0x06, 0x07, 0xb2, 0x64, 0x18, 0x50, -+ 0x02, 0xba, 0x99, 0x95, 0x7c, 0x49, 0x8b, 0xe0, -+ 0x22, 0x77, 0x0f, 0x2c, 0xe2, 0xf3, 0x14, 0x3c -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input045[] __initconst = { -+ 0x02, 0xcd, 0xe1, 0x68, 0xfb, 0xa3, 0xf5, 0x44, -+ 0xbb, 0xd0, 0x33, 0x2f, 0x7a, 0xde, 0xad, 0xa8 -+}; -+static const u8 enc_output045[] __initconst = { -+ 0x85, 0xf2, 0x9a, 0x71, 0x95, 0x57, 0xcd, 0xd1, -+ 0x4d, 0x1f, 0x8f, 0xff, 0xab, 0x6d, 0x9e, 0x60, -+ 0x73, 0x2c, 0xa3, 0x2b, 0xec, 0xd5, 0x15, 0xa1, -+ 0xed, 0x35, 0x3f, 0x54, 0x2e, 0x99, 0x98, 0x58 -+}; -+static const u8 enc_assoc045[] __initconst = { -+ 0xb6, 0x48 -+}; -+static const u8 enc_nonce045[] __initconst = { -+ 0x87, 0xa3, 0x16, 0x3e, 0xc0, 0x59, 0x8a, 0xd9, -+ 0x5b, 0x3a, 0xa7, 0x13 -+}; -+static const u8 enc_key045[] __initconst = { -+ 0x64, 0x32, 0x71, 0x7f, 0x1d, 0xb8, 0x5e, 0x41, -+ 0xac, 0x78, 0x36, 0xbc, 0xe2, 0x51, 0x85, 0xa0, -+ 0x80, 0xd5, 0x76, 0x2b, 0x9e, 0x2b, 0x18, 0x44, -+ 0x4b, 0x6e, 0xc7, 0x2c, 0x3b, 0xd8, 0xe4, 0xdc -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input046[] __initconst = { -+ 0x16, 0xdd, 0xd2, 0x3f, 0xf5, 0x3f, 0x3d, 0x23, -+ 0xc0, 0x63, 0x34, 0x48, 0x70, 0x40, 0xeb, 0x47 -+}; -+static const u8 enc_output046[] __initconst = { -+ 0xc1, 0xb2, 0x95, 0x93, 0x6d, 0x56, 0xfa, 0xda, -+ 0xc0, 0x3e, 0x5f, 0x74, 0x2b, 0xff, 0x73, 0xa1, -+ 0x39, 0xc4, 0x57, 0xdb, 0xab, 0x66, 0x38, 0x2b, -+ 0xab, 0xb3, 0xb5, 0x58, 0x00, 0xcd, 0xa5, 0xb8 -+}; -+static const u8 enc_assoc046[] __initconst = { -+ 0xbd, 0x4c, 0xd0, 0x2f, 0xc7, 0x50, 0x2b, 0xbd, -+ 0xbd, 0xf6, 0xc9, 0xa3, 0xcb, 0xe8, 0xf0 -+}; -+static const u8 enc_nonce046[] __initconst = { -+ 0x6f, 0x57, 0x3a, 0xa8, 0x6b, 0xaa, 0x49, 0x2b, -+ 0xa4, 0x65, 0x96, 0xdf -+}; -+static const u8 enc_key046[] __initconst = { -+ 0x8e, 0x34, 0xcf, 0x73, 0xd2, 0x45, 0xa1, 0x08, -+ 0x2a, 0x92, 0x0b, 0x86, 0x36, 0x4e, 0xb8, 0x96, -+ 0xc4, 0x94, 0x64, 0x67, 
0xbc, 0xb3, 0xd5, 0x89, -+ 0x29, 0xfc, 0xb3, 0x66, 0x90, 0xe6, 0x39, 0x4f -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input047[] __initconst = { -+ 0x62, 0x3b, 0x78, 0x50, 0xc3, 0x21, 0xe2, 0xcf, -+ 0x0c, 0x6f, 0xbc, 0xc8, 0xdf, 0xd1, 0xaf, 0xf2 -+}; -+static const u8 enc_output047[] __initconst = { -+ 0xc8, 0x4c, 0x9b, 0xb7, 0xc6, 0x1c, 0x1b, 0xcb, -+ 0x17, 0x77, 0x2a, 0x1c, 0x50, 0x0c, 0x50, 0x95, -+ 0xdb, 0xad, 0xf7, 0xa5, 0x13, 0x8c, 0xa0, 0x34, -+ 0x59, 0xa2, 0xcd, 0x65, 0x83, 0x1e, 0x09, 0x2f -+}; -+static const u8 enc_assoc047[] __initconst = { -+ 0x89, 0xcc, 0xe9, 0xfb, 0x47, 0x44, 0x1d, 0x07, -+ 0xe0, 0x24, 0x5a, 0x66, 0xfe, 0x8b, 0x77, 0x8b -+}; -+static const u8 enc_nonce047[] __initconst = { -+ 0x1a, 0x65, 0x18, 0xf0, 0x2e, 0xde, 0x1d, 0xa6, -+ 0x80, 0x92, 0x66, 0xd9 -+}; -+static const u8 enc_key047[] __initconst = { -+ 0xcb, 0x55, 0x75, 0xf5, 0xc7, 0xc4, 0x5c, 0x91, -+ 0xcf, 0x32, 0x0b, 0x13, 0x9f, 0xb5, 0x94, 0x23, -+ 0x75, 0x60, 0xd0, 0xa3, 0xe6, 0xf8, 0x65, 0xa6, -+ 0x7d, 0x4f, 0x63, 0x3f, 0x2c, 0x08, 0xf0, 0x16 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input048[] __initconst = { -+ 0x87, 0xb3, 0xa4, 0xd7, 0xb2, 0x6d, 0x8d, 0x32, -+ 0x03, 0xa0, 0xde, 0x1d, 0x64, 0xef, 0x82, 0xe3 -+}; -+static const u8 enc_output048[] __initconst = { -+ 0x94, 0xbc, 0x80, 0x62, 0x1e, 0xd1, 0xe7, 0x1b, -+ 0x1f, 0xd2, 0xb5, 0xc3, 0xa1, 0x5e, 0x35, 0x68, -+ 0x33, 0x35, 0x11, 0x86, 0x17, 0x96, 0x97, 0x84, -+ 0x01, 0x59, 0x8b, 0x96, 0x37, 0x22, 0xf5, 0xb3 -+}; -+static const u8 enc_assoc048[] __initconst = { -+ 0xd1, 0x9f, 0x2d, 0x98, 0x90, 0x95, 0xf7, 0xab, -+ 0x03, 0xa5, 0xfd, 0xe8, 0x44, 0x16, 0xe0, 0x0c, -+ 0x0e -+}; -+static const u8 enc_nonce048[] __initconst = { -+ 0x56, 0x4d, 0xee, 0x49, 0xab, 0x00, 0xd2, 0x40, -+ 0xfc, 0x10, 0x68, 0xc3 -+}; -+static const u8 enc_key048[] __initconst = { -+ 0xa5, 0x56, 0x9e, 0x72, 0x9a, 0x69, 0xb2, 0x4b, -+ 0xa6, 0xe0, 0xff, 0x15, 0xc4, 0x62, 0x78, 0x97, -+ 0x43, 0x68, 0x24, 0xc9, 0x41, 0xe9, 0xd0, 0x0b, 
-+ 0x2e, 0x93, 0xfd, 0xdc, 0x4b, 0xa7, 0x76, 0x57 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input049[] __initconst = { -+ 0xe6, 0x01, 0xb3, 0x85, 0x57, 0x79, 0x7d, 0xa2, -+ 0xf8, 0xa4, 0x10, 0x6a, 0x08, 0x9d, 0x1d, 0xa6 -+}; -+static const u8 enc_output049[] __initconst = { -+ 0x29, 0x9b, 0x5d, 0x3f, 0x3d, 0x03, 0xc0, 0x87, -+ 0x20, 0x9a, 0x16, 0xe2, 0x85, 0x14, 0x31, 0x11, -+ 0x4b, 0x45, 0x4e, 0xd1, 0x98, 0xde, 0x11, 0x7e, -+ 0x83, 0xec, 0x49, 0xfa, 0x8d, 0x85, 0x08, 0xd6 -+}; -+static const u8 enc_assoc049[] __initconst = { -+ 0x5e, 0x64, 0x70, 0xfa, 0xcd, 0x99, 0xc1, 0xd8, -+ 0x1e, 0x37, 0xcd, 0x44, 0x01, 0x5f, 0xe1, 0x94, -+ 0x80, 0xa2, 0xa4, 0xd3, 0x35, 0x2a, 0x4f, 0xf5, -+ 0x60, 0xc0, 0x64, 0x0f, 0xdb, 0xda -+}; -+static const u8 enc_nonce049[] __initconst = { -+ 0xdf, 0x87, 0x13, 0xe8, 0x7e, 0xc3, 0xdb, 0xcf, -+ 0xad, 0x14, 0xd5, 0x3e -+}; -+static const u8 enc_key049[] __initconst = { -+ 0x56, 0x20, 0x74, 0x65, 0xb4, 0xe4, 0x8e, 0x6d, -+ 0x04, 0x63, 0x0f, 0x4a, 0x42, 0xf3, 0x5c, 0xfc, -+ 0x16, 0x3a, 0xb2, 0x89, 0xc2, 0x2a, 0x2b, 0x47, -+ 0x84, 0xf6, 0xf9, 0x29, 0x03, 0x30, 0xbe, 0xe0 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input050[] __initconst = { -+ 0xdc, 0x9e, 0x9e, 0xaf, 0x11, 0xe3, 0x14, 0x18, -+ 0x2d, 0xf6, 0xa4, 0xeb, 0xa1, 0x7a, 0xec, 0x9c -+}; -+static const u8 enc_output050[] __initconst = { -+ 0x60, 0x5b, 0xbf, 0x90, 0xae, 0xb9, 0x74, 0xf6, -+ 0x60, 0x2b, 0xc7, 0x78, 0x05, 0x6f, 0x0d, 0xca, -+ 0x38, 0xea, 0x23, 0xd9, 0x90, 0x54, 0xb4, 0x6b, -+ 0x42, 0xff, 0xe0, 0x04, 0x12, 0x9d, 0x22, 0x04 -+}; -+static const u8 enc_assoc050[] __initconst = { -+ 0xba, 0x44, 0x6f, 0x6f, 0x9a, 0x0c, 0xed, 0x22, -+ 0x45, 0x0f, 0xeb, 0x10, 0x73, 0x7d, 0x90, 0x07, -+ 0xfd, 0x69, 0xab, 0xc1, 0x9b, 0x1d, 0x4d, 0x90, -+ 0x49, 0xa5, 0x55, 0x1e, 0x86, 0xec, 0x2b, 0x37 -+}; -+static const u8 enc_nonce050[] __initconst = { -+ 0x8d, 0xf4, 0xb1, 0x5a, 0x88, 0x8c, 0x33, 0x28, -+ 0x6a, 0x7b, 0x76, 0x51 -+}; -+static const u8 enc_key050[] 
__initconst = { -+ 0x39, 0x37, 0x98, 0x6a, 0xf8, 0x6d, 0xaf, 0xc1, -+ 0xba, 0x0c, 0x46, 0x72, 0xd8, 0xab, 0xc4, 0x6c, -+ 0x20, 0x70, 0x62, 0x68, 0x2d, 0x9c, 0x26, 0x4a, -+ 0xb0, 0x6d, 0x6c, 0x58, 0x07, 0x20, 0x51, 0x30 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input051[] __initconst = { -+ 0x81, 0xce, 0x84, 0xed, 0xe9, 0xb3, 0x58, 0x59, -+ 0xcc, 0x8c, 0x49, 0xa8, 0xf6, 0xbe, 0x7d, 0xc6 -+}; -+static const u8 enc_output051[] __initconst = { -+ 0x7b, 0x7c, 0xe0, 0xd8, 0x24, 0x80, 0x9a, 0x70, -+ 0xde, 0x32, 0x56, 0x2c, 0xcf, 0x2c, 0x2b, 0xbd, -+ 0x15, 0xd4, 0x4a, 0x00, 0xce, 0x0d, 0x19, 0xb4, -+ 0x23, 0x1f, 0x92, 0x1e, 0x22, 0xbc, 0x0a, 0x43 -+}; -+static const u8 enc_assoc051[] __initconst = { -+ 0xd4, 0x1a, 0x82, 0x8d, 0x5e, 0x71, 0x82, 0x92, -+ 0x47, 0x02, 0x19, 0x05, 0x40, 0x2e, 0xa2, 0x57, -+ 0xdc, 0xcb, 0xc3, 0xb8, 0x0f, 0xcd, 0x56, 0x75, -+ 0x05, 0x6b, 0x68, 0xbb, 0x59, 0xe6, 0x2e, 0x88, -+ 0x73 -+}; -+static const u8 enc_nonce051[] __initconst = { -+ 0xbe, 0x40, 0xe5, 0xf1, 0xa1, 0x18, 0x17, 0xa0, -+ 0xa8, 0xfa, 0x89, 0x49 -+}; -+static const u8 enc_key051[] __initconst = { -+ 0x36, 0x37, 0x2a, 0xbc, 0xdb, 0x78, 0xe0, 0x27, -+ 0x96, 0x46, 0xac, 0x3d, 0x17, 0x6b, 0x96, 0x74, -+ 0xe9, 0x15, 0x4e, 0xec, 0xf0, 0xd5, 0x46, 0x9c, -+ 0x65, 0x1e, 0xc7, 0xe1, 0x6b, 0x4c, 0x11, 0x99 -+}; -+ -+/* wycheproof - misc */ -+static const u8 enc_input052[] __initconst = { -+ 0xa6, 0x67, 0x47, 0xc8, 0x9e, 0x85, 0x7a, 0xf3, -+ 0xa1, 0x8e, 0x2c, 0x79, 0x50, 0x00, 0x87, 0xed -+}; -+static const u8 enc_output052[] __initconst = { -+ 0xca, 0x82, 0xbf, 0xf3, 0xe2, 0xf3, 0x10, 0xcc, -+ 0xc9, 0x76, 0x67, 0x2c, 0x44, 0x15, 0xe6, 0x9b, -+ 0x57, 0x63, 0x8c, 0x62, 0xa5, 0xd8, 0x5d, 0xed, -+ 0x77, 0x4f, 0x91, 0x3c, 0x81, 0x3e, 0xa0, 0x32 -+}; -+static const u8 enc_assoc052[] __initconst = { -+ 0x3f, 0x2d, 0xd4, 0x9b, 0xbf, 0x09, 0xd6, 0x9a, -+ 0x78, 0xa3, 0xd8, 0x0e, 0xa2, 0x56, 0x66, 0x14, -+ 0xfc, 0x37, 0x94, 0x74, 0x19, 0x6c, 0x1a, 0xae, -+ 0x84, 0x58, 0x3d, 0xa7, 0x3d, 
0x7f, 0xf8, 0x5c, -+ 0x6f, 0x42, 0xca, 0x42, 0x05, 0x6a, 0x97, 0x92, -+ 0xcc, 0x1b, 0x9f, 0xb3, 0xc7, 0xd2, 0x61 -+}; -+static const u8 enc_nonce052[] __initconst = { -+ 0x84, 0xc8, 0x7d, 0xae, 0x4e, 0xee, 0x27, 0x73, -+ 0x0e, 0xc3, 0x5d, 0x12 -+}; -+static const u8 enc_key052[] __initconst = { -+ 0x9f, 0x14, 0x79, 0xed, 0x09, 0x7d, 0x7f, 0xe5, -+ 0x29, 0xc1, 0x1f, 0x2f, 0x5a, 0xdd, 0x9a, 0xaf, -+ 0xf4, 0xa1, 0xca, 0x0b, 0x68, 0x99, 0x7a, 0x2c, -+ 0xb7, 0xf7, 0x97, 0x49, 0xbd, 0x90, 0xaa, 0xf4 -+}; -+ - /* wycheproof - misc */ - static const u8 enc_input053[] __initconst = { - 0x25, 0x6d, 0x40, 0x88, 0x80, 0x94, 0x17, 0x83, -@@ -2760,6 +3859,126 @@ static const u8 enc_key073[] __initconst - }; - - /* wycheproof - checking for int overflows */ -+static const u8 enc_input074[] __initconst = { -+ 0xd4, 0x50, 0x0b, 0xf0, 0x09, 0x49, 0x35, 0x51, -+ 0xc3, 0x80, 0xad, 0xf5, 0x2c, 0x57, 0x3a, 0x69, -+ 0xdf, 0x7e, 0x8b, 0x76, 0x24, 0x63, 0x33, 0x0f, -+ 0xac, 0xc1, 0x6a, 0x57, 0x26, 0xbe, 0x71, 0x90, -+ 0xc6, 0x3c, 0x5a, 0x1c, 0x92, 0x65, 0x84, 0xa0, -+ 0x96, 0x75, 0x68, 0x28, 0xdc, 0xdc, 0x64, 0xac, -+ 0xdf, 0x96, 0x3d, 0x93, 0x1b, 0xf1, 0xda, 0xe2, -+ 0x38, 0xf3, 0xf1, 0x57, 0x22, 0x4a, 0xc4, 0xb5, -+ 0x42, 0xd7, 0x85, 0xb0, 0xdd, 0x84, 0xdb, 0x6b, -+ 0xe3, 0xbc, 0x5a, 0x36, 0x63, 0xe8, 0x41, 0x49, -+ 0xff, 0xbe, 0xd0, 0x9e, 0x54, 0xf7, 0x8f, 0x16, -+ 0xa8, 0x22, 0x3b, 0x24, 0xcb, 0x01, 0x9f, 0x58, -+ 0xb2, 0x1b, 0x0e, 0x55, 0x1e, 0x7a, 0xa0, 0x73, -+ 0x27, 0x62, 0x95, 0x51, 0x37, 0x6c, 0xcb, 0xc3, -+ 0x93, 0x76, 0x71, 0xa0, 0x62, 0x9b, 0xd9, 0x5c, -+ 0x99, 0x15, 0xc7, 0x85, 0x55, 0x77, 0x1e, 0x7a -+}; -+static const u8 enc_output074[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x0b, 0x30, 0x0d, 0x8d, 0xa5, 0x6c, 0x21, 0x85, -+ 0x75, 0x52, 0x79, 0x55, 0x3c, 0x4c, 0x82, 0xca -+}; -+static const u8 enc_assoc074[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce074[] __initconst = { -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x00, 0x02, 0x50, 0x6e -+}; -+static const u8 enc_key074[] __initconst = { -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 -+}; -+ -+/* wycheproof - checking for int overflows */ -+static const u8 enc_input075[] __initconst = { -+ 0x7d, 0xe8, 0x7f, 0x67, 0x29, 0x94, 0x52, 0x75, -+ 0xd0, 0x65, 0x5d, 0xa4, 0xc7, 0xfd, 0xe4, 0x56, -+ 0x9e, 0x16, 0xf1, 0x11, 0xb5, 0xeb, 0x26, 0xc2, -+ 0x2d, 0x85, 0x9e, 0x3f, 0xf8, 0x22, 0xec, 0xed, -+ 0x3a, 0x6d, 0xd9, 0xa6, 0x0f, 0x22, 0x95, 0x7f, -+ 0x7b, 0x7c, 0x85, 0x7e, 0x88, 0x22, 0xeb, 0x9f, -+ 0xe0, 0xb8, 0xd7, 0x02, 0x21, 0x41, 0xf2, 0xd0, -+ 0xb4, 0x8f, 0x4b, 0x56, 0x12, 0xd3, 0x22, 0xa8, -+ 0x8d, 0xd0, 0xfe, 0x0b, 0x4d, 0x91, 0x79, 
0x32, -+ 0x4f, 0x7c, 0x6c, 0x9e, 0x99, 0x0e, 0xfb, 0xd8, -+ 0x0e, 0x5e, 0xd6, 0x77, 0x58, 0x26, 0x49, 0x8b, -+ 0x1e, 0xfe, 0x0f, 0x71, 0xa0, 0xf3, 0xec, 0x5b, -+ 0x29, 0xcb, 0x28, 0xc2, 0x54, 0x0a, 0x7d, 0xcd, -+ 0x51, 0xb7, 0xda, 0xae, 0xe0, 0xff, 0x4a, 0x7f, -+ 0x3a, 0xc1, 0xee, 0x54, 0xc2, 0x9e, 0xe4, 0xc1, -+ 0x70, 0xde, 0x40, 0x8f, 0x66, 0x69, 0x21, 0x94 -+}; -+static const u8 enc_output075[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xc5, 0x78, 0xe2, 0xaa, 0x44, 0xd3, 0x09, 0xb7, -+ 0xb6, 0xa5, 0x19, 0x3b, 0xdc, 0x61, 0x18, 0xf5 -+}; -+static const u8 enc_assoc075[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_nonce075[] __initconst = { -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x00, 0x03, 0x18, 0xa5 -+}; -+static const u8 enc_key075[] __initconst = { -+ 0x30, 0x30, 0x30, 
0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, -+ 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30 -+}; -+ -+/* wycheproof - checking for int overflows */ - static const u8 enc_input076[] __initconst = { - 0x1b, 0x99, 0x6f, 0x9a, 0x3c, 0xcc, 0x67, 0x85, - 0xde, 0x22, 0xff, 0x5b, 0x8a, 0xdd, 0x95, 0x02, -@@ -3349,6 +4568,286 @@ static const u8 enc_key085[] __initconst - 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f - }; - -+/* wycheproof - special case tag */ -+static const u8 enc_input086[] __initconst = { -+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, -+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, -+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, -+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, -+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, -+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, -+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, -+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d -+}; -+static const u8 enc_output086[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f -+}; -+static const u8 enc_assoc086[] __initconst = { -+ 0x85, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xa6, 0x90, 0x2f, 0xcb, 0xc8, 0x83, 0xbb, 0xc1, -+ 0x80, 0xb2, 0x56, 0xae, 0x34, 0xad, 0x7f, 0x00 -+}; -+static const u8 enc_nonce086[] __initconst = { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b -+}; -+static const u8 enc_key086[] 
__initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - special case tag */ -+static const u8 enc_input087[] __initconst = { -+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, -+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, -+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, -+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, -+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, -+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, -+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, -+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d -+}; -+static const u8 enc_output087[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 -+}; -+static const u8 enc_assoc087[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x24, 0x7e, 0x50, 0x64, 0x2a, 0x1c, 0x0a, 0x2f, -+ 0x8f, 0x77, 0x21, 0x96, 0x09, 0xdb, 0xa9, 0x58 -+}; -+static const u8 enc_nonce087[] __initconst = { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b -+}; -+static const u8 enc_key087[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - special case tag */ -+static const u8 
enc_input088[] __initconst = { -+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, -+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, -+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, -+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, -+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, -+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, -+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, -+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d -+}; -+static const u8 enc_output088[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff -+}; -+static const u8 enc_assoc088[] __initconst = { -+ 0x7c, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xd9, 0xe7, 0x2c, 0x06, 0x4a, 0xc8, 0x96, 0x1f, -+ 0x3f, 0xa5, 0x85, 0xe0, 0xe2, 0xab, 0xd6, 0x00 -+}; -+static const u8 enc_nonce088[] __initconst = { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b -+}; -+static const u8 enc_key088[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - special case tag */ -+static const u8 enc_input089[] __initconst = { -+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, -+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, -+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, -+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, -+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, -+ 
0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, -+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, -+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d -+}; -+static const u8 enc_output089[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, -+ 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80 -+}; -+static const u8 enc_assoc089[] __initconst = { -+ 0x65, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x95, 0xaf, 0x0f, 0x4d, 0x0b, 0x68, 0x6e, 0xae, -+ 0xcc, 0xca, 0x43, 0x07, 0xd5, 0x96, 0xf5, 0x02 -+}; -+static const u8 enc_nonce089[] __initconst = { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b -+}; -+static const u8 enc_key089[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - special case tag */ -+static const u8 enc_input090[] __initconst = { -+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, -+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, -+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, -+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, -+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, -+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, -+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, -+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d -+}; -+static const u8 enc_output090[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f, -+ 0xff, 0xff, 0xff, 0x7f, 0xff, 0xff, 0xff, 0x7f -+}; -+static const u8 enc_assoc090[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x85, 0x40, 0xb4, 0x64, 0x35, 0x77, 0x07, 0xbe, -+ 0x3a, 0x39, 0xd5, 0x5c, 0x34, 0xf8, 0xbc, 0xb3 -+}; -+static const u8 enc_nonce090[] __initconst = { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b -+}; -+static const u8 enc_key090[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - special case tag */ -+static const u8 enc_input091[] __initconst = { -+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, -+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, -+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, -+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, -+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, -+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, -+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, -+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d -+}; -+static const u8 enc_output091[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, -+ 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00 -+}; -+static const u8 enc_assoc091[] __initconst = { -+ 0x4f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0x66, 0x23, 0xd9, 0x90, 0xb8, 0x98, 0xd8, 0x30, -+ 0xd2, 0x12, 0xaf, 0x23, 0x83, 0x33, 0x07, 0x01 -+}; -+static const u8 enc_nonce091[] __initconst = { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b -+}; -+static const u8 enc_key091[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ -+/* wycheproof - special case tag */ -+static const u8 enc_input092[] __initconst = { -+ 0x9a, 0x49, 0xc4, 0x0f, 0x8b, 0x48, 0xd7, 0xc6, -+ 0x6d, 0x1d, 0xb4, 0xe5, 0x3f, 0x20, 0xf2, 0xdd, -+ 0x4a, 0xaa, 0x24, 0x1d, 0xda, 0xb2, 0x6b, 0x5b, -+ 0xc0, 0xe2, 0x18, 0xb7, 0x2c, 0x33, 0x90, 0xf2, -+ 0xdf, 0x3e, 0xbd, 0x01, 0x76, 0x70, 0x44, 0x19, -+ 0x97, 0x2b, 0xcd, 0xbc, 0x6b, 0xbc, 0xb3, 0xe4, -+ 0xe7, 0x4a, 0x71, 0x52, 0x8e, 0xf5, 0x12, 0x63, -+ 0xce, 0x24, 0xe0, 0xd5, 0x75, 0xe0, 0xe4, 0x4d -+}; -+static const u8 enc_output092[] __initconst = { -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, -+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 -+}; -+static const u8 enc_assoc092[] __initconst = { -+ 0x83, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, -+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, -+ 0x5f, 0x16, 0xd0, 0x9f, 0x17, 0x78, 0x72, 0x11, -+ 0xb7, 0xd4, 0x84, 0xe0, 0x24, 0xf8, 0x97, 0x01 -+}; -+static const u8 enc_nonce092[] __initconst = { -+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, -+ 0x08, 0x09, 0x0a, 0x0b -+}; -+static const u8 enc_key092[] __initconst = { -+ 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, -+ 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, -+ 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, -+ 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f -+}; -+ - /* wycheproof - edge case intermediate sums in poly1305 */ - static const u8 enc_input093[] __initconst = { - 0x00, 0x52, 0x35, 0xd2, 0xa9, 0x19, 0xf2, 0x8d, -@@ -4455,6 +5954,86 @@ chacha20poly1305_enc_vectors[] __initcon - sizeof(enc_input011), sizeof(enc_assoc011), sizeof(enc_nonce011) }, - { enc_input012, enc_output012, enc_assoc012, enc_nonce012, enc_key012, - sizeof(enc_input012), sizeof(enc_assoc012), sizeof(enc_nonce012) }, -+ { enc_input013, enc_output013, enc_assoc013, enc_nonce013, enc_key013, -+ sizeof(enc_input013), sizeof(enc_assoc013), sizeof(enc_nonce013) }, -+ { enc_input014, enc_output014, enc_assoc014, enc_nonce014, enc_key014, -+ sizeof(enc_input014), sizeof(enc_assoc014), sizeof(enc_nonce014) }, -+ { enc_input015, enc_output015, enc_assoc015, enc_nonce015, enc_key015, -+ sizeof(enc_input015), sizeof(enc_assoc015), sizeof(enc_nonce015) }, -+ { enc_input016, enc_output016, enc_assoc016, enc_nonce016, enc_key016, -+ sizeof(enc_input016), sizeof(enc_assoc016), sizeof(enc_nonce016) }, -+ { enc_input017, enc_output017, enc_assoc017, enc_nonce017, enc_key017, -+ sizeof(enc_input017), sizeof(enc_assoc017), sizeof(enc_nonce017) }, -+ { enc_input018, enc_output018, enc_assoc018, enc_nonce018, enc_key018, -+ sizeof(enc_input018), sizeof(enc_assoc018), sizeof(enc_nonce018) }, -+ { enc_input019, enc_output019, enc_assoc019, enc_nonce019, enc_key019, -+ sizeof(enc_input019), sizeof(enc_assoc019), sizeof(enc_nonce019) }, -+ { enc_input020, enc_output020, 
enc_assoc020, enc_nonce020, enc_key020, -+ sizeof(enc_input020), sizeof(enc_assoc020), sizeof(enc_nonce020) }, -+ { enc_input021, enc_output021, enc_assoc021, enc_nonce021, enc_key021, -+ sizeof(enc_input021), sizeof(enc_assoc021), sizeof(enc_nonce021) }, -+ { enc_input022, enc_output022, enc_assoc022, enc_nonce022, enc_key022, -+ sizeof(enc_input022), sizeof(enc_assoc022), sizeof(enc_nonce022) }, -+ { enc_input023, enc_output023, enc_assoc023, enc_nonce023, enc_key023, -+ sizeof(enc_input023), sizeof(enc_assoc023), sizeof(enc_nonce023) }, -+ { enc_input024, enc_output024, enc_assoc024, enc_nonce024, enc_key024, -+ sizeof(enc_input024), sizeof(enc_assoc024), sizeof(enc_nonce024) }, -+ { enc_input025, enc_output025, enc_assoc025, enc_nonce025, enc_key025, -+ sizeof(enc_input025), sizeof(enc_assoc025), sizeof(enc_nonce025) }, -+ { enc_input026, enc_output026, enc_assoc026, enc_nonce026, enc_key026, -+ sizeof(enc_input026), sizeof(enc_assoc026), sizeof(enc_nonce026) }, -+ { enc_input027, enc_output027, enc_assoc027, enc_nonce027, enc_key027, -+ sizeof(enc_input027), sizeof(enc_assoc027), sizeof(enc_nonce027) }, -+ { enc_input028, enc_output028, enc_assoc028, enc_nonce028, enc_key028, -+ sizeof(enc_input028), sizeof(enc_assoc028), sizeof(enc_nonce028) }, -+ { enc_input029, enc_output029, enc_assoc029, enc_nonce029, enc_key029, -+ sizeof(enc_input029), sizeof(enc_assoc029), sizeof(enc_nonce029) }, -+ { enc_input030, enc_output030, enc_assoc030, enc_nonce030, enc_key030, -+ sizeof(enc_input030), sizeof(enc_assoc030), sizeof(enc_nonce030) }, -+ { enc_input031, enc_output031, enc_assoc031, enc_nonce031, enc_key031, -+ sizeof(enc_input031), sizeof(enc_assoc031), sizeof(enc_nonce031) }, -+ { enc_input032, enc_output032, enc_assoc032, enc_nonce032, enc_key032, -+ sizeof(enc_input032), sizeof(enc_assoc032), sizeof(enc_nonce032) }, -+ { enc_input033, enc_output033, enc_assoc033, enc_nonce033, enc_key033, -+ sizeof(enc_input033), sizeof(enc_assoc033), sizeof(enc_nonce033) }, -+ 
{ enc_input034, enc_output034, enc_assoc034, enc_nonce034, enc_key034, -+ sizeof(enc_input034), sizeof(enc_assoc034), sizeof(enc_nonce034) }, -+ { enc_input035, enc_output035, enc_assoc035, enc_nonce035, enc_key035, -+ sizeof(enc_input035), sizeof(enc_assoc035), sizeof(enc_nonce035) }, -+ { enc_input036, enc_output036, enc_assoc036, enc_nonce036, enc_key036, -+ sizeof(enc_input036), sizeof(enc_assoc036), sizeof(enc_nonce036) }, -+ { enc_input037, enc_output037, enc_assoc037, enc_nonce037, enc_key037, -+ sizeof(enc_input037), sizeof(enc_assoc037), sizeof(enc_nonce037) }, -+ { enc_input038, enc_output038, enc_assoc038, enc_nonce038, enc_key038, -+ sizeof(enc_input038), sizeof(enc_assoc038), sizeof(enc_nonce038) }, -+ { enc_input039, enc_output039, enc_assoc039, enc_nonce039, enc_key039, -+ sizeof(enc_input039), sizeof(enc_assoc039), sizeof(enc_nonce039) }, -+ { enc_input040, enc_output040, enc_assoc040, enc_nonce040, enc_key040, -+ sizeof(enc_input040), sizeof(enc_assoc040), sizeof(enc_nonce040) }, -+ { enc_input041, enc_output041, enc_assoc041, enc_nonce041, enc_key041, -+ sizeof(enc_input041), sizeof(enc_assoc041), sizeof(enc_nonce041) }, -+ { enc_input042, enc_output042, enc_assoc042, enc_nonce042, enc_key042, -+ sizeof(enc_input042), sizeof(enc_assoc042), sizeof(enc_nonce042) }, -+ { enc_input043, enc_output043, enc_assoc043, enc_nonce043, enc_key043, -+ sizeof(enc_input043), sizeof(enc_assoc043), sizeof(enc_nonce043) }, -+ { enc_input044, enc_output044, enc_assoc044, enc_nonce044, enc_key044, -+ sizeof(enc_input044), sizeof(enc_assoc044), sizeof(enc_nonce044) }, -+ { enc_input045, enc_output045, enc_assoc045, enc_nonce045, enc_key045, -+ sizeof(enc_input045), sizeof(enc_assoc045), sizeof(enc_nonce045) }, -+ { enc_input046, enc_output046, enc_assoc046, enc_nonce046, enc_key046, -+ sizeof(enc_input046), sizeof(enc_assoc046), sizeof(enc_nonce046) }, -+ { enc_input047, enc_output047, enc_assoc047, enc_nonce047, enc_key047, -+ sizeof(enc_input047), 
sizeof(enc_assoc047), sizeof(enc_nonce047) }, -+ { enc_input048, enc_output048, enc_assoc048, enc_nonce048, enc_key048, -+ sizeof(enc_input048), sizeof(enc_assoc048), sizeof(enc_nonce048) }, -+ { enc_input049, enc_output049, enc_assoc049, enc_nonce049, enc_key049, -+ sizeof(enc_input049), sizeof(enc_assoc049), sizeof(enc_nonce049) }, -+ { enc_input050, enc_output050, enc_assoc050, enc_nonce050, enc_key050, -+ sizeof(enc_input050), sizeof(enc_assoc050), sizeof(enc_nonce050) }, -+ { enc_input051, enc_output051, enc_assoc051, enc_nonce051, enc_key051, -+ sizeof(enc_input051), sizeof(enc_assoc051), sizeof(enc_nonce051) }, -+ { enc_input052, enc_output052, enc_assoc052, enc_nonce052, enc_key052, -+ sizeof(enc_input052), sizeof(enc_assoc052), sizeof(enc_nonce052) }, - { enc_input053, enc_output053, enc_assoc053, enc_nonce053, enc_key053, - sizeof(enc_input053), sizeof(enc_assoc053), sizeof(enc_nonce053) }, - { enc_input054, enc_output054, enc_assoc054, enc_nonce054, enc_key054, -@@ -4497,6 +6076,10 @@ chacha20poly1305_enc_vectors[] __initcon - sizeof(enc_input072), sizeof(enc_assoc072), sizeof(enc_nonce072) }, - { enc_input073, enc_output073, enc_assoc073, enc_nonce073, enc_key073, - sizeof(enc_input073), sizeof(enc_assoc073), sizeof(enc_nonce073) }, -+ { enc_input074, enc_output074, enc_assoc074, enc_nonce074, enc_key074, -+ sizeof(enc_input074), sizeof(enc_assoc074), sizeof(enc_nonce074) }, -+ { enc_input075, enc_output075, enc_assoc075, enc_nonce075, enc_key075, -+ sizeof(enc_input075), sizeof(enc_assoc075), sizeof(enc_nonce075) }, - { enc_input076, enc_output076, enc_assoc076, enc_nonce076, enc_key076, - sizeof(enc_input076), sizeof(enc_assoc076), sizeof(enc_nonce076) }, - { enc_input077, enc_output077, enc_assoc077, enc_nonce077, enc_key077, -@@ -4517,6 +6100,20 @@ chacha20poly1305_enc_vectors[] __initcon - sizeof(enc_input084), sizeof(enc_assoc084), sizeof(enc_nonce084) }, - { enc_input085, enc_output085, enc_assoc085, enc_nonce085, enc_key085, - 
sizeof(enc_input085), sizeof(enc_assoc085), sizeof(enc_nonce085) }, -+ { enc_input086, enc_output086, enc_assoc086, enc_nonce086, enc_key086, -+ sizeof(enc_input086), sizeof(enc_assoc086), sizeof(enc_nonce086) }, -+ { enc_input087, enc_output087, enc_assoc087, enc_nonce087, enc_key087, -+ sizeof(enc_input087), sizeof(enc_assoc087), sizeof(enc_nonce087) }, -+ { enc_input088, enc_output088, enc_assoc088, enc_nonce088, enc_key088, -+ sizeof(enc_input088), sizeof(enc_assoc088), sizeof(enc_nonce088) }, -+ { enc_input089, enc_output089, enc_assoc089, enc_nonce089, enc_key089, -+ sizeof(enc_input089), sizeof(enc_assoc089), sizeof(enc_nonce089) }, -+ { enc_input090, enc_output090, enc_assoc090, enc_nonce090, enc_key090, -+ sizeof(enc_input090), sizeof(enc_assoc090), sizeof(enc_nonce090) }, -+ { enc_input091, enc_output091, enc_assoc091, enc_nonce091, enc_key091, -+ sizeof(enc_input091), sizeof(enc_assoc091), sizeof(enc_nonce091) }, -+ { enc_input092, enc_output092, enc_assoc092, enc_nonce092, enc_key092, -+ sizeof(enc_input092), sizeof(enc_assoc092), sizeof(enc_nonce092) }, - { enc_input093, enc_output093, enc_assoc093, enc_nonce093, enc_key093, - sizeof(enc_input093), sizeof(enc_assoc093), sizeof(enc_nonce093) }, - { enc_input094, enc_output094, enc_assoc094, enc_nonce094, enc_key094, -@@ -7224,6 +8821,43 @@ xchacha20poly1305_dec_vectors[] __initco - sizeof(xdec_input001), sizeof(xdec_assoc001), sizeof(xdec_nonce001) } - }; - -+/* This is for the selftests-only, since it is only useful for the purpose of -+ * testing the underlying primitives and interactions. 
-+ */ -+static void __init -+chacha20poly1305_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len, -+ const u8 *ad, const size_t ad_len, -+ const u8 nonce[12], -+ const u8 key[CHACHA20POLY1305_KEY_SIZE]) -+{ -+ const u8 *pad0 = page_address(ZERO_PAGE(0)); -+ struct poly1305_desc_ctx poly1305_state; -+ u32 chacha20_state[CHACHA_STATE_WORDS]; -+ union { -+ u8 block0[POLY1305_KEY_SIZE]; -+ __le64 lens[2]; -+ } b = {{ 0 }}; -+ u8 bottom_row[16] = { 0 }; -+ u32 le_key[8]; -+ int i; -+ -+ memcpy(&bottom_row[4], nonce, 12); -+ for (i = 0; i < 8; ++i) -+ le_key[i] = get_unaligned_le32(key + sizeof(le_key[i]) * i); -+ chacha_init(chacha20_state, le_key, bottom_row); -+ chacha20_crypt(chacha20_state, b.block0, b.block0, sizeof(b.block0)); -+ poly1305_init(&poly1305_state, b.block0); -+ poly1305_update(&poly1305_state, ad, ad_len); -+ poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf); -+ chacha20_crypt(chacha20_state, dst, src, src_len); -+ poly1305_update(&poly1305_state, dst, src_len); -+ poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf); -+ b.lens[0] = cpu_to_le64(ad_len); -+ b.lens[1] = cpu_to_le64(src_len); -+ poly1305_update(&poly1305_state, (u8 *)b.lens, sizeof(b.lens)); -+ poly1305_final(&poly1305_state, dst + src_len); -+} -+ - static void __init - chacha20poly1305_selftest_encrypt(u8 *dst, const u8 *src, const size_t src_len, - const u8 *ad, const size_t ad_len, -@@ -7233,6 +8867,9 @@ chacha20poly1305_selftest_encrypt(u8 *ds - if (nonce_len == 8) - chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, - get_unaligned_le64(nonce), key); -+ else if (nonce_len == 12) -+ chacha20poly1305_encrypt_bignonce(dst, src, src_len, ad, -+ ad_len, nonce, key); - else - BUG(); - } -@@ -7248,14 +8885,14 @@ decryption_success(bool func_ret, bool e - bool __init chacha20poly1305_selftest(void) - { - enum { MAXIMUM_TEST_BUFFER_LEN = 1UL << 12 }; -- size_t i; -- u8 *computed_output = NULL, *heap_src = NULL; -- struct scatterlist sg_src; -+ size_t i, 
j, k, total_len; -+ u8 *computed_output = NULL, *input = NULL; - bool success = true, ret; -+ struct scatterlist sg_src[3]; - -- heap_src = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL); - computed_output = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL); -- if (!heap_src || !computed_output) { -+ input = kmalloc(MAXIMUM_TEST_BUFFER_LEN, GFP_KERNEL); -+ if (!computed_output || !input) { - pr_err("chacha20poly1305 self-test malloc: FAIL\n"); - success = false; - goto out; -@@ -7284,17 +8921,17 @@ bool __init chacha20poly1305_selftest(vo - for (i = 0; i < ARRAY_SIZE(chacha20poly1305_enc_vectors); ++i) { - if (chacha20poly1305_enc_vectors[i].nlen != 8) - continue; -- memcpy(heap_src, chacha20poly1305_enc_vectors[i].input, -+ memcpy(computed_output, chacha20poly1305_enc_vectors[i].input, - chacha20poly1305_enc_vectors[i].ilen); -- sg_init_one(&sg_src, heap_src, -+ sg_init_one(sg_src, computed_output, - chacha20poly1305_enc_vectors[i].ilen + POLY1305_DIGEST_SIZE); -- chacha20poly1305_encrypt_sg_inplace(&sg_src, -+ ret = chacha20poly1305_encrypt_sg_inplace(sg_src, - chacha20poly1305_enc_vectors[i].ilen, - chacha20poly1305_enc_vectors[i].assoc, - chacha20poly1305_enc_vectors[i].alen, - get_unaligned_le64(chacha20poly1305_enc_vectors[i].nonce), - chacha20poly1305_enc_vectors[i].key); -- if (memcmp(heap_src, -+ if (!ret || memcmp(computed_output, - chacha20poly1305_enc_vectors[i].output, - chacha20poly1305_enc_vectors[i].ilen + - POLY1305_DIGEST_SIZE)) { -@@ -7326,11 +8963,11 @@ bool __init chacha20poly1305_selftest(vo - } - - for (i = 0; i < ARRAY_SIZE(chacha20poly1305_dec_vectors); ++i) { -- memcpy(heap_src, chacha20poly1305_dec_vectors[i].input, -+ memcpy(computed_output, chacha20poly1305_dec_vectors[i].input, - chacha20poly1305_dec_vectors[i].ilen); -- sg_init_one(&sg_src, heap_src, -+ sg_init_one(sg_src, computed_output, - chacha20poly1305_dec_vectors[i].ilen); -- ret = chacha20poly1305_decrypt_sg_inplace(&sg_src, -+ ret = chacha20poly1305_decrypt_sg_inplace(sg_src, - 
chacha20poly1305_dec_vectors[i].ilen, - chacha20poly1305_dec_vectors[i].assoc, - chacha20poly1305_dec_vectors[i].alen, -@@ -7338,7 +8975,7 @@ bool __init chacha20poly1305_selftest(vo - chacha20poly1305_dec_vectors[i].key); - if (!decryption_success(ret, - chacha20poly1305_dec_vectors[i].failure, -- memcmp(heap_src, chacha20poly1305_dec_vectors[i].output, -+ memcmp(computed_output, chacha20poly1305_dec_vectors[i].output, - chacha20poly1305_dec_vectors[i].ilen - - POLY1305_DIGEST_SIZE))) { - pr_err("chacha20poly1305 sg decryption self-test %zu: FAIL\n", -@@ -7365,6 +9002,7 @@ bool __init chacha20poly1305_selftest(vo - success = false; - } - } -+ - for (i = 0; i < ARRAY_SIZE(xchacha20poly1305_dec_vectors); ++i) { - memset(computed_output, 0, MAXIMUM_TEST_BUFFER_LEN); - ret = xchacha20poly1305_decrypt(computed_output, -@@ -7386,8 +9024,54 @@ bool __init chacha20poly1305_selftest(vo - } - } - -+ for (total_len = POLY1305_DIGEST_SIZE; IS_ENABLED(DEBUG_CHACHA20POLY1305_SLOW_CHUNK_TEST) -+ && total_len <= 1 << 10; ++total_len) { -+ for (i = 0; i <= total_len; ++i) { -+ for (j = i; j <= total_len; ++j) { -+ sg_init_table(sg_src, 3); -+ sg_set_buf(&sg_src[0], input, i); -+ sg_set_buf(&sg_src[1], input + i, j - i); -+ sg_set_buf(&sg_src[2], input + j, total_len - j); -+ memset(computed_output, 0, total_len); -+ memset(input, 0, total_len); -+ -+ if (!chacha20poly1305_encrypt_sg_inplace(sg_src, -+ total_len - POLY1305_DIGEST_SIZE, NULL, 0, -+ 0, enc_key001)) -+ goto chunkfail; -+ chacha20poly1305_encrypt(computed_output, -+ computed_output, -+ total_len - POLY1305_DIGEST_SIZE, NULL, 0, 0, -+ enc_key001); -+ if (memcmp(computed_output, input, total_len)) -+ goto chunkfail; -+ if (!chacha20poly1305_decrypt(computed_output, -+ input, total_len, NULL, 0, 0, enc_key001)) -+ goto chunkfail; -+ for (k = 0; k < total_len - POLY1305_DIGEST_SIZE; ++k) { -+ if (computed_output[k]) -+ goto chunkfail; -+ } -+ if (!chacha20poly1305_decrypt_sg_inplace(sg_src, -+ total_len, NULL, 0, 0, 
enc_key001)) -+ goto chunkfail; -+ for (k = 0; k < total_len - POLY1305_DIGEST_SIZE; ++k) { -+ if (input[k]) -+ goto chunkfail; -+ } -+ continue; -+ -+ chunkfail: -+ pr_err("chacha20poly1305 chunked self-test %zu/%zu/%zu: FAIL\n", -+ total_len, i, j); -+ success = false; -+ } -+ -+ } -+ } -+ - out: -- kfree(heap_src); - kfree(computed_output); -+ kfree(input); - return success; - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0048-crypto-x86-poly1305-emit-does-base-conversion-itself.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0048-crypto-x86-poly1305-emit-does-base-conversion-itself.patch deleted file mode 100644 index 8209ca289..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0048-crypto-x86-poly1305-emit-does-base-conversion-itself.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 17 Jan 2020 11:42:22 +0100 -Subject: [PATCH] crypto: x86/poly1305 - emit does base conversion itself - -commit f9e7fe32a792726186301423ff63a465d63386e1 upstream. - -The emit code does optional base conversion itself in assembly, so we -don't need to do that here. Also, neither one of these functions uses -simd instructions, so checking for that doesn't make sense either. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/x86/crypto/poly1305_glue.c | 8 ++------ - 1 file changed, 2 insertions(+), 6 deletions(-) - ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -123,13 +123,9 @@ static void poly1305_simd_blocks(void *c - static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], - const u32 nonce[4]) - { -- struct poly1305_arch_internal *state = ctx; -- -- if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx) || -- !state->is_base2_26 || !crypto_simd_usable()) { -- convert_to_base2_64(ctx); -+ if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx)) - poly1305_emit_x86_64(ctx, mac, nonce); -- } else -+ else - poly1305_emit_avx(ctx, mac, nonce); - } - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0049-crypto-arm-chacha-fix-build-failured-when-kernel-mod.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0049-crypto-arm-chacha-fix-build-failured-when-kernel-mod.patch deleted file mode 100644 index 354f58431..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0049-crypto-arm-chacha-fix-build-failured-when-kernel-mod.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 17 Jan 2020 17:43:18 +0100 -Subject: [PATCH] crypto: arm/chacha - fix build failured when kernel mode NEON - is disabled - -commit 0bc81767c5bd9d005fae1099fb39eb3688370cb1 upstream. - -When the ARM accelerated ChaCha driver is built as part of a configuration -that has kernel mode NEON disabled, we expect the compiler to propagate -the build time constant expression IS_ENABLED(CONFIG_KERNEL_MODE_NEON) in -a way that eliminates all the cross-object references to the actual NEON -routines, which allows the chacha-neon-core.o object to be omitted from -the build entirely. 
- -Unfortunately, this fails to work as expected in some cases, and we may -end up with a build error such as - - chacha-glue.c:(.text+0xc0): undefined reference to `chacha_4block_xor_neon' - -caused by the fact that chacha_doneon() has not been eliminated from the -object code, even though it will never be called in practice. - -Let's fix this by adding some IS_ENABLED(CONFIG_KERNEL_MODE_NEON) tests -that are not strictly needed from a logical point of view, but should -help the compiler infer that the NEON code paths are unreachable in -those cases. - -Fixes: b36d8c09e710c71f ("crypto: arm/chacha - remove dependency on generic ...") -Reported-by: Russell King -Cc: Arnd Bergmann -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm/crypto/chacha-glue.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/arch/arm/crypto/chacha-glue.c -+++ b/arch/arm/crypto/chacha-glue.c -@@ -115,7 +115,7 @@ static int chacha_stream_xor(struct skci - if (nbytes < walk.total) - nbytes = round_down(nbytes, walk.stride); - -- if (!neon) { -+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) { - chacha_doarm(walk.dst.virt.addr, walk.src.virt.addr, - nbytes, state, ctx->nrounds); - state[12] += DIV_ROUND_UP(nbytes, CHACHA_BLOCK_SIZE); -@@ -159,7 +159,7 @@ static int do_xchacha(struct skcipher_re - - chacha_init_generic(state, ctx->key, req->iv); - -- if (!neon) { -+ if (!IS_ENABLED(CONFIG_KERNEL_MODE_NEON) || !neon) { - hchacha_block_arm(state, subctx.key, ctx->nrounds); - } else { - kernel_neon_begin(); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0050-crypto-Kconfig-allow-tests-to-be-disabled-when-manag.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0050-crypto-Kconfig-allow-tests-to-be-disabled-when-manag.patch deleted file mode 100644 index e3a6b1f4f..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0050-crypto-Kconfig-allow-tests-to-be-disabled-when-manag.patch +++ /dev/null @@ -1,42 
+0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 17 Jan 2020 12:01:36 +0100 -Subject: [PATCH] crypto: Kconfig - allow tests to be disabled when manager is - disabled - -commit 2343d1529aff8b552589f622c23932035ed7a05d upstream. - -The library code uses CRYPTO_MANAGER_DISABLE_TESTS to conditionalize its -tests, but the library code can also exist without CRYPTO_MANAGER. That -means on minimal configs, the test code winds up being built with no way -to disable it. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - crypto/Kconfig | 4 ---- - 1 file changed, 4 deletions(-) - -Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/crypto/Kconfig -=================================================================== ---- linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac.orig/crypto/Kconfig -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/crypto/Kconfig -@@ -143,8 +143,6 @@ config CRYPTO_MANAGER_DISABLE_TESTS - Disable run-time self tests that normally take place at - algorithm registration. - --if CRYPTO_MANAGER2 -- - config CRYPTO_MANAGER_EXTRA_TESTS - bool "Enable extra run-time crypto self tests" - depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS -@@ -155,8 +153,6 @@ config CRYPTO_MANAGER_EXTRA_TESTS - This is intended for developer use only, as these tests take much - longer to run than the normal self tests. 
- --endif # if CRYPTO_MANAGER2 -- - config CRYPTO_GF128MUL - tristate - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0051-crypto-chacha20poly1305-prevent-integer-overflow-on-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0051-crypto-chacha20poly1305-prevent-integer-overflow-on-.patch deleted file mode 100644 index 1ed49e5b6..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0051-crypto-chacha20poly1305-prevent-integer-overflow-on-.patch +++ /dev/null @@ -1,40 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 6 Feb 2020 12:42:01 +0100 -Subject: [PATCH] crypto: chacha20poly1305 - prevent integer overflow on large - input - -commit c9cc0517bba9f0213f1e55172feceb99e5512daf upstream. - -This code assigns src_len (size_t) to sl (int), which causes problems -when src_len is very large. Probably nobody in the kernel should be -passing this much data to chacha20poly1305 all in one go anyway, so I -don't think we need to change the algorithm or introduce larger types -or anything. But we should at least error out early in this case and -print a warning so that we get reports if this does happen and can look -into why anybody is possibly passing it that much data or if they're -accidently passing -1 or similar. - -Fixes: d95312a3ccc0 ("crypto: lib/chacha20poly1305 - reimplement crypt_from_sg() routine") -Cc: Ard Biesheuvel -Cc: stable@vger.kernel.org # 5.5+ -Signed-off-by: Jason A. Donenfeld -Acked-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - lib/crypto/chacha20poly1305.c | 3 +++ - 1 file changed, 3 insertions(+) - ---- a/lib/crypto/chacha20poly1305.c -+++ b/lib/crypto/chacha20poly1305.c -@@ -235,6 +235,9 @@ bool chacha20poly1305_crypt_sg_inplace(s - __le64 lens[2]; - } b __aligned(16); - -+ if (WARN_ON(src_len > INT_MAX)) -+ return false; -+ - chacha_load_key(b.k, key); - - b.iv[0] = 0; diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0052-crypto-x86-curve25519-support-assemblers-with-no-adx.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0052-crypto-x86-curve25519-support-assemblers-with-no-adx.patch deleted file mode 100644 index cd507b1e4..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0052-crypto-x86-curve25519-support-assemblers-with-no-adx.patch +++ /dev/null @@ -1,84 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Sun, 1 Mar 2020 22:52:35 +0800 -Subject: [PATCH] crypto: x86/curve25519 - support assemblers with no adx - support - -commit 1579f1bc3b753d17a44de3457d5c6f4a5b14c752 upstream. - -Some older version of GAS do not support the ADX instructions, similarly -to how they also don't support AVX and such. This commit adds the same -build-time detection mechanisms we use for AVX and others for ADX, and -then makes sure that the curve25519 library dispatcher calls the right -functions. - -Reported-by: Willy Tarreau -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/x86/Makefile | 5 +++-- - arch/x86/crypto/Makefile | 7 ++++++- - include/crypto/curve25519.h | 6 ++++-- - 3 files changed, 13 insertions(+), 5 deletions(-) - ---- a/arch/x86/Makefile -+++ b/arch/x86/Makefile -@@ -198,9 +198,10 @@ avx2_instr :=$(call as-instr,vpbroadcast - avx512_instr :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,-DCONFIG_AS_AVX512=1) - sha1_ni_instr :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA1_NI=1) - sha256_ni_instr :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,-DCONFIG_AS_SHA256_NI=1) -+adx_instr := $(call as-instr,adox %r10$(comma)%r10,-DCONFIG_AS_ADX=1) - --KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) --KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) -+KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) -+KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr) $(avx512_instr) $(sha1_ni_instr) $(sha256_ni_instr) $(adx_instr) - - KBUILD_LDFLAGS := -m elf_$(UTS_MACHINE) - ---- a/arch/x86/crypto/Makefile -+++ b/arch/x86/crypto/Makefile -@@ -11,6 +11,7 @@ avx2_supported := $(call as-instr,vpgath - avx512_supported :=$(call as-instr,vpmovm2b %k1$(comma)%zmm5,yes,no) - sha1_ni_supported :=$(call as-instr,sha1msg1 %xmm0$(comma)%xmm1,yes,no) - sha256_ni_supported :=$(call as-instr,sha256msg1 %xmm0$(comma)%xmm1,yes,no) -+adx_supported := $(call as-instr,adox %r10$(comma)%r10,yes,no) - - obj-$(CONFIG_CRYPTO_GLUE_HELPER_X86) += glue_helper.o - -@@ -39,7 +40,11 @@ obj-$(CONFIG_CRYPTO_AEGIS128_AESNI_SSE2) - - obj-$(CONFIG_CRYPTO_NHPOLY1305_SSE2) += nhpoly1305-sse2.o - obj-$(CONFIG_CRYPTO_NHPOLY1305_AVX2) += nhpoly1305-avx2.o --obj-$(CONFIG_CRYPTO_CURVE25519_X86) += 
curve25519-x86_64.o -+ -+# These modules require the assembler to support ADX. -+ifeq ($(adx_supported),yes) -+ obj-$(CONFIG_CRYPTO_CURVE25519_X86) += curve25519-x86_64.o -+endif - - # These modules require assembler to support AVX. - ifeq ($(avx_supported),yes) ---- a/include/crypto/curve25519.h -+++ b/include/crypto/curve25519.h -@@ -33,7 +33,8 @@ bool __must_check curve25519(u8 mypublic - const u8 secret[CURVE25519_KEY_SIZE], - const u8 basepoint[CURVE25519_KEY_SIZE]) - { -- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519)) -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) && -+ (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX))) - curve25519_arch(mypublic, secret, basepoint); - else - curve25519_generic(mypublic, secret, basepoint); -@@ -49,7 +50,8 @@ __must_check curve25519_generate_public( - CURVE25519_KEY_SIZE))) - return false; - -- if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519)) -+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) && -+ (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX))) - curve25519_base_arch(pub, secret); - else - curve25519_generic(pub, secret, curve25519_base_point); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0053-crypto-arm64-chacha-correctly-walk-through-blocks.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0053-crypto-arm64-chacha-correctly-walk-through-blocks.patch deleted file mode 100644 index 823a90837..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0053-crypto-arm64-chacha-correctly-walk-through-blocks.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 18 Mar 2020 20:27:32 -0600 -Subject: [PATCH] crypto: arm64/chacha - correctly walk through blocks - -commit c8cfcb78c65877313cda7bcbace624d3dbd1f3b3 upstream. 
- -Prior, passing in chunks of 2, 3, or 4, followed by any additional -chunks would result in the chacha state counter getting out of sync, -resulting in incorrect encryption/decryption, which is a pretty nasty -crypto vuln: "why do images look weird on webpages?" WireGuard users -never experienced this prior, because we have always, out of tree, used -a different crypto library, until the recent Frankenzinc addition. This -commit fixes the issue by advancing the pointers and state counter by -the actual size processed. It also fixes up a bug in the (optional, -costly) stride test that prevented it from running on arm64. - -Fixes: b3aad5bad26a ("crypto: arm64/chacha - expose arm64 ChaCha routine as library function") -Reported-and-tested-by: Emil Renner Berthing -Cc: Ard Biesheuvel -Cc: stable@vger.kernel.org # v5.5+ -Signed-off-by: Jason A. Donenfeld -Reviewed-by: Eric Biggers -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm64/crypto/chacha-neon-glue.c | 8 ++++---- - lib/crypto/chacha20poly1305-selftest.c | 11 ++++++++--- - 2 files changed, 12 insertions(+), 7 deletions(-) - ---- a/arch/arm64/crypto/chacha-neon-glue.c -+++ b/arch/arm64/crypto/chacha-neon-glue.c -@@ -55,10 +55,10 @@ static void chacha_doneon(u32 *state, u8 - break; - } - chacha_4block_xor_neon(state, dst, src, nrounds, l); -- bytes -= CHACHA_BLOCK_SIZE * 5; -- src += CHACHA_BLOCK_SIZE * 5; -- dst += CHACHA_BLOCK_SIZE * 5; -- state[12] += 5; -+ bytes -= l; -+ src += l; -+ dst += l; -+ state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE); - } - } - ---- a/lib/crypto/chacha20poly1305-selftest.c -+++ b/lib/crypto/chacha20poly1305-selftest.c -@@ -9028,10 +9028,15 @@ bool __init chacha20poly1305_selftest(vo - && total_len <= 1 << 10; ++total_len) { - for (i = 0; i <= total_len; ++i) { - for (j = i; j <= total_len; ++j) { -+ k = 0; - sg_init_table(sg_src, 3); -- sg_set_buf(&sg_src[0], input, i); -- sg_set_buf(&sg_src[1], input + i, j - i); -- sg_set_buf(&sg_src[2], input + j, 
total_len - j); -+ if (i) -+ sg_set_buf(&sg_src[k++], input, i); -+ if (j - i) -+ sg_set_buf(&sg_src[k++], input + i, j - i); -+ if (total_len - j) -+ sg_set_buf(&sg_src[k++], input + j, total_len - j); -+ sg_init_marker(sg_src, k); - memset(computed_output, 0, total_len); - memset(input, 0, total_len); - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0054-crypto-x86-curve25519-replace-with-formally-verified.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0054-crypto-x86-curve25519-replace-with-formally-verified.patch deleted file mode 100644 index 938d700da..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0054-crypto-x86-curve25519-replace-with-formally-verified.patch +++ /dev/null @@ -1,3765 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 20 Jan 2020 18:18:15 +0100 -Subject: [PATCH] crypto: x86/curve25519 - replace with formally verified - implementation - -commit 07b586fe06625b0b610dc3d3a969c51913d143d4 upstream. - -This comes from INRIA's HACL*/Vale. It implements the same algorithm and -implementation strategy as the code it replaces, only this code has been -formally verified, sans the base point multiplication, which uses code -similar to prior, only it uses the formally verified field arithmetic -alongside reproducable ladder generation steps. This doesn't have a -pure-bmi2 version, which means haswell no longer benefits, but the -increased (doubled) code complexity is not worth it for a single -generation of chips that's already old. - -Performance-wise, this is around 1% slower on older microarchitectures, -and slightly faster on newer microarchitectures, mainly 10nm ones or -backports of 10nm to 14nm. 
This implementation is "everest" below: - -Xeon E5-2680 v4 (Broadwell) - - armfazh: 133340 cycles per call - everest: 133436 cycles per call - -Xeon Gold 5120 (Sky Lake Server) - - armfazh: 112636 cycles per call - everest: 113906 cycles per call - -Core i5-6300U (Sky Lake Client) - - armfazh: 116810 cycles per call - everest: 117916 cycles per call - -Core i7-7600U (Kaby Lake) - - armfazh: 119523 cycles per call - everest: 119040 cycles per call - -Core i7-8750H (Coffee Lake) - - armfazh: 113914 cycles per call - everest: 113650 cycles per call - -Core i9-9880H (Coffee Lake Refresh) - - armfazh: 112616 cycles per call - everest: 114082 cycles per call - -Core i3-8121U (Cannon Lake) - - armfazh: 113202 cycles per call - everest: 111382 cycles per call - -Core i7-8265U (Whiskey Lake) - - armfazh: 127307 cycles per call - everest: 127697 cycles per call - -Core i7-8550U (Kaby Lake Refresh) - - armfazh: 127522 cycles per call - everest: 127083 cycles per call - -Xeon Platinum 8275CL (Cascade Lake) - - armfazh: 114380 cycles per call - everest: 114656 cycles per call - -Achieving these kind of results with formally verified code is quite -remarkable, especialy considering that performance is favorable for -newer chips. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/curve25519-x86_64.c | 3546 ++++++++++----------------- - 1 file changed, 1292 insertions(+), 2254 deletions(-) - ---- a/arch/x86/crypto/curve25519-x86_64.c -+++ b/arch/x86/crypto/curve25519-x86_64.c -@@ -1,8 +1,7 @@ --// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause -+// SPDX-License-Identifier: GPL-2.0 OR MIT - /* -- * Copyright (c) 2017 Armando Faz . All Rights Reserved. -- * Copyright (C) 2018-2019 Jason A. Donenfeld . All Rights Reserved. -- * Copyright (C) 2018 Samuel Neves . All Rights Reserved. -+ * Copyright (C) 2020 Jason A. Donenfeld . All Rights Reserved. 
-+ * Copyright (c) 2016-2020 INRIA, CMU and Microsoft Corporation - */ - - #include -@@ -16,2337 +15,1378 @@ - #include - #include - --static __ro_after_init DEFINE_STATIC_KEY_FALSE(curve25519_use_bmi2); --static __ro_after_init DEFINE_STATIC_KEY_FALSE(curve25519_use_adx); -- --enum { NUM_WORDS_ELTFP25519 = 4 }; --typedef __aligned(32) u64 eltfp25519_1w[NUM_WORDS_ELTFP25519]; --typedef __aligned(32) u64 eltfp25519_1w_buffer[2 * NUM_WORDS_ELTFP25519]; -- --#define mul_eltfp25519_1w_adx(c, a, b) do { \ -- mul_256x256_integer_adx(m.buffer, a, b); \ -- red_eltfp25519_1w_adx(c, m.buffer); \ --} while (0) -- --#define mul_eltfp25519_1w_bmi2(c, a, b) do { \ -- mul_256x256_integer_bmi2(m.buffer, a, b); \ -- red_eltfp25519_1w_bmi2(c, m.buffer); \ --} while (0) -- --#define sqr_eltfp25519_1w_adx(a) do { \ -- sqr_256x256_integer_adx(m.buffer, a); \ -- red_eltfp25519_1w_adx(a, m.buffer); \ --} while (0) -- --#define sqr_eltfp25519_1w_bmi2(a) do { \ -- sqr_256x256_integer_bmi2(m.buffer, a); \ -- red_eltfp25519_1w_bmi2(a, m.buffer); \ --} while (0) -- --#define mul_eltfp25519_2w_adx(c, a, b) do { \ -- mul2_256x256_integer_adx(m.buffer, a, b); \ -- red_eltfp25519_2w_adx(c, m.buffer); \ --} while (0) -- --#define mul_eltfp25519_2w_bmi2(c, a, b) do { \ -- mul2_256x256_integer_bmi2(m.buffer, a, b); \ -- red_eltfp25519_2w_bmi2(c, m.buffer); \ --} while (0) -- --#define sqr_eltfp25519_2w_adx(a) do { \ -- sqr2_256x256_integer_adx(m.buffer, a); \ -- red_eltfp25519_2w_adx(a, m.buffer); \ --} while (0) -- --#define sqr_eltfp25519_2w_bmi2(a) do { \ -- sqr2_256x256_integer_bmi2(m.buffer, a); \ -- red_eltfp25519_2w_bmi2(a, m.buffer); \ --} while (0) -- --#define sqrn_eltfp25519_1w_adx(a, times) do { \ -- int ____counter = (times); \ -- while (____counter-- > 0) \ -- sqr_eltfp25519_1w_adx(a); \ --} while (0) -- --#define sqrn_eltfp25519_1w_bmi2(a, times) do { \ -- int ____counter = (times); \ -- while (____counter-- > 0) \ -- sqr_eltfp25519_1w_bmi2(a); \ --} while (0) -- --#define 
copy_eltfp25519_1w(C, A) do { \ -- (C)[0] = (A)[0]; \ -- (C)[1] = (A)[1]; \ -- (C)[2] = (A)[2]; \ -- (C)[3] = (A)[3]; \ --} while (0) -- --#define setzero_eltfp25519_1w(C) do { \ -- (C)[0] = 0; \ -- (C)[1] = 0; \ -- (C)[2] = 0; \ -- (C)[3] = 0; \ --} while (0) -- --__aligned(32) static const u64 table_ladder_8k[252 * NUM_WORDS_ELTFP25519] = { -- /* 1 */ 0xfffffffffffffff3UL, 0xffffffffffffffffUL, -- 0xffffffffffffffffUL, 0x5fffffffffffffffUL, -- /* 2 */ 0x6b8220f416aafe96UL, 0x82ebeb2b4f566a34UL, -- 0xd5a9a5b075a5950fUL, 0x5142b2cf4b2488f4UL, -- /* 3 */ 0x6aaebc750069680cUL, 0x89cf7820a0f99c41UL, -- 0x2a58d9183b56d0f4UL, 0x4b5aca80e36011a4UL, -- /* 4 */ 0x329132348c29745dUL, 0xf4a2e616e1642fd7UL, -- 0x1e45bb03ff67bc34UL, 0x306912d0f42a9b4aUL, -- /* 5 */ 0xff886507e6af7154UL, 0x04f50e13dfeec82fUL, -- 0xaa512fe82abab5ceUL, 0x174e251a68d5f222UL, -- /* 6 */ 0xcf96700d82028898UL, 0x1743e3370a2c02c5UL, -- 0x379eec98b4e86eaaUL, 0x0c59888a51e0482eUL, -- /* 7 */ 0xfbcbf1d699b5d189UL, 0xacaef0d58e9fdc84UL, -- 0xc1c20d06231f7614UL, 0x2938218da274f972UL, -- /* 8 */ 0xf6af49beff1d7f18UL, 0xcc541c22387ac9c2UL, -- 0x96fcc9ef4015c56bUL, 0x69c1627c690913a9UL, -- /* 9 */ 0x7a86fd2f4733db0eUL, 0xfdb8c4f29e087de9UL, -- 0x095e4b1a8ea2a229UL, 0x1ad7a7c829b37a79UL, -- /* 10 */ 0x342d89cad17ea0c0UL, 0x67bedda6cced2051UL, -- 0x19ca31bf2bb42f74UL, 0x3df7b4c84980acbbUL, -- /* 11 */ 0xa8c6444dc80ad883UL, 0xb91e440366e3ab85UL, -- 0xc215cda00164f6d8UL, 0x3d867c6ef247e668UL, -- /* 12 */ 0xc7dd582bcc3e658cUL, 0xfd2c4748ee0e5528UL, -- 0xa0fd9b95cc9f4f71UL, 0x7529d871b0675ddfUL, -- /* 13 */ 0xb8f568b42d3cbd78UL, 0x1233011b91f3da82UL, -- 0x2dce6ccd4a7c3b62UL, 0x75e7fc8e9e498603UL, -- /* 14 */ 0x2f4f13f1fcd0b6ecUL, 0xf1a8ca1f29ff7a45UL, -- 0xc249c1a72981e29bUL, 0x6ebe0dbb8c83b56aUL, -- /* 15 */ 0x7114fa8d170bb222UL, 0x65a2dcd5bf93935fUL, -- 0xbdc41f68b59c979aUL, 0x2f0eef79a2ce9289UL, -- /* 16 */ 0x42ecbf0c083c37ceUL, 0x2930bc09ec496322UL, -- 0xf294b0c19cfeac0dUL, 0x3780aa4bedfabb80UL, -- /* 17 */ 
0x56c17d3e7cead929UL, 0xe7cb4beb2e5722c5UL, -- 0x0ce931732dbfe15aUL, 0x41b883c7621052f8UL, -- /* 18 */ 0xdbf75ca0c3d25350UL, 0x2936be086eb1e351UL, -- 0xc936e03cb4a9b212UL, 0x1d45bf82322225aaUL, -- /* 19 */ 0xe81ab1036a024cc5UL, 0xe212201c304c9a72UL, -- 0xc5d73fba6832b1fcUL, 0x20ffdb5a4d839581UL, -- /* 20 */ 0xa283d367be5d0fadUL, 0x6c2b25ca8b164475UL, -- 0x9d4935467caaf22eUL, 0x5166408eee85ff49UL, -- /* 21 */ 0x3c67baa2fab4e361UL, 0xb3e433c67ef35cefUL, -- 0x5259729241159b1cUL, 0x6a621892d5b0ab33UL, -- /* 22 */ 0x20b74a387555cdcbUL, 0x532aa10e1208923fUL, -- 0xeaa17b7762281dd1UL, 0x61ab3443f05c44bfUL, -- /* 23 */ 0x257a6c422324def8UL, 0x131c6c1017e3cf7fUL, -- 0x23758739f630a257UL, 0x295a407a01a78580UL, -- /* 24 */ 0xf8c443246d5da8d9UL, 0x19d775450c52fa5dUL, -- 0x2afcfc92731bf83dUL, 0x7d10c8e81b2b4700UL, -- /* 25 */ 0xc8e0271f70baa20bUL, 0x993748867ca63957UL, -- 0x5412efb3cb7ed4bbUL, 0x3196d36173e62975UL, -- /* 26 */ 0xde5bcad141c7dffcUL, 0x47cc8cd2b395c848UL, -- 0xa34cd942e11af3cbUL, 0x0256dbf2d04ecec2UL, -- /* 27 */ 0x875ab7e94b0e667fUL, 0xcad4dd83c0850d10UL, -- 0x47f12e8f4e72c79fUL, 0x5f1a87bb8c85b19bUL, -- /* 28 */ 0x7ae9d0b6437f51b8UL, 0x12c7ce5518879065UL, -- 0x2ade09fe5cf77aeeUL, 0x23a05a2f7d2c5627UL, -- /* 29 */ 0x5908e128f17c169aUL, 0xf77498dd8ad0852dUL, -- 0x74b4c4ceab102f64UL, 0x183abadd10139845UL, -- /* 30 */ 0xb165ba8daa92aaacUL, 0xd5c5ef9599386705UL, -- 0xbe2f8f0cf8fc40d1UL, 0x2701e635ee204514UL, -- /* 31 */ 0x629fa80020156514UL, 0xf223868764a8c1ceUL, -- 0x5b894fff0b3f060eUL, 0x60d9944cf708a3faUL, -- /* 32 */ 0xaeea001a1c7a201fUL, 0xebf16a633ee2ce63UL, -- 0x6f7709594c7a07e1UL, 0x79b958150d0208cbUL, -- /* 33 */ 0x24b55e5301d410e7UL, 0xe3a34edff3fdc84dUL, -- 0xd88768e4904032d8UL, 0x131384427b3aaeecUL, -- /* 34 */ 0x8405e51286234f14UL, 0x14dc4739adb4c529UL, -- 0xb8a2b5b250634ffdUL, 0x2fe2a94ad8a7ff93UL, -- /* 35 */ 0xec5c57efe843faddUL, 0x2843ce40f0bb9918UL, -- 0xa4b561d6cf3d6305UL, 0x743629bde8fb777eUL, -- /* 36 */ 0x343edd46bbaf738fUL, 
0xed981828b101a651UL, -- 0xa401760b882c797aUL, 0x1fc223e28dc88730UL, -- /* 37 */ 0x48604e91fc0fba0eUL, 0xb637f78f052c6fa4UL, -- 0x91ccac3d09e9239cUL, 0x23f7eed4437a687cUL, -- /* 38 */ 0x5173b1118d9bd800UL, 0x29d641b63189d4a7UL, -- 0xfdbf177988bbc586UL, 0x2959894fcad81df5UL, -- /* 39 */ 0xaebc8ef3b4bbc899UL, 0x4148995ab26992b9UL, -- 0x24e20b0134f92cfbUL, 0x40d158894a05dee8UL, -- /* 40 */ 0x46b00b1185af76f6UL, 0x26bac77873187a79UL, -- 0x3dc0bf95ab8fff5fUL, 0x2a608bd8945524d7UL, -- /* 41 */ 0x26449588bd446302UL, 0x7c4bc21c0388439cUL, -- 0x8e98a4f383bd11b2UL, 0x26218d7bc9d876b9UL, -- /* 42 */ 0xe3081542997c178aUL, 0x3c2d29a86fb6606fUL, -- 0x5c217736fa279374UL, 0x7dde05734afeb1faUL, -- /* 43 */ 0x3bf10e3906d42babUL, 0xe4f7803e1980649cUL, -- 0xe6053bf89595bf7aUL, 0x394faf38da245530UL, -- /* 44 */ 0x7a8efb58896928f4UL, 0xfbc778e9cc6a113cUL, -- 0x72670ce330af596fUL, 0x48f222a81d3d6cf7UL, -- /* 45 */ 0xf01fce410d72caa7UL, 0x5a20ecc7213b5595UL, -- 0x7bc21165c1fa1483UL, 0x07f89ae31da8a741UL, -- /* 46 */ 0x05d2c2b4c6830ff9UL, 0xd43e330fc6316293UL, -- 0xa5a5590a96d3a904UL, 0x705edb91a65333b6UL, -- /* 47 */ 0x048ee15e0bb9a5f7UL, 0x3240cfca9e0aaf5dUL, -- 0x8f4b71ceedc4a40bUL, 0x621c0da3de544a6dUL, -- /* 48 */ 0x92872836a08c4091UL, 0xce8375b010c91445UL, -- 0x8a72eb524f276394UL, 0x2667fcfa7ec83635UL, -- /* 49 */ 0x7f4c173345e8752aUL, 0x061b47feee7079a5UL, -- 0x25dd9afa9f86ff34UL, 0x3780cef5425dc89cUL, -- /* 50 */ 0x1a46035a513bb4e9UL, 0x3e1ef379ac575adaUL, -- 0xc78c5f1c5fa24b50UL, 0x321a967634fd9f22UL, -- /* 51 */ 0x946707b8826e27faUL, 0x3dca84d64c506fd0UL, -- 0xc189218075e91436UL, 0x6d9284169b3b8484UL, -- /* 52 */ 0x3a67e840383f2ddfUL, 0x33eec9a30c4f9b75UL, -- 0x3ec7c86fa783ef47UL, 0x26ec449fbac9fbc4UL, -- /* 53 */ 0x5c0f38cba09b9e7dUL, 0x81168cc762a3478cUL, -- 0x3e23b0d306fc121cUL, 0x5a238aa0a5efdcddUL, -- /* 54 */ 0x1ba26121c4ea43ffUL, 0x36f8c77f7c8832b5UL, -- 0x88fbea0b0adcf99aUL, 0x5ca9938ec25bebf9UL, -- /* 55 */ 0xd5436a5e51fccda0UL, 0x1dbc4797c2cd893bUL, -- 
0x19346a65d3224a08UL, 0x0f5034e49b9af466UL, -- /* 56 */ 0xf23c3967a1e0b96eUL, 0xe58b08fa867a4d88UL, -- 0xfb2fabc6a7341679UL, 0x2a75381eb6026946UL, -- /* 57 */ 0xc80a3be4c19420acUL, 0x66b1f6c681f2b6dcUL, -- 0x7cf7036761e93388UL, 0x25abbbd8a660a4c4UL, -- /* 58 */ 0x91ea12ba14fd5198UL, 0x684950fc4a3cffa9UL, -- 0xf826842130f5ad28UL, 0x3ea988f75301a441UL, -- /* 59 */ 0xc978109a695f8c6fUL, 0x1746eb4a0530c3f3UL, -- 0x444d6d77b4459995UL, 0x75952b8c054e5cc7UL, -- /* 60 */ 0xa3703f7915f4d6aaUL, 0x66c346202f2647d8UL, -- 0xd01469df811d644bUL, 0x77fea47d81a5d71fUL, -- /* 61 */ 0xc5e9529ef57ca381UL, 0x6eeeb4b9ce2f881aUL, -- 0xb6e91a28e8009bd6UL, 0x4b80be3e9afc3fecUL, -- /* 62 */ 0x7e3773c526aed2c5UL, 0x1b4afcb453c9a49dUL, -- 0xa920bdd7baffb24dUL, 0x7c54699f122d400eUL, -- /* 63 */ 0xef46c8e14fa94bc8UL, 0xe0b074ce2952ed5eUL, -- 0xbea450e1dbd885d5UL, 0x61b68649320f712cUL, -- /* 64 */ 0x8a485f7309ccbdd1UL, 0xbd06320d7d4d1a2dUL, -- 0x25232973322dbef4UL, 0x445dc4758c17f770UL, -- /* 65 */ 0xdb0434177cc8933cUL, 0xed6fe82175ea059fUL, -- 0x1efebefdc053db34UL, 0x4adbe867c65daf99UL, -- /* 66 */ 0x3acd71a2a90609dfUL, 0xe5e991856dd04050UL, -- 0x1ec69b688157c23cUL, 0x697427f6885cfe4dUL, -- /* 67 */ 0xd7be7b9b65e1a851UL, 0xa03d28d522c536ddUL, -- 0x28399d658fd2b645UL, 0x49e5b7e17c2641e1UL, -- /* 68 */ 0x6f8c3a98700457a4UL, 0x5078f0a25ebb6778UL, -- 0xd13c3ccbc382960fUL, 0x2e003258a7df84b1UL, -- /* 69 */ 0x8ad1f39be6296a1cUL, 0xc1eeaa652a5fbfb2UL, -- 0x33ee0673fd26f3cbUL, 0x59256173a69d2cccUL, -- /* 70 */ 0x41ea07aa4e18fc41UL, 0xd9fc19527c87a51eUL, -- 0xbdaacb805831ca6fUL, 0x445b652dc916694fUL, -- /* 71 */ 0xce92a3a7f2172315UL, 0x1edc282de11b9964UL, -- 0xa1823aafe04c314aUL, 0x790a2d94437cf586UL, -- /* 72 */ 0x71c447fb93f6e009UL, 0x8922a56722845276UL, -- 0xbf70903b204f5169UL, 0x2f7a89891ba319feUL, -- /* 73 */ 0x02a08eb577e2140cUL, 0xed9a4ed4427bdcf4UL, -- 0x5253ec44e4323cd1UL, 0x3e88363c14e9355bUL, -- /* 74 */ 0xaa66c14277110b8cUL, 0x1ae0391610a23390UL, -- 0x2030bd12c93fc2a2UL, 
0x3ee141579555c7abUL, -- /* 75 */ 0x9214de3a6d6e7d41UL, 0x3ccdd88607f17efeUL, -- 0x674f1288f8e11217UL, 0x5682250f329f93d0UL, -- /* 76 */ 0x6cf00b136d2e396eUL, 0x6e4cf86f1014debfUL, -- 0x5930b1b5bfcc4e83UL, 0x047069b48aba16b6UL, -- /* 77 */ 0x0d4ce4ab69b20793UL, 0xb24db91a97d0fb9eUL, -- 0xcdfa50f54e00d01dUL, 0x221b1085368bddb5UL, -- /* 78 */ 0xe7e59468b1e3d8d2UL, 0x53c56563bd122f93UL, -- 0xeee8a903e0663f09UL, 0x61efa662cbbe3d42UL, -- /* 79 */ 0x2cf8ddddde6eab2aUL, 0x9bf80ad51435f231UL, -- 0x5deadacec9f04973UL, 0x29275b5d41d29b27UL, -- /* 80 */ 0xcfde0f0895ebf14fUL, 0xb9aab96b054905a7UL, -- 0xcae80dd9a1c420fdUL, 0x0a63bf2f1673bbc7UL, -- /* 81 */ 0x092f6e11958fbc8cUL, 0x672a81e804822fadUL, -- 0xcac8351560d52517UL, 0x6f3f7722c8f192f8UL, -- /* 82 */ 0xf8ba90ccc2e894b7UL, 0x2c7557a438ff9f0dUL, -- 0x894d1d855ae52359UL, 0x68e122157b743d69UL, -- /* 83 */ 0xd87e5570cfb919f3UL, 0x3f2cdecd95798db9UL, -- 0x2121154710c0a2ceUL, 0x3c66a115246dc5b2UL, -- /* 84 */ 0xcbedc562294ecb72UL, 0xba7143c36a280b16UL, -- 0x9610c2efd4078b67UL, 0x6144735d946a4b1eUL, -- /* 85 */ 0x536f111ed75b3350UL, 0x0211db8c2041d81bUL, -- 0xf93cb1000e10413cUL, 0x149dfd3c039e8876UL, -- /* 86 */ 0xd479dde46b63155bUL, 0xb66e15e93c837976UL, -- 0xdafde43b1f13e038UL, 0x5fafda1a2e4b0b35UL, -- /* 87 */ 0x3600bbdf17197581UL, 0x3972050bbe3cd2c2UL, -- 0x5938906dbdd5be86UL, 0x34fce5e43f9b860fUL, -- /* 88 */ 0x75a8a4cd42d14d02UL, 0x828dabc53441df65UL, -- 0x33dcabedd2e131d3UL, 0x3ebad76fb814d25fUL, -- /* 89 */ 0xd4906f566f70e10fUL, 0x5d12f7aa51690f5aUL, -- 0x45adb16e76cefcf2UL, 0x01f768aead232999UL, -- /* 90 */ 0x2b6cc77b6248febdUL, 0x3cd30628ec3aaffdUL, -- 0xce1c0b80d4ef486aUL, 0x4c3bff2ea6f66c23UL, -- /* 91 */ 0x3f2ec4094aeaeb5fUL, 0x61b19b286e372ca7UL, -- 0x5eefa966de2a701dUL, 0x23b20565de55e3efUL, -- /* 92 */ 0xe301ca5279d58557UL, 0x07b2d4ce27c2874fUL, -- 0xa532cd8a9dcf1d67UL, 0x2a52fee23f2bff56UL, -- /* 93 */ 0x8624efb37cd8663dUL, 0xbbc7ac20ffbd7594UL, -- 0x57b85e9c82d37445UL, 0x7b3052cb86a6ec66UL, -- /* 94 */ 
0x3482f0ad2525e91eUL, 0x2cb68043d28edca0UL, -- 0xaf4f6d052e1b003aUL, 0x185f8c2529781b0aUL, -- /* 95 */ 0xaa41de5bd80ce0d6UL, 0x9407b2416853e9d6UL, -- 0x563ec36e357f4c3aUL, 0x4cc4b8dd0e297bceUL, -- /* 96 */ 0xa2fc1a52ffb8730eUL, 0x1811f16e67058e37UL, -- 0x10f9a366cddf4ee1UL, 0x72f4a0c4a0b9f099UL, -- /* 97 */ 0x8c16c06f663f4ea7UL, 0x693b3af74e970fbaUL, -- 0x2102e7f1d69ec345UL, 0x0ba53cbc968a8089UL, -- /* 98 */ 0xca3d9dc7fea15537UL, 0x4c6824bb51536493UL, -- 0xb9886314844006b1UL, 0x40d2a72ab454cc60UL, -- /* 99 */ 0x5936a1b712570975UL, 0x91b9d648debda657UL, -- 0x3344094bb64330eaUL, 0x006ba10d12ee51d0UL, -- /* 100 */ 0x19228468f5de5d58UL, 0x0eb12f4c38cc05b0UL, -- 0xa1039f9dd5601990UL, 0x4502d4ce4fff0e0bUL, -- /* 101 */ 0xeb2054106837c189UL, 0xd0f6544c6dd3b93cUL, -- 0x40727064c416d74fUL, 0x6e15c6114b502ef0UL, -- /* 102 */ 0x4df2a398cfb1a76bUL, 0x11256c7419f2f6b1UL, -- 0x4a497962066e6043UL, 0x705b3aab41355b44UL, -- /* 103 */ 0x365ef536d797b1d8UL, 0x00076bd622ddf0dbUL, -- 0x3bbf33b0e0575a88UL, 0x3777aa05c8e4ca4dUL, -- /* 104 */ 0x392745c85578db5fUL, 0x6fda4149dbae5ae2UL, -- 0xb1f0b00b8adc9867UL, 0x09963437d36f1da3UL, -- /* 105 */ 0x7e824e90a5dc3853UL, 0xccb5f6641f135cbdUL, -- 0x6736d86c87ce8fccUL, 0x625f3ce26604249fUL, -- /* 106 */ 0xaf8ac8059502f63fUL, 0x0c05e70a2e351469UL, -- 0x35292e9c764b6305UL, 0x1a394360c7e23ac3UL, -- /* 107 */ 0xd5c6d53251183264UL, 0x62065abd43c2b74fUL, -- 0xb5fbf5d03b973f9bUL, 0x13a3da3661206e5eUL, -- /* 108 */ 0xc6bd5837725d94e5UL, 0x18e30912205016c5UL, -- 0x2088ce1570033c68UL, 0x7fba1f495c837987UL, -- /* 109 */ 0x5a8c7423f2f9079dUL, 0x1735157b34023fc5UL, -- 0xe4f9b49ad2fab351UL, 0x6691ff72c878e33cUL, -- /* 110 */ 0x122c2adedc5eff3eUL, 0xf8dd4bf1d8956cf4UL, -- 0xeb86205d9e9e5bdaUL, 0x049b92b9d975c743UL, -- /* 111 */ 0xa5379730b0f6c05aUL, 0x72a0ffacc6f3a553UL, -- 0xb0032c34b20dcd6dUL, 0x470e9dbc88d5164aUL, -- /* 112 */ 0xb19cf10ca237c047UL, 0xb65466711f6c81a2UL, -- 0xb3321bd16dd80b43UL, 0x48c14f600c5fbe8eUL, -- /* 113 */ 0x66451c264aa6c803UL, 
0xb66e3904a4fa7da6UL, -- 0xd45f19b0b3128395UL, 0x31602627c3c9bc10UL, -- /* 114 */ 0x3120dc4832e4e10dUL, 0xeb20c46756c717f7UL, -- 0x00f52e3f67280294UL, 0x566d4fc14730c509UL, -- /* 115 */ 0x7e3a5d40fd837206UL, 0xc1e926dc7159547aUL, -- 0x216730fba68d6095UL, 0x22e8c3843f69cea7UL, -- /* 116 */ 0x33d074e8930e4b2bUL, 0xb6e4350e84d15816UL, -- 0x5534c26ad6ba2365UL, 0x7773c12f89f1f3f3UL, -- /* 117 */ 0x8cba404da57962aaUL, 0x5b9897a81999ce56UL, -- 0x508e862f121692fcUL, 0x3a81907fa093c291UL, -- /* 118 */ 0x0dded0ff4725a510UL, 0x10d8cc10673fc503UL, -- 0x5b9d151c9f1f4e89UL, 0x32a5c1d5cb09a44cUL, -- /* 119 */ 0x1e0aa442b90541fbUL, 0x5f85eb7cc1b485dbUL, -- 0xbee595ce8a9df2e5UL, 0x25e496c722422236UL, -- /* 120 */ 0x5edf3c46cd0fe5b9UL, 0x34e75a7ed2a43388UL, -- 0xe488de11d761e352UL, 0x0e878a01a085545cUL, -- /* 121 */ 0xba493c77e021bb04UL, 0x2b4d1843c7df899aUL, -- 0x9ea37a487ae80d67UL, 0x67a9958011e41794UL, -- /* 122 */ 0x4b58051a6697b065UL, 0x47e33f7d8d6ba6d4UL, -- 0xbb4da8d483ca46c1UL, 0x68becaa181c2db0dUL, -- /* 123 */ 0x8d8980e90b989aa5UL, 0xf95eb14a2c93c99bUL, -- 0x51c6c7c4796e73a2UL, 0x6e228363b5efb569UL, -- /* 124 */ 0xc6bbc0b02dd624c8UL, 0x777eb47dec8170eeUL, -- 0x3cde15a004cfafa9UL, 0x1dc6bc087160bf9bUL, -- /* 125 */ 0x2e07e043eec34002UL, 0x18e9fc677a68dc7fUL, -- 0xd8da03188bd15b9aUL, 0x48fbc3bb00568253UL, -- /* 126 */ 0x57547d4cfb654ce1UL, 0xd3565b82a058e2adUL, -- 0xf63eaf0bbf154478UL, 0x47531ef114dfbb18UL, -- /* 127 */ 0xe1ec630a4278c587UL, 0x5507d546ca8e83f3UL, -- 0x85e135c63adc0c2bUL, 0x0aa7efa85682844eUL, -- /* 128 */ 0x72691ba8b3e1f615UL, 0x32b4e9701fbe3ffaUL, -- 0x97b6d92e39bb7868UL, 0x2cfe53dea02e39e8UL, -- /* 129 */ 0x687392cd85cd52b0UL, 0x27ff66c910e29831UL, -- 0x97134556a9832d06UL, 0x269bb0360a84f8a0UL, -- /* 130 */ 0x706e55457643f85cUL, 0x3734a48c9b597d1bUL, -- 0x7aee91e8c6efa472UL, 0x5cd6abc198a9d9e0UL, -- /* 131 */ 0x0e04de06cb3ce41aUL, 0xd8c6eb893402e138UL, -- 0x904659bb686e3772UL, 0x7215c371746ba8c8UL, -- /* 132 */ 0xfd12a97eeae4a2d9UL, 0x9514b7516394f2c5UL, 
-- 0x266fd5809208f294UL, 0x5c847085619a26b9UL, -- /* 133 */ 0x52985410fed694eaUL, 0x3c905b934a2ed254UL, -- 0x10bb47692d3be467UL, 0x063b3d2d69e5e9e1UL, -- /* 134 */ 0x472726eedda57debUL, 0xefb6c4ae10f41891UL, -- 0x2b1641917b307614UL, 0x117c554fc4f45b7cUL, -- /* 135 */ 0xc07cf3118f9d8812UL, 0x01dbd82050017939UL, -- 0xd7e803f4171b2827UL, 0x1015e87487d225eaUL, -- /* 136 */ 0xc58de3fed23acc4dUL, 0x50db91c294a7be2dUL, -- 0x0b94d43d1c9cf457UL, 0x6b1640fa6e37524aUL, -- /* 137 */ 0x692f346c5fda0d09UL, 0x200b1c59fa4d3151UL, -- 0xb8c46f760777a296UL, 0x4b38395f3ffdfbcfUL, -- /* 138 */ 0x18d25e00be54d671UL, 0x60d50582bec8aba6UL, -- 0x87ad8f263b78b982UL, 0x50fdf64e9cda0432UL, -- /* 139 */ 0x90f567aac578dcf0UL, 0xef1e9b0ef2a3133bUL, -- 0x0eebba9242d9de71UL, 0x15473c9bf03101c7UL, -- /* 140 */ 0x7c77e8ae56b78095UL, 0xb678e7666e6f078eUL, -- 0x2da0b9615348ba1fUL, 0x7cf931c1ff733f0bUL, -- /* 141 */ 0x26b357f50a0a366cUL, 0xe9708cf42b87d732UL, -- 0xc13aeea5f91cb2c0UL, 0x35d90c991143bb4cUL, -- /* 142 */ 0x47c1c404a9a0d9dcUL, 0x659e58451972d251UL, -- 0x3875a8c473b38c31UL, 0x1fbd9ed379561f24UL, -- /* 143 */ 0x11fabc6fd41ec28dUL, 0x7ef8dfe3cd2a2dcaUL, -- 0x72e73b5d8c404595UL, 0x6135fa4954b72f27UL, -- /* 144 */ 0xccfc32a2de24b69cUL, 0x3f55698c1f095d88UL, -- 0xbe3350ed5ac3f929UL, 0x5e9bf806ca477eebUL, -- /* 145 */ 0xe9ce8fb63c309f68UL, 0x5376f63565e1f9f4UL, -- 0xd1afcfb35a6393f1UL, 0x6632a1ede5623506UL, -- /* 146 */ 0x0b7d6c390c2ded4cUL, 0x56cb3281df04cb1fUL, -- 0x66305a1249ecc3c7UL, 0x5d588b60a38ca72aUL, -- /* 147 */ 0xa6ecbf78e8e5f42dUL, 0x86eeb44b3c8a3eecUL, -- 0xec219c48fbd21604UL, 0x1aaf1af517c36731UL, -- /* 148 */ 0xc306a2836769bde7UL, 0x208280622b1e2adbUL, -- 0x8027f51ffbff94a6UL, 0x76cfa1ce1124f26bUL, -- /* 149 */ 0x18eb00562422abb6UL, 0xf377c4d58f8c29c3UL, -- 0x4dbbc207f531561aUL, 0x0253b7f082128a27UL, -- /* 150 */ 0x3d1f091cb62c17e0UL, 0x4860e1abd64628a9UL, -- 0x52d17436309d4253UL, 0x356f97e13efae576UL, -- /* 151 */ 0xd351e11aa150535bUL, 0x3e6b45bb1dd878ccUL, -- 
0x0c776128bed92c98UL, 0x1d34ae93032885b8UL, -- /* 152 */ 0x4ba0488ca85ba4c3UL, 0x985348c33c9ce6ceUL, -- 0x66124c6f97bda770UL, 0x0f81a0290654124aUL, -- /* 153 */ 0x9ed09ca6569b86fdUL, 0x811009fd18af9a2dUL, -- 0xff08d03f93d8c20aUL, 0x52a148199faef26bUL, -- /* 154 */ 0x3e03f9dc2d8d1b73UL, 0x4205801873961a70UL, -- 0xc0d987f041a35970UL, 0x07aa1f15a1c0d549UL, -- /* 155 */ 0xdfd46ce08cd27224UL, 0x6d0a024f934e4239UL, -- 0x808a7a6399897b59UL, 0x0a4556e9e13d95a2UL, -- /* 156 */ 0xd21a991fe9c13045UL, 0x9b0e8548fe7751b8UL, -- 0x5da643cb4bf30035UL, 0x77db28d63940f721UL, -- /* 157 */ 0xfc5eeb614adc9011UL, 0x5229419ae8c411ebUL, -- 0x9ec3e7787d1dcf74UL, 0x340d053e216e4cb5UL, -- /* 158 */ 0xcac7af39b48df2b4UL, 0xc0faec2871a10a94UL, -- 0x140a69245ca575edUL, 0x0cf1c37134273a4cUL, -- /* 159 */ 0xc8ee306ac224b8a5UL, 0x57eaee7ccb4930b0UL, -- 0xa1e806bdaacbe74fUL, 0x7d9a62742eeb657dUL, -- /* 160 */ 0x9eb6b6ef546c4830UL, 0x885cca1fddb36e2eUL, -- 0xe6b9f383ef0d7105UL, 0x58654fef9d2e0412UL, -- /* 161 */ 0xa905c4ffbe0e8e26UL, 0x942de5df9b31816eUL, -- 0x497d723f802e88e1UL, 0x30684dea602f408dUL, -- /* 162 */ 0x21e5a278a3e6cb34UL, 0xaefb6e6f5b151dc4UL, -- 0xb30b8e049d77ca15UL, 0x28c3c9cf53b98981UL, -- /* 163 */ 0x287fb721556cdd2aUL, 0x0d317ca897022274UL, -- 0x7468c7423a543258UL, 0x4a7f11464eb5642fUL, -- /* 164 */ 0xa237a4774d193aa6UL, 0xd865986ea92129a1UL, -- 0x24c515ecf87c1a88UL, 0x604003575f39f5ebUL, -- /* 165 */ 0x47b9f189570a9b27UL, 0x2b98cede465e4b78UL, -- 0x026df551dbb85c20UL, 0x74fcd91047e21901UL, -- /* 166 */ 0x13e2a90a23c1bfa3UL, 0x0cb0074e478519f6UL, -- 0x5ff1cbbe3af6cf44UL, 0x67fe5438be812dbeUL, -- /* 167 */ 0xd13cf64fa40f05b0UL, 0x054dfb2f32283787UL, -- 0x4173915b7f0d2aeaUL, 0x482f144f1f610d4eUL, -- /* 168 */ 0xf6210201b47f8234UL, 0x5d0ae1929e70b990UL, -- 0xdcd7f455b049567cUL, 0x7e93d0f1f0916f01UL, -- /* 169 */ 0xdd79cbf18a7db4faUL, 0xbe8391bf6f74c62fUL, -- 0x027145d14b8291bdUL, 0x585a73ea2cbf1705UL, -- /* 170 */ 0x485ca03e928a0db2UL, 0x10fc01a5742857e7UL, -- 0x2f482edbd6d551a7UL, 
0x0f0433b5048fdb8aUL, -- /* 171 */ 0x60da2e8dd7dc6247UL, 0x88b4c9d38cd4819aUL, -- 0x13033ac001f66697UL, 0x273b24fe3b367d75UL, -- /* 172 */ 0xc6e8f66a31b3b9d4UL, 0x281514a494df49d5UL, -- 0xd1726fdfc8b23da7UL, 0x4b3ae7d103dee548UL, -- /* 173 */ 0xc6256e19ce4b9d7eUL, 0xff5c5cf186e3c61cUL, -- 0xacc63ca34b8ec145UL, 0x74621888fee66574UL, -- /* 174 */ 0x956f409645290a1eUL, 0xef0bf8e3263a962eUL, -- 0xed6a50eb5ec2647bUL, 0x0694283a9dca7502UL, -- /* 175 */ 0x769b963643a2dcd1UL, 0x42b7c8ea09fc5353UL, -- 0x4f002aee13397eabUL, 0x63005e2c19b7d63aUL, -- /* 176 */ 0xca6736da63023beaUL, 0x966c7f6db12a99b7UL, -- 0xace09390c537c5e1UL, 0x0b696063a1aa89eeUL, -- /* 177 */ 0xebb03e97288c56e5UL, 0x432a9f9f938c8be8UL, -- 0xa6a5a93d5b717f71UL, 0x1a5fb4c3e18f9d97UL, -- /* 178 */ 0x1c94e7ad1c60cdceUL, 0xee202a43fc02c4a0UL, -- 0x8dafe4d867c46a20UL, 0x0a10263c8ac27b58UL, -- /* 179 */ 0xd0dea9dfe4432a4aUL, 0x856af87bbe9277c5UL, -- 0xce8472acc212c71aUL, 0x6f151b6d9bbb1e91UL, -- /* 180 */ 0x26776c527ceed56aUL, 0x7d211cb7fbf8faecUL, -- 0x37ae66a6fd4609ccUL, 0x1f81b702d2770c42UL, -- /* 181 */ 0x2fb0b057eac58392UL, 0xe1dd89fe29744e9dUL, -- 0xc964f8eb17beb4f8UL, 0x29571073c9a2d41eUL, -- /* 182 */ 0xa948a18981c0e254UL, 0x2df6369b65b22830UL, -- 0xa33eb2d75fcfd3c6UL, 0x078cd6ec4199a01fUL, -- /* 183 */ 0x4a584a41ad900d2fUL, 0x32142b78e2c74c52UL, -- 0x68c4e8338431c978UL, 0x7f69ea9008689fc2UL, -- /* 184 */ 0x52f2c81e46a38265UL, 0xfd78072d04a832fdUL, -- 0x8cd7d5fa25359e94UL, 0x4de71b7454cc29d2UL, -- /* 185 */ 0x42eb60ad1eda6ac9UL, 0x0aad37dfdbc09c3aUL, -- 0x81004b71e33cc191UL, 0x44e6be345122803cUL, -- /* 186 */ 0x03fe8388ba1920dbUL, 0xf5d57c32150db008UL, -- 0x49c8c4281af60c29UL, 0x21edb518de701aeeUL, -- /* 187 */ 0x7fb63e418f06dc99UL, 0xa4460d99c166d7b8UL, -- 0x24dd5248ce520a83UL, 0x5ec3ad712b928358UL, -- /* 188 */ 0x15022a5fbd17930fUL, 0xa4f64a77d82570e3UL, -- 0x12bc8d6915783712UL, 0x498194c0fc620abbUL, -- /* 189 */ 0x38a2d9d255686c82UL, 0x785c6bd9193e21f0UL, -- 0xe4d5c81ab24a5484UL, 0x56307860b2e20989UL, 
-- /* 190 */ 0x429d55f78b4d74c4UL, 0x22f1834643350131UL, -- 0x1e60c24598c71fffUL, 0x59f2f014979983efUL, -- /* 191 */ 0x46a47d56eb494a44UL, 0x3e22a854d636a18eUL, -- 0xb346e15274491c3bUL, 0x2ceafd4e5390cde7UL, -- /* 192 */ 0xba8a8538be0d6675UL, 0x4b9074bb50818e23UL, -- 0xcbdab89085d304c3UL, 0x61a24fe0e56192c4UL, -- /* 193 */ 0xcb7615e6db525bcbUL, 0xdd7d8c35a567e4caUL, -- 0xe6b4153acafcdd69UL, 0x2d668e097f3c9766UL, -- /* 194 */ 0xa57e7e265ce55ef0UL, 0x5d9f4e527cd4b967UL, -- 0xfbc83606492fd1e5UL, 0x090d52beb7c3f7aeUL, -- /* 195 */ 0x09b9515a1e7b4d7cUL, 0x1f266a2599da44c0UL, -- 0xa1c49548e2c55504UL, 0x7ef04287126f15ccUL, -- /* 196 */ 0xfed1659dbd30ef15UL, 0x8b4ab9eec4e0277bUL, -- 0x884d6236a5df3291UL, 0x1fd96ea6bf5cf788UL, -- /* 197 */ 0x42a161981f190d9aUL, 0x61d849507e6052c1UL, -- 0x9fe113bf285a2cd5UL, 0x7c22d676dbad85d8UL, -- /* 198 */ 0x82e770ed2bfbd27dUL, 0x4c05b2ece996f5a5UL, -- 0xcd40a9c2b0900150UL, 0x5895319213d9bf64UL, -- /* 199 */ 0xe7cc5d703fea2e08UL, 0xb50c491258e2188cUL, -- 0xcce30baa48205bf0UL, 0x537c659ccfa32d62UL, -- /* 200 */ 0x37b6623a98cfc088UL, 0xfe9bed1fa4d6aca4UL, -- 0x04d29b8e56a8d1b0UL, 0x725f71c40b519575UL, -- /* 201 */ 0x28c7f89cd0339ce6UL, 0x8367b14469ddc18bUL, -- 0x883ada83a6a1652cUL, 0x585f1974034d6c17UL, -- /* 202 */ 0x89cfb266f1b19188UL, 0xe63b4863e7c35217UL, -- 0xd88c9da6b4c0526aUL, 0x3e035c9df0954635UL, -- /* 203 */ 0xdd9d5412fb45de9dUL, 0xdd684532e4cff40dUL, -- 0x4b5c999b151d671cUL, 0x2d8c2cc811e7f690UL, -- /* 204 */ 0x7f54be1d90055d40UL, 0xa464c5df464aaf40UL, -- 0x33979624f0e917beUL, 0x2c018dc527356b30UL, -- /* 205 */ 0xa5415024e330b3d4UL, 0x73ff3d96691652d3UL, -- 0x94ec42c4ef9b59f1UL, 0x0747201618d08e5aUL, -- /* 206 */ 0x4d6ca48aca411c53UL, 0x66415f2fcfa66119UL, -- 0x9c4dd40051e227ffUL, 0x59810bc09a02f7ebUL, -- /* 207 */ 0x2a7eb171b3dc101dUL, 0x441c5ab99ffef68eUL, -- 0x32025c9b93b359eaUL, 0x5e8ce0a71e9d112fUL, -- /* 208 */ 0xbfcccb92429503fdUL, 0xd271ba752f095d55UL, -- 0x345ead5e972d091eUL, 0x18c8df11a83103baUL, -- /* 209 */ 
0x90cd949a9aed0f4cUL, 0xc5d1f4cb6660e37eUL, -- 0xb8cac52d56c52e0bUL, 0x6e42e400c5808e0dUL, -- /* 210 */ 0xa3b46966eeaefd23UL, 0x0c4f1f0be39ecdcaUL, -- 0x189dc8c9d683a51dUL, 0x51f27f054c09351bUL, -- /* 211 */ 0x4c487ccd2a320682UL, 0x587ea95bb3df1c96UL, -- 0xc8ccf79e555cb8e8UL, 0x547dc829a206d73dUL, -- /* 212 */ 0xb822a6cd80c39b06UL, 0xe96d54732000d4c6UL, -- 0x28535b6f91463b4dUL, 0x228f4660e2486e1dUL, -- /* 213 */ 0x98799538de8d3abfUL, 0x8cd8330045ebca6eUL, -- 0x79952a008221e738UL, 0x4322e1a7535cd2bbUL, -- /* 214 */ 0xb114c11819d1801cUL, 0x2016e4d84f3f5ec7UL, -- 0xdd0e2df409260f4cUL, 0x5ec362c0ae5f7266UL, -- /* 215 */ 0xc0462b18b8b2b4eeUL, 0x7cc8d950274d1afbUL, -- 0xf25f7105436b02d2UL, 0x43bbf8dcbff9ccd3UL, -- /* 216 */ 0xb6ad1767a039e9dfUL, 0xb0714da8f69d3583UL, -- 0x5e55fa18b42931f5UL, 0x4ed5558f33c60961UL, -- /* 217 */ 0x1fe37901c647a5ddUL, 0x593ddf1f8081d357UL, -- 0x0249a4fd813fd7a6UL, 0x69acca274e9caf61UL, -- /* 218 */ 0x047ba3ea330721c9UL, 0x83423fc20e7e1ea0UL, -- 0x1df4c0af01314a60UL, 0x09a62dab89289527UL, -- /* 219 */ 0xa5b325a49cc6cb00UL, 0xe94b5dc654b56cb6UL, -- 0x3be28779adc994a0UL, 0x4296e8f8ba3a4aadUL, -- /* 220 */ 0x328689761e451eabUL, 0x2e4d598bff59594aUL, -- 0x49b96853d7a7084aUL, 0x4980a319601420a8UL, -- /* 221 */ 0x9565b9e12f552c42UL, 0x8a5318db7100fe96UL, -- 0x05c90b4d43add0d7UL, 0x538b4cd66a5d4edaUL, -- /* 222 */ 0xf4e94fc3e89f039fUL, 0x592c9af26f618045UL, -- 0x08a36eb5fd4b9550UL, 0x25fffaf6c2ed1419UL, -- /* 223 */ 0x34434459cc79d354UL, 0xeeecbfb4b1d5476bUL, -- 0xddeb34a061615d99UL, 0x5129cecceb64b773UL, -- /* 224 */ 0xee43215894993520UL, 0x772f9c7cf14c0b3bUL, -- 0xd2e2fce306bedad5UL, 0x715f42b546f06a97UL, -- /* 225 */ 0x434ecdceda5b5f1aUL, 0x0da17115a49741a9UL, -- 0x680bd77c73edad2eUL, 0x487c02354edd9041UL, -- /* 226 */ 0xb8efeff3a70ed9c4UL, 0x56a32aa3e857e302UL, -- 0xdf3a68bd48a2a5a0UL, 0x07f650b73176c444UL, -- /* 227 */ 0xe38b9b1626e0ccb1UL, 0x79e053c18b09fb36UL, -- 0x56d90319c9f94964UL, 0x1ca941e7ac9ff5c4UL, -- /* 228 */ 0x49c4df29162fa0bbUL, 
0x8488cf3282b33305UL, -- 0x95dfda14cabb437dUL, 0x3391f78264d5ad86UL, -- /* 229 */ 0x729ae06ae2b5095dUL, 0xd58a58d73259a946UL, -- 0xe9834262d13921edUL, 0x27fedafaa54bb592UL, -- /* 230 */ 0xa99dc5b829ad48bbUL, 0x5f025742499ee260UL, -- 0x802c8ecd5d7513fdUL, 0x78ceb3ef3f6dd938UL, -- /* 231 */ 0xc342f44f8a135d94UL, 0x7b9edb44828cdda3UL, -- 0x9436d11a0537cfe7UL, 0x5064b164ec1ab4c8UL, -- /* 232 */ 0x7020eccfd37eb2fcUL, 0x1f31ea3ed90d25fcUL, -- 0x1b930d7bdfa1bb34UL, 0x5344467a48113044UL, -- /* 233 */ 0x70073170f25e6dfbUL, 0xe385dc1a50114cc8UL, -- 0x2348698ac8fc4f00UL, 0x2a77a55284dd40d8UL, -- /* 234 */ 0xfe06afe0c98c6ce4UL, 0xc235df96dddfd6e4UL, -- 0x1428d01e33bf1ed3UL, 0x785768ec9300bdafUL, -- /* 235 */ 0x9702e57a91deb63bUL, 0x61bdb8bfe5ce8b80UL, -- 0x645b426f3d1d58acUL, 0x4804a82227a557bcUL, -- /* 236 */ 0x8e57048ab44d2601UL, 0x68d6501a4b3a6935UL, -- 0xc39c9ec3f9e1c293UL, 0x4172f257d4de63e2UL, -- /* 237 */ 0xd368b450330c6401UL, 0x040d3017418f2391UL, -- 0x2c34bb6090b7d90dUL, 0x16f649228fdfd51fUL, -- /* 238 */ 0xbea6818e2b928ef5UL, 0xe28ccf91cdc11e72UL, -- 0x594aaa68e77a36cdUL, 0x313034806c7ffd0fUL, -- /* 239 */ 0x8a9d27ac2249bd65UL, 0x19a3b464018e9512UL, -- 0xc26ccff352b37ec7UL, 0x056f68341d797b21UL, -- /* 240 */ 0x5e79d6757efd2327UL, 0xfabdbcb6553afe15UL, -- 0xd3e7222c6eaf5a60UL, 0x7046c76d4dae743bUL, -- /* 241 */ 0x660be872b18d4a55UL, 0x19992518574e1496UL, -- 0xc103053a302bdcbbUL, 0x3ed8e9800b218e8eUL, -- /* 242 */ 0x7b0b9239fa75e03eUL, 0xefe9fb684633c083UL, -- 0x98a35fbe391a7793UL, 0x6065510fe2d0fe34UL, -- /* 243 */ 0x55cb668548abad0cUL, 0xb4584548da87e527UL, -- 0x2c43ecea0107c1ddUL, 0x526028809372de35UL, -- /* 244 */ 0x3415c56af9213b1fUL, 0x5bee1a4d017e98dbUL, -- 0x13f6b105b5cf709bUL, 0x5ff20e3482b29ab6UL, -- /* 245 */ 0x0aa29c75cc2e6c90UL, 0xfc7d73ca3a70e206UL, -- 0x899fc38fc4b5c515UL, 0x250386b124ffc207UL, -- /* 246 */ 0x54ea28d5ae3d2b56UL, 0x9913149dd6de60ceUL, -- 0x16694fc58f06d6c1UL, 0x46b23975eb018fc7UL, -- /* 247 */ 0x470a6a0fb4b7b4e2UL, 0x5d92475a8f7253deUL, 
-- 0xabeee5b52fbd3adbUL, 0x7fa20801a0806968UL, -- /* 248 */ 0x76f3faf19f7714d2UL, 0xb3e840c12f4660c3UL, -- 0x0fb4cd8df212744eUL, 0x4b065a251d3a2dd2UL, -- /* 249 */ 0x5cebde383d77cd4aUL, 0x6adf39df882c9cb1UL, -- 0xa2dd242eb09af759UL, 0x3147c0e50e5f6422UL, -- /* 250 */ 0x164ca5101d1350dbUL, 0xf8d13479c33fc962UL, -- 0xe640ce4d13e5da08UL, 0x4bdee0c45061f8baUL, -- /* 251 */ 0xd7c46dc1a4edb1c9UL, 0x5514d7b6437fd98aUL, -- 0x58942f6bb2a1c00bUL, 0x2dffb2ab1d70710eUL, -- /* 252 */ 0xccdfcf2fc18b6d68UL, 0xa8ebcba8b7806167UL, -- 0x980697f95e2937e3UL, 0x02fbba1cd0126e8cUL --}; -- --/* c is two 512-bit products: c0[0:7]=a0[0:3]*b0[0:3] and c1[8:15]=a1[4:7]*b1[4:7] -- * a is two 256-bit integers: a0[0:3] and a1[4:7] -- * b is two 256-bit integers: b0[0:3] and b1[4:7] -- */ --static void mul2_256x256_integer_adx(u64 *const c, const u64 *const a, -- const u64 *const b) --{ -- asm volatile( -- "xorl %%r14d, %%r14d ;" -- "movq (%1), %%rdx; " /* A[0] */ -- "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */ -- "xorl %%r10d, %%r10d ;" -- "movq %%r8, (%0) ;" -- "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] */ -- "adox %%r10, %%r15 ;" -- "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */ -- "adox %%r8, %%rax ;" -- "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */ -- "adox %%r10, %%rbx ;" -- /******************************************/ -- "adox %%r14, %%rcx ;" -- -- "movq 8(%1), %%rdx; " /* A[1] */ -- "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */ -- "adox %%r15, %%r8 ;" -- "movq %%r8, 8(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */ -- "adox %%r10, %%r9 ;" -- "adcx %%r9, %%rax ;" -- "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */ -- "adox %%r8, %%r11 ;" -- "adcx %%r11, %%rbx ;" -- "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */ -- "adox %%r10, %%r13 ;" -- "adcx %%r13, %%rcx ;" -- /******************************************/ -- "adox %%r14, %%r15 ;" -- "adcx %%r14, %%r15 ;" -- -- "movq 16(%1), %%rdx; " /* A[2] */ -- "xorl %%r10d, %%r10d ;" -- "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */ -- "adox %%rax, %%r8 ;" -- 
"movq %%r8, 16(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */ -- "adox %%r10, %%r9 ;" -- "adcx %%r9, %%rbx ;" -- "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */ -- "adox %%r8, %%r11 ;" -- "adcx %%r11, %%rcx ;" -- "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */ -- "adox %%r10, %%r13 ;" -- "adcx %%r13, %%r15 ;" -- /******************************************/ -- "adox %%r14, %%rax ;" -- "adcx %%r14, %%rax ;" -- -- "movq 24(%1), %%rdx; " /* A[3] */ -- "xorl %%r10d, %%r10d ;" -- "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */ -- "adox %%rbx, %%r8 ;" -- "movq %%r8, 24(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */ -- "adox %%r10, %%r9 ;" -- "adcx %%r9, %%rcx ;" -- "movq %%rcx, 32(%0) ;" -- "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */ -- "adox %%r8, %%r11 ;" -- "adcx %%r11, %%r15 ;" -- "movq %%r15, 40(%0) ;" -- "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */ -- "adox %%r10, %%r13 ;" -- "adcx %%r13, %%rax ;" -- "movq %%rax, 48(%0) ;" -- /******************************************/ -- "adox %%r14, %%rbx ;" -- "adcx %%r14, %%rbx ;" -- "movq %%rbx, 56(%0) ;" -- -- "movq 32(%1), %%rdx; " /* C[0] */ -- "mulx 32(%2), %%r8, %%r15; " /* C[0]*D[0] */ -- "xorl %%r10d, %%r10d ;" -- "movq %%r8, 64(%0);" -- "mulx 40(%2), %%r10, %%rax; " /* C[0]*D[1] */ -- "adox %%r10, %%r15 ;" -- "mulx 48(%2), %%r8, %%rbx; " /* C[0]*D[2] */ -- "adox %%r8, %%rax ;" -- "mulx 56(%2), %%r10, %%rcx; " /* C[0]*D[3] */ -- "adox %%r10, %%rbx ;" -- /******************************************/ -- "adox %%r14, %%rcx ;" -- -- "movq 40(%1), %%rdx; " /* C[1] */ -- "xorl %%r10d, %%r10d ;" -- "mulx 32(%2), %%r8, %%r9; " /* C[1]*D[0] */ -- "adox %%r15, %%r8 ;" -- "movq %%r8, 72(%0);" -- "mulx 40(%2), %%r10, %%r11; " /* C[1]*D[1] */ -- "adox %%r10, %%r9 ;" -- "adcx %%r9, %%rax ;" -- "mulx 48(%2), %%r8, %%r13; " /* C[1]*D[2] */ -- "adox %%r8, %%r11 ;" -- "adcx %%r11, %%rbx ;" -- "mulx 56(%2), %%r10, %%r15; " /* C[1]*D[3] */ -- "adox %%r10, %%r13 ;" -- "adcx %%r13, %%rcx ;" -- 
/******************************************/ -- "adox %%r14, %%r15 ;" -- "adcx %%r14, %%r15 ;" -- -- "movq 48(%1), %%rdx; " /* C[2] */ -- "xorl %%r10d, %%r10d ;" -- "mulx 32(%2), %%r8, %%r9; " /* C[2]*D[0] */ -- "adox %%rax, %%r8 ;" -- "movq %%r8, 80(%0);" -- "mulx 40(%2), %%r10, %%r11; " /* C[2]*D[1] */ -- "adox %%r10, %%r9 ;" -- "adcx %%r9, %%rbx ;" -- "mulx 48(%2), %%r8, %%r13; " /* C[2]*D[2] */ -- "adox %%r8, %%r11 ;" -- "adcx %%r11, %%rcx ;" -- "mulx 56(%2), %%r10, %%rax; " /* C[2]*D[3] */ -- "adox %%r10, %%r13 ;" -- "adcx %%r13, %%r15 ;" -- /******************************************/ -- "adox %%r14, %%rax ;" -- "adcx %%r14, %%rax ;" -- -- "movq 56(%1), %%rdx; " /* C[3] */ -- "xorl %%r10d, %%r10d ;" -- "mulx 32(%2), %%r8, %%r9; " /* C[3]*D[0] */ -- "adox %%rbx, %%r8 ;" -- "movq %%r8, 88(%0);" -- "mulx 40(%2), %%r10, %%r11; " /* C[3]*D[1] */ -- "adox %%r10, %%r9 ;" -- "adcx %%r9, %%rcx ;" -- "movq %%rcx, 96(%0) ;" -- "mulx 48(%2), %%r8, %%r13; " /* C[3]*D[2] */ -- "adox %%r8, %%r11 ;" -- "adcx %%r11, %%r15 ;" -- "movq %%r15, 104(%0) ;" -- "mulx 56(%2), %%r10, %%rbx; " /* C[3]*D[3] */ -- "adox %%r10, %%r13 ;" -- "adcx %%r13, %%rax ;" -- "movq %%rax, 112(%0) ;" -- /******************************************/ -- "adox %%r14, %%rbx ;" -- "adcx %%r14, %%rbx ;" -- "movq %%rbx, 120(%0) ;" -- : -- : "r"(c), "r"(a), "r"(b) -- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -- "%r10", "%r11", "%r13", "%r14", "%r15"); --} -- --static void mul2_256x256_integer_bmi2(u64 *const c, const u64 *const a, -- const u64 *const b) -+static __always_inline u64 eq_mask(u64 a, u64 b) - { -- asm volatile( -- "movq (%1), %%rdx; " /* A[0] */ -- "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */ -- "movq %%r8, (%0) ;" -- "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] */ -- "addq %%r10, %%r15 ;" -- "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */ -- "adcq %%r8, %%rax ;" -- "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */ -- "adcq %%r10, %%rbx ;" -- /******************************************/ 
-- "adcq $0, %%rcx ;" -- -- "movq 8(%1), %%rdx; " /* A[1] */ -- "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */ -- "addq %%r15, %%r8 ;" -- "movq %%r8, 8(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */ -- "adcq %%r10, %%r9 ;" -- "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */ -- "adcq %%r8, %%r11 ;" -- "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */ -- "adcq %%r10, %%r13 ;" -- /******************************************/ -- "adcq $0, %%r15 ;" -- -- "addq %%r9, %%rax ;" -- "adcq %%r11, %%rbx ;" -- "adcq %%r13, %%rcx ;" -- "adcq $0, %%r15 ;" -- -- "movq 16(%1), %%rdx; " /* A[2] */ -- "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */ -- "addq %%rax, %%r8 ;" -- "movq %%r8, 16(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */ -- "adcq %%r10, %%r9 ;" -- "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */ -- "adcq %%r8, %%r11 ;" -- "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */ -- "adcq %%r10, %%r13 ;" -- /******************************************/ -- "adcq $0, %%rax ;" -- -- "addq %%r9, %%rbx ;" -- "adcq %%r11, %%rcx ;" -- "adcq %%r13, %%r15 ;" -- "adcq $0, %%rax ;" -- -- "movq 24(%1), %%rdx; " /* A[3] */ -- "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */ -- "addq %%rbx, %%r8 ;" -- "movq %%r8, 24(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */ -- "adcq %%r10, %%r9 ;" -- "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */ -- "adcq %%r8, %%r11 ;" -- "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */ -- "adcq %%r10, %%r13 ;" -- /******************************************/ -- "adcq $0, %%rbx ;" -- -- "addq %%r9, %%rcx ;" -- "movq %%rcx, 32(%0) ;" -- "adcq %%r11, %%r15 ;" -- "movq %%r15, 40(%0) ;" -- "adcq %%r13, %%rax ;" -- "movq %%rax, 48(%0) ;" -- "adcq $0, %%rbx ;" -- "movq %%rbx, 56(%0) ;" -- -- "movq 32(%1), %%rdx; " /* C[0] */ -- "mulx 32(%2), %%r8, %%r15; " /* C[0]*D[0] */ -- "movq %%r8, 64(%0) ;" -- "mulx 40(%2), %%r10, %%rax; " /* C[0]*D[1] */ -- "addq %%r10, %%r15 ;" -- "mulx 48(%2), %%r8, %%rbx; " /* C[0]*D[2] */ -- "adcq %%r8, %%rax ;" -- "mulx 56(%2), %%r10, %%rcx; " /* C[0]*D[3] */ -- 
"adcq %%r10, %%rbx ;" -- /******************************************/ -- "adcq $0, %%rcx ;" -- -- "movq 40(%1), %%rdx; " /* C[1] */ -- "mulx 32(%2), %%r8, %%r9; " /* C[1]*D[0] */ -- "addq %%r15, %%r8 ;" -- "movq %%r8, 72(%0) ;" -- "mulx 40(%2), %%r10, %%r11; " /* C[1]*D[1] */ -- "adcq %%r10, %%r9 ;" -- "mulx 48(%2), %%r8, %%r13; " /* C[1]*D[2] */ -- "adcq %%r8, %%r11 ;" -- "mulx 56(%2), %%r10, %%r15; " /* C[1]*D[3] */ -- "adcq %%r10, %%r13 ;" -- /******************************************/ -- "adcq $0, %%r15 ;" -- -- "addq %%r9, %%rax ;" -- "adcq %%r11, %%rbx ;" -- "adcq %%r13, %%rcx ;" -- "adcq $0, %%r15 ;" -- -- "movq 48(%1), %%rdx; " /* C[2] */ -- "mulx 32(%2), %%r8, %%r9; " /* C[2]*D[0] */ -- "addq %%rax, %%r8 ;" -- "movq %%r8, 80(%0) ;" -- "mulx 40(%2), %%r10, %%r11; " /* C[2]*D[1] */ -- "adcq %%r10, %%r9 ;" -- "mulx 48(%2), %%r8, %%r13; " /* C[2]*D[2] */ -- "adcq %%r8, %%r11 ;" -- "mulx 56(%2), %%r10, %%rax; " /* C[2]*D[3] */ -- "adcq %%r10, %%r13 ;" -- /******************************************/ -- "adcq $0, %%rax ;" -- -- "addq %%r9, %%rbx ;" -- "adcq %%r11, %%rcx ;" -- "adcq %%r13, %%r15 ;" -- "adcq $0, %%rax ;" -- -- "movq 56(%1), %%rdx; " /* C[3] */ -- "mulx 32(%2), %%r8, %%r9; " /* C[3]*D[0] */ -- "addq %%rbx, %%r8 ;" -- "movq %%r8, 88(%0) ;" -- "mulx 40(%2), %%r10, %%r11; " /* C[3]*D[1] */ -- "adcq %%r10, %%r9 ;" -- "mulx 48(%2), %%r8, %%r13; " /* C[3]*D[2] */ -- "adcq %%r8, %%r11 ;" -- "mulx 56(%2), %%r10, %%rbx; " /* C[3]*D[3] */ -- "adcq %%r10, %%r13 ;" -- /******************************************/ -- "adcq $0, %%rbx ;" -- -- "addq %%r9, %%rcx ;" -- "movq %%rcx, 96(%0) ;" -- "adcq %%r11, %%r15 ;" -- "movq %%r15, 104(%0) ;" -- "adcq %%r13, %%rax ;" -- "movq %%rax, 112(%0) ;" -- "adcq $0, %%rbx ;" -- "movq %%rbx, 120(%0) ;" -- : -- : "r"(c), "r"(a), "r"(b) -- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -- "%r10", "%r11", "%r13", "%r15"); -+ u64 x = a ^ b; -+ u64 minus_x = ~x + (u64)1U; -+ u64 x_or_minus_x = x | minus_x; -+ u64 
xnx = x_or_minus_x >> (u32)63U; -+ return xnx - (u64)1U; - } - --static void sqr2_256x256_integer_adx(u64 *const c, const u64 *const a) -+static __always_inline u64 gte_mask(u64 a, u64 b) - { -- asm volatile( -- "movq (%1), %%rdx ;" /* A[0] */ -- "mulx 8(%1), %%r8, %%r14 ;" /* A[1]*A[0] */ -- "xorl %%r15d, %%r15d;" -- "mulx 16(%1), %%r9, %%r10 ;" /* A[2]*A[0] */ -- "adcx %%r14, %%r9 ;" -- "mulx 24(%1), %%rax, %%rcx ;" /* A[3]*A[0] */ -- "adcx %%rax, %%r10 ;" -- "movq 24(%1), %%rdx ;" /* A[3] */ -- "mulx 8(%1), %%r11, %%rbx ;" /* A[1]*A[3] */ -- "adcx %%rcx, %%r11 ;" -- "mulx 16(%1), %%rax, %%r13 ;" /* A[2]*A[3] */ -- "adcx %%rax, %%rbx ;" -- "movq 8(%1), %%rdx ;" /* A[1] */ -- "adcx %%r15, %%r13 ;" -- "mulx 16(%1), %%rax, %%rcx ;" /* A[2]*A[1] */ -- "movq $0, %%r14 ;" -- /******************************************/ -- "adcx %%r15, %%r14 ;" -- -- "xorl %%r15d, %%r15d;" -- "adox %%rax, %%r10 ;" -- "adcx %%r8, %%r8 ;" -- "adox %%rcx, %%r11 ;" -- "adcx %%r9, %%r9 ;" -- "adox %%r15, %%rbx ;" -- "adcx %%r10, %%r10 ;" -- "adox %%r15, %%r13 ;" -- "adcx %%r11, %%r11 ;" -- "adox %%r15, %%r14 ;" -- "adcx %%rbx, %%rbx ;" -- "adcx %%r13, %%r13 ;" -- "adcx %%r14, %%r14 ;" -- -- "movq (%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */ -- /*******************/ -- "movq %%rax, 0(%0) ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, 8(%0) ;" -- "movq 8(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* A[1]^2 */ -- "adcq %%rax, %%r9 ;" -- "movq %%r9, 16(%0) ;" -- "adcq %%rcx, %%r10 ;" -- "movq %%r10, 24(%0) ;" -- "movq 16(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */ -- "adcq %%rax, %%r11 ;" -- "movq %%r11, 32(%0) ;" -- "adcq %%rcx, %%rbx ;" -- "movq %%rbx, 40(%0) ;" -- "movq 24(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */ -- "adcq %%rax, %%r13 ;" -- "movq %%r13, 48(%0) ;" -- "adcq %%rcx, %%r14 ;" -- "movq %%r14, 56(%0) ;" -- -- -- "movq 32(%1), %%rdx ;" /* B[0] */ -- "mulx 40(%1), %%r8, %%r14 ;" /* B[1]*B[0] */ -- "xorl %%r15d, %%r15d;" -- "mulx 48(%1), 
%%r9, %%r10 ;" /* B[2]*B[0] */ -- "adcx %%r14, %%r9 ;" -- "mulx 56(%1), %%rax, %%rcx ;" /* B[3]*B[0] */ -- "adcx %%rax, %%r10 ;" -- "movq 56(%1), %%rdx ;" /* B[3] */ -- "mulx 40(%1), %%r11, %%rbx ;" /* B[1]*B[3] */ -- "adcx %%rcx, %%r11 ;" -- "mulx 48(%1), %%rax, %%r13 ;" /* B[2]*B[3] */ -- "adcx %%rax, %%rbx ;" -- "movq 40(%1), %%rdx ;" /* B[1] */ -- "adcx %%r15, %%r13 ;" -- "mulx 48(%1), %%rax, %%rcx ;" /* B[2]*B[1] */ -- "movq $0, %%r14 ;" -- /******************************************/ -- "adcx %%r15, %%r14 ;" -- -- "xorl %%r15d, %%r15d;" -- "adox %%rax, %%r10 ;" -- "adcx %%r8, %%r8 ;" -- "adox %%rcx, %%r11 ;" -- "adcx %%r9, %%r9 ;" -- "adox %%r15, %%rbx ;" -- "adcx %%r10, %%r10 ;" -- "adox %%r15, %%r13 ;" -- "adcx %%r11, %%r11 ;" -- "adox %%r15, %%r14 ;" -- "adcx %%rbx, %%rbx ;" -- "adcx %%r13, %%r13 ;" -- "adcx %%r14, %%r14 ;" -- -- "movq 32(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* B[0]^2 */ -- /*******************/ -- "movq %%rax, 64(%0) ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, 72(%0) ;" -- "movq 40(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* B[1]^2 */ -- "adcq %%rax, %%r9 ;" -- "movq %%r9, 80(%0) ;" -- "adcq %%rcx, %%r10 ;" -- "movq %%r10, 88(%0) ;" -- "movq 48(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* B[2]^2 */ -- "adcq %%rax, %%r11 ;" -- "movq %%r11, 96(%0) ;" -- "adcq %%rcx, %%rbx ;" -- "movq %%rbx, 104(%0) ;" -- "movq 56(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* B[3]^2 */ -- "adcq %%rax, %%r13 ;" -- "movq %%r13, 112(%0) ;" -- "adcq %%rcx, %%r14 ;" -- "movq %%r14, 120(%0) ;" -- : -- : "r"(c), "r"(a) -- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -- "%r10", "%r11", "%r13", "%r14", "%r15"); -+ u64 x = a; -+ u64 y = b; -+ u64 x_xor_y = x ^ y; -+ u64 x_sub_y = x - y; -+ u64 x_sub_y_xor_y = x_sub_y ^ y; -+ u64 q = x_xor_y | x_sub_y_xor_y; -+ u64 x_xor_q = x ^ q; -+ u64 x_xor_q_ = x_xor_q >> (u32)63U; -+ return x_xor_q_ - (u64)1U; - } - --static void sqr2_256x256_integer_bmi2(u64 *const c, const u64 *const a) 
-+/* Computes the addition of four-element f1 with value in f2 -+ * and returns the carry (if any) */ -+static inline u64 add_scalar(u64 *out, const u64 *f1, u64 f2) - { -- asm volatile( -- "movq 8(%1), %%rdx ;" /* A[1] */ -- "mulx (%1), %%r8, %%r9 ;" /* A[0]*A[1] */ -- "mulx 16(%1), %%r10, %%r11 ;" /* A[2]*A[1] */ -- "mulx 24(%1), %%rcx, %%r14 ;" /* A[3]*A[1] */ -- -- "movq 16(%1), %%rdx ;" /* A[2] */ -- "mulx 24(%1), %%r15, %%r13 ;" /* A[3]*A[2] */ -- "mulx (%1), %%rax, %%rdx ;" /* A[0]*A[2] */ -- -- "addq %%rax, %%r9 ;" -- "adcq %%rdx, %%r10 ;" -- "adcq %%rcx, %%r11 ;" -- "adcq %%r14, %%r15 ;" -- "adcq $0, %%r13 ;" -- "movq $0, %%r14 ;" -- "adcq $0, %%r14 ;" -- -- "movq (%1), %%rdx ;" /* A[0] */ -- "mulx 24(%1), %%rax, %%rcx ;" /* A[0]*A[3] */ -- -- "addq %%rax, %%r10 ;" -- "adcq %%rcx, %%r11 ;" -- "adcq $0, %%r15 ;" -- "adcq $0, %%r13 ;" -- "adcq $0, %%r14 ;" -- -- "shldq $1, %%r13, %%r14 ;" -- "shldq $1, %%r15, %%r13 ;" -- "shldq $1, %%r11, %%r15 ;" -- "shldq $1, %%r10, %%r11 ;" -- "shldq $1, %%r9, %%r10 ;" -- "shldq $1, %%r8, %%r9 ;" -- "shlq $1, %%r8 ;" -- -- /*******************/ -- "mulx %%rdx, %%rax, %%rcx ; " /* A[0]^2 */ -- /*******************/ -- "movq %%rax, 0(%0) ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, 8(%0) ;" -- "movq 8(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ; " /* A[1]^2 */ -- "adcq %%rax, %%r9 ;" -- "movq %%r9, 16(%0) ;" -- "adcq %%rcx, %%r10 ;" -- "movq %%r10, 24(%0) ;" -- "movq 16(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ; " /* A[2]^2 */ -- "adcq %%rax, %%r11 ;" -- "movq %%r11, 32(%0) ;" -- "adcq %%rcx, %%r15 ;" -- "movq %%r15, 40(%0) ;" -- "movq 24(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ; " /* A[3]^2 */ -- "adcq %%rax, %%r13 ;" -- "movq %%r13, 48(%0) ;" -- "adcq %%rcx, %%r14 ;" -- "movq %%r14, 56(%0) ;" -- -- "movq 40(%1), %%rdx ;" /* B[1] */ -- "mulx 32(%1), %%r8, %%r9 ;" /* B[0]*B[1] */ -- "mulx 48(%1), %%r10, %%r11 ;" /* B[2]*B[1] */ -- "mulx 56(%1), %%rcx, %%r14 ;" /* B[3]*B[1] */ -- -- "movq 48(%1), %%rdx ;" /* B[2] */ -- 
"mulx 56(%1), %%r15, %%r13 ;" /* B[3]*B[2] */ -- "mulx 32(%1), %%rax, %%rdx ;" /* B[0]*B[2] */ -- -- "addq %%rax, %%r9 ;" -- "adcq %%rdx, %%r10 ;" -- "adcq %%rcx, %%r11 ;" -- "adcq %%r14, %%r15 ;" -- "adcq $0, %%r13 ;" -- "movq $0, %%r14 ;" -- "adcq $0, %%r14 ;" -- -- "movq 32(%1), %%rdx ;" /* B[0] */ -- "mulx 56(%1), %%rax, %%rcx ;" /* B[0]*B[3] */ -- -- "addq %%rax, %%r10 ;" -- "adcq %%rcx, %%r11 ;" -- "adcq $0, %%r15 ;" -- "adcq $0, %%r13 ;" -- "adcq $0, %%r14 ;" -- -- "shldq $1, %%r13, %%r14 ;" -- "shldq $1, %%r15, %%r13 ;" -- "shldq $1, %%r11, %%r15 ;" -- "shldq $1, %%r10, %%r11 ;" -- "shldq $1, %%r9, %%r10 ;" -- "shldq $1, %%r8, %%r9 ;" -- "shlq $1, %%r8 ;" -- -- /*******************/ -- "mulx %%rdx, %%rax, %%rcx ; " /* B[0]^2 */ -- /*******************/ -- "movq %%rax, 64(%0) ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, 72(%0) ;" -- "movq 40(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ; " /* B[1]^2 */ -- "adcq %%rax, %%r9 ;" -- "movq %%r9, 80(%0) ;" -- "adcq %%rcx, %%r10 ;" -- "movq %%r10, 88(%0) ;" -- "movq 48(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ; " /* B[2]^2 */ -- "adcq %%rax, %%r11 ;" -- "movq %%r11, 96(%0) ;" -- "adcq %%rcx, %%r15 ;" -- "movq %%r15, 104(%0) ;" -- "movq 56(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ; " /* B[3]^2 */ -- "adcq %%rax, %%r13 ;" -- "movq %%r13, 112(%0) ;" -- "adcq %%rcx, %%r14 ;" -- "movq %%r14, 120(%0) ;" -- : -- : "r"(c), "r"(a) -- : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", -- "%r11", "%r13", "%r14", "%r15"); --} -+ u64 carry_r; - --static void red_eltfp25519_2w_adx(u64 *const c, const u64 *const a) --{ - asm volatile( -- "movl $38, %%edx; " /* 2*c = 38 = 2^256 */ -- "mulx 32(%1), %%r8, %%r10; " /* c*C[4] */ -- "xorl %%ebx, %%ebx ;" -- "adox (%1), %%r8 ;" -- "mulx 40(%1), %%r9, %%r11; " /* c*C[5] */ -- "adcx %%r10, %%r9 ;" -- "adox 8(%1), %%r9 ;" -- "mulx 48(%1), %%r10, %%rax; " /* c*C[6] */ -- "adcx %%r11, %%r10 ;" -- "adox 16(%1), %%r10 ;" -- "mulx 56(%1), %%r11, %%rcx; " /* c*C[7] */ -- "adcx %%rax, 
%%r11 ;" -- "adox 24(%1), %%r11 ;" -- /***************************************/ -- "adcx %%rbx, %%rcx ;" -- "adox %%rbx, %%rcx ;" -- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */ -- "adcx %%rcx, %%r8 ;" -- "adcx %%rbx, %%r9 ;" -- "movq %%r9, 8(%0) ;" -- "adcx %%rbx, %%r10 ;" -- "movq %%r10, 16(%0) ;" -- "adcx %%rbx, %%r11 ;" -- "movq %%r11, 24(%0) ;" -- "mov $0, %%ecx ;" -- "cmovc %%edx, %%ecx ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, (%0) ;" -- -- "mulx 96(%1), %%r8, %%r10; " /* c*C[4] */ -- "xorl %%ebx, %%ebx ;" -- "adox 64(%1), %%r8 ;" -- "mulx 104(%1), %%r9, %%r11; " /* c*C[5] */ -- "adcx %%r10, %%r9 ;" -- "adox 72(%1), %%r9 ;" -- "mulx 112(%1), %%r10, %%rax; " /* c*C[6] */ -- "adcx %%r11, %%r10 ;" -- "adox 80(%1), %%r10 ;" -- "mulx 120(%1), %%r11, %%rcx; " /* c*C[7] */ -- "adcx %%rax, %%r11 ;" -- "adox 88(%1), %%r11 ;" -- /****************************************/ -- "adcx %%rbx, %%rcx ;" -- "adox %%rbx, %%rcx ;" -- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */ -- "adcx %%rcx, %%r8 ;" -- "adcx %%rbx, %%r9 ;" -- "movq %%r9, 40(%0) ;" -- "adcx %%rbx, %%r10 ;" -- "movq %%r10, 48(%0) ;" -- "adcx %%rbx, %%r11 ;" -- "movq %%r11, 56(%0) ;" -- "mov $0, %%ecx ;" -- "cmovc %%edx, %%ecx ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, 32(%0) ;" -- : -- : "r"(c), "r"(a) -- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -- "%r10", "%r11"); --} -+ /* Clear registers to propagate the carry bit */ -+ " xor %%r8, %%r8;" -+ " xor %%r9, %%r9;" -+ " xor %%r10, %%r10;" -+ " xor %%r11, %%r11;" -+ " xor %1, %1;" -+ -+ /* Begin addition chain */ -+ " addq 0(%3), %0;" -+ " movq %0, 0(%2);" -+ " adcxq 8(%3), %%r8;" -+ " movq %%r8, 8(%2);" -+ " adcxq 16(%3), %%r9;" -+ " movq %%r9, 16(%2);" -+ " adcxq 24(%3), %%r10;" -+ " movq %%r10, 24(%2);" -+ -+ /* Return the carry bit in a register */ -+ " adcx %%r11, %1;" -+ : "+&r" (f2), "=&r" (carry_r) -+ : "r" (out), "r" (f1) -+ : "%r8", "%r9", "%r10", "%r11", "memory", "cc" -+ ); - --static void red_eltfp25519_2w_bmi2(u64 *const c, 
const u64 *const a) --{ -- asm volatile( -- "movl $38, %%edx ; " /* 2*c = 38 = 2^256 */ -- "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */ -- "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */ -- "addq %%r10, %%r9 ;" -- "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */ -- "adcq %%r11, %%r10 ;" -- "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */ -- "adcq %%rax, %%r11 ;" -- /***************************************/ -- "adcq $0, %%rcx ;" -- "addq (%1), %%r8 ;" -- "adcq 8(%1), %%r9 ;" -- "adcq 16(%1), %%r10 ;" -- "adcq 24(%1), %%r11 ;" -- "adcq $0, %%rcx ;" -- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */ -- "addq %%rcx, %%r8 ;" -- "adcq $0, %%r9 ;" -- "movq %%r9, 8(%0) ;" -- "adcq $0, %%r10 ;" -- "movq %%r10, 16(%0) ;" -- "adcq $0, %%r11 ;" -- "movq %%r11, 24(%0) ;" -- "mov $0, %%ecx ;" -- "cmovc %%edx, %%ecx ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, (%0) ;" -- -- "mulx 96(%1), %%r8, %%r10 ;" /* c*C[4] */ -- "mulx 104(%1), %%r9, %%r11 ;" /* c*C[5] */ -- "addq %%r10, %%r9 ;" -- "mulx 112(%1), %%r10, %%rax ;" /* c*C[6] */ -- "adcq %%r11, %%r10 ;" -- "mulx 120(%1), %%r11, %%rcx ;" /* c*C[7] */ -- "adcq %%rax, %%r11 ;" -- /****************************************/ -- "adcq $0, %%rcx ;" -- "addq 64(%1), %%r8 ;" -- "adcq 72(%1), %%r9 ;" -- "adcq 80(%1), %%r10 ;" -- "adcq 88(%1), %%r11 ;" -- "adcq $0, %%rcx ;" -- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */ -- "addq %%rcx, %%r8 ;" -- "adcq $0, %%r9 ;" -- "movq %%r9, 40(%0) ;" -- "adcq $0, %%r10 ;" -- "movq %%r10, 48(%0) ;" -- "adcq $0, %%r11 ;" -- "movq %%r11, 56(%0) ;" -- "mov $0, %%ecx ;" -- "cmovc %%edx, %%ecx ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, 32(%0) ;" -- : -- : "r"(c), "r"(a) -- : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", -- "%r11"); -+ return carry_r; - } - --static void mul_256x256_integer_adx(u64 *const c, const u64 *const a, -- const u64 *const b) -+/* Computes the field addition of two field elements */ -+static inline void fadd(u64 *out, const u64 *f1, const u64 *f2) - { - asm volatile( -- "movq (%1), %%rdx; " /* A[0] */ 
-- "mulx (%2), %%r8, %%r9; " /* A[0]*B[0] */ -- "xorl %%r10d, %%r10d ;" -- "movq %%r8, (%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[0]*B[1] */ -- "adox %%r9, %%r10 ;" -- "movq %%r10, 8(%0) ;" -- "mulx 16(%2), %%r15, %%r13; " /* A[0]*B[2] */ -- "adox %%r11, %%r15 ;" -- "mulx 24(%2), %%r14, %%rdx; " /* A[0]*B[3] */ -- "adox %%r13, %%r14 ;" -- "movq $0, %%rax ;" -- /******************************************/ -- "adox %%rdx, %%rax ;" -- -- "movq 8(%1), %%rdx; " /* A[1] */ -- "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */ -- "xorl %%r10d, %%r10d ;" -- "adcx 8(%0), %%r8 ;" -- "movq %%r8, 8(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */ -- "adox %%r9, %%r10 ;" -- "adcx %%r15, %%r10 ;" -- "movq %%r10, 16(%0) ;" -- "mulx 16(%2), %%r15, %%r13; " /* A[1]*B[2] */ -- "adox %%r11, %%r15 ;" -- "adcx %%r14, %%r15 ;" -- "movq $0, %%r8 ;" -- "mulx 24(%2), %%r14, %%rdx; " /* A[1]*B[3] */ -- "adox %%r13, %%r14 ;" -- "adcx %%rax, %%r14 ;" -- "movq $0, %%rax ;" -- /******************************************/ -- "adox %%rdx, %%rax ;" -- "adcx %%r8, %%rax ;" -- -- "movq 16(%1), %%rdx; " /* A[2] */ -- "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */ -- "xorl %%r10d, %%r10d ;" -- "adcx 16(%0), %%r8 ;" -- "movq %%r8, 16(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */ -- "adox %%r9, %%r10 ;" -- "adcx %%r15, %%r10 ;" -- "movq %%r10, 24(%0) ;" -- "mulx 16(%2), %%r15, %%r13; " /* A[2]*B[2] */ -- "adox %%r11, %%r15 ;" -- "adcx %%r14, %%r15 ;" -- "movq $0, %%r8 ;" -- "mulx 24(%2), %%r14, %%rdx; " /* A[2]*B[3] */ -- "adox %%r13, %%r14 ;" -- "adcx %%rax, %%r14 ;" -- "movq $0, %%rax ;" -- /******************************************/ -- "adox %%rdx, %%rax ;" -- "adcx %%r8, %%rax ;" -- -- "movq 24(%1), %%rdx; " /* A[3] */ -- "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */ -- "xorl %%r10d, %%r10d ;" -- "adcx 24(%0), %%r8 ;" -- "movq %%r8, 24(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */ -- "adox %%r9, %%r10 ;" -- "adcx %%r15, %%r10 ;" -- "movq %%r10, 32(%0) ;" -- "mulx 16(%2), %%r15, %%r13; " /* 
A[3]*B[2] */ -- "adox %%r11, %%r15 ;" -- "adcx %%r14, %%r15 ;" -- "movq %%r15, 40(%0) ;" -- "movq $0, %%r8 ;" -- "mulx 24(%2), %%r14, %%rdx; " /* A[3]*B[3] */ -- "adox %%r13, %%r14 ;" -- "adcx %%rax, %%r14 ;" -- "movq %%r14, 48(%0) ;" -- "movq $0, %%rax ;" -- /******************************************/ -- "adox %%rdx, %%rax ;" -- "adcx %%r8, %%rax ;" -- "movq %%rax, 56(%0) ;" -- : -- : "r"(c), "r"(a), "r"(b) -- : "memory", "cc", "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", -- "%r13", "%r14", "%r15"); -+ /* Compute the raw addition of f1 + f2 */ -+ " movq 0(%0), %%r8;" -+ " addq 0(%2), %%r8;" -+ " movq 8(%0), %%r9;" -+ " adcxq 8(%2), %%r9;" -+ " movq 16(%0), %%r10;" -+ " adcxq 16(%2), %%r10;" -+ " movq 24(%0), %%r11;" -+ " adcxq 24(%2), %%r11;" -+ -+ /* Wrap the result back into the field */ -+ -+ /* Step 1: Compute carry*38 */ -+ " mov $0, %%rax;" -+ " mov $38, %0;" -+ " cmovc %0, %%rax;" -+ -+ /* Step 2: Add carry*38 to the original sum */ -+ " xor %%rcx, %%rcx;" -+ " add %%rax, %%r8;" -+ " adcx %%rcx, %%r9;" -+ " movq %%r9, 8(%1);" -+ " adcx %%rcx, %%r10;" -+ " movq %%r10, 16(%1);" -+ " adcx %%rcx, %%r11;" -+ " movq %%r11, 24(%1);" -+ -+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ -+ " mov $0, %%rax;" -+ " cmovc %0, %%rax;" -+ " add %%rax, %%r8;" -+ " movq %%r8, 0(%1);" -+ : "+&r" (f2) -+ : "r" (out), "r" (f1) -+ : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc" -+ ); - } - --static void mul_256x256_integer_bmi2(u64 *const c, const u64 *const a, -- const u64 *const b) -+/* Computes the field substraction of two field elements */ -+static inline void fsub(u64 *out, const u64 *f1, const u64 *f2) - { - asm volatile( -- "movq (%1), %%rdx; " /* A[0] */ -- "mulx (%2), %%r8, %%r15; " /* A[0]*B[0] */ -- "movq %%r8, (%0) ;" -- "mulx 8(%2), %%r10, %%rax; " /* A[0]*B[1] */ -- "addq %%r10, %%r15 ;" -- "mulx 16(%2), %%r8, %%rbx; " /* A[0]*B[2] */ -- "adcq %%r8, %%rax ;" -- "mulx 24(%2), %%r10, %%rcx; " /* A[0]*B[3] */ -- 
"adcq %%r10, %%rbx ;" -- /******************************************/ -- "adcq $0, %%rcx ;" -- -- "movq 8(%1), %%rdx; " /* A[1] */ -- "mulx (%2), %%r8, %%r9; " /* A[1]*B[0] */ -- "addq %%r15, %%r8 ;" -- "movq %%r8, 8(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[1]*B[1] */ -- "adcq %%r10, %%r9 ;" -- "mulx 16(%2), %%r8, %%r13; " /* A[1]*B[2] */ -- "adcq %%r8, %%r11 ;" -- "mulx 24(%2), %%r10, %%r15; " /* A[1]*B[3] */ -- "adcq %%r10, %%r13 ;" -- /******************************************/ -- "adcq $0, %%r15 ;" -- -- "addq %%r9, %%rax ;" -- "adcq %%r11, %%rbx ;" -- "adcq %%r13, %%rcx ;" -- "adcq $0, %%r15 ;" -- -- "movq 16(%1), %%rdx; " /* A[2] */ -- "mulx (%2), %%r8, %%r9; " /* A[2]*B[0] */ -- "addq %%rax, %%r8 ;" -- "movq %%r8, 16(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[2]*B[1] */ -- "adcq %%r10, %%r9 ;" -- "mulx 16(%2), %%r8, %%r13; " /* A[2]*B[2] */ -- "adcq %%r8, %%r11 ;" -- "mulx 24(%2), %%r10, %%rax; " /* A[2]*B[3] */ -- "adcq %%r10, %%r13 ;" -- /******************************************/ -- "adcq $0, %%rax ;" -- -- "addq %%r9, %%rbx ;" -- "adcq %%r11, %%rcx ;" -- "adcq %%r13, %%r15 ;" -- "adcq $0, %%rax ;" -- -- "movq 24(%1), %%rdx; " /* A[3] */ -- "mulx (%2), %%r8, %%r9; " /* A[3]*B[0] */ -- "addq %%rbx, %%r8 ;" -- "movq %%r8, 24(%0) ;" -- "mulx 8(%2), %%r10, %%r11; " /* A[3]*B[1] */ -- "adcq %%r10, %%r9 ;" -- "mulx 16(%2), %%r8, %%r13; " /* A[3]*B[2] */ -- "adcq %%r8, %%r11 ;" -- "mulx 24(%2), %%r10, %%rbx; " /* A[3]*B[3] */ -- "adcq %%r10, %%r13 ;" -- /******************************************/ -- "adcq $0, %%rbx ;" -- -- "addq %%r9, %%rcx ;" -- "movq %%rcx, 32(%0) ;" -- "adcq %%r11, %%r15 ;" -- "movq %%r15, 40(%0) ;" -- "adcq %%r13, %%rax ;" -- "movq %%rax, 48(%0) ;" -- "adcq $0, %%rbx ;" -- "movq %%rbx, 56(%0) ;" -- : -- : "r"(c), "r"(a), "r"(b) -- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -- "%r10", "%r11", "%r13", "%r15"); -+ /* Compute the raw substraction of f1-f2 */ -+ " movq 0(%1), %%r8;" -+ " subq 0(%2), %%r8;" -+ " movq 
8(%1), %%r9;" -+ " sbbq 8(%2), %%r9;" -+ " movq 16(%1), %%r10;" -+ " sbbq 16(%2), %%r10;" -+ " movq 24(%1), %%r11;" -+ " sbbq 24(%2), %%r11;" -+ -+ /* Wrap the result back into the field */ -+ -+ /* Step 1: Compute carry*38 */ -+ " mov $0, %%rax;" -+ " mov $38, %%rcx;" -+ " cmovc %%rcx, %%rax;" -+ -+ /* Step 2: Substract carry*38 from the original difference */ -+ " sub %%rax, %%r8;" -+ " sbb $0, %%r9;" -+ " sbb $0, %%r10;" -+ " sbb $0, %%r11;" -+ -+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ -+ " mov $0, %%rax;" -+ " cmovc %%rcx, %%rax;" -+ " sub %%rax, %%r8;" -+ -+ /* Store the result */ -+ " movq %%r8, 0(%0);" -+ " movq %%r9, 8(%0);" -+ " movq %%r10, 16(%0);" -+ " movq %%r11, 24(%0);" -+ : -+ : "r" (out), "r" (f1), "r" (f2) -+ : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "memory", "cc" -+ ); - } - --static void sqr_256x256_integer_adx(u64 *const c, const u64 *const a) -+/* Computes a field multiplication: out <- f1 * f2 -+ * Uses the 8-element buffer tmp for intermediate results */ -+static inline void fmul(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) - { - asm volatile( -- "movq (%1), %%rdx ;" /* A[0] */ -- "mulx 8(%1), %%r8, %%r14 ;" /* A[1]*A[0] */ -- "xorl %%r15d, %%r15d;" -- "mulx 16(%1), %%r9, %%r10 ;" /* A[2]*A[0] */ -- "adcx %%r14, %%r9 ;" -- "mulx 24(%1), %%rax, %%rcx ;" /* A[3]*A[0] */ -- "adcx %%rax, %%r10 ;" -- "movq 24(%1), %%rdx ;" /* A[3] */ -- "mulx 8(%1), %%r11, %%rbx ;" /* A[1]*A[3] */ -- "adcx %%rcx, %%r11 ;" -- "mulx 16(%1), %%rax, %%r13 ;" /* A[2]*A[3] */ -- "adcx %%rax, %%rbx ;" -- "movq 8(%1), %%rdx ;" /* A[1] */ -- "adcx %%r15, %%r13 ;" -- "mulx 16(%1), %%rax, %%rcx ;" /* A[2]*A[1] */ -- "movq $0, %%r14 ;" -- /******************************************/ -- "adcx %%r15, %%r14 ;" -- -- "xorl %%r15d, %%r15d;" -- "adox %%rax, %%r10 ;" -- "adcx %%r8, %%r8 ;" -- "adox %%rcx, %%r11 ;" -- "adcx %%r9, %%r9 ;" -- "adox %%r15, %%rbx ;" -- "adcx %%r10, %%r10 ;" -- "adox %%r15, %%r13 ;" -- "adcx %%r11, 
%%r11 ;" -- "adox %%r15, %%r14 ;" -- "adcx %%rbx, %%rbx ;" -- "adcx %%r13, %%r13 ;" -- "adcx %%r14, %%r14 ;" -- -- "movq (%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */ -- /*******************/ -- "movq %%rax, 0(%0) ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, 8(%0) ;" -- "movq 8(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* A[1]^2 */ -- "adcq %%rax, %%r9 ;" -- "movq %%r9, 16(%0) ;" -- "adcq %%rcx, %%r10 ;" -- "movq %%r10, 24(%0) ;" -- "movq 16(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */ -- "adcq %%rax, %%r11 ;" -- "movq %%r11, 32(%0) ;" -- "adcq %%rcx, %%rbx ;" -- "movq %%rbx, 40(%0) ;" -- "movq 24(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */ -- "adcq %%rax, %%r13 ;" -- "movq %%r13, 48(%0) ;" -- "adcq %%rcx, %%r14 ;" -- "movq %%r14, 56(%0) ;" -- : -- : "r"(c), "r"(a) -- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -- "%r10", "%r11", "%r13", "%r14", "%r15"); --} -+ /* Compute the raw multiplication: tmp <- src1 * src2 */ - --static void sqr_256x256_integer_bmi2(u64 *const c, const u64 *const a) --{ -- asm volatile( -- "movq 8(%1), %%rdx ;" /* A[1] */ -- "mulx (%1), %%r8, %%r9 ;" /* A[0]*A[1] */ -- "mulx 16(%1), %%r10, %%r11 ;" /* A[2]*A[1] */ -- "mulx 24(%1), %%rcx, %%r14 ;" /* A[3]*A[1] */ -- -- "movq 16(%1), %%rdx ;" /* A[2] */ -- "mulx 24(%1), %%r15, %%r13 ;" /* A[3]*A[2] */ -- "mulx (%1), %%rax, %%rdx ;" /* A[0]*A[2] */ -- -- "addq %%rax, %%r9 ;" -- "adcq %%rdx, %%r10 ;" -- "adcq %%rcx, %%r11 ;" -- "adcq %%r14, %%r15 ;" -- "adcq $0, %%r13 ;" -- "movq $0, %%r14 ;" -- "adcq $0, %%r14 ;" -- -- "movq (%1), %%rdx ;" /* A[0] */ -- "mulx 24(%1), %%rax, %%rcx ;" /* A[0]*A[3] */ -- -- "addq %%rax, %%r10 ;" -- "adcq %%rcx, %%r11 ;" -- "adcq $0, %%r15 ;" -- "adcq $0, %%r13 ;" -- "adcq $0, %%r14 ;" -- -- "shldq $1, %%r13, %%r14 ;" -- "shldq $1, %%r15, %%r13 ;" -- "shldq $1, %%r11, %%r15 ;" -- "shldq $1, %%r10, %%r11 ;" -- "shldq $1, %%r9, %%r10 ;" -- "shldq $1, %%r8, %%r9 ;" -- "shlq $1, %%r8 ;" -- -- 
/*******************/ -- "mulx %%rdx, %%rax, %%rcx ;" /* A[0]^2 */ -- /*******************/ -- "movq %%rax, 0(%0) ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, 8(%0) ;" -- "movq 8(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* A[1]^2 */ -- "adcq %%rax, %%r9 ;" -- "movq %%r9, 16(%0) ;" -- "adcq %%rcx, %%r10 ;" -- "movq %%r10, 24(%0) ;" -- "movq 16(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* A[2]^2 */ -- "adcq %%rax, %%r11 ;" -- "movq %%r11, 32(%0) ;" -- "adcq %%rcx, %%r15 ;" -- "movq %%r15, 40(%0) ;" -- "movq 24(%1), %%rdx ;" -- "mulx %%rdx, %%rax, %%rcx ;" /* A[3]^2 */ -- "adcq %%rax, %%r13 ;" -- "movq %%r13, 48(%0) ;" -- "adcq %%rcx, %%r14 ;" -- "movq %%r14, 56(%0) ;" -- : -- : "r"(c), "r"(a) -- : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", -- "%r11", "%r13", "%r14", "%r15"); -+ /* Compute src1[0] * src2 */ -+ " movq 0(%1), %%rdx;" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);" -+ " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" -+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" -+ /* Compute src1[1] * src2 */ -+ " movq 8(%1), %%rdx;" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 16(%0);" -+ " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;" -+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" -+ /* Compute src1[2] * src2 */ -+ " movq 16(%1), %%rdx;" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 24(%0);" -+ " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, 
%%r8;" -+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" -+ /* Compute src1[3] * src2 */ -+ " movq 24(%1), %%rdx;" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 32(%0);" -+ " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " movq %%r12, 40(%0);" " mov $0, %%r8;" -+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%0);" -+ /* Line up pointers */ -+ " mov %0, %1;" -+ " mov %2, %0;" -+ -+ /* Wrap the result back into the field */ -+ -+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ -+ " mov $38, %%rdx;" -+ " mulxq 32(%1), %%r8, %%r13;" -+ " xor %3, %3;" -+ " adoxq 0(%1), %%r8;" -+ " mulxq 40(%1), %%r9, %%r12;" -+ " adcx %%r13, %%r9;" -+ " adoxq 8(%1), %%r9;" -+ " mulxq 48(%1), %%r10, %%r13;" -+ " adcx %%r12, %%r10;" -+ " adoxq 16(%1), %%r10;" -+ " mulxq 56(%1), %%r11, %%rax;" -+ " adcx %%r13, %%r11;" -+ " adoxq 24(%1), %%r11;" -+ " adcx %3, %%rax;" -+ " adox %3, %%rax;" -+ " imul %%rdx, %%rax;" -+ -+ /* Step 2: Fold the carry back into dst */ -+ " add %%rax, %%r8;" -+ " adcx %3, %%r9;" -+ " movq %%r9, 8(%0);" -+ " adcx %3, %%r10;" -+ " movq %%r10, 16(%0);" -+ " adcx %3, %%r11;" -+ " movq %%r11, 24(%0);" -+ -+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ -+ " mov $0, %%rax;" -+ " cmovc %%rdx, %%rax;" -+ " add %%rax, %%r8;" -+ " movq %%r8, 0(%0);" -+ : "+&r" (tmp), "+&r" (f1), "+&r" (out), "+&r" (f2) -+ : -+ : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "memory", "cc" -+ ); - } - --static void red_eltfp25519_1w_adx(u64 *const c, const u64 *const a) -+/* Computes two field multiplications: -+ * out[0] <- f1[0] * f2[0] 
-+ * out[1] <- f1[1] * f2[1] -+ * Uses the 16-element buffer tmp for intermediate results. */ -+static inline void fmul2(u64 *out, const u64 *f1, const u64 *f2, u64 *tmp) - { - asm volatile( -- "movl $38, %%edx ;" /* 2*c = 38 = 2^256 */ -- "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */ -- "xorl %%ebx, %%ebx ;" -- "adox (%1), %%r8 ;" -- "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */ -- "adcx %%r10, %%r9 ;" -- "adox 8(%1), %%r9 ;" -- "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */ -- "adcx %%r11, %%r10 ;" -- "adox 16(%1), %%r10 ;" -- "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */ -- "adcx %%rax, %%r11 ;" -- "adox 24(%1), %%r11 ;" -- /***************************************/ -- "adcx %%rbx, %%rcx ;" -- "adox %%rbx, %%rcx ;" -- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0, of=0 */ -- "adcx %%rcx, %%r8 ;" -- "adcx %%rbx, %%r9 ;" -- "movq %%r9, 8(%0) ;" -- "adcx %%rbx, %%r10 ;" -- "movq %%r10, 16(%0) ;" -- "adcx %%rbx, %%r11 ;" -- "movq %%r11, 24(%0) ;" -- "mov $0, %%ecx ;" -- "cmovc %%edx, %%ecx ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, (%0) ;" -- : -- : "r"(c), "r"(a) -- : "memory", "cc", "%rax", "%rbx", "%rcx", "%rdx", "%r8", "%r9", -- "%r10", "%r11"); --} -+ /* Compute the raw multiplication tmp[0] <- f1[0] * f2[0] */ - --static void red_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a) --{ -- asm volatile( -- "movl $38, %%edx ;" /* 2*c = 38 = 2^256 */ -- "mulx 32(%1), %%r8, %%r10 ;" /* c*C[4] */ -- "mulx 40(%1), %%r9, %%r11 ;" /* c*C[5] */ -- "addq %%r10, %%r9 ;" -- "mulx 48(%1), %%r10, %%rax ;" /* c*C[6] */ -- "adcq %%r11, %%r10 ;" -- "mulx 56(%1), %%r11, %%rcx ;" /* c*C[7] */ -- "adcq %%rax, %%r11 ;" -- /***************************************/ -- "adcq $0, %%rcx ;" -- "addq (%1), %%r8 ;" -- "adcq 8(%1), %%r9 ;" -- "adcq 16(%1), %%r10 ;" -- "adcq 24(%1), %%r11 ;" -- "adcq $0, %%rcx ;" -- "imul %%rdx, %%rcx ;" /* c*C[4], cf=0 */ -- "addq %%rcx, %%r8 ;" -- "adcq $0, %%r9 ;" -- "movq %%r9, 8(%0) ;" -- "adcq $0, %%r10 ;" -- "movq %%r10, 16(%0) ;" -- "adcq $0, %%r11 ;" -- "movq %%r11, 24(%0) 
;" -- "mov $0, %%ecx ;" -- "cmovc %%edx, %%ecx ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, (%0) ;" -- : -- : "r"(c), "r"(a) -- : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", -- "%r11"); -+ /* Compute src1[0] * src2 */ -+ " movq 0(%1), %%rdx;" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);" -+ " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" -+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" -+ /* Compute src1[1] * src2 */ -+ " movq 8(%1), %%rdx;" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 16(%0);" -+ " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;" -+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" -+ /* Compute src1[2] * src2 */ -+ " movq 16(%1), %%rdx;" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 24(%0);" -+ " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;" -+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" -+ /* Compute src1[3] * src2 */ -+ " movq 24(%1), %%rdx;" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 32(%0);" -+ " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " movq %%r12, 40(%0);" " mov $0, %%r8;" -+ " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq 
%%r14, 48(%0);" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%0);" -+ -+ /* Compute the raw multiplication tmp[1] <- f1[1] * f2[1] */ -+ -+ /* Compute src1[0] * src2 */ -+ " movq 32(%1), %%rdx;" -+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 64(%0);" -+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%0);" -+ " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;" -+ " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" -+ /* Compute src1[1] * src2 */ -+ " movq 40(%1), %%rdx;" -+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 72(%0), %%r8;" " movq %%r8, 72(%0);" -+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 80(%0);" -+ " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;" -+ " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" -+ /* Compute src1[2] * src2 */ -+ " movq 48(%1), %%rdx;" -+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 80(%0), %%r8;" " movq %%r8, 80(%0);" -+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 88(%0);" -+ " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;" -+ " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" -+ /* Compute src1[3] * src2 */ -+ " movq 56(%1), %%rdx;" -+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 88(%0), %%r8;" " movq %%r8, 88(%0);" -+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 96(%0);" -+ " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " movq %%r12, 104(%0);" " mov $0, %%r8;" -+ " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 
112(%0);" " mov $0, %%rax;" -+ " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 120(%0);" -+ /* Line up pointers */ -+ " mov %0, %1;" -+ " mov %2, %0;" -+ -+ /* Wrap the results back into the field */ -+ -+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ -+ " mov $38, %%rdx;" -+ " mulxq 32(%1), %%r8, %%r13;" -+ " xor %3, %3;" -+ " adoxq 0(%1), %%r8;" -+ " mulxq 40(%1), %%r9, %%r12;" -+ " adcx %%r13, %%r9;" -+ " adoxq 8(%1), %%r9;" -+ " mulxq 48(%1), %%r10, %%r13;" -+ " adcx %%r12, %%r10;" -+ " adoxq 16(%1), %%r10;" -+ " mulxq 56(%1), %%r11, %%rax;" -+ " adcx %%r13, %%r11;" -+ " adoxq 24(%1), %%r11;" -+ " adcx %3, %%rax;" -+ " adox %3, %%rax;" -+ " imul %%rdx, %%rax;" -+ -+ /* Step 2: Fold the carry back into dst */ -+ " add %%rax, %%r8;" -+ " adcx %3, %%r9;" -+ " movq %%r9, 8(%0);" -+ " adcx %3, %%r10;" -+ " movq %%r10, 16(%0);" -+ " adcx %3, %%r11;" -+ " movq %%r11, 24(%0);" -+ -+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ -+ " mov $0, %%rax;" -+ " cmovc %%rdx, %%rax;" -+ " add %%rax, %%r8;" -+ " movq %%r8, 0(%0);" -+ -+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ -+ " mov $38, %%rdx;" -+ " mulxq 96(%1), %%r8, %%r13;" -+ " xor %3, %3;" -+ " adoxq 64(%1), %%r8;" -+ " mulxq 104(%1), %%r9, %%r12;" -+ " adcx %%r13, %%r9;" -+ " adoxq 72(%1), %%r9;" -+ " mulxq 112(%1), %%r10, %%r13;" -+ " adcx %%r12, %%r10;" -+ " adoxq 80(%1), %%r10;" -+ " mulxq 120(%1), %%r11, %%rax;" -+ " adcx %%r13, %%r11;" -+ " adoxq 88(%1), %%r11;" -+ " adcx %3, %%rax;" -+ " adox %3, %%rax;" -+ " imul %%rdx, %%rax;" -+ -+ /* Step 2: Fold the carry back into dst */ -+ " add %%rax, %%r8;" -+ " adcx %3, %%r9;" -+ " movq %%r9, 40(%0);" -+ " adcx %3, %%r10;" -+ " movq %%r10, 48(%0);" -+ " adcx %3, %%r11;" -+ " movq %%r11, 56(%0);" -+ -+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ -+ " mov $0, %%rax;" -+ " cmovc %%rdx, %%rax;" -+ " add %%rax, %%r8;" -+ " movq %%r8, 32(%0);" -+ : "+&r" (tmp), "+&r" 
(f1), "+&r" (out), "+&r" (f2) -+ : -+ : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "memory", "cc" -+ ); - } - --static __always_inline void --add_eltfp25519_1w_adx(u64 *const c, const u64 *const a, const u64 *const b) -+/* Computes the field multiplication of four-element f1 with value in f2 */ -+static inline void fmul_scalar(u64 *out, const u64 *f1, u64 f2) - { -- asm volatile( -- "mov $38, %%eax ;" -- "xorl %%ecx, %%ecx ;" -- "movq (%2), %%r8 ;" -- "adcx (%1), %%r8 ;" -- "movq 8(%2), %%r9 ;" -- "adcx 8(%1), %%r9 ;" -- "movq 16(%2), %%r10 ;" -- "adcx 16(%1), %%r10 ;" -- "movq 24(%2), %%r11 ;" -- "adcx 24(%1), %%r11 ;" -- "cmovc %%eax, %%ecx ;" -- "xorl %%eax, %%eax ;" -- "adcx %%rcx, %%r8 ;" -- "adcx %%rax, %%r9 ;" -- "movq %%r9, 8(%0) ;" -- "adcx %%rax, %%r10 ;" -- "movq %%r10, 16(%0) ;" -- "adcx %%rax, %%r11 ;" -- "movq %%r11, 24(%0) ;" -- "mov $38, %%ecx ;" -- "cmovc %%ecx, %%eax ;" -- "addq %%rax, %%r8 ;" -- "movq %%r8, (%0) ;" -- : -- : "r"(c), "r"(a), "r"(b) -- : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11"); --} -+ register u64 f2_r asm("rdx") = f2; - --static __always_inline void --add_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a, const u64 *const b) --{ - asm volatile( -- "mov $38, %%eax ;" -- "movq (%2), %%r8 ;" -- "addq (%1), %%r8 ;" -- "movq 8(%2), %%r9 ;" -- "adcq 8(%1), %%r9 ;" -- "movq 16(%2), %%r10 ;" -- "adcq 16(%1), %%r10 ;" -- "movq 24(%2), %%r11 ;" -- "adcq 24(%1), %%r11 ;" -- "mov $0, %%ecx ;" -- "cmovc %%eax, %%ecx ;" -- "addq %%rcx, %%r8 ;" -- "adcq $0, %%r9 ;" -- "movq %%r9, 8(%0) ;" -- "adcq $0, %%r10 ;" -- "movq %%r10, 16(%0) ;" -- "adcq $0, %%r11 ;" -- "movq %%r11, 24(%0) ;" -- "mov $0, %%ecx ;" -- "cmovc %%eax, %%ecx ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, (%0) ;" -- : -- : "r"(c), "r"(a), "r"(b) -- : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11"); -+ /* Compute the raw multiplication of f1*f2 */ -+ " mulxq 0(%2), %%r8, %%rcx;" /* f1[0]*f2 */ -+ " mulxq 8(%2), %%r9, 
%%r12;" /* f1[1]*f2 */ -+ " add %%rcx, %%r9;" -+ " mov $0, %%rcx;" -+ " mulxq 16(%2), %%r10, %%r13;" /* f1[2]*f2 */ -+ " adcx %%r12, %%r10;" -+ " mulxq 24(%2), %%r11, %%rax;" /* f1[3]*f2 */ -+ " adcx %%r13, %%r11;" -+ " adcx %%rcx, %%rax;" -+ -+ /* Wrap the result back into the field */ -+ -+ /* Step 1: Compute carry*38 */ -+ " mov $38, %%rdx;" -+ " imul %%rdx, %%rax;" -+ -+ /* Step 2: Fold the carry back into dst */ -+ " add %%rax, %%r8;" -+ " adcx %%rcx, %%r9;" -+ " movq %%r9, 8(%1);" -+ " adcx %%rcx, %%r10;" -+ " movq %%r10, 16(%1);" -+ " adcx %%rcx, %%r11;" -+ " movq %%r11, 24(%1);" -+ -+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ -+ " mov $0, %%rax;" -+ " cmovc %%rdx, %%rax;" -+ " add %%rax, %%r8;" -+ " movq %%r8, 0(%1);" -+ : "+&r" (f2_r) -+ : "r" (out), "r" (f1) -+ : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "memory", "cc" -+ ); - } - --static __always_inline void --sub_eltfp25519_1w(u64 *const c, const u64 *const a, const u64 *const b) --{ -- asm volatile( -- "mov $38, %%eax ;" -- "movq (%1), %%r8 ;" -- "subq (%2), %%r8 ;" -- "movq 8(%1), %%r9 ;" -- "sbbq 8(%2), %%r9 ;" -- "movq 16(%1), %%r10 ;" -- "sbbq 16(%2), %%r10 ;" -- "movq 24(%1), %%r11 ;" -- "sbbq 24(%2), %%r11 ;" -- "mov $0, %%ecx ;" -- "cmovc %%eax, %%ecx ;" -- "subq %%rcx, %%r8 ;" -- "sbbq $0, %%r9 ;" -- "movq %%r9, 8(%0) ;" -- "sbbq $0, %%r10 ;" -- "movq %%r10, 16(%0) ;" -- "sbbq $0, %%r11 ;" -- "movq %%r11, 24(%0) ;" -- "mov $0, %%ecx ;" -- "cmovc %%eax, %%ecx ;" -- "subq %%rcx, %%r8 ;" -- "movq %%r8, (%0) ;" -- : -- : "r"(c), "r"(a), "r"(b) -- : "memory", "cc", "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11"); --} -- --/* Multiplication by a24 = (A+2)/4 = (486662+2)/4 = 121666 */ --static __always_inline void --mul_a24_eltfp25519_1w(u64 *const c, const u64 *const a) -+/* Computes p1 <- bit ? 
p2 : p1 in constant time */ -+static inline void cswap2(u64 bit, const u64 *p1, const u64 *p2) - { -- const u64 a24 = 121666; - asm volatile( -- "movq %2, %%rdx ;" -- "mulx (%1), %%r8, %%r10 ;" -- "mulx 8(%1), %%r9, %%r11 ;" -- "addq %%r10, %%r9 ;" -- "mulx 16(%1), %%r10, %%rax ;" -- "adcq %%r11, %%r10 ;" -- "mulx 24(%1), %%r11, %%rcx ;" -- "adcq %%rax, %%r11 ;" -- /**************************/ -- "adcq $0, %%rcx ;" -- "movl $38, %%edx ;" /* 2*c = 38 = 2^256 mod 2^255-19*/ -- "imul %%rdx, %%rcx ;" -- "addq %%rcx, %%r8 ;" -- "adcq $0, %%r9 ;" -- "movq %%r9, 8(%0) ;" -- "adcq $0, %%r10 ;" -- "movq %%r10, 16(%0) ;" -- "adcq $0, %%r11 ;" -- "movq %%r11, 24(%0) ;" -- "mov $0, %%ecx ;" -- "cmovc %%edx, %%ecx ;" -- "addq %%rcx, %%r8 ;" -- "movq %%r8, (%0) ;" -- : -- : "r"(c), "r"(a), "r"(a24) -- : "memory", "cc", "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", -- "%r11"); --} -- --static void inv_eltfp25519_1w_adx(u64 *const c, const u64 *const a) --{ -- struct { -- eltfp25519_1w_buffer buffer; -- eltfp25519_1w x0, x1, x2; -- } __aligned(32) m; -- u64 *T[4]; -- -- T[0] = m.x0; -- T[1] = c; /* x^(-1) */ -- T[2] = m.x1; -- T[3] = m.x2; -- -- copy_eltfp25519_1w(T[1], a); -- sqrn_eltfp25519_1w_adx(T[1], 1); -- copy_eltfp25519_1w(T[2], T[1]); -- sqrn_eltfp25519_1w_adx(T[2], 2); -- mul_eltfp25519_1w_adx(T[0], a, T[2]); -- mul_eltfp25519_1w_adx(T[1], T[1], T[0]); -- copy_eltfp25519_1w(T[2], T[1]); -- sqrn_eltfp25519_1w_adx(T[2], 1); -- mul_eltfp25519_1w_adx(T[0], T[0], T[2]); -- copy_eltfp25519_1w(T[2], T[0]); -- sqrn_eltfp25519_1w_adx(T[2], 5); -- mul_eltfp25519_1w_adx(T[0], T[0], T[2]); -- copy_eltfp25519_1w(T[2], T[0]); -- sqrn_eltfp25519_1w_adx(T[2], 10); -- mul_eltfp25519_1w_adx(T[2], T[2], T[0]); -- copy_eltfp25519_1w(T[3], T[2]); -- sqrn_eltfp25519_1w_adx(T[3], 20); -- mul_eltfp25519_1w_adx(T[3], T[3], T[2]); -- sqrn_eltfp25519_1w_adx(T[3], 10); -- mul_eltfp25519_1w_adx(T[3], T[3], T[0]); -- copy_eltfp25519_1w(T[0], T[3]); -- sqrn_eltfp25519_1w_adx(T[0], 50); -- 
mul_eltfp25519_1w_adx(T[0], T[0], T[3]); -- copy_eltfp25519_1w(T[2], T[0]); -- sqrn_eltfp25519_1w_adx(T[2], 100); -- mul_eltfp25519_1w_adx(T[2], T[2], T[0]); -- sqrn_eltfp25519_1w_adx(T[2], 50); -- mul_eltfp25519_1w_adx(T[2], T[2], T[3]); -- sqrn_eltfp25519_1w_adx(T[2], 5); -- mul_eltfp25519_1w_adx(T[1], T[1], T[2]); -- -- memzero_explicit(&m, sizeof(m)); --} -- --static void inv_eltfp25519_1w_bmi2(u64 *const c, const u64 *const a) --{ -- struct { -- eltfp25519_1w_buffer buffer; -- eltfp25519_1w x0, x1, x2; -- } __aligned(32) m; -- u64 *T[5]; -- -- T[0] = m.x0; -- T[1] = c; /* x^(-1) */ -- T[2] = m.x1; -- T[3] = m.x2; -- -- copy_eltfp25519_1w(T[1], a); -- sqrn_eltfp25519_1w_bmi2(T[1], 1); -- copy_eltfp25519_1w(T[2], T[1]); -- sqrn_eltfp25519_1w_bmi2(T[2], 2); -- mul_eltfp25519_1w_bmi2(T[0], a, T[2]); -- mul_eltfp25519_1w_bmi2(T[1], T[1], T[0]); -- copy_eltfp25519_1w(T[2], T[1]); -- sqrn_eltfp25519_1w_bmi2(T[2], 1); -- mul_eltfp25519_1w_bmi2(T[0], T[0], T[2]); -- copy_eltfp25519_1w(T[2], T[0]); -- sqrn_eltfp25519_1w_bmi2(T[2], 5); -- mul_eltfp25519_1w_bmi2(T[0], T[0], T[2]); -- copy_eltfp25519_1w(T[2], T[0]); -- sqrn_eltfp25519_1w_bmi2(T[2], 10); -- mul_eltfp25519_1w_bmi2(T[2], T[2], T[0]); -- copy_eltfp25519_1w(T[3], T[2]); -- sqrn_eltfp25519_1w_bmi2(T[3], 20); -- mul_eltfp25519_1w_bmi2(T[3], T[3], T[2]); -- sqrn_eltfp25519_1w_bmi2(T[3], 10); -- mul_eltfp25519_1w_bmi2(T[3], T[3], T[0]); -- copy_eltfp25519_1w(T[0], T[3]); -- sqrn_eltfp25519_1w_bmi2(T[0], 50); -- mul_eltfp25519_1w_bmi2(T[0], T[0], T[3]); -- copy_eltfp25519_1w(T[2], T[0]); -- sqrn_eltfp25519_1w_bmi2(T[2], 100); -- mul_eltfp25519_1w_bmi2(T[2], T[2], T[0]); -- sqrn_eltfp25519_1w_bmi2(T[2], 50); -- mul_eltfp25519_1w_bmi2(T[2], T[2], T[3]); -- sqrn_eltfp25519_1w_bmi2(T[2], 5); -- mul_eltfp25519_1w_bmi2(T[1], T[1], T[2]); -+ /* Invert the polarity of bit to match cmov expectations */ -+ " add $18446744073709551615, %0;" - -- memzero_explicit(&m, sizeof(m)); -+ /* cswap p1[0], p2[0] */ -+ " movq 0(%1), 
%%r8;" -+ " movq 0(%2), %%r9;" -+ " mov %%r8, %%r10;" -+ " cmovc %%r9, %%r8;" -+ " cmovc %%r10, %%r9;" -+ " movq %%r8, 0(%1);" -+ " movq %%r9, 0(%2);" -+ -+ /* cswap p1[1], p2[1] */ -+ " movq 8(%1), %%r8;" -+ " movq 8(%2), %%r9;" -+ " mov %%r8, %%r10;" -+ " cmovc %%r9, %%r8;" -+ " cmovc %%r10, %%r9;" -+ " movq %%r8, 8(%1);" -+ " movq %%r9, 8(%2);" -+ -+ /* cswap p1[2], p2[2] */ -+ " movq 16(%1), %%r8;" -+ " movq 16(%2), %%r9;" -+ " mov %%r8, %%r10;" -+ " cmovc %%r9, %%r8;" -+ " cmovc %%r10, %%r9;" -+ " movq %%r8, 16(%1);" -+ " movq %%r9, 16(%2);" -+ -+ /* cswap p1[3], p2[3] */ -+ " movq 24(%1), %%r8;" -+ " movq 24(%2), %%r9;" -+ " mov %%r8, %%r10;" -+ " cmovc %%r9, %%r8;" -+ " cmovc %%r10, %%r9;" -+ " movq %%r8, 24(%1);" -+ " movq %%r9, 24(%2);" -+ -+ /* cswap p1[4], p2[4] */ -+ " movq 32(%1), %%r8;" -+ " movq 32(%2), %%r9;" -+ " mov %%r8, %%r10;" -+ " cmovc %%r9, %%r8;" -+ " cmovc %%r10, %%r9;" -+ " movq %%r8, 32(%1);" -+ " movq %%r9, 32(%2);" -+ -+ /* cswap p1[5], p2[5] */ -+ " movq 40(%1), %%r8;" -+ " movq 40(%2), %%r9;" -+ " mov %%r8, %%r10;" -+ " cmovc %%r9, %%r8;" -+ " cmovc %%r10, %%r9;" -+ " movq %%r8, 40(%1);" -+ " movq %%r9, 40(%2);" -+ -+ /* cswap p1[6], p2[6] */ -+ " movq 48(%1), %%r8;" -+ " movq 48(%2), %%r9;" -+ " mov %%r8, %%r10;" -+ " cmovc %%r9, %%r8;" -+ " cmovc %%r10, %%r9;" -+ " movq %%r8, 48(%1);" -+ " movq %%r9, 48(%2);" -+ -+ /* cswap p1[7], p2[7] */ -+ " movq 56(%1), %%r8;" -+ " movq 56(%2), %%r9;" -+ " mov %%r8, %%r10;" -+ " cmovc %%r9, %%r8;" -+ " cmovc %%r10, %%r9;" -+ " movq %%r8, 56(%1);" -+ " movq %%r9, 56(%2);" -+ : "+&r" (bit) -+ : "r" (p1), "r" (p2) -+ : "%r8", "%r9", "%r10", "memory", "cc" -+ ); - } - --/* Given c, a 256-bit number, fred_eltfp25519_1w updates c -- * with a number such that 0 <= C < 2**255-19. 
-- */ --static __always_inline void fred_eltfp25519_1w(u64 *const c) -+/* Computes the square of a field element: out <- f * f -+ * Uses the 8-element buffer tmp for intermediate results */ -+static inline void fsqr(u64 *out, const u64 *f, u64 *tmp) - { -- u64 tmp0 = 38, tmp1 = 19; - asm volatile( -- "btrq $63, %3 ;" /* Put bit 255 in carry flag and clear */ -- "cmovncl %k5, %k4 ;" /* c[255] ? 38 : 19 */ -- -- /* Add either 19 or 38 to c */ -- "addq %4, %0 ;" -- "adcq $0, %1 ;" -- "adcq $0, %2 ;" -- "adcq $0, %3 ;" -- -- /* Test for bit 255 again; only triggered on overflow modulo 2^255-19 */ -- "movl $0, %k4 ;" -- "cmovnsl %k5, %k4 ;" /* c[255] ? 0 : 19 */ -- "btrq $63, %3 ;" /* Clear bit 255 */ -- -- /* Subtract 19 if necessary */ -- "subq %4, %0 ;" -- "sbbq $0, %1 ;" -- "sbbq $0, %2 ;" -- "sbbq $0, %3 ;" -- -- : "+r"(c[0]), "+r"(c[1]), "+r"(c[2]), "+r"(c[3]), "+r"(tmp0), -- "+r"(tmp1) -- : -- : "memory", "cc"); --} -+ /* Compute the raw multiplication: tmp <- f * f */ - --static __always_inline void cswap(u8 bit, u64 *const px, u64 *const py) --{ -- u64 temp; -- asm volatile( -- "test %9, %9 ;" -- "movq %0, %8 ;" -- "cmovnzq %4, %0 ;" -- "cmovnzq %8, %4 ;" -- "movq %1, %8 ;" -- "cmovnzq %5, %1 ;" -- "cmovnzq %8, %5 ;" -- "movq %2, %8 ;" -- "cmovnzq %6, %2 ;" -- "cmovnzq %8, %6 ;" -- "movq %3, %8 ;" -- "cmovnzq %7, %3 ;" -- "cmovnzq %8, %7 ;" -- : "+r"(px[0]), "+r"(px[1]), "+r"(px[2]), "+r"(px[3]), -- "+r"(py[0]), "+r"(py[1]), "+r"(py[2]), "+r"(py[3]), -- "=r"(temp) -- : "r"(bit) -- : "cc" -+ /* Step 1: Compute all partial products */ -+ " movq 0(%1), %%rdx;" /* f[0] */ -+ " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */ -+ " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ -+ " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ -+ " movq 24(%1), %%rdx;" /* f[3] */ -+ " mulxq 8(%1), %%r11, %%r12;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */ -+ " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%r12;" /* f[2]*f[3] */ -+ " movq 
8(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */ -+ " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ -+ -+ /* Step 2: Compute two parallel carry chains */ -+ " xor %%r15, %%r15;" -+ " adox %%rax, %%r10;" -+ " adcx %%r8, %%r8;" -+ " adox %%rcx, %%r11;" -+ " adcx %%r9, %%r9;" -+ " adox %%r15, %%r12;" -+ " adcx %%r10, %%r10;" -+ " adox %%r15, %%r13;" -+ " adcx %%r11, %%r11;" -+ " adox %%r15, %%r14;" -+ " adcx %%r12, %%r12;" -+ " adcx %%r13, %%r13;" -+ " adcx %%r14, %%r14;" -+ -+ /* Step 3: Compute intermediate squares */ -+ " movq 0(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */ -+ " movq %%rax, 0(%0);" -+ " add %%rcx, %%r8;" " movq %%r8, 8(%0);" -+ " movq 8(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */ -+ " adcx %%rax, %%r9;" " movq %%r9, 16(%0);" -+ " adcx %%rcx, %%r10;" " movq %%r10, 24(%0);" -+ " movq 16(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ -+ " adcx %%rax, %%r11;" " movq %%r11, 32(%0);" -+ " adcx %%rcx, %%r12;" " movq %%r12, 40(%0);" -+ " movq 24(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */ -+ " adcx %%rax, %%r13;" " movq %%r13, 48(%0);" -+ " adcx %%rcx, %%r14;" " movq %%r14, 56(%0);" -+ -+ /* Line up pointers */ -+ " mov %0, %1;" -+ " mov %2, %0;" -+ -+ /* Wrap the result back into the field */ -+ -+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ -+ " mov $38, %%rdx;" -+ " mulxq 32(%1), %%r8, %%r13;" -+ " xor %%rcx, %%rcx;" -+ " adoxq 0(%1), %%r8;" -+ " mulxq 40(%1), %%r9, %%r12;" -+ " adcx %%r13, %%r9;" -+ " adoxq 8(%1), %%r9;" -+ " mulxq 48(%1), %%r10, %%r13;" -+ " adcx %%r12, %%r10;" -+ " adoxq 16(%1), %%r10;" -+ " mulxq 56(%1), %%r11, %%rax;" -+ " adcx %%r13, %%r11;" -+ " adoxq 24(%1), %%r11;" -+ " adcx %%rcx, %%rax;" -+ " adox %%rcx, %%rax;" -+ " imul %%rdx, %%rax;" -+ -+ /* Step 2: Fold the carry back into dst */ -+ " add %%rax, %%r8;" -+ " adcx %%rcx, %%r9;" -+ " movq %%r9, 8(%0);" -+ " adcx %%rcx, %%r10;" -+ " movq %%r10, 16(%0);" -+ " adcx %%rcx, %%r11;" -+ " movq %%r11, 24(%0);" -+ -+ 
/* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ -+ " mov $0, %%rax;" -+ " cmovc %%rdx, %%rax;" -+ " add %%rax, %%r8;" -+ " movq %%r8, 0(%0);" -+ : "+&r" (tmp), "+&r" (f), "+&r" (out) -+ : -+ : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "memory", "cc" - ); - } - --static __always_inline void cselect(u8 bit, u64 *const px, const u64 *const py) -+/* Computes two field squarings: -+ * out[0] <- f[0] * f[0] -+ * out[1] <- f[1] * f[1] -+ * Uses the 16-element buffer tmp for intermediate results */ -+static inline void fsqr2(u64 *out, const u64 *f, u64 *tmp) - { - asm volatile( -- "test %4, %4 ;" -- "cmovnzq %5, %0 ;" -- "cmovnzq %6, %1 ;" -- "cmovnzq %7, %2 ;" -- "cmovnzq %8, %3 ;" -- : "+r"(px[0]), "+r"(px[1]), "+r"(px[2]), "+r"(px[3]) -- : "r"(bit), "rm"(py[0]), "rm"(py[1]), "rm"(py[2]), "rm"(py[3]) -- : "cc" -+ /* Step 1: Compute all partial products */ -+ " movq 0(%1), %%rdx;" /* f[0] */ -+ " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */ -+ " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ -+ " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ -+ " movq 24(%1), %%rdx;" /* f[3] */ -+ " mulxq 8(%1), %%r11, %%r12;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */ -+ " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%r12;" /* f[2]*f[3] */ -+ " movq 8(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */ -+ " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ -+ -+ /* Step 2: Compute two parallel carry chains */ -+ " xor %%r15, %%r15;" -+ " adox %%rax, %%r10;" -+ " adcx %%r8, %%r8;" -+ " adox %%rcx, %%r11;" -+ " adcx %%r9, %%r9;" -+ " adox %%r15, %%r12;" -+ " adcx %%r10, %%r10;" -+ " adox %%r15, %%r13;" -+ " adcx %%r11, %%r11;" -+ " adox %%r15, %%r14;" -+ " adcx %%r12, %%r12;" -+ " adcx %%r13, %%r13;" -+ " adcx %%r14, %%r14;" -+ -+ /* Step 3: Compute intermediate squares */ -+ " movq 0(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */ -+ " movq %%rax, 
0(%0);" -+ " add %%rcx, %%r8;" " movq %%r8, 8(%0);" -+ " movq 8(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */ -+ " adcx %%rax, %%r9;" " movq %%r9, 16(%0);" -+ " adcx %%rcx, %%r10;" " movq %%r10, 24(%0);" -+ " movq 16(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ -+ " adcx %%rax, %%r11;" " movq %%r11, 32(%0);" -+ " adcx %%rcx, %%r12;" " movq %%r12, 40(%0);" -+ " movq 24(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */ -+ " adcx %%rax, %%r13;" " movq %%r13, 48(%0);" -+ " adcx %%rcx, %%r14;" " movq %%r14, 56(%0);" -+ -+ /* Step 1: Compute all partial products */ -+ " movq 32(%1), %%rdx;" /* f[0] */ -+ " mulxq 40(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */ -+ " mulxq 48(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ -+ " mulxq 56(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ -+ " movq 56(%1), %%rdx;" /* f[3] */ -+ " mulxq 40(%1), %%r11, %%r12;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */ -+ " mulxq 48(%1), %%rax, %%r13;" " adcx %%rax, %%r12;" /* f[2]*f[3] */ -+ " movq 40(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */ -+ " mulxq 48(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ -+ -+ /* Step 2: Compute two parallel carry chains */ -+ " xor %%r15, %%r15;" -+ " adox %%rax, %%r10;" -+ " adcx %%r8, %%r8;" -+ " adox %%rcx, %%r11;" -+ " adcx %%r9, %%r9;" -+ " adox %%r15, %%r12;" -+ " adcx %%r10, %%r10;" -+ " adox %%r15, %%r13;" -+ " adcx %%r11, %%r11;" -+ " adox %%r15, %%r14;" -+ " adcx %%r12, %%r12;" -+ " adcx %%r13, %%r13;" -+ " adcx %%r14, %%r14;" -+ -+ /* Step 3: Compute intermediate squares */ -+ " movq 32(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[0]^2 */ -+ " movq %%rax, 64(%0);" -+ " add %%rcx, %%r8;" " movq %%r8, 72(%0);" -+ " movq 40(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[1]^2 */ -+ " adcx %%rax, %%r9;" " movq %%r9, 80(%0);" -+ " adcx %%rcx, %%r10;" " movq %%r10, 88(%0);" -+ " movq 48(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ -+ " adcx %%rax, %%r11;" " movq %%r11, 96(%0);" -+ " adcx 
%%rcx, %%r12;" " movq %%r12, 104(%0);" -+ " movq 56(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */ -+ " adcx %%rax, %%r13;" " movq %%r13, 112(%0);" -+ " adcx %%rcx, %%r14;" " movq %%r14, 120(%0);" -+ -+ /* Line up pointers */ -+ " mov %0, %1;" -+ " mov %2, %0;" -+ -+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ -+ " mov $38, %%rdx;" -+ " mulxq 32(%1), %%r8, %%r13;" -+ " xor %%rcx, %%rcx;" -+ " adoxq 0(%1), %%r8;" -+ " mulxq 40(%1), %%r9, %%r12;" -+ " adcx %%r13, %%r9;" -+ " adoxq 8(%1), %%r9;" -+ " mulxq 48(%1), %%r10, %%r13;" -+ " adcx %%r12, %%r10;" -+ " adoxq 16(%1), %%r10;" -+ " mulxq 56(%1), %%r11, %%rax;" -+ " adcx %%r13, %%r11;" -+ " adoxq 24(%1), %%r11;" -+ " adcx %%rcx, %%rax;" -+ " adox %%rcx, %%rax;" -+ " imul %%rdx, %%rax;" -+ -+ /* Step 2: Fold the carry back into dst */ -+ " add %%rax, %%r8;" -+ " adcx %%rcx, %%r9;" -+ " movq %%r9, 8(%0);" -+ " adcx %%rcx, %%r10;" -+ " movq %%r10, 16(%0);" -+ " adcx %%rcx, %%r11;" -+ " movq %%r11, 24(%0);" -+ -+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ -+ " mov $0, %%rax;" -+ " cmovc %%rdx, %%rax;" -+ " add %%rax, %%r8;" -+ " movq %%r8, 0(%0);" -+ -+ /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ -+ " mov $38, %%rdx;" -+ " mulxq 96(%1), %%r8, %%r13;" -+ " xor %%rcx, %%rcx;" -+ " adoxq 64(%1), %%r8;" -+ " mulxq 104(%1), %%r9, %%r12;" -+ " adcx %%r13, %%r9;" -+ " adoxq 72(%1), %%r9;" -+ " mulxq 112(%1), %%r10, %%r13;" -+ " adcx %%r12, %%r10;" -+ " adoxq 80(%1), %%r10;" -+ " mulxq 120(%1), %%r11, %%rax;" -+ " adcx %%r13, %%r11;" -+ " adoxq 88(%1), %%r11;" -+ " adcx %%rcx, %%rax;" -+ " adox %%rcx, %%rax;" -+ " imul %%rdx, %%rax;" -+ -+ /* Step 2: Fold the carry back into dst */ -+ " add %%rax, %%r8;" -+ " adcx %%rcx, %%r9;" -+ " movq %%r9, 40(%0);" -+ " adcx %%rcx, %%r10;" -+ " movq %%r10, 48(%0);" -+ " adcx %%rcx, %%r11;" -+ " movq %%r11, 56(%0);" -+ -+ /* Step 3: Fold the carry bit back in; guaranteed not to carry at this point */ -+ " mov $0, 
%%rax;" -+ " cmovc %%rdx, %%rax;" -+ " add %%rax, %%r8;" -+ " movq %%r8, 32(%0);" -+ : "+&r" (tmp), "+&r" (f), "+&r" (out) -+ : -+ : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "memory", "cc" - ); - } - --static void curve25519_adx(u8 shared[CURVE25519_KEY_SIZE], -- const u8 private_key[CURVE25519_KEY_SIZE], -- const u8 session_key[CURVE25519_KEY_SIZE]) --{ -- struct { -- u64 buffer[4 * NUM_WORDS_ELTFP25519]; -- u64 coordinates[4 * NUM_WORDS_ELTFP25519]; -- u64 workspace[6 * NUM_WORDS_ELTFP25519]; -- u8 session[CURVE25519_KEY_SIZE]; -- u8 private[CURVE25519_KEY_SIZE]; -- } __aligned(32) m; -- -- int i = 0, j = 0; -- u64 prev = 0; -- u64 *const X1 = (u64 *)m.session; -- u64 *const key = (u64 *)m.private; -- u64 *const Px = m.coordinates + 0; -- u64 *const Pz = m.coordinates + 4; -- u64 *const Qx = m.coordinates + 8; -- u64 *const Qz = m.coordinates + 12; -- u64 *const X2 = Qx; -- u64 *const Z2 = Qz; -- u64 *const X3 = Px; -- u64 *const Z3 = Pz; -- u64 *const X2Z2 = Qx; -- u64 *const X3Z3 = Px; -- -- u64 *const A = m.workspace + 0; -- u64 *const B = m.workspace + 4; -- u64 *const D = m.workspace + 8; -- u64 *const C = m.workspace + 12; -- u64 *const DA = m.workspace + 16; -- u64 *const CB = m.workspace + 20; -- u64 *const AB = A; -- u64 *const DC = D; -- u64 *const DACB = DA; -- -- memcpy(m.private, private_key, sizeof(m.private)); -- memcpy(m.session, session_key, sizeof(m.session)); -- -- curve25519_clamp_secret(m.private); -- -- /* As in the draft: -- * When receiving such an array, implementations of curve25519 -- * MUST mask the most-significant bit in the final byte. 
This -- * is done to preserve compatibility with point formats which -- * reserve the sign bit for use in other protocols and to -- * increase resistance to implementation fingerprinting -- */ -- m.session[CURVE25519_KEY_SIZE - 1] &= (1 << (255 % 8)) - 1; -- -- copy_eltfp25519_1w(Px, X1); -- setzero_eltfp25519_1w(Pz); -- setzero_eltfp25519_1w(Qx); -- setzero_eltfp25519_1w(Qz); -- -- Pz[0] = 1; -- Qx[0] = 1; -- -- /* main-loop */ -- prev = 0; -- j = 62; -- for (i = 3; i >= 0; --i) { -- while (j >= 0) { -- u64 bit = (key[i] >> j) & 0x1; -- u64 swap = bit ^ prev; -- prev = bit; -- -- add_eltfp25519_1w_adx(A, X2, Z2); /* A = (X2+Z2) */ -- sub_eltfp25519_1w(B, X2, Z2); /* B = (X2-Z2) */ -- add_eltfp25519_1w_adx(C, X3, Z3); /* C = (X3+Z3) */ -- sub_eltfp25519_1w(D, X3, Z3); /* D = (X3-Z3) */ -- mul_eltfp25519_2w_adx(DACB, AB, DC); /* [DA|CB] = [A|B]*[D|C] */ -- -- cselect(swap, A, C); -- cselect(swap, B, D); -- -- sqr_eltfp25519_2w_adx(AB); /* [AA|BB] = [A^2|B^2] */ -- add_eltfp25519_1w_adx(X3, DA, CB); /* X3 = (DA+CB) */ -- sub_eltfp25519_1w(Z3, DA, CB); /* Z3 = (DA-CB) */ -- sqr_eltfp25519_2w_adx(X3Z3); /* [X3|Z3] = [(DA+CB)|(DA+CB)]^2 */ -- -- copy_eltfp25519_1w(X2, B); /* X2 = B^2 */ -- sub_eltfp25519_1w(Z2, A, B); /* Z2 = E = AA-BB */ -- -- mul_a24_eltfp25519_1w(B, Z2); /* B = a24*E */ -- add_eltfp25519_1w_adx(B, B, X2); /* B = a24*E+B */ -- mul_eltfp25519_2w_adx(X2Z2, X2Z2, AB); /* [X2|Z2] = [B|E]*[A|a24*E+B] */ -- mul_eltfp25519_1w_adx(Z3, Z3, X1); /* Z3 = Z3*X1 */ -- --j; -- } -- j = 63; -- } -- -- inv_eltfp25519_1w_adx(A, Qz); -- mul_eltfp25519_1w_adx((u64 *)shared, Qx, A); -- fred_eltfp25519_1w((u64 *)shared); -- -- memzero_explicit(&m, sizeof(m)); --} -- --static void curve25519_adx_base(u8 session_key[CURVE25519_KEY_SIZE], -- const u8 private_key[CURVE25519_KEY_SIZE]) -+static void point_add_and_double(u64 *q, u64 *p01_tmp1, u64 *tmp2) - { -- struct { -- u64 buffer[4 * NUM_WORDS_ELTFP25519]; -- u64 coordinates[4 * NUM_WORDS_ELTFP25519]; -- u64 workspace[4 * 
NUM_WORDS_ELTFP25519]; -- u8 private[CURVE25519_KEY_SIZE]; -- } __aligned(32) m; -- -- const int ite[4] = { 64, 64, 64, 63 }; -- const int q = 3; -- u64 swap = 1; -- -- int i = 0, j = 0, k = 0; -- u64 *const key = (u64 *)m.private; -- u64 *const Ur1 = m.coordinates + 0; -- u64 *const Zr1 = m.coordinates + 4; -- u64 *const Ur2 = m.coordinates + 8; -- u64 *const Zr2 = m.coordinates + 12; -- -- u64 *const UZr1 = m.coordinates + 0; -- u64 *const ZUr2 = m.coordinates + 8; -- -- u64 *const A = m.workspace + 0; -- u64 *const B = m.workspace + 4; -- u64 *const C = m.workspace + 8; -- u64 *const D = m.workspace + 12; -- -- u64 *const AB = m.workspace + 0; -- u64 *const CD = m.workspace + 8; -- -- const u64 *const P = table_ladder_8k; -- -- memcpy(m.private, private_key, sizeof(m.private)); -- -- curve25519_clamp_secret(m.private); -- -- setzero_eltfp25519_1w(Ur1); -- setzero_eltfp25519_1w(Zr1); -- setzero_eltfp25519_1w(Zr2); -- Ur1[0] = 1; -- Zr1[0] = 1; -- Zr2[0] = 1; -- -- /* G-S */ -- Ur2[3] = 0x1eaecdeee27cab34UL; -- Ur2[2] = 0xadc7a0b9235d48e2UL; -- Ur2[1] = 0xbbf095ae14b2edf8UL; -- Ur2[0] = 0x7e94e1fec82faabdUL; -- -- /* main-loop */ -- j = q; -- for (i = 0; i < NUM_WORDS_ELTFP25519; ++i) { -- while (j < ite[i]) { -- u64 bit = (key[i] >> j) & 0x1; -- k = (64 * i + j - q); -- swap = swap ^ bit; -- cswap(swap, Ur1, Ur2); -- cswap(swap, Zr1, Zr2); -- swap = bit; -- /* Addition */ -- sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */ -- add_eltfp25519_1w_adx(A, Ur1, Zr1); /* A = Ur1+Zr1 */ -- mul_eltfp25519_1w_adx(C, &P[4 * k], B); /* C = M0-B */ -- sub_eltfp25519_1w(B, A, C); /* B = (Ur1+Zr1) - M*(Ur1-Zr1) */ -- add_eltfp25519_1w_adx(A, A, C); /* A = (Ur1+Zr1) + M*(Ur1-Zr1) */ -- sqr_eltfp25519_2w_adx(AB); /* A = A^2 | B = B^2 */ -- mul_eltfp25519_2w_adx(UZr1, ZUr2, AB); /* Ur1 = Zr2*A | Zr1 = Ur2*B */ -- ++j; -+ u64 *nq = p01_tmp1; -+ u64 *nq_p1 = p01_tmp1 + (u32)8U; -+ u64 *tmp1 = p01_tmp1 + (u32)16U; -+ u64 *x1 = q; -+ u64 *x2 = nq; -+ u64 *z2 = nq + (u32)4U; -+ u64 
*z3 = nq_p1 + (u32)4U; -+ u64 *a = tmp1; -+ u64 *b = tmp1 + (u32)4U; -+ u64 *ab = tmp1; -+ u64 *dc = tmp1 + (u32)8U; -+ u64 *x3; -+ u64 *z31; -+ u64 *d0; -+ u64 *c0; -+ u64 *a1; -+ u64 *b1; -+ u64 *d; -+ u64 *c; -+ u64 *ab1; -+ u64 *dc1; -+ fadd(a, x2, z2); -+ fsub(b, x2, z2); -+ x3 = nq_p1; -+ z31 = nq_p1 + (u32)4U; -+ d0 = dc; -+ c0 = dc + (u32)4U; -+ fadd(c0, x3, z31); -+ fsub(d0, x3, z31); -+ fmul2(dc, dc, ab, tmp2); -+ fadd(x3, d0, c0); -+ fsub(z31, d0, c0); -+ a1 = tmp1; -+ b1 = tmp1 + (u32)4U; -+ d = tmp1 + (u32)8U; -+ c = tmp1 + (u32)12U; -+ ab1 = tmp1; -+ dc1 = tmp1 + (u32)8U; -+ fsqr2(dc1, ab1, tmp2); -+ fsqr2(nq_p1, nq_p1, tmp2); -+ a1[0U] = c[0U]; -+ a1[1U] = c[1U]; -+ a1[2U] = c[2U]; -+ a1[3U] = c[3U]; -+ fsub(c, d, c); -+ fmul_scalar(b1, c, (u64)121665U); -+ fadd(b1, b1, d); -+ fmul2(nq, dc1, ab1, tmp2); -+ fmul(z3, z3, x1, tmp2); -+} -+ -+static void point_double(u64 *nq, u64 *tmp1, u64 *tmp2) -+{ -+ u64 *x2 = nq; -+ u64 *z2 = nq + (u32)4U; -+ u64 *a = tmp1; -+ u64 *b = tmp1 + (u32)4U; -+ u64 *d = tmp1 + (u32)8U; -+ u64 *c = tmp1 + (u32)12U; -+ u64 *ab = tmp1; -+ u64 *dc = tmp1 + (u32)8U; -+ fadd(a, x2, z2); -+ fsub(b, x2, z2); -+ fsqr2(dc, ab, tmp2); -+ a[0U] = c[0U]; -+ a[1U] = c[1U]; -+ a[2U] = c[2U]; -+ a[3U] = c[3U]; -+ fsub(c, d, c); -+ fmul_scalar(b, c, (u64)121665U); -+ fadd(b, b, d); -+ fmul2(nq, dc, ab, tmp2); -+} -+ -+static void montgomery_ladder(u64 *out, const u8 *key, u64 *init1) -+{ -+ u64 tmp2[16U] = { 0U }; -+ u64 p01_tmp1_swap[33U] = { 0U }; -+ u64 *p0 = p01_tmp1_swap; -+ u64 *p01 = p01_tmp1_swap; -+ u64 *p03 = p01; -+ u64 *p11 = p01 + (u32)8U; -+ u64 *x0; -+ u64 *z0; -+ u64 *p01_tmp1; -+ u64 *p01_tmp11; -+ u64 *nq10; -+ u64 *nq_p11; -+ u64 *swap1; -+ u64 sw0; -+ u64 *nq1; -+ u64 *tmp1; -+ memcpy(p11, init1, (u32)8U * sizeof(init1[0U])); -+ x0 = p03; -+ z0 = p03 + (u32)4U; -+ x0[0U] = (u64)1U; -+ x0[1U] = (u64)0U; -+ x0[2U] = (u64)0U; -+ x0[3U] = (u64)0U; -+ z0[0U] = (u64)0U; -+ z0[1U] = (u64)0U; -+ z0[2U] = (u64)0U; -+ z0[3U] = 
(u64)0U; -+ p01_tmp1 = p01_tmp1_swap; -+ p01_tmp11 = p01_tmp1_swap; -+ nq10 = p01_tmp1_swap; -+ nq_p11 = p01_tmp1_swap + (u32)8U; -+ swap1 = p01_tmp1_swap + (u32)32U; -+ cswap2((u64)1U, nq10, nq_p11); -+ point_add_and_double(init1, p01_tmp11, tmp2); -+ swap1[0U] = (u64)1U; -+ { -+ u32 i; -+ for (i = (u32)0U; i < (u32)251U; i = i + (u32)1U) { -+ u64 *p01_tmp12 = p01_tmp1_swap; -+ u64 *swap2 = p01_tmp1_swap + (u32)32U; -+ u64 *nq2 = p01_tmp12; -+ u64 *nq_p12 = p01_tmp12 + (u32)8U; -+ u64 bit = (u64)(key[((u32)253U - i) / (u32)8U] >> ((u32)253U - i) % (u32)8U & (u8)1U); -+ u64 sw = swap2[0U] ^ bit; -+ cswap2(sw, nq2, nq_p12); -+ point_add_and_double(init1, p01_tmp12, tmp2); -+ swap2[0U] = bit; - } -- j = 0; - } -- -- /* Doubling */ -- for (i = 0; i < q; ++i) { -- add_eltfp25519_1w_adx(A, Ur1, Zr1); /* A = Ur1+Zr1 */ -- sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */ -- sqr_eltfp25519_2w_adx(AB); /* A = A**2 B = B**2 */ -- copy_eltfp25519_1w(C, B); /* C = B */ -- sub_eltfp25519_1w(B, A, B); /* B = A-B */ -- mul_a24_eltfp25519_1w(D, B); /* D = my_a24*B */ -- add_eltfp25519_1w_adx(D, D, C); /* D = D+C */ -- mul_eltfp25519_2w_adx(UZr1, AB, CD); /* Ur1 = A*B Zr1 = Zr1*A */ -- } -- -- /* Convert to affine coordinates */ -- inv_eltfp25519_1w_adx(A, Zr1); -- mul_eltfp25519_1w_adx((u64 *)session_key, Ur1, A); -- fred_eltfp25519_1w((u64 *)session_key); -- -- memzero_explicit(&m, sizeof(m)); --} -- --static void curve25519_bmi2(u8 shared[CURVE25519_KEY_SIZE], -- const u8 private_key[CURVE25519_KEY_SIZE], -- const u8 session_key[CURVE25519_KEY_SIZE]) --{ -- struct { -- u64 buffer[4 * NUM_WORDS_ELTFP25519]; -- u64 coordinates[4 * NUM_WORDS_ELTFP25519]; -- u64 workspace[6 * NUM_WORDS_ELTFP25519]; -- u8 session[CURVE25519_KEY_SIZE]; -- u8 private[CURVE25519_KEY_SIZE]; -- } __aligned(32) m; -- -- int i = 0, j = 0; -- u64 prev = 0; -- u64 *const X1 = (u64 *)m.session; -- u64 *const key = (u64 *)m.private; -- u64 *const Px = m.coordinates + 0; -- u64 *const Pz = m.coordinates + 4; -- 
u64 *const Qx = m.coordinates + 8; -- u64 *const Qz = m.coordinates + 12; -- u64 *const X2 = Qx; -- u64 *const Z2 = Qz; -- u64 *const X3 = Px; -- u64 *const Z3 = Pz; -- u64 *const X2Z2 = Qx; -- u64 *const X3Z3 = Px; -- -- u64 *const A = m.workspace + 0; -- u64 *const B = m.workspace + 4; -- u64 *const D = m.workspace + 8; -- u64 *const C = m.workspace + 12; -- u64 *const DA = m.workspace + 16; -- u64 *const CB = m.workspace + 20; -- u64 *const AB = A; -- u64 *const DC = D; -- u64 *const DACB = DA; -- -- memcpy(m.private, private_key, sizeof(m.private)); -- memcpy(m.session, session_key, sizeof(m.session)); -- -- curve25519_clamp_secret(m.private); -- -- /* As in the draft: -- * When receiving such an array, implementations of curve25519 -- * MUST mask the most-significant bit in the final byte. This -- * is done to preserve compatibility with point formats which -- * reserve the sign bit for use in other protocols and to -- * increase resistance to implementation fingerprinting -- */ -- m.session[CURVE25519_KEY_SIZE - 1] &= (1 << (255 % 8)) - 1; -- -- copy_eltfp25519_1w(Px, X1); -- setzero_eltfp25519_1w(Pz); -- setzero_eltfp25519_1w(Qx); -- setzero_eltfp25519_1w(Qz); -- -- Pz[0] = 1; -- Qx[0] = 1; -- -- /* main-loop */ -- prev = 0; -- j = 62; -- for (i = 3; i >= 0; --i) { -- while (j >= 0) { -- u64 bit = (key[i] >> j) & 0x1; -- u64 swap = bit ^ prev; -- prev = bit; -- -- add_eltfp25519_1w_bmi2(A, X2, Z2); /* A = (X2+Z2) */ -- sub_eltfp25519_1w(B, X2, Z2); /* B = (X2-Z2) */ -- add_eltfp25519_1w_bmi2(C, X3, Z3); /* C = (X3+Z3) */ -- sub_eltfp25519_1w(D, X3, Z3); /* D = (X3-Z3) */ -- mul_eltfp25519_2w_bmi2(DACB, AB, DC); /* [DA|CB] = [A|B]*[D|C] */ -- -- cselect(swap, A, C); -- cselect(swap, B, D); -- -- sqr_eltfp25519_2w_bmi2(AB); /* [AA|BB] = [A^2|B^2] */ -- add_eltfp25519_1w_bmi2(X3, DA, CB); /* X3 = (DA+CB) */ -- sub_eltfp25519_1w(Z3, DA, CB); /* Z3 = (DA-CB) */ -- sqr_eltfp25519_2w_bmi2(X3Z3); /* [X3|Z3] = [(DA+CB)|(DA+CB)]^2 */ -- -- copy_eltfp25519_1w(X2, B); 
/* X2 = B^2 */ -- sub_eltfp25519_1w(Z2, A, B); /* Z2 = E = AA-BB */ -- -- mul_a24_eltfp25519_1w(B, Z2); /* B = a24*E */ -- add_eltfp25519_1w_bmi2(B, B, X2); /* B = a24*E+B */ -- mul_eltfp25519_2w_bmi2(X2Z2, X2Z2, AB); /* [X2|Z2] = [B|E]*[A|a24*E+B] */ -- mul_eltfp25519_1w_bmi2(Z3, Z3, X1); /* Z3 = Z3*X1 */ -- --j; -+ sw0 = swap1[0U]; -+ cswap2(sw0, nq10, nq_p11); -+ nq1 = p01_tmp1; -+ tmp1 = p01_tmp1 + (u32)16U; -+ point_double(nq1, tmp1, tmp2); -+ point_double(nq1, tmp1, tmp2); -+ point_double(nq1, tmp1, tmp2); -+ memcpy(out, p0, (u32)8U * sizeof(p0[0U])); -+ -+ memzero_explicit(tmp2, sizeof(tmp2)); -+ memzero_explicit(p01_tmp1_swap, sizeof(p01_tmp1_swap)); -+} -+ -+static void fsquare_times(u64 *o, const u64 *inp, u64 *tmp, u32 n1) -+{ -+ u32 i; -+ fsqr(o, inp, tmp); -+ for (i = (u32)0U; i < n1 - (u32)1U; i = i + (u32)1U) -+ fsqr(o, o, tmp); -+} -+ -+static void finv(u64 *o, const u64 *i, u64 *tmp) -+{ -+ u64 t1[16U] = { 0U }; -+ u64 *a0 = t1; -+ u64 *b = t1 + (u32)4U; -+ u64 *c = t1 + (u32)8U; -+ u64 *t00 = t1 + (u32)12U; -+ u64 *tmp1 = tmp; -+ u64 *a; -+ u64 *t0; -+ fsquare_times(a0, i, tmp1, (u32)1U); -+ fsquare_times(t00, a0, tmp1, (u32)2U); -+ fmul(b, t00, i, tmp); -+ fmul(a0, b, a0, tmp); -+ fsquare_times(t00, a0, tmp1, (u32)1U); -+ fmul(b, t00, b, tmp); -+ fsquare_times(t00, b, tmp1, (u32)5U); -+ fmul(b, t00, b, tmp); -+ fsquare_times(t00, b, tmp1, (u32)10U); -+ fmul(c, t00, b, tmp); -+ fsquare_times(t00, c, tmp1, (u32)20U); -+ fmul(t00, t00, c, tmp); -+ fsquare_times(t00, t00, tmp1, (u32)10U); -+ fmul(b, t00, b, tmp); -+ fsquare_times(t00, b, tmp1, (u32)50U); -+ fmul(c, t00, b, tmp); -+ fsquare_times(t00, c, tmp1, (u32)100U); -+ fmul(t00, t00, c, tmp); -+ fsquare_times(t00, t00, tmp1, (u32)50U); -+ fmul(t00, t00, b, tmp); -+ fsquare_times(t00, t00, tmp1, (u32)5U); -+ a = t1; -+ t0 = t1 + (u32)12U; -+ fmul(o, t0, a, tmp); -+} -+ -+static void store_felem(u64 *b, u64 *f) -+{ -+ u64 f30 = f[3U]; -+ u64 top_bit0 = f30 >> (u32)63U; -+ u64 carry0; -+ u64 f31; 
-+ u64 top_bit; -+ u64 carry; -+ u64 f0; -+ u64 f1; -+ u64 f2; -+ u64 f3; -+ u64 m0; -+ u64 m1; -+ u64 m2; -+ u64 m3; -+ u64 mask; -+ u64 f0_; -+ u64 f1_; -+ u64 f2_; -+ u64 f3_; -+ u64 o0; -+ u64 o1; -+ u64 o2; -+ u64 o3; -+ f[3U] = f30 & (u64)0x7fffffffffffffffU; -+ carry0 = add_scalar(f, f, (u64)19U * top_bit0); -+ f31 = f[3U]; -+ top_bit = f31 >> (u32)63U; -+ f[3U] = f31 & (u64)0x7fffffffffffffffU; -+ carry = add_scalar(f, f, (u64)19U * top_bit); -+ f0 = f[0U]; -+ f1 = f[1U]; -+ f2 = f[2U]; -+ f3 = f[3U]; -+ m0 = gte_mask(f0, (u64)0xffffffffffffffedU); -+ m1 = eq_mask(f1, (u64)0xffffffffffffffffU); -+ m2 = eq_mask(f2, (u64)0xffffffffffffffffU); -+ m3 = eq_mask(f3, (u64)0x7fffffffffffffffU); -+ mask = ((m0 & m1) & m2) & m3; -+ f0_ = f0 - (mask & (u64)0xffffffffffffffedU); -+ f1_ = f1 - (mask & (u64)0xffffffffffffffffU); -+ f2_ = f2 - (mask & (u64)0xffffffffffffffffU); -+ f3_ = f3 - (mask & (u64)0x7fffffffffffffffU); -+ o0 = f0_; -+ o1 = f1_; -+ o2 = f2_; -+ o3 = f3_; -+ b[0U] = o0; -+ b[1U] = o1; -+ b[2U] = o2; -+ b[3U] = o3; -+} -+ -+static void encode_point(u8 *o, const u64 *i) -+{ -+ const u64 *x = i; -+ const u64 *z = i + (u32)4U; -+ u64 tmp[4U] = { 0U }; -+ u64 tmp_w[16U] = { 0U }; -+ finv(tmp, z, tmp_w); -+ fmul(tmp, tmp, x, tmp_w); -+ store_felem((u64 *)o, tmp); -+} -+ -+static void curve25519_ever64(u8 *out, const u8 *priv, const u8 *pub) -+{ -+ u64 init1[8U] = { 0U }; -+ u64 tmp[4U] = { 0U }; -+ u64 tmp3; -+ u64 *x; -+ u64 *z; -+ { -+ u32 i; -+ for (i = (u32)0U; i < (u32)4U; i = i + (u32)1U) { -+ u64 *os = tmp; -+ const u8 *bj = pub + i * (u32)8U; -+ u64 u = *(u64 *)bj; -+ u64 r = u; -+ u64 x0 = r; -+ os[i] = x0; - } -- j = 63; - } -+ tmp3 = tmp[3U]; -+ tmp[3U] = tmp3 & (u64)0x7fffffffffffffffU; -+ x = init1; -+ z = init1 + (u32)4U; -+ z[0U] = (u64)1U; -+ z[1U] = (u64)0U; -+ z[2U] = (u64)0U; -+ z[3U] = (u64)0U; -+ x[0U] = tmp[0U]; -+ x[1U] = tmp[1U]; -+ x[2U] = tmp[2U]; -+ x[3U] = tmp[3U]; -+ montgomery_ladder(init1, priv, init1); -+ encode_point(out, 
init1); -+} -+ -+/* The below constants were generated using this sage script: -+ * -+ * #!/usr/bin/env sage -+ * import sys -+ * from sage.all import * -+ * def limbs(n): -+ * n = int(n) -+ * l = ((n >> 0) % 2^64, (n >> 64) % 2^64, (n >> 128) % 2^64, (n >> 192) % 2^64) -+ * return "0x%016xULL, 0x%016xULL, 0x%016xULL, 0x%016xULL" % l -+ * ec = EllipticCurve(GF(2^255 - 19), [0, 486662, 0, 1, 0]) -+ * p_minus_s = (ec.lift_x(9) - ec.lift_x(1))[0] -+ * print("static const u64 p_minus_s[] = { %s };\n" % limbs(p_minus_s)) -+ * print("static const u64 table_ladder[] = {") -+ * p = ec.lift_x(9) -+ * for i in range(252): -+ * l = (p[0] + p[2]) / (p[0] - p[2]) -+ * print(("\t%s" + ("," if i != 251 else "")) % limbs(l)) -+ * p = p * 2 -+ * print("};") -+ * -+ */ - -- inv_eltfp25519_1w_bmi2(A, Qz); -- mul_eltfp25519_1w_bmi2((u64 *)shared, Qx, A); -- fred_eltfp25519_1w((u64 *)shared); -+static const u64 p_minus_s[] = { 0x816b1e0137d48290ULL, 0x440f6a51eb4d1207ULL, 0x52385f46dca2b71dULL, 0x215132111d8354cbULL }; - -- memzero_explicit(&m, sizeof(m)); --} -+static const u64 table_ladder[] = { -+ 0xfffffffffffffff3ULL, 0xffffffffffffffffULL, 0xffffffffffffffffULL, 0x5fffffffffffffffULL, -+ 0x6b8220f416aafe96ULL, 0x82ebeb2b4f566a34ULL, 0xd5a9a5b075a5950fULL, 0x5142b2cf4b2488f4ULL, -+ 0x6aaebc750069680cULL, 0x89cf7820a0f99c41ULL, 0x2a58d9183b56d0f4ULL, 0x4b5aca80e36011a4ULL, -+ 0x329132348c29745dULL, 0xf4a2e616e1642fd7ULL, 0x1e45bb03ff67bc34ULL, 0x306912d0f42a9b4aULL, -+ 0xff886507e6af7154ULL, 0x04f50e13dfeec82fULL, 0xaa512fe82abab5ceULL, 0x174e251a68d5f222ULL, -+ 0xcf96700d82028898ULL, 0x1743e3370a2c02c5ULL, 0x379eec98b4e86eaaULL, 0x0c59888a51e0482eULL, -+ 0xfbcbf1d699b5d189ULL, 0xacaef0d58e9fdc84ULL, 0xc1c20d06231f7614ULL, 0x2938218da274f972ULL, -+ 0xf6af49beff1d7f18ULL, 0xcc541c22387ac9c2ULL, 0x96fcc9ef4015c56bULL, 0x69c1627c690913a9ULL, -+ 0x7a86fd2f4733db0eULL, 0xfdb8c4f29e087de9ULL, 0x095e4b1a8ea2a229ULL, 0x1ad7a7c829b37a79ULL, -+ 0x342d89cad17ea0c0ULL, 0x67bedda6cced2051ULL, 
0x19ca31bf2bb42f74ULL, 0x3df7b4c84980acbbULL, -+ 0xa8c6444dc80ad883ULL, 0xb91e440366e3ab85ULL, 0xc215cda00164f6d8ULL, 0x3d867c6ef247e668ULL, -+ 0xc7dd582bcc3e658cULL, 0xfd2c4748ee0e5528ULL, 0xa0fd9b95cc9f4f71ULL, 0x7529d871b0675ddfULL, -+ 0xb8f568b42d3cbd78ULL, 0x1233011b91f3da82ULL, 0x2dce6ccd4a7c3b62ULL, 0x75e7fc8e9e498603ULL, -+ 0x2f4f13f1fcd0b6ecULL, 0xf1a8ca1f29ff7a45ULL, 0xc249c1a72981e29bULL, 0x6ebe0dbb8c83b56aULL, -+ 0x7114fa8d170bb222ULL, 0x65a2dcd5bf93935fULL, 0xbdc41f68b59c979aULL, 0x2f0eef79a2ce9289ULL, -+ 0x42ecbf0c083c37ceULL, 0x2930bc09ec496322ULL, 0xf294b0c19cfeac0dULL, 0x3780aa4bedfabb80ULL, -+ 0x56c17d3e7cead929ULL, 0xe7cb4beb2e5722c5ULL, 0x0ce931732dbfe15aULL, 0x41b883c7621052f8ULL, -+ 0xdbf75ca0c3d25350ULL, 0x2936be086eb1e351ULL, 0xc936e03cb4a9b212ULL, 0x1d45bf82322225aaULL, -+ 0xe81ab1036a024cc5ULL, 0xe212201c304c9a72ULL, 0xc5d73fba6832b1fcULL, 0x20ffdb5a4d839581ULL, -+ 0xa283d367be5d0fadULL, 0x6c2b25ca8b164475ULL, 0x9d4935467caaf22eULL, 0x5166408eee85ff49ULL, -+ 0x3c67baa2fab4e361ULL, 0xb3e433c67ef35cefULL, 0x5259729241159b1cULL, 0x6a621892d5b0ab33ULL, -+ 0x20b74a387555cdcbULL, 0x532aa10e1208923fULL, 0xeaa17b7762281dd1ULL, 0x61ab3443f05c44bfULL, -+ 0x257a6c422324def8ULL, 0x131c6c1017e3cf7fULL, 0x23758739f630a257ULL, 0x295a407a01a78580ULL, -+ 0xf8c443246d5da8d9ULL, 0x19d775450c52fa5dULL, 0x2afcfc92731bf83dULL, 0x7d10c8e81b2b4700ULL, -+ 0xc8e0271f70baa20bULL, 0x993748867ca63957ULL, 0x5412efb3cb7ed4bbULL, 0x3196d36173e62975ULL, -+ 0xde5bcad141c7dffcULL, 0x47cc8cd2b395c848ULL, 0xa34cd942e11af3cbULL, 0x0256dbf2d04ecec2ULL, -+ 0x875ab7e94b0e667fULL, 0xcad4dd83c0850d10ULL, 0x47f12e8f4e72c79fULL, 0x5f1a87bb8c85b19bULL, -+ 0x7ae9d0b6437f51b8ULL, 0x12c7ce5518879065ULL, 0x2ade09fe5cf77aeeULL, 0x23a05a2f7d2c5627ULL, -+ 0x5908e128f17c169aULL, 0xf77498dd8ad0852dULL, 0x74b4c4ceab102f64ULL, 0x183abadd10139845ULL, -+ 0xb165ba8daa92aaacULL, 0xd5c5ef9599386705ULL, 0xbe2f8f0cf8fc40d1ULL, 0x2701e635ee204514ULL, -+ 0x629fa80020156514ULL, 0xf223868764a8c1ceULL, 
0x5b894fff0b3f060eULL, 0x60d9944cf708a3faULL, -+ 0xaeea001a1c7a201fULL, 0xebf16a633ee2ce63ULL, 0x6f7709594c7a07e1ULL, 0x79b958150d0208cbULL, -+ 0x24b55e5301d410e7ULL, 0xe3a34edff3fdc84dULL, 0xd88768e4904032d8ULL, 0x131384427b3aaeecULL, -+ 0x8405e51286234f14ULL, 0x14dc4739adb4c529ULL, 0xb8a2b5b250634ffdULL, 0x2fe2a94ad8a7ff93ULL, -+ 0xec5c57efe843faddULL, 0x2843ce40f0bb9918ULL, 0xa4b561d6cf3d6305ULL, 0x743629bde8fb777eULL, -+ 0x343edd46bbaf738fULL, 0xed981828b101a651ULL, 0xa401760b882c797aULL, 0x1fc223e28dc88730ULL, -+ 0x48604e91fc0fba0eULL, 0xb637f78f052c6fa4ULL, 0x91ccac3d09e9239cULL, 0x23f7eed4437a687cULL, -+ 0x5173b1118d9bd800ULL, 0x29d641b63189d4a7ULL, 0xfdbf177988bbc586ULL, 0x2959894fcad81df5ULL, -+ 0xaebc8ef3b4bbc899ULL, 0x4148995ab26992b9ULL, 0x24e20b0134f92cfbULL, 0x40d158894a05dee8ULL, -+ 0x46b00b1185af76f6ULL, 0x26bac77873187a79ULL, 0x3dc0bf95ab8fff5fULL, 0x2a608bd8945524d7ULL, -+ 0x26449588bd446302ULL, 0x7c4bc21c0388439cULL, 0x8e98a4f383bd11b2ULL, 0x26218d7bc9d876b9ULL, -+ 0xe3081542997c178aULL, 0x3c2d29a86fb6606fULL, 0x5c217736fa279374ULL, 0x7dde05734afeb1faULL, -+ 0x3bf10e3906d42babULL, 0xe4f7803e1980649cULL, 0xe6053bf89595bf7aULL, 0x394faf38da245530ULL, -+ 0x7a8efb58896928f4ULL, 0xfbc778e9cc6a113cULL, 0x72670ce330af596fULL, 0x48f222a81d3d6cf7ULL, -+ 0xf01fce410d72caa7ULL, 0x5a20ecc7213b5595ULL, 0x7bc21165c1fa1483ULL, 0x07f89ae31da8a741ULL, -+ 0x05d2c2b4c6830ff9ULL, 0xd43e330fc6316293ULL, 0xa5a5590a96d3a904ULL, 0x705edb91a65333b6ULL, -+ 0x048ee15e0bb9a5f7ULL, 0x3240cfca9e0aaf5dULL, 0x8f4b71ceedc4a40bULL, 0x621c0da3de544a6dULL, -+ 0x92872836a08c4091ULL, 0xce8375b010c91445ULL, 0x8a72eb524f276394ULL, 0x2667fcfa7ec83635ULL, -+ 0x7f4c173345e8752aULL, 0x061b47feee7079a5ULL, 0x25dd9afa9f86ff34ULL, 0x3780cef5425dc89cULL, -+ 0x1a46035a513bb4e9ULL, 0x3e1ef379ac575adaULL, 0xc78c5f1c5fa24b50ULL, 0x321a967634fd9f22ULL, -+ 0x946707b8826e27faULL, 0x3dca84d64c506fd0ULL, 0xc189218075e91436ULL, 0x6d9284169b3b8484ULL, -+ 0x3a67e840383f2ddfULL, 0x33eec9a30c4f9b75ULL, 
0x3ec7c86fa783ef47ULL, 0x26ec449fbac9fbc4ULL, -+ 0x5c0f38cba09b9e7dULL, 0x81168cc762a3478cULL, 0x3e23b0d306fc121cULL, 0x5a238aa0a5efdcddULL, -+ 0x1ba26121c4ea43ffULL, 0x36f8c77f7c8832b5ULL, 0x88fbea0b0adcf99aULL, 0x5ca9938ec25bebf9ULL, -+ 0xd5436a5e51fccda0ULL, 0x1dbc4797c2cd893bULL, 0x19346a65d3224a08ULL, 0x0f5034e49b9af466ULL, -+ 0xf23c3967a1e0b96eULL, 0xe58b08fa867a4d88ULL, 0xfb2fabc6a7341679ULL, 0x2a75381eb6026946ULL, -+ 0xc80a3be4c19420acULL, 0x66b1f6c681f2b6dcULL, 0x7cf7036761e93388ULL, 0x25abbbd8a660a4c4ULL, -+ 0x91ea12ba14fd5198ULL, 0x684950fc4a3cffa9ULL, 0xf826842130f5ad28ULL, 0x3ea988f75301a441ULL, -+ 0xc978109a695f8c6fULL, 0x1746eb4a0530c3f3ULL, 0x444d6d77b4459995ULL, 0x75952b8c054e5cc7ULL, -+ 0xa3703f7915f4d6aaULL, 0x66c346202f2647d8ULL, 0xd01469df811d644bULL, 0x77fea47d81a5d71fULL, -+ 0xc5e9529ef57ca381ULL, 0x6eeeb4b9ce2f881aULL, 0xb6e91a28e8009bd6ULL, 0x4b80be3e9afc3fecULL, -+ 0x7e3773c526aed2c5ULL, 0x1b4afcb453c9a49dULL, 0xa920bdd7baffb24dULL, 0x7c54699f122d400eULL, -+ 0xef46c8e14fa94bc8ULL, 0xe0b074ce2952ed5eULL, 0xbea450e1dbd885d5ULL, 0x61b68649320f712cULL, -+ 0x8a485f7309ccbdd1ULL, 0xbd06320d7d4d1a2dULL, 0x25232973322dbef4ULL, 0x445dc4758c17f770ULL, -+ 0xdb0434177cc8933cULL, 0xed6fe82175ea059fULL, 0x1efebefdc053db34ULL, 0x4adbe867c65daf99ULL, -+ 0x3acd71a2a90609dfULL, 0xe5e991856dd04050ULL, 0x1ec69b688157c23cULL, 0x697427f6885cfe4dULL, -+ 0xd7be7b9b65e1a851ULL, 0xa03d28d522c536ddULL, 0x28399d658fd2b645ULL, 0x49e5b7e17c2641e1ULL, -+ 0x6f8c3a98700457a4ULL, 0x5078f0a25ebb6778ULL, 0xd13c3ccbc382960fULL, 0x2e003258a7df84b1ULL, -+ 0x8ad1f39be6296a1cULL, 0xc1eeaa652a5fbfb2ULL, 0x33ee0673fd26f3cbULL, 0x59256173a69d2cccULL, -+ 0x41ea07aa4e18fc41ULL, 0xd9fc19527c87a51eULL, 0xbdaacb805831ca6fULL, 0x445b652dc916694fULL, -+ 0xce92a3a7f2172315ULL, 0x1edc282de11b9964ULL, 0xa1823aafe04c314aULL, 0x790a2d94437cf586ULL, -+ 0x71c447fb93f6e009ULL, 0x8922a56722845276ULL, 0xbf70903b204f5169ULL, 0x2f7a89891ba319feULL, -+ 0x02a08eb577e2140cULL, 0xed9a4ed4427bdcf4ULL, 
0x5253ec44e4323cd1ULL, 0x3e88363c14e9355bULL, -+ 0xaa66c14277110b8cULL, 0x1ae0391610a23390ULL, 0x2030bd12c93fc2a2ULL, 0x3ee141579555c7abULL, -+ 0x9214de3a6d6e7d41ULL, 0x3ccdd88607f17efeULL, 0x674f1288f8e11217ULL, 0x5682250f329f93d0ULL, -+ 0x6cf00b136d2e396eULL, 0x6e4cf86f1014debfULL, 0x5930b1b5bfcc4e83ULL, 0x047069b48aba16b6ULL, -+ 0x0d4ce4ab69b20793ULL, 0xb24db91a97d0fb9eULL, 0xcdfa50f54e00d01dULL, 0x221b1085368bddb5ULL, -+ 0xe7e59468b1e3d8d2ULL, 0x53c56563bd122f93ULL, 0xeee8a903e0663f09ULL, 0x61efa662cbbe3d42ULL, -+ 0x2cf8ddddde6eab2aULL, 0x9bf80ad51435f231ULL, 0x5deadacec9f04973ULL, 0x29275b5d41d29b27ULL, -+ 0xcfde0f0895ebf14fULL, 0xb9aab96b054905a7ULL, 0xcae80dd9a1c420fdULL, 0x0a63bf2f1673bbc7ULL, -+ 0x092f6e11958fbc8cULL, 0x672a81e804822fadULL, 0xcac8351560d52517ULL, 0x6f3f7722c8f192f8ULL, -+ 0xf8ba90ccc2e894b7ULL, 0x2c7557a438ff9f0dULL, 0x894d1d855ae52359ULL, 0x68e122157b743d69ULL, -+ 0xd87e5570cfb919f3ULL, 0x3f2cdecd95798db9ULL, 0x2121154710c0a2ceULL, 0x3c66a115246dc5b2ULL, -+ 0xcbedc562294ecb72ULL, 0xba7143c36a280b16ULL, 0x9610c2efd4078b67ULL, 0x6144735d946a4b1eULL, -+ 0x536f111ed75b3350ULL, 0x0211db8c2041d81bULL, 0xf93cb1000e10413cULL, 0x149dfd3c039e8876ULL, -+ 0xd479dde46b63155bULL, 0xb66e15e93c837976ULL, 0xdafde43b1f13e038ULL, 0x5fafda1a2e4b0b35ULL, -+ 0x3600bbdf17197581ULL, 0x3972050bbe3cd2c2ULL, 0x5938906dbdd5be86ULL, 0x34fce5e43f9b860fULL, -+ 0x75a8a4cd42d14d02ULL, 0x828dabc53441df65ULL, 0x33dcabedd2e131d3ULL, 0x3ebad76fb814d25fULL, -+ 0xd4906f566f70e10fULL, 0x5d12f7aa51690f5aULL, 0x45adb16e76cefcf2ULL, 0x01f768aead232999ULL, -+ 0x2b6cc77b6248febdULL, 0x3cd30628ec3aaffdULL, 0xce1c0b80d4ef486aULL, 0x4c3bff2ea6f66c23ULL, -+ 0x3f2ec4094aeaeb5fULL, 0x61b19b286e372ca7ULL, 0x5eefa966de2a701dULL, 0x23b20565de55e3efULL, -+ 0xe301ca5279d58557ULL, 0x07b2d4ce27c2874fULL, 0xa532cd8a9dcf1d67ULL, 0x2a52fee23f2bff56ULL, -+ 0x8624efb37cd8663dULL, 0xbbc7ac20ffbd7594ULL, 0x57b85e9c82d37445ULL, 0x7b3052cb86a6ec66ULL, -+ 0x3482f0ad2525e91eULL, 0x2cb68043d28edca0ULL, 
0xaf4f6d052e1b003aULL, 0x185f8c2529781b0aULL, -+ 0xaa41de5bd80ce0d6ULL, 0x9407b2416853e9d6ULL, 0x563ec36e357f4c3aULL, 0x4cc4b8dd0e297bceULL, -+ 0xa2fc1a52ffb8730eULL, 0x1811f16e67058e37ULL, 0x10f9a366cddf4ee1ULL, 0x72f4a0c4a0b9f099ULL, -+ 0x8c16c06f663f4ea7ULL, 0x693b3af74e970fbaULL, 0x2102e7f1d69ec345ULL, 0x0ba53cbc968a8089ULL, -+ 0xca3d9dc7fea15537ULL, 0x4c6824bb51536493ULL, 0xb9886314844006b1ULL, 0x40d2a72ab454cc60ULL, -+ 0x5936a1b712570975ULL, 0x91b9d648debda657ULL, 0x3344094bb64330eaULL, 0x006ba10d12ee51d0ULL, -+ 0x19228468f5de5d58ULL, 0x0eb12f4c38cc05b0ULL, 0xa1039f9dd5601990ULL, 0x4502d4ce4fff0e0bULL, -+ 0xeb2054106837c189ULL, 0xd0f6544c6dd3b93cULL, 0x40727064c416d74fULL, 0x6e15c6114b502ef0ULL, -+ 0x4df2a398cfb1a76bULL, 0x11256c7419f2f6b1ULL, 0x4a497962066e6043ULL, 0x705b3aab41355b44ULL, -+ 0x365ef536d797b1d8ULL, 0x00076bd622ddf0dbULL, 0x3bbf33b0e0575a88ULL, 0x3777aa05c8e4ca4dULL, -+ 0x392745c85578db5fULL, 0x6fda4149dbae5ae2ULL, 0xb1f0b00b8adc9867ULL, 0x09963437d36f1da3ULL, -+ 0x7e824e90a5dc3853ULL, 0xccb5f6641f135cbdULL, 0x6736d86c87ce8fccULL, 0x625f3ce26604249fULL, -+ 0xaf8ac8059502f63fULL, 0x0c05e70a2e351469ULL, 0x35292e9c764b6305ULL, 0x1a394360c7e23ac3ULL, -+ 0xd5c6d53251183264ULL, 0x62065abd43c2b74fULL, 0xb5fbf5d03b973f9bULL, 0x13a3da3661206e5eULL, -+ 0xc6bd5837725d94e5ULL, 0x18e30912205016c5ULL, 0x2088ce1570033c68ULL, 0x7fba1f495c837987ULL, -+ 0x5a8c7423f2f9079dULL, 0x1735157b34023fc5ULL, 0xe4f9b49ad2fab351ULL, 0x6691ff72c878e33cULL, -+ 0x122c2adedc5eff3eULL, 0xf8dd4bf1d8956cf4ULL, 0xeb86205d9e9e5bdaULL, 0x049b92b9d975c743ULL, -+ 0xa5379730b0f6c05aULL, 0x72a0ffacc6f3a553ULL, 0xb0032c34b20dcd6dULL, 0x470e9dbc88d5164aULL, -+ 0xb19cf10ca237c047ULL, 0xb65466711f6c81a2ULL, 0xb3321bd16dd80b43ULL, 0x48c14f600c5fbe8eULL, -+ 0x66451c264aa6c803ULL, 0xb66e3904a4fa7da6ULL, 0xd45f19b0b3128395ULL, 0x31602627c3c9bc10ULL, -+ 0x3120dc4832e4e10dULL, 0xeb20c46756c717f7ULL, 0x00f52e3f67280294ULL, 0x566d4fc14730c509ULL, -+ 0x7e3a5d40fd837206ULL, 0xc1e926dc7159547aULL, 
0x216730fba68d6095ULL, 0x22e8c3843f69cea7ULL, -+ 0x33d074e8930e4b2bULL, 0xb6e4350e84d15816ULL, 0x5534c26ad6ba2365ULL, 0x7773c12f89f1f3f3ULL, -+ 0x8cba404da57962aaULL, 0x5b9897a81999ce56ULL, 0x508e862f121692fcULL, 0x3a81907fa093c291ULL, -+ 0x0dded0ff4725a510ULL, 0x10d8cc10673fc503ULL, 0x5b9d151c9f1f4e89ULL, 0x32a5c1d5cb09a44cULL, -+ 0x1e0aa442b90541fbULL, 0x5f85eb7cc1b485dbULL, 0xbee595ce8a9df2e5ULL, 0x25e496c722422236ULL, -+ 0x5edf3c46cd0fe5b9ULL, 0x34e75a7ed2a43388ULL, 0xe488de11d761e352ULL, 0x0e878a01a085545cULL, -+ 0xba493c77e021bb04ULL, 0x2b4d1843c7df899aULL, 0x9ea37a487ae80d67ULL, 0x67a9958011e41794ULL, -+ 0x4b58051a6697b065ULL, 0x47e33f7d8d6ba6d4ULL, 0xbb4da8d483ca46c1ULL, 0x68becaa181c2db0dULL, -+ 0x8d8980e90b989aa5ULL, 0xf95eb14a2c93c99bULL, 0x51c6c7c4796e73a2ULL, 0x6e228363b5efb569ULL, -+ 0xc6bbc0b02dd624c8ULL, 0x777eb47dec8170eeULL, 0x3cde15a004cfafa9ULL, 0x1dc6bc087160bf9bULL, -+ 0x2e07e043eec34002ULL, 0x18e9fc677a68dc7fULL, 0xd8da03188bd15b9aULL, 0x48fbc3bb00568253ULL, -+ 0x57547d4cfb654ce1ULL, 0xd3565b82a058e2adULL, 0xf63eaf0bbf154478ULL, 0x47531ef114dfbb18ULL, -+ 0xe1ec630a4278c587ULL, 0x5507d546ca8e83f3ULL, 0x85e135c63adc0c2bULL, 0x0aa7efa85682844eULL, -+ 0x72691ba8b3e1f615ULL, 0x32b4e9701fbe3ffaULL, 0x97b6d92e39bb7868ULL, 0x2cfe53dea02e39e8ULL, -+ 0x687392cd85cd52b0ULL, 0x27ff66c910e29831ULL, 0x97134556a9832d06ULL, 0x269bb0360a84f8a0ULL, -+ 0x706e55457643f85cULL, 0x3734a48c9b597d1bULL, 0x7aee91e8c6efa472ULL, 0x5cd6abc198a9d9e0ULL, -+ 0x0e04de06cb3ce41aULL, 0xd8c6eb893402e138ULL, 0x904659bb686e3772ULL, 0x7215c371746ba8c8ULL, -+ 0xfd12a97eeae4a2d9ULL, 0x9514b7516394f2c5ULL, 0x266fd5809208f294ULL, 0x5c847085619a26b9ULL, -+ 0x52985410fed694eaULL, 0x3c905b934a2ed254ULL, 0x10bb47692d3be467ULL, 0x063b3d2d69e5e9e1ULL, -+ 0x472726eedda57debULL, 0xefb6c4ae10f41891ULL, 0x2b1641917b307614ULL, 0x117c554fc4f45b7cULL, -+ 0xc07cf3118f9d8812ULL, 0x01dbd82050017939ULL, 0xd7e803f4171b2827ULL, 0x1015e87487d225eaULL, -+ 0xc58de3fed23acc4dULL, 0x50db91c294a7be2dULL, 
0x0b94d43d1c9cf457ULL, 0x6b1640fa6e37524aULL, -+ 0x692f346c5fda0d09ULL, 0x200b1c59fa4d3151ULL, 0xb8c46f760777a296ULL, 0x4b38395f3ffdfbcfULL, -+ 0x18d25e00be54d671ULL, 0x60d50582bec8aba6ULL, 0x87ad8f263b78b982ULL, 0x50fdf64e9cda0432ULL, -+ 0x90f567aac578dcf0ULL, 0xef1e9b0ef2a3133bULL, 0x0eebba9242d9de71ULL, 0x15473c9bf03101c7ULL, -+ 0x7c77e8ae56b78095ULL, 0xb678e7666e6f078eULL, 0x2da0b9615348ba1fULL, 0x7cf931c1ff733f0bULL, -+ 0x26b357f50a0a366cULL, 0xe9708cf42b87d732ULL, 0xc13aeea5f91cb2c0ULL, 0x35d90c991143bb4cULL, -+ 0x47c1c404a9a0d9dcULL, 0x659e58451972d251ULL, 0x3875a8c473b38c31ULL, 0x1fbd9ed379561f24ULL, -+ 0x11fabc6fd41ec28dULL, 0x7ef8dfe3cd2a2dcaULL, 0x72e73b5d8c404595ULL, 0x6135fa4954b72f27ULL, -+ 0xccfc32a2de24b69cULL, 0x3f55698c1f095d88ULL, 0xbe3350ed5ac3f929ULL, 0x5e9bf806ca477eebULL, -+ 0xe9ce8fb63c309f68ULL, 0x5376f63565e1f9f4ULL, 0xd1afcfb35a6393f1ULL, 0x6632a1ede5623506ULL, -+ 0x0b7d6c390c2ded4cULL, 0x56cb3281df04cb1fULL, 0x66305a1249ecc3c7ULL, 0x5d588b60a38ca72aULL, -+ 0xa6ecbf78e8e5f42dULL, 0x86eeb44b3c8a3eecULL, 0xec219c48fbd21604ULL, 0x1aaf1af517c36731ULL, -+ 0xc306a2836769bde7ULL, 0x208280622b1e2adbULL, 0x8027f51ffbff94a6ULL, 0x76cfa1ce1124f26bULL, -+ 0x18eb00562422abb6ULL, 0xf377c4d58f8c29c3ULL, 0x4dbbc207f531561aULL, 0x0253b7f082128a27ULL, -+ 0x3d1f091cb62c17e0ULL, 0x4860e1abd64628a9ULL, 0x52d17436309d4253ULL, 0x356f97e13efae576ULL, -+ 0xd351e11aa150535bULL, 0x3e6b45bb1dd878ccULL, 0x0c776128bed92c98ULL, 0x1d34ae93032885b8ULL, -+ 0x4ba0488ca85ba4c3ULL, 0x985348c33c9ce6ceULL, 0x66124c6f97bda770ULL, 0x0f81a0290654124aULL, -+ 0x9ed09ca6569b86fdULL, 0x811009fd18af9a2dULL, 0xff08d03f93d8c20aULL, 0x52a148199faef26bULL, -+ 0x3e03f9dc2d8d1b73ULL, 0x4205801873961a70ULL, 0xc0d987f041a35970ULL, 0x07aa1f15a1c0d549ULL, -+ 0xdfd46ce08cd27224ULL, 0x6d0a024f934e4239ULL, 0x808a7a6399897b59ULL, 0x0a4556e9e13d95a2ULL, -+ 0xd21a991fe9c13045ULL, 0x9b0e8548fe7751b8ULL, 0x5da643cb4bf30035ULL, 0x77db28d63940f721ULL, -+ 0xfc5eeb614adc9011ULL, 0x5229419ae8c411ebULL, 
0x9ec3e7787d1dcf74ULL, 0x340d053e216e4cb5ULL, -+ 0xcac7af39b48df2b4ULL, 0xc0faec2871a10a94ULL, 0x140a69245ca575edULL, 0x0cf1c37134273a4cULL, -+ 0xc8ee306ac224b8a5ULL, 0x57eaee7ccb4930b0ULL, 0xa1e806bdaacbe74fULL, 0x7d9a62742eeb657dULL, -+ 0x9eb6b6ef546c4830ULL, 0x885cca1fddb36e2eULL, 0xe6b9f383ef0d7105ULL, 0x58654fef9d2e0412ULL, -+ 0xa905c4ffbe0e8e26ULL, 0x942de5df9b31816eULL, 0x497d723f802e88e1ULL, 0x30684dea602f408dULL, -+ 0x21e5a278a3e6cb34ULL, 0xaefb6e6f5b151dc4ULL, 0xb30b8e049d77ca15ULL, 0x28c3c9cf53b98981ULL, -+ 0x287fb721556cdd2aULL, 0x0d317ca897022274ULL, 0x7468c7423a543258ULL, 0x4a7f11464eb5642fULL, -+ 0xa237a4774d193aa6ULL, 0xd865986ea92129a1ULL, 0x24c515ecf87c1a88ULL, 0x604003575f39f5ebULL, -+ 0x47b9f189570a9b27ULL, 0x2b98cede465e4b78ULL, 0x026df551dbb85c20ULL, 0x74fcd91047e21901ULL, -+ 0x13e2a90a23c1bfa3ULL, 0x0cb0074e478519f6ULL, 0x5ff1cbbe3af6cf44ULL, 0x67fe5438be812dbeULL, -+ 0xd13cf64fa40f05b0ULL, 0x054dfb2f32283787ULL, 0x4173915b7f0d2aeaULL, 0x482f144f1f610d4eULL, -+ 0xf6210201b47f8234ULL, 0x5d0ae1929e70b990ULL, 0xdcd7f455b049567cULL, 0x7e93d0f1f0916f01ULL, -+ 0xdd79cbf18a7db4faULL, 0xbe8391bf6f74c62fULL, 0x027145d14b8291bdULL, 0x585a73ea2cbf1705ULL, -+ 0x485ca03e928a0db2ULL, 0x10fc01a5742857e7ULL, 0x2f482edbd6d551a7ULL, 0x0f0433b5048fdb8aULL, -+ 0x60da2e8dd7dc6247ULL, 0x88b4c9d38cd4819aULL, 0x13033ac001f66697ULL, 0x273b24fe3b367d75ULL, -+ 0xc6e8f66a31b3b9d4ULL, 0x281514a494df49d5ULL, 0xd1726fdfc8b23da7ULL, 0x4b3ae7d103dee548ULL, -+ 0xc6256e19ce4b9d7eULL, 0xff5c5cf186e3c61cULL, 0xacc63ca34b8ec145ULL, 0x74621888fee66574ULL, -+ 0x956f409645290a1eULL, 0xef0bf8e3263a962eULL, 0xed6a50eb5ec2647bULL, 0x0694283a9dca7502ULL, -+ 0x769b963643a2dcd1ULL, 0x42b7c8ea09fc5353ULL, 0x4f002aee13397eabULL, 0x63005e2c19b7d63aULL, -+ 0xca6736da63023beaULL, 0x966c7f6db12a99b7ULL, 0xace09390c537c5e1ULL, 0x0b696063a1aa89eeULL, -+ 0xebb03e97288c56e5ULL, 0x432a9f9f938c8be8ULL, 0xa6a5a93d5b717f71ULL, 0x1a5fb4c3e18f9d97ULL, -+ 0x1c94e7ad1c60cdceULL, 0xee202a43fc02c4a0ULL, 
0x8dafe4d867c46a20ULL, 0x0a10263c8ac27b58ULL, -+ 0xd0dea9dfe4432a4aULL, 0x856af87bbe9277c5ULL, 0xce8472acc212c71aULL, 0x6f151b6d9bbb1e91ULL, -+ 0x26776c527ceed56aULL, 0x7d211cb7fbf8faecULL, 0x37ae66a6fd4609ccULL, 0x1f81b702d2770c42ULL, -+ 0x2fb0b057eac58392ULL, 0xe1dd89fe29744e9dULL, 0xc964f8eb17beb4f8ULL, 0x29571073c9a2d41eULL, -+ 0xa948a18981c0e254ULL, 0x2df6369b65b22830ULL, 0xa33eb2d75fcfd3c6ULL, 0x078cd6ec4199a01fULL, -+ 0x4a584a41ad900d2fULL, 0x32142b78e2c74c52ULL, 0x68c4e8338431c978ULL, 0x7f69ea9008689fc2ULL, -+ 0x52f2c81e46a38265ULL, 0xfd78072d04a832fdULL, 0x8cd7d5fa25359e94ULL, 0x4de71b7454cc29d2ULL, -+ 0x42eb60ad1eda6ac9ULL, 0x0aad37dfdbc09c3aULL, 0x81004b71e33cc191ULL, 0x44e6be345122803cULL, -+ 0x03fe8388ba1920dbULL, 0xf5d57c32150db008ULL, 0x49c8c4281af60c29ULL, 0x21edb518de701aeeULL, -+ 0x7fb63e418f06dc99ULL, 0xa4460d99c166d7b8ULL, 0x24dd5248ce520a83ULL, 0x5ec3ad712b928358ULL, -+ 0x15022a5fbd17930fULL, 0xa4f64a77d82570e3ULL, 0x12bc8d6915783712ULL, 0x498194c0fc620abbULL, -+ 0x38a2d9d255686c82ULL, 0x785c6bd9193e21f0ULL, 0xe4d5c81ab24a5484ULL, 0x56307860b2e20989ULL, -+ 0x429d55f78b4d74c4ULL, 0x22f1834643350131ULL, 0x1e60c24598c71fffULL, 0x59f2f014979983efULL, -+ 0x46a47d56eb494a44ULL, 0x3e22a854d636a18eULL, 0xb346e15274491c3bULL, 0x2ceafd4e5390cde7ULL, -+ 0xba8a8538be0d6675ULL, 0x4b9074bb50818e23ULL, 0xcbdab89085d304c3ULL, 0x61a24fe0e56192c4ULL, -+ 0xcb7615e6db525bcbULL, 0xdd7d8c35a567e4caULL, 0xe6b4153acafcdd69ULL, 0x2d668e097f3c9766ULL, -+ 0xa57e7e265ce55ef0ULL, 0x5d9f4e527cd4b967ULL, 0xfbc83606492fd1e5ULL, 0x090d52beb7c3f7aeULL, -+ 0x09b9515a1e7b4d7cULL, 0x1f266a2599da44c0ULL, 0xa1c49548e2c55504ULL, 0x7ef04287126f15ccULL, -+ 0xfed1659dbd30ef15ULL, 0x8b4ab9eec4e0277bULL, 0x884d6236a5df3291ULL, 0x1fd96ea6bf5cf788ULL, -+ 0x42a161981f190d9aULL, 0x61d849507e6052c1ULL, 0x9fe113bf285a2cd5ULL, 0x7c22d676dbad85d8ULL, -+ 0x82e770ed2bfbd27dULL, 0x4c05b2ece996f5a5ULL, 0xcd40a9c2b0900150ULL, 0x5895319213d9bf64ULL, -+ 0xe7cc5d703fea2e08ULL, 0xb50c491258e2188cULL, 
0xcce30baa48205bf0ULL, 0x537c659ccfa32d62ULL, -+ 0x37b6623a98cfc088ULL, 0xfe9bed1fa4d6aca4ULL, 0x04d29b8e56a8d1b0ULL, 0x725f71c40b519575ULL, -+ 0x28c7f89cd0339ce6ULL, 0x8367b14469ddc18bULL, 0x883ada83a6a1652cULL, 0x585f1974034d6c17ULL, -+ 0x89cfb266f1b19188ULL, 0xe63b4863e7c35217ULL, 0xd88c9da6b4c0526aULL, 0x3e035c9df0954635ULL, -+ 0xdd9d5412fb45de9dULL, 0xdd684532e4cff40dULL, 0x4b5c999b151d671cULL, 0x2d8c2cc811e7f690ULL, -+ 0x7f54be1d90055d40ULL, 0xa464c5df464aaf40ULL, 0x33979624f0e917beULL, 0x2c018dc527356b30ULL, -+ 0xa5415024e330b3d4ULL, 0x73ff3d96691652d3ULL, 0x94ec42c4ef9b59f1ULL, 0x0747201618d08e5aULL, -+ 0x4d6ca48aca411c53ULL, 0x66415f2fcfa66119ULL, 0x9c4dd40051e227ffULL, 0x59810bc09a02f7ebULL, -+ 0x2a7eb171b3dc101dULL, 0x441c5ab99ffef68eULL, 0x32025c9b93b359eaULL, 0x5e8ce0a71e9d112fULL, -+ 0xbfcccb92429503fdULL, 0xd271ba752f095d55ULL, 0x345ead5e972d091eULL, 0x18c8df11a83103baULL, -+ 0x90cd949a9aed0f4cULL, 0xc5d1f4cb6660e37eULL, 0xb8cac52d56c52e0bULL, 0x6e42e400c5808e0dULL, -+ 0xa3b46966eeaefd23ULL, 0x0c4f1f0be39ecdcaULL, 0x189dc8c9d683a51dULL, 0x51f27f054c09351bULL, -+ 0x4c487ccd2a320682ULL, 0x587ea95bb3df1c96ULL, 0xc8ccf79e555cb8e8ULL, 0x547dc829a206d73dULL, -+ 0xb822a6cd80c39b06ULL, 0xe96d54732000d4c6ULL, 0x28535b6f91463b4dULL, 0x228f4660e2486e1dULL, -+ 0x98799538de8d3abfULL, 0x8cd8330045ebca6eULL, 0x79952a008221e738ULL, 0x4322e1a7535cd2bbULL, -+ 0xb114c11819d1801cULL, 0x2016e4d84f3f5ec7ULL, 0xdd0e2df409260f4cULL, 0x5ec362c0ae5f7266ULL, -+ 0xc0462b18b8b2b4eeULL, 0x7cc8d950274d1afbULL, 0xf25f7105436b02d2ULL, 0x43bbf8dcbff9ccd3ULL, -+ 0xb6ad1767a039e9dfULL, 0xb0714da8f69d3583ULL, 0x5e55fa18b42931f5ULL, 0x4ed5558f33c60961ULL, -+ 0x1fe37901c647a5ddULL, 0x593ddf1f8081d357ULL, 0x0249a4fd813fd7a6ULL, 0x69acca274e9caf61ULL, -+ 0x047ba3ea330721c9ULL, 0x83423fc20e7e1ea0ULL, 0x1df4c0af01314a60ULL, 0x09a62dab89289527ULL, -+ 0xa5b325a49cc6cb00ULL, 0xe94b5dc654b56cb6ULL, 0x3be28779adc994a0ULL, 0x4296e8f8ba3a4aadULL, -+ 0x328689761e451eabULL, 0x2e4d598bff59594aULL, 
0x49b96853d7a7084aULL, 0x4980a319601420a8ULL, -+ 0x9565b9e12f552c42ULL, 0x8a5318db7100fe96ULL, 0x05c90b4d43add0d7ULL, 0x538b4cd66a5d4edaULL, -+ 0xf4e94fc3e89f039fULL, 0x592c9af26f618045ULL, 0x08a36eb5fd4b9550ULL, 0x25fffaf6c2ed1419ULL, -+ 0x34434459cc79d354ULL, 0xeeecbfb4b1d5476bULL, 0xddeb34a061615d99ULL, 0x5129cecceb64b773ULL, -+ 0xee43215894993520ULL, 0x772f9c7cf14c0b3bULL, 0xd2e2fce306bedad5ULL, 0x715f42b546f06a97ULL, -+ 0x434ecdceda5b5f1aULL, 0x0da17115a49741a9ULL, 0x680bd77c73edad2eULL, 0x487c02354edd9041ULL, -+ 0xb8efeff3a70ed9c4ULL, 0x56a32aa3e857e302ULL, 0xdf3a68bd48a2a5a0ULL, 0x07f650b73176c444ULL, -+ 0xe38b9b1626e0ccb1ULL, 0x79e053c18b09fb36ULL, 0x56d90319c9f94964ULL, 0x1ca941e7ac9ff5c4ULL, -+ 0x49c4df29162fa0bbULL, 0x8488cf3282b33305ULL, 0x95dfda14cabb437dULL, 0x3391f78264d5ad86ULL, -+ 0x729ae06ae2b5095dULL, 0xd58a58d73259a946ULL, 0xe9834262d13921edULL, 0x27fedafaa54bb592ULL, -+ 0xa99dc5b829ad48bbULL, 0x5f025742499ee260ULL, 0x802c8ecd5d7513fdULL, 0x78ceb3ef3f6dd938ULL, -+ 0xc342f44f8a135d94ULL, 0x7b9edb44828cdda3ULL, 0x9436d11a0537cfe7ULL, 0x5064b164ec1ab4c8ULL, -+ 0x7020eccfd37eb2fcULL, 0x1f31ea3ed90d25fcULL, 0x1b930d7bdfa1bb34ULL, 0x5344467a48113044ULL, -+ 0x70073170f25e6dfbULL, 0xe385dc1a50114cc8ULL, 0x2348698ac8fc4f00ULL, 0x2a77a55284dd40d8ULL, -+ 0xfe06afe0c98c6ce4ULL, 0xc235df96dddfd6e4ULL, 0x1428d01e33bf1ed3ULL, 0x785768ec9300bdafULL, -+ 0x9702e57a91deb63bULL, 0x61bdb8bfe5ce8b80ULL, 0x645b426f3d1d58acULL, 0x4804a82227a557bcULL, -+ 0x8e57048ab44d2601ULL, 0x68d6501a4b3a6935ULL, 0xc39c9ec3f9e1c293ULL, 0x4172f257d4de63e2ULL, -+ 0xd368b450330c6401ULL, 0x040d3017418f2391ULL, 0x2c34bb6090b7d90dULL, 0x16f649228fdfd51fULL, -+ 0xbea6818e2b928ef5ULL, 0xe28ccf91cdc11e72ULL, 0x594aaa68e77a36cdULL, 0x313034806c7ffd0fULL, -+ 0x8a9d27ac2249bd65ULL, 0x19a3b464018e9512ULL, 0xc26ccff352b37ec7ULL, 0x056f68341d797b21ULL, -+ 0x5e79d6757efd2327ULL, 0xfabdbcb6553afe15ULL, 0xd3e7222c6eaf5a60ULL, 0x7046c76d4dae743bULL, -+ 0x660be872b18d4a55ULL, 0x19992518574e1496ULL, 
0xc103053a302bdcbbULL, 0x3ed8e9800b218e8eULL, -+ 0x7b0b9239fa75e03eULL, 0xefe9fb684633c083ULL, 0x98a35fbe391a7793ULL, 0x6065510fe2d0fe34ULL, -+ 0x55cb668548abad0cULL, 0xb4584548da87e527ULL, 0x2c43ecea0107c1ddULL, 0x526028809372de35ULL, -+ 0x3415c56af9213b1fULL, 0x5bee1a4d017e98dbULL, 0x13f6b105b5cf709bULL, 0x5ff20e3482b29ab6ULL, -+ 0x0aa29c75cc2e6c90ULL, 0xfc7d73ca3a70e206ULL, 0x899fc38fc4b5c515ULL, 0x250386b124ffc207ULL, -+ 0x54ea28d5ae3d2b56ULL, 0x9913149dd6de60ceULL, 0x16694fc58f06d6c1ULL, 0x46b23975eb018fc7ULL, -+ 0x470a6a0fb4b7b4e2ULL, 0x5d92475a8f7253deULL, 0xabeee5b52fbd3adbULL, 0x7fa20801a0806968ULL, -+ 0x76f3faf19f7714d2ULL, 0xb3e840c12f4660c3ULL, 0x0fb4cd8df212744eULL, 0x4b065a251d3a2dd2ULL, -+ 0x5cebde383d77cd4aULL, 0x6adf39df882c9cb1ULL, 0xa2dd242eb09af759ULL, 0x3147c0e50e5f6422ULL, -+ 0x164ca5101d1350dbULL, 0xf8d13479c33fc962ULL, 0xe640ce4d13e5da08ULL, 0x4bdee0c45061f8baULL, -+ 0xd7c46dc1a4edb1c9ULL, 0x5514d7b6437fd98aULL, 0x58942f6bb2a1c00bULL, 0x2dffb2ab1d70710eULL, -+ 0xccdfcf2fc18b6d68ULL, 0xa8ebcba8b7806167ULL, 0x980697f95e2937e3ULL, 0x02fbba1cd0126e8cULL -+}; - --static void curve25519_bmi2_base(u8 session_key[CURVE25519_KEY_SIZE], -- const u8 private_key[CURVE25519_KEY_SIZE]) -+static void curve25519_ever64_base(u8 *out, const u8 *priv) - { -- struct { -- u64 buffer[4 * NUM_WORDS_ELTFP25519]; -- u64 coordinates[4 * NUM_WORDS_ELTFP25519]; -- u64 workspace[4 * NUM_WORDS_ELTFP25519]; -- u8 private[CURVE25519_KEY_SIZE]; -- } __aligned(32) m; -- -- const int ite[4] = { 64, 64, 64, 63 }; -- const int q = 3; - u64 swap = 1; -- -- int i = 0, j = 0, k = 0; -- u64 *const key = (u64 *)m.private; -- u64 *const Ur1 = m.coordinates + 0; -- u64 *const Zr1 = m.coordinates + 4; -- u64 *const Ur2 = m.coordinates + 8; -- u64 *const Zr2 = m.coordinates + 12; -- -- u64 *const UZr1 = m.coordinates + 0; -- u64 *const ZUr2 = m.coordinates + 8; -- -- u64 *const A = m.workspace + 0; -- u64 *const B = m.workspace + 4; -- u64 *const C = m.workspace + 8; -- u64 *const D = 
m.workspace + 12; -- -- u64 *const AB = m.workspace + 0; -- u64 *const CD = m.workspace + 8; -- -- const u64 *const P = table_ladder_8k; -- -- memcpy(m.private, private_key, sizeof(m.private)); -- -- curve25519_clamp_secret(m.private); -- -- setzero_eltfp25519_1w(Ur1); -- setzero_eltfp25519_1w(Zr1); -- setzero_eltfp25519_1w(Zr2); -- Ur1[0] = 1; -- Zr1[0] = 1; -- Zr2[0] = 1; -- -- /* G-S */ -- Ur2[3] = 0x1eaecdeee27cab34UL; -- Ur2[2] = 0xadc7a0b9235d48e2UL; -- Ur2[1] = 0xbbf095ae14b2edf8UL; -- Ur2[0] = 0x7e94e1fec82faabdUL; -- -- /* main-loop */ -- j = q; -- for (i = 0; i < NUM_WORDS_ELTFP25519; ++i) { -- while (j < ite[i]) { -- u64 bit = (key[i] >> j) & 0x1; -- k = (64 * i + j - q); -+ int i, j, k; -+ u64 tmp[16 + 32 + 4]; -+ u64 *x1 = &tmp[0]; -+ u64 *z1 = &tmp[4]; -+ u64 *x2 = &tmp[8]; -+ u64 *z2 = &tmp[12]; -+ u64 *xz1 = &tmp[0]; -+ u64 *xz2 = &tmp[8]; -+ u64 *a = &tmp[0 + 16]; -+ u64 *b = &tmp[4 + 16]; -+ u64 *c = &tmp[8 + 16]; -+ u64 *ab = &tmp[0 + 16]; -+ u64 *abcd = &tmp[0 + 16]; -+ u64 *ef = &tmp[16 + 16]; -+ u64 *efgh = &tmp[16 + 16]; -+ u64 *key = &tmp[0 + 16 + 32]; -+ -+ memcpy(key, priv, 32); -+ ((u8 *)key)[0] &= 248; -+ ((u8 *)key)[31] = (((u8 *)key)[31] & 127) | 64; -+ -+ x1[0] = 1, x1[1] = x1[2] = x1[3] = 0; -+ z1[0] = 1, z1[1] = z1[2] = z1[3] = 0; -+ z2[0] = 1, z2[1] = z2[2] = z2[3] = 0; -+ memcpy(x2, p_minus_s, sizeof(p_minus_s)); -+ -+ j = 3; -+ for (i = 0; i < 4; ++i) { -+ while (j < (const int[]){ 64, 64, 64, 63 }[i]) { -+ u64 bit = (key[i] >> j) & 1; -+ k = (64 * i + j - 3); - swap = swap ^ bit; -- cswap(swap, Ur1, Ur2); -- cswap(swap, Zr1, Zr2); -+ cswap2(swap, xz1, xz2); - swap = bit; -- /* Addition */ -- sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */ -- add_eltfp25519_1w_bmi2(A, Ur1, Zr1); /* A = Ur1+Zr1 */ -- mul_eltfp25519_1w_bmi2(C, &P[4 * k], B);/* C = M0-B */ -- sub_eltfp25519_1w(B, A, C); /* B = (Ur1+Zr1) - M*(Ur1-Zr1) */ -- add_eltfp25519_1w_bmi2(A, A, C); /* A = (Ur1+Zr1) + M*(Ur1-Zr1) */ -- sqr_eltfp25519_2w_bmi2(AB); /* A = A^2 
| B = B^2 */ -- mul_eltfp25519_2w_bmi2(UZr1, ZUr2, AB); /* Ur1 = Zr2*A | Zr1 = Ur2*B */ -+ fsub(b, x1, z1); -+ fadd(a, x1, z1); -+ fmul(c, &table_ladder[4 * k], b, ef); -+ fsub(b, a, c); -+ fadd(a, a, c); -+ fsqr2(ab, ab, efgh); -+ fmul2(xz1, xz2, ab, efgh); - ++j; - } - j = 0; - } - -- /* Doubling */ -- for (i = 0; i < q; ++i) { -- add_eltfp25519_1w_bmi2(A, Ur1, Zr1); /* A = Ur1+Zr1 */ -- sub_eltfp25519_1w(B, Ur1, Zr1); /* B = Ur1-Zr1 */ -- sqr_eltfp25519_2w_bmi2(AB); /* A = A**2 B = B**2 */ -- copy_eltfp25519_1w(C, B); /* C = B */ -- sub_eltfp25519_1w(B, A, B); /* B = A-B */ -- mul_a24_eltfp25519_1w(D, B); /* D = my_a24*B */ -- add_eltfp25519_1w_bmi2(D, D, C); /* D = D+C */ -- mul_eltfp25519_2w_bmi2(UZr1, AB, CD); /* Ur1 = A*B Zr1 = Zr1*A */ -- } -- -- /* Convert to affine coordinates */ -- inv_eltfp25519_1w_bmi2(A, Zr1); -- mul_eltfp25519_1w_bmi2((u64 *)session_key, Ur1, A); -- fred_eltfp25519_1w((u64 *)session_key); -+ point_double(xz1, abcd, efgh); -+ point_double(xz1, abcd, efgh); -+ point_double(xz1, abcd, efgh); -+ encode_point(out, xz1); - -- memzero_explicit(&m, sizeof(m)); -+ memzero_explicit(tmp, sizeof(tmp)); - } - -+static __ro_after_init DEFINE_STATIC_KEY_FALSE(curve25519_use_bmi2_adx); -+ - void curve25519_arch(u8 mypublic[CURVE25519_KEY_SIZE], - const u8 secret[CURVE25519_KEY_SIZE], - const u8 basepoint[CURVE25519_KEY_SIZE]) - { -- if (static_branch_likely(&curve25519_use_adx)) -- curve25519_adx(mypublic, secret, basepoint); -- else if (static_branch_likely(&curve25519_use_bmi2)) -- curve25519_bmi2(mypublic, secret, basepoint); -+ if (static_branch_likely(&curve25519_use_bmi2_adx)) -+ curve25519_ever64(mypublic, secret, basepoint); - else - curve25519_generic(mypublic, secret, basepoint); - } -@@ -2355,10 +1395,8 @@ EXPORT_SYMBOL(curve25519_arch); - void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE], - const u8 secret[CURVE25519_KEY_SIZE]) - { -- if (static_branch_likely(&curve25519_use_adx)) -- curve25519_adx_base(pub, secret); -- else if 
(static_branch_likely(&curve25519_use_bmi2)) -- curve25519_bmi2_base(pub, secret); -+ if (static_branch_likely(&curve25519_use_bmi2_adx)) -+ curve25519_ever64_base(pub, secret); - else - curve25519_generic(pub, secret, curve25519_base_point); - } -@@ -2449,12 +1487,11 @@ static struct kpp_alg curve25519_alg = { - .max_size = curve25519_max_size, - }; - -+ - static int __init curve25519_mod_init(void) - { -- if (boot_cpu_has(X86_FEATURE_BMI2)) -- static_branch_enable(&curve25519_use_bmi2); -- else if (boot_cpu_has(X86_FEATURE_ADX)) -- static_branch_enable(&curve25519_use_adx); -+ if (boot_cpu_has(X86_FEATURE_BMI2) && boot_cpu_has(X86_FEATURE_ADX)) -+ static_branch_enable(&curve25519_use_bmi2_adx); - else - return 0; - return IS_REACHABLE(CONFIG_CRYPTO_KPP) ? -@@ -2474,3 +1511,4 @@ module_exit(curve25519_mod_exit); - MODULE_ALIAS_CRYPTO("curve25519"); - MODULE_ALIAS_CRYPTO("curve25519-x86"); - MODULE_LICENSE("GPL v2"); -+MODULE_AUTHOR("Jason A. Donenfeld "); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0055-crypto-x86-curve25519-leave-r12-as-spare-register.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0055-crypto-x86-curve25519-leave-r12-as-spare-register.patch deleted file mode 100644 index d5b11e0d3..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0055-crypto-x86-curve25519-leave-r12-as-spare-register.patch +++ /dev/null @@ -1,376 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Sun, 1 Mar 2020 16:06:56 +0800 -Subject: [PATCH] crypto: x86/curve25519 - leave r12 as spare register - -commit dc7fc3a53ae158263196b1892b672aedf67796c5 upstream. - -This updates to the newer register selection proved by HACL*, which -leads to a more compact instruction encoding, and saves around 100 -cycles. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/x86/crypto/curve25519-x86_64.c | 110 ++++++++++++++-------------- - 1 file changed, 55 insertions(+), 55 deletions(-) - ---- a/arch/x86/crypto/curve25519-x86_64.c -+++ b/arch/x86/crypto/curve25519-x86_64.c -@@ -167,28 +167,28 @@ static inline void fmul(u64 *out, const - " movq 0(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);" - " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);" -- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" -+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" - /* Compute src1[1] * src2 */ - " movq 8(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" -- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 16(%0);" -- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);" -+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - /* Compute src1[2] * src2 */ - " movq 16(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" -- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 24(%0);" -- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);" -+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, 
%%rax;" " adcx %%r8, %%rax;" - /* Compute src1[3] * src2 */ - " movq 24(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" -- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 32(%0);" -- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " movq %%r12, 40(%0);" " mov $0, %%r8;" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);" -+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%0);" - /* Line up pointers */ -@@ -202,11 +202,11 @@ static inline void fmul(u64 *out, const - " mulxq 32(%1), %%r8, %%r13;" - " xor %3, %3;" - " adoxq 0(%1), %%r8;" -- " mulxq 40(%1), %%r9, %%r12;" -+ " mulxq 40(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 8(%1), %%r9;" - " mulxq 48(%1), %%r10, %%r13;" -- " adcx %%r12, %%r10;" -+ " adcx %%rbx, %%r10;" - " adoxq 16(%1), %%r10;" - " mulxq 56(%1), %%r11, %%rax;" - " adcx %%r13, %%r11;" -@@ -231,7 +231,7 @@ static inline void fmul(u64 *out, const - " movq %%r8, 0(%0);" - : "+&r" (tmp), "+&r" (f1), "+&r" (out), "+&r" (f2) - : -- : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "memory", "cc" -+ : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "memory", "cc" - ); - } - -@@ -248,28 +248,28 @@ static inline void fmul2(u64 *out, const - " movq 0(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);" - " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);" -- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" -+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" - " 
adox %%rdx, %%rax;" - /* Compute src1[1] * src2 */ - " movq 8(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" -- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 16(%0);" -- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);" -+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - /* Compute src1[2] * src2 */ - " movq 16(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" -- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 24(%0);" -- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);" -+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - /* Compute src1[3] * src2 */ - " movq 24(%1), %%rdx;" - " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" -- " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 32(%0);" -- " mulxq 16(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " movq %%r12, 40(%0);" " mov $0, %%r8;" -+ " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);" -+ " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " 
adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 56(%0);" - -@@ -279,28 +279,28 @@ static inline void fmul2(u64 *out, const - " movq 32(%1), %%rdx;" - " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 64(%0);" - " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%0);" -- " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;" -+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" - " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" - /* Compute src1[1] * src2 */ - " movq 40(%1), %%rdx;" - " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 72(%0), %%r8;" " movq %%r8, 72(%0);" -- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 80(%0);" -- " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;" -+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 80(%0);" -+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - /* Compute src1[2] * src2 */ - " movq 48(%1), %%rdx;" - " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 80(%0), %%r8;" " movq %%r8, 80(%0);" -- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 88(%0);" -- " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " mov $0, %%r8;" -+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 88(%0);" -+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - /* Compute src1[3] * src2 */ 
- " movq 56(%1), %%rdx;" - " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 88(%0), %%r8;" " movq %%r8, 88(%0);" -- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%r12, %%r10;" " movq %%r10, 96(%0);" -- " mulxq 48(%3), %%r12, %%r13;" " adox %%r11, %%r12;" " adcx %%r14, %%r12;" " movq %%r12, 104(%0);" " mov $0, %%r8;" -+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 96(%0);" -+ " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 104(%0);" " mov $0, %%r8;" - " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 112(%0);" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 120(%0);" - /* Line up pointers */ -@@ -314,11 +314,11 @@ static inline void fmul2(u64 *out, const - " mulxq 32(%1), %%r8, %%r13;" - " xor %3, %3;" - " adoxq 0(%1), %%r8;" -- " mulxq 40(%1), %%r9, %%r12;" -+ " mulxq 40(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 8(%1), %%r9;" - " mulxq 48(%1), %%r10, %%r13;" -- " adcx %%r12, %%r10;" -+ " adcx %%rbx, %%r10;" - " adoxq 16(%1), %%r10;" - " mulxq 56(%1), %%r11, %%rax;" - " adcx %%r13, %%r11;" -@@ -347,11 +347,11 @@ static inline void fmul2(u64 *out, const - " mulxq 96(%1), %%r8, %%r13;" - " xor %3, %3;" - " adoxq 64(%1), %%r8;" -- " mulxq 104(%1), %%r9, %%r12;" -+ " mulxq 104(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 72(%1), %%r9;" - " mulxq 112(%1), %%r10, %%r13;" -- " adcx %%r12, %%r10;" -+ " adcx %%rbx, %%r10;" - " adoxq 80(%1), %%r10;" - " mulxq 120(%1), %%r11, %%rax;" - " adcx %%r13, %%r11;" -@@ -376,7 +376,7 @@ static inline void fmul2(u64 *out, const - " movq %%r8, 32(%0);" - : "+&r" (tmp), "+&r" (f1), "+&r" (out), "+&r" (f2) - : -- : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "memory", "cc" -+ : "%rax", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "memory", "cc" - ); - } - -@@ -388,11 +388,11 @@ static inline void 
fmul_scalar(u64 *out, - asm volatile( - /* Compute the raw multiplication of f1*f2 */ - " mulxq 0(%2), %%r8, %%rcx;" /* f1[0]*f2 */ -- " mulxq 8(%2), %%r9, %%r12;" /* f1[1]*f2 */ -+ " mulxq 8(%2), %%r9, %%rbx;" /* f1[1]*f2 */ - " add %%rcx, %%r9;" - " mov $0, %%rcx;" - " mulxq 16(%2), %%r10, %%r13;" /* f1[2]*f2 */ -- " adcx %%r12, %%r10;" -+ " adcx %%rbx, %%r10;" - " mulxq 24(%2), %%r11, %%rax;" /* f1[3]*f2 */ - " adcx %%r13, %%r11;" - " adcx %%rcx, %%rax;" -@@ -419,7 +419,7 @@ static inline void fmul_scalar(u64 *out, - " movq %%r8, 0(%1);" - : "+&r" (f2_r) - : "r" (out), "r" (f1) -- : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "memory", "cc" -+ : "%rax", "%rcx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "memory", "cc" - ); - } - -@@ -520,8 +520,8 @@ static inline void fsqr(u64 *out, const - " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ - " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ - " movq 24(%1), %%rdx;" /* f[3] */ -- " mulxq 8(%1), %%r11, %%r12;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */ -- " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%r12;" /* f[2]*f[3] */ -+ " mulxq 8(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */ -+ " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */ - " movq 8(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */ - " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ - -@@ -531,12 +531,12 @@ static inline void fsqr(u64 *out, const - " adcx %%r8, %%r8;" - " adox %%rcx, %%r11;" - " adcx %%r9, %%r9;" -- " adox %%r15, %%r12;" -+ " adox %%r15, %%rbx;" - " adcx %%r10, %%r10;" - " adox %%r15, %%r13;" - " adcx %%r11, %%r11;" - " adox %%r15, %%r14;" -- " adcx %%r12, %%r12;" -+ " adcx %%rbx, %%rbx;" - " adcx %%r13, %%r13;" - " adcx %%r14, %%r14;" - -@@ -549,7 +549,7 @@ static inline void fsqr(u64 *out, const - " adcx %%rcx, %%r10;" " movq %%r10, 24(%0);" - " movq 16(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ - " adcx %%rax, %%r11;" " movq %%r11, 
32(%0);" -- " adcx %%rcx, %%r12;" " movq %%r12, 40(%0);" -+ " adcx %%rcx, %%rbx;" " movq %%rbx, 40(%0);" - " movq 24(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */ - " adcx %%rax, %%r13;" " movq %%r13, 48(%0);" - " adcx %%rcx, %%r14;" " movq %%r14, 56(%0);" -@@ -565,11 +565,11 @@ static inline void fsqr(u64 *out, const - " mulxq 32(%1), %%r8, %%r13;" - " xor %%rcx, %%rcx;" - " adoxq 0(%1), %%r8;" -- " mulxq 40(%1), %%r9, %%r12;" -+ " mulxq 40(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 8(%1), %%r9;" - " mulxq 48(%1), %%r10, %%r13;" -- " adcx %%r12, %%r10;" -+ " adcx %%rbx, %%r10;" - " adoxq 16(%1), %%r10;" - " mulxq 56(%1), %%r11, %%rax;" - " adcx %%r13, %%r11;" -@@ -594,7 +594,7 @@ static inline void fsqr(u64 *out, const - " movq %%r8, 0(%0);" - : "+&r" (tmp), "+&r" (f), "+&r" (out) - : -- : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "memory", "cc" -+ : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "%r15", "memory", "cc" - ); - } - -@@ -611,8 +611,8 @@ static inline void fsqr2(u64 *out, const - " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ - " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ - " movq 24(%1), %%rdx;" /* f[3] */ -- " mulxq 8(%1), %%r11, %%r12;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */ -- " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%r12;" /* f[2]*f[3] */ -+ " mulxq 8(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */ -+ " mulxq 16(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */ - " movq 8(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */ - " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ - -@@ -622,12 +622,12 @@ static inline void fsqr2(u64 *out, const - " adcx %%r8, %%r8;" - " adox %%rcx, %%r11;" - " adcx %%r9, %%r9;" -- " adox %%r15, %%r12;" -+ " adox %%r15, %%rbx;" - " adcx %%r10, %%r10;" - " adox %%r15, %%r13;" - " adcx %%r11, %%r11;" - " adox %%r15, %%r14;" -- " adcx %%r12, %%r12;" -+ " adcx 
%%rbx, %%rbx;" - " adcx %%r13, %%r13;" - " adcx %%r14, %%r14;" - -@@ -640,7 +640,7 @@ static inline void fsqr2(u64 *out, const - " adcx %%rcx, %%r10;" " movq %%r10, 24(%0);" - " movq 16(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ - " adcx %%rax, %%r11;" " movq %%r11, 32(%0);" -- " adcx %%rcx, %%r12;" " movq %%r12, 40(%0);" -+ " adcx %%rcx, %%rbx;" " movq %%rbx, 40(%0);" - " movq 24(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */ - " adcx %%rax, %%r13;" " movq %%r13, 48(%0);" - " adcx %%rcx, %%r14;" " movq %%r14, 56(%0);" -@@ -651,8 +651,8 @@ static inline void fsqr2(u64 *out, const - " mulxq 48(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ - " mulxq 56(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ - " movq 56(%1), %%rdx;" /* f[3] */ -- " mulxq 40(%1), %%r11, %%r12;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */ -- " mulxq 48(%1), %%rax, %%r13;" " adcx %%rax, %%r12;" /* f[2]*f[3] */ -+ " mulxq 40(%1), %%r11, %%rbx;" " adcx %%rcx, %%r11;" /* f[1]*f[3] */ -+ " mulxq 48(%1), %%rax, %%r13;" " adcx %%rax, %%rbx;" /* f[2]*f[3] */ - " movq 40(%1), %%rdx;" " adcx %%r15, %%r13;" /* f1 */ - " mulxq 48(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ - -@@ -662,12 +662,12 @@ static inline void fsqr2(u64 *out, const - " adcx %%r8, %%r8;" - " adox %%rcx, %%r11;" - " adcx %%r9, %%r9;" -- " adox %%r15, %%r12;" -+ " adox %%r15, %%rbx;" - " adcx %%r10, %%r10;" - " adox %%r15, %%r13;" - " adcx %%r11, %%r11;" - " adox %%r15, %%r14;" -- " adcx %%r12, %%r12;" -+ " adcx %%rbx, %%rbx;" - " adcx %%r13, %%r13;" - " adcx %%r14, %%r14;" - -@@ -680,7 +680,7 @@ static inline void fsqr2(u64 *out, const - " adcx %%rcx, %%r10;" " movq %%r10, 88(%0);" - " movq 48(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[2]^2 */ - " adcx %%rax, %%r11;" " movq %%r11, 96(%0);" -- " adcx %%rcx, %%r12;" " movq %%r12, 104(%0);" -+ " adcx %%rcx, %%rbx;" " movq %%rbx, 104(%0);" - " movq 56(%1), %%rdx;" " mulx %%rdx, %%rax, %%rcx;" /* f[3]^2 */ - " adcx %%rax, %%r13;" " movq %%r13, 
112(%0);" - " adcx %%rcx, %%r14;" " movq %%r14, 120(%0);" -@@ -694,11 +694,11 @@ static inline void fsqr2(u64 *out, const - " mulxq 32(%1), %%r8, %%r13;" - " xor %%rcx, %%rcx;" - " adoxq 0(%1), %%r8;" -- " mulxq 40(%1), %%r9, %%r12;" -+ " mulxq 40(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 8(%1), %%r9;" - " mulxq 48(%1), %%r10, %%r13;" -- " adcx %%r12, %%r10;" -+ " adcx %%rbx, %%r10;" - " adoxq 16(%1), %%r10;" - " mulxq 56(%1), %%r11, %%rax;" - " adcx %%r13, %%r11;" -@@ -727,11 +727,11 @@ static inline void fsqr2(u64 *out, const - " mulxq 96(%1), %%r8, %%r13;" - " xor %%rcx, %%rcx;" - " adoxq 64(%1), %%r8;" -- " mulxq 104(%1), %%r9, %%r12;" -+ " mulxq 104(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" - " adoxq 72(%1), %%r9;" - " mulxq 112(%1), %%r10, %%r13;" -- " adcx %%r12, %%r10;" -+ " adcx %%rbx, %%r10;" - " adoxq 80(%1), %%r10;" - " mulxq 120(%1), %%r11, %%rax;" - " adcx %%r13, %%r11;" -@@ -756,7 +756,7 @@ static inline void fsqr2(u64 *out, const - " movq %%r8, 32(%0);" - : "+&r" (tmp), "+&r" (f), "+&r" (out) - : -- : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15", "memory", "cc" -+ : "%rax", "%rcx", "%rdx", "%r8", "%r9", "%r10", "%r11", "%rbx", "%r13", "%r14", "%r15", "memory", "cc" - ); - } - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0056-crypto-arm-64-poly1305-add-artifact-to-.gitignore-fi.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0056-crypto-arm-64-poly1305-add-artifact-to-.gitignore-fi.patch deleted file mode 100644 index 655371630..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0056-crypto-arm-64-poly1305-add-artifact-to-.gitignore-fi.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 19 Mar 2020 11:56:17 -0600 -Subject: [PATCH] crypto: arm[64]/poly1305 - add artifact to .gitignore files - -commit 6e4e00d8b68ca7eb30d08afb740033e0d36abe55 upstream. 
- -The .S_shipped yields a .S, and the pattern in these directories is to -add that to .gitignore so that git-status doesn't raise a fuss. - -Fixes: a6b803b3ddc7 ("crypto: arm/poly1305 - incorporate OpenSSL/CRYPTOGAMS NEON implementation") -Fixes: f569ca164751 ("crypto: arm64/poly1305 - incorporate OpenSSL/CRYPTOGAMS NEON implementation") -Reported-by: Emil Renner Berthing -Cc: Ard Biesheuvel -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm/crypto/.gitignore | 1 + - arch/arm64/crypto/.gitignore | 1 + - 2 files changed, 2 insertions(+) - ---- a/arch/arm/crypto/.gitignore -+++ b/arch/arm/crypto/.gitignore -@@ -1,3 +1,4 @@ - aesbs-core.S - sha256-core.S - sha512-core.S -+poly1305-core.S ---- a/arch/arm64/crypto/.gitignore -+++ b/arch/arm64/crypto/.gitignore -@@ -1,2 +1,3 @@ - sha256-core.S - sha512-core.S -+poly1305-core.S diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0057-crypto-arch-lib-limit-simd-usage-to-4k-chunks.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0057-crypto-arch-lib-limit-simd-usage-to-4k-chunks.patch deleted file mode 100644 index f8828f243..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0057-crypto-arch-lib-limit-simd-usage-to-4k-chunks.patch +++ /dev/null @@ -1,243 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 23 Apr 2020 15:54:04 -0600 -Subject: [PATCH] crypto: arch/lib - limit simd usage to 4k chunks - -commit 706024a52c614b478b63f7728d202532ce6591a9 upstream. - -The initial Zinc patchset, after some mailing list discussion, contained -code to ensure that kernel_fpu_enable would not be kept on for more than -a 4k chunk, since it disables preemption. 
The choice of 4k isn't totally -scientific, but it's not a bad guess either, and it's what's used in -both the x86 poly1305, blake2s, and nhpoly1305 code already (in the form -of PAGE_SIZE, which this commit corrects to be explicitly 4k for the -former two). - -Ard did some back of the envelope calculations and found that -at 5 cycles/byte (overestimate) on a 1ghz processor (pretty slow), 4k -means we have a maximum preemption disabling of 20us, which Sebastian -confirmed was probably a good limit. - -Unfortunately the chunking appears to have been left out of the final -patchset that added the glue code. So, this commit adds it back in. - -Fixes: 84e03fa39fbe ("crypto: x86/chacha - expose SIMD ChaCha routine as library function") -Fixes: b3aad5bad26a ("crypto: arm64/chacha - expose arm64 ChaCha routine as library function") -Fixes: a44a3430d71b ("crypto: arm/chacha - expose ARM ChaCha routine as library function") -Fixes: d7d7b8535662 ("crypto: x86/poly1305 - wire up faster implementations for kernel") -Fixes: f569ca164751 ("crypto: arm64/poly1305 - incorporate OpenSSL/CRYPTOGAMS NEON implementation") -Fixes: a6b803b3ddc7 ("crypto: arm/poly1305 - incorporate OpenSSL/CRYPTOGAMS NEON implementation") -Fixes: ed0356eda153 ("crypto: blake2s - x86_64 SIMD implementation") -Cc: Eric Biggers -Cc: Ard Biesheuvel -Cc: Sebastian Andrzej Siewior -Cc: stable@vger.kernel.org -Signed-off-by: Jason A. Donenfeld -Reviewed-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/arm/crypto/chacha-glue.c | 14 +++++++++++--- - arch/arm/crypto/poly1305-glue.c | 15 +++++++++++---- - arch/arm64/crypto/chacha-neon-glue.c | 14 +++++++++++--- - arch/arm64/crypto/poly1305-glue.c | 15 +++++++++++---- - arch/x86/crypto/blake2s-glue.c | 10 ++++------ - arch/x86/crypto/chacha_glue.c | 14 +++++++++++--- - arch/x86/crypto/poly1305_glue.c | 13 ++++++------- - 7 files changed, 65 insertions(+), 30 deletions(-) - ---- a/arch/arm/crypto/chacha-glue.c -+++ b/arch/arm/crypto/chacha-glue.c -@@ -91,9 +91,17 @@ void chacha_crypt_arch(u32 *state, u8 *d - return; - } - -- kernel_neon_begin(); -- chacha_doneon(state, dst, src, bytes, nrounds); -- kernel_neon_end(); -+ do { -+ unsigned int todo = min_t(unsigned int, bytes, SZ_4K); -+ -+ kernel_neon_begin(); -+ chacha_doneon(state, dst, src, todo, nrounds); -+ kernel_neon_end(); -+ -+ bytes -= todo; -+ src += todo; -+ dst += todo; -+ } while (bytes); - } - EXPORT_SYMBOL(chacha_crypt_arch); - ---- a/arch/arm/crypto/poly1305-glue.c -+++ b/arch/arm/crypto/poly1305-glue.c -@@ -160,13 +160,20 @@ void poly1305_update_arch(struct poly130 - unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); - - if (static_branch_likely(&have_neon) && do_neon) { -- kernel_neon_begin(); -- poly1305_blocks_neon(&dctx->h, src, len, 1); -- kernel_neon_end(); -+ do { -+ unsigned int todo = min_t(unsigned int, len, SZ_4K); -+ -+ kernel_neon_begin(); -+ poly1305_blocks_neon(&dctx->h, src, todo, 1); -+ kernel_neon_end(); -+ -+ len -= todo; -+ src += todo; -+ } while (len); - } else { - poly1305_blocks_arm(&dctx->h, src, len, 1); -+ src += len; - } -- src += len; - nbytes %= POLY1305_BLOCK_SIZE; - } - ---- a/arch/arm64/crypto/chacha-neon-glue.c -+++ b/arch/arm64/crypto/chacha-neon-glue.c -@@ -87,9 +87,17 @@ void chacha_crypt_arch(u32 *state, u8 *d - !crypto_simd_usable()) - return chacha_crypt_generic(state, dst, src, bytes, nrounds); - -- kernel_neon_begin(); -- chacha_doneon(state, dst, src, bytes, nrounds); -- 
kernel_neon_end(); -+ do { -+ unsigned int todo = min_t(unsigned int, bytes, SZ_4K); -+ -+ kernel_neon_begin(); -+ chacha_doneon(state, dst, src, todo, nrounds); -+ kernel_neon_end(); -+ -+ bytes -= todo; -+ src += todo; -+ dst += todo; -+ } while (bytes); - } - EXPORT_SYMBOL(chacha_crypt_arch); - ---- a/arch/arm64/crypto/poly1305-glue.c -+++ b/arch/arm64/crypto/poly1305-glue.c -@@ -143,13 +143,20 @@ void poly1305_update_arch(struct poly130 - unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE); - - if (static_branch_likely(&have_neon) && crypto_simd_usable()) { -- kernel_neon_begin(); -- poly1305_blocks_neon(&dctx->h, src, len, 1); -- kernel_neon_end(); -+ do { -+ unsigned int todo = min_t(unsigned int, len, SZ_4K); -+ -+ kernel_neon_begin(); -+ poly1305_blocks_neon(&dctx->h, src, todo, 1); -+ kernel_neon_end(); -+ -+ len -= todo; -+ src += todo; -+ } while (len); - } else { - poly1305_blocks(&dctx->h, src, len, 1); -+ src += len; - } -- src += len; - nbytes %= POLY1305_BLOCK_SIZE; - } - ---- a/arch/x86/crypto/blake2s-glue.c -+++ b/arch/x86/crypto/blake2s-glue.c -@@ -32,16 +32,16 @@ void blake2s_compress_arch(struct blake2 - const u32 inc) - { - /* SIMD disables preemption, so relax after processing each page. 
*/ -- BUILD_BUG_ON(PAGE_SIZE / BLAKE2S_BLOCK_SIZE < 8); -+ BUILD_BUG_ON(SZ_4K / BLAKE2S_BLOCK_SIZE < 8); - - if (!static_branch_likely(&blake2s_use_ssse3) || !crypto_simd_usable()) { - blake2s_compress_generic(state, block, nblocks, inc); - return; - } - -- for (;;) { -+ do { - const size_t blocks = min_t(size_t, nblocks, -- PAGE_SIZE / BLAKE2S_BLOCK_SIZE); -+ SZ_4K / BLAKE2S_BLOCK_SIZE); - - kernel_fpu_begin(); - if (IS_ENABLED(CONFIG_AS_AVX512) && -@@ -52,10 +52,8 @@ void blake2s_compress_arch(struct blake2 - kernel_fpu_end(); - - nblocks -= blocks; -- if (!nblocks) -- break; - block += blocks * BLAKE2S_BLOCK_SIZE; -- } -+ } while (nblocks); - } - EXPORT_SYMBOL(blake2s_compress_arch); - ---- a/arch/x86/crypto/chacha_glue.c -+++ b/arch/x86/crypto/chacha_glue.c -@@ -154,9 +154,17 @@ void chacha_crypt_arch(u32 *state, u8 *d - bytes <= CHACHA_BLOCK_SIZE) - return chacha_crypt_generic(state, dst, src, bytes, nrounds); - -- kernel_fpu_begin(); -- chacha_dosimd(state, dst, src, bytes, nrounds); -- kernel_fpu_end(); -+ do { -+ unsigned int todo = min_t(unsigned int, bytes, SZ_4K); -+ -+ kernel_fpu_begin(); -+ chacha_dosimd(state, dst, src, todo, nrounds); -+ kernel_fpu_end(); -+ -+ bytes -= todo; -+ src += todo; -+ dst += todo; -+ } while (bytes); - } - EXPORT_SYMBOL(chacha_crypt_arch); - ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -91,8 +91,8 @@ static void poly1305_simd_blocks(void *c - struct poly1305_arch_internal *state = ctx; - - /* SIMD disables preemption, so relax after processing each page. 
*/ -- BUILD_BUG_ON(PAGE_SIZE < POLY1305_BLOCK_SIZE || -- PAGE_SIZE % POLY1305_BLOCK_SIZE); -+ BUILD_BUG_ON(SZ_4K < POLY1305_BLOCK_SIZE || -+ SZ_4K % POLY1305_BLOCK_SIZE); - - if (!IS_ENABLED(CONFIG_AS_AVX) || !static_branch_likely(&poly1305_use_avx) || - (len < (POLY1305_BLOCK_SIZE * 18) && !state->is_base2_26) || -@@ -102,8 +102,8 @@ static void poly1305_simd_blocks(void *c - return; - } - -- for (;;) { -- const size_t bytes = min_t(size_t, len, PAGE_SIZE); -+ do { -+ const size_t bytes = min_t(size_t, len, SZ_4K); - - kernel_fpu_begin(); - if (IS_ENABLED(CONFIG_AS_AVX512) && static_branch_likely(&poly1305_use_avx512)) -@@ -113,11 +113,10 @@ static void poly1305_simd_blocks(void *c - else - poly1305_blocks_avx(ctx, inp, bytes, padbit); - kernel_fpu_end(); -+ - len -= bytes; -- if (!len) -- break; - inp += bytes; -- } -+ } while (len); - } - - static void poly1305_simd_emit(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0058-crypto-lib-chacha20poly1305-Add-missing-function-dec.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0058-crypto-lib-chacha20poly1305-Add-missing-function-dec.patch deleted file mode 100644 index 736147f93..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0058-crypto-lib-chacha20poly1305-Add-missing-function-dec.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Herbert Xu -Date: Wed, 8 Jul 2020 12:41:13 +1000 -Subject: [PATCH] crypto: lib/chacha20poly1305 - Add missing function - declaration - -commit 06cc2afbbdf9a9e8df3e2f8db724997dd6e1b4ac upstream. - -This patch adds a declaration for chacha20poly1305_selftest to -silence a sparse warning. - -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - include/crypto/chacha20poly1305.h | 2 ++ - lib/crypto/chacha20poly1305.c | 2 -- - 2 files changed, 2 insertions(+), 2 deletions(-) - ---- a/include/crypto/chacha20poly1305.h -+++ b/include/crypto/chacha20poly1305.h -@@ -45,4 +45,6 @@ bool chacha20poly1305_decrypt_sg_inplace - const u64 nonce, - const u8 key[CHACHA20POLY1305_KEY_SIZE]); - -+bool chacha20poly1305_selftest(void); -+ - #endif /* __CHACHA20POLY1305_H */ ---- a/lib/crypto/chacha20poly1305.c -+++ b/lib/crypto/chacha20poly1305.c -@@ -21,8 +21,6 @@ - - #define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32)) - --bool __init chacha20poly1305_selftest(void); -- - static void chacha_load_key(u32 *k, const u8 *in) - { - k[0] = get_unaligned_le32(in); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0059-crypto-x86-chacha-sse3-use-unaligned-loads-for-state.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0059-crypto-x86-chacha-sse3-use-unaligned-loads-for-state.patch deleted file mode 100644 index 52847877f..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0059-crypto-x86-chacha-sse3-use-unaligned-loads-for-state.patch +++ /dev/null @@ -1,147 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Wed, 8 Jul 2020 12:11:18 +0300 -Subject: [PATCH] crypto: x86/chacha-sse3 - use unaligned loads for state array - -commit e79a31715193686e92dadb4caedfbb1f5de3659c upstream. - -Due to the fact that the x86 port does not support allocating objects -on the stack with an alignment that exceeds 8 bytes, we have a rather -ugly hack in the x86 code for ChaCha to ensure that the state array is -aligned to 16 bytes, allowing the SSE3 implementation of the algorithm -to use aligned loads. - -Given that the performance benefit of using of aligned loads appears to -be limited (~0.25% for 1k blocks using tcrypt on a Corei7-8650U), and -the fact that this hack has leaked into generic ChaCha code, let's just -remove it. 
- -Cc: Martin Willi -Cc: Herbert Xu -Cc: Eric Biggers -Signed-off-by: Ard Biesheuvel -Reviewed-by: Martin Willi -Reviewed-by: Eric Biggers -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/chacha-ssse3-x86_64.S | 16 ++++++++-------- - arch/x86/crypto/chacha_glue.c | 17 ++--------------- - include/crypto/chacha.h | 4 ---- - 3 files changed, 10 insertions(+), 27 deletions(-) - ---- a/arch/x86/crypto/chacha-ssse3-x86_64.S -+++ b/arch/x86/crypto/chacha-ssse3-x86_64.S -@@ -120,10 +120,10 @@ ENTRY(chacha_block_xor_ssse3) - FRAME_BEGIN - - # x0..3 = s0..3 -- movdqa 0x00(%rdi),%xmm0 -- movdqa 0x10(%rdi),%xmm1 -- movdqa 0x20(%rdi),%xmm2 -- movdqa 0x30(%rdi),%xmm3 -+ movdqu 0x00(%rdi),%xmm0 -+ movdqu 0x10(%rdi),%xmm1 -+ movdqu 0x20(%rdi),%xmm2 -+ movdqu 0x30(%rdi),%xmm3 - movdqa %xmm0,%xmm8 - movdqa %xmm1,%xmm9 - movdqa %xmm2,%xmm10 -@@ -205,10 +205,10 @@ ENTRY(hchacha_block_ssse3) - # %edx: nrounds - FRAME_BEGIN - -- movdqa 0x00(%rdi),%xmm0 -- movdqa 0x10(%rdi),%xmm1 -- movdqa 0x20(%rdi),%xmm2 -- movdqa 0x30(%rdi),%xmm3 -+ movdqu 0x00(%rdi),%xmm0 -+ movdqu 0x10(%rdi),%xmm1 -+ movdqu 0x20(%rdi),%xmm2 -+ movdqu 0x30(%rdi),%xmm3 - - mov %edx,%r8d - call chacha_permute ---- a/arch/x86/crypto/chacha_glue.c -+++ b/arch/x86/crypto/chacha_glue.c -@@ -14,8 +14,6 @@ - #include - #include - --#define CHACHA_STATE_ALIGN 16 -- - asmlinkage void chacha_block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, - unsigned int len, int nrounds); - asmlinkage void chacha_4block_xor_ssse3(u32 *state, u8 *dst, const u8 *src, -@@ -125,8 +123,6 @@ static void chacha_dosimd(u32 *state, u8 - - void hchacha_block_arch(const u32 *state, u32 *stream, int nrounds) - { -- state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); -- - if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable()) { - hchacha_block_generic(state, stream, nrounds); - } else { -@@ -139,8 +135,6 @@ EXPORT_SYMBOL(hchacha_block_arch); - - void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv) 
- { -- state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); -- - chacha_init_generic(state, key, iv); - } - EXPORT_SYMBOL(chacha_init_arch); -@@ -148,8 +142,6 @@ EXPORT_SYMBOL(chacha_init_arch); - void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, - int nrounds) - { -- state = PTR_ALIGN(state, CHACHA_STATE_ALIGN); -- - if (!static_branch_likely(&chacha_use_simd) || !crypto_simd_usable() || - bytes <= CHACHA_BLOCK_SIZE) - return chacha_crypt_generic(state, dst, src, bytes, nrounds); -@@ -171,15 +163,12 @@ EXPORT_SYMBOL(chacha_crypt_arch); - static int chacha_simd_stream_xor(struct skcipher_request *req, - const struct chacha_ctx *ctx, const u8 *iv) - { -- u32 *state, state_buf[16 + 2] __aligned(8); -+ u32 state[CHACHA_STATE_WORDS] __aligned(8); - struct skcipher_walk walk; - int err; - - err = skcipher_walk_virt(&walk, req, false); - -- BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); -- state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN); -- - chacha_init_generic(state, ctx->key, iv); - - while (walk.nbytes > 0) { -@@ -218,12 +207,10 @@ static int xchacha_simd(struct skcipher_ - { - struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); - struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm); -- u32 *state, state_buf[16 + 2] __aligned(8); -+ u32 state[CHACHA_STATE_WORDS] __aligned(8); - struct chacha_ctx subctx; - u8 real_iv[16]; - -- BUILD_BUG_ON(CHACHA_STATE_ALIGN != 16); -- state = PTR_ALIGN(state_buf + 0, CHACHA_STATE_ALIGN); - chacha_init_generic(state, ctx->key, req->iv); - - if (req->cryptlen > CHACHA_BLOCK_SIZE && crypto_simd_usable()) { ---- a/include/crypto/chacha.h -+++ b/include/crypto/chacha.h -@@ -25,11 +25,7 @@ - #define CHACHA_BLOCK_SIZE 64 - #define CHACHAPOLY_IV_SIZE 12 - --#ifdef CONFIG_X86_64 --#define CHACHA_STATE_WORDS ((CHACHA_BLOCK_SIZE + 12) / sizeof(u32)) --#else - #define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32)) --#endif - - /* 192-bit nonce, then 64-bit stream position */ - #define XCHACHA_IV_SIZE 32 diff --git 
a/feeds/ipq807x/ipq807x/patches/080-wireguard-0060-crypto-x86-curve25519-Remove-unused-carry-variables.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0060-crypto-x86-curve25519-Remove-unused-carry-variables.patch deleted file mode 100644 index 5a2d20a98..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0060-crypto-x86-curve25519-Remove-unused-carry-variables.patch +++ /dev/null @@ -1,46 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Herbert Xu -Date: Thu, 23 Jul 2020 17:50:48 +1000 -Subject: [PATCH] crypto: x86/curve25519 - Remove unused carry variables - -commit 054a5540fb8f7268e2c79e9deab4242db15c8cba upstream. - -The carry variables are assigned but never used, which upsets -the compiler. This patch removes them. - -Signed-off-by: Herbert Xu -Reviewed-by: Karthikeyan Bhargavan -Acked-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/curve25519-x86_64.c | 6 ++---- - 1 file changed, 2 insertions(+), 4 deletions(-) - ---- a/arch/x86/crypto/curve25519-x86_64.c -+++ b/arch/x86/crypto/curve25519-x86_64.c -@@ -948,10 +948,8 @@ static void store_felem(u64 *b, u64 *f) - { - u64 f30 = f[3U]; - u64 top_bit0 = f30 >> (u32)63U; -- u64 carry0; - u64 f31; - u64 top_bit; -- u64 carry; - u64 f0; - u64 f1; - u64 f2; -@@ -970,11 +968,11 @@ static void store_felem(u64 *b, u64 *f) - u64 o2; - u64 o3; - f[3U] = f30 & (u64)0x7fffffffffffffffU; -- carry0 = add_scalar(f, f, (u64)19U * top_bit0); -+ add_scalar(f, f, (u64)19U * top_bit0); - f31 = f[3U]; - top_bit = f31 >> (u32)63U; - f[3U] = f31 & (u64)0x7fffffffffffffffU; -- carry = add_scalar(f, f, (u64)19U * top_bit); -+ add_scalar(f, f, (u64)19U * top_bit); - f0 = f[0U]; - f1 = f[1U]; - f2 = f[2U]; diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0061-crypto-arm-curve25519-include-linux-scatterlist.h.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0061-crypto-arm-curve25519-include-linux-scatterlist.h.patch 
deleted file mode 100644 index b58fd08fc..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0061-crypto-arm-curve25519-include-linux-scatterlist.h.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Fabio Estevam -Date: Mon, 24 Aug 2020 11:09:53 -0300 -Subject: [PATCH] crypto: arm/curve25519 - include - -commit 6779d0e6b0fe193ab3010ea201782ca6f75a3862 upstream. - -Building ARM allmodconfig leads to the following warnings: - -arch/arm/crypto/curve25519-glue.c:73:12: error: implicit declaration of function 'sg_copy_to_buffer' [-Werror=implicit-function-declaration] -arch/arm/crypto/curve25519-glue.c:74:9: error: implicit declaration of function 'sg_nents_for_len' [-Werror=implicit-function-declaration] -arch/arm/crypto/curve25519-glue.c:88:11: error: implicit declaration of function 'sg_copy_from_buffer' [-Werror=implicit-function-declaration] - -Include to fix such warnings - -Reported-by: Olof's autobuilder -Fixes: 0c3dc787a62a ("crypto: algapi - Remove skbuff.h inclusion") -Signed-off-by: Fabio Estevam -Acked-by: Ard Biesheuvel -Acked-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/arm/crypto/curve25519-glue.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/arm/crypto/curve25519-glue.c -+++ b/arch/arm/crypto/curve25519-glue.c -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - #include - - asmlinkage void curve25519_neon(u8 mypublic[CURVE25519_KEY_SIZE], diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0062-crypto-arm-poly1305-Add-prototype-for-poly1305_block.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0062-crypto-arm-poly1305-Add-prototype-for-poly1305_block.patch deleted file mode 100644 index cf3724a49..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0062-crypto-arm-poly1305-Add-prototype-for-poly1305_block.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Herbert Xu -Date: Tue, 25 Aug 2020 11:23:00 +1000 -Subject: [PATCH] crypto: arm/poly1305 - Add prototype for poly1305_blocks_neon - -commit 51982ea02aef972132eb35c583d3e4c5b83166e5 upstream. - -This patch adds a prototype for poly1305_blocks_neon to slience -a compiler warning: - - CC [M] arch/arm/crypto/poly1305-glue.o -../arch/arm/crypto/poly1305-glue.c:25:13: warning: no previous prototype for `poly1305_blocks_neon' [-Wmissing-prototypes] - void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit) - ^~~~~~~~~~~~~~~~~~~~ - -Signed-off-by: Herbert Xu -Acked-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/arm/crypto/poly1305-glue.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/arm/crypto/poly1305-glue.c -+++ b/arch/arm/crypto/poly1305-glue.c -@@ -20,6 +20,7 @@ - - void poly1305_init_arm(void *state, const u8 *key); - void poly1305_blocks_arm(void *state, const u8 *src, u32 len, u32 hibit); -+void poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit); - void poly1305_emit_arm(void *state, u8 *digest, const u32 *nonce); - - void __weak poly1305_blocks_neon(void *state, const u8 *src, u32 len, u32 hibit) diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0063-crypto-curve25519-x86_64-Use-XORL-r32-32.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0063-crypto-curve25519-x86_64-Use-XORL-r32-32.patch deleted file mode 100644 index dd76e2a1f..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0063-crypto-curve25519-x86_64-Use-XORL-r32-32.patch +++ /dev/null @@ -1,261 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Uros Bizjak -Date: Thu, 27 Aug 2020 19:30:58 +0200 -Subject: [PATCH] crypto: curve25519-x86_64 - Use XORL r32,32 - -commit db719539fd3889836900bf912755aa30a5985e9a upstream. - -x86_64 zero extends 32bit operations, so for 64bit operands, -XORL r32,r32 is functionally equal to XORL r64,r64, but avoids -a REX prefix byte when legacy registers are used. - -Signed-off-by: Uros Bizjak -Cc: Herbert Xu -Cc: "David S. Miller" -Acked-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/x86/crypto/curve25519-x86_64.c | 68 ++++++++++++++--------------- - 1 file changed, 34 insertions(+), 34 deletions(-) - ---- a/arch/x86/crypto/curve25519-x86_64.c -+++ b/arch/x86/crypto/curve25519-x86_64.c -@@ -45,11 +45,11 @@ static inline u64 add_scalar(u64 *out, c - - asm volatile( - /* Clear registers to propagate the carry bit */ -- " xor %%r8, %%r8;" -- " xor %%r9, %%r9;" -- " xor %%r10, %%r10;" -- " xor %%r11, %%r11;" -- " xor %1, %1;" -+ " xor %%r8d, %%r8d;" -+ " xor %%r9d, %%r9d;" -+ " xor %%r10d, %%r10d;" -+ " xor %%r11d, %%r11d;" -+ " xor %k1, %k1;" - - /* Begin addition chain */ - " addq 0(%3), %0;" -@@ -93,7 +93,7 @@ static inline void fadd(u64 *out, const - " cmovc %0, %%rax;" - - /* Step 2: Add carry*38 to the original sum */ -- " xor %%rcx, %%rcx;" -+ " xor %%ecx, %%ecx;" - " add %%rax, %%r8;" - " adcx %%rcx, %%r9;" - " movq %%r9, 8(%1);" -@@ -165,28 +165,28 @@ static inline void fmul(u64 *out, const - - /* Compute src1[0] * src2 */ - " movq 0(%1), %%rdx;" -- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " movq %%r8, 0(%0);" - " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);" - " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" - /* Compute src1[1] * src2 */ - " movq 8(%1), %%rdx;" -- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" - " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);" - " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - /* Compute src1[2] * src2 */ - " 
movq 16(%1), %%rdx;" -- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" - " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);" - " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - /* Compute src1[3] * src2 */ - " movq 24(%1), %%rdx;" -- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" - " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);" - " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;" -@@ -200,7 +200,7 @@ static inline void fmul(u64 *out, const - /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ - " mov $38, %%rdx;" - " mulxq 32(%1), %%r8, %%r13;" -- " xor %3, %3;" -+ " xor %k3, %k3;" - " adoxq 0(%1), %%r8;" - " mulxq 40(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" -@@ -246,28 +246,28 @@ static inline void fmul2(u64 *out, const - - /* Compute src1[0] * src2 */ - " movq 0(%1), %%rdx;" -- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 0(%0);" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " movq %%r8, 0(%0);" - " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 8(%0);" - " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" - /* Compute src1[1] * src2 */ - " movq 8(%1), %%rdx;" -- " mulxq 0(%3), %%r8, 
%%r9;" " xor %%r10, %%r10;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 8(%0), %%r8;" " movq %%r8, 8(%0);" - " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 16(%0);" - " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - /* Compute src1[2] * src2 */ - " movq 16(%1), %%rdx;" -- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 16(%0), %%r8;" " movq %%r8, 16(%0);" - " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 24(%0);" - " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - /* Compute src1[3] * src2 */ - " movq 24(%1), %%rdx;" -- " mulxq 0(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" -+ " mulxq 0(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 24(%0), %%r8;" " movq %%r8, 24(%0);" - " mulxq 8(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 32(%0);" - " mulxq 16(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 40(%0);" " mov $0, %%r8;" - " mulxq 24(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 48(%0);" " mov $0, %%rax;" -@@ -277,29 +277,29 @@ static inline void fmul2(u64 *out, const - - /* Compute src1[0] * src2 */ - " movq 32(%1), %%rdx;" -- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " movq %%r8, 64(%0);" -- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%0);" -+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" 
" movq %%r8, 64(%0);" -+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " movq %%r10, 72(%0);" - " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" - " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" - /* Compute src1[1] * src2 */ - " movq 40(%1), %%rdx;" -- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 72(%0), %%r8;" " movq %%r8, 72(%0);" -- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 80(%0);" -+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 72(%0), %%r8;" " movq %%r8, 72(%0);" -+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 80(%0);" - " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - /* Compute src1[2] * src2 */ - " movq 48(%1), %%rdx;" -- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 80(%0), %%r8;" " movq %%r8, 80(%0);" -- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 88(%0);" -+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 80(%0), %%r8;" " movq %%r8, 80(%0);" -+ " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 88(%0);" - " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " mov $0, %%r8;" - " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" - /* Compute src1[3] * src2 */ - " movq 56(%1), %%rdx;" -- " mulxq 32(%3), %%r8, %%r9;" " xor %%r10, %%r10;" " adcxq 88(%0), %%r8;" " movq %%r8, 88(%0);" -- " mulxq 40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 96(%0);" -+ " mulxq 32(%3), %%r8, %%r9;" " xor %%r10d, %%r10d;" " adcxq 88(%0), %%r8;" " movq %%r8, 88(%0);" -+ " mulxq 
40(%3), %%r10, %%r11;" " adox %%r9, %%r10;" " adcx %%rbx, %%r10;" " movq %%r10, 96(%0);" - " mulxq 48(%3), %%rbx, %%r13;" " adox %%r11, %%rbx;" " adcx %%r14, %%rbx;" " movq %%rbx, 104(%0);" " mov $0, %%r8;" - " mulxq 56(%3), %%r14, %%rdx;" " adox %%r13, %%r14;" " adcx %%rax, %%r14;" " movq %%r14, 112(%0);" " mov $0, %%rax;" - " adox %%rdx, %%rax;" " adcx %%r8, %%rax;" " movq %%rax, 120(%0);" -@@ -312,7 +312,7 @@ static inline void fmul2(u64 *out, const - /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ - " mov $38, %%rdx;" - " mulxq 32(%1), %%r8, %%r13;" -- " xor %3, %3;" -+ " xor %k3, %k3;" - " adoxq 0(%1), %%r8;" - " mulxq 40(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" -@@ -345,7 +345,7 @@ static inline void fmul2(u64 *out, const - /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ - " mov $38, %%rdx;" - " mulxq 96(%1), %%r8, %%r13;" -- " xor %3, %3;" -+ " xor %k3, %k3;" - " adoxq 64(%1), %%r8;" - " mulxq 104(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" -@@ -516,7 +516,7 @@ static inline void fsqr(u64 *out, const - - /* Step 1: Compute all partial products */ - " movq 0(%1), %%rdx;" /* f[0] */ -- " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */ -+ " mulxq 8(%1), %%r8, %%r14;" " xor %%r15d, %%r15d;" /* f[1]*f[0] */ - " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ - " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ - " movq 24(%1), %%rdx;" /* f[3] */ -@@ -526,7 +526,7 @@ static inline void fsqr(u64 *out, const - " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ - - /* Step 2: Compute two parallel carry chains */ -- " xor %%r15, %%r15;" -+ " xor %%r15d, %%r15d;" - " adox %%rax, %%r10;" - " adcx %%r8, %%r8;" - " adox %%rcx, %%r11;" -@@ -563,7 +563,7 @@ static inline void fsqr(u64 *out, const - /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ - " mov $38, %%rdx;" - " mulxq 32(%1), %%r8, %%r13;" -- " xor %%rcx, %%rcx;" -+ " xor %%ecx, %%ecx;" - " adoxq 0(%1), %%r8;" - " mulxq 
40(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" -@@ -607,7 +607,7 @@ static inline void fsqr2(u64 *out, const - asm volatile( - /* Step 1: Compute all partial products */ - " movq 0(%1), %%rdx;" /* f[0] */ -- " mulxq 8(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */ -+ " mulxq 8(%1), %%r8, %%r14;" " xor %%r15d, %%r15d;" /* f[1]*f[0] */ - " mulxq 16(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ - " mulxq 24(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ - " movq 24(%1), %%rdx;" /* f[3] */ -@@ -617,7 +617,7 @@ static inline void fsqr2(u64 *out, const - " mulxq 16(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ - - /* Step 2: Compute two parallel carry chains */ -- " xor %%r15, %%r15;" -+ " xor %%r15d, %%r15d;" - " adox %%rax, %%r10;" - " adcx %%r8, %%r8;" - " adox %%rcx, %%r11;" -@@ -647,7 +647,7 @@ static inline void fsqr2(u64 *out, const - - /* Step 1: Compute all partial products */ - " movq 32(%1), %%rdx;" /* f[0] */ -- " mulxq 40(%1), %%r8, %%r14;" " xor %%r15, %%r15;" /* f[1]*f[0] */ -+ " mulxq 40(%1), %%r8, %%r14;" " xor %%r15d, %%r15d;" /* f[1]*f[0] */ - " mulxq 48(%1), %%r9, %%r10;" " adcx %%r14, %%r9;" /* f[2]*f[0] */ - " mulxq 56(%1), %%rax, %%rcx;" " adcx %%rax, %%r10;" /* f[3]*f[0] */ - " movq 56(%1), %%rdx;" /* f[3] */ -@@ -657,7 +657,7 @@ static inline void fsqr2(u64 *out, const - " mulxq 48(%1), %%rax, %%rcx;" " mov $0, %%r14;" /* f[2]*f[1] */ - - /* Step 2: Compute two parallel carry chains */ -- " xor %%r15, %%r15;" -+ " xor %%r15d, %%r15d;" - " adox %%rax, %%r10;" - " adcx %%r8, %%r8;" - " adox %%rcx, %%r11;" -@@ -692,7 +692,7 @@ static inline void fsqr2(u64 *out, const - /* Step 1: Compute dst + carry == tmp_hi * 38 + tmp_lo */ - " mov $38, %%rdx;" - " mulxq 32(%1), %%r8, %%r13;" -- " xor %%rcx, %%rcx;" -+ " xor %%ecx, %%ecx;" - " adoxq 0(%1), %%r8;" - " mulxq 40(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" -@@ -725,7 +725,7 @@ static inline void fsqr2(u64 *out, const - /* Step 1: Compute dst + carry == tmp_hi * 38 + 
tmp_lo */ - " mov $38, %%rdx;" - " mulxq 96(%1), %%r8, %%r13;" -- " xor %%rcx, %%rcx;" -+ " xor %%ecx, %%ecx;" - " adoxq 64(%1), %%r8;" - " mulxq 104(%1), %%r9, %%rbx;" - " adcx %%r13, %%r9;" diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0064-crypto-poly1305-x86_64-Use-XORL-r32-32.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0064-crypto-poly1305-x86_64-Use-XORL-r32-32.patch deleted file mode 100644 index 4fcaa1eb7..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0064-crypto-poly1305-x86_64-Use-XORL-r32-32.patch +++ /dev/null @@ -1,59 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Uros Bizjak -Date: Thu, 27 Aug 2020 19:38:31 +0200 -Subject: [PATCH] crypto: poly1305-x86_64 - Use XORL r32,32 - -commit 7dfd1e01b3dfc13431b1b25720cf2692a7e111ef upstream. - -x86_64 zero extends 32bit operations, so for 64bit operands, -XORL r32,r32 is functionally equal to XORQ r64,r64, but avoids -a REX prefix byte when legacy registers are used. - -Signed-off-by: Uros Bizjak -Cc: Herbert Xu -Cc: "David S. Miller" -Acked-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/x86/crypto/poly1305-x86_64-cryptogams.pl | 8 ++++---- - 1 file changed, 4 insertions(+), 4 deletions(-) - ---- a/arch/x86/crypto/poly1305-x86_64-cryptogams.pl -+++ b/arch/x86/crypto/poly1305-x86_64-cryptogams.pl -@@ -246,7 +246,7 @@ $code.=<<___ if (!$kernel); - ___ - &declare_function("poly1305_init_x86_64", 32, 3); - $code.=<<___; -- xor %rax,%rax -+ xor %eax,%eax - mov %rax,0($ctx) # initialize hash value - mov %rax,8($ctx) - mov %rax,16($ctx) -@@ -2869,7 +2869,7 @@ $code.=<<___; - .type poly1305_init_base2_44,\@function,3 - .align 32 - poly1305_init_base2_44: -- xor %rax,%rax -+ xor %eax,%eax - mov %rax,0($ctx) # initialize hash value - mov %rax,8($ctx) - mov %rax,16($ctx) -@@ -3963,7 +3963,7 @@ xor128_decrypt_n_pad: - mov \$16,$len - sub %r10,$len - xor %eax,%eax -- xor %r11,%r11 -+ xor %r11d,%r11d - .Loop_dec_byte: - mov ($inp,$otp),%r11b - mov ($otp),%al -@@ -4101,7 +4101,7 @@ avx_handler: - .long 0xa548f3fc # cld; rep movsq - - mov $disp,%rsi -- xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER -+ xor %ecx,%ecx # arg1, UNW_FLAG_NHANDLER - mov 8(%rsi),%rdx # arg2, disp->ImageBase - mov 0(%rsi),%r8 # arg3, disp->ControlPc - mov 16(%rsi),%r9 # arg4, disp->FunctionEntry diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0065-crypto-x86-poly1305-Remove-assignments-with-no-effec.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0065-crypto-x86-poly1305-Remove-assignments-with-no-effec.patch deleted file mode 100644 index ee64bfe1f..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0065-crypto-x86-poly1305-Remove-assignments-with-no-effec.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Herbert Xu -Date: Thu, 24 Sep 2020 13:29:04 +1000 -Subject: [PATCH] crypto: x86/poly1305 - Remove assignments with no effect - -commit 4a0c1de64bf9d9027a6f19adfba89fc27893db23 upstream. - -This patch removes a few ineffectual assignments from the function -crypto_poly1305_setdctxkey. 
- -Reported-by: kernel test robot -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/x86/crypto/poly1305_glue.c | 3 --- - 1 file changed, 3 deletions(-) - ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -157,9 +157,6 @@ static unsigned int crypto_poly1305_setd - dctx->s[1] = get_unaligned_le32(&inp[4]); - dctx->s[2] = get_unaligned_le32(&inp[8]); - dctx->s[3] = get_unaligned_le32(&inp[12]); -- inp += POLY1305_BLOCK_SIZE; -- len -= POLY1305_BLOCK_SIZE; -- acc += POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0066-crypto-x86-poly1305-add-back-a-needed-assignment.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0066-crypto-x86-poly1305-add-back-a-needed-assignment.patch deleted file mode 100644 index dce8bb912..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0066-crypto-x86-poly1305-add-back-a-needed-assignment.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Eric Biggers -Date: Fri, 23 Oct 2020 15:27:48 -0700 -Subject: [PATCH] crypto: x86/poly1305 - add back a needed assignment - -commit c3a98c3ad5c0dc60a1ac66bf91147a3f39cac96b upstream. - -One of the assignments that was removed by commit 4a0c1de64bf9 ("crypto: -x86/poly1305 - Remove assignments with no effect") is actually needed, -since it affects the return value. - -This fixes the following crypto self-test failure: - - alg: shash: poly1305-simd test failed (wrong result) on test vector 2, cfg="init+update+final aligned buffer" - -Fixes: 4a0c1de64bf9 ("crypto: x86/poly1305 - Remove assignments with no effect") -Signed-off-by: Eric Biggers -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - arch/x86/crypto/poly1305_glue.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -157,6 +157,7 @@ static unsigned int crypto_poly1305_setd - dctx->s[1] = get_unaligned_le32(&inp[4]); - dctx->s[2] = get_unaligned_le32(&inp[8]); - dctx->s[3] = get_unaligned_le32(&inp[12]); -+ acc += POLY1305_BLOCK_SIZE; - dctx->sset = true; - } - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0067-crypto-Kconfig-CRYPTO_MANAGER_EXTRA_TESTS-requires-t.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0067-crypto-Kconfig-CRYPTO_MANAGER_EXTRA_TESTS-requires-t.patch deleted file mode 100644 index 31c47df4b..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0067-crypto-Kconfig-CRYPTO_MANAGER_EXTRA_TESTS-requires-t.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 2 Nov 2020 14:48:15 +0100 -Subject: [PATCH] crypto: Kconfig - CRYPTO_MANAGER_EXTRA_TESTS requires the - manager - -commit 6569e3097f1c4a490bdf2b23d326855e04942dfd upstream. - -The extra tests in the manager actually require the manager to be -selected too. Otherwise the linker gives errors like: - -ld: arch/x86/crypto/chacha_glue.o: in function `chacha_simd_stream_xor': -chacha_glue.c:(.text+0x422): undefined reference to `crypto_simd_disabled_for_test' - -Fixes: 2343d1529aff ("crypto: Kconfig - allow tests to be disabled when manager is disabled") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. 
Donenfeld ---- - crypto/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -145,7 +145,7 @@ config CRYPTO_MANAGER_DISABLE_TESTS - - config CRYPTO_MANAGER_EXTRA_TESTS - bool "Enable extra run-time crypto self tests" -- depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS -+ depends on DEBUG_KERNEL && !CRYPTO_MANAGER_DISABLE_TESTS && CRYPTO_MANAGER - help - Enable extra run-time self tests of registered crypto algorithms, - including randomized fuzz tests. diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0068-crypto-arm-chacha-neon-optimize-for-non-block-size-m.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0068-crypto-arm-chacha-neon-optimize-for-non-block-size-m.patch deleted file mode 100644 index b31b8d9a0..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0068-crypto-arm-chacha-neon-optimize-for-non-block-size-m.patch +++ /dev/null @@ -1,272 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Tue, 3 Nov 2020 17:28:09 +0100 -Subject: [PATCH] crypto: arm/chacha-neon - optimize for non-block size - multiples - -commit 86cd97ec4b943af35562a74688bc4e909b32c3d1 upstream. - -The current NEON based ChaCha implementation for ARM is optimized for -multiples of 4x the ChaCha block size (64 bytes). This makes sense for -block encryption, but given that ChaCha is also often used in the -context of networking, it makes sense to consider arbitrary length -inputs as well. - -For example, WireGuard typically uses 1420 byte packets, and performing -ChaCha encryption involves 5 invocations of chacha_4block_xor_neon() -and 3 invocations of chacha_block_xor_neon(), where the last one also -involves a memcpy() using a buffer on the stack to process the final -chunk of 1420 % 64 == 12 bytes. 
- -Let's optimize for this case as well, by letting chacha_4block_xor_neon() -deal with any input size between 64 and 256 bytes, using NEON permutation -instructions and overlapping loads and stores. This way, the 140 byte -tail of a 1420 byte input buffer can simply be processed in one go. - -This results in the following performance improvements for 1420 byte -blocks, without significant impact on power-of-2 input sizes. (Note -that Raspberry Pi is widely used in combination with a 32-bit kernel, -even though the core is 64-bit capable) - - Cortex-A8 (BeagleBone) : 7% - Cortex-A15 (Calxeda Midway) : 21% - Cortex-A53 (Raspberry Pi 3) : 3% - Cortex-A72 (Raspberry Pi 4) : 19% - -Cc: Eric Biggers -Cc: "Jason A . Donenfeld" -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm/crypto/chacha-glue.c | 34 +++++------ - arch/arm/crypto/chacha-neon-core.S | 97 +++++++++++++++++++++++++++--- - 2 files changed, 107 insertions(+), 24 deletions(-) - ---- a/arch/arm/crypto/chacha-glue.c -+++ b/arch/arm/crypto/chacha-glue.c -@@ -23,7 +23,7 @@ - asmlinkage void chacha_block_xor_neon(const u32 *state, u8 *dst, const u8 *src, - int nrounds); - asmlinkage void chacha_4block_xor_neon(const u32 *state, u8 *dst, const u8 *src, -- int nrounds); -+ int nrounds, unsigned int nbytes); - asmlinkage void hchacha_block_arm(const u32 *state, u32 *out, int nrounds); - asmlinkage void hchacha_block_neon(const u32 *state, u32 *out, int nrounds); - -@@ -42,24 +42,24 @@ static void chacha_doneon(u32 *state, u8 - { - u8 buf[CHACHA_BLOCK_SIZE]; - -- while (bytes >= CHACHA_BLOCK_SIZE * 4) { -- chacha_4block_xor_neon(state, dst, src, nrounds); -- bytes -= CHACHA_BLOCK_SIZE * 4; -- src += CHACHA_BLOCK_SIZE * 4; -- dst += CHACHA_BLOCK_SIZE * 4; -- state[12] += 4; -- } -- while (bytes >= CHACHA_BLOCK_SIZE) { -- chacha_block_xor_neon(state, dst, src, nrounds); -- bytes -= CHACHA_BLOCK_SIZE; -- src += CHACHA_BLOCK_SIZE; -- dst += CHACHA_BLOCK_SIZE; -- 
state[12]++; -+ while (bytes > CHACHA_BLOCK_SIZE) { -+ unsigned int l = min(bytes, CHACHA_BLOCK_SIZE * 4U); -+ -+ chacha_4block_xor_neon(state, dst, src, nrounds, l); -+ bytes -= l; -+ src += l; -+ dst += l; -+ state[12] += DIV_ROUND_UP(l, CHACHA_BLOCK_SIZE); - } - if (bytes) { -- memcpy(buf, src, bytes); -- chacha_block_xor_neon(state, buf, buf, nrounds); -- memcpy(dst, buf, bytes); -+ const u8 *s = src; -+ u8 *d = dst; -+ -+ if (bytes != CHACHA_BLOCK_SIZE) -+ s = d = memcpy(buf, src, bytes); -+ chacha_block_xor_neon(state, d, s, nrounds); -+ if (d != dst) -+ memcpy(dst, buf, bytes); - } - } - ---- a/arch/arm/crypto/chacha-neon-core.S -+++ b/arch/arm/crypto/chacha-neon-core.S -@@ -47,6 +47,7 @@ - */ - - #include -+#include - - .text - .fpu neon -@@ -205,7 +206,7 @@ ENDPROC(hchacha_block_neon) - - .align 5 - ENTRY(chacha_4block_xor_neon) -- push {r4-r5} -+ push {r4, lr} - mov r4, sp // preserve the stack pointer - sub ip, sp, #0x20 // allocate a 32 byte buffer - bic ip, ip, #0x1f // aligned to 32 bytes -@@ -229,10 +230,10 @@ ENTRY(chacha_4block_xor_neon) - vld1.32 {q0-q1}, [r0] - vld1.32 {q2-q3}, [ip] - -- adr r5, .Lctrinc -+ adr lr, .Lctrinc - vdup.32 q15, d7[1] - vdup.32 q14, d7[0] -- vld1.32 {q4}, [r5, :128] -+ vld1.32 {q4}, [lr, :128] - vdup.32 q13, d6[1] - vdup.32 q12, d6[0] - vdup.32 q11, d5[1] -@@ -455,7 +456,7 @@ ENTRY(chacha_4block_xor_neon) - - // Re-interleave the words in the first two rows of each block (x0..7). - // Also add the counter values 0-3 to x12[0-3]. -- vld1.32 {q8}, [r5, :128] // load counter values 0-3 -+ vld1.32 {q8}, [lr, :128] // load counter values 0-3 - vzip.32 q0, q1 // => (0 1 0 1) (0 1 0 1) - vzip.32 q2, q3 // => (2 3 2 3) (2 3 2 3) - vzip.32 q4, q5 // => (4 5 4 5) (4 5 4 5) -@@ -493,6 +494,8 @@ ENTRY(chacha_4block_xor_neon) - - // Re-interleave the words in the last two rows of each block (x8..15). 
- vld1.32 {q8-q9}, [sp, :256] -+ mov sp, r4 // restore original stack pointer -+ ldr r4, [r4, #8] // load number of bytes - vzip.32 q12, q13 // => (12 13 12 13) (12 13 12 13) - vzip.32 q14, q15 // => (14 15 14 15) (14 15 14 15) - vzip.32 q8, q9 // => (8 9 8 9) (8 9 8 9) -@@ -520,41 +523,121 @@ ENTRY(chacha_4block_xor_neon) - // XOR the rest of the data with the keystream - - vld1.8 {q0-q1}, [r2]! -+ subs r4, r4, #96 - veor q0, q0, q8 - veor q1, q1, q12 -+ ble .Lle96 - vst1.8 {q0-q1}, [r1]! - - vld1.8 {q0-q1}, [r2]! -+ subs r4, r4, #32 - veor q0, q0, q2 - veor q1, q1, q6 -+ ble .Lle128 - vst1.8 {q0-q1}, [r1]! - - vld1.8 {q0-q1}, [r2]! -+ subs r4, r4, #32 - veor q0, q0, q10 - veor q1, q1, q14 -+ ble .Lle160 - vst1.8 {q0-q1}, [r1]! - - vld1.8 {q0-q1}, [r2]! -+ subs r4, r4, #32 - veor q0, q0, q4 - veor q1, q1, q5 -+ ble .Lle192 - vst1.8 {q0-q1}, [r1]! - - vld1.8 {q0-q1}, [r2]! -+ subs r4, r4, #32 - veor q0, q0, q9 - veor q1, q1, q13 -+ ble .Lle224 - vst1.8 {q0-q1}, [r1]! - - vld1.8 {q0-q1}, [r2]! -+ subs r4, r4, #32 - veor q0, q0, q3 - veor q1, q1, q7 -+ blt .Llt256 -+.Lout: - vst1.8 {q0-q1}, [r1]! - - vld1.8 {q0-q1}, [r2] -- mov sp, r4 // restore original stack pointer - veor q0, q0, q11 - veor q1, q1, q15 - vst1.8 {q0-q1}, [r1] - -- pop {r4-r5} -- bx lr -+ pop {r4, pc} -+ -+.Lle192: -+ vmov q4, q9 -+ vmov q5, q13 -+ -+.Lle160: -+ // nothing to do -+ -+.Lfinalblock: -+ // Process the final block if processing less than 4 full blocks. -+ // Entered with 32 bytes of ChaCha cipher stream in q4-q5, and the -+ // previous 32 byte output block that still needs to be written at -+ // [r1] in q0-q1. 
-+ beq .Lfullblock -+ -+.Lpartialblock: -+ adr lr, .Lpermute + 32 -+ add r2, r2, r4 -+ add lr, lr, r4 -+ add r4, r4, r1 -+ -+ vld1.8 {q2-q3}, [lr] -+ vld1.8 {q6-q7}, [r2] -+ -+ add r4, r4, #32 -+ -+ vtbl.8 d4, {q4-q5}, d4 -+ vtbl.8 d5, {q4-q5}, d5 -+ vtbl.8 d6, {q4-q5}, d6 -+ vtbl.8 d7, {q4-q5}, d7 -+ -+ veor q6, q6, q2 -+ veor q7, q7, q3 -+ -+ vst1.8 {q6-q7}, [r4] // overlapping stores -+ vst1.8 {q0-q1}, [r1] -+ pop {r4, pc} -+ -+.Lfullblock: -+ vmov q11, q4 -+ vmov q15, q5 -+ b .Lout -+.Lle96: -+ vmov q4, q2 -+ vmov q5, q6 -+ b .Lfinalblock -+.Lle128: -+ vmov q4, q10 -+ vmov q5, q14 -+ b .Lfinalblock -+.Lle224: -+ vmov q4, q3 -+ vmov q5, q7 -+ b .Lfinalblock -+.Llt256: -+ vmov q4, q11 -+ vmov q5, q15 -+ b .Lpartialblock - ENDPROC(chacha_4block_xor_neon) -+ -+ .align L1_CACHE_SHIFT -+.Lpermute: -+ .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 -+ .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f -+ .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 -+ .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f -+ .byte 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 -+ .byte 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f -+ .byte 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17 -+ .byte 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0069-crypto-arm64-chacha-simplify-tail-block-handling.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0069-crypto-arm64-chacha-simplify-tail-block-handling.patch deleted file mode 100644 index 42e9048b9..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0069-crypto-arm64-chacha-simplify-tail-block-handling.patch +++ /dev/null @@ -1,324 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Fri, 6 Nov 2020 17:39:38 +0100 -Subject: [PATCH] crypto: arm64/chacha - simplify tail block handling - -commit c4fc6328d6c67690a7e6e03f43a5a976a13120ef upstream. 
- -Based on lessons learnt from optimizing the 32-bit version of this driver, -we can simplify the arm64 version considerably, by reordering the final -two stores when the last block is not a multiple of 64 bytes. This removes -the need to use permutation instructions to calculate the elements that are -clobbered by the final overlapping store, given that the store of the -penultimate block now follows it, and that one carries the correct values -for those elements already. - -While at it, simplify the overlapping loads as well, by calculating the -address of the final overlapping load upfront, and switching to this -address for every load that would otherwise extend past the end of the -source buffer. - -There is no impact on performance, but the resulting code is substantially -smaller and easier to follow. - -Cc: Eric Biggers -Cc: "Jason A . Donenfeld" -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm64/crypto/chacha-neon-core.S | 193 ++++++++++----------------- - 1 file changed, 69 insertions(+), 124 deletions(-) - ---- a/arch/arm64/crypto/chacha-neon-core.S -+++ b/arch/arm64/crypto/chacha-neon-core.S -@@ -195,7 +195,6 @@ ENTRY(chacha_4block_xor_neon) - adr_l x10, .Lpermute - and x5, x4, #63 - add x10, x10, x5 -- add x11, x10, #64 - - // - // This function encrypts four consecutive ChaCha blocks by loading -@@ -645,11 +644,11 @@ CPU_BE( rev a15, a15 ) - zip2 v31.4s, v14.4s, v15.4s - eor a15, a15, w9 - -- mov x3, #64 -+ add x3, x2, x4 -+ sub x3, x3, #128 // start of last block -+ - subs x5, x4, #128 -- add x6, x5, x2 -- csel x3, x3, xzr, ge -- csel x2, x2, x6, ge -+ csel x2, x2, x3, ge - - // interleave 64-bit words in state n, n+2 - zip1 v0.2d, v16.2d, v18.2d -@@ -658,13 +657,10 @@ CPU_BE( rev a15, a15 ) - zip1 v8.2d, v17.2d, v19.2d - zip2 v12.2d, v17.2d, v19.2d - stp a2, a3, [x1, #-56] -- ld1 {v16.16b-v19.16b}, [x2], x3 - - subs x6, x4, #192 -- ccmp x3, xzr, #4, lt -- add x7, x6, x2 -- csel x3, x3, 
xzr, eq -- csel x2, x2, x7, eq -+ ld1 {v16.16b-v19.16b}, [x2], #64 -+ csel x2, x2, x3, ge - - zip1 v1.2d, v20.2d, v22.2d - zip2 v5.2d, v20.2d, v22.2d -@@ -672,13 +668,10 @@ CPU_BE( rev a15, a15 ) - zip1 v9.2d, v21.2d, v23.2d - zip2 v13.2d, v21.2d, v23.2d - stp a6, a7, [x1, #-40] -- ld1 {v20.16b-v23.16b}, [x2], x3 - - subs x7, x4, #256 -- ccmp x3, xzr, #4, lt -- add x8, x7, x2 -- csel x3, x3, xzr, eq -- csel x2, x2, x8, eq -+ ld1 {v20.16b-v23.16b}, [x2], #64 -+ csel x2, x2, x3, ge - - zip1 v2.2d, v24.2d, v26.2d - zip2 v6.2d, v24.2d, v26.2d -@@ -686,12 +679,10 @@ CPU_BE( rev a15, a15 ) - zip1 v10.2d, v25.2d, v27.2d - zip2 v14.2d, v25.2d, v27.2d - stp a10, a11, [x1, #-24] -- ld1 {v24.16b-v27.16b}, [x2], x3 - - subs x8, x4, #320 -- ccmp x3, xzr, #4, lt -- add x9, x8, x2 -- csel x2, x2, x9, eq -+ ld1 {v24.16b-v27.16b}, [x2], #64 -+ csel x2, x2, x3, ge - - zip1 v3.2d, v28.2d, v30.2d - zip2 v7.2d, v28.2d, v30.2d -@@ -699,151 +690,105 @@ CPU_BE( rev a15, a15 ) - zip1 v11.2d, v29.2d, v31.2d - zip2 v15.2d, v29.2d, v31.2d - stp a14, a15, [x1, #-8] -+ -+ tbnz x5, #63, .Lt128 - ld1 {v28.16b-v31.16b}, [x2] - - // xor with corresponding input, write to output -- tbnz x5, #63, 0f - eor v16.16b, v16.16b, v0.16b - eor v17.16b, v17.16b, v1.16b - eor v18.16b, v18.16b, v2.16b - eor v19.16b, v19.16b, v3.16b -- st1 {v16.16b-v19.16b}, [x1], #64 -- cbz x5, .Lout - -- tbnz x6, #63, 1f -+ tbnz x6, #63, .Lt192 -+ - eor v20.16b, v20.16b, v4.16b - eor v21.16b, v21.16b, v5.16b - eor v22.16b, v22.16b, v6.16b - eor v23.16b, v23.16b, v7.16b -- st1 {v20.16b-v23.16b}, [x1], #64 -- cbz x6, .Lout - -- tbnz x7, #63, 2f -+ st1 {v16.16b-v19.16b}, [x1], #64 -+ tbnz x7, #63, .Lt256 -+ - eor v24.16b, v24.16b, v8.16b - eor v25.16b, v25.16b, v9.16b - eor v26.16b, v26.16b, v10.16b - eor v27.16b, v27.16b, v11.16b -- st1 {v24.16b-v27.16b}, [x1], #64 -- cbz x7, .Lout - -- tbnz x8, #63, 3f -+ st1 {v20.16b-v23.16b}, [x1], #64 -+ tbnz x8, #63, .Lt320 -+ - eor v28.16b, v28.16b, v12.16b - eor v29.16b, v29.16b, v13.16b 
- eor v30.16b, v30.16b, v14.16b - eor v31.16b, v31.16b, v15.16b -+ -+ st1 {v24.16b-v27.16b}, [x1], #64 - st1 {v28.16b-v31.16b}, [x1] - - .Lout: frame_pop - ret - -- // fewer than 128 bytes of in/output --0: ld1 {v8.16b}, [x10] -- ld1 {v9.16b}, [x11] -- movi v10.16b, #16 -- sub x2, x1, #64 -- add x1, x1, x5 -- ld1 {v16.16b-v19.16b}, [x2] -- tbl v4.16b, {v0.16b-v3.16b}, v8.16b -- tbx v20.16b, {v16.16b-v19.16b}, v9.16b -- add v8.16b, v8.16b, v10.16b -- add v9.16b, v9.16b, v10.16b -- tbl v5.16b, {v0.16b-v3.16b}, v8.16b -- tbx v21.16b, {v16.16b-v19.16b}, v9.16b -- add v8.16b, v8.16b, v10.16b -- add v9.16b, v9.16b, v10.16b -- tbl v6.16b, {v0.16b-v3.16b}, v8.16b -- tbx v22.16b, {v16.16b-v19.16b}, v9.16b -- add v8.16b, v8.16b, v10.16b -- add v9.16b, v9.16b, v10.16b -- tbl v7.16b, {v0.16b-v3.16b}, v8.16b -- tbx v23.16b, {v16.16b-v19.16b}, v9.16b -- -- eor v20.16b, v20.16b, v4.16b -- eor v21.16b, v21.16b, v5.16b -- eor v22.16b, v22.16b, v6.16b -- eor v23.16b, v23.16b, v7.16b -- st1 {v20.16b-v23.16b}, [x1] -- b .Lout -- - // fewer than 192 bytes of in/output --1: ld1 {v8.16b}, [x10] -- ld1 {v9.16b}, [x11] -- movi v10.16b, #16 -- add x1, x1, x6 -- tbl v0.16b, {v4.16b-v7.16b}, v8.16b -- tbx v20.16b, {v16.16b-v19.16b}, v9.16b -- add v8.16b, v8.16b, v10.16b -- add v9.16b, v9.16b, v10.16b -- tbl v1.16b, {v4.16b-v7.16b}, v8.16b -- tbx v21.16b, {v16.16b-v19.16b}, v9.16b -- add v8.16b, v8.16b, v10.16b -- add v9.16b, v9.16b, v10.16b -- tbl v2.16b, {v4.16b-v7.16b}, v8.16b -- tbx v22.16b, {v16.16b-v19.16b}, v9.16b -- add v8.16b, v8.16b, v10.16b -- add v9.16b, v9.16b, v10.16b -- tbl v3.16b, {v4.16b-v7.16b}, v8.16b -- tbx v23.16b, {v16.16b-v19.16b}, v9.16b -- -- eor v20.16b, v20.16b, v0.16b -- eor v21.16b, v21.16b, v1.16b -- eor v22.16b, v22.16b, v2.16b -- eor v23.16b, v23.16b, v3.16b -- st1 {v20.16b-v23.16b}, [x1] -+.Lt192: cbz x5, 1f // exactly 128 bytes? 
-+ ld1 {v28.16b-v31.16b}, [x10] -+ add x5, x5, x1 -+ tbl v28.16b, {v4.16b-v7.16b}, v28.16b -+ tbl v29.16b, {v4.16b-v7.16b}, v29.16b -+ tbl v30.16b, {v4.16b-v7.16b}, v30.16b -+ tbl v31.16b, {v4.16b-v7.16b}, v31.16b -+ -+0: eor v20.16b, v20.16b, v28.16b -+ eor v21.16b, v21.16b, v29.16b -+ eor v22.16b, v22.16b, v30.16b -+ eor v23.16b, v23.16b, v31.16b -+ st1 {v20.16b-v23.16b}, [x5] // overlapping stores -+1: st1 {v16.16b-v19.16b}, [x1] - b .Lout - -+ // fewer than 128 bytes of in/output -+.Lt128: ld1 {v28.16b-v31.16b}, [x10] -+ add x5, x5, x1 -+ sub x1, x1, #64 -+ tbl v28.16b, {v0.16b-v3.16b}, v28.16b -+ tbl v29.16b, {v0.16b-v3.16b}, v29.16b -+ tbl v30.16b, {v0.16b-v3.16b}, v30.16b -+ tbl v31.16b, {v0.16b-v3.16b}, v31.16b -+ ld1 {v16.16b-v19.16b}, [x1] // reload first output block -+ b 0b -+ - // fewer than 256 bytes of in/output --2: ld1 {v4.16b}, [x10] -- ld1 {v5.16b}, [x11] -- movi v6.16b, #16 -- add x1, x1, x7 -+.Lt256: cbz x6, 2f // exactly 192 bytes? -+ ld1 {v4.16b-v7.16b}, [x10] -+ add x6, x6, x1 - tbl v0.16b, {v8.16b-v11.16b}, v4.16b -- tbx v24.16b, {v20.16b-v23.16b}, v5.16b -- add v4.16b, v4.16b, v6.16b -- add v5.16b, v5.16b, v6.16b -- tbl v1.16b, {v8.16b-v11.16b}, v4.16b -- tbx v25.16b, {v20.16b-v23.16b}, v5.16b -- add v4.16b, v4.16b, v6.16b -- add v5.16b, v5.16b, v6.16b -- tbl v2.16b, {v8.16b-v11.16b}, v4.16b -- tbx v26.16b, {v20.16b-v23.16b}, v5.16b -- add v4.16b, v4.16b, v6.16b -- add v5.16b, v5.16b, v6.16b -- tbl v3.16b, {v8.16b-v11.16b}, v4.16b -- tbx v27.16b, {v20.16b-v23.16b}, v5.16b -- -- eor v24.16b, v24.16b, v0.16b -- eor v25.16b, v25.16b, v1.16b -- eor v26.16b, v26.16b, v2.16b -- eor v27.16b, v27.16b, v3.16b -- st1 {v24.16b-v27.16b}, [x1] -+ tbl v1.16b, {v8.16b-v11.16b}, v5.16b -+ tbl v2.16b, {v8.16b-v11.16b}, v6.16b -+ tbl v3.16b, {v8.16b-v11.16b}, v7.16b -+ -+ eor v28.16b, v28.16b, v0.16b -+ eor v29.16b, v29.16b, v1.16b -+ eor v30.16b, v30.16b, v2.16b -+ eor v31.16b, v31.16b, v3.16b -+ st1 {v28.16b-v31.16b}, [x6] // overlapping stores -+2: st1 
{v20.16b-v23.16b}, [x1] - b .Lout - - // fewer than 320 bytes of in/output --3: ld1 {v4.16b}, [x10] -- ld1 {v5.16b}, [x11] -- movi v6.16b, #16 -- add x1, x1, x8 -+.Lt320: cbz x7, 3f // exactly 256 bytes? -+ ld1 {v4.16b-v7.16b}, [x10] -+ add x7, x7, x1 - tbl v0.16b, {v12.16b-v15.16b}, v4.16b -- tbx v28.16b, {v24.16b-v27.16b}, v5.16b -- add v4.16b, v4.16b, v6.16b -- add v5.16b, v5.16b, v6.16b -- tbl v1.16b, {v12.16b-v15.16b}, v4.16b -- tbx v29.16b, {v24.16b-v27.16b}, v5.16b -- add v4.16b, v4.16b, v6.16b -- add v5.16b, v5.16b, v6.16b -- tbl v2.16b, {v12.16b-v15.16b}, v4.16b -- tbx v30.16b, {v24.16b-v27.16b}, v5.16b -- add v4.16b, v4.16b, v6.16b -- add v5.16b, v5.16b, v6.16b -- tbl v3.16b, {v12.16b-v15.16b}, v4.16b -- tbx v31.16b, {v24.16b-v27.16b}, v5.16b -+ tbl v1.16b, {v12.16b-v15.16b}, v5.16b -+ tbl v2.16b, {v12.16b-v15.16b}, v6.16b -+ tbl v3.16b, {v12.16b-v15.16b}, v7.16b - - eor v28.16b, v28.16b, v0.16b - eor v29.16b, v29.16b, v1.16b - eor v30.16b, v30.16b, v2.16b - eor v31.16b, v31.16b, v3.16b -- st1 {v28.16b-v31.16b}, [x1] -+ st1 {v28.16b-v31.16b}, [x7] // overlapping stores -+3: st1 {v24.16b-v27.16b}, [x1] - b .Lout - ENDPROC(chacha_4block_xor_neon) - -@@ -851,7 +796,7 @@ ENDPROC(chacha_4block_xor_neon) - .align L1_CACHE_SHIFT - .Lpermute: - .set .Li, 0 -- .rept 192 -+ .rept 128 - .byte (.Li - 64) - .set .Li, .Li + 1 - .endr diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0070-crypto-lib-chacha20poly1305-define-empty-module-exit.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0070-crypto-lib-chacha20poly1305-define-empty-module-exit.patch deleted file mode 100644 index 084ae74bf..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0070-crypto-lib-chacha20poly1305-define-empty-module-exit.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. 
Donenfeld" -Date: Fri, 15 Jan 2021 20:30:12 +0100 -Subject: [PATCH] crypto: lib/chacha20poly1305 - define empty module exit - function - -commit ac88c322d0f2917d41d13553c69e9d7f043c8b6f upstream. - -With no mod_exit function, users are unable to unload the module after -use. I'm not aware of any reason why module unloading should be -prohibited for this one, so this commit simply adds an empty exit -function. - -Reported-and-tested-by: John Donnelly -Acked-by: Ard Biesheuvel -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - lib/crypto/chacha20poly1305.c | 5 +++++ - 1 file changed, 5 insertions(+) - ---- a/lib/crypto/chacha20poly1305.c -+++ b/lib/crypto/chacha20poly1305.c -@@ -364,7 +364,12 @@ static int __init mod_init(void) - return 0; - } - -+static void __exit mod_exit(void) -+{ -+} -+ - module_init(mod_init); -+module_exit(mod_exit); - MODULE_LICENSE("GPL v2"); - MODULE_DESCRIPTION("ChaCha20Poly1305 AEAD construction"); - MODULE_AUTHOR("Jason A. Donenfeld "); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0071-crypto-arm-chacha-neon-add-missing-counter-increment.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0071-crypto-arm-chacha-neon-add-missing-counter-increment.patch deleted file mode 100644 index ea3cc802a..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0071-crypto-arm-chacha-neon-add-missing-counter-increment.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ard Biesheuvel -Date: Sun, 13 Dec 2020 15:39:29 +0100 -Subject: [PATCH] crypto: arm/chacha-neon - add missing counter increment - -commit fd16931a2f518a32753920ff20895e5cf04c8ff1 upstream. 
- -Commit 86cd97ec4b943af3 ("crypto: arm/chacha-neon - optimize for non-block -size multiples") refactored the chacha block handling in the glue code in -a way that may result in the counter increment to be omitted when calling -chacha_block_xor_neon() to process a full block. This violates the skcipher -API, which requires that the output IV is suitable for handling more input -as long as the preceding input has been presented in round multiples of the -block size. Also, the same code is exposed via the chacha library interface -whose callers may actually rely on this increment to occur even for final -blocks that are smaller than the chacha block size. - -So increment the counter after calling chacha_block_xor_neon(). - -Fixes: 86cd97ec4b943af3 ("crypto: arm/chacha-neon - optimize for non-block size multiples") -Reported-by: Eric Biggers -Signed-off-by: Ard Biesheuvel -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm/crypto/chacha-glue.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/arch/arm/crypto/chacha-glue.c -+++ b/arch/arm/crypto/chacha-glue.c -@@ -60,6 +60,7 @@ static void chacha_doneon(u32 *state, u8 - chacha_block_xor_neon(state, d, s, nrounds); - if (d != dst) - memcpy(dst, buf, bytes); -+ state[12]++; - } - } - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0072-net-WireGuard-secure-network-tunnel.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0072-net-WireGuard-secure-network-tunnel.patch deleted file mode 100644 index 9e37bbb60..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0072-net-WireGuard-secure-network-tunnel.patch +++ /dev/null @@ -1,8071 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 9 Dec 2019 00:27:34 +0100 -Subject: [PATCH] net: WireGuard secure network tunnel - -commit e7096c131e5161fa3b8e52a650d7719d2857adfd upstream. 
- -WireGuard is a layer 3 secure networking tunnel made specifically for -the kernel, that aims to be much simpler and easier to audit than IPsec. -Extensive documentation and description of the protocol and -considerations, along with formal proofs of the cryptography, are -available at: - - * https://www.wireguard.com/ - * https://www.wireguard.com/papers/wireguard.pdf - -This commit implements WireGuard as a simple network device driver, -accessible in the usual RTNL way used by virtual network drivers. It -makes use of the udp_tunnel APIs, GRO, GSO, NAPI, and the usual set of -networking subsystem APIs. It has a somewhat novel multicore queueing -system designed for maximum throughput and minimal latency of encryption -operations, but it is implemented modestly using workqueues and NAPI. -Configuration is done via generic Netlink, and following a review from -the Netlink maintainer a year ago, several high profile userspace tools -have already implemented the API. - -This commit also comes with several different tests, both in-kernel -tests and out-of-kernel tests based on network namespaces, taking profit -of the fact that sockets used by WireGuard intentionally stay in the -namespace the WireGuard interface was originally created, exactly like -the semantics of userspace tun devices. See wireguard.com/netns/ for -pictures and examples. - -The source code is fairly short, but rather than combining everything -into a single file, WireGuard is developed as cleanly separable files, -making auditing and comprehension easier. Things are laid out as -follows: - - * noise.[ch], cookie.[ch], messages.h: These implement the bulk of the - cryptographic aspects of the protocol, and are mostly data-only in - nature, taking in buffers of bytes and spitting out buffers of - bytes. They also handle reference counting for their various shared - pieces of data, like keys and key lists. 
- - * ratelimiter.[ch]: Used as an integral part of cookie.[ch] for - ratelimiting certain types of cryptographic operations in accordance - with particular WireGuard semantics. - - * allowedips.[ch], peerlookup.[ch]: The main lookup structures of - WireGuard, the former being trie-like with particular semantics, an - integral part of the design of the protocol, and the latter just - being nice helper functions around the various hashtables we use. - - * device.[ch]: Implementation of functions for the netdevice and for - rtnl, responsible for maintaining the life of a given interface and - wiring it up to the rest of WireGuard. - - * peer.[ch]: Each interface has a list of peers, with helper functions - available here for creation, destruction, and reference counting. - - * socket.[ch]: Implementation of functions related to udp_socket and - the general set of kernel socket APIs, for sending and receiving - ciphertext UDP packets, and taking care of WireGuard-specific sticky - socket routing semantics for the automatic roaming. - - * netlink.[ch]: Userspace API entry point for configuring WireGuard - peers and devices. The API has been implemented by several userspace - tools and network management utility, and the WireGuard project - distributes the basic wg(8) tool. - - * queueing.[ch]: Shared function on the rx and tx path for handling - the various queues used in the multicore algorithms. - - * send.c: Handles encrypting outgoing packets in parallel on - multiple cores, before sending them in order on a single core, via - workqueues and ring buffers. Also handles sending handshake and cookie - messages as part of the protocol, in parallel. - - * receive.c: Handles decrypting incoming packets in parallel on - multiple cores, before passing them off in order to be ingested via - the rest of the networking subsystem with GRO via the typical NAPI - poll function. Also handles receiving handshake and cookie messages - as part of the protocol, in parallel. 
- - * timers.[ch]: Uses the timer wheel to implement protocol particular - event timeouts, and gives a set of very simple event-driven entry - point functions for callers. - - * main.c, version.h: Initialization and deinitialization of the module. - - * selftest/*.h: Runtime unit tests for some of the most security - sensitive functions. - - * tools/testing/selftests/wireguard/netns.sh: Aforementioned testing - script using network namespaces. - -This commit aims to be as self-contained as possible, implementing -WireGuard as a standalone module not needing much special handling or -coordination from the network subsystem. I expect for future -optimizations to the network stack to positively improve WireGuard, and -vice-versa, but for the time being, this exists as intentionally -standalone. - -We introduce a menu option for CONFIG_WIREGUARD, as well as providing a -verbose debug log and self-tests via CONFIG_WIREGUARD_DEBUG. - -Signed-off-by: Jason A. Donenfeld -Cc: David Miller -Cc: Greg KH -Cc: Linus Torvalds -Cc: Herbert Xu -Cc: linux-crypto@vger.kernel.org -Cc: linux-kernel@vger.kernel.org -Cc: netdev@vger.kernel.org -Signed-off-by: David S. Miller -[Jason: ported to 5.4 by doing the following: - - wg_get_device_start uses genl_family_attrbuf - - trival skb_redirect_reset change from 2c64605b590e is folded in - - skb_list_walk_safe was already backported prior] -Signed-off-by: Jason A. 
Donenfeld ---- - MAINTAINERS | 8 + - drivers/net/Kconfig | 41 + - drivers/net/Makefile | 1 + - drivers/net/wireguard/Makefile | 18 + - drivers/net/wireguard/allowedips.c | 381 +++++++++ - drivers/net/wireguard/allowedips.h | 59 ++ - drivers/net/wireguard/cookie.c | 236 ++++++ - drivers/net/wireguard/cookie.h | 59 ++ - drivers/net/wireguard/device.c | 458 ++++++++++ - drivers/net/wireguard/device.h | 65 ++ - drivers/net/wireguard/main.c | 64 ++ - drivers/net/wireguard/messages.h | 128 +++ - drivers/net/wireguard/netlink.c | 648 +++++++++++++++ - drivers/net/wireguard/netlink.h | 12 + - drivers/net/wireguard/noise.c | 828 +++++++++++++++++++ - drivers/net/wireguard/noise.h | 137 +++ - drivers/net/wireguard/peer.c | 240 ++++++ - drivers/net/wireguard/peer.h | 83 ++ - drivers/net/wireguard/peerlookup.c | 221 +++++ - drivers/net/wireguard/peerlookup.h | 64 ++ - drivers/net/wireguard/queueing.c | 53 ++ - drivers/net/wireguard/queueing.h | 197 +++++ - drivers/net/wireguard/ratelimiter.c | 223 +++++ - drivers/net/wireguard/ratelimiter.h | 19 + - drivers/net/wireguard/receive.c | 595 +++++++++++++ - drivers/net/wireguard/selftest/allowedips.c | 683 +++++++++++++++ - drivers/net/wireguard/selftest/counter.c | 104 +++ - drivers/net/wireguard/selftest/ratelimiter.c | 226 +++++ - drivers/net/wireguard/send.c | 413 +++++++++ - drivers/net/wireguard/socket.c | 437 ++++++++++ - drivers/net/wireguard/socket.h | 44 + - drivers/net/wireguard/timers.c | 243 ++++++ - drivers/net/wireguard/timers.h | 31 + - drivers/net/wireguard/version.h | 1 + - include/uapi/linux/wireguard.h | 196 +++++ - tools/testing/selftests/wireguard/netns.sh | 537 ++++++++++++ - 36 files changed, 7753 insertions(+) - create mode 100644 drivers/net/wireguard/Makefile - create mode 100644 drivers/net/wireguard/allowedips.c - create mode 100644 drivers/net/wireguard/allowedips.h - create mode 100644 drivers/net/wireguard/cookie.c - create mode 100644 drivers/net/wireguard/cookie.h - create mode 100644 
drivers/net/wireguard/device.c - create mode 100644 drivers/net/wireguard/device.h - create mode 100644 drivers/net/wireguard/main.c - create mode 100644 drivers/net/wireguard/messages.h - create mode 100644 drivers/net/wireguard/netlink.c - create mode 100644 drivers/net/wireguard/netlink.h - create mode 100644 drivers/net/wireguard/noise.c - create mode 100644 drivers/net/wireguard/noise.h - create mode 100644 drivers/net/wireguard/peer.c - create mode 100644 drivers/net/wireguard/peer.h - create mode 100644 drivers/net/wireguard/peerlookup.c - create mode 100644 drivers/net/wireguard/peerlookup.h - create mode 100644 drivers/net/wireguard/queueing.c - create mode 100644 drivers/net/wireguard/queueing.h - create mode 100644 drivers/net/wireguard/ratelimiter.c - create mode 100644 drivers/net/wireguard/ratelimiter.h - create mode 100644 drivers/net/wireguard/receive.c - create mode 100644 drivers/net/wireguard/selftest/allowedips.c - create mode 100644 drivers/net/wireguard/selftest/counter.c - create mode 100644 drivers/net/wireguard/selftest/ratelimiter.c - create mode 100644 drivers/net/wireguard/send.c - create mode 100644 drivers/net/wireguard/socket.c - create mode 100644 drivers/net/wireguard/socket.h - create mode 100644 drivers/net/wireguard/timers.c - create mode 100644 drivers/net/wireguard/timers.h - create mode 100644 drivers/net/wireguard/version.h - create mode 100644 include/uapi/linux/wireguard.h - create mode 100755 tools/testing/selftests/wireguard/netns.sh - ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -17584,6 +17584,14 @@ L: linux-gpio@vger.kernel.org - S: Maintained - F: drivers/gpio/gpio-ws16c48.c - -+WIREGUARD SECURE NETWORK TUNNEL -+M: Jason A. 
Donenfeld -+S: Maintained -+F: drivers/net/wireguard/ -+F: tools/testing/selftests/wireguard/ -+L: wireguard@lists.zx2c4.com -+L: netdev@vger.kernel.org -+ - WISTRON LAPTOP BUTTON DRIVER - M: Miloslav Trmac - S: Maintained ---- a/drivers/net/Kconfig -+++ b/drivers/net/Kconfig -@@ -71,6 +71,47 @@ config DUMMY - To compile this driver as a module, choose M here: the module - will be called dummy. - -+config WIREGUARD -+ tristate "WireGuard secure network tunnel" -+ depends on NET && INET -+ depends on IPV6 || !IPV6 -+ select NET_UDP_TUNNEL -+ select DST_CACHE -+ select CRYPTO -+ select CRYPTO_LIB_CURVE25519 -+ select CRYPTO_LIB_CHACHA20POLY1305 -+ select CRYPTO_LIB_BLAKE2S -+ select CRYPTO_CHACHA20_X86_64 if X86 && 64BIT -+ select CRYPTO_POLY1305_X86_64 if X86 && 64BIT -+ select CRYPTO_BLAKE2S_X86 if X86 && 64BIT -+ select CRYPTO_CURVE25519_X86 if X86 && 64BIT -+ select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON -+ select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON -+ select CRYPTO_POLY1305_ARM if ARM -+ select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON -+ select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2 -+ select CRYPTO_POLY1305_MIPS if CPU_MIPS32 || (CPU_MIPS64 && 64BIT) -+ help -+ WireGuard is a secure, fast, and easy to use replacement for IPSec -+ that uses modern cryptography and clever networking tricks. It's -+ designed to be fairly general purpose and abstract enough to fit most -+ use cases, while at the same time remaining extremely simple to -+ configure. See www.wireguard.com for more info. -+ -+ It's safe to say Y or M here, as the driver is very lightweight and -+ is only in use when an administrator chooses to add an interface. -+ -+config WIREGUARD_DEBUG -+ bool "Debugging checks and verbose messages" -+ depends on WIREGUARD -+ help -+ This will write log messages for handshake and other events -+ that occur for a WireGuard interface. It will also perform some -+ extra validation checks and unit tests at various points. 
This is -+ only useful for debugging. -+ -+ Say N here unless you know what you're doing. -+ - config EQUALIZER - tristate "EQL (serial line load balancing) support" - ---help--- ---- a/drivers/net/Makefile -+++ b/drivers/net/Makefile -@@ -10,6 +10,7 @@ obj-$(CONFIG_BONDING) += bonding/ - obj-$(CONFIG_IPVLAN) += ipvlan/ - obj-$(CONFIG_IPVTAP) += ipvlan/ - obj-$(CONFIG_DUMMY) += dummy.o -+obj-$(CONFIG_WIREGUARD) += wireguard/ - obj-$(CONFIG_EQUALIZER) += eql.o - obj-$(CONFIG_IFB) += ifb.o - obj-$(CONFIG_MACSEC) += macsec.o ---- /dev/null -+++ b/drivers/net/wireguard/Makefile -@@ -0,0 +1,18 @@ -+ccflags-y := -O3 -+ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' -+ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG -+wireguard-y := main.o -+wireguard-y += noise.o -+wireguard-y += device.o -+wireguard-y += peer.o -+wireguard-y += timers.o -+wireguard-y += queueing.o -+wireguard-y += send.o -+wireguard-y += receive.o -+wireguard-y += socket.o -+wireguard-y += peerlookup.o -+wireguard-y += allowedips.o -+wireguard-y += ratelimiter.o -+wireguard-y += cookie.o -+wireguard-y += netlink.o -+obj-$(CONFIG_WIREGUARD) := wireguard.o ---- /dev/null -+++ b/drivers/net/wireguard/allowedips.c -@@ -0,0 +1,381 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "allowedips.h" -+#include "peer.h" -+ -+static void swap_endian(u8 *dst, const u8 *src, u8 bits) -+{ -+ if (bits == 32) { -+ *(u32 *)dst = be32_to_cpu(*(const __be32 *)src); -+ } else if (bits == 128) { -+ ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]); -+ ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]); -+ } -+} -+ -+static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src, -+ u8 cidr, u8 bits) -+{ -+ node->cidr = cidr; -+ node->bit_at_a = cidr / 8U; -+#ifdef __LITTLE_ENDIAN -+ node->bit_at_a ^= (bits / 8U - 1U) % 8U; -+#endif -+ node->bit_at_b = 7U - (cidr % 8U); -+ node->bitlen = bits; -+ memcpy(node->bits, src, bits / 8U); -+} -+#define CHOOSE_NODE(parent, key) \ -+ parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] -+ -+static void node_free_rcu(struct rcu_head *rcu) -+{ -+ kfree(container_of(rcu, struct allowedips_node, rcu)); -+} -+ -+static void push_rcu(struct allowedips_node **stack, -+ struct allowedips_node __rcu *p, unsigned int *len) -+{ -+ if (rcu_access_pointer(p)) { -+ WARN_ON(IS_ENABLED(DEBUG) && *len >= 128); -+ stack[(*len)++] = rcu_dereference_raw(p); -+ } -+} -+ -+static void root_free_rcu(struct rcu_head *rcu) -+{ -+ struct allowedips_node *node, *stack[128] = { -+ container_of(rcu, struct allowedips_node, rcu) }; -+ unsigned int len = 1; -+ -+ while (len > 0 && (node = stack[--len])) { -+ push_rcu(stack, node->bit[0], &len); -+ push_rcu(stack, node->bit[1], &len); -+ kfree(node); -+ } -+} -+ -+static void root_remove_peer_lists(struct allowedips_node *root) -+{ -+ struct allowedips_node *node, *stack[128] = { root }; -+ unsigned int len = 1; -+ -+ while (len > 0 && (node = stack[--len])) { -+ push_rcu(stack, node->bit[0], &len); -+ push_rcu(stack, node->bit[1], &len); -+ if (rcu_access_pointer(node->peer)) -+ list_del(&node->peer_list); -+ } -+} -+ -+static void walk_remove_by_peer(struct allowedips_node __rcu **top, -+ struct wg_peer *peer, struct mutex *lock) -+{ -+#define 
REF(p) rcu_access_pointer(p) -+#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock)) -+#define PUSH(p) ({ \ -+ WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \ -+ stack[len++] = p; \ -+ }) -+ -+ struct allowedips_node __rcu **stack[128], **nptr; -+ struct allowedips_node *node, *prev; -+ unsigned int len; -+ -+ if (unlikely(!peer || !REF(*top))) -+ return; -+ -+ for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) { -+ nptr = stack[len - 1]; -+ node = DEREF(nptr); -+ if (!node) { -+ --len; -+ continue; -+ } -+ if (!prev || REF(prev->bit[0]) == node || -+ REF(prev->bit[1]) == node) { -+ if (REF(node->bit[0])) -+ PUSH(&node->bit[0]); -+ else if (REF(node->bit[1])) -+ PUSH(&node->bit[1]); -+ } else if (REF(node->bit[0]) == prev) { -+ if (REF(node->bit[1])) -+ PUSH(&node->bit[1]); -+ } else { -+ if (rcu_dereference_protected(node->peer, -+ lockdep_is_held(lock)) == peer) { -+ RCU_INIT_POINTER(node->peer, NULL); -+ list_del_init(&node->peer_list); -+ if (!node->bit[0] || !node->bit[1]) { -+ rcu_assign_pointer(*nptr, DEREF( -+ &node->bit[!REF(node->bit[0])])); -+ call_rcu(&node->rcu, node_free_rcu); -+ node = DEREF(nptr); -+ } -+ } -+ --len; -+ } -+ } -+ -+#undef REF -+#undef DEREF -+#undef PUSH -+} -+ -+static unsigned int fls128(u64 a, u64 b) -+{ -+ return a ? 
fls64(a) + 64U : fls64(b); -+} -+ -+static u8 common_bits(const struct allowedips_node *node, const u8 *key, -+ u8 bits) -+{ -+ if (bits == 32) -+ return 32U - fls(*(const u32 *)node->bits ^ *(const u32 *)key); -+ else if (bits == 128) -+ return 128U - fls128( -+ *(const u64 *)&node->bits[0] ^ *(const u64 *)&key[0], -+ *(const u64 *)&node->bits[8] ^ *(const u64 *)&key[8]); -+ return 0; -+} -+ -+static bool prefix_matches(const struct allowedips_node *node, const u8 *key, -+ u8 bits) -+{ -+ /* This could be much faster if it actually just compared the common -+ * bits properly, by precomputing a mask bswap(~0 << (32 - cidr)), and -+ * the rest, but it turns out that common_bits is already super fast on -+ * modern processors, even taking into account the unfortunate bswap. -+ * So, we just inline it like this instead. -+ */ -+ return common_bits(node, key, bits) >= node->cidr; -+} -+ -+static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits, -+ const u8 *key) -+{ -+ struct allowedips_node *node = trie, *found = NULL; -+ -+ while (node && prefix_matches(node, key, bits)) { -+ if (rcu_access_pointer(node->peer)) -+ found = node; -+ if (node->cidr == bits) -+ break; -+ node = rcu_dereference_bh(CHOOSE_NODE(node, key)); -+ } -+ return found; -+} -+ -+/* Returns a strong reference to a peer */ -+static struct wg_peer *lookup(struct allowedips_node __rcu *root, u8 bits, -+ const void *be_ip) -+{ -+ /* Aligned so it can be passed to fls/fls64 */ -+ u8 ip[16] __aligned(__alignof(u64)); -+ struct allowedips_node *node; -+ struct wg_peer *peer = NULL; -+ -+ swap_endian(ip, be_ip, bits); -+ -+ rcu_read_lock_bh(); -+retry: -+ node = find_node(rcu_dereference_bh(root), bits, ip); -+ if (node) { -+ peer = wg_peer_get_maybe_zero(rcu_dereference_bh(node->peer)); -+ if (!peer) -+ goto retry; -+ } -+ rcu_read_unlock_bh(); -+ return peer; -+} -+ -+static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, -+ u8 cidr, u8 bits, struct 
allowedips_node **rnode, -+ struct mutex *lock) -+{ -+ struct allowedips_node *node = rcu_dereference_protected(trie, -+ lockdep_is_held(lock)); -+ struct allowedips_node *parent = NULL; -+ bool exact = false; -+ -+ while (node && node->cidr <= cidr && prefix_matches(node, key, bits)) { -+ parent = node; -+ if (parent->cidr == cidr) { -+ exact = true; -+ break; -+ } -+ node = rcu_dereference_protected(CHOOSE_NODE(parent, key), -+ lockdep_is_held(lock)); -+ } -+ *rnode = parent; -+ return exact; -+} -+ -+static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, -+ u8 cidr, struct wg_peer *peer, struct mutex *lock) -+{ -+ struct allowedips_node *node, *parent, *down, *newnode; -+ -+ if (unlikely(cidr > bits || !peer)) -+ return -EINVAL; -+ -+ if (!rcu_access_pointer(*trie)) { -+ node = kzalloc(sizeof(*node), GFP_KERNEL); -+ if (unlikely(!node)) -+ return -ENOMEM; -+ RCU_INIT_POINTER(node->peer, peer); -+ list_add_tail(&node->peer_list, &peer->allowedips_list); -+ copy_and_assign_cidr(node, key, cidr, bits); -+ rcu_assign_pointer(*trie, node); -+ return 0; -+ } -+ if (node_placement(*trie, key, cidr, bits, &node, lock)) { -+ rcu_assign_pointer(node->peer, peer); -+ list_move_tail(&node->peer_list, &peer->allowedips_list); -+ return 0; -+ } -+ -+ newnode = kzalloc(sizeof(*newnode), GFP_KERNEL); -+ if (unlikely(!newnode)) -+ return -ENOMEM; -+ RCU_INIT_POINTER(newnode->peer, peer); -+ list_add_tail(&newnode->peer_list, &peer->allowedips_list); -+ copy_and_assign_cidr(newnode, key, cidr, bits); -+ -+ if (!node) { -+ down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); -+ } else { -+ down = rcu_dereference_protected(CHOOSE_NODE(node, key), -+ lockdep_is_held(lock)); -+ if (!down) { -+ rcu_assign_pointer(CHOOSE_NODE(node, key), newnode); -+ return 0; -+ } -+ } -+ cidr = min(cidr, common_bits(down, key, bits)); -+ parent = node; -+ -+ if (newnode->cidr == cidr) { -+ rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down); -+ if (!parent) -+ 
rcu_assign_pointer(*trie, newnode); -+ else -+ rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits), -+ newnode); -+ } else { -+ node = kzalloc(sizeof(*node), GFP_KERNEL); -+ if (unlikely(!node)) { -+ kfree(newnode); -+ return -ENOMEM; -+ } -+ INIT_LIST_HEAD(&node->peer_list); -+ copy_and_assign_cidr(node, newnode->bits, cidr, bits); -+ -+ rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down); -+ rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode); -+ if (!parent) -+ rcu_assign_pointer(*trie, node); -+ else -+ rcu_assign_pointer(CHOOSE_NODE(parent, node->bits), -+ node); -+ } -+ return 0; -+} -+ -+void wg_allowedips_init(struct allowedips *table) -+{ -+ table->root4 = table->root6 = NULL; -+ table->seq = 1; -+} -+ -+void wg_allowedips_free(struct allowedips *table, struct mutex *lock) -+{ -+ struct allowedips_node __rcu *old4 = table->root4, *old6 = table->root6; -+ -+ ++table->seq; -+ RCU_INIT_POINTER(table->root4, NULL); -+ RCU_INIT_POINTER(table->root6, NULL); -+ if (rcu_access_pointer(old4)) { -+ struct allowedips_node *node = rcu_dereference_protected(old4, -+ lockdep_is_held(lock)); -+ -+ root_remove_peer_lists(node); -+ call_rcu(&node->rcu, root_free_rcu); -+ } -+ if (rcu_access_pointer(old6)) { -+ struct allowedips_node *node = rcu_dereference_protected(old6, -+ lockdep_is_held(lock)); -+ -+ root_remove_peer_lists(node); -+ call_rcu(&node->rcu, root_free_rcu); -+ } -+} -+ -+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip, -+ u8 cidr, struct wg_peer *peer, struct mutex *lock) -+{ -+ /* Aligned so it can be passed to fls */ -+ u8 key[4] __aligned(__alignof(u32)); -+ -+ ++table->seq; -+ swap_endian(key, (const u8 *)ip, 32); -+ return add(&table->root4, 32, key, cidr, peer, lock); -+} -+ -+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, -+ u8 cidr, struct wg_peer *peer, struct mutex *lock) -+{ -+ /* Aligned so it can be passed to fls64 */ -+ u8 key[16] __aligned(__alignof(u64)); -+ -+ 
++table->seq; -+ swap_endian(key, (const u8 *)ip, 128); -+ return add(&table->root6, 128, key, cidr, peer, lock); -+} -+ -+void wg_allowedips_remove_by_peer(struct allowedips *table, -+ struct wg_peer *peer, struct mutex *lock) -+{ -+ ++table->seq; -+ walk_remove_by_peer(&table->root4, peer, lock); -+ walk_remove_by_peer(&table->root6, peer, lock); -+} -+ -+int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr) -+{ -+ const unsigned int cidr_bytes = DIV_ROUND_UP(node->cidr, 8U); -+ swap_endian(ip, node->bits, node->bitlen); -+ memset(ip + cidr_bytes, 0, node->bitlen / 8U - cidr_bytes); -+ if (node->cidr) -+ ip[cidr_bytes - 1U] &= ~0U << (-node->cidr % 8U); -+ -+ *cidr = node->cidr; -+ return node->bitlen == 32 ? AF_INET : AF_INET6; -+} -+ -+/* Returns a strong reference to a peer */ -+struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table, -+ struct sk_buff *skb) -+{ -+ if (skb->protocol == htons(ETH_P_IP)) -+ return lookup(table->root4, 32, &ip_hdr(skb)->daddr); -+ else if (skb->protocol == htons(ETH_P_IPV6)) -+ return lookup(table->root6, 128, &ipv6_hdr(skb)->daddr); -+ return NULL; -+} -+ -+/* Returns a strong reference to a peer */ -+struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, -+ struct sk_buff *skb) -+{ -+ if (skb->protocol == htons(ETH_P_IP)) -+ return lookup(table->root4, 32, &ip_hdr(skb)->saddr); -+ else if (skb->protocol == htons(ETH_P_IPV6)) -+ return lookup(table->root6, 128, &ipv6_hdr(skb)->saddr); -+ return NULL; -+} -+ -+#include "selftest/allowedips.c" ---- /dev/null -+++ b/drivers/net/wireguard/allowedips.h -@@ -0,0 +1,59 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_ALLOWEDIPS_H -+#define _WG_ALLOWEDIPS_H -+ -+#include -+#include -+#include -+ -+struct wg_peer; -+ -+struct allowedips_node { -+ struct wg_peer __rcu *peer; -+ struct allowedips_node __rcu *bit[2]; -+ /* While it may seem scandalous that we waste space for v4, -+ * we're alloc'ing to the nearest power of 2 anyway, so this -+ * doesn't actually make a difference. -+ */ -+ u8 bits[16] __aligned(__alignof(u64)); -+ u8 cidr, bit_at_a, bit_at_b, bitlen; -+ -+ /* Keep rarely used list at bottom to be beyond cache line. */ -+ union { -+ struct list_head peer_list; -+ struct rcu_head rcu; -+ }; -+}; -+ -+struct allowedips { -+ struct allowedips_node __rcu *root4; -+ struct allowedips_node __rcu *root6; -+ u64 seq; -+}; -+ -+void wg_allowedips_init(struct allowedips *table); -+void wg_allowedips_free(struct allowedips *table, struct mutex *mutex); -+int wg_allowedips_insert_v4(struct allowedips *table, const struct in_addr *ip, -+ u8 cidr, struct wg_peer *peer, struct mutex *lock); -+int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, -+ u8 cidr, struct wg_peer *peer, struct mutex *lock); -+void wg_allowedips_remove_by_peer(struct allowedips *table, -+ struct wg_peer *peer, struct mutex *lock); -+/* The ip input pointer should be __aligned(__alignof(u64))) */ -+int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr); -+ -+/* These return a strong reference to a peer: */ -+struct wg_peer *wg_allowedips_lookup_dst(struct allowedips *table, -+ struct sk_buff *skb); -+struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, -+ struct sk_buff *skb); -+ -+#ifdef DEBUG -+bool wg_allowedips_selftest(void); -+#endif -+ -+#endif /* _WG_ALLOWEDIPS_H */ ---- /dev/null -+++ b/drivers/net/wireguard/cookie.c -@@ -0,0 +1,236 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "cookie.h" -+#include "peer.h" -+#include "device.h" -+#include "messages.h" -+#include "ratelimiter.h" -+#include "timers.h" -+ -+#include -+#include -+ -+#include -+#include -+ -+void wg_cookie_checker_init(struct cookie_checker *checker, -+ struct wg_device *wg) -+{ -+ init_rwsem(&checker->secret_lock); -+ checker->secret_birthdate = ktime_get_coarse_boottime_ns(); -+ get_random_bytes(checker->secret, NOISE_HASH_LEN); -+ checker->device = wg; -+} -+ -+enum { COOKIE_KEY_LABEL_LEN = 8 }; -+static const u8 mac1_key_label[COOKIE_KEY_LABEL_LEN] = "mac1----"; -+static const u8 cookie_key_label[COOKIE_KEY_LABEL_LEN] = "cookie--"; -+ -+static void precompute_key(u8 key[NOISE_SYMMETRIC_KEY_LEN], -+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN], -+ const u8 label[COOKIE_KEY_LABEL_LEN]) -+{ -+ struct blake2s_state blake; -+ -+ blake2s_init(&blake, NOISE_SYMMETRIC_KEY_LEN); -+ blake2s_update(&blake, label, COOKIE_KEY_LABEL_LEN); -+ blake2s_update(&blake, pubkey, NOISE_PUBLIC_KEY_LEN); -+ blake2s_final(&blake, key); -+} -+ -+/* Must hold peer->handshake.static_identity->lock */ -+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker) -+{ -+ if (likely(checker->device->static_identity.has_identity)) { -+ precompute_key(checker->cookie_encryption_key, -+ checker->device->static_identity.static_public, -+ cookie_key_label); -+ precompute_key(checker->message_mac1_key, -+ checker->device->static_identity.static_public, -+ mac1_key_label); -+ } else { -+ memset(checker->cookie_encryption_key, 0, -+ NOISE_SYMMETRIC_KEY_LEN); -+ memset(checker->message_mac1_key, 0, NOISE_SYMMETRIC_KEY_LEN); -+ } -+} -+ -+void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer) -+{ -+ precompute_key(peer->latest_cookie.cookie_decryption_key, -+ peer->handshake.remote_static, cookie_key_label); -+ precompute_key(peer->latest_cookie.message_mac1_key, -+ peer->handshake.remote_static, mac1_key_label); -+} -+ -+void wg_cookie_init(struct cookie *cookie) -+{ -+ 
memset(cookie, 0, sizeof(*cookie)); -+ init_rwsem(&cookie->lock); -+} -+ -+static void compute_mac1(u8 mac1[COOKIE_LEN], const void *message, size_t len, -+ const u8 key[NOISE_SYMMETRIC_KEY_LEN]) -+{ -+ len = len - sizeof(struct message_macs) + -+ offsetof(struct message_macs, mac1); -+ blake2s(mac1, message, key, COOKIE_LEN, len, NOISE_SYMMETRIC_KEY_LEN); -+} -+ -+static void compute_mac2(u8 mac2[COOKIE_LEN], const void *message, size_t len, -+ const u8 cookie[COOKIE_LEN]) -+{ -+ len = len - sizeof(struct message_macs) + -+ offsetof(struct message_macs, mac2); -+ blake2s(mac2, message, cookie, COOKIE_LEN, len, COOKIE_LEN); -+} -+ -+static void make_cookie(u8 cookie[COOKIE_LEN], struct sk_buff *skb, -+ struct cookie_checker *checker) -+{ -+ struct blake2s_state state; -+ -+ if (wg_birthdate_has_expired(checker->secret_birthdate, -+ COOKIE_SECRET_MAX_AGE)) { -+ down_write(&checker->secret_lock); -+ checker->secret_birthdate = ktime_get_coarse_boottime_ns(); -+ get_random_bytes(checker->secret, NOISE_HASH_LEN); -+ up_write(&checker->secret_lock); -+ } -+ -+ down_read(&checker->secret_lock); -+ -+ blake2s_init_key(&state, COOKIE_LEN, checker->secret, NOISE_HASH_LEN); -+ if (skb->protocol == htons(ETH_P_IP)) -+ blake2s_update(&state, (u8 *)&ip_hdr(skb)->saddr, -+ sizeof(struct in_addr)); -+ else if (skb->protocol == htons(ETH_P_IPV6)) -+ blake2s_update(&state, (u8 *)&ipv6_hdr(skb)->saddr, -+ sizeof(struct in6_addr)); -+ blake2s_update(&state, (u8 *)&udp_hdr(skb)->source, sizeof(__be16)); -+ blake2s_final(&state, cookie); -+ -+ up_read(&checker->secret_lock); -+} -+ -+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker, -+ struct sk_buff *skb, -+ bool check_cookie) -+{ -+ struct message_macs *macs = (struct message_macs *) -+ (skb->data + skb->len - sizeof(*macs)); -+ enum cookie_mac_state ret; -+ u8 computed_mac[COOKIE_LEN]; -+ u8 cookie[COOKIE_LEN]; -+ -+ ret = INVALID_MAC; -+ compute_mac1(computed_mac, skb->data, skb->len, -+ 
checker->message_mac1_key); -+ if (crypto_memneq(computed_mac, macs->mac1, COOKIE_LEN)) -+ goto out; -+ -+ ret = VALID_MAC_BUT_NO_COOKIE; -+ -+ if (!check_cookie) -+ goto out; -+ -+ make_cookie(cookie, skb, checker); -+ -+ compute_mac2(computed_mac, skb->data, skb->len, cookie); -+ if (crypto_memneq(computed_mac, macs->mac2, COOKIE_LEN)) -+ goto out; -+ -+ ret = VALID_MAC_WITH_COOKIE_BUT_RATELIMITED; -+ if (!wg_ratelimiter_allow(skb, dev_net(checker->device->dev))) -+ goto out; -+ -+ ret = VALID_MAC_WITH_COOKIE; -+ -+out: -+ return ret; -+} -+ -+void wg_cookie_add_mac_to_packet(void *message, size_t len, -+ struct wg_peer *peer) -+{ -+ struct message_macs *macs = (struct message_macs *) -+ ((u8 *)message + len - sizeof(*macs)); -+ -+ down_write(&peer->latest_cookie.lock); -+ compute_mac1(macs->mac1, message, len, -+ peer->latest_cookie.message_mac1_key); -+ memcpy(peer->latest_cookie.last_mac1_sent, macs->mac1, COOKIE_LEN); -+ peer->latest_cookie.have_sent_mac1 = true; -+ up_write(&peer->latest_cookie.lock); -+ -+ down_read(&peer->latest_cookie.lock); -+ if (peer->latest_cookie.is_valid && -+ !wg_birthdate_has_expired(peer->latest_cookie.birthdate, -+ COOKIE_SECRET_MAX_AGE - COOKIE_SECRET_LATENCY)) -+ compute_mac2(macs->mac2, message, len, -+ peer->latest_cookie.cookie); -+ else -+ memset(macs->mac2, 0, COOKIE_LEN); -+ up_read(&peer->latest_cookie.lock); -+} -+ -+void wg_cookie_message_create(struct message_handshake_cookie *dst, -+ struct sk_buff *skb, __le32 index, -+ struct cookie_checker *checker) -+{ -+ struct message_macs *macs = (struct message_macs *) -+ ((u8 *)skb->data + skb->len - sizeof(*macs)); -+ u8 cookie[COOKIE_LEN]; -+ -+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE); -+ dst->receiver_index = index; -+ get_random_bytes_wait(dst->nonce, COOKIE_NONCE_LEN); -+ -+ make_cookie(cookie, skb, checker); -+ xchacha20poly1305_encrypt(dst->encrypted_cookie, cookie, COOKIE_LEN, -+ macs->mac1, COOKIE_LEN, dst->nonce, -+ 
checker->cookie_encryption_key); -+} -+ -+void wg_cookie_message_consume(struct message_handshake_cookie *src, -+ struct wg_device *wg) -+{ -+ struct wg_peer *peer = NULL; -+ u8 cookie[COOKIE_LEN]; -+ bool ret; -+ -+ if (unlikely(!wg_index_hashtable_lookup(wg->index_hashtable, -+ INDEX_HASHTABLE_HANDSHAKE | -+ INDEX_HASHTABLE_KEYPAIR, -+ src->receiver_index, &peer))) -+ return; -+ -+ down_read(&peer->latest_cookie.lock); -+ if (unlikely(!peer->latest_cookie.have_sent_mac1)) { -+ up_read(&peer->latest_cookie.lock); -+ goto out; -+ } -+ ret = xchacha20poly1305_decrypt( -+ cookie, src->encrypted_cookie, sizeof(src->encrypted_cookie), -+ peer->latest_cookie.last_mac1_sent, COOKIE_LEN, src->nonce, -+ peer->latest_cookie.cookie_decryption_key); -+ up_read(&peer->latest_cookie.lock); -+ -+ if (ret) { -+ down_write(&peer->latest_cookie.lock); -+ memcpy(peer->latest_cookie.cookie, cookie, COOKIE_LEN); -+ peer->latest_cookie.birthdate = ktime_get_coarse_boottime_ns(); -+ peer->latest_cookie.is_valid = true; -+ peer->latest_cookie.have_sent_mac1 = false; -+ up_write(&peer->latest_cookie.lock); -+ } else { -+ net_dbg_ratelimited("%s: Could not decrypt invalid cookie response\n", -+ wg->dev->name); -+ } -+ -+out: -+ wg_peer_put(peer); -+} ---- /dev/null -+++ b/drivers/net/wireguard/cookie.h -@@ -0,0 +1,59 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_COOKIE_H -+#define _WG_COOKIE_H -+ -+#include "messages.h" -+#include -+ -+struct wg_peer; -+ -+struct cookie_checker { -+ u8 secret[NOISE_HASH_LEN]; -+ u8 cookie_encryption_key[NOISE_SYMMETRIC_KEY_LEN]; -+ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN]; -+ u64 secret_birthdate; -+ struct rw_semaphore secret_lock; -+ struct wg_device *device; -+}; -+ -+struct cookie { -+ u64 birthdate; -+ bool is_valid; -+ u8 cookie[COOKIE_LEN]; -+ bool have_sent_mac1; -+ u8 last_mac1_sent[COOKIE_LEN]; -+ u8 cookie_decryption_key[NOISE_SYMMETRIC_KEY_LEN]; -+ u8 message_mac1_key[NOISE_SYMMETRIC_KEY_LEN]; -+ struct rw_semaphore lock; -+}; -+ -+enum cookie_mac_state { -+ INVALID_MAC, -+ VALID_MAC_BUT_NO_COOKIE, -+ VALID_MAC_WITH_COOKIE_BUT_RATELIMITED, -+ VALID_MAC_WITH_COOKIE -+}; -+ -+void wg_cookie_checker_init(struct cookie_checker *checker, -+ struct wg_device *wg); -+void wg_cookie_checker_precompute_device_keys(struct cookie_checker *checker); -+void wg_cookie_checker_precompute_peer_keys(struct wg_peer *peer); -+void wg_cookie_init(struct cookie *cookie); -+ -+enum cookie_mac_state wg_cookie_validate_packet(struct cookie_checker *checker, -+ struct sk_buff *skb, -+ bool check_cookie); -+void wg_cookie_add_mac_to_packet(void *message, size_t len, -+ struct wg_peer *peer); -+ -+void wg_cookie_message_create(struct message_handshake_cookie *src, -+ struct sk_buff *skb, __le32 index, -+ struct cookie_checker *checker); -+void wg_cookie_message_consume(struct message_handshake_cookie *src, -+ struct wg_device *wg); -+ -+#endif /* _WG_COOKIE_H */ ---- /dev/null -+++ b/drivers/net/wireguard/device.c -@@ -0,0 +1,458 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "queueing.h" -+#include "socket.h" -+#include "timers.h" -+#include "device.h" -+#include "ratelimiter.h" -+#include "peer.h" -+#include "messages.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static LIST_HEAD(device_list); -+ -+static int wg_open(struct net_device *dev) -+{ -+ struct in_device *dev_v4 = __in_dev_get_rtnl(dev); -+ struct inet6_dev *dev_v6 = __in6_dev_get(dev); -+ struct wg_device *wg = netdev_priv(dev); -+ struct wg_peer *peer; -+ int ret; -+ -+ if (dev_v4) { -+ /* At some point we might put this check near the ip_rt_send_ -+ * redirect call of ip_forward in net/ipv4/ip_forward.c, similar -+ * to the current secpath check. -+ */ -+ IN_DEV_CONF_SET(dev_v4, SEND_REDIRECTS, false); -+ IPV4_DEVCONF_ALL(dev_net(dev), SEND_REDIRECTS) = false; -+ } -+ if (dev_v6) -+ dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; -+ -+ ret = wg_socket_init(wg, wg->incoming_port); -+ if (ret < 0) -+ return ret; -+ mutex_lock(&wg->device_update_lock); -+ list_for_each_entry(peer, &wg->peer_list, peer_list) { -+ wg_packet_send_staged_packets(peer); -+ if (peer->persistent_keepalive_interval) -+ wg_packet_send_keepalive(peer); -+ } -+ mutex_unlock(&wg->device_update_lock); -+ return 0; -+} -+ -+#ifdef CONFIG_PM_SLEEP -+static int wg_pm_notification(struct notifier_block *nb, unsigned long action, -+ void *data) -+{ -+ struct wg_device *wg; -+ struct wg_peer *peer; -+ -+ /* If the machine is constantly suspending and resuming, as part of -+ * its normal operation rather than as a somewhat rare event, then we -+ * don't actually want to clear keys. 
-+ */ -+ if (IS_ENABLED(CONFIG_PM_AUTOSLEEP) || IS_ENABLED(CONFIG_ANDROID)) -+ return 0; -+ -+ if (action != PM_HIBERNATION_PREPARE && action != PM_SUSPEND_PREPARE) -+ return 0; -+ -+ rtnl_lock(); -+ list_for_each_entry(wg, &device_list, device_list) { -+ mutex_lock(&wg->device_update_lock); -+ list_for_each_entry(peer, &wg->peer_list, peer_list) { -+ del_timer(&peer->timer_zero_key_material); -+ wg_noise_handshake_clear(&peer->handshake); -+ wg_noise_keypairs_clear(&peer->keypairs); -+ } -+ mutex_unlock(&wg->device_update_lock); -+ } -+ rtnl_unlock(); -+ rcu_barrier(); -+ return 0; -+} -+ -+static struct notifier_block pm_notifier = { .notifier_call = wg_pm_notification }; -+#endif -+ -+static int wg_stop(struct net_device *dev) -+{ -+ struct wg_device *wg = netdev_priv(dev); -+ struct wg_peer *peer; -+ -+ mutex_lock(&wg->device_update_lock); -+ list_for_each_entry(peer, &wg->peer_list, peer_list) { -+ wg_packet_purge_staged_packets(peer); -+ wg_timers_stop(peer); -+ wg_noise_handshake_clear(&peer->handshake); -+ wg_noise_keypairs_clear(&peer->keypairs); -+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); -+ } -+ mutex_unlock(&wg->device_update_lock); -+ skb_queue_purge(&wg->incoming_handshakes); -+ wg_socket_reinit(wg, NULL, NULL); -+ return 0; -+} -+ -+static netdev_tx_t wg_xmit(struct sk_buff *skb, struct net_device *dev) -+{ -+ struct wg_device *wg = netdev_priv(dev); -+ struct sk_buff_head packets; -+ struct wg_peer *peer; -+ struct sk_buff *next; -+ sa_family_t family; -+ u32 mtu; -+ int ret; -+ -+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) { -+ ret = -EPROTONOSUPPORT; -+ net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name); -+ goto err; -+ } -+ -+ peer = wg_allowedips_lookup_dst(&wg->peer_allowedips, skb); -+ if (unlikely(!peer)) { -+ ret = -ENOKEY; -+ if (skb->protocol == htons(ETH_P_IP)) -+ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI4\n", -+ dev->name, &ip_hdr(skb)->daddr); -+ else if 
(skb->protocol == htons(ETH_P_IPV6)) -+ net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n", -+ dev->name, &ipv6_hdr(skb)->daddr); -+ goto err; -+ } -+ -+ family = READ_ONCE(peer->endpoint.addr.sa_family); -+ if (unlikely(family != AF_INET && family != AF_INET6)) { -+ ret = -EDESTADDRREQ; -+ net_dbg_ratelimited("%s: No valid endpoint has been configured or discovered for peer %llu\n", -+ dev->name, peer->internal_id); -+ goto err_peer; -+ } -+ -+ mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; -+ -+ __skb_queue_head_init(&packets); -+ if (!skb_is_gso(skb)) { -+ skb_mark_not_on_list(skb); -+ } else { -+ struct sk_buff *segs = skb_gso_segment(skb, 0); -+ -+ if (unlikely(IS_ERR(segs))) { -+ ret = PTR_ERR(segs); -+ goto err_peer; -+ } -+ dev_kfree_skb(skb); -+ skb = segs; -+ } -+ -+ skb_list_walk_safe(skb, skb, next) { -+ skb_mark_not_on_list(skb); -+ -+ skb = skb_share_check(skb, GFP_ATOMIC); -+ if (unlikely(!skb)) -+ continue; -+ -+ /* We only need to keep the original dst around for icmp, -+ * so at this point we're in a position to drop it. -+ */ -+ skb_dst_drop(skb); -+ -+ PACKET_CB(skb)->mtu = mtu; -+ -+ __skb_queue_tail(&packets, skb); -+ } -+ -+ spin_lock_bh(&peer->staged_packet_queue.lock); -+ /* If the queue is getting too big, we start removing the oldest packets -+ * until it's small again. We do this before adding the new packet, so -+ * we don't remove GSO segments that are in excess. 
-+ */ -+ while (skb_queue_len(&peer->staged_packet_queue) > MAX_STAGED_PACKETS) { -+ dev_kfree_skb(__skb_dequeue(&peer->staged_packet_queue)); -+ ++dev->stats.tx_dropped; -+ } -+ skb_queue_splice_tail(&packets, &peer->staged_packet_queue); -+ spin_unlock_bh(&peer->staged_packet_queue.lock); -+ -+ wg_packet_send_staged_packets(peer); -+ -+ wg_peer_put(peer); -+ return NETDEV_TX_OK; -+ -+err_peer: -+ wg_peer_put(peer); -+err: -+ ++dev->stats.tx_errors; -+ if (skb->protocol == htons(ETH_P_IP)) -+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); -+ else if (skb->protocol == htons(ETH_P_IPV6)) -+ icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); -+ kfree_skb(skb); -+ return ret; -+} -+ -+static const struct net_device_ops netdev_ops = { -+ .ndo_open = wg_open, -+ .ndo_stop = wg_stop, -+ .ndo_start_xmit = wg_xmit, -+ .ndo_get_stats64 = ip_tunnel_get_stats64 -+}; -+ -+static void wg_destruct(struct net_device *dev) -+{ -+ struct wg_device *wg = netdev_priv(dev); -+ -+ rtnl_lock(); -+ list_del(&wg->device_list); -+ rtnl_unlock(); -+ mutex_lock(&wg->device_update_lock); -+ wg->incoming_port = 0; -+ wg_socket_reinit(wg, NULL, NULL); -+ /* The final references are cleared in the below calls to destroy_workqueue. */ -+ wg_peer_remove_all(wg); -+ destroy_workqueue(wg->handshake_receive_wq); -+ destroy_workqueue(wg->handshake_send_wq); -+ destroy_workqueue(wg->packet_crypt_wq); -+ wg_packet_queue_free(&wg->decrypt_queue, true); -+ wg_packet_queue_free(&wg->encrypt_queue, true); -+ rcu_barrier(); /* Wait for all the peers to be actually freed. 
*/ -+ wg_ratelimiter_uninit(); -+ memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); -+ skb_queue_purge(&wg->incoming_handshakes); -+ free_percpu(dev->tstats); -+ free_percpu(wg->incoming_handshakes_worker); -+ if (wg->have_creating_net_ref) -+ put_net(wg->creating_net); -+ kvfree(wg->index_hashtable); -+ kvfree(wg->peer_hashtable); -+ mutex_unlock(&wg->device_update_lock); -+ -+ pr_debug("%s: Interface deleted\n", dev->name); -+ free_netdev(dev); -+} -+ -+static const struct device_type device_type = { .name = KBUILD_MODNAME }; -+ -+static void wg_setup(struct net_device *dev) -+{ -+ struct wg_device *wg = netdev_priv(dev); -+ enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | -+ NETIF_F_SG | NETIF_F_GSO | -+ NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA }; -+ -+ dev->netdev_ops = &netdev_ops; -+ dev->hard_header_len = 0; -+ dev->addr_len = 0; -+ dev->needed_headroom = DATA_PACKET_HEAD_ROOM; -+ dev->needed_tailroom = noise_encrypted_len(MESSAGE_PADDING_MULTIPLE); -+ dev->type = ARPHRD_NONE; -+ dev->flags = IFF_POINTOPOINT | IFF_NOARP; -+ dev->priv_flags |= IFF_NO_QUEUE; -+ dev->features |= NETIF_F_LLTX; -+ dev->features |= WG_NETDEV_FEATURES; -+ dev->hw_features |= WG_NETDEV_FEATURES; -+ dev->hw_enc_features |= WG_NETDEV_FEATURES; -+ dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH - -+ sizeof(struct udphdr) - -+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); -+ -+ SET_NETDEV_DEVTYPE(dev, &device_type); -+ -+ /* We need to keep the dst around in case of icmp replies. 
*/ -+ netif_keep_dst(dev); -+ -+ memset(wg, 0, sizeof(*wg)); -+ wg->dev = dev; -+} -+ -+static int wg_newlink(struct net *src_net, struct net_device *dev, -+ struct nlattr *tb[], struct nlattr *data[], -+ struct netlink_ext_ack *extack) -+{ -+ struct wg_device *wg = netdev_priv(dev); -+ int ret = -ENOMEM; -+ -+ wg->creating_net = src_net; -+ init_rwsem(&wg->static_identity.lock); -+ mutex_init(&wg->socket_update_lock); -+ mutex_init(&wg->device_update_lock); -+ skb_queue_head_init(&wg->incoming_handshakes); -+ wg_allowedips_init(&wg->peer_allowedips); -+ wg_cookie_checker_init(&wg->cookie_checker, wg); -+ INIT_LIST_HEAD(&wg->peer_list); -+ wg->device_update_gen = 1; -+ -+ wg->peer_hashtable = wg_pubkey_hashtable_alloc(); -+ if (!wg->peer_hashtable) -+ return ret; -+ -+ wg->index_hashtable = wg_index_hashtable_alloc(); -+ if (!wg->index_hashtable) -+ goto err_free_peer_hashtable; -+ -+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); -+ if (!dev->tstats) -+ goto err_free_index_hashtable; -+ -+ wg->incoming_handshakes_worker = -+ wg_packet_percpu_multicore_worker_alloc( -+ wg_packet_handshake_receive_worker, wg); -+ if (!wg->incoming_handshakes_worker) -+ goto err_free_tstats; -+ -+ wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s", -+ WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name); -+ if (!wg->handshake_receive_wq) -+ goto err_free_incoming_handshakes; -+ -+ wg->handshake_send_wq = alloc_workqueue("wg-kex-%s", -+ WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name); -+ if (!wg->handshake_send_wq) -+ goto err_destroy_handshake_receive; -+ -+ wg->packet_crypt_wq = alloc_workqueue("wg-crypt-%s", -+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 0, dev->name); -+ if (!wg->packet_crypt_wq) -+ goto err_destroy_handshake_send; -+ -+ ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker, -+ true, MAX_QUEUED_PACKETS); -+ if (ret < 0) -+ goto err_destroy_packet_crypt; -+ -+ ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker, -+ true, 
MAX_QUEUED_PACKETS); -+ if (ret < 0) -+ goto err_free_encrypt_queue; -+ -+ ret = wg_ratelimiter_init(); -+ if (ret < 0) -+ goto err_free_decrypt_queue; -+ -+ ret = register_netdevice(dev); -+ if (ret < 0) -+ goto err_uninit_ratelimiter; -+ -+ list_add(&wg->device_list, &device_list); -+ -+ /* We wait until the end to assign priv_destructor, so that -+ * register_netdevice doesn't call it for us if it fails. -+ */ -+ dev->priv_destructor = wg_destruct; -+ -+ pr_debug("%s: Interface created\n", dev->name); -+ return ret; -+ -+err_uninit_ratelimiter: -+ wg_ratelimiter_uninit(); -+err_free_decrypt_queue: -+ wg_packet_queue_free(&wg->decrypt_queue, true); -+err_free_encrypt_queue: -+ wg_packet_queue_free(&wg->encrypt_queue, true); -+err_destroy_packet_crypt: -+ destroy_workqueue(wg->packet_crypt_wq); -+err_destroy_handshake_send: -+ destroy_workqueue(wg->handshake_send_wq); -+err_destroy_handshake_receive: -+ destroy_workqueue(wg->handshake_receive_wq); -+err_free_incoming_handshakes: -+ free_percpu(wg->incoming_handshakes_worker); -+err_free_tstats: -+ free_percpu(dev->tstats); -+err_free_index_hashtable: -+ kvfree(wg->index_hashtable); -+err_free_peer_hashtable: -+ kvfree(wg->peer_hashtable); -+ return ret; -+} -+ -+static struct rtnl_link_ops link_ops __read_mostly = { -+ .kind = KBUILD_MODNAME, -+ .priv_size = sizeof(struct wg_device), -+ .setup = wg_setup, -+ .newlink = wg_newlink, -+}; -+ -+static int wg_netdevice_notification(struct notifier_block *nb, -+ unsigned long action, void *data) -+{ -+ struct net_device *dev = ((struct netdev_notifier_info *)data)->dev; -+ struct wg_device *wg = netdev_priv(dev); -+ -+ ASSERT_RTNL(); -+ -+ if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops) -+ return 0; -+ -+ if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) { -+ put_net(wg->creating_net); -+ wg->have_creating_net_ref = false; -+ } else if (dev_net(dev) != wg->creating_net && -+ !wg->have_creating_net_ref) { -+ wg->have_creating_net_ref = 
true; -+ get_net(wg->creating_net); -+ } -+ return 0; -+} -+ -+static struct notifier_block netdevice_notifier = { -+ .notifier_call = wg_netdevice_notification -+}; -+ -+int __init wg_device_init(void) -+{ -+ int ret; -+ -+#ifdef CONFIG_PM_SLEEP -+ ret = register_pm_notifier(&pm_notifier); -+ if (ret) -+ return ret; -+#endif -+ -+ ret = register_netdevice_notifier(&netdevice_notifier); -+ if (ret) -+ goto error_pm; -+ -+ ret = rtnl_link_register(&link_ops); -+ if (ret) -+ goto error_netdevice; -+ -+ return 0; -+ -+error_netdevice: -+ unregister_netdevice_notifier(&netdevice_notifier); -+error_pm: -+#ifdef CONFIG_PM_SLEEP -+ unregister_pm_notifier(&pm_notifier); -+#endif -+ return ret; -+} -+ -+void wg_device_uninit(void) -+{ -+ rtnl_link_unregister(&link_ops); -+ unregister_netdevice_notifier(&netdevice_notifier); -+#ifdef CONFIG_PM_SLEEP -+ unregister_pm_notifier(&pm_notifier); -+#endif -+ rcu_barrier(); -+} ---- /dev/null -+++ b/drivers/net/wireguard/device.h -@@ -0,0 +1,65 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_DEVICE_H -+#define _WG_DEVICE_H -+ -+#include "noise.h" -+#include "allowedips.h" -+#include "peerlookup.h" -+#include "cookie.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct wg_device; -+ -+struct multicore_worker { -+ void *ptr; -+ struct work_struct work; -+}; -+ -+struct crypt_queue { -+ struct ptr_ring ring; -+ union { -+ struct { -+ struct multicore_worker __percpu *worker; -+ int last_cpu; -+ }; -+ struct work_struct work; -+ }; -+}; -+ -+struct wg_device { -+ struct net_device *dev; -+ struct crypt_queue encrypt_queue, decrypt_queue; -+ struct sock __rcu *sock4, *sock6; -+ struct net *creating_net; -+ struct noise_static_identity static_identity; -+ struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; -+ struct workqueue_struct *packet_crypt_wq; -+ struct sk_buff_head incoming_handshakes; -+ int incoming_handshake_cpu; -+ struct multicore_worker __percpu *incoming_handshakes_worker; -+ struct cookie_checker cookie_checker; -+ struct pubkey_hashtable *peer_hashtable; -+ struct index_hashtable *index_hashtable; -+ struct allowedips peer_allowedips; -+ struct mutex device_update_lock, socket_update_lock; -+ struct list_head device_list, peer_list; -+ unsigned int num_peers, device_update_gen; -+ u32 fwmark; -+ u16 incoming_port; -+ bool have_creating_net_ref; -+}; -+ -+int wg_device_init(void); -+void wg_device_uninit(void); -+ -+#endif /* _WG_DEVICE_H */ ---- /dev/null -+++ b/drivers/net/wireguard/main.c -@@ -0,0 +1,64 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "version.h" -+#include "device.h" -+#include "noise.h" -+#include "queueing.h" -+#include "ratelimiter.h" -+#include "netlink.h" -+ -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+static int __init mod_init(void) -+{ -+ int ret; -+ -+#ifdef DEBUG -+ if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() || -+ !wg_ratelimiter_selftest()) -+ return -ENOTRECOVERABLE; -+#endif -+ wg_noise_init(); -+ -+ ret = wg_device_init(); -+ if (ret < 0) -+ goto err_device; -+ -+ ret = wg_genetlink_init(); -+ if (ret < 0) -+ goto err_netlink; -+ -+ pr_info("WireGuard " WIREGUARD_VERSION " loaded. See www.wireguard.com for information.\n"); -+ pr_info("Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved.\n"); -+ -+ return 0; -+ -+err_netlink: -+ wg_device_uninit(); -+err_device: -+ return ret; -+} -+ -+static void __exit mod_exit(void) -+{ -+ wg_genetlink_uninit(); -+ wg_device_uninit(); -+} -+ -+module_init(mod_init); -+module_exit(mod_exit); -+MODULE_LICENSE("GPL v2"); -+MODULE_DESCRIPTION("WireGuard secure network tunnel"); -+MODULE_AUTHOR("Jason A. Donenfeld "); -+MODULE_VERSION(WIREGUARD_VERSION); -+MODULE_ALIAS_RTNL_LINK(KBUILD_MODNAME); -+MODULE_ALIAS_GENL_FAMILY(WG_GENL_NAME); ---- /dev/null -+++ b/drivers/net/wireguard/messages.h -@@ -0,0 +1,128 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_MESSAGES_H -+#define _WG_MESSAGES_H -+ -+#include -+#include -+#include -+ -+#include -+#include -+#include -+ -+enum noise_lengths { -+ NOISE_PUBLIC_KEY_LEN = CURVE25519_KEY_SIZE, -+ NOISE_SYMMETRIC_KEY_LEN = CHACHA20POLY1305_KEY_SIZE, -+ NOISE_TIMESTAMP_LEN = sizeof(u64) + sizeof(u32), -+ NOISE_AUTHTAG_LEN = CHACHA20POLY1305_AUTHTAG_SIZE, -+ NOISE_HASH_LEN = BLAKE2S_HASH_SIZE -+}; -+ -+#define noise_encrypted_len(plain_len) ((plain_len) + NOISE_AUTHTAG_LEN) -+ -+enum cookie_values { -+ COOKIE_SECRET_MAX_AGE = 2 * 60, -+ COOKIE_SECRET_LATENCY = 5, -+ COOKIE_NONCE_LEN = XCHACHA20POLY1305_NONCE_SIZE, -+ COOKIE_LEN = 16 -+}; -+ -+enum counter_values { -+ COUNTER_BITS_TOTAL = 2048, -+ COUNTER_REDUNDANT_BITS = BITS_PER_LONG, -+ COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS -+}; -+ -+enum limits { -+ REKEY_AFTER_MESSAGES = 1ULL << 60, -+ REJECT_AFTER_MESSAGES = U64_MAX - COUNTER_WINDOW_SIZE - 1, -+ REKEY_TIMEOUT = 5, -+ REKEY_TIMEOUT_JITTER_MAX_JIFFIES = HZ / 3, -+ REKEY_AFTER_TIME = 120, -+ REJECT_AFTER_TIME = 180, -+ INITIATIONS_PER_SECOND = 50, -+ MAX_PEERS_PER_DEVICE = 1U << 20, -+ KEEPALIVE_TIMEOUT = 10, -+ MAX_TIMER_HANDSHAKES = 90 / REKEY_TIMEOUT, -+ MAX_QUEUED_INCOMING_HANDSHAKES = 4096, /* TODO: replace this with DQL */ -+ MAX_STAGED_PACKETS = 128, -+ MAX_QUEUED_PACKETS = 1024 /* TODO: replace this with DQL */ -+}; -+ -+enum message_type { -+ MESSAGE_INVALID = 0, -+ MESSAGE_HANDSHAKE_INITIATION = 1, -+ MESSAGE_HANDSHAKE_RESPONSE = 2, -+ MESSAGE_HANDSHAKE_COOKIE = 3, -+ MESSAGE_DATA = 4 -+}; -+ -+struct message_header { -+ /* The actual layout of this that we want is: -+ * u8 type -+ * u8 reserved_zero[3] -+ * -+ * But it turns out that by encoding this as little endian, -+ * we achieve the same thing, and it makes checking faster. 
-+ */ -+ __le32 type; -+}; -+ -+struct message_macs { -+ u8 mac1[COOKIE_LEN]; -+ u8 mac2[COOKIE_LEN]; -+}; -+ -+struct message_handshake_initiation { -+ struct message_header header; -+ __le32 sender_index; -+ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN]; -+ u8 encrypted_static[noise_encrypted_len(NOISE_PUBLIC_KEY_LEN)]; -+ u8 encrypted_timestamp[noise_encrypted_len(NOISE_TIMESTAMP_LEN)]; -+ struct message_macs macs; -+}; -+ -+struct message_handshake_response { -+ struct message_header header; -+ __le32 sender_index; -+ __le32 receiver_index; -+ u8 unencrypted_ephemeral[NOISE_PUBLIC_KEY_LEN]; -+ u8 encrypted_nothing[noise_encrypted_len(0)]; -+ struct message_macs macs; -+}; -+ -+struct message_handshake_cookie { -+ struct message_header header; -+ __le32 receiver_index; -+ u8 nonce[COOKIE_NONCE_LEN]; -+ u8 encrypted_cookie[noise_encrypted_len(COOKIE_LEN)]; -+}; -+ -+struct message_data { -+ struct message_header header; -+ __le32 key_idx; -+ __le64 counter; -+ u8 encrypted_data[]; -+}; -+ -+#define message_data_len(plain_len) \ -+ (noise_encrypted_len(plain_len) + sizeof(struct message_data)) -+ -+enum message_alignments { -+ MESSAGE_PADDING_MULTIPLE = 16, -+ MESSAGE_MINIMUM_LENGTH = message_data_len(0) -+}; -+ -+#define SKB_HEADER_LEN \ -+ (max(sizeof(struct iphdr), sizeof(struct ipv6hdr)) + \ -+ sizeof(struct udphdr) + NET_SKB_PAD) -+#define DATA_PACKET_HEAD_ROOM \ -+ ALIGN(sizeof(struct message_data) + SKB_HEADER_LEN, 4) -+ -+enum { HANDSHAKE_DSCP = 0x88 /* AF41, plus 00 ECN */ }; -+ -+#endif /* _WG_MESSAGES_H */ ---- /dev/null -+++ b/drivers/net/wireguard/netlink.c -@@ -0,0 +1,648 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "netlink.h" -+#include "device.h" -+#include "peer.h" -+#include "socket.h" -+#include "queueing.h" -+#include "messages.h" -+ -+#include -+ -+#include -+#include -+#include -+#include -+ -+static struct genl_family genl_family; -+ -+static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = { -+ [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 }, -+ [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, -+ [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -+ [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -+ [WGDEVICE_A_FLAGS] = { .type = NLA_U32 }, -+ [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 }, -+ [WGDEVICE_A_FWMARK] = { .type = NLA_U32 }, -+ [WGDEVICE_A_PEERS] = { .type = NLA_NESTED } -+}; -+ -+static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = { -+ [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -+ [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN }, -+ [WGPEER_A_FLAGS] = { .type = NLA_U32 }, -+ [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) }, -+ [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 }, -+ [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) }, -+ [WGPEER_A_RX_BYTES] = { .type = NLA_U64 }, -+ [WGPEER_A_TX_BYTES] = { .type = NLA_U64 }, -+ [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED }, -+ [WGPEER_A_PROTOCOL_VERSION] = { .type = NLA_U32 } -+}; -+ -+static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = { -+ [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 }, -+ [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) }, -+ [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 } -+}; -+ -+static struct wg_device *lookup_interface(struct nlattr **attrs, -+ struct sk_buff *skb) -+{ -+ struct net_device *dev = NULL; -+ -+ if (!attrs[WGDEVICE_A_IFINDEX] == 
!attrs[WGDEVICE_A_IFNAME]) -+ return ERR_PTR(-EBADR); -+ if (attrs[WGDEVICE_A_IFINDEX]) -+ dev = dev_get_by_index(sock_net(skb->sk), -+ nla_get_u32(attrs[WGDEVICE_A_IFINDEX])); -+ else if (attrs[WGDEVICE_A_IFNAME]) -+ dev = dev_get_by_name(sock_net(skb->sk), -+ nla_data(attrs[WGDEVICE_A_IFNAME])); -+ if (!dev) -+ return ERR_PTR(-ENODEV); -+ if (!dev->rtnl_link_ops || !dev->rtnl_link_ops->kind || -+ strcmp(dev->rtnl_link_ops->kind, KBUILD_MODNAME)) { -+ dev_put(dev); -+ return ERR_PTR(-EOPNOTSUPP); -+ } -+ return netdev_priv(dev); -+} -+ -+static int get_allowedips(struct sk_buff *skb, const u8 *ip, u8 cidr, -+ int family) -+{ -+ struct nlattr *allowedip_nest; -+ -+ allowedip_nest = nla_nest_start(skb, 0); -+ if (!allowedip_nest) -+ return -EMSGSIZE; -+ -+ if (nla_put_u8(skb, WGALLOWEDIP_A_CIDR_MASK, cidr) || -+ nla_put_u16(skb, WGALLOWEDIP_A_FAMILY, family) || -+ nla_put(skb, WGALLOWEDIP_A_IPADDR, family == AF_INET6 ? -+ sizeof(struct in6_addr) : sizeof(struct in_addr), ip)) { -+ nla_nest_cancel(skb, allowedip_nest); -+ return -EMSGSIZE; -+ } -+ -+ nla_nest_end(skb, allowedip_nest); -+ return 0; -+} -+ -+struct dump_ctx { -+ struct wg_device *wg; -+ struct wg_peer *next_peer; -+ u64 allowedips_seq; -+ struct allowedips_node *next_allowedip; -+}; -+ -+#define DUMP_CTX(cb) ((struct dump_ctx *)(cb)->args) -+ -+static int -+get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx) -+{ -+ -+ struct nlattr *allowedips_nest, *peer_nest = nla_nest_start(skb, 0); -+ struct allowedips_node *allowedips_node = ctx->next_allowedip; -+ bool fail; -+ -+ if (!peer_nest) -+ return -EMSGSIZE; -+ -+ down_read(&peer->handshake.lock); -+ fail = nla_put(skb, WGPEER_A_PUBLIC_KEY, NOISE_PUBLIC_KEY_LEN, -+ peer->handshake.remote_static); -+ up_read(&peer->handshake.lock); -+ if (fail) -+ goto err; -+ -+ if (!allowedips_node) { -+ const struct __kernel_timespec last_handshake = { -+ .tv_sec = peer->walltime_last_handshake.tv_sec, -+ .tv_nsec = 
peer->walltime_last_handshake.tv_nsec -+ }; -+ -+ down_read(&peer->handshake.lock); -+ fail = nla_put(skb, WGPEER_A_PRESHARED_KEY, -+ NOISE_SYMMETRIC_KEY_LEN, -+ peer->handshake.preshared_key); -+ up_read(&peer->handshake.lock); -+ if (fail) -+ goto err; -+ -+ if (nla_put(skb, WGPEER_A_LAST_HANDSHAKE_TIME, -+ sizeof(last_handshake), &last_handshake) || -+ nla_put_u16(skb, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, -+ peer->persistent_keepalive_interval) || -+ nla_put_u64_64bit(skb, WGPEER_A_TX_BYTES, peer->tx_bytes, -+ WGPEER_A_UNSPEC) || -+ nla_put_u64_64bit(skb, WGPEER_A_RX_BYTES, peer->rx_bytes, -+ WGPEER_A_UNSPEC) || -+ nla_put_u32(skb, WGPEER_A_PROTOCOL_VERSION, 1)) -+ goto err; -+ -+ read_lock_bh(&peer->endpoint_lock); -+ if (peer->endpoint.addr.sa_family == AF_INET) -+ fail = nla_put(skb, WGPEER_A_ENDPOINT, -+ sizeof(peer->endpoint.addr4), -+ &peer->endpoint.addr4); -+ else if (peer->endpoint.addr.sa_family == AF_INET6) -+ fail = nla_put(skb, WGPEER_A_ENDPOINT, -+ sizeof(peer->endpoint.addr6), -+ &peer->endpoint.addr6); -+ read_unlock_bh(&peer->endpoint_lock); -+ if (fail) -+ goto err; -+ allowedips_node = -+ list_first_entry_or_null(&peer->allowedips_list, -+ struct allowedips_node, peer_list); -+ } -+ if (!allowedips_node) -+ goto no_allowedips; -+ if (!ctx->allowedips_seq) -+ ctx->allowedips_seq = peer->device->peer_allowedips.seq; -+ else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq) -+ goto no_allowedips; -+ -+ allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS); -+ if (!allowedips_nest) -+ goto err; -+ -+ list_for_each_entry_from(allowedips_node, &peer->allowedips_list, -+ peer_list) { -+ u8 cidr, ip[16] __aligned(__alignof(u64)); -+ int family; -+ -+ family = wg_allowedips_read_node(allowedips_node, ip, &cidr); -+ if (get_allowedips(skb, ip, cidr, family)) { -+ nla_nest_end(skb, allowedips_nest); -+ nla_nest_end(skb, peer_nest); -+ ctx->next_allowedip = allowedips_node; -+ return -EMSGSIZE; -+ } -+ } -+ nla_nest_end(skb, 
allowedips_nest); -+no_allowedips: -+ nla_nest_end(skb, peer_nest); -+ ctx->next_allowedip = NULL; -+ ctx->allowedips_seq = 0; -+ return 0; -+err: -+ nla_nest_cancel(skb, peer_nest); -+ return -EMSGSIZE; -+} -+ -+static int wg_get_device_start(struct netlink_callback *cb) -+{ -+ struct nlattr **attrs = genl_family_attrbuf(&genl_family); -+ struct wg_device *wg; -+ int ret; -+ -+ ret = nlmsg_parse(cb->nlh, GENL_HDRLEN + genl_family.hdrsize, attrs, -+ genl_family.maxattr, device_policy, NULL); -+ if (ret < 0) -+ return ret; -+ wg = lookup_interface(attrs, cb->skb); -+ if (IS_ERR(wg)) -+ return PTR_ERR(wg); -+ DUMP_CTX(cb)->wg = wg; -+ return 0; -+} -+ -+static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb) -+{ -+ struct wg_peer *peer, *next_peer_cursor; -+ struct dump_ctx *ctx = DUMP_CTX(cb); -+ struct wg_device *wg = ctx->wg; -+ struct nlattr *peers_nest; -+ int ret = -EMSGSIZE; -+ bool done = true; -+ void *hdr; -+ -+ rtnl_lock(); -+ mutex_lock(&wg->device_update_lock); -+ cb->seq = wg->device_update_gen; -+ next_peer_cursor = ctx->next_peer; -+ -+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, -+ &genl_family, NLM_F_MULTI, WG_CMD_GET_DEVICE); -+ if (!hdr) -+ goto out; -+ genl_dump_check_consistent(cb, hdr); -+ -+ if (!ctx->next_peer) { -+ if (nla_put_u16(skb, WGDEVICE_A_LISTEN_PORT, -+ wg->incoming_port) || -+ nla_put_u32(skb, WGDEVICE_A_FWMARK, wg->fwmark) || -+ nla_put_u32(skb, WGDEVICE_A_IFINDEX, wg->dev->ifindex) || -+ nla_put_string(skb, WGDEVICE_A_IFNAME, wg->dev->name)) -+ goto out; -+ -+ down_read(&wg->static_identity.lock); -+ if (wg->static_identity.has_identity) { -+ if (nla_put(skb, WGDEVICE_A_PRIVATE_KEY, -+ NOISE_PUBLIC_KEY_LEN, -+ wg->static_identity.static_private) || -+ nla_put(skb, WGDEVICE_A_PUBLIC_KEY, -+ NOISE_PUBLIC_KEY_LEN, -+ wg->static_identity.static_public)) { -+ up_read(&wg->static_identity.lock); -+ goto out; -+ } -+ } -+ up_read(&wg->static_identity.lock); -+ } -+ -+ peers_nest = 
nla_nest_start(skb, WGDEVICE_A_PEERS); -+ if (!peers_nest) -+ goto out; -+ ret = 0; -+ /* If the last cursor was removed via list_del_init in peer_remove, then -+ * we just treat this the same as there being no more peers left. The -+ * reason is that seq_nr should indicate to userspace that this isn't a -+ * coherent dump anyway, so they'll try again. -+ */ -+ if (list_empty(&wg->peer_list) || -+ (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) { -+ nla_nest_cancel(skb, peers_nest); -+ goto out; -+ } -+ lockdep_assert_held(&wg->device_update_lock); -+ peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list); -+ list_for_each_entry_continue(peer, &wg->peer_list, peer_list) { -+ if (get_peer(peer, skb, ctx)) { -+ done = false; -+ break; -+ } -+ next_peer_cursor = peer; -+ } -+ nla_nest_end(skb, peers_nest); -+ -+out: -+ if (!ret && !done && next_peer_cursor) -+ wg_peer_get(next_peer_cursor); -+ wg_peer_put(ctx->next_peer); -+ mutex_unlock(&wg->device_update_lock); -+ rtnl_unlock(); -+ -+ if (ret) { -+ genlmsg_cancel(skb, hdr); -+ return ret; -+ } -+ genlmsg_end(skb, hdr); -+ if (done) { -+ ctx->next_peer = NULL; -+ return 0; -+ } -+ ctx->next_peer = next_peer_cursor; -+ return skb->len; -+ -+ /* At this point, we can't really deal ourselves with safely zeroing out -+ * the private key material after usage. This will need an additional API -+ * in the kernel for marking skbs as zero_on_free. 
-+ */ -+} -+ -+static int wg_get_device_done(struct netlink_callback *cb) -+{ -+ struct dump_ctx *ctx = DUMP_CTX(cb); -+ -+ if (ctx->wg) -+ dev_put(ctx->wg->dev); -+ wg_peer_put(ctx->next_peer); -+ return 0; -+} -+ -+static int set_port(struct wg_device *wg, u16 port) -+{ -+ struct wg_peer *peer; -+ -+ if (wg->incoming_port == port) -+ return 0; -+ list_for_each_entry(peer, &wg->peer_list, peer_list) -+ wg_socket_clear_peer_endpoint_src(peer); -+ if (!netif_running(wg->dev)) { -+ wg->incoming_port = port; -+ return 0; -+ } -+ return wg_socket_init(wg, port); -+} -+ -+static int set_allowedip(struct wg_peer *peer, struct nlattr **attrs) -+{ -+ int ret = -EINVAL; -+ u16 family; -+ u8 cidr; -+ -+ if (!attrs[WGALLOWEDIP_A_FAMILY] || !attrs[WGALLOWEDIP_A_IPADDR] || -+ !attrs[WGALLOWEDIP_A_CIDR_MASK]) -+ return ret; -+ family = nla_get_u16(attrs[WGALLOWEDIP_A_FAMILY]); -+ cidr = nla_get_u8(attrs[WGALLOWEDIP_A_CIDR_MASK]); -+ -+ if (family == AF_INET && cidr <= 32 && -+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in_addr)) -+ ret = wg_allowedips_insert_v4( -+ &peer->device->peer_allowedips, -+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer, -+ &peer->device->device_update_lock); -+ else if (family == AF_INET6 && cidr <= 128 && -+ nla_len(attrs[WGALLOWEDIP_A_IPADDR]) == sizeof(struct in6_addr)) -+ ret = wg_allowedips_insert_v6( -+ &peer->device->peer_allowedips, -+ nla_data(attrs[WGALLOWEDIP_A_IPADDR]), cidr, peer, -+ &peer->device->device_update_lock); -+ -+ return ret; -+} -+ -+static int set_peer(struct wg_device *wg, struct nlattr **attrs) -+{ -+ u8 *public_key = NULL, *preshared_key = NULL; -+ struct wg_peer *peer = NULL; -+ u32 flags = 0; -+ int ret; -+ -+ ret = -EINVAL; -+ if (attrs[WGPEER_A_PUBLIC_KEY] && -+ nla_len(attrs[WGPEER_A_PUBLIC_KEY]) == NOISE_PUBLIC_KEY_LEN) -+ public_key = nla_data(attrs[WGPEER_A_PUBLIC_KEY]); -+ else -+ goto out; -+ if (attrs[WGPEER_A_PRESHARED_KEY] && -+ nla_len(attrs[WGPEER_A_PRESHARED_KEY]) == NOISE_SYMMETRIC_KEY_LEN) -+ 
preshared_key = nla_data(attrs[WGPEER_A_PRESHARED_KEY]); -+ -+ if (attrs[WGPEER_A_FLAGS]) -+ flags = nla_get_u32(attrs[WGPEER_A_FLAGS]); -+ ret = -EOPNOTSUPP; -+ if (flags & ~__WGPEER_F_ALL) -+ goto out; -+ -+ ret = -EPFNOSUPPORT; -+ if (attrs[WGPEER_A_PROTOCOL_VERSION]) { -+ if (nla_get_u32(attrs[WGPEER_A_PROTOCOL_VERSION]) != 1) -+ goto out; -+ } -+ -+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, -+ nla_data(attrs[WGPEER_A_PUBLIC_KEY])); -+ ret = 0; -+ if (!peer) { /* Peer doesn't exist yet. Add a new one. */ -+ if (flags & (WGPEER_F_REMOVE_ME | WGPEER_F_UPDATE_ONLY)) -+ goto out; -+ -+ /* The peer is new, so there aren't allowed IPs to remove. */ -+ flags &= ~WGPEER_F_REPLACE_ALLOWEDIPS; -+ -+ down_read(&wg->static_identity.lock); -+ if (wg->static_identity.has_identity && -+ !memcmp(nla_data(attrs[WGPEER_A_PUBLIC_KEY]), -+ wg->static_identity.static_public, -+ NOISE_PUBLIC_KEY_LEN)) { -+ /* We silently ignore peers that have the same public -+ * key as the device. The reason we do it silently is -+ * that we'd like for people to be able to reuse the -+ * same set of API calls across peers. -+ */ -+ up_read(&wg->static_identity.lock); -+ ret = 0; -+ goto out; -+ } -+ up_read(&wg->static_identity.lock); -+ -+ peer = wg_peer_create(wg, public_key, preshared_key); -+ if (IS_ERR(peer)) { -+ /* Similar to the above, if the key is invalid, we skip -+ * it without fanfare, so that services don't need to -+ * worry about doing key validation themselves. -+ */ -+ ret = PTR_ERR(peer) == -EKEYREJECTED ? 0 : PTR_ERR(peer); -+ peer = NULL; -+ goto out; -+ } -+ /* Take additional reference, as though we've just been -+ * looked up. 
-+ */ -+ wg_peer_get(peer); -+ } -+ -+ if (flags & WGPEER_F_REMOVE_ME) { -+ wg_peer_remove(peer); -+ goto out; -+ } -+ -+ if (preshared_key) { -+ down_write(&peer->handshake.lock); -+ memcpy(&peer->handshake.preshared_key, preshared_key, -+ NOISE_SYMMETRIC_KEY_LEN); -+ up_write(&peer->handshake.lock); -+ } -+ -+ if (attrs[WGPEER_A_ENDPOINT]) { -+ struct sockaddr *addr = nla_data(attrs[WGPEER_A_ENDPOINT]); -+ size_t len = nla_len(attrs[WGPEER_A_ENDPOINT]); -+ -+ if ((len == sizeof(struct sockaddr_in) && -+ addr->sa_family == AF_INET) || -+ (len == sizeof(struct sockaddr_in6) && -+ addr->sa_family == AF_INET6)) { -+ struct endpoint endpoint = { { { 0 } } }; -+ -+ memcpy(&endpoint.addr, addr, len); -+ wg_socket_set_peer_endpoint(peer, &endpoint); -+ } -+ } -+ -+ if (flags & WGPEER_F_REPLACE_ALLOWEDIPS) -+ wg_allowedips_remove_by_peer(&wg->peer_allowedips, peer, -+ &wg->device_update_lock); -+ -+ if (attrs[WGPEER_A_ALLOWEDIPS]) { -+ struct nlattr *attr, *allowedip[WGALLOWEDIP_A_MAX + 1]; -+ int rem; -+ -+ nla_for_each_nested(attr, attrs[WGPEER_A_ALLOWEDIPS], rem) { -+ ret = nla_parse_nested(allowedip, WGALLOWEDIP_A_MAX, -+ attr, allowedip_policy, NULL); -+ if (ret < 0) -+ goto out; -+ ret = set_allowedip(peer, allowedip); -+ if (ret < 0) -+ goto out; -+ } -+ } -+ -+ if (attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]) { -+ const u16 persistent_keepalive_interval = nla_get_u16( -+ attrs[WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL]); -+ const bool send_keepalive = -+ !peer->persistent_keepalive_interval && -+ persistent_keepalive_interval && -+ netif_running(wg->dev); -+ -+ peer->persistent_keepalive_interval = persistent_keepalive_interval; -+ if (send_keepalive) -+ wg_packet_send_keepalive(peer); -+ } -+ -+ if (netif_running(wg->dev)) -+ wg_packet_send_staged_packets(peer); -+ -+out: -+ wg_peer_put(peer); -+ if (attrs[WGPEER_A_PRESHARED_KEY]) -+ memzero_explicit(nla_data(attrs[WGPEER_A_PRESHARED_KEY]), -+ nla_len(attrs[WGPEER_A_PRESHARED_KEY])); -+ return ret; -+} -+ -+static 
int wg_set_device(struct sk_buff *skb, struct genl_info *info) -+{ -+ struct wg_device *wg = lookup_interface(info->attrs, skb); -+ u32 flags = 0; -+ int ret; -+ -+ if (IS_ERR(wg)) { -+ ret = PTR_ERR(wg); -+ goto out_nodev; -+ } -+ -+ rtnl_lock(); -+ mutex_lock(&wg->device_update_lock); -+ -+ if (info->attrs[WGDEVICE_A_FLAGS]) -+ flags = nla_get_u32(info->attrs[WGDEVICE_A_FLAGS]); -+ ret = -EOPNOTSUPP; -+ if (flags & ~__WGDEVICE_F_ALL) -+ goto out; -+ -+ ret = -EPERM; -+ if ((info->attrs[WGDEVICE_A_LISTEN_PORT] || -+ info->attrs[WGDEVICE_A_FWMARK]) && -+ !ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN)) -+ goto out; -+ -+ ++wg->device_update_gen; -+ -+ if (info->attrs[WGDEVICE_A_FWMARK]) { -+ struct wg_peer *peer; -+ -+ wg->fwmark = nla_get_u32(info->attrs[WGDEVICE_A_FWMARK]); -+ list_for_each_entry(peer, &wg->peer_list, peer_list) -+ wg_socket_clear_peer_endpoint_src(peer); -+ } -+ -+ if (info->attrs[WGDEVICE_A_LISTEN_PORT]) { -+ ret = set_port(wg, -+ nla_get_u16(info->attrs[WGDEVICE_A_LISTEN_PORT])); -+ if (ret) -+ goto out; -+ } -+ -+ if (flags & WGDEVICE_F_REPLACE_PEERS) -+ wg_peer_remove_all(wg); -+ -+ if (info->attrs[WGDEVICE_A_PRIVATE_KEY] && -+ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY]) == -+ NOISE_PUBLIC_KEY_LEN) { -+ u8 *private_key = nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]); -+ u8 public_key[NOISE_PUBLIC_KEY_LEN]; -+ struct wg_peer *peer, *temp; -+ -+ if (!crypto_memneq(wg->static_identity.static_private, -+ private_key, NOISE_PUBLIC_KEY_LEN)) -+ goto skip_set_private_key; -+ -+ /* We remove before setting, to prevent race, which means doing -+ * two 25519-genpub ops. 
-+ */ -+ if (curve25519_generate_public(public_key, private_key)) { -+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, -+ public_key); -+ if (peer) { -+ wg_peer_put(peer); -+ wg_peer_remove(peer); -+ } -+ } -+ -+ down_write(&wg->static_identity.lock); -+ wg_noise_set_static_identity_private_key(&wg->static_identity, -+ private_key); -+ list_for_each_entry_safe(peer, temp, &wg->peer_list, -+ peer_list) { -+ if (wg_noise_precompute_static_static(peer)) -+ wg_noise_expire_current_peer_keypairs(peer); -+ else -+ wg_peer_remove(peer); -+ } -+ wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); -+ up_write(&wg->static_identity.lock); -+ } -+skip_set_private_key: -+ -+ if (info->attrs[WGDEVICE_A_PEERS]) { -+ struct nlattr *attr, *peer[WGPEER_A_MAX + 1]; -+ int rem; -+ -+ nla_for_each_nested(attr, info->attrs[WGDEVICE_A_PEERS], rem) { -+ ret = nla_parse_nested(peer, WGPEER_A_MAX, attr, -+ peer_policy, NULL); -+ if (ret < 0) -+ goto out; -+ ret = set_peer(wg, peer); -+ if (ret < 0) -+ goto out; -+ } -+ } -+ ret = 0; -+ -+out: -+ mutex_unlock(&wg->device_update_lock); -+ rtnl_unlock(); -+ dev_put(wg->dev); -+out_nodev: -+ if (info->attrs[WGDEVICE_A_PRIVATE_KEY]) -+ memzero_explicit(nla_data(info->attrs[WGDEVICE_A_PRIVATE_KEY]), -+ nla_len(info->attrs[WGDEVICE_A_PRIVATE_KEY])); -+ return ret; -+} -+ -+static const struct genl_ops genl_ops[] = { -+ { -+ .cmd = WG_CMD_GET_DEVICE, -+ .start = wg_get_device_start, -+ .dumpit = wg_get_device_dump, -+ .done = wg_get_device_done, -+ .flags = GENL_UNS_ADMIN_PERM -+ }, { -+ .cmd = WG_CMD_SET_DEVICE, -+ .doit = wg_set_device, -+ .flags = GENL_UNS_ADMIN_PERM -+ } -+}; -+ -+static struct genl_family genl_family __ro_after_init = { -+ .ops = genl_ops, -+ .n_ops = ARRAY_SIZE(genl_ops), -+ .name = WG_GENL_NAME, -+ .version = WG_GENL_VERSION, -+ .maxattr = WGDEVICE_A_MAX, -+ .module = THIS_MODULE, -+ .policy = device_policy, -+ .netnsok = true -+}; -+ -+int __init wg_genetlink_init(void) -+{ -+ return 
genl_register_family(&genl_family); -+} -+ -+void __exit wg_genetlink_uninit(void) -+{ -+ genl_unregister_family(&genl_family); -+} ---- /dev/null -+++ b/drivers/net/wireguard/netlink.h -@@ -0,0 +1,12 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifndef _WG_NETLINK_H -+#define _WG_NETLINK_H -+ -+int wg_genetlink_init(void); -+void wg_genetlink_uninit(void); -+ -+#endif /* _WG_NETLINK_H */ ---- /dev/null -+++ b/drivers/net/wireguard/noise.c -@@ -0,0 +1,828 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "noise.h" -+#include "device.h" -+#include "peer.h" -+#include "messages.h" -+#include "queueing.h" -+#include "peerlookup.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* This implements Noise_IKpsk2: -+ * -+ * <- s -+ * ****** -+ * -> e, es, s, ss, {t} -+ * <- e, ee, se, psk, {} -+ */ -+ -+static const u8 handshake_name[37] = "Noise_IKpsk2_25519_ChaChaPoly_BLAKE2s"; -+static const u8 identifier_name[34] = "WireGuard v1 zx2c4 Jason@zx2c4.com"; -+static u8 handshake_init_hash[NOISE_HASH_LEN] __ro_after_init; -+static u8 handshake_init_chaining_key[NOISE_HASH_LEN] __ro_after_init; -+static atomic64_t keypair_counter = ATOMIC64_INIT(0); -+ -+void __init wg_noise_init(void) -+{ -+ struct blake2s_state blake; -+ -+ blake2s(handshake_init_chaining_key, handshake_name, NULL, -+ NOISE_HASH_LEN, sizeof(handshake_name), 0); -+ blake2s_init(&blake, NOISE_HASH_LEN); -+ blake2s_update(&blake, handshake_init_chaining_key, NOISE_HASH_LEN); -+ blake2s_update(&blake, identifier_name, sizeof(identifier_name)); -+ blake2s_final(&blake, handshake_init_hash); -+} -+ -+/* Must hold peer->handshake.static_identity->lock */ -+bool wg_noise_precompute_static_static(struct wg_peer *peer) -+{ -+ bool ret = true; -+ -+ down_write(&peer->handshake.lock); -+ if 
(peer->handshake.static_identity->has_identity) -+ ret = curve25519( -+ peer->handshake.precomputed_static_static, -+ peer->handshake.static_identity->static_private, -+ peer->handshake.remote_static); -+ else -+ memset(peer->handshake.precomputed_static_static, 0, -+ NOISE_PUBLIC_KEY_LEN); -+ up_write(&peer->handshake.lock); -+ return ret; -+} -+ -+bool wg_noise_handshake_init(struct noise_handshake *handshake, -+ struct noise_static_identity *static_identity, -+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -+ struct wg_peer *peer) -+{ -+ memset(handshake, 0, sizeof(*handshake)); -+ init_rwsem(&handshake->lock); -+ handshake->entry.type = INDEX_HASHTABLE_HANDSHAKE; -+ handshake->entry.peer = peer; -+ memcpy(handshake->remote_static, peer_public_key, NOISE_PUBLIC_KEY_LEN); -+ if (peer_preshared_key) -+ memcpy(handshake->preshared_key, peer_preshared_key, -+ NOISE_SYMMETRIC_KEY_LEN); -+ handshake->static_identity = static_identity; -+ handshake->state = HANDSHAKE_ZEROED; -+ return wg_noise_precompute_static_static(peer); -+} -+ -+static void handshake_zero(struct noise_handshake *handshake) -+{ -+ memset(&handshake->ephemeral_private, 0, NOISE_PUBLIC_KEY_LEN); -+ memset(&handshake->remote_ephemeral, 0, NOISE_PUBLIC_KEY_LEN); -+ memset(&handshake->hash, 0, NOISE_HASH_LEN); -+ memset(&handshake->chaining_key, 0, NOISE_HASH_LEN); -+ handshake->remote_index = 0; -+ handshake->state = HANDSHAKE_ZEROED; -+} -+ -+void wg_noise_handshake_clear(struct noise_handshake *handshake) -+{ -+ wg_index_hashtable_remove( -+ handshake->entry.peer->device->index_hashtable, -+ &handshake->entry); -+ down_write(&handshake->lock); -+ handshake_zero(handshake); -+ up_write(&handshake->lock); -+ wg_index_hashtable_remove( -+ handshake->entry.peer->device->index_hashtable, -+ &handshake->entry); -+} -+ -+static struct noise_keypair *keypair_create(struct wg_peer *peer) -+{ -+ struct noise_keypair *keypair = kzalloc(sizeof(*keypair), 
GFP_KERNEL); -+ -+ if (unlikely(!keypair)) -+ return NULL; -+ keypair->internal_id = atomic64_inc_return(&keypair_counter); -+ keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; -+ keypair->entry.peer = peer; -+ kref_init(&keypair->refcount); -+ return keypair; -+} -+ -+static void keypair_free_rcu(struct rcu_head *rcu) -+{ -+ kzfree(container_of(rcu, struct noise_keypair, rcu)); -+} -+ -+static void keypair_free_kref(struct kref *kref) -+{ -+ struct noise_keypair *keypair = -+ container_of(kref, struct noise_keypair, refcount); -+ -+ net_dbg_ratelimited("%s: Keypair %llu destroyed for peer %llu\n", -+ keypair->entry.peer->device->dev->name, -+ keypair->internal_id, -+ keypair->entry.peer->internal_id); -+ wg_index_hashtable_remove(keypair->entry.peer->device->index_hashtable, -+ &keypair->entry); -+ call_rcu(&keypair->rcu, keypair_free_rcu); -+} -+ -+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now) -+{ -+ if (unlikely(!keypair)) -+ return; -+ if (unlikely(unreference_now)) -+ wg_index_hashtable_remove( -+ keypair->entry.peer->device->index_hashtable, -+ &keypair->entry); -+ kref_put(&keypair->refcount, keypair_free_kref); -+} -+ -+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair) -+{ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(), -+ "Taking noise keypair reference without holding the RCU BH read lock"); -+ if (unlikely(!keypair || !kref_get_unless_zero(&keypair->refcount))) -+ return NULL; -+ return keypair; -+} -+ -+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs) -+{ -+ struct noise_keypair *old; -+ -+ spin_lock_bh(&keypairs->keypair_update_lock); -+ -+ /* We zero the next_keypair before zeroing the others, so that -+ * wg_noise_received_with_keypair returns early before subsequent ones -+ * are zeroed. 
-+ */ -+ old = rcu_dereference_protected(keypairs->next_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ RCU_INIT_POINTER(keypairs->next_keypair, NULL); -+ wg_noise_keypair_put(old, true); -+ -+ old = rcu_dereference_protected(keypairs->previous_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ RCU_INIT_POINTER(keypairs->previous_keypair, NULL); -+ wg_noise_keypair_put(old, true); -+ -+ old = rcu_dereference_protected(keypairs->current_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ RCU_INIT_POINTER(keypairs->current_keypair, NULL); -+ wg_noise_keypair_put(old, true); -+ -+ spin_unlock_bh(&keypairs->keypair_update_lock); -+} -+ -+void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer) -+{ -+ struct noise_keypair *keypair; -+ -+ wg_noise_handshake_clear(&peer->handshake); -+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); -+ -+ spin_lock_bh(&peer->keypairs.keypair_update_lock); -+ keypair = rcu_dereference_protected(peer->keypairs.next_keypair, -+ lockdep_is_held(&peer->keypairs.keypair_update_lock)); -+ if (keypair) -+ keypair->sending.is_valid = false; -+ keypair = rcu_dereference_protected(peer->keypairs.current_keypair, -+ lockdep_is_held(&peer->keypairs.keypair_update_lock)); -+ if (keypair) -+ keypair->sending.is_valid = false; -+ spin_unlock_bh(&peer->keypairs.keypair_update_lock); -+} -+ -+static void add_new_keypair(struct noise_keypairs *keypairs, -+ struct noise_keypair *new_keypair) -+{ -+ struct noise_keypair *previous_keypair, *next_keypair, *current_keypair; -+ -+ spin_lock_bh(&keypairs->keypair_update_lock); -+ previous_keypair = rcu_dereference_protected(keypairs->previous_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ next_keypair = rcu_dereference_protected(keypairs->next_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ current_keypair = rcu_dereference_protected(keypairs->current_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); 
-+ if (new_keypair->i_am_the_initiator) { -+ /* If we're the initiator, it means we've sent a handshake, and -+ * received a confirmation response, which means this new -+ * keypair can now be used. -+ */ -+ if (next_keypair) { -+ /* If there already was a next keypair pending, we -+ * demote it to be the previous keypair, and free the -+ * existing current. Note that this means KCI can result -+ * in this transition. It would perhaps be more sound to -+ * always just get rid of the unused next keypair -+ * instead of putting it in the previous slot, but this -+ * might be a bit less robust. Something to think about -+ * for the future. -+ */ -+ RCU_INIT_POINTER(keypairs->next_keypair, NULL); -+ rcu_assign_pointer(keypairs->previous_keypair, -+ next_keypair); -+ wg_noise_keypair_put(current_keypair, true); -+ } else /* If there wasn't an existing next keypair, we replace -+ * the previous with the current one. -+ */ -+ rcu_assign_pointer(keypairs->previous_keypair, -+ current_keypair); -+ /* At this point we can get rid of the old previous keypair, and -+ * set up the new keypair. -+ */ -+ wg_noise_keypair_put(previous_keypair, true); -+ rcu_assign_pointer(keypairs->current_keypair, new_keypair); -+ } else { -+ /* If we're the responder, it means we can't use the new keypair -+ * until we receive confirmation via the first data packet, so -+ * we get rid of the existing previous one, the possibly -+ * existing next one, and slide in the new next one. -+ */ -+ rcu_assign_pointer(keypairs->next_keypair, new_keypair); -+ wg_noise_keypair_put(next_keypair, true); -+ RCU_INIT_POINTER(keypairs->previous_keypair, NULL); -+ wg_noise_keypair_put(previous_keypair, true); -+ } -+ spin_unlock_bh(&keypairs->keypair_update_lock); -+} -+ -+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs, -+ struct noise_keypair *received_keypair) -+{ -+ struct noise_keypair *old_keypair; -+ bool key_is_new; -+ -+ /* We first check without taking the spinlock. 
*/ -+ key_is_new = received_keypair == -+ rcu_access_pointer(keypairs->next_keypair); -+ if (likely(!key_is_new)) -+ return false; -+ -+ spin_lock_bh(&keypairs->keypair_update_lock); -+ /* After locking, we double check that things didn't change from -+ * beneath us. -+ */ -+ if (unlikely(received_keypair != -+ rcu_dereference_protected(keypairs->next_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)))) { -+ spin_unlock_bh(&keypairs->keypair_update_lock); -+ return false; -+ } -+ -+ /* When we've finally received the confirmation, we slide the next -+ * into the current, the current into the previous, and get rid of -+ * the old previous. -+ */ -+ old_keypair = rcu_dereference_protected(keypairs->previous_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock)); -+ rcu_assign_pointer(keypairs->previous_keypair, -+ rcu_dereference_protected(keypairs->current_keypair, -+ lockdep_is_held(&keypairs->keypair_update_lock))); -+ wg_noise_keypair_put(old_keypair, true); -+ rcu_assign_pointer(keypairs->current_keypair, received_keypair); -+ RCU_INIT_POINTER(keypairs->next_keypair, NULL); -+ -+ spin_unlock_bh(&keypairs->keypair_update_lock); -+ return true; -+} -+ -+/* Must hold static_identity->lock */ -+void wg_noise_set_static_identity_private_key( -+ struct noise_static_identity *static_identity, -+ const u8 private_key[NOISE_PUBLIC_KEY_LEN]) -+{ -+ memcpy(static_identity->static_private, private_key, -+ NOISE_PUBLIC_KEY_LEN); -+ curve25519_clamp_secret(static_identity->static_private); -+ static_identity->has_identity = curve25519_generate_public( -+ static_identity->static_public, private_key); -+} -+ -+/* This is Hugo Krawczyk's HKDF: -+ * - https://eprint.iacr.org/2010/264.pdf -+ * - https://tools.ietf.org/html/rfc5869 -+ */ -+static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data, -+ size_t first_len, size_t second_len, size_t third_len, -+ size_t data_len, const u8 chaining_key[NOISE_HASH_LEN]) -+{ -+ u8 output[BLAKE2S_HASH_SIZE 
+ 1]; -+ u8 secret[BLAKE2S_HASH_SIZE]; -+ -+ WARN_ON(IS_ENABLED(DEBUG) && -+ (first_len > BLAKE2S_HASH_SIZE || -+ second_len > BLAKE2S_HASH_SIZE || -+ third_len > BLAKE2S_HASH_SIZE || -+ ((second_len || second_dst || third_len || third_dst) && -+ (!first_len || !first_dst)) || -+ ((third_len || third_dst) && (!second_len || !second_dst)))); -+ -+ /* Extract entropy from data into secret */ -+ blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN); -+ -+ if (!first_dst || !first_len) -+ goto out; -+ -+ /* Expand first key: key = secret, data = 0x1 */ -+ output[0] = 1; -+ blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE); -+ memcpy(first_dst, output, first_len); -+ -+ if (!second_dst || !second_len) -+ goto out; -+ -+ /* Expand second key: key = secret, data = first-key || 0x2 */ -+ output[BLAKE2S_HASH_SIZE] = 2; -+ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, -+ BLAKE2S_HASH_SIZE); -+ memcpy(second_dst, output, second_len); -+ -+ if (!third_dst || !third_len) -+ goto out; -+ -+ /* Expand third key: key = secret, data = second-key || 0x3 */ -+ output[BLAKE2S_HASH_SIZE] = 3; -+ blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, -+ BLAKE2S_HASH_SIZE); -+ memcpy(third_dst, output, third_len); -+ -+out: -+ /* Clear sensitive data from stack */ -+ memzero_explicit(secret, BLAKE2S_HASH_SIZE); -+ memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); -+} -+ -+static void symmetric_key_init(struct noise_symmetric_key *key) -+{ -+ spin_lock_init(&key->counter.receive.lock); -+ atomic64_set(&key->counter.counter, 0); -+ memset(key->counter.receive.backtrack, 0, -+ sizeof(key->counter.receive.backtrack)); -+ key->birthdate = ktime_get_coarse_boottime_ns(); -+ key->is_valid = true; -+} -+ -+static void derive_keys(struct noise_symmetric_key *first_dst, -+ struct noise_symmetric_key *second_dst, -+ const u8 chaining_key[NOISE_HASH_LEN]) -+{ -+ kdf(first_dst->key, second_dst->key, NULL, NULL, -+ NOISE_SYMMETRIC_KEY_LEN, 
NOISE_SYMMETRIC_KEY_LEN, 0, 0, -+ chaining_key); -+ symmetric_key_init(first_dst); -+ symmetric_key_init(second_dst); -+} -+ -+static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], -+ u8 key[NOISE_SYMMETRIC_KEY_LEN], -+ const u8 private[NOISE_PUBLIC_KEY_LEN], -+ const u8 public[NOISE_PUBLIC_KEY_LEN]) -+{ -+ u8 dh_calculation[NOISE_PUBLIC_KEY_LEN]; -+ -+ if (unlikely(!curve25519(dh_calculation, private, public))) -+ return false; -+ kdf(chaining_key, key, NULL, dh_calculation, NOISE_HASH_LEN, -+ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, chaining_key); -+ memzero_explicit(dh_calculation, NOISE_PUBLIC_KEY_LEN); -+ return true; -+} -+ -+static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len) -+{ -+ struct blake2s_state blake; -+ -+ blake2s_init(&blake, NOISE_HASH_LEN); -+ blake2s_update(&blake, hash, NOISE_HASH_LEN); -+ blake2s_update(&blake, src, src_len); -+ blake2s_final(&blake, hash); -+} -+ -+static void mix_psk(u8 chaining_key[NOISE_HASH_LEN], u8 hash[NOISE_HASH_LEN], -+ u8 key[NOISE_SYMMETRIC_KEY_LEN], -+ const u8 psk[NOISE_SYMMETRIC_KEY_LEN]) -+{ -+ u8 temp_hash[NOISE_HASH_LEN]; -+ -+ kdf(chaining_key, temp_hash, key, psk, NOISE_HASH_LEN, NOISE_HASH_LEN, -+ NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, chaining_key); -+ mix_hash(hash, temp_hash, NOISE_HASH_LEN); -+ memzero_explicit(temp_hash, NOISE_HASH_LEN); -+} -+ -+static void handshake_init(u8 chaining_key[NOISE_HASH_LEN], -+ u8 hash[NOISE_HASH_LEN], -+ const u8 remote_static[NOISE_PUBLIC_KEY_LEN]) -+{ -+ memcpy(hash, handshake_init_hash, NOISE_HASH_LEN); -+ memcpy(chaining_key, handshake_init_chaining_key, NOISE_HASH_LEN); -+ mix_hash(hash, remote_static, NOISE_PUBLIC_KEY_LEN); -+} -+ -+static void message_encrypt(u8 *dst_ciphertext, const u8 *src_plaintext, -+ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN], -+ u8 hash[NOISE_HASH_LEN]) -+{ -+ chacha20poly1305_encrypt(dst_ciphertext, src_plaintext, src_len, hash, -+ NOISE_HASH_LEN, -+ 0 /* Always zero for 
Noise_IK */, key); -+ mix_hash(hash, dst_ciphertext, noise_encrypted_len(src_len)); -+} -+ -+static bool message_decrypt(u8 *dst_plaintext, const u8 *src_ciphertext, -+ size_t src_len, u8 key[NOISE_SYMMETRIC_KEY_LEN], -+ u8 hash[NOISE_HASH_LEN]) -+{ -+ if (!chacha20poly1305_decrypt(dst_plaintext, src_ciphertext, src_len, -+ hash, NOISE_HASH_LEN, -+ 0 /* Always zero for Noise_IK */, key)) -+ return false; -+ mix_hash(hash, src_ciphertext, src_len); -+ return true; -+} -+ -+static void message_ephemeral(u8 ephemeral_dst[NOISE_PUBLIC_KEY_LEN], -+ const u8 ephemeral_src[NOISE_PUBLIC_KEY_LEN], -+ u8 chaining_key[NOISE_HASH_LEN], -+ u8 hash[NOISE_HASH_LEN]) -+{ -+ if (ephemeral_dst != ephemeral_src) -+ memcpy(ephemeral_dst, ephemeral_src, NOISE_PUBLIC_KEY_LEN); -+ mix_hash(hash, ephemeral_src, NOISE_PUBLIC_KEY_LEN); -+ kdf(chaining_key, NULL, NULL, ephemeral_src, NOISE_HASH_LEN, 0, 0, -+ NOISE_PUBLIC_KEY_LEN, chaining_key); -+} -+ -+static void tai64n_now(u8 output[NOISE_TIMESTAMP_LEN]) -+{ -+ struct timespec64 now; -+ -+ ktime_get_real_ts64(&now); -+ -+ /* In order to prevent some sort of infoleak from precise timers, we -+ * round down the nanoseconds part to the closest rounded-down power of -+ * two to the maximum initiations per second allowed anyway by the -+ * implementation. -+ */ -+ now.tv_nsec = ALIGN_DOWN(now.tv_nsec, -+ rounddown_pow_of_two(NSEC_PER_SEC / INITIATIONS_PER_SECOND)); -+ -+ /* https://cr.yp.to/libtai/tai64.html */ -+ *(__be64 *)output = cpu_to_be64(0x400000000000000aULL + now.tv_sec); -+ *(__be32 *)(output + sizeof(__be64)) = cpu_to_be32(now.tv_nsec); -+} -+ -+bool -+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, -+ struct noise_handshake *handshake) -+{ -+ u8 timestamp[NOISE_TIMESTAMP_LEN]; -+ u8 key[NOISE_SYMMETRIC_KEY_LEN]; -+ bool ret = false; -+ -+ /* We need to wait for crng _before_ taking any locks, since -+ * curve25519_generate_secret uses get_random_bytes_wait. 
-+ */ -+ wait_for_random_bytes(); -+ -+ down_read(&handshake->static_identity->lock); -+ down_write(&handshake->lock); -+ -+ if (unlikely(!handshake->static_identity->has_identity)) -+ goto out; -+ -+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION); -+ -+ handshake_init(handshake->chaining_key, handshake->hash, -+ handshake->remote_static); -+ -+ /* e */ -+ curve25519_generate_secret(handshake->ephemeral_private); -+ if (!curve25519_generate_public(dst->unencrypted_ephemeral, -+ handshake->ephemeral_private)) -+ goto out; -+ message_ephemeral(dst->unencrypted_ephemeral, -+ dst->unencrypted_ephemeral, handshake->chaining_key, -+ handshake->hash); -+ -+ /* es */ -+ if (!mix_dh(handshake->chaining_key, key, handshake->ephemeral_private, -+ handshake->remote_static)) -+ goto out; -+ -+ /* s */ -+ message_encrypt(dst->encrypted_static, -+ handshake->static_identity->static_public, -+ NOISE_PUBLIC_KEY_LEN, key, handshake->hash); -+ -+ /* ss */ -+ kdf(handshake->chaining_key, key, NULL, -+ handshake->precomputed_static_static, NOISE_HASH_LEN, -+ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, -+ handshake->chaining_key); -+ -+ /* {t} */ -+ tai64n_now(timestamp); -+ message_encrypt(dst->encrypted_timestamp, timestamp, -+ NOISE_TIMESTAMP_LEN, key, handshake->hash); -+ -+ dst->sender_index = wg_index_hashtable_insert( -+ handshake->entry.peer->device->index_hashtable, -+ &handshake->entry); -+ -+ handshake->state = HANDSHAKE_CREATED_INITIATION; -+ ret = true; -+ -+out: -+ up_write(&handshake->lock); -+ up_read(&handshake->static_identity->lock); -+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); -+ return ret; -+} -+ -+struct wg_peer * -+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, -+ struct wg_device *wg) -+{ -+ struct wg_peer *peer = NULL, *ret_peer = NULL; -+ struct noise_handshake *handshake; -+ bool replay_attack, flood_attack; -+ u8 key[NOISE_SYMMETRIC_KEY_LEN]; -+ u8 chaining_key[NOISE_HASH_LEN]; -+ u8 
hash[NOISE_HASH_LEN]; -+ u8 s[NOISE_PUBLIC_KEY_LEN]; -+ u8 e[NOISE_PUBLIC_KEY_LEN]; -+ u8 t[NOISE_TIMESTAMP_LEN]; -+ u64 initiation_consumption; -+ -+ down_read(&wg->static_identity.lock); -+ if (unlikely(!wg->static_identity.has_identity)) -+ goto out; -+ -+ handshake_init(chaining_key, hash, wg->static_identity.static_public); -+ -+ /* e */ -+ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash); -+ -+ /* es */ -+ if (!mix_dh(chaining_key, key, wg->static_identity.static_private, e)) -+ goto out; -+ -+ /* s */ -+ if (!message_decrypt(s, src->encrypted_static, -+ sizeof(src->encrypted_static), key, hash)) -+ goto out; -+ -+ /* Lookup which peer we're actually talking to */ -+ peer = wg_pubkey_hashtable_lookup(wg->peer_hashtable, s); -+ if (!peer) -+ goto out; -+ handshake = &peer->handshake; -+ -+ /* ss */ -+ kdf(chaining_key, key, NULL, handshake->precomputed_static_static, -+ NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, -+ chaining_key); -+ -+ /* {t} */ -+ if (!message_decrypt(t, src->encrypted_timestamp, -+ sizeof(src->encrypted_timestamp), key, hash)) -+ goto out; -+ -+ down_read(&handshake->lock); -+ replay_attack = memcmp(t, handshake->latest_timestamp, -+ NOISE_TIMESTAMP_LEN) <= 0; -+ flood_attack = (s64)handshake->last_initiation_consumption + -+ NSEC_PER_SEC / INITIATIONS_PER_SECOND > -+ (s64)ktime_get_coarse_boottime_ns(); -+ up_read(&handshake->lock); -+ if (replay_attack || flood_attack) -+ goto out; -+ -+ /* Success! 
Copy everything to peer */ -+ down_write(&handshake->lock); -+ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN); -+ if (memcmp(t, handshake->latest_timestamp, NOISE_TIMESTAMP_LEN) > 0) -+ memcpy(handshake->latest_timestamp, t, NOISE_TIMESTAMP_LEN); -+ memcpy(handshake->hash, hash, NOISE_HASH_LEN); -+ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); -+ handshake->remote_index = src->sender_index; -+ if ((s64)(handshake->last_initiation_consumption - -+ (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0) -+ handshake->last_initiation_consumption = initiation_consumption; -+ handshake->state = HANDSHAKE_CONSUMED_INITIATION; -+ up_write(&handshake->lock); -+ ret_peer = peer; -+ -+out: -+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); -+ memzero_explicit(hash, NOISE_HASH_LEN); -+ memzero_explicit(chaining_key, NOISE_HASH_LEN); -+ up_read(&wg->static_identity.lock); -+ if (!ret_peer) -+ wg_peer_put(peer); -+ return ret_peer; -+} -+ -+bool wg_noise_handshake_create_response(struct message_handshake_response *dst, -+ struct noise_handshake *handshake) -+{ -+ u8 key[NOISE_SYMMETRIC_KEY_LEN]; -+ bool ret = false; -+ -+ /* We need to wait for crng _before_ taking any locks, since -+ * curve25519_generate_secret uses get_random_bytes_wait. 
-+ */ -+ wait_for_random_bytes(); -+ -+ down_read(&handshake->static_identity->lock); -+ down_write(&handshake->lock); -+ -+ if (handshake->state != HANDSHAKE_CONSUMED_INITIATION) -+ goto out; -+ -+ dst->header.type = cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE); -+ dst->receiver_index = handshake->remote_index; -+ -+ /* e */ -+ curve25519_generate_secret(handshake->ephemeral_private); -+ if (!curve25519_generate_public(dst->unencrypted_ephemeral, -+ handshake->ephemeral_private)) -+ goto out; -+ message_ephemeral(dst->unencrypted_ephemeral, -+ dst->unencrypted_ephemeral, handshake->chaining_key, -+ handshake->hash); -+ -+ /* ee */ -+ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private, -+ handshake->remote_ephemeral)) -+ goto out; -+ -+ /* se */ -+ if (!mix_dh(handshake->chaining_key, NULL, handshake->ephemeral_private, -+ handshake->remote_static)) -+ goto out; -+ -+ /* psk */ -+ mix_psk(handshake->chaining_key, handshake->hash, key, -+ handshake->preshared_key); -+ -+ /* {} */ -+ message_encrypt(dst->encrypted_nothing, NULL, 0, key, handshake->hash); -+ -+ dst->sender_index = wg_index_hashtable_insert( -+ handshake->entry.peer->device->index_hashtable, -+ &handshake->entry); -+ -+ handshake->state = HANDSHAKE_CREATED_RESPONSE; -+ ret = true; -+ -+out: -+ up_write(&handshake->lock); -+ up_read(&handshake->static_identity->lock); -+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); -+ return ret; -+} -+ -+struct wg_peer * -+wg_noise_handshake_consume_response(struct message_handshake_response *src, -+ struct wg_device *wg) -+{ -+ enum noise_handshake_state state = HANDSHAKE_ZEROED; -+ struct wg_peer *peer = NULL, *ret_peer = NULL; -+ struct noise_handshake *handshake; -+ u8 key[NOISE_SYMMETRIC_KEY_LEN]; -+ u8 hash[NOISE_HASH_LEN]; -+ u8 chaining_key[NOISE_HASH_LEN]; -+ u8 e[NOISE_PUBLIC_KEY_LEN]; -+ u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; -+ u8 static_private[NOISE_PUBLIC_KEY_LEN]; -+ -+ down_read(&wg->static_identity.lock); -+ -+ if 
(unlikely(!wg->static_identity.has_identity)) -+ goto out; -+ -+ handshake = (struct noise_handshake *)wg_index_hashtable_lookup( -+ wg->index_hashtable, INDEX_HASHTABLE_HANDSHAKE, -+ src->receiver_index, &peer); -+ if (unlikely(!handshake)) -+ goto out; -+ -+ down_read(&handshake->lock); -+ state = handshake->state; -+ memcpy(hash, handshake->hash, NOISE_HASH_LEN); -+ memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); -+ memcpy(ephemeral_private, handshake->ephemeral_private, -+ NOISE_PUBLIC_KEY_LEN); -+ up_read(&handshake->lock); -+ -+ if (state != HANDSHAKE_CREATED_INITIATION) -+ goto fail; -+ -+ /* e */ -+ message_ephemeral(e, src->unencrypted_ephemeral, chaining_key, hash); -+ -+ /* ee */ -+ if (!mix_dh(chaining_key, NULL, ephemeral_private, e)) -+ goto fail; -+ -+ /* se */ -+ if (!mix_dh(chaining_key, NULL, wg->static_identity.static_private, e)) -+ goto fail; -+ -+ /* psk */ -+ mix_psk(chaining_key, hash, key, handshake->preshared_key); -+ -+ /* {} */ -+ if (!message_decrypt(NULL, src->encrypted_nothing, -+ sizeof(src->encrypted_nothing), key, hash)) -+ goto fail; -+ -+ /* Success! Copy everything to peer */ -+ down_write(&handshake->lock); -+ /* It's important to check that the state is still the same, while we -+ * have an exclusive lock. 
-+ */ -+ if (handshake->state != state) { -+ up_write(&handshake->lock); -+ goto fail; -+ } -+ memcpy(handshake->remote_ephemeral, e, NOISE_PUBLIC_KEY_LEN); -+ memcpy(handshake->hash, hash, NOISE_HASH_LEN); -+ memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); -+ handshake->remote_index = src->sender_index; -+ handshake->state = HANDSHAKE_CONSUMED_RESPONSE; -+ up_write(&handshake->lock); -+ ret_peer = peer; -+ goto out; -+ -+fail: -+ wg_peer_put(peer); -+out: -+ memzero_explicit(key, NOISE_SYMMETRIC_KEY_LEN); -+ memzero_explicit(hash, NOISE_HASH_LEN); -+ memzero_explicit(chaining_key, NOISE_HASH_LEN); -+ memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); -+ memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); -+ up_read(&wg->static_identity.lock); -+ return ret_peer; -+} -+ -+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake, -+ struct noise_keypairs *keypairs) -+{ -+ struct noise_keypair *new_keypair; -+ bool ret = false; -+ -+ down_write(&handshake->lock); -+ if (handshake->state != HANDSHAKE_CREATED_RESPONSE && -+ handshake->state != HANDSHAKE_CONSUMED_RESPONSE) -+ goto out; -+ -+ new_keypair = keypair_create(handshake->entry.peer); -+ if (!new_keypair) -+ goto out; -+ new_keypair->i_am_the_initiator = handshake->state == -+ HANDSHAKE_CONSUMED_RESPONSE; -+ new_keypair->remote_index = handshake->remote_index; -+ -+ if (new_keypair->i_am_the_initiator) -+ derive_keys(&new_keypair->sending, &new_keypair->receiving, -+ handshake->chaining_key); -+ else -+ derive_keys(&new_keypair->receiving, &new_keypair->sending, -+ handshake->chaining_key); -+ -+ handshake_zero(handshake); -+ rcu_read_lock_bh(); -+ if (likely(!READ_ONCE(container_of(handshake, struct wg_peer, -+ handshake)->is_dead))) { -+ add_new_keypair(keypairs, new_keypair); -+ net_dbg_ratelimited("%s: Keypair %llu created for peer %llu\n", -+ handshake->entry.peer->device->dev->name, -+ new_keypair->internal_id, -+ handshake->entry.peer->internal_id); -+ ret = 
wg_index_hashtable_replace( -+ handshake->entry.peer->device->index_hashtable, -+ &handshake->entry, &new_keypair->entry); -+ } else { -+ kzfree(new_keypair); -+ } -+ rcu_read_unlock_bh(); -+ -+out: -+ up_write(&handshake->lock); -+ return ret; -+} ---- /dev/null -+++ b/drivers/net/wireguard/noise.h -@@ -0,0 +1,137 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+#ifndef _WG_NOISE_H -+#define _WG_NOISE_H -+ -+#include "messages.h" -+#include "peerlookup.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+union noise_counter { -+ struct { -+ u64 counter; -+ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; -+ spinlock_t lock; -+ } receive; -+ atomic64_t counter; -+}; -+ -+struct noise_symmetric_key { -+ u8 key[NOISE_SYMMETRIC_KEY_LEN]; -+ union noise_counter counter; -+ u64 birthdate; -+ bool is_valid; -+}; -+ -+struct noise_keypair { -+ struct index_hashtable_entry entry; -+ struct noise_symmetric_key sending; -+ struct noise_symmetric_key receiving; -+ __le32 remote_index; -+ bool i_am_the_initiator; -+ struct kref refcount; -+ struct rcu_head rcu; -+ u64 internal_id; -+}; -+ -+struct noise_keypairs { -+ struct noise_keypair __rcu *current_keypair; -+ struct noise_keypair __rcu *previous_keypair; -+ struct noise_keypair __rcu *next_keypair; -+ spinlock_t keypair_update_lock; -+}; -+ -+struct noise_static_identity { -+ u8 static_public[NOISE_PUBLIC_KEY_LEN]; -+ u8 static_private[NOISE_PUBLIC_KEY_LEN]; -+ struct rw_semaphore lock; -+ bool has_identity; -+}; -+ -+enum noise_handshake_state { -+ HANDSHAKE_ZEROED, -+ HANDSHAKE_CREATED_INITIATION, -+ HANDSHAKE_CONSUMED_INITIATION, -+ HANDSHAKE_CREATED_RESPONSE, -+ HANDSHAKE_CONSUMED_RESPONSE -+}; -+ -+struct noise_handshake { -+ struct index_hashtable_entry entry; -+ -+ enum noise_handshake_state state; -+ u64 last_initiation_consumption; -+ -+ struct noise_static_identity *static_identity; -+ -+ u8 
ephemeral_private[NOISE_PUBLIC_KEY_LEN]; -+ u8 remote_static[NOISE_PUBLIC_KEY_LEN]; -+ u8 remote_ephemeral[NOISE_PUBLIC_KEY_LEN]; -+ u8 precomputed_static_static[NOISE_PUBLIC_KEY_LEN]; -+ -+ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; -+ -+ u8 hash[NOISE_HASH_LEN]; -+ u8 chaining_key[NOISE_HASH_LEN]; -+ -+ u8 latest_timestamp[NOISE_TIMESTAMP_LEN]; -+ __le32 remote_index; -+ -+ /* Protects all members except the immutable (after noise_handshake_ -+ * init): remote_static, precomputed_static_static, static_identity. -+ */ -+ struct rw_semaphore lock; -+}; -+ -+struct wg_device; -+ -+void wg_noise_init(void); -+bool wg_noise_handshake_init(struct noise_handshake *handshake, -+ struct noise_static_identity *static_identity, -+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -+ struct wg_peer *peer); -+void wg_noise_handshake_clear(struct noise_handshake *handshake); -+static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns) -+{ -+ atomic64_set(handshake_ns, ktime_get_coarse_boottime_ns() - -+ (u64)(REKEY_TIMEOUT + 1) * NSEC_PER_SEC); -+} -+ -+void wg_noise_keypair_put(struct noise_keypair *keypair, bool unreference_now); -+struct noise_keypair *wg_noise_keypair_get(struct noise_keypair *keypair); -+void wg_noise_keypairs_clear(struct noise_keypairs *keypairs); -+bool wg_noise_received_with_keypair(struct noise_keypairs *keypairs, -+ struct noise_keypair *received_keypair); -+void wg_noise_expire_current_peer_keypairs(struct wg_peer *peer); -+ -+void wg_noise_set_static_identity_private_key( -+ struct noise_static_identity *static_identity, -+ const u8 private_key[NOISE_PUBLIC_KEY_LEN]); -+bool wg_noise_precompute_static_static(struct wg_peer *peer); -+ -+bool -+wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, -+ struct noise_handshake *handshake); -+struct wg_peer * -+wg_noise_handshake_consume_initiation(struct message_handshake_initiation *src, -+ struct 
wg_device *wg); -+ -+bool wg_noise_handshake_create_response(struct message_handshake_response *dst, -+ struct noise_handshake *handshake); -+struct wg_peer * -+wg_noise_handshake_consume_response(struct message_handshake_response *src, -+ struct wg_device *wg); -+ -+bool wg_noise_handshake_begin_session(struct noise_handshake *handshake, -+ struct noise_keypairs *keypairs); -+ -+#endif /* _WG_NOISE_H */ ---- /dev/null -+++ b/drivers/net/wireguard/peer.c -@@ -0,0 +1,240 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "peer.h" -+#include "device.h" -+#include "queueing.h" -+#include "timers.h" -+#include "peerlookup.h" -+#include "noise.h" -+ -+#include -+#include -+#include -+#include -+ -+static atomic64_t peer_counter = ATOMIC64_INIT(0); -+ -+struct wg_peer *wg_peer_create(struct wg_device *wg, -+ const u8 public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]) -+{ -+ struct wg_peer *peer; -+ int ret = -ENOMEM; -+ -+ lockdep_assert_held(&wg->device_update_lock); -+ -+ if (wg->num_peers >= MAX_PEERS_PER_DEVICE) -+ return ERR_PTR(ret); -+ -+ peer = kzalloc(sizeof(*peer), GFP_KERNEL); -+ if (unlikely(!peer)) -+ return ERR_PTR(ret); -+ peer->device = wg; -+ -+ if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity, -+ public_key, preshared_key, peer)) { -+ ret = -EKEYREJECTED; -+ goto err_1; -+ } -+ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) -+ goto err_1; -+ if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, -+ MAX_QUEUED_PACKETS)) -+ goto err_2; -+ if (wg_packet_queue_init(&peer->rx_queue, NULL, false, -+ MAX_QUEUED_PACKETS)) -+ goto err_3; -+ -+ peer->internal_id = atomic64_inc_return(&peer_counter); -+ peer->serial_work_cpu = nr_cpumask_bits; -+ wg_cookie_init(&peer->latest_cookie); -+ wg_timers_init(peer); -+ wg_cookie_checker_precompute_peer_keys(peer); -+ 
spin_lock_init(&peer->keypairs.keypair_update_lock); -+ INIT_WORK(&peer->transmit_handshake_work, -+ wg_packet_handshake_send_worker); -+ rwlock_init(&peer->endpoint_lock); -+ kref_init(&peer->refcount); -+ skb_queue_head_init(&peer->staged_packet_queue); -+ wg_noise_reset_last_sent_handshake(&peer->last_sent_handshake); -+ set_bit(NAPI_STATE_NO_BUSY_POLL, &peer->napi.state); -+ netif_napi_add(wg->dev, &peer->napi, wg_packet_rx_poll, -+ NAPI_POLL_WEIGHT); -+ napi_enable(&peer->napi); -+ list_add_tail(&peer->peer_list, &wg->peer_list); -+ INIT_LIST_HEAD(&peer->allowedips_list); -+ wg_pubkey_hashtable_add(wg->peer_hashtable, peer); -+ ++wg->num_peers; -+ pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id); -+ return peer; -+ -+err_3: -+ wg_packet_queue_free(&peer->tx_queue, false); -+err_2: -+ dst_cache_destroy(&peer->endpoint_cache); -+err_1: -+ kfree(peer); -+ return ERR_PTR(ret); -+} -+ -+struct wg_peer *wg_peer_get_maybe_zero(struct wg_peer *peer) -+{ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(), -+ "Taking peer reference without holding the RCU read lock"); -+ if (unlikely(!peer || !kref_get_unless_zero(&peer->refcount))) -+ return NULL; -+ return peer; -+} -+ -+static void peer_make_dead(struct wg_peer *peer) -+{ -+ /* Remove from configuration-time lookup structures. */ -+ list_del_init(&peer->peer_list); -+ wg_allowedips_remove_by_peer(&peer->device->peer_allowedips, peer, -+ &peer->device->device_update_lock); -+ wg_pubkey_hashtable_remove(peer->device->peer_hashtable, peer); -+ -+ /* Mark as dead, so that we don't allow jumping contexts after. */ -+ WRITE_ONCE(peer->is_dead, true); -+ -+ /* The caller must now synchronize_rcu() for this to take effect. */ -+} -+ -+static void peer_remove_after_dead(struct wg_peer *peer) -+{ -+ WARN_ON(!peer->is_dead); -+ -+ /* No more keypairs can be created for this peer, since is_dead protects -+ * add_new_keypair, so we can now destroy existing ones. 
-+ */ -+ wg_noise_keypairs_clear(&peer->keypairs); -+ -+ /* Destroy all ongoing timers that were in-flight at the beginning of -+ * this function. -+ */ -+ wg_timers_stop(peer); -+ -+ /* The transition between packet encryption/decryption queues isn't -+ * guarded by is_dead, but each reference's life is strictly bounded by -+ * two generations: once for parallel crypto and once for serial -+ * ingestion, so we can simply flush twice, and be sure that we no -+ * longer have references inside these queues. -+ */ -+ -+ /* a) For encrypt/decrypt. */ -+ flush_workqueue(peer->device->packet_crypt_wq); -+ /* b.1) For send (but not receive, since that's napi). */ -+ flush_workqueue(peer->device->packet_crypt_wq); -+ /* b.2.1) For receive (but not send, since that's wq). */ -+ napi_disable(&peer->napi); -+ /* b.2.1) It's now safe to remove the napi struct, which must be done -+ * here from process context. -+ */ -+ netif_napi_del(&peer->napi); -+ -+ /* Ensure any workstructs we own (like transmit_handshake_work or -+ * clear_peer_work) no longer are in use. -+ */ -+ flush_workqueue(peer->device->handshake_send_wq); -+ -+ /* After the above flushes, a peer might still be active in a few -+ * different contexts: 1) from xmit(), before hitting is_dead and -+ * returning, 2) from wg_packet_consume_data(), before hitting is_dead -+ * and returning, 3) from wg_receive_handshake_packet() after a point -+ * where it has processed an incoming handshake packet, but where -+ * all calls to pass it off to timers fails because of is_dead. 
We won't -+ * have new references in (1) eventually, because we're removed from -+ * allowedips; we won't have new references in (2) eventually, because -+ * wg_index_hashtable_lookup will always return NULL, since we removed -+ * all existing keypairs and no more can be created; we won't have new -+ * references in (3) eventually, because we're removed from the pubkey -+ * hash table, which allows for a maximum of one handshake response, -+ * via the still-uncleared index hashtable entry, but not more than one, -+ * and in wg_cookie_message_consume, the lookup eventually gets a peer -+ * with a refcount of zero, so no new reference is taken. -+ */ -+ -+ --peer->device->num_peers; -+ wg_peer_put(peer); -+} -+ -+/* We have a separate "remove" function make sure that all active places where -+ * a peer is currently operating will eventually come to an end and not pass -+ * their reference onto another context. -+ */ -+void wg_peer_remove(struct wg_peer *peer) -+{ -+ if (unlikely(!peer)) -+ return; -+ lockdep_assert_held(&peer->device->device_update_lock); -+ -+ peer_make_dead(peer); -+ synchronize_rcu(); -+ peer_remove_after_dead(peer); -+} -+ -+void wg_peer_remove_all(struct wg_device *wg) -+{ -+ struct wg_peer *peer, *temp; -+ LIST_HEAD(dead_peers); -+ -+ lockdep_assert_held(&wg->device_update_lock); -+ -+ /* Avoid having to traverse individually for each one. 
*/ -+ wg_allowedips_free(&wg->peer_allowedips, &wg->device_update_lock); -+ -+ list_for_each_entry_safe(peer, temp, &wg->peer_list, peer_list) { -+ peer_make_dead(peer); -+ list_add_tail(&peer->peer_list, &dead_peers); -+ } -+ synchronize_rcu(); -+ list_for_each_entry_safe(peer, temp, &dead_peers, peer_list) -+ peer_remove_after_dead(peer); -+} -+ -+static void rcu_release(struct rcu_head *rcu) -+{ -+ struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu); -+ -+ dst_cache_destroy(&peer->endpoint_cache); -+ wg_packet_queue_free(&peer->rx_queue, false); -+ wg_packet_queue_free(&peer->tx_queue, false); -+ -+ /* The final zeroing takes care of clearing any remaining handshake key -+ * material and other potentially sensitive information. -+ */ -+ kzfree(peer); -+} -+ -+static void kref_release(struct kref *refcount) -+{ -+ struct wg_peer *peer = container_of(refcount, struct wg_peer, refcount); -+ -+ pr_debug("%s: Peer %llu (%pISpfsc) destroyed\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ -+ /* Remove ourself from dynamic runtime lookup structures, now that the -+ * last reference is gone. -+ */ -+ wg_index_hashtable_remove(peer->device->index_hashtable, -+ &peer->handshake.entry); -+ -+ /* Remove any lingering packets that didn't have a chance to be -+ * transmitted. -+ */ -+ wg_packet_purge_staged_packets(peer); -+ -+ /* Free the memory used. */ -+ call_rcu(&peer->rcu, rcu_release); -+} -+ -+void wg_peer_put(struct wg_peer *peer) -+{ -+ if (unlikely(!peer)) -+ return; -+ kref_put(&peer->refcount, kref_release); -+} ---- /dev/null -+++ b/drivers/net/wireguard/peer.h -@@ -0,0 +1,83 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_PEER_H -+#define _WG_PEER_H -+ -+#include "device.h" -+#include "noise.h" -+#include "cookie.h" -+ -+#include -+#include -+#include -+#include -+#include -+ -+struct wg_device; -+ -+struct endpoint { -+ union { -+ struct sockaddr addr; -+ struct sockaddr_in addr4; -+ struct sockaddr_in6 addr6; -+ }; -+ union { -+ struct { -+ struct in_addr src4; -+ /* Essentially the same as addr6->scope_id */ -+ int src_if4; -+ }; -+ struct in6_addr src6; -+ }; -+}; -+ -+struct wg_peer { -+ struct wg_device *device; -+ struct crypt_queue tx_queue, rx_queue; -+ struct sk_buff_head staged_packet_queue; -+ int serial_work_cpu; -+ struct noise_keypairs keypairs; -+ struct endpoint endpoint; -+ struct dst_cache endpoint_cache; -+ rwlock_t endpoint_lock; -+ struct noise_handshake handshake; -+ atomic64_t last_sent_handshake; -+ struct work_struct transmit_handshake_work, clear_peer_work; -+ struct cookie latest_cookie; -+ struct hlist_node pubkey_hash; -+ u64 rx_bytes, tx_bytes; -+ struct timer_list timer_retransmit_handshake, timer_send_keepalive; -+ struct timer_list timer_new_handshake, timer_zero_key_material; -+ struct timer_list timer_persistent_keepalive; -+ unsigned int timer_handshake_attempts; -+ u16 persistent_keepalive_interval; -+ bool timer_need_another_keepalive; -+ bool sent_lastminute_handshake; -+ struct timespec64 walltime_last_handshake; -+ struct kref refcount; -+ struct rcu_head rcu; -+ struct list_head peer_list; -+ struct list_head allowedips_list; -+ u64 internal_id; -+ struct napi_struct napi; -+ bool is_dead; -+}; -+ -+struct wg_peer *wg_peer_create(struct wg_device *wg, -+ const u8 public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]); -+ -+struct wg_peer *__must_check wg_peer_get_maybe_zero(struct wg_peer *peer); -+static inline struct wg_peer *wg_peer_get(struct wg_peer *peer) -+{ -+ kref_get(&peer->refcount); -+ return peer; -+} -+void wg_peer_put(struct wg_peer *peer); -+void wg_peer_remove(struct 
wg_peer *peer); -+void wg_peer_remove_all(struct wg_device *wg); -+ -+#endif /* _WG_PEER_H */ ---- /dev/null -+++ b/drivers/net/wireguard/peerlookup.c -@@ -0,0 +1,221 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "peerlookup.h" -+#include "peer.h" -+#include "noise.h" -+ -+static struct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, -+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]) -+{ -+ /* siphash gives us a secure 64bit number based on a random key. Since -+ * the bits are uniformly distributed, we can then mask off to get the -+ * bits we need. -+ */ -+ const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key); -+ -+ return &table->hashtable[hash & (HASH_SIZE(table->hashtable) - 1)]; -+} -+ -+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void) -+{ -+ struct pubkey_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL); -+ -+ if (!table) -+ return NULL; -+ -+ get_random_bytes(&table->key, sizeof(table->key)); -+ hash_init(table->hashtable); -+ mutex_init(&table->lock); -+ return table; -+} -+ -+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table, -+ struct wg_peer *peer) -+{ -+ mutex_lock(&table->lock); -+ hlist_add_head_rcu(&peer->pubkey_hash, -+ pubkey_bucket(table, peer->handshake.remote_static)); -+ mutex_unlock(&table->lock); -+} -+ -+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table, -+ struct wg_peer *peer) -+{ -+ mutex_lock(&table->lock); -+ hlist_del_init_rcu(&peer->pubkey_hash); -+ mutex_unlock(&table->lock); -+} -+ -+/* Returns a strong reference to a peer */ -+struct wg_peer * -+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table, -+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]) -+{ -+ struct wg_peer *iter_peer, *peer = NULL; -+ -+ rcu_read_lock_bh(); -+ hlist_for_each_entry_rcu_bh(iter_peer, pubkey_bucket(table, pubkey), -+ pubkey_hash) { -+ if (!memcmp(pubkey, iter_peer->handshake.remote_static, -+ NOISE_PUBLIC_KEY_LEN)) 
{ -+ peer = iter_peer; -+ break; -+ } -+ } -+ peer = wg_peer_get_maybe_zero(peer); -+ rcu_read_unlock_bh(); -+ return peer; -+} -+ -+static struct hlist_head *index_bucket(struct index_hashtable *table, -+ const __le32 index) -+{ -+ /* Since the indices are random and thus all bits are uniformly -+ * distributed, we can find its bucket simply by masking. -+ */ -+ return &table->hashtable[(__force u32)index & -+ (HASH_SIZE(table->hashtable) - 1)]; -+} -+ -+struct index_hashtable *wg_index_hashtable_alloc(void) -+{ -+ struct index_hashtable *table = kvmalloc(sizeof(*table), GFP_KERNEL); -+ -+ if (!table) -+ return NULL; -+ -+ hash_init(table->hashtable); -+ spin_lock_init(&table->lock); -+ return table; -+} -+ -+/* At the moment, we limit ourselves to 2^20 total peers, which generally might -+ * amount to 2^20*3 items in this hashtable. The algorithm below works by -+ * picking a random number and testing it. We can see that these limits mean we -+ * usually succeed pretty quickly: -+ * -+ * >>> def calculation(tries, size): -+ * ... return (size / 2**32)**(tries - 1) * (1 - (size / 2**32)) -+ * ... -+ * >>> calculation(1, 2**20 * 3) -+ * 0.999267578125 -+ * >>> calculation(2, 2**20 * 3) -+ * 0.0007318854331970215 -+ * >>> calculation(3, 2**20 * 3) -+ * 5.360489012673497e-07 -+ * >>> calculation(4, 2**20 * 3) -+ * 3.9261394135792216e-10 -+ * -+ * At the moment, we don't do any masking, so this algorithm isn't exactly -+ * constant time in either the random guessing or in the hash list lookup. We -+ * could require a minimum of 3 tries, which would successfully mask the -+ * guessing. this would not, however, help with the growing hash lengths, which -+ * is another thing to consider moving forward. 
-+ */ -+ -+__le32 wg_index_hashtable_insert(struct index_hashtable *table, -+ struct index_hashtable_entry *entry) -+{ -+ struct index_hashtable_entry *existing_entry; -+ -+ spin_lock_bh(&table->lock); -+ hlist_del_init_rcu(&entry->index_hash); -+ spin_unlock_bh(&table->lock); -+ -+ rcu_read_lock_bh(); -+ -+search_unused_slot: -+ /* First we try to find an unused slot, randomly, while unlocked. */ -+ entry->index = (__force __le32)get_random_u32(); -+ hlist_for_each_entry_rcu_bh(existing_entry, -+ index_bucket(table, entry->index), -+ index_hash) { -+ if (existing_entry->index == entry->index) -+ /* If it's already in use, we continue searching. */ -+ goto search_unused_slot; -+ } -+ -+ /* Once we've found an unused slot, we lock it, and then double-check -+ * that nobody else stole it from us. -+ */ -+ spin_lock_bh(&table->lock); -+ hlist_for_each_entry_rcu_bh(existing_entry, -+ index_bucket(table, entry->index), -+ index_hash) { -+ if (existing_entry->index == entry->index) { -+ spin_unlock_bh(&table->lock); -+ /* If it was stolen, we start over. */ -+ goto search_unused_slot; -+ } -+ } -+ /* Otherwise, we know we have it exclusively (since we're locked), -+ * so we insert. -+ */ -+ hlist_add_head_rcu(&entry->index_hash, -+ index_bucket(table, entry->index)); -+ spin_unlock_bh(&table->lock); -+ -+ rcu_read_unlock_bh(); -+ -+ return entry->index; -+} -+ -+bool wg_index_hashtable_replace(struct index_hashtable *table, -+ struct index_hashtable_entry *old, -+ struct index_hashtable_entry *new) -+{ -+ if (unlikely(hlist_unhashed(&old->index_hash))) -+ return false; -+ spin_lock_bh(&table->lock); -+ new->index = old->index; -+ hlist_replace_rcu(&old->index_hash, &new->index_hash); -+ -+ /* Calling init here NULLs out index_hash, and in fact after this -+ * function returns, it's theoretically possible for this to get -+ * reinserted elsewhere. 
That means the RCU lookup below might either -+ * terminate early or jump between buckets, in which case the packet -+ * simply gets dropped, which isn't terrible. -+ */ -+ INIT_HLIST_NODE(&old->index_hash); -+ spin_unlock_bh(&table->lock); -+ return true; -+} -+ -+void wg_index_hashtable_remove(struct index_hashtable *table, -+ struct index_hashtable_entry *entry) -+{ -+ spin_lock_bh(&table->lock); -+ hlist_del_init_rcu(&entry->index_hash); -+ spin_unlock_bh(&table->lock); -+} -+ -+/* Returns a strong reference to a entry->peer */ -+struct index_hashtable_entry * -+wg_index_hashtable_lookup(struct index_hashtable *table, -+ const enum index_hashtable_type type_mask, -+ const __le32 index, struct wg_peer **peer) -+{ -+ struct index_hashtable_entry *iter_entry, *entry = NULL; -+ -+ rcu_read_lock_bh(); -+ hlist_for_each_entry_rcu_bh(iter_entry, index_bucket(table, index), -+ index_hash) { -+ if (iter_entry->index == index) { -+ if (likely(iter_entry->type & type_mask)) -+ entry = iter_entry; -+ break; -+ } -+ } -+ if (likely(entry)) { -+ entry->peer = wg_peer_get_maybe_zero(entry->peer); -+ if (likely(entry->peer)) -+ *peer = entry->peer; -+ else -+ entry = NULL; -+ } -+ rcu_read_unlock_bh(); -+ return entry; -+} ---- /dev/null -+++ b/drivers/net/wireguard/peerlookup.h -@@ -0,0 +1,64 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_PEERLOOKUP_H -+#define _WG_PEERLOOKUP_H -+ -+#include "messages.h" -+ -+#include -+#include -+#include -+ -+struct wg_peer; -+ -+struct pubkey_hashtable { -+ /* TODO: move to rhashtable */ -+ DECLARE_HASHTABLE(hashtable, 11); -+ siphash_key_t key; -+ struct mutex lock; -+}; -+ -+struct pubkey_hashtable *wg_pubkey_hashtable_alloc(void); -+void wg_pubkey_hashtable_add(struct pubkey_hashtable *table, -+ struct wg_peer *peer); -+void wg_pubkey_hashtable_remove(struct pubkey_hashtable *table, -+ struct wg_peer *peer); -+struct wg_peer * -+wg_pubkey_hashtable_lookup(struct pubkey_hashtable *table, -+ const u8 pubkey[NOISE_PUBLIC_KEY_LEN]); -+ -+struct index_hashtable { -+ /* TODO: move to rhashtable */ -+ DECLARE_HASHTABLE(hashtable, 13); -+ spinlock_t lock; -+}; -+ -+enum index_hashtable_type { -+ INDEX_HASHTABLE_HANDSHAKE = 1U << 0, -+ INDEX_HASHTABLE_KEYPAIR = 1U << 1 -+}; -+ -+struct index_hashtable_entry { -+ struct wg_peer *peer; -+ struct hlist_node index_hash; -+ enum index_hashtable_type type; -+ __le32 index; -+}; -+ -+struct index_hashtable *wg_index_hashtable_alloc(void); -+__le32 wg_index_hashtable_insert(struct index_hashtable *table, -+ struct index_hashtable_entry *entry); -+bool wg_index_hashtable_replace(struct index_hashtable *table, -+ struct index_hashtable_entry *old, -+ struct index_hashtable_entry *new); -+void wg_index_hashtable_remove(struct index_hashtable *table, -+ struct index_hashtable_entry *entry); -+struct index_hashtable_entry * -+wg_index_hashtable_lookup(struct index_hashtable *table, -+ const enum index_hashtable_type type_mask, -+ const __le32 index, struct wg_peer **peer); -+ -+#endif /* _WG_PEERLOOKUP_H */ ---- /dev/null -+++ b/drivers/net/wireguard/queueing.c -@@ -0,0 +1,53 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "queueing.h" -+ -+struct multicore_worker __percpu * -+wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) -+{ -+ int cpu; -+ struct multicore_worker __percpu *worker = -+ alloc_percpu(struct multicore_worker); -+ -+ if (!worker) -+ return NULL; -+ -+ for_each_possible_cpu(cpu) { -+ per_cpu_ptr(worker, cpu)->ptr = ptr; -+ INIT_WORK(&per_cpu_ptr(worker, cpu)->work, function); -+ } -+ return worker; -+} -+ -+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, -+ bool multicore, unsigned int len) -+{ -+ int ret; -+ -+ memset(queue, 0, sizeof(*queue)); -+ ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); -+ if (ret) -+ return ret; -+ if (function) { -+ if (multicore) { -+ queue->worker = wg_packet_percpu_multicore_worker_alloc( -+ function, queue); -+ if (!queue->worker) -+ return -ENOMEM; -+ } else { -+ INIT_WORK(&queue->work, function); -+ } -+ } -+ return 0; -+} -+ -+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore) -+{ -+ if (multicore) -+ free_percpu(queue->worker); -+ WARN_ON(!__ptr_ring_empty(&queue->ring)); -+ ptr_ring_cleanup(&queue->ring, NULL); -+} ---- /dev/null -+++ b/drivers/net/wireguard/queueing.h -@@ -0,0 +1,197 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_QUEUEING_H -+#define _WG_QUEUEING_H -+ -+#include "peer.h" -+#include -+#include -+#include -+#include -+ -+struct wg_device; -+struct wg_peer; -+struct multicore_worker; -+struct crypt_queue; -+struct sk_buff; -+ -+/* queueing.c APIs: */ -+int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, -+ bool multicore, unsigned int len); -+void wg_packet_queue_free(struct crypt_queue *queue, bool multicore); -+struct multicore_worker __percpu * -+wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); -+ -+/* receive.c APIs: */ -+void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb); -+void wg_packet_handshake_receive_worker(struct work_struct *work); -+/* NAPI poll function: */ -+int wg_packet_rx_poll(struct napi_struct *napi, int budget); -+/* Workqueue worker: */ -+void wg_packet_decrypt_worker(struct work_struct *work); -+ -+/* send.c APIs: */ -+void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer, -+ bool is_retry); -+void wg_packet_send_handshake_response(struct wg_peer *peer); -+void wg_packet_send_handshake_cookie(struct wg_device *wg, -+ struct sk_buff *initiating_skb, -+ __le32 sender_index); -+void wg_packet_send_keepalive(struct wg_peer *peer); -+void wg_packet_purge_staged_packets(struct wg_peer *peer); -+void wg_packet_send_staged_packets(struct wg_peer *peer); -+/* Workqueue workers: */ -+void wg_packet_handshake_send_worker(struct work_struct *work); -+void wg_packet_tx_worker(struct work_struct *work); -+void wg_packet_encrypt_worker(struct work_struct *work); -+ -+enum packet_state { -+ PACKET_STATE_UNCRYPTED, -+ PACKET_STATE_CRYPTED, -+ PACKET_STATE_DEAD -+}; -+ -+struct packet_cb { -+ u64 nonce; -+ struct noise_keypair *keypair; -+ atomic_t state; -+ u32 mtu; -+ u8 ds; -+}; -+ -+#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) -+#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) -+ -+/* Returns either the correct skb->protocol value, or 0 if 
invalid. */ -+static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb) -+{ -+ if (skb_network_header(skb) >= skb->head && -+ (skb_network_header(skb) + sizeof(struct iphdr)) <= -+ skb_tail_pointer(skb) && -+ ip_hdr(skb)->version == 4) -+ return htons(ETH_P_IP); -+ if (skb_network_header(skb) >= skb->head && -+ (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= -+ skb_tail_pointer(skb) && -+ ipv6_hdr(skb)->version == 6) -+ return htons(ETH_P_IPV6); -+ return 0; -+} -+ -+static inline void wg_reset_packet(struct sk_buff *skb) -+{ -+ const int pfmemalloc = skb->pfmemalloc; -+ -+ skb_scrub_packet(skb, true); -+ memset(&skb->headers_start, 0, -+ offsetof(struct sk_buff, headers_end) - -+ offsetof(struct sk_buff, headers_start)); -+ skb->pfmemalloc = pfmemalloc; -+ skb->queue_mapping = 0; -+ skb->nohdr = 0; -+ skb->peeked = 0; -+ skb->mac_len = 0; -+ skb->dev = NULL; -+#ifdef CONFIG_NET_SCHED -+ skb->tc_index = 0; -+#endif -+ skb_reset_redirect(skb); -+ skb->hdr_len = skb_headroom(skb); -+ skb_reset_mac_header(skb); -+ skb_reset_network_header(skb); -+ skb_reset_transport_header(skb); -+ skb_probe_transport_header(skb); -+ skb_reset_inner_headers(skb); -+} -+ -+static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id) -+{ -+ unsigned int cpu = *stored_cpu, cpu_index, i; -+ -+ if (unlikely(cpu == nr_cpumask_bits || -+ !cpumask_test_cpu(cpu, cpu_online_mask))) { -+ cpu_index = id % cpumask_weight(cpu_online_mask); -+ cpu = cpumask_first(cpu_online_mask); -+ for (i = 0; i < cpu_index; ++i) -+ cpu = cpumask_next(cpu, cpu_online_mask); -+ *stored_cpu = cpu; -+ } -+ return cpu; -+} -+ -+/* This function is racy, in the sense that next is unlocked, so it could return -+ * the same CPU twice. A race-free version of this would be to instead store an -+ * atomic sequence number, do an increment-and-return, and then iterate through -+ * every possible CPU until we get to that index -- choose_cpu. 
However that's -+ * a bit slower, and it doesn't seem like this potential race actually -+ * introduces any performance loss, so we live with it. -+ */ -+static inline int wg_cpumask_next_online(int *next) -+{ -+ int cpu = *next; -+ -+ while (unlikely(!cpumask_test_cpu(cpu, cpu_online_mask))) -+ cpu = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; -+ *next = cpumask_next(cpu, cpu_online_mask) % nr_cpumask_bits; -+ return cpu; -+} -+ -+static inline int wg_queue_enqueue_per_device_and_peer( -+ struct crypt_queue *device_queue, struct crypt_queue *peer_queue, -+ struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) -+{ -+ int cpu; -+ -+ atomic_set_release(&PACKET_CB(skb)->state, PACKET_STATE_UNCRYPTED); -+ /* We first queue this up for the peer ingestion, but the consumer -+ * will wait for the state to change to CRYPTED or DEAD before. -+ */ -+ if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb))) -+ return -ENOSPC; -+ /* Then we queue it up in the device queue, which consumes the -+ * packet as soon as it can. -+ */ -+ cpu = wg_cpumask_next_online(next_cpu); -+ if (unlikely(ptr_ring_produce_bh(&device_queue->ring, skb))) -+ return -EPIPE; -+ queue_work_on(cpu, wq, &per_cpu_ptr(device_queue->worker, cpu)->work); -+ return 0; -+} -+ -+static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue, -+ struct sk_buff *skb, -+ enum packet_state state) -+{ -+ /* We take a reference, because as soon as we call atomic_set, the -+ * peer can be freed from below us. 
-+ */ -+ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); -+ -+ atomic_set_release(&PACKET_CB(skb)->state, state); -+ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, -+ peer->internal_id), -+ peer->device->packet_crypt_wq, &queue->work); -+ wg_peer_put(peer); -+} -+ -+static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb, -+ enum packet_state state) -+{ -+ /* We take a reference, because as soon as we call atomic_set, the -+ * peer can be freed from below us. -+ */ -+ struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); -+ -+ atomic_set_release(&PACKET_CB(skb)->state, state); -+ napi_schedule(&peer->napi); -+ wg_peer_put(peer); -+} -+ -+#ifdef DEBUG -+bool wg_packet_counter_selftest(void); -+#endif -+ -+#endif /* _WG_QUEUEING_H */ ---- /dev/null -+++ b/drivers/net/wireguard/ratelimiter.c -@@ -0,0 +1,223 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "ratelimiter.h" -+#include -+#include -+#include -+#include -+ -+static struct kmem_cache *entry_cache; -+static hsiphash_key_t key; -+static spinlock_t table_lock = __SPIN_LOCK_UNLOCKED("ratelimiter_table_lock"); -+static DEFINE_MUTEX(init_lock); -+static u64 init_refcnt; /* Protected by init_lock, hence not atomic. 
*/ -+static atomic_t total_entries = ATOMIC_INIT(0); -+static unsigned int max_entries, table_size; -+static void wg_ratelimiter_gc_entries(struct work_struct *); -+static DECLARE_DEFERRABLE_WORK(gc_work, wg_ratelimiter_gc_entries); -+static struct hlist_head *table_v4; -+#if IS_ENABLED(CONFIG_IPV6) -+static struct hlist_head *table_v6; -+#endif -+ -+struct ratelimiter_entry { -+ u64 last_time_ns, tokens, ip; -+ void *net; -+ spinlock_t lock; -+ struct hlist_node hash; -+ struct rcu_head rcu; -+}; -+ -+enum { -+ PACKETS_PER_SECOND = 20, -+ PACKETS_BURSTABLE = 5, -+ PACKET_COST = NSEC_PER_SEC / PACKETS_PER_SECOND, -+ TOKEN_MAX = PACKET_COST * PACKETS_BURSTABLE -+}; -+ -+static void entry_free(struct rcu_head *rcu) -+{ -+ kmem_cache_free(entry_cache, -+ container_of(rcu, struct ratelimiter_entry, rcu)); -+ atomic_dec(&total_entries); -+} -+ -+static void entry_uninit(struct ratelimiter_entry *entry) -+{ -+ hlist_del_rcu(&entry->hash); -+ call_rcu(&entry->rcu, entry_free); -+} -+ -+/* Calling this function with a NULL work uninits all entries. 
*/ -+static void wg_ratelimiter_gc_entries(struct work_struct *work) -+{ -+ const u64 now = ktime_get_coarse_boottime_ns(); -+ struct ratelimiter_entry *entry; -+ struct hlist_node *temp; -+ unsigned int i; -+ -+ for (i = 0; i < table_size; ++i) { -+ spin_lock(&table_lock); -+ hlist_for_each_entry_safe(entry, temp, &table_v4[i], hash) { -+ if (unlikely(!work) || -+ now - entry->last_time_ns > NSEC_PER_SEC) -+ entry_uninit(entry); -+ } -+#if IS_ENABLED(CONFIG_IPV6) -+ hlist_for_each_entry_safe(entry, temp, &table_v6[i], hash) { -+ if (unlikely(!work) || -+ now - entry->last_time_ns > NSEC_PER_SEC) -+ entry_uninit(entry); -+ } -+#endif -+ spin_unlock(&table_lock); -+ if (likely(work)) -+ cond_resched(); -+ } -+ if (likely(work)) -+ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ); -+} -+ -+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net) -+{ -+ /* We only take the bottom half of the net pointer, so that we can hash -+ * 3 words in the end. This way, siphash's len param fits into the final -+ * u32, and we don't incur an extra round. -+ */ -+ const u32 net_word = (unsigned long)net; -+ struct ratelimiter_entry *entry; -+ struct hlist_head *bucket; -+ u64 ip; -+ -+ if (skb->protocol == htons(ETH_P_IP)) { -+ ip = (u64 __force)ip_hdr(skb)->saddr; -+ bucket = &table_v4[hsiphash_2u32(net_word, ip, &key) & -+ (table_size - 1)]; -+ } -+#if IS_ENABLED(CONFIG_IPV6) -+ else if (skb->protocol == htons(ETH_P_IPV6)) { -+ /* Only use 64 bits, so as to ratelimit the whole /64. */ -+ memcpy(&ip, &ipv6_hdr(skb)->saddr, sizeof(ip)); -+ bucket = &table_v6[hsiphash_3u32(net_word, ip >> 32, ip, &key) & -+ (table_size - 1)]; -+ } -+#endif -+ else -+ return false; -+ rcu_read_lock(); -+ hlist_for_each_entry_rcu(entry, bucket, hash) { -+ if (entry->net == net && entry->ip == ip) { -+ u64 now, tokens; -+ bool ret; -+ /* Quasi-inspired by nft_limit.c, but this is actually a -+ * slightly different algorithm. 
Namely, we incorporate -+ * the burst as part of the maximum tokens, rather than -+ * as part of the rate. -+ */ -+ spin_lock(&entry->lock); -+ now = ktime_get_coarse_boottime_ns(); -+ tokens = min_t(u64, TOKEN_MAX, -+ entry->tokens + now - -+ entry->last_time_ns); -+ entry->last_time_ns = now; -+ ret = tokens >= PACKET_COST; -+ entry->tokens = ret ? tokens - PACKET_COST : tokens; -+ spin_unlock(&entry->lock); -+ rcu_read_unlock(); -+ return ret; -+ } -+ } -+ rcu_read_unlock(); -+ -+ if (atomic_inc_return(&total_entries) > max_entries) -+ goto err_oom; -+ -+ entry = kmem_cache_alloc(entry_cache, GFP_KERNEL); -+ if (unlikely(!entry)) -+ goto err_oom; -+ -+ entry->net = net; -+ entry->ip = ip; -+ INIT_HLIST_NODE(&entry->hash); -+ spin_lock_init(&entry->lock); -+ entry->last_time_ns = ktime_get_coarse_boottime_ns(); -+ entry->tokens = TOKEN_MAX - PACKET_COST; -+ spin_lock(&table_lock); -+ hlist_add_head_rcu(&entry->hash, bucket); -+ spin_unlock(&table_lock); -+ return true; -+ -+err_oom: -+ atomic_dec(&total_entries); -+ return false; -+} -+ -+int wg_ratelimiter_init(void) -+{ -+ mutex_lock(&init_lock); -+ if (++init_refcnt != 1) -+ goto out; -+ -+ entry_cache = KMEM_CACHE(ratelimiter_entry, 0); -+ if (!entry_cache) -+ goto err; -+ -+ /* xt_hashlimit.c uses a slightly different algorithm for ratelimiting, -+ * but what it shares in common is that it uses a massive hashtable. So, -+ * we borrow their wisdom about good table sizes on different systems -+ * dependent on RAM. This calculation here comes from there. -+ */ -+ table_size = (totalram_pages() > (1U << 30) / PAGE_SIZE) ? 
8192 : -+ max_t(unsigned long, 16, roundup_pow_of_two( -+ (totalram_pages() << PAGE_SHIFT) / -+ (1U << 14) / sizeof(struct hlist_head))); -+ max_entries = table_size * 8; -+ -+ table_v4 = kvzalloc(table_size * sizeof(*table_v4), GFP_KERNEL); -+ if (unlikely(!table_v4)) -+ goto err_kmemcache; -+ -+#if IS_ENABLED(CONFIG_IPV6) -+ table_v6 = kvzalloc(table_size * sizeof(*table_v6), GFP_KERNEL); -+ if (unlikely(!table_v6)) { -+ kvfree(table_v4); -+ goto err_kmemcache; -+ } -+#endif -+ -+ queue_delayed_work(system_power_efficient_wq, &gc_work, HZ); -+ get_random_bytes(&key, sizeof(key)); -+out: -+ mutex_unlock(&init_lock); -+ return 0; -+ -+err_kmemcache: -+ kmem_cache_destroy(entry_cache); -+err: -+ --init_refcnt; -+ mutex_unlock(&init_lock); -+ return -ENOMEM; -+} -+ -+void wg_ratelimiter_uninit(void) -+{ -+ mutex_lock(&init_lock); -+ if (!init_refcnt || --init_refcnt) -+ goto out; -+ -+ cancel_delayed_work_sync(&gc_work); -+ wg_ratelimiter_gc_entries(NULL); -+ rcu_barrier(); -+ kvfree(table_v4); -+#if IS_ENABLED(CONFIG_IPV6) -+ kvfree(table_v6); -+#endif -+ kmem_cache_destroy(entry_cache); -+out: -+ mutex_unlock(&init_lock); -+} -+ -+#include "selftest/ratelimiter.c" ---- /dev/null -+++ b/drivers/net/wireguard/ratelimiter.h -@@ -0,0 +1,19 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifndef _WG_RATELIMITER_H -+#define _WG_RATELIMITER_H -+ -+#include -+ -+int wg_ratelimiter_init(void); -+void wg_ratelimiter_uninit(void); -+bool wg_ratelimiter_allow(struct sk_buff *skb, struct net *net); -+ -+#ifdef DEBUG -+bool wg_ratelimiter_selftest(void); -+#endif -+ -+#endif /* _WG_RATELIMITER_H */ ---- /dev/null -+++ b/drivers/net/wireguard/receive.c -@@ -0,0 +1,595 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#include "queueing.h" -+#include "device.h" -+#include "peer.h" -+#include "timers.h" -+#include "messages.h" -+#include "cookie.h" -+#include "socket.h" -+ -+#include -+#include -+#include -+#include -+ -+/* Must be called with bh disabled. */ -+static void update_rx_stats(struct wg_peer *peer, size_t len) -+{ -+ struct pcpu_sw_netstats *tstats = -+ get_cpu_ptr(peer->device->dev->tstats); -+ -+ u64_stats_update_begin(&tstats->syncp); -+ ++tstats->rx_packets; -+ tstats->rx_bytes += len; -+ peer->rx_bytes += len; -+ u64_stats_update_end(&tstats->syncp); -+ put_cpu_ptr(tstats); -+} -+ -+#define SKB_TYPE_LE32(skb) (((struct message_header *)(skb)->data)->type) -+ -+static size_t validate_header_len(struct sk_buff *skb) -+{ -+ if (unlikely(skb->len < sizeof(struct message_header))) -+ return 0; -+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_DATA) && -+ skb->len >= MESSAGE_MINIMUM_LENGTH) -+ return sizeof(struct message_data); -+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION) && -+ skb->len == sizeof(struct message_handshake_initiation)) -+ return sizeof(struct message_handshake_initiation); -+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE) && -+ skb->len == sizeof(struct message_handshake_response)) -+ return sizeof(struct message_handshake_response); -+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE) && -+ skb->len == sizeof(struct message_handshake_cookie)) -+ return sizeof(struct message_handshake_cookie); -+ return 0; -+} -+ -+static int prepare_skb_header(struct sk_buff *skb, struct wg_device *wg) -+{ -+ size_t data_offset, data_len, header_len; -+ struct udphdr *udp; -+ -+ if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol || -+ skb_transport_header(skb) < skb->head || -+ (skb_transport_header(skb) + sizeof(struct udphdr)) > -+ skb_tail_pointer(skb))) -+ return -EINVAL; /* Bogus IP header */ -+ udp = udp_hdr(skb); -+ data_offset = (u8 *)udp - skb->data; -+ if 
(unlikely(data_offset > U16_MAX || -+ data_offset + sizeof(struct udphdr) > skb->len)) -+ /* Packet has offset at impossible location or isn't big enough -+ * to have UDP fields. -+ */ -+ return -EINVAL; -+ data_len = ntohs(udp->len); -+ if (unlikely(data_len < sizeof(struct udphdr) || -+ data_len > skb->len - data_offset)) -+ /* UDP packet is reporting too small of a size or lying about -+ * its size. -+ */ -+ return -EINVAL; -+ data_len -= sizeof(struct udphdr); -+ data_offset = (u8 *)udp + sizeof(struct udphdr) - skb->data; -+ if (unlikely(!pskb_may_pull(skb, -+ data_offset + sizeof(struct message_header)) || -+ pskb_trim(skb, data_len + data_offset) < 0)) -+ return -EINVAL; -+ skb_pull(skb, data_offset); -+ if (unlikely(skb->len != data_len)) -+ /* Final len does not agree with calculated len */ -+ return -EINVAL; -+ header_len = validate_header_len(skb); -+ if (unlikely(!header_len)) -+ return -EINVAL; -+ __skb_push(skb, data_offset); -+ if (unlikely(!pskb_may_pull(skb, data_offset + header_len))) -+ return -EINVAL; -+ __skb_pull(skb, data_offset); -+ return 0; -+} -+ -+static void wg_receive_handshake_packet(struct wg_device *wg, -+ struct sk_buff *skb) -+{ -+ enum cookie_mac_state mac_state; -+ struct wg_peer *peer = NULL; -+ /* This is global, so that our load calculation applies to the whole -+ * system. We don't care about races with it at all. 
-+ */ -+ static u64 last_under_load; -+ bool packet_needs_cookie; -+ bool under_load; -+ -+ if (SKB_TYPE_LE32(skb) == cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE)) { -+ net_dbg_skb_ratelimited("%s: Receiving cookie response from %pISpfsc\n", -+ wg->dev->name, skb); -+ wg_cookie_message_consume( -+ (struct message_handshake_cookie *)skb->data, wg); -+ return; -+ } -+ -+ under_load = skb_queue_len(&wg->incoming_handshakes) >= -+ MAX_QUEUED_INCOMING_HANDSHAKES / 8; -+ if (under_load) -+ last_under_load = ktime_get_coarse_boottime_ns(); -+ else if (last_under_load) -+ under_load = !wg_birthdate_has_expired(last_under_load, 1); -+ mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb, -+ under_load); -+ if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) || -+ (!under_load && mac_state == VALID_MAC_BUT_NO_COOKIE)) { -+ packet_needs_cookie = false; -+ } else if (under_load && mac_state == VALID_MAC_BUT_NO_COOKIE) { -+ packet_needs_cookie = true; -+ } else { -+ net_dbg_skb_ratelimited("%s: Invalid MAC of handshake, dropping packet from %pISpfsc\n", -+ wg->dev->name, skb); -+ return; -+ } -+ -+ switch (SKB_TYPE_LE32(skb)) { -+ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): { -+ struct message_handshake_initiation *message = -+ (struct message_handshake_initiation *)skb->data; -+ -+ if (packet_needs_cookie) { -+ wg_packet_send_handshake_cookie(wg, skb, -+ message->sender_index); -+ return; -+ } -+ peer = wg_noise_handshake_consume_initiation(message, wg); -+ if (unlikely(!peer)) { -+ net_dbg_skb_ratelimited("%s: Invalid handshake initiation from %pISpfsc\n", -+ wg->dev->name, skb); -+ return; -+ } -+ wg_socket_set_peer_endpoint_from_skb(peer, skb); -+ net_dbg_ratelimited("%s: Receiving handshake initiation from peer %llu (%pISpfsc)\n", -+ wg->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ wg_packet_send_handshake_response(peer); -+ break; -+ } -+ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): { -+ struct message_handshake_response *message = -+ (struct 
message_handshake_response *)skb->data; -+ -+ if (packet_needs_cookie) { -+ wg_packet_send_handshake_cookie(wg, skb, -+ message->sender_index); -+ return; -+ } -+ peer = wg_noise_handshake_consume_response(message, wg); -+ if (unlikely(!peer)) { -+ net_dbg_skb_ratelimited("%s: Invalid handshake response from %pISpfsc\n", -+ wg->dev->name, skb); -+ return; -+ } -+ wg_socket_set_peer_endpoint_from_skb(peer, skb); -+ net_dbg_ratelimited("%s: Receiving handshake response from peer %llu (%pISpfsc)\n", -+ wg->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ if (wg_noise_handshake_begin_session(&peer->handshake, -+ &peer->keypairs)) { -+ wg_timers_session_derived(peer); -+ wg_timers_handshake_complete(peer); -+ /* Calling this function will either send any existing -+ * packets in the queue and not send a keepalive, which -+ * is the best case, Or, if there's nothing in the -+ * queue, it will send a keepalive, in order to give -+ * immediate confirmation of the session. -+ */ -+ wg_packet_send_keepalive(peer); -+ } -+ break; -+ } -+ } -+ -+ if (unlikely(!peer)) { -+ WARN(1, "Somehow a wrong type of packet wound up in the handshake queue!\n"); -+ return; -+ } -+ -+ local_bh_disable(); -+ update_rx_stats(peer, skb->len); -+ local_bh_enable(); -+ -+ wg_timers_any_authenticated_packet_received(peer); -+ wg_timers_any_authenticated_packet_traversal(peer); -+ wg_peer_put(peer); -+} -+ -+void wg_packet_handshake_receive_worker(struct work_struct *work) -+{ -+ struct wg_device *wg = container_of(work, struct multicore_worker, -+ work)->ptr; -+ struct sk_buff *skb; -+ -+ while ((skb = skb_dequeue(&wg->incoming_handshakes)) != NULL) { -+ wg_receive_handshake_packet(wg, skb); -+ dev_kfree_skb(skb); -+ cond_resched(); -+ } -+} -+ -+static void keep_key_fresh(struct wg_peer *peer) -+{ -+ struct noise_keypair *keypair; -+ bool send = false; -+ -+ if (peer->sent_lastminute_handshake) -+ return; -+ -+ rcu_read_lock_bh(); -+ keypair = 
rcu_dereference_bh(peer->keypairs.current_keypair); -+ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && -+ keypair->i_am_the_initiator && -+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, -+ REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT))) -+ send = true; -+ rcu_read_unlock_bh(); -+ -+ if (send) { -+ peer->sent_lastminute_handshake = true; -+ wg_packet_send_queued_handshake_initiation(peer, false); -+ } -+} -+ -+static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key) -+{ -+ struct scatterlist sg[MAX_SKB_FRAGS + 8]; -+ struct sk_buff *trailer; -+ unsigned int offset; -+ int num_frags; -+ -+ if (unlikely(!key)) -+ return false; -+ -+ if (unlikely(!READ_ONCE(key->is_valid) || -+ wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) || -+ key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) { -+ WRITE_ONCE(key->is_valid, false); -+ return false; -+ } -+ -+ PACKET_CB(skb)->nonce = -+ le64_to_cpu(((struct message_data *)skb->data)->counter); -+ -+ /* We ensure that the network header is part of the packet before we -+ * call skb_cow_data, so that there's no chance that data is removed -+ * from the skb, so that later we can extract the original endpoint. -+ */ -+ offset = skb->data - skb_network_header(skb); -+ skb_push(skb, offset); -+ num_frags = skb_cow_data(skb, 0, &trailer); -+ offset += sizeof(struct message_data); -+ skb_pull(skb, offset); -+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg))) -+ return false; -+ -+ sg_init_table(sg, num_frags); -+ if (skb_to_sgvec(skb, sg, 0, skb->len) <= 0) -+ return false; -+ -+ if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, -+ PACKET_CB(skb)->nonce, -+ key->key)) -+ return false; -+ -+ /* Another ugly situation of pushing and pulling the header so as to -+ * keep endpoint information intact. 
-+ */ -+ skb_push(skb, offset); -+ if (pskb_trim(skb, skb->len - noise_encrypted_len(0))) -+ return false; -+ skb_pull(skb, offset); -+ -+ return true; -+} -+ -+/* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ -+static bool counter_validate(union noise_counter *counter, u64 their_counter) -+{ -+ unsigned long index, index_current, top, i; -+ bool ret = false; -+ -+ spin_lock_bh(&counter->receive.lock); -+ -+ if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 || -+ their_counter >= REJECT_AFTER_MESSAGES)) -+ goto out; -+ -+ ++their_counter; -+ -+ if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < -+ counter->receive.counter)) -+ goto out; -+ -+ index = their_counter >> ilog2(BITS_PER_LONG); -+ -+ if (likely(their_counter > counter->receive.counter)) { -+ index_current = counter->receive.counter >> ilog2(BITS_PER_LONG); -+ top = min_t(unsigned long, index - index_current, -+ COUNTER_BITS_TOTAL / BITS_PER_LONG); -+ for (i = 1; i <= top; ++i) -+ counter->receive.backtrack[(i + index_current) & -+ ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; -+ counter->receive.counter = their_counter; -+ } -+ -+ index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; -+ ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), -+ &counter->receive.backtrack[index]); -+ -+out: -+ spin_unlock_bh(&counter->receive.lock); -+ return ret; -+} -+ -+#include "selftest/counter.c" -+ -+static void wg_packet_consume_data_done(struct wg_peer *peer, -+ struct sk_buff *skb, -+ struct endpoint *endpoint) -+{ -+ struct net_device *dev = peer->device->dev; -+ unsigned int len, len_before_trim; -+ struct wg_peer *routed_peer; -+ -+ wg_socket_set_peer_endpoint(peer, endpoint); -+ -+ if (unlikely(wg_noise_received_with_keypair(&peer->keypairs, -+ PACKET_CB(skb)->keypair))) { -+ wg_timers_handshake_complete(peer); -+ wg_packet_send_staged_packets(peer); -+ } -+ -+ keep_key_fresh(peer); -+ -+ wg_timers_any_authenticated_packet_received(peer); -+ 
wg_timers_any_authenticated_packet_traversal(peer); -+ -+ /* A packet with length 0 is a keepalive packet */ -+ if (unlikely(!skb->len)) { -+ update_rx_stats(peer, message_data_len(0)); -+ net_dbg_ratelimited("%s: Receiving keepalive packet from peer %llu (%pISpfsc)\n", -+ dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ goto packet_processed; -+ } -+ -+ wg_timers_data_received(peer); -+ -+ if (unlikely(skb_network_header(skb) < skb->head)) -+ goto dishonest_packet_size; -+ if (unlikely(!(pskb_network_may_pull(skb, sizeof(struct iphdr)) && -+ (ip_hdr(skb)->version == 4 || -+ (ip_hdr(skb)->version == 6 && -+ pskb_network_may_pull(skb, sizeof(struct ipv6hdr))))))) -+ goto dishonest_packet_type; -+ -+ skb->dev = dev; -+ /* We've already verified the Poly1305 auth tag, which means this packet -+ * was not modified in transit. We can therefore tell the networking -+ * stack that all checksums of every layer of encapsulation have already -+ * been checked "by the hardware" and therefore is unneccessary to check -+ * again in software. 
-+ */ -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+ skb->csum_level = ~0; /* All levels */ -+ skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb); -+ if (skb->protocol == htons(ETH_P_IP)) { -+ len = ntohs(ip_hdr(skb)->tot_len); -+ if (unlikely(len < sizeof(struct iphdr))) -+ goto dishonest_packet_size; -+ if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) -+ IP_ECN_set_ce(ip_hdr(skb)); -+ } else if (skb->protocol == htons(ETH_P_IPV6)) { -+ len = ntohs(ipv6_hdr(skb)->payload_len) + -+ sizeof(struct ipv6hdr); -+ if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) -+ IP6_ECN_set_ce(skb, ipv6_hdr(skb)); -+ } else { -+ goto dishonest_packet_type; -+ } -+ -+ if (unlikely(len > skb->len)) -+ goto dishonest_packet_size; -+ len_before_trim = skb->len; -+ if (unlikely(pskb_trim(skb, len))) -+ goto packet_processed; -+ -+ routed_peer = wg_allowedips_lookup_src(&peer->device->peer_allowedips, -+ skb); -+ wg_peer_put(routed_peer); /* We don't need the extra reference. */ -+ -+ if (unlikely(routed_peer != peer)) -+ goto dishonest_packet_peer; -+ -+ if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) { -+ ++dev->stats.rx_dropped; -+ net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n", -+ dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ } else { -+ update_rx_stats(peer, message_data_len(len_before_trim)); -+ } -+ return; -+ -+dishonest_packet_peer: -+ net_dbg_skb_ratelimited("%s: Packet has unallowed src IP (%pISc) from peer %llu (%pISpfsc)\n", -+ dev->name, skb, peer->internal_id, -+ &peer->endpoint.addr); -+ ++dev->stats.rx_errors; -+ ++dev->stats.rx_frame_errors; -+ goto packet_processed; -+dishonest_packet_type: -+ net_dbg_ratelimited("%s: Packet is neither ipv4 nor ipv6 from peer %llu (%pISpfsc)\n", -+ dev->name, peer->internal_id, &peer->endpoint.addr); -+ ++dev->stats.rx_errors; -+ ++dev->stats.rx_frame_errors; -+ goto packet_processed; -+dishonest_packet_size: -+ net_dbg_ratelimited("%s: Packet has incorrect size from peer %llu 
(%pISpfsc)\n", -+ dev->name, peer->internal_id, &peer->endpoint.addr); -+ ++dev->stats.rx_errors; -+ ++dev->stats.rx_length_errors; -+ goto packet_processed; -+packet_processed: -+ dev_kfree_skb(skb); -+} -+ -+int wg_packet_rx_poll(struct napi_struct *napi, int budget) -+{ -+ struct wg_peer *peer = container_of(napi, struct wg_peer, napi); -+ struct crypt_queue *queue = &peer->rx_queue; -+ struct noise_keypair *keypair; -+ struct endpoint endpoint; -+ enum packet_state state; -+ struct sk_buff *skb; -+ int work_done = 0; -+ bool free; -+ -+ if (unlikely(budget <= 0)) -+ return 0; -+ -+ while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && -+ (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != -+ PACKET_STATE_UNCRYPTED) { -+ __ptr_ring_discard_one(&queue->ring); -+ peer = PACKET_PEER(skb); -+ keypair = PACKET_CB(skb)->keypair; -+ free = true; -+ -+ if (unlikely(state != PACKET_STATE_CRYPTED)) -+ goto next; -+ -+ if (unlikely(!counter_validate(&keypair->receiving.counter, -+ PACKET_CB(skb)->nonce))) { -+ net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", -+ peer->device->dev->name, -+ PACKET_CB(skb)->nonce, -+ keypair->receiving.counter.receive.counter); -+ goto next; -+ } -+ -+ if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) -+ goto next; -+ -+ wg_reset_packet(skb); -+ wg_packet_consume_data_done(peer, skb, &endpoint); -+ free = false; -+ -+next: -+ wg_noise_keypair_put(keypair, false); -+ wg_peer_put(peer); -+ if (unlikely(free)) -+ dev_kfree_skb(skb); -+ -+ if (++work_done >= budget) -+ break; -+ } -+ -+ if (work_done < budget) -+ napi_complete_done(napi, work_done); -+ -+ return work_done; -+} -+ -+void wg_packet_decrypt_worker(struct work_struct *work) -+{ -+ struct crypt_queue *queue = container_of(work, struct multicore_worker, -+ work)->ptr; -+ struct sk_buff *skb; -+ -+ while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { -+ enum packet_state state = likely(decrypt_packet(skb, -+ 
&PACKET_CB(skb)->keypair->receiving)) ? -+ PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; -+ wg_queue_enqueue_per_peer_napi(skb, state); -+ } -+} -+ -+static void wg_packet_consume_data(struct wg_device *wg, struct sk_buff *skb) -+{ -+ __le32 idx = ((struct message_data *)skb->data)->key_idx; -+ struct wg_peer *peer = NULL; -+ int ret; -+ -+ rcu_read_lock_bh(); -+ PACKET_CB(skb)->keypair = -+ (struct noise_keypair *)wg_index_hashtable_lookup( -+ wg->index_hashtable, INDEX_HASHTABLE_KEYPAIR, idx, -+ &peer); -+ if (unlikely(!wg_noise_keypair_get(PACKET_CB(skb)->keypair))) -+ goto err_keypair; -+ -+ if (unlikely(READ_ONCE(peer->is_dead))) -+ goto err; -+ -+ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, -+ &peer->rx_queue, skb, -+ wg->packet_crypt_wq, -+ &wg->decrypt_queue.last_cpu); -+ if (unlikely(ret == -EPIPE)) -+ wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD); -+ if (likely(!ret || ret == -EPIPE)) { -+ rcu_read_unlock_bh(); -+ return; -+ } -+err: -+ wg_noise_keypair_put(PACKET_CB(skb)->keypair, false); -+err_keypair: -+ rcu_read_unlock_bh(); -+ wg_peer_put(peer); -+ dev_kfree_skb(skb); -+} -+ -+void wg_packet_receive(struct wg_device *wg, struct sk_buff *skb) -+{ -+ if (unlikely(prepare_skb_header(skb, wg) < 0)) -+ goto err; -+ switch (SKB_TYPE_LE32(skb)) { -+ case cpu_to_le32(MESSAGE_HANDSHAKE_INITIATION): -+ case cpu_to_le32(MESSAGE_HANDSHAKE_RESPONSE): -+ case cpu_to_le32(MESSAGE_HANDSHAKE_COOKIE): { -+ int cpu; -+ -+ if (skb_queue_len(&wg->incoming_handshakes) > -+ MAX_QUEUED_INCOMING_HANDSHAKES || -+ unlikely(!rng_is_initialized())) { -+ net_dbg_skb_ratelimited("%s: Dropping handshake packet from %pISpfsc\n", -+ wg->dev->name, skb); -+ goto err; -+ } -+ skb_queue_tail(&wg->incoming_handshakes, skb); -+ /* Queues up a call to packet_process_queued_handshake_ -+ * packets(skb): -+ */ -+ cpu = wg_cpumask_next_online(&wg->incoming_handshake_cpu); -+ queue_work_on(cpu, wg->handshake_receive_wq, -+ &per_cpu_ptr(wg->incoming_handshakes_worker, 
cpu)->work); -+ break; -+ } -+ case cpu_to_le32(MESSAGE_DATA): -+ PACKET_CB(skb)->ds = ip_tunnel_get_dsfield(ip_hdr(skb), skb); -+ wg_packet_consume_data(wg, skb); -+ break; -+ default: -+ net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n", -+ wg->dev->name, skb); -+ goto err; -+ } -+ return; -+ -+err: -+ dev_kfree_skb(skb); -+} ---- /dev/null -+++ b/drivers/net/wireguard/selftest/allowedips.c -@@ -0,0 +1,683 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * This contains some basic static unit tests for the allowedips data structure. -+ * It also has two additional modes that are disabled and meant to be used by -+ * folks directly playing with this file. If you define the macro -+ * DEBUG_PRINT_TRIE_GRAPHVIZ to be 1, then every time there's a full tree in -+ * memory, it will be printed out as KERN_DEBUG in a format that can be passed -+ * to graphviz (the dot command) to visualize it. If you define the macro -+ * DEBUG_RANDOM_TRIE to be 1, then there will be an extremely costly set of -+ * randomized tests done against a trivial implementation, which may take -+ * upwards of a half-hour to complete. There's no set of users who should be -+ * enabling these, and the only developers that should go anywhere near these -+ * nobs are the ones who are reading this comment. 
-+ */ -+ -+#ifdef DEBUG -+ -+#include -+ -+static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits, -+ u8 cidr) -+{ -+ swap_endian(dst, src, bits); -+ memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8); -+ if (cidr) -+ dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8); -+} -+ -+static __init void print_node(struct allowedips_node *node, u8 bits) -+{ -+ char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n"; -+ char *fmt_declaration = KERN_DEBUG -+ "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; -+ char *style = "dotted"; -+ u8 ip1[16], ip2[16]; -+ u32 color = 0; -+ -+ if (bits == 32) { -+ fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n"; -+ fmt_declaration = KERN_DEBUG -+ "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; -+ } else if (bits == 128) { -+ fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n"; -+ fmt_declaration = KERN_DEBUG -+ "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; -+ } -+ if (node->peer) { -+ hsiphash_key_t key = { { 0 } }; -+ -+ memcpy(&key, &node->peer, sizeof(node->peer)); -+ color = hsiphash_1u32(0xdeadbeef, &key) % 200 << 16 | -+ hsiphash_1u32(0xbabecafe, &key) % 200 << 8 | -+ hsiphash_1u32(0xabad1dea, &key) % 200; -+ style = "bold"; -+ } -+ swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr); -+ printk(fmt_declaration, ip1, node->cidr, style, color); -+ if (node->bit[0]) { -+ swap_endian_and_apply_cidr(ip2, -+ rcu_dereference_raw(node->bit[0])->bits, bits, -+ node->cidr); -+ printk(fmt_connection, ip1, node->cidr, ip2, -+ rcu_dereference_raw(node->bit[0])->cidr); -+ print_node(rcu_dereference_raw(node->bit[0]), bits); -+ } -+ if (node->bit[1]) { -+ swap_endian_and_apply_cidr(ip2, -+ rcu_dereference_raw(node->bit[1])->bits, -+ bits, node->cidr); -+ printk(fmt_connection, ip1, node->cidr, ip2, -+ rcu_dereference_raw(node->bit[1])->cidr); -+ print_node(rcu_dereference_raw(node->bit[1]), bits); -+ } -+} -+ -+static __init void print_tree(struct allowedips_node __rcu 
*top, u8 bits) -+{ -+ printk(KERN_DEBUG "digraph trie {\n"); -+ print_node(rcu_dereference_raw(top), bits); -+ printk(KERN_DEBUG "}\n"); -+} -+ -+enum { -+ NUM_PEERS = 2000, -+ NUM_RAND_ROUTES = 400, -+ NUM_MUTATED_ROUTES = 100, -+ NUM_QUERIES = NUM_RAND_ROUTES * NUM_MUTATED_ROUTES * 30 -+}; -+ -+struct horrible_allowedips { -+ struct hlist_head head; -+}; -+ -+struct horrible_allowedips_node { -+ struct hlist_node table; -+ union nf_inet_addr ip; -+ union nf_inet_addr mask; -+ u8 ip_version; -+ void *value; -+}; -+ -+static __init void horrible_allowedips_init(struct horrible_allowedips *table) -+{ -+ INIT_HLIST_HEAD(&table->head); -+} -+ -+static __init void horrible_allowedips_free(struct horrible_allowedips *table) -+{ -+ struct horrible_allowedips_node *node; -+ struct hlist_node *h; -+ -+ hlist_for_each_entry_safe(node, h, &table->head, table) { -+ hlist_del(&node->table); -+ kfree(node); -+ } -+} -+ -+static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr) -+{ -+ union nf_inet_addr mask; -+ -+ memset(&mask, 0x00, 128 / 8); -+ memset(&mask, 0xff, cidr / 8); -+ if (cidr % 32) -+ mask.all[cidr / 32] = (__force u32)htonl( -+ (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL); -+ return mask; -+} -+ -+static __init inline u8 horrible_mask_to_cidr(union nf_inet_addr subnet) -+{ -+ return hweight32(subnet.all[0]) + hweight32(subnet.all[1]) + -+ hweight32(subnet.all[2]) + hweight32(subnet.all[3]); -+} -+ -+static __init inline void -+horrible_mask_self(struct horrible_allowedips_node *node) -+{ -+ if (node->ip_version == 4) { -+ node->ip.ip &= node->mask.ip; -+ } else if (node->ip_version == 6) { -+ node->ip.ip6[0] &= node->mask.ip6[0]; -+ node->ip.ip6[1] &= node->mask.ip6[1]; -+ node->ip.ip6[2] &= node->mask.ip6[2]; -+ node->ip.ip6[3] &= node->mask.ip6[3]; -+ } -+} -+ -+static __init inline bool -+horrible_match_v4(const struct horrible_allowedips_node *node, -+ struct in_addr *ip) -+{ -+ return (ip->s_addr & node->mask.ip) == node->ip.ip; -+} -+ 
-+static __init inline bool -+horrible_match_v6(const struct horrible_allowedips_node *node, -+ struct in6_addr *ip) -+{ -+ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == -+ node->ip.ip6[0] && -+ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == -+ node->ip.ip6[1] && -+ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == -+ node->ip.ip6[2] && -+ (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3]; -+} -+ -+static __init void -+horrible_insert_ordered(struct horrible_allowedips *table, -+ struct horrible_allowedips_node *node) -+{ -+ struct horrible_allowedips_node *other = NULL, *where = NULL; -+ u8 my_cidr = horrible_mask_to_cidr(node->mask); -+ -+ hlist_for_each_entry(other, &table->head, table) { -+ if (!memcmp(&other->mask, &node->mask, -+ sizeof(union nf_inet_addr)) && -+ !memcmp(&other->ip, &node->ip, -+ sizeof(union nf_inet_addr)) && -+ other->ip_version == node->ip_version) { -+ other->value = node->value; -+ kfree(node); -+ return; -+ } -+ where = other; -+ if (horrible_mask_to_cidr(other->mask) <= my_cidr) -+ break; -+ } -+ if (!other && !where) -+ hlist_add_head(&node->table, &table->head); -+ else if (!other) -+ hlist_add_behind(&node->table, &where->table); -+ else -+ hlist_add_before(&node->table, &where->table); -+} -+ -+static __init int -+horrible_allowedips_insert_v4(struct horrible_allowedips *table, -+ struct in_addr *ip, u8 cidr, void *value) -+{ -+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), -+ GFP_KERNEL); -+ -+ if (unlikely(!node)) -+ return -ENOMEM; -+ node->ip.in = *ip; -+ node->mask = horrible_cidr_to_mask(cidr); -+ node->ip_version = 4; -+ node->value = value; -+ horrible_mask_self(node); -+ horrible_insert_ordered(table, node); -+ return 0; -+} -+ -+static __init int -+horrible_allowedips_insert_v6(struct horrible_allowedips *table, -+ struct in6_addr *ip, u8 cidr, void *value) -+{ -+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), -+ GFP_KERNEL); -+ -+ if (unlikely(!node)) -+ return 
-ENOMEM; -+ node->ip.in6 = *ip; -+ node->mask = horrible_cidr_to_mask(cidr); -+ node->ip_version = 6; -+ node->value = value; -+ horrible_mask_self(node); -+ horrible_insert_ordered(table, node); -+ return 0; -+} -+ -+static __init void * -+horrible_allowedips_lookup_v4(struct horrible_allowedips *table, -+ struct in_addr *ip) -+{ -+ struct horrible_allowedips_node *node; -+ void *ret = NULL; -+ -+ hlist_for_each_entry(node, &table->head, table) { -+ if (node->ip_version != 4) -+ continue; -+ if (horrible_match_v4(node, ip)) { -+ ret = node->value; -+ break; -+ } -+ } -+ return ret; -+} -+ -+static __init void * -+horrible_allowedips_lookup_v6(struct horrible_allowedips *table, -+ struct in6_addr *ip) -+{ -+ struct horrible_allowedips_node *node; -+ void *ret = NULL; -+ -+ hlist_for_each_entry(node, &table->head, table) { -+ if (node->ip_version != 6) -+ continue; -+ if (horrible_match_v6(node, ip)) { -+ ret = node->value; -+ break; -+ } -+ } -+ return ret; -+} -+ -+static __init bool randomized_test(void) -+{ -+ unsigned int i, j, k, mutate_amount, cidr; -+ u8 ip[16], mutate_mask[16], mutated[16]; -+ struct wg_peer **peers, *peer; -+ struct horrible_allowedips h; -+ DEFINE_MUTEX(mutex); -+ struct allowedips t; -+ bool ret = false; -+ -+ mutex_init(&mutex); -+ -+ wg_allowedips_init(&t); -+ horrible_allowedips_init(&h); -+ -+ peers = kcalloc(NUM_PEERS, sizeof(*peers), GFP_KERNEL); -+ if (unlikely(!peers)) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free; -+ } -+ for (i = 0; i < NUM_PEERS; ++i) { -+ peers[i] = kzalloc(sizeof(*peers[i]), GFP_KERNEL); -+ if (unlikely(!peers[i])) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free; -+ } -+ kref_init(&peers[i]->refcount); -+ } -+ -+ mutex_lock(&mutex); -+ -+ for (i = 0; i < NUM_RAND_ROUTES; ++i) { -+ prandom_bytes(ip, 4); -+ cidr = prandom_u32_max(32) + 1; -+ peer = peers[prandom_u32_max(NUM_PEERS)]; -+ if (wg_allowedips_insert_v4(&t, (struct in_addr *)ip, cidr, -+ peer, &mutex) 
< 0) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ if (horrible_allowedips_insert_v4(&h, (struct in_addr *)ip, -+ cidr, peer) < 0) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { -+ memcpy(mutated, ip, 4); -+ prandom_bytes(mutate_mask, 4); -+ mutate_amount = prandom_u32_max(32); -+ for (k = 0; k < mutate_amount / 8; ++k) -+ mutate_mask[k] = 0xff; -+ mutate_mask[k] = 0xff -+ << ((8 - (mutate_amount % 8)) % 8); -+ for (; k < 4; ++k) -+ mutate_mask[k] = 0; -+ for (k = 0; k < 4; ++k) -+ mutated[k] = (mutated[k] & mutate_mask[k]) | -+ (~mutate_mask[k] & -+ prandom_u32_max(256)); -+ cidr = prandom_u32_max(32) + 1; -+ peer = peers[prandom_u32_max(NUM_PEERS)]; -+ if (wg_allowedips_insert_v4(&t, -+ (struct in_addr *)mutated, -+ cidr, peer, &mutex) < 0) { -+ pr_err("allowedips random malloc: FAIL\n"); -+ goto free_locked; -+ } -+ if (horrible_allowedips_insert_v4(&h, -+ (struct in_addr *)mutated, cidr, peer)) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ } -+ } -+ -+ for (i = 0; i < NUM_RAND_ROUTES; ++i) { -+ prandom_bytes(ip, 16); -+ cidr = prandom_u32_max(128) + 1; -+ peer = peers[prandom_u32_max(NUM_PEERS)]; -+ if (wg_allowedips_insert_v6(&t, (struct in6_addr *)ip, cidr, -+ peer, &mutex) < 0) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ if (horrible_allowedips_insert_v6(&h, (struct in6_addr *)ip, -+ cidr, peer) < 0) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ for (j = 0; j < NUM_MUTATED_ROUTES; ++j) { -+ memcpy(mutated, ip, 16); -+ prandom_bytes(mutate_mask, 16); -+ mutate_amount = prandom_u32_max(128); -+ for (k = 0; k < mutate_amount / 8; ++k) -+ mutate_mask[k] = 0xff; -+ mutate_mask[k] = 0xff -+ << ((8 - (mutate_amount % 8)) % 8); -+ for (; k < 4; ++k) -+ mutate_mask[k] = 0; -+ for (k = 0; k < 4; ++k) -+ mutated[k] = 
(mutated[k] & mutate_mask[k]) | -+ (~mutate_mask[k] & -+ prandom_u32_max(256)); -+ cidr = prandom_u32_max(128) + 1; -+ peer = peers[prandom_u32_max(NUM_PEERS)]; -+ if (wg_allowedips_insert_v6(&t, -+ (struct in6_addr *)mutated, -+ cidr, peer, &mutex) < 0) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ if (horrible_allowedips_insert_v6( -+ &h, (struct in6_addr *)mutated, cidr, -+ peer)) { -+ pr_err("allowedips random self-test malloc: FAIL\n"); -+ goto free_locked; -+ } -+ } -+ } -+ -+ mutex_unlock(&mutex); -+ -+ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) { -+ print_tree(t.root4, 32); -+ print_tree(t.root6, 128); -+ } -+ -+ for (i = 0; i < NUM_QUERIES; ++i) { -+ prandom_bytes(ip, 4); -+ if (lookup(t.root4, 32, ip) != -+ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { -+ pr_err("allowedips random self-test: FAIL\n"); -+ goto free; -+ } -+ } -+ -+ for (i = 0; i < NUM_QUERIES; ++i) { -+ prandom_bytes(ip, 16); -+ if (lookup(t.root6, 128, ip) != -+ horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { -+ pr_err("allowedips random self-test: FAIL\n"); -+ goto free; -+ } -+ } -+ ret = true; -+ -+free: -+ mutex_lock(&mutex); -+free_locked: -+ wg_allowedips_free(&t, &mutex); -+ mutex_unlock(&mutex); -+ horrible_allowedips_free(&h); -+ if (peers) { -+ for (i = 0; i < NUM_PEERS; ++i) -+ kfree(peers[i]); -+ } -+ kfree(peers); -+ return ret; -+} -+ -+static __init inline struct in_addr *ip4(u8 a, u8 b, u8 c, u8 d) -+{ -+ static struct in_addr ip; -+ u8 *split = (u8 *)&ip; -+ -+ split[0] = a; -+ split[1] = b; -+ split[2] = c; -+ split[3] = d; -+ return &ip; -+} -+ -+static __init inline struct in6_addr *ip6(u32 a, u32 b, u32 c, u32 d) -+{ -+ static struct in6_addr ip; -+ __be32 *split = (__be32 *)&ip; -+ -+ split[0] = cpu_to_be32(a); -+ split[1] = cpu_to_be32(b); -+ split[2] = cpu_to_be32(c); -+ split[3] = cpu_to_be32(d); -+ return &ip; -+} -+ -+static __init struct wg_peer *init_peer(void) -+{ -+ struct wg_peer *peer = 
kzalloc(sizeof(*peer), GFP_KERNEL); -+ -+ if (!peer) -+ return NULL; -+ kref_init(&peer->refcount); -+ INIT_LIST_HEAD(&peer->allowedips_list); -+ return peer; -+} -+ -+#define insert(version, mem, ipa, ipb, ipc, ipd, cidr) \ -+ wg_allowedips_insert_v##version(&t, ip##version(ipa, ipb, ipc, ipd), \ -+ cidr, mem, &mutex) -+ -+#define maybe_fail() do { \ -+ ++i; \ -+ if (!_s) { \ -+ pr_info("allowedips self-test %zu: FAIL\n", i); \ -+ success = false; \ -+ } \ -+ } while (0) -+ -+#define test(version, mem, ipa, ipb, ipc, ipd) do { \ -+ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \ -+ ip##version(ipa, ipb, ipc, ipd)) == (mem); \ -+ maybe_fail(); \ -+ } while (0) -+ -+#define test_negative(version, mem, ipa, ipb, ipc, ipd) do { \ -+ bool _s = lookup(t.root##version, (version) == 4 ? 32 : 128, \ -+ ip##version(ipa, ipb, ipc, ipd)) != (mem); \ -+ maybe_fail(); \ -+ } while (0) -+ -+#define test_boolean(cond) do { \ -+ bool _s = (cond); \ -+ maybe_fail(); \ -+ } while (0) -+ -+bool __init wg_allowedips_selftest(void) -+{ -+ bool found_a = false, found_b = false, found_c = false, found_d = false, -+ found_e = false, found_other = false; -+ struct wg_peer *a = init_peer(), *b = init_peer(), *c = init_peer(), -+ *d = init_peer(), *e = init_peer(), *f = init_peer(), -+ *g = init_peer(), *h = init_peer(); -+ struct allowedips_node *iter_node; -+ bool success = false; -+ struct allowedips t; -+ DEFINE_MUTEX(mutex); -+ struct in6_addr ip; -+ size_t i = 0, count = 0; -+ __be64 part; -+ -+ mutex_init(&mutex); -+ mutex_lock(&mutex); -+ wg_allowedips_init(&t); -+ -+ if (!a || !b || !c || !d || !e || !f || !g || !h) { -+ pr_err("allowedips self-test malloc: FAIL\n"); -+ goto free; -+ } -+ -+ insert(4, a, 192, 168, 4, 0, 24); -+ insert(4, b, 192, 168, 4, 4, 32); -+ insert(4, c, 192, 168, 0, 0, 16); -+ insert(4, d, 192, 95, 5, 64, 27); -+ /* replaces previous entry, and maskself is required */ -+ insert(4, c, 192, 95, 5, 65, 27); -+ insert(6, d, 0x26075300, 0x60006b00, 
0, 0xc05f0543, 128); -+ insert(6, c, 0x26075300, 0x60006b00, 0, 0, 64); -+ insert(4, e, 0, 0, 0, 0, 0); -+ insert(6, e, 0, 0, 0, 0, 0); -+ /* replaces previous entry */ -+ insert(6, f, 0, 0, 0, 0, 0); -+ insert(6, g, 0x24046800, 0, 0, 0, 32); -+ /* maskself is required */ -+ insert(6, h, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 64); -+ insert(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef, 128); -+ insert(6, c, 0x24446800, 0x40e40800, 0xdeaebeef, 0xdefbeef, 128); -+ insert(6, b, 0x24446800, 0xf0e40800, 0xeeaebeef, 0, 98); -+ insert(4, g, 64, 15, 112, 0, 20); -+ /* maskself is required */ -+ insert(4, h, 64, 15, 123, 211, 25); -+ insert(4, a, 10, 0, 0, 0, 25); -+ insert(4, b, 10, 0, 0, 128, 25); -+ insert(4, a, 10, 1, 0, 0, 30); -+ insert(4, b, 10, 1, 0, 4, 30); -+ insert(4, c, 10, 1, 0, 8, 29); -+ insert(4, d, 10, 1, 0, 16, 29); -+ -+ if (IS_ENABLED(DEBUG_PRINT_TRIE_GRAPHVIZ)) { -+ print_tree(t.root4, 32); -+ print_tree(t.root6, 128); -+ } -+ -+ success = true; -+ -+ test(4, a, 192, 168, 4, 20); -+ test(4, a, 192, 168, 4, 0); -+ test(4, b, 192, 168, 4, 4); -+ test(4, c, 192, 168, 200, 182); -+ test(4, c, 192, 95, 5, 68); -+ test(4, e, 192, 95, 5, 96); -+ test(6, d, 0x26075300, 0x60006b00, 0, 0xc05f0543); -+ test(6, c, 0x26075300, 0x60006b00, 0, 0xc02e01ee); -+ test(6, f, 0x26075300, 0x60006b01, 0, 0); -+ test(6, g, 0x24046800, 0x40040806, 0, 0x1006); -+ test(6, g, 0x24046800, 0x40040806, 0x1234, 0x5678); -+ test(6, f, 0x240467ff, 0x40040806, 0x1234, 0x5678); -+ test(6, f, 0x24046801, 0x40040806, 0x1234, 0x5678); -+ test(6, h, 0x24046800, 0x40040800, 0x1234, 0x5678); -+ test(6, h, 0x24046800, 0x40040800, 0, 0); -+ test(6, h, 0x24046800, 0x40040800, 0x10101010, 0x10101010); -+ test(6, a, 0x24046800, 0x40040800, 0xdeadbeef, 0xdeadbeef); -+ test(4, g, 64, 15, 116, 26); -+ test(4, g, 64, 15, 127, 3); -+ test(4, g, 64, 15, 123, 1); -+ test(4, h, 64, 15, 123, 128); -+ test(4, h, 64, 15, 123, 129); -+ test(4, a, 10, 0, 0, 52); -+ test(4, b, 10, 0, 0, 220); -+ 
test(4, a, 10, 1, 0, 2); -+ test(4, b, 10, 1, 0, 6); -+ test(4, c, 10, 1, 0, 10); -+ test(4, d, 10, 1, 0, 20); -+ -+ insert(4, a, 1, 0, 0, 0, 32); -+ insert(4, a, 64, 0, 0, 0, 32); -+ insert(4, a, 128, 0, 0, 0, 32); -+ insert(4, a, 192, 0, 0, 0, 32); -+ insert(4, a, 255, 0, 0, 0, 32); -+ wg_allowedips_remove_by_peer(&t, a, &mutex); -+ test_negative(4, a, 1, 0, 0, 0); -+ test_negative(4, a, 64, 0, 0, 0); -+ test_negative(4, a, 128, 0, 0, 0); -+ test_negative(4, a, 192, 0, 0, 0); -+ test_negative(4, a, 255, 0, 0, 0); -+ -+ wg_allowedips_free(&t, &mutex); -+ wg_allowedips_init(&t); -+ insert(4, a, 192, 168, 0, 0, 16); -+ insert(4, a, 192, 168, 0, 0, 24); -+ wg_allowedips_remove_by_peer(&t, a, &mutex); -+ test_negative(4, a, 192, 168, 0, 1); -+ -+ /* These will hit the WARN_ON(len >= 128) in free_node if something -+ * goes wrong. -+ */ -+ for (i = 0; i < 128; ++i) { -+ part = cpu_to_be64(~(1LLU << (i % 64))); -+ memset(&ip, 0xff, 16); -+ memcpy((u8 *)&ip + (i < 64) * 8, &part, 8); -+ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); -+ } -+ -+ wg_allowedips_free(&t, &mutex); -+ -+ wg_allowedips_init(&t); -+ insert(4, a, 192, 95, 5, 93, 27); -+ insert(6, a, 0x26075300, 0x60006b00, 0, 0xc05f0543, 128); -+ insert(4, a, 10, 1, 0, 20, 29); -+ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 83); -+ insert(6, a, 0x26075300, 0x6d8a6bf8, 0xdab1f1df, 0xc05f1523, 21); -+ list_for_each_entry(iter_node, &a->allowedips_list, peer_list) { -+ u8 cidr, ip[16] __aligned(__alignof(u64)); -+ int family = wg_allowedips_read_node(iter_node, ip, &cidr); -+ -+ count++; -+ -+ if (cidr == 27 && family == AF_INET && -+ !memcmp(ip, ip4(192, 95, 5, 64), sizeof(struct in_addr))) -+ found_a = true; -+ else if (cidr == 128 && family == AF_INET6 && -+ !memcmp(ip, ip6(0x26075300, 0x60006b00, 0, 0xc05f0543), -+ sizeof(struct in6_addr))) -+ found_b = true; -+ else if (cidr == 29 && family == AF_INET && -+ !memcmp(ip, ip4(10, 1, 0, 16), sizeof(struct in_addr))) -+ found_c = true; -+ else if 
(cidr == 83 && family == AF_INET6 && -+ !memcmp(ip, ip6(0x26075300, 0x6d8a6bf8, 0xdab1e000, 0), -+ sizeof(struct in6_addr))) -+ found_d = true; -+ else if (cidr == 21 && family == AF_INET6 && -+ !memcmp(ip, ip6(0x26075000, 0, 0, 0), -+ sizeof(struct in6_addr))) -+ found_e = true; -+ else -+ found_other = true; -+ } -+ test_boolean(count == 5); -+ test_boolean(found_a); -+ test_boolean(found_b); -+ test_boolean(found_c); -+ test_boolean(found_d); -+ test_boolean(found_e); -+ test_boolean(!found_other); -+ -+ if (IS_ENABLED(DEBUG_RANDOM_TRIE) && success) -+ success = randomized_test(); -+ -+ if (success) -+ pr_info("allowedips self-tests: pass\n"); -+ -+free: -+ wg_allowedips_free(&t, &mutex); -+ kfree(a); -+ kfree(b); -+ kfree(c); -+ kfree(d); -+ kfree(e); -+ kfree(f); -+ kfree(g); -+ kfree(h); -+ mutex_unlock(&mutex); -+ -+ return success; -+} -+ -+#undef test_negative -+#undef test -+#undef remove -+#undef insert -+#undef init_peer -+ -+#endif ---- /dev/null -+++ b/drivers/net/wireguard/selftest/counter.c -@@ -0,0 +1,104 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifdef DEBUG -+bool __init wg_packet_counter_selftest(void) -+{ -+ unsigned int test_num = 0, i; -+ union noise_counter counter; -+ bool success = true; -+ -+#define T_INIT do { \ -+ memset(&counter, 0, sizeof(union noise_counter)); \ -+ spin_lock_init(&counter.receive.lock); \ -+ } while (0) -+#define T_LIM (COUNTER_WINDOW_SIZE + 1) -+#define T(n, v) do { \ -+ ++test_num; \ -+ if (counter_validate(&counter, n) != (v)) { \ -+ pr_err("nonce counter self-test %u: FAIL\n", \ -+ test_num); \ -+ success = false; \ -+ } \ -+ } while (0) -+ -+ T_INIT; -+ /* 1 */ T(0, true); -+ /* 2 */ T(1, true); -+ /* 3 */ T(1, false); -+ /* 4 */ T(9, true); -+ /* 5 */ T(8, true); -+ /* 6 */ T(7, true); -+ /* 7 */ T(7, false); -+ /* 8 */ T(T_LIM, true); -+ /* 9 */ T(T_LIM - 1, true); -+ /* 10 */ T(T_LIM - 1, false); -+ /* 11 */ T(T_LIM - 2, true); -+ /* 12 */ T(2, true); -+ /* 13 */ T(2, false); -+ /* 14 */ T(T_LIM + 16, true); -+ /* 15 */ T(3, false); -+ /* 16 */ T(T_LIM + 16, false); -+ /* 17 */ T(T_LIM * 4, true); -+ /* 18 */ T(T_LIM * 4 - (T_LIM - 1), true); -+ /* 19 */ T(10, false); -+ /* 20 */ T(T_LIM * 4 - T_LIM, false); -+ /* 21 */ T(T_LIM * 4 - (T_LIM + 1), false); -+ /* 22 */ T(T_LIM * 4 - (T_LIM - 2), true); -+ /* 23 */ T(T_LIM * 4 + 1 - T_LIM, false); -+ /* 24 */ T(0, false); -+ /* 25 */ T(REJECT_AFTER_MESSAGES, false); -+ /* 26 */ T(REJECT_AFTER_MESSAGES - 1, true); -+ /* 27 */ T(REJECT_AFTER_MESSAGES, false); -+ /* 28 */ T(REJECT_AFTER_MESSAGES - 1, false); -+ /* 29 */ T(REJECT_AFTER_MESSAGES - 2, true); -+ /* 30 */ T(REJECT_AFTER_MESSAGES + 1, false); -+ /* 31 */ T(REJECT_AFTER_MESSAGES + 2, false); -+ /* 32 */ T(REJECT_AFTER_MESSAGES - 2, false); -+ /* 33 */ T(REJECT_AFTER_MESSAGES - 3, true); -+ /* 34 */ T(0, false); -+ -+ T_INIT; -+ for (i = 1; i <= COUNTER_WINDOW_SIZE; ++i) -+ T(i, true); -+ T(0, true); -+ T(0, false); -+ -+ T_INIT; -+ for (i = 2; i <= COUNTER_WINDOW_SIZE + 1; ++i) -+ T(i, true); -+ T(1, true); -+ T(0, false); -+ -+ T_INIT; -+ for (i = 
COUNTER_WINDOW_SIZE + 1; i-- > 0;) -+ T(i, true); -+ -+ T_INIT; -+ for (i = COUNTER_WINDOW_SIZE + 2; i-- > 1;) -+ T(i, true); -+ T(0, false); -+ -+ T_INIT; -+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;) -+ T(i, true); -+ T(COUNTER_WINDOW_SIZE + 1, true); -+ T(0, false); -+ -+ T_INIT; -+ for (i = COUNTER_WINDOW_SIZE + 1; i-- > 1;) -+ T(i, true); -+ T(0, true); -+ T(COUNTER_WINDOW_SIZE + 1, true); -+ -+#undef T -+#undef T_LIM -+#undef T_INIT -+ -+ if (success) -+ pr_info("nonce counter self-tests: pass\n"); -+ return success; -+} -+#endif ---- /dev/null -+++ b/drivers/net/wireguard/selftest/ratelimiter.c -@@ -0,0 +1,226 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifdef DEBUG -+ -+#include -+ -+static const struct { -+ bool result; -+ unsigned int msec_to_sleep_before; -+} expected_results[] __initconst = { -+ [0 ... PACKETS_BURSTABLE - 1] = { true, 0 }, -+ [PACKETS_BURSTABLE] = { false, 0 }, -+ [PACKETS_BURSTABLE + 1] = { true, MSEC_PER_SEC / PACKETS_PER_SECOND }, -+ [PACKETS_BURSTABLE + 2] = { false, 0 }, -+ [PACKETS_BURSTABLE + 3] = { true, (MSEC_PER_SEC / PACKETS_PER_SECOND) * 2 }, -+ [PACKETS_BURSTABLE + 4] = { true, 0 }, -+ [PACKETS_BURSTABLE + 5] = { false, 0 } -+}; -+ -+static __init unsigned int maximum_jiffies_at_index(int index) -+{ -+ unsigned int total_msecs = 2 * MSEC_PER_SEC / PACKETS_PER_SECOND / 3; -+ int i; -+ -+ for (i = 0; i <= index; ++i) -+ total_msecs += expected_results[i].msec_to_sleep_before; -+ return msecs_to_jiffies(total_msecs); -+} -+ -+static __init int timings_test(struct sk_buff *skb4, struct iphdr *hdr4, -+ struct sk_buff *skb6, struct ipv6hdr *hdr6, -+ int *test) -+{ -+ unsigned long loop_start_time; -+ int i; -+ -+ wg_ratelimiter_gc_entries(NULL); -+ rcu_barrier(); -+ loop_start_time = jiffies; -+ -+ for (i = 0; i < ARRAY_SIZE(expected_results); ++i) { -+ if (expected_results[i].msec_to_sleep_before) -+ 
msleep(expected_results[i].msec_to_sleep_before); -+ -+ if (time_is_before_jiffies(loop_start_time + -+ maximum_jiffies_at_index(i))) -+ return -ETIMEDOUT; -+ if (wg_ratelimiter_allow(skb4, &init_net) != -+ expected_results[i].result) -+ return -EXFULL; -+ ++(*test); -+ -+ hdr4->saddr = htonl(ntohl(hdr4->saddr) + i + 1); -+ if (time_is_before_jiffies(loop_start_time + -+ maximum_jiffies_at_index(i))) -+ return -ETIMEDOUT; -+ if (!wg_ratelimiter_allow(skb4, &init_net)) -+ return -EXFULL; -+ ++(*test); -+ -+ hdr4->saddr = htonl(ntohl(hdr4->saddr) - i - 1); -+ -+#if IS_ENABLED(CONFIG_IPV6) -+ hdr6->saddr.in6_u.u6_addr32[2] = htonl(i); -+ hdr6->saddr.in6_u.u6_addr32[3] = htonl(i); -+ if (time_is_before_jiffies(loop_start_time + -+ maximum_jiffies_at_index(i))) -+ return -ETIMEDOUT; -+ if (wg_ratelimiter_allow(skb6, &init_net) != -+ expected_results[i].result) -+ return -EXFULL; -+ ++(*test); -+ -+ hdr6->saddr.in6_u.u6_addr32[0] = -+ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) + i + 1); -+ if (time_is_before_jiffies(loop_start_time + -+ maximum_jiffies_at_index(i))) -+ return -ETIMEDOUT; -+ if (!wg_ratelimiter_allow(skb6, &init_net)) -+ return -EXFULL; -+ ++(*test); -+ -+ hdr6->saddr.in6_u.u6_addr32[0] = -+ htonl(ntohl(hdr6->saddr.in6_u.u6_addr32[0]) - i - 1); -+ -+ if (time_is_before_jiffies(loop_start_time + -+ maximum_jiffies_at_index(i))) -+ return -ETIMEDOUT; -+#endif -+ } -+ return 0; -+} -+ -+static __init int capacity_test(struct sk_buff *skb4, struct iphdr *hdr4, -+ int *test) -+{ -+ int i; -+ -+ wg_ratelimiter_gc_entries(NULL); -+ rcu_barrier(); -+ -+ if (atomic_read(&total_entries)) -+ return -EXFULL; -+ ++(*test); -+ -+ for (i = 0; i <= max_entries; ++i) { -+ hdr4->saddr = htonl(i); -+ if (wg_ratelimiter_allow(skb4, &init_net) != (i != max_entries)) -+ return -EXFULL; -+ ++(*test); -+ } -+ return 0; -+} -+ -+bool __init wg_ratelimiter_selftest(void) -+{ -+ enum { TRIALS_BEFORE_GIVING_UP = 5000 }; -+ bool success = false; -+ int test = 0, trials; -+ struct 
sk_buff *skb4, *skb6; -+ struct iphdr *hdr4; -+ struct ipv6hdr *hdr6; -+ -+ if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) -+ return true; -+ -+ BUILD_BUG_ON(MSEC_PER_SEC % PACKETS_PER_SECOND != 0); -+ -+ if (wg_ratelimiter_init()) -+ goto out; -+ ++test; -+ if (wg_ratelimiter_init()) { -+ wg_ratelimiter_uninit(); -+ goto out; -+ } -+ ++test; -+ if (wg_ratelimiter_init()) { -+ wg_ratelimiter_uninit(); -+ wg_ratelimiter_uninit(); -+ goto out; -+ } -+ ++test; -+ -+ skb4 = alloc_skb(sizeof(struct iphdr), GFP_KERNEL); -+ if (unlikely(!skb4)) -+ goto err_nofree; -+ skb4->protocol = htons(ETH_P_IP); -+ hdr4 = (struct iphdr *)skb_put(skb4, sizeof(*hdr4)); -+ hdr4->saddr = htonl(8182); -+ skb_reset_network_header(skb4); -+ ++test; -+ -+#if IS_ENABLED(CONFIG_IPV6) -+ skb6 = alloc_skb(sizeof(struct ipv6hdr), GFP_KERNEL); -+ if (unlikely(!skb6)) { -+ kfree_skb(skb4); -+ goto err_nofree; -+ } -+ skb6->protocol = htons(ETH_P_IPV6); -+ hdr6 = (struct ipv6hdr *)skb_put(skb6, sizeof(*hdr6)); -+ hdr6->saddr.in6_u.u6_addr32[0] = htonl(1212); -+ hdr6->saddr.in6_u.u6_addr32[1] = htonl(289188); -+ skb_reset_network_header(skb6); -+ ++test; -+#endif -+ -+ for (trials = TRIALS_BEFORE_GIVING_UP;;) { -+ int test_count = 0, ret; -+ -+ ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); -+ if (ret == -ETIMEDOUT) { -+ if (!trials--) { -+ test += test_count; -+ goto err; -+ } -+ msleep(500); -+ continue; -+ } else if (ret < 0) { -+ test += test_count; -+ goto err; -+ } else { -+ test += test_count; -+ break; -+ } -+ } -+ -+ for (trials = TRIALS_BEFORE_GIVING_UP;;) { -+ int test_count = 0; -+ -+ if (capacity_test(skb4, hdr4, &test_count) < 0) { -+ if (!trials--) { -+ test += test_count; -+ goto err; -+ } -+ msleep(50); -+ continue; -+ } -+ test += test_count; -+ break; -+ } -+ -+ success = true; -+ -+err: -+ kfree_skb(skb4); -+#if IS_ENABLED(CONFIG_IPV6) -+ kfree_skb(skb6); -+#endif -+err_nofree: -+ wg_ratelimiter_uninit(); -+ wg_ratelimiter_uninit(); -+ wg_ratelimiter_uninit(); 
-+ /* Uninit one extra time to check underflow detection. */ -+ wg_ratelimiter_uninit(); -+out: -+ if (success) -+ pr_info("ratelimiter self-tests: pass\n"); -+ else -+ pr_err("ratelimiter self-test %d: FAIL\n", test); -+ -+ return success; -+} -+#endif ---- /dev/null -+++ b/drivers/net/wireguard/send.c -@@ -0,0 +1,413 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "queueing.h" -+#include "timers.h" -+#include "device.h" -+#include "peer.h" -+#include "socket.h" -+#include "messages.h" -+#include "cookie.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static void wg_packet_send_handshake_initiation(struct wg_peer *peer) -+{ -+ struct message_handshake_initiation packet; -+ -+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake), -+ REKEY_TIMEOUT)) -+ return; /* This function is rate limited. */ -+ -+ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns()); -+ net_dbg_ratelimited("%s: Sending handshake initiation to peer %llu (%pISpfsc)\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ -+ if (wg_noise_handshake_create_initiation(&packet, &peer->handshake)) { -+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer); -+ wg_timers_any_authenticated_packet_traversal(peer); -+ wg_timers_any_authenticated_packet_sent(peer); -+ atomic64_set(&peer->last_sent_handshake, -+ ktime_get_coarse_boottime_ns()); -+ wg_socket_send_buffer_to_peer(peer, &packet, sizeof(packet), -+ HANDSHAKE_DSCP); -+ wg_timers_handshake_initiated(peer); -+ } -+} -+ -+void wg_packet_handshake_send_worker(struct work_struct *work) -+{ -+ struct wg_peer *peer = container_of(work, struct wg_peer, -+ transmit_handshake_work); -+ -+ wg_packet_send_handshake_initiation(peer); -+ wg_peer_put(peer); -+} -+ -+void wg_packet_send_queued_handshake_initiation(struct wg_peer *peer, -+ bool is_retry) -+{ -+ if (!is_retry) -+ 
peer->timer_handshake_attempts = 0; -+ -+ rcu_read_lock_bh(); -+ /* We check last_sent_handshake here in addition to the actual function -+ * we're queueing up, so that we don't queue things if not strictly -+ * necessary: -+ */ -+ if (!wg_birthdate_has_expired(atomic64_read(&peer->last_sent_handshake), -+ REKEY_TIMEOUT) || -+ unlikely(READ_ONCE(peer->is_dead))) -+ goto out; -+ -+ wg_peer_get(peer); -+ /* Queues up calling packet_send_queued_handshakes(peer), where we do a -+ * peer_put(peer) after: -+ */ -+ if (!queue_work(peer->device->handshake_send_wq, -+ &peer->transmit_handshake_work)) -+ /* If the work was already queued, we want to drop the -+ * extra reference: -+ */ -+ wg_peer_put(peer); -+out: -+ rcu_read_unlock_bh(); -+} -+ -+void wg_packet_send_handshake_response(struct wg_peer *peer) -+{ -+ struct message_handshake_response packet; -+ -+ atomic64_set(&peer->last_sent_handshake, ktime_get_coarse_boottime_ns()); -+ net_dbg_ratelimited("%s: Sending handshake response to peer %llu (%pISpfsc)\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ -+ if (wg_noise_handshake_create_response(&packet, &peer->handshake)) { -+ wg_cookie_add_mac_to_packet(&packet, sizeof(packet), peer); -+ if (wg_noise_handshake_begin_session(&peer->handshake, -+ &peer->keypairs)) { -+ wg_timers_session_derived(peer); -+ wg_timers_any_authenticated_packet_traversal(peer); -+ wg_timers_any_authenticated_packet_sent(peer); -+ atomic64_set(&peer->last_sent_handshake, -+ ktime_get_coarse_boottime_ns()); -+ wg_socket_send_buffer_to_peer(peer, &packet, -+ sizeof(packet), -+ HANDSHAKE_DSCP); -+ } -+ } -+} -+ -+void wg_packet_send_handshake_cookie(struct wg_device *wg, -+ struct sk_buff *initiating_skb, -+ __le32 sender_index) -+{ -+ struct message_handshake_cookie packet; -+ -+ net_dbg_skb_ratelimited("%s: Sending cookie response for denied handshake message for %pISpfsc\n", -+ wg->dev->name, initiating_skb); -+ wg_cookie_message_create(&packet, initiating_skb, 
sender_index, -+ &wg->cookie_checker); -+ wg_socket_send_buffer_as_reply_to_skb(wg, initiating_skb, &packet, -+ sizeof(packet)); -+} -+ -+static void keep_key_fresh(struct wg_peer *peer) -+{ -+ struct noise_keypair *keypair; -+ bool send = false; -+ -+ rcu_read_lock_bh(); -+ keypair = rcu_dereference_bh(peer->keypairs.current_keypair); -+ if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && -+ (unlikely(atomic64_read(&keypair->sending.counter.counter) > -+ REKEY_AFTER_MESSAGES) || -+ (keypair->i_am_the_initiator && -+ unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, -+ REKEY_AFTER_TIME))))) -+ send = true; -+ rcu_read_unlock_bh(); -+ -+ if (send) -+ wg_packet_send_queued_handshake_initiation(peer, false); -+} -+ -+static unsigned int calculate_skb_padding(struct sk_buff *skb) -+{ -+ /* We do this modulo business with the MTU, just in case the networking -+ * layer gives us a packet that's bigger than the MTU. In that case, we -+ * wouldn't want the final subtraction to overflow in the case of the -+ * padded_size being clamped. -+ */ -+ unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu; -+ unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE); -+ -+ if (padded_size > PACKET_CB(skb)->mtu) -+ padded_size = PACKET_CB(skb)->mtu; -+ return padded_size - last_unit; -+} -+ -+static bool encrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) -+{ -+ unsigned int padding_len, plaintext_len, trailer_len; -+ struct scatterlist sg[MAX_SKB_FRAGS + 8]; -+ struct message_data *header; -+ struct sk_buff *trailer; -+ int num_frags; -+ -+ /* Calculate lengths. */ -+ padding_len = calculate_skb_padding(skb); -+ trailer_len = padding_len + noise_encrypted_len(0); -+ plaintext_len = skb->len + padding_len; -+ -+ /* Expand data section to have room for padding and auth tag. 
*/ -+ num_frags = skb_cow_data(skb, trailer_len, &trailer); -+ if (unlikely(num_frags < 0 || num_frags > ARRAY_SIZE(sg))) -+ return false; -+ -+ /* Set the padding to zeros, and make sure it and the auth tag are part -+ * of the skb. -+ */ -+ memset(skb_tail_pointer(trailer), 0, padding_len); -+ -+ /* Expand head section to have room for our header and the network -+ * stack's headers. -+ */ -+ if (unlikely(skb_cow_head(skb, DATA_PACKET_HEAD_ROOM) < 0)) -+ return false; -+ -+ /* Finalize checksum calculation for the inner packet, if required. */ -+ if (unlikely(skb->ip_summed == CHECKSUM_PARTIAL && -+ skb_checksum_help(skb))) -+ return false; -+ -+ /* Only after checksumming can we safely add on the padding at the end -+ * and the header. -+ */ -+ skb_set_inner_network_header(skb, 0); -+ header = (struct message_data *)skb_push(skb, sizeof(*header)); -+ header->header.type = cpu_to_le32(MESSAGE_DATA); -+ header->key_idx = keypair->remote_index; -+ header->counter = cpu_to_le64(PACKET_CB(skb)->nonce); -+ pskb_put(skb, trailer, trailer_len); -+ -+ /* Now we can encrypt the scattergather segments */ -+ sg_init_table(sg, num_frags); -+ if (skb_to_sgvec(skb, sg, sizeof(struct message_data), -+ noise_encrypted_len(plaintext_len)) <= 0) -+ return false; -+ return chacha20poly1305_encrypt_sg_inplace(sg, plaintext_len, NULL, 0, -+ PACKET_CB(skb)->nonce, -+ keypair->sending.key); -+} -+ -+void wg_packet_send_keepalive(struct wg_peer *peer) -+{ -+ struct sk_buff *skb; -+ -+ if (skb_queue_empty(&peer->staged_packet_queue)) { -+ skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH, -+ GFP_ATOMIC); -+ if (unlikely(!skb)) -+ return; -+ skb_reserve(skb, DATA_PACKET_HEAD_ROOM); -+ skb->dev = peer->device->dev; -+ PACKET_CB(skb)->mtu = skb->dev->mtu; -+ skb_queue_tail(&peer->staged_packet_queue, skb); -+ net_dbg_ratelimited("%s: Sending keepalive packet to peer %llu (%pISpfsc)\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr); -+ } -+ -+ 
wg_packet_send_staged_packets(peer); -+} -+ -+static void wg_packet_create_data_done(struct sk_buff *first, -+ struct wg_peer *peer) -+{ -+ struct sk_buff *skb, *next; -+ bool is_keepalive, data_sent = false; -+ -+ wg_timers_any_authenticated_packet_traversal(peer); -+ wg_timers_any_authenticated_packet_sent(peer); -+ skb_list_walk_safe(first, skb, next) { -+ is_keepalive = skb->len == message_data_len(0); -+ if (likely(!wg_socket_send_skb_to_peer(peer, skb, -+ PACKET_CB(skb)->ds) && !is_keepalive)) -+ data_sent = true; -+ } -+ -+ if (likely(data_sent)) -+ wg_timers_data_sent(peer); -+ -+ keep_key_fresh(peer); -+} -+ -+void wg_packet_tx_worker(struct work_struct *work) -+{ -+ struct crypt_queue *queue = container_of(work, struct crypt_queue, -+ work); -+ struct noise_keypair *keypair; -+ enum packet_state state; -+ struct sk_buff *first; -+ struct wg_peer *peer; -+ -+ while ((first = __ptr_ring_peek(&queue->ring)) != NULL && -+ (state = atomic_read_acquire(&PACKET_CB(first)->state)) != -+ PACKET_STATE_UNCRYPTED) { -+ __ptr_ring_discard_one(&queue->ring); -+ peer = PACKET_PEER(first); -+ keypair = PACKET_CB(first)->keypair; -+ -+ if (likely(state == PACKET_STATE_CRYPTED)) -+ wg_packet_create_data_done(first, peer); -+ else -+ kfree_skb_list(first); -+ -+ wg_noise_keypair_put(keypair, false); -+ wg_peer_put(peer); -+ } -+} -+ -+void wg_packet_encrypt_worker(struct work_struct *work) -+{ -+ struct crypt_queue *queue = container_of(work, struct multicore_worker, -+ work)->ptr; -+ struct sk_buff *first, *skb, *next; -+ -+ while ((first = ptr_ring_consume_bh(&queue->ring)) != NULL) { -+ enum packet_state state = PACKET_STATE_CRYPTED; -+ -+ skb_list_walk_safe(first, skb, next) { -+ if (likely(encrypt_packet(skb, -+ PACKET_CB(first)->keypair))) { -+ wg_reset_packet(skb); -+ } else { -+ state = PACKET_STATE_DEAD; -+ break; -+ } -+ } -+ wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, -+ state); -+ -+ } -+} -+ -+static void wg_packet_create_data(struct sk_buff 
*first) -+{ -+ struct wg_peer *peer = PACKET_PEER(first); -+ struct wg_device *wg = peer->device; -+ int ret = -EINVAL; -+ -+ rcu_read_lock_bh(); -+ if (unlikely(READ_ONCE(peer->is_dead))) -+ goto err; -+ -+ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, -+ &peer->tx_queue, first, -+ wg->packet_crypt_wq, -+ &wg->encrypt_queue.last_cpu); -+ if (unlikely(ret == -EPIPE)) -+ wg_queue_enqueue_per_peer(&peer->tx_queue, first, -+ PACKET_STATE_DEAD); -+err: -+ rcu_read_unlock_bh(); -+ if (likely(!ret || ret == -EPIPE)) -+ return; -+ wg_noise_keypair_put(PACKET_CB(first)->keypair, false); -+ wg_peer_put(peer); -+ kfree_skb_list(first); -+} -+ -+void wg_packet_purge_staged_packets(struct wg_peer *peer) -+{ -+ spin_lock_bh(&peer->staged_packet_queue.lock); -+ peer->device->dev->stats.tx_dropped += peer->staged_packet_queue.qlen; -+ __skb_queue_purge(&peer->staged_packet_queue); -+ spin_unlock_bh(&peer->staged_packet_queue.lock); -+} -+ -+void wg_packet_send_staged_packets(struct wg_peer *peer) -+{ -+ struct noise_symmetric_key *key; -+ struct noise_keypair *keypair; -+ struct sk_buff_head packets; -+ struct sk_buff *skb; -+ -+ /* Steal the current queue into our local one. */ -+ __skb_queue_head_init(&packets); -+ spin_lock_bh(&peer->staged_packet_queue.lock); -+ skb_queue_splice_init(&peer->staged_packet_queue, &packets); -+ spin_unlock_bh(&peer->staged_packet_queue.lock); -+ if (unlikely(skb_queue_empty(&packets))) -+ return; -+ -+ /* First we make sure we have a valid reference to a valid key. 
*/ -+ rcu_read_lock_bh(); -+ keypair = wg_noise_keypair_get( -+ rcu_dereference_bh(peer->keypairs.current_keypair)); -+ rcu_read_unlock_bh(); -+ if (unlikely(!keypair)) -+ goto out_nokey; -+ key = &keypair->sending; -+ if (unlikely(!READ_ONCE(key->is_valid))) -+ goto out_nokey; -+ if (unlikely(wg_birthdate_has_expired(key->birthdate, -+ REJECT_AFTER_TIME))) -+ goto out_invalid; -+ -+ /* After we know we have a somewhat valid key, we now try to assign -+ * nonces to all of the packets in the queue. If we can't assign nonces -+ * for all of them, we just consider it a failure and wait for the next -+ * handshake. -+ */ -+ skb_queue_walk(&packets, skb) { -+ /* 0 for no outer TOS: no leak. TODO: at some later point, we -+ * might consider using flowi->tos as outer instead. -+ */ -+ PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); -+ PACKET_CB(skb)->nonce = -+ atomic64_inc_return(&key->counter.counter) - 1; -+ if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) -+ goto out_invalid; -+ } -+ -+ packets.prev->next = NULL; -+ wg_peer_get(keypair->entry.peer); -+ PACKET_CB(packets.next)->keypair = keypair; -+ wg_packet_create_data(packets.next); -+ return; -+ -+out_invalid: -+ WRITE_ONCE(key->is_valid, false); -+out_nokey: -+ wg_noise_keypair_put(keypair, false); -+ -+ /* We orphan the packets if we're waiting on a handshake, so that they -+ * don't block a socket's pool. -+ */ -+ skb_queue_walk(&packets, skb) -+ skb_orphan(skb); -+ /* Then we put them back on the top of the queue. We're not too -+ * concerned about accidentally getting things a little out of order if -+ * packets are being added really fast, because this queue is for before -+ * packets can even be sent and it's small anyway. 
-+ */ -+ spin_lock_bh(&peer->staged_packet_queue.lock); -+ skb_queue_splice(&packets, &peer->staged_packet_queue); -+ spin_unlock_bh(&peer->staged_packet_queue.lock); -+ -+ /* If we're exiting because there's something wrong with the key, it -+ * means we should initiate a new handshake. -+ */ -+ wg_packet_send_queued_handshake_initiation(peer, false); -+} ---- /dev/null -+++ b/drivers/net/wireguard/socket.c -@@ -0,0 +1,437 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "device.h" -+#include "peer.h" -+#include "socket.h" -+#include "queueing.h" -+#include "messages.h" -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+static int send4(struct wg_device *wg, struct sk_buff *skb, -+ struct endpoint *endpoint, u8 ds, struct dst_cache *cache) -+{ -+ struct flowi4 fl = { -+ .saddr = endpoint->src4.s_addr, -+ .daddr = endpoint->addr4.sin_addr.s_addr, -+ .fl4_dport = endpoint->addr4.sin_port, -+ .flowi4_mark = wg->fwmark, -+ .flowi4_proto = IPPROTO_UDP -+ }; -+ struct rtable *rt = NULL; -+ struct sock *sock; -+ int ret = 0; -+ -+ skb_mark_not_on_list(skb); -+ skb->dev = wg->dev; -+ skb->mark = wg->fwmark; -+ -+ rcu_read_lock_bh(); -+ sock = rcu_dereference_bh(wg->sock4); -+ -+ if (unlikely(!sock)) { -+ ret = -ENONET; -+ goto err; -+ } -+ -+ fl.fl4_sport = inet_sk(sock)->inet_sport; -+ -+ if (cache) -+ rt = dst_cache_get_ip4(cache, &fl.saddr); -+ -+ if (!rt) { -+ security_sk_classify_flow(sock, flowi4_to_flowi(&fl)); -+ if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, -+ fl.saddr, RT_SCOPE_HOST))) { -+ endpoint->src4.s_addr = 0; -+ *(__force __be32 *)&endpoint->src_if4 = 0; -+ fl.saddr = 0; -+ if (cache) -+ dst_cache_reset(cache); -+ } -+ rt = ip_route_output_flow(sock_net(sock), &fl, sock); -+ if (unlikely(endpoint->src_if4 && ((IS_ERR(rt) && -+ PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) && -+ rt->dst.dev->ifindex != endpoint->src_if4)))) { -+ 
endpoint->src4.s_addr = 0; -+ *(__force __be32 *)&endpoint->src_if4 = 0; -+ fl.saddr = 0; -+ if (cache) -+ dst_cache_reset(cache); -+ if (!IS_ERR(rt)) -+ ip_rt_put(rt); -+ rt = ip_route_output_flow(sock_net(sock), &fl, sock); -+ } -+ if (unlikely(IS_ERR(rt))) { -+ ret = PTR_ERR(rt); -+ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", -+ wg->dev->name, &endpoint->addr, ret); -+ goto err; -+ } else if (unlikely(rt->dst.dev == skb->dev)) { -+ ip_rt_put(rt); -+ ret = -ELOOP; -+ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", -+ wg->dev->name, &endpoint->addr); -+ goto err; -+ } -+ if (cache) -+ dst_cache_set_ip4(cache, &rt->dst, fl.saddr); -+ } -+ -+ skb->ignore_df = 1; -+ udp_tunnel_xmit_skb(rt, sock, skb, fl.saddr, fl.daddr, ds, -+ ip4_dst_hoplimit(&rt->dst), 0, fl.fl4_sport, -+ fl.fl4_dport, false, false); -+ goto out; -+ -+err: -+ kfree_skb(skb); -+out: -+ rcu_read_unlock_bh(); -+ return ret; -+} -+ -+static int send6(struct wg_device *wg, struct sk_buff *skb, -+ struct endpoint *endpoint, u8 ds, struct dst_cache *cache) -+{ -+#if IS_ENABLED(CONFIG_IPV6) -+ struct flowi6 fl = { -+ .saddr = endpoint->src6, -+ .daddr = endpoint->addr6.sin6_addr, -+ .fl6_dport = endpoint->addr6.sin6_port, -+ .flowi6_mark = wg->fwmark, -+ .flowi6_oif = endpoint->addr6.sin6_scope_id, -+ .flowi6_proto = IPPROTO_UDP -+ /* TODO: addr->sin6_flowinfo */ -+ }; -+ struct dst_entry *dst = NULL; -+ struct sock *sock; -+ int ret = 0; -+ -+ skb_mark_not_on_list(skb); -+ skb->dev = wg->dev; -+ skb->mark = wg->fwmark; -+ -+ rcu_read_lock_bh(); -+ sock = rcu_dereference_bh(wg->sock6); -+ -+ if (unlikely(!sock)) { -+ ret = -ENONET; -+ goto err; -+ } -+ -+ fl.fl6_sport = inet_sk(sock)->inet_sport; -+ -+ if (cache) -+ dst = dst_cache_get_ip6(cache, &fl.saddr); -+ -+ if (!dst) { -+ security_sk_classify_flow(sock, flowi6_to_flowi(&fl)); -+ if (unlikely(!ipv6_addr_any(&fl.saddr) && -+ !ipv6_chk_addr(sock_net(sock), &fl.saddr, NULL, 0))) { -+ endpoint->src6 = fl.saddr = 
in6addr_any; -+ if (cache) -+ dst_cache_reset(cache); -+ } -+ dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl, -+ NULL); -+ if (unlikely(IS_ERR(dst))) { -+ ret = PTR_ERR(dst); -+ net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", -+ wg->dev->name, &endpoint->addr, ret); -+ goto err; -+ } else if (unlikely(dst->dev == skb->dev)) { -+ dst_release(dst); -+ ret = -ELOOP; -+ net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", -+ wg->dev->name, &endpoint->addr); -+ goto err; -+ } -+ if (cache) -+ dst_cache_set_ip6(cache, dst, &fl.saddr); -+ } -+ -+ skb->ignore_df = 1; -+ udp_tunnel6_xmit_skb(dst, sock, skb, skb->dev, &fl.saddr, &fl.daddr, ds, -+ ip6_dst_hoplimit(dst), 0, fl.fl6_sport, -+ fl.fl6_dport, false); -+ goto out; -+ -+err: -+ kfree_skb(skb); -+out: -+ rcu_read_unlock_bh(); -+ return ret; -+#else -+ return -EAFNOSUPPORT; -+#endif -+} -+ -+int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, u8 ds) -+{ -+ size_t skb_len = skb->len; -+ int ret = -EAFNOSUPPORT; -+ -+ read_lock_bh(&peer->endpoint_lock); -+ if (peer->endpoint.addr.sa_family == AF_INET) -+ ret = send4(peer->device, skb, &peer->endpoint, ds, -+ &peer->endpoint_cache); -+ else if (peer->endpoint.addr.sa_family == AF_INET6) -+ ret = send6(peer->device, skb, &peer->endpoint, ds, -+ &peer->endpoint_cache); -+ else -+ dev_kfree_skb(skb); -+ if (likely(!ret)) -+ peer->tx_bytes += skb_len; -+ read_unlock_bh(&peer->endpoint_lock); -+ -+ return ret; -+} -+ -+int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *buffer, -+ size_t len, u8 ds) -+{ -+ struct sk_buff *skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC); -+ -+ if (unlikely(!skb)) -+ return -ENOMEM; -+ -+ skb_reserve(skb, SKB_HEADER_LEN); -+ skb_set_inner_network_header(skb, 0); -+ skb_put_data(skb, buffer, len); -+ return wg_socket_send_skb_to_peer(peer, skb, ds); -+} -+ -+int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg, -+ struct sk_buff *in_skb, void *buffer, -+ size_t 
len) -+{ -+ int ret = 0; -+ struct sk_buff *skb; -+ struct endpoint endpoint; -+ -+ if (unlikely(!in_skb)) -+ return -EINVAL; -+ ret = wg_socket_endpoint_from_skb(&endpoint, in_skb); -+ if (unlikely(ret < 0)) -+ return ret; -+ -+ skb = alloc_skb(len + SKB_HEADER_LEN, GFP_ATOMIC); -+ if (unlikely(!skb)) -+ return -ENOMEM; -+ skb_reserve(skb, SKB_HEADER_LEN); -+ skb_set_inner_network_header(skb, 0); -+ skb_put_data(skb, buffer, len); -+ -+ if (endpoint.addr.sa_family == AF_INET) -+ ret = send4(wg, skb, &endpoint, 0, NULL); -+ else if (endpoint.addr.sa_family == AF_INET6) -+ ret = send6(wg, skb, &endpoint, 0, NULL); -+ /* No other possibilities if the endpoint is valid, which it is, -+ * as we checked above. -+ */ -+ -+ return ret; -+} -+ -+int wg_socket_endpoint_from_skb(struct endpoint *endpoint, -+ const struct sk_buff *skb) -+{ -+ memset(endpoint, 0, sizeof(*endpoint)); -+ if (skb->protocol == htons(ETH_P_IP)) { -+ endpoint->addr4.sin_family = AF_INET; -+ endpoint->addr4.sin_port = udp_hdr(skb)->source; -+ endpoint->addr4.sin_addr.s_addr = ip_hdr(skb)->saddr; -+ endpoint->src4.s_addr = ip_hdr(skb)->daddr; -+ endpoint->src_if4 = skb->skb_iif; -+ } else if (skb->protocol == htons(ETH_P_IPV6)) { -+ endpoint->addr6.sin6_family = AF_INET6; -+ endpoint->addr6.sin6_port = udp_hdr(skb)->source; -+ endpoint->addr6.sin6_addr = ipv6_hdr(skb)->saddr; -+ endpoint->addr6.sin6_scope_id = ipv6_iface_scope_id( -+ &ipv6_hdr(skb)->saddr, skb->skb_iif); -+ endpoint->src6 = ipv6_hdr(skb)->daddr; -+ } else { -+ return -EINVAL; -+ } -+ return 0; -+} -+ -+static bool endpoint_eq(const struct endpoint *a, const struct endpoint *b) -+{ -+ return (a->addr.sa_family == AF_INET && b->addr.sa_family == AF_INET && -+ a->addr4.sin_port == b->addr4.sin_port && -+ a->addr4.sin_addr.s_addr == b->addr4.sin_addr.s_addr && -+ a->src4.s_addr == b->src4.s_addr && a->src_if4 == b->src_if4) || -+ (a->addr.sa_family == AF_INET6 && -+ b->addr.sa_family == AF_INET6 && -+ a->addr6.sin6_port == 
b->addr6.sin6_port && -+ ipv6_addr_equal(&a->addr6.sin6_addr, &b->addr6.sin6_addr) && -+ a->addr6.sin6_scope_id == b->addr6.sin6_scope_id && -+ ipv6_addr_equal(&a->src6, &b->src6)) || -+ unlikely(!a->addr.sa_family && !b->addr.sa_family); -+} -+ -+void wg_socket_set_peer_endpoint(struct wg_peer *peer, -+ const struct endpoint *endpoint) -+{ -+ /* First we check unlocked, in order to optimize, since it's pretty rare -+ * that an endpoint will change. If we happen to be mid-write, and two -+ * CPUs wind up writing the same thing or something slightly different, -+ * it doesn't really matter much either. -+ */ -+ if (endpoint_eq(endpoint, &peer->endpoint)) -+ return; -+ write_lock_bh(&peer->endpoint_lock); -+ if (endpoint->addr.sa_family == AF_INET) { -+ peer->endpoint.addr4 = endpoint->addr4; -+ peer->endpoint.src4 = endpoint->src4; -+ peer->endpoint.src_if4 = endpoint->src_if4; -+ } else if (endpoint->addr.sa_family == AF_INET6) { -+ peer->endpoint.addr6 = endpoint->addr6; -+ peer->endpoint.src6 = endpoint->src6; -+ } else { -+ goto out; -+ } -+ dst_cache_reset(&peer->endpoint_cache); -+out: -+ write_unlock_bh(&peer->endpoint_lock); -+} -+ -+void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer, -+ const struct sk_buff *skb) -+{ -+ struct endpoint endpoint; -+ -+ if (!wg_socket_endpoint_from_skb(&endpoint, skb)) -+ wg_socket_set_peer_endpoint(peer, &endpoint); -+} -+ -+void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer) -+{ -+ write_lock_bh(&peer->endpoint_lock); -+ memset(&peer->endpoint.src6, 0, sizeof(peer->endpoint.src6)); -+ dst_cache_reset(&peer->endpoint_cache); -+ write_unlock_bh(&peer->endpoint_lock); -+} -+ -+static int wg_receive(struct sock *sk, struct sk_buff *skb) -+{ -+ struct wg_device *wg; -+ -+ if (unlikely(!sk)) -+ goto err; -+ wg = sk->sk_user_data; -+ if (unlikely(!wg)) -+ goto err; -+ wg_packet_receive(wg, skb); -+ return 0; -+ -+err: -+ kfree_skb(skb); -+ return 0; -+} -+ -+static void sock_free(struct sock *sock) -+{ -+ if 
(unlikely(!sock)) -+ return; -+ sk_clear_memalloc(sock); -+ udp_tunnel_sock_release(sock->sk_socket); -+} -+ -+static void set_sock_opts(struct socket *sock) -+{ -+ sock->sk->sk_allocation = GFP_ATOMIC; -+ sock->sk->sk_sndbuf = INT_MAX; -+ sk_set_memalloc(sock->sk); -+} -+ -+int wg_socket_init(struct wg_device *wg, u16 port) -+{ -+ int ret; -+ struct udp_tunnel_sock_cfg cfg = { -+ .sk_user_data = wg, -+ .encap_type = 1, -+ .encap_rcv = wg_receive -+ }; -+ struct socket *new4 = NULL, *new6 = NULL; -+ struct udp_port_cfg port4 = { -+ .family = AF_INET, -+ .local_ip.s_addr = htonl(INADDR_ANY), -+ .local_udp_port = htons(port), -+ .use_udp_checksums = true -+ }; -+#if IS_ENABLED(CONFIG_IPV6) -+ int retries = 0; -+ struct udp_port_cfg port6 = { -+ .family = AF_INET6, -+ .local_ip6 = IN6ADDR_ANY_INIT, -+ .use_udp6_tx_checksums = true, -+ .use_udp6_rx_checksums = true, -+ .ipv6_v6only = true -+ }; -+#endif -+ -+#if IS_ENABLED(CONFIG_IPV6) -+retry: -+#endif -+ -+ ret = udp_sock_create(wg->creating_net, &port4, &new4); -+ if (ret < 0) { -+ pr_err("%s: Could not create IPv4 socket\n", wg->dev->name); -+ return ret; -+ } -+ set_sock_opts(new4); -+ setup_udp_tunnel_sock(wg->creating_net, new4, &cfg); -+ -+#if IS_ENABLED(CONFIG_IPV6) -+ if (ipv6_mod_enabled()) { -+ port6.local_udp_port = inet_sk(new4->sk)->inet_sport; -+ ret = udp_sock_create(wg->creating_net, &port6, &new6); -+ if (ret < 0) { -+ udp_tunnel_sock_release(new4); -+ if (ret == -EADDRINUSE && !port && retries++ < 100) -+ goto retry; -+ pr_err("%s: Could not create IPv6 socket\n", -+ wg->dev->name); -+ return ret; -+ } -+ set_sock_opts(new6); -+ setup_udp_tunnel_sock(wg->creating_net, new6, &cfg); -+ } -+#endif -+ -+ wg_socket_reinit(wg, new4->sk, new6 ? 
new6->sk : NULL); -+ return 0; -+} -+ -+void wg_socket_reinit(struct wg_device *wg, struct sock *new4, -+ struct sock *new6) -+{ -+ struct sock *old4, *old6; -+ -+ mutex_lock(&wg->socket_update_lock); -+ old4 = rcu_dereference_protected(wg->sock4, -+ lockdep_is_held(&wg->socket_update_lock)); -+ old6 = rcu_dereference_protected(wg->sock6, -+ lockdep_is_held(&wg->socket_update_lock)); -+ rcu_assign_pointer(wg->sock4, new4); -+ rcu_assign_pointer(wg->sock6, new6); -+ if (new4) -+ wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); -+ mutex_unlock(&wg->socket_update_lock); -+ synchronize_rcu(); -+ synchronize_net(); -+ sock_free(old4); -+ sock_free(old6); -+} ---- /dev/null -+++ b/drivers/net/wireguard/socket.h -@@ -0,0 +1,44 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#ifndef _WG_SOCKET_H -+#define _WG_SOCKET_H -+ -+#include -+#include -+#include -+#include -+ -+int wg_socket_init(struct wg_device *wg, u16 port); -+void wg_socket_reinit(struct wg_device *wg, struct sock *new4, -+ struct sock *new6); -+int wg_socket_send_buffer_to_peer(struct wg_peer *peer, void *data, -+ size_t len, u8 ds); -+int wg_socket_send_skb_to_peer(struct wg_peer *peer, struct sk_buff *skb, -+ u8 ds); -+int wg_socket_send_buffer_as_reply_to_skb(struct wg_device *wg, -+ struct sk_buff *in_skb, -+ void *out_buffer, size_t len); -+ -+int wg_socket_endpoint_from_skb(struct endpoint *endpoint, -+ const struct sk_buff *skb); -+void wg_socket_set_peer_endpoint(struct wg_peer *peer, -+ const struct endpoint *endpoint); -+void wg_socket_set_peer_endpoint_from_skb(struct wg_peer *peer, -+ const struct sk_buff *skb); -+void wg_socket_clear_peer_endpoint_src(struct wg_peer *peer); -+ -+#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG) -+#define net_dbg_skb_ratelimited(fmt, dev, skb, ...) 
do { \ -+ struct endpoint __endpoint; \ -+ wg_socket_endpoint_from_skb(&__endpoint, skb); \ -+ net_dbg_ratelimited(fmt, dev, &__endpoint.addr, \ -+ ##__VA_ARGS__); \ -+ } while (0) -+#else -+#define net_dbg_skb_ratelimited(fmt, skb, ...) -+#endif -+ -+#endif /* _WG_SOCKET_H */ ---- /dev/null -+++ b/drivers/net/wireguard/timers.c -@@ -0,0 +1,243 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ */ -+ -+#include "timers.h" -+#include "device.h" -+#include "peer.h" -+#include "queueing.h" -+#include "socket.h" -+ -+/* -+ * - Timer for retransmitting the handshake if we don't hear back after -+ * `REKEY_TIMEOUT + jitter` ms. -+ * -+ * - Timer for sending empty packet if we have received a packet but after have -+ * not sent one for `KEEPALIVE_TIMEOUT` ms. -+ * -+ * - Timer for initiating new handshake if we have sent a packet but after have -+ * not received one (even empty) for `(KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) + -+ * jitter` ms. -+ * -+ * - Timer for zeroing out all ephemeral keys after `(REJECT_AFTER_TIME * 3)` ms -+ * if no new keys have been received. -+ * -+ * - Timer for, if enabled, sending an empty authenticated packet every user- -+ * specified seconds. 
-+ */ -+ -+static inline void mod_peer_timer(struct wg_peer *peer, -+ struct timer_list *timer, -+ unsigned long expires) -+{ -+ rcu_read_lock_bh(); -+ if (likely(netif_running(peer->device->dev) && -+ !READ_ONCE(peer->is_dead))) -+ mod_timer(timer, expires); -+ rcu_read_unlock_bh(); -+} -+ -+static void wg_expired_retransmit_handshake(struct timer_list *timer) -+{ -+ struct wg_peer *peer = from_timer(peer, timer, -+ timer_retransmit_handshake); -+ -+ if (peer->timer_handshake_attempts > MAX_TIMER_HANDSHAKES) { -+ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d attempts, giving up\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr, MAX_TIMER_HANDSHAKES + 2); -+ -+ del_timer(&peer->timer_send_keepalive); -+ /* We drop all packets without a keypair and don't try again, -+ * if we try unsuccessfully for too long to make a handshake. -+ */ -+ wg_packet_purge_staged_packets(peer); -+ -+ /* We set a timer for destroying any residue that might be left -+ * of a partial exchange. -+ */ -+ if (!timer_pending(&peer->timer_zero_key_material)) -+ mod_peer_timer(peer, &peer->timer_zero_key_material, -+ jiffies + REJECT_AFTER_TIME * 3 * HZ); -+ } else { -+ ++peer->timer_handshake_attempts; -+ pr_debug("%s: Handshake for peer %llu (%pISpfsc) did not complete after %d seconds, retrying (try %d)\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr, REKEY_TIMEOUT, -+ peer->timer_handshake_attempts + 1); -+ -+ /* We clear the endpoint address src address, in case this is -+ * the cause of trouble. 
-+ */ -+ wg_socket_clear_peer_endpoint_src(peer); -+ -+ wg_packet_send_queued_handshake_initiation(peer, true); -+ } -+} -+ -+static void wg_expired_send_keepalive(struct timer_list *timer) -+{ -+ struct wg_peer *peer = from_timer(peer, timer, timer_send_keepalive); -+ -+ wg_packet_send_keepalive(peer); -+ if (peer->timer_need_another_keepalive) { -+ peer->timer_need_another_keepalive = false; -+ mod_peer_timer(peer, &peer->timer_send_keepalive, -+ jiffies + KEEPALIVE_TIMEOUT * HZ); -+ } -+} -+ -+static void wg_expired_new_handshake(struct timer_list *timer) -+{ -+ struct wg_peer *peer = from_timer(peer, timer, timer_new_handshake); -+ -+ pr_debug("%s: Retrying handshake with peer %llu (%pISpfsc) because we stopped hearing back after %d seconds\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr, KEEPALIVE_TIMEOUT + REKEY_TIMEOUT); -+ /* We clear the endpoint address src address, in case this is the cause -+ * of trouble. -+ */ -+ wg_socket_clear_peer_endpoint_src(peer); -+ wg_packet_send_queued_handshake_initiation(peer, false); -+} -+ -+static void wg_expired_zero_key_material(struct timer_list *timer) -+{ -+ struct wg_peer *peer = from_timer(peer, timer, timer_zero_key_material); -+ -+ rcu_read_lock_bh(); -+ if (!READ_ONCE(peer->is_dead)) { -+ wg_peer_get(peer); -+ if (!queue_work(peer->device->handshake_send_wq, -+ &peer->clear_peer_work)) -+ /* If the work was already on the queue, we want to drop -+ * the extra reference. 
-+ */ -+ wg_peer_put(peer); -+ } -+ rcu_read_unlock_bh(); -+} -+ -+static void wg_queued_expired_zero_key_material(struct work_struct *work) -+{ -+ struct wg_peer *peer = container_of(work, struct wg_peer, -+ clear_peer_work); -+ -+ pr_debug("%s: Zeroing out all keys for peer %llu (%pISpfsc), since we haven't received a new one in %d seconds\n", -+ peer->device->dev->name, peer->internal_id, -+ &peer->endpoint.addr, REJECT_AFTER_TIME * 3); -+ wg_noise_handshake_clear(&peer->handshake); -+ wg_noise_keypairs_clear(&peer->keypairs); -+ wg_peer_put(peer); -+} -+ -+static void wg_expired_send_persistent_keepalive(struct timer_list *timer) -+{ -+ struct wg_peer *peer = from_timer(peer, timer, -+ timer_persistent_keepalive); -+ -+ if (likely(peer->persistent_keepalive_interval)) -+ wg_packet_send_keepalive(peer); -+} -+ -+/* Should be called after an authenticated data packet is sent. */ -+void wg_timers_data_sent(struct wg_peer *peer) -+{ -+ if (!timer_pending(&peer->timer_new_handshake)) -+ mod_peer_timer(peer, &peer->timer_new_handshake, -+ jiffies + (KEEPALIVE_TIMEOUT + REKEY_TIMEOUT) * HZ + -+ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES)); -+} -+ -+/* Should be called after an authenticated data packet is received. */ -+void wg_timers_data_received(struct wg_peer *peer) -+{ -+ if (likely(netif_running(peer->device->dev))) { -+ if (!timer_pending(&peer->timer_send_keepalive)) -+ mod_peer_timer(peer, &peer->timer_send_keepalive, -+ jiffies + KEEPALIVE_TIMEOUT * HZ); -+ else -+ peer->timer_need_another_keepalive = true; -+ } -+} -+ -+/* Should be called after any type of authenticated packet is sent, whether -+ * keepalive, data, or handshake. -+ */ -+void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer) -+{ -+ del_timer(&peer->timer_send_keepalive); -+} -+ -+/* Should be called after any type of authenticated packet is received, whether -+ * keepalive, data, or handshake. 
-+ */ -+void wg_timers_any_authenticated_packet_received(struct wg_peer *peer) -+{ -+ del_timer(&peer->timer_new_handshake); -+} -+ -+/* Should be called after a handshake initiation message is sent. */ -+void wg_timers_handshake_initiated(struct wg_peer *peer) -+{ -+ mod_peer_timer(peer, &peer->timer_retransmit_handshake, -+ jiffies + REKEY_TIMEOUT * HZ + -+ prandom_u32_max(REKEY_TIMEOUT_JITTER_MAX_JIFFIES)); -+} -+ -+/* Should be called after a handshake response message is received and processed -+ * or when getting key confirmation via the first data message. -+ */ -+void wg_timers_handshake_complete(struct wg_peer *peer) -+{ -+ del_timer(&peer->timer_retransmit_handshake); -+ peer->timer_handshake_attempts = 0; -+ peer->sent_lastminute_handshake = false; -+ ktime_get_real_ts64(&peer->walltime_last_handshake); -+} -+ -+/* Should be called after an ephemeral key is created, which is before sending a -+ * handshake response or after receiving a handshake response. -+ */ -+void wg_timers_session_derived(struct wg_peer *peer) -+{ -+ mod_peer_timer(peer, &peer->timer_zero_key_material, -+ jiffies + REJECT_AFTER_TIME * 3 * HZ); -+} -+ -+/* Should be called before a packet with authentication, whether -+ * keepalive, data, or handshakem is sent, or after one is received. 
-+ */ -+void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer) -+{ -+ if (peer->persistent_keepalive_interval) -+ mod_peer_timer(peer, &peer->timer_persistent_keepalive, -+ jiffies + peer->persistent_keepalive_interval * HZ); -+} -+ -+void wg_timers_init(struct wg_peer *peer) -+{ -+ timer_setup(&peer->timer_retransmit_handshake, -+ wg_expired_retransmit_handshake, 0); -+ timer_setup(&peer->timer_send_keepalive, wg_expired_send_keepalive, 0); -+ timer_setup(&peer->timer_new_handshake, wg_expired_new_handshake, 0); -+ timer_setup(&peer->timer_zero_key_material, -+ wg_expired_zero_key_material, 0); -+ timer_setup(&peer->timer_persistent_keepalive, -+ wg_expired_send_persistent_keepalive, 0); -+ INIT_WORK(&peer->clear_peer_work, wg_queued_expired_zero_key_material); -+ peer->timer_handshake_attempts = 0; -+ peer->sent_lastminute_handshake = false; -+ peer->timer_need_another_keepalive = false; -+} -+ -+void wg_timers_stop(struct wg_peer *peer) -+{ -+ del_timer_sync(&peer->timer_retransmit_handshake); -+ del_timer_sync(&peer->timer_send_keepalive); -+ del_timer_sync(&peer->timer_new_handshake); -+ del_timer_sync(&peer->timer_zero_key_material); -+ del_timer_sync(&peer->timer_persistent_keepalive); -+ flush_work(&peer->clear_peer_work); -+} ---- /dev/null -+++ b/drivers/net/wireguard/timers.h -@@ -0,0 +1,31 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#ifndef _WG_TIMERS_H -+#define _WG_TIMERS_H -+ -+#include -+ -+struct wg_peer; -+ -+void wg_timers_init(struct wg_peer *peer); -+void wg_timers_stop(struct wg_peer *peer); -+void wg_timers_data_sent(struct wg_peer *peer); -+void wg_timers_data_received(struct wg_peer *peer); -+void wg_timers_any_authenticated_packet_sent(struct wg_peer *peer); -+void wg_timers_any_authenticated_packet_received(struct wg_peer *peer); -+void wg_timers_handshake_initiated(struct wg_peer *peer); -+void wg_timers_handshake_complete(struct wg_peer *peer); -+void wg_timers_session_derived(struct wg_peer *peer); -+void wg_timers_any_authenticated_packet_traversal(struct wg_peer *peer); -+ -+static inline bool wg_birthdate_has_expired(u64 birthday_nanoseconds, -+ u64 expiration_seconds) -+{ -+ return (s64)(birthday_nanoseconds + expiration_seconds * NSEC_PER_SEC) -+ <= (s64)ktime_get_coarse_boottime_ns(); -+} -+ -+#endif /* _WG_TIMERS_H */ ---- /dev/null -+++ b/drivers/net/wireguard/version.h -@@ -0,0 +1 @@ -+#define WIREGUARD_VERSION "1.0.0" ---- /dev/null -+++ b/include/uapi/linux/wireguard.h -@@ -0,0 +1,196 @@ -+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */ -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ * -+ * Documentation -+ * ============= -+ * -+ * The below enums and macros are for interfacing with WireGuard, using generic -+ * netlink, with family WG_GENL_NAME and version WG_GENL_VERSION. It defines two -+ * methods: get and set. Note that while they share many common attributes, -+ * these two functions actually accept a slightly different set of inputs and -+ * outputs. -+ * -+ * WG_CMD_GET_DEVICE -+ * ----------------- -+ * -+ * May only be called via NLM_F_REQUEST | NLM_F_DUMP. 
The command should contain -+ * one but not both of: -+ * -+ * WGDEVICE_A_IFINDEX: NLA_U32 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * -+ * The kernel will then return several messages (NLM_F_MULTI) containing the -+ * following tree of nested items: -+ * -+ * WGDEVICE_A_IFINDEX: NLA_U32 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * WGDEVICE_A_PRIVATE_KEY: NLA_EXACT_LEN, len WG_KEY_LEN -+ * WGDEVICE_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN -+ * WGDEVICE_A_LISTEN_PORT: NLA_U16 -+ * WGDEVICE_A_FWMARK: NLA_U32 -+ * WGDEVICE_A_PEERS: NLA_NESTED -+ * 0: NLA_NESTED -+ * WGPEER_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN -+ * WGPEER_A_PRESHARED_KEY: NLA_EXACT_LEN, len WG_KEY_LEN -+ * WGPEER_A_ENDPOINT: NLA_MIN_LEN(struct sockaddr), struct sockaddr_in or struct sockaddr_in6 -+ * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16 -+ * WGPEER_A_LAST_HANDSHAKE_TIME: NLA_EXACT_LEN, struct __kernel_timespec -+ * WGPEER_A_RX_BYTES: NLA_U64 -+ * WGPEER_A_TX_BYTES: NLA_U64 -+ * WGPEER_A_ALLOWEDIPS: NLA_NESTED -+ * 0: NLA_NESTED -+ * WGALLOWEDIP_A_FAMILY: NLA_U16 -+ * WGALLOWEDIP_A_IPADDR: NLA_MIN_LEN(struct in_addr), struct in_addr or struct in6_addr -+ * WGALLOWEDIP_A_CIDR_MASK: NLA_U8 -+ * 0: NLA_NESTED -+ * ... -+ * 0: NLA_NESTED -+ * ... -+ * ... -+ * WGPEER_A_PROTOCOL_VERSION: NLA_U32 -+ * 0: NLA_NESTED -+ * ... -+ * ... -+ * -+ * It is possible that all of the allowed IPs of a single peer will not -+ * fit within a single netlink message. In that case, the same peer will -+ * be written in the following message, except it will only contain -+ * WGPEER_A_PUBLIC_KEY and WGPEER_A_ALLOWEDIPS. This may occur several -+ * times in a row for the same peer. It is then up to the receiver to -+ * coalesce adjacent peers. Likewise, it is possible that all peers will -+ * not fit within a single message. So, subsequent peers will be sent -+ * in following messages, except those will only contain WGDEVICE_A_IFNAME -+ * and WGDEVICE_A_PEERS. 
It is then up to the receiver to coalesce these -+ * messages to form the complete list of peers. -+ * -+ * Since this is an NLA_F_DUMP command, the final message will always be -+ * NLMSG_DONE, even if an error occurs. However, this NLMSG_DONE message -+ * contains an integer error code. It is either zero or a negative error -+ * code corresponding to the errno. -+ * -+ * WG_CMD_SET_DEVICE -+ * ----------------- -+ * -+ * May only be called via NLM_F_REQUEST. The command should contain the -+ * following tree of nested items, containing one but not both of -+ * WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME: -+ * -+ * WGDEVICE_A_IFINDEX: NLA_U32 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * WGDEVICE_A_FLAGS: NLA_U32, 0 or WGDEVICE_F_REPLACE_PEERS if all current -+ * peers should be removed prior to adding the list below. -+ * WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN, all zeros to remove -+ * WGDEVICE_A_LISTEN_PORT: NLA_U16, 0 to choose randomly -+ * WGDEVICE_A_FWMARK: NLA_U32, 0 to disable -+ * WGDEVICE_A_PEERS: NLA_NESTED -+ * 0: NLA_NESTED -+ * WGPEER_A_PUBLIC_KEY: len WG_KEY_LEN -+ * WGPEER_A_FLAGS: NLA_U32, 0 and/or WGPEER_F_REMOVE_ME if the -+ * specified peer should not exist at the end of the -+ * operation, rather than added/updated and/or -+ * WGPEER_F_REPLACE_ALLOWEDIPS if all current allowed -+ * IPs of this peer should be removed prior to adding -+ * the list below and/or WGPEER_F_UPDATE_ONLY if the -+ * peer should only be set if it already exists. -+ * WGPEER_A_PRESHARED_KEY: len WG_KEY_LEN, all zeros to remove -+ * WGPEER_A_ENDPOINT: struct sockaddr_in or struct sockaddr_in6 -+ * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16, 0 to disable -+ * WGPEER_A_ALLOWEDIPS: NLA_NESTED -+ * 0: NLA_NESTED -+ * WGALLOWEDIP_A_FAMILY: NLA_U16 -+ * WGALLOWEDIP_A_IPADDR: struct in_addr or struct in6_addr -+ * WGALLOWEDIP_A_CIDR_MASK: NLA_U8 -+ * 0: NLA_NESTED -+ * ... -+ * 0: NLA_NESTED -+ * ... -+ * ... 
-+ * WGPEER_A_PROTOCOL_VERSION: NLA_U32, should not be set or used at -+ * all by most users of this API, as the -+ * most recent protocol will be used when -+ * this is unset. Otherwise, must be set -+ * to 1. -+ * 0: NLA_NESTED -+ * ... -+ * ... -+ * -+ * It is possible that the amount of configuration data exceeds that of -+ * the maximum message length accepted by the kernel. In that case, several -+ * messages should be sent one after another, with each successive one -+ * filling in information not contained in the prior. Note that if -+ * WGDEVICE_F_REPLACE_PEERS is specified in the first message, it probably -+ * should not be specified in fragments that come after, so that the list -+ * of peers is only cleared the first time but appened after. Likewise for -+ * peers, if WGPEER_F_REPLACE_ALLOWEDIPS is specified in the first message -+ * of a peer, it likely should not be specified in subsequent fragments. -+ * -+ * If an error occurs, NLMSG_ERROR will reply containing an errno. 
-+ */ -+ -+#ifndef _WG_UAPI_WIREGUARD_H -+#define _WG_UAPI_WIREGUARD_H -+ -+#define WG_GENL_NAME "wireguard" -+#define WG_GENL_VERSION 1 -+ -+#define WG_KEY_LEN 32 -+ -+enum wg_cmd { -+ WG_CMD_GET_DEVICE, -+ WG_CMD_SET_DEVICE, -+ __WG_CMD_MAX -+}; -+#define WG_CMD_MAX (__WG_CMD_MAX - 1) -+ -+enum wgdevice_flag { -+ WGDEVICE_F_REPLACE_PEERS = 1U << 0, -+ __WGDEVICE_F_ALL = WGDEVICE_F_REPLACE_PEERS -+}; -+enum wgdevice_attribute { -+ WGDEVICE_A_UNSPEC, -+ WGDEVICE_A_IFINDEX, -+ WGDEVICE_A_IFNAME, -+ WGDEVICE_A_PRIVATE_KEY, -+ WGDEVICE_A_PUBLIC_KEY, -+ WGDEVICE_A_FLAGS, -+ WGDEVICE_A_LISTEN_PORT, -+ WGDEVICE_A_FWMARK, -+ WGDEVICE_A_PEERS, -+ __WGDEVICE_A_LAST -+}; -+#define WGDEVICE_A_MAX (__WGDEVICE_A_LAST - 1) -+ -+enum wgpeer_flag { -+ WGPEER_F_REMOVE_ME = 1U << 0, -+ WGPEER_F_REPLACE_ALLOWEDIPS = 1U << 1, -+ WGPEER_F_UPDATE_ONLY = 1U << 2, -+ __WGPEER_F_ALL = WGPEER_F_REMOVE_ME | WGPEER_F_REPLACE_ALLOWEDIPS | -+ WGPEER_F_UPDATE_ONLY -+}; -+enum wgpeer_attribute { -+ WGPEER_A_UNSPEC, -+ WGPEER_A_PUBLIC_KEY, -+ WGPEER_A_PRESHARED_KEY, -+ WGPEER_A_FLAGS, -+ WGPEER_A_ENDPOINT, -+ WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, -+ WGPEER_A_LAST_HANDSHAKE_TIME, -+ WGPEER_A_RX_BYTES, -+ WGPEER_A_TX_BYTES, -+ WGPEER_A_ALLOWEDIPS, -+ WGPEER_A_PROTOCOL_VERSION, -+ __WGPEER_A_LAST -+}; -+#define WGPEER_A_MAX (__WGPEER_A_LAST - 1) -+ -+enum wgallowedip_attribute { -+ WGALLOWEDIP_A_UNSPEC, -+ WGALLOWEDIP_A_FAMILY, -+ WGALLOWEDIP_A_IPADDR, -+ WGALLOWEDIP_A_CIDR_MASK, -+ __WGALLOWEDIP_A_LAST -+}; -+#define WGALLOWEDIP_A_MAX (__WGALLOWEDIP_A_LAST - 1) -+ -+#endif /* _WG_UAPI_WIREGUARD_H */ ---- /dev/null -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -0,0 +1,537 @@ -+#!/bin/bash -+# SPDX-License-Identifier: GPL-2.0 -+# -+# Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+# -+# This script tests the below topology: -+# -+# ┌─────────────────────┐ ┌──────────────────────────────────┐ ┌─────────────────────┐ -+# │ $ns1 namespace │ │ $ns0 namespace │ │ $ns2 namespace │ -+# │ │ │ │ │ │ -+# │┌────────┐ │ │ ┌────────┐ │ │ ┌────────┐│ -+# ││ wg0 │───────────┼───┼────────────│ lo │────────────┼───┼───────────│ wg0 ││ -+# │├────────┴──────────┐│ │ ┌───────┴────────┴────────┐ │ │┌──────────┴────────┤│ -+# ││192.168.241.1/24 ││ │ │(ns1) (ns2) │ │ ││192.168.241.2/24 ││ -+# ││fd00::1/24 ││ │ │127.0.0.1:1 127.0.0.1:2│ │ ││fd00::2/24 ││ -+# │└───────────────────┘│ │ │[::]:1 [::]:2 │ │ │└───────────────────┘│ -+# └─────────────────────┘ │ └─────────────────────────┘ │ └─────────────────────┘ -+# └──────────────────────────────────┘ -+# -+# After the topology is prepared we run a series of TCP/UDP iperf3 tests between the -+# wireguard peers in $ns1 and $ns2. Note that $ns0 is the endpoint for the wg0 -+# interfaces in $ns1 and $ns2. See https://www.wireguard.com/netns/ for further -+# details on how this is accomplished. 
-+set -e -+ -+exec 3>&1 -+export WG_HIDE_KEYS=never -+netns0="wg-test-$$-0" -+netns1="wg-test-$$-1" -+netns2="wg-test-$$-2" -+pretty() { echo -e "\x1b[32m\x1b[1m[+] ${1:+NS$1: }${2}\x1b[0m" >&3; } -+pp() { pretty "" "$*"; "$@"; } -+maybe_exec() { if [[ $BASHPID -eq $$ ]]; then "$@"; else exec "$@"; fi; } -+n0() { pretty 0 "$*"; maybe_exec ip netns exec $netns0 "$@"; } -+n1() { pretty 1 "$*"; maybe_exec ip netns exec $netns1 "$@"; } -+n2() { pretty 2 "$*"; maybe_exec ip netns exec $netns2 "$@"; } -+ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; } -+ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } -+ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } -+sleep() { read -t "$1" -N 0 || true; } -+waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; } -+waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } -+waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } -+waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } -+ -+cleanup() { -+ set +e -+ exec 2>/dev/null -+ printf "$orig_message_cost" > /proc/sys/net/core/message_cost -+ ip0 link del dev wg0 -+ ip1 link del dev wg0 -+ ip2 link del dev wg0 -+ local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)" -+ [[ -n $to_kill ]] && kill $to_kill -+ pp ip netns del $netns1 -+ pp ip netns del $netns2 -+ pp ip netns del $netns0 -+ exit -+} -+ -+orig_message_cost="$(< /proc/sys/net/core/message_cost)" -+trap cleanup EXIT -+printf 0 > /proc/sys/net/core/message_cost -+ -+ip netns del $netns0 2>/dev/null || true -+ip netns del $netns1 2>/dev/null || true -+ip netns del $netns2 2>/dev/null || true -+pp ip netns add $netns0 -+pp ip netns add 
$netns1 -+pp ip netns add $netns2 -+ip0 link set up dev lo -+ -+ip0 link add dev wg0 type wireguard -+ip0 link set wg0 netns $netns1 -+ip0 link add dev wg0 type wireguard -+ip0 link set wg0 netns $netns2 -+key1="$(pp wg genkey)" -+key2="$(pp wg genkey)" -+key3="$(pp wg genkey)" -+pub1="$(pp wg pubkey <<<"$key1")" -+pub2="$(pp wg pubkey <<<"$key2")" -+pub3="$(pp wg pubkey <<<"$key3")" -+psk="$(pp wg genpsk)" -+[[ -n $key1 && -n $key2 && -n $psk ]] -+ -+configure_peers() { -+ ip1 addr add 192.168.241.1/24 dev wg0 -+ ip1 addr add fd00::1/24 dev wg0 -+ -+ ip2 addr add 192.168.241.2/24 dev wg0 -+ ip2 addr add fd00::2/24 dev wg0 -+ -+ n1 wg set wg0 \ -+ private-key <(echo "$key1") \ -+ listen-port 1 \ -+ peer "$pub2" \ -+ preshared-key <(echo "$psk") \ -+ allowed-ips 192.168.241.2/32,fd00::2/128 -+ n2 wg set wg0 \ -+ private-key <(echo "$key2") \ -+ listen-port 2 \ -+ peer "$pub1" \ -+ preshared-key <(echo "$psk") \ -+ allowed-ips 192.168.241.1/32,fd00::1/128 -+ -+ ip1 link set up dev wg0 -+ ip2 link set up dev wg0 -+} -+configure_peers -+ -+tests() { -+ # Ping over IPv4 -+ n2 ping -c 10 -f -W 1 192.168.241.1 -+ n1 ping -c 10 -f -W 1 192.168.241.2 -+ -+ # Ping over IPv6 -+ n2 ping6 -c 10 -f -W 1 fd00::1 -+ n1 ping6 -c 10 -f -W 1 fd00::2 -+ -+ # TCP over IPv4 -+ n2 iperf3 -s -1 -B 192.168.241.2 & -+ waitiperf $netns2 -+ n1 iperf3 -Z -t 3 -c 192.168.241.2 -+ -+ # TCP over IPv6 -+ n1 iperf3 -s -1 -B fd00::1 & -+ waitiperf $netns1 -+ n2 iperf3 -Z -t 3 -c fd00::1 -+ -+ # UDP over IPv4 -+ n1 iperf3 -s -1 -B 192.168.241.1 & -+ waitiperf $netns1 -+ n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1 -+ -+ # UDP over IPv6 -+ n2 iperf3 -s -1 -B fd00::2 & -+ waitiperf $netns2 -+ n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 -+} -+ -+[[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}" -+big_mtu=$(( 34816 - 1500 + $orig_mtu )) -+ -+# Test using IPv4 as outer transport -+n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2 -+n2 wg set wg0 peer "$pub1" endpoint 127.0.0.1:1 -+# 
Before calling tests, we first make sure that the stats counters and timestamper are working -+n2 ping -c 10 -f -W 1 192.168.241.1 -+{ read _; read _; read _; read rx_bytes _; read _; read tx_bytes _; } < <(ip2 -stats link show dev wg0) -+(( rx_bytes == 1372 && (tx_bytes == 1428 || tx_bytes == 1460) )) -+{ read _; read _; read _; read rx_bytes _; read _; read tx_bytes _; } < <(ip1 -stats link show dev wg0) -+(( tx_bytes == 1372 && (rx_bytes == 1428 || rx_bytes == 1460) )) -+read _ rx_bytes tx_bytes < <(n2 wg show wg0 transfer) -+(( rx_bytes == 1372 && (tx_bytes == 1428 || tx_bytes == 1460) )) -+read _ rx_bytes tx_bytes < <(n1 wg show wg0 transfer) -+(( tx_bytes == 1372 && (rx_bytes == 1428 || rx_bytes == 1460) )) -+read _ timestamp < <(n1 wg show wg0 latest-handshakes) -+(( timestamp != 0 )) -+ -+tests -+ip1 link set wg0 mtu $big_mtu -+ip2 link set wg0 mtu $big_mtu -+tests -+ -+ip1 link set wg0 mtu $orig_mtu -+ip2 link set wg0 mtu $orig_mtu -+ -+# Test using IPv6 as outer transport -+n1 wg set wg0 peer "$pub2" endpoint [::1]:2 -+n2 wg set wg0 peer "$pub1" endpoint [::1]:1 -+tests -+ip1 link set wg0 mtu $big_mtu -+ip2 link set wg0 mtu $big_mtu -+tests -+ -+# Test that route MTUs work with the padding -+ip1 link set wg0 mtu 1300 -+ip2 link set wg0 mtu 1300 -+n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2 -+n2 wg set wg0 peer "$pub1" endpoint 127.0.0.1:1 -+n0 iptables -A INPUT -m length --length 1360 -j DROP -+n1 ip route add 192.168.241.2/32 dev wg0 mtu 1299 -+n2 ip route add 192.168.241.1/32 dev wg0 mtu 1299 -+n2 ping -c 1 -W 1 -s 1269 192.168.241.1 -+n2 ip route delete 192.168.241.1/32 dev wg0 mtu 1299 -+n1 ip route delete 192.168.241.2/32 dev wg0 mtu 1299 -+n0 iptables -F INPUT -+ -+ip1 link set wg0 mtu $orig_mtu -+ip2 link set wg0 mtu $orig_mtu -+ -+# Test using IPv4 that roaming works -+ip0 -4 addr del 127.0.0.1/8 dev lo -+ip0 -4 addr add 127.212.121.99/8 dev lo -+n1 wg set wg0 listen-port 9999 -+n1 wg set wg0 peer "$pub2" endpoint 127.0.0.1:2 -+n1 ping6 -W 1 
-c 1 fd00::2 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 127.212.121.99:9999" ]] -+ -+# Test using IPv6 that roaming works -+n1 wg set wg0 listen-port 9998 -+n1 wg set wg0 peer "$pub2" endpoint [::1]:2 -+n1 ping -W 1 -c 1 192.168.241.2 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 [::1]:9998" ]] -+ -+# Test that crypto-RP filter works -+n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24 -+exec 4< <(n1 ncat -l -u -p 1111) -+ncat_pid=$! -+waitncatudp $netns1 -+n2 ncat -u 192.168.241.1 1111 <<<"X" -+read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]] -+kill $ncat_pid -+more_specific_key="$(pp wg genkey | pp wg pubkey)" -+n1 wg set wg0 peer "$more_specific_key" allowed-ips 192.168.241.2/32 -+n2 wg set wg0 listen-port 9997 -+exec 4< <(n1 ncat -l -u -p 1111) -+ncat_pid=$! -+waitncatudp $netns1 -+n2 ncat -u 192.168.241.1 1111 <<<"X" -+! read -r -N 1 -t 1 out <&4 || false -+kill $ncat_pid -+n1 wg set wg0 peer "$more_specific_key" remove -+[[ $(n1 wg show wg0 endpoints) == "$pub2 [::1]:9997" ]] -+ -+# Test that we can change private keys keys and immediately handshake -+n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips 192.168.241.2/32 endpoint 127.0.0.1:2 -+n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 -+n1 ping -W 1 -c 1 192.168.241.2 -+n1 wg set wg0 private-key <(echo "$key3") -+n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove -+n1 ping -W 1 -c 1 192.168.241.2 -+ -+ip1 link del wg0 -+ip2 link del wg0 -+ -+# Test using NAT. 
We now change the topology to this: -+# ┌────────────────────────────────────────┐ ┌────────────────────────────────────────────────┐ ┌────────────────────────────────────────┐ -+# │ $ns1 namespace │ │ $ns0 namespace │ │ $ns2 namespace │ -+# │ │ │ │ │ │ -+# │ ┌─────┐ ┌─────┐ │ │ ┌──────┐ ┌──────┐ │ │ ┌─────┐ ┌─────┐ │ -+# │ │ wg0 │─────────────│vethc│───────────┼────┼────│vethrc│ │vethrs│──────────────┼─────┼──│veths│────────────│ wg0 │ │ -+# │ ├─────┴──────────┐ ├─────┴──────────┐│ │ ├──────┴─────────┐ ├──────┴────────────┐ │ │ ├─────┴──────────┐ ├─────┴──────────┐ │ -+# │ │192.168.241.1/24│ │192.168.1.100/24││ │ │192.168.1.1/24 │ │10.0.0.1/24 │ │ │ │10.0.0.100/24 │ │192.168.241.2/24│ │ -+# │ │fd00::1/24 │ │ ││ │ │ │ │SNAT:192.168.1.0/24│ │ │ │ │ │fd00::2/24 │ │ -+# │ └────────────────┘ └────────────────┘│ │ └────────────────┘ └───────────────────┘ │ │ └────────────────┘ └────────────────┘ │ -+# └────────────────────────────────────────┘ └────────────────────────────────────────────────┘ └────────────────────────────────────────┘ -+ -+ip1 link add dev wg0 type wireguard -+ip2 link add dev wg0 type wireguard -+configure_peers -+ -+ip0 link add vethrc type veth peer name vethc -+ip0 link add vethrs type veth peer name veths -+ip0 link set vethc netns $netns1 -+ip0 link set veths netns $netns2 -+ip0 link set vethrc up -+ip0 link set vethrs up -+ip0 addr add 192.168.1.1/24 dev vethrc -+ip0 addr add 10.0.0.1/24 dev vethrs -+ip1 addr add 192.168.1.100/24 dev vethc -+ip1 link set vethc up -+ip1 route add default via 192.168.1.1 -+ip2 addr add 10.0.0.100/24 dev veths -+ip2 link set veths up -+waitiface $netns0 vethrc -+waitiface $netns0 vethrs -+waitiface $netns1 vethc -+waitiface $netns2 veths -+ -+n0 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward' -+n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout' -+n0 bash -c 'printf 2 > /proc/sys/net/netfilter/nf_conntrack_udp_timeout_stream' -+n0 iptables -t nat -A POSTROUTING -s 192.168.1.0/24 -d 
10.0.0.0/24 -j SNAT --to 10.0.0.1 -+ -+n1 wg set wg0 peer "$pub2" endpoint 10.0.0.100:2 persistent-keepalive 1 -+n1 ping -W 1 -c 1 192.168.241.2 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.1:1" ]] -+# Demonstrate n2 can still send packets to n1, since persistent-keepalive will prevent connection tracking entry from expiring (to see entries: `n0 conntrack -L`). -+pp sleep 3 -+n2 ping -W 1 -c 1 192.168.241.1 -+n1 wg set wg0 peer "$pub2" persistent-keepalive 0 -+ -+# Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs. -+ip1 -6 addr add fc00::9/96 dev vethc -+ip1 -6 route add default via fc00::1 -+ip2 -4 addr add 192.168.99.7/32 dev wg0 -+ip2 -6 addr add abab::1111/128 dev wg0 -+n1 wg set wg0 fwmark 51820 peer "$pub2" allowed-ips 192.168.99.7,abab::1111 -+ip1 -6 route add default dev wg0 table 51820 -+ip1 -6 rule add not fwmark 51820 table 51820 -+ip1 -6 rule add table main suppress_prefixlength 0 -+ip1 -4 route add default dev wg0 table 51820 -+ip1 -4 rule add not fwmark 51820 table 51820 -+ip1 -4 rule add table main suppress_prefixlength 0 -+# suppress_prefixlength only got added in 3.12, and we want to support 3.10+. -+if [[ $(ip1 -4 rule show all) == *suppress_prefixlength* ]]; then -+ # Flood the pings instead of sending just one, to trigger routing table reference counting bugs. 
-+ n1 ping -W 1 -c 100 -f 192.168.99.7 -+ n1 ping -W 1 -c 100 -f abab::1111 -+fi -+ -+n0 iptables -t nat -F -+ip0 link del vethrc -+ip0 link del vethrs -+ip1 link del wg0 -+ip2 link del wg0 -+ -+# Test that saddr routing is sticky but not too sticky, changing to this topology: -+# ┌────────────────────────────────────────┐ ┌────────────────────────────────────────┐ -+# │ $ns1 namespace │ │ $ns2 namespace │ -+# │ │ │ │ -+# │ ┌─────┐ ┌─────┐ │ │ ┌─────┐ ┌─────┐ │ -+# │ │ wg0 │─────────────│veth1│───────────┼────┼──│veth2│────────────│ wg0 │ │ -+# │ ├─────┴──────────┐ ├─────┴──────────┐│ │ ├─────┴──────────┐ ├─────┴──────────┐ │ -+# │ │192.168.241.1/24│ │10.0.0.1/24 ││ │ │10.0.0.2/24 │ │192.168.241.2/24│ │ -+# │ │fd00::1/24 │ │fd00:aa::1/96 ││ │ │fd00:aa::2/96 │ │fd00::2/24 │ │ -+# │ └────────────────┘ └────────────────┘│ │ └────────────────┘ └────────────────┘ │ -+# └────────────────────────────────────────┘ └────────────────────────────────────────┘ -+ -+ip1 link add dev wg0 type wireguard -+ip2 link add dev wg0 type wireguard -+configure_peers -+ip1 link add veth1 type veth peer name veth2 -+ip1 link set veth2 netns $netns2 -+n1 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/all/accept_dad' -+n2 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/all/accept_dad' -+n1 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/veth1/accept_dad' -+n2 bash -c 'printf 0 > /proc/sys/net/ipv6/conf/veth2/accept_dad' -+n1 bash -c 'printf 1 > /proc/sys/net/ipv4/conf/veth1/promote_secondaries' -+ -+# First we check that we aren't overly sticky and can fall over to new IPs when old ones are removed -+ip1 addr add 10.0.0.1/24 dev veth1 -+ip1 addr add fd00:aa::1/96 dev veth1 -+ip2 addr add 10.0.0.2/24 dev veth2 -+ip2 addr add fd00:aa::2/96 dev veth2 -+ip1 link set veth1 up -+ip2 link set veth2 up -+waitiface $netns1 veth1 -+waitiface $netns2 veth2 -+n1 wg set wg0 peer "$pub2" endpoint 10.0.0.2:2 -+n1 ping -W 1 -c 1 192.168.241.2 -+ip1 addr add 10.0.0.10/24 dev veth1 -+ip1 addr del 10.0.0.1/24 dev veth1 
-+n1 ping -W 1 -c 1 192.168.241.2 -+n1 wg set wg0 peer "$pub2" endpoint [fd00:aa::2]:2 -+n1 ping -W 1 -c 1 192.168.241.2 -+ip1 addr add fd00:aa::10/96 dev veth1 -+ip1 addr del fd00:aa::1/96 dev veth1 -+n1 ping -W 1 -c 1 192.168.241.2 -+ -+# Now we show that we can successfully do reply to sender routing -+ip1 link set veth1 down -+ip2 link set veth2 down -+ip1 addr flush dev veth1 -+ip2 addr flush dev veth2 -+ip1 addr add 10.0.0.1/24 dev veth1 -+ip1 addr add 10.0.0.2/24 dev veth1 -+ip1 addr add fd00:aa::1/96 dev veth1 -+ip1 addr add fd00:aa::2/96 dev veth1 -+ip2 addr add 10.0.0.3/24 dev veth2 -+ip2 addr add fd00:aa::3/96 dev veth2 -+ip1 link set veth1 up -+ip2 link set veth2 up -+waitiface $netns1 veth1 -+waitiface $netns2 veth2 -+n2 wg set wg0 peer "$pub1" endpoint 10.0.0.1:1 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.1:1" ]] -+n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::1]:1 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 [fd00:aa::1]:1" ]] -+n2 wg set wg0 peer "$pub1" endpoint 10.0.0.2:1 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.2:1" ]] -+n2 wg set wg0 peer "$pub1" endpoint [fd00:aa::2]:1 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 [fd00:aa::2]:1" ]] -+ -+# What happens if the inbound destination address belongs to a different interface as the default route? 
-+ip1 link add dummy0 type dummy -+ip1 addr add 10.50.0.1/24 dev dummy0 -+ip1 link set dummy0 up -+ip2 route add 10.50.0.0/24 dev veth2 -+n2 wg set wg0 peer "$pub1" endpoint 10.50.0.1:1 -+n2 ping -W 1 -c 1 192.168.241.1 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.50.0.1:1" ]] -+ -+ip1 link del dummy0 -+ip1 addr flush dev veth1 -+ip2 addr flush dev veth2 -+ip1 route flush dev veth1 -+ip2 route flush dev veth2 -+ -+# Now we see what happens if another interface route takes precedence over an ongoing one -+ip1 link add veth3 type veth peer name veth4 -+ip1 link set veth4 netns $netns2 -+ip1 addr add 10.0.0.1/24 dev veth1 -+ip2 addr add 10.0.0.2/24 dev veth2 -+ip1 addr add 10.0.0.3/24 dev veth3 -+ip1 link set veth1 up -+ip2 link set veth2 up -+ip1 link set veth3 up -+ip2 link set veth4 up -+waitiface $netns1 veth1 -+waitiface $netns2 veth2 -+waitiface $netns1 veth3 -+waitiface $netns2 veth4 -+ip1 route flush dev veth1 -+ip1 route flush dev veth3 -+ip1 route add 10.0.0.0/24 dev veth1 src 10.0.0.1 metric 2 -+n1 wg set wg0 peer "$pub2" endpoint 10.0.0.2:2 -+n1 ping -W 1 -c 1 192.168.241.2 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.1:1" ]] -+ip1 route add 10.0.0.0/24 dev veth3 src 10.0.0.3 metric 1 -+n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/veth1/rp_filter' -+n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/veth4/rp_filter' -+n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter' -+n2 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/all/rp_filter' -+n1 ping -W 1 -c 1 192.168.241.2 -+[[ $(n2 wg show wg0 endpoints) == "$pub1 10.0.0.3:1" ]] -+ -+ip1 link del veth1 -+ip1 link del veth3 -+ip1 link del wg0 -+ip2 link del wg0 -+ -+# We test that Netlink/IPC is working properly by doing things that usually cause split responses -+ip0 link add dev wg0 type wireguard -+config=( "[Interface]" "PrivateKey=$(wg genkey)" "[Peer]" "PublicKey=$(wg genkey)" ) -+for a in {1..255}; do -+ for b in {0..255}; do -+ config+=( "AllowedIPs=$a.$b.0.0/16,$a::$b/128" ) -+ done -+done 
-+n0 wg setconf wg0 <(printf '%s\n' "${config[@]}") -+i=0 -+for ip in $(n0 wg show wg0 allowed-ips); do -+ ((++i)) -+done -+((i == 255*256*2+1)) -+ip0 link del wg0 -+ip0 link add dev wg0 type wireguard -+config=( "[Interface]" "PrivateKey=$(wg genkey)" ) -+for a in {1..40}; do -+ config+=( "[Peer]" "PublicKey=$(wg genkey)" ) -+ for b in {1..52}; do -+ config+=( "AllowedIPs=$a.$b.0.0/16" ) -+ done -+done -+n0 wg setconf wg0 <(printf '%s\n' "${config[@]}") -+i=0 -+while read -r line; do -+ j=0 -+ for ip in $line; do -+ ((++j)) -+ done -+ ((j == 53)) -+ ((++i)) -+done < <(n0 wg show wg0 allowed-ips) -+((i == 40)) -+ip0 link del wg0 -+ip0 link add wg0 type wireguard -+config=( ) -+for i in {1..29}; do -+ config+=( "[Peer]" "PublicKey=$(wg genkey)" ) -+done -+config+=( "[Peer]" "PublicKey=$(wg genkey)" "AllowedIPs=255.2.3.4/32,abcd::255/128" ) -+n0 wg setconf wg0 <(printf '%s\n' "${config[@]}") -+n0 wg showconf wg0 > /dev/null -+ip0 link del wg0 -+ -+allowedips=( ) -+for i in {1..197}; do -+ allowedips+=( abcd::$i ) -+done -+saved_ifs="$IFS" -+IFS=, -+allowedips="${allowedips[*]}" -+IFS="$saved_ifs" -+ip0 link add wg0 type wireguard -+n0 wg set wg0 peer "$pub1" -+n0 wg set wg0 peer "$pub2" allowed-ips "$allowedips" -+{ -+ read -r pub allowedips -+ [[ $pub == "$pub1" && $allowedips == "(none)" ]] -+ read -r pub allowedips -+ [[ $pub == "$pub2" ]] -+ i=0 -+ for _ in $allowedips; do -+ ((++i)) -+ done -+ ((i == 197)) -+} < <(n0 wg show wg0 allowed-ips) -+ip0 link del wg0 -+ -+! 
n0 wg show doesnotexist || false -+ -+ip0 link add wg0 type wireguard -+n0 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") -+[[ $(n0 wg show wg0 private-key) == "$key1" ]] -+[[ $(n0 wg show wg0 preshared-keys) == "$pub2 $psk" ]] -+n0 wg set wg0 private-key /dev/null peer "$pub2" preshared-key /dev/null -+[[ $(n0 wg show wg0 private-key) == "(none)" ]] -+[[ $(n0 wg show wg0 preshared-keys) == "$pub2 (none)" ]] -+n0 wg set wg0 peer "$pub2" -+n0 wg set wg0 private-key <(echo "$key2") -+[[ $(n0 wg show wg0 public-key) == "$pub2" ]] -+[[ -z $(n0 wg show wg0 peers) ]] -+n0 wg set wg0 peer "$pub2" -+[[ -z $(n0 wg show wg0 peers) ]] -+n0 wg set wg0 private-key <(echo "$key1") -+n0 wg set wg0 peer "$pub2" -+[[ $(n0 wg show wg0 peers) == "$pub2" ]] -+n0 wg set wg0 private-key <(echo "/${key1:1}") -+[[ $(n0 wg show wg0 private-key) == "+${key1:1}" ]] -+n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0,10.0.0.0/8,100.0.0.0/10,172.16.0.0/12,192.168.0.0/16 -+n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0 -+n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 -+n0 wg set wg0 peer "$pub2" allowed-ips ::/0 -+ip0 link del wg0 -+ -+declare -A objects -+while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do -+ [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue -+ objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}" -+done < /dev/kmsg -+alldeleted=1 -+for object in "${!objects[@]}"; do -+ if [[ ${objects["$object"]} != *createddestroyed ]]; then -+ echo "Error: $object: merely ${objects["$object"]}" >&3 -+ alldeleted=0 -+ fi -+done -+[[ $alldeleted -eq 1 ]] -+pretty "" "Objects that were created were also destroyed." 
diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0073-wireguard-selftests-import-harness-makefile-for-test.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0073-wireguard-selftests-import-harness-makefile-for-test.patch deleted file mode 100644 index ca3853aa1..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0073-wireguard-selftests-import-harness-makefile-for-test.patch +++ /dev/null @@ -1,1078 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Sun, 15 Dec 2019 22:08:00 +0100 -Subject: [PATCH] wireguard: selftests: import harness makefile for test suite - -commit 65d88d04114bca7d85faebd5fed61069cb2b632c upstream. - -WireGuard has been using this on build.wireguard.com for the last -several years with considerable success. It allows for very quick and -iterative development cycles, and supports several platforms. - -To run the test suite on your current platform in QEMU: - - $ make -C tools/testing/selftests/wireguard/qemu -j$(nproc) - -To run it with KASAN and such turned on: - - $ DEBUG_KERNEL=yes make -C tools/testing/selftests/wireguard/qemu -j$(nproc) - -To run it emulated for another platform in QEMU: - - $ ARCH=arm make -C tools/testing/selftests/wireguard/qemu -j$(nproc) - -At the moment, we support aarch64_be, aarch64, arm, armeb, i686, m68k, -mips64, mips64el, mips, mipsel, powerpc64le, powerpc, and x86_64. - -The system supports incremental rebuilding, so it should be very fast to -change a single file and then test it out and have immediate feedback. - -This requires for the right toolchain and qemu to be installed prior. -I've had success with those from musl.cc. - -This is tailored for WireGuard at the moment, though later projects -might generalize it for other network testing. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - .../selftests/wireguard/qemu/.gitignore | 2 + - .../testing/selftests/wireguard/qemu/Makefile | 385 ++++++++++++++++++ - .../wireguard/qemu/arch/aarch64.config | 5 + - .../wireguard/qemu/arch/aarch64_be.config | 6 + - .../selftests/wireguard/qemu/arch/arm.config | 9 + - .../wireguard/qemu/arch/armeb.config | 10 + - .../selftests/wireguard/qemu/arch/i686.config | 5 + - .../selftests/wireguard/qemu/arch/m68k.config | 9 + - .../selftests/wireguard/qemu/arch/mips.config | 11 + - .../wireguard/qemu/arch/mips64.config | 14 + - .../wireguard/qemu/arch/mips64el.config | 15 + - .../wireguard/qemu/arch/mipsel.config | 12 + - .../wireguard/qemu/arch/powerpc.config | 10 + - .../wireguard/qemu/arch/powerpc64le.config | 12 + - .../wireguard/qemu/arch/x86_64.config | 5 + - .../selftests/wireguard/qemu/debug.config | 67 +++ - tools/testing/selftests/wireguard/qemu/init.c | 284 +++++++++++++ - .../selftests/wireguard/qemu/kernel.config | 86 ++++ - 18 files changed, 947 insertions(+) - create mode 100644 tools/testing/selftests/wireguard/qemu/.gitignore - create mode 100644 tools/testing/selftests/wireguard/qemu/Makefile - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/aarch64.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/arm.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/armeb.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/i686.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/m68k.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mips.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mips64.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mips64el.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/mipsel.config - create mode 100644 
tools/testing/selftests/wireguard/qemu/arch/powerpc.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config - create mode 100644 tools/testing/selftests/wireguard/qemu/arch/x86_64.config - create mode 100644 tools/testing/selftests/wireguard/qemu/debug.config - create mode 100644 tools/testing/selftests/wireguard/qemu/init.c - create mode 100644 tools/testing/selftests/wireguard/qemu/kernel.config - ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/.gitignore -@@ -0,0 +1,2 @@ -+build/ -+distfiles/ ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/Makefile -@@ -0,0 +1,385 @@ -+# SPDX-License-Identifier: GPL-2.0 -+# -+# Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. -+ -+PWD := $(shell pwd) -+ -+CHOST := $(shell gcc -dumpmachine) -+ifneq (,$(ARCH)) -+CBUILD := $(subst -gcc,,$(lastword $(subst /, ,$(firstword $(wildcard $(foreach bindir,$(subst :, ,$(PATH)),$(bindir)/$(ARCH)-*-gcc)))))) -+ifeq (,$(CBUILD)) -+$(error The toolchain for $(ARCH) is not installed) -+endif -+else -+CBUILD := $(CHOST) -+ARCH := $(firstword $(subst -, ,$(CBUILD))) -+endif -+ -+# Set these from the environment to override -+KERNEL_PATH ?= $(PWD)/../../../../.. 
-+BUILD_PATH ?= $(PWD)/build/$(ARCH) -+DISTFILES_PATH ?= $(PWD)/distfiles -+NR_CPUS ?= 4 -+ -+MIRROR := https://download.wireguard.com/qemu-test/distfiles/ -+ -+default: qemu -+ -+# variable name, tarball project name, version, tarball extension, default URI base -+define tar_download = -+$(1)_VERSION := $(3) -+$(1)_NAME := $(2)-$$($(1)_VERSION) -+$(1)_TAR := $(DISTFILES_PATH)/$$($(1)_NAME)$(4) -+$(1)_PATH := $(BUILD_PATH)/$$($(1)_NAME) -+$(call file_download,$$($(1)_NAME)$(4),$(5),$(6)) -+endef -+ -+define file_download = -+$(DISTFILES_PATH)/$(1): -+ mkdir -p $(DISTFILES_PATH) -+ flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -t inf --retry-on-http-error=404 -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' -+ if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi -+endef -+ -+$(eval $(call tar_download,MUSL,musl,1.1.20,.tar.gz,https://www.musl-libc.org/releases/,44be8771d0e6c6b5f82dd15662eb2957c9a3173a19a8b49966ac0542bbd40d61)) -+$(eval $(call tar_download,LIBMNL,libmnl,1.0.4,.tar.bz2,https://www.netfilter.org/projects/libmnl/files/,171f89699f286a5854b72b91d06e8f8e3683064c5901fb09d954a9ab6f551f81)) -+$(eval $(call tar_download,IPERF,iperf,3.1.7,.tar.gz,http://downloads.es.net/pub/iperf/,a4ef73406fe92250602b8da2ae89ec53211f805df97a1d1d629db5a14043734f)) -+$(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) -+$(eval $(call tar_download,IPROUTE2,iproute2,5.1.0,.tar.gz,https://www.kernel.org/pub/linux/utils/net/iproute2/,9b43707d6075ecdca14803ca8ce0c8553848c49fa1586d12fd508d66577243f2)) -+$(eval $(call tar_download,IPTABLES,iptables,1.6.1,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,0fc2d7bd5d7be11311726466789d4c65fb4c8e096c9182b56ce97440864f0cf5)) -+$(eval $(call tar_download,NMAP,nmap,7.60,.tar.bz2,https://nmap.org/dist/,a8796ecc4fa6c38aad6139d9515dc8113023a82e9d787e5a5fb5fa1b05516f21)) -+$(eval 
$(call tar_download,IPUTILS,iputils,s20161105,.tar.gz,https://github.com/iputils/iputils/archive/s20161105.tar.gz/#,f813092f03d17294fd23544b129b95cdb87fe19f7970a51908a6b88509acad8a)) -+$(eval $(call tar_download,WIREGUARD_TOOLS,WireGuard,0.0.20191212,.tar.xz,https://git.zx2c4.com/WireGuard/snapshot/,b0d718380f7a8822b2f12d75e462fa4eafa3a77871002981f367cd4fe2a1b071)) -+ -+KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug) -+rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) -+WIREGUARD_SOURCES := $(call rwildcard,$(KERNEL_PATH)/drivers/net/wireguard/,*) -+ -+export CFLAGS ?= -O3 -pipe -+export LDFLAGS ?= -+export CPPFLAGS := -I$(BUILD_PATH)/include -+ -+ifeq ($(CHOST),$(CBUILD)) -+CROSS_COMPILE_FLAG := --host=$(CHOST) -+NOPIE_GCC := gcc -fno-PIE -+CFLAGS += -march=native -+STRIP := strip -+else -+$(info Cross compilation: building for $(CBUILD) using $(CHOST)) -+CROSS_COMPILE_FLAG := --build=$(CBUILD) --host=$(CHOST) -+export CROSS_COMPILE=$(CBUILD)- -+NOPIE_GCC := $(CBUILD)-gcc -fno-PIE -+STRIP := $(CBUILD)-strip -+endif -+ifeq ($(ARCH),aarch64) -+QEMU_ARCH := aarch64 -+KERNEL_ARCH := arm64 -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm -+else -+QEMU_MACHINE := -cpu cortex-a53 -machine virt -+CFLAGS += -march=armv8-a -mtune=cortex-a53 -+endif -+else ifeq ($(ARCH),aarch64_be) -+QEMU_ARCH := aarch64 -+KERNEL_ARCH := arm64 -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm -+else -+QEMU_MACHINE := -cpu cortex-a53 -machine virt -+CFLAGS += -march=armv8-a -mtune=cortex-a53 -+endif -+else ifeq ($(ARCH),arm) -+QEMU_ARCH := arm -+KERNEL_ARCH := arm -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine 
virt,gic_version=host,accel=kvm -+else -+QEMU_MACHINE := -cpu cortex-a15 -machine virt -+CFLAGS += -march=armv7-a -mtune=cortex-a15 -mabi=aapcs-linux -+endif -+else ifeq ($(ARCH),armeb) -+QEMU_ARCH := arm -+KERNEL_ARCH := arm -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm -+else -+QEMU_MACHINE := -cpu cortex-a15 -machine virt -+CFLAGS += -march=armv7-a -mabi=aapcs-linux # We don't pass -mtune=cortex-a15 due to a compiler bug on big endian. -+LDFLAGS += -Wl,--be8 -+endif -+else ifeq ($(ARCH),x86_64) -+QEMU_ARCH := x86_64 -+KERNEL_ARCH := x86_64 -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine q35,accel=kvm -+else -+QEMU_MACHINE := -cpu Skylake-Server -machine q35 -+CFLAGS += -march=skylake-avx512 -+endif -+else ifeq ($(ARCH),i686) -+QEMU_ARCH := i386 -+KERNEL_ARCH := x86 -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage -+ifeq ($(subst i686,x86_64,$(CBUILD)),$(CHOST)) -+QEMU_MACHINE := -cpu host -machine q35,accel=kvm -+else -+QEMU_MACHINE := -cpu coreduo -machine q35 -+CFLAGS += -march=prescott -+endif -+else ifeq ($(ARCH),mips64) -+QEMU_ARCH := mips64 -+KERNEL_ARCH := mips -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine malta,accel=kvm -+CFLAGS += -EB -+else -+QEMU_MACHINE := -cpu MIPS64R2-generic -machine malta -smp 1 -+CFLAGS += -march=mips64r2 -EB -+endif -+else ifeq ($(ARCH),mips64el) -+QEMU_ARCH := mips64el -+KERNEL_ARCH := mips -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine malta,accel=kvm -+CFLAGS += -EL -+else -+QEMU_MACHINE := -cpu MIPS64R2-generic -machine malta -smp 1 -+CFLAGS += -march=mips64r2 -EL -+endif -+else ifeq ($(ARCH),mips) -+QEMU_ARCH := mips -+KERNEL_ARCH := mips -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux 
-+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine malta,accel=kvm -+CFLAGS += -EB -+else -+QEMU_MACHINE := -cpu 24Kf -machine malta -smp 1 -+CFLAGS += -march=mips32r2 -EB -+endif -+else ifeq ($(ARCH),mipsel) -+QEMU_ARCH := mipsel -+KERNEL_ARCH := mips -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host -machine malta,accel=kvm -+CFLAGS += -EL -+else -+QEMU_MACHINE := -cpu 24Kf -machine malta -smp 1 -+CFLAGS += -march=mips32r2 -EL -+endif -+else ifeq ($(ARCH),powerpc64le) -+QEMU_ARCH := ppc64 -+KERNEL_ARCH := powerpc -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host,accel=kvm -machine pseries -+else -+QEMU_MACHINE := -machine pseries -+endif -+CFLAGS += -mcpu=powerpc64le -mlong-double-64 -+else ifeq ($(ARCH),powerpc) -+QEMU_ARCH := ppc -+KERNEL_ARCH := powerpc -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/powerpc/boot/uImage -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host,accel=kvm -machine ppce500 -+else -+QEMU_MACHINE := -machine ppce500 -+endif -+CFLAGS += -mcpu=powerpc -mlong-double-64 -msecure-plt -+else ifeq ($(ARCH),m68k) -+QEMU_ARCH := m68k -+KERNEL_ARCH := m68k -+KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux -+ifeq ($(CHOST),$(CBUILD)) -+QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -+else -+QEMU_MACHINE := -machine q800 -+endif -+else -+$(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64le, powerpc, m68k) -+endif -+ -+REAL_CC := $(CBUILD)-gcc -+MUSL_CC := $(BUILD_PATH)/musl-gcc -+export CC := $(MUSL_CC) -+USERSPACE_DEPS := $(MUSL_CC) $(BUILD_PATH)/include/.installed $(BUILD_PATH)/include/linux/.installed -+ -+build: $(KERNEL_BZIMAGE) -+qemu: $(KERNEL_BZIMAGE) -+ rm -f $(BUILD_PATH)/result -+ timeout --foreground 20m qemu-system-$(QEMU_ARCH) \ -+ -nodefaults \ -+ -nographic \ -+ -smp $(NR_CPUS) \ -+ $(QEMU_MACHINE) \ -+ -m $$(grep -q CONFIG_DEBUG_KMEMLEAK=y 
$(KERNEL_BUILD_PATH)/.config && echo 1G || echo 256M) \ -+ -serial stdio \ -+ -serial file:$(BUILD_PATH)/result \ -+ -no-reboot \ -+ -monitor none \ -+ -kernel $< -+ grep -Fq success $(BUILD_PATH)/result -+ -+$(BUILD_PATH)/init-cpio-spec.txt: -+ mkdir -p $(BUILD_PATH) -+ echo "file /init $(BUILD_PATH)/init 755 0 0" > $@ -+ echo "file /init.sh $(PWD)/../netns.sh 755 0 0" >> $@ -+ echo "dir /dev 755 0 0" >> $@ -+ echo "nod /dev/console 644 0 0 c 5 1" >> $@ -+ echo "dir /bin 755 0 0" >> $@ -+ echo "file /bin/iperf3 $(IPERF_PATH)/src/iperf3 755 0 0" >> $@ -+ echo "file /bin/wg $(WIREGUARD_TOOLS_PATH)/src/tools/wg 755 0 0" >> $@ -+ echo "file /bin/bash $(BASH_PATH)/bash 755 0 0" >> $@ -+ echo "file /bin/ip $(IPROUTE2_PATH)/ip/ip 755 0 0" >> $@ -+ echo "file /bin/ss $(IPROUTE2_PATH)/misc/ss 755 0 0" >> $@ -+ echo "file /bin/ping $(IPUTILS_PATH)/ping 755 0 0" >> $@ -+ echo "file /bin/ncat $(NMAP_PATH)/ncat/ncat 755 0 0" >> $@ -+ echo "file /bin/xtables-multi $(IPTABLES_PATH)/iptables/xtables-multi 755 0 0" >> $@ -+ echo "slink /bin/iptables xtables-multi 777 0 0" >> $@ -+ echo "slink /bin/ping6 ping 777 0 0" >> $@ -+ echo "dir /lib 755 0 0" >> $@ -+ echo "file /lib/libc.so $(MUSL_PATH)/lib/libc.so 755 0 0" >> $@ -+ echo "slink /lib/ld-linux.so.1 libc.so 777 0 0" >> $@ -+ -+$(KERNEL_BUILD_PATH)/.config: kernel.config arch/$(ARCH).config -+ mkdir -p $(KERNEL_BUILD_PATH) -+ cp kernel.config $(KERNEL_BUILD_PATH)/minimal.config -+ printf 'CONFIG_NR_CPUS=$(NR_CPUS)\nCONFIG_INITRAMFS_SOURCE="$(BUILD_PATH)/init-cpio-spec.txt"\n' >> $(KERNEL_BUILD_PATH)/minimal.config -+ cat arch/$(ARCH).config >> $(KERNEL_BUILD_PATH)/minimal.config -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) allnoconfig -+ cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config $(KERNEL_BUILD_PATH)/minimal.config -+ $(if $(findstring yes,$(DEBUG_KERNEL)),cp debug.config $(KERNEL_BUILD_PATH) && cd $(KERNEL_BUILD_PATH) 
&& ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config debug.config,) -+ -+$(KERNEL_BZIMAGE): $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(MUSL_PATH)/lib/libc.so $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/tools/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES) -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" -+ -+$(BUILD_PATH)/include/linux/.installed: | $(KERNEL_BUILD_PATH)/.config -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) INSTALL_HDR_PATH=$(BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) headers_install -+ touch $@ -+ -+$(MUSL_PATH)/lib/libc.so: $(MUSL_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ cd $(MUSL_PATH) && CC=$(REAL_CC) ./configure --prefix=/ --disable-static --build=$(CBUILD) -+ $(MAKE) -C $(MUSL_PATH) -+ $(STRIP) -s $@ -+ -+$(BUILD_PATH)/include/.installed: $(MUSL_PATH)/lib/libc.so -+ $(MAKE) -C $(MUSL_PATH) DESTDIR=$(BUILD_PATH) install-headers -+ touch $@ -+ -+$(MUSL_CC): $(MUSL_PATH)/lib/libc.so -+ sh $(MUSL_PATH)/tools/musl-gcc.specs.sh $(BUILD_PATH)/include $(MUSL_PATH)/lib /lib/ld-linux.so.1 > $(BUILD_PATH)/musl-gcc.specs -+ printf '#!/bin/sh\nexec "$(REAL_CC)" --specs="$(BUILD_PATH)/musl-gcc.specs" -fno-stack-protector -no-pie "$$@"\n' > $(BUILD_PATH)/musl-gcc -+ chmod +x $(BUILD_PATH)/musl-gcc -+ -+$(IPERF_PATH)/.installed: $(IPERF_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ sed -i '1s/^/#include /' $(IPERF_PATH)/src/cjson.h $(IPERF_PATH)/src/timer.h -+ sed -i -r 's/-p?g//g' $(IPERF_PATH)/src/Makefile* -+ touch $@ -+ -+$(IPERF_PATH)/src/iperf3: | $(IPERF_PATH)/.installed $(USERSPACE_DEPS) -+ cd $(IPERF_PATH) && CFLAGS="$(CFLAGS) -D_GNU_SOURCE" 
./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared -+ $(MAKE) -C $(IPERF_PATH) -+ $(STRIP) -s $@ -+ -+$(LIBMNL_PATH)/.installed: $(LIBMNL_TAR) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ touch $@ -+ -+$(LIBMNL_PATH)/src/.libs/libmnl.a: | $(LIBMNL_PATH)/.installed $(USERSPACE_DEPS) -+ cd $(LIBMNL_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared -+ $(MAKE) -C $(LIBMNL_PATH) -+ sed -i 's:prefix=.*:prefix=$(LIBMNL_PATH):' $(LIBMNL_PATH)/libmnl.pc -+ -+$(WIREGUARD_TOOLS_PATH)/.installed: $(WIREGUARD_TOOLS_TAR) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ touch $@ -+ -+$(WIREGUARD_TOOLS_PATH)/src/tools/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src/tools LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg -+ $(STRIP) -s $@ -+ -+$(BUILD_PATH)/init: init.c | $(USERSPACE_DEPS) -+ mkdir -p $(BUILD_PATH) -+ $(MUSL_CC) -o $@ $(CFLAGS) $(LDFLAGS) -std=gnu11 $< -+ $(STRIP) -s $@ -+ -+$(IPUTILS_PATH)/.installed: $(IPUTILS_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ touch $@ -+ -+$(IPUTILS_PATH)/ping: | $(IPUTILS_PATH)/.installed $(USERSPACE_DEPS) -+ $(MAKE) -C $(IPUTILS_PATH) USE_CAP=no USE_IDN=no USE_NETTLE=no USE_CRYPTO=no ping -+ $(STRIP) -s $@ -+ -+$(BASH_PATH)/.installed: $(BASH_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ touch $@ -+ -+$(BASH_PATH)/bash: | $(BASH_PATH)/.installed $(USERSPACE_DEPS) -+ cd $(BASH_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --without-bash-malloc --disable-debugger --disable-help-builtin --disable-history --disable-multibyte --disable-progcomp --disable-readline --disable-mem-scramble -+ $(MAKE) -C $(BASH_PATH) -+ $(STRIP) -s $@ -+ -+$(IPROUTE2_PATH)/.installed: $(IPROUTE2_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C 
$(BUILD_PATH) -xf $< -+ printf 'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=y\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS -DHAVE_LIBMNL -I$(LIBMNL_PATH)/include\nLDLIBS+=-lmnl' > $(IPROUTE2_PATH)/config.mk -+ printf 'lib: snapshot\n\t$$(MAKE) -C lib\nip/ip: lib\n\t$$(MAKE) -C ip ip\nmisc/ss: lib\n\t$$(MAKE) -C misc ss\n' >> $(IPROUTE2_PATH)/Makefile -+ touch $@ -+ -+$(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ ip/ip -+ $(STRIP) -s $(IPROUTE2_PATH)/ip/ip -+ -+$(IPROUTE2_PATH)/misc/ss: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ misc/ss -+ $(STRIP) -s $(IPROUTE2_PATH)/misc/ss -+ -+$(IPTABLES_PATH)/.installed: $(IPTABLES_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ sed -i -e "/nfnetlink=[01]/s:=[01]:=0:" -e "/nfconntrack=[01]/s:=[01]:=0:" $(IPTABLES_PATH)/configure -+ touch $@ -+ -+$(IPTABLES_PATH)/iptables/xtables-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+ cd $(IPTABLES_PATH) && PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --with-kernel=$(BUILD_PATH)/include -+ $(MAKE) -C $(IPTABLES_PATH) -+ $(STRIP) -s $@ -+ -+$(NMAP_PATH)/.installed: $(NMAP_TAR) -+ mkdir -p $(BUILD_PATH) -+ flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -+ touch $@ -+ -+$(NMAP_PATH)/ncat/ncat: | $(NMAP_PATH)/.installed $(USERSPACE_DEPS) -+ cd $(NMAP_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) 
--enable-static --disable-shared --without-ndiff --without-zenmap --without-nping --with-libpcap=included --with-libpcre=included --with-libdnet=included --without-liblua --with-liblinear=included --without-nmap-update --without-openssl --with-pcap=linux -+ $(MAKE) -C $(NMAP_PATH) build-ncat -+ $(STRIP) -s $@ -+ -+clean: -+ rm -rf $(BUILD_PATH) -+ -+distclean: clean -+ rm -rf $(DISTFILES_PATH) -+ -+menuconfig: $(KERNEL_BUILD_PATH)/.config -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" menuconfig -+ -+.PHONY: qemu build clean distclean menuconfig -+.DELETE_ON_ERROR: ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/aarch64.config -@@ -0,0 +1,5 @@ -+CONFIG_SERIAL_AMBA_PL011=y -+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/aarch64_be.config -@@ -0,0 +1,6 @@ -+CONFIG_CPU_BIG_ENDIAN=y -+CONFIG_SERIAL_AMBA_PL011=y -+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/arm.config -@@ -0,0 +1,9 @@ -+CONFIG_MMU=y -+CONFIG_ARCH_MULTI_V7=y -+CONFIG_ARCH_VIRT=y -+CONFIG_THUMB2_KERNEL=n -+CONFIG_SERIAL_AMBA_PL011=y -+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/armeb.config -@@ -0,0 +1,10 @@ -+CONFIG_MMU=y -+CONFIG_ARCH_MULTI_V7=y -+CONFIG_ARCH_VIRT=y -+CONFIG_THUMB2_KERNEL=n -+CONFIG_SERIAL_AMBA_PL011=y -+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyAMA0 wg.success=ttyAMA1" -+CONFIG_CPU_BIG_ENDIAN=y -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ 
b/tools/testing/selftests/wireguard/qemu/arch/i686.config -@@ -0,0 +1,5 @@ -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/m68k.config -@@ -0,0 +1,9 @@ -+CONFIG_MMU=y -+CONFIG_M68040=y -+CONFIG_MAC=y -+CONFIG_SERIAL_PMACZILOG=y -+CONFIG_SERIAL_PMACZILOG_TTYS=y -+CONFIG_SERIAL_PMACZILOG_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/mips.config -@@ -0,0 +1,11 @@ -+CONFIG_CPU_MIPS32_R2=y -+CONFIG_MIPS_MALTA=y -+CONFIG_MIPS_CPS=y -+CONFIG_MIPS_FP_SUPPORT=y -+CONFIG_POWER_RESET=y -+CONFIG_POWER_RESET_SYSCON=y -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/mips64.config -@@ -0,0 +1,14 @@ -+CONFIG_64BIT=y -+CONFIG_CPU_MIPS64_R2=y -+CONFIG_MIPS32_N32=y -+CONFIG_CPU_HAS_MSA=y -+CONFIG_MIPS_MALTA=y -+CONFIG_MIPS_CPS=y -+CONFIG_MIPS_FP_SUPPORT=y -+CONFIG_POWER_RESET=y -+CONFIG_POWER_RESET_SYSCON=y -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/mips64el.config -@@ -0,0 +1,15 @@ -+CONFIG_64BIT=y -+CONFIG_CPU_MIPS64_R2=y -+CONFIG_MIPS32_N32=y -+CONFIG_CPU_HAS_MSA=y -+CONFIG_MIPS_MALTA=y -+CONFIG_CPU_LITTLE_ENDIAN=y -+CONFIG_MIPS_CPS=y -+CONFIG_MIPS_FP_SUPPORT=y -+CONFIG_POWER_RESET=y -+CONFIG_POWER_RESET_SYSCON=y -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/mipsel.config -@@ 
-0,0 +1,12 @@ -+CONFIG_CPU_MIPS32_R2=y -+CONFIG_MIPS_MALTA=y -+CONFIG_CPU_LITTLE_ENDIAN=y -+CONFIG_MIPS_CPS=y -+CONFIG_MIPS_FP_SUPPORT=y -+CONFIG_POWER_RESET=y -+CONFIG_POWER_RESET_SYSCON=y -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc.config -@@ -0,0 +1,10 @@ -+CONFIG_PPC_QEMU_E500=y -+CONFIG_FSL_SOC_BOOKE=y -+CONFIG_PPC_85xx=y -+CONFIG_PHYS_64BIT=y -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_MATH_EMULATION=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1024 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config -@@ -0,0 +1,12 @@ -+CONFIG_PPC64=y -+CONFIG_PPC_PSERIES=y -+CONFIG_ALTIVEC=y -+CONFIG_VSX=y -+CONFIG_PPC_OF_BOOT_TRAMPOLINE=y -+CONFIG_PPC_RADIX_MMU=y -+CONFIG_HVC_CONSOLE=y -+CONFIG_CPU_LITTLE_ENDIAN=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=hvc0 wg.success=hvc1" -+CONFIG_SECTION_MISMATCH_WARN_ONLY=y -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/arch/x86_64.config -@@ -0,0 +1,5 @@ -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_CMDLINE_BOOL=y -+CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" -+CONFIG_FRAME_WARN=1280 ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/debug.config -@@ -0,0 +1,67 @@ -+CONFIG_LOCALVERSION="-debug" -+CONFIG_ENABLE_WARN_DEPRECATED=y -+CONFIG_ENABLE_MUST_CHECK=y -+CONFIG_FRAME_POINTER=y -+CONFIG_STACK_VALIDATION=y -+CONFIG_DEBUG_KERNEL=y -+CONFIG_DEBUG_INFO=y -+CONFIG_DEBUG_INFO_DWARF4=y -+CONFIG_PAGE_EXTENSION=y -+CONFIG_PAGE_POISONING=y -+CONFIG_DEBUG_OBJECTS=y -+CONFIG_DEBUG_OBJECTS_FREE=y -+CONFIG_DEBUG_OBJECTS_TIMERS=y -+CONFIG_DEBUG_OBJECTS_WORK=y -+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y -+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y -+CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 
-+CONFIG_SLUB_DEBUG_ON=y -+CONFIG_DEBUG_VM=y -+CONFIG_DEBUG_MEMORY_INIT=y -+CONFIG_HAVE_DEBUG_STACKOVERFLOW=y -+CONFIG_DEBUG_STACKOVERFLOW=y -+CONFIG_HAVE_ARCH_KMEMCHECK=y -+CONFIG_HAVE_ARCH_KASAN=y -+CONFIG_KASAN=y -+CONFIG_KASAN_INLINE=y -+CONFIG_UBSAN=y -+CONFIG_UBSAN_SANITIZE_ALL=y -+CONFIG_UBSAN_NO_ALIGNMENT=y -+CONFIG_UBSAN_NULL=y -+CONFIG_DEBUG_KMEMLEAK=y -+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=8192 -+CONFIG_DEBUG_STACK_USAGE=y -+CONFIG_DEBUG_SHIRQ=y -+CONFIG_WQ_WATCHDOG=y -+CONFIG_SCHED_DEBUG=y -+CONFIG_SCHED_INFO=y -+CONFIG_SCHEDSTATS=y -+CONFIG_SCHED_STACK_END_CHECK=y -+CONFIG_DEBUG_TIMEKEEPING=y -+CONFIG_TIMER_STATS=y -+CONFIG_DEBUG_PREEMPT=y -+CONFIG_DEBUG_RT_MUTEXES=y -+CONFIG_DEBUG_SPINLOCK=y -+CONFIG_DEBUG_MUTEXES=y -+CONFIG_DEBUG_LOCK_ALLOC=y -+CONFIG_PROVE_LOCKING=y -+CONFIG_LOCKDEP=y -+CONFIG_DEBUG_ATOMIC_SLEEP=y -+CONFIG_TRACE_IRQFLAGS=y -+CONFIG_DEBUG_BUGVERBOSE=y -+CONFIG_DEBUG_LIST=y -+CONFIG_DEBUG_PI_LIST=y -+CONFIG_PROVE_RCU=y -+CONFIG_SPARSE_RCU_POINTER=y -+CONFIG_RCU_CPU_STALL_TIMEOUT=21 -+CONFIG_RCU_TRACE=y -+CONFIG_RCU_EQS_DEBUG=y -+CONFIG_USER_STACKTRACE_SUPPORT=y -+CONFIG_DEBUG_SG=y -+CONFIG_DEBUG_NOTIFIERS=y -+CONFIG_DOUBLEFAULT=y -+CONFIG_X86_DEBUG_FPU=y -+CONFIG_DEBUG_SECTION_MISMATCH=y -+CONFIG_DEBUG_PAGEALLOC=y -+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y -+CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/init.c -@@ -0,0 +1,284 @@ -+// SPDX-License-Identifier: GPL-2.0 -+/* -+ * Copyright (C) 2015-2019 Jason A. Donenfeld . All Rights Reserved. 
-+ */ -+ -+#define _GNU_SOURCE -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+__attribute__((noreturn)) static void poweroff(void) -+{ -+ fflush(stdout); -+ fflush(stderr); -+ reboot(RB_AUTOBOOT); -+ sleep(30); -+ fprintf(stderr, "\x1b[37m\x1b[41m\x1b[1mFailed to power off!!!\x1b[0m\n"); -+ exit(1); -+} -+ -+static void panic(const char *what) -+{ -+ fprintf(stderr, "\n\n\x1b[37m\x1b[41m\x1b[1mSOMETHING WENT HORRIBLY WRONG\x1b[0m\n\n \x1b[31m\x1b[1m%s: %s\x1b[0m\n\n\x1b[37m\x1b[44m\x1b[1mPower off...\x1b[0m\n\n", what, strerror(errno)); -+ poweroff(); -+} -+ -+#define pretty_message(msg) puts("\x1b[32m\x1b[1m" msg "\x1b[0m") -+ -+static void print_banner(void) -+{ -+ struct utsname utsname; -+ int len; -+ -+ if (uname(&utsname) < 0) -+ panic("uname"); -+ -+ len = strlen(" WireGuard Test Suite on ") + strlen(utsname.sysname) + strlen(utsname.release) + strlen(utsname.machine); -+ printf("\x1b[45m\x1b[33m\x1b[1m%*.s\x1b[0m\n\x1b[45m\x1b[33m\x1b[1m WireGuard Test Suite on %s %s %s \x1b[0m\n\x1b[45m\x1b[33m\x1b[1m%*.s\x1b[0m\n\n", len, "", utsname.sysname, utsname.release, utsname.machine, len, ""); -+} -+ -+static void seed_rng(void) -+{ -+ int fd; -+ struct { -+ int entropy_count; -+ int buffer_size; -+ unsigned char buffer[256]; -+ } entropy = { -+ .entropy_count = sizeof(entropy.buffer) * 8, -+ .buffer_size = sizeof(entropy.buffer), -+ .buffer = "Adding real entropy is not actually important for these tests. Don't try this at home, kids!" 
-+ }; -+ -+ if (mknod("/dev/urandom", S_IFCHR | 0644, makedev(1, 9))) -+ panic("mknod(/dev/urandom)"); -+ fd = open("/dev/urandom", O_WRONLY); -+ if (fd < 0) -+ panic("open(urandom)"); -+ for (int i = 0; i < 256; ++i) { -+ if (ioctl(fd, RNDADDENTROPY, &entropy) < 0) -+ panic("ioctl(urandom)"); -+ } -+ close(fd); -+} -+ -+static void mount_filesystems(void) -+{ -+ pretty_message("[+] Mounting filesystems..."); -+ mkdir("/dev", 0755); -+ mkdir("/proc", 0755); -+ mkdir("/sys", 0755); -+ mkdir("/tmp", 0755); -+ mkdir("/run", 0755); -+ mkdir("/var", 0755); -+ if (mount("none", "/dev", "devtmpfs", 0, NULL)) -+ panic("devtmpfs mount"); -+ if (mount("none", "/proc", "proc", 0, NULL)) -+ panic("procfs mount"); -+ if (mount("none", "/sys", "sysfs", 0, NULL)) -+ panic("sysfs mount"); -+ if (mount("none", "/tmp", "tmpfs", 0, NULL)) -+ panic("tmpfs mount"); -+ if (mount("none", "/run", "tmpfs", 0, NULL)) -+ panic("tmpfs mount"); -+ if (mount("none", "/sys/kernel/debug", "debugfs", 0, NULL)) -+ ; /* Not a problem if it fails.*/ -+ if (symlink("/run", "/var/run")) -+ panic("run symlink"); -+ if (symlink("/proc/self/fd", "/dev/fd")) -+ panic("fd symlink"); -+} -+ -+static void enable_logging(void) -+{ -+ int fd; -+ pretty_message("[+] Enabling logging..."); -+ fd = open("/proc/sys/kernel/printk", O_WRONLY); -+ if (fd >= 0) { -+ if (write(fd, "9\n", 2) != 2) -+ panic("write(printk)"); -+ close(fd); -+ } -+ fd = open("/proc/sys/debug/exception-trace", O_WRONLY); -+ if (fd >= 0) { -+ if (write(fd, "1\n", 2) != 2) -+ panic("write(exception-trace)"); -+ close(fd); -+ } -+ fd = open("/proc/sys/kernel/panic_on_warn", O_WRONLY); -+ if (fd >= 0) { -+ if (write(fd, "1\n", 2) != 2) -+ panic("write(panic_on_warn)"); -+ close(fd); -+ } -+} -+ -+static void kmod_selftests(void) -+{ -+ FILE *file; -+ char line[2048], *start, *pass; -+ bool success = true; -+ pretty_message("[+] Module self-tests:"); -+ file = fopen("/proc/kmsg", "r"); -+ if (!file) -+ panic("fopen(kmsg)"); -+ if 
(fcntl(fileno(file), F_SETFL, O_NONBLOCK) < 0) -+ panic("fcntl(kmsg, nonblock)"); -+ while (fgets(line, sizeof(line), file)) { -+ start = strstr(line, "wireguard: "); -+ if (!start) -+ continue; -+ start += 11; -+ *strchrnul(start, '\n') = '\0'; -+ if (strstr(start, "www.wireguard.com")) -+ break; -+ pass = strstr(start, ": pass"); -+ if (!pass || pass[6] != '\0') { -+ success = false; -+ printf(" \x1b[31m* %s\x1b[0m\n", start); -+ } else -+ printf(" \x1b[32m* %s\x1b[0m\n", start); -+ } -+ fclose(file); -+ if (!success) { -+ puts("\x1b[31m\x1b[1m[-] Tests failed! \u2639\x1b[0m"); -+ poweroff(); -+ } -+} -+ -+static void launch_tests(void) -+{ -+ char cmdline[4096], *success_dev; -+ int status, fd; -+ pid_t pid; -+ -+ pretty_message("[+] Launching tests..."); -+ pid = fork(); -+ if (pid == -1) -+ panic("fork"); -+ else if (pid == 0) { -+ execl("/init.sh", "init", NULL); -+ panic("exec"); -+ } -+ if (waitpid(pid, &status, 0) < 0) -+ panic("waitpid"); -+ if (WIFEXITED(status) && WEXITSTATUS(status) == 0) { -+ pretty_message("[+] Tests successful! 
:-)"); -+ fd = open("/proc/cmdline", O_RDONLY); -+ if (fd < 0) -+ panic("open(/proc/cmdline)"); -+ if (read(fd, cmdline, sizeof(cmdline) - 1) <= 0) -+ panic("read(/proc/cmdline)"); -+ cmdline[sizeof(cmdline) - 1] = '\0'; -+ for (success_dev = strtok(cmdline, " \n"); success_dev; success_dev = strtok(NULL, " \n")) { -+ if (strncmp(success_dev, "wg.success=", 11)) -+ continue; -+ memcpy(success_dev + 11 - 5, "/dev/", 5); -+ success_dev += 11 - 5; -+ break; -+ } -+ if (!success_dev || !strlen(success_dev)) -+ panic("Unable to find success device"); -+ -+ fd = open(success_dev, O_WRONLY); -+ if (fd < 0) -+ panic("open(success_dev)"); -+ if (write(fd, "success\n", 8) != 8) -+ panic("write(success_dev)"); -+ close(fd); -+ } else { -+ const char *why = "unknown cause"; -+ int what = -1; -+ -+ if (WIFEXITED(status)) { -+ why = "exit code"; -+ what = WEXITSTATUS(status); -+ } else if (WIFSIGNALED(status)) { -+ why = "signal"; -+ what = WTERMSIG(status); -+ } -+ printf("\x1b[31m\x1b[1m[-] Tests failed with %s %d! \u2639\x1b[0m\n", why, what); -+ } -+} -+ -+static void ensure_console(void) -+{ -+ for (unsigned int i = 0; i < 1000; ++i) { -+ int fd = open("/dev/console", O_RDWR); -+ if (fd < 0) { -+ usleep(50000); -+ continue; -+ } -+ dup2(fd, 0); -+ dup2(fd, 1); -+ dup2(fd, 2); -+ close(fd); -+ if (write(1, "\0\0\0\0\n", 5) == 5) -+ return; -+ } -+ panic("Unable to open console device"); -+} -+ -+static void clear_leaks(void) -+{ -+ int fd; -+ -+ fd = open("/sys/kernel/debug/kmemleak", O_WRONLY); -+ if (fd < 0) -+ return; -+ pretty_message("[+] Starting memory leak detection..."); -+ write(fd, "clear\n", 5); -+ close(fd); -+} -+ -+static void check_leaks(void) -+{ -+ int fd; -+ -+ fd = open("/sys/kernel/debug/kmemleak", O_WRONLY); -+ if (fd < 0) -+ return; -+ pretty_message("[+] Scanning for memory leaks..."); -+ sleep(2); /* Wait for any grace periods. 
*/ -+ write(fd, "scan\n", 5); -+ close(fd); -+ -+ fd = open("/sys/kernel/debug/kmemleak", O_RDONLY); -+ if (fd < 0) -+ return; -+ if (sendfile(1, fd, NULL, 0x7ffff000) > 0) -+ panic("Memory leaks encountered"); -+ close(fd); -+} -+ -+int main(int argc, char *argv[]) -+{ -+ seed_rng(); -+ ensure_console(); -+ print_banner(); -+ mount_filesystems(); -+ kmod_selftests(); -+ enable_logging(); -+ clear_leaks(); -+ launch_tests(); -+ check_leaks(); -+ poweroff(); -+ return 1; -+} ---- /dev/null -+++ b/tools/testing/selftests/wireguard/qemu/kernel.config -@@ -0,0 +1,86 @@ -+CONFIG_LOCALVERSION="" -+CONFIG_NET=y -+CONFIG_NETDEVICES=y -+CONFIG_NET_CORE=y -+CONFIG_NET_IPIP=y -+CONFIG_DUMMY=y -+CONFIG_VETH=y -+CONFIG_MULTIUSER=y -+CONFIG_NAMESPACES=y -+CONFIG_NET_NS=y -+CONFIG_UNIX=y -+CONFIG_INET=y -+CONFIG_IPV6=y -+CONFIG_NETFILTER=y -+CONFIG_NETFILTER_ADVANCED=y -+CONFIG_NF_CONNTRACK=y -+CONFIG_NF_NAT=y -+CONFIG_NETFILTER_XTABLES=y -+CONFIG_NETFILTER_XT_NAT=y -+CONFIG_NETFILTER_XT_MATCH_LENGTH=y -+CONFIG_NF_CONNTRACK_IPV4=y -+CONFIG_NF_NAT_IPV4=y -+CONFIG_IP_NF_IPTABLES=y -+CONFIG_IP_NF_FILTER=y -+CONFIG_IP_NF_NAT=y -+CONFIG_IP_ADVANCED_ROUTER=y -+CONFIG_IP_MULTIPLE_TABLES=y -+CONFIG_IPV6_MULTIPLE_TABLES=y -+CONFIG_TTY=y -+CONFIG_BINFMT_ELF=y -+CONFIG_BINFMT_SCRIPT=y -+CONFIG_VDSO=y -+CONFIG_VIRTUALIZATION=y -+CONFIG_HYPERVISOR_GUEST=y -+CONFIG_PARAVIRT=y -+CONFIG_KVM_GUEST=y -+CONFIG_PARAVIRT_SPINLOCKS=y -+CONFIG_PRINTK=y -+CONFIG_KALLSYMS=y -+CONFIG_BUG=y -+CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -+CONFIG_EMBEDDED=n -+CONFIG_BASE_FULL=y -+CONFIG_FUTEX=y -+CONFIG_SHMEM=y -+CONFIG_SLUB=y -+CONFIG_SPARSEMEM_VMEMMAP=y -+CONFIG_SMP=y -+CONFIG_SCHED_SMT=y -+CONFIG_SCHED_MC=y -+CONFIG_NUMA=y -+CONFIG_PREEMPT=y -+CONFIG_NO_HZ=y -+CONFIG_NO_HZ_IDLE=y -+CONFIG_NO_HZ_FULL=n -+CONFIG_HZ_PERIODIC=n -+CONFIG_HIGH_RES_TIMERS=y -+CONFIG_ARCH_RANDOM=y -+CONFIG_FILE_LOCKING=y -+CONFIG_POSIX_TIMERS=y -+CONFIG_DEVTMPFS=y -+CONFIG_PROC_FS=y -+CONFIG_PROC_SYSCTL=y -+CONFIG_SYSFS=y 
-+CONFIG_TMPFS=y -+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15 -+CONFIG_PRINTK_TIME=y -+CONFIG_BLK_DEV_INITRD=y -+CONFIG_LEGACY_VSYSCALL_NONE=y -+CONFIG_KERNEL_GZIP=y -+CONFIG_PANIC_ON_OOPS=y -+CONFIG_BUG_ON_DATA_CORRUPTION=y -+CONFIG_LOCKUP_DETECTOR=y -+CONFIG_SOFTLOCKUP_DETECTOR=y -+CONFIG_HARDLOCKUP_DETECTOR=y -+CONFIG_WQ_WATCHDOG=y -+CONFIG_DETECT_HUNG_TASK=y -+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y -+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y -+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y -+CONFIG_PANIC_TIMEOUT=-1 -+CONFIG_STACKTRACE=y -+CONFIG_EARLY_PRINTK=y -+CONFIG_GDB_SCRIPTS=y -+CONFIG_WIREGUARD=y -+CONFIG_WIREGUARD_DEBUG=y diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0074-wireguard-Kconfig-select-parent-dependency-for-crypt.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0074-wireguard-Kconfig-select-parent-dependency-for-crypt.patch deleted file mode 100644 index c2f8f77f5..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0074-wireguard-Kconfig-select-parent-dependency-for-crypt.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Sun, 15 Dec 2019 22:08:01 +0100 -Subject: [PATCH] wireguard: Kconfig: select parent dependency for crypto - -commit d7c68a38bb4f9b7c1a2e4a772872c752ee5c44a6 upstream. - -This fixes the crypto selection submenu depenencies. Otherwise, we'd -wind up issuing warnings in which certain dependencies we also select -couldn't be satisfied. This condition was triggered by the addition of -the test suite autobuilder in the previous commit. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/Kconfig | 2 ++ - 1 file changed, 2 insertions(+) - ---- a/drivers/net/Kconfig -+++ b/drivers/net/Kconfig -@@ -85,6 +85,8 @@ config WIREGUARD - select CRYPTO_POLY1305_X86_64 if X86 && 64BIT - select CRYPTO_BLAKE2S_X86 if X86 && 64BIT - select CRYPTO_CURVE25519_X86 if X86 && 64BIT -+ select ARM_CRYPTO if ARM -+ select ARM64_CRYPTO if ARM64 - select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON - select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON - select CRYPTO_POLY1305_ARM if ARM diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0075-wireguard-global-fix-spelling-mistakes-in-comments.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0075-wireguard-global-fix-spelling-mistakes-in-comments.patch deleted file mode 100644 index 9b34e663a..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0075-wireguard-global-fix-spelling-mistakes-in-comments.patch +++ /dev/null @@ -1,66 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Josh Soref -Date: Sun, 15 Dec 2019 22:08:02 +0100 -Subject: [PATCH] wireguard: global: fix spelling mistakes in comments - -commit a2ec8b5706944d228181c8b91d815f41d6dd8e7b upstream. - -This fixes two spelling errors in source code comments. - -Signed-off-by: Josh Soref -[Jason: rewrote commit message] -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/receive.c | 2 +- - include/uapi/linux/wireguard.h | 8 ++++---- - 2 files changed, 5 insertions(+), 5 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -380,7 +380,7 @@ static void wg_packet_consume_data_done( - /* We've already verified the Poly1305 auth tag, which means this packet - * was not modified in transit. 
We can therefore tell the networking - * stack that all checksums of every layer of encapsulation have already -- * been checked "by the hardware" and therefore is unneccessary to check -+ * been checked "by the hardware" and therefore is unnecessary to check - * again in software. - */ - skb->ip_summed = CHECKSUM_UNNECESSARY; ---- a/include/uapi/linux/wireguard.h -+++ b/include/uapi/linux/wireguard.h -@@ -18,13 +18,13 @@ - * one but not both of: - * - * WGDEVICE_A_IFINDEX: NLA_U32 -- * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 - * - * The kernel will then return several messages (NLM_F_MULTI) containing the - * following tree of nested items: - * - * WGDEVICE_A_IFINDEX: NLA_U32 -- * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 - * WGDEVICE_A_PRIVATE_KEY: NLA_EXACT_LEN, len WG_KEY_LEN - * WGDEVICE_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN - * WGDEVICE_A_LISTEN_PORT: NLA_U16 -@@ -77,7 +77,7 @@ - * WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME: - * - * WGDEVICE_A_IFINDEX: NLA_U32 -- * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMESIZ - 1 -+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1 - * WGDEVICE_A_FLAGS: NLA_U32, 0 or WGDEVICE_F_REPLACE_PEERS if all current - * peers should be removed prior to adding the list below. - * WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN, all zeros to remove -@@ -121,7 +121,7 @@ - * filling in information not contained in the prior. Note that if - * WGDEVICE_F_REPLACE_PEERS is specified in the first message, it probably - * should not be specified in fragments that come after, so that the list -- * of peers is only cleared the first time but appened after. Likewise for -+ * of peers is only cleared the first time but appended after. 
Likewise for - * peers, if WGPEER_F_REPLACE_ALLOWEDIPS is specified in the first message - * of a peer, it likely should not be specified in subsequent fragments. - * diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0076-wireguard-main-remove-unused-include-linux-version.h.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0076-wireguard-main-remove-unused-include-linux-version.h.patch deleted file mode 100644 index 3cc0b56c3..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0076-wireguard-main-remove-unused-include-linux-version.h.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: YueHaibing -Date: Sun, 15 Dec 2019 22:08:03 +0100 -Subject: [PATCH] wireguard: main: remove unused include - -commit 43967b6ff91e53bcce5ae08c16a0588a475b53a1 upstream. - -Remove from the includes for main.c, which is unused. - -Signed-off-by: YueHaibing -[Jason: reworded commit message] -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/main.c | 1 - - 1 file changed, 1 deletion(-) - ---- a/drivers/net/wireguard/main.c -+++ b/drivers/net/wireguard/main.c -@@ -12,7 +12,6 @@ - - #include - --#include - #include - #include - #include diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0077-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0077-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch deleted file mode 100644 index edd90484d..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0077-wireguard-allowedips-use-kfree_rcu-instead-of-call_r.patch +++ /dev/null @@ -1,41 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Wei Yongjun -Date: Sun, 15 Dec 2019 22:08:04 +0100 -Subject: [PATCH] wireguard: allowedips: use kfree_rcu() instead of call_rcu() - -commit d89ee7d5c73af15c1c6f12b016cdf469742b5726 upstream. 
- -The callback function of call_rcu() just calls a kfree(), so we -can use kfree_rcu() instead of call_rcu() + callback function. - -Signed-off-by: Wei Yongjun -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/allowedips.c | 7 +------ - 1 file changed, 1 insertion(+), 6 deletions(-) - ---- a/drivers/net/wireguard/allowedips.c -+++ b/drivers/net/wireguard/allowedips.c -@@ -31,11 +31,6 @@ static void copy_and_assign_cidr(struct - #define CHOOSE_NODE(parent, key) \ - parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] - --static void node_free_rcu(struct rcu_head *rcu) --{ -- kfree(container_of(rcu, struct allowedips_node, rcu)); --} -- - static void push_rcu(struct allowedips_node **stack, - struct allowedips_node __rcu *p, unsigned int *len) - { -@@ -112,7 +107,7 @@ static void walk_remove_by_peer(struct a - if (!node->bit[0] || !node->bit[1]) { - rcu_assign_pointer(*nptr, DEREF( - &node->bit[!REF(node->bit[0])])); -- call_rcu(&node->rcu, node_free_rcu); -+ kfree_rcu(node, rcu); - node = DEREF(nptr); - } - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0078-wireguard-selftests-remove-ancient-kernel-compatibil.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0078-wireguard-selftests-remove-ancient-kernel-compatibil.patch deleted file mode 100644 index 6ff0dd9d1..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0078-wireguard-selftests-remove-ancient-kernel-compatibil.patch +++ /dev/null @@ -1,373 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 2 Jan 2020 17:47:49 +0100 -Subject: [PATCH] wireguard: selftests: remove ancient kernel compatibility - code - -commit 9a69a4c8802adf642bc4a13d471b5a86b44ed434 upstream. - -Quite a bit of the test suite was designed to work with ancient kernels. -Thankfully we no longer have to deal with this. 
This commit updates -things that we can finally update and removes things that we can finally -remove, to avoid the build-up of the last several years as a result of -having to support ancient kernels. We can finally rely on suppress_ -prefixlength being available. On the build side of things, the no-PIE -hack is no longer required, and we can bump some of the tools, repair -our m68k and i686-kvm support, and get better coverage of the static -branches used in the crypto lib and in udp_tunnel. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/netns.sh | 11 +-- - .../testing/selftests/wireguard/qemu/Makefile | 82 ++++++++++--------- - .../selftests/wireguard/qemu/arch/m68k.config | 2 +- - tools/testing/selftests/wireguard/qemu/init.c | 1 + - .../selftests/wireguard/qemu/kernel.config | 2 + - 5 files changed, 50 insertions(+), 48 deletions(-) - ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -37,7 +37,7 @@ n2() { pretty 2 "$*"; maybe_exec ip netn - ip0() { pretty 0 "ip $*"; ip -n $netns0 "$@"; } - ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } - ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } --sleep() { read -t "$1" -N 0 || true; } -+sleep() { read -t "$1" -N 1 || true; } - waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; } - waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } - waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } -@@ -294,12 +294,9 @@ ip1 -6 rule add table main suppress_pref - ip1 -4 route add default dev wg0 table 51820 - ip1 -4 rule add not fwmark 51820 table 51820 - ip1 -4 rule add table main suppress_prefixlength 0 --# suppress_prefixlength only got 
added in 3.12, and we want to support 3.10+. --if [[ $(ip1 -4 rule show all) == *suppress_prefixlength* ]]; then -- # Flood the pings instead of sending just one, to trigger routing table reference counting bugs. -- n1 ping -W 1 -c 100 -f 192.168.99.7 -- n1 ping -W 1 -c 100 -f abab::1111 --fi -+# Flood the pings instead of sending just one, to trigger routing table reference counting bugs. -+n1 ping -W 1 -c 100 -f 192.168.99.7 -+n1 ping -W 1 -c 100 -f abab::1111 - - n0 iptables -t nat -F - ip0 link del vethrc ---- a/tools/testing/selftests/wireguard/qemu/Makefile -+++ b/tools/testing/selftests/wireguard/qemu/Makefile -@@ -5,6 +5,7 @@ - PWD := $(shell pwd) - - CHOST := $(shell gcc -dumpmachine) -+HOST_ARCH := $(firstword $(subst -, ,$(CHOST))) - ifneq (,$(ARCH)) - CBUILD := $(subst -gcc,,$(lastword $(subst /, ,$(firstword $(wildcard $(foreach bindir,$(subst :, ,$(PATH)),$(bindir)/$(ARCH)-*-gcc)))))) - ifeq (,$(CBUILD)) -@@ -37,19 +38,19 @@ endef - define file_download = - $(DISTFILES_PATH)/$(1): - mkdir -p $(DISTFILES_PATH) -- flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -t inf --retry-on-http-error=404 -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' -+ flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' - if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi - endef - --$(eval $(call tar_download,MUSL,musl,1.1.20,.tar.gz,https://www.musl-libc.org/releases/,44be8771d0e6c6b5f82dd15662eb2957c9a3173a19a8b49966ac0542bbd40d61)) -+$(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3)) - $(eval $(call tar_download,LIBMNL,libmnl,1.0.4,.tar.bz2,https://www.netfilter.org/projects/libmnl/files/,171f89699f286a5854b72b91d06e8f8e3683064c5901fb09d954a9ab6f551f81)) --$(eval $(call 
tar_download,IPERF,iperf,3.1.7,.tar.gz,http://downloads.es.net/pub/iperf/,a4ef73406fe92250602b8da2ae89ec53211f805df97a1d1d629db5a14043734f)) -+$(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) - $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) --$(eval $(call tar_download,IPROUTE2,iproute2,5.1.0,.tar.gz,https://www.kernel.org/pub/linux/utils/net/iproute2/,9b43707d6075ecdca14803ca8ce0c8553848c49fa1586d12fd508d66577243f2)) --$(eval $(call tar_download,IPTABLES,iptables,1.6.1,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,0fc2d7bd5d7be11311726466789d4c65fb4c8e096c9182b56ce97440864f0cf5)) --$(eval $(call tar_download,NMAP,nmap,7.60,.tar.bz2,https://nmap.org/dist/,a8796ecc4fa6c38aad6139d9515dc8113023a82e9d787e5a5fb5fa1b05516f21)) --$(eval $(call tar_download,IPUTILS,iputils,s20161105,.tar.gz,https://github.com/iputils/iputils/archive/s20161105.tar.gz/#,f813092f03d17294fd23544b129b95cdb87fe19f7970a51908a6b88509acad8a)) --$(eval $(call tar_download,WIREGUARD_TOOLS,WireGuard,0.0.20191212,.tar.xz,https://git.zx2c4.com/WireGuard/snapshot/,b0d718380f7a8822b2f12d75e462fa4eafa3a77871002981f367cd4fe2a1b071)) -+$(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) -+$(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) -+$(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) -+$(eval $(call 
tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a)) -+$(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20191226,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,aa8af0fdc9872d369d8c890a84dbc2a2466b55795dccd5b47721b2d97644b04f)) - - KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug) - rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) -@@ -59,23 +60,21 @@ export CFLAGS ?= -O3 -pipe - export LDFLAGS ?= - export CPPFLAGS := -I$(BUILD_PATH)/include - --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - CROSS_COMPILE_FLAG := --host=$(CHOST) --NOPIE_GCC := gcc -fno-PIE - CFLAGS += -march=native - STRIP := strip - else - $(info Cross compilation: building for $(CBUILD) using $(CHOST)) - CROSS_COMPILE_FLAG := --build=$(CBUILD) --host=$(CHOST) - export CROSS_COMPILE=$(CBUILD)- --NOPIE_GCC := $(CBUILD)-gcc -fno-PIE - STRIP := $(CBUILD)-strip - endif - ifeq ($(ARCH),aarch64) - QEMU_ARCH := aarch64 - KERNEL_ARCH := arm64 - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm - else - QEMU_MACHINE := -cpu cortex-a53 -machine virt -@@ -85,7 +84,7 @@ else ifeq ($(ARCH),aarch64_be) - QEMU_ARCH := aarch64 - KERNEL_ARCH := arm64 - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm64/boot/Image --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm - else - QEMU_MACHINE := -cpu cortex-a53 -machine virt -@@ -95,7 +94,7 @@ else ifeq ($(ARCH),arm) - QEMU_ARCH := arm - KERNEL_ARCH := arm - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine 
virt,gic_version=host,accel=kvm - else - QEMU_MACHINE := -cpu cortex-a15 -machine virt -@@ -105,7 +104,7 @@ else ifeq ($(ARCH),armeb) - QEMU_ARCH := arm - KERNEL_ARCH := arm - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/arm/boot/zImage --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine virt,gic_version=host,accel=kvm - else - QEMU_MACHINE := -cpu cortex-a15 -machine virt -@@ -116,7 +115,7 @@ else ifeq ($(ARCH),x86_64) - QEMU_ARCH := x86_64 - KERNEL_ARCH := x86_64 - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine q35,accel=kvm - else - QEMU_MACHINE := -cpu Skylake-Server -machine q35 -@@ -126,7 +125,7 @@ else ifeq ($(ARCH),i686) - QEMU_ARCH := i386 - KERNEL_ARCH := x86 - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/x86/boot/bzImage --ifeq ($(subst i686,x86_64,$(CBUILD)),$(CHOST)) -+ifeq ($(subst x86_64,i686,$(HOST_ARCH)),$(ARCH)) - QEMU_MACHINE := -cpu host -machine q35,accel=kvm - else - QEMU_MACHINE := -cpu coreduo -machine q35 -@@ -136,7 +135,7 @@ else ifeq ($(ARCH),mips64) - QEMU_ARCH := mips64 - KERNEL_ARCH := mips - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine malta,accel=kvm - CFLAGS += -EB - else -@@ -147,7 +146,7 @@ else ifeq ($(ARCH),mips64el) - QEMU_ARCH := mips64el - KERNEL_ARCH := mips - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine malta,accel=kvm - CFLAGS += -EL - else -@@ -158,7 +157,7 @@ else ifeq ($(ARCH),mips) - QEMU_ARCH := mips - KERNEL_ARCH := mips - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine malta,accel=kvm - CFLAGS += -EB - else -@@ -169,7 +168,7 @@ else ifeq ($(ARCH),mipsel) - QEMU_ARCH := mipsel - 
KERNEL_ARCH := mips - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host -machine malta,accel=kvm - CFLAGS += -EL - else -@@ -180,7 +179,7 @@ else ifeq ($(ARCH),powerpc64le) - QEMU_ARCH := ppc64 - KERNEL_ARCH := powerpc - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host,accel=kvm -machine pseries - else - QEMU_MACHINE := -machine pseries -@@ -190,7 +189,7 @@ else ifeq ($(ARCH),powerpc) - QEMU_ARCH := ppc - KERNEL_ARCH := powerpc - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/arch/powerpc/boot/uImage --ifeq ($(CHOST),$(CBUILD)) -+ifeq ($(HOST_ARCH),$(ARCH)) - QEMU_MACHINE := -cpu host,accel=kvm -machine ppce500 - else - QEMU_MACHINE := -machine ppce500 -@@ -200,10 +199,11 @@ else ifeq ($(ARCH),m68k) - QEMU_ARCH := m68k - KERNEL_ARCH := m68k - KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux --ifeq ($(CHOST),$(CBUILD)) --QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -+KERNEL_CMDLINE := $(shell sed -n 's/CONFIG_CMDLINE=\(.*\)/\1/p' arch/m68k.config) -+ifeq ($(HOST_ARCH),$(ARCH)) -+QEMU_MACHINE := -cpu host,accel=kvm -machine q800 -smp 1 -append $(KERNEL_CMDLINE) - else --QEMU_MACHINE := -machine q800 -+QEMU_MACHINE := -machine q800 -smp 1 -append $(KERNEL_CMDLINE) - endif - else - $(error I only build: x86_64, i686, arm, armeb, aarch64, aarch64_be, mips, mipsel, mips64, mips64el, powerpc64le, powerpc, m68k) -@@ -238,14 +238,14 @@ $(BUILD_PATH)/init-cpio-spec.txt: - echo "nod /dev/console 644 0 0 c 5 1" >> $@ - echo "dir /bin 755 0 0" >> $@ - echo "file /bin/iperf3 $(IPERF_PATH)/src/iperf3 755 0 0" >> $@ -- echo "file /bin/wg $(WIREGUARD_TOOLS_PATH)/src/tools/wg 755 0 0" >> $@ -+ echo "file /bin/wg $(WIREGUARD_TOOLS_PATH)/src/wg 755 0 0" >> $@ - echo "file /bin/bash $(BASH_PATH)/bash 755 0 0" >> $@ - echo "file /bin/ip $(IPROUTE2_PATH)/ip/ip 755 0 0" >> $@ - echo "file /bin/ss $(IPROUTE2_PATH)/misc/ss 755 0 0" 
>> $@ - echo "file /bin/ping $(IPUTILS_PATH)/ping 755 0 0" >> $@ - echo "file /bin/ncat $(NMAP_PATH)/ncat/ncat 755 0 0" >> $@ -- echo "file /bin/xtables-multi $(IPTABLES_PATH)/iptables/xtables-multi 755 0 0" >> $@ -- echo "slink /bin/iptables xtables-multi 777 0 0" >> $@ -+ echo "file /bin/xtables-legacy-multi $(IPTABLES_PATH)/iptables/xtables-legacy-multi 755 0 0" >> $@ -+ echo "slink /bin/iptables xtables-legacy-multi 777 0 0" >> $@ - echo "slink /bin/ping6 ping 777 0 0" >> $@ - echo "dir /lib 755 0 0" >> $@ - echo "file /lib/libc.so $(MUSL_PATH)/lib/libc.so 755 0 0" >> $@ -@@ -260,8 +260,8 @@ $(KERNEL_BUILD_PATH)/.config: kernel.con - cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config $(KERNEL_BUILD_PATH)/minimal.config - $(if $(findstring yes,$(DEBUG_KERNEL)),cp debug.config $(KERNEL_BUILD_PATH) && cd $(KERNEL_BUILD_PATH) && ARCH=$(KERNEL_ARCH) $(KERNEL_PATH)/scripts/kconfig/merge_config.sh -n $(KERNEL_BUILD_PATH)/.config debug.config,) - --$(KERNEL_BZIMAGE): $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(MUSL_PATH)/lib/libc.so $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/tools/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES) -- $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" -+$(KERNEL_BZIMAGE): $(KERNEL_BUILD_PATH)/.config $(BUILD_PATH)/init-cpio-spec.txt $(MUSL_PATH)/lib/libc.so $(IPERF_PATH)/src/iperf3 $(IPUTILS_PATH)/ping $(BASH_PATH)/bash $(IPROUTE2_PATH)/misc/ss $(IPROUTE2_PATH)/ip/ip $(IPTABLES_PATH)/iptables/xtables-legacy-multi $(NMAP_PATH)/ncat/ncat $(WIREGUARD_TOOLS_PATH)/src/wg $(BUILD_PATH)/init ../netns.sh $(WIREGUARD_SOURCES) -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) 
CROSS_COMPILE=$(CROSS_COMPILE) - - $(BUILD_PATH)/include/linux/.installed: | $(KERNEL_BUILD_PATH)/.config - $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) INSTALL_HDR_PATH=$(BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) headers_install -@@ -280,7 +280,7 @@ $(BUILD_PATH)/include/.installed: $(MUSL - - $(MUSL_CC): $(MUSL_PATH)/lib/libc.so - sh $(MUSL_PATH)/tools/musl-gcc.specs.sh $(BUILD_PATH)/include $(MUSL_PATH)/lib /lib/ld-linux.so.1 > $(BUILD_PATH)/musl-gcc.specs -- printf '#!/bin/sh\nexec "$(REAL_CC)" --specs="$(BUILD_PATH)/musl-gcc.specs" -fno-stack-protector -no-pie "$$@"\n' > $(BUILD_PATH)/musl-gcc -+ printf '#!/bin/sh\nexec "$(REAL_CC)" --specs="$(BUILD_PATH)/musl-gcc.specs" "$$@"\n' > $(BUILD_PATH)/musl-gcc - chmod +x $(BUILD_PATH)/musl-gcc - - $(IPERF_PATH)/.installed: $(IPERF_TAR) -@@ -291,7 +291,7 @@ $(IPERF_PATH)/.installed: $(IPERF_TAR) - touch $@ - - $(IPERF_PATH)/src/iperf3: | $(IPERF_PATH)/.installed $(USERSPACE_DEPS) -- cd $(IPERF_PATH) && CFLAGS="$(CFLAGS) -D_GNU_SOURCE" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared -+ cd $(IPERF_PATH) && CFLAGS="$(CFLAGS) -D_GNU_SOURCE" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --with-openssl=no - $(MAKE) -C $(IPERF_PATH) - $(STRIP) -s $@ - -@@ -308,8 +308,8 @@ $(WIREGUARD_TOOLS_PATH)/.installed: $(WI - flock -s $<.lock tar -C $(BUILD_PATH) -xf $< - touch $@ - --$(WIREGUARD_TOOLS_PATH)/src/tools/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src/tools LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg -+$(WIREGUARD_TOOLS_PATH)/src/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+ LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg - 
$(STRIP) -s $@ - - $(BUILD_PATH)/init: init.c | $(USERSPACE_DEPS) -@@ -323,7 +323,8 @@ $(IPUTILS_PATH)/.installed: $(IPUTILS_TA - touch $@ - - $(IPUTILS_PATH)/ping: | $(IPUTILS_PATH)/.installed $(USERSPACE_DEPS) -- $(MAKE) -C $(IPUTILS_PATH) USE_CAP=no USE_IDN=no USE_NETTLE=no USE_CRYPTO=no ping -+ sed -i /atexit/d $(IPUTILS_PATH)/ping.c -+ cd $(IPUTILS_PATH) && $(CC) $(CFLAGS) -std=c99 -o $@ ping.c ping_common.c ping6_common.c iputils_common.c -D_GNU_SOURCE -D'IPUTILS_VERSION(f)=f' -lresolv $(LDFLAGS) - $(STRIP) -s $@ - - $(BASH_PATH)/.installed: $(BASH_TAR) -@@ -357,7 +358,7 @@ $(IPTABLES_PATH)/.installed: $(IPTABLES_ - sed -i -e "/nfnetlink=[01]/s:=[01]:=0:" -e "/nfconntrack=[01]/s:=[01]:=0:" $(IPTABLES_PATH)/configure - touch $@ - --$(IPTABLES_PATH)/iptables/xtables-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -+$(IPTABLES_PATH)/iptables/xtables-legacy-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) - cd $(IPTABLES_PATH) && PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --with-kernel=$(BUILD_PATH)/include - $(MAKE) -C $(IPTABLES_PATH) - $(STRIP) -s $@ -@@ -368,8 +369,9 @@ $(NMAP_PATH)/.installed: $(NMAP_TAR) - touch $@ - - $(NMAP_PATH)/ncat/ncat: | $(NMAP_PATH)/.installed $(USERSPACE_DEPS) -- cd $(NMAP_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --without-ndiff --without-zenmap --without-nping --with-libpcap=included --with-libpcre=included --with-libdnet=included --without-liblua --with-liblinear=included --without-nmap-update --without-openssl --with-pcap=linux -- $(MAKE) -C $(NMAP_PATH) build-ncat -+ cd $(NMAP_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --without-ndiff --without-zenmap --without-nping --with-libpcap=included 
--with-libpcre=included --with-libdnet=included --without-liblua --with-liblinear=included --without-nmap-update --without-openssl --with-pcap=linux --without-libssh -+ $(MAKE) -C $(NMAP_PATH)/libpcap -+ $(MAKE) -C $(NMAP_PATH)/ncat - $(STRIP) -s $@ - - clean: -@@ -379,7 +381,7 @@ distclean: clean - rm -rf $(DISTFILES_PATH) - - menuconfig: $(KERNEL_BUILD_PATH)/.config -- $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) CC="$(NOPIE_GCC)" menuconfig -+ $(MAKE) -C $(KERNEL_PATH) O=$(KERNEL_BUILD_PATH) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(CROSS_COMPILE) menuconfig - - .PHONY: qemu build clean distclean menuconfig - .DELETE_ON_ERROR: ---- a/tools/testing/selftests/wireguard/qemu/arch/m68k.config -+++ b/tools/testing/selftests/wireguard/qemu/arch/m68k.config -@@ -1,9 +1,9 @@ - CONFIG_MMU=y -+CONFIG_M68KCLASSIC=y - CONFIG_M68040=y - CONFIG_MAC=y - CONFIG_SERIAL_PMACZILOG=y - CONFIG_SERIAL_PMACZILOG_TTYS=y - CONFIG_SERIAL_PMACZILOG_CONSOLE=y --CONFIG_CMDLINE_BOOL=y - CONFIG_CMDLINE="console=ttyS0 wg.success=ttyS1" - CONFIG_FRAME_WARN=1024 ---- a/tools/testing/selftests/wireguard/qemu/init.c -+++ b/tools/testing/selftests/wireguard/qemu/init.c -@@ -21,6 +21,7 @@ - #include - #include - #include -+#include - #include - #include - ---- a/tools/testing/selftests/wireguard/qemu/kernel.config -+++ b/tools/testing/selftests/wireguard/qemu/kernel.config -@@ -39,6 +39,7 @@ CONFIG_PRINTK=y - CONFIG_KALLSYMS=y - CONFIG_BUG=y - CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y -+CONFIG_JUMP_LABEL=y - CONFIG_EMBEDDED=n - CONFIG_BASE_FULL=y - CONFIG_FUTEX=y -@@ -55,6 +56,7 @@ CONFIG_NO_HZ_IDLE=y - CONFIG_NO_HZ_FULL=n - CONFIG_HZ_PERIODIC=n - CONFIG_HIGH_RES_TIMERS=y -+CONFIG_COMPAT_32BIT_TIME=y - CONFIG_ARCH_RANDOM=y - CONFIG_FILE_LOCKING=y - CONFIG_POSIX_TIMERS=y diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0079-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch 
b/feeds/ipq807x/ipq807x/patches/080-wireguard-0079-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch deleted file mode 100644 index fb03b1b1a..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0079-wireguard-queueing-do-not-account-for-pfmemalloc-whe.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 2 Jan 2020 17:47:50 +0100 -Subject: [PATCH] wireguard: queueing: do not account for pfmemalloc when - clearing skb header - -commit 04d2ea92a18417619182cbb79063f154892b0150 upstream. - -Before 8b7008620b84 ("net: Don't copy pfmemalloc flag in __copy_skb_ -header()"), the pfmemalloc flag used to be between headers_start and -headers_end, which is a region we clear when preparing the packet for -encryption/decryption. This is a parameter we certainly want to -preserve, which is why 8b7008620b84 moved it out of there. The code here -was written in a world before 8b7008620b84, though, where we had to -manually account for it. This commit brings things up to speed. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/queueing.h | 3 --- - 1 file changed, 3 deletions(-) - ---- a/drivers/net/wireguard/queueing.h -+++ b/drivers/net/wireguard/queueing.h -@@ -83,13 +83,10 @@ static inline __be16 wg_skb_examine_untr - - static inline void wg_reset_packet(struct sk_buff *skb) - { -- const int pfmemalloc = skb->pfmemalloc; -- - skb_scrub_packet(skb, true); - memset(&skb->headers_start, 0, - offsetof(struct sk_buff, headers_end) - - offsetof(struct sk_buff, headers_start)); -- skb->pfmemalloc = pfmemalloc; - skb->queue_mapping = 0; - skb->nohdr = 0; - skb->peeked = 0; diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0080-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0080-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch deleted file mode 100644 index 779491c8d..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0080-wireguard-socket-mark-skbs-as-not-on-list-when-recei.patch +++ /dev/null @@ -1,34 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 2 Jan 2020 17:47:51 +0100 -Subject: [PATCH] wireguard: socket: mark skbs as not on list when receiving - via gro - -commit 736775d06bac60d7a353e405398b48b2bd8b1e54 upstream. - -Certain drivers will pass gro skbs to udp, at which point the udp driver -simply iterates through them and passes them off to encap_rcv, which is -where we pick up. At the moment, we're not attempting to coalesce these -into bundles, but we also don't want to wind up having cascaded lists of -skbs treated separately. The right behavior here, then, is to just mark -each incoming one as not on a list. This can be seen in practice, for -example, with Qualcomm's rmnet_perf driver. - -Signed-off-by: Jason A. Donenfeld -Tested-by: Yaroslav Furman -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/socket.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -333,6 +333,7 @@ static int wg_receive(struct sock *sk, s - wg = sk->sk_user_data; - if (unlikely(!wg)) - goto err; -+ skb_mark_not_on_list(skb); - wg_packet_receive(wg, skb); - return 0; - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0081-wireguard-allowedips-fix-use-after-free-in-root_remo.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0081-wireguard-allowedips-fix-use-after-free-in-root_remo.patch deleted file mode 100644 index e77ab5834..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0081-wireguard-allowedips-fix-use-after-free-in-root_remo.patch +++ /dev/null @@ -1,164 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Eric Dumazet -Date: Tue, 4 Feb 2020 22:17:25 +0100 -Subject: [PATCH] wireguard: allowedips: fix use-after-free in - root_remove_peer_lists - -commit 9981159fc3b677b357f84e069a11de5a5ec8a2a8 upstream. - -In the unlikely case a new node could not be allocated, we need to -remove @newnode from @peer->allowedips_list before freeing it. 
- -syzbot reported: - -BUG: KASAN: use-after-free in __list_del_entry_valid+0xdc/0xf5 lib/list_debug.c:54 -Read of size 8 at addr ffff88809881a538 by task syz-executor.4/30133 - -CPU: 0 PID: 30133 Comm: syz-executor.4 Not tainted 5.5.0-syzkaller #0 -Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 -Call Trace: - __dump_stack lib/dump_stack.c:77 [inline] - dump_stack+0x197/0x210 lib/dump_stack.c:118 - print_address_description.constprop.0.cold+0xd4/0x30b mm/kasan/report.c:374 - __kasan_report.cold+0x1b/0x32 mm/kasan/report.c:506 - kasan_report+0x12/0x20 mm/kasan/common.c:639 - __asan_report_load8_noabort+0x14/0x20 mm/kasan/generic_report.c:135 - __list_del_entry_valid+0xdc/0xf5 lib/list_debug.c:54 - __list_del_entry include/linux/list.h:132 [inline] - list_del include/linux/list.h:146 [inline] - root_remove_peer_lists+0x24f/0x4b0 drivers/net/wireguard/allowedips.c:65 - wg_allowedips_free+0x232/0x390 drivers/net/wireguard/allowedips.c:300 - wg_peer_remove_all+0xd5/0x620 drivers/net/wireguard/peer.c:187 - wg_set_device+0xd01/0x1350 drivers/net/wireguard/netlink.c:542 - genl_family_rcv_msg_doit net/netlink/genetlink.c:672 [inline] - genl_family_rcv_msg net/netlink/genetlink.c:717 [inline] - genl_rcv_msg+0x67d/0xea0 net/netlink/genetlink.c:734 - netlink_rcv_skb+0x177/0x450 net/netlink/af_netlink.c:2477 - genl_rcv+0x29/0x40 net/netlink/genetlink.c:745 - netlink_unicast_kernel net/netlink/af_netlink.c:1302 [inline] - netlink_unicast+0x59e/0x7e0 net/netlink/af_netlink.c:1328 - netlink_sendmsg+0x91c/0xea0 net/netlink/af_netlink.c:1917 - sock_sendmsg_nosec net/socket.c:652 [inline] - sock_sendmsg+0xd7/0x130 net/socket.c:672 - ____sys_sendmsg+0x753/0x880 net/socket.c:2343 - ___sys_sendmsg+0x100/0x170 net/socket.c:2397 - __sys_sendmsg+0x105/0x1d0 net/socket.c:2430 - __do_sys_sendmsg net/socket.c:2439 [inline] - __se_sys_sendmsg net/socket.c:2437 [inline] - __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2437 - do_syscall_64+0xfa/0x790 
arch/x86/entry/common.c:294 - entry_SYSCALL_64_after_hwframe+0x49/0xbe -RIP: 0033:0x45b399 -Code: ad b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00 00 66 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 0f 83 7b b6 fb ff c3 66 2e 0f 1f 84 00 00 00 00 -RSP: 002b:00007f99a9bcdc78 EFLAGS: 00000246 ORIG_RAX: 000000000000002e -RAX: ffffffffffffffda RBX: 00007f99a9bce6d4 RCX: 000000000045b399 -RDX: 0000000000000000 RSI: 0000000020001340 RDI: 0000000000000003 -RBP: 000000000075bf20 R08: 0000000000000000 R09: 0000000000000000 -R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000004 -R13: 00000000000009ba R14: 00000000004cb2b8 R15: 0000000000000009 - -Allocated by task 30103: - save_stack+0x23/0x90 mm/kasan/common.c:72 - set_track mm/kasan/common.c:80 [inline] - __kasan_kmalloc mm/kasan/common.c:513 [inline] - __kasan_kmalloc.constprop.0+0xcf/0xe0 mm/kasan/common.c:486 - kasan_kmalloc+0x9/0x10 mm/kasan/common.c:527 - kmem_cache_alloc_trace+0x158/0x790 mm/slab.c:3551 - kmalloc include/linux/slab.h:556 [inline] - kzalloc include/linux/slab.h:670 [inline] - add+0x70a/0x1970 drivers/net/wireguard/allowedips.c:236 - wg_allowedips_insert_v4+0xf6/0x160 drivers/net/wireguard/allowedips.c:320 - set_allowedip drivers/net/wireguard/netlink.c:343 [inline] - set_peer+0xfb9/0x1150 drivers/net/wireguard/netlink.c:468 - wg_set_device+0xbd4/0x1350 drivers/net/wireguard/netlink.c:591 - genl_family_rcv_msg_doit net/netlink/genetlink.c:672 [inline] - genl_family_rcv_msg net/netlink/genetlink.c:717 [inline] - genl_rcv_msg+0x67d/0xea0 net/netlink/genetlink.c:734 - netlink_rcv_skb+0x177/0x450 net/netlink/af_netlink.c:2477 - genl_rcv+0x29/0x40 net/netlink/genetlink.c:745 - netlink_unicast_kernel net/netlink/af_netlink.c:1302 [inline] - netlink_unicast+0x59e/0x7e0 net/netlink/af_netlink.c:1328 - netlink_sendmsg+0x91c/0xea0 net/netlink/af_netlink.c:1917 - sock_sendmsg_nosec net/socket.c:652 [inline] - sock_sendmsg+0xd7/0x130 net/socket.c:672 - 
____sys_sendmsg+0x753/0x880 net/socket.c:2343 - ___sys_sendmsg+0x100/0x170 net/socket.c:2397 - __sys_sendmsg+0x105/0x1d0 net/socket.c:2430 - __do_sys_sendmsg net/socket.c:2439 [inline] - __se_sys_sendmsg net/socket.c:2437 [inline] - __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2437 - do_syscall_64+0xfa/0x790 arch/x86/entry/common.c:294 - entry_SYSCALL_64_after_hwframe+0x49/0xbe - -Freed by task 30103: - save_stack+0x23/0x90 mm/kasan/common.c:72 - set_track mm/kasan/common.c:80 [inline] - kasan_set_free_info mm/kasan/common.c:335 [inline] - __kasan_slab_free+0x102/0x150 mm/kasan/common.c:474 - kasan_slab_free+0xe/0x10 mm/kasan/common.c:483 - __cache_free mm/slab.c:3426 [inline] - kfree+0x10a/0x2c0 mm/slab.c:3757 - add+0x12d2/0x1970 drivers/net/wireguard/allowedips.c:266 - wg_allowedips_insert_v4+0xf6/0x160 drivers/net/wireguard/allowedips.c:320 - set_allowedip drivers/net/wireguard/netlink.c:343 [inline] - set_peer+0xfb9/0x1150 drivers/net/wireguard/netlink.c:468 - wg_set_device+0xbd4/0x1350 drivers/net/wireguard/netlink.c:591 - genl_family_rcv_msg_doit net/netlink/genetlink.c:672 [inline] - genl_family_rcv_msg net/netlink/genetlink.c:717 [inline] - genl_rcv_msg+0x67d/0xea0 net/netlink/genetlink.c:734 - netlink_rcv_skb+0x177/0x450 net/netlink/af_netlink.c:2477 - genl_rcv+0x29/0x40 net/netlink/genetlink.c:745 - netlink_unicast_kernel net/netlink/af_netlink.c:1302 [inline] - netlink_unicast+0x59e/0x7e0 net/netlink/af_netlink.c:1328 - netlink_sendmsg+0x91c/0xea0 net/netlink/af_netlink.c:1917 - sock_sendmsg_nosec net/socket.c:652 [inline] - sock_sendmsg+0xd7/0x130 net/socket.c:672 - ____sys_sendmsg+0x753/0x880 net/socket.c:2343 - ___sys_sendmsg+0x100/0x170 net/socket.c:2397 - __sys_sendmsg+0x105/0x1d0 net/socket.c:2430 - __do_sys_sendmsg net/socket.c:2439 [inline] - __se_sys_sendmsg net/socket.c:2437 [inline] - __x64_sys_sendmsg+0x78/0xb0 net/socket.c:2437 - do_syscall_64+0xfa/0x790 arch/x86/entry/common.c:294 - entry_SYSCALL_64_after_hwframe+0x49/0xbe - -The buggy address 
belongs to the object at ffff88809881a500 - which belongs to the cache kmalloc-64 of size 64 -The buggy address is located 56 bytes inside of - 64-byte region [ffff88809881a500, ffff88809881a540) -The buggy address belongs to the page: -page:ffffea0002620680 refcount:1 mapcount:0 mapping:ffff8880aa400380 index:0x0 -raw: 00fffe0000000200 ffffea000250b748 ffffea000254bac8 ffff8880aa400380 -raw: 0000000000000000 ffff88809881a000 0000000100000020 0000000000000000 -page dumped because: kasan: bad access detected - -Memory state around the buggy address: - ffff88809881a400: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc - ffff88809881a480: 00 00 00 00 00 fc fc fc fc fc fc fc fc fc fc fc ->ffff88809881a500: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc - ^ - ffff88809881a580: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc - ffff88809881a600: 00 00 00 00 00 00 fc fc fc fc fc fc fc fc fc fc - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Eric Dumazet -Reported-by: syzbot -Cc: Jason A. Donenfeld -Cc: wireguard@lists.zx2c4.com -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/allowedips.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/net/wireguard/allowedips.c -+++ b/drivers/net/wireguard/allowedips.c -@@ -263,6 +263,7 @@ static int add(struct allowedips_node __ - } else { - node = kzalloc(sizeof(*node), GFP_KERNEL); - if (unlikely(!node)) { -+ list_del(&newnode->peer_list); - kfree(newnode); - return -ENOMEM; - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0082-wireguard-noise-reject-peers-with-low-order-public-k.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0082-wireguard-noise-reject-peers-with-low-order-public-k.patch deleted file mode 100644 index 55bb27611..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0082-wireguard-noise-reject-peers-with-low-order-public-k.patch +++ /dev/null @@ -1,233 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 4 Feb 2020 22:17:26 +0100 -Subject: [PATCH] wireguard: noise: reject peers with low order public keys - -commit ec31c2676a10e064878927b243fada8c2fb0c03c upstream. - -Our static-static calculation returns a failure if the public key is of -low order. We check for this when peers are added, and don't allow them -to be added if they're low order, except in the case where we haven't -yet been given a private key. In that case, we would defer the removal -of the peer until we're given a private key, since at that point we're -doing new static-static calculations which incur failures we can act on. -This meant, however, that we wound up removing peers rather late in the -configuration flow. - -Syzkaller points out that peer_remove calls flush_workqueue, which in -turn might then wait for sending a handshake initiation to complete. -Since handshake initiation needs the static identity lock, holding the -static identity lock while calling peer_remove can result in a rare -deadlock. 
We have precisely this case in this situation of late-stage -peer removal based on an invalid public key. We can't drop the lock when -removing, because then incoming handshakes might interact with a bogus -static-static calculation. - -While the band-aid patch for this would involve breaking up the peer -removal into two steps like wg_peer_remove_all does, in order to solve -the locking issue, there's actually a much more elegant way of fixing -this: - -If the static-static calculation succeeds with one private key, it -*must* succeed with all others, because all 32-byte strings map to valid -private keys, thanks to clamping. That means we can get rid of this -silly dance and locking headaches of removing peers late in the -configuration flow, and instead just reject them early on, regardless of -whether the device has yet been assigned a private key. For the case -where the device doesn't yet have a private key, we safely use zeros -just for the purposes of checking for low order points by way of -checking the output of the calculation. 
- -The following PoC will trigger the deadlock: - -ip link add wg0 type wireguard -ip addr add 10.0.0.1/24 dev wg0 -ip link set wg0 up -ping -f 10.0.0.2 & -while true; do - wg set wg0 private-key /dev/null peer AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= allowed-ips 10.0.0.0/24 endpoint 10.0.0.3:1234 - wg set wg0 private-key <(echo AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=) -done - -[ 0.949105] ====================================================== -[ 0.949550] WARNING: possible circular locking dependency detected -[ 0.950143] 5.5.0-debug+ #18 Not tainted -[ 0.950431] ------------------------------------------------------ -[ 0.950959] wg/89 is trying to acquire lock: -[ 0.951252] ffff8880333e2128 ((wq_completion)wg-kex-wg0){+.+.}, at: flush_workqueue+0xe3/0x12f0 -[ 0.951865] -[ 0.951865] but task is already holding lock: -[ 0.952280] ffff888032819bc0 (&wg->static_identity.lock){++++}, at: wg_set_device+0x95d/0xcc0 -[ 0.953011] -[ 0.953011] which lock already depends on the new lock. 
-[ 0.953011] -[ 0.953651] -[ 0.953651] the existing dependency chain (in reverse order) is: -[ 0.954292] -[ 0.954292] -> #2 (&wg->static_identity.lock){++++}: -[ 0.954804] lock_acquire+0x127/0x350 -[ 0.955133] down_read+0x83/0x410 -[ 0.955428] wg_noise_handshake_create_initiation+0x97/0x700 -[ 0.955885] wg_packet_send_handshake_initiation+0x13a/0x280 -[ 0.956401] wg_packet_handshake_send_worker+0x10/0x20 -[ 0.956841] process_one_work+0x806/0x1500 -[ 0.957167] worker_thread+0x8c/0xcb0 -[ 0.957549] kthread+0x2ee/0x3b0 -[ 0.957792] ret_from_fork+0x24/0x30 -[ 0.958234] -[ 0.958234] -> #1 ((work_completion)(&peer->transmit_handshake_work)){+.+.}: -[ 0.958808] lock_acquire+0x127/0x350 -[ 0.959075] process_one_work+0x7ab/0x1500 -[ 0.959369] worker_thread+0x8c/0xcb0 -[ 0.959639] kthread+0x2ee/0x3b0 -[ 0.959896] ret_from_fork+0x24/0x30 -[ 0.960346] -[ 0.960346] -> #0 ((wq_completion)wg-kex-wg0){+.+.}: -[ 0.960945] check_prev_add+0x167/0x1e20 -[ 0.961351] __lock_acquire+0x2012/0x3170 -[ 0.961725] lock_acquire+0x127/0x350 -[ 0.961990] flush_workqueue+0x106/0x12f0 -[ 0.962280] peer_remove_after_dead+0x160/0x220 -[ 0.962600] wg_set_device+0xa24/0xcc0 -[ 0.962994] genl_rcv_msg+0x52f/0xe90 -[ 0.963298] netlink_rcv_skb+0x111/0x320 -[ 0.963618] genl_rcv+0x1f/0x30 -[ 0.963853] netlink_unicast+0x3f6/0x610 -[ 0.964245] netlink_sendmsg+0x700/0xb80 -[ 0.964586] __sys_sendto+0x1dd/0x2c0 -[ 0.964854] __x64_sys_sendto+0xd8/0x1b0 -[ 0.965141] do_syscall_64+0x90/0xd9a -[ 0.965408] entry_SYSCALL_64_after_hwframe+0x49/0xbe -[ 0.965769] -[ 0.965769] other info that might help us debug this: -[ 0.965769] -[ 0.966337] Chain exists of: -[ 0.966337] (wq_completion)wg-kex-wg0 --> (work_completion)(&peer->transmit_handshake_work) --> &wg->static_identity.lock -[ 0.966337] -[ 0.967417] Possible unsafe locking scenario: -[ 0.967417] -[ 0.967836] CPU0 CPU1 -[ 0.968155] ---- ---- -[ 0.968497] lock(&wg->static_identity.lock); -[ 0.968779] lock((work_completion)(&peer->transmit_handshake_work)); -[ 
0.969345] lock(&wg->static_identity.lock); -[ 0.969809] lock((wq_completion)wg-kex-wg0); -[ 0.970146] -[ 0.970146] *** DEADLOCK *** -[ 0.970146] -[ 0.970531] 5 locks held by wg/89: -[ 0.970908] #0: ffffffff827433c8 (cb_lock){++++}, at: genl_rcv+0x10/0x30 -[ 0.971400] #1: ffffffff82743480 (genl_mutex){+.+.}, at: genl_rcv_msg+0x642/0xe90 -[ 0.971924] #2: ffffffff827160c0 (rtnl_mutex){+.+.}, at: wg_set_device+0x9f/0xcc0 -[ 0.972488] #3: ffff888032819de0 (&wg->device_update_lock){+.+.}, at: wg_set_device+0xb0/0xcc0 -[ 0.973095] #4: ffff888032819bc0 (&wg->static_identity.lock){++++}, at: wg_set_device+0x95d/0xcc0 -[ 0.973653] -[ 0.973653] stack backtrace: -[ 0.973932] CPU: 1 PID: 89 Comm: wg Not tainted 5.5.0-debug+ #18 -[ 0.974476] Call Trace: -[ 0.974638] dump_stack+0x97/0xe0 -[ 0.974869] check_noncircular+0x312/0x3e0 -[ 0.975132] ? print_circular_bug+0x1f0/0x1f0 -[ 0.975410] ? __kernel_text_address+0x9/0x30 -[ 0.975727] ? unwind_get_return_address+0x51/0x90 -[ 0.976024] check_prev_add+0x167/0x1e20 -[ 0.976367] ? graph_lock+0x70/0x160 -[ 0.976682] __lock_acquire+0x2012/0x3170 -[ 0.976998] ? register_lock_class+0x1140/0x1140 -[ 0.977323] lock_acquire+0x127/0x350 -[ 0.977627] ? flush_workqueue+0xe3/0x12f0 -[ 0.977890] flush_workqueue+0x106/0x12f0 -[ 0.978147] ? flush_workqueue+0xe3/0x12f0 -[ 0.978410] ? find_held_lock+0x2c/0x110 -[ 0.978662] ? lock_downgrade+0x6e0/0x6e0 -[ 0.978919] ? queue_rcu_work+0x60/0x60 -[ 0.979166] ? netif_napi_del+0x151/0x3b0 -[ 0.979501] ? peer_remove_after_dead+0x160/0x220 -[ 0.979871] peer_remove_after_dead+0x160/0x220 -[ 0.980232] wg_set_device+0xa24/0xcc0 -[ 0.980516] ? deref_stack_reg+0x8e/0xc0 -[ 0.980801] ? set_peer+0xe10/0xe10 -[ 0.981040] ? __ww_mutex_check_waiters+0x150/0x150 -[ 0.981430] ? __nla_validate_parse+0x163/0x270 -[ 0.981719] ? genl_family_rcv_msg_attrs_parse+0x13f/0x310 -[ 0.982078] genl_rcv_msg+0x52f/0xe90 -[ 0.982348] ? genl_family_rcv_msg_attrs_parse+0x310/0x310 -[ 0.982690] ? 
register_lock_class+0x1140/0x1140 -[ 0.983049] netlink_rcv_skb+0x111/0x320 -[ 0.983298] ? genl_family_rcv_msg_attrs_parse+0x310/0x310 -[ 0.983645] ? netlink_ack+0x880/0x880 -[ 0.983888] genl_rcv+0x1f/0x30 -[ 0.984168] netlink_unicast+0x3f6/0x610 -[ 0.984443] ? netlink_detachskb+0x60/0x60 -[ 0.984729] ? find_held_lock+0x2c/0x110 -[ 0.984976] netlink_sendmsg+0x700/0xb80 -[ 0.985220] ? netlink_broadcast_filtered+0xa60/0xa60 -[ 0.985533] __sys_sendto+0x1dd/0x2c0 -[ 0.985763] ? __x64_sys_getpeername+0xb0/0xb0 -[ 0.986039] ? sockfd_lookup_light+0x17/0x160 -[ 0.986397] ? __sys_recvmsg+0x8c/0xf0 -[ 0.986711] ? __sys_recvmsg_sock+0xd0/0xd0 -[ 0.987018] __x64_sys_sendto+0xd8/0x1b0 -[ 0.987283] ? lockdep_hardirqs_on+0x39b/0x5a0 -[ 0.987666] do_syscall_64+0x90/0xd9a -[ 0.987903] entry_SYSCALL_64_after_hwframe+0x49/0xbe -[ 0.988223] RIP: 0033:0x7fe77c12003e -[ 0.988508] Code: c3 8b 07 85 c0 75 24 49 89 fb 48 89 f0 48 89 d7 48 89 ce 4c 89 c2 4d 89 ca 4c 8b 44 24 08 4c 8b 4c 24 10 4c 4 -[ 0.989666] RSP: 002b:00007fffada2ed58 EFLAGS: 00000246 ORIG_RAX: 000000000000002c -[ 0.990137] RAX: ffffffffffffffda RBX: 00007fe77c159d48 RCX: 00007fe77c12003e -[ 0.990583] RDX: 0000000000000040 RSI: 000055fd1d38e020 RDI: 0000000000000004 -[ 0.991091] RBP: 000055fd1d38e020 R08: 000055fd1cb63358 R09: 000000000000000c -[ 0.991568] R10: 0000000000000000 R11: 0000000000000246 R12: 000000000000002c -[ 0.992014] R13: 0000000000000004 R14: 000055fd1d38e020 R15: 0000000000000001 - -Signed-off-by: Jason A. Donenfeld -Reported-by: syzbot -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/netlink.c | 6 ++---- - drivers/net/wireguard/noise.c | 10 +++++++--- - 2 files changed, 9 insertions(+), 7 deletions(-) - ---- a/drivers/net/wireguard/netlink.c -+++ b/drivers/net/wireguard/netlink.c -@@ -575,10 +575,8 @@ static int wg_set_device(struct sk_buff - private_key); - list_for_each_entry_safe(peer, temp, &wg->peer_list, - peer_list) { -- if (wg_noise_precompute_static_static(peer)) -- wg_noise_expire_current_peer_keypairs(peer); -- else -- wg_peer_remove(peer); -+ BUG_ON(!wg_noise_precompute_static_static(peer)); -+ wg_noise_expire_current_peer_keypairs(peer); - } - wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); - up_write(&wg->static_identity.lock); ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -46,17 +46,21 @@ void __init wg_noise_init(void) - /* Must hold peer->handshake.static_identity->lock */ - bool wg_noise_precompute_static_static(struct wg_peer *peer) - { -- bool ret = true; -+ bool ret; - - down_write(&peer->handshake.lock); -- if (peer->handshake.static_identity->has_identity) -+ if (peer->handshake.static_identity->has_identity) { - ret = curve25519( - peer->handshake.precomputed_static_static, - peer->handshake.static_identity->static_private, - peer->handshake.remote_static); -- else -+ } else { -+ u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 }; -+ -+ ret = curve25519(empty, empty, peer->handshake.remote_static); - memset(peer->handshake.precomputed_static_static, 0, - NOISE_PUBLIC_KEY_LEN); -+ } - up_write(&peer->handshake.lock); - return ret; - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0083-wireguard-selftests-ensure-non-addition-of-peers-wit.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0083-wireguard-selftests-ensure-non-addition-of-peers-wit.patch deleted file mode 100644 index 86877a659..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0083-wireguard-selftests-ensure-non-addition-of-peers-wit.patch +++ /dev/null @@ -1,34 
+0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 4 Feb 2020 22:17:27 +0100 -Subject: [PATCH] wireguard: selftests: ensure non-addition of peers with - failed precomputation - -commit f9398acba6a4ae9cb98bfe4d56414d376eff8d57 upstream. - -Ensure that peers with low order points are ignored, both in the case -where we already have a device private key and in the case where we do -not. This adds points that naturally give a zero output. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/netns.sh | 6 ++++++ - 1 file changed, 6 insertions(+) - ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -516,6 +516,12 @@ n0 wg set wg0 peer "$pub2" allowed-ips 0 - n0 wg set wg0 peer "$pub2" allowed-ips 0.0.0.0/0 - n0 wg set wg0 peer "$pub2" allowed-ips ::/0,1700::/111,5000::/4,e000::/37,9000::/75 - n0 wg set wg0 peer "$pub2" allowed-ips ::/0 -+n0 wg set wg0 peer "$pub2" remove -+low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= ) -+n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer } -+[[ -z $(n0 wg show wg0 peers) ]] -+n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer } -+[[ -z $(n0 wg show wg0 peers) ]] - ip0 link del wg0 - - declare -A objects diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0084-wireguard-selftests-tie-socket-waiting-to-target-pid.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0084-wireguard-selftests-tie-socket-waiting-to-target-pid.patch deleted file mode 100644 index 4530f0f49..000000000 --- 
a/feeds/ipq807x/ipq807x/patches/080-wireguard-0084-wireguard-selftests-tie-socket-waiting-to-target-pid.patch +++ /dev/null @@ -1,77 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 4 Feb 2020 22:17:29 +0100 -Subject: [PATCH] wireguard: selftests: tie socket waiting to target pid - -commit 88f404a9b1d75388225b1c67b6dd327cb2182777 upstream. - -Without this, we wind up proceeding too early sometimes when the -previous process has just used the same listening port. So, we tie the -listening socket query to the specific pid we're interested in. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/netns.sh | 17 ++++++++--------- - 1 file changed, 8 insertions(+), 9 deletions(-) - ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -38,9 +38,8 @@ ip0() { pretty 0 "ip $*"; ip -n $netns0 - ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } - ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } - sleep() { read -t "$1" -N 1 || true; } --waitiperf() { pretty "${1//*-}" "wait for iperf:5201"; while [[ $(ss -N "$1" -tlp 'sport = 5201') != *iperf3* ]]; do sleep 0.1; done; } --waitncatudp() { pretty "${1//*-}" "wait for udp:1111"; while [[ $(ss -N "$1" -ulp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } --waitncattcp() { pretty "${1//*-}" "wait for tcp:1111"; while [[ $(ss -N "$1" -tlp 'sport = 1111') != *ncat* ]]; do sleep 0.1; done; } -+waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } -+waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; } - waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< 
\"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } - - cleanup() { -@@ -119,22 +118,22 @@ tests() { - - # TCP over IPv4 - n2 iperf3 -s -1 -B 192.168.241.2 & -- waitiperf $netns2 -+ waitiperf $netns2 $! - n1 iperf3 -Z -t 3 -c 192.168.241.2 - - # TCP over IPv6 - n1 iperf3 -s -1 -B fd00::1 & -- waitiperf $netns1 -+ waitiperf $netns1 $! - n2 iperf3 -Z -t 3 -c fd00::1 - - # UDP over IPv4 - n1 iperf3 -s -1 -B 192.168.241.1 & -- waitiperf $netns1 -+ waitiperf $netns1 $! - n2 iperf3 -Z -t 3 -b 0 -u -c 192.168.241.1 - - # UDP over IPv6 - n2 iperf3 -s -1 -B fd00::2 & -- waitiperf $netns2 -+ waitiperf $netns2 $! - n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 - } - -@@ -207,7 +206,7 @@ n1 ping -W 1 -c 1 192.168.241.2 - n1 wg set wg0 peer "$pub2" allowed-ips 192.168.241.0/24 - exec 4< <(n1 ncat -l -u -p 1111) - ncat_pid=$! --waitncatudp $netns1 -+waitncatudp $netns1 $ncat_pid - n2 ncat -u 192.168.241.1 1111 <<<"X" - read -r -N 1 -t 1 out <&4 && [[ $out == "X" ]] - kill $ncat_pid -@@ -216,7 +215,7 @@ n1 wg set wg0 peer "$more_specific_key" - n2 wg set wg0 listen-port 9997 - exec 4< <(n1 ncat -l -u -p 1111) - ncat_pid=$! --waitncatudp $netns1 -+waitncatudp $netns1 $ncat_pid - n2 ncat -u 192.168.241.1 1111 <<<"X" - ! read -r -N 1 -t 1 out <&4 || false - kill $ncat_pid diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0085-wireguard-device-use-icmp_ndo_send-helper.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0085-wireguard-device-use-icmp_ndo_send-helper.patch deleted file mode 100644 index 321db189e..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0085-wireguard-device-use-icmp_ndo_send-helper.patch +++ /dev/null @@ -1,64 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 11 Feb 2020 20:47:08 +0100 -Subject: [PATCH] wireguard: device: use icmp_ndo_send helper - -commit a12d7f3cbdc72c7625881c8dc2660fc2c979fdf2 upstream. 
- -Because wireguard is calling icmp from network device context, it should -use the ndo helper so that the rate limiting applies correctly. This -commit adds a small test to the wireguard test suite to ensure that the -new functions continue doing the right thing in the context of -wireguard. It does this by setting up a condition that will definately -evoke an icmp error message from the driver, but along a nat'd path. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/device.c | 4 ++-- - tools/testing/selftests/wireguard/netns.sh | 11 +++++++++++ - 2 files changed, 13 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -203,9 +203,9 @@ err_peer: - err: - ++dev->stats.tx_errors; - if (skb->protocol == htons(ETH_P_IP)) -- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); -+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); - else if (skb->protocol == htons(ETH_P_IPV6)) -- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); -+ icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); - kfree_skb(skb); - return ret; - } ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -24,6 +24,7 @@ - set -e - - exec 3>&1 -+export LANG=C - export WG_HIDE_KEYS=never - netns0="wg-test-$$-0" - netns1="wg-test-$$-1" -@@ -297,7 +298,17 @@ ip1 -4 rule add table main suppress_pref - n1 ping -W 1 -c 100 -f 192.168.99.7 - n1 ping -W 1 -c 100 -f abab::1111 - -+# Have ns2 NAT into wg0 packets from ns0, but return an icmp error along the right route. -+n2 iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -d 192.168.241.0/24 -j SNAT --to 192.168.241.2 -+n0 iptables -t filter -A INPUT \! -s 10.0.0.0/24 -i vethrs -j DROP # Manual rpfilter just to be explicit. 
-+n2 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward' -+ip0 -4 route add 192.168.241.1 via 10.0.0.100 -+n2 wg set wg0 peer "$pub1" remove -+[[ $(! n0 ping -W 1 -c 1 192.168.241.1 || false) == *"From 10.0.0.100 icmp_seq=1 Destination Host Unreachable"* ]] -+ - n0 iptables -t nat -F -+n0 iptables -t filter -F -+n2 iptables -t nat -F - ip0 link del vethrc - ip0 link del vethrs - ip1 link del wg0 diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0086-wireguard-selftests-reduce-complexity-and-fix-make-r.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0086-wireguard-selftests-reduce-complexity-and-fix-make-r.patch deleted file mode 100644 index ac292a868..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0086-wireguard-selftests-reduce-complexity-and-fix-make-r.patch +++ /dev/null @@ -1,104 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 14 Feb 2020 23:57:20 +0100 -Subject: [PATCH] wireguard: selftests: reduce complexity and fix make races - -commit 04ddf1208f03e1dbc39a4619c40eba640051b950 upstream. - -This gives us fewer dependencies and shortens build time, fixes up some -hash checking race conditions, and also fixes missing directory creation -that caused issues on massively parallel builds. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - .../testing/selftests/wireguard/qemu/Makefile | 38 +++++++------------ - 1 file changed, 14 insertions(+), 24 deletions(-) - ---- a/tools/testing/selftests/wireguard/qemu/Makefile -+++ b/tools/testing/selftests/wireguard/qemu/Makefile -@@ -38,19 +38,17 @@ endef - define file_download = - $(DISTFILES_PATH)/$(1): - mkdir -p $(DISTFILES_PATH) -- flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp' -- if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi -+ flock -x $$@.lock -c '[ -f $$@ ] && exit 0; wget -O $$@.tmp $(MIRROR)$(1) || wget -O $$@.tmp $(2)$(1) || rm -f $$@.tmp; [ -f $$@.tmp ] || exit 1; if echo "$(3) $$@.tmp" | sha256sum -c -; then mv $$@.tmp $$@; else rm -f $$@.tmp; exit 71; fi' - endef - - $(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3)) --$(eval $(call tar_download,LIBMNL,libmnl,1.0.4,.tar.bz2,https://www.netfilter.org/projects/libmnl/files/,171f89699f286a5854b72b91d06e8f8e3683064c5901fb09d954a9ab6f551f81)) - $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) - $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) - $(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) - $(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) - $(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) - $(eval $(call 
tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a)) --$(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20191226,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,aa8af0fdc9872d369d8c890a84dbc2a2466b55795dccd5b47721b2d97644b04f)) -+$(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20200206,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,f5207248c6a3c3e3bfc9ab30b91c1897b00802ed861e1f9faaed873366078c64)) - - KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug) - rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d)) -@@ -295,21 +293,13 @@ $(IPERF_PATH)/src/iperf3: | $(IPERF_PATH - $(MAKE) -C $(IPERF_PATH) - $(STRIP) -s $@ - --$(LIBMNL_PATH)/.installed: $(LIBMNL_TAR) -- flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -- touch $@ -- --$(LIBMNL_PATH)/src/.libs/libmnl.a: | $(LIBMNL_PATH)/.installed $(USERSPACE_DEPS) -- cd $(LIBMNL_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared -- $(MAKE) -C $(LIBMNL_PATH) -- sed -i 's:prefix=.*:prefix=$(LIBMNL_PATH):' $(LIBMNL_PATH)/libmnl.pc -- - $(WIREGUARD_TOOLS_PATH)/.installed: $(WIREGUARD_TOOLS_TAR) -+ mkdir -p $(BUILD_PATH) - flock -s $<.lock tar -C $(BUILD_PATH) -xf $< - touch $@ - --$(WIREGUARD_TOOLS_PATH)/src/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg -+$(WIREGUARD_TOOLS_PATH)/src/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(USERSPACE_DEPS) -+ $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src wg - $(STRIP) -s $@ - - $(BUILD_PATH)/init: init.c | $(USERSPACE_DEPS) -@@ -340,17 +330,17 @@ $(BASH_PATH)/bash: | $(BASH_PATH)/.insta - $(IPROUTE2_PATH)/.installed: 
$(IPROUTE2_TAR) - mkdir -p $(BUILD_PATH) - flock -s $<.lock tar -C $(BUILD_PATH) -xf $< -- printf 'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=y\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS -DHAVE_LIBMNL -I$(LIBMNL_PATH)/include\nLDLIBS+=-lmnl' > $(IPROUTE2_PATH)/config.mk -+ printf 'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=n\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS\n' > $(IPROUTE2_PATH)/config.mk - printf 'lib: snapshot\n\t$$(MAKE) -C lib\nip/ip: lib\n\t$$(MAKE) -C ip ip\nmisc/ss: lib\n\t$$(MAKE) -C misc ss\n' >> $(IPROUTE2_PATH)/Makefile - touch $@ - --$(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ ip/ip -- $(STRIP) -s $(IPROUTE2_PATH)/ip/ip -- --$(IPROUTE2_PATH)/misc/ss: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ misc/ss -- $(STRIP) -s $(IPROUTE2_PATH)/misc/ss -+$(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(USERSPACE_DEPS) -+ $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ ip/ip -+ $(STRIP) -s $@ -+ -+$(IPROUTE2_PATH)/misc/ss: | $(IPROUTE2_PATH)/.installed $(USERSPACE_DEPS) -+ $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ misc/ss -+ $(STRIP) -s $@ - - $(IPTABLES_PATH)/.installed: $(IPTABLES_TAR) - mkdir -p $(BUILD_PATH) -@@ -358,8 +348,8 @@ $(IPTABLES_PATH)/.installed: $(IPTABLES_ - sed -i -e "/nfnetlink=[01]/s:=[01]:=0:" -e "/nfconntrack=[01]/s:=[01]:=0:" $(IPTABLES_PATH)/configure - touch $@ - --$(IPTABLES_PATH)/iptables/xtables-legacy-multi: | $(IPTABLES_PATH)/.installed 
$(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS) -- cd $(IPTABLES_PATH) && PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --with-kernel=$(BUILD_PATH)/include -+$(IPTABLES_PATH)/iptables/xtables-legacy-multi: | $(IPTABLES_PATH)/.installed $(USERSPACE_DEPS) -+ cd $(IPTABLES_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --disable-connlabel --with-kernel=$(BUILD_PATH)/include - $(MAKE) -C $(IPTABLES_PATH) - $(STRIP) -s $@ - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0087-wireguard-receive-reset-last_under_load-to-zero.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0087-wireguard-receive-reset-last_under_load-to-zero.patch deleted file mode 100644 index 193d28a83..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0087-wireguard-receive-reset-last_under_load-to-zero.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 14 Feb 2020 23:57:21 +0100 -Subject: [PATCH] wireguard: receive: reset last_under_load to zero - -commit 2a8a4df36462aa85b0db87b7c5ea145ba67e34a8 upstream. - -This is a small optimization that prevents more expensive comparisons -from happening when they are no longer necessary, by clearing the -last_under_load variable whenever we wind up in a state where we were -under load but we no longer are. - -Signed-off-by: Jason A. Donenfeld -Suggested-by: Matt Dunwoodie -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/receive.c | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -118,10 +118,13 @@ static void wg_receive_handshake_packet( - - under_load = skb_queue_len(&wg->incoming_handshakes) >= - MAX_QUEUED_INCOMING_HANDSHAKES / 8; -- if (under_load) -+ if (under_load) { - last_under_load = ktime_get_coarse_boottime_ns(); -- else if (last_under_load) -+ } else if (last_under_load) { - under_load = !wg_birthdate_has_expired(last_under_load, 1); -+ if (!under_load) -+ last_under_load = 0; -+ } - mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb, - under_load); - if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) || diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0088-wireguard-send-account-for-mtu-0-devices.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0088-wireguard-send-account-for-mtu-0-devices.patch deleted file mode 100644 index d84efe20f..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0088-wireguard-send-account-for-mtu-0-devices.patch +++ /dev/null @@ -1,95 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 14 Feb 2020 23:57:22 +0100 -Subject: [PATCH] wireguard: send: account for mtu=0 devices - -commit 175f1ca9a9ed8689d2028da1a7c624bb4fb4ff7e upstream. - -It turns out there's an easy way to get packets queued up while still -having an MTU of zero, and that's via persistent keep alive. This commit -makes sure that in whatever condition, we don't wind up dividing by -zero. Note that an MTU of zero for a wireguard interface is something -quasi-valid, so I don't think the correct fix is to limit it via -min_mtu. 
This can be reproduced easily with: - -ip link add wg0 type wireguard -ip link add wg1 type wireguard -ip link set wg0 up mtu 0 -ip link set wg1 up -wg set wg0 private-key <(wg genkey) -wg set wg1 listen-port 1 private-key <(wg genkey) peer $(wg show wg0 public-key) -wg set wg0 peer $(wg show wg1 public-key) persistent-keepalive 1 endpoint 127.0.0.1:1 - -However, while min_mtu=0 seems fine, it makes sense to restrict the -max_mtu. This commit also restricts the maximum MTU to the greatest -number for which rounding up to the padding multiple won't overflow a -signed integer. Packets this large were always rejected anyway -eventually, due to checks deeper in, but it seems more sound not to even -let the administrator configure something that won't work anyway. - -We use this opportunity to clean up this function a bit so that it's -clear which paths we're expecting. - -Signed-off-by: Jason A. Donenfeld -Cc: Eric Dumazet -Reviewed-by: Eric Dumazet -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/device.c | 7 ++++--- - drivers/net/wireguard/send.c | 16 +++++++++++----- - 2 files changed, 15 insertions(+), 8 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -258,6 +258,8 @@ static void wg_setup(struct net_device * - enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | - NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA }; -+ const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) + -+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); - - dev->netdev_ops = &netdev_ops; - dev->hard_header_len = 0; -@@ -271,9 +273,8 @@ static void wg_setup(struct net_device * - dev->features |= WG_NETDEV_FEATURES; - dev->hw_features |= WG_NETDEV_FEATURES; - dev->hw_enc_features |= WG_NETDEV_FEATURES; -- dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH - -- sizeof(struct udphdr) - -- max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); -+ dev->mtu = ETH_DATA_LEN - overhead; -+ dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead; - - SET_NETDEV_DEVTYPE(dev, &device_type); - ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -143,16 +143,22 @@ static void keep_key_fresh(struct wg_pee - - static unsigned int calculate_skb_padding(struct sk_buff *skb) - { -+ unsigned int padded_size, last_unit = skb->len; -+ -+ if (unlikely(!PACKET_CB(skb)->mtu)) -+ return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit; -+ - /* We do this modulo business with the MTU, just in case the networking - * layer gives us a packet that's bigger than the MTU. In that case, we - * wouldn't want the final subtraction to overflow in the case of the -- * padded_size being clamped. -+ * padded_size being clamped. Fortunately, that's very rarely the case, -+ * so we optimize for that not happening. 
- */ -- unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu; -- unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE); -+ if (unlikely(last_unit > PACKET_CB(skb)->mtu)) -+ last_unit %= PACKET_CB(skb)->mtu; - -- if (padded_size > PACKET_CB(skb)->mtu) -- padded_size = PACKET_CB(skb)->mtu; -+ padded_size = min(PACKET_CB(skb)->mtu, -+ ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE)); - return padded_size - last_unit; - } - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0089-wireguard-socket-remove-extra-call-to-synchronize_ne.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0089-wireguard-socket-remove-extra-call-to-synchronize_ne.patch deleted file mode 100644 index 458e9d51e..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0089-wireguard-socket-remove-extra-call-to-synchronize_ne.patch +++ /dev/null @@ -1,32 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 14 Feb 2020 23:57:23 +0100 -Subject: [PATCH] wireguard: socket: remove extra call to synchronize_net - -commit 1fbc33b0a7feb6ca72bf7dc8a05d81485ee8ee2e upstream. - -synchronize_net() is a wrapper around synchronize_rcu(), so there's no -point in having synchronize_net and synchronize_rcu back to back, -despite the documentation comment suggesting maybe it's somewhat useful, -"Wait for packets currently being received to be done." This commit -removes the extra call. - -Signed-off-by: Jason A. Donenfeld -Suggested-by: Eric Dumazet -Reviewed-by: Eric Dumazet -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/socket.c | 1 - - 1 file changed, 1 deletion(-) - ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -432,7 +432,6 @@ void wg_socket_reinit(struct wg_device * - wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); - mutex_unlock(&wg->socket_update_lock); - synchronize_rcu(); -- synchronize_net(); - sock_free(old4); - sock_free(old6); - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0090-wireguard-selftests-remove-duplicated-include-sys-ty.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0090-wireguard-selftests-remove-duplicated-include-sys-ty.patch deleted file mode 100644 index 93545e676..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0090-wireguard-selftests-remove-duplicated-include-sys-ty.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: YueHaibing -Date: Wed, 18 Mar 2020 18:30:43 -0600 -Subject: [PATCH] wireguard: selftests: remove duplicated include - -commit 166391159c5deb84795d2ff46e95f276177fa5fb upstream. - -This commit removes a duplicated include. - -Signed-off-by: YueHaibing -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - tools/testing/selftests/wireguard/qemu/init.c | 1 - - 1 file changed, 1 deletion(-) - ---- a/tools/testing/selftests/wireguard/qemu/init.c -+++ b/tools/testing/selftests/wireguard/qemu/init.c -@@ -13,7 +13,6 @@ - #include - #include - #include --#include - #include - #include - #include diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0091-wireguard-queueing-account-for-skb-protocol-0.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0091-wireguard-queueing-account-for-skb-protocol-0.patch deleted file mode 100644 index a9ca655e7..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0091-wireguard-queueing-account-for-skb-protocol-0.patch +++ /dev/null @@ -1,100 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 18 Mar 2020 18:30:45 -0600 -Subject: [PATCH] wireguard: queueing: account for skb->protocol==0 - -commit a5588604af448664e796daf3c1d5a4523c60667b upstream. - -We carry out checks to the effect of: - - if (skb->protocol != wg_examine_packet_protocol(skb)) - goto err; - -By having wg_skb_examine_untrusted_ip_hdr return 0 on failure, this -means that the check above still passes in the case where skb->protocol -is zero, which is possible to hit with AF_PACKET: - - struct sockaddr_pkt saddr = { .spkt_device = "wg0" }; - unsigned char buffer[5] = { 0 }; - sendto(socket(AF_PACKET, SOCK_PACKET, /* skb->protocol = */ 0), - buffer, sizeof(buffer), 0, (const struct sockaddr *)&saddr, sizeof(saddr)); - -Additional checks mean that this isn't actually a problem in the code -base, but I could imagine it becoming a problem later if the function is -used more liberally. - -I would prefer to fix this by having wg_examine_packet_protocol return a -32-bit ~0 value on failure, which will never match any value of -skb->protocol, which would simply change the generated code from a mov -to a movzx. 
However, sparse complains, and adding __force casts doesn't -seem like a good idea, so instead we just add a simple helper function -to check for the zero return value. Since wg_examine_packet_protocol -itself gets inlined, this winds up not adding an additional branch to -the generated code, since the 0 return value already happens in a -mergable branch. - -Reported-by: Fabian Freyer -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/device.c | 2 +- - drivers/net/wireguard/queueing.h | 8 +++++++- - drivers/net/wireguard/receive.c | 4 ++-- - 3 files changed, 10 insertions(+), 4 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -122,7 +122,7 @@ static netdev_tx_t wg_xmit(struct sk_buf - u32 mtu; - int ret; - -- if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol)) { -+ if (unlikely(!wg_check_packet_protocol(skb))) { - ret = -EPROTONOSUPPORT; - net_dbg_ratelimited("%s: Invalid IP packet\n", dev->name); - goto err; ---- a/drivers/net/wireguard/queueing.h -+++ b/drivers/net/wireguard/queueing.h -@@ -66,7 +66,7 @@ struct packet_cb { - #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) - - /* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ --static inline __be16 wg_skb_examine_untrusted_ip_hdr(struct sk_buff *skb) -+static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) - { - if (skb_network_header(skb) >= skb->head && - (skb_network_header(skb) + sizeof(struct iphdr)) <= -@@ -81,6 +81,12 @@ static inline __be16 wg_skb_examine_untr - return 0; - } - -+static inline bool wg_check_packet_protocol(struct sk_buff *skb) -+{ -+ __be16 real_protocol = wg_examine_packet_protocol(skb); -+ return real_protocol && skb->protocol == real_protocol; -+} -+ - static inline void wg_reset_packet(struct sk_buff *skb) - { - skb_scrub_packet(skb, true); ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -56,7 +56,7 @@ static int prepare_skb_header(struct sk_ - size_t data_offset, data_len, header_len; - struct udphdr *udp; - -- if (unlikely(wg_skb_examine_untrusted_ip_hdr(skb) != skb->protocol || -+ if (unlikely(!wg_check_packet_protocol(skb) || - skb_transport_header(skb) < skb->head || - (skb_transport_header(skb) + sizeof(struct udphdr)) > - skb_tail_pointer(skb))) -@@ -388,7 +388,7 @@ static void wg_packet_consume_data_done( - */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = ~0; /* All levels */ -- skb->protocol = wg_skb_examine_untrusted_ip_hdr(skb); -+ skb->protocol = wg_examine_packet_protocol(skb); - if (skb->protocol == htons(ETH_P_IP)) { - len = ntohs(ip_hdr(skb)->tot_len); - if (unlikely(len < sizeof(struct iphdr))) diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0092-wireguard-receive-remove-dead-code-from-default-pack.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0092-wireguard-receive-remove-dead-code-from-default-pack.patch deleted file mode 100644 index bcd4fbfbc..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0092-wireguard-receive-remove-dead-code-from-default-pack.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. 
Donenfeld" -Date: Wed, 18 Mar 2020 18:30:46 -0600 -Subject: [PATCH] wireguard: receive: remove dead code from default packet type - case - -commit 2b8765c52db24c0fbcc81bac9b5e8390f2c7d3c8 upstream. - -The situation in which we wind up hitting the default case here -indicates a major bug in earlier parsing code. It is not a usual thing -that should ever happen, which means a "friendly" message for it doesn't -make sense. Rather, replace this with a WARN_ON, just like we do earlier -in the file for a similar situation, so that somebody sends us a bug -report and we can fix it. - -Reported-by: Fabian Freyer -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/receive.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -587,8 +587,7 @@ void wg_packet_receive(struct wg_device - wg_packet_consume_data(wg, skb); - break; - default: -- net_dbg_skb_ratelimited("%s: Invalid packet from %pISpfsc\n", -- wg->dev->name, skb); -+ WARN(1, "Non-exhaustive parsing of packet header lead to unknown packet type!\n"); - goto err; - } - return; diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0093-wireguard-noise-error-out-precomputed-DH-during-hand.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0093-wireguard-noise-error-out-precomputed-DH-during-hand.patch deleted file mode 100644 index dac3046e4..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0093-wireguard-noise-error-out-precomputed-DH-during-hand.patch +++ /dev/null @@ -1,224 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 18 Mar 2020 18:30:47 -0600 -Subject: [PATCH] wireguard: noise: error out precomputed DH during handshake - rather than config - -commit 11a7686aa99c7fe4b3f80f6dcccd54129817984d upstream. 
- -We precompute the static-static ECDH during configuration time, in order -to save an expensive computation later when receiving network packets. -However, not all ECDH computations yield a contributory result. Prior, -we were just not letting those peers be added to the interface. However, -this creates a strange inconsistency, since it was still possible to add -other weird points, like a valid public key plus a low-order point, and, -like points that result in zeros, a handshake would not complete. In -order to make the behavior more uniform and less surprising, simply -allow all peers to be added. Then, we'll error out later when doing the -crypto if there's an issue. This also adds more separation between the -crypto layer and the configuration layer. - -Discussed-with: Mathias Hall-Andersen -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/netlink.c | 8 +--- - drivers/net/wireguard/noise.c | 55 ++++++++++++---------- - drivers/net/wireguard/noise.h | 12 ++--- - drivers/net/wireguard/peer.c | 7 +-- - tools/testing/selftests/wireguard/netns.sh | 15 ++++-- - 5 files changed, 49 insertions(+), 48 deletions(-) - ---- a/drivers/net/wireguard/netlink.c -+++ b/drivers/net/wireguard/netlink.c -@@ -417,11 +417,7 @@ static int set_peer(struct wg_device *wg - - peer = wg_peer_create(wg, public_key, preshared_key); - if (IS_ERR(peer)) { -- /* Similar to the above, if the key is invalid, we skip -- * it without fanfare, so that services don't need to -- * worry about doing key validation themselves. -- */ -- ret = PTR_ERR(peer) == -EKEYREJECTED ? 
0 : PTR_ERR(peer); -+ ret = PTR_ERR(peer); - peer = NULL; - goto out; - } -@@ -575,7 +571,7 @@ static int wg_set_device(struct sk_buff - private_key); - list_for_each_entry_safe(peer, temp, &wg->peer_list, - peer_list) { -- BUG_ON(!wg_noise_precompute_static_static(peer)); -+ wg_noise_precompute_static_static(peer); - wg_noise_expire_current_peer_keypairs(peer); - } - wg_cookie_checker_precompute_device_keys(&wg->cookie_checker); ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -44,32 +44,23 @@ void __init wg_noise_init(void) - } - - /* Must hold peer->handshake.static_identity->lock */ --bool wg_noise_precompute_static_static(struct wg_peer *peer) -+void wg_noise_precompute_static_static(struct wg_peer *peer) - { -- bool ret; -- - down_write(&peer->handshake.lock); -- if (peer->handshake.static_identity->has_identity) { -- ret = curve25519( -- peer->handshake.precomputed_static_static, -+ if (!peer->handshake.static_identity->has_identity || -+ !curve25519(peer->handshake.precomputed_static_static, - peer->handshake.static_identity->static_private, -- peer->handshake.remote_static); -- } else { -- u8 empty[NOISE_PUBLIC_KEY_LEN] = { 0 }; -- -- ret = curve25519(empty, empty, peer->handshake.remote_static); -+ peer->handshake.remote_static)) - memset(peer->handshake.precomputed_static_static, 0, - NOISE_PUBLIC_KEY_LEN); -- } - up_write(&peer->handshake.lock); -- return ret; - } - --bool wg_noise_handshake_init(struct noise_handshake *handshake, -- struct noise_static_identity *static_identity, -- const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -- const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -- struct wg_peer *peer) -+void wg_noise_handshake_init(struct noise_handshake *handshake, -+ struct noise_static_identity *static_identity, -+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -+ struct wg_peer *peer) - { - memset(handshake, 0, sizeof(*handshake)); - 
init_rwsem(&handshake->lock); -@@ -81,7 +72,7 @@ bool wg_noise_handshake_init(struct nois - NOISE_SYMMETRIC_KEY_LEN); - handshake->static_identity = static_identity; - handshake->state = HANDSHAKE_ZEROED; -- return wg_noise_precompute_static_static(peer); -+ wg_noise_precompute_static_static(peer); - } - - static void handshake_zero(struct noise_handshake *handshake) -@@ -403,6 +394,19 @@ static bool __must_check mix_dh(u8 chain - return true; - } - -+static bool __must_check mix_precomputed_dh(u8 chaining_key[NOISE_HASH_LEN], -+ u8 key[NOISE_SYMMETRIC_KEY_LEN], -+ const u8 precomputed[NOISE_PUBLIC_KEY_LEN]) -+{ -+ static u8 zero_point[NOISE_PUBLIC_KEY_LEN]; -+ if (unlikely(!crypto_memneq(precomputed, zero_point, NOISE_PUBLIC_KEY_LEN))) -+ return false; -+ kdf(chaining_key, key, NULL, precomputed, NOISE_HASH_LEN, -+ NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, -+ chaining_key); -+ return true; -+} -+ - static void mix_hash(u8 hash[NOISE_HASH_LEN], const u8 *src, size_t src_len) - { - struct blake2s_state blake; -@@ -531,10 +535,9 @@ wg_noise_handshake_create_initiation(str - NOISE_PUBLIC_KEY_LEN, key, handshake->hash); - - /* ss */ -- kdf(handshake->chaining_key, key, NULL, -- handshake->precomputed_static_static, NOISE_HASH_LEN, -- NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, -- handshake->chaining_key); -+ if (!mix_precomputed_dh(handshake->chaining_key, key, -+ handshake->precomputed_static_static)) -+ goto out; - - /* {t} */ - tai64n_now(timestamp); -@@ -595,9 +598,9 @@ wg_noise_handshake_consume_initiation(st - handshake = &peer->handshake; - - /* ss */ -- kdf(chaining_key, key, NULL, handshake->precomputed_static_static, -- NOISE_HASH_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, NOISE_PUBLIC_KEY_LEN, -- chaining_key); -+ if (!mix_precomputed_dh(chaining_key, key, -+ handshake->precomputed_static_static)) -+ goto out; - - /* {t} */ - if (!message_decrypt(t, src->encrypted_timestamp, ---- a/drivers/net/wireguard/noise.h -+++ b/drivers/net/wireguard/noise.h -@@ 
-94,11 +94,11 @@ struct noise_handshake { - struct wg_device; - - void wg_noise_init(void); --bool wg_noise_handshake_init(struct noise_handshake *handshake, -- struct noise_static_identity *static_identity, -- const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -- const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -- struct wg_peer *peer); -+void wg_noise_handshake_init(struct noise_handshake *handshake, -+ struct noise_static_identity *static_identity, -+ const u8 peer_public_key[NOISE_PUBLIC_KEY_LEN], -+ const u8 peer_preshared_key[NOISE_SYMMETRIC_KEY_LEN], -+ struct wg_peer *peer); - void wg_noise_handshake_clear(struct noise_handshake *handshake); - static inline void wg_noise_reset_last_sent_handshake(atomic64_t *handshake_ns) - { -@@ -116,7 +116,7 @@ void wg_noise_expire_current_peer_keypai - void wg_noise_set_static_identity_private_key( - struct noise_static_identity *static_identity, - const u8 private_key[NOISE_PUBLIC_KEY_LEN]); --bool wg_noise_precompute_static_static(struct wg_peer *peer); -+void wg_noise_precompute_static_static(struct wg_peer *peer); - - bool - wg_noise_handshake_create_initiation(struct message_handshake_initiation *dst, ---- a/drivers/net/wireguard/peer.c -+++ b/drivers/net/wireguard/peer.c -@@ -34,11 +34,8 @@ struct wg_peer *wg_peer_create(struct wg - return ERR_PTR(ret); - peer->device = wg; - -- if (!wg_noise_handshake_init(&peer->handshake, &wg->static_identity, -- public_key, preshared_key, peer)) { -- ret = -EKEYREJECTED; -- goto err_1; -- } -+ wg_noise_handshake_init(&peer->handshake, &wg->static_identity, -+ public_key, preshared_key, peer); - if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) - goto err_1; - if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -527,11 +527,16 @@ n0 wg set wg0 peer "$pub2" allowed-ips 0 - n0 wg set wg0 peer "$pub2" allowed-ips 
::/0,1700::/111,5000::/4,e000::/37,9000::/75 - n0 wg set wg0 peer "$pub2" allowed-ips ::/0 - n0 wg set wg0 peer "$pub2" remove --low_order_points=( AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38= ) --n0 wg set wg0 private-key /dev/null ${low_order_points[@]/#/peer } --[[ -z $(n0 wg show wg0 peers) ]] --n0 wg set wg0 private-key <(echo "$key1") ${low_order_points[@]/#/peer } --[[ -z $(n0 wg show wg0 peers) ]] -+for low_order_point in AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= AQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA= 4Ot6fDtBuK4WVuP68Z/EatoJjeucMrH9hmIFFl9JuAA= X5yVvKNQjCSx0LFVnIPvWwREXMRYHI6G2CJO3dCfEVc= 7P///////////////////////////////////////38= 7f///////////////////////////////////////38= 7v///////////////////////////////////////38=; do -+ n0 wg set wg0 peer "$low_order_point" persistent-keepalive 1 endpoint 127.0.0.1:1111 -+done -+[[ -n $(n0 wg show wg0 peers) ]] -+exec 4< <(n0 ncat -l -u -p 1111) -+ncat_pid=$! -+waitncatudp $netns0 $ncat_pid -+ip0 link set wg0 up -+! 
read -r -n 1 -t 2 <&4 || false -+kill $ncat_pid - ip0 link del wg0 - - declare -A objects diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0094-wireguard-send-remove-errant-newline-from-packet_enc.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0094-wireguard-send-remove-errant-newline-from-packet_enc.patch deleted file mode 100644 index c92b6a784..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0094-wireguard-send-remove-errant-newline-from-packet_enc.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Sultan Alsawaf -Date: Wed, 29 Apr 2020 14:59:20 -0600 -Subject: [PATCH] wireguard: send: remove errant newline from - packet_encrypt_worker - -commit d6833e42786e050e7522d6a91a9361e54085897d upstream. - -This commit removes a useless newline at the end of a scope, which -doesn't add anything in the way of organization or readability. - -Signed-off-by: Sultan Alsawaf -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/send.c | 1 - - 1 file changed, 1 deletion(-) - ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -304,7 +304,6 @@ void wg_packet_encrypt_worker(struct wor - } - wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, - state); -- - } - } - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0095-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0095-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch deleted file mode 100644 index a72c50989..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0095-wireguard-queueing-cleanup-ptr_ring-in-error-path-of.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. 
Donenfeld" -Date: Wed, 29 Apr 2020 14:59:21 -0600 -Subject: [PATCH] wireguard: queueing: cleanup ptr_ring in error path of - packet_queue_init - -commit 130c58606171326c81841a49cc913cd354113dd9 upstream. - -Prior, if the alloc_percpu of packet_percpu_multicore_worker_alloc -failed, the previously allocated ptr_ring wouldn't be freed. This commit -adds the missing call to ptr_ring_cleanup in the error case. - -Reported-by: Sultan Alsawaf -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/queueing.c | 4 +++- - 1 file changed, 3 insertions(+), 1 deletion(-) - ---- a/drivers/net/wireguard/queueing.c -+++ b/drivers/net/wireguard/queueing.c -@@ -35,8 +35,10 @@ int wg_packet_queue_init(struct crypt_qu - if (multicore) { - queue->worker = wg_packet_percpu_multicore_worker_alloc( - function, queue); -- if (!queue->worker) -+ if (!queue->worker) { -+ ptr_ring_cleanup(&queue->ring, NULL); - return -ENOMEM; -+ } - } else { - INIT_WORK(&queue->work, function); - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0096-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0096-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch deleted file mode 100644 index a72358c30..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0096-wireguard-receive-use-tunnel-helpers-for-decapsulati.patch +++ /dev/null @@ -1,50 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= -Date: Wed, 29 Apr 2020 14:59:22 -0600 -Subject: [PATCH] wireguard: receive: use tunnel helpers for decapsulating ECN - markings -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit eebabcb26ea1e3295704477c6cd4e772c96a9559 upstream. 
- -WireGuard currently only propagates ECN markings on tunnel decap according -to the old RFC3168 specification. However, the spec has since been updated -in RFC6040 to recommend slightly different decapsulation semantics. This -was implemented in the kernel as a set of common helpers for ECN -decapsulation, so let's just switch over WireGuard to using those, so it -can benefit from this enhancement and any future tweaks. We do not drop -packets with invalid ECN marking combinations, because WireGuard is -frequently used to work around broken ISPs, which could be doing that. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Reported-by: Olivier Tilmans -Cc: Dave Taht -Cc: Rodney W. Grimes -Signed-off-by: Toke Høiland-Jørgensen -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/receive.c | 6 ++---- - 1 file changed, 2 insertions(+), 4 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -393,13 +393,11 @@ static void wg_packet_consume_data_done( - len = ntohs(ip_hdr(skb)->tot_len); - if (unlikely(len < sizeof(struct iphdr))) - goto dishonest_packet_size; -- if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) -- IP_ECN_set_ce(ip_hdr(skb)); -+ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ip_hdr(skb)->tos); - } else if (skb->protocol == htons(ETH_P_IPV6)) { - len = ntohs(ipv6_hdr(skb)->payload_len) + - sizeof(struct ipv6hdr); -- if (INET_ECN_is_ce(PACKET_CB(skb)->ds)) -- IP6_ECN_set_ce(skb, ipv6_hdr(skb)); -+ INET_ECN_decapsulate(skb, PACKET_CB(skb)->ds, ipv6_get_dsfield(ipv6_hdr(skb))); - } else { - goto dishonest_packet_type; - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0097-wireguard-selftests-use-normal-kernel-stack-size-on-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0097-wireguard-selftests-use-normal-kernel-stack-size-on-.patch deleted file mode 100644 index f4543d256..000000000 --- 
a/feeds/ipq807x/ipq807x/patches/080-wireguard-0097-wireguard-selftests-use-normal-kernel-stack-size-on-.patch +++ /dev/null @@ -1,28 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 6 May 2020 15:33:02 -0600 -Subject: [PATCH] wireguard: selftests: use normal kernel stack size on ppc64 - -commit a0fd7cc87a018df1a17f9d3f0bd994c1f22c6b34 upstream. - -While at some point it might have made sense to be running these tests -on ppc64 with 4k stacks, the kernel hasn't actually used 4k stacks on -64-bit powerpc in a long time, and more interesting things that we test -don't really work when we deviate from the default (16k). So, we stop -pushing our luck in this commit, and return to the default instead of -the minimum. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config | 1 + - 1 file changed, 1 insertion(+) - ---- a/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config -+++ b/tools/testing/selftests/wireguard/qemu/arch/powerpc64le.config -@@ -10,3 +10,4 @@ CONFIG_CMDLINE_BOOL=y - CONFIG_CMDLINE="console=hvc0 wg.success=hvc1" - CONFIG_SECTION_MISMATCH_WARN_ONLY=y - CONFIG_FRAME_WARN=1280 -+CONFIG_THREAD_SHIFT=14 diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0098-wireguard-socket-remove-errant-restriction-on-loopin.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0098-wireguard-socket-remove-errant-restriction-on-loopin.patch deleted file mode 100644 index 6dafa4781..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0098-wireguard-socket-remove-errant-restriction-on-loopin.patch +++ /dev/null @@ -1,162 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. 
Donenfeld" -Date: Wed, 6 May 2020 15:33:03 -0600 -Subject: [PATCH] wireguard: socket: remove errant restriction on looping to - self - -commit b673e24aad36981f327a6570412ffa7754de8911 upstream. - -It's already possible to create two different interfaces and loop -packets between them. This has always been possible with tunnels in the -kernel, and isn't specific to wireguard. Therefore, the networking stack -already needs to deal with that. At the very least, the packet winds up -exceeding the MTU and is discarded at that point. So, since this is -already something that happens, there's no need to forbid the not very -exceptional case of routing a packet back to the same interface; this -loop is no different than others, and we shouldn't special case it, but -rather rely on generic handling of loops in general. This also makes it -easier to do interesting things with wireguard such as onion routing. - -At the same time, we add a selftest for this, ensuring that both onion -routing works and infinite routing loops do not crash the kernel. We -also add a test case for wireguard interfaces nesting packets and -sending traffic between each other, as well as the loop in this case -too. We make sure to send some throughput-heavy traffic for this use -case, to stress out any possible recursion issues with the locks around -workqueues. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/socket.c | 12 ----- - tools/testing/selftests/wireguard/netns.sh | 54 ++++++++++++++++++++-- - 2 files changed, 51 insertions(+), 15 deletions(-) - ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -76,12 +76,6 @@ static int send4(struct wg_device *wg, s - net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", - wg->dev->name, &endpoint->addr, ret); - goto err; -- } else if (unlikely(rt->dst.dev == skb->dev)) { -- ip_rt_put(rt); -- ret = -ELOOP; -- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", -- wg->dev->name, &endpoint->addr); -- goto err; - } - if (cache) - dst_cache_set_ip4(cache, &rt->dst, fl.saddr); -@@ -149,12 +143,6 @@ static int send6(struct wg_device *wg, s - net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", - wg->dev->name, &endpoint->addr, ret); - goto err; -- } else if (unlikely(dst->dev == skb->dev)) { -- dst_release(dst); -- ret = -ELOOP; -- net_dbg_ratelimited("%s: Avoiding routing loop to %pISpfsc\n", -- wg->dev->name, &endpoint->addr); -- goto err; - } - if (cache) - dst_cache_set_ip6(cache, dst, &fl.saddr); ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -48,8 +48,11 @@ cleanup() { - exec 2>/dev/null - printf "$orig_message_cost" > /proc/sys/net/core/message_cost - ip0 link del dev wg0 -+ ip0 link del dev wg1 - ip1 link del dev wg0 -+ ip1 link del dev wg1 - ip2 link del dev wg0 -+ ip2 link del dev wg1 - local to_kill="$(ip netns pids $netns0) $(ip netns pids $netns1) $(ip netns pids $netns2)" - [[ -n $to_kill ]] && kill $to_kill - pp ip netns del $netns1 -@@ -77,18 +80,20 @@ ip0 link set wg0 netns $netns2 - key1="$(pp wg genkey)" - key2="$(pp wg genkey)" - key3="$(pp wg genkey)" -+key4="$(pp wg genkey)" - pub1="$(pp wg pubkey <<<"$key1")" - pub2="$(pp wg pubkey <<<"$key2")" - pub3="$(pp wg pubkey <<<"$key3")" -+pub4="$(pp wg pubkey <<<"$key4")" - psk="$(pp wg genpsk)" - [[ -n $key1 && -n 
$key2 && -n $psk ]] - - configure_peers() { - ip1 addr add 192.168.241.1/24 dev wg0 -- ip1 addr add fd00::1/24 dev wg0 -+ ip1 addr add fd00::1/112 dev wg0 - - ip2 addr add 192.168.241.2/24 dev wg0 -- ip2 addr add fd00::2/24 dev wg0 -+ ip2 addr add fd00::2/112 dev wg0 - - n1 wg set wg0 \ - private-key <(echo "$key1") \ -@@ -230,9 +235,38 @@ n1 ping -W 1 -c 1 192.168.241.2 - n1 wg set wg0 private-key <(echo "$key3") - n2 wg set wg0 peer "$pub3" preshared-key <(echo "$psk") allowed-ips 192.168.241.1/32 peer "$pub1" remove - n1 ping -W 1 -c 1 192.168.241.2 -+n2 wg set wg0 peer "$pub3" remove - --ip1 link del wg0 -+# Test that we can route wg through wg -+ip1 addr flush dev wg0 -+ip2 addr flush dev wg0 -+ip1 addr add fd00::5:1/112 dev wg0 -+ip2 addr add fd00::5:2/112 dev wg0 -+n1 wg set wg0 private-key <(echo "$key1") peer "$pub2" preshared-key <(echo "$psk") allowed-ips fd00::5:2/128 endpoint 127.0.0.1:2 -+n2 wg set wg0 private-key <(echo "$key2") listen-port 2 peer "$pub1" preshared-key <(echo "$psk") allowed-ips fd00::5:1/128 endpoint 127.212.121.99:9998 -+ip1 link add wg1 type wireguard -+ip2 link add wg1 type wireguard -+ip1 addr add 192.168.241.1/24 dev wg1 -+ip1 addr add fd00::1/112 dev wg1 -+ip2 addr add 192.168.241.2/24 dev wg1 -+ip2 addr add fd00::2/112 dev wg1 -+ip1 link set mtu 1340 up dev wg1 -+ip2 link set mtu 1340 up dev wg1 -+n1 wg set wg1 listen-port 5 private-key <(echo "$key3") peer "$pub4" allowed-ips 192.168.241.2/32,fd00::2/128 endpoint [fd00::5:2]:5 -+n2 wg set wg1 listen-port 5 private-key <(echo "$key4") peer "$pub3" allowed-ips 192.168.241.1/32,fd00::1/128 endpoint [fd00::5:1]:5 -+tests -+# Try to set up a routing loop between the two namespaces -+ip1 link set netns $netns0 dev wg1 -+ip0 addr add 192.168.241.1/24 dev wg1 -+ip0 link set up dev wg1 -+n0 ping -W 1 -c 1 192.168.241.2 -+n1 wg set wg0 peer "$pub2" endpoint 192.168.241.2:7 - ip2 link del wg0 -+ip2 link del wg1 -+! 
n0 ping -W 1 -c 10 -f 192.168.241.2 || false # Should not crash kernel -+ -+ip0 link del wg1 -+ip1 link del wg0 - - # Test using NAT. We now change the topology to this: - # ┌────────────────────────────────────────┐ ┌────────────────────────────────────────────────┐ ┌────────────────────────────────────────┐ -@@ -282,6 +316,20 @@ pp sleep 3 - n2 ping -W 1 -c 1 192.168.241.1 - n1 wg set wg0 peer "$pub2" persistent-keepalive 0 - -+# Test that onion routing works, even when it loops -+n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5 -+ip1 addr add 192.168.242.1/24 dev wg0 -+ip2 link add wg1 type wireguard -+ip2 addr add 192.168.242.2/24 dev wg1 -+n2 wg set wg1 private-key <(echo "$key3") listen-port 5 peer "$pub1" allowed-ips 192.168.242.1/32 -+ip2 link set wg1 up -+n1 ping -W 1 -c 1 192.168.242.2 -+ip2 link del wg1 -+n1 wg set wg0 peer "$pub3" endpoint 192.168.242.2:5 -+! n1 ping -W 1 -c 1 192.168.242.2 || false # Should not crash kernel -+n1 wg set wg0 peer "$pub3" remove -+ip1 addr del 192.168.242.1/24 dev wg0 -+ - # Do a wg-quick(8)-style policy routing for the default route, making sure vethc has a v6 address to tease out bugs. - ip1 -6 addr add fc00::9/96 dev vethc - ip1 -6 route add default via fc00::1 diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0099-wireguard-send-receive-cond_resched-when-processing-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0099-wireguard-send-receive-cond_resched-when-processing-.patch deleted file mode 100644 index 499b36bc5..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0099-wireguard-send-receive-cond_resched-when-processing-.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 6 May 2020 15:33:04 -0600 -Subject: [PATCH] wireguard: send/receive: cond_resched() when processing - worker ringbuffers - -commit 4005f5c3c9d006157ba716594e0d70c88a235c5e upstream. 
- -Users with pathological hardware reported CPU stalls on CONFIG_ -PREEMPT_VOLUNTARY=y, because the ringbuffers would stay full, meaning -these workers would never terminate. That turned out not to be okay on -systems without forced preemption, which Sultan observed. This commit -adds a cond_resched() to the bottom of each loop iteration, so that -these workers don't hog the core. Note that we don't need this on the -napi poll worker, since that terminates after its budget is expended. - -Suggested-by: Sultan Alsawaf -Reported-by: Wang Jian -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/receive.c | 2 ++ - drivers/net/wireguard/send.c | 4 ++++ - 2 files changed, 6 insertions(+) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -516,6 +516,8 @@ void wg_packet_decrypt_worker(struct wor - &PACKET_CB(skb)->keypair->receiving)) ? 
- PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; - wg_queue_enqueue_per_peer_napi(skb, state); -+ if (need_resched()) -+ cond_resched(); - } - } - ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -281,6 +281,8 @@ void wg_packet_tx_worker(struct work_str - - wg_noise_keypair_put(keypair, false); - wg_peer_put(peer); -+ if (need_resched()) -+ cond_resched(); - } - } - -@@ -304,6 +306,8 @@ void wg_packet_encrypt_worker(struct wor - } - wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, - state); -+ if (need_resched()) -+ cond_resched(); - } - } - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0100-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0100-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch deleted file mode 100644 index c1124be5c..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0100-wireguard-selftests-initalize-ipv6-members-to-NULL-t.patch +++ /dev/null @@ -1,51 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 6 May 2020 15:33:05 -0600 -Subject: [PATCH] wireguard: selftests: initalize ipv6 members to NULL to - squelch clang warning - -commit 4fed818ef54b08d4b29200e416cce65546ad5312 upstream. 
- -Without setting these to NULL, clang complains in certain -configurations that have CONFIG_IPV6=n: - -In file included from drivers/net/wireguard/ratelimiter.c:223: -drivers/net/wireguard/selftest/ratelimiter.c:173:34: error: variable 'skb6' is uninitialized when used here [-Werror,-Wuninitialized] - ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); - ^~~~ -drivers/net/wireguard/selftest/ratelimiter.c:123:29: note: initialize the variable 'skb6' to silence this warning - struct sk_buff *skb4, *skb6; - ^ - = NULL -drivers/net/wireguard/selftest/ratelimiter.c:173:40: error: variable 'hdr6' is uninitialized when used here [-Werror,-Wuninitialized] - ret = timings_test(skb4, hdr4, skb6, hdr6, &test_count); - ^~~~ -drivers/net/wireguard/selftest/ratelimiter.c:125:22: note: initialize the variable 'hdr6' to silence this warning - struct ipv6hdr *hdr6; - ^ - -We silence this warning by setting the variables to NULL as the warning -suggests. - -Reported-by: Arnd Bergmann -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/selftest/ratelimiter.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/selftest/ratelimiter.c -+++ b/drivers/net/wireguard/selftest/ratelimiter.c -@@ -120,9 +120,9 @@ bool __init wg_ratelimiter_selftest(void - enum { TRIALS_BEFORE_GIVING_UP = 5000 }; - bool success = false; - int test = 0, trials; -- struct sk_buff *skb4, *skb6; -+ struct sk_buff *skb4, *skb6 = NULL; - struct iphdr *hdr4; -- struct ipv6hdr *hdr6; -+ struct ipv6hdr *hdr6 = NULL; - - if (IS_ENABLED(CONFIG_KASAN) || IS_ENABLED(CONFIG_UBSAN)) - return true; diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0101-wireguard-send-receive-use-explicit-unlikely-branch-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0101-wireguard-send-receive-use-explicit-unlikely-branch-.patch deleted file mode 100644 index 900e2f235..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0101-wireguard-send-receive-use-explicit-unlikely-branch-.patch +++ /dev/null @@ -1,88 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 6 May 2020 15:33:06 -0600 -Subject: [PATCH] wireguard: send/receive: use explicit unlikely branch instead - of implicit coalescing - -commit 243f2148937adc72bcaaa590d482d599c936efde upstream. - -It's very unlikely that send will become true. It's nearly always false -between 0 and 120 seconds of a session, and in most cases becomes true -only between 120 and 121 seconds before becoming false again. So, -unlikely(send) is clearly the right option here. - -What happened before was that we had this complex boolean expression -with multiple likely and unlikely clauses nested. Since this is -evaluated left-to-right anyway, the whole thing got converted to -unlikely. So, we can clean this up to better represent what's going on. - -The generated code is the same. - -Suggested-by: Sultan Alsawaf -Signed-off-by: Jason A. 
Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/receive.c | 13 ++++++------- - drivers/net/wireguard/send.c | 15 ++++++--------- - 2 files changed, 12 insertions(+), 16 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -226,21 +226,20 @@ void wg_packet_handshake_receive_worker( - static void keep_key_fresh(struct wg_peer *peer) - { - struct noise_keypair *keypair; -- bool send = false; -+ bool send; - - if (peer->sent_lastminute_handshake) - return; - - rcu_read_lock_bh(); - keypair = rcu_dereference_bh(peer->keypairs.current_keypair); -- if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && -- keypair->i_am_the_initiator && -- unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, -- REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT))) -- send = true; -+ send = keypair && READ_ONCE(keypair->sending.is_valid) && -+ keypair->i_am_the_initiator && -+ wg_birthdate_has_expired(keypair->sending.birthdate, -+ REJECT_AFTER_TIME - KEEPALIVE_TIMEOUT - REKEY_TIMEOUT); - rcu_read_unlock_bh(); - -- if (send) { -+ if (unlikely(send)) { - peer->sent_lastminute_handshake = true; - wg_packet_send_queued_handshake_initiation(peer, false); - } ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -124,20 +124,17 @@ void wg_packet_send_handshake_cookie(str - static void keep_key_fresh(struct wg_peer *peer) - { - struct noise_keypair *keypair; -- bool send = false; -+ bool send; - - rcu_read_lock_bh(); - keypair = rcu_dereference_bh(peer->keypairs.current_keypair); -- if (likely(keypair && READ_ONCE(keypair->sending.is_valid)) && -- (unlikely(atomic64_read(&keypair->sending.counter.counter) > -- REKEY_AFTER_MESSAGES) || -- (keypair->i_am_the_initiator && -- unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, -- REKEY_AFTER_TIME))))) -- send = true; -+ send = keypair && READ_ONCE(keypair->sending.is_valid) && -+ 
(atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES || -+ (keypair->i_am_the_initiator && -+ wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); - rcu_read_unlock_bh(); - -- if (send) -+ if (unlikely(send)) - wg_packet_send_queued_handshake_initiation(peer, false); - } - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0102-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0102-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch deleted file mode 100644 index d4efe37a4..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0102-wireguard-selftests-use-newer-iproute2-for-gcc-10.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 19 May 2020 22:49:27 -0600 -Subject: [PATCH] wireguard: selftests: use newer iproute2 for gcc-10 - -commit ee3c1aa3f34b7842c1557cfe5d8c3f7b8c692de8 upstream. - -gcc-10 switched to defaulting to -fno-common, which broke iproute2-5.4. -This was fixed in iproute-5.6, so switch to that. Because we're after a -stable testing surface, we generally don't like to bump these -unnecessarily, but in this case, being able to actually build is a basic -necessity. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - tools/testing/selftests/wireguard/qemu/Makefile | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/tools/testing/selftests/wireguard/qemu/Makefile -+++ b/tools/testing/selftests/wireguard/qemu/Makefile -@@ -44,7 +44,7 @@ endef - $(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3)) - $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) - $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) --$(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) -+$(eval $(call tar_download,IPROUTE2,iproute2,5.6.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,1b5b0e25ce6e23da7526ea1da044e814ad85ba761b10dd29c2b027c056b04692)) - $(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) - $(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) - $(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a)) diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0103-wireguard-noise-read-preshared-key-while-taking-lock.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0103-wireguard-noise-read-preshared-key-while-taking-lock.patch deleted file mode 100644 index 2dac4b706..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0103-wireguard-noise-read-preshared-key-while-taking-lock.patch +++ 
/dev/null @@ -1,61 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 19 May 2020 22:49:28 -0600 -Subject: [PATCH] wireguard: noise: read preshared key while taking lock - -commit bc67d371256f5c47d824e2eec51e46c8d62d022e upstream. - -Prior we read the preshared key after dropping the handshake lock, which -isn't an actual crypto issue if it races, but it's still not quite -correct. So copy that part of the state into a temporary like we do with -the rest of the handshake state variables. Then we can release the lock, -operate on the temporary, and zero it out at the end of the function. In -performance tests, the impact of this was entirely unnoticable, probably -because those bytes are coming from the same cacheline as other things -that are being copied out in the same manner. - -Reported-by: Matt Dunwoodie -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/noise.c | 6 +++++- - 1 file changed, 5 insertions(+), 1 deletion(-) - ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -715,6 +715,7 @@ wg_noise_handshake_consume_response(stru - u8 e[NOISE_PUBLIC_KEY_LEN]; - u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; - u8 static_private[NOISE_PUBLIC_KEY_LEN]; -+ u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; - - down_read(&wg->static_identity.lock); - -@@ -733,6 +734,8 @@ wg_noise_handshake_consume_response(stru - memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); - memcpy(ephemeral_private, handshake->ephemeral_private, - NOISE_PUBLIC_KEY_LEN); -+ memcpy(preshared_key, handshake->preshared_key, -+ NOISE_SYMMETRIC_KEY_LEN); - up_read(&handshake->lock); - - if (state != HANDSHAKE_CREATED_INITIATION) -@@ -750,7 +753,7 @@ wg_noise_handshake_consume_response(stru - goto fail; - - /* psk */ -- mix_psk(chaining_key, hash, key, handshake->preshared_key); -+ mix_psk(chaining_key, hash, key, preshared_key); - - /* {} */ - if (!message_decrypt(NULL, src->encrypted_nothing, -@@ -783,6 +786,7 @@ out: - memzero_explicit(chaining_key, NOISE_HASH_LEN); - memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); - memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); -+ memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN); - up_read(&wg->static_identity.lock); - return ret_peer; - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0104-wireguard-queueing-preserve-flow-hash-across-packet-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0104-wireguard-queueing-preserve-flow-hash-across-packet-.patch deleted file mode 100644 index 31deadbfc..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0104-wireguard-queueing-preserve-flow-hash-across-packet-.patch +++ /dev/null @@ -1,116 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. 
Donenfeld" -Date: Tue, 19 May 2020 22:49:29 -0600 -Subject: [PATCH] wireguard: queueing: preserve flow hash across packet - scrubbing -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit c78a0b4a78839d572d8a80f6a62221c0d7843135 upstream. - -It's important that we clear most header fields during encapsulation and -decapsulation, because the packet is substantially changed, and we don't -want any info leak or logic bug due to an accidental correlation. But, -for encapsulation, it's wrong to clear skb->hash, since it's used by -fq_codel and flow dissection in general. Without it, classification does -not proceed as usual. This change might make it easier to estimate the -number of innerflows by examining clustering of out of order packets, -but this shouldn't open up anything that can't already be inferred -otherwise (e.g. syn packet size inference), and fq_codel can be disabled -anyway. - -Furthermore, it might be the case that the hash isn't used or queried at -all until after wireguard transmits the encrypted UDP packet, which -means skb->hash might still be zero at this point, and thus no hash -taken over the inner packet data. In order to address this situation, we -force a calculation of skb->hash before encrypting packet data. - -Of course this means that fq_codel might transmit packets slightly more -out of order than usual. Toke did some testing on beefy machines with -high quantities of parallel flows and found that increasing the -reply-attack counter to 8192 takes care of the most pathological cases -pretty well. - -Reported-by: Dave Taht -Reviewed-and-tested-by: Toke Høiland-Jørgensen -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/messages.h | 2 +- - drivers/net/wireguard/queueing.h | 10 +++++++++- - drivers/net/wireguard/receive.c | 2 +- - drivers/net/wireguard/send.c | 7 ++++++- - 4 files changed, 17 insertions(+), 4 deletions(-) - ---- a/drivers/net/wireguard/messages.h -+++ b/drivers/net/wireguard/messages.h -@@ -32,7 +32,7 @@ enum cookie_values { - }; - - enum counter_values { -- COUNTER_BITS_TOTAL = 2048, -+ COUNTER_BITS_TOTAL = 8192, - COUNTER_REDUNDANT_BITS = BITS_PER_LONG, - COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS - }; ---- a/drivers/net/wireguard/queueing.h -+++ b/drivers/net/wireguard/queueing.h -@@ -87,12 +87,20 @@ static inline bool wg_check_packet_proto - return real_protocol && skb->protocol == real_protocol; - } - --static inline void wg_reset_packet(struct sk_buff *skb) -+static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating) - { -+ u8 l4_hash = skb->l4_hash; -+ u8 sw_hash = skb->sw_hash; -+ u32 hash = skb->hash; - skb_scrub_packet(skb, true); - memset(&skb->headers_start, 0, - offsetof(struct sk_buff, headers_end) - - offsetof(struct sk_buff, headers_start)); -+ if (encapsulating) { -+ skb->l4_hash = l4_hash; -+ skb->sw_hash = sw_hash; -+ skb->hash = hash; -+ } - skb->queue_mapping = 0; - skb->nohdr = 0; - skb->peeked = 0; ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -484,7 +484,7 @@ int wg_packet_rx_poll(struct napi_struct - if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) - goto next; - -- wg_reset_packet(skb); -+ wg_reset_packet(skb, false); - wg_packet_consume_data_done(peer, skb, &endpoint); - free = false; - ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -167,6 +167,11 @@ static bool encrypt_packet(struct sk_buf - struct sk_buff *trailer; - int num_frags; - -+ /* Force hash calculation before encryption so that flow analysis is -+ * consistent over the inner packet. 
-+ */ -+ skb_get_hash(skb); -+ - /* Calculate lengths. */ - padding_len = calculate_skb_padding(skb); - trailer_len = padding_len + noise_encrypted_len(0); -@@ -295,7 +300,7 @@ void wg_packet_encrypt_worker(struct wor - skb_list_walk_safe(first, skb, next) { - if (likely(encrypt_packet(skb, - PACKET_CB(first)->keypair))) { -- wg_reset_packet(skb); -+ wg_reset_packet(skb, true); - } else { - state = PACKET_STATE_DEAD; - break; diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0105-wireguard-noise-separate-receive-counter-from-send-c.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0105-wireguard-noise-separate-receive-counter-from-send-c.patch deleted file mode 100644 index 87d38d36f..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0105-wireguard-noise-separate-receive-counter-from-send-c.patch +++ /dev/null @@ -1,330 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 19 May 2020 22:49:30 -0600 -Subject: [PATCH] wireguard: noise: separate receive counter from send counter - -commit a9e90d9931f3a474f04bab782ccd9d77904941e9 upstream. - -In "wireguard: queueing: preserve flow hash across packet scrubbing", we -were required to slightly increase the size of the receive replay -counter to something still fairly small, but an increase nonetheless. -It turns out that we can recoup some of the additional memory overhead -by splitting up the prior union type into two distinct types. Before, we -used the same "noise_counter" union for both sending and receiving, with -sending just using a simple atomic64_t, while receiving used the full -replay counter checker. This meant that most of the memory being -allocated for the sending counter was being wasted. 
Since the old -"noise_counter" type increased in size in the prior commit, now is a -good time to split up that union type into a distinct "noise_replay_ -counter" for receiving and a boring atomic64_t for sending, each using -neither more nor less memory than required. - -Also, since sometimes the replay counter is accessed without -necessitating additional accesses to the bitmap, we can reduce cache -misses by hoisting the always-necessary lock above the bitmap in the -struct layout. We also change a "noise_replay_counter" stack allocation -to kmalloc in a -DDEBUG selftest so that KASAN doesn't trigger a stack -frame warning. - -All and all, removing a bit of abstraction in this commit makes the code -simpler and smaller, in addition to the motivating memory usage -recuperation. For example, passing around raw "noise_symmetric_key" -structs is something that really only makes sense within noise.c, in the -one place where the sending and receiving keys can safely be thought of -as the same type of object; subsequent to that, it's important that we -uniformly access these through keypair->{sending,receiving}, where their -distinct roles are always made explicit. So this patch allows us to draw -that distinction clearly as well. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/noise.c | 16 +++------ - drivers/net/wireguard/noise.h | 14 ++++---- - drivers/net/wireguard/receive.c | 42 ++++++++++++------------ - drivers/net/wireguard/selftest/counter.c | 17 +++++++--- - drivers/net/wireguard/send.c | 12 +++---- - 5 files changed, 48 insertions(+), 53 deletions(-) - ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -104,6 +104,7 @@ static struct noise_keypair *keypair_cre - - if (unlikely(!keypair)) - return NULL; -+ spin_lock_init(&keypair->receiving_counter.lock); - keypair->internal_id = atomic64_inc_return(&keypair_counter); - keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; - keypair->entry.peer = peer; -@@ -358,25 +359,16 @@ out: - memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); - } - --static void symmetric_key_init(struct noise_symmetric_key *key) --{ -- spin_lock_init(&key->counter.receive.lock); -- atomic64_set(&key->counter.counter, 0); -- memset(key->counter.receive.backtrack, 0, -- sizeof(key->counter.receive.backtrack)); -- key->birthdate = ktime_get_coarse_boottime_ns(); -- key->is_valid = true; --} -- - static void derive_keys(struct noise_symmetric_key *first_dst, - struct noise_symmetric_key *second_dst, - const u8 chaining_key[NOISE_HASH_LEN]) - { -+ u64 birthdate = ktime_get_coarse_boottime_ns(); - kdf(first_dst->key, second_dst->key, NULL, NULL, - NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0, - chaining_key); -- symmetric_key_init(first_dst); -- symmetric_key_init(second_dst); -+ first_dst->birthdate = second_dst->birthdate = birthdate; -+ first_dst->is_valid = second_dst->is_valid = true; - } - - static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], ---- a/drivers/net/wireguard/noise.h -+++ b/drivers/net/wireguard/noise.h -@@ -15,18 +15,14 @@ - #include - #include - --union noise_counter { -- struct { -- u64 counter; -- unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; -- spinlock_t lock; -- } receive; -- atomic64_t 
counter; -+struct noise_replay_counter { -+ u64 counter; -+ spinlock_t lock; -+ unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; - }; - - struct noise_symmetric_key { - u8 key[NOISE_SYMMETRIC_KEY_LEN]; -- union noise_counter counter; - u64 birthdate; - bool is_valid; - }; -@@ -34,7 +30,9 @@ struct noise_symmetric_key { - struct noise_keypair { - struct index_hashtable_entry entry; - struct noise_symmetric_key sending; -+ atomic64_t sending_counter; - struct noise_symmetric_key receiving; -+ struct noise_replay_counter receiving_counter; - __le32 remote_index; - bool i_am_the_initiator; - struct kref refcount; ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -245,20 +245,20 @@ static void keep_key_fresh(struct wg_pee - } - } - --static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key) -+static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) - { - struct scatterlist sg[MAX_SKB_FRAGS + 8]; - struct sk_buff *trailer; - unsigned int offset; - int num_frags; - -- if (unlikely(!key)) -+ if (unlikely(!keypair)) - return false; - -- if (unlikely(!READ_ONCE(key->is_valid) || -- wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) || -- key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) { -- WRITE_ONCE(key->is_valid, false); -+ if (unlikely(!READ_ONCE(keypair->receiving.is_valid) || -+ wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) || -+ keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) { -+ WRITE_ONCE(keypair->receiving.is_valid, false); - return false; - } - -@@ -283,7 +283,7 @@ static bool decrypt_packet(struct sk_buf - - if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, - PACKET_CB(skb)->nonce, -- key->key)) -+ keypair->receiving.key)) - return false; - - /* Another ugly situation of pushing and pulling the header so as to -@@ -298,41 +298,41 @@ static bool decrypt_packet(struct sk_buf - } - - /* This is RFC6479, a 
replay detection bitmap algorithm that avoids bitshifts */ --static bool counter_validate(union noise_counter *counter, u64 their_counter) -+static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter) - { - unsigned long index, index_current, top, i; - bool ret = false; - -- spin_lock_bh(&counter->receive.lock); -+ spin_lock_bh(&counter->lock); - -- if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 || -+ if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 || - their_counter >= REJECT_AFTER_MESSAGES)) - goto out; - - ++their_counter; - - if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < -- counter->receive.counter)) -+ counter->counter)) - goto out; - - index = their_counter >> ilog2(BITS_PER_LONG); - -- if (likely(their_counter > counter->receive.counter)) { -- index_current = counter->receive.counter >> ilog2(BITS_PER_LONG); -+ if (likely(their_counter > counter->counter)) { -+ index_current = counter->counter >> ilog2(BITS_PER_LONG); - top = min_t(unsigned long, index - index_current, - COUNTER_BITS_TOTAL / BITS_PER_LONG); - for (i = 1; i <= top; ++i) -- counter->receive.backtrack[(i + index_current) & -+ counter->backtrack[(i + index_current) & - ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; -- counter->receive.counter = their_counter; -+ counter->counter = their_counter; - } - - index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; - ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), -- &counter->receive.backtrack[index]); -+ &counter->backtrack[index]); - - out: -- spin_unlock_bh(&counter->receive.lock); -+ spin_unlock_bh(&counter->lock); - return ret; - } - -@@ -472,12 +472,12 @@ int wg_packet_rx_poll(struct napi_struct - if (unlikely(state != PACKET_STATE_CRYPTED)) - goto next; - -- if (unlikely(!counter_validate(&keypair->receiving.counter, -+ if (unlikely(!counter_validate(&keypair->receiving_counter, - PACKET_CB(skb)->nonce))) { - net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max 
%llu)\n", - peer->device->dev->name, - PACKET_CB(skb)->nonce, -- keypair->receiving.counter.receive.counter); -+ keypair->receiving_counter.counter); - goto next; - } - -@@ -511,8 +511,8 @@ void wg_packet_decrypt_worker(struct wor - struct sk_buff *skb; - - while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { -- enum packet_state state = likely(decrypt_packet(skb, -- &PACKET_CB(skb)->keypair->receiving)) ? -+ enum packet_state state = -+ likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? - PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; - wg_queue_enqueue_per_peer_napi(skb, state); - if (need_resched()) ---- a/drivers/net/wireguard/selftest/counter.c -+++ b/drivers/net/wireguard/selftest/counter.c -@@ -6,18 +6,24 @@ - #ifdef DEBUG - bool __init wg_packet_counter_selftest(void) - { -+ struct noise_replay_counter *counter; - unsigned int test_num = 0, i; -- union noise_counter counter; - bool success = true; - --#define T_INIT do { \ -- memset(&counter, 0, sizeof(union noise_counter)); \ -- spin_lock_init(&counter.receive.lock); \ -+ counter = kmalloc(sizeof(*counter), GFP_KERNEL); -+ if (unlikely(!counter)) { -+ pr_err("nonce counter self-test malloc: FAIL\n"); -+ return false; -+ } -+ -+#define T_INIT do { \ -+ memset(counter, 0, sizeof(*counter)); \ -+ spin_lock_init(&counter->lock); \ - } while (0) - #define T_LIM (COUNTER_WINDOW_SIZE + 1) - #define T(n, v) do { \ - ++test_num; \ -- if (counter_validate(&counter, n) != (v)) { \ -+ if (counter_validate(counter, n) != (v)) { \ - pr_err("nonce counter self-test %u: FAIL\n", \ - test_num); \ - success = false; \ -@@ -99,6 +105,7 @@ bool __init wg_packet_counter_selftest(v - - if (success) - pr_info("nonce counter self-tests: pass\n"); -+ kfree(counter); - return success; - } - #endif ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -129,7 +129,7 @@ static void keep_key_fresh(struct wg_pee - rcu_read_lock_bh(); - keypair = rcu_dereference_bh(peer->keypairs.current_keypair); - send = 
keypair && READ_ONCE(keypair->sending.is_valid) && -- (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES || -+ (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES || - (keypair->i_am_the_initiator && - wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); - rcu_read_unlock_bh(); -@@ -349,7 +349,6 @@ void wg_packet_purge_staged_packets(stru - - void wg_packet_send_staged_packets(struct wg_peer *peer) - { -- struct noise_symmetric_key *key; - struct noise_keypair *keypair; - struct sk_buff_head packets; - struct sk_buff *skb; -@@ -369,10 +368,9 @@ void wg_packet_send_staged_packets(struc - rcu_read_unlock_bh(); - if (unlikely(!keypair)) - goto out_nokey; -- key = &keypair->sending; -- if (unlikely(!READ_ONCE(key->is_valid))) -+ if (unlikely(!READ_ONCE(keypair->sending.is_valid))) - goto out_nokey; -- if (unlikely(wg_birthdate_has_expired(key->birthdate, -+ if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, - REJECT_AFTER_TIME))) - goto out_invalid; - -@@ -387,7 +385,7 @@ void wg_packet_send_staged_packets(struc - */ - PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); - PACKET_CB(skb)->nonce = -- atomic64_inc_return(&key->counter.counter) - 1; -+ atomic64_inc_return(&keypair->sending_counter) - 1; - if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) - goto out_invalid; - } -@@ -399,7 +397,7 @@ void wg_packet_send_staged_packets(struc - return; - - out_invalid: -- WRITE_ONCE(key->is_valid, false); -+ WRITE_ONCE(keypair->sending.is_valid, false); - out_nokey: - wg_noise_keypair_put(keypair, false); - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0106-wireguard-noise-do-not-assign-initiation-time-in-if-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0106-wireguard-noise-do-not-assign-initiation-time-in-if-.patch deleted file mode 100644 index a53c76470..000000000 --- 
a/feeds/ipq807x/ipq807x/patches/080-wireguard-0106-wireguard-noise-do-not-assign-initiation-time-in-if-.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Frank Werner-Krippendorf -Date: Tue, 23 Jun 2020 03:59:44 -0600 -Subject: [PATCH] wireguard: noise: do not assign initiation time in if - condition - -commit 558b353c9c2a717509f291c066c6bd8f5f5e21be upstream. - -Fixes an error condition reported by checkpatch.pl which caused by -assigning a variable in an if condition in wg_noise_handshake_consume_ -initiation(). - -Signed-off-by: Frank Werner-Krippendorf -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/noise.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -617,8 +617,8 @@ wg_noise_handshake_consume_initiation(st - memcpy(handshake->hash, hash, NOISE_HASH_LEN); - memcpy(handshake->chaining_key, chaining_key, NOISE_HASH_LEN); - handshake->remote_index = src->sender_index; -- if ((s64)(handshake->last_initiation_consumption - -- (initiation_consumption = ktime_get_coarse_boottime_ns())) < 0) -+ initiation_consumption = ktime_get_coarse_boottime_ns(); -+ if ((s64)(handshake->last_initiation_consumption - initiation_consumption) < 0) - handshake->last_initiation_consumption = initiation_consumption; - handshake->state = HANDSHAKE_CONSUMED_INITIATION; - up_write(&handshake->lock); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0107-wireguard-device-avoid-circular-netns-references.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0107-wireguard-device-avoid-circular-netns-references.patch deleted file mode 100644 index 013023a3e..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0107-wireguard-device-avoid-circular-netns-references.patch +++ /dev/null @@ -1,296 +0,0 @@ -From 
0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Tue, 23 Jun 2020 03:59:45 -0600 -Subject: [PATCH] wireguard: device: avoid circular netns references - -commit 900575aa33a3eaaef802b31de187a85c4a4b4bd0 upstream. - -Before, we took a reference to the creating netns if the new netns was -different. This caused issues with circular references, with two -wireguard interfaces swapping namespaces. The solution is to rather not -take any extra references at all, but instead simply invalidate the -creating netns pointer when that netns is deleted. - -In order to prevent this from happening again, this commit improves the -rough object leak tracking by allowing it to account for created and -destroyed interfaces, aside from just peers and keys. That then makes it -possible to check for the object leak when having two interfaces take a -reference to each others' namespaces. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/device.c | 58 ++++++++++------------ - drivers/net/wireguard/device.h | 3 +- - drivers/net/wireguard/netlink.c | 14 ++++-- - drivers/net/wireguard/socket.c | 25 +++++++--- - tools/testing/selftests/wireguard/netns.sh | 13 ++++- - 5 files changed, 67 insertions(+), 46 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -45,17 +45,18 @@ static int wg_open(struct net_device *de - if (dev_v6) - dev_v6->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_NONE; - -+ mutex_lock(&wg->device_update_lock); - ret = wg_socket_init(wg, wg->incoming_port); - if (ret < 0) -- return ret; -- mutex_lock(&wg->device_update_lock); -+ goto out; - list_for_each_entry(peer, &wg->peer_list, peer_list) { - wg_packet_send_staged_packets(peer); - if (peer->persistent_keepalive_interval) - wg_packet_send_keepalive(peer); - } -+out: - mutex_unlock(&wg->device_update_lock); -- return 0; -+ return ret; - } - - #ifdef CONFIG_PM_SLEEP -@@ -225,6 +226,7 @@ static void wg_destruct(struct net_devic - list_del(&wg->device_list); - rtnl_unlock(); - mutex_lock(&wg->device_update_lock); -+ rcu_assign_pointer(wg->creating_net, NULL); - wg->incoming_port = 0; - wg_socket_reinit(wg, NULL, NULL); - /* The final references are cleared in the below calls to destroy_workqueue. 
*/ -@@ -240,13 +242,11 @@ static void wg_destruct(struct net_devic - skb_queue_purge(&wg->incoming_handshakes); - free_percpu(dev->tstats); - free_percpu(wg->incoming_handshakes_worker); -- if (wg->have_creating_net_ref) -- put_net(wg->creating_net); - kvfree(wg->index_hashtable); - kvfree(wg->peer_hashtable); - mutex_unlock(&wg->device_update_lock); - -- pr_debug("%s: Interface deleted\n", dev->name); -+ pr_debug("%s: Interface destroyed\n", dev->name); - free_netdev(dev); - } - -@@ -292,7 +292,7 @@ static int wg_newlink(struct net *src_ne - struct wg_device *wg = netdev_priv(dev); - int ret = -ENOMEM; - -- wg->creating_net = src_net; -+ rcu_assign_pointer(wg->creating_net, src_net); - init_rwsem(&wg->static_identity.lock); - mutex_init(&wg->socket_update_lock); - mutex_init(&wg->device_update_lock); -@@ -393,30 +393,26 @@ static struct rtnl_link_ops link_ops __r - .newlink = wg_newlink, - }; - --static int wg_netdevice_notification(struct notifier_block *nb, -- unsigned long action, void *data) -+static void wg_netns_pre_exit(struct net *net) - { -- struct net_device *dev = ((struct netdev_notifier_info *)data)->dev; -- struct wg_device *wg = netdev_priv(dev); -- -- ASSERT_RTNL(); -- -- if (action != NETDEV_REGISTER || dev->netdev_ops != &netdev_ops) -- return 0; -+ struct wg_device *wg; - -- if (dev_net(dev) == wg->creating_net && wg->have_creating_net_ref) { -- put_net(wg->creating_net); -- wg->have_creating_net_ref = false; -- } else if (dev_net(dev) != wg->creating_net && -- !wg->have_creating_net_ref) { -- wg->have_creating_net_ref = true; -- get_net(wg->creating_net); -+ rtnl_lock(); -+ list_for_each_entry(wg, &device_list, device_list) { -+ if (rcu_access_pointer(wg->creating_net) == net) { -+ pr_debug("%s: Creating namespace exiting\n", wg->dev->name); -+ netif_carrier_off(wg->dev); -+ mutex_lock(&wg->device_update_lock); -+ rcu_assign_pointer(wg->creating_net, NULL); -+ wg_socket_reinit(wg, NULL, NULL); -+ mutex_unlock(&wg->device_update_lock); -+ } - } 
-- return 0; -+ rtnl_unlock(); - } - --static struct notifier_block netdevice_notifier = { -- .notifier_call = wg_netdevice_notification -+static struct pernet_operations pernet_ops = { -+ .pre_exit = wg_netns_pre_exit - }; - - int __init wg_device_init(void) -@@ -429,18 +425,18 @@ int __init wg_device_init(void) - return ret; - #endif - -- ret = register_netdevice_notifier(&netdevice_notifier); -+ ret = register_pernet_device(&pernet_ops); - if (ret) - goto error_pm; - - ret = rtnl_link_register(&link_ops); - if (ret) -- goto error_netdevice; -+ goto error_pernet; - - return 0; - --error_netdevice: -- unregister_netdevice_notifier(&netdevice_notifier); -+error_pernet: -+ unregister_pernet_device(&pernet_ops); - error_pm: - #ifdef CONFIG_PM_SLEEP - unregister_pm_notifier(&pm_notifier); -@@ -451,7 +447,7 @@ error_pm: - void wg_device_uninit(void) - { - rtnl_link_unregister(&link_ops); -- unregister_netdevice_notifier(&netdevice_notifier); -+ unregister_pernet_device(&pernet_ops); - #ifdef CONFIG_PM_SLEEP - unregister_pm_notifier(&pm_notifier); - #endif ---- a/drivers/net/wireguard/device.h -+++ b/drivers/net/wireguard/device.h -@@ -40,7 +40,7 @@ struct wg_device { - struct net_device *dev; - struct crypt_queue encrypt_queue, decrypt_queue; - struct sock __rcu *sock4, *sock6; -- struct net *creating_net; -+ struct net __rcu *creating_net; - struct noise_static_identity static_identity; - struct workqueue_struct *handshake_receive_wq, *handshake_send_wq; - struct workqueue_struct *packet_crypt_wq; -@@ -56,7 +56,6 @@ struct wg_device { - unsigned int num_peers, device_update_gen; - u32 fwmark; - u16 incoming_port; -- bool have_creating_net_ref; - }; - - int wg_device_init(void); ---- a/drivers/net/wireguard/netlink.c -+++ b/drivers/net/wireguard/netlink.c -@@ -517,11 +517,15 @@ static int wg_set_device(struct sk_buff - if (flags & ~__WGDEVICE_F_ALL) - goto out; - -- ret = -EPERM; -- if ((info->attrs[WGDEVICE_A_LISTEN_PORT] || -- info->attrs[WGDEVICE_A_FWMARK]) && -- 
!ns_capable(wg->creating_net->user_ns, CAP_NET_ADMIN)) -- goto out; -+ if (info->attrs[WGDEVICE_A_LISTEN_PORT] || info->attrs[WGDEVICE_A_FWMARK]) { -+ struct net *net; -+ rcu_read_lock(); -+ net = rcu_dereference(wg->creating_net); -+ ret = !net || !ns_capable(net->user_ns, CAP_NET_ADMIN) ? -EPERM : 0; -+ rcu_read_unlock(); -+ if (ret) -+ goto out; -+ } - - ++wg->device_update_gen; - ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -347,6 +347,7 @@ static void set_sock_opts(struct socket - - int wg_socket_init(struct wg_device *wg, u16 port) - { -+ struct net *net; - int ret; - struct udp_tunnel_sock_cfg cfg = { - .sk_user_data = wg, -@@ -371,37 +372,47 @@ int wg_socket_init(struct wg_device *wg, - }; - #endif - -+ rcu_read_lock(); -+ net = rcu_dereference(wg->creating_net); -+ net = net ? maybe_get_net(net) : NULL; -+ rcu_read_unlock(); -+ if (unlikely(!net)) -+ return -ENONET; -+ - #if IS_ENABLED(CONFIG_IPV6) - retry: - #endif - -- ret = udp_sock_create(wg->creating_net, &port4, &new4); -+ ret = udp_sock_create(net, &port4, &new4); - if (ret < 0) { - pr_err("%s: Could not create IPv4 socket\n", wg->dev->name); -- return ret; -+ goto out; - } - set_sock_opts(new4); -- setup_udp_tunnel_sock(wg->creating_net, new4, &cfg); -+ setup_udp_tunnel_sock(net, new4, &cfg); - - #if IS_ENABLED(CONFIG_IPV6) - if (ipv6_mod_enabled()) { - port6.local_udp_port = inet_sk(new4->sk)->inet_sport; -- ret = udp_sock_create(wg->creating_net, &port6, &new6); -+ ret = udp_sock_create(net, &port6, &new6); - if (ret < 0) { - udp_tunnel_sock_release(new4); - if (ret == -EADDRINUSE && !port && retries++ < 100) - goto retry; - pr_err("%s: Could not create IPv6 socket\n", - wg->dev->name); -- return ret; -+ goto out; - } - set_sock_opts(new6); -- setup_udp_tunnel_sock(wg->creating_net, new6, &cfg); -+ setup_udp_tunnel_sock(net, new6, &cfg); - } - #endif - - wg_socket_reinit(wg, new4->sk, new6 ? 
new6->sk : NULL); -- return 0; -+ ret = 0; -+out: -+ put_net(net); -+ return ret; - } - - void wg_socket_reinit(struct wg_device *wg, struct sock *new4, ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -587,9 +587,20 @@ ip0 link set wg0 up - kill $ncat_pid - ip0 link del wg0 - -+# Ensure there aren't circular reference loops -+ip1 link add wg1 type wireguard -+ip2 link add wg2 type wireguard -+ip1 link set wg1 netns $netns2 -+ip2 link set wg2 netns $netns1 -+pp ip netns delete $netns1 -+pp ip netns delete $netns2 -+pp ip netns add $netns1 -+pp ip netns add $netns2 -+ -+sleep 2 # Wait for cleanup and grace periods - declare -A objects - while read -t 0.1 -r line 2>/dev/null || [[ $? -ne 142 ]]; do -- [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ [0-9]+)\ .*(created|destroyed).* ]] || continue -+ [[ $line =~ .*(wg[0-9]+:\ [A-Z][a-z]+\ ?[0-9]*)\ .*(created|destroyed).* ]] || continue - objects["${BASH_REMATCH[1]}"]+="${BASH_REMATCH[2]}" - done < /dev/kmsg - alldeleted=1 diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0108-wireguard-receive-account-for-napi_gro_receive-never.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0108-wireguard-receive-account-for-napi_gro_receive-never.patch deleted file mode 100644 index eceb0b925..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0108-wireguard-receive-account-for-napi_gro_receive-never.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 24 Jun 2020 16:06:03 -0600 -Subject: [PATCH] wireguard: receive: account for napi_gro_receive never - returning GRO_DROP - -commit df08126e3833e9dca19e2407db5f5860a7c194fb upstream. - -The napi_gro_receive function no longer returns GRO_DROP ever, making -handling GRO_DROP dead code. This commit removes that dead code. 
-Further, it's not even clear that device drivers have any business in -taking action after passing off received packets; that's arguably out of -their hands. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Fixes: 6570bc79c0df ("net: core: use listified Rx for GRO_NORMAL in napi_gro_receive()") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/receive.c | 10 ++-------- - 1 file changed, 2 insertions(+), 8 deletions(-) - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -414,14 +414,8 @@ static void wg_packet_consume_data_done( - if (unlikely(routed_peer != peer)) - goto dishonest_packet_peer; - -- if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) { -- ++dev->stats.rx_dropped; -- net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n", -- dev->name, peer->internal_id, -- &peer->endpoint.addr); -- } else { -- update_rx_stats(peer, message_data_len(len_before_trim)); -- } -+ napi_gro_receive(&peer->napi, skb); -+ update_rx_stats(peer, message_data_len(len_before_trim)); - return; - - dishonest_packet_peer: diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0109-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0109-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch deleted file mode 100644 index cfd6b1457..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0109-net-ip_tunnel-add-header_ops-for-layer-3-devices.patch +++ /dev/null @@ -1,58 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 29 Jun 2020 19:06:18 -0600 -Subject: [PATCH] net: ip_tunnel: add header_ops for layer 3 devices - -commit 2606aff916854b61234bf85001be9777bab2d5f8 upstream. 
- -Some devices that take straight up layer 3 packets benefit from having a -shared header_ops so that AF_PACKET sockets can inject packets that are -recognized. This shared infrastructure will be used by other drivers -that currently can't inject packets using AF_PACKET. It also exposes the -parser function, as it is useful in standalone form too. - -Signed-off-by: Jason A. Donenfeld -Acked-by: Willem de Bruijn -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - include/net/ip_tunnels.h | 3 +++ - net/ipv4/ip_tunnel_core.c | 18 ++++++++++++++++++ - 2 files changed, 21 insertions(+) - ---- a/include/net/ip_tunnels.h -+++ b/include/net/ip_tunnels.h -@@ -289,6 +289,9 @@ int ip_tunnel_newlink(struct net_device - struct ip_tunnel_parm *p, __u32 fwmark); - void ip_tunnel_setup(struct net_device *dev, unsigned int net_id); - -+extern const struct header_ops ip_tunnel_header_ops; -+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb); -+ - struct ip_tunnel_encap_ops { - size_t (*encap_hlen)(struct ip_tunnel_encap *e); - int (*build_header)(struct sk_buff *skb, struct ip_tunnel_encap *e, ---- a/net/ipv4/ip_tunnel_core.c -+++ b/net/ipv4/ip_tunnel_core.c -@@ -446,3 +446,21 @@ void ip_tunnel_unneed_metadata(void) - static_branch_dec(&ip_tunnel_metadata_cnt); - } - EXPORT_SYMBOL_GPL(ip_tunnel_unneed_metadata); -+ -+/* Returns either the correct skb->protocol value, or 0 if invalid. 
*/ -+__be16 ip_tunnel_parse_protocol(const struct sk_buff *skb) -+{ -+ if (skb_network_header(skb) >= skb->head && -+ (skb_network_header(skb) + sizeof(struct iphdr)) <= skb_tail_pointer(skb) && -+ ip_hdr(skb)->version == 4) -+ return htons(ETH_P_IP); -+ if (skb_network_header(skb) >= skb->head && -+ (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= skb_tail_pointer(skb) && -+ ipv6_hdr(skb)->version == 6) -+ return htons(ETH_P_IPV6); -+ return 0; -+} -+EXPORT_SYMBOL(ip_tunnel_parse_protocol); -+ -+const struct header_ops ip_tunnel_header_ops = { .parse_protocol = ip_tunnel_parse_protocol }; -+EXPORT_SYMBOL(ip_tunnel_header_ops); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0110-wireguard-implement-header_ops-parse_protocol-for-AF.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0110-wireguard-implement-header_ops-parse_protocol-for-AF.patch deleted file mode 100644 index 415ecffee..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0110-wireguard-implement-header_ops-parse_protocol-for-AF.patch +++ /dev/null @@ -1,36 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 29 Jun 2020 19:06:20 -0600 -Subject: [PATCH] wireguard: implement header_ops->parse_protocol for AF_PACKET - -commit 01a4967c71c004f8ecad4ab57021348636502fa9 upstream. - -WireGuard uses skb->protocol to determine packet type, and bails out if -it's not set or set to something it's not expecting. For AF_PACKET -injection, we need to support its call chain of: - - packet_sendmsg -> packet_snd -> packet_parse_headers -> - dev_parse_header_protocol -> parse_protocol - -Without a valid parse_protocol, this returns zero, and wireguard then -rejects the skb. So, this wires up the ip_tunnel handler for layer 3 -packets for that case. - -Reported-by: Hans Wippel -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/device.c | 1 + - 1 file changed, 1 insertion(+) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -262,6 +262,7 @@ static void wg_setup(struct net_device * - max(sizeof(struct ipv6hdr), sizeof(struct iphdr)); - - dev->netdev_ops = &netdev_ops; -+ dev->header_ops = &ip_tunnel_header_ops; - dev->hard_header_len = 0; - dev->addr_len = 0; - dev->needed_headroom = DATA_PACKET_HEAD_ROOM; diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0111-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0111-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch deleted file mode 100644 index a777732ce..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0111-wireguard-queueing-make-use-of-ip_tunnel_parse_proto.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 29 Jun 2020 19:06:21 -0600 -Subject: [PATCH] wireguard: queueing: make use of ip_tunnel_parse_protocol - -commit 1a574074ae7d1d745c16f7710655f38a53174c27 upstream. - -Now that wg_examine_packet_protocol has been added for general -consumption as ip_tunnel_parse_protocol, it's possible to remove -wg_examine_packet_protocol and simply use the new -ip_tunnel_parse_protocol function directly. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/queueing.h | 19 ++----------------- - drivers/net/wireguard/receive.c | 2 +- - 2 files changed, 3 insertions(+), 18 deletions(-) - ---- a/drivers/net/wireguard/queueing.h -+++ b/drivers/net/wireguard/queueing.h -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - - struct wg_device; - struct wg_peer; -@@ -65,25 +66,9 @@ struct packet_cb { - #define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb)) - #define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer) - --/* Returns either the correct skb->protocol value, or 0 if invalid. */ --static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb) --{ -- if (skb_network_header(skb) >= skb->head && -- (skb_network_header(skb) + sizeof(struct iphdr)) <= -- skb_tail_pointer(skb) && -- ip_hdr(skb)->version == 4) -- return htons(ETH_P_IP); -- if (skb_network_header(skb) >= skb->head && -- (skb_network_header(skb) + sizeof(struct ipv6hdr)) <= -- skb_tail_pointer(skb) && -- ipv6_hdr(skb)->version == 6) -- return htons(ETH_P_IPV6); -- return 0; --} -- - static inline bool wg_check_packet_protocol(struct sk_buff *skb) - { -- __be16 real_protocol = wg_examine_packet_protocol(skb); -+ __be16 real_protocol = ip_tunnel_parse_protocol(skb); - return real_protocol && skb->protocol == real_protocol; - } - ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -387,7 +387,7 @@ static void wg_packet_consume_data_done( - */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - skb->csum_level = ~0; /* All levels */ -- skb->protocol = wg_examine_packet_protocol(skb); -+ skb->protocol = ip_tunnel_parse_protocol(skb); - if (skb->protocol == htons(ETH_P_IP)) { - len = ntohs(ip_hdr(skb)->tot_len); - if (unlikely(len < sizeof(struct iphdr))) diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0112-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0112-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch deleted file 
mode 100644 index 4b2712bb2..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0112-netlink-consistently-use-NLA_POLICY_EXACT_LEN.patch +++ /dev/null @@ -1,49 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Johannes Berg -Date: Tue, 18 Aug 2020 10:17:31 +0200 -Subject: [PATCH] netlink: consistently use NLA_POLICY_EXACT_LEN() - -commit 8140860c817f3e9f78bcd1e420b9777ddcbaa629 upstream. - -Change places that open-code NLA_POLICY_EXACT_LEN() to -use the macro instead, giving us flexibility in how we -handle the details of the macro. - -Signed-off-by: Johannes Berg -Acked-by: Matthieu Baerts -Signed-off-by: David S. Miller -[Jason: only picked the drivers/net/wireguard/* part] -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/netlink.c | 10 +++++----- - 1 file changed, 5 insertions(+), 5 deletions(-) - ---- a/drivers/net/wireguard/netlink.c -+++ b/drivers/net/wireguard/netlink.c -@@ -22,8 +22,8 @@ static struct genl_family genl_family; - static const struct nla_policy device_policy[WGDEVICE_A_MAX + 1] = { - [WGDEVICE_A_IFINDEX] = { .type = NLA_U32 }, - [WGDEVICE_A_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 }, -- [WGDEVICE_A_PRIVATE_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -- [WGDEVICE_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -+ [WGDEVICE_A_PRIVATE_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), -+ [WGDEVICE_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), - [WGDEVICE_A_FLAGS] = { .type = NLA_U32 }, - [WGDEVICE_A_LISTEN_PORT] = { .type = NLA_U16 }, - [WGDEVICE_A_FWMARK] = { .type = NLA_U32 }, -@@ -31,12 +31,12 @@ static const struct nla_policy device_po - }; - - static const struct nla_policy peer_policy[WGPEER_A_MAX + 1] = { -- [WGPEER_A_PUBLIC_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_PUBLIC_KEY_LEN }, -- [WGPEER_A_PRESHARED_KEY] = { .type = NLA_EXACT_LEN, .len = NOISE_SYMMETRIC_KEY_LEN }, -+ [WGPEER_A_PUBLIC_KEY] = 
NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), -+ [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN), - [WGPEER_A_FLAGS] = { .type = NLA_U32 }, - [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) }, - [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 }, -- [WGPEER_A_LAST_HANDSHAKE_TIME] = { .type = NLA_EXACT_LEN, .len = sizeof(struct __kernel_timespec) }, -+ [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)), - [WGPEER_A_RX_BYTES] = { .type = NLA_U64 }, - [WGPEER_A_TX_BYTES] = { .type = NLA_U64 }, - [WGPEER_A_ALLOWEDIPS] = { .type = NLA_NESTED }, diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0113-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0113-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch deleted file mode 100644 index 4b414bc30..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0113-netlink-consistently-use-NLA_POLICY_MIN_LEN.patch +++ /dev/null @@ -1,39 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Johannes Berg -Date: Tue, 18 Aug 2020 10:17:32 +0200 -Subject: [PATCH] netlink: consistently use NLA_POLICY_MIN_LEN() - -commit bc0435855041d7fff0b83dd992fc4be34aa11afb upstream. - -Change places that open-code NLA_POLICY_MIN_LEN() to -use the macro instead, giving us flexibility in how we -handle the details of the macro. - -Signed-off-by: Johannes Berg -Signed-off-by: David S. Miller -[Jason: only picked the drivers/net/wireguard/* part] -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/netlink.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/netlink.c -+++ b/drivers/net/wireguard/netlink.c -@@ -34,7 +34,7 @@ static const struct nla_policy peer_poli - [WGPEER_A_PUBLIC_KEY] = NLA_POLICY_EXACT_LEN(NOISE_PUBLIC_KEY_LEN), - [WGPEER_A_PRESHARED_KEY] = NLA_POLICY_EXACT_LEN(NOISE_SYMMETRIC_KEY_LEN), - [WGPEER_A_FLAGS] = { .type = NLA_U32 }, -- [WGPEER_A_ENDPOINT] = { .type = NLA_MIN_LEN, .len = sizeof(struct sockaddr) }, -+ [WGPEER_A_ENDPOINT] = NLA_POLICY_MIN_LEN(sizeof(struct sockaddr)), - [WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL] = { .type = NLA_U16 }, - [WGPEER_A_LAST_HANDSHAKE_TIME] = NLA_POLICY_EXACT_LEN(sizeof(struct __kernel_timespec)), - [WGPEER_A_RX_BYTES] = { .type = NLA_U64 }, -@@ -45,7 +45,7 @@ static const struct nla_policy peer_poli - - static const struct nla_policy allowedip_policy[WGALLOWEDIP_A_MAX + 1] = { - [WGALLOWEDIP_A_FAMILY] = { .type = NLA_U16 }, -- [WGALLOWEDIP_A_IPADDR] = { .type = NLA_MIN_LEN, .len = sizeof(struct in_addr) }, -+ [WGALLOWEDIP_A_IPADDR] = NLA_POLICY_MIN_LEN(sizeof(struct in_addr)), - [WGALLOWEDIP_A_CIDR_MASK] = { .type = NLA_U8 } - }; - diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0114-wireguard-noise-take-lock-when-removing-handshake-en.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0114-wireguard-noise-take-lock-when-removing-handshake-en.patch deleted file mode 100644 index e80528c91..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0114-wireguard-noise-take-lock-when-removing-handshake-en.patch +++ /dev/null @@ -1,127 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 9 Sep 2020 13:58:14 +0200 -Subject: [PATCH] wireguard: noise: take lock when removing handshake entry - from table - -commit 9179ba31367bcf481c3c79b5f028c94faad9f30a upstream. 
- -Eric reported that syzkaller found a race of this variety: - -CPU 1 CPU 2 --------------------------------------------|--------------------------------------- -wg_index_hashtable_replace(old, ...) | - if (hlist_unhashed(&old->index_hash)) | - | wg_index_hashtable_remove(old) - | hlist_del_init_rcu(&old->index_hash) - | old->index_hash.pprev = NULL - hlist_replace_rcu(&old->index_hash, ...) | - *old->index_hash.pprev | - -Syzbot wasn't actually able to reproduce this more than once or create a -reproducer, because the race window between checking "hlist_unhashed" and -calling "hlist_replace_rcu" is just so small. Adding an mdelay(5) or -similar there helps make this demonstrable using this simple script: - - #!/bin/bash - set -ex - trap 'kill $pid1; kill $pid2; ip link del wg0; ip link del wg1' EXIT - ip link add wg0 type wireguard - ip link add wg1 type wireguard - wg set wg0 private-key <(wg genkey) listen-port 9999 - wg set wg1 private-key <(wg genkey) peer $(wg show wg0 public-key) endpoint 127.0.0.1:9999 persistent-keepalive 1 - wg set wg0 peer $(wg show wg1 public-key) - ip link set wg0 up - yes link set wg1 up | ip -force -batch - & - pid1=$! - yes link set wg1 down | ip -force -batch - & - pid2=$! - wait - -The fundumental underlying problem is that we permit calls to wg_index_ -hashtable_remove(handshake.entry) without requiring the caller to take -the handshake mutex that is intended to protect members of handshake -during mutations. This is consistently the case with calls to wg_index_ -hashtable_insert(handshake.entry) and wg_index_hashtable_replace( -handshake.entry), but it's missing from a pertinent callsite of wg_ -index_hashtable_remove(handshake.entry). So, this patch makes sure that -mutex is taken. 
- -The original code was a little bit funky though, in the form of: - - remove(handshake.entry) - lock(), memzero(handshake.some_members), unlock() - remove(handshake.entry) - -The original intention of that double removal pattern outside the lock -appears to be some attempt to prevent insertions that might happen while -locks are dropped during expensive crypto operations, but actually, all -callers of wg_index_hashtable_insert(handshake.entry) take the write -lock and then explicitly check handshake.state, as they should, which -the aforementioned memzero clears, which means an insertion should -already be impossible. And regardless, the original intention was -necessarily racy, since it wasn't guaranteed that something else would -run after the unlock() instead of after the remove(). So, from a -soundness perspective, it seems positive to remove what looks like a -hack at best. - -The crash from both syzbot and from the script above is as follows: - - general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN - KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] - CPU: 0 PID: 7395 Comm: kworker/0:3 Not tainted 5.9.0-rc4-syzkaller #0 - Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 - Workqueue: wg-kex-wg1 wg_packet_handshake_receive_worker - RIP: 0010:hlist_replace_rcu include/linux/rculist.h:505 [inline] - RIP: 0010:wg_index_hashtable_replace+0x176/0x330 drivers/net/wireguard/peerlookup.c:174 - Code: 00 fc ff df 48 89 f9 48 c1 e9 03 80 3c 01 00 0f 85 44 01 00 00 48 b9 00 00 00 00 00 fc ff df 48 8b 45 10 48 89 c6 48 c1 ee 03 <80> 3c 0e 00 0f 85 06 01 00 00 48 85 d2 4c 89 28 74 47 e8 a3 4f b5 - RSP: 0018:ffffc90006a97bf8 EFLAGS: 00010246 - RAX: 0000000000000000 RBX: ffff888050ffc4f8 RCX: dffffc0000000000 - RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffff88808e04e010 - RBP: ffff88808e04e000 R08: 0000000000000001 R09: ffff8880543d0000 - R10: 
ffffed100a87a000 R11: 000000000000016e R12: ffff8880543d0000 - R13: ffff88808e04e008 R14: ffff888050ffc508 R15: ffff888050ffc500 - FS: 0000000000000000(0000) GS:ffff8880ae600000(0000) knlGS:0000000000000000 - CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 - CR2: 00000000f5505db0 CR3: 0000000097cf7000 CR4: 00000000001526f0 - DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 - DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 - Call Trace: - wg_noise_handshake_begin_session+0x752/0xc9a drivers/net/wireguard/noise.c:820 - wg_receive_handshake_packet drivers/net/wireguard/receive.c:183 [inline] - wg_packet_handshake_receive_worker+0x33b/0x730 drivers/net/wireguard/receive.c:220 - process_one_work+0x94c/0x1670 kernel/workqueue.c:2269 - worker_thread+0x64c/0x1120 kernel/workqueue.c:2415 - kthread+0x3b5/0x4a0 kernel/kthread.c:292 - ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:294 - -Reported-by: syzbot -Reported-by: Eric Dumazet -Link: https://lore.kernel.org/wireguard/20200908145911.4090480-1-edumazet@google.com/ -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/noise.c | 5 +---- - 1 file changed, 1 insertion(+), 4 deletions(-) - ---- a/drivers/net/wireguard/noise.c -+++ b/drivers/net/wireguard/noise.c -@@ -87,15 +87,12 @@ static void handshake_zero(struct noise_ - - void wg_noise_handshake_clear(struct noise_handshake *handshake) - { -+ down_write(&handshake->lock); - wg_index_hashtable_remove( - handshake->entry.peer->device->index_hashtable, - &handshake->entry); -- down_write(&handshake->lock); - handshake_zero(handshake); - up_write(&handshake->lock); -- wg_index_hashtable_remove( -- handshake->entry.peer->device->index_hashtable, -- &handshake->entry); - } - - static struct noise_keypair *keypair_create(struct wg_peer *peer) diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0115-wireguard-peerlookup-take-lock-before-checking-hash-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0115-wireguard-peerlookup-take-lock-before-checking-hash-.patch deleted file mode 100644 index e7f46ddf9..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0115-wireguard-peerlookup-take-lock-before-checking-hash-.patch +++ /dev/null @@ -1,62 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Wed, 9 Sep 2020 13:58:15 +0200 -Subject: [PATCH] wireguard: peerlookup: take lock before checking hash in - replace operation - -commit 6147f7b1e90ff09bd52afc8b9206a7fcd133daf7 upstream. - -Eric's suggested fix for the previous commit's mentioned race condition -was to simply take the table->lock in wg_index_hashtable_replace(). The -table->lock of the hash table is supposed to protect the bucket heads, -not the entires, but actually, since all the mutator functions are -already taking it, it makes sense to take it too for the test to -hlist_unhashed, as a defense in depth measure, so that it no longer -races with deletions, regardless of what other locks are protecting -individual entries. 
This is sensible from a performance perspective -because, as Eric pointed out, the case of being unhashed is already the -unlikely case, so this won't add common contention. And comparing -instructions, this basically doesn't make much of a difference other -than pushing and popping %r13, used by the new `bool ret`. More -generally, I like the idea of locking consistency across table mutator -functions, and this might let me rest slightly easier at night. - -Suggested-by: Eric Dumazet -Link: https://lore.kernel.org/wireguard/20200908145911.4090480-1-edumazet@google.com/ -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/peerlookup.c | 11 ++++++++--- - 1 file changed, 8 insertions(+), 3 deletions(-) - ---- a/drivers/net/wireguard/peerlookup.c -+++ b/drivers/net/wireguard/peerlookup.c -@@ -167,9 +167,13 @@ bool wg_index_hashtable_replace(struct i - struct index_hashtable_entry *old, - struct index_hashtable_entry *new) - { -- if (unlikely(hlist_unhashed(&old->index_hash))) -- return false; -+ bool ret; -+ - spin_lock_bh(&table->lock); -+ ret = !hlist_unhashed(&old->index_hash); -+ if (unlikely(!ret)) -+ goto out; -+ - new->index = old->index; - hlist_replace_rcu(&old->index_hash, &new->index_hash); - -@@ -180,8 +184,9 @@ bool wg_index_hashtable_replace(struct i - * simply gets dropped, which isn't terrible. 
- */ - INIT_HLIST_NODE(&old->index_hash); -+out: - spin_unlock_bh(&table->lock); -- return true; -+ return ret; - } - - void wg_index_hashtable_remove(struct index_hashtable *table, diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0116-wireguard-selftests-check-that-route_me_harder-packe.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0116-wireguard-selftests-check-that-route_me_harder-packe.patch deleted file mode 100644 index 09c1b0b8f..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0116-wireguard-selftests-check-that-route_me_harder-packe.patch +++ /dev/null @@ -1,56 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Thu, 29 Oct 2020 03:56:05 +0100 -Subject: [PATCH] wireguard: selftests: check that route_me_harder packets use - the right sk - -commit af8afcf1fdd5f365f70e2386c2d8c7a1abd853d7 upstream. - -If netfilter changes the packet mark, the packet is rerouted. The -ip_route_me_harder family of functions fails to use the right sk, opting -to instead use skb->sk, resulting in a routing loop when used with -tunnels. With the next change fixing this issue in netfilter, test for -the relevant condition inside our test suite, since wireguard was where -the bug was discovered. - -Reported-by: Chen Minqiang -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Pablo Neira Ayuso -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/netns.sh | 8 ++++++++ - tools/testing/selftests/wireguard/qemu/kernel.config | 2 ++ - 2 files changed, 10 insertions(+) - ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -316,6 +316,14 @@ pp sleep 3 - n2 ping -W 1 -c 1 192.168.241.1 - n1 wg set wg0 peer "$pub2" persistent-keepalive 0 - -+# Test that sk_bound_dev_if works -+n1 ping -I wg0 -c 1 -W 1 192.168.241.2 -+# What about when the mark changes and the packet must be rerouted? 
-+n1 iptables -t mangle -I OUTPUT -j MARK --set-xmark 1 -+n1 ping -c 1 -W 1 192.168.241.2 # First the boring case -+n1 ping -I wg0 -c 1 -W 1 192.168.241.2 # Then the sk_bound_dev_if case -+n1 iptables -t mangle -D OUTPUT -j MARK --set-xmark 1 -+ - # Test that onion routing works, even when it loops - n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5 - ip1 addr add 192.168.242.1/24 dev wg0 ---- a/tools/testing/selftests/wireguard/qemu/kernel.config -+++ b/tools/testing/selftests/wireguard/qemu/kernel.config -@@ -18,10 +18,12 @@ CONFIG_NF_NAT=y - CONFIG_NETFILTER_XTABLES=y - CONFIG_NETFILTER_XT_NAT=y - CONFIG_NETFILTER_XT_MATCH_LENGTH=y -+CONFIG_NETFILTER_XT_MARK=y - CONFIG_NF_CONNTRACK_IPV4=y - CONFIG_NF_NAT_IPV4=y - CONFIG_IP_NF_IPTABLES=y - CONFIG_IP_NF_FILTER=y -+CONFIG_IP_NF_MANGLE=y - CONFIG_IP_NF_NAT=y - CONFIG_IP_ADVANCED_ROUTER=y - CONFIG_IP_MULTIPLE_TABLES=y diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0117-wireguard-avoid-double-unlikely-notation-when-using-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0117-wireguard-avoid-double-unlikely-notation-when-using-.patch deleted file mode 100644 index 7dfc1bb91..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0117-wireguard-avoid-double-unlikely-notation-when-using-.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Antonio Quartulli -Date: Mon, 22 Feb 2021 17:25:43 +0100 -Subject: [PATCH] wireguard: avoid double unlikely() notation when using - IS_ERR() - -commit 30ac4e2f54ec067b7b9ca0db27e75681581378d6 upstream. - -The definition of IS_ERR() already applies the unlikely() notation -when checking the error status of the passed pointer. For this -reason there is no need to have the same notation outside of -IS_ERR() itself. - -Clean up code by removing redundant notation. - -Signed-off-by: Antonio Quartulli -Signed-off-by: Jason A. 
Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/device.c | 2 +- - drivers/net/wireguard/socket.c | 4 ++-- - 2 files changed, 3 insertions(+), 3 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -157,7 +157,7 @@ static netdev_tx_t wg_xmit(struct sk_buf - } else { - struct sk_buff *segs = skb_gso_segment(skb, 0); - -- if (unlikely(IS_ERR(segs))) { -+ if (IS_ERR(segs)) { - ret = PTR_ERR(segs); - goto err_peer; - } ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -71,7 +71,7 @@ static int send4(struct wg_device *wg, s - ip_rt_put(rt); - rt = ip_route_output_flow(sock_net(sock), &fl, sock); - } -- if (unlikely(IS_ERR(rt))) { -+ if (IS_ERR(rt)) { - ret = PTR_ERR(rt); - net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", - wg->dev->name, &endpoint->addr, ret); -@@ -138,7 +138,7 @@ static int send6(struct wg_device *wg, s - } - dst = ipv6_stub->ipv6_dst_lookup_flow(sock_net(sock), sock, &fl, - NULL); -- if (unlikely(IS_ERR(dst))) { -+ if (IS_ERR(dst)) { - ret = PTR_ERR(dst); - net_dbg_ratelimited("%s: No route to %pISpfsc, error %d\n", - wg->dev->name, &endpoint->addr, ret); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0118-wireguard-socket-remove-bogus-__be32-annotation.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0118-wireguard-socket-remove-bogus-__be32-annotation.patch deleted file mode 100644 index 1796f54de..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0118-wireguard-socket-remove-bogus-__be32-annotation.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Jann Horn -Date: Mon, 22 Feb 2021 17:25:44 +0100 -Subject: [PATCH] wireguard: socket: remove bogus __be32 annotation - -commit 7f57bd8dc22de35ddd895294aa554003e4f19a72 upstream. - -The endpoint->src_if4 has nothing to do with fixed-endian numbers; remove -the bogus annotation. 
- -This was introduced in -https://git.zx2c4.com/wireguard-monolithic-historical/commit?id=14e7d0a499a676ec55176c0de2f9fcbd34074a82 -in the historical WireGuard repo because the old code used to -zero-initialize multiple members as follows: - - endpoint->src4.s_addr = endpoint->src_if4 = fl.saddr = 0; - -Because fl.saddr is fixed-endian and an assignment returns a value with the -type of its left operand, this meant that sparse detected an assignment -between values of different endianness. - -Since then, this assignment was already split up into separate statements; -just the cast survived. - -Signed-off-by: Jann Horn -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/socket.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -53,7 +53,7 @@ static int send4(struct wg_device *wg, s - if (unlikely(!inet_confirm_addr(sock_net(sock), NULL, 0, - fl.saddr, RT_SCOPE_HOST))) { - endpoint->src4.s_addr = 0; -- *(__force __be32 *)&endpoint->src_if4 = 0; -+ endpoint->src_if4 = 0; - fl.saddr = 0; - if (cache) - dst_cache_reset(cache); -@@ -63,7 +63,7 @@ static int send4(struct wg_device *wg, s - PTR_ERR(rt) == -EINVAL) || (!IS_ERR(rt) && - rt->dst.dev->ifindex != endpoint->src_if4)))) { - endpoint->src4.s_addr = 0; -- *(__force __be32 *)&endpoint->src_if4 = 0; -+ endpoint->src_if4 = 0; - fl.saddr = 0; - if (cache) - dst_cache_reset(cache); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0119-wireguard-selftests-test-multiple-parallel-streams.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0119-wireguard-selftests-test-multiple-parallel-streams.patch deleted file mode 100644 index 3093de45f..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0119-wireguard-selftests-test-multiple-parallel-streams.patch +++ /dev/null @@ -1,52 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 
00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 22 Feb 2021 17:25:45 +0100 -Subject: [PATCH] wireguard: selftests: test multiple parallel streams - -commit d5a49aa6c3e264a93a7d08485d66e346be0969dd upstream. - -In order to test ndo_start_xmit being called in parallel, explicitly add -separate tests, which should all run on different cores. This should -help tease out bugs associated with queueing up packets from different -cores in parallel. Currently, it hasn't found those types of bugs, but -given future planned work, this is a useful regression to avoid. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/netns.sh | 15 ++++++++++++++- - 1 file changed, 14 insertions(+), 1 deletion(-) - ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -39,7 +39,7 @@ ip0() { pretty 0 "ip $*"; ip -n $netns0 - ip1() { pretty 1 "ip $*"; ip -n $netns1 "$@"; } - ip2() { pretty 2 "ip $*"; ip -n $netns2 "$@"; } - sleep() { read -t "$1" -N 1 || true; } --waitiperf() { pretty "${1//*-}" "wait for iperf:5201 pid $2"; while [[ $(ss -N "$1" -tlpH 'sport = 5201') != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } -+waitiperf() { pretty "${1//*-}" "wait for iperf:${3:-5201} pid $2"; while [[ $(ss -N "$1" -tlpH "sport = ${3:-5201}") != *\"iperf3\",pid=$2,fd=* ]]; do sleep 0.1; done; } - waitncatudp() { pretty "${1//*-}" "wait for udp:1111 pid $2"; while [[ $(ss -N "$1" -ulpH 'sport = 1111') != *\"ncat\",pid=$2,fd=* ]]; do sleep 0.1; done; } - waitiface() { pretty "${1//*-}" "wait for $2 to come up"; ip netns exec "$1" bash -c "while [[ \$(< \"/sys/class/net/$2/operstate\") != up ]]; do read -t .1 -N 0 || true; done;"; } - -@@ -141,6 +141,19 @@ tests() { - n2 iperf3 -s -1 -B fd00::2 & - waitiperf $netns2 $! 
- n1 iperf3 -Z -t 3 -b 0 -u -c fd00::2 -+ -+ # TCP over IPv4, in parallel -+ for max in 4 5 50; do -+ local pids=( ) -+ for ((i=0; i < max; ++i)) do -+ n2 iperf3 -p $(( 5200 + i )) -s -1 -B 192.168.241.2 & -+ pids+=( $! ); waitiperf $netns2 $! $(( 5200 + i )) -+ done -+ for ((i=0; i < max; ++i)) do -+ n1 iperf3 -Z -t 3 -p $(( 5200 + i )) -c 192.168.241.2 & -+ done -+ wait "${pids[@]}" -+ done - } - - [[ $(ip1 link show dev wg0) =~ mtu\ ([0-9]+) ]] && orig_mtu="${BASH_REMATCH[1]}" diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0120-wireguard-peer-put-frequently-used-members-above-cac.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0120-wireguard-peer-put-frequently-used-members-above-cac.patch deleted file mode 100644 index 69e76b96e..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0120-wireguard-peer-put-frequently-used-members-above-cac.patch +++ /dev/null @@ -1,42 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 22 Feb 2021 17:25:46 +0100 -Subject: [PATCH] wireguard: peer: put frequently used members above cache - lines - -commit 5a0598695634a6bb4126818902dd9140cd9df8b6 upstream. - -The is_dead boolean is checked for every single packet, while the -internal_id member is used basically only for pr_debug messages. So it -makes sense to hoist up is_dead into some space formerly unused by a -struct hole, while demoting internal_api to below the lowest struct -cache line. - -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/peer.h | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/drivers/net/wireguard/peer.h -+++ b/drivers/net/wireguard/peer.h -@@ -39,6 +39,7 @@ struct wg_peer { - struct crypt_queue tx_queue, rx_queue; - struct sk_buff_head staged_packet_queue; - int serial_work_cpu; -+ bool is_dead; - struct noise_keypairs keypairs; - struct endpoint endpoint; - struct dst_cache endpoint_cache; -@@ -61,9 +62,8 @@ struct wg_peer { - struct rcu_head rcu; - struct list_head peer_list; - struct list_head allowedips_list; -- u64 internal_id; - struct napi_struct napi; -- bool is_dead; -+ u64 internal_id; - }; - - struct wg_peer *wg_peer_create(struct wg_device *wg, diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0121-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0121-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch deleted file mode 100644 index 073ee9b0d..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0121-wireguard-device-do-not-generate-ICMP-for-non-IP-pac.patch +++ /dev/null @@ -1,47 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 22 Feb 2021 17:25:47 +0100 -Subject: [PATCH] wireguard: device: do not generate ICMP for non-IP packets - -commit 99fff5264e7ab06f45b0ad60243475be0a8d0559 upstream. - -If skb->protocol doesn't match the actual skb->data header, it's -probably not a good idea to pass it off to icmp{,v6}_ndo_send, which is -expecting to reply to a valid IP packet. So this commit has that early -mismatch case jump to a later error label. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/device.c | 7 ++++--- - 1 file changed, 4 insertions(+), 3 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -138,7 +138,7 @@ static netdev_tx_t wg_xmit(struct sk_buf - else if (skb->protocol == htons(ETH_P_IPV6)) - net_dbg_ratelimited("%s: No peer has allowed IPs matching %pI6\n", - dev->name, &ipv6_hdr(skb)->daddr); -- goto err; -+ goto err_icmp; - } - - family = READ_ONCE(peer->endpoint.addr.sa_family); -@@ -201,12 +201,13 @@ static netdev_tx_t wg_xmit(struct sk_buf - - err_peer: - wg_peer_put(peer); --err: -- ++dev->stats.tx_errors; -+err_icmp: - if (skb->protocol == htons(ETH_P_IP)) - icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); - else if (skb->protocol == htons(ETH_P_IPV6)) - icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0); -+err: -+ ++dev->stats.tx_errors; - kfree_skb(skb); - return ret; - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0122-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0122-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch deleted file mode 100644 index 9dc7ddae7..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0122-wireguard-queueing-get-rid-of-per-peer-ring-buffers.patch +++ /dev/null @@ -1,560 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 22 Feb 2021 17:25:48 +0100 -Subject: [PATCH] wireguard: queueing: get rid of per-peer ring buffers -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit 8b5553ace83cced775eefd0f3f18b5c6214ccf7a upstream. - -Having two ring buffers per-peer means that every peer results in two -massive ring allocations. On an 8-core x86_64 machine, this commit -reduces the per-peer allocation from 18,688 bytes to 1,856 bytes, which -is an 90% reduction. Ninety percent! 
With some single-machine -deployments approaching 500,000 peers, we're talking about a reduction -from 7 gigs of memory down to 700 megs of memory. - -In order to get rid of these per-peer allocations, this commit switches -to using a list-based queueing approach. Currently GSO fragments are -chained together using the skb->next pointer (the skb_list_* singly -linked list approach), so we form the per-peer queue around the unused -skb->prev pointer (which sort of makes sense because the links are -pointing backwards). Use of skb_queue_* is not possible here, because -that is based on doubly linked lists and spinlocks. Multiple cores can -write into the queue at any given time, because its writes occur in the -start_xmit path or in the udp_recv path. But reads happen in a single -workqueue item per-peer, amounting to a multi-producer, single-consumer -paradigm. - -The MPSC queue is implemented locklessly and never blocks. However, it -is not linearizable (though it is serializable), with a very tight and -unlikely race on writes, which, when hit (some tiny fraction of the -0.15% of partial adds on a fully loaded 16-core x86_64 system), causes -the queue reader to terminate early. However, because every packet sent -queues up the same workqueue item after it is fully added, the worker -resumes again, and stopping early isn't actually a problem, since at -that point the packet wouldn't have yet been added to the encryption -queue. These properties allow us to avoid disabling interrupts or -spinning. The design is based on Dmitry Vyukov's algorithm [1]. - -Performance-wise, ordinarily list-based queues aren't preferable to -ringbuffers, because of cache misses when following pointers around. -However, we *already* have to follow the adjacent pointers when working -through fragments, so there shouldn't actually be any change there. 
A -potential downside is that dequeueing is a bit more complicated, but the -ptr_ring structure used prior had a spinlock when dequeueing, so all and -all the difference appears to be a wash. - -Actually, from profiling, the biggest performance hit, by far, of this -commit winds up being atomic_add_unless(count, 1, max) and atomic_ -dec(count), which account for the majority of CPU time, according to -perf. In that sense, the previous ring buffer was superior in that it -could check if it was full by head==tail, which the list-based approach -cannot do. - -But all and all, this enables us to get massive memory savings, allowing -WireGuard to scale for real world deployments, without taking much of a -performance hit. - -[1] http://www.1024cores.net/home/lock-free-algorithms/queues/intrusive-mpsc-node-based-queue - -Reviewed-by: Dmitry Vyukov -Reviewed-by: Toke Høiland-Jørgensen -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/device.c | 12 ++--- - drivers/net/wireguard/device.h | 15 +++--- - drivers/net/wireguard/peer.c | 28 ++++------- - drivers/net/wireguard/peer.h | 4 +- - drivers/net/wireguard/queueing.c | 86 +++++++++++++++++++++++++------- - drivers/net/wireguard/queueing.h | 45 ++++++++++++----- - drivers/net/wireguard/receive.c | 16 +++--- - drivers/net/wireguard/send.c | 31 ++++-------- - 8 files changed, 144 insertions(+), 93 deletions(-) - ---- a/drivers/net/wireguard/device.c -+++ b/drivers/net/wireguard/device.c -@@ -235,8 +235,8 @@ static void wg_destruct(struct net_devic - destroy_workqueue(wg->handshake_receive_wq); - destroy_workqueue(wg->handshake_send_wq); - destroy_workqueue(wg->packet_crypt_wq); -- wg_packet_queue_free(&wg->decrypt_queue, true); -- wg_packet_queue_free(&wg->encrypt_queue, true); -+ wg_packet_queue_free(&wg->decrypt_queue); -+ wg_packet_queue_free(&wg->encrypt_queue); - rcu_barrier(); /* Wait for all the peers to be actually freed. */ - wg_ratelimiter_uninit(); - memzero_explicit(&wg->static_identity, sizeof(wg->static_identity)); -@@ -338,12 +338,12 @@ static int wg_newlink(struct net *src_ne - goto err_destroy_handshake_send; - - ret = wg_packet_queue_init(&wg->encrypt_queue, wg_packet_encrypt_worker, -- true, MAX_QUEUED_PACKETS); -+ MAX_QUEUED_PACKETS); - if (ret < 0) - goto err_destroy_packet_crypt; - - ret = wg_packet_queue_init(&wg->decrypt_queue, wg_packet_decrypt_worker, -- true, MAX_QUEUED_PACKETS); -+ MAX_QUEUED_PACKETS); - if (ret < 0) - goto err_free_encrypt_queue; - -@@ -368,9 +368,9 @@ static int wg_newlink(struct net *src_ne - err_uninit_ratelimiter: - wg_ratelimiter_uninit(); - err_free_decrypt_queue: -- wg_packet_queue_free(&wg->decrypt_queue, true); -+ wg_packet_queue_free(&wg->decrypt_queue); - err_free_encrypt_queue: -- wg_packet_queue_free(&wg->encrypt_queue, true); -+ wg_packet_queue_free(&wg->encrypt_queue); - err_destroy_packet_crypt: - destroy_workqueue(wg->packet_crypt_wq); - 
err_destroy_handshake_send: ---- a/drivers/net/wireguard/device.h -+++ b/drivers/net/wireguard/device.h -@@ -27,13 +27,14 @@ struct multicore_worker { - - struct crypt_queue { - struct ptr_ring ring; -- union { -- struct { -- struct multicore_worker __percpu *worker; -- int last_cpu; -- }; -- struct work_struct work; -- }; -+ struct multicore_worker __percpu *worker; -+ int last_cpu; -+}; -+ -+struct prev_queue { -+ struct sk_buff *head, *tail, *peeked; -+ struct { struct sk_buff *next, *prev; } empty; // Match first 2 members of struct sk_buff. -+ atomic_t count; - }; - - struct wg_device { ---- a/drivers/net/wireguard/peer.c -+++ b/drivers/net/wireguard/peer.c -@@ -32,27 +32,22 @@ struct wg_peer *wg_peer_create(struct wg - peer = kzalloc(sizeof(*peer), GFP_KERNEL); - if (unlikely(!peer)) - return ERR_PTR(ret); -- peer->device = wg; -+ if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) -+ goto err; - -+ peer->device = wg; - wg_noise_handshake_init(&peer->handshake, &wg->static_identity, - public_key, preshared_key, peer); -- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) -- goto err_1; -- if (wg_packet_queue_init(&peer->tx_queue, wg_packet_tx_worker, false, -- MAX_QUEUED_PACKETS)) -- goto err_2; -- if (wg_packet_queue_init(&peer->rx_queue, NULL, false, -- MAX_QUEUED_PACKETS)) -- goto err_3; -- - peer->internal_id = atomic64_inc_return(&peer_counter); - peer->serial_work_cpu = nr_cpumask_bits; - wg_cookie_init(&peer->latest_cookie); - wg_timers_init(peer); - wg_cookie_checker_precompute_peer_keys(peer); - spin_lock_init(&peer->keypairs.keypair_update_lock); -- INIT_WORK(&peer->transmit_handshake_work, -- wg_packet_handshake_send_worker); -+ INIT_WORK(&peer->transmit_handshake_work, wg_packet_handshake_send_worker); -+ INIT_WORK(&peer->transmit_packet_work, wg_packet_tx_worker); -+ wg_prev_queue_init(&peer->tx_queue); -+ wg_prev_queue_init(&peer->rx_queue); - rwlock_init(&peer->endpoint_lock); - kref_init(&peer->refcount); - 
skb_queue_head_init(&peer->staged_packet_queue); -@@ -68,11 +63,7 @@ struct wg_peer *wg_peer_create(struct wg - pr_debug("%s: Peer %llu created\n", wg->dev->name, peer->internal_id); - return peer; - --err_3: -- wg_packet_queue_free(&peer->tx_queue, false); --err_2: -- dst_cache_destroy(&peer->endpoint_cache); --err_1: -+err: - kfree(peer); - return ERR_PTR(ret); - } -@@ -197,8 +188,7 @@ static void rcu_release(struct rcu_head - struct wg_peer *peer = container_of(rcu, struct wg_peer, rcu); - - dst_cache_destroy(&peer->endpoint_cache); -- wg_packet_queue_free(&peer->rx_queue, false); -- wg_packet_queue_free(&peer->tx_queue, false); -+ WARN_ON(wg_prev_queue_peek(&peer->tx_queue) || wg_prev_queue_peek(&peer->rx_queue)); - - /* The final zeroing takes care of clearing any remaining handshake key - * material and other potentially sensitive information. ---- a/drivers/net/wireguard/peer.h -+++ b/drivers/net/wireguard/peer.h -@@ -36,7 +36,7 @@ struct endpoint { - - struct wg_peer { - struct wg_device *device; -- struct crypt_queue tx_queue, rx_queue; -+ struct prev_queue tx_queue, rx_queue; - struct sk_buff_head staged_packet_queue; - int serial_work_cpu; - bool is_dead; -@@ -46,7 +46,7 @@ struct wg_peer { - rwlock_t endpoint_lock; - struct noise_handshake handshake; - atomic64_t last_sent_handshake; -- struct work_struct transmit_handshake_work, clear_peer_work; -+ struct work_struct transmit_handshake_work, clear_peer_work, transmit_packet_work; - struct cookie latest_cookie; - struct hlist_node pubkey_hash; - u64 rx_bytes, tx_bytes; ---- a/drivers/net/wireguard/queueing.c -+++ b/drivers/net/wireguard/queueing.c -@@ -9,8 +9,7 @@ struct multicore_worker __percpu * - wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr) - { - int cpu; -- struct multicore_worker __percpu *worker = -- alloc_percpu(struct multicore_worker); -+ struct multicore_worker __percpu *worker = alloc_percpu(struct multicore_worker); - - if (!worker) - return NULL; -@@ -23,7 +22,7 
@@ wg_packet_percpu_multicore_worker_alloc( - } - - int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, -- bool multicore, unsigned int len) -+ unsigned int len) - { - int ret; - -@@ -31,25 +30,78 @@ int wg_packet_queue_init(struct crypt_qu - ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); - if (ret) - return ret; -- if (function) { -- if (multicore) { -- queue->worker = wg_packet_percpu_multicore_worker_alloc( -- function, queue); -- if (!queue->worker) { -- ptr_ring_cleanup(&queue->ring, NULL); -- return -ENOMEM; -- } -- } else { -- INIT_WORK(&queue->work, function); -- } -+ queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); -+ if (!queue->worker) { -+ ptr_ring_cleanup(&queue->ring, NULL); -+ return -ENOMEM; - } - return 0; - } - --void wg_packet_queue_free(struct crypt_queue *queue, bool multicore) -+void wg_packet_queue_free(struct crypt_queue *queue) - { -- if (multicore) -- free_percpu(queue->worker); -+ free_percpu(queue->worker); - WARN_ON(!__ptr_ring_empty(&queue->ring)); - ptr_ring_cleanup(&queue->ring, NULL); - } -+ -+#define NEXT(skb) ((skb)->prev) -+#define STUB(queue) ((struct sk_buff *)&queue->empty) -+ -+void wg_prev_queue_init(struct prev_queue *queue) -+{ -+ NEXT(STUB(queue)) = NULL; -+ queue->head = queue->tail = STUB(queue); -+ queue->peeked = NULL; -+ atomic_set(&queue->count, 0); -+ BUILD_BUG_ON( -+ offsetof(struct sk_buff, next) != offsetof(struct prev_queue, empty.next) - -+ offsetof(struct prev_queue, empty) || -+ offsetof(struct sk_buff, prev) != offsetof(struct prev_queue, empty.prev) - -+ offsetof(struct prev_queue, empty)); -+} -+ -+static void __wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) -+{ -+ WRITE_ONCE(NEXT(skb), NULL); -+ WRITE_ONCE(NEXT(xchg_release(&queue->head, skb)), skb); -+} -+ -+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb) -+{ -+ if (!atomic_add_unless(&queue->count, 1, MAX_QUEUED_PACKETS)) -+ return false; -+ 
__wg_prev_queue_enqueue(queue, skb); -+ return true; -+} -+ -+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue) -+{ -+ struct sk_buff *tail = queue->tail, *next = smp_load_acquire(&NEXT(tail)); -+ -+ if (tail == STUB(queue)) { -+ if (!next) -+ return NULL; -+ queue->tail = next; -+ tail = next; -+ next = smp_load_acquire(&NEXT(next)); -+ } -+ if (next) { -+ queue->tail = next; -+ atomic_dec(&queue->count); -+ return tail; -+ } -+ if (tail != READ_ONCE(queue->head)) -+ return NULL; -+ __wg_prev_queue_enqueue(queue, STUB(queue)); -+ next = smp_load_acquire(&NEXT(tail)); -+ if (next) { -+ queue->tail = next; -+ atomic_dec(&queue->count); -+ return tail; -+ } -+ return NULL; -+} -+ -+#undef NEXT -+#undef STUB ---- a/drivers/net/wireguard/queueing.h -+++ b/drivers/net/wireguard/queueing.h -@@ -17,12 +17,13 @@ struct wg_device; - struct wg_peer; - struct multicore_worker; - struct crypt_queue; -+struct prev_queue; - struct sk_buff; - - /* queueing.c APIs: */ - int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, -- bool multicore, unsigned int len); --void wg_packet_queue_free(struct crypt_queue *queue, bool multicore); -+ unsigned int len); -+void wg_packet_queue_free(struct crypt_queue *queue); - struct multicore_worker __percpu * - wg_packet_percpu_multicore_worker_alloc(work_func_t function, void *ptr); - -@@ -135,8 +136,31 @@ static inline int wg_cpumask_next_online - return cpu; - } - -+void wg_prev_queue_init(struct prev_queue *queue); -+ -+/* Multi producer */ -+bool wg_prev_queue_enqueue(struct prev_queue *queue, struct sk_buff *skb); -+ -+/* Single consumer */ -+struct sk_buff *wg_prev_queue_dequeue(struct prev_queue *queue); -+ -+/* Single consumer */ -+static inline struct sk_buff *wg_prev_queue_peek(struct prev_queue *queue) -+{ -+ if (queue->peeked) -+ return queue->peeked; -+ queue->peeked = wg_prev_queue_dequeue(queue); -+ return queue->peeked; -+} -+ -+/* Single consumer */ -+static inline void 
wg_prev_queue_drop_peeked(struct prev_queue *queue) -+{ -+ queue->peeked = NULL; -+} -+ - static inline int wg_queue_enqueue_per_device_and_peer( -- struct crypt_queue *device_queue, struct crypt_queue *peer_queue, -+ struct crypt_queue *device_queue, struct prev_queue *peer_queue, - struct sk_buff *skb, struct workqueue_struct *wq, int *next_cpu) - { - int cpu; -@@ -145,8 +169,9 @@ static inline int wg_queue_enqueue_per_d - /* We first queue this up for the peer ingestion, but the consumer - * will wait for the state to change to CRYPTED or DEAD before. - */ -- if (unlikely(ptr_ring_produce_bh(&peer_queue->ring, skb))) -+ if (unlikely(!wg_prev_queue_enqueue(peer_queue, skb))) - return -ENOSPC; -+ - /* Then we queue it up in the device queue, which consumes the - * packet as soon as it can. - */ -@@ -157,9 +182,7 @@ static inline int wg_queue_enqueue_per_d - return 0; - } - --static inline void wg_queue_enqueue_per_peer(struct crypt_queue *queue, -- struct sk_buff *skb, -- enum packet_state state) -+static inline void wg_queue_enqueue_per_peer_tx(struct sk_buff *skb, enum packet_state state) - { - /* We take a reference, because as soon as we call atomic_set, the - * peer can be freed from below us. 
-@@ -167,14 +190,12 @@ static inline void wg_queue_enqueue_per_ - struct wg_peer *peer = wg_peer_get(PACKET_PEER(skb)); - - atomic_set_release(&PACKET_CB(skb)->state, state); -- queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, -- peer->internal_id), -- peer->device->packet_crypt_wq, &queue->work); -+ queue_work_on(wg_cpumask_choose_online(&peer->serial_work_cpu, peer->internal_id), -+ peer->device->packet_crypt_wq, &peer->transmit_packet_work); - wg_peer_put(peer); - } - --static inline void wg_queue_enqueue_per_peer_napi(struct sk_buff *skb, -- enum packet_state state) -+static inline void wg_queue_enqueue_per_peer_rx(struct sk_buff *skb, enum packet_state state) - { - /* We take a reference, because as soon as we call atomic_set, the - * peer can be freed from below us. ---- a/drivers/net/wireguard/receive.c -+++ b/drivers/net/wireguard/receive.c -@@ -444,7 +444,6 @@ packet_processed: - int wg_packet_rx_poll(struct napi_struct *napi, int budget) - { - struct wg_peer *peer = container_of(napi, struct wg_peer, napi); -- struct crypt_queue *queue = &peer->rx_queue; - struct noise_keypair *keypair; - struct endpoint endpoint; - enum packet_state state; -@@ -455,11 +454,10 @@ int wg_packet_rx_poll(struct napi_struct - if (unlikely(budget <= 0)) - return 0; - -- while ((skb = __ptr_ring_peek(&queue->ring)) != NULL && -+ while ((skb = wg_prev_queue_peek(&peer->rx_queue)) != NULL && - (state = atomic_read_acquire(&PACKET_CB(skb)->state)) != - PACKET_STATE_UNCRYPTED) { -- __ptr_ring_discard_one(&queue->ring); -- peer = PACKET_PEER(skb); -+ wg_prev_queue_drop_peeked(&peer->rx_queue); - keypair = PACKET_CB(skb)->keypair; - free = true; - -@@ -508,7 +506,7 @@ void wg_packet_decrypt_worker(struct wor - enum packet_state state = - likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? 
- PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; -- wg_queue_enqueue_per_peer_napi(skb, state); -+ wg_queue_enqueue_per_peer_rx(skb, state); - if (need_resched()) - cond_resched(); - } -@@ -531,12 +529,10 @@ static void wg_packet_consume_data(struc - if (unlikely(READ_ONCE(peer->is_dead))) - goto err; - -- ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, -- &peer->rx_queue, skb, -- wg->packet_crypt_wq, -- &wg->decrypt_queue.last_cpu); -+ ret = wg_queue_enqueue_per_device_and_peer(&wg->decrypt_queue, &peer->rx_queue, skb, -+ wg->packet_crypt_wq, &wg->decrypt_queue.last_cpu); - if (unlikely(ret == -EPIPE)) -- wg_queue_enqueue_per_peer_napi(skb, PACKET_STATE_DEAD); -+ wg_queue_enqueue_per_peer_rx(skb, PACKET_STATE_DEAD); - if (likely(!ret || ret == -EPIPE)) { - rcu_read_unlock_bh(); - return; ---- a/drivers/net/wireguard/send.c -+++ b/drivers/net/wireguard/send.c -@@ -239,8 +239,7 @@ void wg_packet_send_keepalive(struct wg_ - wg_packet_send_staged_packets(peer); - } - --static void wg_packet_create_data_done(struct sk_buff *first, -- struct wg_peer *peer) -+static void wg_packet_create_data_done(struct wg_peer *peer, struct sk_buff *first) - { - struct sk_buff *skb, *next; - bool is_keepalive, data_sent = false; -@@ -262,22 +261,19 @@ static void wg_packet_create_data_done(s - - void wg_packet_tx_worker(struct work_struct *work) - { -- struct crypt_queue *queue = container_of(work, struct crypt_queue, -- work); -+ struct wg_peer *peer = container_of(work, struct wg_peer, transmit_packet_work); - struct noise_keypair *keypair; - enum packet_state state; - struct sk_buff *first; -- struct wg_peer *peer; - -- while ((first = __ptr_ring_peek(&queue->ring)) != NULL && -+ while ((first = wg_prev_queue_peek(&peer->tx_queue)) != NULL && - (state = atomic_read_acquire(&PACKET_CB(first)->state)) != - PACKET_STATE_UNCRYPTED) { -- __ptr_ring_discard_one(&queue->ring); -- peer = PACKET_PEER(first); -+ wg_prev_queue_drop_peeked(&peer->tx_queue); - keypair = 
PACKET_CB(first)->keypair; - - if (likely(state == PACKET_STATE_CRYPTED)) -- wg_packet_create_data_done(first, peer); -+ wg_packet_create_data_done(peer, first); - else - kfree_skb_list(first); - -@@ -306,16 +302,14 @@ void wg_packet_encrypt_worker(struct wor - break; - } - } -- wg_queue_enqueue_per_peer(&PACKET_PEER(first)->tx_queue, first, -- state); -+ wg_queue_enqueue_per_peer_tx(first, state); - if (need_resched()) - cond_resched(); - } - } - --static void wg_packet_create_data(struct sk_buff *first) -+static void wg_packet_create_data(struct wg_peer *peer, struct sk_buff *first) - { -- struct wg_peer *peer = PACKET_PEER(first); - struct wg_device *wg = peer->device; - int ret = -EINVAL; - -@@ -323,13 +317,10 @@ static void wg_packet_create_data(struct - if (unlikely(READ_ONCE(peer->is_dead))) - goto err; - -- ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, -- &peer->tx_queue, first, -- wg->packet_crypt_wq, -- &wg->encrypt_queue.last_cpu); -+ ret = wg_queue_enqueue_per_device_and_peer(&wg->encrypt_queue, &peer->tx_queue, first, -+ wg->packet_crypt_wq, &wg->encrypt_queue.last_cpu); - if (unlikely(ret == -EPIPE)) -- wg_queue_enqueue_per_peer(&peer->tx_queue, first, -- PACKET_STATE_DEAD); -+ wg_queue_enqueue_per_peer_tx(first, PACKET_STATE_DEAD); - err: - rcu_read_unlock_bh(); - if (likely(!ret || ret == -EPIPE)) -@@ -393,7 +384,7 @@ void wg_packet_send_staged_packets(struc - packets.prev->next = NULL; - wg_peer_get(keypair->entry.peer); - PACKET_CB(packets.next)->keypair = keypair; -- wg_packet_create_data(packets.next); -+ wg_packet_create_data(peer, packets.next); - return; - - out_invalid: diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0123-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0123-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch deleted file mode 100644 index 9a251492c..000000000 --- 
a/feeds/ipq807x/ipq807x/patches/080-wireguard-0123-wireguard-kconfig-use-arm-chacha-even-with-no-neon.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Mon, 22 Feb 2021 17:25:49 +0100 -Subject: [PATCH] wireguard: kconfig: use arm chacha even with no neon - -commit bce2473927af8de12ad131a743f55d69d358c0b9 upstream. - -The condition here was incorrect: a non-neon fallback implementation is -available on arm32 when NEON is not supported. - -Reported-by: Ilya Lipnitskiy -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Signed-off-by: Jason A. Donenfeld -Signed-off-by: Jakub Kicinski -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/drivers/net/Kconfig -+++ b/drivers/net/Kconfig -@@ -87,7 +87,7 @@ config WIREGUARD - select CRYPTO_CURVE25519_X86 if X86 && 64BIT - select ARM_CRYPTO if ARM - select ARM64_CRYPTO if ARM64 -- select CRYPTO_CHACHA20_NEON if (ARM || ARM64) && KERNEL_MODE_NEON -+ select CRYPTO_CHACHA20_NEON if ARM || (ARM64 && KERNEL_MODE_NEON) - select CRYPTO_POLY1305_NEON if ARM64 && KERNEL_MODE_NEON - select CRYPTO_POLY1305_ARM if ARM - select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0124-crypto-mips-poly1305-enable-for-all-MIPS-processors.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0124-crypto-mips-poly1305-enable-for-all-MIPS-processors.patch deleted file mode 100644 index c0ee841b0..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0124-crypto-mips-poly1305-enable-for-all-MIPS-processors.patch +++ /dev/null @@ -1,60 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Maciej W. Rozycki" -Date: Thu, 11 Mar 2021 21:50:47 -0700 -Subject: [PATCH] crypto: mips/poly1305 - enable for all MIPS processors - -commit 6c810cf20feef0d4338e9b424ab7f2644a8b353e upstream. 
- -The MIPS Poly1305 implementation is generic MIPS code written such as to -support down to the original MIPS I and MIPS III ISA for the 32-bit and -64-bit variant respectively. Lift the current limitation then to enable -code for MIPSr1 ISA or newer processors only and have it available for -all MIPS processors. - -Signed-off-by: Maciej W. Rozycki -Fixes: a11d055e7a64 ("crypto: mips/poly1305 - incorporate OpenSSL/CRYPTOGAMS optimized implementation") -Cc: stable@vger.kernel.org # v5.5+ -Acked-by: Jason A. Donenfeld -Signed-off-by: Thomas Bogendoerfer -Signed-off-by: Jason A. Donenfeld ---- - arch/mips/crypto/Makefile | 4 ++-- - crypto/Kconfig | 2 +- - drivers/net/Kconfig | 2 +- - 3 files changed, 4 insertions(+), 4 deletions(-) - ---- a/arch/mips/crypto/Makefile -+++ b/arch/mips/crypto/Makefile -@@ -12,8 +12,8 @@ AFLAGS_chacha-core.o += -O2 # needed to - obj-$(CONFIG_CRYPTO_POLY1305_MIPS) += poly1305-mips.o - poly1305-mips-y := poly1305-core.o poly1305-glue.o - --perlasm-flavour-$(CONFIG_CPU_MIPS32) := o32 --perlasm-flavour-$(CONFIG_CPU_MIPS64) := 64 -+perlasm-flavour-$(CONFIG_32BIT) := o32 -+perlasm-flavour-$(CONFIG_64BIT) := 64 - - quiet_cmd_perlasm = PERLASM $@ - cmd_perlasm = $(PERL) $(<) $(perlasm-flavour-y) $(@) ---- a/crypto/Kconfig -+++ b/crypto/Kconfig -@@ -740,7 +740,7 @@ config CRYPTO_POLY1305_X86_64 - - config CRYPTO_POLY1305_MIPS - tristate "Poly1305 authenticator algorithm (MIPS optimized)" -- depends on CPU_MIPS32 || (CPU_MIPS64 && 64BIT) -+ depends on MIPS - select CRYPTO_ARCH_HAVE_LIB_POLY1305 - - config CRYPTO_MD4 ---- a/drivers/net/Kconfig -+++ b/drivers/net/Kconfig -@@ -92,7 +92,7 @@ config WIREGUARD - select CRYPTO_POLY1305_ARM if ARM - select CRYPTO_CURVE25519_NEON if ARM && KERNEL_MODE_NEON - select CRYPTO_CHACHA_MIPS if CPU_MIPS32_R2 -- select CRYPTO_POLY1305_MIPS if CPU_MIPS32 || (CPU_MIPS64 && 64BIT) -+ select CRYPTO_POLY1305_MIPS if MIPS - help - WireGuard is a secure, fast, and easy to use replacement for IPSec - that uses modern 
cryptography and clever networking tricks. It's diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0125-crypto-mips-add-poly1305-core.S-to-.gitignore.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0125-crypto-mips-add-poly1305-core.S-to-.gitignore.patch deleted file mode 100644 index 856d67d5b..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0125-crypto-mips-add-poly1305-core.S-to-.gitignore.patch +++ /dev/null @@ -1,24 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Ilya Lipnitskiy -Date: Sat, 27 Mar 2021 19:39:43 -0700 -Subject: [PATCH] crypto: mips: add poly1305-core.S to .gitignore - -commit dc92d0df51dc61de88bf6f4884a17bf73d5c6326 upstream. - -poly1305-core.S is an auto-generated file, so it should be ignored. - -Fixes: a11d055e7a64 ("crypto: mips/poly1305 - incorporate OpenSSL/CRYPTOGAMS optimized implementation") -Signed-off-by: Ilya Lipnitskiy -Cc: Ard Biesheuvel -Signed-off-by: Thomas Bogendoerfer -Signed-off-by: Jason A. 
Donenfeld ---- - arch/mips/crypto/.gitignore | 2 ++ - 1 file changed, 2 insertions(+) - create mode 100644 arch/mips/crypto/.gitignore - ---- /dev/null -+++ b/arch/mips/crypto/.gitignore -@@ -0,0 +1,2 @@ -+# SPDX-License-Identifier: GPL-2.0-only -+poly1305-core.S diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0126-crypto-poly1305-fix-poly1305_core_setkey-declaration.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0126-crypto-poly1305-fix-poly1305_core_setkey-declaration.patch deleted file mode 100644 index ded6625ae..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0126-crypto-poly1305-fix-poly1305_core_setkey-declaration.patch +++ /dev/null @@ -1,172 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: Arnd Bergmann -Date: Mon, 22 Mar 2021 18:05:15 +0100 -Subject: [PATCH] crypto: poly1305 - fix poly1305_core_setkey() declaration -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -commit 8d195e7a8ada68928f2aedb2c18302a4518fe68e upstream. - -gcc-11 points out a mismatch between the declaration and the definition -of poly1305_core_setkey(): - -lib/crypto/poly1305-donna32.c:13:67: error: argument 2 of type ‘const u8[16]’ {aka ‘const unsigned char[16]’} with mismatched bound [-Werror=array-parameter=] - 13 | void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) - | ~~~~~~~~~^~~~~~~~~~~ -In file included from lib/crypto/poly1305-donna32.c:11: -include/crypto/internal/poly1305.h:21:68: note: previously declared as ‘const u8 *’ {aka ‘const unsigned char *’} - 21 | void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key); - -This is harmless in principle, as the calling conventions are the same, -but the more specific prototype allows better type checking in the -caller. - -Change the declaration to match the actual function definition. 
-The poly1305_simd_init() is a bit suspicious here, as it previously -had a 32-byte argument type, but looks like it needs to take the -16-byte POLY1305_BLOCK_SIZE array instead. - -Fixes: 1c08a104360f ("crypto: poly1305 - add new 32 and 64-bit generic versions") -Signed-off-by: Arnd Bergmann -Reviewed-by: Ard Biesheuvel -Reviewed-by: Eric Biggers -Signed-off-by: Herbert Xu -Signed-off-by: Jason A. Donenfeld ---- - arch/arm/crypto/poly1305-glue.c | 2 +- - arch/arm64/crypto/poly1305-glue.c | 2 +- - arch/mips/crypto/poly1305-glue.c | 2 +- - arch/x86/crypto/poly1305_glue.c | 6 +++--- - include/crypto/internal/poly1305.h | 3 ++- - include/crypto/poly1305.h | 6 ++++-- - lib/crypto/poly1305-donna32.c | 3 ++- - lib/crypto/poly1305-donna64.c | 3 ++- - lib/crypto/poly1305.c | 3 ++- - 9 files changed, 18 insertions(+), 12 deletions(-) - ---- a/arch/arm/crypto/poly1305-glue.c -+++ b/arch/arm/crypto/poly1305-glue.c -@@ -29,7 +29,7 @@ void __weak poly1305_blocks_neon(void *s - - static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); - --void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) -+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) - { - poly1305_init_arm(&dctx->h, key); - dctx->s[0] = get_unaligned_le32(key + 16); ---- a/arch/arm64/crypto/poly1305-glue.c -+++ b/arch/arm64/crypto/poly1305-glue.c -@@ -25,7 +25,7 @@ asmlinkage void poly1305_emit(void *stat - - static __ro_after_init DEFINE_STATIC_KEY_FALSE(have_neon); - --void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) -+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) - { - poly1305_init_arm64(&dctx->h, key); - dctx->s[0] = get_unaligned_le32(key + 16); ---- a/arch/mips/crypto/poly1305-glue.c -+++ b/arch/mips/crypto/poly1305-glue.c -@@ -17,7 +17,7 @@ asmlinkage void poly1305_init_mips(void - asmlinkage void poly1305_blocks_mips(void *state, const u8 *src, u32 len, u32 hibit); - asmlinkage void 
poly1305_emit_mips(void *state, u8 *digest, const u32 *nonce); - --void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) -+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) - { - poly1305_init_mips(&dctx->h, key); - dctx->s[0] = get_unaligned_le32(key + 16); ---- a/arch/x86/crypto/poly1305_glue.c -+++ b/arch/x86/crypto/poly1305_glue.c -@@ -15,7 +15,7 @@ - #include - - asmlinkage void poly1305_init_x86_64(void *ctx, -- const u8 key[POLY1305_KEY_SIZE]); -+ const u8 key[POLY1305_BLOCK_SIZE]); - asmlinkage void poly1305_blocks_x86_64(void *ctx, const u8 *inp, - const size_t len, const u32 padbit); - asmlinkage void poly1305_emit_x86_64(void *ctx, u8 mac[POLY1305_DIGEST_SIZE], -@@ -80,7 +80,7 @@ static void convert_to_base2_64(void *ct - state->is_base2_26 = 0; - } - --static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_KEY_SIZE]) -+static void poly1305_simd_init(void *ctx, const u8 key[POLY1305_BLOCK_SIZE]) - { - poly1305_init_x86_64(ctx, key); - } -@@ -128,7 +128,7 @@ static void poly1305_simd_emit(void *ctx - poly1305_emit_avx(ctx, mac, nonce); - } - --void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 *key) -+void poly1305_init_arch(struct poly1305_desc_ctx *dctx, const u8 key[POLY1305_KEY_SIZE]) - { - poly1305_simd_init(&dctx->h, key); - dctx->s[0] = get_unaligned_le32(&key[16]); ---- a/include/crypto/internal/poly1305.h -+++ b/include/crypto/internal/poly1305.h -@@ -18,7 +18,8 @@ - * only the ε-almost-∆-universal hash function (not the full MAC) is computed. 
- */ - --void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key); -+void poly1305_core_setkey(struct poly1305_core_key *key, -+ const u8 raw_key[POLY1305_BLOCK_SIZE]); - static inline void poly1305_core_init(struct poly1305_state *state) - { - *state = (struct poly1305_state){}; ---- a/include/crypto/poly1305.h -+++ b/include/crypto/poly1305.h -@@ -58,8 +58,10 @@ struct poly1305_desc_ctx { - }; - }; - --void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key); --void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key); -+void poly1305_init_arch(struct poly1305_desc_ctx *desc, -+ const u8 key[POLY1305_KEY_SIZE]); -+void poly1305_init_generic(struct poly1305_desc_ctx *desc, -+ const u8 key[POLY1305_KEY_SIZE]); - - static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key) - { ---- a/lib/crypto/poly1305-donna32.c -+++ b/lib/crypto/poly1305-donna32.c -@@ -10,7 +10,8 @@ - #include - #include - --void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) -+void poly1305_core_setkey(struct poly1305_core_key *key, -+ const u8 raw_key[POLY1305_BLOCK_SIZE]) - { - /* r &= 0xffffffc0ffffffc0ffffffc0fffffff */ - key->key.r[0] = (get_unaligned_le32(&raw_key[0])) & 0x3ffffff; ---- a/lib/crypto/poly1305-donna64.c -+++ b/lib/crypto/poly1305-donna64.c -@@ -12,7 +12,8 @@ - - typedef __uint128_t u128; - --void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[16]) -+void poly1305_core_setkey(struct poly1305_core_key *key, -+ const u8 raw_key[POLY1305_BLOCK_SIZE]) - { - u64 t0, t1; - ---- a/lib/crypto/poly1305.c -+++ b/lib/crypto/poly1305.c -@@ -12,7 +12,8 @@ - #include - #include - --void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key) -+void poly1305_init_generic(struct poly1305_desc_ctx *desc, -+ const u8 key[POLY1305_KEY_SIZE]) - { - poly1305_core_setkey(&desc->core_r, key); - desc->s[0] = get_unaligned_le32(key + 16); diff --git 
a/feeds/ipq807x/ipq807x/patches/080-wireguard-0127-wireguard-selftests-remove-old-conntrack-kconfig-val.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0127-wireguard-selftests-remove-old-conntrack-kconfig-val.patch deleted file mode 100644 index 3e7d1a8e0..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0127-wireguard-selftests-remove-old-conntrack-kconfig-val.patch +++ /dev/null @@ -1,29 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 4 Jun 2021 17:17:30 +0200 -Subject: [PATCH] wireguard: selftests: remove old conntrack kconfig value - -commit acf2492b51c9a3c4dfb947f4d3477a86d315150f upstream. - -On recent kernels, this config symbol is no longer used. - -Reported-by: Rui Salvaterra -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Cc: stable@vger.kernel.org -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/qemu/kernel.config | 1 - - 1 file changed, 1 deletion(-) - ---- a/tools/testing/selftests/wireguard/qemu/kernel.config -+++ b/tools/testing/selftests/wireguard/qemu/kernel.config -@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y - CONFIG_NETFILTER_XT_NAT=y - CONFIG_NETFILTER_XT_MATCH_LENGTH=y - CONFIG_NETFILTER_XT_MARK=y --CONFIG_NF_CONNTRACK_IPV4=y - CONFIG_NF_NAT_IPV4=y - CONFIG_IP_NF_IPTABLES=y - CONFIG_IP_NF_FILTER=y diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0128-wireguard-selftests-make-sure-rp_filter-is-disabled-.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0128-wireguard-selftests-make-sure-rp_filter-is-disabled-.patch deleted file mode 100644 index 22d0f3e32..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0128-wireguard-selftests-make-sure-rp_filter-is-disabled-.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. 
Donenfeld" -Date: Fri, 4 Jun 2021 17:17:31 +0200 -Subject: [PATCH] wireguard: selftests: make sure rp_filter is disabled on - vethc - -commit f8873d11d4121aad35024f9379e431e0c83abead upstream. - -Some distros may enable strict rp_filter by default, which will prevent -vethc from receiving the packets with an unrouteable reverse path address. - -Reported-by: Hangbin Liu -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Cc: stable@vger.kernel.org -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - tools/testing/selftests/wireguard/netns.sh | 1 + - 1 file changed, 1 insertion(+) - ---- a/tools/testing/selftests/wireguard/netns.sh -+++ b/tools/testing/selftests/wireguard/netns.sh -@@ -363,6 +363,7 @@ ip1 -6 rule add table main suppress_pref - ip1 -4 route add default dev wg0 table 51820 - ip1 -4 rule add not fwmark 51820 table 51820 - ip1 -4 rule add table main suppress_prefixlength 0 -+n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter' - # Flood the pings instead of sending just one, to trigger routing table reference counting bugs. - n1 ping -W 1 -c 100 -f 192.168.99.7 - n1 ping -W 1 -c 100 -f abab::1111 diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0129-wireguard-do-not-use-O3.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0129-wireguard-do-not-use-O3.patch deleted file mode 100644 index a7890a738..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0129-wireguard-do-not-use-O3.patch +++ /dev/null @@ -1,33 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 4 Jun 2021 17:17:32 +0200 -Subject: [PATCH] wireguard: do not use -O3 - -commit cc5060ca0285efe2728bced399a1955a7ce808b2 upstream. - -Apparently, various versions of gcc have O3-related miscompiles. 
Looking -at the difference between -O2 and -O3 for gcc 11 doesn't indicate -miscompiles, but the difference also doesn't seem so significant for -performance that it's worth risking. - -Link: https://lore.kernel.org/lkml/CAHk-=wjuoGyxDhAF8SsrTkN0-YfCx7E6jUN3ikC_tn2AKWTTsA@mail.gmail.com/ -Link: https://lore.kernel.org/lkml/CAHmME9otB5Wwxp7H8bR_i2uH2esEMvoBMC8uEXBMH9p0q1s6Bw@mail.gmail.com/ -Reported-by: Linus Torvalds -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Cc: stable@vger.kernel.org -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/Makefile | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/drivers/net/wireguard/Makefile -+++ b/drivers/net/wireguard/Makefile -@@ -1,5 +1,4 @@ --ccflags-y := -O3 --ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' -+ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' - ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG - wireguard-y := main.o - wireguard-y += noise.o diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0130-wireguard-use-synchronize_net-rather-than-synchroniz.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0130-wireguard-use-synchronize_net-rather-than-synchroniz.patch deleted file mode 100644 index 309fe3619..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0130-wireguard-use-synchronize_net-rather-than-synchroniz.patch +++ /dev/null @@ -1,66 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 4 Jun 2021 17:17:33 +0200 -Subject: [PATCH] wireguard: use synchronize_net rather than synchronize_rcu - -commit 24b70eeeb4f46c09487f8155239ebfb1f875774a upstream. - -Many of the synchronization points are sometimes called under the rtnl -lock, which means we should use synchronize_net rather than -synchronize_rcu. 
Under the hood, this expands to using the expedited -flavor of function in the event that rtnl is held, in order to not stall -other concurrent changes. - -This fixes some very, very long delays when removing multiple peers at -once, which would cause some operations to take several minutes. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Cc: stable@vger.kernel.org -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/peer.c | 6 +++--- - drivers/net/wireguard/socket.c | 2 +- - 2 files changed, 4 insertions(+), 4 deletions(-) - ---- a/drivers/net/wireguard/peer.c -+++ b/drivers/net/wireguard/peer.c -@@ -88,7 +88,7 @@ static void peer_make_dead(struct wg_pee - /* Mark as dead, so that we don't allow jumping contexts after. */ - WRITE_ONCE(peer->is_dead, true); - -- /* The caller must now synchronize_rcu() for this to take effect. */ -+ /* The caller must now synchronize_net() for this to take effect. 
*/ - } - - static void peer_remove_after_dead(struct wg_peer *peer) -@@ -160,7 +160,7 @@ void wg_peer_remove(struct wg_peer *peer - lockdep_assert_held(&peer->device->device_update_lock); - - peer_make_dead(peer); -- synchronize_rcu(); -+ synchronize_net(); - peer_remove_after_dead(peer); - } - -@@ -178,7 +178,7 @@ void wg_peer_remove_all(struct wg_device - peer_make_dead(peer); - list_add_tail(&peer->peer_list, &dead_peers); - } -- synchronize_rcu(); -+ synchronize_net(); - list_for_each_entry_safe(peer, temp, &dead_peers, peer_list) - peer_remove_after_dead(peer); - } ---- a/drivers/net/wireguard/socket.c -+++ b/drivers/net/wireguard/socket.c -@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device * - if (new4) - wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); - mutex_unlock(&wg->socket_update_lock); -- synchronize_rcu(); -+ synchronize_net(); - sock_free(old4); - sock_free(old6); - } diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0131-wireguard-peer-allocate-in-kmem_cache.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0131-wireguard-peer-allocate-in-kmem_cache.patch deleted file mode 100644 index 32ae32703..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0131-wireguard-peer-allocate-in-kmem_cache.patch +++ /dev/null @@ -1,125 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 4 Jun 2021 17:17:34 +0200 -Subject: [PATCH] wireguard: peer: allocate in kmem_cache - -commit a4e9f8e3287c9eb6bf70df982870980dd3341863 upstream. - -With deployments having upwards of 600k peers now, this somewhat heavy -structure could benefit from more fine-grained allocations. -Specifically, instead of using a 2048-byte slab for a 1544-byte object, -we can now use 1544-byte objects directly, thus saving almost 25% -per-peer, or with 600k peers, that's a savings of 303 MiB. This also -makes wireguard's memory usage more transparent in tools like slabtop -and /proc/slabinfo. 
- -Fixes: 8b5553ace83c ("wireguard: queueing: get rid of per-peer ring buffers") -Suggested-by: Arnd Bergmann -Suggested-by: Matthew Wilcox -Cc: stable@vger.kernel.org -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/main.c | 7 +++++++ - drivers/net/wireguard/peer.c | 21 +++++++++++++++++---- - drivers/net/wireguard/peer.h | 3 +++ - 3 files changed, 27 insertions(+), 4 deletions(-) - ---- a/drivers/net/wireguard/main.c -+++ b/drivers/net/wireguard/main.c -@@ -28,6 +28,10 @@ static int __init mod_init(void) - #endif - wg_noise_init(); - -+ ret = wg_peer_init(); -+ if (ret < 0) -+ goto err_peer; -+ - ret = wg_device_init(); - if (ret < 0) - goto err_device; -@@ -44,6 +48,8 @@ static int __init mod_init(void) - err_netlink: - wg_device_uninit(); - err_device: -+ wg_peer_uninit(); -+err_peer: - return ret; - } - -@@ -51,6 +57,7 @@ static void __exit mod_exit(void) - { - wg_genetlink_uninit(); - wg_device_uninit(); -+ wg_peer_uninit(); - } - - module_init(mod_init); ---- a/drivers/net/wireguard/peer.c -+++ b/drivers/net/wireguard/peer.c -@@ -15,6 +15,7 @@ - #include - #include - -+static struct kmem_cache *peer_cache; - static atomic64_t peer_counter = ATOMIC64_INIT(0); - - struct wg_peer *wg_peer_create(struct wg_device *wg, -@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg - if (wg->num_peers >= MAX_PEERS_PER_DEVICE) - return ERR_PTR(ret); - -- peer = kzalloc(sizeof(*peer), GFP_KERNEL); -+ peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL); - if (unlikely(!peer)) - return ERR_PTR(ret); -- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) -+ if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))) - goto err; - - peer->device = wg; -@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg - return peer; - - err: -- kfree(peer); -+ kmem_cache_free(peer_cache, peer); - return ERR_PTR(ret); - } - -@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head - /* The 
final zeroing takes care of clearing any remaining handshake key - * material and other potentially sensitive information. - */ -- kzfree(peer); -+ memzero_explicit(peer, sizeof(*peer)); -+ kmem_cache_free(peer_cache, peer); - } - - static void kref_release(struct kref *refcount) -@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer) - return; - kref_put(&peer->refcount, kref_release); - } -+ -+int __init wg_peer_init(void) -+{ -+ peer_cache = KMEM_CACHE(wg_peer, 0); -+ return peer_cache ? 0 : -ENOMEM; -+} -+ -+void wg_peer_uninit(void) -+{ -+ kmem_cache_destroy(peer_cache); -+} ---- a/drivers/net/wireguard/peer.h -+++ b/drivers/net/wireguard/peer.h -@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer); - void wg_peer_remove(struct wg_peer *peer); - void wg_peer_remove_all(struct wg_device *wg); - -+int wg_peer_init(void); -+void wg_peer_uninit(void); -+ - #endif /* _WG_PEER_H */ diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0132-wireguard-allowedips-initialize-list-head-in-selftes.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0132-wireguard-allowedips-initialize-list-head-in-selftes.patch deleted file mode 100644 index ce4e5dcf5..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0132-wireguard-allowedips-initialize-list-head-in-selftes.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 4 Jun 2021 17:17:35 +0200 -Subject: [PATCH] wireguard: allowedips: initialize list head in selftest - -commit 46cfe8eee285cde465b420637507884551f5d7ca upstream. - -The randomized trie tests weren't initializing the dummy peer list head, -resulting in a NULL pointer dereference when used. Fix this by -initializing it in the randomized trie test, just like we do for the -static unit test. - -While we're at it, all of the other strings like this have the word -"self-test", so add it to the missing place here. 
- -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Cc: stable@vger.kernel.org -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/selftest/allowedips.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/drivers/net/wireguard/selftest/allowedips.c -+++ b/drivers/net/wireguard/selftest/allowedips.c -@@ -296,6 +296,7 @@ static __init bool randomized_test(void) - goto free; - } - kref_init(&peers[i]->refcount); -+ INIT_LIST_HEAD(&peers[i]->allowedips_list); - } - - mutex_lock(&mutex); -@@ -333,7 +334,7 @@ static __init bool randomized_test(void) - if (wg_allowedips_insert_v4(&t, - (struct in_addr *)mutated, - cidr, peer, &mutex) < 0) { -- pr_err("allowedips random malloc: FAIL\n"); -+ pr_err("allowedips random self-test malloc: FAIL\n"); - goto free_locked; - } - if (horrible_allowedips_insert_v4(&h, diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0133-wireguard-allowedips-remove-nodes-in-O-1.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0133-wireguard-allowedips-remove-nodes-in-O-1.patch deleted file mode 100644 index 78da24ea4..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0133-wireguard-allowedips-remove-nodes-in-O-1.patch +++ /dev/null @@ -1,237 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 4 Jun 2021 17:17:36 +0200 -Subject: [PATCH] wireguard: allowedips: remove nodes in O(1) - -commit f634f418c227c912e7ea95a3299efdc9b10e4022 upstream. - -Previously, deleting peers would require traversing the entire trie in -order to rebalance nodes and safely free them. This meant that removing -1000 peers from a trie with a half million nodes would take an extremely -long time, during which we're holding the rtnl lock. 
Large-scale users -were reporting 200ms latencies added to the networking stack as a whole -every time their userspace software would queue up significant removals. -That's a serious situation. - -This commit fixes that by maintaining a double pointer to the parent's -bit pointer for each node, and then using the already existing node list -belonging to each peer to go directly to the node, fix up its pointers, -and free it with RCU. This means removal is O(1) instead of O(n), and we -don't use gobs of stack. - -The removal algorithm has the same downside as the code that it fixes: -it won't collapse needlessly long runs of fillers. We can enhance that -in the future if it ever becomes a problem. This commit documents that -limitation with a TODO comment in code, a small but meaningful -improvement over the prior situation. - -Currently the biggest flaw, which the next commit addresses, is that -because this increases the node size on 64-bit machines from 60 bytes to -68 bytes. 60 rounds up to 64, but 68 rounds up to 128. So we wind up -using twice as much memory per node, because of power-of-two -allocations, which is a big bummer. We'll need to figure something out -there. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Cc: stable@vger.kernel.org -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/allowedips.c | 132 ++++++++++++----------------- - drivers/net/wireguard/allowedips.h | 9 +- - 2 files changed, 57 insertions(+), 84 deletions(-) - ---- a/drivers/net/wireguard/allowedips.c -+++ b/drivers/net/wireguard/allowedips.c -@@ -66,60 +66,6 @@ static void root_remove_peer_lists(struc - } - } - --static void walk_remove_by_peer(struct allowedips_node __rcu **top, -- struct wg_peer *peer, struct mutex *lock) --{ --#define REF(p) rcu_access_pointer(p) --#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock)) --#define PUSH(p) ({ \ -- WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \ -- stack[len++] = p; \ -- }) -- -- struct allowedips_node __rcu **stack[128], **nptr; -- struct allowedips_node *node, *prev; -- unsigned int len; -- -- if (unlikely(!peer || !REF(*top))) -- return; -- -- for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) { -- nptr = stack[len - 1]; -- node = DEREF(nptr); -- if (!node) { -- --len; -- continue; -- } -- if (!prev || REF(prev->bit[0]) == node || -- REF(prev->bit[1]) == node) { -- if (REF(node->bit[0])) -- PUSH(&node->bit[0]); -- else if (REF(node->bit[1])) -- PUSH(&node->bit[1]); -- } else if (REF(node->bit[0]) == prev) { -- if (REF(node->bit[1])) -- PUSH(&node->bit[1]); -- } else { -- if (rcu_dereference_protected(node->peer, -- lockdep_is_held(lock)) == peer) { -- RCU_INIT_POINTER(node->peer, NULL); -- list_del_init(&node->peer_list); -- if (!node->bit[0] || !node->bit[1]) { -- rcu_assign_pointer(*nptr, DEREF( -- &node->bit[!REF(node->bit[0])])); -- kfree_rcu(node, rcu); -- node = DEREF(nptr); -- } -- } -- --len; -- } -- } -- --#undef REF --#undef DEREF --#undef PUSH --} -- - static unsigned int fls128(u64 a, u64 b) - { - return a ? 
fls64(a) + 64U : fls64(b); -@@ -224,6 +170,7 @@ static int add(struct allowedips_node __ - RCU_INIT_POINTER(node->peer, peer); - list_add_tail(&node->peer_list, &peer->allowedips_list); - copy_and_assign_cidr(node, key, cidr, bits); -+ rcu_assign_pointer(node->parent_bit, trie); - rcu_assign_pointer(*trie, node); - return 0; - } -@@ -243,9 +190,9 @@ static int add(struct allowedips_node __ - if (!node) { - down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); - } else { -- down = rcu_dereference_protected(CHOOSE_NODE(node, key), -- lockdep_is_held(lock)); -+ down = rcu_dereference_protected(CHOOSE_NODE(node, key), lockdep_is_held(lock)); - if (!down) { -+ rcu_assign_pointer(newnode->parent_bit, &CHOOSE_NODE(node, key)); - rcu_assign_pointer(CHOOSE_NODE(node, key), newnode); - return 0; - } -@@ -254,29 +201,37 @@ static int add(struct allowedips_node __ - parent = node; - - if (newnode->cidr == cidr) { -+ rcu_assign_pointer(down->parent_bit, &CHOOSE_NODE(newnode, down->bits)); - rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down); -- if (!parent) -+ if (!parent) { -+ rcu_assign_pointer(newnode->parent_bit, trie); - rcu_assign_pointer(*trie, newnode); -- else -- rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits), -- newnode); -- } else { -- node = kzalloc(sizeof(*node), GFP_KERNEL); -- if (unlikely(!node)) { -- list_del(&newnode->peer_list); -- kfree(newnode); -- return -ENOMEM; -+ } else { -+ rcu_assign_pointer(newnode->parent_bit, &CHOOSE_NODE(parent, newnode->bits)); -+ rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits), newnode); - } -- INIT_LIST_HEAD(&node->peer_list); -- copy_and_assign_cidr(node, newnode->bits, cidr, bits); -+ return 0; -+ } -+ -+ node = kzalloc(sizeof(*node), GFP_KERNEL); -+ if (unlikely(!node)) { -+ list_del(&newnode->peer_list); -+ kfree(newnode); -+ return -ENOMEM; -+ } -+ INIT_LIST_HEAD(&node->peer_list); -+ copy_and_assign_cidr(node, newnode->bits, cidr, bits); - -- rcu_assign_pointer(CHOOSE_NODE(node, down->bits), 
down); -- rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode); -- if (!parent) -- rcu_assign_pointer(*trie, node); -- else -- rcu_assign_pointer(CHOOSE_NODE(parent, node->bits), -- node); -+ rcu_assign_pointer(down->parent_bit, &CHOOSE_NODE(node, down->bits)); -+ rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down); -+ rcu_assign_pointer(newnode->parent_bit, &CHOOSE_NODE(node, newnode->bits)); -+ rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode); -+ if (!parent) { -+ rcu_assign_pointer(node->parent_bit, trie); -+ rcu_assign_pointer(*trie, node); -+ } else { -+ rcu_assign_pointer(node->parent_bit, &CHOOSE_NODE(parent, node->bits)); -+ rcu_assign_pointer(CHOOSE_NODE(parent, node->bits), node); - } - return 0; - } -@@ -335,9 +290,30 @@ int wg_allowedips_insert_v6(struct allow - void wg_allowedips_remove_by_peer(struct allowedips *table, - struct wg_peer *peer, struct mutex *lock) - { -+ struct allowedips_node *node, *child, *tmp; -+ -+ if (list_empty(&peer->allowedips_list)) -+ return; - ++table->seq; -- walk_remove_by_peer(&table->root4, peer, lock); -- walk_remove_by_peer(&table->root6, peer, lock); -+ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) { -+ list_del_init(&node->peer_list); -+ RCU_INIT_POINTER(node->peer, NULL); -+ if (node->bit[0] && node->bit[1]) -+ continue; -+ child = rcu_dereference_protected( -+ node->bit[!rcu_access_pointer(node->bit[0])], -+ lockdep_is_held(lock)); -+ if (child) -+ child->parent_bit = node->parent_bit; -+ *rcu_dereference_protected(node->parent_bit, lockdep_is_held(lock)) = child; -+ kfree_rcu(node, rcu); -+ -+ /* TODO: Note that we currently don't walk up and down in order to -+ * free any potential filler nodes. This means that this function -+ * doesn't free up as much as it could, which could be revisited -+ * at some point. 
-+ */ -+ } - } - - int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr) ---- a/drivers/net/wireguard/allowedips.h -+++ b/drivers/net/wireguard/allowedips.h -@@ -15,14 +15,11 @@ struct wg_peer; - struct allowedips_node { - struct wg_peer __rcu *peer; - struct allowedips_node __rcu *bit[2]; -- /* While it may seem scandalous that we waste space for v4, -- * we're alloc'ing to the nearest power of 2 anyway, so this -- * doesn't actually make a difference. -- */ -- u8 bits[16] __aligned(__alignof(u64)); - u8 cidr, bit_at_a, bit_at_b, bitlen; -+ u8 bits[16] __aligned(__alignof(u64)); - -- /* Keep rarely used list at bottom to be beyond cache line. */ -+ /* Keep rarely used members at bottom to be beyond cache line. */ -+ struct allowedips_node *__rcu *parent_bit; /* XXX: this puts us at 68->128 bytes instead of 60->64 bytes!! */ - union { - struct list_head peer_list; - struct rcu_head rcu; diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0134-wireguard-allowedips-allocate-nodes-in-kmem_cache.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0134-wireguard-allowedips-allocate-nodes-in-kmem_cache.patch deleted file mode 100644 index 65b31b05f..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0134-wireguard-allowedips-allocate-nodes-in-kmem_cache.patch +++ /dev/null @@ -1,173 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 4 Jun 2021 17:17:37 +0200 -Subject: [PATCH] wireguard: allowedips: allocate nodes in kmem_cache - -commit dc680de28ca849dfe589dc15ac56d22505f0ef11 upstream. - -The previous commit moved from O(n) to O(1) for removal, but in the -process introduced an additional pointer member to a struct that -increased the size from 60 to 68 bytes, putting nodes in the 128-byte -slab. With deployed systems having as many as 2 million nodes, this -represents a significant doubling in memory usage (128 MiB -> 256 MiB). 
-Fix this by using our own kmem_cache, that's sized exactly right. This -also makes wireguard's memory usage more transparent in tools like -slabtop and /proc/slabinfo. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Suggested-by: Arnd Bergmann -Suggested-by: Matthew Wilcox -Cc: stable@vger.kernel.org -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. Donenfeld ---- - drivers/net/wireguard/allowedips.c | 31 ++++++++++++++++++++++++------ - drivers/net/wireguard/allowedips.h | 5 ++++- - drivers/net/wireguard/main.c | 10 +++++++++- - 3 files changed, 38 insertions(+), 8 deletions(-) - ---- a/drivers/net/wireguard/allowedips.c -+++ b/drivers/net/wireguard/allowedips.c -@@ -6,6 +6,8 @@ - #include "allowedips.h" - #include "peer.h" - -+static struct kmem_cache *node_cache; -+ - static void swap_endian(u8 *dst, const u8 *src, u8 bits) - { - if (bits == 32) { -@@ -40,6 +42,11 @@ static void push_rcu(struct allowedips_n - } - } - -+static void node_free_rcu(struct rcu_head *rcu) -+{ -+ kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu)); -+} -+ - static void root_free_rcu(struct rcu_head *rcu) - { - struct allowedips_node *node, *stack[128] = { -@@ -49,7 +56,7 @@ static void root_free_rcu(struct rcu_hea - while (len > 0 && (node = stack[--len])) { - push_rcu(stack, node->bit[0], &len); - push_rcu(stack, node->bit[1], &len); -- kfree(node); -+ kmem_cache_free(node_cache, node); - } - } - -@@ -164,7 +171,7 @@ static int add(struct allowedips_node __ - return -EINVAL; - - if (!rcu_access_pointer(*trie)) { -- node = kzalloc(sizeof(*node), GFP_KERNEL); -+ node = kmem_cache_zalloc(node_cache, GFP_KERNEL); - if (unlikely(!node)) - return -ENOMEM; - RCU_INIT_POINTER(node->peer, peer); -@@ -180,7 +187,7 @@ static int add(struct allowedips_node __ - return 0; - } - -- newnode = kzalloc(sizeof(*newnode), GFP_KERNEL); -+ newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL); - if (unlikely(!newnode)) - 
return -ENOMEM; - RCU_INIT_POINTER(newnode->peer, peer); -@@ -213,10 +220,10 @@ static int add(struct allowedips_node __ - return 0; - } - -- node = kzalloc(sizeof(*node), GFP_KERNEL); -+ node = kmem_cache_zalloc(node_cache, GFP_KERNEL); - if (unlikely(!node)) { - list_del(&newnode->peer_list); -- kfree(newnode); -+ kmem_cache_free(node_cache, newnode); - return -ENOMEM; - } - INIT_LIST_HEAD(&node->peer_list); -@@ -306,7 +313,7 @@ void wg_allowedips_remove_by_peer(struct - if (child) - child->parent_bit = node->parent_bit; - *rcu_dereference_protected(node->parent_bit, lockdep_is_held(lock)) = child; -- kfree_rcu(node, rcu); -+ call_rcu(&node->rcu, node_free_rcu); - - /* TODO: Note that we currently don't walk up and down in order to - * free any potential filler nodes. This means that this function -@@ -350,4 +357,16 @@ struct wg_peer *wg_allowedips_lookup_src - return NULL; - } - -+int __init wg_allowedips_slab_init(void) -+{ -+ node_cache = KMEM_CACHE(allowedips_node, 0); -+ return node_cache ? 0 : -ENOMEM; -+} -+ -+void wg_allowedips_slab_uninit(void) -+{ -+ rcu_barrier(); -+ kmem_cache_destroy(node_cache); -+} -+ - #include "selftest/allowedips.c" ---- a/drivers/net/wireguard/allowedips.h -+++ b/drivers/net/wireguard/allowedips.h -@@ -19,7 +19,7 @@ struct allowedips_node { - u8 bits[16] __aligned(__alignof(u64)); - - /* Keep rarely used members at bottom to be beyond cache line. */ -- struct allowedips_node *__rcu *parent_bit; /* XXX: this puts us at 68->128 bytes instead of 60->64 bytes!! 
*/ -+ struct allowedips_node *__rcu *parent_bit; - union { - struct list_head peer_list; - struct rcu_head rcu; -@@ -53,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src - bool wg_allowedips_selftest(void); - #endif - -+int wg_allowedips_slab_init(void); -+void wg_allowedips_slab_uninit(void); -+ - #endif /* _WG_ALLOWEDIPS_H */ ---- a/drivers/net/wireguard/main.c -+++ b/drivers/net/wireguard/main.c -@@ -21,10 +21,15 @@ static int __init mod_init(void) - { - int ret; - -+ ret = wg_allowedips_slab_init(); -+ if (ret < 0) -+ goto err_allowedips; -+ - #ifdef DEBUG -+ ret = -ENOTRECOVERABLE; - if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() || - !wg_ratelimiter_selftest()) -- return -ENOTRECOVERABLE; -+ goto err_peer; - #endif - wg_noise_init(); - -@@ -50,6 +55,8 @@ err_netlink: - err_device: - wg_peer_uninit(); - err_peer: -+ wg_allowedips_slab_uninit(); -+err_allowedips: - return ret; - } - -@@ -58,6 +65,7 @@ static void __exit mod_exit(void) - wg_genetlink_uninit(); - wg_device_uninit(); - wg_peer_uninit(); -+ wg_allowedips_slab_uninit(); - } - - module_init(mod_init); diff --git a/feeds/ipq807x/ipq807x/patches/080-wireguard-0135-wireguard-allowedips-free-empty-intermediate-nodes-w.patch b/feeds/ipq807x/ipq807x/patches/080-wireguard-0135-wireguard-allowedips-free-empty-intermediate-nodes-w.patch deleted file mode 100644 index c044ad25a..000000000 --- a/feeds/ipq807x/ipq807x/patches/080-wireguard-0135-wireguard-allowedips-free-empty-intermediate-nodes-w.patch +++ /dev/null @@ -1,521 +0,0 @@ -From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 -From: "Jason A. Donenfeld" -Date: Fri, 4 Jun 2021 17:17:38 +0200 -Subject: [PATCH] wireguard: allowedips: free empty intermediate nodes when - removing single node - -commit bf7b042dc62a31f66d3a41dd4dfc7806f267b307 upstream. - -When removing single nodes, it's possible that that node's parent is an -empty intermediate node, in which case, it too should be removed. 
-Otherwise the trie fills up and never is fully emptied, leading to -gradual memory leaks over time for tries that are modified often. There -was originally code to do this, but was removed during refactoring in -2016 and never reworked. Now that we have proper parent pointers from -the previous commits, we can implement this properly. - -In order to reduce branching and expensive comparisons, we want to keep -the double pointer for parent assignment (which lets us easily chain up -to the root), but we still need to actually get the parent's base -address. So encode the bit number into the last two bits of the pointer, -and pack and unpack it as needed. This is a little bit clumsy but is the -fastest and less memory wasteful of the compromises. Note that we align -the root struct here to a minimum of 4, because it's embedded into a -larger struct, and we're relying on having the bottom two bits for our -flag, which would only be 16-bit aligned on m68k. - -The existing macro-based helpers were a bit unwieldy for adding the bit -packing to, so this commit replaces them with safer and clearer ordinary -functions. - -We add a test to the randomized/fuzzer part of the selftests, to free -the randomized tries by-peer, refuzz it, and repeat, until it's supposed -to be empty, and then then see if that actually resulted in the whole -thing being emptied. That combined with kmemcheck should hopefully make -sure this commit is doing what it should. Along the way this resulted in -various other cleanups of the tests and fixes for recent graphviz. - -Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") -Cc: stable@vger.kernel.org -Signed-off-by: Jason A. Donenfeld -Signed-off-by: David S. Miller -Signed-off-by: Jason A. 
Donenfeld ---- - drivers/net/wireguard/allowedips.c | 102 ++++++------ - drivers/net/wireguard/allowedips.h | 4 +- - drivers/net/wireguard/selftest/allowedips.c | 162 ++++++++++---------- - 3 files changed, 137 insertions(+), 131 deletions(-) - ---- a/drivers/net/wireguard/allowedips.c -+++ b/drivers/net/wireguard/allowedips.c -@@ -30,8 +30,11 @@ static void copy_and_assign_cidr(struct - node->bitlen = bits; - memcpy(node->bits, src, bits / 8U); - } --#define CHOOSE_NODE(parent, key) \ -- parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] -+ -+static inline u8 choose(struct allowedips_node *node, const u8 *key) -+{ -+ return (key[node->bit_at_a] >> node->bit_at_b) & 1; -+} - - static void push_rcu(struct allowedips_node **stack, - struct allowedips_node __rcu *p, unsigned int *len) -@@ -112,7 +115,7 @@ static struct allowedips_node *find_node - found = node; - if (node->cidr == bits) - break; -- node = rcu_dereference_bh(CHOOSE_NODE(node, key)); -+ node = rcu_dereference_bh(node->bit[choose(node, key)]); - } - return found; - } -@@ -144,8 +147,7 @@ static bool node_placement(struct allowe - u8 cidr, u8 bits, struct allowedips_node **rnode, - struct mutex *lock) - { -- struct allowedips_node *node = rcu_dereference_protected(trie, -- lockdep_is_held(lock)); -+ struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock)); - struct allowedips_node *parent = NULL; - bool exact = false; - -@@ -155,13 +157,24 @@ static bool node_placement(struct allowe - exact = true; - break; - } -- node = rcu_dereference_protected(CHOOSE_NODE(parent, key), -- lockdep_is_held(lock)); -+ node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock)); - } - *rnode = parent; - return exact; - } - -+static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node) -+{ -+ node->parent_bit_packed = (unsigned long)parent | bit; -+ rcu_assign_pointer(*parent, node); -+} -+ -+static inline void 
choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node) -+{ -+ u8 bit = choose(parent, node->bits); -+ connect_node(&parent->bit[bit], bit, node); -+} -+ - static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, - u8 cidr, struct wg_peer *peer, struct mutex *lock) - { -@@ -177,8 +190,7 @@ static int add(struct allowedips_node __ - RCU_INIT_POINTER(node->peer, peer); - list_add_tail(&node->peer_list, &peer->allowedips_list); - copy_and_assign_cidr(node, key, cidr, bits); -- rcu_assign_pointer(node->parent_bit, trie); -- rcu_assign_pointer(*trie, node); -+ connect_node(trie, 2, node); - return 0; - } - if (node_placement(*trie, key, cidr, bits, &node, lock)) { -@@ -197,10 +209,10 @@ static int add(struct allowedips_node __ - if (!node) { - down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); - } else { -- down = rcu_dereference_protected(CHOOSE_NODE(node, key), lockdep_is_held(lock)); -+ const u8 bit = choose(node, key); -+ down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock)); - if (!down) { -- rcu_assign_pointer(newnode->parent_bit, &CHOOSE_NODE(node, key)); -- rcu_assign_pointer(CHOOSE_NODE(node, key), newnode); -+ connect_node(&node->bit[bit], bit, newnode); - return 0; - } - } -@@ -208,15 +220,11 @@ static int add(struct allowedips_node __ - parent = node; - - if (newnode->cidr == cidr) { -- rcu_assign_pointer(down->parent_bit, &CHOOSE_NODE(newnode, down->bits)); -- rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down); -- if (!parent) { -- rcu_assign_pointer(newnode->parent_bit, trie); -- rcu_assign_pointer(*trie, newnode); -- } else { -- rcu_assign_pointer(newnode->parent_bit, &CHOOSE_NODE(parent, newnode->bits)); -- rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits), newnode); -- } -+ choose_and_connect_node(newnode, down); -+ if (!parent) -+ connect_node(trie, 2, newnode); -+ else -+ choose_and_connect_node(parent, newnode); - return 0; - } - -@@ -229,17 +237,12 @@ static 
int add(struct allowedips_node __ - INIT_LIST_HEAD(&node->peer_list); - copy_and_assign_cidr(node, newnode->bits, cidr, bits); - -- rcu_assign_pointer(down->parent_bit, &CHOOSE_NODE(node, down->bits)); -- rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down); -- rcu_assign_pointer(newnode->parent_bit, &CHOOSE_NODE(node, newnode->bits)); -- rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode); -- if (!parent) { -- rcu_assign_pointer(node->parent_bit, trie); -- rcu_assign_pointer(*trie, node); -- } else { -- rcu_assign_pointer(node->parent_bit, &CHOOSE_NODE(parent, node->bits)); -- rcu_assign_pointer(CHOOSE_NODE(parent, node->bits), node); -- } -+ choose_and_connect_node(node, down); -+ choose_and_connect_node(node, newnode); -+ if (!parent) -+ connect_node(trie, 2, node); -+ else -+ choose_and_connect_node(parent, node); - return 0; - } - -@@ -297,7 +300,8 @@ int wg_allowedips_insert_v6(struct allow - void wg_allowedips_remove_by_peer(struct allowedips *table, - struct wg_peer *peer, struct mutex *lock) - { -- struct allowedips_node *node, *child, *tmp; -+ struct allowedips_node *node, *child, **parent_bit, *parent, *tmp; -+ bool free_parent; - - if (list_empty(&peer->allowedips_list)) - return; -@@ -307,19 +311,29 @@ void wg_allowedips_remove_by_peer(struct - RCU_INIT_POINTER(node->peer, NULL); - if (node->bit[0] && node->bit[1]) - continue; -- child = rcu_dereference_protected( -- node->bit[!rcu_access_pointer(node->bit[0])], -- lockdep_is_held(lock)); -+ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])], -+ lockdep_is_held(lock)); - if (child) -- child->parent_bit = node->parent_bit; -- *rcu_dereference_protected(node->parent_bit, lockdep_is_held(lock)) = child; -+ child->parent_bit_packed = node->parent_bit_packed; -+ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL); -+ *parent_bit = child; -+ parent = (void *)parent_bit - -+ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]); -+ 
free_parent = !rcu_access_pointer(node->bit[0]) && -+ !rcu_access_pointer(node->bit[1]) && -+ (node->parent_bit_packed & 3) <= 1 && -+ !rcu_access_pointer(parent->peer); -+ if (free_parent) -+ child = rcu_dereference_protected( -+ parent->bit[!(node->parent_bit_packed & 1)], -+ lockdep_is_held(lock)); - call_rcu(&node->rcu, node_free_rcu); -- -- /* TODO: Note that we currently don't walk up and down in order to -- * free any potential filler nodes. This means that this function -- * doesn't free up as much as it could, which could be revisited -- * at some point. -- */ -+ if (!free_parent) -+ continue; -+ if (child) -+ child->parent_bit_packed = parent->parent_bit_packed; -+ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child; -+ call_rcu(&parent->rcu, node_free_rcu); - } - } - ---- a/drivers/net/wireguard/allowedips.h -+++ b/drivers/net/wireguard/allowedips.h -@@ -19,7 +19,7 @@ struct allowedips_node { - u8 bits[16] __aligned(__alignof(u64)); - - /* Keep rarely used members at bottom to be beyond cache line. */ -- struct allowedips_node *__rcu *parent_bit; -+ unsigned long parent_bit_packed; - union { - struct list_head peer_list; - struct rcu_head rcu; -@@ -30,7 +30,7 @@ struct allowedips { - struct allowedips_node __rcu *root4; - struct allowedips_node __rcu *root6; - u64 seq; --}; -+} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. 
*/ - - void wg_allowedips_init(struct allowedips *table); - void wg_allowedips_free(struct allowedips *table, struct mutex *mutex); ---- a/drivers/net/wireguard/selftest/allowedips.c -+++ b/drivers/net/wireguard/selftest/allowedips.c -@@ -19,32 +19,22 @@ - - #include - --static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits, -- u8 cidr) --{ -- swap_endian(dst, src, bits); -- memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8); -- if (cidr) -- dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8); --} -- - static __init void print_node(struct allowedips_node *node, u8 bits) - { - char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n"; -- char *fmt_declaration = KERN_DEBUG -- "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; -+ char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; -+ u8 ip1[16], ip2[16], cidr1, cidr2; - char *style = "dotted"; -- u8 ip1[16], ip2[16]; - u32 color = 0; - -+ if (node == NULL) -+ return; - if (bits == 32) { - fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n"; -- fmt_declaration = KERN_DEBUG -- "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; -+ fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; - } else if (bits == 128) { - fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" -> \"%pI6/%d\";\n"; -- fmt_declaration = KERN_DEBUG -- "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; -+ fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; - } - if (node->peer) { - hsiphash_key_t key = { { 0 } }; -@@ -55,24 +45,20 @@ static __init void print_node(struct all - hsiphash_1u32(0xabad1dea, &key) % 200; - style = "bold"; - } -- swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr); -- printk(fmt_declaration, ip1, node->cidr, style, color); -+ wg_allowedips_read_node(node, ip1, &cidr1); -+ printk(fmt_declaration, ip1, cidr1, style, color); - if (node->bit[0]) { -- swap_endian_and_apply_cidr(ip2, -- 
rcu_dereference_raw(node->bit[0])->bits, bits, -- node->cidr); -- printk(fmt_connection, ip1, node->cidr, ip2, -- rcu_dereference_raw(node->bit[0])->cidr); -- print_node(rcu_dereference_raw(node->bit[0]), bits); -+ wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2); -+ printk(fmt_connection, ip1, cidr1, ip2, cidr2); - } - if (node->bit[1]) { -- swap_endian_and_apply_cidr(ip2, -- rcu_dereference_raw(node->bit[1])->bits, -- bits, node->cidr); -- printk(fmt_connection, ip1, node->cidr, ip2, -- rcu_dereference_raw(node->bit[1])->cidr); -- print_node(rcu_dereference_raw(node->bit[1]), bits); -+ wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2); -+ printk(fmt_connection, ip1, cidr1, ip2, cidr2); - } -+ if (node->bit[0]) -+ print_node(rcu_dereference_raw(node->bit[0]), bits); -+ if (node->bit[1]) -+ print_node(rcu_dereference_raw(node->bit[1]), bits); - } - - static __init void print_tree(struct allowedips_node __rcu *top, u8 bits) -@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr - { - union nf_inet_addr mask; - -- memset(&mask, 0x00, 128 / 8); -- memset(&mask, 0xff, cidr / 8); -+ memset(&mask, 0, sizeof(mask)); -+ memset(&mask.all, 0xff, cidr / 8); - if (cidr % 32) - mask.all[cidr / 32] = (__force u32)htonl( - (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL); -@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allow - } - - static __init inline bool --horrible_match_v4(const struct horrible_allowedips_node *node, -- struct in_addr *ip) -+horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip) - { - return (ip->s_addr & node->mask.ip) == node->ip.ip; - } - - static __init inline bool --horrible_match_v6(const struct horrible_allowedips_node *node, -- struct in6_addr *ip) -+horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip) - { -- return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == -- node->ip.ip6[0] && -- (ip->in6_u.u6_addr32[1] & 
node->mask.ip6[1]) == -- node->ip.ip6[1] && -- (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == -- node->ip.ip6[2] && -+ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] && -+ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] && -+ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] && - (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3]; - } - - static __init void --horrible_insert_ordered(struct horrible_allowedips *table, -- struct horrible_allowedips_node *node) -+horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node) - { - struct horrible_allowedips_node *other = NULL, *where = NULL; - u8 my_cidr = horrible_mask_to_cidr(node->mask); - - hlist_for_each_entry(other, &table->head, table) { -- if (!memcmp(&other->mask, &node->mask, -- sizeof(union nf_inet_addr)) && -- !memcmp(&other->ip, &node->ip, -- sizeof(union nf_inet_addr)) && -- other->ip_version == node->ip_version) { -+ if (other->ip_version == node->ip_version && -+ !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) && -+ !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) { - other->value = node->value; - kfree(node); - return; - } -+ } -+ hlist_for_each_entry(other, &table->head, table) { - where = other; - if (horrible_mask_to_cidr(other->mask) <= my_cidr) - break; -@@ -201,8 +181,7 @@ static __init int - horrible_allowedips_insert_v4(struct horrible_allowedips *table, - struct in_addr *ip, u8 cidr, void *value) - { -- struct horrible_allowedips_node *node = kzalloc(sizeof(*node), -- GFP_KERNEL); -+ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); - - if (unlikely(!node)) - return -ENOMEM; -@@ -219,8 +198,7 @@ static __init int - horrible_allowedips_insert_v6(struct horrible_allowedips *table, - struct in6_addr *ip, u8 cidr, void *value) - { -- struct horrible_allowedips_node *node = kzalloc(sizeof(*node), -- GFP_KERNEL); -+ struct 
horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); - - if (unlikely(!node)) - return -ENOMEM; -@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct hor - } - - static __init void * --horrible_allowedips_lookup_v4(struct horrible_allowedips *table, -- struct in_addr *ip) -+horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip) - { - struct horrible_allowedips_node *node; -- void *ret = NULL; - - hlist_for_each_entry(node, &table->head, table) { -- if (node->ip_version != 4) -- continue; -- if (horrible_match_v4(node, ip)) { -- ret = node->value; -- break; -- } -+ if (node->ip_version == 4 && horrible_match_v4(node, ip)) -+ return node->value; - } -- return ret; -+ return NULL; - } - - static __init void * --horrible_allowedips_lookup_v6(struct horrible_allowedips *table, -- struct in6_addr *ip) -+horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip) - { - struct horrible_allowedips_node *node; -- void *ret = NULL; - - hlist_for_each_entry(node, &table->head, table) { -- if (node->ip_version != 6) -+ if (node->ip_version == 6 && horrible_match_v6(node, ip)) -+ return node->value; -+ } -+ return NULL; -+} -+ -+ -+static __init void -+horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value) -+{ -+ struct horrible_allowedips_node *node; -+ struct hlist_node *h; -+ -+ hlist_for_each_entry_safe(node, h, &table->head, table) { -+ if (node->value != value) - continue; -- if (horrible_match_v6(node, ip)) { -- ret = node->value; -- break; -- } -+ hlist_del(&node->table); -+ kfree(node); - } -- return ret; -+ - } - - static __init bool randomized_test(void) -@@ -397,23 +379,33 @@ static __init bool randomized_test(void) - print_tree(t.root6, 128); - } - -- for (i = 0; i < NUM_QUERIES; ++i) { -- prandom_bytes(ip, 4); -- if (lookup(t.root4, 32, ip) != -- horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { -- pr_err("allowedips random self-test: FAIL\n"); -- 
goto free; -+ for (j = 0;; ++j) { -+ for (i = 0; i < NUM_QUERIES; ++i) { -+ prandom_bytes(ip, 4); -+ if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { -+ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip); -+ pr_err("allowedips random v4 self-test: FAIL\n"); -+ goto free; -+ } -+ prandom_bytes(ip, 16); -+ if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { -+ pr_err("allowedips random v6 self-test: FAIL\n"); -+ goto free; -+ } - } -+ if (j >= NUM_PEERS) -+ break; -+ mutex_lock(&mutex); -+ wg_allowedips_remove_by_peer(&t, peers[j], &mutex); -+ mutex_unlock(&mutex); -+ horrible_allowedips_remove_by_value(&h, peers[j]); - } - -- for (i = 0; i < NUM_QUERIES; ++i) { -- prandom_bytes(ip, 16); -- if (lookup(t.root6, 128, ip) != -- horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { -- pr_err("allowedips random self-test: FAIL\n"); -- goto free; -- } -+ if (t.root4 || t.root6) { -+ pr_err("allowedips random self-test removal: FAIL\n"); -+ goto free; - } -+ - ret = true; - - free: diff --git a/feeds/ipq807x/ipq807x/patches/100-dts.patch b/feeds/ipq807x/ipq807x/patches/100-dts.patch deleted file mode 100644 index fe7e2234e..000000000 --- a/feeds/ipq807x/ipq807x/patches/100-dts.patch +++ /dev/null @@ -1,26 +0,0 @@ -Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/arch/arm64/boot/dts/qcom/ipq8074.dtsi -=================================================================== ---- linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac.orig/arch/arm64/boot/dts/qcom/ipq8074.dtsi -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/arch/arm64/boot/dts/qcom/ipq8074.dtsi -@@ -1587,7 +1587,7 @@ - }; - - wifi1: wifi1@c0000000 { -- compatible = "qcom,cnss-qca8074v2", "qcom,ipq8074-wifi"; -+ compatible = "qcom,ipq8074-wifi"; - reg = <0xc000000 0x2000000>; - qcom,hw-mode-id = <1>; - #ifdef __IPQ_MEM_PROFILE_256_MB__ -Index: 
linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/arch/arm64/boot/dts/qcom/ipq6018.dtsi -=================================================================== ---- linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac.orig/arch/arm64/boot/dts/qcom/ipq6018.dtsi -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/arch/arm64/boot/dts/qcom/ipq6018.dtsi -@@ -1561,7 +1561,7 @@ - }; - - wifi0: wifi@c000000 { -- compatible = "qcom,cnss-qca6018", "qcom,ipq6018-wifi"; -+ compatible = "qcom,ipq6018-wifi"; - reg = <0xc000000 0x1000000>; - qcom,hw-mode-id = <1>; - #ifdef __IPQ_MEM_PROFILE_256_MB__ diff --git a/feeds/ipq807x/ipq807x/patches/100-qrtr-ns.patch b/feeds/ipq807x/ipq807x/patches/100-qrtr-ns.patch new file mode 100644 index 000000000..850e64477 --- /dev/null +++ b/feeds/ipq807x/ipq807x/patches/100-qrtr-ns.patch @@ -0,0 +1,976 @@ +Index: linux-4.4.60/net/qrtr/ns.c +=================================================================== +--- /dev/null ++++ linux-4.4.60/net/qrtr/ns.c +@@ -0,0 +1,760 @@ ++// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause ++/* ++ * Copyright (c) 2015, Sony Mobile Communications Inc. ++ * Copyright (c) 2013, The Linux Foundation. All rights reserved. ++ * Copyright (c) 2020, Linaro Ltd. 
++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include "qrtr.h" ++ ++#define CREATE_TRACE_POINTS ++#include ++ ++static RADIX_TREE(nodes, GFP_KERNEL); ++ ++static struct { ++ struct socket *sock; ++ struct sockaddr_qrtr bcast_sq; ++ struct list_head lookups; ++ struct workqueue_struct *workqueue; ++ struct work_struct work; ++ int local_node; ++} qrtr_ns; ++ ++static const char * const qrtr_ctrl_pkt_strings[] = { ++ [QRTR_TYPE_HELLO] = "hello", ++ [QRTR_TYPE_BYE] = "bye", ++ [QRTR_TYPE_NEW_SERVER] = "new-server", ++ [QRTR_TYPE_DEL_SERVER] = "del-server", ++ [QRTR_TYPE_DEL_CLIENT] = "del-client", ++ [QRTR_TYPE_RESUME_TX] = "resume-tx", ++ [QRTR_TYPE_EXIT] = "exit", ++ [QRTR_TYPE_PING] = "ping", ++ [QRTR_TYPE_NEW_LOOKUP] = "new-lookup", ++ [QRTR_TYPE_DEL_LOOKUP] = "del-lookup", ++}; ++ ++struct qrtr_server_filter { ++ unsigned int service; ++ unsigned int instance; ++ unsigned int ifilter; ++}; ++ ++struct qrtr_lookup { ++ unsigned int service; ++ unsigned int instance; ++ ++ struct sockaddr_qrtr sq; ++ struct list_head li; ++}; ++ ++struct qrtr_server { ++ unsigned int service; ++ unsigned int instance; ++ ++ unsigned int node; ++ unsigned int port; ++ ++ struct list_head qli; ++}; ++ ++struct qrtr_node { ++ unsigned int id; ++ struct radix_tree_root servers; ++}; ++ ++static struct qrtr_node *node_get(unsigned int node_id) ++{ ++ struct qrtr_node *node; ++ ++ node = radix_tree_lookup(&nodes, node_id); ++ if (node) ++ return node; ++ ++ /* If node didn't exist, allocate and insert it to the tree */ ++ node = kzalloc(sizeof(*node), GFP_KERNEL); ++ if (!node) ++ return NULL; ++ ++ node->id = node_id; ++ ++ radix_tree_insert(&nodes, node_id, node); ++ ++ return node; ++} ++ ++static int server_match(const struct qrtr_server *srv, ++ const struct qrtr_server_filter *f) ++{ ++ unsigned int ifilter = f->ifilter; ++ ++ if (f->service != 0 && srv->service != f->service) ++ return 0; ++ if (!ifilter && f->instance) ++ ifilter = ~0; ++ ++ return (srv->instance & 
ifilter) == f->instance; ++} ++ ++static int service_announce_new(struct sockaddr_qrtr *dest, ++ struct qrtr_server *srv) ++{ ++ struct qrtr_ctrl_pkt pkt; ++ struct msghdr msg = { }; ++ struct kvec iv; ++ ++ trace_qrtr_ns_service_announce_new(srv->service, srv->instance, ++ srv->node, srv->port); ++ ++ iv.iov_base = &pkt; ++ iv.iov_len = sizeof(pkt); ++ ++ memset(&pkt, 0, sizeof(pkt)); ++ pkt.cmd = cpu_to_le32(QRTR_TYPE_NEW_SERVER); ++ pkt.server.service = cpu_to_le32(srv->service); ++ pkt.server.instance = cpu_to_le32(srv->instance); ++ pkt.server.node = cpu_to_le32(srv->node); ++ pkt.server.port = cpu_to_le32(srv->port); ++ ++ msg.msg_name = (struct sockaddr *)dest; ++ msg.msg_namelen = sizeof(*dest); ++ ++ return kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); ++} ++ ++static int service_announce_del(struct sockaddr_qrtr *dest, ++ struct qrtr_server *srv) ++{ ++ struct qrtr_ctrl_pkt pkt; ++ struct msghdr msg = { }; ++ struct kvec iv; ++ int ret; ++ ++ trace_qrtr_ns_service_announce_del(srv->service, srv->instance, ++ srv->node, srv->port); ++ ++ iv.iov_base = &pkt; ++ iv.iov_len = sizeof(pkt); ++ ++ memset(&pkt, 0, sizeof(pkt)); ++ pkt.cmd = cpu_to_le32(QRTR_TYPE_DEL_SERVER); ++ pkt.server.service = cpu_to_le32(srv->service); ++ pkt.server.instance = cpu_to_le32(srv->instance); ++ pkt.server.node = cpu_to_le32(srv->node); ++ pkt.server.port = cpu_to_le32(srv->port); ++ ++ msg.msg_name = (struct sockaddr *)dest; ++ msg.msg_namelen = sizeof(*dest); ++ ++ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); ++ if (ret < 0) ++ pr_err("failed to announce del service\n"); ++ ++ return ret; ++} ++ ++static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv, ++ bool new) ++{ ++ struct qrtr_ctrl_pkt pkt; ++ struct msghdr msg = { }; ++ struct kvec iv; ++ int ret; ++ ++ iv.iov_base = &pkt; ++ iv.iov_len = sizeof(pkt); ++ ++ memset(&pkt, 0, sizeof(pkt)); ++ pkt.cmd = new ? 
cpu_to_le32(QRTR_TYPE_NEW_SERVER) : ++ cpu_to_le32(QRTR_TYPE_DEL_SERVER); ++ if (srv) { ++ pkt.server.service = cpu_to_le32(srv->service); ++ pkt.server.instance = cpu_to_le32(srv->instance); ++ pkt.server.node = cpu_to_le32(srv->node); ++ pkt.server.port = cpu_to_le32(srv->port); ++ } ++ ++ msg.msg_name = (struct sockaddr *)to; ++ msg.msg_namelen = sizeof(*to); ++ ++ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); ++ if (ret < 0) ++ pr_err("failed to send lookup notification\n"); ++} ++ ++static int announce_servers(struct sockaddr_qrtr *sq) ++{ ++ struct radix_tree_iter iter; ++ struct qrtr_server *srv; ++ struct qrtr_node *node; ++ void __rcu **slot; ++ int ret; ++ ++ node = node_get(qrtr_ns.local_node); ++ if (!node) ++ return 0; ++ ++ /* Announce the list of servers registered in this node */ ++ radix_tree_for_each_slot(slot, &node->servers, &iter, 0) { ++ srv = radix_tree_deref_slot(slot); ++ ++ ret = service_announce_new(sq, srv); ++ if (ret < 0) { ++ pr_err("failed to announce new service\n"); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++static struct qrtr_server *server_add(unsigned int service, ++ unsigned int instance, ++ unsigned int node_id, ++ unsigned int port) ++{ ++ struct qrtr_server *srv; ++ struct qrtr_server *old; ++ struct qrtr_node *node; ++ ++ if (!service || !port) ++ return NULL; ++ ++ srv = kzalloc(sizeof(*srv), GFP_KERNEL); ++ if (!srv) ++ return NULL; ++ ++ srv->service = service; ++ srv->instance = instance; ++ srv->node = node_id; ++ srv->port = port; ++ ++ node = node_get(node_id); ++ if (!node) ++ goto err; ++ ++ /* Delete the old server on the same port */ ++ old = radix_tree_lookup(&node->servers, port); ++ if (old) { ++ radix_tree_delete(&node->servers, port); ++ kfree(old); ++ } ++ ++ radix_tree_insert(&node->servers, port, srv); ++ ++ trace_qrtr_ns_server_add(srv->service, srv->instance, ++ srv->node, srv->port); ++ ++ return srv; ++ ++err: ++ kfree(srv); ++ return NULL; ++} ++ ++static int server_del(struct 
qrtr_node *node, unsigned int port) ++{ ++ struct qrtr_lookup *lookup; ++ struct qrtr_server *srv; ++ struct list_head *li; ++ ++ srv = radix_tree_lookup(&node->servers, port); ++ if (!srv) ++ return -ENOENT; ++ ++ radix_tree_delete(&node->servers, port); ++ ++ /* Broadcast the removal of local servers */ ++ if (srv->node == qrtr_ns.local_node) ++ service_announce_del(&qrtr_ns.bcast_sq, srv); ++ ++ /* Announce the service's disappearance to observers */ ++ list_for_each(li, &qrtr_ns.lookups) { ++ lookup = container_of(li, struct qrtr_lookup, li); ++ if (lookup->service && lookup->service != srv->service) ++ continue; ++ if (lookup->instance && lookup->instance != srv->instance) ++ continue; ++ ++ lookup_notify(&lookup->sq, srv, false); ++ } ++ ++ kfree(srv); ++ ++ return 0; ++} ++ ++static int say_hello(struct sockaddr_qrtr *dest) ++{ ++ struct qrtr_ctrl_pkt pkt; ++ struct msghdr msg = { }; ++ struct kvec iv; ++ int ret; ++ ++ iv.iov_base = &pkt; ++ iv.iov_len = sizeof(pkt); ++ ++ memset(&pkt, 0, sizeof(pkt)); ++ pkt.cmd = cpu_to_le32(QRTR_TYPE_HELLO); ++ ++ msg.msg_name = (struct sockaddr *)dest; ++ msg.msg_namelen = sizeof(*dest); ++ ++ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); ++ if (ret < 0) ++ pr_err("failed to send hello msg\n"); ++ ++ return ret; ++} ++ ++/* Announce the list of servers registered on the local node */ ++static int ctrl_cmd_hello(struct sockaddr_qrtr *sq) ++{ ++ int ret; ++ ++ ret = say_hello(sq); ++ if (ret < 0) ++ return ret; ++ ++ return announce_servers(sq); ++} ++ ++static int ctrl_cmd_bye(struct sockaddr_qrtr *from) ++{ ++ struct qrtr_node *local_node; ++ struct radix_tree_iter iter; ++ struct qrtr_ctrl_pkt pkt; ++ struct qrtr_server *srv; ++ struct sockaddr_qrtr sq; ++ struct msghdr msg = { }; ++ struct qrtr_node *node; ++ void __rcu **slot; ++ struct kvec iv; ++ int ret; ++ ++ iv.iov_base = &pkt; ++ iv.iov_len = sizeof(pkt); ++ ++ node = node_get(from->sq_node); ++ if (!node) ++ return 0; ++ ++ /* Advertise 
removal of this client to all servers of remote node */ ++ radix_tree_for_each_slot(slot, &node->servers, &iter, 0) { ++ srv = radix_tree_deref_slot(slot); ++ server_del(node, srv->port); ++ } ++ ++ /* Advertise the removal of this client to all local servers */ ++ local_node = node_get(qrtr_ns.local_node); ++ if (!local_node) ++ return 0; ++ ++ memset(&pkt, 0, sizeof(pkt)); ++ pkt.cmd = cpu_to_le32(QRTR_TYPE_BYE); ++ pkt.client.node = cpu_to_le32(from->sq_node); ++ ++ radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) { ++ srv = radix_tree_deref_slot(slot); ++ ++ sq.sq_family = AF_QIPCRTR; ++ sq.sq_node = srv->node; ++ sq.sq_port = srv->port; ++ ++ msg.msg_name = (struct sockaddr *)&sq; ++ msg.msg_namelen = sizeof(sq); ++ ++ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); ++ if (ret < 0) { ++ pr_err("failed to send bye cmd\n"); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++static int ctrl_cmd_del_client(struct sockaddr_qrtr *from, ++ unsigned int node_id, unsigned int port) ++{ ++ struct qrtr_node *local_node; ++ struct radix_tree_iter iter; ++ struct qrtr_lookup *lookup; ++ struct qrtr_ctrl_pkt pkt; ++ struct msghdr msg = { }; ++ struct qrtr_server *srv; ++ struct sockaddr_qrtr sq; ++ struct qrtr_node *node; ++ struct list_head *tmp; ++ struct list_head *li; ++ void __rcu **slot; ++ struct kvec iv; ++ int ret; ++ ++ iv.iov_base = &pkt; ++ iv.iov_len = sizeof(pkt); ++ ++ /* Don't accept spoofed messages */ ++ if (from->sq_node != node_id) ++ return -EINVAL; ++ ++ /* Local DEL_CLIENT messages comes from the port being closed */ ++ if (from->sq_node == qrtr_ns.local_node && from->sq_port != port) ++ return -EINVAL; ++ ++ /* Remove any lookups by this client */ ++ list_for_each_safe(li, tmp, &qrtr_ns.lookups) { ++ lookup = container_of(li, struct qrtr_lookup, li); ++ if (lookup->sq.sq_node != node_id) ++ continue; ++ if (lookup->sq.sq_port != port) ++ continue; ++ ++ list_del(&lookup->li); ++ kfree(lookup); ++ } ++ ++ /* Remove the 
server belonging to this port */ ++ node = node_get(node_id); ++ if (node) ++ server_del(node, port); ++ ++ /* Advertise the removal of this client to all local servers */ ++ local_node = node_get(qrtr_ns.local_node); ++ if (!local_node) ++ return 0; ++ ++ memset(&pkt, 0, sizeof(pkt)); ++ pkt.cmd = cpu_to_le32(QRTR_TYPE_DEL_CLIENT); ++ pkt.client.node = cpu_to_le32(node_id); ++ pkt.client.port = cpu_to_le32(port); ++ ++ radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) { ++ srv = radix_tree_deref_slot(slot); ++ ++ sq.sq_family = AF_QIPCRTR; ++ sq.sq_node = srv->node; ++ sq.sq_port = srv->port; ++ ++ msg.msg_name = (struct sockaddr *)&sq; ++ msg.msg_namelen = sizeof(sq); ++ ++ ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); ++ if (ret < 0) { ++ pr_err("failed to send del client cmd\n"); ++ return ret; ++ } ++ } ++ ++ return 0; ++} ++ ++static int ctrl_cmd_new_server(struct sockaddr_qrtr *from, ++ unsigned int service, unsigned int instance, ++ unsigned int node_id, unsigned int port) ++{ ++ struct qrtr_lookup *lookup; ++ struct qrtr_server *srv; ++ struct list_head *li; ++ int ret = 0; ++ ++ /* Ignore specified node and port for local servers */ ++ if (from->sq_node == qrtr_ns.local_node) { ++ node_id = from->sq_node; ++ port = from->sq_port; ++ } ++ ++ /* Don't accept spoofed messages */ ++ if (from->sq_node != node_id) ++ return -EINVAL; ++ ++ srv = server_add(service, instance, node_id, port); ++ if (!srv) ++ return -EINVAL; ++ ++ if (srv->node == qrtr_ns.local_node) { ++ ret = service_announce_new(&qrtr_ns.bcast_sq, srv); ++ if (ret < 0) { ++ pr_err("failed to announce new service\n"); ++ return ret; ++ } ++ } ++ ++ /* Notify any potential lookups about the new server */ ++ list_for_each(li, &qrtr_ns.lookups) { ++ lookup = container_of(li, struct qrtr_lookup, li); ++ if (lookup->service && lookup->service != service) ++ continue; ++ if (lookup->instance && lookup->instance != instance) ++ continue; ++ ++ lookup_notify(&lookup->sq, 
srv, true); ++ } ++ ++ return ret; ++} ++ ++static int ctrl_cmd_del_server(struct sockaddr_qrtr *from, ++ unsigned int service, unsigned int instance, ++ unsigned int node_id, unsigned int port) ++{ ++ struct qrtr_node *node; ++ ++ /* Ignore specified node and port for local servers*/ ++ if (from->sq_node == qrtr_ns.local_node) { ++ node_id = from->sq_node; ++ port = from->sq_port; ++ } ++ ++ /* Don't accept spoofed messages */ ++ if (from->sq_node != node_id) ++ return -EINVAL; ++ ++ /* Local servers may only unregister themselves */ ++ if (from->sq_node == qrtr_ns.local_node && from->sq_port != port) ++ return -EINVAL; ++ ++ node = node_get(node_id); ++ if (!node) ++ return -ENOENT; ++ ++ return server_del(node, port); ++} ++ ++static int ctrl_cmd_new_lookup(struct sockaddr_qrtr *from, ++ unsigned int service, unsigned int instance) ++{ ++ struct radix_tree_iter node_iter; ++ struct qrtr_server_filter filter; ++ struct radix_tree_iter srv_iter; ++ struct qrtr_lookup *lookup; ++ struct qrtr_node *node; ++ void __rcu **node_slot; ++ void __rcu **srv_slot; ++ ++ /* Accept only local observers */ ++ if (from->sq_node != qrtr_ns.local_node) ++ return -EINVAL; ++ ++ lookup = kzalloc(sizeof(*lookup), GFP_KERNEL); ++ if (!lookup) ++ return -ENOMEM; ++ ++ lookup->sq = *from; ++ lookup->service = service; ++ lookup->instance = instance; ++ list_add_tail(&lookup->li, &qrtr_ns.lookups); ++ ++ memset(&filter, 0, sizeof(filter)); ++ filter.service = service; ++ filter.instance = instance; ++ ++ radix_tree_for_each_slot(node_slot, &nodes, &node_iter, 0) { ++ node = radix_tree_deref_slot(node_slot); ++ ++ radix_tree_for_each_slot(srv_slot, &node->servers, ++ &srv_iter, 0) { ++ struct qrtr_server *srv; ++ ++ srv = radix_tree_deref_slot(srv_slot); ++ if (!server_match(srv, &filter)) ++ continue; ++ ++ lookup_notify(from, srv, true); ++ } ++ } ++ ++ /* Empty notification, to indicate end of listing */ ++ lookup_notify(from, NULL, true); ++ ++ return 0; ++} ++ ++static void 
ctrl_cmd_del_lookup(struct sockaddr_qrtr *from, ++ unsigned int service, unsigned int instance) ++{ ++ struct qrtr_lookup *lookup; ++ struct list_head *tmp; ++ struct list_head *li; ++ ++ list_for_each_safe(li, tmp, &qrtr_ns.lookups) { ++ lookup = container_of(li, struct qrtr_lookup, li); ++ if (lookup->sq.sq_node != from->sq_node) ++ continue; ++ if (lookup->sq.sq_port != from->sq_port) ++ continue; ++ if (lookup->service != service) ++ continue; ++ if (lookup->instance && lookup->instance != instance) ++ continue; ++ ++ list_del(&lookup->li); ++ kfree(lookup); ++ } ++} ++ ++static void qrtr_ns_worker(struct work_struct *work) ++{ ++ const struct qrtr_ctrl_pkt *pkt; ++ size_t recv_buf_size = 4096; ++ struct sockaddr_qrtr sq; ++ struct msghdr msg = { }; ++ unsigned int cmd; ++ ssize_t msglen; ++ void *recv_buf; ++ struct kvec iv; ++ int ret; ++ ++ msg.msg_name = (struct sockaddr *)&sq; ++ msg.msg_namelen = sizeof(sq); ++ ++ recv_buf = kzalloc(recv_buf_size, GFP_KERNEL); ++ if (!recv_buf) ++ return; ++ ++ for (;;) { ++ iv.iov_base = recv_buf; ++ iv.iov_len = recv_buf_size; ++ ++ msglen = kernel_recvmsg(qrtr_ns.sock, &msg, &iv, 1, ++ iv.iov_len, MSG_DONTWAIT); ++ ++ if (msglen == -EAGAIN) ++ break; ++ ++ if (msglen < 0) { ++ pr_err("error receiving packet: %zd\n", msglen); ++ break; ++ } ++ ++ pkt = recv_buf; ++ cmd = le32_to_cpu(pkt->cmd); ++ if (cmd < ARRAY_SIZE(qrtr_ctrl_pkt_strings) && ++ qrtr_ctrl_pkt_strings[cmd]) ++ trace_qrtr_ns_message(qrtr_ctrl_pkt_strings[cmd], ++ sq.sq_node, sq.sq_port); ++ ++ ret = 0; ++ switch (cmd) { ++ case QRTR_TYPE_HELLO: ++ ret = ctrl_cmd_hello(&sq); ++ break; ++ case QRTR_TYPE_BYE: ++ ret = ctrl_cmd_bye(&sq); ++ break; ++ case QRTR_TYPE_DEL_CLIENT: ++ ret = ctrl_cmd_del_client(&sq, ++ le32_to_cpu(pkt->client.node), ++ le32_to_cpu(pkt->client.port)); ++ break; ++ case QRTR_TYPE_NEW_SERVER: ++ ret = ctrl_cmd_new_server(&sq, ++ le32_to_cpu(pkt->server.service), ++ le32_to_cpu(pkt->server.instance), ++ le32_to_cpu(pkt->server.node), 
++ le32_to_cpu(pkt->server.port)); ++ break; ++ case QRTR_TYPE_DEL_SERVER: ++ ret = ctrl_cmd_del_server(&sq, ++ le32_to_cpu(pkt->server.service), ++ le32_to_cpu(pkt->server.instance), ++ le32_to_cpu(pkt->server.node), ++ le32_to_cpu(pkt->server.port)); ++ break; ++ case QRTR_TYPE_EXIT: ++ case QRTR_TYPE_PING: ++ case QRTR_TYPE_RESUME_TX: ++ break; ++ case QRTR_TYPE_NEW_LOOKUP: ++ ret = ctrl_cmd_new_lookup(&sq, ++ le32_to_cpu(pkt->server.service), ++ le32_to_cpu(pkt->server.instance)); ++ break; ++ case QRTR_TYPE_DEL_LOOKUP: ++ ctrl_cmd_del_lookup(&sq, ++ le32_to_cpu(pkt->server.service), ++ le32_to_cpu(pkt->server.instance)); ++ break; ++ } ++ ++ if (ret < 0) ++ pr_err("failed while handling packet from %d:%d", ++ sq.sq_node, sq.sq_port); ++ } ++ ++ kfree(recv_buf); ++} ++ ++static void qrtr_ns_data_ready(struct sock *sk) ++{ ++ queue_work(qrtr_ns.workqueue, &qrtr_ns.work); ++} ++ ++void qrtr_ns_init(void) ++{ ++ struct sockaddr_qrtr sq; ++ int sl = sizeof(sq); ++ int ret; ++ ++ INIT_LIST_HEAD(&qrtr_ns.lookups); ++ INIT_WORK(&qrtr_ns.work, qrtr_ns_worker); ++ ++ ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM, ++ PF_QIPCRTR, &qrtr_ns.sock); ++ if (ret < 0) ++ return; ++ ++ ret = kernel_getsockname(qrtr_ns.sock, (struct sockaddr *)&sq, &sl); ++ if (ret < 0) { ++ pr_err("failed to get socket name\n"); ++ goto err_sock; ++ } ++ ++ qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1); ++ if (!qrtr_ns.workqueue) ++ goto err_sock; ++ ++ qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready; ++ ++ sq.sq_port = QRTR_PORT_CTRL; ++ qrtr_ns.local_node = sq.sq_node; ++ ++ ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq)); ++ if (ret < 0) { ++ pr_err("failed to bind to socket\n"); ++ goto err_wq; ++ } ++ ++ qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR; ++ qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST; ++ qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL; ++ ++ ret = say_hello(&qrtr_ns.bcast_sq); ++ if (ret < 0) ++ goto err_wq; ++ ++ return; ++ 
++err_wq: ++ destroy_workqueue(qrtr_ns.workqueue); ++err_sock: ++ sock_release(qrtr_ns.sock); ++} ++EXPORT_SYMBOL_GPL(qrtr_ns_init); ++ ++void qrtr_ns_remove(void) ++{ ++ cancel_work_sync(&qrtr_ns.work); ++ destroy_workqueue(qrtr_ns.workqueue); ++ sock_release(qrtr_ns.sock); ++} ++EXPORT_SYMBOL_GPL(qrtr_ns_remove); ++ ++MODULE_AUTHOR("Manivannan Sadhasivam "); ++MODULE_DESCRIPTION("Qualcomm IPC Router Nameservice"); ++MODULE_LICENSE("Dual BSD/GPL"); +Index: linux-4.4.60/net/qrtr/qrtr.c +=================================================================== +--- linux-4.4.60.orig/net/qrtr/qrtr.c ++++ linux-4.4.60/net/qrtr/qrtr.c +@@ -135,6 +135,8 @@ static DEFINE_IDR(qrtr_ports); + static DEFINE_MUTEX(qrtr_port_lock); + static DEFINE_MUTEX(qrtr_node_locking); + ++static struct delayed_work qrtr_ns_work; ++ + /** + * struct qrtr_node - endpoint node + * @ep_lock: lock for endpoint management and callbacks +@@ -1765,33 +1767,6 @@ static int qrtr_create(struct net *net, + return 0; + } + +-static const struct nla_policy qrtr_policy[IFA_MAX + 1] = { +- [IFA_LOCAL] = { .type = NLA_U32 }, +-}; +- +-static int qrtr_addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh) +-{ +- struct nlattr *tb[IFA_MAX + 1]; +- struct ifaddrmsg *ifm; +- int rc; +- +- if (!netlink_capable(skb, CAP_NET_ADMIN)) +- return -EPERM; +- +- ASSERT_RTNL(); +- +- rc = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, qrtr_policy); +- if (rc < 0) +- return rc; +- +- ifm = nlmsg_data(nlh); +- if (!tb[IFA_LOCAL]) +- return -EINVAL; +- +- qrtr_local_nid = nla_get_u32(tb[IFA_LOCAL]); +- return 0; +-} +- + static const struct net_proto_family qrtr_family = { + .owner = THIS_MODULE, + .family = AF_QIPCRTR, +@@ -1811,7 +1786,8 @@ static int __init qrtr_proto_init(void) + proto_unregister(&qrtr_proto); + return rc; + } +- rtnl_register(PF_QIPCRTR, RTM_NEWADDR, qrtr_addr_doit, NULL, NULL); ++ ++ qrtr_ns_init(); + + return 0; + } +@@ -1819,7 +1795,8 @@ postcore_initcall(qrtr_proto_init); + + static void __exit 
qrtr_proto_fini(void) + { +- rtnl_unregister(PF_QIPCRTR, RTM_NEWADDR); ++ cancel_delayed_work_sync(&qrtr_ns_work); ++ qrtr_ns_remove(); + sock_unregister(qrtr_family.family); + proto_unregister(&qrtr_proto); + } +Index: linux-4.4.60/net/qrtr/qrtr.h +=================================================================== +--- linux-4.4.60.orig/net/qrtr/qrtr.h ++++ linux-4.4.60/net/qrtr/qrtr.h +@@ -33,4 +33,9 @@ void qrtr_endpoint_unregister(struct qrt + int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len); + + int qrtr_peek_pkt_size(const void *data); ++ ++void qrtr_ns_init(void); ++ ++void qrtr_ns_remove(void); ++ + #endif +Index: linux-4.4.60/net/qrtr/Makefile +=================================================================== +--- linux-4.4.60.orig/net/qrtr/Makefile ++++ linux-4.4.60/net/qrtr/Makefile +@@ -1,4 +1,4 @@ +-obj-$(CONFIG_QRTR) := qrtr.o ++obj-$(CONFIG_QRTR) := qrtr.o ns.o + + obj-$(CONFIG_QRTR_SMD) += qrtr-smd.o + qrtr-smd-y := smd.o +Index: linux-4.4.60/include/trace/events/qrtr.h +=================================================================== +--- /dev/null ++++ linux-4.4.60/include/trace/events/qrtr.h +@@ -0,0 +1,115 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM qrtr ++ ++#if !defined(_TRACE_QRTR_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_QRTR_H ++ ++#include ++#include ++ ++TRACE_EVENT(qrtr_ns_service_announce_new, ++ ++ TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port), ++ ++ TP_ARGS(service, instance, node, port), ++ ++ TP_STRUCT__entry( ++ __field(__le32, service) ++ __field(__le32, instance) ++ __field(__le32, node) ++ __field(__le32, port) ++ ), ++ ++ TP_fast_assign( ++ __entry->service = service; ++ __entry->instance = instance; ++ __entry->node = node; ++ __entry->port = port; ++ ), ++ ++ TP_printk("advertising new server [%d:%x]@[%d:%d]", ++ __entry->service, __entry->instance, __entry->node, ++ __entry->port ++ ) ++); ++ 
++TRACE_EVENT(qrtr_ns_service_announce_del, ++ ++ TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port), ++ ++ TP_ARGS(service, instance, node, port), ++ ++ TP_STRUCT__entry( ++ __field(__le32, service) ++ __field(__le32, instance) ++ __field(__le32, node) ++ __field(__le32, port) ++ ), ++ ++ TP_fast_assign( ++ __entry->service = service; ++ __entry->instance = instance; ++ __entry->node = node; ++ __entry->port = port; ++ ), ++ ++ TP_printk("advertising removal of server [%d:%x]@[%d:%d]", ++ __entry->service, __entry->instance, __entry->node, ++ __entry->port ++ ) ++); ++ ++TRACE_EVENT(qrtr_ns_server_add, ++ ++ TP_PROTO(__le32 service, __le32 instance, __le32 node, __le32 port), ++ ++ TP_ARGS(service, instance, node, port), ++ ++ TP_STRUCT__entry( ++ __field(__le32, service) ++ __field(__le32, instance) ++ __field(__le32, node) ++ __field(__le32, port) ++ ), ++ ++ TP_fast_assign( ++ __entry->service = service; ++ __entry->instance = instance; ++ __entry->node = node; ++ __entry->port = port; ++ ), ++ ++ TP_printk("add server [%d:%x]@[%d:%d]", ++ __entry->service, __entry->instance, __entry->node, ++ __entry->port ++ ) ++); ++ ++TRACE_EVENT(qrtr_ns_message, ++ ++ TP_PROTO(const char * const ctrl_pkt_str, __u32 sq_node, __u32 sq_port), ++ ++ TP_ARGS(ctrl_pkt_str, sq_node, sq_port), ++ ++ TP_STRUCT__entry( ++ __string(ctrl_pkt_str, ctrl_pkt_str) ++ __field(__u32, sq_node) ++ __field(__u32, sq_port) ++ ), ++ ++ TP_fast_assign( ++ __assign_str(ctrl_pkt_str, ctrl_pkt_str); ++ __entry->sq_node = sq_node; ++ __entry->sq_port = sq_port; ++ ), ++ ++ TP_printk("%s from %d:%d", ++ __get_str(ctrl_pkt_str), __entry->sq_node, __entry->sq_port ++ ) ++); ++ ++#endif /* _TRACE_QRTR_H */ ++ ++/* This part must be outside protection */ ++#include diff --git a/feeds/ipq807x/ipq807x/patches/101-aq_phy.patch b/feeds/ipq807x/ipq807x/patches/101-aq_phy.patch deleted file mode 100644 index 1d2072b4d..000000000 --- a/feeds/ipq807x/ipq807x/patches/101-aq_phy.patch +++ /dev/null 
@@ -1,78 +0,0 @@ -Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/drivers/net/phy/aquantia_main.c -=================================================================== ---- linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac.orig/drivers/net/phy/aquantia_main.c -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/drivers/net/phy/aquantia_main.c -@@ -25,9 +25,11 @@ - #define PHY_ID_AQR109 0x03a1b502 - #define PHY_ID_AQR111 0x03a1b610 - #define PHY_ID_AQR111B0 0x03a1b612 -+#define PHY_ID_AQR111C 0x03a1b7e2 - #define PHY_ID_AQR112 0x03a1b660 - #define PHY_ID_AQR112C 0x03a1b792 - #define PHY_ID_AQR113C 0x31c31C10 -+#define PHY_ID_AQR114C 0x31c31C22 - #define PHY_ID_AQCS109 0x03a1b5c2 - #define PHY_ID_AQR405 0x03a1b4b0 - -@@ -1011,6 +1013,24 @@ static struct phy_driver aqr_driver[] = - .link_change_notify = aqr107_link_change_notify, - }, - { -+ PHY_ID_MATCH_MODEL(PHY_ID_AQR111C), -+ .name = "Aquantia AQR111C", -+ .probe = aqr107_probe, -+ .config_init = aqr107_config_init, -+ .config_aneg = aqr_config_aneg, -+ .config_intr = aqr_config_intr, -+ .ack_interrupt = aqr_ack_interrupt, -+ .read_status = aqr107_read_status, -+ .get_tunable = aqr107_get_tunable, -+ .set_tunable = aqr107_set_tunable, -+ .suspend = aqr107_suspend, -+ .resume = aqr107_resume, -+ .get_sset_count = aqr107_get_sset_count, -+ .get_strings = aqr107_get_strings, -+ .get_stats = aqr107_get_stats, -+ .link_change_notify = aqr107_link_change_notify, -+}, -+{ - PHY_ID_MATCH_MODEL(PHY_ID_AQR112), - .name = "Aquantia AQR112", - .probe = aqr107_probe, -@@ -1065,6 +1085,24 @@ static struct phy_driver aqr_driver[] = - .link_change_notify = aqr107_link_change_notify, - }, - { -+ PHY_ID_MATCH_MODEL(PHY_ID_AQR114C), -+ .name = "Aquantia AQR114C", -+ .probe = aqr107_probe, -+ .config_init = aqr107_config_init, -+ .config_aneg = aqr_config_aneg, -+ .config_intr = aqr_config_intr, -+ .ack_interrupt = aqr_ack_interrupt, -+ .read_status = aqr107_read_status, -+ .get_tunable = 
aqr107_get_tunable, -+ .set_tunable = aqr107_set_tunable, -+ .suspend = aqr107_suspend, -+ .resume = aqr107_resume, -+ .get_sset_count = aqr107_get_sset_count, -+ .get_strings = aqr107_get_strings, -+ .get_stats = aqr107_get_stats, -+ .link_change_notify = aqr107_link_change_notify, -+}, -+{ - PHY_ID_MATCH_MODEL(PHY_ID_AQCS109), - .name = "Aquantia AQCS109", - .probe = aqr107_probe, -@@ -1104,9 +1142,11 @@ static struct mdio_device_id __maybe_unu - { PHY_ID_MATCH_MODEL(PHY_ID_AQR109) }, - { PHY_ID_MATCH_MODEL(PHY_ID_AQR111) }, - { PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0) }, -+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR111C) }, - { PHY_ID_MATCH_MODEL(PHY_ID_AQR112) }, - { PHY_ID_MATCH_MODEL(PHY_ID_AQR112C) }, - { PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) }, -+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR114C) }, - { PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) }, - { PHY_ID_MATCH_MODEL(PHY_ID_AQR405) }, - { } diff --git a/feeds/ipq807x/ipq807x/patches/101-squashfs.patch b/feeds/ipq807x/ipq807x/patches/101-squashfs.patch new file mode 100644 index 000000000..5744a2a5b --- /dev/null +++ b/feeds/ipq807x/ipq807x/patches/101-squashfs.patch @@ -0,0 +1,16 @@ +Index: linux-4.4.60/fs/squashfs/xz_wrapper.c +=================================================================== +--- linux-4.4.60.orig/fs/squashfs/xz_wrapper.c ++++ linux-4.4.60/fs/squashfs/xz_wrapper.c +@@ -40,10 +40,8 @@ struct squashfs_xz { + }; + + struct disk_comp_opts { +- __le32 flags; +- __le16 bit_opts; +- __le16 fb; + __le32 dictionary_size; ++ __le32 flags; + }; + + struct comp_opts { diff --git a/feeds/ipq807x/ipq807x/patches/102-aq-phy.patch b/feeds/ipq807x/ipq807x/patches/102-aq-phy.patch new file mode 100644 index 000000000..6c6d9b88e --- /dev/null +++ b/feeds/ipq807x/ipq807x/patches/102-aq-phy.patch @@ -0,0 +1,90 @@ +Index: linux-4.4.60-qsdk-ad8f8efb2edcd35cdb130466cfc1923c37ef7ec1/drivers/net/phy/aquantia.c +=================================================================== +--- 
linux-4.4.60-qsdk-ad8f8efb2edcd35cdb130466cfc1923c37ef7ec1.orig/drivers/net/phy/aquantia.c ++++ linux-4.4.60-qsdk-ad8f8efb2edcd35cdb130466cfc1923c37ef7ec1/drivers/net/phy/aquantia.c +@@ -32,6 +32,7 @@ + #define PHY_ID_AQR112 0x03a1b660 + #define PHY_ID_AQR113C 0x31c31C10 + #define PHY_ID_AQR112C 0x03a1b792 ++#define PHY_ID_AQR114C 0x31c31C22 + + #define AQ_PHY_MAX_VALID_MMD_REG 0xff01 + #define AQ_PHY_MAX_INVALID_MMD_REG 0xffff +@@ -756,6 +757,25 @@ static struct phy_driver aquantia_driver + .update_link = aquantia_update_link, + .driver = { .owner = THIS_MODULE,}, + }, ++{ ++ .phy_id = PHY_ID_AQR114C, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQR114C", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .probe = aquantia_phy_probe, ++ .soft_reset = aquantia_soft_reset, ++ .config_init = aquantia_config_init, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++ .suspend = aquantia_suspend, ++ .resume = aquantia_resume, ++ .update_link = aquantia_update_link, ++ .driver = { .owner = THIS_MODULE,}, ++}, + }; + + module_phy_driver(aquantia_driver); +@@ -773,6 +793,7 @@ static struct mdio_device_id __maybe_unu + { PHY_ID_AQR112, 0xfffffff0 }, + { PHY_ID_AQR113C, 0xfffffff0 }, + { PHY_ID_AQR112C, 0xfffffff0 }, ++ { PHY_ID_AQR114C, 0xfffffff0 }, + { } + }; + +Index: linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/drivers/net/phy/aquantia.c +=================================================================== +--- linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce.orig/drivers/net/phy/aquantia.c ++++ linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/drivers/net/phy/aquantia.c +@@ -29,6 +29,7 @@ + #define PHY_ID_AQR109 0x03a1b502 + #define PHY_ID_AQR111 0x03a1b610 + #define PHY_ID_AQR111B0 0x03a1b612 ++#define PHY_ID_AQR111C 0x03a1b7e2 + #define PHY_ID_AQR112 0x03a1b660 
+ #define PHY_ID_AQR113C 0x31c31C10 + #define PHY_ID_AQR112C 0x03a1b792 +@@ -701,6 +702,23 @@ static struct phy_driver aquantia_driver + .driver = { .owner = THIS_MODULE,}, + }, + { ++ .phy_id = PHY_ID_AQR111C, ++ .phy_id_mask = 0xfffffff0, ++ .name = "Aquantia AQR111C", ++ .features = PHY_AQUANTIA_FEATURES, ++ .flags = PHY_HAS_INTERRUPT, ++ .probe = aquantia_phy_probe, ++ .soft_reset = aquantia_soft_reset, ++ .config_init = aquantia_config_init, ++ .aneg_done = aquantia_aneg_done, ++ .config_aneg = aquantia_config_aneg, ++ .config_intr = aquantia_config_intr, ++ .ack_interrupt = aquantia_ack_interrupt, ++ .read_status = aquantia_read_status, ++ .update_link = aquantia_update_link, ++ .driver = { .owner = THIS_MODULE,}, ++}, ++{ + .phy_id = PHY_ID_AQR112, + .phy_id_mask = 0xfffffff0, + .name = "Aquantia AQR112", +@@ -790,6 +808,7 @@ static struct mdio_device_id __maybe_unu + { PHY_ID_AQR109, 0xfffffff0 }, + { PHY_ID_AQR111, 0xfffffff0 }, + { PHY_ID_AQR111B0, 0xfffffff0 }, ++ { PHY_ID_AQR111C, 0xfffffff0 }, + { PHY_ID_AQR112, 0xfffffff0 }, + { PHY_ID_AQR113C, 0xfffffff0 }, + { PHY_ID_AQR112C, 0xfffffff0 }, diff --git a/feeds/ipq807x/ipq807x/patches/102-fix-null-pointer-dereference-in-iptunnel_xmit.patch b/feeds/ipq807x/ipq807x/patches/102-fix-null-pointer-dereference-in-iptunnel_xmit.patch deleted file mode 100644 index f129274b4..000000000 --- a/feeds/ipq807x/ipq807x/patches/102-fix-null-pointer-dereference-in-iptunnel_xmit.patch +++ /dev/null @@ -1,13 +0,0 @@ -Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/net/ipv4/ip_tunnel_core.c -=================================================================== ---- linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac.orig/net/ipv4/ip_tunnel_core.c -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/net/ipv4/ip_tunnel_core.c -@@ -86,7 +86,7 @@ void iptunnel_xmit(struct sock *sk, stru - in_dev = __dev_get_by_index(&init_net, skb_iif); - } - -- if (proto == IPPROTO_IPV6 || proto == 
IPPROTO_GRE || netif_is_vxlan(in_dev)) { -+ if (proto == IPPROTO_IPV6 || proto == IPPROTO_GRE || (in_dev && netif_is_vxlan(in_dev))) { - skb->skb_iif = skb_iif; - } - diff --git a/feeds/ipq807x/ipq807x/patches/103-fix-dtc-gcc10-build.patch b/feeds/ipq807x/ipq807x/patches/103-fix-dtc-gcc10-build.patch new file mode 100644 index 000000000..f91601ec5 --- /dev/null +++ b/feeds/ipq807x/ipq807x/patches/103-fix-dtc-gcc10-build.patch @@ -0,0 +1,11 @@ +--- a/scripts/dtc/dtc-lexer.lex.c_shipped ++++ b/scripts/dtc/dtc-lexer.lex.c_shipped +@@ -637,7 +637,7 @@ char *yytext; + #include "srcpos.h" + #include "dtc-parser.tab.h" + +-YYLTYPE yylloc; ++extern YYLTYPE yylloc; + extern bool treesource_error; + + /* CAUTION: this will stop working if we ever use yyless() or yyunput() */ diff --git a/feeds/ipq807x/ipq807x/patches/104-log-spam.patch b/feeds/ipq807x/ipq807x/patches/104-log-spam.patch index 87dce5c82..94b2a3ffd 100644 --- a/feeds/ipq807x/ipq807x/patches/104-log-spam.patch +++ b/feeds/ipq807x/ipq807x/patches/104-log-spam.patch @@ -1,17 +1,17 @@ -Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/drivers/clk/qcom/clk-branch.c +Index: linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/drivers/clk/qcom/clk-branch.c =================================================================== ---- linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac.orig/drivers/clk/qcom/clk-branch.c -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/drivers/clk/qcom/clk-branch.c -@@ -67,7 +67,7 @@ static int clk_branch_wait(const struct +--- linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce.orig/drivers/clk/qcom/clk-branch.c ++++ linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/drivers/clk/qcom/clk-branch.c +@@ -75,7 +75,7 @@ static int clk_branch_wait(const struct bool (check_halt)(const struct clk_branch *, bool)) { bool voted = br->halt_check & BRANCH_VOTED; - const char *name = clk_hw_get_name(&br->clkr.hw); + //const char *name = 
clk_hw_get_name(&br->clkr.hw); - /* - * Skip checking halt bit if we're explicitly ignoring the bit or the -@@ -88,8 +88,8 @@ static int clk_branch_wait(const struct + /* Skip checking halt bit if the clock is in hardware gated mode */ + if (clk_branch_in_hwcg_mode(br)) +@@ -93,8 +93,8 @@ static int clk_branch_wait(const struct return 0; udelay(1); } @@ -22,3 +22,16 @@ Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/drivers/clk/q return -EBUSY; } return 0; +Index: linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/drivers/usb/phy/phy-msm-qusb.c +=================================================================== +--- linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce.orig/drivers/usb/phy/phy-msm-qusb.c ++++ linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/drivers/usb/phy/phy-msm-qusb.c +@@ -491,7 +491,7 @@ static int qusb_phy_init(struct usb_phy + dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", + readb_relaxed(qphy->base + + QUSB2PHY_PLL_STATUS)); +- WARN_ON(1); ++ //WARN_ON(1); + } + + /* Set OTG VBUS Valid from HSPHY to controller */ diff --git a/feeds/ipq807x/ipq807x/patches/105-add-esmt-nand.patch b/feeds/ipq807x/ipq807x/patches/105-add-esmt-nand.patch index 6e39c6399..d47a4d0d1 100644 --- a/feeds/ipq807x/ipq807x/patches/105-add-esmt-nand.patch +++ b/feeds/ipq807x/ipq807x/patches/105-add-esmt-nand.patch @@ -1,15 +1,37 @@ -Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/drivers/mtd/nand/raw/nand_ids.c +Index: linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/drivers/mtd/nand/nand_ids.c =================================================================== ---- linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac.orig/drivers/mtd/nand/raw/nand_ids.c -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/drivers/mtd/nand/raw/nand_ids.c -@@ -114,6 +114,10 @@ struct nand_flash_dev nand_flash_ids[] = - { .id = {0xc8, 0x42} }, - SZ_2K, SZ_256, SZ_128K, 0, 2, 128, NAND_ECC_INFO(8, 
SZ_512), 0}, - +--- linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce.orig/drivers/mtd/nand/nand_ids.c ++++ linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/drivers/mtd/nand/nand_ids.c +@@ -62,6 +62,12 @@ struct nand_flash_dev nand_flash_ids[] = + {"TH58NYG3S0H 8G 1.8V 8-bit", + { .id = {0x98, 0xa3, 0x91, 0x26} }, + SZ_4K, SZ_1K, SZ_256K, 0, 4, 256, NAND_ECC_INFO(8, SZ_512) }, ++ + {"F59D2G81KA 2G 1.8V 8-bit", + { .id = {0xc8, 0x5a, 0x90, 0x04} }, + SZ_2K, SZ_256, SZ_128K, 0, 4, 128, NAND_ECC_INFO(8, SZ_512) }, ++ + LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS), LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS), LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS), +@@ -190,6 +196,7 @@ struct nand_manufacturers nand_manuf_ids + {NAND_MFR_SANDISK, "SanDisk"}, + {NAND_MFR_INTEL, "Intel"}, + {NAND_MFR_ATO, "ATO"}, ++ {NAND_MFR_ESMT, "ESMT"}, + {NAND_MFR_GIGA, "GigaDevice"}, + {NAND_MFR_ATO, "ATO"}, + {NAND_MFR_WINBOND, "Winbond"}, +Index: linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/include/linux/mtd/nand.h +=================================================================== +--- linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce.orig/include/linux/mtd/nand.h ++++ linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/include/linux/mtd/nand.h +@@ -778,6 +778,7 @@ static inline struct mtd_info *nand_to_m + #define NAND_MFR_ATO 0x9b + #define NAND_MFR_WINBOND 0xef + #define NAND_MFR_FIDELIX 0xe5 ++#define NAND_MFR_ESMT 0xc8 + + /* The maximum expected count of bytes in the NAND ID sequence */ + #define NAND_MAX_ID_LEN 8 diff --git a/feeds/ipq807x/ipq807x/patches/106-pstore.patch b/feeds/ipq807x/ipq807x/patches/106-pstore.patch new file mode 100644 index 000000000..dc3960306 --- /dev/null +++ b/feeds/ipq807x/ipq807x/patches/106-pstore.patch @@ -0,0 +1,147 @@ +Index: linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016/arch/arm64/boot/dts/qcom/qcom-ipq6018-memory.dtsi 
+=================================================================== +--- linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016.orig/arch/arm64/boot/dts/qcom/qcom-ipq6018-memory.dtsi ++++ linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016/arch/arm64/boot/dts/qcom/qcom-ipq6018-memory.dtsi +@@ -92,6 +92,12 @@ + reg = <0x0 0x40000000 0x0 0x00800000>; + }; + ++ ramoops@4A0f0000 { ++ compatible = "ramoops"; ++ reg = <0 0x4A0f0000 0 0x10000>; ++ record-size = <0x1000>; ++ }; ++ + uboot@4A100000 { + no-map; + reg = <0x0 0x4A100000 0x0 0x00400000>; +@@ -211,6 +217,12 @@ + reg = <0x0 0x40000000 0x0 0x01000000>; + }; + ++ ramoops@4A0f0000 { ++ compatible = "ramoops"; ++ reg = <0 0x4A0f0000 0 0x10000>; ++ record-size = <0x1000>; ++ }; ++ + uboot@4A100000 { + no-map; + reg = <0x0 0x4A100000 0x0 0x00400000>; +@@ -330,6 +342,12 @@ + reg = <0x0 0x40000000 0x0 0x01000000>; + }; + ++ ramoops@4A0f0000 { ++ compatible = "ramoops"; ++ reg = <0 0x4A0f0000 0 0x10000>; ++ record-size = <0x1000>; ++ }; ++ + uboot@4A100000 { + no-map; + reg = <0x0 0x4A100000 0x0 0x00400000>; +Index: linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016/fs/pstore/ram.c +=================================================================== +--- linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016.orig/fs/pstore/ram.c ++++ linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016/fs/pstore/ram.c +@@ -466,15 +466,46 @@ static int ramoops_init_prz(struct devic + return 0; + } + ++static int ramoops_parse_dt(struct platform_device *pdev, ++ struct ramoops_platform_data *pdata) ++{ ++ struct resource *res; ++ ++ dev_dbg(&pdev->dev, "using Device Tree\n"); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, ++ "failed to locate DT /reserved-memory resource\n"); ++ return -EINVAL; ++ } ++ ++ pdata->mem_size = resource_size(res); ++ pdata->mem_address = res->start; ++ pdata->dump_oops = true; ++ pdata->record_size = 0x1000; ++ return 0; ++} ++ + 
static int ramoops_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; + struct ramoops_platform_data *pdata = pdev->dev.platform_data; ++ struct ramoops_platform_data pdata_local; + struct ramoops_context *cxt = &oops_cxt; + size_t dump_mem_sz; + phys_addr_t paddr; + int err = -EINVAL; + ++ if (dev_of_node(dev) && !pdata) { ++ pdata = &pdata_local; ++ memset(pdata, 0, sizeof(*pdata)); ++ ++ err = ramoops_parse_dt(pdev, pdata); ++ if (err < 0) ++ goto fail_out; ++ } ++ + /* Only a single ramoops area allowed at a time, so fail extra + * probes. + */ +@@ -603,11 +634,17 @@ static int ramoops_remove(struct platfor + return 0; + } + ++static const struct of_device_id dt_match[] = { ++ { .compatible = "ramoops" }, ++ {} ++}; ++ + static struct platform_driver ramoops_driver = { + .probe = ramoops_probe, + .remove = ramoops_remove, + .driver = { + .name = "ramoops", ++ .of_match_table = dt_match, + }, + }; + +Index: linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016/drivers/of/platform.c +=================================================================== +--- linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016.orig/drivers/of/platform.c ++++ linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016/drivers/of/platform.c +@@ -53,6 +53,30 @@ struct platform_device *of_find_device_b + } + EXPORT_SYMBOL(of_find_device_by_node); + ++static const struct of_device_id reserved_mem_matches[] = { ++ { .compatible = "ramoops" }, ++ {} ++}; ++ ++static int __init of_platform_default_populate_init(void) ++{ ++ struct device_node *node; ++ ++ if (!of_have_populated_dt()) ++ return -ENODEV; ++ ++ /* ++ * Handle certain compatibles explicitly, since we don't want to create ++ * platform_devices for every node in /reserved-memory with a ++ * "compatible", ++ */ ++ for_each_matching_node(node, reserved_mem_matches) ++ of_platform_device_create(node, NULL, NULL); ++ ++ return 0; ++} ++arch_initcall_sync(of_platform_default_populate_init); ++ + 
#ifdef CONFIG_OF_ADDRESS + /* + * The following routines scan a subtree and registers a device for diff --git a/feeds/ipq807x/ipq807x/patches/107-bridge-allow-bcast-mcast-same-port-hairpinmode.patch b/feeds/ipq807x/ipq807x/patches/107-bridge-allow-bcast-mcast-same-port-hairpinmode.patch index 68f3e3fbd..99989b306 100644 --- a/feeds/ipq807x/ipq807x/patches/107-bridge-allow-bcast-mcast-same-port-hairpinmode.patch +++ b/feeds/ipq807x/ipq807x/patches/107-bridge-allow-bcast-mcast-same-port-hairpinmode.patch @@ -1,13 +1,14 @@ -Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/net/bridge/br_forward.c +Index: linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/net/bridge/br_forward.c =================================================================== ---- linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac.orig/net/bridge/br_forward.c -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/net/bridge/br_forward.c -@@ -24,7 +24,7 @@ static inline int should_deliver(const s +--- linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce.orig/net/bridge/br_forward.c ++++ linux-4.4.60-qsdk-10fd7d14853b7020b804acae690c8acec5d954ce/net/bridge/br_forward.c +@@ -33,8 +33,7 @@ static inline int should_deliver(const s struct net_bridge_vlan_group *vg; vg = nbp_vlan_group_rcu(p); -- return (((p->flags & BR_HAIRPIN_MODE) && !is_multicast_ether_addr(eth_hdr(skb)->h_dest)) -+ return (((p->flags & BR_HAIRPIN_MODE)) - || (skb->dev != p->dev)) && - br_allowed_egress(vg, skb) && (p->state == BR_STATE_FORWARDING) && - nbp_switchdev_allowed_egress(p, skb) && +- return ((skb->dev != p->dev) || ((p->flags & BR_HAIRPIN_MODE) && +- (!is_multicast_ether_addr(eth_hdr(skb)->h_dest)))) && ++ return ((skb->dev != p->dev) || (p->flags & BR_HAIRPIN_MODE)) && + br_allowed_egress(vg, skb) && p->state == BR_STATE_FORWARDING; + } + diff --git a/feeds/ipq807x/ipq807x/patches/108-add-W25N01GW.patch b/feeds/ipq807x/ipq807x/patches/108-add-W25N01GW.patch index 
5ef127952..f00cec911 100644 --- a/feeds/ipq807x/ipq807x/patches/108-add-W25N01GW.patch +++ b/feeds/ipq807x/ipq807x/patches/108-add-W25N01GW.patch @@ -1,15 +1,14 @@ -Index: linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/drivers/mtd/nand/raw/nand_ids.c +Index: linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016/drivers/mtd/nand/qcom_nandc.c =================================================================== ---- linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac.orig/drivers/mtd/nand/raw/nand_ids.c -+++ linux-5.4.164-qsdk-d5fcb18e5420670c8734c6a659873e73adab6dac/drivers/mtd/nand/raw/nand_ids.c -@@ -118,6 +118,10 @@ struct nand_flash_dev nand_flash_ids[] = - { .id = {0xc8, 0x5a, 0x90, 0x04} }, - SZ_2K, SZ_256, SZ_128K, 0, 4, 128, NAND_ECC_INFO(8, SZ_512) }, - +--- linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016.orig/drivers/mtd/nand/qcom_nandc.c ++++ linux-4.4.60-qsdk-11f09717303ecd83c3a64e9efe23f25921dc1016/drivers/mtd/nand/qcom_nandc.c +@@ -405,6 +405,9 @@ struct nand_flash_dev qspinand_flash_ids + {"MX35UF1GE4AC SPI NAND 1G 1.8V", + { .id = {0xc2, 0x92} }, + SZ_2K, SZ_128, SZ_128K, 0, 2, 64, NAND_ECC_INFO(4, SZ_512), 0}, + {"W25N01GW SPI NAND 1.8V 1G-BIT", + { .id = {0xef, 0xba} }, + SZ_2K, SZ_128, SZ_128K, 0, 2, 64, NAND_ECC_INFO(4, SZ_512), 0}, -+ - LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS), - LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS), - LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS), + {NULL} + }; + diff --git a/feeds/ipq807x/ipq807x/patches/190-revert-threaded-NAPI.patch b/feeds/ipq807x/ipq807x/patches/190-revert-threaded-NAPI.patch deleted file mode 100644 index 0e1d3c019..000000000 --- a/feeds/ipq807x/ipq807x/patches/190-revert-threaded-NAPI.patch +++ /dev/null @@ -1,306 +0,0 @@ ---- a/include/linux/netdevice.h -+++ b/include/linux/netdevice.h -@@ -338,7 +338,6 @@ struct napi_struct { - struct list_head dev_list; - struct hlist_node napi_hash_node; - 
unsigned int napi_id; -- struct work_struct work; - }; - - enum { -@@ -349,7 +348,6 @@ enum { - NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ - NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ - NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ -- NAPI_STATE_THREADED, /* Use threaded NAPI */ - }; - - enum { -@@ -360,7 +358,6 @@ enum { - NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), - NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), - NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), -- NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), - }; - - enum gro_result { -@@ -2320,26 +2317,6 @@ void netif_napi_add(struct net_device *d - int (*poll)(struct napi_struct *, int), int weight); - - /** -- * netif_threaded_napi_add - initialize a NAPI context -- * @dev: network device -- * @napi: NAPI context -- * @poll: polling function -- * @weight: default weight -- * -- * This variant of netif_napi_add() should be used from drivers using NAPI -- * with CPU intensive poll functions. 
-- * This will schedule polling from a high priority workqueue -- */ --static inline void netif_threaded_napi_add(struct net_device *dev, -- struct napi_struct *napi, -- int (*poll)(struct napi_struct *, int), -- int weight) --{ -- set_bit(NAPI_STATE_THREADED, &napi->state); -- netif_napi_add(dev, napi, poll, weight); --} -- --/** - * netif_tx_napi_add - initialize a NAPI context - * @dev: network device - * @napi: NAPI context ---- a/net/core/dev.c -+++ b/net/core/dev.c -@@ -157,7 +157,6 @@ static DEFINE_SPINLOCK(offload_lock); - struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly; - struct list_head ptype_all __read_mostly; /* Taps */ - static struct list_head offload_base __read_mostly; --static struct workqueue_struct *napi_workq __read_mostly; - - static int netif_rx_internal(struct sk_buff *skb); - static int call_netdevice_notifiers_info(unsigned long val, -@@ -5969,11 +5968,6 @@ void __napi_schedule(struct napi_struct - { - unsigned long flags; - -- if (test_bit(NAPI_STATE_THREADED, &n->state)) { -- queue_work(napi_workq, &n->work); -- return; -- } -- - local_irq_save(flags); - ____napi_schedule(this_cpu_ptr(&softnet_data), n); - local_irq_restore(flags); -@@ -6289,84 +6283,6 @@ static void init_gro_hash(struct napi_st - napi->gro_bitmask = 0; - } - --static int __napi_poll(struct napi_struct *n, bool *repoll) --{ -- int work, weight; -- -- weight = n->weight; -- -- /* This NAPI_STATE_SCHED test is for avoiding a race -- * with netpoll's poll_napi(). Only the entity which -- * obtains the lock and sees NAPI_STATE_SCHED set will -- * actually make the ->poll() call. Therefore we avoid -- * accidentally calling ->poll() when NAPI is not scheduled. -- */ -- work = 0; -- if (test_bit(NAPI_STATE_SCHED, &n->state)) { -- work = n->poll(n, weight); -- trace_napi_poll(n, work, weight); -- } -- -- WARN_ON_ONCE(work > weight); -- -- if (likely(work < weight)) -- return work; -- -- /* Drivers must not modify the NAPI state if they -- * consume the entire weight. 
In such cases this code -- * still "owns" the NAPI instance and therefore can -- * move the instance around on the list at-will. -- */ -- if (unlikely(napi_disable_pending(n))) { -- napi_complete(n); -- return work; -- } -- -- if (n->gro_bitmask) { -- /* flush too old packets -- * If HZ < 1000, flush all packets. -- */ -- napi_gro_flush(n, HZ >= 1000); -- } -- -- gro_normal_list(n); -- -- *repoll = true; -- -- return work; --} -- --static void napi_workfn(struct work_struct *work) --{ -- struct napi_struct *n = container_of(work, struct napi_struct, work); -- void *have; -- -- for (;;) { -- bool repoll = false; -- -- local_bh_disable(); -- -- have = netpoll_poll_lock(n); -- __napi_poll(n, &repoll); -- netpoll_poll_unlock(have); -- -- local_bh_enable(); -- -- if (!repoll) -- return; -- -- if (!need_resched()) -- continue; -- -- /* -- * have to pay for the latency of task switch even if -- * napi is scheduled -- */ -- queue_work(napi_workq, work); -- return; -- } --} -- - void netif_napi_add(struct net_device *dev, struct napi_struct *napi, - int (*poll)(struct napi_struct *, int), int weight) - { -@@ -6386,7 +6302,6 @@ void netif_napi_add(struct net_device *d - #ifdef CONFIG_NETPOLL - napi->poll_owner = -1; - #endif -- INIT_WORK(&napi->work, napi_workfn); - set_bit(NAPI_STATE_SCHED, &napi->state); - set_bit(NAPI_STATE_NPSVC, &napi->state); - list_add_rcu(&napi->dev_list, &dev->napi_list); -@@ -6427,7 +6342,6 @@ static void flush_gro_hash(struct napi_s - void netif_napi_del(struct napi_struct *napi) - { - might_sleep(); -- cancel_work_sync(&napi->work); - if (napi_hash_del(napi)) - synchronize_net(); - list_del_init(&napi->dev_list); -@@ -6440,19 +6354,51 @@ EXPORT_SYMBOL(netif_napi_del); - - static int napi_poll(struct napi_struct *n, struct list_head *repoll) - { -- bool do_repoll = false; - void *have; -- int work; -+ int work, weight; - - list_del_init(&n->poll_list); - - have = netpoll_poll_lock(n); - -- work = __napi_poll(n, &do_repoll); -+ weight = n->weight; 
- -- if (!do_repoll) -+ /* This NAPI_STATE_SCHED test is for avoiding a race -+ * with netpoll's poll_napi(). Only the entity which -+ * obtains the lock and sees NAPI_STATE_SCHED set will -+ * actually make the ->poll() call. Therefore we avoid -+ * accidentally calling ->poll() when NAPI is not scheduled. -+ */ -+ work = 0; -+ if (test_bit(NAPI_STATE_SCHED, &n->state)) { -+ work = n->poll(n, weight); -+ trace_napi_poll(n, work, weight); -+ } -+ -+ WARN_ON_ONCE(work > weight); -+ -+ if (likely(work < weight)) - goto out_unlock; - -+ /* Drivers must not modify the NAPI state if they -+ * consume the entire weight. In such cases this code -+ * still "owns" the NAPI instance and therefore can -+ * move the instance around on the list at-will. -+ */ -+ if (unlikely(napi_disable_pending(n))) { -+ napi_complete(n); -+ goto out_unlock; -+ } -+ -+ if (n->gro_bitmask) { -+ /* flush too old packets -+ * If HZ < 1000, flush all packets. -+ */ -+ napi_gro_flush(n, HZ >= 1000); -+ } -+ -+ gro_normal_list(n); -+ - /* Some drivers may have called napi_schedule - * prior to exhausting their budget. 
- */ -@@ -10428,10 +10374,6 @@ static int __init net_dev_init(void) - sd->backlog.weight = weight_p; - } - -- napi_workq = alloc_workqueue("napi_workq", WQ_UNBOUND | WQ_HIGHPRI, -- WQ_UNBOUND_MAX_ACTIVE | WQ_SYSFS); -- BUG_ON(!napi_workq); -- - dev_boot_phase = 0; - - /* The loopback device is special if any other network devices ---- a/net/core/net-sysfs.c -+++ b/net/core/net-sysfs.c -@@ -470,52 +470,6 @@ static ssize_t proto_down_store(struct d - } - NETDEVICE_SHOW_RW(proto_down, fmt_dec); - --static int change_napi_threaded(struct net_device *dev, unsigned long val) --{ -- struct napi_struct *napi; -- -- if (list_empty(&dev->napi_list)) -- return -EOPNOTSUPP; -- -- list_for_each_entry(napi, &dev->napi_list, dev_list) { -- if (val) -- set_bit(NAPI_STATE_THREADED, &napi->state); -- else -- clear_bit(NAPI_STATE_THREADED, &napi->state); -- } -- -- return 0; --} -- --static ssize_t napi_threaded_store(struct device *dev, -- struct device_attribute *attr, -- const char *buf, size_t len) --{ -- return netdev_store(dev, attr, buf, len, change_napi_threaded); --} -- --static ssize_t napi_threaded_show(struct device *dev, -- struct device_attribute *attr, -- char *buf) --{ -- struct net_device *netdev = to_net_dev(dev); -- struct napi_struct *napi; -- bool enabled = false; -- -- if (!rtnl_trylock()) -- return restart_syscall(); -- -- list_for_each_entry(napi, &netdev->napi_list, dev_list) { -- if (test_bit(NAPI_STATE_THREADED, &napi->state)) -- enabled = true; -- } -- -- rtnl_unlock(); -- -- return sprintf(buf, fmt_dec, enabled); --} --static DEVICE_ATTR_RW(napi_threaded); -- - static ssize_t phys_port_id_show(struct device *dev, - struct device_attribute *attr, char *buf) - { -@@ -627,7 +581,6 @@ static struct attribute *net_class_attrs - &dev_attr_flags.attr, - &dev_attr_tx_queue_len.attr, - &dev_attr_gro_flush_timeout.attr, -- &dev_attr_napi_threaded.attr, - &dev_attr_phys_port_id.attr, - &dev_attr_phys_port_name.attr, - &dev_attr_phys_switch_id.attr, diff --git 
a/feeds/ipq807x/ipq807x/patches/200-bpf_backport.patch b/feeds/ipq807x/ipq807x/patches/200-bpf_backport.patch new file mode 100644 index 000000000..4357369c2 --- /dev/null +++ b/feeds/ipq807x/ipq807x/patches/200-bpf_backport.patch @@ -0,0 +1,44780 @@ +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -38,7 +38,7 @@ config ARM + select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 + select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) + select HAVE_ARCH_TRACEHOOK +- select HAVE_BPF_JIT ++ select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 + select HAVE_CC_STACKPROTECTOR + select HAVE_CONTEXT_TRACKING + select HAVE_C_RECORDMCOUNT +--- a/arch/arm/net/bpf_jit_32.c ++++ b/arch/arm/net/bpf_jit_32.c +@@ -1,13 +1,12 @@ ++// SPDX-License-Identifier: GPL-2.0-only + /* +- * Just-In-Time compiler for BPF filters on 32bit ARM ++ * Just-In-Time compiler for eBPF filters on 32bit ARM + * ++ * Copyright (c) 2017 Shubham Bansal + * Copyright (c) 2011 Mircea Gherzan +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License as published by the +- * Free Software Foundation; version 2 of the License. + */ + ++#include + #include + #include + #include +@@ -20,51 +19,182 @@ + #include + #include + #include ++#include + + #include "bpf_jit_32.h" + + /* +- * ABI: ++ * eBPF prog stack layout: ++ * ++ * high ++ * original ARM_SP => +-----+ ++ * | | callee saved registers ++ * +-----+ <= (BPF_FP + SCRATCH_SIZE) ++ * | ... | eBPF JIT scratch space ++ * eBPF fp register => +-----+ ++ * (BPF_FP) | ... | eBPF prog stack ++ * +-----+ ++ * |RSVD | JIT scratchpad ++ * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE) ++ * | | ++ * | ... | Function call stack ++ * | | ++ * +-----+ ++ * low ++ * ++ * The callee saved registers depends on whether frame pointers are enabled. 
++ * With frame pointers (to be compliant with the ABI): ++ * ++ * high ++ * original ARM_SP => +--------------+ \ ++ * | pc | | ++ * current ARM_FP => +--------------+ } callee saved registers ++ * |r4-r9,fp,ip,lr| | ++ * +--------------+ / ++ * low + * +- * r0 scratch register +- * r4 BPF register A +- * r5 BPF register X +- * r6 pointer to the skb +- * r7 skb->data +- * r8 skb_headlen(skb) ++ * Without frame pointers: ++ * ++ * high ++ * original ARM_SP => +--------------+ ++ * | r4-r9,fp,lr | callee saved registers ++ * current ARM_FP => +--------------+ ++ * low ++ * ++ * When popping registers off the stack at the end of a BPF function, we ++ * reference them via the current ARM_FP register. + */ ++#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \ ++ 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R9 | \ ++ 1 << ARM_FP) ++#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR) ++#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC) ++ ++enum { ++ /* Stack layout - these are offsets from (top of stack - 4) */ ++ BPF_R2_HI, ++ BPF_R2_LO, ++ BPF_R3_HI, ++ BPF_R3_LO, ++ BPF_R4_HI, ++ BPF_R4_LO, ++ BPF_R5_HI, ++ BPF_R5_LO, ++ BPF_R7_HI, ++ BPF_R7_LO, ++ BPF_R8_HI, ++ BPF_R8_LO, ++ BPF_R9_HI, ++ BPF_R9_LO, ++ BPF_FP_HI, ++ BPF_FP_LO, ++ BPF_TC_HI, ++ BPF_TC_LO, ++ BPF_AX_HI, ++ BPF_AX_LO, ++ /* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4, ++ * BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9, ++ * BPF_REG_FP and Tail call counts. ++ */ ++ BPF_JIT_SCRATCH_REGS, ++}; ++ ++/* ++ * Negative "register" values indicate the register is stored on the stack ++ * and are the offset from the top of the eBPF JIT scratch space. 
++ */ ++#define STACK_OFFSET(k) (-4 - (k) * 4) ++#define SCRATCH_SIZE (BPF_JIT_SCRATCH_REGS * 4) ++ ++#ifdef CONFIG_FRAME_POINTER ++#define EBPF_SCRATCH_TO_ARM_FP(x) ((x) - 4 * hweight16(CALLEE_PUSH_MASK) - 4) ++#else ++#define EBPF_SCRATCH_TO_ARM_FP(x) (x) ++#endif + +-#define r_scratch ARM_R0 +-/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */ +-#define r_off ARM_R1 +-#define r_A ARM_R4 +-#define r_X ARM_R5 +-#define r_skb ARM_R6 +-#define r_skb_data ARM_R7 +-#define r_skb_hl ARM_R8 +- +-#define SCRATCH_SP_OFFSET 0 +-#define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + 4 * (k)) +- +-#define SEEN_MEM ((1 << BPF_MEMWORDS) - 1) +-#define SEEN_MEM_WORD(k) (1 << (k)) +-#define SEEN_X (1 << BPF_MEMWORDS) +-#define SEEN_CALL (1 << (BPF_MEMWORDS + 1)) +-#define SEEN_SKB (1 << (BPF_MEMWORDS + 2)) +-#define SEEN_DATA (1 << (BPF_MEMWORDS + 3)) ++#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */ ++#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */ ++#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */ + +-#define FLAG_NEED_X_RESET (1 << 0) +-#define FLAG_IMM_OVERFLOW (1 << 1) ++#define FLAG_IMM_OVERFLOW (1 << 0) ++ ++/* ++ * Map eBPF registers to ARM 32bit registers or stack scratch space. ++ * ++ * 1. First argument is passed using the arm 32bit registers and rest of the ++ * arguments are passed on stack scratch space. ++ * 2. First callee-saved argument is mapped to arm 32 bit registers and rest ++ * arguments are mapped to scratch space on stack. ++ * 3. We need two 64 bit temp registers to do complex operations on eBPF ++ * registers. ++ * ++ * As the eBPF registers are all 64 bit registers and arm has only 32 bit ++ * registers, we have to map each eBPF registers with two arm 32 bit regs or ++ * scratch memory space and we have to build eBPF 64 bit register from those. 
++ * ++ */ ++static const s8 bpf2a32[][2] = { ++ /* return value from in-kernel function, and exit value from eBPF */ ++ [BPF_REG_0] = {ARM_R1, ARM_R0}, ++ /* arguments from eBPF program to in-kernel function */ ++ [BPF_REG_1] = {ARM_R3, ARM_R2}, ++ /* Stored on stack scratch space */ ++ [BPF_REG_2] = {STACK_OFFSET(BPF_R2_HI), STACK_OFFSET(BPF_R2_LO)}, ++ [BPF_REG_3] = {STACK_OFFSET(BPF_R3_HI), STACK_OFFSET(BPF_R3_LO)}, ++ [BPF_REG_4] = {STACK_OFFSET(BPF_R4_HI), STACK_OFFSET(BPF_R4_LO)}, ++ [BPF_REG_5] = {STACK_OFFSET(BPF_R5_HI), STACK_OFFSET(BPF_R5_LO)}, ++ /* callee saved registers that in-kernel function will preserve */ ++ [BPF_REG_6] = {ARM_R5, ARM_R4}, ++ /* Stored on stack scratch space */ ++ [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)}, ++ [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)}, ++ [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)}, ++ /* Read only Frame Pointer to access Stack */ ++ [BPF_REG_FP] = {STACK_OFFSET(BPF_FP_HI), STACK_OFFSET(BPF_FP_LO)}, ++ /* Temporary Register for internal BPF JIT, can be used ++ * for constant blindings and others. ++ */ ++ [TMP_REG_1] = {ARM_R7, ARM_R6}, ++ [TMP_REG_2] = {ARM_R9, ARM_R8}, ++ /* Tail call count. Stored on stack scratch space. */ ++ [TCALL_CNT] = {STACK_OFFSET(BPF_TC_HI), STACK_OFFSET(BPF_TC_LO)}, ++ /* temporary register for blinding constants. ++ * Stored on stack scratch space. ++ */ ++ [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)}, ++}; ++ ++#define dst_lo dst[1] ++#define dst_hi dst[0] ++#define src_lo src[1] ++#define src_hi src[0] ++ ++/* ++ * JIT Context: ++ * ++ * prog : bpf_prog ++ * idx : index of current last JITed instruction. ++ * prologue_bytes : bytes used in prologue. ++ * epilogue_offset : offset of epilogue starting. ++ * offsets : array of eBPF instruction offsets in ++ * JITed code. ++ * target : final JITed code. ++ * epilogue_bytes : no of bytes used in epilogue. 
++ * imm_count : no of immediate counts used for global ++ * variables. ++ * imms : array of global variable addresses. ++ */ + + struct jit_ctx { +- const struct bpf_prog *skf; +- unsigned idx; +- unsigned prologue_bytes; +- int ret0_fp_idx; +- u32 seen; ++ const struct bpf_prog *prog; ++ unsigned int idx; ++ unsigned int prologue_bytes; ++ unsigned int epilogue_offset; ++ unsigned int cpu_architecture; + u32 flags; + u32 *offsets; + u32 *target; ++ u32 stack_size; + #if __LINUX_ARM_ARCH__ < 7 + u16 epilogue_bytes; + u16 imm_count; +@@ -72,68 +202,16 @@ struct jit_ctx { + #endif + }; + +-int bpf_jit_enable __read_mostly; +- +-static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret, +- unsigned int size) +-{ +- void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size); +- +- if (!ptr) +- return -EFAULT; +- memcpy(ret, ptr, size); +- return 0; +-} +- +-static u64 jit_get_skb_b(struct sk_buff *skb, int offset) +-{ +- u8 ret; +- int err; +- +- if (offset < 0) +- err = call_neg_helper(skb, offset, &ret, 1); +- else +- err = skb_copy_bits(skb, offset, &ret, 1); +- +- return (u64)err << 32 | ret; +-} +- +-static u64 jit_get_skb_h(struct sk_buff *skb, int offset) +-{ +- u16 ret; +- int err; +- +- if (offset < 0) +- err = call_neg_helper(skb, offset, &ret, 2); +- else +- err = skb_copy_bits(skb, offset, &ret, 2); +- +- return (u64)err << 32 | ntohs(ret); +-} +- +-static u64 jit_get_skb_w(struct sk_buff *skb, int offset) +-{ +- u32 ret; +- int err; +- +- if (offset < 0) +- err = call_neg_helper(skb, offset, &ret, 4); +- else +- err = skb_copy_bits(skb, offset, &ret, 4); +- +- return (u64)err << 32 | ntohl(ret); +-} +- + /* + * Wrappers which handle both OABI and EABI and assures Thumb2 interworking + * (where the assembly routines like __aeabi_uidiv could cause problems). 
+ */ +-static u32 jit_udiv(u32 dividend, u32 divisor) ++static u32 jit_udiv32(u32 dividend, u32 divisor) + { + return dividend / divisor; + } + +-static u32 jit_mod(u32 dividend, u32 divisor) ++static u32 jit_mod32(u32 dividend, u32 divisor) + { + return dividend % divisor; + } +@@ -157,36 +235,100 @@ static inline void emit(u32 inst, struct + _emit(ARM_COND_AL, inst, ctx); + } + +-static u16 saved_regs(struct jit_ctx *ctx) ++/* ++ * This is rather horrid, but necessary to convert an integer constant ++ * to an immediate operand for the opcodes, and be able to detect at ++ * build time whether the constant can't be converted (iow, usable in ++ * BUILD_BUG_ON()). ++ */ ++#define imm12val(v, s) (rol32(v, (s)) | (s) << 7) ++#define const_imm8m(x) \ ++ ({ int r; \ ++ u32 v = (x); \ ++ if (!(v & ~0x000000ff)) \ ++ r = imm12val(v, 0); \ ++ else if (!(v & ~0xc000003f)) \ ++ r = imm12val(v, 2); \ ++ else if (!(v & ~0xf000000f)) \ ++ r = imm12val(v, 4); \ ++ else if (!(v & ~0xfc000003)) \ ++ r = imm12val(v, 6); \ ++ else if (!(v & ~0xff000000)) \ ++ r = imm12val(v, 8); \ ++ else if (!(v & ~0x3fc00000)) \ ++ r = imm12val(v, 10); \ ++ else if (!(v & ~0x0ff00000)) \ ++ r = imm12val(v, 12); \ ++ else if (!(v & ~0x03fc0000)) \ ++ r = imm12val(v, 14); \ ++ else if (!(v & ~0x00ff0000)) \ ++ r = imm12val(v, 16); \ ++ else if (!(v & ~0x003fc000)) \ ++ r = imm12val(v, 18); \ ++ else if (!(v & ~0x000ff000)) \ ++ r = imm12val(v, 20); \ ++ else if (!(v & ~0x0003fc00)) \ ++ r = imm12val(v, 22); \ ++ else if (!(v & ~0x0000ff00)) \ ++ r = imm12val(v, 24); \ ++ else if (!(v & ~0x00003fc0)) \ ++ r = imm12val(v, 26); \ ++ else if (!(v & ~0x00000ff0)) \ ++ r = imm12val(v, 28); \ ++ else if (!(v & ~0x000003fc)) \ ++ r = imm12val(v, 30); \ ++ else \ ++ r = -1; \ ++ r; }) ++ ++/* ++ * Checks if immediate value can be converted to imm12(12 bits) value. 
++ */ ++static int imm8m(u32 x) + { +- u16 ret = 0; ++ u32 rot; + +- if ((ctx->skf->len > 1) || +- (ctx->skf->insns[0].code == (BPF_RET | BPF_A))) +- ret |= 1 << r_A; ++ for (rot = 0; rot < 16; rot++) ++ if ((x & ~ror32(0xff, 2 * rot)) == 0) ++ return rol32(x, 2 * rot) | (rot << 8); ++ return -1; ++} + +-#ifdef CONFIG_FRAME_POINTER +- ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC); +-#else +- if (ctx->seen & SEEN_CALL) +- ret |= 1 << ARM_LR; +-#endif +- if (ctx->seen & (SEEN_DATA | SEEN_SKB)) +- ret |= 1 << r_skb; +- if (ctx->seen & SEEN_DATA) +- ret |= (1 << r_skb_data) | (1 << r_skb_hl); +- if (ctx->seen & SEEN_X) +- ret |= 1 << r_X; ++#define imm8m(x) (__builtin_constant_p(x) ? const_imm8m(x) : imm8m(x)) + +- return ret; ++static u32 arm_bpf_ldst_imm12(u32 op, u8 rt, u8 rn, s16 imm12) ++{ ++ op |= rt << 12 | rn << 16; ++ if (imm12 >= 0) ++ op |= ARM_INST_LDST__U; ++ else ++ imm12 = -imm12; ++ return op | (imm12 & ARM_INST_LDST__IMM12); + } + +-static inline int mem_words_used(struct jit_ctx *ctx) ++static u32 arm_bpf_ldst_imm8(u32 op, u8 rt, u8 rn, s16 imm8) + { +- /* yes, we do waste some stack space IF there are "holes" in the set" */ +- return fls(ctx->seen & SEEN_MEM); ++ op |= rt << 12 | rn << 16; ++ if (imm8 >= 0) ++ op |= ARM_INST_LDST__U; ++ else ++ imm8 = -imm8; ++ return op | (imm8 & 0xf0) << 4 | (imm8 & 0x0f); + } + ++#define ARM_LDR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDR_I, rt, rn, off) ++#define ARM_LDRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_LDRB_I, rt, rn, off) ++#define ARM_LDRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRD_I, rt, rn, off) ++#define ARM_LDRH_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_LDRH_I, rt, rn, off) ++ ++#define ARM_STR_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STR_I, rt, rn, off) ++#define ARM_STRB_I(rt, rn, off) arm_bpf_ldst_imm12(ARM_INST_STRB_I, rt, rn, off) ++#define ARM_STRD_I(rt, rn, off) arm_bpf_ldst_imm8(ARM_INST_STRD_I, rt, rn, off) ++#define ARM_STRH_I(rt, rn, off) 
arm_bpf_ldst_imm8(ARM_INST_STRH_I, rt, rn, off) ++ ++/* ++ * Initializes the JIT space with undefined instructions. ++ */ + static void jit_fill_hole(void *area, unsigned int size) + { + u32 *ptr; +@@ -195,88 +337,23 @@ static void jit_fill_hole(void *area, un + *ptr++ = __opcode_to_mem_arm(ARM_INST_UDF); + } + +-static void build_prologue(struct jit_ctx *ctx) +-{ +- u16 reg_set = saved_regs(ctx); +- u16 off; +- +-#ifdef CONFIG_FRAME_POINTER +- emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx); +- emit(ARM_PUSH(reg_set), ctx); +- emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); ++#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) ++/* EABI requires the stack to be aligned to 64-bit boundaries */ ++#define STACK_ALIGNMENT 8 + #else +- if (reg_set) +- emit(ARM_PUSH(reg_set), ctx); ++/* Stack must be aligned to 32-bit boundaries */ ++#define STACK_ALIGNMENT 4 + #endif + +- if (ctx->seen & (SEEN_DATA | SEEN_SKB)) +- emit(ARM_MOV_R(r_skb, ARM_R0), ctx); +- +- if (ctx->seen & SEEN_DATA) { +- off = offsetof(struct sk_buff, data); +- emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx); +- /* headlen = len - data_len */ +- off = offsetof(struct sk_buff, len); +- emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx); +- off = offsetof(struct sk_buff, data_len); +- emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); +- emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx); +- } +- +- if (ctx->flags & FLAG_NEED_X_RESET) +- emit(ARM_MOV_I(r_X, 0), ctx); +- +- /* do not leak kernel data to userspace */ +- if (bpf_needs_clear_a(&ctx->skf->insns[0])) +- emit(ARM_MOV_I(r_A, 0), ctx); +- +- /* stack space for the BPF_MEM words */ +- if (ctx->seen & SEEN_MEM) +- emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); +-} +- +-static void build_epilogue(struct jit_ctx *ctx) +-{ +- u16 reg_set = saved_regs(ctx); +- +- if (ctx->seen & SEEN_MEM) +- emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); +- +- reg_set &= ~(1 << ARM_LR); +- +-#ifdef CONFIG_FRAME_POINTER +- /* the first instruction of the prologue was: 
mov ip, sp */ +- reg_set &= ~(1 << ARM_IP); +- reg_set |= (1 << ARM_SP); +- emit(ARM_LDM(ARM_SP, reg_set), ctx); +-#else +- if (reg_set) { +- if (ctx->seen & SEEN_CALL) +- reg_set |= 1 << ARM_PC; +- emit(ARM_POP(reg_set), ctx); +- } +- +- if (!(ctx->seen & SEEN_CALL)) +- emit(ARM_BX(ARM_LR), ctx); +-#endif +-} +- +-static int16_t imm8m(u32 x) +-{ +- u32 rot; +- +- for (rot = 0; rot < 16; rot++) +- if ((x & ~ror32(0xff, 2 * rot)) == 0) +- return rol32(x, 2 * rot) | (rot << 8); +- +- return -1; +-} ++/* total stack size used in JITed code */ ++#define _STACK_SIZE (ctx->prog->aux->stack_depth + SCRATCH_SIZE) ++#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT) + + #if __LINUX_ARM_ARCH__ < 7 + + static u16 imm_offset(u32 k, struct jit_ctx *ctx) + { +- unsigned i = 0, offset; ++ unsigned int i = 0, offset; + u16 imm; + + /* on the "fake" run we just count them (duplicates included) */ +@@ -295,7 +372,7 @@ static u16 imm_offset(u32 k, struct jit_ + ctx->imms[i] = k; + + /* constants go just after the epilogue */ +- offset = ctx->offsets[ctx->skf->len]; ++ offset = ctx->offsets[ctx->prog->len - 1] * 4; + offset += ctx->prologue_bytes; + offset += ctx->epilogue_bytes; + offset += i * 4; +@@ -319,10 +396,22 @@ static u16 imm_offset(u32 k, struct jit_ + + #endif /* __LINUX_ARM_ARCH__ */ + ++static inline int bpf2a32_offset(int bpf_to, int bpf_from, ++ const struct jit_ctx *ctx) { ++ int to, from; ++ ++ if (ctx->target == NULL) ++ return 0; ++ to = ctx->offsets[bpf_to]; ++ from = ctx->offsets[bpf_from]; ++ ++ return to - from - 1; ++} ++ + /* + * Move an immediate that's not an imm8m to a core register. 
+ */ +-static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx) ++static inline void emit_mov_i_no8m(const u8 rd, u32 val, struct jit_ctx *ctx) + { + #if __LINUX_ARM_ARCH__ < 7 + emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx); +@@ -333,7 +422,7 @@ static inline void emit_mov_i_no8m(int r + #endif + } + +-static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx) ++static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx) + { + int imm12 = imm8m(val); + +@@ -343,676 +432,1508 @@ static inline void emit_mov_i(int rd, u3 + emit_mov_i_no8m(rd, val, ctx); + } + +-#if __LINUX_ARM_ARCH__ < 6 ++static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx) ++{ ++ if (elf_hwcap & HWCAP_THUMB) ++ emit(ARM_BX(tgt_reg), ctx); ++ else ++ emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); ++} + +-static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) ++static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) + { +- _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx); +- _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); +- _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx); +- _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx); +- _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx); +- _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx); +- _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx); +- _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx); ++#if __LINUX_ARM_ARCH__ < 5 ++ emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); ++ emit_bx_r(tgt_reg, ctx); ++#else ++ emit(ARM_BLX_R(tgt_reg), ctx); ++#endif + } + +-static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) ++static inline int epilogue_offset(const struct jit_ctx *ctx) + { +- _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); +- _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx); +- _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx); ++ int to, from; ++ /* No need for 1st dummy run */ ++ if (ctx->target == NULL) ++ return 0; ++ 
to = ctx->epilogue_offset; ++ from = ctx->idx; ++ ++ return to - from - 2; + } + +-static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx) ++static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op) + { +- /* r_dst = (r_src << 8) | (r_src >> 8) */ +- emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx); +- emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx); ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ ++#if __LINUX_ARM_ARCH__ == 7 ++ if (elf_hwcap & HWCAP_IDIVA) { ++ if (op == BPF_DIV) ++ emit(ARM_UDIV(rd, rm, rn), ctx); ++ else { ++ emit(ARM_UDIV(ARM_IP, rm, rn), ctx); ++ emit(ARM_MLS(rd, rn, ARM_IP, rm), ctx); ++ } ++ return; ++ } ++#endif + + /* +- * we need to mask out the bits set in r_dst[23:16] due to +- * the first shift instruction. +- * +- * note that 0x8ff is the encoded immediate 0x00ff0000. ++ * For BPF_ALU | BPF_DIV | BPF_K instructions ++ * As ARM_R1 and ARM_R0 contains 1st argument of bpf ++ * function, we need to save it on caller side to save ++ * it from getting destroyed within callee. ++ * After the return from the callee, we restore ARM_R0 ++ * ARM_R1. + */ +- emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx); ++ if (rn != ARM_R1) { ++ emit(ARM_MOV_R(tmp[0], ARM_R1), ctx); ++ emit(ARM_MOV_R(ARM_R1, rn), ctx); ++ } ++ if (rm != ARM_R0) { ++ emit(ARM_MOV_R(tmp[1], ARM_R0), ctx); ++ emit(ARM_MOV_R(ARM_R0, rm), ctx); ++ } ++ ++ /* Call appropriate function */ ++ emit_mov_i(ARM_IP, op == BPF_DIV ? ++ (u32)jit_udiv32 : (u32)jit_mod32, ctx); ++ emit_blx_r(ARM_IP, ctx); ++ ++ /* Save return value */ ++ if (rd != ARM_R0) ++ emit(ARM_MOV_R(rd, ARM_R0), ctx); ++ ++ /* Restore ARM_R0 and ARM_R1 */ ++ if (rn != ARM_R1) ++ emit(ARM_MOV_R(ARM_R1, tmp[0]), ctx); ++ if (rm != ARM_R0) ++ emit(ARM_MOV_R(ARM_R0, tmp[1]), ctx); + } + +-#else /* ARMv6+ */ ++/* Is the translated BPF register on stack? 
*/ ++static bool is_stacked(s8 reg) ++{ ++ return reg < 0; ++} + +-static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) ++/* If a BPF register is on the stack (stk is true), load it to the ++ * supplied temporary register and return the temporary register ++ * for subsequent operations, otherwise just use the CPU register. ++ */ ++static s8 arm_bpf_get_reg32(s8 reg, s8 tmp, struct jit_ctx *ctx) + { +- _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx); +-#ifdef __LITTLE_ENDIAN +- _emit(cond, ARM_REV(r_res, r_res), ctx); +-#endif ++ if (is_stacked(reg)) { ++ emit(ARM_LDR_I(tmp, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx); ++ reg = tmp; ++ } ++ return reg; + } + +-static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) ++static const s8 *arm_bpf_get_reg64(const s8 *reg, const s8 *tmp, ++ struct jit_ctx *ctx) + { +- _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx); +-#ifdef __LITTLE_ENDIAN +- _emit(cond, ARM_REV16(r_res, r_res), ctx); +-#endif ++ if (is_stacked(reg[1])) { ++ if (__LINUX_ARM_ARCH__ >= 6 || ++ ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) { ++ emit(ARM_LDRD_I(tmp[1], ARM_FP, ++ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); ++ } else { ++ emit(ARM_LDR_I(tmp[1], ARM_FP, ++ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); ++ emit(ARM_LDR_I(tmp[0], ARM_FP, ++ EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx); ++ } ++ reg = tmp; ++ } ++ return reg; + } + +-static inline void emit_swap16(u8 r_dst __maybe_unused, +- u8 r_src __maybe_unused, +- struct jit_ctx *ctx __maybe_unused) ++/* If a BPF register is on the stack (stk is true), save the register ++ * back to the stack. If the source register is not the same, then ++ * move it into the correct register. 
++ */ ++static void arm_bpf_put_reg32(s8 reg, s8 src, struct jit_ctx *ctx) + { +-#ifdef __LITTLE_ENDIAN +- emit(ARM_REV16(r_dst, r_src), ctx); +-#endif ++ if (is_stacked(reg)) ++ emit(ARM_STR_I(src, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(reg)), ctx); ++ else if (reg != src) ++ emit(ARM_MOV_R(reg, src), ctx); ++} ++ ++static void arm_bpf_put_reg64(const s8 *reg, const s8 *src, ++ struct jit_ctx *ctx) ++{ ++ if (is_stacked(reg[1])) { ++ if (__LINUX_ARM_ARCH__ >= 6 || ++ ctx->cpu_architecture >= CPU_ARCH_ARMv5TE) { ++ emit(ARM_STRD_I(src[1], ARM_FP, ++ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); ++ } else { ++ emit(ARM_STR_I(src[1], ARM_FP, ++ EBPF_SCRATCH_TO_ARM_FP(reg[1])), ctx); ++ emit(ARM_STR_I(src[0], ARM_FP, ++ EBPF_SCRATCH_TO_ARM_FP(reg[0])), ctx); ++ } ++ } else { ++ if (reg[1] != src[1]) ++ emit(ARM_MOV_R(reg[1], src[1]), ctx); ++ if (reg[0] != src[0]) ++ emit(ARM_MOV_R(reg[0], src[0]), ctx); ++ } + } + +-#endif /* __LINUX_ARM_ARCH__ < 6 */ ++static inline void emit_a32_mov_i(const s8 dst, const u32 val, ++ struct jit_ctx *ctx) ++{ ++ const s8 *tmp = bpf2a32[TMP_REG_1]; + ++ if (is_stacked(dst)) { ++ emit_mov_i(tmp[1], val, ctx); ++ arm_bpf_put_reg32(dst, tmp[1], ctx); ++ } else { ++ emit_mov_i(dst, val, ctx); ++ } ++} + +-/* Compute the immediate value for a PC-relative branch. */ +-static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx) ++static void emit_a32_mov_i64(const s8 dst[], u64 val, struct jit_ctx *ctx) + { +- u32 imm; ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *rd = is_stacked(dst_lo) ? tmp : dst; + +- if (ctx->target == NULL) +- return 0; +- /* +- * BPF allows only forward jumps and the offset of the target is +- * still the one computed during the first pass. 
++ emit_mov_i(rd[1], (u32)val, ctx); ++ emit_mov_i(rd[0], val >> 32, ctx); ++ ++ arm_bpf_put_reg64(dst, rd, ctx); ++} ++ ++/* Sign extended move */ ++static inline void emit_a32_mov_se_i64(const bool is64, const s8 dst[], ++ const u32 val, struct jit_ctx *ctx) { ++ u64 val64 = val; ++ ++ if (is64 && (val & (1<<31))) ++ val64 |= 0xffffffff00000000ULL; ++ emit_a32_mov_i64(dst, val64, ctx); ++} ++ ++static inline void emit_a32_add_r(const u8 dst, const u8 src, ++ const bool is64, const bool hi, ++ struct jit_ctx *ctx) { ++ /* 64 bit : ++ * adds dst_lo, dst_lo, src_lo ++ * adc dst_hi, dst_hi, src_hi ++ * 32 bit : ++ * add dst_lo, dst_lo, src_lo + */ +- imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8); ++ if (!hi && is64) ++ emit(ARM_ADDS_R(dst, dst, src), ctx); ++ else if (hi && is64) ++ emit(ARM_ADC_R(dst, dst, src), ctx); ++ else ++ emit(ARM_ADD_R(dst, dst, src), ctx); ++} + +- return imm >> 2; ++static inline void emit_a32_sub_r(const u8 dst, const u8 src, ++ const bool is64, const bool hi, ++ struct jit_ctx *ctx) { ++ /* 64 bit : ++ * subs dst_lo, dst_lo, src_lo ++ * sbc dst_hi, dst_hi, src_hi ++ * 32 bit : ++ * sub dst_lo, dst_lo, src_lo ++ */ ++ if (!hi && is64) ++ emit(ARM_SUBS_R(dst, dst, src), ctx); ++ else if (hi && is64) ++ emit(ARM_SBC_R(dst, dst, src), ctx); ++ else ++ emit(ARM_SUB_R(dst, dst, src), ctx); ++} ++ ++static inline void emit_alu_r(const u8 dst, const u8 src, const bool is64, ++ const bool hi, const u8 op, struct jit_ctx *ctx){ ++ switch (BPF_OP(op)) { ++ /* dst = dst + src */ ++ case BPF_ADD: ++ emit_a32_add_r(dst, src, is64, hi, ctx); ++ break; ++ /* dst = dst - src */ ++ case BPF_SUB: ++ emit_a32_sub_r(dst, src, is64, hi, ctx); ++ break; ++ /* dst = dst | src */ ++ case BPF_OR: ++ emit(ARM_ORR_R(dst, dst, src), ctx); ++ break; ++ /* dst = dst & src */ ++ case BPF_AND: ++ emit(ARM_AND_R(dst, dst, src), ctx); ++ break; ++ /* dst = dst ^ src */ ++ case BPF_XOR: ++ emit(ARM_EOR_R(dst, dst, src), ctx); ++ break; ++ /* dst = dst 
* src */ ++ case BPF_MUL: ++ emit(ARM_MUL(dst, dst, src), ctx); ++ break; ++ /* dst = dst << src */ ++ case BPF_LSH: ++ emit(ARM_LSL_R(dst, dst, src), ctx); ++ break; ++ /* dst = dst >> src */ ++ case BPF_RSH: ++ emit(ARM_LSR_R(dst, dst, src), ctx); ++ break; ++ /* dst = dst >> src (signed)*/ ++ case BPF_ARSH: ++ emit(ARM_MOV_SR(dst, dst, SRTYPE_ASR, src), ctx); ++ break; ++ } + } + +-#define OP_IMM3(op, r1, r2, imm_val, ctx) \ +- do { \ +- imm12 = imm8m(imm_val); \ +- if (imm12 < 0) { \ +- emit_mov_i_no8m(r_scratch, imm_val, ctx); \ +- emit(op ## _R((r1), (r2), r_scratch), ctx); \ +- } else { \ +- emit(op ## _I((r1), (r2), imm12), ctx); \ +- } \ +- } while (0) +- +-static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx) +-{ +- if (ctx->ret0_fp_idx >= 0) { +- _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx); +- /* NOP to keep the size constant between passes */ +- emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx); ++/* ALU operation (32 bit) ++ * dst = dst (op) src ++ */ ++static inline void emit_a32_alu_r(const s8 dst, const s8 src, ++ struct jit_ctx *ctx, const bool is64, ++ const bool hi, const u8 op) { ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ s8 rn, rd; ++ ++ rn = arm_bpf_get_reg32(src, tmp[1], ctx); ++ rd = arm_bpf_get_reg32(dst, tmp[0], ctx); ++ /* ALU operation */ ++ emit_alu_r(rd, rn, is64, hi, op, ctx); ++ arm_bpf_put_reg32(dst, rd, ctx); ++} ++ ++/* ALU operation (64 bit) */ ++static inline void emit_a32_alu_r64(const bool is64, const s8 dst[], ++ const s8 src[], struct jit_ctx *ctx, ++ const u8 op) { ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ const s8 *rd; ++ ++ rd = arm_bpf_get_reg64(dst, tmp, ctx); ++ if (is64) { ++ const s8 *rs; ++ ++ rs = arm_bpf_get_reg64(src, tmp2, ctx); ++ ++ /* ALU operation */ ++ emit_alu_r(rd[1], rs[1], true, false, op, ctx); ++ emit_alu_r(rd[0], rs[0], true, true, op, ctx); + } else { +- _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx); +- _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx); ++ s8 
rs; ++ ++ rs = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); ++ ++ /* ALU operation */ ++ emit_alu_r(rd[1], rs, true, false, op, ctx); ++ if (!ctx->prog->aux->verifier_zext) ++ emit_a32_mov_i(rd[0], 0, ctx); + } ++ ++ arm_bpf_put_reg64(dst, rd, ctx); + } + +-static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) +-{ +-#if __LINUX_ARM_ARCH__ < 5 +- emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); ++/* dst = src (4 bytes)*/ ++static inline void emit_a32_mov_r(const s8 dst, const s8 src, ++ struct jit_ctx *ctx) { ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ s8 rt; ++ ++ rt = arm_bpf_get_reg32(src, tmp[0], ctx); ++ arm_bpf_put_reg32(dst, rt, ctx); ++} ++ ++/* dst = src */ ++static inline void emit_a32_mov_r64(const bool is64, const s8 dst[], ++ const s8 src[], ++ struct jit_ctx *ctx) { ++ if (!is64) { ++ emit_a32_mov_r(dst_lo, src_lo, ctx); ++ if (!ctx->prog->aux->verifier_zext) ++ /* Zero out high 4 bytes */ ++ emit_a32_mov_i(dst_hi, 0, ctx); ++ } else if (__LINUX_ARM_ARCH__ < 6 && ++ ctx->cpu_architecture < CPU_ARCH_ARMv5TE) { ++ /* complete 8 byte move */ ++ emit_a32_mov_r(dst_lo, src_lo, ctx); ++ emit_a32_mov_r(dst_hi, src_hi, ctx); ++ } else if (is_stacked(src_lo) && is_stacked(dst_lo)) { ++ const u8 *tmp = bpf2a32[TMP_REG_1]; ++ ++ emit(ARM_LDRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx); ++ emit(ARM_STRD_I(tmp[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx); ++ } else if (is_stacked(src_lo)) { ++ emit(ARM_LDRD_I(dst[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(src_lo)), ctx); ++ } else if (is_stacked(dst_lo)) { ++ emit(ARM_STRD_I(src[1], ARM_FP, EBPF_SCRATCH_TO_ARM_FP(dst_lo)), ctx); ++ } else { ++ emit(ARM_MOV_R(dst[0], src[0]), ctx); ++ emit(ARM_MOV_R(dst[1], src[1]), ctx); ++ } ++} + +- if (elf_hwcap & HWCAP_THUMB) +- emit(ARM_BX(tgt_reg), ctx); +- else +- emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); +-#else +- emit(ARM_BLX_R(tgt_reg), ctx); +-#endif ++/* Shift operations */ ++static inline void emit_a32_alu_i(const s8 dst, const u32 val, ++ struct jit_ctx *ctx, const u8 
op) { ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ s8 rd; ++ ++ rd = arm_bpf_get_reg32(dst, tmp[0], ctx); ++ ++ /* Do shift operation */ ++ switch (op) { ++ case BPF_LSH: ++ emit(ARM_LSL_I(rd, rd, val), ctx); ++ break; ++ case BPF_RSH: ++ emit(ARM_LSR_I(rd, rd, val), ctx); ++ break; ++ case BPF_NEG: ++ emit(ARM_RSB_I(rd, rd, val), ctx); ++ break; ++ } ++ ++ arm_bpf_put_reg32(dst, rd, ctx); + } + +-static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, +- int bpf_op) +-{ +-#if __LINUX_ARM_ARCH__ == 7 +- if (elf_hwcap & HWCAP_IDIVA) { +- if (bpf_op == BPF_DIV) +- emit(ARM_UDIV(rd, rm, rn), ctx); +- else { +- emit(ARM_UDIV(ARM_R3, rm, rn), ctx); +- emit(ARM_MLS(rd, rn, ARM_R3, rm), ctx); ++/* dst = ~dst (64 bit) */ ++static inline void emit_a32_neg64(const s8 dst[], ++ struct jit_ctx *ctx){ ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *rd; ++ ++ /* Setup Operand */ ++ rd = arm_bpf_get_reg64(dst, tmp, ctx); ++ ++ /* Do Negate Operation */ ++ emit(ARM_RSBS_I(rd[1], rd[1], 0), ctx); ++ emit(ARM_RSC_I(rd[0], rd[0], 0), ctx); ++ ++ arm_bpf_put_reg64(dst, rd, ctx); ++} ++ ++/* dst = dst << src */ ++static inline void emit_a32_lsh_r64(const s8 dst[], const s8 src[], ++ struct jit_ctx *ctx) { ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ const s8 *rd; ++ s8 rt; ++ ++ /* Setup Operands */ ++ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); ++ rd = arm_bpf_get_reg64(dst, tmp, ctx); ++ ++ /* Do LSH operation */ ++ emit(ARM_SUB_I(ARM_IP, rt, 32), ctx); ++ emit(ARM_RSB_I(tmp2[0], rt, 32), ctx); ++ emit(ARM_MOV_SR(ARM_LR, rd[0], SRTYPE_ASL, rt), ctx); ++ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[1], SRTYPE_ASL, ARM_IP), ctx); ++ emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd[1], SRTYPE_LSR, tmp2[0]), ctx); ++ emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_ASL, rt), ctx); ++ ++ arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); ++ arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); ++} ++ ++/* dst = dst >> src (signed)*/ ++static inline void emit_a32_arsh_r64(const s8 dst[], 
const s8 src[], ++ struct jit_ctx *ctx) { ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ const s8 *rd; ++ s8 rt; ++ ++ /* Setup Operands */ ++ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); ++ rd = arm_bpf_get_reg64(dst, tmp, ctx); ++ ++ /* Do the ARSH operation */ ++ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); ++ emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); ++ emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx); ++ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx); ++ _emit(ARM_COND_MI, ARM_B(0), ctx); ++ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASR, tmp2[0]), ctx); ++ emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_ASR, rt), ctx); ++ ++ arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); ++ arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); ++} ++ ++/* dst = dst >> src */ ++static inline void emit_a32_rsh_r64(const s8 dst[], const s8 src[], ++ struct jit_ctx *ctx) { ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ const s8 *rd; ++ s8 rt; ++ ++ /* Setup Operands */ ++ rt = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); ++ rd = arm_bpf_get_reg64(dst, tmp, ctx); ++ ++ /* Do RSH operation */ ++ emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); ++ emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); ++ emit(ARM_MOV_SR(ARM_LR, rd[1], SRTYPE_LSR, rt), ctx); ++ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_ASL, ARM_IP), ctx); ++ emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd[0], SRTYPE_LSR, tmp2[0]), ctx); ++ emit(ARM_MOV_SR(ARM_IP, rd[0], SRTYPE_LSR, rt), ctx); ++ ++ arm_bpf_put_reg32(dst_lo, ARM_LR, ctx); ++ arm_bpf_put_reg32(dst_hi, ARM_IP, ctx); ++} ++ ++/* dst = dst << val */ ++static inline void emit_a32_lsh_i64(const s8 dst[], ++ const u32 val, struct jit_ctx *ctx){ ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ const s8 *rd; ++ ++ /* Setup operands */ ++ rd = arm_bpf_get_reg64(dst, tmp, ctx); ++ ++ /* Do LSH operation */ ++ if (val < 32) { ++ emit(ARM_MOV_SI(tmp2[0], rd[0], SRTYPE_ASL, val), ctx); ++ emit(ARM_ORR_SI(rd[0], 
tmp2[0], rd[1], SRTYPE_LSR, 32 - val), ctx); ++ emit(ARM_MOV_SI(rd[1], rd[1], SRTYPE_ASL, val), ctx); ++ } else { ++ if (val == 32) ++ emit(ARM_MOV_R(rd[0], rd[1]), ctx); ++ else ++ emit(ARM_MOV_SI(rd[0], rd[1], SRTYPE_ASL, val - 32), ctx); ++ emit(ARM_EOR_R(rd[1], rd[1], rd[1]), ctx); ++ } ++ ++ arm_bpf_put_reg64(dst, rd, ctx); ++} ++ ++/* dst = dst >> val */ ++static inline void emit_a32_rsh_i64(const s8 dst[], ++ const u32 val, struct jit_ctx *ctx) { ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ const s8 *rd; ++ ++ /* Setup operands */ ++ rd = arm_bpf_get_reg64(dst, tmp, ctx); ++ ++ /* Do LSR operation */ ++ if (val == 0) { ++ /* An immediate value of 0 encodes a shift amount of 32 ++ * for LSR. To shift by 0, don't do anything. ++ */ ++ } else if (val < 32) { ++ emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); ++ emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); ++ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_LSR, val), ctx); ++ } else if (val == 32) { ++ emit(ARM_MOV_R(rd[1], rd[0]), ctx); ++ emit(ARM_MOV_I(rd[0], 0), ctx); ++ } else { ++ emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_LSR, val - 32), ctx); ++ emit(ARM_MOV_I(rd[0], 0), ctx); ++ } ++ ++ arm_bpf_put_reg64(dst, rd, ctx); ++} ++ ++/* dst = dst >> val (signed) */ ++static inline void emit_a32_arsh_i64(const s8 dst[], ++ const u32 val, struct jit_ctx *ctx){ ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ const s8 *rd; ++ ++ /* Setup operands */ ++ rd = arm_bpf_get_reg64(dst, tmp, ctx); ++ ++ /* Do ARSH operation */ ++ if (val == 0) { ++ /* An immediate value of 0 encodes a shift amount of 32 ++ * for ASR. To shift by 0, don't do anything. 
++ */ ++ } else if (val < 32) { ++ emit(ARM_MOV_SI(tmp2[1], rd[1], SRTYPE_LSR, val), ctx); ++ emit(ARM_ORR_SI(rd[1], tmp2[1], rd[0], SRTYPE_ASL, 32 - val), ctx); ++ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, val), ctx); ++ } else if (val == 32) { ++ emit(ARM_MOV_R(rd[1], rd[0]), ctx); ++ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx); ++ } else { ++ emit(ARM_MOV_SI(rd[1], rd[0], SRTYPE_ASR, val - 32), ctx); ++ emit(ARM_MOV_SI(rd[0], rd[0], SRTYPE_ASR, 31), ctx); ++ } ++ ++ arm_bpf_put_reg64(dst, rd, ctx); ++} ++ ++static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[], ++ struct jit_ctx *ctx) { ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ const s8 *rd, *rt; ++ ++ /* Setup operands for multiplication */ ++ rd = arm_bpf_get_reg64(dst, tmp, ctx); ++ rt = arm_bpf_get_reg64(src, tmp2, ctx); ++ ++ /* Do Multiplication */ ++ emit(ARM_MUL(ARM_IP, rd[1], rt[0]), ctx); ++ emit(ARM_MUL(ARM_LR, rd[0], rt[1]), ctx); ++ emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx); ++ ++ emit(ARM_UMULL(ARM_IP, rd[0], rd[1], rt[1]), ctx); ++ emit(ARM_ADD_R(rd[0], ARM_LR, rd[0]), ctx); ++ ++ arm_bpf_put_reg32(dst_lo, ARM_IP, ctx); ++ arm_bpf_put_reg32(dst_hi, rd[0], ctx); ++} ++ ++static bool is_ldst_imm(s16 off, const u8 size) ++{ ++ s16 off_max = 0; ++ ++ switch (size) { ++ case BPF_B: ++ case BPF_W: ++ off_max = 0xfff; ++ break; ++ case BPF_H: ++ off_max = 0xff; ++ break; ++ case BPF_DW: ++ /* Need to make sure off+4 does not overflow. 
*/ ++ off_max = 0xfff - 4; ++ break; ++ } ++ return -off_max <= off && off <= off_max; ++} ++ ++/* *(size *)(dst + off) = src */ ++static inline void emit_str_r(const s8 dst, const s8 src[], ++ s16 off, struct jit_ctx *ctx, const u8 sz){ ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ s8 rd; ++ ++ rd = arm_bpf_get_reg32(dst, tmp[1], ctx); ++ ++ if (!is_ldst_imm(off, sz)) { ++ emit_a32_mov_i(tmp[0], off, ctx); ++ emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx); ++ rd = tmp[0]; ++ off = 0; ++ } ++ switch (sz) { ++ case BPF_B: ++ /* Store a Byte */ ++ emit(ARM_STRB_I(src_lo, rd, off), ctx); ++ break; ++ case BPF_H: ++ /* Store a HalfWord */ ++ emit(ARM_STRH_I(src_lo, rd, off), ctx); ++ break; ++ case BPF_W: ++ /* Store a Word */ ++ emit(ARM_STR_I(src_lo, rd, off), ctx); ++ break; ++ case BPF_DW: ++ /* Store a Double Word */ ++ emit(ARM_STR_I(src_lo, rd, off), ctx); ++ emit(ARM_STR_I(src_hi, rd, off + 4), ctx); ++ break; ++ } ++} ++ ++/* dst = *(size*)(src + off) */ ++static inline void emit_ldx_r(const s8 dst[], const s8 src, ++ s16 off, struct jit_ctx *ctx, const u8 sz){ ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *rd = is_stacked(dst_lo) ? 
tmp : dst; ++ s8 rm = src; ++ ++ if (!is_ldst_imm(off, sz)) { ++ emit_a32_mov_i(tmp[0], off, ctx); ++ emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx); ++ rm = tmp[0]; ++ off = 0; ++ } else if (rd[1] == rm) { ++ emit(ARM_MOV_R(tmp[0], rm), ctx); ++ rm = tmp[0]; ++ } ++ switch (sz) { ++ case BPF_B: ++ /* Load a Byte */ ++ emit(ARM_LDRB_I(rd[1], rm, off), ctx); ++ if (!ctx->prog->aux->verifier_zext) ++ emit_a32_mov_i(rd[0], 0, ctx); ++ break; ++ case BPF_H: ++ /* Load a HalfWord */ ++ emit(ARM_LDRH_I(rd[1], rm, off), ctx); ++ if (!ctx->prog->aux->verifier_zext) ++ emit_a32_mov_i(rd[0], 0, ctx); ++ break; ++ case BPF_W: ++ /* Load a Word */ ++ emit(ARM_LDR_I(rd[1], rm, off), ctx); ++ if (!ctx->prog->aux->verifier_zext) ++ emit_a32_mov_i(rd[0], 0, ctx); ++ break; ++ case BPF_DW: ++ /* Load a Double Word */ ++ emit(ARM_LDR_I(rd[1], rm, off), ctx); ++ emit(ARM_LDR_I(rd[0], rm, off + 4), ctx); ++ break; ++ } ++ arm_bpf_put_reg64(dst, rd, ctx); ++} ++ ++/* Arithmatic Operation */ ++static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm, ++ const u8 rn, struct jit_ctx *ctx, u8 op, ++ bool is_jmp64) { ++ switch (op) { ++ case BPF_JSET: ++ if (is_jmp64) { ++ emit(ARM_AND_R(ARM_IP, rt, rn), ctx); ++ emit(ARM_AND_R(ARM_LR, rd, rm), ctx); ++ emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx); ++ } else { ++ emit(ARM_ANDS_R(ARM_IP, rt, rn), ctx); + } +- return; ++ break; ++ case BPF_JEQ: ++ case BPF_JNE: ++ case BPF_JGT: ++ case BPF_JGE: ++ case BPF_JLE: ++ case BPF_JLT: ++ if (is_jmp64) { ++ emit(ARM_CMP_R(rd, rm), ctx); ++ /* Only compare low halve if high halve are equal. 
*/ ++ _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx); ++ } else { ++ emit(ARM_CMP_R(rt, rn), ctx); ++ } ++ break; ++ case BPF_JSLE: ++ case BPF_JSGT: ++ emit(ARM_CMP_R(rn, rt), ctx); ++ if (is_jmp64) ++ emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx); ++ break; ++ case BPF_JSLT: ++ case BPF_JSGE: ++ emit(ARM_CMP_R(rt, rn), ctx); ++ if (is_jmp64) ++ emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx); ++ break; + } +-#endif ++} + +- /* +- * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4 +- * (r_A) and rn is ARM_R0 (r_scratch) so load rn first into +- * ARM_R1 to avoid accidentally overwriting ARM_R0 with rm +- * before using it as a source for ARM_R1. +- * +- * For BPF_ALU | BPF_DIV | BPF_X rm is ARM_R4 (r_A) and rn is +- * ARM_R5 (r_X) so there is no particular register overlap +- * issues. ++static int out_offset = -1; /* initialized on the first pass of build_body() */ ++static int emit_bpf_tail_call(struct jit_ctx *ctx) ++{ ++ ++ /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */ ++ const s8 *r2 = bpf2a32[BPF_REG_2]; ++ const s8 *r3 = bpf2a32[BPF_REG_3]; ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ const s8 *tcc = bpf2a32[TCALL_CNT]; ++ const s8 *tc; ++ const int idx0 = ctx->idx; ++#define cur_offset (ctx->idx - idx0) ++#define jmp_offset (out_offset - (cur_offset) - 2) ++ u32 lo, hi; ++ s8 r_array, r_index; ++ int off; ++ ++ /* if (index >= array->map.max_entries) ++ * goto out; + */ +- if (rn != ARM_R1) +- emit(ARM_MOV_R(ARM_R1, rn), ctx); +- if (rm != ARM_R0) +- emit(ARM_MOV_R(ARM_R0, rm), ctx); ++ BUILD_BUG_ON(offsetof(struct bpf_array, map.max_entries) > ++ ARM_INST_LDST__IMM12); ++ off = offsetof(struct bpf_array, map.max_entries); ++ r_array = arm_bpf_get_reg32(r2[1], tmp2[0], ctx); ++ /* index is 32-bit for arrays */ ++ r_index = arm_bpf_get_reg32(r3[1], tmp2[1], ctx); ++ /* array->map.max_entries */ ++ emit(ARM_LDR_I(tmp[1], r_array, off), ctx); ++ /* index >= array->map.max_entries */ ++ emit(ARM_CMP_R(r_index, 
tmp[1]), ctx); ++ _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx); ++ ++ /* tmp2[0] = array, tmp2[1] = index */ ++ ++ /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) ++ * goto out; ++ * tail_call_cnt++; ++ */ ++ lo = (u32)MAX_TAIL_CALL_CNT; ++ hi = (u32)((u64)MAX_TAIL_CALL_CNT >> 32); ++ tc = arm_bpf_get_reg64(tcc, tmp, ctx); ++ emit(ARM_CMP_I(tc[0], hi), ctx); ++ _emit(ARM_COND_EQ, ARM_CMP_I(tc[1], lo), ctx); ++ _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx); ++ emit(ARM_ADDS_I(tc[1], tc[1], 1), ctx); ++ emit(ARM_ADC_I(tc[0], tc[0], 0), ctx); ++ arm_bpf_put_reg64(tcc, tmp, ctx); ++ ++ /* prog = array->ptrs[index] ++ * if (prog == NULL) ++ * goto out; ++ */ ++ BUILD_BUG_ON(imm8m(offsetof(struct bpf_array, ptrs)) < 0); ++ off = imm8m(offsetof(struct bpf_array, ptrs)); ++ emit(ARM_ADD_I(tmp[1], r_array, off), ctx); ++ emit(ARM_LDR_R_SI(tmp[1], tmp[1], r_index, SRTYPE_ASL, 2), ctx); ++ emit(ARM_CMP_I(tmp[1], 0), ctx); ++ _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); ++ ++ /* goto *(prog->bpf_func + prologue_size); */ ++ BUILD_BUG_ON(offsetof(struct bpf_prog, bpf_func) > ++ ARM_INST_LDST__IMM12); ++ off = offsetof(struct bpf_prog, bpf_func); ++ emit(ARM_LDR_I(tmp[1], tmp[1], off), ctx); ++ emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx); ++ emit_bx_r(tmp[1], ctx); ++ ++ /* out: */ ++ if (out_offset == -1) ++ out_offset = cur_offset; ++ if (cur_offset != out_offset) { ++ pr_err_once("tail_call out_offset = %d, expected %d!\n", ++ cur_offset, out_offset); ++ return -1; ++ } ++ return 0; ++#undef cur_offset ++#undef jmp_offset ++} ++ ++/* 0xabcd => 0xcdab */ ++static inline void emit_rev16(const u8 rd, const u8 rn, struct jit_ctx *ctx) ++{ ++#if __LINUX_ARM_ARCH__ < 6 ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; + +- ctx->seen |= SEEN_CALL; +- emit_mov_i(ARM_R3, bpf_op == BPF_DIV ? 
(u32)jit_udiv : (u32)jit_mod, +- ctx); +- emit_blx_r(ARM_R3, ctx); ++ emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx); ++ emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 8), ctx); ++ emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx); ++ emit(ARM_ORR_SI(rd, tmp2[0], tmp2[1], SRTYPE_LSL, 8), ctx); ++#else /* ARMv6+ */ ++ emit(ARM_REV16(rd, rn), ctx); ++#endif ++} + +- if (rd != ARM_R0) +- emit(ARM_MOV_R(rd, ARM_R0), ctx); ++/* 0xabcdefgh => 0xghefcdab */ ++static inline void emit_rev32(const u8 rd, const u8 rn, struct jit_ctx *ctx) ++{ ++#if __LINUX_ARM_ARCH__ < 6 ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ ++ emit(ARM_AND_I(tmp2[1], rn, 0xff), ctx); ++ emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 24), ctx); ++ emit(ARM_ORR_SI(ARM_IP, tmp2[0], tmp2[1], SRTYPE_LSL, 24), ctx); ++ ++ emit(ARM_MOV_SI(tmp2[1], rn, SRTYPE_LSR, 8), ctx); ++ emit(ARM_AND_I(tmp2[1], tmp2[1], 0xff), ctx); ++ emit(ARM_MOV_SI(tmp2[0], rn, SRTYPE_LSR, 16), ctx); ++ emit(ARM_AND_I(tmp2[0], tmp2[0], 0xff), ctx); ++ emit(ARM_MOV_SI(tmp2[0], tmp2[0], SRTYPE_LSL, 8), ctx); ++ emit(ARM_ORR_SI(tmp2[0], tmp2[0], tmp2[1], SRTYPE_LSL, 16), ctx); ++ emit(ARM_ORR_R(rd, ARM_IP, tmp2[0]), ctx); ++ ++#else /* ARMv6+ */ ++ emit(ARM_REV(rd, rn), ctx); ++#endif + } + +-static inline void update_on_xread(struct jit_ctx *ctx) ++// push the scratch stack register on top of the stack ++static inline void emit_push_r64(const s8 src[], struct jit_ctx *ctx) + { +- if (!(ctx->seen & SEEN_X)) +- ctx->flags |= FLAG_NEED_X_RESET; ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ const s8 *rt; ++ u16 reg_set = 0; ++ ++ rt = arm_bpf_get_reg64(src, tmp2, ctx); + +- ctx->seen |= SEEN_X; ++ reg_set = (1 << rt[1]) | (1 << rt[0]); ++ emit(ARM_PUSH(reg_set), ctx); + } + +-static int build_body(struct jit_ctx *ctx) ++static void build_prologue(struct jit_ctx *ctx) + { +- void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; +- const struct bpf_prog *prog = ctx->skf; +- const struct sock_filter *inst; +- unsigned i, load_order, off, condt; +- int imm12; 
+- u32 k; ++ const s8 r0 = bpf2a32[BPF_REG_0][1]; ++ const s8 r2 = bpf2a32[BPF_REG_1][1]; ++ const s8 r3 = bpf2a32[BPF_REG_1][0]; ++ const s8 r4 = bpf2a32[BPF_REG_6][1]; ++ const s8 fplo = bpf2a32[BPF_REG_FP][1]; ++ const s8 fphi = bpf2a32[BPF_REG_FP][0]; ++ const s8 *tcc = bpf2a32[TCALL_CNT]; + +- for (i = 0; i < prog->len; i++) { +- u16 code; ++ /* Save callee saved registers. */ ++#ifdef CONFIG_FRAME_POINTER ++ u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC; ++ emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx); ++ emit(ARM_PUSH(reg_set), ctx); ++ emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); ++#else ++ emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx); ++ emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx); ++#endif ++ /* Save frame pointer for later */ ++ emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx); + +- inst = &(prog->insns[i]); +- /* K as an immediate value operand */ +- k = inst->k; +- code = bpf_anc_helper(inst); ++ ctx->stack_size = imm8m(STACK_SIZE); + +- /* compute offsets only in the fake pass */ +- if (ctx->target == NULL) +- ctx->offsets[i] = ctx->idx * 4; ++ /* Set up function call stack */ ++ emit(ARM_SUB_I(ARM_SP, ARM_SP, ctx->stack_size), ctx); + +- switch (code) { +- case BPF_LD | BPF_IMM: +- emit_mov_i(r_A, k, ctx); +- break; +- case BPF_LD | BPF_W | BPF_LEN: +- ctx->seen |= SEEN_SKB; +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); +- emit(ARM_LDR_I(r_A, r_skb, +- offsetof(struct sk_buff, len)), ctx); +- break; +- case BPF_LD | BPF_MEM: +- /* A = scratch[k] */ +- ctx->seen |= SEEN_MEM_WORD(k); +- emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); +- break; +- case BPF_LD | BPF_W | BPF_ABS: +- load_order = 2; +- goto load; +- case BPF_LD | BPF_H | BPF_ABS: +- load_order = 1; +- goto load; +- case BPF_LD | BPF_B | BPF_ABS: +- load_order = 0; +-load: +- emit_mov_i(r_off, k, ctx); +-load_common: +- ctx->seen |= SEEN_DATA | SEEN_CALL; +- +- if (load_order > 0) { +- emit(ARM_SUB_I(r_scratch, r_skb_hl, +- 1 << load_order), ctx); +- emit(ARM_CMP_R(r_scratch, r_off), 
ctx); +- condt = ARM_COND_GE; +- } else { +- emit(ARM_CMP_R(r_skb_hl, r_off), ctx); +- condt = ARM_COND_HI; +- } ++ /* Set up BPF prog stack base register */ ++ emit_a32_mov_r(fplo, ARM_IP, ctx); ++ emit_a32_mov_i(fphi, 0, ctx); + +- /* +- * test for negative offset, only if we are +- * currently scheduled to take the fast +- * path. this will update the flags so that +- * the slowpath instruction are ignored if the +- * offset is negative. +- * +- * for loard_order == 0 the HI condition will +- * make loads at offset 0 take the slow path too. +- */ +- _emit(condt, ARM_CMP_I(r_off, 0), ctx); ++ /* mov r4, 0 */ ++ emit(ARM_MOV_I(r4, 0), ctx); + +- _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), +- ctx); ++ /* Move BPF_CTX to BPF_R1 */ ++ emit(ARM_MOV_R(r3, r4), ctx); ++ emit(ARM_MOV_R(r2, r0), ctx); ++ /* Initialize Tail Count */ ++ emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[0])), ctx); ++ emit(ARM_STR_I(r4, ARM_FP, EBPF_SCRATCH_TO_ARM_FP(tcc[1])), ctx); ++ /* end of prologue */ ++} + +- if (load_order == 0) +- _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0), +- ctx); +- else if (load_order == 1) +- emit_load_be16(condt, r_A, r_scratch, ctx); +- else if (load_order == 2) +- emit_load_be32(condt, r_A, r_scratch, ctx); +- +- _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx); +- +- /* the slowpath */ +- emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx); +- emit(ARM_MOV_R(ARM_R0, r_skb), ctx); +- /* the offset is already in R1 */ +- emit_blx_r(ARM_R3, ctx); +- /* check the result of skb_copy_bits */ +- emit(ARM_CMP_I(ARM_R1, 0), ctx); +- emit_err_ret(ARM_COND_NE, ctx); +- emit(ARM_MOV_R(r_A, ARM_R0), ctx); +- break; +- case BPF_LD | BPF_W | BPF_IND: +- load_order = 2; +- goto load_ind; +- case BPF_LD | BPF_H | BPF_IND: +- load_order = 1; +- goto load_ind; +- case BPF_LD | BPF_B | BPF_IND: +- load_order = 0; +-load_ind: +- update_on_xread(ctx); +- OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); +- goto load_common; +- case BPF_LDX | BPF_IMM: +- ctx->seen |= SEEN_X; +- 
emit_mov_i(r_X, k, ctx); +- break; +- case BPF_LDX | BPF_W | BPF_LEN: +- ctx->seen |= SEEN_X | SEEN_SKB; +- emit(ARM_LDR_I(r_X, r_skb, +- offsetof(struct sk_buff, len)), ctx); +- break; +- case BPF_LDX | BPF_MEM: +- ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); +- emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); +- break; +- case BPF_LDX | BPF_B | BPF_MSH: +- /* x = ((*(frame + k)) & 0xf) << 2; */ +- ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; +- /* the interpreter should deal with the negative K */ +- if ((int)k < 0) +- return -1; +- /* offset in r1: we might have to take the slow path */ +- emit_mov_i(r_off, k, ctx); +- emit(ARM_CMP_R(r_skb_hl, r_off), ctx); +- +- /* load in r0: common with the slowpath */ +- _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data, +- ARM_R1), ctx); +- /* +- * emit_mov_i() might generate one or two instructions, +- * the same holds for emit_blx_r() +- */ +- _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx); ++/* restore callee saved registers. */ ++static void build_epilogue(struct jit_ctx *ctx) ++{ ++#ifdef CONFIG_FRAME_POINTER ++ /* When using frame pointers, some additional registers need to ++ * be loaded. */ ++ u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP; ++ emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx); ++ emit(ARM_LDM(ARM_SP, reg_set), ctx); ++#else ++ /* Restore callee saved registers. 
*/ ++ emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx); ++ emit(ARM_POP(CALLEE_POP_MASK), ctx); ++#endif ++} + +- emit(ARM_MOV_R(ARM_R0, r_skb), ctx); +- /* r_off is r1 */ +- emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx); +- emit_blx_r(ARM_R3, ctx); +- /* check the return value of skb_copy_bits */ +- emit(ARM_CMP_I(ARM_R1, 0), ctx); +- emit_err_ret(ARM_COND_NE, ctx); +- +- emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); +- emit(ARM_LSL_I(r_X, r_X, 2), ctx); +- break; +- case BPF_ST: +- ctx->seen |= SEEN_MEM_WORD(k); +- emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); +- break; +- case BPF_STX: +- update_on_xread(ctx); +- ctx->seen |= SEEN_MEM_WORD(k); +- emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); +- break; +- case BPF_ALU | BPF_ADD | BPF_K: +- /* A += K */ +- OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); +- break; +- case BPF_ALU | BPF_ADD | BPF_X: +- update_on_xread(ctx); +- emit(ARM_ADD_R(r_A, r_A, r_X), ctx); +- break; +- case BPF_ALU | BPF_SUB | BPF_K: +- /* A -= K */ +- OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); +- break; +- case BPF_ALU | BPF_SUB | BPF_X: +- update_on_xread(ctx); +- emit(ARM_SUB_R(r_A, r_A, r_X), ctx); +- break; +- case BPF_ALU | BPF_MUL | BPF_K: +- /* A *= K */ +- emit_mov_i(r_scratch, k, ctx); +- emit(ARM_MUL(r_A, r_A, r_scratch), ctx); +- break; +- case BPF_ALU | BPF_MUL | BPF_X: +- update_on_xread(ctx); +- emit(ARM_MUL(r_A, r_A, r_X), ctx); +- break; +- case BPF_ALU | BPF_DIV | BPF_K: +- if (k == 1) +- break; +- emit_mov_i(r_scratch, k, ctx); +- emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_DIV); +- break; +- case BPF_ALU | BPF_DIV | BPF_X: +- update_on_xread(ctx); +- emit(ARM_CMP_I(r_X, 0), ctx); +- emit_err_ret(ARM_COND_EQ, ctx); +- emit_udivmod(r_A, r_A, r_X, ctx, BPF_DIV); +- break; +- case BPF_ALU | BPF_MOD | BPF_K: +- if (k == 1) { +- emit_mov_i(r_A, 0, ctx); ++/* ++ * Convert an eBPF instruction to native instruction, i.e ++ * JITs an eBPF instruction. 
++ * Returns : ++ * 0 - Successfully JITed an 8-byte eBPF instruction ++ * >0 - Successfully JITed a 16-byte eBPF instruction ++ * <0 - Failed to JIT. ++ */ ++static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) ++{ ++ const u8 code = insn->code; ++ const s8 *dst = bpf2a32[insn->dst_reg]; ++ const s8 *src = bpf2a32[insn->src_reg]; ++ const s8 *tmp = bpf2a32[TMP_REG_1]; ++ const s8 *tmp2 = bpf2a32[TMP_REG_2]; ++ const s16 off = insn->off; ++ const s32 imm = insn->imm; ++ const int i = insn - ctx->prog->insnsi; ++ const bool is64 = BPF_CLASS(code) == BPF_ALU64; ++ const s8 *rd, *rs; ++ s8 rd_lo, rt, rm, rn; ++ s32 jmp_offset; ++ ++#define check_imm(bits, imm) do { \ ++ if ((imm) >= (1 << ((bits) - 1)) || \ ++ (imm) < -(1 << ((bits) - 1))) { \ ++ pr_info("[%2d] imm=%d(0x%x) out of range\n", \ ++ i, imm, imm); \ ++ return -EINVAL; \ ++ } \ ++} while (0) ++#define check_imm24(imm) check_imm(24, imm) ++ ++ switch (code) { ++ /* ALU operations */ ++ ++ /* dst = src */ ++ case BPF_ALU | BPF_MOV | BPF_K: ++ case BPF_ALU | BPF_MOV | BPF_X: ++ case BPF_ALU64 | BPF_MOV | BPF_K: ++ case BPF_ALU64 | BPF_MOV | BPF_X: ++ switch (BPF_SRC(code)) { ++ case BPF_X: ++ if (imm == 1) { ++ /* Special mov32 for zext */ ++ emit_a32_mov_i(dst_hi, 0, ctx); + break; + } +- emit_mov_i(r_scratch, k, ctx); +- emit_udivmod(r_A, r_A, r_scratch, ctx, BPF_MOD); ++ emit_a32_mov_r64(is64, dst, src, ctx); + break; +- case BPF_ALU | BPF_MOD | BPF_X: +- update_on_xread(ctx); +- emit(ARM_CMP_I(r_X, 0), ctx); +- emit_err_ret(ARM_COND_EQ, ctx); +- emit_udivmod(r_A, r_A, r_X, ctx, BPF_MOD); +- break; +- case BPF_ALU | BPF_OR | BPF_K: +- /* A |= K */ +- OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); +- break; +- case BPF_ALU | BPF_OR | BPF_X: +- update_on_xread(ctx); +- emit(ARM_ORR_R(r_A, r_A, r_X), ctx); +- break; +- case BPF_ALU | BPF_XOR | BPF_K: +- /* A ^= K; */ +- OP_IMM3(ARM_EOR, r_A, r_A, k, ctx); +- break; +- case BPF_ANC | SKF_AD_ALU_XOR_X: +- case BPF_ALU | BPF_XOR | BPF_X: +- /* A ^= X */ 
+- update_on_xread(ctx); +- emit(ARM_EOR_R(r_A, r_A, r_X), ctx); +- break; +- case BPF_ALU | BPF_AND | BPF_K: +- /* A &= K */ +- OP_IMM3(ARM_AND, r_A, r_A, k, ctx); +- break; +- case BPF_ALU | BPF_AND | BPF_X: +- update_on_xread(ctx); +- emit(ARM_AND_R(r_A, r_A, r_X), ctx); +- break; +- case BPF_ALU | BPF_LSH | BPF_K: +- if (unlikely(k > 31)) +- return -1; +- emit(ARM_LSL_I(r_A, r_A, k), ctx); +- break; +- case BPF_ALU | BPF_LSH | BPF_X: +- update_on_xread(ctx); +- emit(ARM_LSL_R(r_A, r_A, r_X), ctx); +- break; +- case BPF_ALU | BPF_RSH | BPF_K: +- if (unlikely(k > 31)) +- return -1; +- if (k) +- emit(ARM_LSR_I(r_A, r_A, k), ctx); +- break; +- case BPF_ALU | BPF_RSH | BPF_X: +- update_on_xread(ctx); +- emit(ARM_LSR_R(r_A, r_A, r_X), ctx); +- break; +- case BPF_ALU | BPF_NEG: +- /* A = -A */ +- emit(ARM_RSB_I(r_A, r_A, 0), ctx); +- break; +- case BPF_JMP | BPF_JA: +- /* pc += K */ +- emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); +- break; +- case BPF_JMP | BPF_JEQ | BPF_K: +- /* pc += (A == K) ? pc->jt : pc->jf */ +- condt = ARM_COND_EQ; +- goto cmp_imm; +- case BPF_JMP | BPF_JGT | BPF_K: +- /* pc += (A > K) ? pc->jt : pc->jf */ +- condt = ARM_COND_HI; +- goto cmp_imm; +- case BPF_JMP | BPF_JGE | BPF_K: +- /* pc += (A >= K) ? pc->jt : pc->jf */ +- condt = ARM_COND_HS; +-cmp_imm: +- imm12 = imm8m(k); +- if (imm12 < 0) { +- emit_mov_i_no8m(r_scratch, k, ctx); +- emit(ARM_CMP_R(r_A, r_scratch), ctx); +- } else { +- emit(ARM_CMP_I(r_A, imm12), ctx); +- } +-cond_jump: +- if (inst->jt) +- _emit(condt, ARM_B(b_imm(i + inst->jt + 1, +- ctx)), ctx); +- if (inst->jf) +- _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, +- ctx)), ctx); +- break; +- case BPF_JMP | BPF_JEQ | BPF_X: +- /* pc += (A == X) ? pc->jt : pc->jf */ +- condt = ARM_COND_EQ; +- goto cmp_x; +- case BPF_JMP | BPF_JGT | BPF_X: +- /* pc += (A > X) ? pc->jt : pc->jf */ +- condt = ARM_COND_HI; +- goto cmp_x; +- case BPF_JMP | BPF_JGE | BPF_X: +- /* pc += (A >= X) ? 
pc->jt : pc->jf */ +- condt = ARM_COND_CS; +-cmp_x: +- update_on_xread(ctx); +- emit(ARM_CMP_R(r_A, r_X), ctx); +- goto cond_jump; +- case BPF_JMP | BPF_JSET | BPF_K: +- /* pc += (A & K) ? pc->jt : pc->jf */ +- condt = ARM_COND_NE; +- /* not set iff all zeroes iff Z==1 iff EQ */ +- +- imm12 = imm8m(k); +- if (imm12 < 0) { +- emit_mov_i_no8m(r_scratch, k, ctx); +- emit(ARM_TST_R(r_A, r_scratch), ctx); +- } else { +- emit(ARM_TST_I(r_A, imm12), ctx); +- } +- goto cond_jump; +- case BPF_JMP | BPF_JSET | BPF_X: +- /* pc += (A & X) ? pc->jt : pc->jf */ +- update_on_xread(ctx); +- condt = ARM_COND_NE; +- emit(ARM_TST_R(r_A, r_X), ctx); +- goto cond_jump; +- case BPF_RET | BPF_A: +- emit(ARM_MOV_R(ARM_R0, r_A), ctx); +- goto b_epilogue; +- case BPF_RET | BPF_K: +- if ((k == 0) && (ctx->ret0_fp_idx < 0)) +- ctx->ret0_fp_idx = i; +- emit_mov_i(ARM_R0, k, ctx); +-b_epilogue: +- if (i != ctx->skf->len - 1) +- emit(ARM_B(b_imm(prog->len, ctx)), ctx); +- break; +- case BPF_MISC | BPF_TAX: +- /* X = A */ +- ctx->seen |= SEEN_X; +- emit(ARM_MOV_R(r_X, r_A), ctx); +- break; +- case BPF_MISC | BPF_TXA: +- /* A = X */ +- update_on_xread(ctx); +- emit(ARM_MOV_R(r_A, r_X), ctx); +- break; +- case BPF_ANC | SKF_AD_PROTOCOL: +- /* A = ntohs(skb->protocol) */ +- ctx->seen |= SEEN_SKB; +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, +- protocol) != 2); +- off = offsetof(struct sk_buff, protocol); +- emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); +- emit_swap16(r_A, r_scratch, ctx); +- break; +- case BPF_ANC | SKF_AD_CPU: +- /* r_scratch = current_thread_info() */ +- OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); +- /* A = current_thread_info()->cpu */ +- BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); +- off = offsetof(struct thread_info, cpu); +- emit(ARM_LDR_I(r_A, r_scratch, off), ctx); +- break; +- case BPF_ANC | SKF_AD_IFINDEX: +- case BPF_ANC | SKF_AD_HATYPE: +- /* A = skb->dev->ifindex */ +- /* A = skb->dev->type */ +- ctx->seen |= SEEN_SKB; +- off = 
offsetof(struct sk_buff, dev); +- emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); +- +- emit(ARM_CMP_I(r_scratch, 0), ctx); +- emit_err_ret(ARM_COND_EQ, ctx); +- +- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, +- ifindex) != 4); +- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, +- type) != 2); +- +- if (code == (BPF_ANC | SKF_AD_IFINDEX)) { +- off = offsetof(struct net_device, ifindex); +- emit(ARM_LDR_I(r_A, r_scratch, off), ctx); +- } else { +- /* +- * offset of field "type" in "struct +- * net_device" is above what can be +- * used in the ldrh rd, [rn, #imm] +- * instruction, so load the offset in +- * a register and use ldrh rd, [rn, rm] +- */ +- off = offsetof(struct net_device, type); +- emit_mov_i(ARM_R3, off, ctx); +- emit(ARM_LDRH_R(r_A, r_scratch, ARM_R3), ctx); +- } ++ case BPF_K: ++ /* Sign-extend immediate value to destination reg */ ++ emit_a32_mov_se_i64(is64, dst, imm, ctx); + break; +- case BPF_ANC | SKF_AD_MARK: +- ctx->seen |= SEEN_SKB; +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); +- off = offsetof(struct sk_buff, mark); +- emit(ARM_LDR_I(r_A, r_skb, off), ctx); +- break; +- case BPF_ANC | SKF_AD_RXHASH: +- ctx->seen |= SEEN_SKB; +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); +- off = offsetof(struct sk_buff, hash); +- emit(ARM_LDR_I(r_A, r_skb, off), ctx); +- break; +- case BPF_ANC | SKF_AD_VLAN_TAG: +- case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: +- ctx->seen |= SEEN_SKB; +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); +- off = offsetof(struct sk_buff, vlan_tci); +- emit(ARM_LDRH_I(r_A, r_skb, off), ctx); +- if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) +- OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx); +- else { +- OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx); +- OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx); +- } ++ } ++ break; ++ /* dst = dst + src/imm */ ++ /* dst = dst - src/imm */ ++ /* dst = dst | src/imm */ ++ /* dst = dst & src/imm */ ++ /* dst = dst ^ src/imm */ ++ /* dst = dst * src/imm */ ++ /* dst = dst << src */ ++ /* dst 
= dst >> src */ ++ case BPF_ALU | BPF_ADD | BPF_K: ++ case BPF_ALU | BPF_ADD | BPF_X: ++ case BPF_ALU | BPF_SUB | BPF_K: ++ case BPF_ALU | BPF_SUB | BPF_X: ++ case BPF_ALU | BPF_OR | BPF_K: ++ case BPF_ALU | BPF_OR | BPF_X: ++ case BPF_ALU | BPF_AND | BPF_K: ++ case BPF_ALU | BPF_AND | BPF_X: ++ case BPF_ALU | BPF_XOR | BPF_K: ++ case BPF_ALU | BPF_XOR | BPF_X: ++ case BPF_ALU | BPF_MUL | BPF_K: ++ case BPF_ALU | BPF_MUL | BPF_X: ++ case BPF_ALU | BPF_LSH | BPF_X: ++ case BPF_ALU | BPF_RSH | BPF_X: ++ case BPF_ALU | BPF_ARSH | BPF_K: ++ case BPF_ALU | BPF_ARSH | BPF_X: ++ case BPF_ALU64 | BPF_ADD | BPF_K: ++ case BPF_ALU64 | BPF_ADD | BPF_X: ++ case BPF_ALU64 | BPF_SUB | BPF_K: ++ case BPF_ALU64 | BPF_SUB | BPF_X: ++ case BPF_ALU64 | BPF_OR | BPF_K: ++ case BPF_ALU64 | BPF_OR | BPF_X: ++ case BPF_ALU64 | BPF_AND | BPF_K: ++ case BPF_ALU64 | BPF_AND | BPF_X: ++ case BPF_ALU64 | BPF_XOR | BPF_K: ++ case BPF_ALU64 | BPF_XOR | BPF_X: ++ switch (BPF_SRC(code)) { ++ case BPF_X: ++ emit_a32_alu_r64(is64, dst, src, ctx, BPF_OP(code)); ++ break; ++ case BPF_K: ++ /* Move immediate value to the temporary register ++ * and then do the ALU operation on the temporary ++ * register as this will sign-extend the immediate ++ * value into temporary reg and then it would be ++ * safe to do the operation on it. 
++ */ ++ emit_a32_mov_se_i64(is64, tmp2, imm, ctx); ++ emit_a32_alu_r64(is64, dst, tmp2, ctx, BPF_OP(code)); + break; +- case BPF_ANC | SKF_AD_PKTTYPE: +- ctx->seen |= SEEN_SKB; +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, +- __pkt_type_offset[0]) != 1); +- off = PKT_TYPE_OFFSET(); +- emit(ARM_LDRB_I(r_A, r_skb, off), ctx); +- emit(ARM_AND_I(r_A, r_A, PKT_TYPE_MAX), ctx); +-#ifdef __BIG_ENDIAN_BITFIELD +- emit(ARM_LSR_I(r_A, r_A, 5), ctx); +-#endif ++ } ++ break; ++ /* dst = dst / src(imm) */ ++ /* dst = dst % src(imm) */ ++ case BPF_ALU | BPF_DIV | BPF_K: ++ case BPF_ALU | BPF_DIV | BPF_X: ++ case BPF_ALU | BPF_MOD | BPF_K: ++ case BPF_ALU | BPF_MOD | BPF_X: ++ rd_lo = arm_bpf_get_reg32(dst_lo, tmp2[1], ctx); ++ switch (BPF_SRC(code)) { ++ case BPF_X: ++ rt = arm_bpf_get_reg32(src_lo, tmp2[0], ctx); ++ break; ++ case BPF_K: ++ rt = tmp2[0]; ++ emit_a32_mov_i(rt, imm, ctx); + break; +- case BPF_ANC | SKF_AD_QUEUE: +- ctx->seen |= SEEN_SKB; +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, +- queue_mapping) != 2); +- BUILD_BUG_ON(offsetof(struct sk_buff, +- queue_mapping) > 0xff); +- off = offsetof(struct sk_buff, queue_mapping); +- emit(ARM_LDRH_I(r_A, r_skb, off), ctx); +- break; +- case BPF_ANC | SKF_AD_PAY_OFFSET: +- ctx->seen |= SEEN_SKB | SEEN_CALL; +- +- emit(ARM_MOV_R(ARM_R0, r_skb), ctx); +- emit_mov_i(ARM_R3, (unsigned int)skb_get_poff, ctx); +- emit_blx_r(ARM_R3, ctx); +- emit(ARM_MOV_R(r_A, ARM_R0), ctx); +- break; +- case BPF_LDX | BPF_W | BPF_ABS: +- /* +- * load a 32bit word from struct seccomp_data. +- * seccomp_check_filter() will already have checked +- * that k is 32bit aligned and lies within the +- * struct seccomp_data. 
++ default: ++ rt = src_lo; ++ break; ++ } ++ emit_udivmod(rd_lo, rd_lo, rt, ctx, BPF_OP(code)); ++ arm_bpf_put_reg32(dst_lo, rd_lo, ctx); ++ if (!ctx->prog->aux->verifier_zext) ++ emit_a32_mov_i(dst_hi, 0, ctx); ++ break; ++ case BPF_ALU64 | BPF_DIV | BPF_K: ++ case BPF_ALU64 | BPF_DIV | BPF_X: ++ case BPF_ALU64 | BPF_MOD | BPF_K: ++ case BPF_ALU64 | BPF_MOD | BPF_X: ++ goto notyet; ++ /* dst = dst >> imm */ ++ /* dst = dst << imm */ ++ case BPF_ALU | BPF_RSH | BPF_K: ++ case BPF_ALU | BPF_LSH | BPF_K: ++ if (unlikely(imm > 31)) ++ return -EINVAL; ++ if (imm) ++ emit_a32_alu_i(dst_lo, imm, ctx, BPF_OP(code)); ++ if (!ctx->prog->aux->verifier_zext) ++ emit_a32_mov_i(dst_hi, 0, ctx); ++ break; ++ /* dst = dst << imm */ ++ case BPF_ALU64 | BPF_LSH | BPF_K: ++ if (unlikely(imm > 63)) ++ return -EINVAL; ++ emit_a32_lsh_i64(dst, imm, ctx); ++ break; ++ /* dst = dst >> imm */ ++ case BPF_ALU64 | BPF_RSH | BPF_K: ++ if (unlikely(imm > 63)) ++ return -EINVAL; ++ emit_a32_rsh_i64(dst, imm, ctx); ++ break; ++ /* dst = dst << src */ ++ case BPF_ALU64 | BPF_LSH | BPF_X: ++ emit_a32_lsh_r64(dst, src, ctx); ++ break; ++ /* dst = dst >> src */ ++ case BPF_ALU64 | BPF_RSH | BPF_X: ++ emit_a32_rsh_r64(dst, src, ctx); ++ break; ++ /* dst = dst >> src (signed) */ ++ case BPF_ALU64 | BPF_ARSH | BPF_X: ++ emit_a32_arsh_r64(dst, src, ctx); ++ break; ++ /* dst = dst >> imm (signed) */ ++ case BPF_ALU64 | BPF_ARSH | BPF_K: ++ if (unlikely(imm > 63)) ++ return -EINVAL; ++ emit_a32_arsh_i64(dst, imm, ctx); ++ break; ++ /* dst = ~dst */ ++ case BPF_ALU | BPF_NEG: ++ emit_a32_alu_i(dst_lo, 0, ctx, BPF_OP(code)); ++ if (!ctx->prog->aux->verifier_zext) ++ emit_a32_mov_i(dst_hi, 0, ctx); ++ break; ++ /* dst = ~dst (64 bit) */ ++ case BPF_ALU64 | BPF_NEG: ++ emit_a32_neg64(dst, ctx); ++ break; ++ /* dst = dst * src/imm */ ++ case BPF_ALU64 | BPF_MUL | BPF_X: ++ case BPF_ALU64 | BPF_MUL | BPF_K: ++ switch (BPF_SRC(code)) { ++ case BPF_X: ++ emit_a32_mul_r64(dst, src, ctx); ++ break; ++ case BPF_K: 
++ /* Move immediate value to the temporary register ++ * and then do the multiplication on it as this ++ * will sign-extend the immediate value into temp ++ * reg then it would be safe to do the operation ++ * on it. + */ +- ctx->seen |= SEEN_SKB; +- emit(ARM_LDR_I(r_A, r_skb, k), ctx); ++ emit_a32_mov_se_i64(is64, tmp2, imm, ctx); ++ emit_a32_mul_r64(dst, tmp2, ctx); ++ break; ++ } ++ break; ++ /* dst = htole(dst) */ ++ /* dst = htobe(dst) */ ++ case BPF_ALU | BPF_END | BPF_FROM_LE: ++ case BPF_ALU | BPF_END | BPF_FROM_BE: ++ rd = arm_bpf_get_reg64(dst, tmp, ctx); ++ if (BPF_SRC(code) == BPF_FROM_LE) ++ goto emit_bswap_uxt; ++ switch (imm) { ++ case 16: ++ emit_rev16(rd[1], rd[1], ctx); ++ goto emit_bswap_uxt; ++ case 32: ++ emit_rev32(rd[1], rd[1], ctx); ++ goto emit_bswap_uxt; ++ case 64: ++ emit_rev32(ARM_LR, rd[1], ctx); ++ emit_rev32(rd[1], rd[0], ctx); ++ emit(ARM_MOV_R(rd[0], ARM_LR), ctx); + break; +- default: +- return -1; + } ++ goto exit; ++emit_bswap_uxt: ++ switch (imm) { ++ case 16: ++ /* zero-extend 16 bits into 64 bits */ ++#if __LINUX_ARM_ARCH__ < 6 ++ emit_a32_mov_i(tmp2[1], 0xffff, ctx); ++ emit(ARM_AND_R(rd[1], rd[1], tmp2[1]), ctx); ++#else /* ARMv6+ */ ++ emit(ARM_UXTH(rd[1], rd[1]), ctx); ++#endif ++ if (!ctx->prog->aux->verifier_zext) ++ emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx); ++ break; ++ case 32: ++ /* zero-extend 32 bits into 64 bits */ ++ if (!ctx->prog->aux->verifier_zext) ++ emit(ARM_EOR_R(rd[0], rd[0], rd[0]), ctx); ++ break; ++ case 64: ++ /* nop */ ++ break; ++ } ++exit: ++ arm_bpf_put_reg64(dst, rd, ctx); ++ break; ++ /* dst = imm64 */ ++ case BPF_LD | BPF_IMM | BPF_DW: ++ { ++ u64 val = (u32)imm | (u64)insn[1].imm << 32; + +- if (ctx->flags & FLAG_IMM_OVERFLOW) +- /* +- * this instruction generated an overflow when +- * trying to access the literal pool, so +- * delegate this filter to the kernel interpreter. 
+- */ +- return -1; ++ emit_a32_mov_i64(dst, val, ctx); ++ ++ return 1; + } ++ /* LDX: dst = *(size *)(src + off) */ ++ case BPF_LDX | BPF_MEM | BPF_W: ++ case BPF_LDX | BPF_MEM | BPF_H: ++ case BPF_LDX | BPF_MEM | BPF_B: ++ case BPF_LDX | BPF_MEM | BPF_DW: ++ rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); ++ emit_ldx_r(dst, rn, off, ctx, BPF_SIZE(code)); ++ break; ++ /* ST: *(size *)(dst + off) = imm */ ++ case BPF_ST | BPF_MEM | BPF_W: ++ case BPF_ST | BPF_MEM | BPF_H: ++ case BPF_ST | BPF_MEM | BPF_B: ++ case BPF_ST | BPF_MEM | BPF_DW: ++ switch (BPF_SIZE(code)) { ++ case BPF_DW: ++ /* Sign-extend immediate value into temp reg */ ++ emit_a32_mov_se_i64(true, tmp2, imm, ctx); ++ break; ++ case BPF_W: ++ case BPF_H: ++ case BPF_B: ++ emit_a32_mov_i(tmp2[1], imm, ctx); ++ break; ++ } ++ emit_str_r(dst_lo, tmp2, off, ctx, BPF_SIZE(code)); ++ break; ++ /* STX XADD: lock *(u32 *)(dst + off) += src */ ++ case BPF_STX | BPF_XADD | BPF_W: ++ /* STX XADD: lock *(u64 *)(dst + off) += src */ ++ case BPF_STX | BPF_XADD | BPF_DW: ++ goto notyet; ++ /* STX: *(size *)(dst + off) = src */ ++ case BPF_STX | BPF_MEM | BPF_W: ++ case BPF_STX | BPF_MEM | BPF_H: ++ case BPF_STX | BPF_MEM | BPF_B: ++ case BPF_STX | BPF_MEM | BPF_DW: ++ rs = arm_bpf_get_reg64(src, tmp2, ctx); ++ emit_str_r(dst_lo, rs, off, ctx, BPF_SIZE(code)); ++ break; ++ /* PC += off if dst == src */ ++ /* PC += off if dst > src */ ++ /* PC += off if dst >= src */ ++ /* PC += off if dst < src */ ++ /* PC += off if dst <= src */ ++ /* PC += off if dst != src */ ++ /* PC += off if dst > src (signed) */ ++ /* PC += off if dst >= src (signed) */ ++ /* PC += off if dst < src (signed) */ ++ /* PC += off if dst <= src (signed) */ ++ /* PC += off if dst & src */ ++ case BPF_JMP | BPF_JEQ | BPF_X: ++ case BPF_JMP | BPF_JGT | BPF_X: ++ case BPF_JMP | BPF_JGE | BPF_X: ++ case BPF_JMP | BPF_JNE | BPF_X: ++ case BPF_JMP | BPF_JSGT | BPF_X: ++ case BPF_JMP | BPF_JSGE | BPF_X: ++ case BPF_JMP | BPF_JSET | BPF_X: ++ case BPF_JMP | 
BPF_JLE | BPF_X: ++ case BPF_JMP | BPF_JLT | BPF_X: ++ case BPF_JMP | BPF_JSLT | BPF_X: ++ case BPF_JMP | BPF_JSLE | BPF_X: ++ case BPF_JMP32 | BPF_JEQ | BPF_X: ++ case BPF_JMP32 | BPF_JGT | BPF_X: ++ case BPF_JMP32 | BPF_JGE | BPF_X: ++ case BPF_JMP32 | BPF_JNE | BPF_X: ++ case BPF_JMP32 | BPF_JSGT | BPF_X: ++ case BPF_JMP32 | BPF_JSGE | BPF_X: ++ case BPF_JMP32 | BPF_JSET | BPF_X: ++ case BPF_JMP32 | BPF_JLE | BPF_X: ++ case BPF_JMP32 | BPF_JLT | BPF_X: ++ case BPF_JMP32 | BPF_JSLT | BPF_X: ++ case BPF_JMP32 | BPF_JSLE | BPF_X: ++ /* Setup source registers */ ++ rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx); ++ rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx); ++ goto go_jmp; ++ /* PC += off if dst == imm */ ++ /* PC += off if dst > imm */ ++ /* PC += off if dst >= imm */ ++ /* PC += off if dst < imm */ ++ /* PC += off if dst <= imm */ ++ /* PC += off if dst != imm */ ++ /* PC += off if dst > imm (signed) */ ++ /* PC += off if dst >= imm (signed) */ ++ /* PC += off if dst < imm (signed) */ ++ /* PC += off if dst <= imm (signed) */ ++ /* PC += off if dst & imm */ ++ case BPF_JMP | BPF_JEQ | BPF_K: ++ case BPF_JMP | BPF_JGT | BPF_K: ++ case BPF_JMP | BPF_JGE | BPF_K: ++ case BPF_JMP | BPF_JNE | BPF_K: ++ case BPF_JMP | BPF_JSGT | BPF_K: ++ case BPF_JMP | BPF_JSGE | BPF_K: ++ case BPF_JMP | BPF_JSET | BPF_K: ++ case BPF_JMP | BPF_JLT | BPF_K: ++ case BPF_JMP | BPF_JLE | BPF_K: ++ case BPF_JMP | BPF_JSLT | BPF_K: ++ case BPF_JMP | BPF_JSLE | BPF_K: ++ case BPF_JMP32 | BPF_JEQ | BPF_K: ++ case BPF_JMP32 | BPF_JGT | BPF_K: ++ case BPF_JMP32 | BPF_JGE | BPF_K: ++ case BPF_JMP32 | BPF_JNE | BPF_K: ++ case BPF_JMP32 | BPF_JSGT | BPF_K: ++ case BPF_JMP32 | BPF_JSGE | BPF_K: ++ case BPF_JMP32 | BPF_JSET | BPF_K: ++ case BPF_JMP32 | BPF_JLT | BPF_K: ++ case BPF_JMP32 | BPF_JLE | BPF_K: ++ case BPF_JMP32 | BPF_JSLT | BPF_K: ++ case BPF_JMP32 | BPF_JSLE | BPF_K: ++ if (off == 0) ++ break; ++ rm = tmp2[0]; ++ rn = tmp2[1]; ++ /* Sign-extend immediate value */ ++ 
emit_a32_mov_se_i64(true, tmp2, imm, ctx); ++go_jmp: ++ /* Setup destination register */ ++ rd = arm_bpf_get_reg64(dst, tmp, ctx); ++ ++ /* Check for the condition */ ++ emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code), ++ BPF_CLASS(code) == BPF_JMP); ++ ++ /* Setup JUMP instruction */ ++ jmp_offset = bpf2a32_offset(i+off, i, ctx); ++ switch (BPF_OP(code)) { ++ case BPF_JNE: ++ case BPF_JSET: ++ _emit(ARM_COND_NE, ARM_B(jmp_offset), ctx); ++ break; ++ case BPF_JEQ: ++ _emit(ARM_COND_EQ, ARM_B(jmp_offset), ctx); ++ break; ++ case BPF_JGT: ++ _emit(ARM_COND_HI, ARM_B(jmp_offset), ctx); ++ break; ++ case BPF_JGE: ++ _emit(ARM_COND_CS, ARM_B(jmp_offset), ctx); ++ break; ++ case BPF_JSGT: ++ _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx); ++ break; ++ case BPF_JSGE: ++ _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx); ++ break; ++ case BPF_JLE: ++ _emit(ARM_COND_LS, ARM_B(jmp_offset), ctx); ++ break; ++ case BPF_JLT: ++ _emit(ARM_COND_CC, ARM_B(jmp_offset), ctx); ++ break; ++ case BPF_JSLT: ++ _emit(ARM_COND_LT, ARM_B(jmp_offset), ctx); ++ break; ++ case BPF_JSLE: ++ _emit(ARM_COND_GE, ARM_B(jmp_offset), ctx); ++ break; ++ } ++ break; ++ /* JMP OFF */ ++ case BPF_JMP | BPF_JA: ++ { ++ if (off == 0) ++ break; ++ jmp_offset = bpf2a32_offset(i+off, i, ctx); ++ check_imm24(jmp_offset); ++ emit(ARM_B(jmp_offset), ctx); ++ break; ++ } ++ /* tail call */ ++ case BPF_JMP | BPF_TAIL_CALL: ++ if (emit_bpf_tail_call(ctx)) ++ return -EFAULT; ++ break; ++ /* function call */ ++ case BPF_JMP | BPF_CALL: ++ { ++ const s8 *r0 = bpf2a32[BPF_REG_0]; ++ const s8 *r1 = bpf2a32[BPF_REG_1]; ++ const s8 *r2 = bpf2a32[BPF_REG_2]; ++ const s8 *r3 = bpf2a32[BPF_REG_3]; ++ const s8 *r4 = bpf2a32[BPF_REG_4]; ++ const s8 *r5 = bpf2a32[BPF_REG_5]; ++ const u32 func = (u32)__bpf_call_base + (u32)imm; ++ ++ emit_a32_mov_r64(true, r0, r1, ctx); ++ emit_a32_mov_r64(true, r1, r2, ctx); ++ emit_push_r64(r5, ctx); ++ emit_push_r64(r4, ctx); ++ emit_push_r64(r3, ctx); + +- /* compute offsets only during the first 
pass */ +- if (ctx->target == NULL) +- ctx->offsets[i] = ctx->idx * 4; ++ emit_a32_mov_i(tmp[1], func, ctx); ++ emit_blx_r(tmp[1], ctx); + ++ emit(ARM_ADD_I(ARM_SP, ARM_SP, imm8m(24)), ctx); // callee clean ++ break; ++ } ++ /* function return */ ++ case BPF_JMP | BPF_EXIT: ++ /* Optimization: when last instruction is EXIT ++ * simply fallthrough to epilogue. ++ */ ++ if (i == ctx->prog->len - 1) ++ break; ++ jmp_offset = epilogue_offset(ctx); ++ check_imm24(jmp_offset); ++ emit(ARM_B(jmp_offset), ctx); ++ break; ++notyet: ++ pr_info_once("*** NOT YET: opcode %02x ***\n", code); ++ return -EFAULT; ++ default: ++ pr_err_once("unknown opcode %02x\n", code); ++ return -EINVAL; ++ } ++ ++ if (ctx->flags & FLAG_IMM_OVERFLOW) ++ /* ++ * this instruction generated an overflow when ++ * trying to access the literal pool, so ++ * delegate this filter to the kernel interpreter. ++ */ ++ return -1; + return 0; + } + ++static int build_body(struct jit_ctx *ctx) ++{ ++ const struct bpf_prog *prog = ctx->prog; ++ unsigned int i; ++ ++ for (i = 0; i < prog->len; i++) { ++ const struct bpf_insn *insn = &(prog->insnsi[i]); ++ int ret; ++ ++ ret = build_insn(insn, ctx); ++ ++ /* It's used with loading the 64 bit immediate value. */ ++ if (ret > 0) { ++ i++; ++ if (ctx->target == NULL) ++ ctx->offsets[i] = ctx->idx; ++ continue; ++ } ++ ++ if (ctx->target == NULL) ++ ctx->offsets[i] = ctx->idx; ++ ++ /* If unsuccesfull, return with error code */ ++ if (ret) ++ return ret; ++ } ++ return 0; ++} ++ ++static int validate_code(struct jit_ctx *ctx) ++{ ++ int i; ++ ++ for (i = 0; i < ctx->idx; i++) { ++ if (ctx->target[i] == __opcode_to_mem_arm(ARM_INST_UDF)) ++ return -1; ++ } ++ ++ return 0; ++} + +-void bpf_jit_compile(struct bpf_prog *fp) ++void bpf_jit_compile(struct bpf_prog *prog) + { ++ /* Nothing to do here. We support Internal BPF. 
*/ ++} ++ ++bool bpf_jit_needs_zext(void) ++{ ++ return true; ++} ++ ++struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) ++{ ++ struct bpf_prog *tmp, *orig_prog = prog; + struct bpf_binary_header *header; ++ bool tmp_blinded = false; + struct jit_ctx ctx; +- unsigned tmp_idx; +- unsigned alloc_size; +- u8 *target_ptr; ++ unsigned int tmp_idx; ++ unsigned int image_size; ++ u8 *image_ptr; + +- if (!bpf_jit_enable) +- return; ++ /* If BPF JIT was not enabled then we must fall back to ++ * the interpreter. ++ */ ++ if (!prog->jit_requested) ++ return orig_prog; + +- memset(&ctx, 0, sizeof(ctx)); +- ctx.skf = fp; +- ctx.ret0_fp_idx = -1; ++ /* If constant blinding was enabled and we failed during blinding ++ * then we must fall back to the interpreter. Otherwise, we save ++ * the new JITed code. ++ */ ++ tmp = bpf_jit_blind_constants(prog); + +- ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL); +- if (ctx.offsets == NULL) +- return; ++ if (IS_ERR(tmp)) ++ return orig_prog; ++ if (tmp != prog) { ++ tmp_blinded = true; ++ prog = tmp; ++ } + +- /* fake pass to fill in the ctx->seen */ +- if (unlikely(build_body(&ctx))) ++ memset(&ctx, 0, sizeof(ctx)); ++ ctx.prog = prog; ++ ctx.cpu_architecture = cpu_architecture(); ++ ++ /* Not able to allocate memory for offsets[] , then ++ * we must fall back to the interpreter ++ */ ++ ctx.offsets = kcalloc(prog->len, sizeof(int), GFP_KERNEL); ++ if (ctx.offsets == NULL) { ++ prog = orig_prog; + goto out; ++ } ++ ++ /* 1) fake pass to find in the length of the JITed code, ++ * to compute ctx->offsets and other context variables ++ * needed to compute final JITed code. ++ * Also, calculate random starting pointer/start of JITed code ++ * which is prefixed by random number of fault instructions. ++ * ++ * If the first pass fails then there is no chance of it ++ * being successful in the second pass, so just fall back ++ * to the interpreter. 
++ */ ++ if (build_body(&ctx)) { ++ prog = orig_prog; ++ goto out_off; ++ } + + tmp_idx = ctx.idx; + build_prologue(&ctx); + ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; + ++ ctx.epilogue_offset = ctx.idx; ++ + #if __LINUX_ARM_ARCH__ < 7 + tmp_idx = ctx.idx; + build_epilogue(&ctx); +@@ -1020,64 +1941,83 @@ void bpf_jit_compile(struct bpf_prog *fp + + ctx.idx += ctx.imm_count; + if (ctx.imm_count) { +- ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL); +- if (ctx.imms == NULL) +- goto out; ++ ctx.imms = kcalloc(ctx.imm_count, sizeof(u32), GFP_KERNEL); ++ if (ctx.imms == NULL) { ++ prog = orig_prog; ++ goto out_off; ++ } + } + #else +- /* there's nothing after the epilogue on ARMv7 */ ++ /* there's nothing about the epilogue on ARMv7 */ + build_epilogue(&ctx); + #endif +- alloc_size = 4 * ctx.idx; +- header = bpf_jit_binary_alloc(alloc_size, &target_ptr, +- 4, jit_fill_hole); +- if (header == NULL) +- goto out; ++ /* Now we can get the actual image size of the JITed arm code. ++ * Currently, we are not considering the THUMB-2 instructions ++ * for jit, although it can decrease the size of the image. ++ * ++ * As each arm instruction is of length 32bit, we are translating ++ * number of JITed intructions into the size required to store these ++ * JITed code. ++ */ ++ image_size = sizeof(u32) * ctx.idx; + +- ctx.target = (u32 *) target_ptr; ++ /* Now we know the size of the structure to make */ ++ header = bpf_jit_binary_alloc(image_size, &image_ptr, ++ sizeof(u32), jit_fill_hole); ++ /* Not able to allocate memory for the structure then ++ * we must fall back to the interpretation ++ */ ++ if (header == NULL) { ++ prog = orig_prog; ++ goto out_imms; ++ } ++ ++ /* 2.) Actual pass to generate final JIT code */ ++ ctx.target = (u32 *) image_ptr; + ctx.idx = 0; + + build_prologue(&ctx); ++ ++ /* If building the body of the JITed code fails somehow, ++ * we fall back to the interpretation. 
++ */ + if (build_body(&ctx) < 0) { +-#if __LINUX_ARM_ARCH__ < 7 +- if (ctx.imm_count) +- kfree(ctx.imms); +-#endif ++ image_ptr = NULL; + bpf_jit_binary_free(header); +- goto out; ++ prog = orig_prog; ++ goto out_imms; + } + build_epilogue(&ctx); + ++ /* 3.) Extra pass to validate JITed Code */ ++ if (validate_code(&ctx)) { ++ image_ptr = NULL; ++ bpf_jit_binary_free(header); ++ prog = orig_prog; ++ goto out_imms; ++ } + flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx)); + ++ if (bpf_jit_enable > 1) ++ /* there are 2 passes here */ ++ bpf_jit_dump(prog->len, image_size, 2, ctx.target); ++ ++ bpf_jit_binary_lock_ro(header); ++ prog->bpf_func = (void *)ctx.target; ++ prog->jited = 1; ++ prog->jited_len = image_size; ++ ++out_imms: + #if __LINUX_ARM_ARCH__ < 7 + if (ctx.imm_count) + kfree(ctx.imms); + #endif +- +- if (bpf_jit_enable > 1) +- /* there are 2 passes here */ +- bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); +- +- set_memory_ro((unsigned long)header, header->pages); +- fp->bpf_func = (void *)ctx.target; +- fp->jited = 1; +-out: ++out_off: + kfree(ctx.offsets); +- return; ++out: ++ if (tmp_blinded) ++ bpf_jit_prog_release_other(prog, prog == orig_prog ? 
++ tmp : orig_prog); ++ return prog; + } + +-void bpf_jit_free(struct bpf_prog *fp) +-{ +- unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK; +- struct bpf_binary_header *header = (void *)addr; +- +- if (!fp->jited) +- goto free_filter; +- +- set_memory_rw(addr, header->pages); +- bpf_jit_binary_free(header); +- +-free_filter: +- bpf_prog_unlock_free(fp); +-} +--- a/arch/arm/net/bpf_jit_32.h ++++ b/arch/arm/net/bpf_jit_32.h +@@ -1,16 +1,14 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ + /* + * Just-In-Time compiler for BPF filters on 32bit ARM + * + * Copyright (c) 2011 Mircea Gherzan +- * +- * This program is free software; you can redistribute it and/or modify it +- * under the terms of the GNU General Public License as published by the +- * Free Software Foundation; version 2 of the License. + */ + + #ifndef PFILTER_OPCODES_ARM_H + #define PFILTER_OPCODES_ARM_H + ++/* ARM 32bit Registers */ + #define ARM_R0 0 + #define ARM_R1 1 + #define ARM_R2 2 +@@ -22,40 +20,46 @@ + #define ARM_R8 8 + #define ARM_R9 9 + #define ARM_R10 10 +-#define ARM_FP 11 +-#define ARM_IP 12 +-#define ARM_SP 13 +-#define ARM_LR 14 +-#define ARM_PC 15 +- +-#define ARM_COND_EQ 0x0 +-#define ARM_COND_NE 0x1 +-#define ARM_COND_CS 0x2 ++#define ARM_FP 11 /* Frame Pointer */ ++#define ARM_IP 12 /* Intra-procedure scratch register */ ++#define ARM_SP 13 /* Stack pointer: as load/store base reg */ ++#define ARM_LR 14 /* Link Register */ ++#define ARM_PC 15 /* Program counter */ ++ ++#define ARM_COND_EQ 0x0 /* == */ ++#define ARM_COND_NE 0x1 /* != */ ++#define ARM_COND_CS 0x2 /* unsigned >= */ + #define ARM_COND_HS ARM_COND_CS +-#define ARM_COND_CC 0x3 ++#define ARM_COND_CC 0x3 /* unsigned < */ + #define ARM_COND_LO ARM_COND_CC +-#define ARM_COND_MI 0x4 +-#define ARM_COND_PL 0x5 +-#define ARM_COND_VS 0x6 +-#define ARM_COND_VC 0x7 +-#define ARM_COND_HI 0x8 +-#define ARM_COND_LS 0x9 +-#define ARM_COND_GE 0xa +-#define ARM_COND_LT 0xb +-#define ARM_COND_GT 0xc +-#define ARM_COND_LE 0xd 
+-#define ARM_COND_AL 0xe ++#define ARM_COND_MI 0x4 /* < 0 */ ++#define ARM_COND_PL 0x5 /* >= 0 */ ++#define ARM_COND_VS 0x6 /* Signed Overflow */ ++#define ARM_COND_VC 0x7 /* No Signed Overflow */ ++#define ARM_COND_HI 0x8 /* unsigned > */ ++#define ARM_COND_LS 0x9 /* unsigned <= */ ++#define ARM_COND_GE 0xa /* Signed >= */ ++#define ARM_COND_LT 0xb /* Signed < */ ++#define ARM_COND_GT 0xc /* Signed > */ ++#define ARM_COND_LE 0xd /* Signed <= */ ++#define ARM_COND_AL 0xe /* None */ + + /* register shift types */ + #define SRTYPE_LSL 0 + #define SRTYPE_LSR 1 + #define SRTYPE_ASR 2 + #define SRTYPE_ROR 3 ++#define SRTYPE_ASL (SRTYPE_LSL) + + #define ARM_INST_ADD_R 0x00800000 ++#define ARM_INST_ADDS_R 0x00900000 ++#define ARM_INST_ADC_R 0x00a00000 ++#define ARM_INST_ADC_I 0x02a00000 + #define ARM_INST_ADD_I 0x02800000 ++#define ARM_INST_ADDS_I 0x02900000 + + #define ARM_INST_AND_R 0x00000000 ++#define ARM_INST_ANDS_R 0x00100000 + #define ARM_INST_AND_I 0x02000000 + + #define ARM_INST_BIC_R 0x01c00000 +@@ -71,13 +75,18 @@ + #define ARM_INST_EOR_R 0x00200000 + #define ARM_INST_EOR_I 0x02200000 + +-#define ARM_INST_LDRB_I 0x05d00000 ++#define ARM_INST_LDST__U 0x00800000 ++#define ARM_INST_LDST__IMM12 0x00000fff ++#define ARM_INST_LDRB_I 0x05500000 + #define ARM_INST_LDRB_R 0x07d00000 +-#define ARM_INST_LDRH_I 0x01d000b0 ++#define ARM_INST_LDRD_I 0x014000d0 ++#define ARM_INST_LDRH_I 0x015000b0 + #define ARM_INST_LDRH_R 0x019000b0 +-#define ARM_INST_LDR_I 0x05900000 ++#define ARM_INST_LDR_I 0x05100000 ++#define ARM_INST_LDR_R 0x07900000 + + #define ARM_INST_LDM 0x08900000 ++#define ARM_INST_LDM_IA 0x08b00000 + + #define ARM_INST_LSL_I 0x01a00000 + #define ARM_INST_LSL_R 0x01a00010 +@@ -86,6 +95,7 @@ + #define ARM_INST_LSR_R 0x01a00030 + + #define ARM_INST_MOV_R 0x01a00000 ++#define ARM_INST_MOVS_R 0x01b00000 + #define ARM_INST_MOV_I 0x03a00000 + #define ARM_INST_MOVW 0x03000000 + #define ARM_INST_MOVT 0x03400000 +@@ -96,17 +106,29 @@ + #define ARM_INST_PUSH 0x092d0000 + + 
#define ARM_INST_ORR_R 0x01800000 ++#define ARM_INST_ORRS_R 0x01900000 + #define ARM_INST_ORR_I 0x03800000 + + #define ARM_INST_REV 0x06bf0f30 + #define ARM_INST_REV16 0x06bf0fb0 + + #define ARM_INST_RSB_I 0x02600000 ++#define ARM_INST_RSBS_I 0x02700000 ++#define ARM_INST_RSC_I 0x02e00000 + + #define ARM_INST_SUB_R 0x00400000 ++#define ARM_INST_SUBS_R 0x00500000 ++#define ARM_INST_RSB_R 0x00600000 + #define ARM_INST_SUB_I 0x02400000 +- +-#define ARM_INST_STR_I 0x05800000 ++#define ARM_INST_SUBS_I 0x02500000 ++#define ARM_INST_SBC_I 0x02c00000 ++#define ARM_INST_SBC_R 0x00c00000 ++#define ARM_INST_SBCS_R 0x00d00000 ++ ++#define ARM_INST_STR_I 0x05000000 ++#define ARM_INST_STRB_I 0x05400000 ++#define ARM_INST_STRD_I 0x014000f0 ++#define ARM_INST_STRH_I 0x014000b0 + + #define ARM_INST_TST_R 0x01100000 + #define ARM_INST_TST_I 0x03100000 +@@ -117,6 +139,8 @@ + + #define ARM_INST_MLS 0x00600090 + ++#define ARM_INST_UXTH 0x06ff0070 ++ + /* + * Use a suitable undefined instruction to use for ARM/Thumb2 faulting. 
+ * We need to be careful not to conflict with those used by other modules +@@ -135,11 +159,18 @@ + #define _AL3_R(op, rd, rn, rm) ((op ## _R) | (rd) << 12 | (rn) << 16 | (rm)) + /* immediate */ + #define _AL3_I(op, rd, rn, imm) ((op ## _I) | (rd) << 12 | (rn) << 16 | (imm)) ++/* register with register-shift */ ++#define _AL3_SR(inst) (inst | (1 << 4)) + + #define ARM_ADD_R(rd, rn, rm) _AL3_R(ARM_INST_ADD, rd, rn, rm) ++#define ARM_ADDS_R(rd, rn, rm) _AL3_R(ARM_INST_ADDS, rd, rn, rm) + #define ARM_ADD_I(rd, rn, imm) _AL3_I(ARM_INST_ADD, rd, rn, imm) ++#define ARM_ADDS_I(rd, rn, imm) _AL3_I(ARM_INST_ADDS, rd, rn, imm) ++#define ARM_ADC_R(rd, rn, rm) _AL3_R(ARM_INST_ADC, rd, rn, rm) ++#define ARM_ADC_I(rd, rn, imm) _AL3_I(ARM_INST_ADC, rd, rn, imm) + + #define ARM_AND_R(rd, rn, rm) _AL3_R(ARM_INST_AND, rd, rn, rm) ++#define ARM_ANDS_R(rd, rn, rm) _AL3_R(ARM_INST_ANDS, rd, rn, rm) + #define ARM_AND_I(rd, rn, imm) _AL3_I(ARM_INST_AND, rd, rn, imm) + + #define ARM_BIC_R(rd, rn, rm) _AL3_R(ARM_INST_BIC, rd, rn, rm) +@@ -155,27 +186,38 @@ + #define ARM_EOR_R(rd, rn, rm) _AL3_R(ARM_INST_EOR, rd, rn, rm) + #define ARM_EOR_I(rd, rn, imm) _AL3_I(ARM_INST_EOR, rd, rn, imm) + +-#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \ +- | (off)) +-#define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \ +- | (off)) +-#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \ ++#define ARM_LDR_R(rt, rn, rm) (ARM_INST_LDR_R | ARM_INST_LDST__U \ ++ | (rt) << 12 | (rn) << 16 \ + | (rm)) +-#define ARM_LDRH_I(rt, rn, off) (ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \ +- | (((off) & 0xf0) << 4) | ((off) & 0xf)) +-#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | (rt) << 12 | (rn) << 16 \ ++#define ARM_LDR_R_SI(rt, rn, rm, type, imm) \ ++ (ARM_INST_LDR_R | ARM_INST_LDST__U \ ++ | (rt) << 12 | (rn) << 16 \ ++ | (imm) << 7 | (type) << 5 | (rm)) ++#define ARM_LDRB_R(rt, rn, rm) (ARM_INST_LDRB_R | ARM_INST_LDST__U \ ++ | (rt) << 12 | 
(rn) << 16 \ ++ | (rm)) ++#define ARM_LDRH_R(rt, rn, rm) (ARM_INST_LDRH_R | ARM_INST_LDST__U \ ++ | (rt) << 12 | (rn) << 16 \ + | (rm)) + + #define ARM_LDM(rn, regs) (ARM_INST_LDM | (rn) << 16 | (regs)) ++#define ARM_LDM_IA(rn, regs) (ARM_INST_LDM_IA | (rn) << 16 | (regs)) + + #define ARM_LSL_R(rd, rn, rm) (_AL3_R(ARM_INST_LSL, rd, 0, rn) | (rm) << 8) + #define ARM_LSL_I(rd, rn, imm) (_AL3_I(ARM_INST_LSL, rd, 0, rn) | (imm) << 7) + + #define ARM_LSR_R(rd, rn, rm) (_AL3_R(ARM_INST_LSR, rd, 0, rn) | (rm) << 8) + #define ARM_LSR_I(rd, rn, imm) (_AL3_I(ARM_INST_LSR, rd, 0, rn) | (imm) << 7) ++#define ARM_ASR_R(rd, rn, rm) (_AL3_R(ARM_INST_ASR, rd, 0, rn) | (rm) << 8) ++#define ARM_ASR_I(rd, rn, imm) (_AL3_I(ARM_INST_ASR, rd, 0, rn) | (imm) << 7) + + #define ARM_MOV_R(rd, rm) _AL3_R(ARM_INST_MOV, rd, 0, rm) ++#define ARM_MOVS_R(rd, rm) _AL3_R(ARM_INST_MOVS, rd, 0, rm) + #define ARM_MOV_I(rd, imm) _AL3_I(ARM_INST_MOV, rd, 0, imm) ++#define ARM_MOV_SR(rd, rm, type, rs) \ ++ (_AL3_SR(ARM_MOV_R(rd, rm)) | (type) << 5 | (rs) << 8) ++#define ARM_MOV_SI(rd, rm, type, imm6) \ ++ (ARM_MOV_R(rd, rm) | (type) << 5 | (imm6) << 7) + + #define ARM_MOVW(rd, imm) \ + (ARM_INST_MOVW | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff)) +@@ -190,19 +232,31 @@ + + #define ARM_ORR_R(rd, rn, rm) _AL3_R(ARM_INST_ORR, rd, rn, rm) + #define ARM_ORR_I(rd, rn, imm) _AL3_I(ARM_INST_ORR, rd, rn, imm) +-#define ARM_ORR_S(rd, rn, rm, type, rs) \ +- (ARM_ORR_R(rd, rn, rm) | (type) << 5 | (rs) << 7) ++#define ARM_ORR_SR(rd, rn, rm, type, rs) \ ++ (_AL3_SR(ARM_ORR_R(rd, rn, rm)) | (type) << 5 | (rs) << 8) ++#define ARM_ORRS_R(rd, rn, rm) _AL3_R(ARM_INST_ORRS, rd, rn, rm) ++#define ARM_ORRS_SR(rd, rn, rm, type, rs) \ ++ (_AL3_SR(ARM_ORRS_R(rd, rn, rm)) | (type) << 5 | (rs) << 8) ++#define ARM_ORR_SI(rd, rn, rm, type, imm6) \ ++ (ARM_ORR_R(rd, rn, rm) | (type) << 5 | (imm6) << 7) ++#define ARM_ORRS_SI(rd, rn, rm, type, imm6) \ ++ (ARM_ORRS_R(rd, rn, rm) | (type) << 5 | (imm6) << 7) + + #define 
ARM_REV(rd, rm) (ARM_INST_REV | (rd) << 12 | (rm)) + #define ARM_REV16(rd, rm) (ARM_INST_REV16 | (rd) << 12 | (rm)) + + #define ARM_RSB_I(rd, rn, imm) _AL3_I(ARM_INST_RSB, rd, rn, imm) ++#define ARM_RSBS_I(rd, rn, imm) _AL3_I(ARM_INST_RSBS, rd, rn, imm) ++#define ARM_RSC_I(rd, rn, imm) _AL3_I(ARM_INST_RSC, rd, rn, imm) + + #define ARM_SUB_R(rd, rn, rm) _AL3_R(ARM_INST_SUB, rd, rn, rm) ++#define ARM_SUBS_R(rd, rn, rm) _AL3_R(ARM_INST_SUBS, rd, rn, rm) ++#define ARM_RSB_R(rd, rn, rm) _AL3_R(ARM_INST_RSB, rd, rn, rm) ++#define ARM_SBC_R(rd, rn, rm) _AL3_R(ARM_INST_SBC, rd, rn, rm) ++#define ARM_SBCS_R(rd, rn, rm) _AL3_R(ARM_INST_SBCS, rd, rn, rm) + #define ARM_SUB_I(rd, rn, imm) _AL3_I(ARM_INST_SUB, rd, rn, imm) +- +-#define ARM_STR_I(rt, rn, off) (ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \ +- | (off)) ++#define ARM_SUBS_I(rd, rn, imm) _AL3_I(ARM_INST_SUBS, rd, rn, imm) ++#define ARM_SBC_I(rd, rn, imm) _AL3_I(ARM_INST_SBC, rd, rn, imm) + + #define ARM_TST_R(rn, rm) _AL3_R(ARM_INST_TST, 0, rn, rm) + #define ARM_TST_I(rn, imm) _AL3_I(ARM_INST_TST, 0, rn, imm) +@@ -214,5 +268,6 @@ + + #define ARM_MLS(rd, rn, rm, ra) (ARM_INST_MLS | (rd) << 16 | (rn) | (rm) << 8 \ + | (ra) << 12) ++#define ARM_UXTH(rd, rm) (ARM_INST_UXTH | (rd) << 12 | (rm)) + + #endif /* PFILTER_OPCODES_ARM_H */ +--- a/arch/arm/net/Makefile ++++ b/arch/arm/net/Makefile +@@ -1,3 +1,4 @@ ++# SPDX-License-Identifier: GPL-2.0-only + # ARM-specific networking code + + obj-$(CONFIG_BPF_JIT) += bpf_jit_32.o +--- /dev/null ++++ b/include/linux/bpf-cgroup.h +@@ -0,0 +1,410 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef _BPF_CGROUP_H ++#define _BPF_CGROUP_H ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++struct sock; ++struct sockaddr; ++struct cgroup; ++struct sk_buff; ++struct bpf_map; ++struct bpf_prog; ++struct bpf_sock_ops_kern; ++struct bpf_cgroup_storage; ++struct ctl_table; ++struct ctl_table_header; ++ ++#ifdef CONFIG_CGROUP_BPF ++ ++extern struct 
static_key_false cgroup_bpf_enabled_key; ++#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) ++ ++DECLARE_PER_CPU(struct bpf_cgroup_storage*, ++ bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); ++ ++#define for_each_cgroup_storage_type(stype) \ ++ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) ++ ++struct bpf_cgroup_storage_map; ++ ++struct bpf_storage_buffer { ++ struct rcu_head rcu; ++ char data[0]; ++}; ++ ++struct bpf_cgroup_storage { ++ union { ++ struct bpf_storage_buffer *buf; ++ void __percpu *percpu_buf; ++ }; ++ struct bpf_cgroup_storage_map *map; ++ struct bpf_cgroup_storage_key key; ++ struct list_head list; ++ struct rb_node node; ++ struct rcu_head rcu; ++}; ++ ++struct bpf_prog_list { ++ struct list_head node; ++ struct bpf_prog *prog; ++ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; ++}; ++ ++struct bpf_prog_array; ++ ++struct cgroup_bpf { ++ /* array of effective progs in this cgroup */ ++ struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE]; ++ ++ /* attached progs to this cgroup and attach flags ++ * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will ++ * have either zero or one element ++ * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS ++ */ ++ struct list_head progs[MAX_BPF_ATTACH_TYPE]; ++ u32 flags[MAX_BPF_ATTACH_TYPE]; ++ ++ /* temp storage for effective prog array used by prog_attach/detach */ ++ struct bpf_prog_array *inactive; ++ ++ /* reference counter used to detach bpf programs after cgroup removal */ ++ struct percpu_ref refcnt; ++ ++ /* cgroup_bpf is released using a work queue */ ++ struct work_struct release_work; ++}; ++ ++int cgroup_bpf_inherit(struct cgroup *cgrp); ++void cgroup_bpf_offline(struct cgroup *cgrp); ++ ++int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, ++ enum bpf_attach_type type, u32 flags); ++int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, ++ enum bpf_attach_type type); ++int 
__cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, ++ union bpf_attr __user *uattr); ++ ++/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */ ++int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, ++ enum bpf_attach_type type, u32 flags); ++int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, ++ enum bpf_attach_type type, u32 flags); ++int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, ++ union bpf_attr __user *uattr); ++ ++int __cgroup_bpf_run_filter_skb(struct sock *sk, ++ struct sk_buff *skb, ++ enum bpf_attach_type type); ++ ++int __cgroup_bpf_run_filter_sk(struct sock *sk, ++ enum bpf_attach_type type); ++ ++int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, ++ struct sockaddr *uaddr, ++ enum bpf_attach_type type, ++ void *t_ctx); ++ ++int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, ++ struct bpf_sock_ops_kern *sock_ops, ++ enum bpf_attach_type type); ++ ++int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, ++ short access, enum bpf_attach_type type); ++ ++int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, ++ struct ctl_table *table, int write, ++ void __user *buf, size_t *pcount, ++ loff_t *ppos, void **new_buf, ++ enum bpf_attach_type type); ++ ++int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, ++ int *optname, char __user *optval, ++ int *optlen, char **kernel_optval); ++int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, ++ int optname, char __user *optval, ++ int __user *optlen, int max_optlen, ++ int retval); ++ ++static inline enum bpf_cgroup_storage_type cgroup_storage_type( ++ struct bpf_map *map) ++{ ++ if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) ++ return BPF_CGROUP_STORAGE_PERCPU; ++ ++ return BPF_CGROUP_STORAGE_SHARED; ++} ++ ++static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage ++ *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) ++{ ++ enum bpf_cgroup_storage_type stype; ++ 
++ for_each_cgroup_storage_type(stype) ++ this_cpu_write(bpf_cgroup_storage[stype], storage[stype]); ++} ++ ++struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, ++ enum bpf_cgroup_storage_type stype); ++void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); ++void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, ++ struct cgroup *cgroup, ++ enum bpf_attach_type type); ++void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); ++int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map); ++void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map); ++ ++int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); ++int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, ++ void *value, u64 flags); ++ ++/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */ ++#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ++({ \ ++ int __ret = 0; \ ++ if (cgroup_bpf_enabled) \ ++ __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ ++ BPF_CGROUP_INET_INGRESS); \ ++ \ ++ __ret; \ ++}) ++ ++#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ ++({ \ ++ int __ret = 0; \ ++ if (cgroup_bpf_enabled && sk && sk == skb->sk) { \ ++ typeof(sk) __sk = sk_to_full_sk(sk); \ ++ if (sk_fullsock(__sk)) \ ++ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ ++ BPF_CGROUP_INET_EGRESS); \ ++ } \ ++ __ret; \ ++}) ++ ++#define BPF_CGROUP_RUN_SK_PROG(sk, type) \ ++({ \ ++ int __ret = 0; \ ++ if (cgroup_bpf_enabled) { \ ++ __ret = __cgroup_bpf_run_filter_sk(sk, type); \ ++ } \ ++ __ret; \ ++}) ++ ++#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ ++ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) ++ ++#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ ++ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) ++ ++#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \ ++ BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND) ++ ++#define BPF_CGROUP_RUN_SA_PROG(sk, 
uaddr, type) \ ++({ \ ++ int __ret = 0; \ ++ if (cgroup_bpf_enabled) \ ++ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ ++ NULL); \ ++ __ret; \ ++}) ++ ++#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \ ++({ \ ++ int __ret = 0; \ ++ if (cgroup_bpf_enabled) { \ ++ lock_sock(sk); \ ++ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ ++ t_ctx); \ ++ release_sock(sk); \ ++ } \ ++ __ret; \ ++}) ++ ++#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \ ++ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND) ++ ++#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \ ++ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND) ++ ++#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \ ++ sk->sk_prot->pre_connect) ++ ++#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \ ++ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT) ++ ++#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \ ++ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT) ++ ++#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \ ++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL) ++ ++#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \ ++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL) ++ ++#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \ ++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx) ++ ++#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \ ++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx) ++ ++#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \ ++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL) ++ ++#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \ ++ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL) ++ ++#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ ++({ \ ++ int __ret = 0; \ ++ if (cgroup_bpf_enabled && (sock_ops)->sk) { \ ++ 
typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \ ++ if (__sk && sk_fullsock(__sk)) \ ++ __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \ ++ sock_ops, \ ++ BPF_CGROUP_SOCK_OPS); \ ++ } \ ++ __ret; \ ++}) ++ ++#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \ ++({ \ ++ int __ret = 0; \ ++ if (cgroup_bpf_enabled) \ ++ __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \ ++ access, \ ++ BPF_CGROUP_DEVICE); \ ++ \ ++ __ret; \ ++}) ++ ++ ++#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \ ++({ \ ++ int __ret = 0; \ ++ if (cgroup_bpf_enabled) \ ++ __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ ++ buf, count, pos, nbuf, \ ++ BPF_CGROUP_SYSCTL); \ ++ __ret; \ ++}) ++ ++#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ ++ kernel_optval) \ ++({ \ ++ int __ret = 0; \ ++ if (cgroup_bpf_enabled) \ ++ __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ ++ optname, optval, \ ++ optlen, \ ++ kernel_optval); \ ++ __ret; \ ++}) ++ ++#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ ++({ \ ++ int __ret = 0; \ ++ if (cgroup_bpf_enabled) \ ++ get_user(__ret, optlen); \ ++ __ret; \ ++}) ++ ++#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \ ++ max_optlen, retval) \ ++({ \ ++ int __ret = retval; \ ++ if (cgroup_bpf_enabled) \ ++ __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \ ++ optname, optval, \ ++ optlen, max_optlen, \ ++ retval); \ ++ __ret; \ ++}) ++ ++int cgroup_bpf_prog_attach(const union bpf_attr *attr, ++ enum bpf_prog_type ptype, struct bpf_prog *prog); ++int cgroup_bpf_prog_detach(const union bpf_attr *attr, ++ enum bpf_prog_type ptype); ++int cgroup_bpf_prog_query(const union bpf_attr *attr, ++ union bpf_attr __user *uattr); ++#else ++ ++struct bpf_prog; ++struct cgroup_bpf {}; ++static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } ++static inline void cgroup_bpf_offline(struct cgroup *cgrp) {} ++ ++static 
inline int cgroup_bpf_prog_attach(const union bpf_attr *attr, ++ enum bpf_prog_type ptype, ++ struct bpf_prog *prog) ++{ ++ return -EINVAL; ++} ++ ++static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr, ++ enum bpf_prog_type ptype) ++{ ++ return -EINVAL; ++} ++ ++static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ return -EINVAL; ++} ++ ++static inline void bpf_cgroup_storage_set( ++ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {} ++static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, ++ struct bpf_map *map) { return 0; } ++static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, ++ struct bpf_map *map) {} ++static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( ++ struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; } ++static inline void bpf_cgroup_storage_free( ++ struct bpf_cgroup_storage *storage) {} ++static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, ++ void *value) { ++ return 0; ++} ++static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, ++ void *key, void *value, u64 flags) { ++ return 0; ++} ++ ++#define cgroup_bpf_enabled (0) ++#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) ++#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; }) ++#define 
BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; }) ++#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) ++#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ ++ optlen, max_optlen, retval) ({ retval; }) ++#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ ++ kernel_optval) ({ 0; }) ++ ++#define for_each_cgroup_storage_type(stype) for (; false; ) ++ ++#endif /* CONFIG_CGROUP_BPF */ ++ ++#endif /* _BPF_CGROUP_H */ +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -1,55 +1,183 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ + /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com +- * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of version 2 of the GNU General Public +- * License as published by the Free Software Foundation. 
+ */ + #ifndef _LINUX_BPF_H + #define _LINUX_BPF_H 1 + + #include ++ + #include + #include ++#include ++#include ++#include ++#include ++#include ++#include + ++struct bpf_verifier_env; ++struct perf_event; ++struct bpf_prog; + struct bpf_map; ++struct sock; ++struct seq_file; ++struct btf; ++struct btf_type; ++ ++extern struct idr btf_idr; ++extern spinlock_t btf_idr_lock; + + /* map is generic key/value storage optionally accesible by eBPF programs */ + struct bpf_map_ops { + /* funcs callable from userspace (via syscall) */ ++ int (*map_alloc_check)(union bpf_attr *attr); + struct bpf_map *(*map_alloc)(union bpf_attr *attr); +- void (*map_free)(struct bpf_map *); ++ void (*map_release)(struct bpf_map *map, struct file *map_file); ++ void (*map_free)(struct bpf_map *map); + int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); ++ void (*map_release_uref)(struct bpf_map *map); ++ void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); + + /* funcs callable from userspace and from eBPF programs */ + void *(*map_lookup_elem)(struct bpf_map *map, void *key); + int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); + int (*map_delete_elem)(struct bpf_map *map, void *key); ++ int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); ++ int (*map_pop_elem)(struct bpf_map *map, void *value); ++ int (*map_peek_elem)(struct bpf_map *map, void *value); + + /* funcs called by prog_array and perf_event_array map */ +- void *(*map_fd_get_ptr) (struct bpf_map *map, int fd); +- void (*map_fd_put_ptr) (void *ptr); ++ void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, ++ int fd); ++ void (*map_fd_put_ptr)(void *ptr); ++ u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); ++ u32 (*map_fd_sys_lookup_elem)(void *ptr); ++ void (*map_seq_show_elem)(struct bpf_map *map, void *key, ++ struct seq_file *m); ++ int (*map_check_btf)(const struct bpf_map *map, ++ const struct btf *btf, ++ const 
struct btf_type *key_type, ++ const struct btf_type *value_type); ++ ++ /* Direct value access helpers. */ ++ int (*map_direct_value_addr)(const struct bpf_map *map, ++ u64 *imm, u32 off); ++ int (*map_direct_value_meta)(const struct bpf_map *map, ++ u64 imm, u32 *off); ++}; ++ ++struct bpf_map_memory { ++ u32 pages; ++ struct user_struct *user; + }; + + struct bpf_map { +- atomic_t refcnt; ++ /* The first two cachelines with read-mostly members of which some ++ * are also accessed in fast-path (e.g. ops, max_entries). ++ */ ++ const struct bpf_map_ops *ops ____cacheline_aligned; ++ struct bpf_map *inner_map_meta; ++#ifdef CONFIG_SECURITY ++ void *security; ++#endif + enum bpf_map_type map_type; + u32 key_size; + u32 value_size; + u32 max_entries; +- u32 pages; ++ u32 map_flags; ++ int spin_lock_off; /* >=0 valid offset, <0 error */ ++ u32 id; ++ int numa_node; ++ u32 btf_key_type_id; ++ u32 btf_value_type_id; ++ struct btf *btf; ++ struct bpf_map_memory memory; + bool unpriv_array; +- struct user_struct *user; +- const struct bpf_map_ops *ops; +- struct work_struct work; ++ bool frozen; /* write-once */ ++ /* 48 bytes hole */ ++ ++ /* The 3rd and 4th cacheline with misc members to avoid false sharing ++ * particularly with refcounting. 
++ */ ++ atomic_t refcnt ____cacheline_aligned; + atomic_t usercnt; ++ struct work_struct work; ++ char name[BPF_OBJ_NAME_LEN]; + }; + +-struct bpf_map_type_list { +- struct list_head list_node; +- const struct bpf_map_ops *ops; +- enum bpf_map_type type; ++static inline bool map_value_has_spin_lock(const struct bpf_map *map) ++{ ++ return map->spin_lock_off >= 0; ++} ++ ++static inline void check_and_init_map_lock(struct bpf_map *map, void *dst) ++{ ++ if (likely(!map_value_has_spin_lock(map))) ++ return; ++ *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = ++ (struct bpf_spin_lock){}; ++} ++ ++/* copy everything but bpf_spin_lock */ ++static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) ++{ ++ if (unlikely(map_value_has_spin_lock(map))) { ++ u32 off = map->spin_lock_off; ++ ++ memcpy(dst, src, off); ++ memcpy(dst + off + sizeof(struct bpf_spin_lock), ++ src + off + sizeof(struct bpf_spin_lock), ++ map->value_size - off - sizeof(struct bpf_spin_lock)); ++ } else { ++ memcpy(dst, src, map->value_size); ++ } ++} ++void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, ++ bool lock_src); ++ ++struct bpf_offload_dev; ++struct bpf_offloaded_map; ++ ++struct bpf_map_dev_ops { ++ int (*map_get_next_key)(struct bpf_offloaded_map *map, ++ void *key, void *next_key); ++ int (*map_lookup_elem)(struct bpf_offloaded_map *map, ++ void *key, void *value); ++ int (*map_update_elem)(struct bpf_offloaded_map *map, ++ void *key, void *value, u64 flags); ++ int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); + }; + ++struct bpf_offloaded_map { ++ struct bpf_map map; ++ struct net_device *netdev; ++ const struct bpf_map_dev_ops *dev_ops; ++ void *dev_priv; ++ struct list_head offloads; ++}; ++ ++static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) ++{ ++ return container_of(map, struct bpf_offloaded_map, map); ++} ++ ++static inline bool bpf_map_offload_neutral(const struct bpf_map *map) ++{ ++ return 
map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; ++} ++ ++static inline bool bpf_map_support_seq_show(const struct bpf_map *map) ++{ ++ return map->btf && map->ops->map_seq_show_elem; ++} ++ ++int map_check_no_btf(const struct bpf_map *map, ++ const struct btf *btf, ++ const struct btf_type *key_type, ++ const struct btf_type *value_type); ++ ++extern const struct bpf_map_ops bpf_map_offload_ops; ++ + /* function argument constraints */ + enum bpf_arg_type { + ARG_DONTCARE = 0, /* unused argument in helper function */ +@@ -60,22 +188,40 @@ enum bpf_arg_type { + ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ + ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ + ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ ++ ARG_PTR_TO_UNINIT_MAP_VALUE, /* pointer to valid memory used to store a map value */ ++ ARG_PTR_TO_MAP_VALUE_OR_NULL, /* pointer to stack used as map value or NULL */ + + /* the following constraints used to prototype bpf_memcmp() and other + * functions that access data on eBPF program stack + */ +- ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */ +- ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */ ++ ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ ++ ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */ ++ ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, ++ * helper function must fill all bytes or clear ++ * them in error case. 
++ */ ++ ++ ARG_CONST_SIZE, /* number of bytes accessed from memory */ ++ ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ + + ARG_PTR_TO_CTX, /* pointer to context */ + ARG_ANYTHING, /* any (initialized) argument is ok */ ++ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ ++ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ ++ ARG_PTR_TO_INT, /* pointer to int */ ++ ARG_PTR_TO_LONG, /* pointer to long */ ++ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ + }; + + /* type of values returned from helper functions */ + enum bpf_return_type { + RET_INTEGER, /* function returns integer */ + RET_VOID, /* function doesn't return anything */ ++ RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ + RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ ++ RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ ++ RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ ++ RET_PTR_TO_SOCK_COMMON_OR_NULL, /* returns a pointer to a sock_common or NULL */ + }; + + /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs +@@ -85,6 +231,7 @@ enum bpf_return_type { + struct bpf_func_proto { + u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + bool gpl_only; ++ bool pkt_access; + enum bpf_return_type ret_type; + enum bpf_arg_type arg1_type; + enum bpf_arg_type arg2_type; +@@ -104,35 +251,172 @@ enum bpf_access_type { + BPF_WRITE = 2 + }; + +-struct bpf_prog; ++/* types of values stored in eBPF registers */ ++/* Pointer types represent: ++ * pointer ++ * pointer + imm ++ * pointer + (u16) var ++ * pointer + (u16) var + imm ++ * if (range > 0) then [ptr, ptr + range - off) is safe to access ++ * if (id > 0) means that some 'var' was added ++ * if (off > 0) means that 'imm' was added ++ */ ++enum bpf_reg_type { ++ NOT_INIT = 0, /* nothing was written into register */ ++ SCALAR_VALUE, /* reg doesn't contain a valid pointer */ ++ PTR_TO_CTX, /* reg 
points to bpf_context */ ++ CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ ++ PTR_TO_MAP_VALUE, /* reg points to map element value */ ++ PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ ++ PTR_TO_STACK, /* reg == frame_pointer + offset */ ++ PTR_TO_PACKET_META, /* skb->data - meta_len */ ++ PTR_TO_PACKET, /* reg points to skb->data */ ++ PTR_TO_PACKET_END, /* skb->data + headlen */ ++ PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ ++ PTR_TO_SOCKET, /* reg points to struct bpf_sock */ ++ PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ ++ PTR_TO_SOCK_COMMON, /* reg points to sock_common */ ++ PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */ ++ PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ ++ PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ ++ PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ ++ PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ ++}; ++ ++/* The information passed from prog-specific *_is_valid_access ++ * back to the verifier. 
++ */ ++struct bpf_insn_access_aux { ++ enum bpf_reg_type reg_type; ++ int ctx_field_size; ++}; ++ ++static inline void ++bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) ++{ ++ aux->ctx_field_size = size; ++} ++ ++struct bpf_prog_ops { ++ int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, ++ union bpf_attr __user *uattr); ++}; + + struct bpf_verifier_ops { + /* return eBPF function prototype for verification */ +- const struct bpf_func_proto *(*get_func_proto)(enum bpf_func_id func_id); ++ const struct bpf_func_proto * ++ (*get_func_proto)(enum bpf_func_id func_id, ++ const struct bpf_prog *prog); + + /* return true if 'size' wide access at offset 'off' within bpf_context + * with 'type' (read or write) is allowed + */ +- bool (*is_valid_access)(int off, int size, enum bpf_access_type type); ++ bool (*is_valid_access)(int off, int size, enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info); ++ int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, ++ const struct bpf_prog *prog); ++ int (*gen_ld_abs)(const struct bpf_insn *orig, ++ struct bpf_insn *insn_buf); ++ u32 (*convert_ctx_access)(enum bpf_access_type type, ++ const struct bpf_insn *src, ++ struct bpf_insn *dst, ++ struct bpf_prog *prog, u32 *target_size); ++}; + +- u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg, +- int src_reg, int ctx_off, +- struct bpf_insn *insn, struct bpf_prog *prog); ++struct bpf_prog_offload_ops { ++ /* verifier basic callbacks */ ++ int (*insn_hook)(struct bpf_verifier_env *env, ++ int insn_idx, int prev_insn_idx); ++ int (*finalize)(struct bpf_verifier_env *env); ++ /* verifier optimization callbacks (called after .finalize) */ ++ int (*replace_insn)(struct bpf_verifier_env *env, u32 off, ++ struct bpf_insn *insn); ++ int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); ++ /* program management callbacks */ ++ int (*prepare)(struct bpf_prog *prog); ++ int 
(*translate)(struct bpf_prog *prog); ++ void (*destroy)(struct bpf_prog *prog); + }; + +-struct bpf_prog_type_list { +- struct list_head list_node; +- const struct bpf_verifier_ops *ops; +- enum bpf_prog_type type; ++struct bpf_prog_offload { ++ struct bpf_prog *prog; ++ struct net_device *netdev; ++ struct bpf_offload_dev *offdev; ++ void *dev_priv; ++ struct list_head offloads; ++ bool dev_state; ++ bool opt_failed; ++ void *jited_image; ++ u32 jited_len; ++}; ++ ++enum bpf_cgroup_storage_type { ++ BPF_CGROUP_STORAGE_SHARED, ++ BPF_CGROUP_STORAGE_PERCPU, ++ __BPF_CGROUP_STORAGE_MAX ++}; ++ ++#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX ++ ++struct bpf_prog_stats { ++ u64 cnt; ++ u64 nsecs; ++ struct u64_stats_sync syncp; + }; + + struct bpf_prog_aux { + atomic_t refcnt; + u32 used_map_cnt; +- const struct bpf_verifier_ops *ops; ++ u32 max_ctx_offset; ++ u32 max_pkt_offset; ++ u32 max_tp_access; ++ u32 stack_depth; ++ u32 id; ++ u32 func_cnt; /* used by non-func prog as the number of func progs */ ++ u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ ++ bool verifier_zext; /* Zero extensions has been inserted by verifier. */ ++ bool offload_requested; ++ struct bpf_prog **func; ++ void *jit_data; /* JIT specific data. arch dependent */ ++ struct latch_tree_node ksym_tnode; ++ struct list_head ksym_lnode; ++ const struct bpf_prog_ops *ops; + struct bpf_map **used_maps; + struct bpf_prog *prog; + struct user_struct *user; ++ u64 load_time; /* ns since boottime */ ++ struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; ++ char name[BPF_OBJ_NAME_LEN]; ++#ifdef CONFIG_SECURITY ++ void *security; ++#endif ++ struct bpf_prog_offload *offload; ++ struct btf *btf; ++ struct bpf_func_info *func_info; ++ /* bpf_line_info loaded from userspace. linfo->insn_off ++ * has the xlated insn offset. ++ * Both the main and sub prog share the same linfo. ++ * The subprog can access its first linfo by ++ * using the linfo_idx. 
++ */ ++ struct bpf_line_info *linfo; ++ /* jited_linfo is the jited addr of the linfo. It has a ++ * one to one mapping to linfo: ++ * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. ++ * Both the main and sub prog share the same jited_linfo. ++ * The subprog can access its first jited_linfo by ++ * using the linfo_idx. ++ */ ++ void **jited_linfo; ++ u32 func_info_cnt; ++ u32 nr_linfo; ++ /* subprog can use linfo_idx to access its first linfo and ++ * jited_linfo. ++ * main prog always has linfo_idx == 0 ++ */ ++ u32 linfo_idx; ++ struct bpf_prog_stats __percpu *stats; + union { + struct work_struct work; + struct rcu_head rcu; +@@ -153,76 +437,688 @@ struct bpf_array { + union { + char value[0] __aligned(8); + void *ptrs[0] __aligned(8); ++ void __percpu *pptrs[0] __aligned(8); + }; + }; ++ ++#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ + #define MAX_TAIL_CALL_CNT 32 + +-u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5); +-void bpf_fd_array_map_clear(struct bpf_map *map); ++#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ ++ BPF_F_RDONLY_PROG | \ ++ BPF_F_WRONLY | \ ++ BPF_F_WRONLY_PROG) ++ ++#define BPF_MAP_CAN_READ BIT(0) ++#define BPF_MAP_CAN_WRITE BIT(1) ++ ++static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) ++{ ++ u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); ++ ++ /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is ++ * not possible. 
++ */ ++ if (access_flags & BPF_F_RDONLY_PROG) ++ return BPF_MAP_CAN_READ; ++ else if (access_flags & BPF_F_WRONLY_PROG) ++ return BPF_MAP_CAN_WRITE; ++ else ++ return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; ++} ++ ++static inline bool bpf_map_flags_access_ok(u32 access_flags) ++{ ++ return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != ++ (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); ++} ++ ++struct bpf_event_entry { ++ struct perf_event *event; ++ struct file *perf_file; ++ struct file *map_file; ++ struct rcu_head rcu; ++}; ++ + bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp); ++int bpf_prog_calc_tag(struct bpf_prog *fp); ++ + const struct bpf_func_proto *bpf_get_trace_printk_proto(void); + ++typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, ++ unsigned long off, unsigned long len); ++typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, ++ const struct bpf_insn *src, ++ struct bpf_insn *dst, ++ struct bpf_prog *prog, ++ u32 *target_size); ++ ++u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, ++ void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); ++ ++/* an array of programs to be executed under rcu_lock. ++ * ++ * Typical usage: ++ * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN); ++ * ++ * the structure returned by bpf_prog_array_alloc() should be populated ++ * with program pointers and the last pointer must be NULL. ++ * The user has to keep refcnt on the program and make sure the program ++ * is removed from the array before bpf_prog_put(). ++ * The 'struct bpf_prog_array *' should only be replaced with xchg() ++ * since other cpus are walking the array of pointers in parallel. 
++ */ ++struct bpf_prog_array_item { ++ struct bpf_prog *prog; ++ struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; ++}; ++ ++struct bpf_prog_array { ++ struct rcu_head rcu; ++ struct bpf_prog_array_item items[0]; ++}; ++ ++struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); ++void bpf_prog_array_free(struct bpf_prog_array *progs); ++int bpf_prog_array_length(struct bpf_prog_array *progs); ++bool bpf_prog_array_is_empty(struct bpf_prog_array *array); ++int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, ++ __u32 __user *prog_ids, u32 cnt); ++ ++void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, ++ struct bpf_prog *old_prog); ++int bpf_prog_array_copy_info(struct bpf_prog_array *array, ++ u32 *prog_ids, u32 request_cnt, ++ u32 *prog_cnt); ++int bpf_prog_array_copy(struct bpf_prog_array *old_array, ++ struct bpf_prog *exclude_prog, ++ struct bpf_prog *include_prog, ++ struct bpf_prog_array **new_array); ++ ++#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \ ++ ({ \ ++ struct bpf_prog_array_item *_item; \ ++ struct bpf_prog *_prog; \ ++ struct bpf_prog_array *_array; \ ++ u32 _ret = 1; \ ++ preempt_disable(); \ ++ rcu_read_lock(); \ ++ _array = rcu_dereference(array); \ ++ if (unlikely(check_non_null && !_array))\ ++ goto _out; \ ++ _item = &_array->items[0]; \ ++ while ((_prog = READ_ONCE(_item->prog))) { \ ++ if (set_cg_storage) \ ++ bpf_cgroup_storage_set(_item->cgroup_storage); \ ++ _ret &= func(_prog, ctx); \ ++ _item++; \ ++ } \ ++_out: \ ++ rcu_read_unlock(); \ ++ preempt_enable(); \ ++ _ret; \ ++ }) ++ ++/* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs ++ * so BPF programs can request cwr for TCP packets. ++ * ++ * Current cgroup skb programs can only return 0 or 1 (0 to drop the ++ * packet. 
This macro changes the behavior so the low order bit ++ * indicates whether the packet should be dropped (0) or not (1) ++ * and the next bit is a congestion notification bit. This could be ++ * used by TCP to call tcp_enter_cwr() ++ * ++ * Hence, new allowed return values of CGROUP EGRESS BPF programs are: ++ * 0: drop packet ++ * 1: keep packet ++ * 2: drop packet and cn ++ * 3: keep packet and cn ++ * ++ * This macro then converts it to one of the NET_XMIT or an error ++ * code that is then interpreted as drop packet (and no cn): ++ * 0: NET_XMIT_SUCCESS skb should be transmitted ++ * 1: NET_XMIT_DROP skb should be dropped and cn ++ * 2: NET_XMIT_CN skb should be transmitted and cn ++ * 3: -EPERM skb should be dropped ++ */ ++#define BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(array, ctx, func) \ ++ ({ \ ++ struct bpf_prog_array_item *_item; \ ++ struct bpf_prog *_prog; \ ++ struct bpf_prog_array *_array; \ ++ u32 ret; \ ++ u32 _ret = 1; \ ++ u32 _cn = 0; \ ++ preempt_disable(); \ ++ rcu_read_lock(); \ ++ _array = rcu_dereference(array); \ ++ _item = &_array->items[0]; \ ++ while ((_prog = READ_ONCE(_item->prog))) { \ ++ bpf_cgroup_storage_set(_item->cgroup_storage); \ ++ ret = func(_prog, ctx); \ ++ _ret &= (ret & 1); \ ++ _cn |= (ret & 2); \ ++ _item++; \ ++ } \ ++ rcu_read_unlock(); \ ++ preempt_enable(); \ ++ if (_ret) \ ++ _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ ++ else \ ++ _ret = (_cn ? 
NET_XMIT_DROP : -EPERM); \ ++ _ret; \ ++ }) ++ ++#define BPF_PROG_RUN_ARRAY(array, ctx, func) \ ++ __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true) ++ ++#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \ ++ __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false) ++ + #ifdef CONFIG_BPF_SYSCALL +-void bpf_register_prog_type(struct bpf_prog_type_list *tl); +-void bpf_register_map_type(struct bpf_map_type_list *tl); ++DECLARE_PER_CPU(int, bpf_prog_active); ++ ++extern const struct file_operations bpf_map_fops; ++extern const struct file_operations bpf_prog_fops; ++ ++#define BPF_PROG_TYPE(_id, _name) \ ++ extern const struct bpf_prog_ops _name ## _prog_ops; \ ++ extern const struct bpf_verifier_ops _name ## _verifier_ops; ++#define BPF_MAP_TYPE(_id, _ops) \ ++ extern const struct bpf_map_ops _ops; ++#include ++#undef BPF_PROG_TYPE ++#undef BPF_MAP_TYPE ++ ++extern const struct bpf_prog_ops bpf_offload_prog_ops; ++extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; ++extern const struct bpf_verifier_ops xdp_analyzer_ops; + + struct bpf_prog *bpf_prog_get(u32 ufd); +-struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog); ++struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, ++ bool attach_drv); ++struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); ++void bpf_prog_sub(struct bpf_prog *prog, int i); ++struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); ++struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); + void bpf_prog_put(struct bpf_prog *prog); +-void bpf_prog_put_rcu(struct bpf_prog *prog); ++int __bpf_prog_charge(struct user_struct *user, u32 pages); ++void __bpf_prog_uncharge(struct user_struct *user, u32 pages); ++ ++void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); ++void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); + + struct bpf_map *bpf_map_get_with_uref(u32 ufd); + struct bpf_map *__bpf_map_get(struct fd f); +-struct bpf_map 
*bpf_map_inc(struct bpf_map *map, bool uref); ++struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); ++struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map, ++ bool uref); + void bpf_map_put_with_uref(struct bpf_map *map); + void bpf_map_put(struct bpf_map *map); ++int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); ++void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); ++int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size); ++void bpf_map_charge_finish(struct bpf_map_memory *mem); ++void bpf_map_charge_move(struct bpf_map_memory *dst, ++ struct bpf_map_memory *src); ++void *bpf_map_area_alloc(u64 size, int numa_node); ++void bpf_map_area_free(void *base); ++void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); + + extern int sysctl_unprivileged_bpf_disabled; + +-int bpf_map_new_fd(struct bpf_map *map); ++int bpf_map_new_fd(struct bpf_map *map, int flags); + int bpf_prog_new_fd(struct bpf_prog *prog); + + int bpf_obj_pin_user(u32 ufd, const char __user *pathname); +-int bpf_obj_get_user(const char __user *pathname); ++int bpf_obj_get_user(const char __user *pathname, int flags); ++ ++int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); ++int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); ++int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, ++ u64 flags); ++int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, ++ u64 flags); ++ ++int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); ++ ++int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, ++ void *key, void *value, u64 map_flags); ++int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); ++int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, ++ void *key, void *value, u64 map_flags); ++int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); ++ ++int 
bpf_get_file_flag(int flags); ++int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size, ++ size_t actual_size); ++ ++/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and ++ * forced to use 'long' read/writes to try to atomically copy long counters. ++ * Best-effort only. No barriers here, since it _will_ race with concurrent ++ * updates from BPF programs. Called from bpf syscall and mostly used with ++ * size 8 or 16 bytes, so ask compiler to inline it. ++ */ ++static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) ++{ ++ const long *lsrc = src; ++ long *ldst = dst; ++ ++ size /= sizeof(long); ++ while (size--) ++ *ldst++ = *lsrc++; ++} + + /* verify correctness of eBPF program */ +-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); +-#else +-static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl) ++int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, ++ union bpf_attr __user *uattr); ++ ++#ifndef CONFIG_BPF_JIT_ALWAYS_ON ++void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); ++#endif ++ ++/* Map specifics */ ++struct xdp_buff; ++struct sk_buff; ++ ++struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key); ++struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key); ++void __dev_map_flush(struct bpf_map *map); ++int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, ++ struct net_device *dev_rx); ++int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, ++ struct bpf_prog *xdp_prog); ++ ++struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key); ++void __cpu_map_flush(struct bpf_map *map); ++int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, ++ struct net_device *dev_rx); ++ ++/* Return map's numa specified by userspace */ ++static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) + { ++ return (attr->map_flags & BPF_F_NUMA_NODE) ? 
++ attr->numa_node : NUMA_NO_NODE; + } + ++struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); ++int array_map_alloc_check(union bpf_attr *attr); ++ ++int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, ++ union bpf_attr __user *uattr); ++int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, ++ union bpf_attr __user *uattr); ++int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, ++ const union bpf_attr *kattr, ++ union bpf_attr __user *uattr); ++#else /* !CONFIG_BPF_SYSCALL */ + static inline struct bpf_prog *bpf_prog_get(u32 ufd) + { + return ERR_PTR(-EOPNOTSUPP); + } + ++static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, ++ enum bpf_prog_type type, ++ bool attach_drv) ++{ ++ return ERR_PTR(-EOPNOTSUPP); ++} ++ ++static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, ++ int i) ++{ ++ return ERR_PTR(-EOPNOTSUPP); ++} ++ ++static inline void bpf_prog_sub(struct bpf_prog *prog, int i) ++{ ++} ++ + static inline void bpf_prog_put(struct bpf_prog *prog) + { + } + +-static inline void bpf_prog_put_rcu(struct bpf_prog *prog) ++static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog) ++{ ++ return ERR_PTR(-EOPNOTSUPP); ++} ++ ++static inline struct bpf_prog *__must_check ++bpf_prog_inc_not_zero(struct bpf_prog *prog) ++{ ++ return ERR_PTR(-EOPNOTSUPP); ++} ++ ++static inline int __bpf_prog_charge(struct user_struct *user, u32 pages) ++{ ++ return 0; ++} ++ ++static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) ++{ ++} ++ ++static inline int bpf_obj_get_user(const char __user *pathname, int flags) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, ++ u32 key) ++{ ++ return NULL; ++} ++ ++static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map, ++ u32 key) ++{ ++ return NULL; ++} ++ ++static inline void 
__dev_map_flush(struct bpf_map *map) ++{ ++} ++ ++struct xdp_buff; ++struct bpf_dtab_netdev; ++ ++static inline ++int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, ++ struct net_device *dev_rx) ++{ ++ return 0; ++} ++ ++struct sk_buff; ++ ++static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, ++ struct sk_buff *skb, ++ struct bpf_prog *xdp_prog) ++{ ++ return 0; ++} ++ ++static inline ++struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key) ++{ ++ return NULL; ++} ++ ++static inline void __cpu_map_flush(struct bpf_map *map) ++{ ++} ++ ++static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, ++ struct xdp_buff *xdp, ++ struct net_device *dev_rx) ++{ ++ return 0; ++} ++ ++static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, ++ enum bpf_prog_type type) ++{ ++ return ERR_PTR(-EOPNOTSUPP); ++} ++ ++static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, ++ const union bpf_attr *kattr, ++ union bpf_attr __user *uattr) ++{ ++ return -ENOTSUPP; ++} ++ ++static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, ++ const union bpf_attr *kattr, ++ union bpf_attr __user *uattr) ++{ ++ return -ENOTSUPP; ++} ++ ++static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, ++ const union bpf_attr *kattr, ++ union bpf_attr __user *uattr) ++{ ++ return -ENOTSUPP; ++} ++#endif /* CONFIG_BPF_SYSCALL */ ++ ++static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, ++ enum bpf_prog_type type) ++{ ++ return bpf_prog_get_type_dev(ufd, type, false); ++} ++ ++bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); ++ ++#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) ++ ++static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) ++{ ++ return aux->offload_requested; ++} ++ ++static inline bool bpf_map_is_dev_bound(struct bpf_map *map) ++{ ++ return false; ++} ++ ++#else ++static inline int bpf_prog_offload_init(struct bpf_prog *prog, ++ 
union bpf_attr *attr) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) ++{ ++ return false; ++} ++ ++static inline bool bpf_map_is_dev_bound(struct bpf_map *map) ++{ ++ return false; ++} ++ ++#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ ++ ++#if defined(CONFIG_BPF_STREAM_PARSER) ++int sock_map_prog_update(struct bpf_map *map, struct bpf_prog *prog, ++ struct bpf_prog *old, u32 which); ++int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); ++int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); ++#else ++static inline int sock_map_prog_update(struct bpf_map *map, ++ struct bpf_prog *prog, ++ struct bpf_prog *old, u32 which) + { ++ return -EOPNOTSUPP; ++} ++ ++static inline int sock_map_get_from_fd(const union bpf_attr *attr, ++ struct bpf_prog *prog) ++{ ++ return -EINVAL; ++} ++ ++static inline int sock_map_prog_detach(const union bpf_attr *attr, ++ enum bpf_prog_type ptype) ++{ ++ return -EOPNOTSUPP; ++} ++#endif ++ ++#if defined(CONFIG_XDP_SOCKETS) ++struct xdp_sock; ++struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key); ++int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, ++ struct xdp_sock *xs); ++void __xsk_map_flush(struct bpf_map *map); ++#else ++struct xdp_sock; ++static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, ++ u32 key) ++{ ++ return NULL; ++} ++ ++static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, ++ struct xdp_sock *xs) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static inline void __xsk_map_flush(struct bpf_map *map) ++{ ++} ++#endif ++ ++#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) ++void bpf_sk_reuseport_detach(struct sock *sk); ++int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, ++ void *value); ++int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, ++ void *value, u64 map_flags); ++#else ++static inline void 
bpf_sk_reuseport_detach(struct sock *sk) ++{ ++} ++ ++#ifdef CONFIG_BPF_SYSCALL ++static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, ++ void *key, void *value) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, ++ void *key, void *value, ++ u64 map_flags) ++{ ++ return -EOPNOTSUPP; + } + #endif /* CONFIG_BPF_SYSCALL */ ++#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ + + /* verifier prototypes for helper functions called from eBPF programs */ + extern const struct bpf_func_proto bpf_map_lookup_elem_proto; + extern const struct bpf_func_proto bpf_map_update_elem_proto; + extern const struct bpf_func_proto bpf_map_delete_elem_proto; ++extern const struct bpf_func_proto bpf_map_push_elem_proto; ++extern const struct bpf_func_proto bpf_map_pop_elem_proto; ++extern const struct bpf_func_proto bpf_map_peek_elem_proto; + + extern const struct bpf_func_proto bpf_get_prandom_u32_proto; + extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; ++extern const struct bpf_func_proto bpf_get_numa_node_id_proto; + extern const struct bpf_func_proto bpf_tail_call_proto; + extern const struct bpf_func_proto bpf_ktime_get_ns_proto; + extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; + extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; + extern const struct bpf_func_proto bpf_get_current_comm_proto; +-extern const struct bpf_func_proto bpf_skb_vlan_push_proto; +-extern const struct bpf_func_proto bpf_skb_vlan_pop_proto; ++extern const struct bpf_func_proto bpf_get_stackid_proto; ++extern const struct bpf_func_proto bpf_get_stack_proto; ++extern const struct bpf_func_proto bpf_sock_map_update_proto; ++extern const struct bpf_func_proto bpf_sock_hash_update_proto; ++extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; ++extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; ++extern const struct bpf_func_proto 
bpf_msg_redirect_map_proto; ++extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; ++extern const struct bpf_func_proto bpf_sk_redirect_map_proto; ++extern const struct bpf_func_proto bpf_spin_lock_proto; ++extern const struct bpf_func_proto bpf_spin_unlock_proto; ++extern const struct bpf_func_proto bpf_get_local_storage_proto; ++extern const struct bpf_func_proto bpf_strtol_proto; ++extern const struct bpf_func_proto bpf_strtoul_proto; ++extern const struct bpf_func_proto bpf_tcp_sock_proto; + + /* Shared helpers among cBPF and eBPF. */ + void bpf_user_rnd_init_once(void); + u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); + ++#if defined(CONFIG_NET) ++bool bpf_sock_common_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ struct bpf_insn_access_aux *info); ++bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, ++ struct bpf_insn_access_aux *info); ++u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, ++ u32 *target_size); ++#else ++static inline bool bpf_sock_common_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++static inline bool bpf_sock_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, ++ u32 *target_size) ++{ ++ return 0; ++} ++#endif ++ ++#ifdef CONFIG_INET ++bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, ++ struct bpf_insn_access_aux *info); ++ ++u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, ++ u32 *target_size); ++ ++bool bpf_xdp_sock_is_valid_access(int 
off, int size, enum bpf_access_type type, ++ struct bpf_insn_access_aux *info); ++ ++u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, ++ u32 *target_size); ++#else ++static inline bool bpf_tcp_sock_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++ ++static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, ++ u32 *target_size) ++{ ++ return 0; ++} ++static inline bool bpf_xdp_sock_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++ ++static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, ++ u32 *target_size) ++{ ++ return 0; ++} ++#endif /* CONFIG_INET */ ++ + #endif /* _LINUX_BPF_H */ +--- /dev/null ++++ b/include/linux/bpf_trace.h +@@ -0,0 +1,7 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef __LINUX_BPF_TRACE_H__ ++#define __LINUX_BPF_TRACE_H__ ++ ++#include ++ ++#endif /* __LINUX_BPF_TRACE_H__ */ +--- /dev/null ++++ b/include/linux/bpf_types.h +@@ -0,0 +1,44 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* internal file - do not include directly */ ++ ++#ifdef CONFIG_NET ++BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter) ++BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act) ++BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act) ++BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in) ++BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out) ++BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit) ++BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local) ++BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops) ++BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb) ++BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg) 
++BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector) ++#endif ++#ifdef CONFIG_BPF_EVENTS ++BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe) ++BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint) ++BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event) ++BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint) ++BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable) ++#endif ++ ++BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops) ++BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops) ++BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops) ++BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops) ++BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops) ++BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops) ++BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops) ++BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops) ++BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops) ++#ifdef CONFIG_PERF_EVENTS ++BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_trace_map_ops) ++#endif ++BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) ++BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) ++#ifdef CONFIG_NET ++BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) ++BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, dev_map_hash_ops) ++#endif ++BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops) ++BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops) +--- /dev/null ++++ b/include/linux/bpf_verifier.h +@@ -0,0 +1,425 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com ++ */ ++#ifndef _LINUX_BPF_VERIFIER_H ++#define _LINUX_BPF_VERIFIER_H 1 ++ ++#include /* for enum bpf_reg_type */ ++#include /* for MAX_BPF_STACK */ ++#include ++ ++/* Maximum variable offset umax_value permitted when resolving memory accesses. ++ * In practice this is far bigger than any realistic pointer offset; this limit ++ * ensures that umax_value + (int)off + (int)size cannot overflow a u64. 
++ */ ++#define BPF_MAX_VAR_OFF (1 << 29) ++/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures ++ * that converting umax_value to int cannot overflow. ++ */ ++#define BPF_MAX_VAR_SIZ (1 << 29) ++ ++/* Liveness marks, used for registers and spilled-regs (in stack slots). ++ * Read marks propagate upwards until they find a write mark; they record that ++ * "one of this state's descendants read this reg" (and therefore the reg is ++ * relevant for states_equal() checks). ++ * Write marks collect downwards and do not propagate; they record that "the ++ * straight-line code that reached this state (from its parent) wrote this reg" ++ * (and therefore that reads propagated from this state or its descendants ++ * should not propagate to its parent). ++ * A state with a write mark can receive read marks; it just won't propagate ++ * them to its parent, since the write mark is a property, not of the state, ++ * but of the link between it and its parent. See mark_reg_read() and ++ * mark_stack_slot_read() in kernel/bpf/verifier.c. ++ */ ++enum bpf_reg_liveness { ++ REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ ++ REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */ ++ REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */ ++ REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64, ++ REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */ ++ REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */ ++}; ++ ++struct bpf_reg_state { ++ /* Ordering of fields matters. See states_equal() */ ++ enum bpf_reg_type type; ++ union { ++ /* valid when type == PTR_TO_PACKET */ ++ u16 range; ++ ++ /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | ++ * PTR_TO_MAP_VALUE_OR_NULL ++ */ ++ struct bpf_map *map_ptr; ++ ++ /* Max size from any of the above. 
*/ ++ unsigned long raw; ++ }; ++ /* Fixed part of pointer offset, pointer types only */ ++ s32 off; ++ /* For PTR_TO_PACKET, used to find other pointers with the same variable ++ * offset, so they can share range knowledge. ++ * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we ++ * came from, when one is tested for != NULL. ++ * For PTR_TO_SOCKET this is used to share which pointers retain the ++ * same reference to the socket, to determine proper reference freeing. ++ */ ++ u32 id; ++ /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned ++ * from a pointer-cast helper, bpf_sk_fullsock() and ++ * bpf_tcp_sock(). ++ * ++ * Consider the following where "sk" is a reference counted ++ * pointer returned from "sk = bpf_sk_lookup_tcp();": ++ * ++ * 1: sk = bpf_sk_lookup_tcp(); ++ * 2: if (!sk) { return 0; } ++ * 3: fullsock = bpf_sk_fullsock(sk); ++ * 4: if (!fullsock) { bpf_sk_release(sk); return 0; } ++ * 5: tp = bpf_tcp_sock(fullsock); ++ * 6: if (!tp) { bpf_sk_release(sk); return 0; } ++ * 7: bpf_sk_release(sk); ++ * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain ++ * ++ * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and ++ * "tp" ptr should be invalidated also. In order to do that, ++ * the reg holding "fullsock" and "sk" need to remember ++ * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id ++ * such that the verifier can reset all regs which have ++ * ref_obj_id matching the sk_reg->id. ++ * ++ * sk_reg->ref_obj_id is set to sk_reg->id at line 1. ++ * sk_reg->id will stay as NULL-marking purpose only. ++ * After NULL-marking is done, sk_reg->id can be reset to 0. ++ * ++ * After "fullsock = bpf_sk_fullsock(sk);" at line 3, ++ * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id. ++ * ++ * After "tp = bpf_tcp_sock(fullsock);" at line 5, ++ * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id ++ * which is the same as sk_reg->ref_obj_id. 
++ * ++ * From the verifier perspective, if sk, fullsock and tp ++ * are not NULL, they are the same ptr with different ++ * reg->type. In particular, bpf_sk_release(tp) is also ++ * allowed and has the same effect as bpf_sk_release(sk). ++ */ ++ u32 ref_obj_id; ++ /* For scalar types (SCALAR_VALUE), this represents our knowledge of ++ * the actual value. ++ * For pointer types, this represents the variable part of the offset ++ * from the pointed-to object, and is shared with all bpf_reg_states ++ * with the same id as us. ++ */ ++ struct tnum var_off; ++ /* Used to determine if any memory access using this register will ++ * result in a bad access. ++ * These refer to the same value as var_off, not necessarily the actual ++ * contents of the register. ++ */ ++ s64 smin_value; /* minimum possible (s64)value */ ++ s64 smax_value; /* maximum possible (s64)value */ ++ u64 umin_value; /* minimum possible (u64)value */ ++ u64 umax_value; /* maximum possible (u64)value */ ++ /* parentage chain for liveness checking */ ++ struct bpf_reg_state *parent; ++ /* Inside the callee two registers can be both PTR_TO_STACK like ++ * R1=fp-8 and R2=fp-8, but one of them points to this function stack ++ * while another to the caller's stack. To differentiate them 'frameno' ++ * is used which is an index in bpf_verifier_state->frame[] array ++ * pointing to bpf_func_state. ++ */ ++ u32 frameno; ++ /* Tracks subreg definition. The stored value is the insn_idx of the ++ * writing insn. This is safe because subreg_def is used before any insn ++ * patching which only happens after main verification finished. 
++ */ ++ s32 subreg_def; ++ enum bpf_reg_liveness live; ++ /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */ ++ bool precise; ++}; ++ ++enum bpf_stack_slot_type { ++ STACK_INVALID, /* nothing was stored in this stack slot */ ++ STACK_SPILL, /* register spilled into stack */ ++ STACK_MISC, /* BPF program wrote some data into this slot */ ++ STACK_ZERO, /* BPF program wrote constant zero */ ++}; ++ ++#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ ++ ++struct bpf_stack_state { ++ struct bpf_reg_state spilled_ptr; ++ u8 slot_type[BPF_REG_SIZE]; ++}; ++ ++struct bpf_reference_state { ++ /* Track each reference created with a unique id, even if the same ++ * instruction creates the reference multiple times (eg, via CALL). ++ */ ++ int id; ++ /* Instruction where the allocation of this reference occurred. This ++ * is used purely to inform the user of a reference leak. ++ */ ++ int insn_idx; ++}; ++ ++/* state of the program: ++ * type of all registers and stack info ++ */ ++struct bpf_func_state { ++ struct bpf_reg_state regs[MAX_BPF_REG]; ++ /* index of call instruction that called into this func */ ++ int callsite; ++ /* stack frame number of this function state from pov of ++ * enclosing bpf_verifier_state. ++ * 0 = main function, 1 = first callee. ++ */ ++ u32 frameno; ++ /* subprog number == index within subprog_stack_depth ++ * zero == main subprog ++ */ ++ u32 subprogno; ++ ++ /* The following fields should be last. 
See copy_func_state() */ ++ int acquired_refs; ++ struct bpf_reference_state *refs; ++ int allocated_stack; ++ struct bpf_stack_state *stack; ++}; ++ ++struct bpf_idx_pair { ++ u32 prev_idx; ++ u32 idx; ++}; ++ ++#define MAX_CALL_FRAMES 8 ++struct bpf_verifier_state { ++ /* call stack tracking */ ++ struct bpf_func_state *frame[MAX_CALL_FRAMES]; ++ struct bpf_verifier_state *parent; ++ /* ++ * 'branches' field is the number of branches left to explore: ++ * 0 - all possible paths from this state reached bpf_exit or ++ * were safely pruned ++ * 1 - at least one path is being explored. ++ * This state hasn't reached bpf_exit ++ * 2 - at least two paths are being explored. ++ * This state is an immediate parent of two children. ++ * One is fallthrough branch with branches==1 and another ++ * state is pushed into stack (to be explored later) also with ++ * branches==1. The parent of this state has branches==1. ++ * The verifier state tree connected via 'parent' pointer looks like: ++ * 1 ++ * 1 ++ * 2 -> 1 (first 'if' pushed into stack) ++ * 1 ++ * 2 -> 1 (second 'if' pushed into stack) ++ * 1 ++ * 1 ++ * 1 bpf_exit. ++ * ++ * Once do_check() reaches bpf_exit, it calls update_branch_counts() ++ * and the verifier state tree will look: ++ * 1 ++ * 1 ++ * 2 -> 1 (first 'if' pushed into stack) ++ * 1 ++ * 1 -> 1 (second 'if' pushed into stack) ++ * 0 ++ * 0 ++ * 0 bpf_exit. ++ * After pop_stack() the do_check() will resume at second 'if'. ++ * ++ * If is_state_visited() sees a state with branches > 0 it means ++ * there is a loop. If such state is exactly equal to the current state ++ * it's an infinite loop. Note states_equal() checks for states ++ * equvalency, so two states being 'states_equal' does not mean ++ * infinite loop. The exact comparison is provided by ++ * states_maybe_looping() function. It's a stronger pre-check and ++ * much faster than states_equal(). 
++ * ++ * This algorithm may not find all possible infinite loops or ++ * loop iteration count may be too high. ++ * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in. ++ */ ++ u32 branches; ++ u32 insn_idx; ++ u32 curframe; ++ u32 active_spin_lock; ++ bool speculative; ++ ++ /* first and last insn idx of this verifier state */ ++ u32 first_insn_idx; ++ u32 last_insn_idx; ++ /* jmp history recorded from first to last. ++ * backtracking is using it to go from last to first. ++ * For most states jmp_history_cnt is [0-3]. ++ * For loops can go up to ~40. ++ */ ++ struct bpf_idx_pair *jmp_history; ++ u32 jmp_history_cnt; ++}; ++ ++#define bpf_get_spilled_reg(slot, frame) \ ++ (((slot < frame->allocated_stack / BPF_REG_SIZE) && \ ++ (frame->stack[slot].slot_type[0] == STACK_SPILL)) \ ++ ? &frame->stack[slot].spilled_ptr : NULL) ++ ++/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */ ++#define bpf_for_each_spilled_reg(iter, frame, reg) \ ++ for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \ ++ iter < frame->allocated_stack / BPF_REG_SIZE; \ ++ iter++, reg = bpf_get_spilled_reg(iter, frame)) ++ ++/* linked list of verifier states used to prune search */ ++struct bpf_verifier_state_list { ++ struct bpf_verifier_state state; ++ struct bpf_verifier_state_list *next; ++ int miss_cnt, hit_cnt; ++}; ++ ++/* Possible states for alu_state member. 
*/ ++#define BPF_ALU_SANITIZE_SRC (1U << 0) ++#define BPF_ALU_SANITIZE_DST (1U << 1) ++#define BPF_ALU_NEG_VALUE (1U << 2) ++#define BPF_ALU_NON_POINTER (1U << 3) ++#define BPF_ALU_IMMEDIATE (1U << 4) ++#define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ ++ BPF_ALU_SANITIZE_DST) ++ ++struct bpf_insn_aux_data { ++ union { ++ enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ ++ unsigned long map_state; /* pointer/poison value for maps */ ++ s32 call_imm; /* saved imm field of call insn */ ++ u32 alu_limit; /* limit for add/sub register with pointer */ ++ struct { ++ u32 map_index; /* index into used_maps[] */ ++ u32 map_off; /* offset from value base address */ ++ }; ++ }; ++ int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ ++ int sanitize_stack_off; /* stack slot to be cleared */ ++ bool seen; /* this insn was processed by the verifier */ ++ bool zext_dst; /* this insn zero extends dst reg */ ++ u8 alu_state; /* used in combination with alu_limit */ ++ bool prune_point; ++ unsigned int orig_idx; /* original instruction index */ ++}; ++ ++#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ ++ ++#define BPF_VERIFIER_TMP_LOG_SIZE 1024 ++ ++struct bpf_verifier_log { ++ u32 level; ++ char kbuf[BPF_VERIFIER_TMP_LOG_SIZE]; ++ char __user *ubuf; ++ u32 len_used; ++ u32 len_total; ++}; ++ ++static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log) ++{ ++ return log->len_used >= log->len_total - 1; ++} ++ ++#define BPF_LOG_LEVEL1 1 ++#define BPF_LOG_LEVEL2 2 ++#define BPF_LOG_STATS 4 ++#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2) ++#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS) ++ ++static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log) ++{ ++ return log->level && log->ubuf && !bpf_verifier_log_full(log); ++} ++ ++#define BPF_MAX_SUBPROGS 256 ++ ++struct bpf_subprog_info { ++ u32 start; /* insn idx of function entry point */ ++ u32 linfo_idx; /* The idx 
to the main_prog->aux->linfo */ ++ u16 stack_depth; /* max. stack depth used by this function */ ++ bool has_tail_call; ++}; ++ ++/* single container for all structs ++ * one verifier_env per bpf_check() call ++ */ ++struct bpf_verifier_env { ++ u32 insn_idx; ++ u32 prev_insn_idx; ++ struct bpf_prog *prog; /* eBPF program being verified */ ++ const struct bpf_verifier_ops *ops; ++ struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ ++ int stack_size; /* number of states to be processed */ ++ bool strict_alignment; /* perform strict pointer alignment checks */ ++ bool test_state_freq; /* test verifier with different pruning frequency */ ++ struct bpf_verifier_state *cur_state; /* current verifier state */ ++ struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ ++ struct bpf_verifier_state_list *free_list; ++ struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ ++ u32 used_map_cnt; /* number of used maps */ ++ u32 id_gen; /* used to generate unique reg IDs */ ++ bool allow_ptr_leaks; ++ bool seen_direct_write; ++ struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ ++ const struct bpf_line_info *prev_linfo; ++ struct bpf_verifier_log log; ++ struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1]; ++ struct { ++ int *insn_state; ++ int *insn_stack; ++ int cur_stack; ++ } cfg; ++ u32 subprog_cnt; ++ /* number of instructions analyzed by the verifier */ ++ u32 prev_insn_processed, insn_processed; ++ /* number of jmps, calls, exits analyzed so far */ ++ u32 prev_jmps_processed, jmps_processed; ++ /* total verification time */ ++ u64 verification_time; ++ /* maximum number of verifier states kept in 'branching' instructions */ ++ u32 max_states_per_insn; ++ /* total number of allocated verifier states */ ++ u32 total_states; ++ /* some states are freed during program analysis. ++ * this is peak number of states. 
this number dominates kernel ++ * memory consumption during verification ++ */ ++ u32 peak_states; ++ /* longest register parentage chain walked for liveness marking */ ++ u32 longest_mark_read_walk; ++}; ++ ++__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log, ++ const char *fmt, va_list args); ++__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, ++ const char *fmt, ...); ++ ++static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env) ++{ ++ struct bpf_verifier_state *cur = env->cur_state; ++ ++ return cur->frame[cur->curframe]; ++} ++ ++static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) ++{ ++ return cur_func(env)->regs; ++} ++ ++int bpf_prog_offload_verifier_prep(struct bpf_prog *prog); ++int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, ++ int insn_idx, int prev_insn_idx); ++int bpf_prog_offload_finalize(struct bpf_verifier_env *env); ++void ++bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, ++ struct bpf_insn *insn); ++void ++bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); ++ ++#endif /* _LINUX_BPF_VERIFIER_H */ +--- /dev/null ++++ b/include/linux/btf.h +@@ -0,0 +1,72 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (c) 2018 Facebook */ ++ ++#ifndef _LINUX_BTF_H ++#define _LINUX_BTF_H 1 ++ ++#include ++ ++struct btf; ++struct btf_member; ++struct btf_type; ++union bpf_attr; ++ ++extern const struct file_operations btf_fops; ++ ++void btf_put(struct btf *btf); ++int btf_new_fd(const union bpf_attr *attr); ++struct btf *btf_get_by_fd(int fd); ++int btf_get_info_by_fd(const struct btf *btf, ++ const union bpf_attr *attr, ++ union bpf_attr __user *uattr); ++/* Figure out the size of a type_id. If type_id is a modifier ++ * (e.g. const), it will be resolved to find out the type with size. ++ * ++ * For example: ++ * In describing "const void *", type_id is "const" and "const" ++ * refers to "void *". 
The return type will be "void *". ++ * ++ * If type_id is a simple "int", then return type will be "int". ++ * ++ * @btf: struct btf object ++ * @type_id: Find out the size of type_id. The type_id of the return ++ * type is set to *type_id. ++ * @ret_size: It can be NULL. If not NULL, the size of the return ++ * type is set to *ret_size. ++ * Return: The btf_type (resolved to another type with size info if needed). ++ * NULL is returned if type_id itself does not have size info ++ * (e.g. void) or it cannot be resolved to another type that ++ * has size info. ++ * *type_id and *ret_size will not be changed in the ++ * NULL return case. ++ */ ++const struct btf_type *btf_type_id_size(const struct btf *btf, ++ u32 *type_id, ++ u32 *ret_size); ++void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, ++ struct seq_file *m); ++int btf_get_fd_by_id(u32 id); ++u32 btf_id(const struct btf *btf); ++bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, ++ const struct btf_member *m, ++ u32 expected_offset, u32 expected_size); ++int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t); ++bool btf_type_is_void(const struct btf_type *t); ++ ++#ifdef CONFIG_BPF_SYSCALL ++const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); ++const char *btf_name_by_offset(const struct btf *btf, u32 offset); ++#else ++static inline const struct btf_type *btf_type_by_id(const struct btf *btf, ++ u32 type_id) ++{ ++ return NULL; ++} ++static inline const char *btf_name_by_offset(const struct btf *btf, ++ u32 offset) ++{ ++ return NULL; ++} ++#endif ++ ++#endif +--- a/include/uapi/linux/bpf_common.h ++++ b/include/uapi/linux/bpf_common.h +@@ -1,3 +1,4 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + #ifndef _UAPI__LINUX_BPF_COMMON_H__ + #define _UAPI__LINUX_BPF_COMMON_H__ + +@@ -14,9 +15,10 @@ + + /* ld/ldx fields */ + #define BPF_SIZE(code) ((code) & 0x18) +-#define BPF_W 0x00 +-#define BPF_H 0x08 
+-#define BPF_B 0x10 ++#define BPF_W 0x00 /* 32-bit */ ++#define BPF_H 0x08 /* 16-bit */ ++#define BPF_B 0x10 /* 8-bit */ ++/* eBPF BPF_DW 0x18 64-bit */ + #define BPF_MODE(code) ((code) & 0xe0) + #define BPF_IMM 0x00 + #define BPF_ABS 0x20 +--- a/include/uapi/linux/bpf.h ++++ b/include/uapi/linux/bpf.h +@@ -1,3 +1,4 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ + /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com + * + * This program is free software; you can redistribute it and/or +@@ -13,10 +14,11 @@ + /* Extended instruction set based on top of classic BPF */ + + /* instruction classes */ ++#define BPF_JMP32 0x06 /* jmp mode in word width */ + #define BPF_ALU64 0x07 /* alu mode in double word width */ + + /* ld/ldx fields */ +-#define BPF_DW 0x18 /* double word */ ++#define BPF_DW 0x18 /* double word (64-bit) */ + #define BPF_XADD 0xc0 /* exclusive add */ + + /* alu/jmp fields */ +@@ -30,9 +32,14 @@ + #define BPF_FROM_LE BPF_TO_LE + #define BPF_FROM_BE BPF_TO_BE + ++/* jmp encodings */ + #define BPF_JNE 0x50 /* jump != */ ++#define BPF_JLT 0xa0 /* LT is unsigned, '<' */ ++#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ + #define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ + #define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ ++#define BPF_JSLT 0xc0 /* SLT is signed, '<' */ ++#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ + #define BPF_CALL 0x80 /* function call */ + #define BPF_EXIT 0x90 /* function return */ + +@@ -63,6 +70,17 @@ struct bpf_insn { + __s32 imm; /* signed immediate constant */ + }; + ++/* Key of an a BPF_MAP_TYPE_LPM_TRIE entry */ ++struct bpf_lpm_trie_key { ++ __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */ ++ __u8 data[0]; /* Arbitrary size */ ++}; ++ ++struct bpf_cgroup_storage_key { ++ __u64 cgroup_inode_id; /* cgroup inode id */ ++ __u32 attach_type; /* program attach type */ ++}; ++ + /* BPF syscall commands, see bpf(2) man-page for details. 
*/ + enum bpf_cmd { + BPF_MAP_CREATE, +@@ -73,6 +91,22 @@ enum bpf_cmd { + BPF_PROG_LOAD, + BPF_OBJ_PIN, + BPF_OBJ_GET, ++ BPF_PROG_ATTACH, ++ BPF_PROG_DETACH, ++ BPF_PROG_TEST_RUN, ++ BPF_PROG_GET_NEXT_ID, ++ BPF_MAP_GET_NEXT_ID, ++ BPF_PROG_GET_FD_BY_ID, ++ BPF_MAP_GET_FD_BY_ID, ++ BPF_OBJ_GET_INFO_BY_FD, ++ BPF_PROG_QUERY, ++ BPF_RAW_TRACEPOINT_OPEN, ++ BPF_BTF_LOAD, ++ BPF_BTF_GET_FD_BY_ID, ++ BPF_TASK_FD_QUERY, ++ BPF_MAP_LOOKUP_AND_DELETE_ELEM, ++ BPF_MAP_FREEZE, ++ BPF_BTF_GET_NEXT_ID, + }; + + enum bpf_map_type { +@@ -81,22 +115,256 @@ enum bpf_map_type { + BPF_MAP_TYPE_ARRAY, + BPF_MAP_TYPE_PROG_ARRAY, + BPF_MAP_TYPE_PERF_EVENT_ARRAY, ++ BPF_MAP_TYPE_PERCPU_HASH, ++ BPF_MAP_TYPE_PERCPU_ARRAY, ++ BPF_MAP_TYPE_STACK_TRACE, ++ BPF_MAP_TYPE_CGROUP_ARRAY, ++ BPF_MAP_TYPE_LRU_HASH, ++ BPF_MAP_TYPE_LRU_PERCPU_HASH, ++ BPF_MAP_TYPE_LPM_TRIE, ++ BPF_MAP_TYPE_ARRAY_OF_MAPS, ++ BPF_MAP_TYPE_HASH_OF_MAPS, ++ BPF_MAP_TYPE_DEVMAP, ++ BPF_MAP_TYPE_SOCKMAP, ++ BPF_MAP_TYPE_CPUMAP, ++ BPF_MAP_TYPE_XSKMAP, ++ BPF_MAP_TYPE_SOCKHASH, ++ BPF_MAP_TYPE_CGROUP_STORAGE, ++ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, ++ BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, ++ BPF_MAP_TYPE_QUEUE, ++ BPF_MAP_TYPE_STACK, ++ BPF_MAP_TYPE_SK_STORAGE, ++ BPF_MAP_TYPE_DEVMAP_HASH, + }; + ++/* Note that tracing related programs such as ++ * BPF_PROG_TYPE_{KPROBE,TRACEPOINT,PERF_EVENT,RAW_TRACEPOINT} ++ * are not subject to a stable API since kernel internal data ++ * structures can change from release to release and may ++ * therefore break existing tracing BPF programs. Tracing BPF ++ * programs correspond to /a/ specific kernel which is to be ++ * analyzed, and not /a/ specific kernel /and/ all future ones. 
++ */ + enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC, + BPF_PROG_TYPE_SOCKET_FILTER, + BPF_PROG_TYPE_KPROBE, + BPF_PROG_TYPE_SCHED_CLS, + BPF_PROG_TYPE_SCHED_ACT, ++ BPF_PROG_TYPE_TRACEPOINT, ++ BPF_PROG_TYPE_XDP, ++ BPF_PROG_TYPE_PERF_EVENT, ++ BPF_PROG_TYPE_CGROUP_SKB, ++ BPF_PROG_TYPE_CGROUP_SOCK, ++ BPF_PROG_TYPE_LWT_IN, ++ BPF_PROG_TYPE_LWT_OUT, ++ BPF_PROG_TYPE_LWT_XMIT, ++ BPF_PROG_TYPE_SOCK_OPS, ++ BPF_PROG_TYPE_SK_SKB, ++ BPF_PROG_TYPE_CGROUP_DEVICE, ++ BPF_PROG_TYPE_SK_MSG, ++ BPF_PROG_TYPE_RAW_TRACEPOINT, ++ BPF_PROG_TYPE_CGROUP_SOCK_ADDR, ++ BPF_PROG_TYPE_LWT_SEG6LOCAL, ++ BPF_PROG_TYPE_LIRC_MODE2, ++ BPF_PROG_TYPE_SK_REUSEPORT, ++ BPF_PROG_TYPE_FLOW_DISSECTOR, ++ BPF_PROG_TYPE_CGROUP_SYSCTL, ++ BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, ++ BPF_PROG_TYPE_CGROUP_SOCKOPT, + }; + ++enum bpf_attach_type { ++ BPF_CGROUP_INET_INGRESS, ++ BPF_CGROUP_INET_EGRESS, ++ BPF_CGROUP_INET_SOCK_CREATE, ++ BPF_CGROUP_SOCK_OPS, ++ BPF_SK_SKB_STREAM_PARSER, ++ BPF_SK_SKB_STREAM_VERDICT, ++ BPF_CGROUP_DEVICE, ++ BPF_SK_MSG_VERDICT, ++ BPF_CGROUP_INET4_BIND, ++ BPF_CGROUP_INET6_BIND, ++ BPF_CGROUP_INET4_CONNECT, ++ BPF_CGROUP_INET6_CONNECT, ++ BPF_CGROUP_INET4_POST_BIND, ++ BPF_CGROUP_INET6_POST_BIND, ++ BPF_CGROUP_UDP4_SENDMSG, ++ BPF_CGROUP_UDP6_SENDMSG, ++ BPF_LIRC_MODE2, ++ BPF_FLOW_DISSECTOR, ++ BPF_CGROUP_SYSCTL, ++ BPF_CGROUP_UDP4_RECVMSG, ++ BPF_CGROUP_UDP6_RECVMSG, ++ BPF_CGROUP_GETSOCKOPT, ++ BPF_CGROUP_SETSOCKOPT, ++ __MAX_BPF_ATTACH_TYPE ++}; ++ ++#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE ++ ++/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command ++ * ++ * NONE(default): No further bpf programs allowed in the subtree. ++ * ++ * BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program, ++ * the program in this cgroup yields to sub-cgroup program. ++ * ++ * BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program, ++ * that cgroup program gets run in addition to the program in this cgroup. 
++ * ++ * Only one program is allowed to be attached to a cgroup with ++ * NONE or BPF_F_ALLOW_OVERRIDE flag. ++ * Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will ++ * release old program and attach the new one. Attach flags has to match. ++ * ++ * Multiple programs are allowed to be attached to a cgroup with ++ * BPF_F_ALLOW_MULTI flag. They are executed in FIFO order ++ * (those that were attached first, run first) ++ * The programs of sub-cgroup are executed first, then programs of ++ * this cgroup and then programs of parent cgroup. ++ * When children program makes decision (like picking TCP CA or sock bind) ++ * parent program has a chance to override it. ++ * ++ * A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups. ++ * A cgroup with NONE doesn't allow any programs in sub-cgroups. ++ * Ex1: ++ * cgrp1 (MULTI progs A, B) -> ++ * cgrp2 (OVERRIDE prog C) -> ++ * cgrp3 (MULTI prog D) -> ++ * cgrp4 (OVERRIDE prog E) -> ++ * cgrp5 (NONE prog F) ++ * the event in cgrp5 triggers execution of F,D,A,B in that order. ++ * if prog F is detached, the execution is E,D,A,B ++ * if prog F and D are detached, the execution is E,A,B ++ * if prog F, E and D are detached, the execution is C,A,B ++ * ++ * All eligible programs are executed regardless of return code from ++ * earlier programs. ++ */ ++#define BPF_F_ALLOW_OVERRIDE (1U << 0) ++#define BPF_F_ALLOW_MULTI (1U << 1) ++ ++/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the ++ * verifier will perform strict alignment checking as if the kernel ++ * has been built with CONFIG_EFFICIENT_UNALIGNED_ACCESS not set, ++ * and NET_IP_ALIGN defined to 2. ++ */ ++#define BPF_F_STRICT_ALIGNMENT (1U << 0) ++ ++/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the ++ * verifier will allow any alignment whatsoever. 
On platforms ++ * with strict alignment requirements for loads ands stores (such ++ * as sparc and mips) the verifier validates that all loads and ++ * stores provably follow this requirement. This flag turns that ++ * checking and enforcement off. ++ * ++ * It is mostly used for testing when we want to validate the ++ * context and memory access aspects of the verifier, but because ++ * of an unaligned access the alignment check would trigger before ++ * the one we are interested in. ++ */ ++#define BPF_F_ANY_ALIGNMENT (1U << 1) ++ ++/* BPF_F_TEST_RND_HI32 is used in BPF_PROG_LOAD command for testing purpose. ++ * Verifier does sub-register def/use analysis and identifies instructions whose ++ * def only matters for low 32-bit, high 32-bit is never referenced later ++ * through implicit zero extension. Therefore verifier notifies JIT back-ends ++ * that it is safe to ignore clearing high 32-bit for these instructions. This ++ * saves some back-ends a lot of code-gen. However such optimization is not ++ * necessary on some arches, for example x86_64, arm64 etc, whose JIT back-ends ++ * hence hasn't used verifier's analysis result. But, we really want to have a ++ * way to be able to verify the correctness of the described optimization on ++ * x86_64 on which testsuites are frequently exercised. ++ * ++ * So, this flag is introduced. Once it is set, verifier will randomize high ++ * 32-bit for those instructions who has been identified as safe to ignore them. ++ * Then, if verifier is not doing correct analysis, such randomization will ++ * regress tests to expose bugs. ++ */ ++#define BPF_F_TEST_RND_HI32 (1U << 2) ++ ++/* The verifier internal test flag. 
Behavior is undefined */ ++#define BPF_F_TEST_STATE_FREQ (1U << 3) ++ ++/* When BPF ldimm64's insn[0].src_reg != 0 then this can have ++ * two extensions: ++ * ++ * insn[0].src_reg: BPF_PSEUDO_MAP_FD BPF_PSEUDO_MAP_VALUE ++ * insn[0].imm: map fd map fd ++ * insn[1].imm: 0 offset into value ++ * insn[0].off: 0 0 ++ * insn[1].off: 0 0 ++ * ldimm64 rewrite: address of map address of map[0]+offset ++ * verifier type: CONST_PTR_TO_MAP PTR_TO_MAP_VALUE ++ */ + #define BPF_PSEUDO_MAP_FD 1 ++#define BPF_PSEUDO_MAP_VALUE 2 ++ ++/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative ++ * offset to another bpf function ++ */ ++#define BPF_PSEUDO_CALL 1 + + /* flags for BPF_MAP_UPDATE_ELEM command */ + #define BPF_ANY 0 /* create new element or update existing */ + #define BPF_NOEXIST 1 /* create new element if it didn't exist */ + #define BPF_EXIST 2 /* update existing element */ ++#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */ ++ ++/* flags for BPF_MAP_CREATE command */ ++#define BPF_F_NO_PREALLOC (1U << 0) ++/* Instead of having one common LRU list in the ++ * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list ++ * which can scale and perform better. ++ * Note, the LRU nodes (including free nodes) cannot be moved ++ * across different LRU lists. ++ */ ++#define BPF_F_NO_COMMON_LRU (1U << 1) ++/* Specify numa node during map creation */ ++#define BPF_F_NUMA_NODE (1U << 2) ++ ++#define BPF_OBJ_NAME_LEN 16U ++ ++/* Flags for accessing BPF object from syscall side. */ ++#define BPF_F_RDONLY (1U << 3) ++#define BPF_F_WRONLY (1U << 4) ++ ++/* Flag for stack_map, store build_id+offset instead of pointer */ ++#define BPF_F_STACK_BUILD_ID (1U << 5) ++ ++/* Zero-initialize hash function seed. This should only be used for testing. */ ++#define BPF_F_ZERO_SEED (1U << 6) ++ ++/* Flags for accessing BPF object from program side. 
*/ ++#define BPF_F_RDONLY_PROG (1U << 7) ++#define BPF_F_WRONLY_PROG (1U << 8) ++ ++/* Clone map from listener for newly accepted socket */ ++#define BPF_F_CLONE (1U << 9) ++ ++/* flags for BPF_PROG_QUERY */ ++#define BPF_F_QUERY_EFFECTIVE (1U << 0) ++ ++enum bpf_stack_build_id_status { ++ /* user space need an empty entry to identify end of a trace */ ++ BPF_STACK_BUILD_ID_EMPTY = 0, ++ /* with valid build_id and offset */ ++ BPF_STACK_BUILD_ID_VALID = 1, ++ /* couldn't get build_id, fallback to ip */ ++ BPF_STACK_BUILD_ID_IP = 2, ++}; ++ ++#define BPF_BUILD_ID_SIZE 20 ++struct bpf_stack_build_id { ++ __s32 status; ++ unsigned char build_id[BPF_BUILD_ID_SIZE]; ++ union { ++ __u64 offset; ++ __u64 ip; ++ }; ++}; + + union bpf_attr { + struct { /* anonymous struct used by BPF_MAP_CREATE command */ +@@ -104,6 +372,18 @@ union bpf_attr { + __u32 key_size; /* size of key in bytes */ + __u32 value_size; /* size of value in bytes */ + __u32 max_entries; /* max number of entries in a map */ ++ __u32 map_flags; /* BPF_MAP_CREATE related ++ * flags defined above. ++ */ ++ __u32 inner_map_fd; /* fd pointing to the inner map */ ++ __u32 numa_node; /* numa node (effective only if ++ * BPF_F_NUMA_NODE is set). 
++ */ ++ char map_name[BPF_OBJ_NAME_LEN]; ++ __u32 map_ifindex; /* ifindex of netdev to create on */ ++ __u32 btf_fd; /* fd pointing to a BTF type data */ ++ __u32 btf_key_type_id; /* BTF type_id of the key */ ++ __u32 btf_value_type_id; /* BTF type_id of the value */ + }; + + struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */ +@@ -124,154 +404,2568 @@ union bpf_attr { + __u32 log_level; /* verbosity level of verifier */ + __u32 log_size; /* size of user buffer */ + __aligned_u64 log_buf; /* user supplied buffer */ +- __u32 kern_version; /* checked when prog_type=kprobe */ ++ __u32 kern_version; /* not used */ ++ __u32 prog_flags; ++ char prog_name[BPF_OBJ_NAME_LEN]; ++ __u32 prog_ifindex; /* ifindex of netdev to prep for */ ++ /* For some prog types expected attach type must be known at ++ * load time to verify attach type specific parts of prog ++ * (context accesses, allowed helpers, etc). ++ */ ++ __u32 expected_attach_type; ++ __u32 prog_btf_fd; /* fd pointing to BTF type data */ ++ __u32 func_info_rec_size; /* userspace bpf_func_info size */ ++ __aligned_u64 func_info; /* func info */ ++ __u32 func_info_cnt; /* number of bpf_func_info records */ ++ __u32 line_info_rec_size; /* userspace bpf_line_info size */ ++ __aligned_u64 line_info; /* line info */ ++ __u32 line_info_cnt; /* number of bpf_line_info records */ + }; + + struct { /* anonymous struct used by BPF_OBJ_* commands */ + __aligned_u64 pathname; + __u32 bpf_fd; ++ __u32 file_flags; ++ }; ++ ++ struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ ++ __u32 target_fd; /* container object to attach to */ ++ __u32 attach_bpf_fd; /* eBPF program to attach */ ++ __u32 attach_type; ++ __u32 attach_flags; ++ }; ++ ++ struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */ ++ __u32 prog_fd; ++ __u32 retval; ++ __u32 data_size_in; /* input: len of data_in */ ++ __u32 data_size_out; /* input/output: len of data_out ++ * returns ENOSPC if data_out ++ * is too small. 
++ */ ++ __aligned_u64 data_in; ++ __aligned_u64 data_out; ++ __u32 repeat; ++ __u32 duration; ++ __u32 ctx_size_in; /* input: len of ctx_in */ ++ __u32 ctx_size_out; /* input/output: len of ctx_out ++ * returns ENOSPC if ctx_out ++ * is too small. ++ */ ++ __aligned_u64 ctx_in; ++ __aligned_u64 ctx_out; ++ } test; ++ ++ struct { /* anonymous struct used by BPF_*_GET_*_ID */ ++ union { ++ __u32 start_id; ++ __u32 prog_id; ++ __u32 map_id; ++ __u32 btf_id; ++ }; ++ __u32 next_id; ++ __u32 open_flags; + }; ++ ++ struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ ++ __u32 bpf_fd; ++ __u32 info_len; ++ __aligned_u64 info; ++ } info; ++ ++ struct { /* anonymous struct used by BPF_PROG_QUERY command */ ++ __u32 target_fd; /* container object to query */ ++ __u32 attach_type; ++ __u32 query_flags; ++ __u32 attach_flags; ++ __aligned_u64 prog_ids; ++ __u32 prog_cnt; ++ } query; ++ ++ struct { ++ __u64 name; ++ __u32 prog_fd; ++ } raw_tracepoint; ++ ++ struct { /* anonymous struct for BPF_BTF_LOAD */ ++ __aligned_u64 btf; ++ __aligned_u64 btf_log_buf; ++ __u32 btf_size; ++ __u32 btf_log_size; ++ __u32 btf_log_level; ++ }; ++ ++ struct { ++ __u32 pid; /* input: pid */ ++ __u32 fd; /* input: fd */ ++ __u32 flags; /* input: flags */ ++ __u32 buf_len; /* input/output: buf len */ ++ __aligned_u64 buf; /* input/output: ++ * tp_name for tracepoint ++ * symbol for kprobe ++ * filename for uprobe ++ */ ++ __u32 prog_id; /* output: prod_id */ ++ __u32 fd_type; /* output: BPF_FD_TYPE_* */ ++ __u64 probe_offset; /* output: probe_offset */ ++ __u64 probe_addr; /* output: probe_addr */ ++ } task_fd_query; + } __attribute__((aligned(8))); + ++/* The description below is an attempt at providing documentation to eBPF ++ * developers about the multiple available eBPF helper functions. It can be ++ * parsed and used to produce a manual page. 
The workflow is the following, ++ * and requires the rst2man utility: ++ * ++ * $ ./scripts/bpf_helpers_doc.py \ ++ * --filename include/uapi/linux/bpf.h > /tmp/bpf-helpers.rst ++ * $ rst2man /tmp/bpf-helpers.rst > /tmp/bpf-helpers.7 ++ * $ man /tmp/bpf-helpers.7 ++ * ++ * Note that in order to produce this external documentation, some RST ++ * formatting is used in the descriptions to get "bold" and "italics" in ++ * manual pages. Also note that the few trailing white spaces are ++ * intentional, removing them would break paragraphs for rst2man. ++ * ++ * Start of BPF helper function descriptions: ++ * ++ * void *bpf_map_lookup_elem(struct bpf_map *map, const void *key) ++ * Description ++ * Perform a lookup in *map* for an entry associated to *key*. ++ * Return ++ * Map value associated to *key*, or **NULL** if no entry was ++ * found. ++ * ++ * int bpf_map_update_elem(struct bpf_map *map, const void *key, const void *value, u64 flags) ++ * Description ++ * Add or update the value of the entry associated to *key* in ++ * *map* with *value*. *flags* is one of: ++ * ++ * **BPF_NOEXIST** ++ * The entry for *key* must not exist in the map. ++ * **BPF_EXIST** ++ * The entry for *key* must already exist in the map. ++ * **BPF_ANY** ++ * No condition on the existence of the entry for *key*. ++ * ++ * Flag value **BPF_NOEXIST** cannot be used for maps of types ++ * **BPF_MAP_TYPE_ARRAY** or **BPF_MAP_TYPE_PERCPU_ARRAY** (all ++ * elements always exist), the helper would return an error. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_map_delete_elem(struct bpf_map *map, const void *key) ++ * Description ++ * Delete entry with *key* from *map*. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_probe_read(void *dst, u32 size, const void *src) ++ * Description ++ * For tracing programs, safely attempt to read *size* bytes from ++ * address *src* and store the data in *dst*. 
++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * u64 bpf_ktime_get_ns(void) ++ * Description ++ * Return the time elapsed since system boot, in nanoseconds. ++ * Return ++ * Current *ktime*. ++ * ++ * int bpf_trace_printk(const char *fmt, u32 fmt_size, ...) ++ * Description ++ * This helper is a "printk()-like" facility for debugging. It ++ * prints a message defined by format *fmt* (of size *fmt_size*) ++ * to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if ++ * available. It can take up to three additional **u64** ++ * arguments (as an eBPF helpers, the total number of arguments is ++ * limited to five). ++ * ++ * Each time the helper is called, it appends a line to the trace. ++ * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is ++ * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. ++ * The format of the trace is customizable, and the exact output ++ * one will get depends on the options set in ++ * *\/sys/kernel/debug/tracing/trace_options* (see also the ++ * *README* file under the same directory). However, it usually ++ * defaults to something like: ++ * ++ * :: ++ * ++ * telnet-470 [001] .N.. 419421.045894: 0x00000001: ++ * ++ * In the above: ++ * ++ * * ``telnet`` is the name of the current task. ++ * * ``470`` is the PID of the current task. ++ * * ``001`` is the CPU number on which the task is ++ * running. ++ * * In ``.N..``, each character refers to a set of ++ * options (whether irqs are enabled, scheduling ++ * options, whether hard/softirqs are running, level of ++ * preempt_disabled respectively). **N** means that ++ * **TIF_NEED_RESCHED** and **PREEMPT_NEED_RESCHED** ++ * are set. ++ * * ``419421.045894`` is a timestamp. ++ * * ``0x00000001`` is a fake value used by BPF for the ++ * instruction pointer register. ++ * * ```` is the message formatted with ++ * *fmt*. ++ * ++ * The conversion specifiers supported by *fmt* are similar, but ++ * more limited than for printk(). 
They are **%d**, **%i**, ++ * **%u**, **%x**, **%ld**, **%li**, **%lu**, **%lx**, **%lld**, ++ * **%lli**, **%llu**, **%llx**, **%p**, **%s**. No modifier (size ++ * of field, padding with zeroes, etc.) is available, and the ++ * helper will return **-EINVAL** (but print nothing) if it ++ * encounters an unknown specifier. ++ * ++ * Also, note that **bpf_trace_printk**\ () is slow, and should ++ * only be used for debugging purposes. For this reason, a notice ++ * bloc (spanning several lines) is printed to kernel logs and ++ * states that the helper should not be used "for production use" ++ * the first time this helper is used (or more precisely, when ++ * **trace_printk**\ () buffers are allocated). For passing values ++ * to user space, perf events should be preferred. ++ * Return ++ * The number of bytes written to the buffer, or a negative error ++ * in case of failure. ++ * ++ * u32 bpf_get_prandom_u32(void) ++ * Description ++ * Get a pseudo-random number. ++ * ++ * From a security point of view, this helper uses its own ++ * pseudo-random internal state, and cannot be used to infer the ++ * seed of other random functions in the kernel. However, it is ++ * essential to note that the generator used by the helper is not ++ * cryptographically secure. ++ * Return ++ * A random 32-bit unsigned value. ++ * ++ * u32 bpf_get_smp_processor_id(void) ++ * Description ++ * Get the SMP (symmetric multiprocessing) processor id. Note that ++ * all programs run with preemption disabled, which means that the ++ * SMP processor id is stable during all the execution of the ++ * program. ++ * Return ++ * The SMP id of the processor running the program. ++ * ++ * int bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) ++ * Description ++ * Store *len* bytes from address *from* into the packet ++ * associated to *skb*, at *offset*. 
*flags* are a combination of ++ * **BPF_F_RECOMPUTE_CSUM** (automatically recompute the ++ * checksum for the packet after storing the bytes) and ++ * **BPF_F_INVALIDATE_HASH** (set *skb*\ **->hash**, *skb*\ ++ * **->swhash** and *skb*\ **->l4hash** to 0). ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_l3_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 size) ++ * Description ++ * Recompute the layer 3 (e.g. IP) checksum for the packet ++ * associated to *skb*. Computation is incremental, so the helper ++ * must know the former value of the header field that was ++ * modified (*from*), the new value of this field (*to*), and the ++ * number of bytes (2 or 4) for this field, stored in *size*. ++ * Alternatively, it is possible to store the difference between ++ * the previous and the new values of the header field in *to*, by ++ * setting *from* and *size* to 0. For both methods, *offset* ++ * indicates the location of the IP checksum within the packet. ++ * ++ * This helper works in combination with **bpf_csum_diff**\ (), ++ * which does not update the checksum in-place, but offers more ++ * flexibility and can handle sizes larger than 2 or 4 for the ++ * checksum to update. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. 
++ * ++ * int bpf_l4_csum_replace(struct sk_buff *skb, u32 offset, u64 from, u64 to, u64 flags) ++ * Description ++ * Recompute the layer 4 (e.g. TCP, UDP or ICMP) checksum for the ++ * packet associated to *skb*. Computation is incremental, so the ++ * helper must know the former value of the header field that was ++ * modified (*from*), the new value of this field (*to*), and the ++ * number of bytes (2 or 4) for this field, stored on the lowest ++ * four bits of *flags*. Alternatively, it is possible to store ++ * the difference between the previous and the new values of the ++ * header field in *to*, by setting *from* and the four lowest ++ * bits of *flags* to 0. For both methods, *offset* indicates the ++ * location of the IP checksum within the packet. In addition to ++ * the size of the field, *flags* can be added (bitwise OR) actual ++ * flags. With **BPF_F_MARK_MANGLED_0**, a null checksum is left ++ * untouched (unless **BPF_F_MARK_ENFORCE** is added as well), and ++ * for updates resulting in a null checksum the value is set to ++ * **CSUM_MANGLED_0** instead. Flag **BPF_F_PSEUDO_HDR** indicates ++ * the checksum is to be computed against a pseudo-header. ++ * ++ * This helper works in combination with **bpf_csum_diff**\ (), ++ * which does not update the checksum in-place, but offers more ++ * flexibility and can handle sizes larger than 2 or 4 for the ++ * checksum to update. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) ++ * Description ++ * This special helper is used to trigger a "tail call", or in ++ * other words, to jump into another eBPF program. 
The same stack ++ * frame is used (but values on stack and in registers for the ++ * caller are not accessible to the callee). This mechanism allows ++ * for program chaining, either for raising the maximum number of ++ * available eBPF instructions, or to execute given programs in ++ * conditional blocks. For security reasons, there is an upper ++ * limit to the number of successive tail calls that can be ++ * performed. ++ * ++ * Upon call of this helper, the program attempts to jump into a ++ * program referenced at index *index* in *prog_array_map*, a ++ * special map of type **BPF_MAP_TYPE_PROG_ARRAY**, and passes ++ * *ctx*, a pointer to the context. ++ * ++ * If the call succeeds, the kernel immediately runs the first ++ * instruction of the new program. This is not a function call, ++ * and it never returns to the previous program. If the call ++ * fails, then the helper has no effect, and the caller continues ++ * to run its subsequent instructions. A call can fail if the ++ * destination program for the jump does not exist (i.e. *index* ++ * is superior to the number of entries in *prog_array_map*), or ++ * if the maximum number of tail calls has been reached for this ++ * chain of programs. This limit is defined in the kernel by the ++ * macro **MAX_TAIL_CALL_CNT** (not accessible to user space), ++ * which is currently set to 32. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_clone_redirect(struct sk_buff *skb, u32 ifindex, u64 flags) ++ * Description ++ * Clone and redirect the packet associated to *skb* to another ++ * net device of index *ifindex*. Both ingress and egress ++ * interfaces can be used for redirection. The **BPF_F_INGRESS** ++ * value in *flags* is used to make the distinction (ingress path ++ * is selected if the flag is present, egress path otherwise). ++ * This is the only flag supported for now. 
++ * ++ * In comparison with **bpf_redirect**\ () helper, ++ * **bpf_clone_redirect**\ () has the associated cost of ++ * duplicating the packet buffer, but this can be executed out of ++ * the eBPF program. Conversely, **bpf_redirect**\ () is more ++ * efficient, but it is handled through an action code where the ++ * redirection happens only after the eBPF program has returned. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * u64 bpf_get_current_pid_tgid(void) ++ * Return ++ * A 64-bit integer containing the current tgid and pid, and ++ * created as such: ++ * *current_task*\ **->tgid << 32 \|** ++ * *current_task*\ **->pid**. ++ * ++ * u64 bpf_get_current_uid_gid(void) ++ * Return ++ * A 64-bit integer containing the current GID and UID, and ++ * created as such: *current_gid* **<< 32 \|** *current_uid*. ++ * ++ * int bpf_get_current_comm(char *buf, u32 size_of_buf) ++ * Description ++ * Copy the **comm** attribute of the current task into *buf* of ++ * *size_of_buf*. The **comm** attribute contains the name of ++ * the executable (excluding the path) for the current task. The ++ * *size_of_buf* must be strictly positive. On success, the ++ * helper makes sure that the *buf* is NUL-terminated. On failure, ++ * it is filled with zeroes. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * u32 bpf_get_cgroup_classid(struct sk_buff *skb) ++ * Description ++ * Retrieve the classid for the current task, i.e. for the net_cls ++ * cgroup to which *skb* belongs. ++ * ++ * This helper can be used on TC egress path, but not on ingress. 
++ * ++ * The net_cls cgroup provides an interface to tag network packets ++ * based on a user-provided identifier for all traffic coming from ++ * the tasks belonging to the related cgroup. See also the related ++ * kernel documentation, available from the Linux sources in file ++ * *Documentation/admin-guide/cgroup-v1/net_cls.rst*. ++ * ++ * The Linux kernel has two versions for cgroups: there are ++ * cgroups v1 and cgroups v2. Both are available to users, who can ++ * use a mixture of them, but note that the net_cls cgroup is for ++ * cgroup v1 only. This makes it incompatible with BPF programs ++ * run on cgroups, which is a cgroup-v2-only feature (a socket can ++ * only hold data for one version of cgroups at a time). ++ * ++ * This helper is only available is the kernel was compiled with ++ * the **CONFIG_CGROUP_NET_CLASSID** configuration option set to ++ * "**y**" or to "**m**". ++ * Return ++ * The classid, or 0 for the default unconfigured classid. ++ * ++ * int bpf_skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) ++ * Description ++ * Push a *vlan_tci* (VLAN tag control information) of protocol ++ * *vlan_proto* to the packet associated to *skb*, then update ++ * the checksum. Note that if *vlan_proto* is different from ++ * **ETH_P_8021Q** and **ETH_P_8021AD**, it is considered to ++ * be **ETH_P_8021Q**. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_skb_vlan_pop(struct sk_buff *skb) ++ * Description ++ * Pop a VLAN header from the packet associated to *skb*. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. 
Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_skb_get_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) ++ * Description ++ * Get tunnel metadata. This helper takes a pointer *key* to an ++ * empty **struct bpf_tunnel_key** of **size**, that will be ++ * filled with tunnel metadata for the packet associated to *skb*. ++ * The *flags* can be set to **BPF_F_TUNINFO_IPV6**, which ++ * indicates that the tunnel is based on IPv6 protocol instead of ++ * IPv4. ++ * ++ * The **struct bpf_tunnel_key** is an object that generalizes the ++ * principal parameters used by various tunneling protocols into a ++ * single struct. This way, it can be used to easily make a ++ * decision based on the contents of the encapsulation header, ++ * "summarized" in this struct. In particular, it holds the IP ++ * address of the remote end (IPv4 or IPv6, depending on the case) ++ * in *key*\ **->remote_ipv4** or *key*\ **->remote_ipv6**. Also, ++ * this struct exposes the *key*\ **->tunnel_id**, which is ++ * generally mapped to a VNI (Virtual Network Identifier), making ++ * it programmable together with the **bpf_skb_set_tunnel_key**\ ++ * () helper. 
++ * ++ * Let's imagine that the following code is part of a program ++ * attached to the TC ingress interface, on one end of a GRE ++ * tunnel, and is supposed to filter out all messages coming from ++ * remote ends with IPv4 address other than 10.0.0.1: ++ * ++ * :: ++ * ++ * int ret; ++ * struct bpf_tunnel_key key = {}; ++ * ++ * ret = bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0); ++ * if (ret < 0) ++ * return TC_ACT_SHOT; // drop packet ++ * ++ * if (key.remote_ipv4 != 0x0a000001) ++ * return TC_ACT_SHOT; // drop packet ++ * ++ * return TC_ACT_OK; // accept packet ++ * ++ * This interface can also be used with all encapsulation devices ++ * that can operate in "collect metadata" mode: instead of having ++ * one network device per specific configuration, the "collect ++ * metadata" mode only requires a single device where the ++ * configuration can be extracted from this helper. ++ * ++ * This can be used together with various tunnels such as VXLan, ++ * Geneve, GRE or IP in IP (IPIP). ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_skb_set_tunnel_key(struct sk_buff *skb, struct bpf_tunnel_key *key, u32 size, u64 flags) ++ * Description ++ * Populate tunnel metadata for packet associated to *skb.* The ++ * tunnel metadata is set to the contents of *key*, of *size*. The ++ * *flags* can be set to a combination of the following values: ++ * ++ * **BPF_F_TUNINFO_IPV6** ++ * Indicate that the tunnel is based on IPv6 protocol ++ * instead of IPv4. ++ * **BPF_F_ZERO_CSUM_TX** ++ * For IPv4 packets, add a flag to tunnel metadata ++ * indicating that checksum computation should be skipped ++ * and checksum set to zeroes. ++ * **BPF_F_DONT_FRAGMENT** ++ * Add a flag to tunnel metadata indicating that the ++ * packet should not be fragmented. ++ * **BPF_F_SEQ_NUMBER** ++ * Add a flag to tunnel metadata indicating that a ++ * sequence number should be added to tunnel header before ++ * sending the packet. 
This flag was added for GRE ++ * encapsulation, but might be used with other protocols ++ * as well in the future. ++ * ++ * Here is a typical usage on the transmit path: ++ * ++ * :: ++ * ++ * struct bpf_tunnel_key key; ++ * populate key ... ++ * bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0); ++ * bpf_clone_redirect(skb, vxlan_dev_ifindex, 0); ++ * ++ * See also the description of the **bpf_skb_get_tunnel_key**\ () ++ * helper for additional information. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * u64 bpf_perf_event_read(struct bpf_map *map, u64 flags) ++ * Description ++ * Read the value of a perf event counter. This helper relies on a ++ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of ++ * the perf event counter is selected when *map* is updated with ++ * perf event file descriptors. The *map* is an array whose size ++ * is the number of available CPUs, and each cell contains a value ++ * relative to one CPU. The value to retrieve is indicated by ++ * *flags*, that contains the index of the CPU to look up, masked ++ * with **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to ++ * **BPF_F_CURRENT_CPU** to indicate that the value for the ++ * current CPU should be retrieved. ++ * ++ * Note that before Linux 4.13, only hardware perf event can be ++ * retrieved. ++ * ++ * Also, be aware that the newer helper ++ * **bpf_perf_event_read_value**\ () is recommended over ++ * **bpf_perf_event_read**\ () in general. The latter has some ABI ++ * quirks where error and counter value are used as a return code ++ * (which is wrong to do since ranges may overlap). This issue is ++ * fixed with **bpf_perf_event_read_value**\ (), which at the same ++ * time provides more features over the **bpf_perf_event_read**\ ++ * () interface. Please refer to the description of ++ * **bpf_perf_event_read_value**\ () for details. 
++ * Return ++ * The value of the perf event counter read from the map, or a ++ * negative error code in case of failure. ++ * ++ * int bpf_redirect(u32 ifindex, u64 flags) ++ * Description ++ * Redirect the packet to another net device of index *ifindex*. ++ * This helper is somewhat similar to **bpf_clone_redirect**\ ++ * (), except that the packet is not cloned, which provides ++ * increased performance. ++ * ++ * Except for XDP, both ingress and egress interfaces can be used ++ * for redirection. The **BPF_F_INGRESS** value in *flags* is used ++ * to make the distinction (ingress path is selected if the flag ++ * is present, egress path otherwise). Currently, XDP only ++ * supports redirection to the egress interface, and accepts no ++ * flag at all. ++ * ++ * The same effect can be attained with the more generic ++ * **bpf_redirect_map**\ (), which requires specific maps to be ++ * used but offers better performance. ++ * Return ++ * For XDP, the helper returns **XDP_REDIRECT** on success or ++ * **XDP_ABORTED** on error. For other program types, the values ++ * are **TC_ACT_REDIRECT** on success or **TC_ACT_SHOT** on ++ * error. ++ * ++ * u32 bpf_get_route_realm(struct sk_buff *skb) ++ * Description ++ * Retrieve the realm or the route, that is to say the ++ * **tclassid** field of the destination for the *skb*. The ++ * indentifier retrieved is a user-provided tag, similar to the ++ * one used with the net_cls cgroup (see description for ++ * **bpf_get_cgroup_classid**\ () helper), but here this tag is ++ * held by a route (a destination entry), not by a task. ++ * ++ * Retrieving this identifier works with the clsact TC egress hook ++ * (see also **tc-bpf(8)**), or alternatively on conventional ++ * classful egress qdiscs, but not on TC ingress path. In case of ++ * clsact TC egress hook, this has the advantage that, internally, ++ * the destination entry has not been dropped yet in the transmit ++ * path. 
Therefore, the destination entry does not need to be ++ * artificially held via **netif_keep_dst**\ () for a classful ++ * qdisc until the *skb* is freed. ++ * ++ * This helper is available only if the kernel was compiled with ++ * **CONFIG_IP_ROUTE_CLASSID** configuration option. ++ * Return ++ * The realm of the route for the packet associated to *skb*, or 0 ++ * if none was found. ++ * ++ * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) ++ * Description ++ * Write raw *data* blob into a special BPF perf event held by ++ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf ++ * event must have the following attributes: **PERF_SAMPLE_RAW** ++ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and ++ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**. ++ * ++ * The *flags* are used to indicate the index in *map* for which ++ * the value must be put, masked with **BPF_F_INDEX_MASK**. ++ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU** ++ * to indicate that the index of the current CPU core should be ++ * used. ++ * ++ * The value to write, of *size*, is passed through eBPF stack and ++ * pointed by *data*. ++ * ++ * The context of the program *ctx* needs also be passed to the ++ * helper. ++ * ++ * On user space, a program willing to read the values needs to ++ * call **perf_event_open**\ () on the perf event (either for ++ * one or for all CPUs) and to store the file descriptor into the ++ * *map*. This must be done before the eBPF program can send data ++ * into it. An example is available in file ++ * *samples/bpf/trace_output_user.c* in the Linux kernel source ++ * tree (the eBPF program counterpart is in ++ * *samples/bpf/trace_output_kern.c*). ++ * ++ * **bpf_perf_event_output**\ () achieves better performance ++ * than **bpf_trace_printk**\ () for sharing data with user ++ * space, and is much better suitable for streaming data from eBPF ++ * programs. 
++ * ++ * Note that this helper is not restricted to tracing use cases ++ * and can be used with programs attached to TC or XDP as well, ++ * where it allows for passing data to user space listeners. Data ++ * can be: ++ * ++ * * Only custom structs, ++ * * Only the packet payload, or ++ * * A combination of both. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len) ++ * Description ++ * This helper was provided as an easy way to load data from a ++ * packet. It can be used to load *len* bytes from *offset* from ++ * the packet associated to *skb*, into the buffer pointed by ++ * *to*. ++ * ++ * Since Linux 4.7, usage of this helper has mostly been replaced ++ * by "direct packet access", enabling packet data to be ++ * manipulated with *skb*\ **->data** and *skb*\ **->data_end** ++ * pointing respectively to the first byte of packet data and to ++ * the byte after the last byte of packet data. However, it ++ * remains useful if one wishes to read large quantities of data ++ * at once from a packet into the eBPF stack. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags) ++ * Description ++ * Walk a user or a kernel stack and return its id. To achieve ++ * this, the helper needs *ctx*, which is a pointer to the context ++ * on which the tracing program is executed, and a pointer to a ++ * *map* of type **BPF_MAP_TYPE_STACK_TRACE**. ++ * ++ * The last argument, *flags*, holds the number of stack frames to ++ * skip (from 0 to 255), masked with ++ * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set ++ * a combination of the following flags: ++ * ++ * **BPF_F_USER_STACK** ++ * Collect a user space stack instead of a kernel stack. ++ * **BPF_F_FAST_STACK_CMP** ++ * Compare stacks by hash only. 
++ * **BPF_F_REUSE_STACKID** ++ * If two different stacks hash into the same *stackid*, ++ * discard the old one. ++ * ++ * The stack id retrieved is a 32 bit long integer handle which ++ * can be further combined with other data (including other stack ++ * ids) and used as a key into maps. This can be useful for ++ * generating a variety of graphs (such as flame graphs or off-cpu ++ * graphs). ++ * ++ * For walking a stack, this helper is an improvement over ++ * **bpf_probe_read**\ (), which can be used with unrolled loops ++ * but is not efficient and consumes a lot of eBPF instructions. ++ * Instead, **bpf_get_stackid**\ () can collect up to ++ * **PERF_MAX_STACK_DEPTH** both kernel and user frames. Note that ++ * this limit can be controlled with the **sysctl** program, and ++ * that it should be manually increased in order to profile long ++ * user stacks (such as stacks for Java programs). To do so, use: ++ * ++ * :: ++ * ++ * # sysctl kernel.perf_event_max_stack= ++ * Return ++ * The positive or null stack id on success, or a negative error ++ * in case of failure. ++ * ++ * s64 bpf_csum_diff(__be32 *from, u32 from_size, __be32 *to, u32 to_size, __wsum seed) ++ * Description ++ * Compute a checksum difference, from the raw buffer pointed by ++ * *from*, of length *from_size* (that must be a multiple of 4), ++ * towards the raw buffer pointed by *to*, of size *to_size* ++ * (same remark). An optional *seed* can be added to the value ++ * (this can be cascaded, the seed may come from a previous call ++ * to the helper). ++ * ++ * This is flexible enough to be used in several ways: ++ * ++ * * With *from_size* == 0, *to_size* > 0 and *seed* set to ++ * checksum, it can be used when pushing new data. ++ * * With *from_size* > 0, *to_size* == 0 and *seed* set to ++ * checksum, it can be used when removing data from a packet. ++ * * With *from_size* > 0, *to_size* > 0 and *seed* set to 0, it ++ * can be used to compute a diff. 
Note that *from_size* and ++ * *to_size* do not need to be equal. ++ * ++ * This helper can be used in combination with ++ * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ (), to ++ * which one can feed in the difference computed with ++ * **bpf_csum_diff**\ (). ++ * Return ++ * The checksum result, or a negative error code in case of ++ * failure. ++ * ++ * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size) ++ * Description ++ * Retrieve tunnel options metadata for the packet associated to ++ * *skb*, and store the raw tunnel option data to the buffer *opt* ++ * of *size*. ++ * ++ * This helper can be used with encapsulation devices that can ++ * operate in "collect metadata" mode (please refer to the related ++ * note in the description of **bpf_skb_get_tunnel_key**\ () for ++ * more details). A particular example where this can be used is ++ * in combination with the Geneve encapsulation protocol, where it ++ * allows for pushing (with **bpf_skb_get_tunnel_opt**\ () helper) ++ * and retrieving arbitrary TLVs (Type-Length-Value headers) from ++ * the eBPF program. This allows for full customization of these ++ * headers. ++ * Return ++ * The size of the option data retrieved. ++ * ++ * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size) ++ * Description ++ * Set tunnel options metadata for the packet associated to *skb* ++ * to the option data contained in the raw buffer *opt* of *size*. ++ * ++ * See also the description of the **bpf_skb_get_tunnel_opt**\ () ++ * helper for additional information. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_skb_change_proto(struct sk_buff *skb, __be16 proto, u64 flags) ++ * Description ++ * Change the protocol of the *skb* to *proto*. Currently ++ * supported are transition from IPv4 to IPv6, and from IPv6 to ++ * IPv4. The helper takes care of the groundwork for the ++ * transition, including resizing the socket buffer. 
The eBPF ++ * program is expected to fill the new headers, if any, via ++ * **skb_store_bytes**\ () and to recompute the checksums with ++ * **bpf_l3_csum_replace**\ () and **bpf_l4_csum_replace**\ ++ * (). The main case for this helper is to perform NAT64 ++ * operations out of an eBPF program. ++ * ++ * Internally, the GSO type is marked as dodgy so that headers are ++ * checked and segments are recalculated by the GSO/GRO engine. ++ * The size for GSO target is adapted as well. ++ * ++ * All values for *flags* are reserved for future usage, and must ++ * be left at zero. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_skb_change_type(struct sk_buff *skb, u32 type) ++ * Description ++ * Change the packet type for the packet associated to *skb*. This ++ * comes down to setting *skb*\ **->pkt_type** to *type*, except ++ * the eBPF program does not have a write access to *skb*\ ++ * **->pkt_type** beside this helper. Using a helper here allows ++ * for graceful handling of errors. ++ * ++ * The major use case is to change incoming *skb*s to ++ * **PACKET_HOST** in a programmatic way instead of having to ++ * recirculate via **redirect**\ (..., **BPF_F_INGRESS**), for ++ * example. ++ * ++ * Note that *type* only allows certain values. At this time, they ++ * are: ++ * ++ * **PACKET_HOST** ++ * Packet is for us. ++ * **PACKET_BROADCAST** ++ * Send packet to all. ++ * **PACKET_MULTICAST** ++ * Send packet to group. ++ * **PACKET_OTHERHOST** ++ * Send packet to someone else. ++ * Return ++ * 0 on success, or a negative error in case of failure. 
++ * ++ * int bpf_skb_under_cgroup(struct sk_buff *skb, struct bpf_map *map, u32 index) ++ * Description ++ * Check whether *skb* is a descendant of the cgroup2 held by ++ * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. ++ * Return ++ * The return value depends on the result of the test, and can be: ++ * ++ * * 0, if the *skb* failed the cgroup2 descendant test. ++ * * 1, if the *skb* succeeded the cgroup2 descendant test. ++ * * A negative error code, if an error occurred. ++ * ++ * u32 bpf_get_hash_recalc(struct sk_buff *skb) ++ * Description ++ * Retrieve the hash of the packet, *skb*\ **->hash**. If it is ++ * not set, in particular if the hash was cleared due to mangling, ++ * recompute this hash. Later accesses to the hash can be done ++ * directly with *skb*\ **->hash**. ++ * ++ * Calling **bpf_set_hash_invalid**\ (), changing a packet ++ * prototype with **bpf_skb_change_proto**\ (), or calling ++ * **bpf_skb_store_bytes**\ () with the ++ * **BPF_F_INVALIDATE_HASH** are actions susceptible to clear ++ * the hash and to trigger a new computation for the next call to ++ * **bpf_get_hash_recalc**\ (). ++ * Return ++ * The 32-bit hash. ++ * ++ * u64 bpf_get_current_task(void) ++ * Return ++ * A pointer to the current task struct. ++ * ++ * int bpf_probe_write_user(void *dst, const void *src, u32 len) ++ * Description ++ * Attempt in a safe way to write *len* bytes from the buffer ++ * *src* to *dst* in memory. It only works for threads that are in ++ * user context, and *dst* must be a valid user space address. ++ * ++ * This helper should not be used to implement any kind of ++ * security mechanism because of TOC-TOU attacks, but rather to ++ * debug, divert, and manipulate execution of semi-cooperative ++ * processes. ++ * ++ * Keep in mind that this feature is meant for experiments, and it ++ * has a risk of crashing the system and running programs. 
++ * Therefore, when an eBPF program using this helper is attached, ++ * a warning including PID and process name is printed to kernel ++ * logs. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_current_task_under_cgroup(struct bpf_map *map, u32 index) ++ * Description ++ * Check whether the probe is being run in the context of a given ++ * subset of the cgroup2 hierarchy. The cgroup2 to test is held by ++ * *map* of type **BPF_MAP_TYPE_CGROUP_ARRAY**, at *index*. ++ * Return ++ * The return value depends on the result of the test, and can be: ++ * ++ * * 1, if current task belongs to the cgroup2. ++ * * 0, if current task does not belong to the cgroup2. ++ * * A negative error code, if an error occurred. ++ * ++ * int bpf_skb_change_tail(struct sk_buff *skb, u32 len, u64 flags) ++ * Description ++ * Resize (trim or grow) the packet associated to *skb* to the ++ * new *len*. The *flags* are reserved for future usage, and must ++ * be left at zero. ++ * ++ * The basic idea is that the helper performs the needed work to ++ * change the size of the packet, then the eBPF program rewrites ++ * the rest via helpers like **bpf_skb_store_bytes**\ (), ++ * **bpf_l3_csum_replace**\ (), **bpf_l4_csum_replace**\ () ++ * and others. This helper is a slow path utility intended for ++ * replies with control messages. And because it is targeted for ++ * slow path, the helper itself can afford to be slow: it ++ * implicitly linearizes, unclones and drops offloads from the ++ * *skb*. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. 
++ * ++ * int bpf_skb_pull_data(struct sk_buff *skb, u32 len) ++ * Description ++ * Pull in non-linear data in case the *skb* is non-linear and not ++ * all of *len* are part of the linear section. Make *len* bytes ++ * from *skb* readable and writable. If a zero value is passed for ++ * *len*, then the whole length of the *skb* is pulled. ++ * ++ * This helper is only needed for reading and writing with direct ++ * packet access. ++ * ++ * For direct packet access, testing that offsets to access ++ * are within packet boundaries (test on *skb*\ **->data_end**) is ++ * susceptible to fail if offsets are invalid, or if the requested ++ * data is in non-linear parts of the *skb*. On failure the ++ * program can just bail out, or in the case of a non-linear ++ * buffer, use a helper to make the data available. The ++ * **bpf_skb_load_bytes**\ () helper is a first solution to access ++ * the data. Another one consists in using **bpf_skb_pull_data** ++ * to pull in once the non-linear parts, then retesting and ++ * eventually access the data. ++ * ++ * At the same time, this also makes sure the *skb* is uncloned, ++ * which is a necessary condition for direct write. As this needs ++ * to be an invariant for the write part only, the verifier ++ * detects writes and adds a prologue that is calling ++ * **bpf_skb_pull_data()** to effectively unclone the *skb* from ++ * the very beginning in case it is indeed cloned. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. 
++ * ++ * s64 bpf_csum_update(struct sk_buff *skb, __wsum csum) ++ * Description ++ * Add the checksum *csum* into *skb*\ **->csum** in case the ++ * driver has supplied a checksum for the entire packet into that ++ * field. Return an error otherwise. This helper is intended to be ++ * used in combination with **bpf_csum_diff**\ (), in particular ++ * when the checksum needs to be updated after data has been ++ * written into the packet through direct packet access. ++ * Return ++ * The checksum on success, or a negative error code in case of ++ * failure. ++ * ++ * void bpf_set_hash_invalid(struct sk_buff *skb) ++ * Description ++ * Invalidate the current *skb*\ **->hash**. It can be used after ++ * mangling on headers through direct packet access, in order to ++ * indicate that the hash is outdated and to trigger a ++ * recalculation the next time the kernel tries to access this ++ * hash or when the **bpf_get_hash_recalc**\ () helper is called. ++ * ++ * int bpf_get_numa_node_id(void) ++ * Description ++ * Return the id of the current NUMA node. The primary use case ++ * for this helper is the selection of sockets for the local NUMA ++ * node, when the program is attached to sockets using the ++ * **SO_ATTACH_REUSEPORT_EBPF** option (see also **socket(7)**), ++ * but the helper is also available to other eBPF program types, ++ * similarly to **bpf_get_smp_processor_id**\ (). ++ * Return ++ * The id of current NUMA node. ++ * ++ * int bpf_skb_change_head(struct sk_buff *skb, u32 len, u64 flags) ++ * Description ++ * Grows headroom of packet associated to *skb* and adjusts the ++ * offset of the MAC header accordingly, adding *len* bytes of ++ * space. It automatically extends and reallocates memory as ++ * required. ++ * ++ * This helper can be used on a layer 3 *skb* to push a MAC header ++ * for redirection into a layer 2 device. ++ * ++ * All values for *flags* are reserved for future usage, and must ++ * be left at zero. 
++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_xdp_adjust_head(struct xdp_buff *xdp_md, int delta) ++ * Description ++ * Adjust (move) *xdp_md*\ **->data** by *delta* bytes. Note that ++ * it is possible to use a negative value for *delta*. This helper ++ * can be used to prepare the packet for pushing or popping ++ * headers. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) ++ * Description ++ * Copy a NUL terminated string from an unsafe address ++ * *unsafe_ptr* to *dst*. The *size* should include the ++ * terminating NUL byte. In case the string length is smaller than ++ * *size*, the target is not padded with further NUL bytes. If the ++ * string length is larger than *size*, just *size*-1 bytes are ++ * copied and the last byte is set to NUL. ++ * ++ * On success, the length of the copied string is returned. This ++ * makes this helper useful in tracing programs for reading ++ * strings, and more importantly to get its length at runtime. 
See ++ * the following snippet: ++ * ++ * :: ++ * ++ * SEC("kprobe/sys_open") ++ * void bpf_sys_open(struct pt_regs *ctx) ++ * { ++ * char buf[PATHLEN]; // PATHLEN is defined to 256 ++ * int res = bpf_probe_read_str(buf, sizeof(buf), ++ * ctx->di); ++ * ++ * // Consume buf, for example push it to ++ * // userspace via bpf_perf_event_output(); we ++ * // can use res (the string length) as event ++ * // size, after checking its boundaries. ++ * } ++ * ++ * In comparison, using **bpf_probe_read()** helper here instead ++ * to read the string would require to estimate the length at ++ * compile time, and would often result in copying more memory ++ * than necessary. ++ * ++ * Another useful use case is when parsing individual process ++ * arguments or individual environment variables navigating ++ * *current*\ **->mm->arg_start** and *current*\ ++ * **->mm->env_start**: using this helper and the return value, ++ * one can quickly iterate at the right offset of the memory area. ++ * Return ++ * On success, the strictly positive length of the string, ++ * including the trailing NUL character. On error, a negative ++ * value. ++ * ++ * u64 bpf_get_socket_cookie(struct sk_buff *skb) ++ * Description ++ * If the **struct sk_buff** pointed by *skb* has a known socket, ++ * retrieve the cookie (generated by the kernel) of this socket. ++ * If no cookie has been set yet, generate a new cookie. Once ++ * generated, the socket cookie remains stable for the life of the ++ * socket. This helper can be useful for monitoring per socket ++ * networking traffic statistics as it provides a global socket ++ * identifier that can be assumed unique. ++ * Return ++ * A 8-byte long non-decreasing number on success, or 0 if the ++ * socket field is missing inside *skb*. ++ * ++ * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx) ++ * Description ++ * Equivalent to bpf_get_socket_cookie() helper that accepts ++ * *skb*, but gets socket from **struct bpf_sock_addr** context. 
++ * Return ++ * A 8-byte long non-decreasing number. ++ * ++ * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx) ++ * Description ++ * Equivalent to bpf_get_socket_cookie() helper that accepts ++ * *skb*, but gets socket from **struct bpf_sock_ops** context. ++ * Return ++ * A 8-byte long non-decreasing number. ++ * ++ * u32 bpf_get_socket_uid(struct sk_buff *skb) ++ * Return ++ * The owner UID of the socket associated to *skb*. If the socket ++ * is **NULL**, or if it is not a full socket (i.e. if it is a ++ * time-wait or a request socket instead), **overflowuid** value ++ * is returned (note that **overflowuid** might also be the actual ++ * UID value for the socket). ++ * ++ * u32 bpf_set_hash(struct sk_buff *skb, u32 hash) ++ * Description ++ * Set the full hash for *skb* (set the field *skb*\ **->hash**) ++ * to value *hash*. ++ * Return ++ * 0 ++ * ++ * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) ++ * Description ++ * Emulate a call to **setsockopt()** on the socket associated to ++ * *bpf_socket*, which must be a full socket. The *level* at ++ * which the option resides and the name *optname* of the option ++ * must be specified, see **setsockopt(2)** for more information. ++ * The option value of length *optlen* is pointed by *optval*. ++ * ++ * This helper actually implements a subset of **setsockopt()**. ++ * It supports the following *level*\ s: ++ * ++ * * **SOL_SOCKET**, which supports the following *optname*\ s: ++ * **SO_RCVBUF**, **SO_SNDBUF**, **SO_MAX_PACING_RATE**, ++ * **SO_PRIORITY**, **SO_RCVLOWAT**, **SO_MARK**. ++ * * **IPPROTO_TCP**, which supports the following *optname*\ s: ++ * **TCP_CONGESTION**, **TCP_BPF_IW**, ++ * **TCP_BPF_SNDCWND_CLAMP**. ++ * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. ++ * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. ++ * Return ++ * 0 on success, or a negative error in case of failure. 
++ * ++ * int bpf_skb_adjust_room(struct sk_buff *skb, s32 len_diff, u32 mode, u64 flags) ++ * Description ++ * Grow or shrink the room for data in the packet associated to ++ * *skb* by *len_diff*, and according to the selected *mode*. ++ * ++ * There are two supported modes at this time: ++ * ++ * * **BPF_ADJ_ROOM_MAC**: Adjust room at the mac layer ++ * (room space is added or removed below the layer 2 header). ++ * ++ * * **BPF_ADJ_ROOM_NET**: Adjust room at the network layer ++ * (room space is added or removed below the layer 3 header). ++ * ++ * The following flags are supported at this time: ++ * ++ * * **BPF_F_ADJ_ROOM_FIXED_GSO**: Do not adjust gso_size. ++ * Adjusting mss in this way is not allowed for datagrams. ++ * ++ * * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV4**, ++ * **BPF_F_ADJ_ROOM_ENCAP_L3_IPV6**: ++ * Any new space is reserved to hold a tunnel header. ++ * Configure skb offsets and other fields accordingly. ++ * ++ * * **BPF_F_ADJ_ROOM_ENCAP_L4_GRE**, ++ * **BPF_F_ADJ_ROOM_ENCAP_L4_UDP**: ++ * Use with ENCAP_L3 flags to further specify the tunnel type. ++ * ++ * * **BPF_F_ADJ_ROOM_ENCAP_L2**\ (*len*): ++ * Use with ENCAP_L3/L4 flags to further specify the tunnel ++ * type; *len* is the length of the inner MAC header. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_redirect_map(struct bpf_map *map, u32 key, u64 flags) ++ * Description ++ * Redirect the packet to the endpoint referenced by *map* at ++ * index *key*. 
Depending on its type, this *map* can contain ++ * references to net devices (for forwarding packets through other ++ * ports), or to CPUs (for redirecting XDP frames to another CPU; ++ * but this is only implemented for native XDP (with driver ++ * support) as of this writing). ++ * ++ * The lower two bits of *flags* are used as the return code if ++ * the map lookup fails. This is so that the return value can be ++ * one of the XDP program return codes up to XDP_TX, as chosen by ++ * the caller. Any higher bits in the *flags* argument must be ++ * unset. ++ * ++ * When used to redirect packets to net devices, this helper ++ * provides a high performance increase over **bpf_redirect**\ (). ++ * This is due to various implementation details of the underlying ++ * mechanisms, one of which is the fact that **bpf_redirect_map**\ ++ * () tries to send packet as a "bulk" to the device. ++ * Return ++ * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error. ++ * ++ * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags) ++ * Description ++ * Redirect the packet to the socket referenced by *map* (of type ++ * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and ++ * egress interfaces can be used for redirection. The ++ * **BPF_F_INGRESS** value in *flags* is used to make the ++ * distinction (ingress path is selected if the flag is present, ++ * egress path otherwise). This is the only flag supported for now. ++ * Return ++ * **SK_PASS** on success, or **SK_DROP** on error. ++ * ++ * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) ++ * Description ++ * Add an entry to, or update a *map* referencing sockets. The ++ * *skops* is used as a new value for the entry associated to ++ * *key*. *flags* is one of: ++ * ++ * **BPF_NOEXIST** ++ * The entry for *key* must not exist in the map. ++ * **BPF_EXIST** ++ * The entry for *key* must already exist in the map. 
++ * **BPF_ANY** ++ * No condition on the existence of the entry for *key*. ++ * ++ * If the *map* has eBPF programs (parser and verdict), those will ++ * be inherited by the socket being added. If the socket is ++ * already attached to eBPF programs, this results in an error. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_xdp_adjust_meta(struct xdp_buff *xdp_md, int delta) ++ * Description ++ * Adjust the address pointed by *xdp_md*\ **->data_meta** by ++ * *delta* (which can be positive or negative). Note that this ++ * operation modifies the address stored in *xdp_md*\ **->data**, ++ * so the latter must be loaded only after the helper has been ++ * called. ++ * ++ * The use of *xdp_md*\ **->data_meta** is optional and programs ++ * are not required to use it. The rationale is that when the ++ * packet is processed with XDP (e.g. as DoS filter), it is ++ * possible to push further meta data along with it before passing ++ * to the stack, and to give the guarantee that an ingress eBPF ++ * program attached as a TC classifier on the same device can pick ++ * this up for further post-processing. Since TC works with socket ++ * buffers, it remains possible to set from XDP the **mark** or ++ * **priority** pointers, or other pointers for the socket buffer. ++ * Having this scratch space generic and programmable allows for ++ * more flexibility as the user is free to store whatever meta ++ * data they need. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. 
++ * ++ * int bpf_perf_event_read_value(struct bpf_map *map, u64 flags, struct bpf_perf_event_value *buf, u32 buf_size) ++ * Description ++ * Read the value of a perf event counter, and store it into *buf* ++ * of size *buf_size*. This helper relies on a *map* of type ++ * **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. The nature of the perf event ++ * counter is selected when *map* is updated with perf event file ++ * descriptors. The *map* is an array whose size is the number of ++ * available CPUs, and each cell contains a value relative to one ++ * CPU. The value to retrieve is indicated by *flags*, that ++ * contains the index of the CPU to look up, masked with ++ * **BPF_F_INDEX_MASK**. Alternatively, *flags* can be set to ++ * **BPF_F_CURRENT_CPU** to indicate that the value for the ++ * current CPU should be retrieved. ++ * ++ * This helper behaves in a way close to ++ * **bpf_perf_event_read**\ () helper, save that instead of ++ * just returning the value observed, it fills the *buf* ++ * structure. This allows for additional data to be retrieved: in ++ * particular, the enabled and running times (in *buf*\ ++ * **->enabled** and *buf*\ **->running**, respectively) are ++ * copied. In general, **bpf_perf_event_read_value**\ () is ++ * recommended over **bpf_perf_event_read**\ (), which has some ++ * ABI issues and provides fewer functionalities. ++ * ++ * These values are interesting, because hardware PMU (Performance ++ * Monitoring Unit) counters are limited resources. When there are ++ * more PMU based perf events opened than available counters, ++ * kernel will multiplex these events so each event gets certain ++ * percentage (but not all) of the PMU time. In case that ++ * multiplexing happens, the number of samples or counter value ++ * will not reflect the case compared to when no multiplexing ++ * occurs. This makes comparison between different runs difficult. ++ * Typically, the counter value should be normalized before ++ * comparing to other experiments. 
The usual normalization is done ++ * as follows. ++ * ++ * :: ++ * ++ * normalized_counter = counter * t_enabled / t_running ++ * ++ * Where t_enabled is the time enabled for event and t_running is ++ * the time running for event since last normalization. The ++ * enabled and running times are accumulated since the perf event ++ * open. To achieve scaling factor between two invocations of an ++ * eBPF program, users can use CPU id as the key (which is ++ * typical for perf array usage model) to remember the previous ++ * value and do the calculation inside the eBPF program. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) ++ * Description ++ * For an eBPF program attached to a perf event, retrieve the ++ * value of the event counter associated to *ctx* and store it in ++ * the structure pointed by *buf* and of size *buf_size*. Enabled ++ * and running times are also stored in the structure (see ++ * description of helper **bpf_perf_event_read_value**\ () for ++ * more details). ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) ++ * Description ++ * Emulate a call to **getsockopt()** on the socket associated to ++ * *bpf_socket*, which must be a full socket. The *level* at ++ * which the option resides and the name *optname* of the option ++ * must be specified, see **getsockopt(2)** for more information. ++ * The retrieved value is stored in the structure pointed by ++ * *optval* and of length *optlen*. ++ * ++ * This helper actually implements a subset of **getsockopt()**. ++ * It supports the following *level*\ s: ++ * ++ * * **IPPROTO_TCP**, which supports *optname* ++ * **TCP_CONGESTION**. ++ * * **IPPROTO_IP**, which supports *optname* **IP_TOS**. 
++ * * **IPPROTO_IPV6**, which supports *optname* **IPV6_TCLASS**. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_override_return(struct pt_regs *regs, u64 rc) ++ * Description ++ * Used for error injection, this helper uses kprobes to override ++ * the return value of the probed function, and to set it to *rc*. ++ * The first argument is the context *regs* on which the kprobe ++ * works. ++ * ++ * This helper works by setting the PC (program counter) ++ * to an override function which is run in place of the original ++ * probed function. This means the probed function is not run at ++ * all. The replacement function just returns with the required ++ * value. ++ * ++ * This helper has security implications, and thus is subject to ++ * restrictions. It is only available if the kernel was compiled ++ * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration ++ * option, and in this case it only works on functions tagged with ++ * **ALLOW_ERROR_INJECTION** in the kernel code. ++ * ++ * Also, the helper is only available for the architectures having ++ * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, ++ * x86 architecture is the only one to support this feature. ++ * Return ++ * 0 ++ * ++ * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) ++ * Description ++ * Attempt to set the value of the **bpf_sock_ops_cb_flags** field ++ * for the full TCP socket associated to *bpf_sock_ops* to ++ * *argval*. ++ * ++ * The primary use of this field is to determine if there should ++ * be calls to eBPF programs of type ++ * **BPF_PROG_TYPE_SOCK_OPS** at various points in the TCP ++ * code. A program of the same type can change its value, per ++ * connection and as necessary, when the connection is ++ * established. 
This field is directly accessible for reading, but ++ * this helper must be used for updates in order to return an ++ * error if an eBPF program tries to set a callback that is not ++ * supported in the current kernel. ++ * ++ * *argval* is a flag array which can combine these flags: ++ * ++ * * **BPF_SOCK_OPS_RTO_CB_FLAG** (retransmission time out) ++ * * **BPF_SOCK_OPS_RETRANS_CB_FLAG** (retransmission) ++ * * **BPF_SOCK_OPS_STATE_CB_FLAG** (TCP state change) ++ * * **BPF_SOCK_OPS_RTT_CB_FLAG** (every RTT) ++ * ++ * Therefore, this function can be used to clear a callback flag by ++ * setting the appropriate bit to zero. e.g. to disable the RTO ++ * callback: ++ * ++ * **bpf_sock_ops_cb_flags_set(bpf_sock,** ++ * **bpf_sock->bpf_sock_ops_cb_flags & ~BPF_SOCK_OPS_RTO_CB_FLAG)** ++ * ++ * Here are some examples of where one could call such eBPF ++ * program: ++ * ++ * * When RTO fires. ++ * * When a packet is retransmitted. ++ * * When the connection terminates. ++ * * When a packet is sent. ++ * * When a packet is received. ++ * Return ++ * Code **-EINVAL** if the socket is not a full TCP socket; ++ * otherwise, a positive number containing the bits that could not ++ * be set is returned (which comes down to 0 if all bits were set ++ * as required). ++ * ++ * int bpf_msg_redirect_map(struct sk_msg_buff *msg, struct bpf_map *map, u32 key, u64 flags) ++ * Description ++ * This helper is used in programs implementing policies at the ++ * socket level. If the message *msg* is allowed to pass (i.e. if ++ * the verdict eBPF program returns **SK_PASS**), redirect it to ++ * the socket referenced by *map* (of type ++ * **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and ++ * egress interfaces can be used for redirection. The ++ * **BPF_F_INGRESS** value in *flags* is used to make the ++ * distinction (ingress path is selected if the flag is present, ++ * egress path otherwise). This is the only flag supported for now. 
++ * Return ++ * **SK_PASS** on success, or **SK_DROP** on error. ++ * ++ * int bpf_msg_apply_bytes(struct sk_msg_buff *msg, u32 bytes) ++ * Description ++ * For socket policies, apply the verdict of the eBPF program to ++ * the next *bytes* (number of bytes) of message *msg*. ++ * ++ * For example, this helper can be used in the following cases: ++ * ++ * * A single **sendmsg**\ () or **sendfile**\ () system call ++ * contains multiple logical messages that the eBPF program is ++ * supposed to read and for which it should apply a verdict. ++ * * An eBPF program only cares to read the first *bytes* of a ++ * *msg*. If the message has a large payload, then setting up ++ * and calling the eBPF program repeatedly for all bytes, even ++ * though the verdict is already known, would create unnecessary ++ * overhead. ++ * ++ * When called from within an eBPF program, the helper sets a ++ * counter internal to the BPF infrastructure, that is used to ++ * apply the last verdict to the next *bytes*. If *bytes* is ++ * smaller than the current data being processed from a ++ * **sendmsg**\ () or **sendfile**\ () system call, the first ++ * *bytes* will be sent and the eBPF program will be re-run with ++ * the pointer for start of data pointing to byte number *bytes* ++ * **+ 1**. If *bytes* is larger than the current data being ++ * processed, then the eBPF verdict will be applied to multiple ++ * **sendmsg**\ () or **sendfile**\ () calls until *bytes* are ++ * consumed. ++ * ++ * Note that if a socket closes with the internal counter holding ++ * a non-zero value, this is not a problem because data is not ++ * being buffered for *bytes* and is sent as it is received. ++ * Return ++ * 0 ++ * ++ * int bpf_msg_cork_bytes(struct sk_msg_buff *msg, u32 bytes) ++ * Description ++ * For socket policies, prevent the execution of the verdict eBPF ++ * program for message *msg* until *bytes* (byte number) have been ++ * accumulated. 
++ * ++ * This can be used when one needs a specific number of bytes ++ * before a verdict can be assigned, even if the data spans ++ * multiple **sendmsg**\ () or **sendfile**\ () calls. The extreme ++ * case would be a user calling **sendmsg**\ () repeatedly with ++ * 1-byte long message segments. Obviously, this is bad for ++ * performance, but it is still valid. If the eBPF program needs ++ * *bytes* bytes to validate a header, this helper can be used to ++ * prevent the eBPF program to be called again until *bytes* have ++ * been accumulated. ++ * Return ++ * 0 ++ * ++ * int bpf_msg_pull_data(struct sk_msg_buff *msg, u32 start, u32 end, u64 flags) ++ * Description ++ * For socket policies, pull in non-linear data from user space ++ * for *msg* and set pointers *msg*\ **->data** and *msg*\ ++ * **->data_end** to *start* and *end* bytes offsets into *msg*, ++ * respectively. ++ * ++ * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a ++ * *msg* it can only parse data that the (**data**, **data_end**) ++ * pointers have already consumed. For **sendmsg**\ () hooks this ++ * is likely the first scatterlist element. But for calls relying ++ * on the **sendpage** handler (e.g. **sendfile**\ ()) this will ++ * be the range (**0**, **0**) because the data is shared with ++ * user space and by default the objective is to avoid allowing ++ * user space to modify data while (or after) eBPF verdict is ++ * being decided. This helper can be used to pull in data and to ++ * set the start and end pointer to given values. Data will be ++ * copied if necessary (i.e. if data was not linear and if start ++ * and end pointers do not point to the same chunk). ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. 
++ * ++ * All values for *flags* are reserved for future usage, and must ++ * be left at zero. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) ++ * Description ++ * Bind the socket associated to *ctx* to the address pointed by ++ * *addr*, of length *addr_len*. This allows for making outgoing ++ * connection from the desired IP address, which can be useful for ++ * example when all processes inside a cgroup should use one ++ * single IP address on a host that has multiple IP configured. ++ * ++ * This helper works for IPv4 and IPv6, TCP and UDP sockets. The ++ * domain (*addr*\ **->sa_family**) must be **AF_INET** (or ++ * **AF_INET6**). Looking for a free port to bind to can be ++ * expensive, therefore binding to port is not permitted by the ++ * helper: *addr*\ **->sin_port** (or **sin6_port**, respectively) ++ * must be set to zero. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_xdp_adjust_tail(struct xdp_buff *xdp_md, int delta) ++ * Description ++ * Adjust (move) *xdp_md*\ **->data_end** by *delta* bytes. It is ++ * only possible to shrink the packet as of this writing, ++ * therefore *delta* must be a negative integer. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_skb_get_xfrm_state(struct sk_buff *skb, u32 index, struct bpf_xfrm_state *xfrm_state, u32 size, u64 flags) ++ * Description ++ * Retrieve the XFRM state (IP transform framework, see also ++ * **ip-xfrm(8)**) at *index* in XFRM "security path" for *skb*. 
++ * ++ * The retrieved value is stored in the **struct bpf_xfrm_state** ++ * pointed by *xfrm_state* and of length *size*. ++ * ++ * All values for *flags* are reserved for future usage, and must ++ * be left at zero. ++ * ++ * This helper is available only if the kernel was compiled with ++ * **CONFIG_XFRM** configuration option. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags) ++ * Description ++ * Return a user or a kernel stack in bpf program provided buffer. ++ * To achieve this, the helper needs *ctx*, which is a pointer ++ * to the context on which the tracing program is executed. ++ * To store the stacktrace, the bpf program provides *buf* with ++ * a nonnegative *size*. ++ * ++ * The last argument, *flags*, holds the number of stack frames to ++ * skip (from 0 to 255), masked with ++ * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set ++ * the following flags: ++ * ++ * **BPF_F_USER_STACK** ++ * Collect a user space stack instead of a kernel stack. ++ * **BPF_F_USER_BUILD_ID** ++ * Collect buildid+offset instead of ips for user stack, ++ * only valid if **BPF_F_USER_STACK** is also specified. ++ * ++ * **bpf_get_stack**\ () can collect up to ++ * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject ++ * to sufficient large buffer size. Note that ++ * this limit can be controlled with the **sysctl** program, and ++ * that it should be manually increased in order to profile long ++ * user stacks (such as stacks for Java programs). To do so, use: ++ * ++ * :: ++ * ++ * # sysctl kernel.perf_event_max_stack= ++ * Return ++ * A non-negative value equal to or less than *size* on success, ++ * or a negative error in case of failure. 
++ * ++ * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) ++ * Description ++ * This helper is similar to **bpf_skb_load_bytes**\ () in that ++ * it provides an easy way to load *len* bytes from *offset* ++ * from the packet associated to *skb*, into the buffer pointed ++ * by *to*. The difference to **bpf_skb_load_bytes**\ () is that ++ * a fifth argument *start_header* exists in order to select a ++ * base offset to start from. *start_header* can be one of: ++ * ++ * **BPF_HDR_START_MAC** ++ * Base offset to load data from is *skb*'s mac header. ++ * **BPF_HDR_START_NET** ++ * Base offset to load data from is *skb*'s network header. ++ * ++ * In general, "direct packet access" is the preferred method to ++ * access packet data, however, this helper is in particular useful ++ * in socket filters where *skb*\ **->data** does not always point ++ * to the start of the mac header and where "direct packet access" ++ * is not available. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_fib_lookup(void *ctx, struct bpf_fib_lookup *params, int plen, u32 flags) ++ * Description ++ * Do FIB lookup in kernel tables using parameters in *params*. ++ * If lookup is successful and result shows packet is to be ++ * forwarded, the neighbor tables are searched for the nexthop. ++ * If successful (ie., FIB lookup shows forwarding and nexthop ++ * is resolved), the nexthop address is returned in ipv4_dst ++ * or ipv6_dst based on family, smac is set to mac address of ++ * egress device, dmac is set to nexthop mac address, rt_metric ++ * is set to metric from route (IPv4/IPv6 only), and ifindex ++ * is set to the device index of the nexthop from the FIB lookup. ++ * ++ * *plen* argument is the size of the passed in struct. 
++ * *flags* argument can be a combination of one or more of the ++ * following values: ++ * ++ * **BPF_FIB_LOOKUP_DIRECT** ++ * Do a direct table lookup vs full lookup using FIB ++ * rules. ++ * **BPF_FIB_LOOKUP_OUTPUT** ++ * Perform lookup from an egress perspective (default is ++ * ingress). ++ * ++ * *ctx* is either **struct xdp_md** for XDP programs or ++ * **struct sk_buff** tc cls_act programs. ++ * Return ++ * * < 0 if any input argument is invalid ++ * * 0 on success (packet is forwarded, nexthop neighbor exists) ++ * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the ++ * packet is not forwarded or needs assist from full stack ++ * ++ * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) ++ * Description ++ * Add an entry to, or update a sockhash *map* referencing sockets. ++ * The *skops* is used as a new value for the entry associated to ++ * *key*. *flags* is one of: ++ * ++ * **BPF_NOEXIST** ++ * The entry for *key* must not exist in the map. ++ * **BPF_EXIST** ++ * The entry for *key* must already exist in the map. ++ * **BPF_ANY** ++ * No condition on the existence of the entry for *key*. ++ * ++ * If the *map* has eBPF programs (parser and verdict), those will ++ * be inherited by the socket being added. If the socket is ++ * already attached to eBPF programs, this results in an error. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_msg_redirect_hash(struct sk_msg_buff *msg, struct bpf_map *map, void *key, u64 flags) ++ * Description ++ * This helper is used in programs implementing policies at the ++ * socket level. If the message *msg* is allowed to pass (i.e. if ++ * the verdict eBPF program returns **SK_PASS**), redirect it to ++ * the socket referenced by *map* (of type ++ * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and ++ * egress interfaces can be used for redirection. 
The ++ * **BPF_F_INGRESS** value in *flags* is used to make the ++ * distinction (ingress path is selected if the flag is present, ++ * egress path otherwise). This is the only flag supported for now. ++ * Return ++ * **SK_PASS** on success, or **SK_DROP** on error. ++ * ++ * int bpf_sk_redirect_hash(struct sk_buff *skb, struct bpf_map *map, void *key, u64 flags) ++ * Description ++ * This helper is used in programs implementing policies at the ++ * skb socket level. If the sk_buff *skb* is allowed to pass (i.e. ++ * if the verdeict eBPF program returns **SK_PASS**), redirect it ++ * to the socket referenced by *map* (of type ++ * **BPF_MAP_TYPE_SOCKHASH**) using hash *key*. Both ingress and ++ * egress interfaces can be used for redirection. The ++ * **BPF_F_INGRESS** value in *flags* is used to make the ++ * distinction (ingress path is selected if the flag is present, ++ * egress otherwise). This is the only flag supported for now. ++ * Return ++ * **SK_PASS** on success, or **SK_DROP** on error. ++ * ++ * int bpf_lwt_push_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) ++ * Description ++ * Encapsulate the packet associated to *skb* within a Layer 3 ++ * protocol header. This header is provided in the buffer at ++ * address *hdr*, with *len* its size in bytes. *type* indicates ++ * the protocol of the header and can be one of: ++ * ++ * **BPF_LWT_ENCAP_SEG6** ++ * IPv6 encapsulation with Segment Routing Header ++ * (**struct ipv6_sr_hdr**). *hdr* only contains the SRH, ++ * the IPv6 header is computed by the kernel. ++ * **BPF_LWT_ENCAP_SEG6_INLINE** ++ * Only works if *skb* contains an IPv6 packet. Insert a ++ * Segment Routing Header (**struct ipv6_sr_hdr**) inside ++ * the IPv6 header. ++ * **BPF_LWT_ENCAP_IP** ++ * IP encapsulation (GRE/GUE/IPIP/etc). The outer header ++ * must be IPv4 or IPv6, followed by zero or more ++ * additional headers, up to **LWT_BPF_MAX_HEADROOM** ++ * total bytes in all prepended headers. 
Please note that ++ * if **skb_is_gso**\ (*skb*) is true, no more than two ++ * headers can be prepended, and the inner header, if ++ * present, should be either GRE or UDP/GUE. ++ * ++ * **BPF_LWT_ENCAP_SEG6**\ \* types can be called by BPF programs ++ * of type **BPF_PROG_TYPE_LWT_IN**; **BPF_LWT_ENCAP_IP** type can ++ * be called by bpf programs of types **BPF_PROG_TYPE_LWT_IN** and ++ * **BPF_PROG_TYPE_LWT_XMIT**. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_lwt_seg6_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len) ++ * Description ++ * Store *len* bytes from address *from* into the packet ++ * associated to *skb*, at *offset*. Only the flags, tag and TLVs ++ * inside the outermost IPv6 Segment Routing Header can be ++ * modified through this helper. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_lwt_seg6_adjust_srh(struct sk_buff *skb, u32 offset, s32 delta) ++ * Description ++ * Adjust the size allocated to TLVs in the outermost IPv6 ++ * Segment Routing Header contained in the packet associated to ++ * *skb*, at position *offset* by *delta* bytes. Only offsets ++ * after the segments are accepted. *delta* can be as well ++ * positive (growing) as negative (shrinking). ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. 
Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_lwt_seg6_action(struct sk_buff *skb, u32 action, void *param, u32 param_len) ++ * Description ++ * Apply an IPv6 Segment Routing action of type *action* to the ++ * packet associated to *skb*. Each action takes a parameter ++ * contained at address *param*, and of length *param_len* bytes. ++ * *action* can be one of: ++ * ++ * **SEG6_LOCAL_ACTION_END_X** ++ * End.X action: Endpoint with Layer-3 cross-connect. ++ * Type of *param*: **struct in6_addr**. ++ * **SEG6_LOCAL_ACTION_END_T** ++ * End.T action: Endpoint with specific IPv6 table lookup. ++ * Type of *param*: **int**. ++ * **SEG6_LOCAL_ACTION_END_B6** ++ * End.B6 action: Endpoint bound to an SRv6 policy. ++ * Type of *param*: **struct ipv6_sr_hdr**. ++ * **SEG6_LOCAL_ACTION_END_B6_ENCAP** ++ * End.B6.Encap action: Endpoint bound to an SRv6 ++ * encapsulation policy. ++ * Type of *param*: **struct ipv6_sr_hdr**. ++ * ++ * A call to this helper is susceptible to change the underlying ++ * packet buffer. Therefore, at load time, all checks on pointers ++ * previously done by the verifier are invalidated and must be ++ * performed again, if the helper is used in combination with ++ * direct packet access. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_rc_repeat(void *ctx) ++ * Description ++ * This helper is used in programs implementing IR decoding, to ++ * report a successfully decoded repeat key message. This delays ++ * the generation of a key up event for previously generated ++ * key down event. ++ * ++ * Some IR protocols like NEC have a special IR message for ++ * repeating last button, for when a button is held down. 
++ * ++ * The *ctx* should point to the lirc sample as passed into ++ * the program. ++ * ++ * This helper is only available is the kernel was compiled with ++ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to ++ * "**y**". ++ * Return ++ * 0 ++ * ++ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle) ++ * Description ++ * This helper is used in programs implementing IR decoding, to ++ * report a successfully decoded key press with *scancode*, ++ * *toggle* value in the given *protocol*. The scancode will be ++ * translated to a keycode using the rc keymap, and reported as ++ * an input key down event. After a period a key up event is ++ * generated. This period can be extended by calling either ++ * **bpf_rc_keydown**\ () again with the same values, or calling ++ * **bpf_rc_repeat**\ (). ++ * ++ * Some protocols include a toggle bit, in case the button was ++ * released and pressed again between consecutive scancodes. ++ * ++ * The *ctx* should point to the lirc sample as passed into ++ * the program. ++ * ++ * The *protocol* is the decoded protocol number (see ++ * **enum rc_proto** for some predefined values). ++ * ++ * This helper is only available is the kernel was compiled with ++ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to ++ * "**y**". ++ * Return ++ * 0 ++ * ++ * u64 bpf_skb_cgroup_id(struct sk_buff *skb) ++ * Description ++ * Return the cgroup v2 id of the socket associated with the *skb*. ++ * This is roughly similar to the **bpf_get_cgroup_classid**\ () ++ * helper for cgroup v1 by providing a tag resp. identifier that ++ * can be matched on or used for map lookups e.g. to implement ++ * policy. The cgroup v2 id of a given path in the hierarchy is ++ * exposed in user space through the f_handle API in order to get ++ * to the same 64-bit id. 
++ * ++ * This helper can be used on TC egress path, but not on ingress, ++ * and is available only if the kernel was compiled with the ++ * **CONFIG_SOCK_CGROUP_DATA** configuration option. ++ * Return ++ * The id is returned or 0 in case the id could not be retrieved. ++ * ++ * u64 bpf_get_current_cgroup_id(void) ++ * Return ++ * A 64-bit integer containing the current cgroup id based ++ * on the cgroup within which the current task is running. ++ * ++ * void *bpf_get_local_storage(void *map, u64 flags) ++ * Description ++ * Get the pointer to the local storage area. ++ * The type and the size of the local storage is defined ++ * by the *map* argument. ++ * The *flags* meaning is specific for each map type, ++ * and has to be 0 for cgroup local storage. ++ * ++ * Depending on the BPF program type, a local storage area ++ * can be shared between multiple instances of the BPF program, ++ * running simultaneously. ++ * ++ * A user should care about the synchronization by himself. ++ * For example, by using the **BPF_STX_XADD** instruction to alter ++ * the shared data. ++ * Return ++ * A pointer to the local storage area. ++ * ++ * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags) ++ * Description ++ * Select a **SO_REUSEPORT** socket from a ++ * **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*. ++ * It checks the selected socket is matching the incoming ++ * request in the socket buffer. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level) ++ * Description ++ * Return id of cgroup v2 that is ancestor of cgroup associated ++ * with the *skb* at the *ancestor_level*. The root cgroup is at ++ * *ancestor_level* zero and each step down the hierarchy ++ * increments the level. If *ancestor_level* == level of cgroup ++ * associated with *skb*, then return value will be same as that ++ * of **bpf_skb_cgroup_id**\ (). 
++ * ++ * The helper is useful to implement policies based on cgroups ++ * that are upper in hierarchy than immediate cgroup associated ++ * with *skb*. ++ * ++ * The format of returned id and helper limitations are same as in ++ * **bpf_skb_cgroup_id**\ (). ++ * Return ++ * The id is returned or 0 in case the id could not be retrieved. ++ * ++ * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) ++ * Description ++ * Look for TCP socket matching *tuple*, optionally in a child ++ * network namespace *netns*. The return value must be checked, ++ * and if non-**NULL**, released via **bpf_sk_release**\ (). ++ * ++ * The *ctx* should point to the context of the program, such as ++ * the skb or socket (depending on the hook in use). This is used ++ * to determine the base network namespace for the lookup. ++ * ++ * *tuple_size* must be one of: ++ * ++ * **sizeof**\ (*tuple*\ **->ipv4**) ++ * Look for an IPv4 socket. ++ * **sizeof**\ (*tuple*\ **->ipv6**) ++ * Look for an IPv6 socket. ++ * ++ * If the *netns* is a negative signed 32-bit integer, then the ++ * socket lookup table in the netns associated with the *ctx* will ++ * will be used. For the TC hooks, this is the netns of the device ++ * in the skb. For socket hooks, this is the netns of the socket. ++ * If *netns* is any other signed 32-bit value greater than or ++ * equal to zero then it specifies the ID of the netns relative to ++ * the netns associated with the *ctx*. *netns* values beyond the ++ * range of 32-bit integers are reserved for future use. ++ * ++ * All values for *flags* are reserved for future usage, and must ++ * be left at zero. ++ * ++ * This helper is available only if the kernel was compiled with ++ * **CONFIG_NET** configuration option. ++ * Return ++ * Pointer to **struct bpf_sock**, or **NULL** in case of failure. 
++ * For sockets with reuseport option, the **struct bpf_sock** ++ * result is from *reuse*\ **->socks**\ [] using the hash of the ++ * tuple. ++ * ++ * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) ++ * Description ++ * Look for UDP socket matching *tuple*, optionally in a child ++ * network namespace *netns*. The return value must be checked, ++ * and if non-**NULL**, released via **bpf_sk_release**\ (). ++ * ++ * The *ctx* should point to the context of the program, such as ++ * the skb or socket (depending on the hook in use). This is used ++ * to determine the base network namespace for the lookup. ++ * ++ * *tuple_size* must be one of: ++ * ++ * **sizeof**\ (*tuple*\ **->ipv4**) ++ * Look for an IPv4 socket. ++ * **sizeof**\ (*tuple*\ **->ipv6**) ++ * Look for an IPv6 socket. ++ * ++ * If the *netns* is a negative signed 32-bit integer, then the ++ * socket lookup table in the netns associated with the *ctx* will ++ * will be used. For the TC hooks, this is the netns of the device ++ * in the skb. For socket hooks, this is the netns of the socket. ++ * If *netns* is any other signed 32-bit value greater than or ++ * equal to zero then it specifies the ID of the netns relative to ++ * the netns associated with the *ctx*. *netns* values beyond the ++ * range of 32-bit integers are reserved for future use. ++ * ++ * All values for *flags* are reserved for future usage, and must ++ * be left at zero. ++ * ++ * This helper is available only if the kernel was compiled with ++ * **CONFIG_NET** configuration option. ++ * Return ++ * Pointer to **struct bpf_sock**, or **NULL** in case of failure. ++ * For sockets with reuseport option, the **struct bpf_sock** ++ * result is from *reuse*\ **->socks**\ [] using the hash of the ++ * tuple. ++ * ++ * int bpf_sk_release(struct bpf_sock *sock) ++ * Description ++ * Release the reference held by *sock*. 
*sock* must be a ++ * non-**NULL** pointer that was returned from ++ * **bpf_sk_lookup_xxx**\ (). ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags) ++ * Description ++ * Push an element *value* in *map*. *flags* is one of: ++ * ++ * **BPF_EXIST** ++ * If the queue/stack is full, the oldest element is ++ * removed to make room for this. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_map_pop_elem(struct bpf_map *map, void *value) ++ * Description ++ * Pop an element from *map*. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_map_peek_elem(struct bpf_map *map, void *value) ++ * Description ++ * Get an element from *map* without removing it. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags) ++ * Description ++ * For socket policies, insert *len* bytes into *msg* at offset ++ * *start*. ++ * ++ * If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a ++ * *msg* it may want to insert metadata or options into the *msg*. ++ * This can later be read and used by any of the lower layer BPF ++ * hooks. ++ * ++ * This helper may fail if under memory pressure (a malloc ++ * fails) in these cases BPF programs will get an appropriate ++ * error and BPF programs will need to handle them. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags) ++ * Description ++ * Will remove *pop* bytes from a *msg* starting at byte *start*. ++ * This may result in **ENOMEM** errors under certain situations if ++ * an allocation and copy are required due to a full ring buffer. ++ * However, the helper will try to avoid doing the allocation ++ * if possible. 
Other errors can occur if input parameters are ++ * invalid either due to *start* byte not being valid part of *msg* ++ * payload and/or *pop* value being to large. ++ * Return ++ * 0 on success, or a negative error in case of failure. ++ * ++ * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y) ++ * Description ++ * This helper is used in programs implementing IR decoding, to ++ * report a successfully decoded pointer movement. ++ * ++ * The *ctx* should point to the lirc sample as passed into ++ * the program. ++ * ++ * This helper is only available is the kernel was compiled with ++ * the **CONFIG_BPF_LIRC_MODE2** configuration option set to ++ * "**y**". ++ * Return ++ * 0 ++ * ++ * int bpf_spin_lock(struct bpf_spin_lock *lock) ++ * Description ++ * Acquire a spinlock represented by the pointer *lock*, which is ++ * stored as part of a value of a map. Taking the lock allows to ++ * safely update the rest of the fields in that value. The ++ * spinlock can (and must) later be released with a call to ++ * **bpf_spin_unlock**\ (\ *lock*\ ). ++ * ++ * Spinlocks in BPF programs come with a number of restrictions ++ * and constraints: ++ * ++ * * **bpf_spin_lock** objects are only allowed inside maps of ++ * types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this ++ * list could be extended in the future). ++ * * BTF description of the map is mandatory. ++ * * The BPF program can take ONE lock at a time, since taking two ++ * or more could cause dead locks. ++ * * Only one **struct bpf_spin_lock** is allowed per map element. ++ * * When the lock is taken, calls (either BPF to BPF or helpers) ++ * are not allowed. ++ * * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not ++ * allowed inside a spinlock-ed region. ++ * * The BPF program MUST call **bpf_spin_unlock**\ () to release ++ * the lock, on all execution paths, before it returns. 
++ * * The BPF program can access **struct bpf_spin_lock** only via ++ * the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ () ++ * helpers. Loading or storing data into the **struct ++ * bpf_spin_lock** *lock*\ **;** field of a map is not allowed. ++ * * To use the **bpf_spin_lock**\ () helper, the BTF description ++ * of the map value must be a struct and have **struct ++ * bpf_spin_lock** *anyname*\ **;** field at the top level. ++ * Nested lock inside another struct is not allowed. ++ * * The **struct bpf_spin_lock** *lock* field in a map value must ++ * be aligned on a multiple of 4 bytes in that value. ++ * * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy ++ * the **bpf_spin_lock** field to user space. ++ * * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from ++ * a BPF program, do not update the **bpf_spin_lock** field. ++ * * **bpf_spin_lock** cannot be on the stack or inside a ++ * networking packet (it can only be inside of a map values). ++ * * **bpf_spin_lock** is available to root only. ++ * * Tracing programs and socket filter programs cannot use ++ * **bpf_spin_lock**\ () due to insufficient preemption checks ++ * (but this may change in the future). ++ * * **bpf_spin_lock** is not allowed in inner maps of map-in-map. ++ * Return ++ * 0 ++ * ++ * int bpf_spin_unlock(struct bpf_spin_lock *lock) ++ * Description ++ * Release the *lock* previously locked by a call to ++ * **bpf_spin_lock**\ (\ *lock*\ ). ++ * Return ++ * 0 ++ * ++ * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk) ++ * Description ++ * This helper gets a **struct bpf_sock** pointer such ++ * that all the fields in this **bpf_sock** can be accessed. ++ * Return ++ * A **struct bpf_sock** pointer on success, or **NULL** in ++ * case of failure. ++ * ++ * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk) ++ * Description ++ * This helper gets a **struct bpf_tcp_sock** pointer from a ++ * **struct bpf_sock** pointer. 
++ * Return ++ * A **struct bpf_tcp_sock** pointer on success, or **NULL** in ++ * case of failure. ++ * ++ * int bpf_skb_ecn_set_ce(struct sk_buf *skb) ++ * Description ++ * Set ECN (Explicit Congestion Notification) field of IP header ++ * to **CE** (Congestion Encountered) if current value is **ECT** ++ * (ECN Capable Transport). Otherwise, do nothing. Works with IPv6 ++ * and IPv4. ++ * Return ++ * 1 if the **CE** flag is set (either by the current helper call ++ * or because it was already present), 0 if it is not set. ++ * ++ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk) ++ * Description ++ * Return a **struct bpf_sock** pointer in **TCP_LISTEN** state. ++ * **bpf_sk_release**\ () is unnecessary and not allowed. ++ * Return ++ * A **struct bpf_sock** pointer on success, or **NULL** in ++ * case of failure. ++ * ++ * struct bpf_sock *bpf_skc_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags) ++ * Description ++ * Look for TCP socket matching *tuple*, optionally in a child ++ * network namespace *netns*. The return value must be checked, ++ * and if non-**NULL**, released via **bpf_sk_release**\ (). ++ * ++ * This function is identical to **bpf_sk_lookup_tcp**\ (), except ++ * that it also returns timewait or request sockets. Use ++ * **bpf_sk_fullsock**\ () or **bpf_tcp_sock**\ () to access the ++ * full structure. ++ * ++ * This helper is available only if the kernel was compiled with ++ * **CONFIG_NET** configuration option. ++ * Return ++ * Pointer to **struct bpf_sock**, or **NULL** in case of failure. ++ * For sockets with reuseport option, the **struct bpf_sock** ++ * result is from *reuse*\ **->socks**\ [] using the hash of the ++ * tuple. ++ * ++ * int bpf_tcp_check_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) ++ * Description ++ * Check whether *iph* and *th* contain a valid SYN cookie ACK for ++ * the listening socket in *sk*. 
++ * ++ * *iph* points to the start of the IPv4 or IPv6 header, while ++ * *iph_len* contains **sizeof**\ (**struct iphdr**) or ++ * **sizeof**\ (**struct ip6hdr**). ++ * ++ * *th* points to the start of the TCP header, while *th_len* ++ * contains **sizeof**\ (**struct tcphdr**). ++ * ++ * Return ++ * 0 if *iph* and *th* are a valid SYN cookie ACK, or a negative ++ * error otherwise. ++ * ++ * int bpf_sysctl_get_name(struct bpf_sysctl *ctx, char *buf, size_t buf_len, u64 flags) ++ * Description ++ * Get name of sysctl in /proc/sys/ and copy it into provided by ++ * program buffer *buf* of size *buf_len*. ++ * ++ * The buffer is always NUL terminated, unless it's zero-sized. ++ * ++ * If *flags* is zero, full name (e.g. "net/ipv4/tcp_mem") is ++ * copied. Use **BPF_F_SYSCTL_BASE_NAME** flag to copy base name ++ * only (e.g. "tcp_mem"). ++ * Return ++ * Number of character copied (not including the trailing NUL). ++ * ++ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain ++ * truncated name in this case). ++ * ++ * int bpf_sysctl_get_current_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) ++ * Description ++ * Get current value of sysctl as it is presented in /proc/sys ++ * (incl. newline, etc), and copy it as a string into provided ++ * by program buffer *buf* of size *buf_len*. ++ * ++ * The whole value is copied, no matter what file position user ++ * space issued e.g. sys_read at. ++ * ++ * The buffer is always NUL terminated, unless it's zero-sized. ++ * Return ++ * Number of character copied (not including the trailing NUL). ++ * ++ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain ++ * truncated name in this case). ++ * ++ * **-EINVAL** if current value was unavailable, e.g. because ++ * sysctl is uninitialized and read returns -EIO for it. 
++ * ++ * int bpf_sysctl_get_new_value(struct bpf_sysctl *ctx, char *buf, size_t buf_len) ++ * Description ++ * Get new value being written by user space to sysctl (before ++ * the actual write happens) and copy it as a string into ++ * provided by program buffer *buf* of size *buf_len*. ++ * ++ * User space may write new value at file position > 0. ++ * ++ * The buffer is always NUL terminated, unless it's zero-sized. ++ * Return ++ * Number of character copied (not including the trailing NUL). ++ * ++ * **-E2BIG** if the buffer wasn't big enough (*buf* will contain ++ * truncated name in this case). ++ * ++ * **-EINVAL** if sysctl is being read. ++ * ++ * int bpf_sysctl_set_new_value(struct bpf_sysctl *ctx, const char *buf, size_t buf_len) ++ * Description ++ * Override new value being written by user space to sysctl with ++ * value provided by program in buffer *buf* of size *buf_len*. ++ * ++ * *buf* should contain a string in same form as provided by user ++ * space on sysctl write. ++ * ++ * User space may write new value at file position > 0. To override ++ * the whole sysctl value file position should be set to zero. ++ * Return ++ * 0 on success. ++ * ++ * **-E2BIG** if the *buf_len* is too big. ++ * ++ * **-EINVAL** if sysctl is being read. ++ * ++ * int bpf_strtol(const char *buf, size_t buf_len, u64 flags, long *res) ++ * Description ++ * Convert the initial part of the string from buffer *buf* of ++ * size *buf_len* to a long integer according to the given base ++ * and save the result in *res*. ++ * ++ * The string may begin with an arbitrary amount of white space ++ * (as determined by **isspace**\ (3)) followed by a single ++ * optional '**-**' sign. ++ * ++ * Five least significant bits of *flags* encode base, other bits ++ * are currently unused. ++ * ++ * Base must be either 8, 10, 16 or 0 to detect it automatically ++ * similar to user space **strtol**\ (3). ++ * Return ++ * Number of characters consumed on success. 
Must be positive but ++ * no more than *buf_len*. ++ * ++ * **-EINVAL** if no valid digits were found or unsupported base ++ * was provided. ++ * ++ * **-ERANGE** if resulting value was out of range. ++ * ++ * int bpf_strtoul(const char *buf, size_t buf_len, u64 flags, unsigned long *res) ++ * Description ++ * Convert the initial part of the string from buffer *buf* of ++ * size *buf_len* to an unsigned long integer according to the ++ * given base and save the result in *res*. ++ * ++ * The string may begin with an arbitrary amount of white space ++ * (as determined by **isspace**\ (3)). ++ * ++ * Five least significant bits of *flags* encode base, other bits ++ * are currently unused. ++ * ++ * Base must be either 8, 10, 16 or 0 to detect it automatically ++ * similar to user space **strtoul**\ (3). ++ * Return ++ * Number of characters consumed on success. Must be positive but ++ * no more than *buf_len*. ++ * ++ * **-EINVAL** if no valid digits were found or unsupported base ++ * was provided. ++ * ++ * **-ERANGE** if resulting value was out of range. ++ * ++ * void *bpf_sk_storage_get(struct bpf_map *map, struct bpf_sock *sk, void *value, u64 flags) ++ * Description ++ * Get a bpf-local-storage from a *sk*. ++ * ++ * Logically, it could be thought of getting the value from ++ * a *map* with *sk* as the **key**. From this ++ * perspective, the usage is not much different from ++ * **bpf_map_lookup_elem**\ (*map*, **&**\ *sk*) except this ++ * helper enforces the key must be a full socket and the map must ++ * be a **BPF_MAP_TYPE_SK_STORAGE** also. ++ * ++ * Underneath, the value is stored locally at *sk* instead of ++ * the *map*. The *map* is used as the bpf-local-storage ++ * "type". The bpf-local-storage "type" (i.e. the *map*) is ++ * searched against all bpf-local-storages residing at *sk*. ++ * ++ * An optional *flags* (**BPF_SK_STORAGE_GET_F_CREATE**) can be ++ * used such that a new bpf-local-storage will be ++ * created if one does not exist. 
*value* can be used ++ * together with **BPF_SK_STORAGE_GET_F_CREATE** to specify ++ * the initial value of a bpf-local-storage. If *value* is ++ * **NULL**, the new bpf-local-storage will be zero initialized. ++ * Return ++ * A bpf-local-storage pointer is returned on success. ++ * ++ * **NULL** if not found or there was an error in adding ++ * a new bpf-local-storage. ++ * ++ * int bpf_sk_storage_delete(struct bpf_map *map, struct bpf_sock *sk) ++ * Description ++ * Delete a bpf-local-storage from a *sk*. ++ * Return ++ * 0 on success. ++ * ++ * **-ENOENT** if the bpf-local-storage cannot be found. ++ * ++ * int bpf_send_signal(u32 sig) ++ * Description ++ * Send signal *sig* to the current task. ++ * Return ++ * 0 on success or successfully queued. ++ * ++ * **-EBUSY** if work queue under nmi is full. ++ * ++ * **-EINVAL** if *sig* is invalid. ++ * ++ * **-EPERM** if no permission to send the *sig*. ++ * ++ * **-EAGAIN** if bpf program can try again. ++ * ++ * s64 bpf_tcp_gen_syncookie(struct bpf_sock *sk, void *iph, u32 iph_len, struct tcphdr *th, u32 th_len) ++ * Description ++ * Try to issue a SYN cookie for the packet with corresponding ++ * IP/TCP headers, *iph* and *th*, on the listening socket in *sk*. ++ * ++ * *iph* points to the start of the IPv4 or IPv6 header, while ++ * *iph_len* contains **sizeof**\ (**struct iphdr**) or ++ * **sizeof**\ (**struct ip6hdr**). ++ * ++ * *th* points to the start of the TCP header, while *th_len* ++ * contains the length of the TCP header. ++ * ++ * Return ++ * On success, lower 32 bits hold the generated SYN cookie in ++ * followed by 16 bits which hold the MSS value for that cookie, ++ * and the top 16 bits are unused. 
++ * ++ * On failure, the returned value is one of the following: ++ * ++ * **-EINVAL** SYN cookie cannot be issued due to error ++ * ++ * **-ENOENT** SYN cookie should not be issued (no SYN flood) ++ * ++ * **-EOPNOTSUPP** kernel configuration does not enable SYN cookies ++ * ++ * **-EPROTONOSUPPORT** IP packet version is not 4 or 6 ++ */ ++#define __BPF_FUNC_MAPPER(FN) \ ++ FN(unspec), \ ++ FN(map_lookup_elem), \ ++ FN(map_update_elem), \ ++ FN(map_delete_elem), \ ++ FN(probe_read), \ ++ FN(ktime_get_ns), \ ++ FN(trace_printk), \ ++ FN(get_prandom_u32), \ ++ FN(get_smp_processor_id), \ ++ FN(skb_store_bytes), \ ++ FN(l3_csum_replace), \ ++ FN(l4_csum_replace), \ ++ FN(tail_call), \ ++ FN(clone_redirect), \ ++ FN(get_current_pid_tgid), \ ++ FN(get_current_uid_gid), \ ++ FN(get_current_comm), \ ++ FN(get_cgroup_classid), \ ++ FN(skb_vlan_push), \ ++ FN(skb_vlan_pop), \ ++ FN(skb_get_tunnel_key), \ ++ FN(skb_set_tunnel_key), \ ++ FN(perf_event_read), \ ++ FN(redirect), \ ++ FN(get_route_realm), \ ++ FN(perf_event_output), \ ++ FN(skb_load_bytes), \ ++ FN(get_stackid), \ ++ FN(csum_diff), \ ++ FN(skb_get_tunnel_opt), \ ++ FN(skb_set_tunnel_opt), \ ++ FN(skb_change_proto), \ ++ FN(skb_change_type), \ ++ FN(skb_under_cgroup), \ ++ FN(get_hash_recalc), \ ++ FN(get_current_task), \ ++ FN(probe_write_user), \ ++ FN(current_task_under_cgroup), \ ++ FN(skb_change_tail), \ ++ FN(skb_pull_data), \ ++ FN(csum_update), \ ++ FN(set_hash_invalid), \ ++ FN(get_numa_node_id), \ ++ FN(skb_change_head), \ ++ FN(xdp_adjust_head), \ ++ FN(probe_read_str), \ ++ FN(get_socket_cookie), \ ++ FN(get_socket_uid), \ ++ FN(set_hash), \ ++ FN(setsockopt), \ ++ FN(skb_adjust_room), \ ++ FN(redirect_map), \ ++ FN(sk_redirect_map), \ ++ FN(sock_map_update), \ ++ FN(xdp_adjust_meta), \ ++ FN(perf_event_read_value), \ ++ FN(perf_prog_read_value), \ ++ FN(getsockopt), \ ++ FN(override_return), \ ++ FN(sock_ops_cb_flags_set), \ ++ FN(msg_redirect_map), \ ++ FN(msg_apply_bytes), \ ++ 
FN(msg_cork_bytes), \ ++ FN(msg_pull_data), \ ++ FN(bind), \ ++ FN(xdp_adjust_tail), \ ++ FN(skb_get_xfrm_state), \ ++ FN(get_stack), \ ++ FN(skb_load_bytes_relative), \ ++ FN(fib_lookup), \ ++ FN(sock_hash_update), \ ++ FN(msg_redirect_hash), \ ++ FN(sk_redirect_hash), \ ++ FN(lwt_push_encap), \ ++ FN(lwt_seg6_store_bytes), \ ++ FN(lwt_seg6_adjust_srh), \ ++ FN(lwt_seg6_action), \ ++ FN(rc_repeat), \ ++ FN(rc_keydown), \ ++ FN(skb_cgroup_id), \ ++ FN(get_current_cgroup_id), \ ++ FN(get_local_storage), \ ++ FN(sk_select_reuseport), \ ++ FN(skb_ancestor_cgroup_id), \ ++ FN(sk_lookup_tcp), \ ++ FN(sk_lookup_udp), \ ++ FN(sk_release), \ ++ FN(map_push_elem), \ ++ FN(map_pop_elem), \ ++ FN(map_peek_elem), \ ++ FN(msg_push_data), \ ++ FN(msg_pop_data), \ ++ FN(rc_pointer_rel), \ ++ FN(spin_lock), \ ++ FN(spin_unlock), \ ++ FN(sk_fullsock), \ ++ FN(tcp_sock), \ ++ FN(skb_ecn_set_ce), \ ++ FN(get_listener_sock), \ ++ FN(skc_lookup_tcp), \ ++ FN(tcp_check_syncookie), \ ++ FN(sysctl_get_name), \ ++ FN(sysctl_get_current_value), \ ++ FN(sysctl_get_new_value), \ ++ FN(sysctl_set_new_value), \ ++ FN(strtol), \ ++ FN(strtoul), \ ++ FN(sk_storage_get), \ ++ FN(sk_storage_delete), \ ++ FN(send_signal), \ ++ FN(tcp_gen_syncookie), ++ + /* integer value in 'imm' field of BPF_CALL instruction selects which helper + * function eBPF program intends to call + */ ++#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x + enum bpf_func_id { +- BPF_FUNC_unspec, +- BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */ +- BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */ +- BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */ +- BPF_FUNC_probe_read, /* int bpf_probe_read(void *dst, int size, void *src) */ +- BPF_FUNC_ktime_get_ns, /* u64 bpf_ktime_get_ns(void) */ +- BPF_FUNC_trace_printk, /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) 
*/ +- BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */ +- BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */ +- +- /** +- * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet +- * @skb: pointer to skb +- * @offset: offset within packet from skb->mac_header +- * @from: pointer where to copy bytes from +- * @len: number of bytes to store into packet +- * @flags: bit 0 - if true, recompute skb->csum +- * other bits - reserved +- * Return: 0 on success +- */ +- BPF_FUNC_skb_store_bytes, +- +- /** +- * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum +- * @skb: pointer to skb +- * @offset: offset within packet where IP checksum is located +- * @from: old value of header field +- * @to: new value of header field +- * @flags: bits 0-3 - size of header field +- * other bits - reserved +- * Return: 0 on success +- */ +- BPF_FUNC_l3_csum_replace, +- +- /** +- * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum +- * @skb: pointer to skb +- * @offset: offset within packet where TCP/UDP checksum is located +- * @from: old value of header field +- * @to: new value of header field +- * @flags: bits 0-3 - size of header field +- * bit 4 - is pseudo header +- * other bits - reserved +- * Return: 0 on success +- */ +- BPF_FUNC_l4_csum_replace, ++ __BPF_FUNC_MAPPER(__BPF_ENUM_FN) ++ __BPF_FUNC_MAX_ID, ++}; ++#undef __BPF_ENUM_FN + +- /** +- * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program +- * @ctx: context pointer passed to next program +- * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY +- * @index: index inside array that selects specific program to run +- * Return: 0 on success +- */ +- BPF_FUNC_tail_call, ++/* All flags used by eBPF helper functions, placed here. 
*/ + +- /** +- * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev +- * @skb: pointer to skb +- * @ifindex: ifindex of the net device +- * @flags: bit 0 - if set, redirect to ingress instead of egress +- * other bits - reserved +- * Return: 0 on success +- */ +- BPF_FUNC_clone_redirect, ++/* BPF_FUNC_skb_store_bytes flags. */ ++#define BPF_F_RECOMPUTE_CSUM (1ULL << 0) ++#define BPF_F_INVALIDATE_HASH (1ULL << 1) + +- /** +- * u64 bpf_get_current_pid_tgid(void) +- * Return: current->tgid << 32 | current->pid +- */ +- BPF_FUNC_get_current_pid_tgid, ++/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags. ++ * First 4 bits are for passing the header field size. ++ */ ++#define BPF_F_HDR_FIELD_MASK 0xfULL + +- /** +- * u64 bpf_get_current_uid_gid(void) +- * Return: current_gid << 32 | current_uid +- */ +- BPF_FUNC_get_current_uid_gid, ++/* BPF_FUNC_l4_csum_replace flags. */ ++#define BPF_F_PSEUDO_HDR (1ULL << 4) ++#define BPF_F_MARK_MANGLED_0 (1ULL << 5) ++#define BPF_F_MARK_ENFORCE (1ULL << 6) ++ ++/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */ ++#define BPF_F_INGRESS (1ULL << 0) ++ ++/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ ++#define BPF_F_TUNINFO_IPV6 (1ULL << 0) ++ ++/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ ++#define BPF_F_SKIP_FIELD_MASK 0xffULL ++#define BPF_F_USER_STACK (1ULL << 8) ++/* flags used by BPF_FUNC_get_stackid only. */ ++#define BPF_F_FAST_STACK_CMP (1ULL << 9) ++#define BPF_F_REUSE_STACKID (1ULL << 10) ++/* flags used by BPF_FUNC_get_stack only. */ ++#define BPF_F_USER_BUILD_ID (1ULL << 11) ++ ++/* BPF_FUNC_skb_set_tunnel_key flags. 
*/ ++#define BPF_F_ZERO_CSUM_TX (1ULL << 1) ++#define BPF_F_DONT_FRAGMENT (1ULL << 2) ++#define BPF_F_SEQ_NUMBER (1ULL << 3) + +- /** +- * bpf_get_current_comm(char *buf, int size_of_buf) +- * stores current->comm into buf +- * Return: 0 on success +- */ +- BPF_FUNC_get_current_comm, +- +- /** +- * bpf_get_cgroup_classid(skb) - retrieve a proc's classid +- * @skb: pointer to skb +- * Return: classid if != 0 +- */ +- BPF_FUNC_get_cgroup_classid, +- BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */ +- BPF_FUNC_skb_vlan_pop, /* bpf_skb_vlan_pop(skb) */ +- +- /** +- * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags) +- * retrieve or populate tunnel metadata +- * @skb: pointer to skb +- * @key: pointer to 'struct bpf_tunnel_key' +- * @size: size of 'struct bpf_tunnel_key' +- * @flags: room for future extensions +- * Retrun: 0 on success +- */ +- BPF_FUNC_skb_get_tunnel_key, +- BPF_FUNC_skb_set_tunnel_key, +- BPF_FUNC_perf_event_read, /* u64 bpf_perf_event_read(&map, index) */ +- /** +- * bpf_redirect(ifindex, flags) - redirect to another netdev +- * @ifindex: ifindex of the net device +- * @flags: bit 0 - if set, redirect to ingress instead of egress +- * other bits - reserved +- * Return: TC_ACT_REDIRECT +- */ +- BPF_FUNC_redirect, ++/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and ++ * BPF_FUNC_perf_event_read_value flags. ++ */ ++#define BPF_F_INDEX_MASK 0xffffffffULL ++#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK ++/* BPF_FUNC_perf_event_output for sk_buff input context. */ ++#define BPF_F_CTXLEN_MASK (0xfffffULL << 32) ++ ++/* Current network namespace */ ++#define BPF_F_CURRENT_NETNS (-1L) ++ ++/* BPF_FUNC_skb_adjust_room flags. 
*/ ++#define BPF_F_ADJ_ROOM_FIXED_GSO (1ULL << 0) ++ ++#define BPF_ADJ_ROOM_ENCAP_L2_MASK 0xff ++#define BPF_ADJ_ROOM_ENCAP_L2_SHIFT 56 ++ ++#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 (1ULL << 1) ++#define BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 (1ULL << 2) ++#define BPF_F_ADJ_ROOM_ENCAP_L4_GRE (1ULL << 3) ++#define BPF_F_ADJ_ROOM_ENCAP_L4_UDP (1ULL << 4) ++#define BPF_F_ADJ_ROOM_ENCAP_L2(len) (((__u64)len & \ ++ BPF_ADJ_ROOM_ENCAP_L2_MASK) \ ++ << BPF_ADJ_ROOM_ENCAP_L2_SHIFT) ++ ++/* BPF_FUNC_sysctl_get_name flags. */ ++#define BPF_F_SYSCTL_BASE_NAME (1ULL << 0) ++ ++/* BPF_FUNC_sk_storage_get flags */ ++#define BPF_SK_STORAGE_GET_F_CREATE (1ULL << 0) ++ ++/* Mode for BPF_FUNC_skb_adjust_room helper. */ ++enum bpf_adj_room_mode { ++ BPF_ADJ_ROOM_NET, ++ BPF_ADJ_ROOM_MAC, ++}; + +- /** +- * bpf_get_route_realm(skb) - retrieve a dst's tclassid +- * @skb: pointer to skb +- * Return: realm if != 0 +- */ +- BPF_FUNC_get_route_realm, ++/* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ ++enum bpf_hdr_start_off { ++ BPF_HDR_START_MAC, ++ BPF_HDR_START_NET, ++}; + +- /** +- * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample +- * @ctx: struct pt_regs* +- * @map: pointer to perf_event_array map +- * @index: index of event in the map +- * @data: data on stack to be output as raw data +- * @size: size of data +- * Return: 0 on success +- */ +- BPF_FUNC_perf_event_output, +- __BPF_FUNC_MAX_ID, ++/* Encapsulation type for BPF_FUNC_lwt_push_encap helper. */ ++enum bpf_lwt_encap_mode { ++ BPF_LWT_ENCAP_SEG6, ++ BPF_LWT_ENCAP_SEG6_INLINE, ++ BPF_LWT_ENCAP_IP, + }; + ++#define __bpf_md_ptr(type, name) \ ++union { \ ++ type name; \ ++ __u64 :64; \ ++} __attribute__((aligned(8))) ++ + /* user accessible mirror of in-kernel sk_buff. 
+ * new fields can only be added to the end of this structure + */ +@@ -291,11 +2985,632 @@ struct __sk_buff { + __u32 cb[5]; + __u32 hash; + __u32 tc_classid; ++ __u32 data; ++ __u32 data_end; ++ __u32 napi_id; ++ ++ /* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */ ++ __u32 family; ++ __u32 remote_ip4; /* Stored in network byte order */ ++ __u32 local_ip4; /* Stored in network byte order */ ++ __u32 remote_ip6[4]; /* Stored in network byte order */ ++ __u32 local_ip6[4]; /* Stored in network byte order */ ++ __u32 remote_port; /* Stored in network byte order */ ++ __u32 local_port; /* stored in host byte order */ ++ /* ... here. */ ++ ++ __u32 data_meta; ++ __bpf_md_ptr(struct bpf_flow_keys *, flow_keys); ++ __u64 tstamp; ++ __u32 wire_len; ++ __u32 gso_segs; ++ __bpf_md_ptr(struct bpf_sock *, sk); + }; + + struct bpf_tunnel_key { + __u32 tunnel_id; +- __u32 remote_ipv4; ++ union { ++ __u32 remote_ipv4; ++ __u32 remote_ipv6[4]; ++ }; ++ __u8 tunnel_tos; ++ __u8 tunnel_ttl; ++ __u16 tunnel_ext; /* Padding, future use. */ ++ __u32 tunnel_label; ++}; ++ ++/* user accessible mirror of in-kernel xfrm_state. ++ * new fields can only be added to the end of this structure ++ */ ++struct bpf_xfrm_state { ++ __u32 reqid; ++ __u32 spi; /* Stored in network byte order */ ++ __u16 family; ++ __u16 ext; /* Padding, future use. */ ++ union { ++ __u32 remote_ipv4; /* Stored in network byte order */ ++ __u32 remote_ipv6[4]; /* Stored in network byte order */ ++ }; ++}; ++ ++/* Generic BPF return codes which all BPF program types may support. ++ * The values are binary compatible with their TC_ACT_* counter-part to ++ * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT ++ * programs. ++ * ++ * XDP is handled seprately, see XDP_*. ++ */ ++enum bpf_ret_code { ++ BPF_OK = 0, ++ /* 1 reserved */ ++ BPF_DROP = 2, ++ /* 3-6 reserved */ ++ BPF_REDIRECT = 7, ++ /* >127 are reserved for prog type specific return codes. 
++ * ++ * BPF_LWT_REROUTE: used by BPF_PROG_TYPE_LWT_IN and ++ * BPF_PROG_TYPE_LWT_XMIT to indicate that skb had been ++ * changed and should be routed based on its new L3 header. ++ * (This is an L3 redirect, as opposed to L2 redirect ++ * represented by BPF_REDIRECT above). ++ */ ++ BPF_LWT_REROUTE = 128, ++}; ++ ++struct bpf_sock { ++ __u32 bound_dev_if; ++ __u32 family; ++ __u32 type; ++ __u32 protocol; ++ __u32 mark; ++ __u32 priority; ++ /* IP address also allows 1 and 2 bytes access */ ++ __u32 src_ip4; ++ __u32 src_ip6[4]; ++ __u32 src_port; /* host byte order */ ++ __u32 dst_port; /* network byte order */ ++ __u32 dst_ip4; ++ __u32 dst_ip6[4]; ++ __u32 state; ++}; ++ ++struct bpf_tcp_sock { ++ __u32 snd_cwnd; /* Sending congestion window */ ++ __u32 srtt_us; /* smoothed round trip time << 3 in usecs */ ++ __u32 rtt_min; ++ __u32 snd_ssthresh; /* Slow start size threshold */ ++ __u32 rcv_nxt; /* What we want to receive next */ ++ __u32 snd_nxt; /* Next sequence we send */ ++ __u32 snd_una; /* First byte we want an ack for */ ++ __u32 mss_cache; /* Cached effective mss, not including SACKS */ ++ __u32 ecn_flags; /* ECN status bits. */ ++ __u32 rate_delivered; /* saved rate sample: packets delivered */ ++ __u32 rate_interval_us; /* saved rate sample: time elapsed */ ++ __u32 packets_out; /* Packets which are "in flight" */ ++ __u32 retrans_out; /* Retransmitted packets out */ ++ __u32 total_retrans; /* Total retransmits for entire connection */ ++ __u32 segs_in; /* RFC4898 tcpEStatsPerfSegsIn ++ * total number of segments in. ++ */ ++ __u32 data_segs_in; /* RFC4898 tcpEStatsPerfDataSegsIn ++ * total number of data segments in. ++ */ ++ __u32 segs_out; /* RFC4898 tcpEStatsPerfSegsOut ++ * The total number of segments sent. ++ */ ++ __u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut ++ * total number of data segments sent. 
++ */ ++ __u32 lost_out; /* Lost packets */ ++ __u32 sacked_out; /* SACK'd packets */ ++ __u64 bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived ++ * sum(delta(rcv_nxt)), or how many bytes ++ * were acked. ++ */ ++ __u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked ++ * sum(delta(snd_una)), or how many bytes ++ * were acked. ++ */ ++ __u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups ++ * total number of DSACK blocks received ++ */ ++ __u32 delivered; /* Total data packets delivered incl. rexmits */ ++ __u32 delivered_ce; /* Like the above but only ECE marked packets */ ++ __u32 icsk_retransmits; /* Number of unrecovered [RTO] timeouts */ ++}; ++ ++struct bpf_sock_tuple { ++ union { ++ struct { ++ __be32 saddr; ++ __be32 daddr; ++ __be16 sport; ++ __be16 dport; ++ } ipv4; ++ struct { ++ __be32 saddr[4]; ++ __be32 daddr[4]; ++ __be16 sport; ++ __be16 dport; ++ } ipv6; ++ }; ++}; ++ ++struct bpf_xdp_sock { ++ __u32 queue_id; ++}; ++ ++#define XDP_PACKET_HEADROOM 256 ++ ++/* User return codes for XDP prog type. ++ * A valid XDP program must return one of these defined values. All other ++ * return codes are reserved for future use. Unknown return codes will ++ * result in packet drops and a warning via bpf_warn_invalid_xdp_action(). 
++ */ ++enum xdp_action { ++ XDP_ABORTED = 0, ++ XDP_DROP, ++ XDP_PASS, ++ XDP_TX, ++ XDP_REDIRECT, ++}; ++ ++/* user accessible metadata for XDP packet hook ++ * new fields must be added to the end of this structure ++ */ ++struct xdp_md { ++ __u32 data; ++ __u32 data_end; ++ __u32 data_meta; ++ /* Below access go through struct xdp_rxq_info */ ++ __u32 ingress_ifindex; /* rxq->dev->ifindex */ ++ __u32 rx_queue_index; /* rxq->queue_index */ ++}; ++ ++enum sk_action { ++ SK_DROP = 0, ++ SK_PASS, ++}; ++ ++/* user accessible metadata for SK_MSG packet hook, new fields must ++ * be added to the end of this structure ++ */ ++struct sk_msg_md { ++ __bpf_md_ptr(void *, data); ++ __bpf_md_ptr(void *, data_end); ++ ++ __u32 family; ++ __u32 remote_ip4; /* Stored in network byte order */ ++ __u32 local_ip4; /* Stored in network byte order */ ++ __u32 remote_ip6[4]; /* Stored in network byte order */ ++ __u32 local_ip6[4]; /* Stored in network byte order */ ++ __u32 remote_port; /* Stored in network byte order */ ++ __u32 local_port; /* stored in host byte order */ ++ __u32 size; /* Total size of sk_msg */ ++}; ++ ++struct sk_reuseport_md { ++ /* ++ * Start of directly accessible data. It begins from ++ * the tcp/udp header. ++ */ ++ __bpf_md_ptr(void *, data); ++ /* End of directly accessible data */ ++ __bpf_md_ptr(void *, data_end); ++ /* ++ * Total length of packet (starting from the tcp/udp header). ++ * Note that the directly accessible bytes (data_end - data) ++ * could be less than this "len". Those bytes could be ++ * indirectly read by a helper "bpf_skb_load_bytes()". ++ */ ++ __u32 len; ++ /* ++ * Eth protocol in the mac header (network byte order). e.g. ++ * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD) ++ */ ++ __u32 eth_protocol; ++ __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */ ++ __u32 bind_inany; /* Is sock bound to an INANY address? 
*/ ++ __u32 hash; /* A hash of the packet 4 tuples */ ++}; ++ ++#define BPF_TAG_SIZE 8 ++ ++struct bpf_prog_info { ++ __u32 type; ++ __u32 id; ++ __u8 tag[BPF_TAG_SIZE]; ++ __u32 jited_prog_len; ++ __u32 xlated_prog_len; ++ __aligned_u64 jited_prog_insns; ++ __aligned_u64 xlated_prog_insns; ++ __u64 load_time; /* ns since boottime */ ++ __u32 created_by_uid; ++ __u32 nr_map_ids; ++ __aligned_u64 map_ids; ++ char name[BPF_OBJ_NAME_LEN]; ++ __u32 ifindex; ++ __u32 gpl_compatible:1; ++ __u32 :31; /* alignment pad */ ++ __u64 netns_dev; ++ __u64 netns_ino; ++ __u32 nr_jited_ksyms; ++ __u32 nr_jited_func_lens; ++ __aligned_u64 jited_ksyms; ++ __aligned_u64 jited_func_lens; ++ __u32 btf_id; ++ __u32 func_info_rec_size; ++ __aligned_u64 func_info; ++ __u32 nr_func_info; ++ __u32 nr_line_info; ++ __aligned_u64 line_info; ++ __aligned_u64 jited_line_info; ++ __u32 nr_jited_line_info; ++ __u32 line_info_rec_size; ++ __u32 jited_line_info_rec_size; ++ __u32 nr_prog_tags; ++ __aligned_u64 prog_tags; ++ __u64 run_time_ns; ++ __u64 run_cnt; ++} __attribute__((aligned(8))); ++ ++struct bpf_map_info { ++ __u32 type; ++ __u32 id; ++ __u32 key_size; ++ __u32 value_size; ++ __u32 max_entries; ++ __u32 map_flags; ++ char name[BPF_OBJ_NAME_LEN]; ++ __u32 ifindex; ++ __u32 :32; ++ __u64 netns_dev; ++ __u64 netns_ino; ++ __u32 btf_id; ++ __u32 btf_key_type_id; ++ __u32 btf_value_type_id; ++} __attribute__((aligned(8))); ++ ++struct bpf_btf_info { ++ __aligned_u64 btf; ++ __u32 btf_size; ++ __u32 id; ++} __attribute__((aligned(8))); ++ ++/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed ++ * by user and intended to be used by socket (e.g. to bind to, depends on ++ * attach attach type). ++ */ ++struct bpf_sock_addr { ++ __u32 user_family; /* Allows 4-byte read, but no write. */ ++ __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write. ++ * Stored in network byte order. ++ */ ++ __u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. 
++ * Stored in network byte order. ++ */ ++ __u32 user_port; /* Allows 4-byte read and write. ++ * Stored in network byte order ++ */ ++ __u32 family; /* Allows 4-byte read, but no write */ ++ __u32 type; /* Allows 4-byte read, but no write */ ++ __u32 protocol; /* Allows 4-byte read, but no write */ ++ __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write. ++ * Stored in network byte order. ++ */ ++ __u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write. ++ * Stored in network byte order. ++ */ ++ __bpf_md_ptr(struct bpf_sock *, sk); ++}; ++ ++/* User bpf_sock_ops struct to access socket values and specify request ops ++ * and their replies. ++ * Some of this fields are in network (bigendian) byte order and may need ++ * to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h). ++ * New fields can only be added at the end of this structure ++ */ ++struct bpf_sock_ops { ++ __u32 op; ++ union { ++ __u32 args[4]; /* Optionally passed to bpf program */ ++ __u32 reply; /* Returned by bpf program */ ++ __u32 replylong[4]; /* Optionally returned by bpf prog */ ++ }; ++ __u32 family; ++ __u32 remote_ip4; /* Stored in network byte order */ ++ __u32 local_ip4; /* Stored in network byte order */ ++ __u32 remote_ip6[4]; /* Stored in network byte order */ ++ __u32 local_ip6[4]; /* Stored in network byte order */ ++ __u32 remote_port; /* Stored in network byte order */ ++ __u32 local_port; /* stored in host byte order */ ++ __u32 is_fullsock; /* Some TCP fields are only valid if ++ * there is a full socket. If not, the ++ * fields read as zero. 
++ */ ++ __u32 snd_cwnd; ++ __u32 srtt_us; /* Averaged RTT << 3 in usecs */ ++ __u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */ ++ __u32 state; ++ __u32 rtt_min; ++ __u32 snd_ssthresh; ++ __u32 rcv_nxt; ++ __u32 snd_nxt; ++ __u32 snd_una; ++ __u32 mss_cache; ++ __u32 ecn_flags; ++ __u32 rate_delivered; ++ __u32 rate_interval_us; ++ __u32 packets_out; ++ __u32 retrans_out; ++ __u32 total_retrans; ++ __u32 segs_in; ++ __u32 data_segs_in; ++ __u32 segs_out; ++ __u32 data_segs_out; ++ __u32 lost_out; ++ __u32 sacked_out; ++ __u32 sk_txhash; ++ __u64 bytes_received; ++ __u64 bytes_acked; ++ __bpf_md_ptr(struct bpf_sock *, sk); ++}; ++ ++/* Definitions for bpf_sock_ops_cb_flags */ ++#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0) ++#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1) ++#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2) ++#define BPF_SOCK_OPS_RTT_CB_FLAG (1<<3) ++#define BPF_SOCK_OPS_ALL_CB_FLAGS 0xF /* Mask of all currently ++ * supported cb flags ++ */ ++ ++/* List of known BPF sock_ops operators. ++ * New entries can only be added at the end ++ */ ++enum { ++ BPF_SOCK_OPS_VOID, ++ BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or ++ * -1 if default value should be used ++ */ ++ BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized ++ * window (in packets) or -1 if default ++ * value should be used ++ */ ++ BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an ++ * active connection is initialized ++ */ ++ BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an ++ * active connection is ++ * established ++ */ ++ BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a ++ * passive connection is ++ * established ++ */ ++ BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control ++ * needs ECN ++ */ ++ BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is ++ * based on the path and may be ++ * dependent on the congestion control ++ * algorithm. In general it indicates ++ * a congestion threshold. 
RTTs above ++ * this indicate congestion ++ */ ++ BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered. ++ * Arg1: value of icsk_retransmits ++ * Arg2: value of icsk_rto ++ * Arg3: whether RTO has expired ++ */ ++ BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted. ++ * Arg1: sequence number of 1st byte ++ * Arg2: # segments ++ * Arg3: return value of ++ * tcp_transmit_skb (0 => success) ++ */ ++ BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state. ++ * Arg1: old_state ++ * Arg2: new_state ++ */ ++ BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after ++ * socket transition to LISTEN state. ++ */ ++ BPF_SOCK_OPS_RTT_CB, /* Called on every RTT. ++ */ ++}; ++ ++/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect ++ * changes between the TCP and BPF versions. Ideally this should never happen. ++ * If it does, we need to add code to convert them before calling ++ * the BPF sock_ops function. ++ */ ++enum { ++ BPF_TCP_ESTABLISHED = 1, ++ BPF_TCP_SYN_SENT, ++ BPF_TCP_SYN_RECV, ++ BPF_TCP_FIN_WAIT1, ++ BPF_TCP_FIN_WAIT2, ++ BPF_TCP_TIME_WAIT, ++ BPF_TCP_CLOSE, ++ BPF_TCP_CLOSE_WAIT, ++ BPF_TCP_LAST_ACK, ++ BPF_TCP_LISTEN, ++ BPF_TCP_CLOSING, /* Now a valid state */ ++ BPF_TCP_NEW_SYN_RECV, ++ ++ BPF_TCP_MAX_STATES /* Leave at the end! 
*/ ++}; ++ ++#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */ ++#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */ ++ ++struct bpf_perf_event_value { ++ __u64 counter; ++ __u64 enabled; ++ __u64 running; ++}; ++ ++#define BPF_DEVCG_ACC_MKNOD (1ULL << 0) ++#define BPF_DEVCG_ACC_READ (1ULL << 1) ++#define BPF_DEVCG_ACC_WRITE (1ULL << 2) ++ ++#define BPF_DEVCG_DEV_BLOCK (1ULL << 0) ++#define BPF_DEVCG_DEV_CHAR (1ULL << 1) ++ ++struct bpf_cgroup_dev_ctx { ++ /* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */ ++ __u32 access_type; ++ __u32 major; ++ __u32 minor; ++}; ++ ++struct bpf_raw_tracepoint_args { ++ __u64 args[0]; ++}; ++ ++/* DIRECT: Skip the FIB rules and go to FIB table associated with device ++ * OUTPUT: Do lookup from egress perspective; default is ingress ++ */ ++#define BPF_FIB_LOOKUP_DIRECT (1U << 0) ++#define BPF_FIB_LOOKUP_OUTPUT (1U << 1) ++ ++enum { ++ BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */ ++ BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */ ++ BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */ ++ BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */ ++ BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */ ++ BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */ ++ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */ ++ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */ ++ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */ ++}; ++ ++struct bpf_fib_lookup { ++ /* input: network family for lookup (AF_INET, AF_INET6) ++ * output: network family of egress nexthop ++ */ ++ __u8 family; ++ ++ /* set if lookup is to consider L4 data - e.g., FIB rules */ ++ __u8 l4_protocol; ++ __be16 sport; ++ __be16 dport; ++ ++ /* total length of packet from network header - used for MTU check */ ++ __u16 tot_len; ++ ++ /* input: L3 device index for lookup ++ * output: device index from FIB lookup ++ */ ++ __u32 
ifindex; ++ ++ union { ++ /* inputs to lookup */ ++ __u8 tos; /* AF_INET */ ++ __be32 flowinfo; /* AF_INET6, flow_label + priority */ ++ ++ /* output: metric of fib result (IPv4/IPv6 only) */ ++ __u32 rt_metric; ++ }; ++ ++ union { ++ __be32 ipv4_src; ++ __u32 ipv6_src[4]; /* in6_addr; network order */ ++ }; ++ ++ /* input to bpf_fib_lookup, ipv{4,6}_dst is destination address in ++ * network header. output: bpf_fib_lookup sets to gateway address ++ * if FIB lookup returns gateway route ++ */ ++ union { ++ __be32 ipv4_dst; ++ __u32 ipv6_dst[4]; /* in6_addr; network order */ ++ }; ++ ++ /* output */ ++ __be16 h_vlan_proto; ++ __be16 h_vlan_TCI; ++ __u8 smac[6]; /* ETH_ALEN */ ++ __u8 dmac[6]; /* ETH_ALEN */ ++}; ++ ++enum bpf_task_fd_type { ++ BPF_FD_TYPE_RAW_TRACEPOINT, /* tp name */ ++ BPF_FD_TYPE_TRACEPOINT, /* tp name */ ++ BPF_FD_TYPE_KPROBE, /* (symbol + offset) or addr */ ++ BPF_FD_TYPE_KRETPROBE, /* (symbol + offset) or addr */ ++ BPF_FD_TYPE_UPROBE, /* filename + offset */ ++ BPF_FD_TYPE_URETPROBE, /* filename + offset */ ++}; ++ ++#define BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG (1U << 0) ++#define BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL (1U << 1) ++#define BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP (1U << 2) ++ ++struct bpf_flow_keys { ++ __u16 nhoff; ++ __u16 thoff; ++ __u16 addr_proto; /* ETH_P_* of valid addrs */ ++ __u8 is_frag; ++ __u8 is_first_frag; ++ __u8 is_encap; ++ __u8 ip_proto; ++ __be16 n_proto; ++ __be16 sport; ++ __be16 dport; ++ union { ++ struct { ++ __be32 ipv4_src; ++ __be32 ipv4_dst; ++ }; ++ struct { ++ __u32 ipv6_src[4]; /* in6_addr; network order */ ++ __u32 ipv6_dst[4]; /* in6_addr; network order */ ++ }; ++ }; ++ __u32 flags; ++ __be32 flow_label; ++}; ++ ++struct bpf_func_info { ++ __u32 insn_off; ++ __u32 type_id; ++}; ++ ++#define BPF_LINE_INFO_LINE_NUM(line_col) ((line_col) >> 10) ++#define BPF_LINE_INFO_LINE_COL(line_col) ((line_col) & 0x3ff) ++ ++struct bpf_line_info { ++ __u32 insn_off; ++ __u32 file_name_off; ++ __u32 line_off; ++ 
__u32 line_col; ++}; ++ ++struct bpf_spin_lock { ++ __u32 val; ++}; ++ ++struct bpf_sysctl { ++ __u32 write; /* Sysctl is being read (= 0) or written (= 1). ++ * Allows 1,2,4-byte read, but no write. ++ */ ++ __u32 file_pos; /* Sysctl file position to read from, write to. ++ * Allows 1,2,4-byte read an 4-byte write. ++ */ ++}; ++ ++struct bpf_sockopt { ++ __bpf_md_ptr(struct bpf_sock *, sk); ++ __bpf_md_ptr(void *, optval); ++ __bpf_md_ptr(void *, optval_end); ++ ++ __s32 level; ++ __s32 optname; ++ __s32 optlen; ++ __s32 retval; + }; + + #endif /* _UAPI__LINUX_BPF_H__ */ +--- /dev/null ++++ b/include/uapi/linux/bpfilter.h +@@ -0,0 +1,21 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++#ifndef _UAPI_LINUX_BPFILTER_H ++#define _UAPI_LINUX_BPFILTER_H ++ ++#include ++ ++enum { ++ BPFILTER_IPT_SO_SET_REPLACE = 64, ++ BPFILTER_IPT_SO_SET_ADD_COUNTERS = 65, ++ BPFILTER_IPT_SET_MAX, ++}; ++ ++enum { ++ BPFILTER_IPT_SO_GET_INFO = 64, ++ BPFILTER_IPT_SO_GET_ENTRIES = 65, ++ BPFILTER_IPT_SO_GET_REVISION_MATCH = 66, ++ BPFILTER_IPT_SO_GET_REVISION_TARGET = 67, ++ BPFILTER_IPT_GET_MAX, ++}; ++ ++#endif /* _UAPI_LINUX_BPFILTER_H */ +--- /dev/null ++++ b/include/uapi/linux/bpf_perf_event.h +@@ -0,0 +1,19 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++/* Copyright (c) 2016 Facebook ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of version 2 of the GNU General Public ++ * License as published by the Free Software Foundation. 
++ */ ++#ifndef _UAPI__LINUX_BPF_PERF_EVENT_H__ ++#define _UAPI__LINUX_BPF_PERF_EVENT_H__ ++ ++#include ++ ++struct bpf_perf_event_data { ++ bpf_user_pt_regs_t regs; ++ __u64 sample_period; ++ __u64 addr; ++}; ++ ++#endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */ +--- /dev/null ++++ b/include/uapi/linux/btf.h +@@ -0,0 +1,165 @@ ++/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ ++/* Copyright (c) 2018 Facebook */ ++#ifndef _UAPI__LINUX_BTF_H__ ++#define _UAPI__LINUX_BTF_H__ ++ ++#include ++ ++#define BTF_MAGIC 0xeB9F ++#define BTF_VERSION 1 ++ ++struct btf_header { ++ __u16 magic; ++ __u8 version; ++ __u8 flags; ++ __u32 hdr_len; ++ ++ /* All offsets are in bytes relative to the end of this header */ ++ __u32 type_off; /* offset of type section */ ++ __u32 type_len; /* length of type section */ ++ __u32 str_off; /* offset of string section */ ++ __u32 str_len; /* length of string section */ ++}; ++ ++/* Max # of type identifier */ ++#define BTF_MAX_TYPE 0x000fffff ++/* Max offset into the string section */ ++#define BTF_MAX_NAME_OFFSET 0x00ffffff ++/* Max # of struct/union/enum members or func args */ ++#define BTF_MAX_VLEN 0xffff ++ ++struct btf_type { ++ __u32 name_off; ++ /* "info" bits arrangement ++ * bits 0-15: vlen (e.g. # of struct's members) ++ * bits 16-23: unused ++ * bits 24-27: kind (e.g. int, ptr, array...etc) ++ * bits 28-30: unused ++ * bit 31: kind_flag, currently used by ++ * struct, union and fwd ++ */ ++ __u32 info; ++ /* "size" is used by INT, ENUM, STRUCT, UNION and DATASEC. ++ * "size" tells the size of the type it is describing. ++ * ++ * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, ++ * FUNC, FUNC_PROTO and VAR. ++ * "type" is a type_id referring to another type. 
++ */ ++ union { ++ __u32 size; ++ __u32 type; ++ }; ++}; ++ ++#define BTF_INFO_KIND(info) (((info) >> 24) & 0x0f) ++#define BTF_INFO_VLEN(info) ((info) & 0xffff) ++#define BTF_INFO_KFLAG(info) ((info) >> 31) ++ ++#define BTF_KIND_UNKN 0 /* Unknown */ ++#define BTF_KIND_INT 1 /* Integer */ ++#define BTF_KIND_PTR 2 /* Pointer */ ++#define BTF_KIND_ARRAY 3 /* Array */ ++#define BTF_KIND_STRUCT 4 /* Struct */ ++#define BTF_KIND_UNION 5 /* Union */ ++#define BTF_KIND_ENUM 6 /* Enumeration */ ++#define BTF_KIND_FWD 7 /* Forward */ ++#define BTF_KIND_TYPEDEF 8 /* Typedef */ ++#define BTF_KIND_VOLATILE 9 /* Volatile */ ++#define BTF_KIND_CONST 10 /* Const */ ++#define BTF_KIND_RESTRICT 11 /* Restrict */ ++#define BTF_KIND_FUNC 12 /* Function */ ++#define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ ++#define BTF_KIND_VAR 14 /* Variable */ ++#define BTF_KIND_DATASEC 15 /* Section */ ++#define BTF_KIND_MAX BTF_KIND_DATASEC ++#define NR_BTF_KINDS (BTF_KIND_MAX + 1) ++ ++/* For some specific BTF_KIND, "struct btf_type" is immediately ++ * followed by extra data. ++ */ ++ ++/* BTF_KIND_INT is followed by a u32 and the following ++ * is the 32 bits arrangement: ++ */ ++#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) ++#define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16) ++#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) ++ ++/* Attributes stored in the BTF_INT_ENCODING */ ++#define BTF_INT_SIGNED (1 << 0) ++#define BTF_INT_CHAR (1 << 1) ++#define BTF_INT_BOOL (1 << 2) ++ ++/* BTF_KIND_ENUM is followed by multiple "struct btf_enum". ++ * The exact number of btf_enum is stored in the vlen (of the ++ * info in "struct btf_type"). ++ */ ++struct btf_enum { ++ __u32 name_off; ++ __s32 val; ++}; ++ ++/* BTF_KIND_ARRAY is followed by one "struct btf_array" */ ++struct btf_array { ++ __u32 type; ++ __u32 index_type; ++ __u32 nelems; ++}; ++ ++/* BTF_KIND_STRUCT and BTF_KIND_UNION are followed ++ * by multiple "struct btf_member". 
The exact number ++ * of btf_member is stored in the vlen (of the info in ++ * "struct btf_type"). ++ */ ++struct btf_member { ++ __u32 name_off; ++ __u32 type; ++ /* If the type info kind_flag is set, the btf_member offset ++ * contains both member bitfield size and bit offset. The ++ * bitfield size is set for bitfield members. If the type ++ * info kind_flag is not set, the offset contains only bit ++ * offset. ++ */ ++ __u32 offset; ++}; ++ ++/* If the struct/union type info kind_flag is set, the ++ * following two macros are used to access bitfield_size ++ * and bit_offset from btf_member.offset. ++ */ ++#define BTF_MEMBER_BITFIELD_SIZE(val) ((val) >> 24) ++#define BTF_MEMBER_BIT_OFFSET(val) ((val) & 0xffffff) ++ ++/* BTF_KIND_FUNC_PROTO is followed by multiple "struct btf_param". ++ * The exact number of btf_param is stored in the vlen (of the ++ * info in "struct btf_type"). ++ */ ++struct btf_param { ++ __u32 name_off; ++ __u32 type; ++}; ++ ++enum { ++ BTF_VAR_STATIC = 0, ++ BTF_VAR_GLOBAL_ALLOCATED, ++}; ++ ++/* BTF_KIND_VAR is followed by a single "struct btf_var" to describe ++ * additional information related to the variable such as its linkage. ++ */ ++struct btf_var { ++ __u32 linkage; ++}; ++ ++/* BTF_KIND_DATASEC is followed by multiple "struct btf_var_secinfo" ++ * to describe all BTF_KIND_VAR types it contains along with it's ++ * in-section offset as well as size. ++ */ ++struct btf_var_secinfo { ++ __u32 type; ++ __u32 offset; ++ __u32 size; ++}; ++ ++#endif /* _UAPI__LINUX_BTF_H__ */ +--- a/kernel/bpf/arraymap.c ++++ b/kernel/bpf/arraymap.c +@@ -1,78 +1,141 @@ ++// SPDX-License-Identifier: GPL-2.0-only + /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com +- * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of version 2 of the GNU General Public +- * License as published by the Free Software Foundation. 
+- * +- * This program is distributed in the hope that it will be useful, but +- * WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * General Public License for more details. ++ * Copyright (c) 2016,2017 Facebook + */ + #include ++#include + #include +-#include + #include + #include + #include + #include ++#include ++ ++#include "map_in_map.h" ++ ++#define ARRAY_CREATE_FLAG_MASK \ ++ (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK) ++ ++static void bpf_array_free_percpu(struct bpf_array *array) ++{ ++ int i; ++ ++ for (i = 0; i < array->map.max_entries; i++) { ++ free_percpu(array->pptrs[i]); ++ cond_resched(); ++ } ++} ++ ++static int bpf_array_alloc_percpu(struct bpf_array *array) ++{ ++ void __percpu *ptr; ++ int i; ++ ++ for (i = 0; i < array->map.max_entries; i++) { ++ ptr = __alloc_percpu_gfp(array->elem_size, 8, ++ GFP_USER | __GFP_NOWARN); ++ if (!ptr) { ++ bpf_array_free_percpu(array); ++ return -ENOMEM; ++ } ++ array->pptrs[i] = ptr; ++ cond_resched(); ++ } ++ ++ return 0; ++} + + /* Called from syscall */ +-static struct bpf_map *array_map_alloc(union bpf_attr *attr) ++int array_map_alloc_check(union bpf_attr *attr) + { +- struct bpf_array *array; +- u32 elem_size, array_size; +- u32 index_mask, max_entries; +- bool unpriv = !capable(CAP_SYS_ADMIN); ++ bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; ++ int numa_node = bpf_map_attr_numa_node(attr); + + /* check sanity of attributes */ + if (attr->max_entries == 0 || attr->key_size != 4 || +- attr->value_size == 0) +- return ERR_PTR(-EINVAL); ++ attr->value_size == 0 || ++ attr->map_flags & ~ARRAY_CREATE_FLAG_MASK || ++ !bpf_map_flags_access_ok(attr->map_flags) || ++ (percpu && numa_node != NUMA_NO_NODE)) ++ return -EINVAL; + +- if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1)) ++ if (attr->value_size > KMALLOC_MAX_SIZE) + /* if value_size is bigger, the user space won't be able to + * access the elements. 
+ */ +- return ERR_PTR(-E2BIG); ++ return -E2BIG; ++ ++ return 0; ++} ++ ++static struct bpf_map *array_map_alloc(union bpf_attr *attr) ++{ ++ bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY; ++ int ret, numa_node = bpf_map_attr_numa_node(attr); ++ u32 elem_size, index_mask, max_entries; ++ bool unpriv = !capable(CAP_SYS_ADMIN); ++ u64 cost, array_size, mask64; ++ struct bpf_map_memory mem; ++ struct bpf_array *array; + + elem_size = round_up(attr->value_size, 8); + + max_entries = attr->max_entries; +- index_mask = roundup_pow_of_two(max_entries) - 1; + +- if (unpriv) ++ /* On 32 bit archs roundup_pow_of_two() with max_entries that has ++ * upper most bit set in u32 space is undefined behavior due to ++ * resulting 1U << 32, so do it manually here in u64 space. ++ */ ++ mask64 = fls_long(max_entries - 1); ++ mask64 = 1ULL << mask64; ++ mask64 -= 1; ++ ++ index_mask = mask64; ++ if (unpriv) { + /* round up array size to nearest power of 2, + * since cpu will speculate within index_mask limits + */ + max_entries = index_mask + 1; ++ /* Check for overflows. 
*/ ++ if (max_entries < attr->max_entries) ++ return ERR_PTR(-E2BIG); ++ } + +- +- /* check round_up into zero and u32 overflow */ +- if (elem_size == 0 || +- attr->max_entries > (U32_MAX - PAGE_SIZE - sizeof(*array)) / elem_size) +- return ERR_PTR(-ENOMEM); +- +- array_size = sizeof(*array) + max_entries * elem_size; ++ array_size = sizeof(*array); ++ if (percpu) ++ array_size += (u64) max_entries * sizeof(void *); ++ else ++ array_size += (u64) max_entries * elem_size; ++ ++ /* make sure there is no u32 overflow later in round_up() */ ++ cost = array_size; ++ if (percpu) ++ cost += (u64)attr->max_entries * elem_size * num_possible_cpus(); ++ ++ ret = bpf_map_charge_init(&mem, cost); ++ if (ret < 0) ++ return ERR_PTR(ret); + + /* allocate all map elements and zero-initialize them */ +- array = kzalloc(array_size, GFP_USER | __GFP_NOWARN); ++ array = bpf_map_area_alloc(array_size, numa_node); + if (!array) { +- array = vzalloc(array_size); +- if (!array) +- return ERR_PTR(-ENOMEM); ++ bpf_map_charge_finish(&mem); ++ return ERR_PTR(-ENOMEM); + } +- + array->index_mask = index_mask; + array->map.unpriv_array = unpriv; + + /* copy mandatory map attributes */ +- array->map.key_size = attr->key_size; +- array->map.value_size = attr->value_size; +- array->map.max_entries = attr->max_entries; +- array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT; ++ bpf_map_init_from_attr(&array->map, attr); ++ bpf_map_charge_move(&array->map.memory, &mem); + array->elem_size = elem_size; + ++ if (percpu && bpf_array_alloc_percpu(array)) { ++ bpf_map_charge_finish(&array->map.memory); ++ bpf_map_area_free(array); ++ return ERR_PTR(-ENOMEM); ++ } ++ + return &array->map; + } + +@@ -82,17 +145,115 @@ static void *array_map_lookup_elem(struc + struct bpf_array *array = container_of(map, struct bpf_array, map); + u32 index = *(u32 *)key; + +- if (index >= array->map.max_entries) ++ if (unlikely(index >= array->map.max_entries)) + return NULL; + + return array->value + 
array->elem_size * (index & array->index_mask); + } + ++static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, ++ u32 off) ++{ ++ struct bpf_array *array = container_of(map, struct bpf_array, map); ++ ++ if (map->max_entries != 1) ++ return -ENOTSUPP; ++ if (off >= map->value_size) ++ return -EINVAL; ++ ++ *imm = (unsigned long)array->value; ++ return 0; ++} ++ ++static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm, ++ u32 *off) ++{ ++ struct bpf_array *array = container_of(map, struct bpf_array, map); ++ u64 base = (unsigned long)array->value; ++ u64 range = array->elem_size; ++ ++ if (map->max_entries != 1) ++ return -ENOTSUPP; ++ if (imm < base || imm >= base + range) ++ return -ENOENT; ++ ++ *off = imm - base; ++ return 0; ++} ++ ++/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */ ++static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) ++{ ++ struct bpf_array *array = container_of(map, struct bpf_array, map); ++ struct bpf_insn *insn = insn_buf; ++ u32 elem_size = round_up(map->value_size, 8); ++ const int ret = BPF_REG_0; ++ const int map_ptr = BPF_REG_1; ++ const int index = BPF_REG_2; ++ ++ *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); ++ *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); ++ if (map->unpriv_array) { ++ *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4); ++ *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); ++ } else { ++ *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3); ++ } ++ ++ if (is_power_of_2(elem_size)) { ++ *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); ++ } else { ++ *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size); ++ } ++ *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr); ++ *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); ++ *insn++ = BPF_MOV64_IMM(ret, 0); ++ return insn - insn_buf; ++} ++ ++/* Called from eBPF program */ ++static void *percpu_array_map_lookup_elem(struct bpf_map *map, void 
*key) ++{ ++ struct bpf_array *array = container_of(map, struct bpf_array, map); ++ u32 index = *(u32 *)key; ++ ++ if (unlikely(index >= array->map.max_entries)) ++ return NULL; ++ ++ return this_cpu_ptr(array->pptrs[index & array->index_mask]); ++} ++ ++int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value) ++{ ++ struct bpf_array *array = container_of(map, struct bpf_array, map); ++ u32 index = *(u32 *)key; ++ void __percpu *pptr; ++ int cpu, off = 0; ++ u32 size; ++ ++ if (unlikely(index >= array->map.max_entries)) ++ return -ENOENT; ++ ++ /* per_cpu areas are zero-filled and bpf programs can only ++ * access 'value_size' of them, so copying rounded areas ++ * will not leak any kernel data ++ */ ++ size = round_up(map->value_size, 8); ++ rcu_read_lock(); ++ pptr = array->pptrs[index & array->index_mask]; ++ for_each_possible_cpu(cpu) { ++ bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size); ++ off += size; ++ } ++ rcu_read_unlock(); ++ return 0; ++} ++ + /* Called from syscall */ + static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key) + { + struct bpf_array *array = container_of(map, struct bpf_array, map); +- u32 index = *(u32 *)key; ++ u32 index = key ? 
*(u32 *)key : U32_MAX; + u32 *next = (u32 *)next_key; + + if (index >= array->map.max_entries) { +@@ -113,22 +274,73 @@ static int array_map_update_elem(struct + { + struct bpf_array *array = container_of(map, struct bpf_array, map); + u32 index = *(u32 *)key; ++ char *val; + +- if (map_flags > BPF_EXIST) ++ if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST)) + /* unknown flags */ + return -EINVAL; + +- if (index >= array->map.max_entries) ++ if (unlikely(index >= array->map.max_entries)) ++ /* all elements were pre-allocated, cannot insert a new one */ ++ return -E2BIG; ++ ++ if (unlikely(map_flags & BPF_NOEXIST)) ++ /* all elements already exist */ ++ return -EEXIST; ++ ++ if (unlikely((map_flags & BPF_F_LOCK) && ++ !map_value_has_spin_lock(map))) ++ return -EINVAL; ++ ++ if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { ++ memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]), ++ value, map->value_size); ++ } else { ++ val = array->value + ++ array->elem_size * (index & array->index_mask); ++ if (map_flags & BPF_F_LOCK) ++ copy_map_value_locked(map, val, value, false); ++ else ++ copy_map_value(map, val, value); ++ } ++ return 0; ++} ++ ++int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, ++ u64 map_flags) ++{ ++ struct bpf_array *array = container_of(map, struct bpf_array, map); ++ u32 index = *(u32 *)key; ++ void __percpu *pptr; ++ int cpu, off = 0; ++ u32 size; ++ ++ if (unlikely(map_flags > BPF_EXIST)) ++ /* unknown flags */ ++ return -EINVAL; ++ ++ if (unlikely(index >= array->map.max_entries)) + /* all elements were pre-allocated, cannot insert a new one */ + return -E2BIG; + +- if (map_flags == BPF_NOEXIST) ++ if (unlikely(map_flags == BPF_NOEXIST)) + /* all elements already exist */ + return -EEXIST; + +- memcpy(array->value + +- array->elem_size * (index & array->index_mask), +- value, map->value_size); ++ /* the user space will provide round_up(value_size, 8) bytes that ++ * will be copied into per-cpu area. 
bpf programs can only access ++ * value_size of it. During lookup the same extra bytes will be ++ * returned or zeros which were zero-filled by percpu_alloc, ++ * so no kernel data leaks possible ++ */ ++ size = round_up(map->value_size, 8); ++ rcu_read_lock(); ++ pptr = array->pptrs[index & array->index_mask]; ++ for_each_possible_cpu(cpu) { ++ bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size); ++ off += size; ++ } ++ rcu_read_unlock(); + return 0; + } + +@@ -150,36 +362,124 @@ static void array_map_free(struct bpf_ma + */ + synchronize_rcu(); + +- kvfree(array); ++ if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) ++ bpf_array_free_percpu(array); ++ ++ bpf_map_area_free(array); + } + +-static const struct bpf_map_ops array_ops = { ++static void array_map_seq_show_elem(struct bpf_map *map, void *key, ++ struct seq_file *m) ++{ ++ void *value; ++ ++ rcu_read_lock(); ++ ++ value = array_map_lookup_elem(map, key); ++ if (!value) { ++ rcu_read_unlock(); ++ return; ++ } ++ ++ if (map->btf_key_type_id) ++ seq_printf(m, "%u: ", *(u32 *)key); ++ btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); ++ seq_puts(m, "\n"); ++ ++ rcu_read_unlock(); ++} ++ ++static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key, ++ struct seq_file *m) ++{ ++ struct bpf_array *array = container_of(map, struct bpf_array, map); ++ u32 index = *(u32 *)key; ++ void __percpu *pptr; ++ int cpu; ++ ++ rcu_read_lock(); ++ ++ seq_printf(m, "%u: {\n", *(u32 *)key); ++ pptr = array->pptrs[index & array->index_mask]; ++ for_each_possible_cpu(cpu) { ++ seq_printf(m, "\tcpu%d: ", cpu); ++ btf_type_seq_show(map->btf, map->btf_value_type_id, ++ per_cpu_ptr(pptr, cpu), m); ++ seq_puts(m, "\n"); ++ } ++ seq_puts(m, "}\n"); ++ ++ rcu_read_unlock(); ++} ++ ++static int array_map_check_btf(const struct bpf_map *map, ++ const struct btf *btf, ++ const struct btf_type *key_type, ++ const struct btf_type *value_type) ++{ ++ u32 int_data; ++ ++ /* One exception for keyless BTF: 
.bss/.data/.rodata map */ ++ if (btf_type_is_void(key_type)) { ++ if (map->map_type != BPF_MAP_TYPE_ARRAY || ++ map->max_entries != 1) ++ return -EINVAL; ++ ++ if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC) ++ return -EINVAL; ++ ++ return 0; ++ } ++ ++ if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT) ++ return -EINVAL; ++ ++ int_data = *(u32 *)(key_type + 1); ++ /* bpf array can only take a u32 key. This check makes sure ++ * that the btf matches the attr used during map_create. ++ */ ++ if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data)) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++const struct bpf_map_ops array_map_ops = { ++ .map_alloc_check = array_map_alloc_check, + .map_alloc = array_map_alloc, + .map_free = array_map_free, + .map_get_next_key = array_map_get_next_key, + .map_lookup_elem = array_map_lookup_elem, + .map_update_elem = array_map_update_elem, + .map_delete_elem = array_map_delete_elem, ++ .map_gen_lookup = array_map_gen_lookup, ++ .map_direct_value_addr = array_map_direct_value_addr, ++ .map_direct_value_meta = array_map_direct_value_meta, ++ .map_seq_show_elem = array_map_seq_show_elem, ++ .map_check_btf = array_map_check_btf, + }; + +-static struct bpf_map_type_list array_type __read_mostly = { +- .ops = &array_ops, +- .type = BPF_MAP_TYPE_ARRAY, ++const struct bpf_map_ops percpu_array_map_ops = { ++ .map_alloc_check = array_map_alloc_check, ++ .map_alloc = array_map_alloc, ++ .map_free = array_map_free, ++ .map_get_next_key = array_map_get_next_key, ++ .map_lookup_elem = percpu_array_map_lookup_elem, ++ .map_update_elem = array_map_update_elem, ++ .map_delete_elem = array_map_delete_elem, ++ .map_seq_show_elem = percpu_array_map_seq_show_elem, ++ .map_check_btf = array_map_check_btf, + }; + +-static int __init register_array_map(void) +-{ +- bpf_register_map_type(&array_type); +- return 0; +-} +-late_initcall(register_array_map); +- +-static struct bpf_map *fd_array_map_alloc(union bpf_attr *attr) ++static int 
fd_array_map_alloc_check(union bpf_attr *attr) + { + /* only file descriptors can be stored in this type of map */ + if (attr->value_size != sizeof(u32)) +- return ERR_PTR(-EINVAL); +- return array_map_alloc(attr); ++ return -EINVAL; ++ /* Program read-only/write-only not supported for special maps yet. */ ++ if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) ++ return -EINVAL; ++ return array_map_alloc_check(attr); + } + + static void fd_array_map_free(struct bpf_map *map) +@@ -192,17 +492,38 @@ static void fd_array_map_free(struct bpf + /* make sure it's empty */ + for (i = 0; i < array->map.max_entries; i++) + BUG_ON(array->ptrs[i] != NULL); +- kvfree(array); ++ ++ bpf_map_area_free(array); + } + + static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key) + { +- return NULL; ++ return ERR_PTR(-EOPNOTSUPP); ++} ++ ++/* only called from syscall */ ++int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) ++{ ++ void **elem, *ptr; ++ int ret = 0; ++ ++ if (!map->ops->map_fd_sys_lookup_elem) ++ return -ENOTSUPP; ++ ++ rcu_read_lock(); ++ elem = array_map_lookup_elem(map, key); ++ if (elem && (ptr = READ_ONCE(*elem))) ++ *value = map->ops->map_fd_sys_lookup_elem(ptr); ++ else ++ ret = -ENOENT; ++ rcu_read_unlock(); ++ ++ return ret; + } + + /* only called from syscall */ +-static int fd_array_map_update_elem(struct bpf_map *map, void *key, +- void *value, u64 map_flags) ++int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, ++ void *key, void *value, u64 map_flags) + { + struct bpf_array *array = container_of(map, struct bpf_array, map); + void *new_ptr, *old_ptr; +@@ -215,7 +536,7 @@ static int fd_array_map_update_elem(stru + return -E2BIG; + + ufd = *(u32 *)value; +- new_ptr = map->ops->map_fd_get_ptr(map, ufd); ++ new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); + if (IS_ERR(new_ptr)) + return PTR_ERR(new_ptr); + +@@ -244,10 +565,12 @@ static int fd_array_map_delete_elem(stru + } + } + 
+-static void *prog_fd_array_get_ptr(struct bpf_map *map, int fd) ++static void *prog_fd_array_get_ptr(struct bpf_map *map, ++ struct file *map_file, int fd) + { + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct bpf_prog *prog = bpf_prog_get(fd); ++ + if (IS_ERR(prog)) + return prog; + +@@ -255,18 +578,22 @@ static void *prog_fd_array_get_ptr(struc + bpf_prog_put(prog); + return ERR_PTR(-EINVAL); + } ++ + return prog; + } + + static void prog_fd_array_put_ptr(void *ptr) + { +- struct bpf_prog *prog = ptr; ++ bpf_prog_put(ptr); ++} + +- bpf_prog_put_rcu(prog); ++static u32 prog_fd_array_sys_lookup_elem(void *ptr) ++{ ++ return ((struct bpf_prog *)ptr)->aux->id; + } + + /* decrement refcnt of all bpf_progs that are stored in this map */ +-void bpf_fd_array_map_clear(struct bpf_map *map) ++static void bpf_fd_array_map_clear(struct bpf_map *map) + { + struct bpf_array *array = container_of(map, struct bpf_array, map); + int i; +@@ -275,91 +602,208 @@ void bpf_fd_array_map_clear(struct bpf_m + fd_array_map_delete_elem(map, &i); + } + +-static const struct bpf_map_ops prog_array_ops = { +- .map_alloc = fd_array_map_alloc, ++static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key, ++ struct seq_file *m) ++{ ++ void **elem, *ptr; ++ u32 prog_id; ++ ++ rcu_read_lock(); ++ ++ elem = array_map_lookup_elem(map, key); ++ if (elem) { ++ ptr = READ_ONCE(*elem); ++ if (ptr) { ++ seq_printf(m, "%u: ", *(u32 *)key); ++ prog_id = prog_fd_array_sys_lookup_elem(ptr); ++ btf_type_seq_show(map->btf, map->btf_value_type_id, ++ &prog_id, m); ++ seq_puts(m, "\n"); ++ } ++ } ++ ++ rcu_read_unlock(); ++} ++ ++const struct bpf_map_ops prog_array_map_ops = { ++ .map_alloc_check = fd_array_map_alloc_check, ++ .map_alloc = array_map_alloc, + .map_free = fd_array_map_free, + .map_get_next_key = array_map_get_next_key, + .map_lookup_elem = fd_array_map_lookup_elem, +- .map_update_elem = fd_array_map_update_elem, + .map_delete_elem = 
fd_array_map_delete_elem, + .map_fd_get_ptr = prog_fd_array_get_ptr, + .map_fd_put_ptr = prog_fd_array_put_ptr, ++ .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem, ++ .map_release_uref = bpf_fd_array_map_clear, ++ .map_seq_show_elem = prog_array_map_seq_show_elem, + }; + +-static struct bpf_map_type_list prog_array_type __read_mostly = { +- .ops = &prog_array_ops, +- .type = BPF_MAP_TYPE_PROG_ARRAY, +-}; ++static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file, ++ struct file *map_file) ++{ ++ struct bpf_event_entry *ee; ++ ++ ee = kzalloc(sizeof(*ee), GFP_ATOMIC); ++ if (ee) { ++ ee->event = perf_file->private_data; ++ ee->perf_file = perf_file; ++ ee->map_file = map_file; ++ } + +-static int __init register_prog_array_map(void) ++ return ee; ++} ++ ++static void __bpf_event_entry_free(struct rcu_head *rcu) + { +- bpf_register_map_type(&prog_array_type); +- return 0; ++ struct bpf_event_entry *ee; ++ ++ ee = container_of(rcu, struct bpf_event_entry, rcu); ++ fput(ee->perf_file); ++ kfree(ee); + } +-late_initcall(register_prog_array_map); + +-static void perf_event_array_map_free(struct bpf_map *map) ++static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee) + { +- bpf_fd_array_map_clear(map); +- fd_array_map_free(map); ++ call_rcu(&ee->rcu, __bpf_event_entry_free); + } + +-static void *perf_event_fd_array_get_ptr(struct bpf_map *map, int fd) ++static void *perf_event_fd_array_get_ptr(struct bpf_map *map, ++ struct file *map_file, int fd) + { ++ struct bpf_event_entry *ee; + struct perf_event *event; +- const struct perf_event_attr *attr; ++ struct file *perf_file; ++ u64 value; + + event = perf_event_get(fd); + if (IS_ERR(event)) + return event; + +- attr = perf_event_attrs(event); +- if (IS_ERR(attr)) +- goto err; +- +- if (attr->inherit) +- goto err; ++ value = perf_event_read_local(event); + +- if (attr->type == PERF_TYPE_RAW) +- return event; +- +- if (attr->type == PERF_TYPE_HARDWARE) +- return event; ++ ee = 
bpf_event_entry_gen(perf_file, map_file); ++ if (ee) ++ return ee; + +- if (attr->type == PERF_TYPE_SOFTWARE && +- attr->config == PERF_COUNT_SW_BPF_OUTPUT) +- return event; +-err: +- perf_event_release_kernel(event); +- return ERR_PTR(-EINVAL); ++ ee = ERR_PTR(-ENOMEM); ++ return ee; + } + + static void perf_event_fd_array_put_ptr(void *ptr) + { +- struct perf_event *event = ptr; ++ bpf_event_entry_free_rcu(ptr); ++} + +- perf_event_release_kernel(event); ++static void perf_event_fd_array_release(struct bpf_map *map, ++ struct file *map_file) ++{ ++ struct bpf_array *array = container_of(map, struct bpf_array, map); ++ struct bpf_event_entry *ee; ++ int i; ++ ++ rcu_read_lock(); ++ for (i = 0; i < array->map.max_entries; i++) { ++ ee = READ_ONCE(array->ptrs[i]); ++ if (ee && ee->map_file == map_file) ++ fd_array_map_delete_elem(map, &i); ++ } ++ rcu_read_unlock(); + } + +-static const struct bpf_map_ops perf_event_array_ops = { +- .map_alloc = fd_array_map_alloc, +- .map_free = perf_event_array_map_free, ++const struct bpf_map_ops perf_event_array_map_ops = { ++ .map_alloc_check = fd_array_map_alloc_check, ++ .map_alloc = array_map_alloc, ++ .map_free = fd_array_map_free, + .map_get_next_key = array_map_get_next_key, + .map_lookup_elem = fd_array_map_lookup_elem, +- .map_update_elem = fd_array_map_update_elem, + .map_delete_elem = fd_array_map_delete_elem, + .map_fd_get_ptr = perf_event_fd_array_get_ptr, + .map_fd_put_ptr = perf_event_fd_array_put_ptr, ++ .map_release = perf_event_fd_array_release, ++ .map_check_btf = map_check_no_btf, + }; + +-static struct bpf_map_type_list perf_event_array_type __read_mostly = { +- .ops = &perf_event_array_ops, +- .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY, +-}; ++static struct bpf_map *array_of_map_alloc(union bpf_attr *attr) ++{ ++ struct bpf_map *map, *inner_map_meta; ++ ++ inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); ++ if (IS_ERR(inner_map_meta)) ++ return inner_map_meta; ++ ++ map = array_map_alloc(attr); ++ if 
(IS_ERR(map)) { ++ bpf_map_meta_free(inner_map_meta); ++ return map; ++ } ++ ++ map->inner_map_meta = inner_map_meta; + +-static int __init register_perf_event_array_map(void) ++ return map; ++} ++ ++static void array_of_map_free(struct bpf_map *map) + { +- bpf_register_map_type(&perf_event_array_type); +- return 0; ++ /* map->inner_map_meta is only accessed by syscall which ++ * is protected by fdget/fdput. ++ */ ++ bpf_map_meta_free(map->inner_map_meta); ++ bpf_fd_array_map_clear(map); ++ fd_array_map_free(map); + } +-late_initcall(register_perf_event_array_map); ++ ++static void *array_of_map_lookup_elem(struct bpf_map *map, void *key) ++{ ++ struct bpf_map **inner_map = array_map_lookup_elem(map, key); ++ ++ if (!inner_map) ++ return NULL; ++ ++ return READ_ONCE(*inner_map); ++} ++ ++static u32 array_of_map_gen_lookup(struct bpf_map *map, ++ struct bpf_insn *insn_buf) ++{ ++ struct bpf_array *array = container_of(map, struct bpf_array, map); ++ u32 elem_size = round_up(map->value_size, 8); ++ struct bpf_insn *insn = insn_buf; ++ const int ret = BPF_REG_0; ++ const int map_ptr = BPF_REG_1; ++ const int index = BPF_REG_2; ++ ++ *insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value)); ++ *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); ++ if (map->unpriv_array) { ++ *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6); ++ *insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask); ++ } else { ++ *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); ++ } ++ if (is_power_of_2(elem_size)) ++ *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size)); ++ else ++ *insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size); ++ *insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr); ++ *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0); ++ *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); ++ *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); ++ *insn++ = BPF_MOV64_IMM(ret, 0); ++ ++ return insn - insn_buf; ++} ++ ++const struct bpf_map_ops array_of_maps_map_ops = { ++ .map_alloc_check = 
fd_array_map_alloc_check, ++ .map_alloc = array_of_map_alloc, ++ .map_free = array_of_map_free, ++ .map_get_next_key = array_map_get_next_key, ++ .map_lookup_elem = array_of_map_lookup_elem, ++ .map_delete_elem = fd_array_map_delete_elem, ++ .map_fd_get_ptr = bpf_map_fd_get_ptr, ++ .map_fd_put_ptr = bpf_map_fd_put_ptr, ++ .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, ++ .map_gen_lookup = array_of_map_gen_lookup, ++ .map_check_btf = map_check_no_btf, ++}; +--- /dev/null ++++ b/kernel/bpf/bpf_lru_list.c +@@ -0,0 +1,695 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2016 Facebook ++ */ ++#include ++#include ++#include ++ ++#include "bpf_lru_list.h" ++ ++#define LOCAL_FREE_TARGET (128) ++#define LOCAL_NR_SCANS LOCAL_FREE_TARGET ++ ++#define PERCPU_FREE_TARGET (4) ++#define PERCPU_NR_SCANS PERCPU_FREE_TARGET ++ ++/* Helpers to get the local list index */ ++#define LOCAL_LIST_IDX(t) ((t) - BPF_LOCAL_LIST_T_OFFSET) ++#define LOCAL_FREE_LIST_IDX LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_FREE) ++#define LOCAL_PENDING_LIST_IDX LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING) ++#define IS_LOCAL_LIST_TYPE(t) ((t) >= BPF_LOCAL_LIST_T_OFFSET) ++ ++static int get_next_cpu(int cpu) ++{ ++ cpu = cpumask_next(cpu, cpu_possible_mask); ++ if (cpu >= nr_cpu_ids) ++ cpu = cpumask_first(cpu_possible_mask); ++ return cpu; ++} ++ ++/* Local list helpers */ ++static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l) ++{ ++ return &loc_l->lists[LOCAL_FREE_LIST_IDX]; ++} ++ ++static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l) ++{ ++ return &loc_l->lists[LOCAL_PENDING_LIST_IDX]; ++} ++ ++/* bpf_lru_node helpers */ ++static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node) ++{ ++ return node->ref; ++} ++ ++static void bpf_lru_list_count_inc(struct bpf_lru_list *l, ++ enum bpf_lru_list_type type) ++{ ++ if (type < NR_BPF_LRU_LIST_COUNT) ++ l->counts[type]++; ++} ++ ++static void bpf_lru_list_count_dec(struct bpf_lru_list *l, ++ 
enum bpf_lru_list_type type) ++{ ++ if (type < NR_BPF_LRU_LIST_COUNT) ++ l->counts[type]--; ++} ++ ++static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l, ++ struct bpf_lru_node *node, ++ struct list_head *free_list, ++ enum bpf_lru_list_type tgt_free_type) ++{ ++ if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type))) ++ return; ++ ++ /* If the removing node is the next_inactive_rotation candidate, ++ * move the next_inactive_rotation pointer also. ++ */ ++ if (&node->list == l->next_inactive_rotation) ++ l->next_inactive_rotation = l->next_inactive_rotation->prev; ++ ++ bpf_lru_list_count_dec(l, node->type); ++ ++ node->type = tgt_free_type; ++ list_move(&node->list, free_list); ++} ++ ++/* Move nodes from local list to the LRU list */ ++static void __bpf_lru_node_move_in(struct bpf_lru_list *l, ++ struct bpf_lru_node *node, ++ enum bpf_lru_list_type tgt_type) ++{ ++ if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) || ++ WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type))) ++ return; ++ ++ bpf_lru_list_count_inc(l, tgt_type); ++ node->type = tgt_type; ++ node->ref = 0; ++ list_move(&node->list, &l->lists[tgt_type]); ++} ++ ++/* Move nodes between or within active and inactive list (like ++ * active to inactive, inactive to active or tail of active back to ++ * the head of active). ++ */ ++static void __bpf_lru_node_move(struct bpf_lru_list *l, ++ struct bpf_lru_node *node, ++ enum bpf_lru_list_type tgt_type) ++{ ++ if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) || ++ WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type))) ++ return; ++ ++ if (node->type != tgt_type) { ++ bpf_lru_list_count_dec(l, node->type); ++ bpf_lru_list_count_inc(l, tgt_type); ++ node->type = tgt_type; ++ } ++ node->ref = 0; ++ ++ /* If the moving node is the next_inactive_rotation candidate, ++ * move the next_inactive_rotation pointer also. 
++ */ ++ if (&node->list == l->next_inactive_rotation) ++ l->next_inactive_rotation = l->next_inactive_rotation->prev; ++ ++ list_move(&node->list, &l->lists[tgt_type]); ++} ++ ++static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l) ++{ ++ return l->counts[BPF_LRU_LIST_T_INACTIVE] < ++ l->counts[BPF_LRU_LIST_T_ACTIVE]; ++} ++ ++/* Rotate the active list: ++ * 1. Start from tail ++ * 2. If the node has the ref bit set, it will be rotated ++ * back to the head of active list with the ref bit cleared. ++ * Give this node one more chance to survive in the active list. ++ * 3. If the ref bit is not set, move it to the head of the ++ * inactive list. ++ * 4. It will at most scan nr_scans nodes ++ */ ++static void __bpf_lru_list_rotate_active(struct bpf_lru *lru, ++ struct bpf_lru_list *l) ++{ ++ struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE]; ++ struct bpf_lru_node *node, *tmp_node, *first_node; ++ unsigned int i = 0; ++ ++ first_node = list_first_entry(active, struct bpf_lru_node, list); ++ list_for_each_entry_safe_reverse(node, tmp_node, active, list) { ++ if (bpf_lru_node_is_ref(node)) ++ __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); ++ else ++ __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE); ++ ++ if (++i == lru->nr_scans || node == first_node) ++ break; ++ } ++} ++ ++/* Rotate the inactive list. It starts from the next_inactive_rotation ++ * 1. If the node has ref bit set, it will be moved to the head ++ * of active list with the ref bit cleared. ++ * 2. If the node does not have ref bit set, it will leave it ++ * at its current location (i.e. do nothing) so that it can ++ * be considered during the next inactive_shrink. ++ * 3. 
It will at most scan nr_scans nodes ++ */ ++static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru, ++ struct bpf_lru_list *l) ++{ ++ struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE]; ++ struct list_head *cur, *last, *next = inactive; ++ struct bpf_lru_node *node; ++ unsigned int i = 0; ++ ++ if (list_empty(inactive)) ++ return; ++ ++ last = l->next_inactive_rotation->next; ++ if (last == inactive) ++ last = last->next; ++ ++ cur = l->next_inactive_rotation; ++ while (i < lru->nr_scans) { ++ if (cur == inactive) { ++ cur = cur->prev; ++ continue; ++ } ++ ++ node = list_entry(cur, struct bpf_lru_node, list); ++ next = cur->prev; ++ if (bpf_lru_node_is_ref(node)) ++ __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); ++ if (cur == last) ++ break; ++ cur = next; ++ i++; ++ } ++ ++ l->next_inactive_rotation = next; ++} ++ ++/* Shrink the inactive list. It starts from the tail of the ++ * inactive list and only move the nodes without the ref bit ++ * set to the designated free list. ++ */ ++static unsigned int ++__bpf_lru_list_shrink_inactive(struct bpf_lru *lru, ++ struct bpf_lru_list *l, ++ unsigned int tgt_nshrink, ++ struct list_head *free_list, ++ enum bpf_lru_list_type tgt_free_type) ++{ ++ struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE]; ++ struct bpf_lru_node *node, *tmp_node; ++ unsigned int nshrinked = 0; ++ unsigned int i = 0; ++ ++ list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) { ++ if (bpf_lru_node_is_ref(node)) { ++ __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE); ++ } else if (lru->del_from_htab(lru->del_arg, node)) { ++ __bpf_lru_node_move_to_free(l, node, free_list, ++ tgt_free_type); ++ if (++nshrinked == tgt_nshrink) ++ break; ++ } ++ ++ if (++i == lru->nr_scans) ++ break; ++ } ++ ++ return nshrinked; ++} ++ ++/* 1. Rotate the active list (if needed) ++ * 2. 
Always rotate the inactive list ++ */ ++static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l) ++{ ++ if (bpf_lru_list_inactive_low(l)) ++ __bpf_lru_list_rotate_active(lru, l); ++ ++ __bpf_lru_list_rotate_inactive(lru, l); ++} ++ ++/* Calls __bpf_lru_list_shrink_inactive() to shrink some ++ * ref-bit-cleared nodes and move them to the designated ++ * free list. ++ * ++ * If it cannot get a free node after calling ++ * __bpf_lru_list_shrink_inactive(). It will just remove ++ * one node from either inactive or active list without ++ * honoring the ref-bit. It prefers inactive list to active ++ * list in this situation. ++ */ ++static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru, ++ struct bpf_lru_list *l, ++ unsigned int tgt_nshrink, ++ struct list_head *free_list, ++ enum bpf_lru_list_type tgt_free_type) ++ ++{ ++ struct bpf_lru_node *node, *tmp_node; ++ struct list_head *force_shrink_list; ++ unsigned int nshrinked; ++ ++ nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink, ++ free_list, tgt_free_type); ++ if (nshrinked) ++ return nshrinked; ++ ++ /* Do a force shrink by ignoring the reference bit */ ++ if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE])) ++ force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE]; ++ else ++ force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE]; ++ ++ list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list, ++ list) { ++ if (lru->del_from_htab(lru->del_arg, node)) { ++ __bpf_lru_node_move_to_free(l, node, free_list, ++ tgt_free_type); ++ return 1; ++ } ++ } ++ ++ return 0; ++} ++ ++/* Flush the nodes from the local pending list to the LRU list */ ++static void __local_list_flush(struct bpf_lru_list *l, ++ struct bpf_lru_locallist *loc_l) ++{ ++ struct bpf_lru_node *node, *tmp_node; ++ ++ list_for_each_entry_safe_reverse(node, tmp_node, ++ local_pending_list(loc_l), list) { ++ if (bpf_lru_node_is_ref(node)) ++ __bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE); ++ else ++ 
__bpf_lru_node_move_in(l, node, ++ BPF_LRU_LIST_T_INACTIVE); ++ } ++} ++ ++static void bpf_lru_list_push_free(struct bpf_lru_list *l, ++ struct bpf_lru_node *node) ++{ ++ unsigned long flags; ++ ++ if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type))) ++ return; ++ ++ raw_spin_lock_irqsave(&l->lock, flags); ++ __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE); ++ raw_spin_unlock_irqrestore(&l->lock, flags); ++} ++ ++static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru, ++ struct bpf_lru_locallist *loc_l) ++{ ++ struct bpf_lru_list *l = &lru->common_lru.lru_list; ++ struct bpf_lru_node *node, *tmp_node; ++ unsigned int nfree = 0; ++ ++ raw_spin_lock(&l->lock); ++ ++ __local_list_flush(l, loc_l); ++ ++ __bpf_lru_list_rotate(lru, l); ++ ++ list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE], ++ list) { ++ __bpf_lru_node_move_to_free(l, node, local_free_list(loc_l), ++ BPF_LRU_LOCAL_LIST_T_FREE); ++ if (++nfree == LOCAL_FREE_TARGET) ++ break; ++ } ++ ++ if (nfree < LOCAL_FREE_TARGET) ++ __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree, ++ local_free_list(loc_l), ++ BPF_LRU_LOCAL_LIST_T_FREE); ++ ++ raw_spin_unlock(&l->lock); ++} ++ ++static void __local_list_add_pending(struct bpf_lru *lru, ++ struct bpf_lru_locallist *loc_l, ++ int cpu, ++ struct bpf_lru_node *node, ++ u32 hash) ++{ ++ *(u32 *)((void *)node + lru->hash_offset) = hash; ++ node->cpu = cpu; ++ node->type = BPF_LRU_LOCAL_LIST_T_PENDING; ++ node->ref = 0; ++ list_add(&node->list, local_pending_list(loc_l)); ++} ++ ++static struct bpf_lru_node * ++__local_list_pop_free(struct bpf_lru_locallist *loc_l) ++{ ++ struct bpf_lru_node *node; ++ ++ node = list_first_entry_or_null(local_free_list(loc_l), ++ struct bpf_lru_node, ++ list); ++ if (node) ++ list_del(&node->list); ++ ++ return node; ++} ++ ++static struct bpf_lru_node * ++__local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l) ++{ ++ struct bpf_lru_node *node; ++ bool force = false; ++ ++ignore_ref: 
++ /* Get from the tail (i.e. older element) of the pending list. */ ++ list_for_each_entry_reverse(node, local_pending_list(loc_l), ++ list) { ++ if ((!bpf_lru_node_is_ref(node) || force) && ++ lru->del_from_htab(lru->del_arg, node)) { ++ list_del(&node->list); ++ return node; ++ } ++ } ++ ++ if (!force) { ++ force = true; ++ goto ignore_ref; ++ } ++ ++ return NULL; ++} ++ ++static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru, ++ u32 hash) ++{ ++ struct list_head *free_list; ++ struct bpf_lru_node *node = NULL; ++ struct bpf_lru_list *l; ++ unsigned long flags; ++ int cpu = raw_smp_processor_id(); ++ ++ l = per_cpu_ptr(lru->percpu_lru, cpu); ++ ++ raw_spin_lock_irqsave(&l->lock, flags); ++ ++ __bpf_lru_list_rotate(lru, l); ++ ++ free_list = &l->lists[BPF_LRU_LIST_T_FREE]; ++ if (list_empty(free_list)) ++ __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list, ++ BPF_LRU_LIST_T_FREE); ++ ++ if (!list_empty(free_list)) { ++ node = list_first_entry(free_list, struct bpf_lru_node, list); ++ *(u32 *)((void *)node + lru->hash_offset) = hash; ++ node->ref = 0; ++ __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE); ++ } ++ ++ raw_spin_unlock_irqrestore(&l->lock, flags); ++ ++ return node; ++} ++ ++static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru, ++ u32 hash) ++{ ++ struct bpf_lru_locallist *loc_l, *steal_loc_l; ++ struct bpf_common_lru *clru = &lru->common_lru; ++ struct bpf_lru_node *node; ++ int steal, first_steal; ++ unsigned long flags; ++ int cpu = raw_smp_processor_id(); ++ ++ loc_l = per_cpu_ptr(clru->local_list, cpu); ++ ++ raw_spin_lock_irqsave(&loc_l->lock, flags); ++ ++ node = __local_list_pop_free(loc_l); ++ if (!node) { ++ bpf_lru_list_pop_free_to_local(lru, loc_l); ++ node = __local_list_pop_free(loc_l); ++ } ++ ++ if (node) ++ __local_list_add_pending(lru, loc_l, cpu, node, hash); ++ ++ raw_spin_unlock_irqrestore(&loc_l->lock, flags); ++ ++ if (node) ++ return node; ++ ++ /* No free nodes found from the 
local free list and ++ * the global LRU list. ++ * ++ * Steal from the local free/pending list of the ++ * current CPU and remote CPU in RR. It starts ++ * with the loc_l->next_steal CPU. ++ */ ++ ++ first_steal = loc_l->next_steal; ++ steal = first_steal; ++ do { ++ steal_loc_l = per_cpu_ptr(clru->local_list, steal); ++ ++ raw_spin_lock_irqsave(&steal_loc_l->lock, flags); ++ ++ node = __local_list_pop_free(steal_loc_l); ++ if (!node) ++ node = __local_list_pop_pending(lru, steal_loc_l); ++ ++ raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags); ++ ++ steal = get_next_cpu(steal); ++ } while (!node && steal != first_steal); ++ ++ loc_l->next_steal = steal; ++ ++ if (node) { ++ raw_spin_lock_irqsave(&loc_l->lock, flags); ++ __local_list_add_pending(lru, loc_l, cpu, node, hash); ++ raw_spin_unlock_irqrestore(&loc_l->lock, flags); ++ } ++ ++ return node; ++} ++ ++struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash) ++{ ++ if (lru->percpu) ++ return bpf_percpu_lru_pop_free(lru, hash); ++ else ++ return bpf_common_lru_pop_free(lru, hash); ++} ++ ++static void bpf_common_lru_push_free(struct bpf_lru *lru, ++ struct bpf_lru_node *node) ++{ ++ u8 node_type = READ_ONCE(node->type); ++ unsigned long flags; ++ ++ if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) || ++ WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE)) ++ return; ++ ++ if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) { ++ struct bpf_lru_locallist *loc_l; ++ ++ loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu); ++ ++ raw_spin_lock_irqsave(&loc_l->lock, flags); ++ ++ if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) { ++ raw_spin_unlock_irqrestore(&loc_l->lock, flags); ++ goto check_lru_list; ++ } ++ ++ node->type = BPF_LRU_LOCAL_LIST_T_FREE; ++ node->ref = 0; ++ list_move(&node->list, local_free_list(loc_l)); ++ ++ raw_spin_unlock_irqrestore(&loc_l->lock, flags); ++ return; ++ } ++ ++check_lru_list: ++ bpf_lru_list_push_free(&lru->common_lru.lru_list, node); ++} ++ ++static 
void bpf_percpu_lru_push_free(struct bpf_lru *lru, ++ struct bpf_lru_node *node) ++{ ++ struct bpf_lru_list *l; ++ unsigned long flags; ++ ++ l = per_cpu_ptr(lru->percpu_lru, node->cpu); ++ ++ raw_spin_lock_irqsave(&l->lock, flags); ++ ++ __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE); ++ ++ raw_spin_unlock_irqrestore(&l->lock, flags); ++} ++ ++void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node) ++{ ++ if (lru->percpu) ++ bpf_percpu_lru_push_free(lru, node); ++ else ++ bpf_common_lru_push_free(lru, node); ++} ++ ++static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf, ++ u32 node_offset, u32 elem_size, ++ u32 nr_elems) ++{ ++ struct bpf_lru_list *l = &lru->common_lru.lru_list; ++ u32 i; ++ ++ for (i = 0; i < nr_elems; i++) { ++ struct bpf_lru_node *node; ++ ++ node = (struct bpf_lru_node *)(buf + node_offset); ++ node->type = BPF_LRU_LIST_T_FREE; ++ node->ref = 0; ++ list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); ++ buf += elem_size; ++ } ++} ++ ++static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf, ++ u32 node_offset, u32 elem_size, ++ u32 nr_elems) ++{ ++ u32 i, pcpu_entries; ++ int cpu; ++ struct bpf_lru_list *l; ++ ++ pcpu_entries = nr_elems / num_possible_cpus(); ++ ++ i = 0; ++ ++ for_each_possible_cpu(cpu) { ++ struct bpf_lru_node *node; ++ ++ l = per_cpu_ptr(lru->percpu_lru, cpu); ++again: ++ node = (struct bpf_lru_node *)(buf + node_offset); ++ node->cpu = cpu; ++ node->type = BPF_LRU_LIST_T_FREE; ++ node->ref = 0; ++ list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]); ++ i++; ++ buf += elem_size; ++ if (i == nr_elems) ++ break; ++ if (i % pcpu_entries) ++ goto again; ++ } ++} ++ ++void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset, ++ u32 elem_size, u32 nr_elems) ++{ ++ if (lru->percpu) ++ bpf_percpu_lru_populate(lru, buf, node_offset, elem_size, ++ nr_elems); ++ else ++ bpf_common_lru_populate(lru, buf, node_offset, elem_size, ++ nr_elems); ++} ++ ++static void 
bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu) ++{ ++ int i; ++ ++ for (i = 0; i < NR_BPF_LRU_LOCAL_LIST_T; i++) ++ INIT_LIST_HEAD(&loc_l->lists[i]); ++ ++ loc_l->next_steal = cpu; ++ ++ raw_spin_lock_init(&loc_l->lock); ++} ++ ++static void bpf_lru_list_init(struct bpf_lru_list *l) ++{ ++ int i; ++ ++ for (i = 0; i < NR_BPF_LRU_LIST_T; i++) ++ INIT_LIST_HEAD(&l->lists[i]); ++ ++ for (i = 0; i < NR_BPF_LRU_LIST_COUNT; i++) ++ l->counts[i] = 0; ++ ++ l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE]; ++ ++ raw_spin_lock_init(&l->lock); ++} ++ ++int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset, ++ del_from_htab_func del_from_htab, void *del_arg) ++{ ++ int cpu; ++ ++ if (percpu) { ++ lru->percpu_lru = alloc_percpu(struct bpf_lru_list); ++ if (!lru->percpu_lru) ++ return -ENOMEM; ++ ++ for_each_possible_cpu(cpu) { ++ struct bpf_lru_list *l; ++ ++ l = per_cpu_ptr(lru->percpu_lru, cpu); ++ bpf_lru_list_init(l); ++ } ++ lru->nr_scans = PERCPU_NR_SCANS; ++ } else { ++ struct bpf_common_lru *clru = &lru->common_lru; ++ ++ clru->local_list = alloc_percpu(struct bpf_lru_locallist); ++ if (!clru->local_list) ++ return -ENOMEM; ++ ++ for_each_possible_cpu(cpu) { ++ struct bpf_lru_locallist *loc_l; ++ ++ loc_l = per_cpu_ptr(clru->local_list, cpu); ++ bpf_lru_locallist_init(loc_l, cpu); ++ } ++ ++ bpf_lru_list_init(&clru->lru_list); ++ lru->nr_scans = LOCAL_NR_SCANS; ++ } ++ ++ lru->percpu = percpu; ++ lru->del_from_htab = del_from_htab; ++ lru->del_arg = del_arg; ++ lru->hash_offset = hash_offset; ++ ++ return 0; ++} ++ ++void bpf_lru_destroy(struct bpf_lru *lru) ++{ ++ if (lru->percpu) ++ free_percpu(lru->percpu_lru); ++ else ++ free_percpu(lru->common_lru.local_list); ++} +--- /dev/null ++++ b/kernel/bpf/bpf_lru_list.h +@@ -0,0 +1,82 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* Copyright (c) 2016 Facebook ++ */ ++#ifndef __BPF_LRU_LIST_H_ ++#define __BPF_LRU_LIST_H_ ++ ++#include ++#include ++ ++#define 
NR_BPF_LRU_LIST_T (3) ++#define NR_BPF_LRU_LIST_COUNT (2) ++#define NR_BPF_LRU_LOCAL_LIST_T (2) ++#define BPF_LOCAL_LIST_T_OFFSET NR_BPF_LRU_LIST_T ++ ++enum bpf_lru_list_type { ++ BPF_LRU_LIST_T_ACTIVE, ++ BPF_LRU_LIST_T_INACTIVE, ++ BPF_LRU_LIST_T_FREE, ++ BPF_LRU_LOCAL_LIST_T_FREE, ++ BPF_LRU_LOCAL_LIST_T_PENDING, ++}; ++ ++struct bpf_lru_node { ++ struct list_head list; ++ u16 cpu; ++ u8 type; ++ u8 ref; ++}; ++ ++struct bpf_lru_list { ++ struct list_head lists[NR_BPF_LRU_LIST_T]; ++ unsigned int counts[NR_BPF_LRU_LIST_COUNT]; ++ /* The next inacitve list rotation starts from here */ ++ struct list_head *next_inactive_rotation; ++ ++ raw_spinlock_t lock ____cacheline_aligned_in_smp; ++}; ++ ++struct bpf_lru_locallist { ++ struct list_head lists[NR_BPF_LRU_LOCAL_LIST_T]; ++ u16 next_steal; ++ raw_spinlock_t lock; ++}; ++ ++struct bpf_common_lru { ++ struct bpf_lru_list lru_list; ++ struct bpf_lru_locallist __percpu *local_list; ++}; ++ ++typedef bool (*del_from_htab_func)(void *arg, struct bpf_lru_node *node); ++ ++struct bpf_lru { ++ union { ++ struct bpf_common_lru common_lru; ++ struct bpf_lru_list __percpu *percpu_lru; ++ }; ++ del_from_htab_func del_from_htab; ++ void *del_arg; ++ unsigned int hash_offset; ++ unsigned int nr_scans; ++ bool percpu; ++}; ++ ++static inline void bpf_lru_node_set_ref(struct bpf_lru_node *node) ++{ ++ /* ref is an approximation on access frequency. It does not ++ * have to be very accurate. Hence, no protection is used. 
++ */ ++ if (!node->ref) ++ node->ref = 1; ++} ++ ++int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset, ++ del_from_htab_func del_from_htab, void *delete_arg); ++void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset, ++ u32 elem_size, u32 nr_elems); ++void bpf_lru_destroy(struct bpf_lru *lru); ++struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash); ++void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node); ++void bpf_lru_promote(struct bpf_lru *lru, struct bpf_lru_node *node); ++ ++#endif +--- /dev/null ++++ b/kernel/bpf/btf.c +@@ -0,0 +1,3514 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* Copyright (c) 2018 Facebook */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* BTF (BPF Type Format) is the meta data format which describes ++ * the data types of BPF program/map. Hence, it basically focus ++ * on the C programming language which the modern BPF is primary ++ * using. ++ * ++ * ELF Section: ++ * ~~~~~~~~~~~ ++ * The BTF data is stored under the ".BTF" ELF section ++ * ++ * struct btf_type: ++ * ~~~~~~~~~~~~~~~ ++ * Each 'struct btf_type' object describes a C data type. ++ * Depending on the type it is describing, a 'struct btf_type' ++ * object may be followed by more data. F.e. ++ * To describe an array, 'struct btf_type' is followed by ++ * 'struct btf_array'. ++ * ++ * 'struct btf_type' and any extra data following it are ++ * 4 bytes aligned. ++ * ++ * Type section: ++ * ~~~~~~~~~~~~~ ++ * The BTF type section contains a list of 'struct btf_type' objects. ++ * Each one describes a C type. Recall from the above section ++ * that a 'struct btf_type' object could be immediately followed by extra ++ * data in order to desribe some particular C types. ++ * ++ * type_id: ++ * ~~~~~~~ ++ * Each btf_type object is identified by a type_id. 
The type_id ++ * is implicitly implied by the location of the btf_type object in ++ * the BTF type section. The first one has type_id 1. The second ++ * one has type_id 2...etc. Hence, an earlier btf_type has ++ * a smaller type_id. ++ * ++ * A btf_type object may refer to another btf_type object by using ++ * type_id (i.e. the "type" in the "struct btf_type"). ++ * ++ * NOTE that we cannot assume any reference-order. ++ * A btf_type object can refer to an earlier btf_type object ++ * but it can also refer to a later btf_type object. ++ * ++ * For example, to describe "const void *". A btf_type ++ * object describing "const" may refer to another btf_type ++ * object describing "void *". This type-reference is done ++ * by specifying type_id: ++ * ++ * [1] CONST (anon) type_id=2 ++ * [2] PTR (anon) type_id=0 ++ * ++ * The above is the btf_verifier debug log: ++ * - Each line started with "[?]" is a btf_type object ++ * - [?] is the type_id of the btf_type object. ++ * - CONST/PTR is the BTF_KIND_XXX ++ * - "(anon)" is the name of the type. It just ++ * happens that CONST and PTR has no name. ++ * - type_id=XXX is the 'u32 type' in btf_type ++ * ++ * NOTE: "void" has type_id 0 ++ * ++ * String section: ++ * ~~~~~~~~~~~~~~ ++ * The BTF string section contains the names used by the type section. ++ * Each string is referred by an "offset" from the beginning of the ++ * string section. ++ * ++ * Each string is '\0' terminated. ++ * ++ * The first character in the string section must be '\0' ++ * which is used to mean 'anonymous'. Some btf_type may not ++ * have a name. ++ */ ++ ++/* BTF verification: ++ * ++ * To verify BTF data, two passes are needed. ++ * ++ * Pass #1 ++ * ~~~~~~~ ++ * The first pass is to collect all btf_type objects to ++ * an array: "btf->types". ++ * ++ * Depending on the C type that a btf_type is describing, ++ * a btf_type may be followed by extra data. 
We don't know ++ * how many btf_type is there, and more importantly we don't ++ * know where each btf_type is located in the type section. ++ * ++ * Without knowing the location of each type_id, most verifications ++ * cannot be done. e.g. an earlier btf_type may refer to a later ++ * btf_type (recall the "const void *" above), so we cannot ++ * check this type-reference in the first pass. ++ * ++ * In the first pass, it still does some verifications (e.g. ++ * checking the name is a valid offset to the string section). ++ * ++ * Pass #2 ++ * ~~~~~~~ ++ * The main focus is to resolve a btf_type that is referring ++ * to another type. ++ * ++ * We have to ensure the referring type: ++ * 1) does exist in the BTF (i.e. in btf->types[]) ++ * 2) does not cause a loop: ++ * struct A { ++ * struct B b; ++ * }; ++ * ++ * struct B { ++ * struct A a; ++ * }; ++ * ++ * btf_type_needs_resolve() decides if a btf_type needs ++ * to be resolved. ++ * ++ * The needs_resolve type implements the "resolve()" ops which ++ * essentially does a DFS and detects backedge. ++ * ++ * During resolve (or DFS), different C types have different ++ * "RESOLVED" conditions. ++ * ++ * When resolving a BTF_KIND_STRUCT, we need to resolve all its ++ * members because a member is always referring to another ++ * type. A struct's member can be treated as "RESOLVED" if ++ * it is referring to a BTF_KIND_PTR. Otherwise, the ++ * following valid C struct would be rejected: ++ * ++ * struct A { ++ * int m; ++ * struct A *a; ++ * }; ++ * ++ * When resolving a BTF_KIND_PTR, it needs to keep resolving if ++ * it is referring to another BTF_KIND_PTR. 
Otherwise, we cannot ++ * detect a pointer loop, e.g.: ++ * BTF_KIND_CONST -> BTF_KIND_PTR -> BTF_KIND_CONST -> BTF_KIND_PTR + ++ * ^ | ++ * +-----------------------------------------+ ++ * ++ */ ++ ++#define BITS_PER_U128 (sizeof(u64) * BITS_PER_BYTE * 2) ++#define BITS_PER_BYTE_MASK (BITS_PER_BYTE - 1) ++#define BITS_PER_BYTE_MASKED(bits) ((bits) & BITS_PER_BYTE_MASK) ++#define BITS_ROUNDDOWN_BYTES(bits) ((bits) >> 3) ++#define BITS_ROUNDUP_BYTES(bits) \ ++ (BITS_ROUNDDOWN_BYTES(bits) + !!BITS_PER_BYTE_MASKED(bits)) ++ ++#define BTF_INFO_MASK 0x8f00ffff ++#define BTF_INT_MASK 0x0fffffff ++#define BTF_TYPE_ID_VALID(type_id) ((type_id) <= BTF_MAX_TYPE) ++#define BTF_STR_OFFSET_VALID(name_off) ((name_off) <= BTF_MAX_NAME_OFFSET) ++ ++/* 16MB for 64k structs and each has 16 members and ++ * a few MB spaces for the string section. ++ * The hard limit is S32_MAX. ++ */ ++#define BTF_MAX_SIZE (16 * 1024 * 1024) ++ ++#define for_each_member(i, struct_type, member) \ ++ for (i = 0, member = btf_type_member(struct_type); \ ++ i < btf_type_vlen(struct_type); \ ++ i++, member++) ++ ++#define for_each_member_from(i, from, struct_type, member) \ ++ for (i = from, member = btf_type_member(struct_type) + from; \ ++ i < btf_type_vlen(struct_type); \ ++ i++, member++) ++ ++#define for_each_vsi(i, struct_type, member) \ ++ for (i = 0, member = btf_type_var_secinfo(struct_type); \ ++ i < btf_type_vlen(struct_type); \ ++ i++, member++) ++ ++#define for_each_vsi_from(i, from, struct_type, member) \ ++ for (i = from, member = btf_type_var_secinfo(struct_type) + from; \ ++ i < btf_type_vlen(struct_type); \ ++ i++, member++) ++ ++DEFINE_IDR(btf_idr); ++DEFINE_SPINLOCK(btf_idr_lock); ++ ++struct btf { ++ void *data; ++ struct btf_type **types; ++ u32 *resolved_ids; ++ u32 *resolved_sizes; ++ const char *strings; ++ void *nohdr_data; ++ struct btf_header hdr; ++ u32 nr_types; ++ u32 types_size; ++ u32 data_size; ++ refcount_t refcnt; ++ u32 id; ++ struct rcu_head rcu; ++}; ++ ++enum 
verifier_phase { ++ CHECK_META, ++ CHECK_TYPE, ++}; ++ ++struct resolve_vertex { ++ const struct btf_type *t; ++ u32 type_id; ++ u16 next_member; ++}; ++ ++enum visit_state { ++ NOT_VISITED, ++ VISITED, ++ RESOLVED, ++}; ++ ++enum resolve_mode { ++ RESOLVE_TBD, /* To Be Determined */ ++ RESOLVE_PTR, /* Resolving for Pointer */ ++ RESOLVE_STRUCT_OR_ARRAY, /* Resolving for struct/union ++ * or array ++ */ ++}; ++ ++#define MAX_RESOLVE_DEPTH 32 ++ ++struct btf_sec_info { ++ u32 off; ++ u32 len; ++}; ++ ++struct btf_verifier_env { ++ struct btf *btf; ++ u8 *visit_states; ++ struct resolve_vertex stack[MAX_RESOLVE_DEPTH]; ++ struct bpf_verifier_log log; ++ u32 log_type_id; ++ u32 top_stack; ++ enum verifier_phase phase; ++ enum resolve_mode resolve_mode; ++}; ++ ++static const char * const btf_kind_str[NR_BTF_KINDS] = { ++ [BTF_KIND_UNKN] = "UNKNOWN", ++ [BTF_KIND_INT] = "INT", ++ [BTF_KIND_PTR] = "PTR", ++ [BTF_KIND_ARRAY] = "ARRAY", ++ [BTF_KIND_STRUCT] = "STRUCT", ++ [BTF_KIND_UNION] = "UNION", ++ [BTF_KIND_ENUM] = "ENUM", ++ [BTF_KIND_FWD] = "FWD", ++ [BTF_KIND_TYPEDEF] = "TYPEDEF", ++ [BTF_KIND_VOLATILE] = "VOLATILE", ++ [BTF_KIND_CONST] = "CONST", ++ [BTF_KIND_RESTRICT] = "RESTRICT", ++ [BTF_KIND_FUNC] = "FUNC", ++ [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO", ++ [BTF_KIND_VAR] = "VAR", ++ [BTF_KIND_DATASEC] = "DATASEC", ++}; ++ ++struct btf_kind_operations { ++ s32 (*check_meta)(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left); ++ int (*resolve)(struct btf_verifier_env *env, ++ const struct resolve_vertex *v); ++ int (*check_member)(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type); ++ int (*check_kflag_member)(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type); ++ void (*log_details)(struct btf_verifier_env *env, ++ const struct btf_type *t); ++ void 
(*seq_show)(const struct btf *btf, const struct btf_type *t, ++ u32 type_id, void *data, u8 bits_offsets, ++ struct seq_file *m); ++}; ++ ++static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS]; ++static struct btf_type btf_void; ++ ++static int btf_resolve(struct btf_verifier_env *env, ++ const struct btf_type *t, u32 type_id); ++ ++static bool btf_type_is_modifier(const struct btf_type *t) ++{ ++ /* Some of them is not strictly a C modifier ++ * but they are grouped into the same bucket ++ * for BTF concern: ++ * A type (t) that refers to another ++ * type through t->type AND its size cannot ++ * be determined without following the t->type. ++ * ++ * ptr does not fall into this bucket ++ * because its size is always sizeof(void *). ++ */ ++ switch (BTF_INFO_KIND(t->info)) { ++ case BTF_KIND_TYPEDEF: ++ case BTF_KIND_VOLATILE: ++ case BTF_KIND_CONST: ++ case BTF_KIND_RESTRICT: ++ return true; ++ } ++ ++ return false; ++} ++ ++bool btf_type_is_void(const struct btf_type *t) ++{ ++ return t == &btf_void; ++} ++ ++static bool btf_type_is_fwd(const struct btf_type *t) ++{ ++ return BTF_INFO_KIND(t->info) == BTF_KIND_FWD; ++} ++ ++static bool btf_type_is_func(const struct btf_type *t) ++{ ++ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC; ++} ++ ++static bool btf_type_is_func_proto(const struct btf_type *t) ++{ ++ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO; ++} ++ ++static bool btf_type_nosize(const struct btf_type *t) ++{ ++ return btf_type_is_void(t) || btf_type_is_fwd(t) || ++ btf_type_is_func(t) || btf_type_is_func_proto(t); ++} ++ ++static bool btf_type_nosize_or_null(const struct btf_type *t) ++{ ++ return !t || btf_type_nosize(t); ++} ++ ++/* union is only a special case of struct: ++ * all its offsetof(member) == 0 ++ */ ++static bool btf_type_is_struct(const struct btf_type *t) ++{ ++ u8 kind = BTF_INFO_KIND(t->info); ++ ++ return kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION; ++} ++ ++static bool __btf_type_is_struct(const 
struct btf_type *t) ++{ ++ return BTF_INFO_KIND(t->info) == BTF_KIND_STRUCT; ++} ++ ++static bool btf_type_is_array(const struct btf_type *t) ++{ ++ return BTF_INFO_KIND(t->info) == BTF_KIND_ARRAY; ++} ++ ++static bool btf_type_is_ptr(const struct btf_type *t) ++{ ++ return BTF_INFO_KIND(t->info) == BTF_KIND_PTR; ++} ++ ++static bool btf_type_is_int(const struct btf_type *t) ++{ ++ return BTF_INFO_KIND(t->info) == BTF_KIND_INT; ++} ++ ++static bool btf_type_is_var(const struct btf_type *t) ++{ ++ return BTF_INFO_KIND(t->info) == BTF_KIND_VAR; ++} ++ ++static bool btf_type_is_datasec(const struct btf_type *t) ++{ ++ return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC; ++} ++ ++/* Types that act only as a source, not sink or intermediate ++ * type when resolving. ++ */ ++static bool btf_type_is_resolve_source_only(const struct btf_type *t) ++{ ++ return btf_type_is_var(t) || ++ btf_type_is_datasec(t); ++} ++ ++/* What types need to be resolved? ++ * ++ * btf_type_is_modifier() is an obvious one. ++ * ++ * btf_type_is_struct() because its member refers to ++ * another type (through member->type). ++ * ++ * btf_type_is_var() because the variable refers to ++ * another type. btf_type_is_datasec() holds multiple ++ * btf_type_is_var() types that need resolving. ++ * ++ * btf_type_is_array() because its element (array->type) ++ * refers to another type. Array can be thought of a ++ * special case of struct while array just has the same ++ * member-type repeated by array->nelems of times. 
++ */ ++static bool btf_type_needs_resolve(const struct btf_type *t) ++{ ++ return btf_type_is_modifier(t) || ++ btf_type_is_ptr(t) || ++ btf_type_is_struct(t) || ++ btf_type_is_array(t) || ++ btf_type_is_var(t) || ++ btf_type_is_datasec(t); ++} ++ ++/* t->size can be used */ ++static bool btf_type_has_size(const struct btf_type *t) ++{ ++ switch (BTF_INFO_KIND(t->info)) { ++ case BTF_KIND_INT: ++ case BTF_KIND_STRUCT: ++ case BTF_KIND_UNION: ++ case BTF_KIND_ENUM: ++ case BTF_KIND_DATASEC: ++ return true; ++ } ++ ++ return false; ++} ++ ++static const char *btf_int_encoding_str(u8 encoding) ++{ ++ if (encoding == 0) ++ return "(none)"; ++ else if (encoding == BTF_INT_SIGNED) ++ return "SIGNED"; ++ else if (encoding == BTF_INT_CHAR) ++ return "CHAR"; ++ else if (encoding == BTF_INT_BOOL) ++ return "BOOL"; ++ else ++ return "UNKN"; ++} ++ ++static u16 btf_type_vlen(const struct btf_type *t) ++{ ++ return BTF_INFO_VLEN(t->info); ++} ++ ++static bool btf_type_kflag(const struct btf_type *t) ++{ ++ return BTF_INFO_KFLAG(t->info); ++} ++ ++static u32 btf_member_bit_offset(const struct btf_type *struct_type, ++ const struct btf_member *member) ++{ ++ return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset) ++ : member->offset; ++} ++ ++static u32 btf_member_bitfield_size(const struct btf_type *struct_type, ++ const struct btf_member *member) ++{ ++ return btf_type_kflag(struct_type) ? 
BTF_MEMBER_BITFIELD_SIZE(member->offset) ++ : 0; ++} ++ ++static u32 btf_type_int(const struct btf_type *t) ++{ ++ return *(u32 *)(t + 1); ++} ++ ++static const struct btf_array *btf_type_array(const struct btf_type *t) ++{ ++ return (const struct btf_array *)(t + 1); ++} ++ ++static const struct btf_member *btf_type_member(const struct btf_type *t) ++{ ++ return (const struct btf_member *)(t + 1); ++} ++ ++static const struct btf_enum *btf_type_enum(const struct btf_type *t) ++{ ++ return (const struct btf_enum *)(t + 1); ++} ++ ++static const struct btf_var *btf_type_var(const struct btf_type *t) ++{ ++ return (const struct btf_var *)(t + 1); ++} ++ ++static const struct btf_var_secinfo *btf_type_var_secinfo(const struct btf_type *t) ++{ ++ return (const struct btf_var_secinfo *)(t + 1); ++} ++ ++static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t) ++{ ++ return kind_ops[BTF_INFO_KIND(t->info)]; ++} ++ ++static bool btf_name_offset_valid(const struct btf *btf, u32 offset) ++{ ++ return BTF_STR_OFFSET_VALID(offset) && ++ offset < btf->hdr.str_len; ++} ++ ++static bool __btf_name_char_ok(char c, bool first, bool dot_ok) ++{ ++ if ((first ? !isalpha(c) : ++ !isalnum(c)) && ++ c != '_' && ++ ((c == '.' && !dot_ok) || ++ c != '.')) ++ return false; ++ return true; ++} ++ ++static bool __btf_name_valid(const struct btf *btf, u32 offset, bool dot_ok) ++{ ++ /* offset must be valid */ ++ const char *src = &btf->strings[offset]; ++ const char *src_limit; ++ ++ if (!__btf_name_char_ok(*src, true, dot_ok)) ++ return false; ++ ++ /* set a limit on identifier length */ ++ src_limit = src + KSYM_NAME_LEN; ++ src++; ++ while (*src && src < src_limit) { ++ if (!__btf_name_char_ok(*src, false, dot_ok)) ++ return false; ++ src++; ++ } ++ ++ return !*src; ++} ++ ++/* Only C-style identifier is permitted. This can be relaxed if ++ * necessary. 
++ */ ++static bool btf_name_valid_identifier(const struct btf *btf, u32 offset) ++{ ++ return __btf_name_valid(btf, offset, false); ++} ++ ++static bool btf_name_valid_section(const struct btf *btf, u32 offset) ++{ ++ return __btf_name_valid(btf, offset, true); ++} ++ ++static const char *__btf_name_by_offset(const struct btf *btf, u32 offset) ++{ ++ if (!offset) ++ return "(anon)"; ++ else if (offset < btf->hdr.str_len) ++ return &btf->strings[offset]; ++ else ++ return "(invalid-name-offset)"; ++} ++ ++const char *btf_name_by_offset(const struct btf *btf, u32 offset) ++{ ++ if (offset < btf->hdr.str_len) ++ return &btf->strings[offset]; ++ ++ return NULL; ++} ++ ++const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) ++{ ++ if (type_id > btf->nr_types) ++ return NULL; ++ ++ return btf->types[type_id]; ++} ++ ++/* ++ * Regular int is not a bit field and it must be either ++ * u8/u16/u32/u64 or __int128. ++ */ ++static bool btf_type_int_is_regular(const struct btf_type *t) ++{ ++ u8 nr_bits, nr_bytes; ++ u32 int_data; ++ ++ int_data = btf_type_int(t); ++ nr_bits = BTF_INT_BITS(int_data); ++ nr_bytes = BITS_ROUNDUP_BYTES(nr_bits); ++ if (BITS_PER_BYTE_MASKED(nr_bits) || ++ BTF_INT_OFFSET(int_data) || ++ (nr_bytes != sizeof(u8) && nr_bytes != sizeof(u16) && ++ nr_bytes != sizeof(u32) && nr_bytes != sizeof(u64) && ++ nr_bytes != (2 * sizeof(u64)))) { ++ return false; ++ } ++ ++ return true; ++} ++ ++/* ++ * Check that given struct member is a regular int with expected ++ * offset and size. 
++ */ ++bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, ++ const struct btf_member *m, ++ u32 expected_offset, u32 expected_size) ++{ ++ const struct btf_type *t; ++ u32 id, int_data; ++ u8 nr_bits; ++ ++ id = m->type; ++ t = btf_type_id_size(btf, &id, NULL); ++ if (!t || !btf_type_is_int(t)) ++ return false; ++ ++ int_data = btf_type_int(t); ++ nr_bits = BTF_INT_BITS(int_data); ++ if (btf_type_kflag(s)) { ++ u32 bitfield_size = BTF_MEMBER_BITFIELD_SIZE(m->offset); ++ u32 bit_offset = BTF_MEMBER_BIT_OFFSET(m->offset); ++ ++ /* if kflag set, int should be a regular int and ++ * bit offset should be at byte boundary. ++ */ ++ return !bitfield_size && ++ BITS_ROUNDUP_BYTES(bit_offset) == expected_offset && ++ BITS_ROUNDUP_BYTES(nr_bits) == expected_size; ++ } ++ ++ if (BTF_INT_OFFSET(int_data) || ++ BITS_PER_BYTE_MASKED(m->offset) || ++ BITS_ROUNDUP_BYTES(m->offset) != expected_offset || ++ BITS_PER_BYTE_MASKED(nr_bits) || ++ BITS_ROUNDUP_BYTES(nr_bits) != expected_size) ++ return false; ++ ++ return true; ++} ++ ++__printf(2, 3) static void __btf_verifier_log(struct bpf_verifier_log *log, ++ const char *fmt, ...) ++{ ++ va_list args; ++ ++ va_start(args, fmt); ++ bpf_verifier_vlog(log, fmt, args); ++ va_end(args); ++} ++ ++__printf(2, 3) static void btf_verifier_log(struct btf_verifier_env *env, ++ const char *fmt, ...) ++{ ++ struct bpf_verifier_log *log = &env->log; ++ va_list args; ++ ++ if (!bpf_verifier_log_needed(log)) ++ return; ++ ++ va_start(args, fmt); ++ bpf_verifier_vlog(log, fmt, args); ++ va_end(args); ++} ++ ++__printf(4, 5) static void __btf_verifier_log_type(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ bool log_details, ++ const char *fmt, ...) 
++{ ++ struct bpf_verifier_log *log = &env->log; ++ u8 kind = BTF_INFO_KIND(t->info); ++ struct btf *btf = env->btf; ++ va_list args; ++ ++ if (!bpf_verifier_log_needed(log)) ++ return; ++ ++ __btf_verifier_log(log, "[%u] %s %s%s", ++ env->log_type_id, ++ btf_kind_str[kind], ++ __btf_name_by_offset(btf, t->name_off), ++ log_details ? " " : ""); ++ ++ if (log_details) ++ btf_type_ops(t)->log_details(env, t); ++ ++ if (fmt && *fmt) { ++ __btf_verifier_log(log, " "); ++ va_start(args, fmt); ++ bpf_verifier_vlog(log, fmt, args); ++ va_end(args); ++ } ++ ++ __btf_verifier_log(log, "\n"); ++} ++ ++#define btf_verifier_log_type(env, t, ...) \ ++ __btf_verifier_log_type((env), (t), true, __VA_ARGS__) ++#define btf_verifier_log_basic(env, t, ...) \ ++ __btf_verifier_log_type((env), (t), false, __VA_ARGS__) ++ ++__printf(4, 5) ++static void btf_verifier_log_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const char *fmt, ...) ++{ ++ struct bpf_verifier_log *log = &env->log; ++ struct btf *btf = env->btf; ++ va_list args; ++ ++ if (!bpf_verifier_log_needed(log)) ++ return; ++ ++ /* The CHECK_META phase already did a btf dump. ++ * ++ * If member is logged again, it must hit an error in ++ * parsing this member. It is useful to print out which ++ * struct this member belongs to. 
++ */ ++ if (env->phase != CHECK_META) ++ btf_verifier_log_type(env, struct_type, NULL); ++ ++ if (btf_type_kflag(struct_type)) ++ __btf_verifier_log(log, ++ "\t%s type_id=%u bitfield_size=%u bits_offset=%u", ++ __btf_name_by_offset(btf, member->name_off), ++ member->type, ++ BTF_MEMBER_BITFIELD_SIZE(member->offset), ++ BTF_MEMBER_BIT_OFFSET(member->offset)); ++ else ++ __btf_verifier_log(log, "\t%s type_id=%u bits_offset=%u", ++ __btf_name_by_offset(btf, member->name_off), ++ member->type, member->offset); ++ ++ if (fmt && *fmt) { ++ __btf_verifier_log(log, " "); ++ va_start(args, fmt); ++ bpf_verifier_vlog(log, fmt, args); ++ va_end(args); ++ } ++ ++ __btf_verifier_log(log, "\n"); ++} ++ ++__printf(4, 5) ++static void btf_verifier_log_vsi(struct btf_verifier_env *env, ++ const struct btf_type *datasec_type, ++ const struct btf_var_secinfo *vsi, ++ const char *fmt, ...) ++{ ++ struct bpf_verifier_log *log = &env->log; ++ va_list args; ++ ++ if (!bpf_verifier_log_needed(log)) ++ return; ++ if (env->phase != CHECK_META) ++ btf_verifier_log_type(env, datasec_type, NULL); ++ ++ __btf_verifier_log(log, "\t type_id=%u offset=%u size=%u", ++ vsi->type, vsi->offset, vsi->size); ++ if (fmt && *fmt) { ++ __btf_verifier_log(log, " "); ++ va_start(args, fmt); ++ bpf_verifier_vlog(log, fmt, args); ++ va_end(args); ++ } ++ ++ __btf_verifier_log(log, "\n"); ++} ++ ++static void btf_verifier_log_hdr(struct btf_verifier_env *env, ++ u32 btf_data_size) ++{ ++ struct bpf_verifier_log *log = &env->log; ++ const struct btf *btf = env->btf; ++ const struct btf_header *hdr; ++ ++ if (!bpf_verifier_log_needed(log)) ++ return; ++ ++ hdr = &btf->hdr; ++ __btf_verifier_log(log, "magic: 0x%x\n", hdr->magic); ++ __btf_verifier_log(log, "version: %u\n", hdr->version); ++ __btf_verifier_log(log, "flags: 0x%x\n", hdr->flags); ++ __btf_verifier_log(log, "hdr_len: %u\n", hdr->hdr_len); ++ __btf_verifier_log(log, "type_off: %u\n", hdr->type_off); ++ __btf_verifier_log(log, "type_len: %u\n", 
hdr->type_len); ++ __btf_verifier_log(log, "str_off: %u\n", hdr->str_off); ++ __btf_verifier_log(log, "str_len: %u\n", hdr->str_len); ++ __btf_verifier_log(log, "btf_total_size: %u\n", btf_data_size); ++} ++ ++static int btf_add_type(struct btf_verifier_env *env, struct btf_type *t) ++{ ++ struct btf *btf = env->btf; ++ ++ /* < 2 because +1 for btf_void which is always in btf->types[0]. ++ * btf_void is not accounted in btf->nr_types because btf_void ++ * does not come from the BTF file. ++ */ ++ if (btf->types_size - btf->nr_types < 2) { ++ /* Expand 'types' array */ ++ ++ struct btf_type **new_types; ++ u32 expand_by, new_size; ++ ++ if (btf->types_size == BTF_MAX_TYPE) { ++ btf_verifier_log(env, "Exceeded max num of types"); ++ return -E2BIG; ++ } ++ ++ expand_by = max_t(u32, btf->types_size >> 2, 16); ++ new_size = min_t(u32, BTF_MAX_TYPE, ++ btf->types_size + expand_by); ++ ++ new_types = kcalloc(new_size, sizeof(*new_types), ++ GFP_KERNEL | __GFP_NOWARN); ++ if (!new_types) ++ return -ENOMEM; ++ ++ if (btf->nr_types == 0) ++ new_types[0] = &btf_void; ++ else ++ memcpy(new_types, btf->types, ++ sizeof(*btf->types) * (btf->nr_types + 1)); ++ ++ kvfree(btf->types); ++ btf->types = new_types; ++ btf->types_size = new_size; ++ } ++ ++ btf->types[++(btf->nr_types)] = t; ++ ++ return 0; ++} ++ ++static int btf_alloc_id(struct btf *btf) ++{ ++ int id; ++ ++ idr_preload(GFP_KERNEL); ++ spin_lock_bh(&btf_idr_lock); ++ id = idr_alloc_cyclic(&btf_idr, btf, 1, INT_MAX, GFP_ATOMIC); ++ if (id > 0) ++ btf->id = id; ++ spin_unlock_bh(&btf_idr_lock); ++ idr_preload_end(); ++ ++ if (WARN_ON_ONCE(!id)) ++ return -ENOSPC; ++ ++ return id > 0 ? 0 : id; ++} ++ ++static void btf_free_id(struct btf *btf) ++{ ++ unsigned long flags; ++ ++ /* ++ * In map-in-map, calling map_delete_elem() on outer ++ * map will call bpf_map_put on the inner map. ++ * It will then eventually call btf_free_id() ++ * on the inner map. 
Some of the map_delete_elem() ++ * implementation may have irq disabled, so ++ * we need to use the _irqsave() version instead ++ * of the _bh() version. ++ */ ++ spin_lock_irqsave(&btf_idr_lock, flags); ++ idr_remove(&btf_idr, btf->id); ++ spin_unlock_irqrestore(&btf_idr_lock, flags); ++} ++ ++static void btf_free(struct btf *btf) ++{ ++ kvfree(btf->types); ++ kvfree(btf->resolved_sizes); ++ kvfree(btf->resolved_ids); ++ kvfree(btf->data); ++ kfree(btf); ++} ++ ++static void btf_free_rcu(struct rcu_head *rcu) ++{ ++ struct btf *btf = container_of(rcu, struct btf, rcu); ++ ++ btf_free(btf); ++} ++ ++void btf_put(struct btf *btf) ++{ ++ if (btf && refcount_dec_and_test(&btf->refcnt)) { ++ btf_free_id(btf); ++ call_rcu(&btf->rcu, btf_free_rcu); ++ } ++} ++ ++static int env_resolve_init(struct btf_verifier_env *env) ++{ ++ struct btf *btf = env->btf; ++ u32 nr_types = btf->nr_types; ++ u32 *resolved_sizes = NULL; ++ u32 *resolved_ids = NULL; ++ u8 *visit_states = NULL; ++ ++ /* +1 for btf_void */ ++ resolved_sizes = kcalloc(nr_types + 1, sizeof(*resolved_sizes), ++ GFP_KERNEL | __GFP_NOWARN); ++ if (!resolved_sizes) ++ goto nomem; ++ ++ resolved_ids = kcalloc(nr_types + 1, sizeof(*resolved_ids), ++ GFP_KERNEL | __GFP_NOWARN); ++ if (!resolved_ids) ++ goto nomem; ++ ++ visit_states = kcalloc(nr_types + 1, sizeof(*visit_states), ++ GFP_KERNEL | __GFP_NOWARN); ++ if (!visit_states) ++ goto nomem; ++ ++ btf->resolved_sizes = resolved_sizes; ++ btf->resolved_ids = resolved_ids; ++ env->visit_states = visit_states; ++ ++ return 0; ++ ++nomem: ++ kvfree(resolved_sizes); ++ kvfree(resolved_ids); ++ kvfree(visit_states); ++ return -ENOMEM; ++} ++ ++static void btf_verifier_env_free(struct btf_verifier_env *env) ++{ ++ kvfree(env->visit_states); ++ kfree(env); ++} ++ ++static bool env_type_is_resolve_sink(const struct btf_verifier_env *env, ++ const struct btf_type *next_type) ++{ ++ switch (env->resolve_mode) { ++ case RESOLVE_TBD: ++ /* int, enum or void is a sink */ ++ 
return !btf_type_needs_resolve(next_type); ++ case RESOLVE_PTR: ++ /* int, enum, void, struct, array, func or func_proto is a sink ++ * for ptr ++ */ ++ return !btf_type_is_modifier(next_type) && ++ !btf_type_is_ptr(next_type); ++ case RESOLVE_STRUCT_OR_ARRAY: ++ /* int, enum, void, ptr, func or func_proto is a sink ++ * for struct and array ++ */ ++ return !btf_type_is_modifier(next_type) && ++ !btf_type_is_array(next_type) && ++ !btf_type_is_struct(next_type); ++ default: ++ BUG(); ++ } ++} ++ ++static bool env_type_is_resolved(const struct btf_verifier_env *env, ++ u32 type_id) ++{ ++ return env->visit_states[type_id] == RESOLVED; ++} ++ ++static int env_stack_push(struct btf_verifier_env *env, ++ const struct btf_type *t, u32 type_id) ++{ ++ struct resolve_vertex *v; ++ ++ if (env->top_stack == MAX_RESOLVE_DEPTH) ++ return -E2BIG; ++ ++ if (env->visit_states[type_id] != NOT_VISITED) ++ return -EEXIST; ++ ++ env->visit_states[type_id] = VISITED; ++ ++ v = &env->stack[env->top_stack++]; ++ v->t = t; ++ v->type_id = type_id; ++ v->next_member = 0; ++ ++ if (env->resolve_mode == RESOLVE_TBD) { ++ if (btf_type_is_ptr(t)) ++ env->resolve_mode = RESOLVE_PTR; ++ else if (btf_type_is_struct(t) || btf_type_is_array(t)) ++ env->resolve_mode = RESOLVE_STRUCT_OR_ARRAY; ++ } ++ ++ return 0; ++} ++ ++static void env_stack_set_next_member(struct btf_verifier_env *env, ++ u16 next_member) ++{ ++ env->stack[env->top_stack - 1].next_member = next_member; ++} ++ ++static void env_stack_pop_resolved(struct btf_verifier_env *env, ++ u32 resolved_type_id, ++ u32 resolved_size) ++{ ++ u32 type_id = env->stack[--(env->top_stack)].type_id; ++ struct btf *btf = env->btf; ++ ++ btf->resolved_sizes[type_id] = resolved_size; ++ btf->resolved_ids[type_id] = resolved_type_id; ++ env->visit_states[type_id] = RESOLVED; ++} ++ ++static const struct resolve_vertex *env_stack_peak(struct btf_verifier_env *env) ++{ ++ return env->top_stack ? 
&env->stack[env->top_stack - 1] : NULL; ++} ++ ++/* The input param "type_id" must point to a needs_resolve type */ ++static const struct btf_type *btf_type_id_resolve(const struct btf *btf, ++ u32 *type_id) ++{ ++ *type_id = btf->resolved_ids[*type_id]; ++ return btf_type_by_id(btf, *type_id); ++} ++ ++const struct btf_type *btf_type_id_size(const struct btf *btf, ++ u32 *type_id, u32 *ret_size) ++{ ++ const struct btf_type *size_type; ++ u32 size_type_id = *type_id; ++ u32 size = 0; ++ ++ size_type = btf_type_by_id(btf, size_type_id); ++ if (btf_type_nosize_or_null(size_type)) ++ return NULL; ++ ++ if (btf_type_has_size(size_type)) { ++ size = size_type->size; ++ } else if (btf_type_is_array(size_type)) { ++ size = btf->resolved_sizes[size_type_id]; ++ } else if (btf_type_is_ptr(size_type)) { ++ size = sizeof(void *); ++ } else { ++ if (WARN_ON_ONCE(!btf_type_is_modifier(size_type) && ++ !btf_type_is_var(size_type))) ++ return NULL; ++ ++ size_type_id = btf->resolved_ids[size_type_id]; ++ size_type = btf_type_by_id(btf, size_type_id); ++ if (btf_type_nosize_or_null(size_type)) ++ return NULL; ++ else if (btf_type_has_size(size_type)) ++ size = size_type->size; ++ else if (btf_type_is_array(size_type)) ++ size = btf->resolved_sizes[size_type_id]; ++ else if (btf_type_is_ptr(size_type)) ++ size = sizeof(void *); ++ else ++ return NULL; ++ } ++ ++ *type_id = size_type_id; ++ if (ret_size) ++ *ret_size = size; ++ ++ return size_type; ++} ++ ++static int btf_df_check_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type) ++{ ++ btf_verifier_log_basic(env, struct_type, ++ "Unsupported check_member"); ++ return -EINVAL; ++} ++ ++static int btf_df_check_kflag_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type) ++{ ++ btf_verifier_log_basic(env, struct_type, ++ "Unsupported 
check_kflag_member"); ++ return -EINVAL; ++} ++ ++/* Used for ptr, array and struct/union type members. ++ * int, enum and modifier types have their specific callback functions. ++ */ ++static int btf_generic_check_kflag_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type) ++{ ++ if (BTF_MEMBER_BITFIELD_SIZE(member->offset)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Invalid member bitfield_size"); ++ return -EINVAL; ++ } ++ ++ /* bitfield size is 0, so member->offset represents bit offset only. ++ * It is safe to call non kflag check_member variants. ++ */ ++ return btf_type_ops(member_type)->check_member(env, struct_type, ++ member, ++ member_type); ++} ++ ++static int btf_df_resolve(struct btf_verifier_env *env, ++ const struct resolve_vertex *v) ++{ ++ btf_verifier_log_basic(env, v->t, "Unsupported resolve"); ++ return -EINVAL; ++} ++ ++static void btf_df_seq_show(const struct btf *btf, const struct btf_type *t, ++ u32 type_id, void *data, u8 bits_offsets, ++ struct seq_file *m) ++{ ++ seq_printf(m, "", BTF_INFO_KIND(t->info)); ++} ++ ++static int btf_int_check_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type) ++{ ++ u32 int_data = btf_type_int(member_type); ++ u32 struct_bits_off = member->offset; ++ u32 struct_size = struct_type->size; ++ u32 nr_copy_bits; ++ u32 bytes_offset; ++ ++ if (U32_MAX - struct_bits_off < BTF_INT_OFFSET(int_data)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "bits_offset exceeds U32_MAX"); ++ return -EINVAL; ++ } ++ ++ struct_bits_off += BTF_INT_OFFSET(int_data); ++ bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); ++ nr_copy_bits = BTF_INT_BITS(int_data) + ++ BITS_PER_BYTE_MASKED(struct_bits_off); ++ ++ if (nr_copy_bits > BITS_PER_U128) { ++ btf_verifier_log_member(env, struct_type, member, ++ 
"nr_copy_bits exceeds 128"); ++ return -EINVAL; ++ } ++ ++ if (struct_size < bytes_offset || ++ struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member exceeds struct_size"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int btf_int_check_kflag_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type) ++{ ++ u32 struct_bits_off, nr_bits, nr_int_data_bits, bytes_offset; ++ u32 int_data = btf_type_int(member_type); ++ u32 struct_size = struct_type->size; ++ u32 nr_copy_bits; ++ ++ /* a regular int type is required for the kflag int member */ ++ if (!btf_type_int_is_regular(member_type)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Invalid member base type"); ++ return -EINVAL; ++ } ++ ++ /* check sanity of bitfield size */ ++ nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); ++ struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); ++ nr_int_data_bits = BTF_INT_BITS(int_data); ++ if (!nr_bits) { ++ /* Not a bitfield member, member offset must be at byte ++ * boundary. 
++ */ ++ if (BITS_PER_BYTE_MASKED(struct_bits_off)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Invalid member offset"); ++ return -EINVAL; ++ } ++ ++ nr_bits = nr_int_data_bits; ++ } else if (nr_bits > nr_int_data_bits) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Invalid member bitfield_size"); ++ return -EINVAL; ++ } ++ ++ bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); ++ nr_copy_bits = nr_bits + BITS_PER_BYTE_MASKED(struct_bits_off); ++ if (nr_copy_bits > BITS_PER_U128) { ++ btf_verifier_log_member(env, struct_type, member, ++ "nr_copy_bits exceeds 128"); ++ return -EINVAL; ++ } ++ ++ if (struct_size < bytes_offset || ++ struct_size - bytes_offset < BITS_ROUNDUP_BYTES(nr_copy_bits)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member exceeds struct_size"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static s32 btf_int_check_meta(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left) ++{ ++ u32 int_data, nr_bits, meta_needed = sizeof(int_data); ++ u16 encoding; ++ ++ if (meta_left < meta_needed) { ++ btf_verifier_log_basic(env, t, ++ "meta_left:%u meta_needed:%u", ++ meta_left, meta_needed); ++ return -EINVAL; ++ } ++ ++ if (btf_type_vlen(t)) { ++ btf_verifier_log_type(env, t, "vlen != 0"); ++ return -EINVAL; ++ } ++ ++ if (btf_type_kflag(t)) { ++ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); ++ return -EINVAL; ++ } ++ ++ int_data = btf_type_int(t); ++ if (int_data & ~BTF_INT_MASK) { ++ btf_verifier_log_basic(env, t, "Invalid int_data:%x", ++ int_data); ++ return -EINVAL; ++ } ++ ++ nr_bits = BTF_INT_BITS(int_data) + BTF_INT_OFFSET(int_data); ++ ++ if (nr_bits > BITS_PER_U128) { ++ btf_verifier_log_type(env, t, "nr_bits exceeds %zu", ++ BITS_PER_U128); ++ return -EINVAL; ++ } ++ ++ if (BITS_ROUNDUP_BYTES(nr_bits) > t->size) { ++ btf_verifier_log_type(env, t, "nr_bits exceeds type_size"); ++ return -EINVAL; ++ } ++ ++ /* ++ * Only one of the encoding bits is allowed and it 
++ * should be sufficient for the pretty print purpose (i.e. decoding). ++ * Multiple bits can be allowed later if it is found ++ * to be insufficient. ++ */ ++ encoding = BTF_INT_ENCODING(int_data); ++ if (encoding && ++ encoding != BTF_INT_SIGNED && ++ encoding != BTF_INT_CHAR && ++ encoding != BTF_INT_BOOL) { ++ btf_verifier_log_type(env, t, "Unsupported encoding"); ++ return -ENOTSUPP; ++ } ++ ++ btf_verifier_log_type(env, t, NULL); ++ ++ return meta_needed; ++} ++ ++static void btf_int_log(struct btf_verifier_env *env, ++ const struct btf_type *t) ++{ ++ int int_data = btf_type_int(t); ++ ++ btf_verifier_log(env, ++ "size=%u bits_offset=%u nr_bits=%u encoding=%s", ++ t->size, BTF_INT_OFFSET(int_data), ++ BTF_INT_BITS(int_data), ++ btf_int_encoding_str(BTF_INT_ENCODING(int_data))); ++} ++ ++static void btf_int128_print(struct seq_file *m, void *data) ++{ ++ /* data points to a __int128 number. ++ * Suppose ++ * int128_num = *(__int128 *)data; ++ * The below formulas shows what upper_num and lower_num represents: ++ * upper_num = int128_num >> 64; ++ * lower_num = int128_num & 0xffffffffFFFFFFFFULL; ++ */ ++ u64 upper_num, lower_num; ++ ++#ifdef __BIG_ENDIAN_BITFIELD ++ upper_num = *(u64 *)data; ++ lower_num = *(u64 *)(data + 8); ++#else ++ upper_num = *(u64 *)(data + 8); ++ lower_num = *(u64 *)data; ++#endif ++ if (upper_num == 0) ++ seq_printf(m, "0x%llx", lower_num); ++ else ++ seq_printf(m, "0x%llx%016llx", upper_num, lower_num); ++} ++ ++static void btf_int128_shift(u64 *print_num, u16 left_shift_bits, ++ u16 right_shift_bits) ++{ ++ u64 upper_num, lower_num; ++ ++#ifdef __BIG_ENDIAN_BITFIELD ++ upper_num = print_num[0]; ++ lower_num = print_num[1]; ++#else ++ upper_num = print_num[1]; ++ lower_num = print_num[0]; ++#endif ++ ++ /* shake out un-needed bits by shift/or operations */ ++ if (left_shift_bits >= 64) { ++ upper_num = lower_num << (left_shift_bits - 64); ++ lower_num = 0; ++ } else { ++ upper_num = (upper_num << left_shift_bits) | ++ (lower_num >> 
(64 - left_shift_bits)); ++ lower_num = lower_num << left_shift_bits; ++ } ++ ++ if (right_shift_bits >= 64) { ++ lower_num = upper_num >> (right_shift_bits - 64); ++ upper_num = 0; ++ } else { ++ lower_num = (lower_num >> right_shift_bits) | ++ (upper_num << (64 - right_shift_bits)); ++ upper_num = upper_num >> right_shift_bits; ++ } ++ ++#ifdef __BIG_ENDIAN_BITFIELD ++ print_num[0] = upper_num; ++ print_num[1] = lower_num; ++#else ++ print_num[0] = lower_num; ++ print_num[1] = upper_num; ++#endif ++} ++ ++static void btf_bitfield_seq_show(void *data, u8 bits_offset, ++ u8 nr_bits, struct seq_file *m) ++{ ++ u16 left_shift_bits, right_shift_bits; ++ u8 nr_copy_bytes; ++ u8 nr_copy_bits; ++ u64 print_num[2] = {}; ++ ++ nr_copy_bits = nr_bits + bits_offset; ++ nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits); ++ ++ memcpy(print_num, data, nr_copy_bytes); ++ ++#ifdef __BIG_ENDIAN_BITFIELD ++ left_shift_bits = bits_offset; ++#else ++ left_shift_bits = BITS_PER_U128 - nr_copy_bits; ++#endif ++ right_shift_bits = BITS_PER_U128 - nr_bits; ++ ++ btf_int128_shift(print_num, left_shift_bits, right_shift_bits); ++ btf_int128_print(m, print_num); ++} ++ ++ ++static void btf_int_bits_seq_show(const struct btf *btf, ++ const struct btf_type *t, ++ void *data, u8 bits_offset, ++ struct seq_file *m) ++{ ++ u32 int_data = btf_type_int(t); ++ u8 nr_bits = BTF_INT_BITS(int_data); ++ u8 total_bits_offset; ++ ++ /* ++ * bits_offset is at most 7. ++ * BTF_INT_OFFSET() cannot exceed 128 bits. 
++ */ ++ total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data); ++ data += BITS_ROUNDDOWN_BYTES(total_bits_offset); ++ bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset); ++ btf_bitfield_seq_show(data, bits_offset, nr_bits, m); ++} ++ ++static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t, ++ u32 type_id, void *data, u8 bits_offset, ++ struct seq_file *m) ++{ ++ u32 int_data = btf_type_int(t); ++ u8 encoding = BTF_INT_ENCODING(int_data); ++ bool sign = encoding & BTF_INT_SIGNED; ++ u8 nr_bits = BTF_INT_BITS(int_data); ++ ++ if (bits_offset || BTF_INT_OFFSET(int_data) || ++ BITS_PER_BYTE_MASKED(nr_bits)) { ++ btf_int_bits_seq_show(btf, t, data, bits_offset, m); ++ return; ++ } ++ ++ switch (nr_bits) { ++ case 128: ++ btf_int128_print(m, data); ++ break; ++ case 64: ++ if (sign) ++ seq_printf(m, "%lld", *(s64 *)data); ++ else ++ seq_printf(m, "%llu", *(u64 *)data); ++ break; ++ case 32: ++ if (sign) ++ seq_printf(m, "%d", *(s32 *)data); ++ else ++ seq_printf(m, "%u", *(u32 *)data); ++ break; ++ case 16: ++ if (sign) ++ seq_printf(m, "%d", *(s16 *)data); ++ else ++ seq_printf(m, "%u", *(u16 *)data); ++ break; ++ case 8: ++ if (sign) ++ seq_printf(m, "%d", *(s8 *)data); ++ else ++ seq_printf(m, "%u", *(u8 *)data); ++ break; ++ default: ++ btf_int_bits_seq_show(btf, t, data, bits_offset, m); ++ } ++} ++ ++static const struct btf_kind_operations int_ops = { ++ .check_meta = btf_int_check_meta, ++ .resolve = btf_df_resolve, ++ .check_member = btf_int_check_member, ++ .check_kflag_member = btf_int_check_kflag_member, ++ .log_details = btf_int_log, ++ .seq_show = btf_int_seq_show, ++}; ++ ++static int btf_modifier_check_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type) ++{ ++ const struct btf_type *resolved_type; ++ u32 resolved_type_id = member->type; ++ struct btf_member resolved_member; ++ struct btf *btf = env->btf; ++ ++ resolved_type = 
btf_type_id_size(btf, &resolved_type_id, NULL); ++ if (!resolved_type) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Invalid member"); ++ return -EINVAL; ++ } ++ ++ resolved_member = *member; ++ resolved_member.type = resolved_type_id; ++ ++ return btf_type_ops(resolved_type)->check_member(env, struct_type, ++ &resolved_member, ++ resolved_type); ++} ++ ++static int btf_modifier_check_kflag_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type) ++{ ++ const struct btf_type *resolved_type; ++ u32 resolved_type_id = member->type; ++ struct btf_member resolved_member; ++ struct btf *btf = env->btf; ++ ++ resolved_type = btf_type_id_size(btf, &resolved_type_id, NULL); ++ if (!resolved_type) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Invalid member"); ++ return -EINVAL; ++ } ++ ++ resolved_member = *member; ++ resolved_member.type = resolved_type_id; ++ ++ return btf_type_ops(resolved_type)->check_kflag_member(env, struct_type, ++ &resolved_member, ++ resolved_type); ++} ++ ++static int btf_ptr_check_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type) ++{ ++ u32 struct_size, struct_bits_off, bytes_offset; ++ ++ struct_size = struct_type->size; ++ struct_bits_off = member->offset; ++ bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); ++ ++ if (BITS_PER_BYTE_MASKED(struct_bits_off)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member is not byte aligned"); ++ return -EINVAL; ++ } ++ ++ if (struct_size - bytes_offset < sizeof(void *)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member exceeds struct_size"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int btf_ref_type_check_meta(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left) ++{ ++ if (btf_type_vlen(t)) { ++ 
btf_verifier_log_type(env, t, "vlen != 0"); ++ return -EINVAL; ++ } ++ ++ if (btf_type_kflag(t)) { ++ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); ++ return -EINVAL; ++ } ++ ++ if (!BTF_TYPE_ID_VALID(t->type)) { ++ btf_verifier_log_type(env, t, "Invalid type_id"); ++ return -EINVAL; ++ } ++ ++ /* typedef type must have a valid name, and other ref types, ++ * volatile, const, restrict, should have a null name. ++ */ ++ if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) { ++ if (!t->name_off || ++ !btf_name_valid_identifier(env->btf, t->name_off)) { ++ btf_verifier_log_type(env, t, "Invalid name"); ++ return -EINVAL; ++ } ++ } else { ++ if (t->name_off) { ++ btf_verifier_log_type(env, t, "Invalid name"); ++ return -EINVAL; ++ } ++ } ++ ++ btf_verifier_log_type(env, t, NULL); ++ ++ return 0; ++} ++ ++static int btf_modifier_resolve(struct btf_verifier_env *env, ++ const struct resolve_vertex *v) ++{ ++ const struct btf_type *t = v->t; ++ const struct btf_type *next_type; ++ u32 next_type_id = t->type; ++ struct btf *btf = env->btf; ++ ++ next_type = btf_type_by_id(btf, next_type_id); ++ if (!next_type || btf_type_is_resolve_source_only(next_type)) { ++ btf_verifier_log_type(env, v->t, "Invalid type_id"); ++ return -EINVAL; ++ } ++ ++ if (!env_type_is_resolve_sink(env, next_type) && ++ !env_type_is_resolved(env, next_type_id)) ++ return env_stack_push(env, next_type, next_type_id); ++ ++ /* Figure out the resolved next_type_id with size. ++ * They will be stored in the current modifier's ++ * resolved_ids and resolved_sizes such that it can ++ * save us a few type-following when we use it later (e.g. in ++ * pretty print). 
++ */ ++ if (!btf_type_id_size(btf, &next_type_id, NULL)) { ++ if (env_type_is_resolved(env, next_type_id)) ++ next_type = btf_type_id_resolve(btf, &next_type_id); ++ ++ /* "typedef void new_void", "const void"...etc */ ++ if (!btf_type_is_void(next_type) && ++ !btf_type_is_fwd(next_type) && ++ !btf_type_is_func_proto(next_type)) { ++ btf_verifier_log_type(env, v->t, "Invalid type_id"); ++ return -EINVAL; ++ } ++ } ++ ++ env_stack_pop_resolved(env, next_type_id, 0); ++ ++ return 0; ++} ++ ++static int btf_var_resolve(struct btf_verifier_env *env, ++ const struct resolve_vertex *v) ++{ ++ const struct btf_type *next_type; ++ const struct btf_type *t = v->t; ++ u32 next_type_id = t->type; ++ struct btf *btf = env->btf; ++ ++ next_type = btf_type_by_id(btf, next_type_id); ++ if (!next_type || btf_type_is_resolve_source_only(next_type)) { ++ btf_verifier_log_type(env, v->t, "Invalid type_id"); ++ return -EINVAL; ++ } ++ ++ if (!env_type_is_resolve_sink(env, next_type) && ++ !env_type_is_resolved(env, next_type_id)) ++ return env_stack_push(env, next_type, next_type_id); ++ ++ if (btf_type_is_modifier(next_type)) { ++ const struct btf_type *resolved_type; ++ u32 resolved_type_id; ++ ++ resolved_type_id = next_type_id; ++ resolved_type = btf_type_id_resolve(btf, &resolved_type_id); ++ ++ if (btf_type_is_ptr(resolved_type) && ++ !env_type_is_resolve_sink(env, resolved_type) && ++ !env_type_is_resolved(env, resolved_type_id)) ++ return env_stack_push(env, resolved_type, ++ resolved_type_id); ++ } ++ ++ /* We must resolve to something concrete at this point, no ++ * forward types or similar that would resolve to size of ++ * zero is allowed. 
++ */ ++ if (!btf_type_id_size(btf, &next_type_id, NULL)) { ++ btf_verifier_log_type(env, v->t, "Invalid type_id"); ++ return -EINVAL; ++ } ++ ++ env_stack_pop_resolved(env, next_type_id, 0); ++ ++ return 0; ++} ++ ++static int btf_ptr_resolve(struct btf_verifier_env *env, ++ const struct resolve_vertex *v) ++{ ++ const struct btf_type *next_type; ++ const struct btf_type *t = v->t; ++ u32 next_type_id = t->type; ++ struct btf *btf = env->btf; ++ ++ next_type = btf_type_by_id(btf, next_type_id); ++ if (!next_type || btf_type_is_resolve_source_only(next_type)) { ++ btf_verifier_log_type(env, v->t, "Invalid type_id"); ++ return -EINVAL; ++ } ++ ++ if (!env_type_is_resolve_sink(env, next_type) && ++ !env_type_is_resolved(env, next_type_id)) ++ return env_stack_push(env, next_type, next_type_id); ++ ++ /* If the modifier was RESOLVED during RESOLVE_STRUCT_OR_ARRAY, ++ * the modifier may have stopped resolving when it was resolved ++ * to a ptr (last-resolved-ptr). ++ * ++ * We now need to continue from the last-resolved-ptr to ++ * ensure the last-resolved-ptr will not referring back to ++ * the currenct ptr (t). 
++ */ ++ if (btf_type_is_modifier(next_type)) { ++ const struct btf_type *resolved_type; ++ u32 resolved_type_id; ++ ++ resolved_type_id = next_type_id; ++ resolved_type = btf_type_id_resolve(btf, &resolved_type_id); ++ ++ if (btf_type_is_ptr(resolved_type) && ++ !env_type_is_resolve_sink(env, resolved_type) && ++ !env_type_is_resolved(env, resolved_type_id)) ++ return env_stack_push(env, resolved_type, ++ resolved_type_id); ++ } ++ ++ if (!btf_type_id_size(btf, &next_type_id, NULL)) { ++ if (env_type_is_resolved(env, next_type_id)) ++ next_type = btf_type_id_resolve(btf, &next_type_id); ++ ++ if (!btf_type_is_void(next_type) && ++ !btf_type_is_fwd(next_type) && ++ !btf_type_is_func_proto(next_type)) { ++ btf_verifier_log_type(env, v->t, "Invalid type_id"); ++ return -EINVAL; ++ } ++ } ++ ++ env_stack_pop_resolved(env, next_type_id, 0); ++ ++ return 0; ++} ++ ++static void btf_modifier_seq_show(const struct btf *btf, ++ const struct btf_type *t, ++ u32 type_id, void *data, ++ u8 bits_offset, struct seq_file *m) ++{ ++ t = btf_type_id_resolve(btf, &type_id); ++ ++ btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m); ++} ++ ++static void btf_var_seq_show(const struct btf *btf, const struct btf_type *t, ++ u32 type_id, void *data, u8 bits_offset, ++ struct seq_file *m) ++{ ++ t = btf_type_id_resolve(btf, &type_id); ++ ++ btf_type_ops(t)->seq_show(btf, t, type_id, data, bits_offset, m); ++} ++ ++static void btf_ptr_seq_show(const struct btf *btf, const struct btf_type *t, ++ u32 type_id, void *data, u8 bits_offset, ++ struct seq_file *m) ++{ ++ /* It is a hashed value */ ++ seq_printf(m, "%p", *(void **)data); ++} ++ ++static void btf_ref_type_log(struct btf_verifier_env *env, ++ const struct btf_type *t) ++{ ++ btf_verifier_log(env, "type_id=%u", t->type); ++} ++ ++static struct btf_kind_operations modifier_ops = { ++ .check_meta = btf_ref_type_check_meta, ++ .resolve = btf_modifier_resolve, ++ .check_member = btf_modifier_check_member, ++ 
.check_kflag_member = btf_modifier_check_kflag_member, ++ .log_details = btf_ref_type_log, ++ .seq_show = btf_modifier_seq_show, ++}; ++ ++static struct btf_kind_operations ptr_ops = { ++ .check_meta = btf_ref_type_check_meta, ++ .resolve = btf_ptr_resolve, ++ .check_member = btf_ptr_check_member, ++ .check_kflag_member = btf_generic_check_kflag_member, ++ .log_details = btf_ref_type_log, ++ .seq_show = btf_ptr_seq_show, ++}; ++ ++static s32 btf_fwd_check_meta(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left) ++{ ++ if (btf_type_vlen(t)) { ++ btf_verifier_log_type(env, t, "vlen != 0"); ++ return -EINVAL; ++ } ++ ++ if (t->type) { ++ btf_verifier_log_type(env, t, "type != 0"); ++ return -EINVAL; ++ } ++ ++ /* fwd type must have a valid name */ ++ if (!t->name_off || ++ !btf_name_valid_identifier(env->btf, t->name_off)) { ++ btf_verifier_log_type(env, t, "Invalid name"); ++ return -EINVAL; ++ } ++ ++ btf_verifier_log_type(env, t, NULL); ++ ++ return 0; ++} ++ ++static void btf_fwd_type_log(struct btf_verifier_env *env, ++ const struct btf_type *t) ++{ ++ btf_verifier_log(env, "%s", btf_type_kflag(t) ? 
"union" : "struct"); ++} ++ ++static struct btf_kind_operations fwd_ops = { ++ .check_meta = btf_fwd_check_meta, ++ .resolve = btf_df_resolve, ++ .check_member = btf_df_check_member, ++ .check_kflag_member = btf_df_check_kflag_member, ++ .log_details = btf_fwd_type_log, ++ .seq_show = btf_df_seq_show, ++}; ++ ++static int btf_array_check_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type) ++{ ++ u32 struct_bits_off = member->offset; ++ u32 struct_size, bytes_offset; ++ u32 array_type_id, array_size; ++ struct btf *btf = env->btf; ++ ++ if (BITS_PER_BYTE_MASKED(struct_bits_off)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member is not byte aligned"); ++ return -EINVAL; ++ } ++ ++ array_type_id = member->type; ++ btf_type_id_size(btf, &array_type_id, &array_size); ++ struct_size = struct_type->size; ++ bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); ++ if (struct_size - bytes_offset < array_size) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member exceeds struct_size"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static s32 btf_array_check_meta(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left) ++{ ++ const struct btf_array *array = btf_type_array(t); ++ u32 meta_needed = sizeof(*array); ++ ++ if (meta_left < meta_needed) { ++ btf_verifier_log_basic(env, t, ++ "meta_left:%u meta_needed:%u", ++ meta_left, meta_needed); ++ return -EINVAL; ++ } ++ ++ /* array type should not have a name */ ++ if (t->name_off) { ++ btf_verifier_log_type(env, t, "Invalid name"); ++ return -EINVAL; ++ } ++ ++ if (btf_type_vlen(t)) { ++ btf_verifier_log_type(env, t, "vlen != 0"); ++ return -EINVAL; ++ } ++ ++ if (btf_type_kflag(t)) { ++ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); ++ return -EINVAL; ++ } ++ ++ if (t->size) { ++ btf_verifier_log_type(env, t, "size != 0"); ++ return -EINVAL; ++ } ++ ++ /* Array 
elem type and index type cannot be in type void, ++ * so !array->type and !array->index_type are not allowed. ++ */ ++ if (!array->type || !BTF_TYPE_ID_VALID(array->type)) { ++ btf_verifier_log_type(env, t, "Invalid elem"); ++ return -EINVAL; ++ } ++ ++ if (!array->index_type || !BTF_TYPE_ID_VALID(array->index_type)) { ++ btf_verifier_log_type(env, t, "Invalid index"); ++ return -EINVAL; ++ } ++ ++ btf_verifier_log_type(env, t, NULL); ++ ++ return meta_needed; ++} ++ ++static int btf_array_resolve(struct btf_verifier_env *env, ++ const struct resolve_vertex *v) ++{ ++ const struct btf_array *array = btf_type_array(v->t); ++ const struct btf_type *elem_type, *index_type; ++ u32 elem_type_id, index_type_id; ++ struct btf *btf = env->btf; ++ u32 elem_size; ++ ++ /* Check array->index_type */ ++ index_type_id = array->index_type; ++ index_type = btf_type_by_id(btf, index_type_id); ++ if (btf_type_nosize_or_null(index_type) || ++ btf_type_is_resolve_source_only(index_type)) { ++ btf_verifier_log_type(env, v->t, "Invalid index"); ++ return -EINVAL; ++ } ++ ++ if (!env_type_is_resolve_sink(env, index_type) && ++ !env_type_is_resolved(env, index_type_id)) ++ return env_stack_push(env, index_type, index_type_id); ++ ++ index_type = btf_type_id_size(btf, &index_type_id, NULL); ++ if (!index_type || !btf_type_is_int(index_type) || ++ !btf_type_int_is_regular(index_type)) { ++ btf_verifier_log_type(env, v->t, "Invalid index"); ++ return -EINVAL; ++ } ++ ++ /* Check array->type */ ++ elem_type_id = array->type; ++ elem_type = btf_type_by_id(btf, elem_type_id); ++ if (btf_type_nosize_or_null(elem_type) || ++ btf_type_is_resolve_source_only(elem_type)) { ++ btf_verifier_log_type(env, v->t, ++ "Invalid elem"); ++ return -EINVAL; ++ } ++ ++ if (!env_type_is_resolve_sink(env, elem_type) && ++ !env_type_is_resolved(env, elem_type_id)) ++ return env_stack_push(env, elem_type, elem_type_id); ++ ++ elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); ++ if (!elem_type) { ++ 
btf_verifier_log_type(env, v->t, "Invalid elem"); ++ return -EINVAL; ++ } ++ ++ if (btf_type_is_int(elem_type) && !btf_type_int_is_regular(elem_type)) { ++ btf_verifier_log_type(env, v->t, "Invalid array of int"); ++ return -EINVAL; ++ } ++ ++ if (array->nelems && elem_size > U32_MAX / array->nelems) { ++ btf_verifier_log_type(env, v->t, ++ "Array size overflows U32_MAX"); ++ return -EINVAL; ++ } ++ ++ env_stack_pop_resolved(env, elem_type_id, elem_size * array->nelems); ++ ++ return 0; ++} ++ ++static void btf_array_log(struct btf_verifier_env *env, ++ const struct btf_type *t) ++{ ++ const struct btf_array *array = btf_type_array(t); ++ ++ btf_verifier_log(env, "type_id=%u index_type_id=%u nr_elems=%u", ++ array->type, array->index_type, array->nelems); ++} ++ ++static void btf_array_seq_show(const struct btf *btf, const struct btf_type *t, ++ u32 type_id, void *data, u8 bits_offset, ++ struct seq_file *m) ++{ ++ const struct btf_array *array = btf_type_array(t); ++ const struct btf_kind_operations *elem_ops; ++ const struct btf_type *elem_type; ++ u32 i, elem_size, elem_type_id; ++ ++ elem_type_id = array->type; ++ elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); ++ elem_ops = btf_type_ops(elem_type); ++ seq_puts(m, "["); ++ for (i = 0; i < array->nelems; i++) { ++ if (i) ++ seq_puts(m, ","); ++ ++ elem_ops->seq_show(btf, elem_type, elem_type_id, data, ++ bits_offset, m); ++ data += elem_size; ++ } ++ seq_puts(m, "]"); ++} ++ ++static struct btf_kind_operations array_ops = { ++ .check_meta = btf_array_check_meta, ++ .resolve = btf_array_resolve, ++ .check_member = btf_array_check_member, ++ .check_kflag_member = btf_generic_check_kflag_member, ++ .log_details = btf_array_log, ++ .seq_show = btf_array_seq_show, ++}; ++ ++static int btf_struct_check_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type) ++{ ++ u32 struct_bits_off = member->offset; ++ u32 
struct_size, bytes_offset; ++ ++ if (BITS_PER_BYTE_MASKED(struct_bits_off)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member is not byte aligned"); ++ return -EINVAL; ++ } ++ ++ struct_size = struct_type->size; ++ bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); ++ if (struct_size - bytes_offset < member_type->size) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member exceeds struct_size"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static s32 btf_struct_check_meta(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left) ++{ ++ bool is_union = BTF_INFO_KIND(t->info) == BTF_KIND_UNION; ++ const struct btf_member *member; ++ u32 meta_needed, last_offset; ++ struct btf *btf = env->btf; ++ u32 struct_size = t->size; ++ u32 offset; ++ u16 i; ++ ++ meta_needed = btf_type_vlen(t) * sizeof(*member); ++ if (meta_left < meta_needed) { ++ btf_verifier_log_basic(env, t, ++ "meta_left:%u meta_needed:%u", ++ meta_left, meta_needed); ++ return -EINVAL; ++ } ++ ++ /* struct type either no name or a valid one */ ++ if (t->name_off && ++ !btf_name_valid_identifier(env->btf, t->name_off)) { ++ btf_verifier_log_type(env, t, "Invalid name"); ++ return -EINVAL; ++ } ++ ++ btf_verifier_log_type(env, t, NULL); ++ ++ last_offset = 0; ++ for_each_member(i, t, member) { ++ if (!btf_name_offset_valid(btf, member->name_off)) { ++ btf_verifier_log_member(env, t, member, ++ "Invalid member name_offset:%u", ++ member->name_off); ++ return -EINVAL; ++ } ++ ++ /* struct member either no name or a valid one */ ++ if (member->name_off && ++ !btf_name_valid_identifier(btf, member->name_off)) { ++ btf_verifier_log_member(env, t, member, "Invalid name"); ++ return -EINVAL; ++ } ++ /* A member cannot be in type void */ ++ if (!member->type || !BTF_TYPE_ID_VALID(member->type)) { ++ btf_verifier_log_member(env, t, member, ++ "Invalid type_id"); ++ return -EINVAL; ++ } ++ ++ offset = btf_member_bit_offset(t, member); ++ if (is_union && offset) { 
++ btf_verifier_log_member(env, t, member, ++ "Invalid member bits_offset"); ++ return -EINVAL; ++ } ++ ++ /* ++ * ">" instead of ">=" because the last member could be ++ * "char a[0];" ++ */ ++ if (last_offset > offset) { ++ btf_verifier_log_member(env, t, member, ++ "Invalid member bits_offset"); ++ return -EINVAL; ++ } ++ ++ if (BITS_ROUNDUP_BYTES(offset) > struct_size) { ++ btf_verifier_log_member(env, t, member, ++ "Member bits_offset exceeds its struct size"); ++ return -EINVAL; ++ } ++ ++ btf_verifier_log_member(env, t, member, NULL); ++ last_offset = offset; ++ } ++ ++ return meta_needed; ++} ++ ++static int btf_struct_resolve(struct btf_verifier_env *env, ++ const struct resolve_vertex *v) ++{ ++ const struct btf_member *member; ++ int err; ++ u16 i; ++ ++ /* Before continue resolving the next_member, ++ * ensure the last member is indeed resolved to a ++ * type with size info. ++ */ ++ if (v->next_member) { ++ const struct btf_type *last_member_type; ++ const struct btf_member *last_member; ++ u16 last_member_type_id; ++ ++ last_member = btf_type_member(v->t) + v->next_member - 1; ++ last_member_type_id = last_member->type; ++ if (WARN_ON_ONCE(!env_type_is_resolved(env, ++ last_member_type_id))) ++ return -EINVAL; ++ ++ last_member_type = btf_type_by_id(env->btf, ++ last_member_type_id); ++ if (btf_type_kflag(v->t)) ++ err = btf_type_ops(last_member_type)->check_kflag_member(env, v->t, ++ last_member, ++ last_member_type); ++ else ++ err = btf_type_ops(last_member_type)->check_member(env, v->t, ++ last_member, ++ last_member_type); ++ if (err) ++ return err; ++ } ++ ++ for_each_member_from(i, v->next_member, v->t, member) { ++ u32 member_type_id = member->type; ++ const struct btf_type *member_type = btf_type_by_id(env->btf, ++ member_type_id); ++ ++ if (btf_type_nosize_or_null(member_type) || ++ btf_type_is_resolve_source_only(member_type)) { ++ btf_verifier_log_member(env, v->t, member, ++ "Invalid member"); ++ return -EINVAL; ++ } ++ ++ if 
(!env_type_is_resolve_sink(env, member_type) && ++ !env_type_is_resolved(env, member_type_id)) { ++ env_stack_set_next_member(env, i + 1); ++ return env_stack_push(env, member_type, member_type_id); ++ } ++ ++ if (btf_type_kflag(v->t)) ++ err = btf_type_ops(member_type)->check_kflag_member(env, v->t, ++ member, ++ member_type); ++ else ++ err = btf_type_ops(member_type)->check_member(env, v->t, ++ member, ++ member_type); ++ if (err) ++ return err; ++ } ++ ++ env_stack_pop_resolved(env, 0, 0); ++ ++ return 0; ++} ++ ++static void btf_struct_log(struct btf_verifier_env *env, ++ const struct btf_type *t) ++{ ++ btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); ++} ++ ++/* find 'struct bpf_spin_lock' in map value. ++ * return >= 0 offset if found ++ * and < 0 in case of error ++ */ ++int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t) ++{ ++ const struct btf_member *member; ++ u32 i, off = -ENOENT; ++ ++ if (!__btf_type_is_struct(t)) ++ return -EINVAL; ++ ++ for_each_member(i, t, member) { ++ const struct btf_type *member_type = btf_type_by_id(btf, ++ member->type); ++ if (!__btf_type_is_struct(member_type)) ++ continue; ++ if (member_type->size != sizeof(struct bpf_spin_lock)) ++ continue; ++ if (strcmp(__btf_name_by_offset(btf, member_type->name_off), ++ "bpf_spin_lock")) ++ continue; ++ if (off != -ENOENT) ++ /* only one 'struct bpf_spin_lock' is allowed */ ++ return -E2BIG; ++ off = btf_member_bit_offset(t, member); ++ if (off % 8) ++ /* valid C code cannot generate such BTF */ ++ return -EINVAL; ++ off /= 8; ++ if (off % __alignof__(struct bpf_spin_lock)) ++ /* valid struct bpf_spin_lock will be 4 byte aligned */ ++ return -EINVAL; ++ } ++ return off; ++} ++ ++static void btf_struct_seq_show(const struct btf *btf, const struct btf_type *t, ++ u32 type_id, void *data, u8 bits_offset, ++ struct seq_file *m) ++{ ++ const char *seq = BTF_INFO_KIND(t->info) == BTF_KIND_UNION ? 
"|" : ","; ++ const struct btf_member *member; ++ u32 i; ++ ++ seq_puts(m, "{"); ++ for_each_member(i, t, member) { ++ const struct btf_type *member_type = btf_type_by_id(btf, ++ member->type); ++ const struct btf_kind_operations *ops; ++ u32 member_offset, bitfield_size; ++ u32 bytes_offset; ++ u8 bits8_offset; ++ ++ if (i) ++ seq_puts(m, seq); ++ ++ member_offset = btf_member_bit_offset(t, member); ++ bitfield_size = btf_member_bitfield_size(t, member); ++ bytes_offset = BITS_ROUNDDOWN_BYTES(member_offset); ++ bits8_offset = BITS_PER_BYTE_MASKED(member_offset); ++ if (bitfield_size) { ++ btf_bitfield_seq_show(data + bytes_offset, bits8_offset, ++ bitfield_size, m); ++ } else { ++ ops = btf_type_ops(member_type); ++ ops->seq_show(btf, member_type, member->type, ++ data + bytes_offset, bits8_offset, m); ++ } ++ } ++ seq_puts(m, "}"); ++} ++ ++static struct btf_kind_operations struct_ops = { ++ .check_meta = btf_struct_check_meta, ++ .resolve = btf_struct_resolve, ++ .check_member = btf_struct_check_member, ++ .check_kflag_member = btf_generic_check_kflag_member, ++ .log_details = btf_struct_log, ++ .seq_show = btf_struct_seq_show, ++}; ++ ++static int btf_enum_check_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct btf_member *member, ++ const struct btf_type *member_type) ++{ ++ u32 struct_bits_off = member->offset; ++ u32 struct_size, bytes_offset; ++ ++ if (BITS_PER_BYTE_MASKED(struct_bits_off)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member is not byte aligned"); ++ return -EINVAL; ++ } ++ ++ struct_size = struct_type->size; ++ bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off); ++ if (struct_size - bytes_offset < member_type->size) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member exceeds struct_size"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int btf_enum_check_kflag_member(struct btf_verifier_env *env, ++ const struct btf_type *struct_type, ++ const struct 
btf_member *member, ++ const struct btf_type *member_type) ++{ ++ u32 struct_bits_off, nr_bits, bytes_end, struct_size; ++ u32 int_bitsize = sizeof(int) * BITS_PER_BYTE; ++ ++ struct_bits_off = BTF_MEMBER_BIT_OFFSET(member->offset); ++ nr_bits = BTF_MEMBER_BITFIELD_SIZE(member->offset); ++ if (!nr_bits) { ++ if (BITS_PER_BYTE_MASKED(struct_bits_off)) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member is not byte aligned"); ++ return -EINVAL; ++ } ++ ++ nr_bits = int_bitsize; ++ } else if (nr_bits > int_bitsize) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Invalid member bitfield_size"); ++ return -EINVAL; ++ } ++ ++ struct_size = struct_type->size; ++ bytes_end = BITS_ROUNDUP_BYTES(struct_bits_off + nr_bits); ++ if (struct_size < bytes_end) { ++ btf_verifier_log_member(env, struct_type, member, ++ "Member exceeds struct_size"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static s32 btf_enum_check_meta(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left) ++{ ++ const struct btf_enum *enums = btf_type_enum(t); ++ struct btf *btf = env->btf; ++ u16 i, nr_enums; ++ u32 meta_needed; ++ ++ nr_enums = btf_type_vlen(t); ++ meta_needed = nr_enums * sizeof(*enums); ++ ++ if (meta_left < meta_needed) { ++ btf_verifier_log_basic(env, t, ++ "meta_left:%u meta_needed:%u", ++ meta_left, meta_needed); ++ return -EINVAL; ++ } ++ ++ if (btf_type_kflag(t)) { ++ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); ++ return -EINVAL; ++ } ++ ++ if (t->size > 8 || !is_power_of_2(t->size)) { ++ btf_verifier_log_type(env, t, "Unexpected size"); ++ return -EINVAL; ++ } ++ ++ /* enum type either no name or a valid one */ ++ if (t->name_off && ++ !btf_name_valid_identifier(env->btf, t->name_off)) { ++ btf_verifier_log_type(env, t, "Invalid name"); ++ return -EINVAL; ++ } ++ ++ btf_verifier_log_type(env, t, NULL); ++ ++ for (i = 0; i < nr_enums; i++) { ++ if (!btf_name_offset_valid(btf, enums[i].name_off)) { ++ 
btf_verifier_log(env, "\tInvalid name_offset:%u", ++ enums[i].name_off); ++ return -EINVAL; ++ } ++ ++ /* enum member must have a valid name */ ++ if (!enums[i].name_off || ++ !btf_name_valid_identifier(btf, enums[i].name_off)) { ++ btf_verifier_log_type(env, t, "Invalid name"); ++ return -EINVAL; ++ } ++ ++ ++ btf_verifier_log(env, "\t%s val=%d\n", ++ __btf_name_by_offset(btf, enums[i].name_off), ++ enums[i].val); ++ } ++ ++ return meta_needed; ++} ++ ++static void btf_enum_log(struct btf_verifier_env *env, ++ const struct btf_type *t) ++{ ++ btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); ++} ++ ++static void btf_enum_seq_show(const struct btf *btf, const struct btf_type *t, ++ u32 type_id, void *data, u8 bits_offset, ++ struct seq_file *m) ++{ ++ const struct btf_enum *enums = btf_type_enum(t); ++ u32 i, nr_enums = btf_type_vlen(t); ++ int v = *(int *)data; ++ ++ for (i = 0; i < nr_enums; i++) { ++ if (v == enums[i].val) { ++ seq_printf(m, "%s", ++ __btf_name_by_offset(btf, ++ enums[i].name_off)); ++ return; ++ } ++ } ++ ++ seq_printf(m, "%d", v); ++} ++ ++static struct btf_kind_operations enum_ops = { ++ .check_meta = btf_enum_check_meta, ++ .resolve = btf_df_resolve, ++ .check_member = btf_enum_check_member, ++ .check_kflag_member = btf_enum_check_kflag_member, ++ .log_details = btf_enum_log, ++ .seq_show = btf_enum_seq_show, ++}; ++ ++static s32 btf_func_proto_check_meta(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left) ++{ ++ u32 meta_needed = btf_type_vlen(t) * sizeof(struct btf_param); ++ ++ if (meta_left < meta_needed) { ++ btf_verifier_log_basic(env, t, ++ "meta_left:%u meta_needed:%u", ++ meta_left, meta_needed); ++ return -EINVAL; ++ } ++ ++ if (t->name_off) { ++ btf_verifier_log_type(env, t, "Invalid name"); ++ return -EINVAL; ++ } ++ ++ if (btf_type_kflag(t)) { ++ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); ++ return -EINVAL; ++ } ++ ++ btf_verifier_log_type(env, t, NULL); ++ ++ return 
meta_needed; ++} ++ ++static void btf_func_proto_log(struct btf_verifier_env *env, ++ const struct btf_type *t) ++{ ++ const struct btf_param *args = (const struct btf_param *)(t + 1); ++ u16 nr_args = btf_type_vlen(t), i; ++ ++ btf_verifier_log(env, "return=%u args=(", t->type); ++ if (!nr_args) { ++ btf_verifier_log(env, "void"); ++ goto done; ++ } ++ ++ if (nr_args == 1 && !args[0].type) { ++ /* Only one vararg */ ++ btf_verifier_log(env, "vararg"); ++ goto done; ++ } ++ ++ btf_verifier_log(env, "%u %s", args[0].type, ++ __btf_name_by_offset(env->btf, ++ args[0].name_off)); ++ for (i = 1; i < nr_args - 1; i++) ++ btf_verifier_log(env, ", %u %s", args[i].type, ++ __btf_name_by_offset(env->btf, ++ args[i].name_off)); ++ ++ if (nr_args > 1) { ++ const struct btf_param *last_arg = &args[nr_args - 1]; ++ ++ if (last_arg->type) ++ btf_verifier_log(env, ", %u %s", last_arg->type, ++ __btf_name_by_offset(env->btf, ++ last_arg->name_off)); ++ else ++ btf_verifier_log(env, ", vararg"); ++ } ++ ++done: ++ btf_verifier_log(env, ")"); ++} ++ ++static struct btf_kind_operations func_proto_ops = { ++ .check_meta = btf_func_proto_check_meta, ++ .resolve = btf_df_resolve, ++ /* ++ * BTF_KIND_FUNC_PROTO cannot be directly referred by ++ * a struct's member. ++ * ++ * It should be a funciton pointer instead. ++ * (i.e. struct's member -> BTF_KIND_PTR -> BTF_KIND_FUNC_PROTO) ++ * ++ * Hence, there is no btf_func_check_member(). 
++ */ ++ .check_member = btf_df_check_member, ++ .check_kflag_member = btf_df_check_kflag_member, ++ .log_details = btf_func_proto_log, ++ .seq_show = btf_df_seq_show, ++}; ++ ++static s32 btf_func_check_meta(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left) ++{ ++ if (!t->name_off || ++ !btf_name_valid_identifier(env->btf, t->name_off)) { ++ btf_verifier_log_type(env, t, "Invalid name"); ++ return -EINVAL; ++ } ++ ++ if (btf_type_vlen(t)) { ++ btf_verifier_log_type(env, t, "vlen != 0"); ++ return -EINVAL; ++ } ++ ++ if (btf_type_kflag(t)) { ++ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); ++ return -EINVAL; ++ } ++ ++ btf_verifier_log_type(env, t, NULL); ++ ++ return 0; ++} ++ ++static struct btf_kind_operations func_ops = { ++ .check_meta = btf_func_check_meta, ++ .resolve = btf_df_resolve, ++ .check_member = btf_df_check_member, ++ .check_kflag_member = btf_df_check_kflag_member, ++ .log_details = btf_ref_type_log, ++ .seq_show = btf_df_seq_show, ++}; ++ ++static s32 btf_var_check_meta(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left) ++{ ++ const struct btf_var *var; ++ u32 meta_needed = sizeof(*var); ++ ++ if (meta_left < meta_needed) { ++ btf_verifier_log_basic(env, t, ++ "meta_left:%u meta_needed:%u", ++ meta_left, meta_needed); ++ return -EINVAL; ++ } ++ ++ if (btf_type_vlen(t)) { ++ btf_verifier_log_type(env, t, "vlen != 0"); ++ return -EINVAL; ++ } ++ ++ if (btf_type_kflag(t)) { ++ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); ++ return -EINVAL; ++ } ++ ++ if (!t->name_off || ++ !__btf_name_valid(env->btf, t->name_off, true)) { ++ btf_verifier_log_type(env, t, "Invalid name"); ++ return -EINVAL; ++ } ++ ++ /* A var cannot be in type void */ ++ if (!t->type || !BTF_TYPE_ID_VALID(t->type)) { ++ btf_verifier_log_type(env, t, "Invalid type_id"); ++ return -EINVAL; ++ } ++ ++ var = btf_type_var(t); ++ if (var->linkage != BTF_VAR_STATIC && ++ var->linkage != 
BTF_VAR_GLOBAL_ALLOCATED) { ++ btf_verifier_log_type(env, t, "Linkage not supported"); ++ return -EINVAL; ++ } ++ ++ btf_verifier_log_type(env, t, NULL); ++ ++ return meta_needed; ++} ++ ++static void btf_var_log(struct btf_verifier_env *env, const struct btf_type *t) ++{ ++ const struct btf_var *var = btf_type_var(t); ++ ++ btf_verifier_log(env, "type_id=%u linkage=%u", t->type, var->linkage); ++} ++ ++static const struct btf_kind_operations var_ops = { ++ .check_meta = btf_var_check_meta, ++ .resolve = btf_var_resolve, ++ .check_member = btf_df_check_member, ++ .check_kflag_member = btf_df_check_kflag_member, ++ .log_details = btf_var_log, ++ .seq_show = btf_var_seq_show, ++}; ++ ++static s32 btf_datasec_check_meta(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left) ++{ ++ const struct btf_var_secinfo *vsi; ++ u64 last_vsi_end_off = 0, sum = 0; ++ u32 i, meta_needed; ++ ++ meta_needed = btf_type_vlen(t) * sizeof(*vsi); ++ if (meta_left < meta_needed) { ++ btf_verifier_log_basic(env, t, ++ "meta_left:%u meta_needed:%u", ++ meta_left, meta_needed); ++ return -EINVAL; ++ } ++ ++ if (!btf_type_vlen(t)) { ++ btf_verifier_log_type(env, t, "vlen == 0"); ++ return -EINVAL; ++ } ++ ++ if (!t->size) { ++ btf_verifier_log_type(env, t, "size == 0"); ++ return -EINVAL; ++ } ++ ++ if (btf_type_kflag(t)) { ++ btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); ++ return -EINVAL; ++ } ++ ++ if (!t->name_off || ++ !btf_name_valid_section(env->btf, t->name_off)) { ++ btf_verifier_log_type(env, t, "Invalid name"); ++ return -EINVAL; ++ } ++ ++ btf_verifier_log_type(env, t, NULL); ++ ++ for_each_vsi(i, t, vsi) { ++ /* A var cannot be in type void */ ++ if (!vsi->type || !BTF_TYPE_ID_VALID(vsi->type)) { ++ btf_verifier_log_vsi(env, t, vsi, ++ "Invalid type_id"); ++ return -EINVAL; ++ } ++ ++ if (vsi->offset < last_vsi_end_off || vsi->offset >= t->size) { ++ btf_verifier_log_vsi(env, t, vsi, ++ "Invalid offset"); ++ return -EINVAL; ++ } ++ ++ if 
(!vsi->size || vsi->size > t->size) { ++ btf_verifier_log_vsi(env, t, vsi, ++ "Invalid size"); ++ return -EINVAL; ++ } ++ ++ last_vsi_end_off = vsi->offset + vsi->size; ++ if (last_vsi_end_off > t->size) { ++ btf_verifier_log_vsi(env, t, vsi, ++ "Invalid offset+size"); ++ return -EINVAL; ++ } ++ ++ btf_verifier_log_vsi(env, t, vsi, NULL); ++ sum += vsi->size; ++ } ++ ++ if (t->size < sum) { ++ btf_verifier_log_type(env, t, "Invalid btf_info size"); ++ return -EINVAL; ++ } ++ ++ return meta_needed; ++} ++ ++static int btf_datasec_resolve(struct btf_verifier_env *env, ++ const struct resolve_vertex *v) ++{ ++ const struct btf_var_secinfo *vsi; ++ struct btf *btf = env->btf; ++ u16 i; ++ ++ for_each_vsi_from(i, v->next_member, v->t, vsi) { ++ u32 var_type_id = vsi->type, type_id, type_size = 0; ++ const struct btf_type *var_type = btf_type_by_id(env->btf, ++ var_type_id); ++ if (!var_type || !btf_type_is_var(var_type)) { ++ btf_verifier_log_vsi(env, v->t, vsi, ++ "Not a VAR kind member"); ++ return -EINVAL; ++ } ++ ++ if (!env_type_is_resolve_sink(env, var_type) && ++ !env_type_is_resolved(env, var_type_id)) { ++ env_stack_set_next_member(env, i + 1); ++ return env_stack_push(env, var_type, var_type_id); ++ } ++ ++ type_id = var_type->type; ++ if (!btf_type_id_size(btf, &type_id, &type_size)) { ++ btf_verifier_log_vsi(env, v->t, vsi, "Invalid type"); ++ return -EINVAL; ++ } ++ ++ if (vsi->size < type_size) { ++ btf_verifier_log_vsi(env, v->t, vsi, "Invalid size"); ++ return -EINVAL; ++ } ++ } ++ ++ env_stack_pop_resolved(env, 0, 0); ++ return 0; ++} ++ ++static void btf_datasec_log(struct btf_verifier_env *env, ++ const struct btf_type *t) ++{ ++ btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t)); ++} ++ ++static void btf_datasec_seq_show(const struct btf *btf, ++ const struct btf_type *t, u32 type_id, ++ void *data, u8 bits_offset, ++ struct seq_file *m) ++{ ++ const struct btf_var_secinfo *vsi; ++ const struct btf_type *var; ++ u32 i; ++ ++ 
seq_printf(m, "section (\"%s\") = {", __btf_name_by_offset(btf, t->name_off)); ++ for_each_vsi(i, t, vsi) { ++ var = btf_type_by_id(btf, vsi->type); ++ if (i) ++ seq_puts(m, ","); ++ btf_type_ops(var)->seq_show(btf, var, vsi->type, ++ data + vsi->offset, bits_offset, m); ++ } ++ seq_puts(m, "}"); ++} ++ ++static const struct btf_kind_operations datasec_ops = { ++ .check_meta = btf_datasec_check_meta, ++ .resolve = btf_datasec_resolve, ++ .check_member = btf_df_check_member, ++ .check_kflag_member = btf_df_check_kflag_member, ++ .log_details = btf_datasec_log, ++ .seq_show = btf_datasec_seq_show, ++}; ++ ++static int btf_func_proto_check(struct btf_verifier_env *env, ++ const struct btf_type *t) ++{ ++ const struct btf_type *ret_type; ++ const struct btf_param *args; ++ const struct btf *btf; ++ u16 nr_args, i; ++ int err; ++ ++ btf = env->btf; ++ args = (const struct btf_param *)(t + 1); ++ nr_args = btf_type_vlen(t); ++ ++ /* Check func return type which could be "void" (t->type == 0) */ ++ if (t->type) { ++ u32 ret_type_id = t->type; ++ ++ ret_type = btf_type_by_id(btf, ret_type_id); ++ if (!ret_type) { ++ btf_verifier_log_type(env, t, "Invalid return type"); ++ return -EINVAL; ++ } ++ ++ if (btf_type_needs_resolve(ret_type) && ++ !env_type_is_resolved(env, ret_type_id)) { ++ err = btf_resolve(env, ret_type, ret_type_id); ++ if (err) ++ return err; ++ } ++ ++ /* Ensure the return type is a type that has a size */ ++ if (!btf_type_id_size(btf, &ret_type_id, NULL)) { ++ btf_verifier_log_type(env, t, "Invalid return type"); ++ return -EINVAL; ++ } ++ } ++ ++ if (!nr_args) ++ return 0; ++ ++ /* Last func arg type_id could be 0 if it is a vararg */ ++ if (!args[nr_args - 1].type) { ++ if (args[nr_args - 1].name_off) { ++ btf_verifier_log_type(env, t, "Invalid arg#%u", ++ nr_args); ++ return -EINVAL; ++ } ++ nr_args--; ++ } ++ ++ err = 0; ++ for (i = 0; i < nr_args; i++) { ++ const struct btf_type *arg_type; ++ u32 arg_type_id; ++ ++ arg_type_id = args[i].type; ++ 
arg_type = btf_type_by_id(btf, arg_type_id); ++ if (!arg_type) { ++ btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); ++ err = -EINVAL; ++ break; ++ } ++ ++ if (args[i].name_off && ++ (!btf_name_offset_valid(btf, args[i].name_off) || ++ !btf_name_valid_identifier(btf, args[i].name_off))) { ++ btf_verifier_log_type(env, t, ++ "Invalid arg#%u", i + 1); ++ err = -EINVAL; ++ break; ++ } ++ ++ if (btf_type_needs_resolve(arg_type) && ++ !env_type_is_resolved(env, arg_type_id)) { ++ err = btf_resolve(env, arg_type, arg_type_id); ++ if (err) ++ break; ++ } ++ ++ if (!btf_type_id_size(btf, &arg_type_id, NULL)) { ++ btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); ++ err = -EINVAL; ++ break; ++ } ++ } ++ ++ return err; ++} ++ ++static int btf_func_check(struct btf_verifier_env *env, ++ const struct btf_type *t) ++{ ++ const struct btf_type *proto_type; ++ const struct btf_param *args; ++ const struct btf *btf; ++ u16 nr_args, i; ++ ++ btf = env->btf; ++ proto_type = btf_type_by_id(btf, t->type); ++ ++ if (!proto_type || !btf_type_is_func_proto(proto_type)) { ++ btf_verifier_log_type(env, t, "Invalid type_id"); ++ return -EINVAL; ++ } ++ ++ args = (const struct btf_param *)(proto_type + 1); ++ nr_args = btf_type_vlen(proto_type); ++ for (i = 0; i < nr_args; i++) { ++ if (!args[i].name_off && args[i].type) { ++ btf_verifier_log_type(env, t, "Invalid arg#%u", i + 1); ++ return -EINVAL; ++ } ++ } ++ ++ return 0; ++} ++ ++static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = { ++ [BTF_KIND_INT] = &int_ops, ++ [BTF_KIND_PTR] = &ptr_ops, ++ [BTF_KIND_ARRAY] = &array_ops, ++ [BTF_KIND_STRUCT] = &struct_ops, ++ [BTF_KIND_UNION] = &struct_ops, ++ [BTF_KIND_ENUM] = &enum_ops, ++ [BTF_KIND_FWD] = &fwd_ops, ++ [BTF_KIND_TYPEDEF] = &modifier_ops, ++ [BTF_KIND_VOLATILE] = &modifier_ops, ++ [BTF_KIND_CONST] = &modifier_ops, ++ [BTF_KIND_RESTRICT] = &modifier_ops, ++ [BTF_KIND_FUNC] = &func_ops, ++ [BTF_KIND_FUNC_PROTO] = &func_proto_ops, ++ [BTF_KIND_VAR] = 
&var_ops, ++ [BTF_KIND_DATASEC] = &datasec_ops, ++}; ++ ++static s32 btf_check_meta(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 meta_left) ++{ ++ u32 saved_meta_left = meta_left; ++ s32 var_meta_size; ++ ++ if (meta_left < sizeof(*t)) { ++ btf_verifier_log(env, "[%u] meta_left:%u meta_needed:%zu", ++ env->log_type_id, meta_left, sizeof(*t)); ++ return -EINVAL; ++ } ++ meta_left -= sizeof(*t); ++ ++ if (t->info & ~BTF_INFO_MASK) { ++ btf_verifier_log(env, "[%u] Invalid btf_info:%x", ++ env->log_type_id, t->info); ++ return -EINVAL; ++ } ++ ++ if (BTF_INFO_KIND(t->info) > BTF_KIND_MAX || ++ BTF_INFO_KIND(t->info) == BTF_KIND_UNKN) { ++ btf_verifier_log(env, "[%u] Invalid kind:%u", ++ env->log_type_id, BTF_INFO_KIND(t->info)); ++ return -EINVAL; ++ } ++ ++ if (!btf_name_offset_valid(env->btf, t->name_off)) { ++ btf_verifier_log(env, "[%u] Invalid name_offset:%u", ++ env->log_type_id, t->name_off); ++ return -EINVAL; ++ } ++ ++ var_meta_size = btf_type_ops(t)->check_meta(env, t, meta_left); ++ if (var_meta_size < 0) ++ return var_meta_size; ++ ++ meta_left -= var_meta_size; ++ ++ return saved_meta_left - meta_left; ++} ++ ++static int btf_check_all_metas(struct btf_verifier_env *env) ++{ ++ struct btf *btf = env->btf; ++ struct btf_header *hdr; ++ void *cur, *end; ++ ++ hdr = &btf->hdr; ++ cur = btf->nohdr_data + hdr->type_off; ++ end = cur + hdr->type_len; ++ ++ env->log_type_id = 1; ++ while (cur < end) { ++ struct btf_type *t = cur; ++ s32 meta_size; ++ ++ meta_size = btf_check_meta(env, t, end - cur); ++ if (meta_size < 0) ++ return meta_size; ++ ++ btf_add_type(env, t); ++ cur += meta_size; ++ env->log_type_id++; ++ } ++ ++ return 0; ++} ++ ++static bool btf_resolve_valid(struct btf_verifier_env *env, ++ const struct btf_type *t, ++ u32 type_id) ++{ ++ struct btf *btf = env->btf; ++ ++ if (!env_type_is_resolved(env, type_id)) ++ return false; ++ ++ if (btf_type_is_struct(t) || btf_type_is_datasec(t)) ++ return !btf->resolved_ids[type_id] && 
++ !btf->resolved_sizes[type_id]; ++ ++ if (btf_type_is_modifier(t) || btf_type_is_ptr(t) || ++ btf_type_is_var(t)) { ++ t = btf_type_id_resolve(btf, &type_id); ++ return t && ++ !btf_type_is_modifier(t) && ++ !btf_type_is_var(t) && ++ !btf_type_is_datasec(t); ++ } ++ ++ if (btf_type_is_array(t)) { ++ const struct btf_array *array = btf_type_array(t); ++ const struct btf_type *elem_type; ++ u32 elem_type_id = array->type; ++ u32 elem_size; ++ ++ elem_type = btf_type_id_size(btf, &elem_type_id, &elem_size); ++ return elem_type && !btf_type_is_modifier(elem_type) && ++ (array->nelems * elem_size == ++ btf->resolved_sizes[type_id]); ++ } ++ ++ return false; ++} ++ ++static int btf_resolve(struct btf_verifier_env *env, ++ const struct btf_type *t, u32 type_id) ++{ ++ u32 save_log_type_id = env->log_type_id; ++ const struct resolve_vertex *v; ++ int err = 0; ++ ++ env->resolve_mode = RESOLVE_TBD; ++ env_stack_push(env, t, type_id); ++ while (!err && (v = env_stack_peak(env))) { ++ env->log_type_id = v->type_id; ++ err = btf_type_ops(v->t)->resolve(env, v); ++ } ++ ++ env->log_type_id = type_id; ++ if (err == -E2BIG) { ++ btf_verifier_log_type(env, t, ++ "Exceeded max resolving depth:%u", ++ MAX_RESOLVE_DEPTH); ++ } else if (err == -EEXIST) { ++ btf_verifier_log_type(env, t, "Loop detected"); ++ } ++ ++ /* Final sanity check */ ++ if (!err && !btf_resolve_valid(env, t, type_id)) { ++ btf_verifier_log_type(env, t, "Invalid resolve state"); ++ err = -EINVAL; ++ } ++ ++ env->log_type_id = save_log_type_id; ++ return err; ++} ++ ++static int btf_check_all_types(struct btf_verifier_env *env) ++{ ++ struct btf *btf = env->btf; ++ u32 type_id; ++ int err; ++ ++ err = env_resolve_init(env); ++ if (err) ++ return err; ++ ++ env->phase++; ++ for (type_id = 1; type_id <= btf->nr_types; type_id++) { ++ const struct btf_type *t = btf_type_by_id(btf, type_id); ++ ++ env->log_type_id = type_id; ++ if (btf_type_needs_resolve(t) && ++ !env_type_is_resolved(env, type_id)) { ++ err = 
btf_resolve(env, t, type_id); ++ if (err) ++ return err; ++ } ++ ++ if (btf_type_is_func_proto(t)) { ++ err = btf_func_proto_check(env, t); ++ if (err) ++ return err; ++ } ++ ++ if (btf_type_is_func(t)) { ++ err = btf_func_check(env, t); ++ if (err) ++ return err; ++ } ++ } ++ ++ return 0; ++} ++ ++static int btf_parse_type_sec(struct btf_verifier_env *env) ++{ ++ const struct btf_header *hdr = &env->btf->hdr; ++ int err; ++ ++ /* Type section must align to 4 bytes */ ++ if (hdr->type_off & (sizeof(u32) - 1)) { ++ btf_verifier_log(env, "Unaligned type_off"); ++ return -EINVAL; ++ } ++ ++ if (!hdr->type_len) { ++ btf_verifier_log(env, "No type found"); ++ return -EINVAL; ++ } ++ ++ err = btf_check_all_metas(env); ++ if (err) ++ return err; ++ ++ return btf_check_all_types(env); ++} ++ ++static int btf_parse_str_sec(struct btf_verifier_env *env) ++{ ++ const struct btf_header *hdr; ++ struct btf *btf = env->btf; ++ const char *start, *end; ++ ++ hdr = &btf->hdr; ++ start = btf->nohdr_data + hdr->str_off; ++ end = start + hdr->str_len; ++ ++ if (end != btf->data + btf->data_size) { ++ btf_verifier_log(env, "String section is not at the end"); ++ return -EINVAL; ++ } ++ ++ if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || ++ start[0] || end[-1]) { ++ btf_verifier_log(env, "Invalid string section"); ++ return -EINVAL; ++ } ++ ++ btf->strings = start; ++ ++ return 0; ++} ++ ++static const size_t btf_sec_info_offset[] = { ++ offsetof(struct btf_header, type_off), ++ offsetof(struct btf_header, str_off), ++}; ++ ++static int btf_sec_info_cmp(const void *a, const void *b) ++{ ++ const struct btf_sec_info *x = a; ++ const struct btf_sec_info *y = b; ++ ++ return (int)(x->off - y->off) ? 
: (int)(x->len - y->len); ++} ++ ++static int btf_check_sec_info(struct btf_verifier_env *env, ++ u32 btf_data_size) ++{ ++ struct btf_sec_info secs[ARRAY_SIZE(btf_sec_info_offset)]; ++ u32 total, expected_total, i; ++ const struct btf_header *hdr; ++ const struct btf *btf; ++ ++ btf = env->btf; ++ hdr = &btf->hdr; ++ ++ /* Populate the secs from hdr */ ++ for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) ++ secs[i] = *(struct btf_sec_info *)((void *)hdr + ++ btf_sec_info_offset[i]); ++ ++ sort(secs, ARRAY_SIZE(btf_sec_info_offset), ++ sizeof(struct btf_sec_info), btf_sec_info_cmp, NULL); ++ ++ /* Check for gaps and overlap among sections */ ++ total = 0; ++ expected_total = btf_data_size - hdr->hdr_len; ++ for (i = 0; i < ARRAY_SIZE(btf_sec_info_offset); i++) { ++ if (expected_total < secs[i].off) { ++ btf_verifier_log(env, "Invalid section offset"); ++ return -EINVAL; ++ } ++ if (total < secs[i].off) { ++ /* gap */ ++ btf_verifier_log(env, "Unsupported section found"); ++ return -EINVAL; ++ } ++ if (total > secs[i].off) { ++ btf_verifier_log(env, "Section overlap found"); ++ return -EINVAL; ++ } ++ if (expected_total - total < secs[i].len) { ++ btf_verifier_log(env, ++ "Total section length too long"); ++ return -EINVAL; ++ } ++ total += secs[i].len; ++ } ++ ++ /* There is data other than hdr and known sections */ ++ if (expected_total != total) { ++ btf_verifier_log(env, "Unsupported section found"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int btf_parse_hdr(struct btf_verifier_env *env) ++{ ++ u32 hdr_len, hdr_copy, btf_data_size; ++ const struct btf_header *hdr; ++ struct btf *btf; ++ int err; ++ ++ btf = env->btf; ++ btf_data_size = btf->data_size; ++ ++ if (btf_data_size < ++ offsetof(struct btf_header, hdr_len) + sizeof(hdr->hdr_len)) { ++ btf_verifier_log(env, "hdr_len not found"); ++ return -EINVAL; ++ } ++ ++ hdr = btf->data; ++ hdr_len = hdr->hdr_len; ++ if (btf_data_size < hdr_len) { ++ btf_verifier_log(env, "btf_header not found"); 
++ return -EINVAL; ++ } ++ ++ /* Ensure the unsupported header fields are zero */ ++ if (hdr_len > sizeof(btf->hdr)) { ++ u8 *expected_zero = btf->data + sizeof(btf->hdr); ++ u8 *end = btf->data + hdr_len; ++ ++ for (; expected_zero < end; expected_zero++) { ++ if (*expected_zero) { ++ btf_verifier_log(env, "Unsupported btf_header"); ++ return -E2BIG; ++ } ++ } ++ } ++ ++ hdr_copy = min_t(u32, hdr_len, sizeof(btf->hdr)); ++ memcpy(&btf->hdr, btf->data, hdr_copy); ++ ++ hdr = &btf->hdr; ++ ++ btf_verifier_log_hdr(env, btf_data_size); ++ ++ if (hdr->magic != BTF_MAGIC) { ++ btf_verifier_log(env, "Invalid magic"); ++ return -EINVAL; ++ } ++ ++ if (hdr->version != BTF_VERSION) { ++ btf_verifier_log(env, "Unsupported version"); ++ return -ENOTSUPP; ++ } ++ ++ if (hdr->flags) { ++ btf_verifier_log(env, "Unsupported flags"); ++ return -ENOTSUPP; ++ } ++ ++ if (btf_data_size == hdr->hdr_len) { ++ btf_verifier_log(env, "No data"); ++ return -EINVAL; ++ } ++ ++ err = btf_check_sec_info(env, btf_data_size); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size, ++ u32 log_level, char __user *log_ubuf, u32 log_size) ++{ ++ struct btf_verifier_env *env = NULL; ++ struct bpf_verifier_log *log; ++ struct btf *btf = NULL; ++ u8 *data; ++ int err; ++ ++ if (btf_data_size > BTF_MAX_SIZE) ++ return ERR_PTR(-E2BIG); ++ ++ env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); ++ if (!env) ++ return ERR_PTR(-ENOMEM); ++ ++ log = &env->log; ++ if (log_level || log_ubuf || log_size) { ++ /* user requested verbose verifier output ++ * and supplied buffer to store the verification trace ++ */ ++ log->level = log_level; ++ log->ubuf = log_ubuf; ++ log->len_total = log_size; ++ ++ /* log attributes have to be sane */ ++ if (log->len_total < 128 || log->len_total > UINT_MAX >> 8 || ++ !log->level || !log->ubuf) { ++ err = -EINVAL; ++ goto errout; ++ } ++ } ++ ++ btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); ++ if 
(!btf) { ++ err = -ENOMEM; ++ goto errout; ++ } ++ env->btf = btf; ++ ++ data = kmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN); ++ if (!data) { ++ err = -ENOMEM; ++ goto errout; ++ } ++ ++ btf->data = data; ++ btf->data_size = btf_data_size; ++ ++ if (copy_from_user(data, btf_data, btf_data_size)) { ++ err = -EFAULT; ++ goto errout; ++ } ++ ++ err = btf_parse_hdr(env); ++ if (err) ++ goto errout; ++ ++ btf->nohdr_data = btf->data + btf->hdr.hdr_len; ++ ++ err = btf_parse_str_sec(env); ++ if (err) ++ goto errout; ++ ++ err = btf_parse_type_sec(env); ++ if (err) ++ goto errout; ++ ++ if (log->level && bpf_verifier_log_full(log)) { ++ err = -ENOSPC; ++ goto errout; ++ } ++ ++ btf_verifier_env_free(env); ++ refcount_set(&btf->refcnt, 1); ++ return btf; ++ ++errout: ++ btf_verifier_env_free(env); ++ if (btf) ++ btf_free(btf); ++ return ERR_PTR(err); ++} ++ ++void btf_type_seq_show(const struct btf *btf, u32 type_id, void *obj, ++ struct seq_file *m) ++{ ++ const struct btf_type *t = btf_type_by_id(btf, type_id); ++ ++ btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m); ++} ++ ++#ifdef CONFIG_PROC_FS ++static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp) ++{ ++ const struct btf *btf = filp->private_data; ++ ++ seq_printf(m, "btf_id:\t%u\n", btf->id); ++} ++#endif ++ ++static int btf_release(struct inode *inode, struct file *filp) ++{ ++ btf_put(filp->private_data); ++ return 0; ++} ++ ++const struct file_operations btf_fops = { ++#ifdef CONFIG_PROC_FS ++ .show_fdinfo = bpf_btf_show_fdinfo, ++#endif ++ .release = btf_release, ++}; ++ ++static int __btf_new_fd(struct btf *btf) ++{ ++ return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC); ++} ++ ++int btf_new_fd(const union bpf_attr *attr) ++{ ++ struct btf *btf; ++ int ret; ++ ++ btf = btf_parse(u64_to_user_ptr(attr->btf), ++ attr->btf_size, attr->btf_log_level, ++ u64_to_user_ptr(attr->btf_log_buf), ++ attr->btf_log_size); ++ if (IS_ERR(btf)) ++ return PTR_ERR(btf); ++ ++ ret = 
btf_alloc_id(btf); ++ if (ret) { ++ btf_free(btf); ++ return ret; ++ } ++ ++ /* ++ * The BTF ID is published to the userspace. ++ * All BTF free must go through call_rcu() from ++ * now on (i.e. free by calling btf_put()). ++ */ ++ ++ ret = __btf_new_fd(btf); ++ if (ret < 0) ++ btf_put(btf); ++ ++ return ret; ++} ++ ++struct btf *btf_get_by_fd(int fd) ++{ ++ struct btf *btf; ++ struct fd f; ++ ++ f = fdget(fd); ++ ++ if (!f.file) ++ return ERR_PTR(-EBADF); ++ ++ if (f.file->f_op != &btf_fops) { ++ fdput(f); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ btf = f.file->private_data; ++ refcount_inc(&btf->refcnt); ++ fdput(f); ++ ++ return btf; ++} ++ ++int btf_get_info_by_fd(const struct btf *btf, ++ const union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ struct bpf_btf_info __user *uinfo; ++ struct bpf_btf_info info; ++ u32 info_copy, btf_copy; ++ void __user *ubtf; ++ u32 uinfo_len; ++ ++ uinfo = u64_to_user_ptr(attr->info.info); ++ uinfo_len = attr->info.info_len; ++ ++ info_copy = min_t(u32, uinfo_len, sizeof(info)); ++ memset(&info, 0, sizeof(info)); ++ if (copy_from_user(&info, uinfo, info_copy)) ++ return -EFAULT; ++ ++ info.id = btf->id; ++ ubtf = u64_to_user_ptr(info.btf); ++ btf_copy = min_t(u32, btf->data_size, info.btf_size); ++ if (copy_to_user(ubtf, btf->data, btf_copy)) ++ return -EFAULT; ++ info.btf_size = btf->data_size; ++ ++ if (copy_to_user(uinfo, &info, info_copy) || ++ put_user(info_copy, &uattr->info.info_len)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++int btf_get_fd_by_id(u32 id) ++{ ++ struct btf *btf; ++ int fd; ++ ++ rcu_read_lock(); ++ btf = idr_find(&btf_idr, id); ++ if (!btf || !refcount_inc_not_zero(&btf->refcnt)) ++ btf = ERR_PTR(-ENOENT); ++ rcu_read_unlock(); ++ ++ if (IS_ERR(btf)) ++ return PTR_ERR(btf); ++ ++ fd = __btf_new_fd(btf); ++ if (fd < 0) ++ btf_put(btf); ++ ++ return fd; ++} ++ ++u32 btf_id(const struct btf *btf) ++{ ++ return btf->id; ++} +--- /dev/null ++++ b/kernel/bpf/cgroup.c +@@ -0,0 +1,1581 @@ ++// 
SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Functions to manage eBPF programs attached to cgroups ++ * ++ * Copyright (c) 2016 Daniel Mack ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "../cgroup/cgroup-internal.h" ++ ++DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key); ++EXPORT_SYMBOL(cgroup_bpf_enabled_key); ++ ++void cgroup_bpf_offline(struct cgroup *cgrp) ++{ ++ cgroup_get(cgrp); ++ percpu_ref_kill(&cgrp->bpf.refcnt); ++} ++ ++/** ++ * cgroup_bpf_release() - put references of all bpf programs and ++ * release all cgroup bpf data ++ * @work: work structure embedded into the cgroup to modify ++ */ ++static void cgroup_bpf_release(struct work_struct *work) ++{ ++ struct cgroup *p, *cgrp = container_of(work, struct cgroup, ++ bpf.release_work); ++ enum bpf_cgroup_storage_type stype; ++ struct bpf_prog_array *old_array; ++ unsigned int type; ++ ++ mutex_lock(&cgroup_mutex); ++ ++ for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) { ++ struct list_head *progs = &cgrp->bpf.progs[type]; ++ struct bpf_prog_list *pl, *tmp; ++ ++ list_for_each_entry_safe(pl, tmp, progs, node) { ++ list_del(&pl->node); ++ bpf_prog_put(pl->prog); ++ for_each_cgroup_storage_type(stype) { ++ bpf_cgroup_storage_unlink(pl->storage[stype]); ++ bpf_cgroup_storage_free(pl->storage[stype]); ++ } ++ kfree(pl); ++ static_branch_dec(&cgroup_bpf_enabled_key); ++ } ++ old_array = rcu_dereference_protected( ++ cgrp->bpf.effective[type], ++ lockdep_is_held(&cgroup_mutex)); ++ bpf_prog_array_free(old_array); ++ } ++ ++ mutex_unlock(&cgroup_mutex); ++ ++ for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) ++ cgroup_bpf_put(p); ++ ++ percpu_ref_exit(&cgrp->bpf.refcnt); ++ cgroup_put(cgrp); ++} ++ ++/** ++ * cgroup_bpf_release_fn() - callback used to schedule releasing ++ * of bpf cgroup data ++ * @ref: percpu ref counter structure ++ */ ++static void cgroup_bpf_release_fn(struct percpu_ref *ref) ++{ 
++ struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt); ++ ++ INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release); ++ queue_work(system_wq, &cgrp->bpf.release_work); ++} ++ ++/* count number of elements in the list. ++ * it's slow but the list cannot be long ++ */ ++static u32 prog_list_length(struct list_head *head) ++{ ++ struct bpf_prog_list *pl; ++ u32 cnt = 0; ++ ++ list_for_each_entry(pl, head, node) { ++ if (!pl->prog) ++ continue; ++ cnt++; ++ } ++ return cnt; ++} ++ ++/* if parent has non-overridable prog attached, ++ * disallow attaching new programs to the descendent cgroup. ++ * if parent has overridable or multi-prog, allow attaching ++ */ ++static bool hierarchy_allows_attach(struct cgroup *cgrp, ++ enum bpf_attach_type type, ++ u32 new_flags) ++{ ++ struct cgroup *p; ++ ++ p = cgroup_parent(cgrp); ++ if (!p) ++ return true; ++ do { ++ u32 flags = p->bpf.flags[type]; ++ u32 cnt; ++ ++ if (flags & BPF_F_ALLOW_MULTI) ++ return true; ++ cnt = prog_list_length(&p->bpf.progs[type]); ++ WARN_ON_ONCE(cnt > 1); ++ if (cnt == 1) ++ return !!(flags & BPF_F_ALLOW_OVERRIDE); ++ p = cgroup_parent(p); ++ } while (p); ++ return true; ++} ++ ++/* compute a chain of effective programs for a given cgroup: ++ * start from the list of programs in this cgroup and add ++ * all parent programs. 
++ * Note that parent's F_ALLOW_OVERRIDE-type program is yielding ++ * to programs in this cgroup ++ */ ++static int compute_effective_progs(struct cgroup *cgrp, ++ enum bpf_attach_type type, ++ struct bpf_prog_array **array) ++{ ++ enum bpf_cgroup_storage_type stype; ++ struct bpf_prog_array *progs; ++ struct bpf_prog_list *pl; ++ struct cgroup *p = cgrp; ++ int cnt = 0; ++ ++ /* count number of effective programs by walking parents */ ++ do { ++ if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI)) ++ cnt += prog_list_length(&p->bpf.progs[type]); ++ p = cgroup_parent(p); ++ } while (p); ++ ++ progs = bpf_prog_array_alloc(cnt, GFP_KERNEL); ++ if (!progs) ++ return -ENOMEM; ++ ++ /* populate the array with effective progs */ ++ cnt = 0; ++ p = cgrp; ++ do { ++ if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI)) ++ continue; ++ ++ list_for_each_entry(pl, &p->bpf.progs[type], node) { ++ if (!pl->prog) ++ continue; ++ ++ progs->items[cnt].prog = pl->prog; ++ for_each_cgroup_storage_type(stype) ++ progs->items[cnt].cgroup_storage[stype] = ++ pl->storage[stype]; ++ cnt++; ++ } ++ } while ((p = cgroup_parent(p))); ++ ++ *array = progs; ++ return 0; ++} ++ ++static void activate_effective_progs(struct cgroup *cgrp, ++ enum bpf_attach_type type, ++ struct bpf_prog_array *old_array) ++{ ++ rcu_swap_protected(cgrp->bpf.effective[type], old_array, ++ lockdep_is_held(&cgroup_mutex)); ++ /* free prog array after grace period, since __cgroup_bpf_run_*() ++ * might be still walking the array ++ */ ++ bpf_prog_array_free(old_array); ++} ++ ++/** ++ * cgroup_bpf_inherit() - inherit effective programs from parent ++ * @cgrp: the cgroup to modify ++ */ ++int cgroup_bpf_inherit(struct cgroup *cgrp) ++{ ++/* has to use marco instead of const int, since compiler thinks ++ * that array below is variable length ++ */ ++#define NR ARRAY_SIZE(cgrp->bpf.effective) ++ struct bpf_prog_array *arrays[NR] = {}; ++ struct cgroup *p; ++ int ret, i; ++ ++ ret = 
percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0, ++ GFP_KERNEL); ++ if (ret) ++ return ret; ++ ++ for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) ++ cgroup_bpf_get(p); ++ ++ for (i = 0; i < NR; i++) ++ INIT_LIST_HEAD(&cgrp->bpf.progs[i]); ++ ++ for (i = 0; i < NR; i++) ++ if (compute_effective_progs(cgrp, i, &arrays[i])) ++ goto cleanup; ++ ++ for (i = 0; i < NR; i++) ++ activate_effective_progs(cgrp, i, arrays[i]); ++ ++ return 0; ++cleanup: ++ for (i = 0; i < NR; i++) ++ bpf_prog_array_free(arrays[i]); ++ ++ for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p)) ++ cgroup_bpf_put(p); ++ ++ percpu_ref_exit(&cgrp->bpf.refcnt); ++ ++ return -ENOMEM; ++} ++ ++static int update_effective_progs(struct cgroup *cgrp, ++ enum bpf_attach_type type) ++{ ++ struct cgroup_subsys_state *css; ++ int err; ++ ++ /* allocate and recompute effective prog arrays */ ++ css_for_each_descendant_pre(css, &cgrp->self) { ++ struct cgroup *desc = container_of(css, struct cgroup, self); ++ ++ if (percpu_ref_is_zero(&desc->bpf.refcnt)) ++ continue; ++ ++ err = compute_effective_progs(desc, type, &desc->bpf.inactive); ++ if (err) ++ goto cleanup; ++ } ++ ++ /* all allocations were successful. Activate all prog arrays */ ++ css_for_each_descendant_pre(css, &cgrp->self) { ++ struct cgroup *desc = container_of(css, struct cgroup, self); ++ ++ if (percpu_ref_is_zero(&desc->bpf.refcnt)) { ++ if (unlikely(desc->bpf.inactive)) { ++ bpf_prog_array_free(desc->bpf.inactive); ++ desc->bpf.inactive = NULL; ++ } ++ continue; ++ } ++ ++ activate_effective_progs(desc, type, desc->bpf.inactive); ++ desc->bpf.inactive = NULL; ++ } ++ ++ return 0; ++ ++cleanup: ++ /* oom while computing effective. 
Free all computed effective arrays ++ * since they were not activated ++ */ ++ css_for_each_descendant_pre(css, &cgrp->self) { ++ struct cgroup *desc = container_of(css, struct cgroup, self); ++ ++ bpf_prog_array_free(desc->bpf.inactive); ++ desc->bpf.inactive = NULL; ++ } ++ ++ return err; ++} ++ ++#define BPF_CGROUP_MAX_PROGS 64 ++ ++/** ++ * __cgroup_bpf_attach() - Attach the program to a cgroup, and ++ * propagate the change to descendants ++ * @cgrp: The cgroup which descendants to traverse ++ * @prog: A program to attach ++ * @type: Type of attach operation ++ * @flags: Option flags ++ * ++ * Must be called with cgroup_mutex held. ++ */ ++int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, ++ enum bpf_attach_type type, u32 flags) ++{ ++ struct list_head *progs = &cgrp->bpf.progs[type]; ++ struct bpf_prog *old_prog = NULL; ++ struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; ++ struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {}; ++ enum bpf_cgroup_storage_type stype; ++ struct bpf_prog_list *pl; ++ bool pl_was_allocated; ++ int err; ++ ++ if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ++ /* invalid combination */ ++ return -EINVAL; ++ ++ if (!hierarchy_allows_attach(cgrp, type, flags)) ++ return -EPERM; ++ ++ if (!list_empty(progs) && cgrp->bpf.flags[type] != flags) ++ /* Disallow attaching non-overridable on top ++ * of existing overridable in this cgroup. 
++ * Disallow attaching multi-prog if overridable or none ++ */ ++ return -EPERM; ++ ++ if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS) ++ return -E2BIG; ++ ++ for_each_cgroup_storage_type(stype) { ++ storage[stype] = bpf_cgroup_storage_alloc(prog, stype); ++ if (IS_ERR(storage[stype])) { ++ storage[stype] = NULL; ++ for_each_cgroup_storage_type(stype) ++ bpf_cgroup_storage_free(storage[stype]); ++ return -ENOMEM; ++ } ++ } ++ ++ if (flags & BPF_F_ALLOW_MULTI) { ++ list_for_each_entry(pl, progs, node) { ++ if (pl->prog == prog) { ++ /* disallow attaching the same prog twice */ ++ for_each_cgroup_storage_type(stype) ++ bpf_cgroup_storage_free(storage[stype]); ++ return -EINVAL; ++ } ++ } ++ ++ pl = kmalloc(sizeof(*pl), GFP_KERNEL); ++ if (!pl) { ++ for_each_cgroup_storage_type(stype) ++ bpf_cgroup_storage_free(storage[stype]); ++ return -ENOMEM; ++ } ++ ++ pl_was_allocated = true; ++ pl->prog = prog; ++ for_each_cgroup_storage_type(stype) ++ pl->storage[stype] = storage[stype]; ++ list_add_tail(&pl->node, progs); ++ } else { ++ if (list_empty(progs)) { ++ pl = kmalloc(sizeof(*pl), GFP_KERNEL); ++ if (!pl) { ++ for_each_cgroup_storage_type(stype) ++ bpf_cgroup_storage_free(storage[stype]); ++ return -ENOMEM; ++ } ++ pl_was_allocated = true; ++ list_add_tail(&pl->node, progs); ++ } else { ++ pl = list_first_entry(progs, typeof(*pl), node); ++ old_prog = pl->prog; ++ for_each_cgroup_storage_type(stype) { ++ old_storage[stype] = pl->storage[stype]; ++ bpf_cgroup_storage_unlink(old_storage[stype]); ++ } ++ pl_was_allocated = false; ++ } ++ pl->prog = prog; ++ for_each_cgroup_storage_type(stype) ++ pl->storage[stype] = storage[stype]; ++ } ++ ++ cgrp->bpf.flags[type] = flags; ++ ++ err = update_effective_progs(cgrp, type); ++ if (err) ++ goto cleanup; ++ ++ static_branch_inc(&cgroup_bpf_enabled_key); ++ for_each_cgroup_storage_type(stype) { ++ if (!old_storage[stype]) ++ continue; ++ bpf_cgroup_storage_free(old_storage[stype]); ++ } ++ if (old_prog) { ++ 
bpf_prog_put(old_prog); ++ static_branch_dec(&cgroup_bpf_enabled_key); ++ } ++ for_each_cgroup_storage_type(stype) ++ bpf_cgroup_storage_link(storage[stype], cgrp, type); ++ return 0; ++ ++cleanup: ++ /* and cleanup the prog list */ ++ pl->prog = old_prog; ++ for_each_cgroup_storage_type(stype) { ++ bpf_cgroup_storage_free(pl->storage[stype]); ++ pl->storage[stype] = old_storage[stype]; ++ bpf_cgroup_storage_link(old_storage[stype], cgrp, type); ++ } ++ if (pl_was_allocated) { ++ list_del(&pl->node); ++ kfree(pl); ++ } ++ return err; ++} ++ ++/** ++ * __cgroup_bpf_detach() - Detach the program from a cgroup, and ++ * propagate the change to descendants ++ * @cgrp: The cgroup which descendants to traverse ++ * @prog: A program to detach or NULL ++ * @type: Type of detach operation ++ * ++ * Must be called with cgroup_mutex held. ++ */ ++int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, ++ enum bpf_attach_type type) ++{ ++ struct list_head *progs = &cgrp->bpf.progs[type]; ++ enum bpf_cgroup_storage_type stype; ++ u32 flags = cgrp->bpf.flags[type]; ++ struct bpf_prog *old_prog = NULL; ++ struct bpf_prog_list *pl; ++ int err; ++ ++ if (flags & BPF_F_ALLOW_MULTI) { ++ if (!prog) ++ /* to detach MULTI prog the user has to specify valid FD ++ * of the program to be detached ++ */ ++ return -EINVAL; ++ } else { ++ if (list_empty(progs)) ++ /* report error when trying to detach and nothing is attached */ ++ return -ENOENT; ++ } ++ ++ if (flags & BPF_F_ALLOW_MULTI) { ++ /* find the prog and detach it */ ++ list_for_each_entry(pl, progs, node) { ++ if (pl->prog != prog) ++ continue; ++ old_prog = prog; ++ /* mark it deleted, so it's ignored while ++ * recomputing effective ++ */ ++ pl->prog = NULL; ++ break; ++ } ++ if (!old_prog) ++ return -ENOENT; ++ } else { ++ /* to maintain backward compatibility NONE and OVERRIDE cgroups ++ * allow detaching with invalid FD (prog==NULL) ++ */ ++ pl = list_first_entry(progs, typeof(*pl), node); ++ old_prog = pl->prog; 
++ pl->prog = NULL; ++ } ++ ++ err = update_effective_progs(cgrp, type); ++ if (err) ++ goto cleanup; ++ ++ /* now can actually delete it from this cgroup list */ ++ list_del(&pl->node); ++ for_each_cgroup_storage_type(stype) { ++ bpf_cgroup_storage_unlink(pl->storage[stype]); ++ bpf_cgroup_storage_free(pl->storage[stype]); ++ } ++ kfree(pl); ++ if (list_empty(progs)) ++ /* last program was detached, reset flags to zero */ ++ cgrp->bpf.flags[type] = 0; ++ ++ bpf_prog_put(old_prog); ++ static_branch_dec(&cgroup_bpf_enabled_key); ++ return 0; ++ ++cleanup: ++ /* and restore back old_prog */ ++ pl->prog = old_prog; ++ return err; ++} ++ ++/* Must be called with cgroup_mutex held to avoid races. */ ++int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ __u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids); ++ enum bpf_attach_type type = attr->query.attach_type; ++ struct list_head *progs = &cgrp->bpf.progs[type]; ++ u32 flags = cgrp->bpf.flags[type]; ++ struct bpf_prog_array *effective; ++ int cnt, ret = 0, i; ++ ++ effective = rcu_dereference_protected(cgrp->bpf.effective[type], ++ lockdep_is_held(&cgroup_mutex)); ++ ++ if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) ++ cnt = bpf_prog_array_length(effective); ++ else ++ cnt = prog_list_length(progs); ++ ++ if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags))) ++ return -EFAULT; ++ if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt))) ++ return -EFAULT; ++ if (attr->query.prog_cnt == 0 || !prog_ids || !cnt) ++ /* return early if user requested only program count + flags */ ++ return 0; ++ if (attr->query.prog_cnt < cnt) { ++ cnt = attr->query.prog_cnt; ++ ret = -ENOSPC; ++ } ++ ++ if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) { ++ return bpf_prog_array_copy_to_user(effective, prog_ids, cnt); ++ } else { ++ struct bpf_prog_list *pl; ++ u32 id; ++ ++ i = 0; ++ list_for_each_entry(pl, progs, node) { ++ id = 
pl->prog->aux->id; ++ if (copy_to_user(prog_ids + i, &id, sizeof(id))) ++ return -EFAULT; ++ if (++i == cnt) ++ break; ++ } ++ } ++ return ret; ++} ++ ++int cgroup_bpf_prog_attach(const union bpf_attr *attr, ++ enum bpf_prog_type ptype, struct bpf_prog *prog) ++{ ++ struct cgroup *cgrp; ++ int ret; ++ ++ cgrp = cgroup_get_from_fd(attr->target_fd); ++ if (IS_ERR(cgrp)) ++ return PTR_ERR(cgrp); ++ ++ ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type, ++ attr->attach_flags); ++ cgroup_put(cgrp); ++ return ret; ++} ++ ++int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype) ++{ ++ struct bpf_prog *prog; ++ struct cgroup *cgrp; ++ int ret; ++ ++ cgrp = cgroup_get_from_fd(attr->target_fd); ++ if (IS_ERR(cgrp)) ++ return PTR_ERR(cgrp); ++ ++ prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype); ++ if (IS_ERR(prog)) ++ prog = NULL; ++ ++ ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0); ++ if (prog) ++ bpf_prog_put(prog); ++ ++ cgroup_put(cgrp); ++ return ret; ++} ++ ++int cgroup_bpf_prog_query(const union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ struct cgroup *cgrp; ++ int ret; ++ ++ cgrp = cgroup_get_from_fd(attr->query.target_fd); ++ if (IS_ERR(cgrp)) ++ return PTR_ERR(cgrp); ++ ++ ret = cgroup_bpf_query(cgrp, attr, uattr); ++ ++ cgroup_put(cgrp); ++ return ret; ++} ++ ++/** ++ * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering ++ * @sk: The socket sending or receiving traffic ++ * @skb: The skb that is being sent or received ++ * @type: The type of program to be exectuted ++ * ++ * If no socket is passed, or the socket is not of type INET or INET6, ++ * this function does nothing and returns 0. ++ * ++ * The program type passed in via @type must be suitable for network ++ * filtering. No further check is performed to assert that. 
++ * ++ * For egress packets, this function can return: ++ * NET_XMIT_SUCCESS (0) - continue with packet output ++ * NET_XMIT_DROP (1) - drop packet and notify TCP to call cwr ++ * NET_XMIT_CN (2) - continue with packet output and notify TCP ++ * to call cwr ++ * -EPERM - drop packet ++ * ++ * For ingress packets, this function will return -EPERM if any ++ * attached program was found and if it returned != 1 during execution. ++ * Otherwise 0 is returned. ++ */ ++int __cgroup_bpf_run_filter_skb(struct sock *sk, ++ struct sk_buff *skb, ++ enum bpf_attach_type type) ++{ ++ unsigned int offset = skb->data - skb_network_header(skb); ++ struct sock *save_sk; ++ void *saved_data_end; ++ struct cgroup *cgrp; ++ int ret; ++ ++ if (!sk || !sk_fullsock(sk)) ++ return 0; ++ ++ if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6) ++ return 0; ++ ++ cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); ++ save_sk = skb->sk; ++ skb->sk = sk; ++ __skb_push(skb, offset); ++ ++ /* compute pointers for the bpf prog */ ++ bpf_compute_and_save_data_end(skb, &saved_data_end); ++ ++ if (type == BPF_CGROUP_INET_EGRESS) { ++ ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY( ++ cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb); ++ } else { ++ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, ++ __bpf_prog_run_save_cb); ++ ret = (ret == 1 ? 0 : -EPERM); ++ } ++ bpf_restore_data_end(skb, saved_data_end); ++ __skb_pull(skb, offset); ++ skb->sk = save_sk; ++ ++ return ret; ++} ++EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb); ++ ++/** ++ * __cgroup_bpf_run_filter_sk() - Run a program on a sock ++ * @sk: sock structure to manipulate ++ * @type: The type of program to be exectuted ++ * ++ * socket is passed is expected to be of type INET or INET6. ++ * ++ * The program type passed in via @type must be suitable for sock ++ * filtering. No further check is performed to assert that. 
++ * ++ * This function will return %-EPERM if any if an attached program was found ++ * and if it returned != 1 during execution. In all other cases, 0 is returned. ++ */ ++int __cgroup_bpf_run_filter_sk(struct sock *sk, ++ enum bpf_attach_type type) ++{ ++ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); ++ int ret; ++ ++ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN); ++ return ret == 1 ? 0 : -EPERM; ++} ++EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk); ++ ++/** ++ * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and ++ * provided by user sockaddr ++ * @sk: sock struct that will use sockaddr ++ * @uaddr: sockaddr struct provided by user ++ * @type: The type of program to be exectuted ++ * @t_ctx: Pointer to attach type specific context ++ * ++ * socket is expected to be of type INET or INET6. ++ * ++ * This function will return %-EPERM if an attached program is found and ++ * returned value != 1 during execution. In all other cases, 0 is returned. ++ */ ++int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, ++ struct sockaddr *uaddr, ++ enum bpf_attach_type type, ++ void *t_ctx) ++{ ++ struct bpf_sock_addr_kern ctx = { ++ .sk = sk, ++ .uaddr = uaddr, ++ .t_ctx = t_ctx, ++ }; ++ struct sockaddr_storage unspec; ++ struct cgroup *cgrp; ++ int ret; ++ ++ /* Check socket family since not all sockets represent network ++ * endpoint (e.g. AF_UNIX). ++ */ ++ if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6) ++ return 0; ++ ++ if (!ctx.uaddr) { ++ memset(&unspec, 0, sizeof(unspec)); ++ ctx.uaddr = (struct sockaddr *)&unspec; ++ } ++ ++ cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); ++ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN); ++ ++ return ret == 1 ? 0 : -EPERM; ++} ++EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr); ++ ++/** ++ * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock ++ * @sk: socket to get cgroup from ++ * @sock_ops: bpf_sock_ops_kern struct to pass to program. 
Contains ++ * sk with connection information (IP addresses, etc.) May not contain ++ * cgroup info if it is a req sock. ++ * @type: The type of program to be exectuted ++ * ++ * socket passed is expected to be of type INET or INET6. ++ * ++ * The program type passed in via @type must be suitable for sock_ops ++ * filtering. No further check is performed to assert that. ++ * ++ * This function will return %-EPERM if any if an attached program was found ++ * and if it returned != 1 during execution. In all other cases, 0 is returned. ++ */ ++int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, ++ struct bpf_sock_ops_kern *sock_ops, ++ enum bpf_attach_type type) ++{ ++ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); ++ int ret; ++ ++ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops, ++ BPF_PROG_RUN); ++ return ret == 1 ? 0 : -EPERM; ++} ++EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops); ++ ++int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, ++ short access, enum bpf_attach_type type) ++{ ++ struct cgroup *cgrp; ++ struct bpf_cgroup_dev_ctx ctx = { ++ .access_type = (access << 16) | dev_type, ++ .major = major, ++ .minor = minor, ++ }; ++ int allow = 1; ++ ++ rcu_read_lock(); ++ cgrp = task_dfl_cgroup(current); ++ allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, ++ BPF_PROG_RUN); ++ rcu_read_unlock(); ++ ++ return !allow; ++} ++EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission); ++ ++static const struct bpf_func_proto * ++cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ switch (func_id) { ++ case BPF_FUNC_map_lookup_elem: ++ return &bpf_map_lookup_elem_proto; ++ case BPF_FUNC_map_update_elem: ++ return &bpf_map_update_elem_proto; ++ case BPF_FUNC_map_delete_elem: ++ return &bpf_map_delete_elem_proto; ++ case BPF_FUNC_map_push_elem: ++ return &bpf_map_push_elem_proto; ++ case BPF_FUNC_map_pop_elem: ++ return &bpf_map_pop_elem_proto; ++ case BPF_FUNC_map_peek_elem: ++ return 
&bpf_map_peek_elem_proto; ++ case BPF_FUNC_get_current_uid_gid: ++ return &bpf_get_current_uid_gid_proto; ++ case BPF_FUNC_get_local_storage: ++ return &bpf_get_local_storage_proto; ++ case BPF_FUNC_get_current_cgroup_id: ++ return &bpf_get_current_cgroup_id_proto; ++ case BPF_FUNC_trace_printk: ++ if (capable(CAP_SYS_ADMIN)) ++ return bpf_get_trace_printk_proto(); ++ /* fall through */ ++ default: ++ return NULL; ++ } ++} ++ ++static const struct bpf_func_proto * ++cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ return cgroup_base_func_proto(func_id, prog); ++} ++ ++static bool cgroup_dev_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ const int size_default = sizeof(__u32); ++ ++ if (type == BPF_WRITE) ++ return false; ++ ++ if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx)) ++ return false; ++ /* The verifier guarantees that size > 0. */ ++ if (off % size != 0) ++ return false; ++ ++ switch (off) { ++ case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type): ++ bpf_ctx_record_field_size(info, size_default); ++ if (!bpf_ctx_narrow_access_ok(off, size, size_default)) ++ return false; ++ break; ++ default: ++ if (size != size_default) ++ return false; ++ } ++ ++ return true; ++} ++ ++const struct bpf_prog_ops cg_dev_prog_ops = { ++}; ++ ++const struct bpf_verifier_ops cg_dev_verifier_ops = { ++ .get_func_proto = cgroup_dev_func_proto, ++ .is_valid_access = cgroup_dev_is_valid_access, ++}; ++ ++/** ++ * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl ++ * ++ * @head: sysctl table header ++ * @table: sysctl table ++ * @write: sysctl is being read (= 0) or written (= 1) ++ * @buf: pointer to buffer passed by user space ++ * @pcount: value-result argument: value is size of buffer pointed to by @buf, ++ * result is size of @new_buf if program set new value, initial value ++ * otherwise ++ * @ppos: value-result argument: 
value is position at which read from or write ++ * to sysctl is happening, result is new position if program overrode it, ++ * initial value otherwise ++ * @new_buf: pointer to pointer to new buffer that will be allocated if program ++ * overrides new value provided by user space on sysctl write ++ * NOTE: it's caller responsibility to free *new_buf if it was set ++ * @type: type of program to be executed ++ * ++ * Program is run when sysctl is being accessed, either read or written, and ++ * can allow or deny such access. ++ * ++ * This function will return %-EPERM if an attached program is found and ++ * returned value != 1 during execution. In all other cases 0 is returned. ++ */ ++int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, ++ struct ctl_table *table, int write, ++ void __user *buf, size_t *pcount, ++ loff_t *ppos, void **new_buf, ++ enum bpf_attach_type type) ++{ ++ struct bpf_sysctl_kern ctx = { ++ .head = head, ++ .table = table, ++ .write = write, ++ .ppos = ppos, ++ .cur_val = NULL, ++ .cur_len = PAGE_SIZE, ++ .new_val = NULL, ++ .new_len = 0, ++ .new_updated = 0, ++ }; ++ struct cgroup *cgrp; ++ int ret; ++ ++ ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL); ++ if (ctx.cur_val) { ++ mm_segment_t old_fs; ++ loff_t pos = 0; ++ ++ old_fs = get_fs(); ++ set_fs(KERNEL_DS); ++ if (table->proc_handler(table, 0, (void __user *)ctx.cur_val, ++ &ctx.cur_len, &pos)) { ++ /* Let BPF program decide how to proceed. */ ++ ctx.cur_len = 0; ++ } ++ set_fs(old_fs); ++ } else { ++ /* Let BPF program decide how to proceed. */ ++ ctx.cur_len = 0; ++ } ++ ++ if (write && buf && *pcount) { ++ /* BPF program should be able to override new value with a ++ * buffer bigger than provided by user. ++ */ ++ ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL); ++ ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount); ++ if (!ctx.new_val || ++ copy_from_user(ctx.new_val, buf, ctx.new_len)) ++ /* Let BPF program decide how to proceed. 
*/ ++ ctx.new_len = 0; ++ } ++ ++ rcu_read_lock(); ++ cgrp = task_dfl_cgroup(current); ++ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN); ++ rcu_read_unlock(); ++ ++ kfree(ctx.cur_val); ++ ++ if (ret == 1 && ctx.new_updated) { ++ *new_buf = ctx.new_val; ++ *pcount = ctx.new_len; ++ } else { ++ kfree(ctx.new_val); ++ } ++ ++ return ret == 1 ? 0 : -EPERM; ++} ++EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl); ++ ++#ifdef CONFIG_NET ++static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp, ++ enum bpf_attach_type attach_type) ++{ ++ struct bpf_prog_array *prog_array; ++ bool empty; ++ ++ rcu_read_lock(); ++ prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]); ++ empty = bpf_prog_array_is_empty(prog_array); ++ rcu_read_unlock(); ++ ++ return empty; ++} ++ ++static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen) ++{ ++ if (unlikely(max_optlen < 0)) ++ return -EINVAL; ++ ++ if (unlikely(max_optlen > PAGE_SIZE)) { ++ /* We don't expose optvals that are greater than PAGE_SIZE ++ * to the BPF program. ++ */ ++ max_optlen = PAGE_SIZE; ++ } ++ ++ ctx->optval = kzalloc(max_optlen, GFP_USER); ++ if (!ctx->optval) ++ return -ENOMEM; ++ ++ ctx->optval_end = ctx->optval + max_optlen; ++ ++ return max_optlen; ++} ++ ++static void sockopt_free_buf(struct bpf_sockopt_kern *ctx) ++{ ++ kfree(ctx->optval); ++} ++ ++int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level, ++ int *optname, char __user *optval, ++ int *optlen, char **kernel_optval) ++{ ++ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); ++ struct bpf_sockopt_kern ctx = { ++ .sk = sk, ++ .level = *level, ++ .optname = *optname, ++ }; ++ int ret, max_optlen; ++ ++ /* Opportunistic check to see whether we have any BPF program ++ * attached to the hook so we don't waste time allocating ++ * memory and locking the socket. 
++ */ ++ if (!cgroup_bpf_enabled || ++ __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT)) ++ return 0; ++ ++ /* Allocate a bit more than the initial user buffer for ++ * BPF program. The canonical use case is overriding ++ * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic). ++ */ ++ max_optlen = max_t(int, 16, *optlen); ++ ++ max_optlen = sockopt_alloc_buf(&ctx, max_optlen); ++ if (max_optlen < 0) ++ return max_optlen; ++ ++ ctx.optlen = *optlen; ++ ++ if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ lock_sock(sk); ++ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT], ++ &ctx, BPF_PROG_RUN); ++ release_sock(sk); ++ ++ if (!ret) { ++ ret = -EPERM; ++ goto out; ++ } ++ ++ if (ctx.optlen == -1) { ++ /* optlen set to -1, bypass kernel */ ++ ret = 1; ++ } else if (ctx.optlen > max_optlen || ctx.optlen < -1) { ++ /* optlen is out of bounds */ ++ ret = -EFAULT; ++ } else { ++ /* optlen within bounds, run kernel handler */ ++ ret = 0; ++ ++ /* export any potential modifications */ ++ *level = ctx.level; ++ *optname = ctx.optname; ++ ++ /* optlen == 0 from BPF indicates that we should ++ * use original userspace data. 
++ */ ++ if (ctx.optlen != 0) { ++ *optlen = ctx.optlen; ++ *kernel_optval = ctx.optval; ++ /* export and don't free sockopt buf */ ++ return 0; ++ } ++ } ++ ++out: ++ sockopt_free_buf(&ctx); ++ return ret; ++} ++EXPORT_SYMBOL(__cgroup_bpf_run_filter_setsockopt); ++ ++int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, ++ int optname, char __user *optval, ++ int __user *optlen, int max_optlen, ++ int retval) ++{ ++ struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); ++ struct bpf_sockopt_kern ctx = { ++ .sk = sk, ++ .level = level, ++ .optname = optname, ++ .retval = retval, ++ }; ++ int ret; ++ ++ /* Opportunistic check to see whether we have any BPF program ++ * attached to the hook so we don't waste time allocating ++ * memory and locking the socket. ++ */ ++ if (!cgroup_bpf_enabled || ++ __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT)) ++ return retval; ++ ++ ctx.optlen = max_optlen; ++ ++ max_optlen = sockopt_alloc_buf(&ctx, max_optlen); ++ if (max_optlen < 0) ++ return max_optlen; ++ ++ if (!retval) { ++ /* If kernel getsockopt finished successfully, ++ * copy whatever was returned to the user back ++ * into our temporary buffer. Set optlen to the ++ * one that kernel returned as well to let ++ * BPF programs inspect the value. ++ */ ++ ++ if (get_user(ctx.optlen, optlen)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ if (ctx.optlen < 0) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ if (copy_from_user(ctx.optval, optval, ++ min(ctx.optlen, max_optlen)) != 0) { ++ ret = -EFAULT; ++ goto out; ++ } ++ } ++ ++ lock_sock(sk); ++ ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT], ++ &ctx, BPF_PROG_RUN); ++ release_sock(sk); ++ ++ if (!ret) { ++ ret = -EPERM; ++ goto out; ++ } ++ ++ if (ctx.optlen > max_optlen || ctx.optlen < 0) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ /* BPF programs only allowed to set retval to 0, not some ++ * arbitrary value. 
++ */ ++ if (ctx.retval != 0 && ctx.retval != retval) { ++ ret = -EFAULT; ++ goto out; ++ } ++ ++ if (ctx.optlen != 0) { ++ if (copy_to_user(optval, ctx.optval, ctx.optlen) || ++ put_user(ctx.optlen, optlen)) { ++ ret = -EFAULT; ++ goto out; ++ } ++ } ++ ++ ret = ctx.retval; ++ ++out: ++ sockopt_free_buf(&ctx); ++ return ret; ++} ++EXPORT_SYMBOL(__cgroup_bpf_run_filter_getsockopt); ++#endif ++ ++static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp, ++ size_t *lenp) ++{ ++ ssize_t tmp_ret = 0, ret; ++ ++ if (dir->header.parent) { ++ tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp); ++ if (tmp_ret < 0) ++ return tmp_ret; ++ } ++ ++ ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp); ++ if (ret < 0) ++ return ret; ++ *bufp += ret; ++ *lenp -= ret; ++ ret += tmp_ret; ++ ++ /* Avoid leading slash. */ ++ if (!ret) ++ return ret; ++ ++ tmp_ret = strscpy(*bufp, "/", *lenp); ++ if (tmp_ret < 0) ++ return tmp_ret; ++ *bufp += tmp_ret; ++ *lenp -= tmp_ret; ++ ++ return ret + tmp_ret; ++} ++ ++BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf, ++ size_t, buf_len, u64, flags) ++{ ++ ssize_t tmp_ret = 0, ret; ++ ++ if (!buf) ++ return -EINVAL; ++ ++ if (!(flags & BPF_F_SYSCTL_BASE_NAME)) { ++ if (!ctx->head) ++ return -EINVAL; ++ tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len); ++ if (tmp_ret < 0) ++ return tmp_ret; ++ } ++ ++ ret = strscpy(buf, ctx->table->procname, buf_len); ++ ++ return ret < 0 ? 
ret : tmp_ret + ret; ++} ++ ++static const struct bpf_func_proto bpf_sysctl_get_name_proto = { ++ .func = bpf_sysctl_get_name, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_PTR_TO_MEM, ++ .arg3_type = ARG_CONST_SIZE, ++ .arg4_type = ARG_ANYTHING, ++}; ++ ++static int copy_sysctl_value(char *dst, size_t dst_len, char *src, ++ size_t src_len) ++{ ++ if (!dst) ++ return -EINVAL; ++ ++ if (!dst_len) ++ return -E2BIG; ++ ++ if (!src || !src_len) { ++ memset(dst, 0, dst_len); ++ return -EINVAL; ++ } ++ ++ memcpy(dst, src, min(dst_len, src_len)); ++ ++ if (dst_len > src_len) { ++ memset(dst + src_len, '\0', dst_len - src_len); ++ return src_len; ++ } ++ ++ dst[dst_len - 1] = '\0'; ++ ++ return -E2BIG; ++} ++ ++BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx, ++ char *, buf, size_t, buf_len) ++{ ++ return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len); ++} ++ ++static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = { ++ .func = bpf_sysctl_get_current_value, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_PTR_TO_UNINIT_MEM, ++ .arg3_type = ARG_CONST_SIZE, ++}; ++ ++BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf, ++ size_t, buf_len) ++{ ++ if (!ctx->write) { ++ if (buf && buf_len) ++ memset(buf, '\0', buf_len); ++ return -EINVAL; ++ } ++ return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len); ++} ++ ++static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = { ++ .func = bpf_sysctl_get_new_value, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_PTR_TO_UNINIT_MEM, ++ .arg3_type = ARG_CONST_SIZE, ++}; ++ ++BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx, ++ const char *, buf, size_t, buf_len) ++{ ++ if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len) ++ return -EINVAL; ++ 
++ if (buf_len > PAGE_SIZE - 1) ++ return -E2BIG; ++ ++ memcpy(ctx->new_val, buf, buf_len); ++ ctx->new_len = buf_len; ++ ctx->new_updated = 1; ++ ++ return 0; ++} ++ ++static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = { ++ .func = bpf_sysctl_set_new_value, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_PTR_TO_MEM, ++ .arg3_type = ARG_CONST_SIZE, ++}; ++ ++static const struct bpf_func_proto * ++sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ switch (func_id) { ++ case BPF_FUNC_strtol: ++ return &bpf_strtol_proto; ++ case BPF_FUNC_strtoul: ++ return &bpf_strtoul_proto; ++ case BPF_FUNC_sysctl_get_name: ++ return &bpf_sysctl_get_name_proto; ++ case BPF_FUNC_sysctl_get_current_value: ++ return &bpf_sysctl_get_current_value_proto; ++ case BPF_FUNC_sysctl_get_new_value: ++ return &bpf_sysctl_get_new_value_proto; ++ case BPF_FUNC_sysctl_set_new_value: ++ return &bpf_sysctl_set_new_value_proto; ++ default: ++ return cgroup_base_func_proto(func_id, prog); ++ } ++} ++ ++static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ const int size_default = sizeof(__u32); ++ ++ if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size) ++ return false; ++ ++ switch (off) { ++ case bpf_ctx_range(struct bpf_sysctl, write): ++ if (type != BPF_READ) ++ return false; ++ bpf_ctx_record_field_size(info, size_default); ++ return bpf_ctx_narrow_access_ok(off, size, size_default); ++ case bpf_ctx_range(struct bpf_sysctl, file_pos): ++ if (type == BPF_READ) { ++ bpf_ctx_record_field_size(info, size_default); ++ return bpf_ctx_narrow_access_ok(off, size, size_default); ++ } else { ++ return size == size_default; ++ } ++ default: ++ return false; ++ } ++} ++ ++static u32 sysctl_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, 
++ struct bpf_prog *prog, u32 *target_size) ++{ ++ struct bpf_insn *insn = insn_buf; ++ u32 read_size; ++ ++ switch (si->off) { ++ case offsetof(struct bpf_sysctl, write): ++ *insn++ = BPF_LDX_MEM( ++ BPF_SIZE(si->code), si->dst_reg, si->src_reg, ++ bpf_target_off(struct bpf_sysctl_kern, write, ++ FIELD_SIZEOF(struct bpf_sysctl_kern, ++ write), ++ target_size)); ++ break; ++ case offsetof(struct bpf_sysctl, file_pos): ++ /* ppos is a pointer so it should be accessed via indirect ++ * loads and stores. Also for stores additional temporary ++ * register is used since neither src_reg nor dst_reg can be ++ * overridden. ++ */ ++ if (type == BPF_WRITE) { ++ int treg = BPF_REG_9; ++ ++ if (si->src_reg == treg || si->dst_reg == treg) ++ --treg; ++ if (si->src_reg == treg || si->dst_reg == treg) ++ --treg; ++ *insn++ = BPF_STX_MEM( ++ BPF_DW, si->dst_reg, treg, ++ offsetof(struct bpf_sysctl_kern, tmp_reg)); ++ *insn++ = BPF_LDX_MEM( ++ BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos), ++ treg, si->dst_reg, ++ offsetof(struct bpf_sysctl_kern, ppos)); ++ *insn++ = BPF_STX_MEM( ++ BPF_SIZEOF(u32), treg, si->src_reg, ++ bpf_ctx_narrow_access_offset( ++ 0, sizeof(u32), sizeof(loff_t))); ++ *insn++ = BPF_LDX_MEM( ++ BPF_DW, treg, si->dst_reg, ++ offsetof(struct bpf_sysctl_kern, tmp_reg)); ++ } else { ++ *insn++ = BPF_LDX_MEM( ++ BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos), ++ si->dst_reg, si->src_reg, ++ offsetof(struct bpf_sysctl_kern, ppos)); ++ read_size = bpf_size_to_bytes(BPF_SIZE(si->code)); ++ *insn++ = BPF_LDX_MEM( ++ BPF_SIZE(si->code), si->dst_reg, si->dst_reg, ++ bpf_ctx_narrow_access_offset( ++ 0, read_size, sizeof(loff_t))); ++ } ++ *target_size = sizeof(u32); ++ break; ++ } ++ ++ return insn - insn_buf; ++} ++ ++const struct bpf_verifier_ops cg_sysctl_verifier_ops = { ++ .get_func_proto = sysctl_func_proto, ++ .is_valid_access = sysctl_is_valid_access, ++ .convert_ctx_access = sysctl_convert_ctx_access, ++}; ++ ++const struct bpf_prog_ops cg_sysctl_prog_ops = { 
++}; ++ ++static const struct bpf_func_proto * ++cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ switch (func_id) { ++#ifdef CONFIG_NET ++ case BPF_FUNC_sk_storage_get: ++ return &bpf_sk_storage_get_proto; ++ case BPF_FUNC_sk_storage_delete: ++ return &bpf_sk_storage_delete_proto; ++#endif ++#ifdef CONFIG_INET ++ case BPF_FUNC_tcp_sock: ++ return &bpf_tcp_sock_proto; ++#endif ++ default: ++ return cgroup_base_func_proto(func_id, prog); ++ } ++} ++ ++static bool cg_sockopt_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ const int size_default = sizeof(__u32); ++ ++ if (off < 0 || off >= sizeof(struct bpf_sockopt)) ++ return false; ++ ++ if (off % size != 0) ++ return false; ++ ++ if (type == BPF_WRITE) { ++ switch (off) { ++ case offsetof(struct bpf_sockopt, retval): ++ if (size != size_default) ++ return false; ++ return prog->expected_attach_type == ++ BPF_CGROUP_GETSOCKOPT; ++ case offsetof(struct bpf_sockopt, optname): ++ /* fallthrough */ ++ case offsetof(struct bpf_sockopt, level): ++ if (size != size_default) ++ return false; ++ return prog->expected_attach_type == ++ BPF_CGROUP_SETSOCKOPT; ++ case offsetof(struct bpf_sockopt, optlen): ++ return size == size_default; ++ default: ++ return false; ++ } ++ } ++ ++ switch (off) { ++ case offsetof(struct bpf_sockopt, sk): ++ if (size != sizeof(__u64)) ++ return false; ++ info->reg_type = PTR_TO_SOCKET; ++ break; ++ case offsetof(struct bpf_sockopt, optval): ++ if (size != sizeof(__u64)) ++ return false; ++ info->reg_type = PTR_TO_PACKET; ++ break; ++ case offsetof(struct bpf_sockopt, optval_end): ++ if (size != sizeof(__u64)) ++ return false; ++ info->reg_type = PTR_TO_PACKET_END; ++ break; ++ case offsetof(struct bpf_sockopt, retval): ++ if (size != size_default) ++ return false; ++ return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT; ++ default: ++ if (size != size_default) ++ 
return false; ++ break; ++ } ++ return true; ++} ++ ++#define CG_SOCKOPT_ACCESS_FIELD(T, F) \ ++ T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F), \ ++ si->dst_reg, si->src_reg, \ ++ offsetof(struct bpf_sockopt_kern, F)) ++ ++static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, ++ u32 *target_size) ++{ ++ struct bpf_insn *insn = insn_buf; ++ ++ switch (si->off) { ++ case offsetof(struct bpf_sockopt, sk): ++ *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk); ++ break; ++ case offsetof(struct bpf_sockopt, level): ++ if (type == BPF_WRITE) ++ *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level); ++ else ++ *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level); ++ break; ++ case offsetof(struct bpf_sockopt, optname): ++ if (type == BPF_WRITE) ++ *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname); ++ else ++ *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname); ++ break; ++ case offsetof(struct bpf_sockopt, optlen): ++ if (type == BPF_WRITE) ++ *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen); ++ else ++ *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen); ++ break; ++ case offsetof(struct bpf_sockopt, retval): ++ if (type == BPF_WRITE) ++ *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval); ++ else ++ *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval); ++ break; ++ case offsetof(struct bpf_sockopt, optval): ++ *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval); ++ break; ++ case offsetof(struct bpf_sockopt, optval_end): ++ *insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end); ++ break; ++ } ++ ++ return insn - insn_buf; ++} ++ ++static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf, ++ bool direct_write, ++ const struct bpf_prog *prog) ++{ ++ /* Nothing to do for sockopt argument. The data is kzalloc'ated. 
++ */ ++ return 0; ++} ++ ++const struct bpf_verifier_ops cg_sockopt_verifier_ops = { ++ .get_func_proto = cg_sockopt_func_proto, ++ .is_valid_access = cg_sockopt_is_valid_access, ++ .convert_ctx_access = cg_sockopt_convert_ctx_access, ++ .gen_prologue = cg_sockopt_get_prologue, ++}; ++ ++const struct bpf_prog_ops cg_sockopt_prog_ops = { ++}; +--- a/kernel/bpf/core.c ++++ b/kernel/bpf/core.c +@@ -1,3 +1,4 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later + /* + * Linux Socket Filter - Kernel level socket filtering + * +@@ -12,21 +13,22 @@ + * Alexei Starovoitov + * Daniel Borkmann + * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of the GNU General Public License +- * as published by the Free Software Foundation; either version +- * 2 of the License, or (at your option) any later version. +- * + * Andi Kleen - Fix a few bad bugs and races. + * Kris Katterjohn - Added many additional checks in bpf_check_classic() + */ + ++#include + #include + #include + #include + #include + #include + #include ++#include ++#include ++#include ++#include ++#include + + #include + +@@ -47,6 +49,7 @@ + #define DST regs[insn->dst_reg] + #define SRC regs[insn->src_reg] + #define FP regs[BPF_REG_FP] ++#define AX regs[BPF_REG_AX] + #define ARG1 regs[BPF_REG_ARG1] + #define CTX regs[BPF_REG_CTX] + #define IMM insn->imm +@@ -70,10 +73,9 @@ void *bpf_internal_load_pointer_neg_help + return NULL; + } + +-struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) ++struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags) + { +- gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | +- gfp_extra_flags; ++ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; + struct bpf_prog_aux *aux; + struct bpf_prog *fp; + +@@ -82,8 +84,6 @@ struct bpf_prog *bpf_prog_alloc(unsigned + if (fp == NULL) + return NULL; + +- kmemcheck_annotate_bitfield(fp, meta); +- + aux = kzalloc(sizeof(*aux), GFP_KERNEL | 
gfp_extra_flags); + if (aux == NULL) { + vfree(fp); +@@ -93,30 +93,151 @@ struct bpf_prog *bpf_prog_alloc(unsigned + fp->pages = size / PAGE_SIZE; + fp->aux = aux; + fp->aux->prog = fp; ++ fp->jit_requested = ebpf_jit_enabled(); ++ ++ INIT_LIST_HEAD_RCU(&fp->aux->ksym_lnode); + + return fp; + } ++ ++struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags) ++{ ++ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; ++ struct bpf_prog *prog; ++ int cpu; ++ ++ prog = bpf_prog_alloc_no_stats(size, gfp_extra_flags); ++ if (!prog) ++ return NULL; ++ ++ prog->aux->stats = alloc_percpu_gfp(struct bpf_prog_stats, gfp_flags); ++ if (!prog->aux->stats) { ++ kfree(prog->aux); ++ vfree(prog); ++ return NULL; ++ } ++ ++ for_each_possible_cpu(cpu) { ++ struct bpf_prog_stats *pstats; ++ ++ pstats = per_cpu_ptr(prog->aux->stats, cpu); ++ u64_stats_init(&pstats->syncp); ++ } ++ return prog; ++} + EXPORT_SYMBOL_GPL(bpf_prog_alloc); + ++int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog) ++{ ++ if (!prog->aux->nr_linfo || !prog->jit_requested) ++ return 0; ++ ++ prog->aux->jited_linfo = kcalloc(prog->aux->nr_linfo, ++ sizeof(*prog->aux->jited_linfo), ++ GFP_KERNEL | __GFP_NOWARN); ++ if (!prog->aux->jited_linfo) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++void bpf_prog_free_jited_linfo(struct bpf_prog *prog) ++{ ++ kfree(prog->aux->jited_linfo); ++ prog->aux->jited_linfo = NULL; ++} ++ ++void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog) ++{ ++ if (prog->aux->jited_linfo && !prog->aux->jited_linfo[0]) ++ bpf_prog_free_jited_linfo(prog); ++} ++ ++/* The jit engine is responsible to provide an array ++ * for insn_off to the jited_off mapping (insn_to_jit_off). ++ * ++ * The idx to this array is the insn_off. Hence, the insn_off ++ * here is relative to the prog itself instead of the main prog. ++ * This array has one entry for each xlated bpf insn. ++ * ++ * jited_off is the byte off to the last byte of the jited insn. 
++ * ++ * Hence, with ++ * insn_start: ++ * The first bpf insn off of the prog. The insn off ++ * here is relative to the main prog. ++ * e.g. if prog is a subprog, insn_start > 0 ++ * linfo_idx: ++ * The prog's idx to prog->aux->linfo and jited_linfo ++ * ++ * jited_linfo[linfo_idx] = prog->bpf_func ++ * ++ * For i > linfo_idx, ++ * ++ * jited_linfo[i] = prog->bpf_func + ++ * insn_to_jit_off[linfo[i].insn_off - insn_start - 1] ++ */ ++void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, ++ const u32 *insn_to_jit_off) ++{ ++ u32 linfo_idx, insn_start, insn_end, nr_linfo, i; ++ const struct bpf_line_info *linfo; ++ void **jited_linfo; ++ ++ if (!prog->aux->jited_linfo) ++ /* Userspace did not provide linfo */ ++ return; ++ ++ linfo_idx = prog->aux->linfo_idx; ++ linfo = &prog->aux->linfo[linfo_idx]; ++ insn_start = linfo[0].insn_off; ++ insn_end = insn_start + prog->len; ++ ++ jited_linfo = &prog->aux->jited_linfo[linfo_idx]; ++ jited_linfo[0] = prog->bpf_func; ++ ++ nr_linfo = prog->aux->nr_linfo - linfo_idx; ++ ++ for (i = 1; i < nr_linfo && linfo[i].insn_off < insn_end; i++) ++ /* The verifier ensures that linfo[i].insn_off is ++ * strictly increasing ++ */ ++ jited_linfo[i] = prog->bpf_func + ++ insn_to_jit_off[linfo[i].insn_off - insn_start - 1]; ++} ++ ++void bpf_prog_free_linfo(struct bpf_prog *prog) ++{ ++ bpf_prog_free_jited_linfo(prog); ++ kvfree(prog->aux->linfo); ++} ++ + struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, + gfp_t gfp_extra_flags) + { +- gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO | +- gfp_extra_flags; ++ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; + struct bpf_prog *fp; ++ u32 pages, delta; ++ int ret; + + BUG_ON(fp_old == NULL); + + size = round_up(size, PAGE_SIZE); +- if (size <= fp_old->pages * PAGE_SIZE) ++ pages = size / PAGE_SIZE; ++ if (pages <= fp_old->pages) + return fp_old; + +- fp = __vmalloc(size, gfp_flags, PAGE_KERNEL); +- if (fp != NULL) { +- 
kmemcheck_annotate_bitfield(fp, meta); ++ delta = pages - fp_old->pages; ++ ret = __bpf_prog_charge(fp_old->aux->user, delta); ++ if (ret) ++ return NULL; + ++ fp = __vmalloc(size, gfp_flags, PAGE_KERNEL); ++ if (fp == NULL) { ++ __bpf_prog_uncharge(fp_old->aux->user, delta); ++ } else { + memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); +- fp->pages = size / PAGE_SIZE; ++ fp->pages = pages; + fp->aux->prog = fp; + + /* We keep fp->aux from fp_old around in the new +@@ -128,40 +249,578 @@ struct bpf_prog *bpf_prog_realloc(struct + + return fp; + } +-EXPORT_SYMBOL_GPL(bpf_prog_realloc); + + void __bpf_prog_free(struct bpf_prog *fp) + { +- kfree(fp->aux); ++ if (fp->aux) { ++ free_percpu(fp->aux->stats); ++ kfree(fp->aux); ++ } + vfree(fp); + } +-EXPORT_SYMBOL_GPL(__bpf_prog_free); ++ ++int bpf_prog_calc_tag(struct bpf_prog *fp) ++{ ++ const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64); ++ u32 raw_size = bpf_prog_tag_scratch_size(fp); ++ u32 digest[SHA_DIGEST_WORDS]; ++ u32 ws[SHA_WORKSPACE_WORDS]; ++ u32 i, bsize, psize, blocks; ++ struct bpf_insn *dst; ++ bool was_ld_map; ++ u8 *raw, *todo; ++ __be32 *result; ++ __be64 *bits; ++ ++ raw = vmalloc(raw_size); ++ if (!raw) ++ return -ENOMEM; ++ ++ sha_init(digest); ++ memset(ws, 0, sizeof(ws)); ++ ++ /* We need to take out the map fd for the digest calculation ++ * since they are unstable from user space side. 
++ */ ++ dst = (void *)raw; ++ for (i = 0, was_ld_map = false; i < fp->len; i++) { ++ dst[i] = fp->insnsi[i]; ++ if (!was_ld_map && ++ dst[i].code == (BPF_LD | BPF_IMM | BPF_DW) && ++ (dst[i].src_reg == BPF_PSEUDO_MAP_FD || ++ dst[i].src_reg == BPF_PSEUDO_MAP_VALUE)) { ++ was_ld_map = true; ++ dst[i].imm = 0; ++ } else if (was_ld_map && ++ dst[i].code == 0 && ++ dst[i].dst_reg == 0 && ++ dst[i].src_reg == 0 && ++ dst[i].off == 0) { ++ was_ld_map = false; ++ dst[i].imm = 0; ++ } else { ++ was_ld_map = false; ++ } ++ } ++ ++ psize = bpf_prog_insn_size(fp); ++ memset(&raw[psize], 0, raw_size - psize); ++ raw[psize++] = 0x80; ++ ++ bsize = round_up(psize, SHA_MESSAGE_BYTES); ++ blocks = bsize / SHA_MESSAGE_BYTES; ++ todo = raw; ++ if (bsize - psize >= sizeof(__be64)) { ++ bits = (__be64 *)(todo + bsize - sizeof(__be64)); ++ } else { ++ bits = (__be64 *)(todo + bsize + bits_offset); ++ blocks++; ++ } ++ *bits = cpu_to_be64((psize - 1) << 3); ++ ++ while (blocks--) { ++ sha_transform(digest, todo, ws); ++ todo += SHA_MESSAGE_BYTES; ++ } ++ ++ result = (__force __be32 *)digest; ++ for (i = 0; i < SHA_DIGEST_WORDS; i++) ++ result[i] = cpu_to_be32(digest[i]); ++ memcpy(fp->tag, result, sizeof(fp->tag)); ++ ++ vfree(raw); ++ return 0; ++} ++ ++static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, s32 end_old, ++ s32 end_new, s32 curr, const bool probe_pass) ++{ ++ const s64 imm_min = S32_MIN, imm_max = S32_MAX; ++ s32 delta = end_new - end_old; ++ s64 imm = insn->imm; ++ ++ if (curr < pos && curr + imm + 1 >= end_old) ++ imm += delta; ++ else if (curr >= end_new && curr + imm + 1 < end_new) ++ imm -= delta; ++ if (imm < imm_min || imm > imm_max) ++ return -ERANGE; ++ if (!probe_pass) ++ insn->imm = imm; ++ return 0; ++} ++ ++static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, s32 end_old, ++ s32 end_new, s32 curr, const bool probe_pass) ++{ ++ const s32 off_min = S16_MIN, off_max = S16_MAX; ++ s32 delta = end_new - end_old; ++ s32 off = insn->off; ++ ++ 
if (curr < pos && curr + off + 1 >= end_old) ++ off += delta; ++ else if (curr >= end_new && curr + off + 1 < end_new) ++ off -= delta; ++ if (off < off_min || off > off_max) ++ return -ERANGE; ++ if (!probe_pass) ++ insn->off = off; ++ return 0; ++} ++ ++static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, s32 end_old, ++ s32 end_new, const bool probe_pass) ++{ ++ u32 i, insn_cnt = prog->len + (probe_pass ? end_new - end_old : 0); ++ struct bpf_insn *insn = prog->insnsi; ++ int ret = 0; ++ ++ for (i = 0; i < insn_cnt; i++, insn++) { ++ u8 code; ++ ++ /* In the probing pass we still operate on the original, ++ * unpatched image in order to check overflows before we ++ * do any other adjustments. Therefore skip the patchlet. ++ */ ++ if (probe_pass && i == pos) { ++ i = end_new; ++ insn = prog->insnsi + end_old; ++ } ++ code = insn->code; ++ if ((BPF_CLASS(code) != BPF_JMP && ++ BPF_CLASS(code) != BPF_JMP32) || ++ BPF_OP(code) == BPF_EXIT) ++ continue; ++ /* Adjust offset of jmps if we cross patch boundaries. 
*/ ++ if (BPF_OP(code) == BPF_CALL) { ++ if (insn->src_reg != BPF_PSEUDO_CALL) ++ continue; ++ ret = bpf_adj_delta_to_imm(insn, pos, end_old, ++ end_new, i, probe_pass); ++ } else { ++ ret = bpf_adj_delta_to_off(insn, pos, end_old, ++ end_new, i, probe_pass); ++ } ++ if (ret) ++ break; ++ } ++ ++ return ret; ++} ++ ++static void bpf_adj_linfo(struct bpf_prog *prog, u32 off, u32 delta) ++{ ++ struct bpf_line_info *linfo; ++ u32 i, nr_linfo; ++ ++ nr_linfo = prog->aux->nr_linfo; ++ if (!nr_linfo || !delta) ++ return; ++ ++ linfo = prog->aux->linfo; ++ ++ for (i = 0; i < nr_linfo; i++) ++ if (off < linfo[i].insn_off) ++ break; ++ ++ /* Push all off < linfo[i].insn_off by delta */ ++ for (; i < nr_linfo; i++) ++ linfo[i].insn_off += delta; ++} ++ ++struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, ++ const struct bpf_insn *patch, u32 len) ++{ ++ u32 insn_adj_cnt, insn_rest, insn_delta = len - 1; ++ const u32 cnt_max = S16_MAX; ++ struct bpf_prog *prog_adj; ++ int err; ++ ++ /* Since our patchlet doesn't expand the image, we're done. */ ++ if (insn_delta == 0) { ++ memcpy(prog->insnsi + off, patch, sizeof(*patch)); ++ return prog; ++ } ++ ++ insn_adj_cnt = prog->len + insn_delta; ++ ++ /* Reject anything that would potentially let the insn->off ++ * target overflow when we have excessive program expansions. ++ * We need to probe here before we do any reallocation where ++ * we afterwards may not fail anymore. ++ */ ++ if (insn_adj_cnt > cnt_max && ++ (err = bpf_adj_branches(prog, off, off + 1, off + len, true))) ++ return ERR_PTR(err); ++ ++ /* Several new instructions need to be inserted. Make room ++ * for them. Likely, there's no need for a new allocation as ++ * last page could have large enough tailroom. 
++ */ ++ prog_adj = bpf_prog_realloc(prog, bpf_prog_size(insn_adj_cnt), ++ GFP_USER); ++ if (!prog_adj) ++ return ERR_PTR(-ENOMEM); ++ ++ prog_adj->len = insn_adj_cnt; ++ ++ /* Patching happens in 3 steps: ++ * ++ * 1) Move over tail of insnsi from next instruction onwards, ++ * so we can patch the single target insn with one or more ++ * new ones (patching is always from 1 to n insns, n > 0). ++ * 2) Inject new instructions at the target location. ++ * 3) Adjust branch offsets if necessary. ++ */ ++ insn_rest = insn_adj_cnt - off - len; ++ ++ memmove(prog_adj->insnsi + off + len, prog_adj->insnsi + off + 1, ++ sizeof(*patch) * insn_rest); ++ memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len); ++ ++ /* We are guaranteed to not fail at this point, otherwise ++ * the ship has sailed to reverse to the original state. An ++ * overflow cannot happen at this point. ++ */ ++ BUG_ON(bpf_adj_branches(prog_adj, off, off + 1, off + len, false)); ++ ++ bpf_adj_linfo(prog_adj, off, insn_delta); ++ ++ return prog_adj; ++} ++ ++int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt) ++{ ++ /* Branch offsets can't overflow when program is shrinking, no need ++ * to call bpf_adj_branches(..., true) here ++ */ ++ memmove(prog->insnsi + off, prog->insnsi + off + cnt, ++ sizeof(struct bpf_insn) * (prog->len - off - cnt)); ++ prog->len -= cnt; ++ ++ return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false)); ++} ++ ++static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp) ++{ ++ int i; ++ ++ for (i = 0; i < fp->aux->func_cnt; i++) ++ bpf_prog_kallsyms_del(fp->aux->func[i]); ++} ++ ++void bpf_prog_kallsyms_del_all(struct bpf_prog *fp) ++{ ++ bpf_prog_kallsyms_del_subprogs(fp); ++ bpf_prog_kallsyms_del(fp); ++} + + #ifdef CONFIG_BPF_JIT ++/* All BPF JIT sysctl knobs here. 
*/ ++int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON); ++int bpf_jit_harden __read_mostly; ++int bpf_jit_kallsyms __read_mostly; ++long bpf_jit_limit __read_mostly; ++ ++static __always_inline void ++bpf_get_prog_addr_region(const struct bpf_prog *prog, ++ unsigned long *symbol_start, ++ unsigned long *symbol_end) ++{ ++ const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(prog); ++ unsigned long addr = (unsigned long)hdr; ++ ++ WARN_ON_ONCE(!bpf_prog_ebpf_jited(prog)); ++ ++ *symbol_start = addr; ++ *symbol_end = addr + hdr->pages * PAGE_SIZE; ++} ++ ++void bpf_get_prog_name(const struct bpf_prog *prog, char *sym) ++{ ++ const char *end = sym + KSYM_NAME_LEN; ++ const struct btf_type *type; ++ const char *func_name; ++ ++ BUILD_BUG_ON(sizeof("bpf_prog_") + ++ sizeof(prog->tag) * 2 + ++ /* name has been null terminated. ++ * We should need +1 for the '_' preceding ++ * the name. However, the null character ++ * is double counted between the name and the ++ * sizeof("bpf_prog_") above, so we omit ++ * the +1 here. 
++ */ ++ sizeof(prog->aux->name) > KSYM_NAME_LEN); ++ ++ sym += snprintf(sym, KSYM_NAME_LEN, "bpf_prog_"); ++ sym = bin2hex(sym, prog->tag, sizeof(prog->tag)); ++ ++ /* prog->aux->name will be ignored if full btf name is available */ ++ if (prog->aux->func_info_cnt) { ++ type = btf_type_by_id(prog->aux->btf, ++ prog->aux->func_info[prog->aux->func_idx].type_id); ++ func_name = btf_name_by_offset(prog->aux->btf, type->name_off); ++ snprintf(sym, (size_t)(end - sym), "_%s", func_name); ++ return; ++ } ++ ++ if (prog->aux->name[0]) ++ snprintf(sym, (size_t)(end - sym), "_%s", prog->aux->name); ++ else ++ *sym = 0; ++} ++ ++static __always_inline unsigned long ++bpf_get_prog_addr_start(struct latch_tree_node *n) ++{ ++ unsigned long symbol_start, symbol_end; ++ const struct bpf_prog_aux *aux; ++ ++ aux = container_of(n, struct bpf_prog_aux, ksym_tnode); ++ bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end); ++ ++ return symbol_start; ++} ++ ++static __always_inline bool bpf_tree_less(struct latch_tree_node *a, ++ struct latch_tree_node *b) ++{ ++ return bpf_get_prog_addr_start(a) < bpf_get_prog_addr_start(b); ++} ++ ++static __always_inline int bpf_tree_comp(void *key, struct latch_tree_node *n) ++{ ++ unsigned long val = (unsigned long)key; ++ unsigned long symbol_start, symbol_end; ++ const struct bpf_prog_aux *aux; ++ ++ aux = container_of(n, struct bpf_prog_aux, ksym_tnode); ++ bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end); ++ ++ if (val < symbol_start) ++ return -1; ++ if (val >= symbol_end) ++ return 1; ++ ++ return 0; ++} ++ ++static const struct latch_tree_ops bpf_tree_ops = { ++ .less = bpf_tree_less, ++ .comp = bpf_tree_comp, ++}; ++ ++static DEFINE_SPINLOCK(bpf_lock); ++static LIST_HEAD(bpf_kallsyms); ++static struct latch_tree_root bpf_tree __cacheline_aligned; ++ ++static void bpf_prog_ksym_node_add(struct bpf_prog_aux *aux) ++{ ++ WARN_ON_ONCE(!list_empty(&aux->ksym_lnode)); ++ list_add_tail_rcu(&aux->ksym_lnode, 
&bpf_kallsyms); ++ latch_tree_insert(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops); ++} ++ ++static void bpf_prog_ksym_node_del(struct bpf_prog_aux *aux) ++{ ++ if (list_empty(&aux->ksym_lnode)) ++ return; ++ ++ latch_tree_erase(&aux->ksym_tnode, &bpf_tree, &bpf_tree_ops); ++ list_del_rcu(&aux->ksym_lnode); ++} ++ ++static bool bpf_prog_kallsyms_candidate(const struct bpf_prog *fp) ++{ ++ return fp->jited && !bpf_prog_was_classic(fp); ++} ++ ++static bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp) ++{ ++ return list_empty(&fp->aux->ksym_lnode) || ++ fp->aux->ksym_lnode.prev == LIST_POISON2; ++} ++ ++void bpf_prog_kallsyms_add(struct bpf_prog *fp) ++{ ++ if (!bpf_prog_kallsyms_candidate(fp) || ++ !capable(CAP_SYS_ADMIN)) ++ return; ++ ++ spin_lock_bh(&bpf_lock); ++ bpf_prog_ksym_node_add(fp->aux); ++ spin_unlock_bh(&bpf_lock); ++} ++ ++void bpf_prog_kallsyms_del(struct bpf_prog *fp) ++{ ++ if (!bpf_prog_kallsyms_candidate(fp)) ++ return; ++ ++ spin_lock_bh(&bpf_lock); ++ bpf_prog_ksym_node_del(fp->aux); ++ spin_unlock_bh(&bpf_lock); ++} ++ ++static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr) ++{ ++ struct latch_tree_node *n; ++ ++ if (!bpf_jit_kallsyms_enabled()) ++ return NULL; ++ ++ n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); ++ return n ? 
++ container_of(n, struct bpf_prog_aux, ksym_tnode)->prog : ++ NULL; ++} ++ ++const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, ++ unsigned long *off, char *sym) ++{ ++ unsigned long symbol_start, symbol_end; ++ struct bpf_prog *prog; ++ char *ret = NULL; ++ ++ rcu_read_lock(); ++ prog = bpf_prog_kallsyms_find(addr); ++ if (prog) { ++ bpf_get_prog_addr_region(prog, &symbol_start, &symbol_end); ++ bpf_get_prog_name(prog, sym); ++ ++ ret = sym; ++ if (size) ++ *size = symbol_end - symbol_start; ++ if (off) ++ *off = addr - symbol_start; ++ } ++ rcu_read_unlock(); ++ ++ return ret; ++} ++ ++bool is_bpf_text_address(unsigned long addr) ++{ ++ bool ret; ++ ++ rcu_read_lock(); ++ ret = bpf_prog_kallsyms_find(addr) != NULL; ++ rcu_read_unlock(); ++ ++ return ret; ++} ++ ++int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, ++ char *sym) ++{ ++ struct bpf_prog_aux *aux; ++ unsigned int it = 0; ++ int ret = -ERANGE; ++ ++ if (!bpf_jit_kallsyms_enabled()) ++ return ret; ++ ++ rcu_read_lock(); ++ list_for_each_entry_rcu(aux, &bpf_kallsyms, ksym_lnode) { ++ if (it++ != symnum) ++ continue; ++ ++ bpf_get_prog_name(aux->prog, sym); ++ ++ *value = (unsigned long)aux->prog->bpf_func; ++ *type = BPF_SYM_ELF_TYPE; ++ ++ ret = 0; ++ break; ++ } ++ rcu_read_unlock(); ++ ++ return ret; ++} ++ ++static atomic_long_t bpf_jit_current; ++ ++/* Can be overridden by an arch's JIT compiler if it has a custom, ++ * dedicated BPF backend memory area, or if neither of the two ++ * below apply. ++ */ ++u64 __weak bpf_jit_alloc_exec_limit(void) ++{ ++#if defined(MODULES_VADDR) ++ return MODULES_END - MODULES_VADDR; ++#else ++ return VMALLOC_END - VMALLOC_START; ++#endif ++} ++ ++static int __init bpf_jit_charge_init(void) ++{ ++ /* Only used as heuristic here to derive limit. 
*/ ++ bpf_jit_limit = min_t(u64, round_up(bpf_jit_alloc_exec_limit() >> 2, ++ PAGE_SIZE), LONG_MAX); ++ return 0; ++} ++pure_initcall(bpf_jit_charge_init); ++ ++static int bpf_jit_charge_modmem(u32 pages) ++{ ++ if (atomic_long_add_return(pages, &bpf_jit_current) > ++ (bpf_jit_limit >> PAGE_SHIFT)) { ++ if (!capable(CAP_SYS_ADMIN)) { ++ atomic_long_sub(pages, &bpf_jit_current); ++ return -EPERM; ++ } ++ } ++ ++ return 0; ++} ++ ++static void bpf_jit_uncharge_modmem(u32 pages) ++{ ++ atomic_long_sub(pages, &bpf_jit_current); ++} ++ ++void *__weak bpf_jit_alloc_exec(unsigned long size) ++{ ++ return module_alloc(size); ++} ++ ++void __weak bpf_jit_free_exec(void *addr) ++{ ++ module_memfree(addr); ++} ++ + struct bpf_binary_header * + bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, + unsigned int alignment, + bpf_jit_fill_hole_t bpf_fill_ill_insns) + { + struct bpf_binary_header *hdr; +- unsigned int size, hole, start; ++ u32 size, hole, start, pages; + + /* Most of BPF filters are really small, but if some of them + * fill a page, allow at least 128 extra bytes to insert a + * random section of illegal instructions. + */ + size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE); +- hdr = module_alloc(size); +- if (hdr == NULL) ++ pages = size / PAGE_SIZE; ++ ++ if (bpf_jit_charge_modmem(pages)) ++ return NULL; ++ hdr = bpf_jit_alloc_exec(size); ++ if (!hdr) { ++ bpf_jit_uncharge_modmem(pages); + return NULL; ++ } + + /* Fill space with illegal/arch-dep instructions. */ + bpf_fill_ill_insns(hdr, size); + +- hdr->pages = size / PAGE_SIZE; ++ hdr->pages = pages; + hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)), + PAGE_SIZE - sizeof(*hdr)); +- start = (prandom_u32() % hole) & ~(alignment - 1); ++ start = (get_random_int() % hole) & ~(alignment - 1); + + /* Leave a random number of instructions before BPF code. 
*/ + *image_ptr = &hdr->image[start]; +@@ -171,13 +830,301 @@ bpf_jit_binary_alloc(unsigned int progle + + void bpf_jit_binary_free(struct bpf_binary_header *hdr) + { +- module_memfree(hdr); ++ u32 pages = hdr->pages; ++ ++ bpf_jit_free_exec(hdr); ++ bpf_jit_uncharge_modmem(pages); ++} ++ ++/* This symbol is only overridden by archs that have different ++ * requirements than the usual eBPF JITs, f.e. when they only ++ * implement cBPF JIT, do not set images read-only, etc. ++ */ ++void __weak bpf_jit_free(struct bpf_prog *fp) ++{ ++ if (fp->jited) { ++ struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp); ++ ++ bpf_jit_binary_unlock_ro(hdr); ++ bpf_jit_binary_free(hdr); ++ ++ WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(fp)); ++ } ++ ++ bpf_prog_unlock_free(fp); ++} ++ ++int bpf_jit_get_func_addr(const struct bpf_prog *prog, ++ const struct bpf_insn *insn, bool extra_pass, ++ u64 *func_addr, bool *func_addr_fixed) ++{ ++ s16 off = insn->off; ++ s32 imm = insn->imm; ++ u8 *addr; ++ ++ *func_addr_fixed = insn->src_reg != BPF_PSEUDO_CALL; ++ if (!*func_addr_fixed) { ++ /* Place-holder address till the last pass has collected ++ * all addresses for JITed subprograms in which case we ++ * can pick them up from prog->aux. ++ */ ++ if (!extra_pass) ++ addr = NULL; ++ else if (prog->aux->func && ++ off >= 0 && off < prog->aux->func_cnt) ++ addr = (u8 *)prog->aux->func[off]->bpf_func; ++ else ++ return -EINVAL; ++ } else { ++ /* Address of a BPF helper call. Since part of the core ++ * kernel, it's always at a fixed location. __bpf_call_base ++ * and the helper with imm relative to it are both in core ++ * kernel. 
++ */ ++ addr = (u8 *)__bpf_call_base + imm; ++ } ++ ++ *func_addr = (unsigned long)addr; ++ return 0; ++} ++ ++static int bpf_jit_blind_insn(const struct bpf_insn *from, ++ const struct bpf_insn *aux, ++ struct bpf_insn *to_buff, ++ bool emit_zext) ++{ ++ struct bpf_insn *to = to_buff; ++ u32 imm_rnd = get_random_int(); ++ s16 off; ++ ++ BUILD_BUG_ON(BPF_REG_AX + 1 != MAX_BPF_JIT_REG); ++ BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG); ++ ++ /* Constraints on AX register: ++ * ++ * AX register is inaccessible from user space. It is mapped in ++ * all JITs, and used here for constant blinding rewrites. It is ++ * typically "stateless" meaning its contents are only valid within ++ * the executed instruction, but not across several instructions. ++ * There are a few exceptions however which are further detailed ++ * below. ++ * ++ * Constant blinding is only used by JITs, not in the interpreter. ++ * The interpreter uses AX in some occasions as a local temporary ++ * register e.g. in DIV or MOD instructions. ++ * ++ * In restricted circumstances, the verifier can also use the AX ++ * register for rewrites as long as they do not interfere with ++ * the above cases! 
++ */ ++ if (from->dst_reg == BPF_REG_AX || from->src_reg == BPF_REG_AX) ++ goto out; ++ ++ if (from->imm == 0 && ++ (from->code == (BPF_ALU | BPF_MOV | BPF_K) || ++ from->code == (BPF_ALU64 | BPF_MOV | BPF_K))) { ++ *to++ = BPF_ALU64_REG(BPF_XOR, from->dst_reg, from->dst_reg); ++ goto out; ++ } ++ ++ switch (from->code) { ++ case BPF_ALU | BPF_ADD | BPF_K: ++ case BPF_ALU | BPF_SUB | BPF_K: ++ case BPF_ALU | BPF_AND | BPF_K: ++ case BPF_ALU | BPF_OR | BPF_K: ++ case BPF_ALU | BPF_XOR | BPF_K: ++ case BPF_ALU | BPF_MUL | BPF_K: ++ case BPF_ALU | BPF_MOV | BPF_K: ++ case BPF_ALU | BPF_DIV | BPF_K: ++ case BPF_ALU | BPF_MOD | BPF_K: ++ *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); ++ *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); ++ *to++ = BPF_ALU32_REG(from->code, from->dst_reg, BPF_REG_AX); ++ break; ++ ++ case BPF_ALU64 | BPF_ADD | BPF_K: ++ case BPF_ALU64 | BPF_SUB | BPF_K: ++ case BPF_ALU64 | BPF_AND | BPF_K: ++ case BPF_ALU64 | BPF_OR | BPF_K: ++ case BPF_ALU64 | BPF_XOR | BPF_K: ++ case BPF_ALU64 | BPF_MUL | BPF_K: ++ case BPF_ALU64 | BPF_MOV | BPF_K: ++ case BPF_ALU64 | BPF_DIV | BPF_K: ++ case BPF_ALU64 | BPF_MOD | BPF_K: ++ *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); ++ *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); ++ *to++ = BPF_ALU64_REG(from->code, from->dst_reg, BPF_REG_AX); ++ break; ++ ++ case BPF_JMP | BPF_JEQ | BPF_K: ++ case BPF_JMP | BPF_JNE | BPF_K: ++ case BPF_JMP | BPF_JGT | BPF_K: ++ case BPF_JMP | BPF_JLT | BPF_K: ++ case BPF_JMP | BPF_JGE | BPF_K: ++ case BPF_JMP | BPF_JLE | BPF_K: ++ case BPF_JMP | BPF_JSGT | BPF_K: ++ case BPF_JMP | BPF_JSLT | BPF_K: ++ case BPF_JMP | BPF_JSGE | BPF_K: ++ case BPF_JMP | BPF_JSLE | BPF_K: ++ case BPF_JMP | BPF_JSET | BPF_K: ++ /* Accommodate for extra offset in case of a backjump. 
*/ ++ off = from->off; ++ if (off < 0) ++ off -= 2; ++ *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); ++ *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); ++ *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off); ++ break; ++ ++ case BPF_JMP32 | BPF_JEQ | BPF_K: ++ case BPF_JMP32 | BPF_JNE | BPF_K: ++ case BPF_JMP32 | BPF_JGT | BPF_K: ++ case BPF_JMP32 | BPF_JLT | BPF_K: ++ case BPF_JMP32 | BPF_JGE | BPF_K: ++ case BPF_JMP32 | BPF_JLE | BPF_K: ++ case BPF_JMP32 | BPF_JSGT | BPF_K: ++ case BPF_JMP32 | BPF_JSLT | BPF_K: ++ case BPF_JMP32 | BPF_JSGE | BPF_K: ++ case BPF_JMP32 | BPF_JSLE | BPF_K: ++ case BPF_JMP32 | BPF_JSET | BPF_K: ++ /* Accommodate for extra offset in case of a backjump. */ ++ off = from->off; ++ if (off < 0) ++ off -= 2; ++ *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); ++ *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); ++ *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX, ++ off); ++ break; ++ ++ case BPF_LD | BPF_IMM | BPF_DW: ++ *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[1].imm); ++ *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); ++ *to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); ++ *to++ = BPF_ALU64_REG(BPF_MOV, aux[0].dst_reg, BPF_REG_AX); ++ break; ++ case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. 
*/ ++ *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); ++ *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); ++ if (emit_zext) ++ *to++ = BPF_ZEXT_REG(BPF_REG_AX); ++ *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); ++ break; ++ ++ case BPF_ST | BPF_MEM | BPF_DW: ++ case BPF_ST | BPF_MEM | BPF_W: ++ case BPF_ST | BPF_MEM | BPF_H: ++ case BPF_ST | BPF_MEM | BPF_B: ++ *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm); ++ *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); ++ *to++ = BPF_STX_MEM(from->code, from->dst_reg, BPF_REG_AX, from->off); ++ break; ++ } ++out: ++ return to - to_buff; ++} ++ ++static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other, ++ gfp_t gfp_extra_flags) ++{ ++ gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags; ++ struct bpf_prog *fp; ++ ++ fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL); ++ if (fp != NULL) { ++ /* aux->prog still points to the fp_other one, so ++ * when promoting the clone to the real program, ++ * this still needs to be adapted. ++ */ ++ memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); ++ } ++ ++ return fp; ++} ++ ++static void bpf_prog_clone_free(struct bpf_prog *fp) ++{ ++ /* aux was stolen by the other clone, so we cannot free ++ * it from this path! It will be freed eventually by the ++ * other program on release. ++ * ++ * At this point, we don't need a deferred release since ++ * clone is guaranteed to not be locked. ++ */ ++ fp->aux = NULL; ++ __bpf_prog_free(fp); ++} ++ ++void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other) ++{ ++ /* We have to repoint aux->prog to self, as we don't ++ * know whether fp here is the clone or the original. 
++ */ ++ fp->aux->prog = fp; ++ bpf_prog_clone_free(fp_other); ++} ++ ++struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog) ++{ ++ struct bpf_insn insn_buff[16], aux[2]; ++ struct bpf_prog *clone, *tmp; ++ int insn_delta, insn_cnt; ++ struct bpf_insn *insn; ++ int i, rewritten; ++ ++ if (!bpf_jit_blinding_enabled(prog) || prog->blinded) ++ return prog; ++ ++ clone = bpf_prog_clone_create(prog, GFP_USER); ++ if (!clone) ++ return ERR_PTR(-ENOMEM); ++ ++ insn_cnt = clone->len; ++ insn = clone->insnsi; ++ ++ for (i = 0; i < insn_cnt; i++, insn++) { ++ /* We temporarily need to hold the original ld64 insn ++ * so that we can still access the first part in the ++ * second blinding run. ++ */ ++ if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) && ++ insn[1].code == 0) ++ memcpy(aux, insn, sizeof(aux)); ++ ++ rewritten = bpf_jit_blind_insn(insn, aux, insn_buff, ++ clone->aux->verifier_zext); ++ if (!rewritten) ++ continue; ++ ++ tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten); ++ if (IS_ERR(tmp)) { ++ /* Patching may have repointed aux->prog during ++ * realloc from the original one, so we need to ++ * fix it up here on error. ++ */ ++ bpf_jit_prog_release_other(prog, clone); ++ return tmp; ++ } ++ ++ clone = tmp; ++ insn_delta = rewritten - 1; ++ ++ /* Walk new program and skip insns we just inserted. */ ++ insn = clone->insnsi + i + insn_delta; ++ insn_cnt += insn_delta; ++ i += insn_delta; ++ } ++ ++ clone->blinded = 1; ++ return clone; + } + #endif /* CONFIG_BPF_JIT */ + + /* Base function for offset calculation. Needs to go into .text section, + * therefore keeping it non-static as well; will also be used by JITs +- * anyway later on, so do not let the compiler omit it. ++ * anyway later on, so do not let the compiler omit it. This also needs ++ * to go into kallsyms for correlation from e.g. bpftool, so naming ++ * must not change. 
+ */ + noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) + { +@@ -185,157 +1132,243 @@ noinline u64 __bpf_call_base(u64 r1, u64 + } + EXPORT_SYMBOL_GPL(__bpf_call_base); + ++/* All UAPI available opcodes. */ ++#define BPF_INSN_MAP(INSN_2, INSN_3) \ ++ /* 32 bit ALU operations. */ \ ++ /* Register based. */ \ ++ INSN_3(ALU, ADD, X), \ ++ INSN_3(ALU, SUB, X), \ ++ INSN_3(ALU, AND, X), \ ++ INSN_3(ALU, OR, X), \ ++ INSN_3(ALU, LSH, X), \ ++ INSN_3(ALU, RSH, X), \ ++ INSN_3(ALU, XOR, X), \ ++ INSN_3(ALU, MUL, X), \ ++ INSN_3(ALU, MOV, X), \ ++ INSN_3(ALU, ARSH, X), \ ++ INSN_3(ALU, DIV, X), \ ++ INSN_3(ALU, MOD, X), \ ++ INSN_2(ALU, NEG), \ ++ INSN_3(ALU, END, TO_BE), \ ++ INSN_3(ALU, END, TO_LE), \ ++ /* Immediate based. */ \ ++ INSN_3(ALU, ADD, K), \ ++ INSN_3(ALU, SUB, K), \ ++ INSN_3(ALU, AND, K), \ ++ INSN_3(ALU, OR, K), \ ++ INSN_3(ALU, LSH, K), \ ++ INSN_3(ALU, RSH, K), \ ++ INSN_3(ALU, XOR, K), \ ++ INSN_3(ALU, MUL, K), \ ++ INSN_3(ALU, MOV, K), \ ++ INSN_3(ALU, ARSH, K), \ ++ INSN_3(ALU, DIV, K), \ ++ INSN_3(ALU, MOD, K), \ ++ /* 64 bit ALU operations. */ \ ++ /* Register based. */ \ ++ INSN_3(ALU64, ADD, X), \ ++ INSN_3(ALU64, SUB, X), \ ++ INSN_3(ALU64, AND, X), \ ++ INSN_3(ALU64, OR, X), \ ++ INSN_3(ALU64, LSH, X), \ ++ INSN_3(ALU64, RSH, X), \ ++ INSN_3(ALU64, XOR, X), \ ++ INSN_3(ALU64, MUL, X), \ ++ INSN_3(ALU64, MOV, X), \ ++ INSN_3(ALU64, ARSH, X), \ ++ INSN_3(ALU64, DIV, X), \ ++ INSN_3(ALU64, MOD, X), \ ++ INSN_2(ALU64, NEG), \ ++ /* Immediate based. */ \ ++ INSN_3(ALU64, ADD, K), \ ++ INSN_3(ALU64, SUB, K), \ ++ INSN_3(ALU64, AND, K), \ ++ INSN_3(ALU64, OR, K), \ ++ INSN_3(ALU64, LSH, K), \ ++ INSN_3(ALU64, RSH, K), \ ++ INSN_3(ALU64, XOR, K), \ ++ INSN_3(ALU64, MUL, K), \ ++ INSN_3(ALU64, MOV, K), \ ++ INSN_3(ALU64, ARSH, K), \ ++ INSN_3(ALU64, DIV, K), \ ++ INSN_3(ALU64, MOD, K), \ ++ /* Call instruction. */ \ ++ INSN_2(JMP, CALL), \ ++ /* Exit instruction. */ \ ++ INSN_2(JMP, EXIT), \ ++ /* 32-bit Jump instructions. 
*/ \ ++ /* Register based. */ \ ++ INSN_3(JMP32, JEQ, X), \ ++ INSN_3(JMP32, JNE, X), \ ++ INSN_3(JMP32, JGT, X), \ ++ INSN_3(JMP32, JLT, X), \ ++ INSN_3(JMP32, JGE, X), \ ++ INSN_3(JMP32, JLE, X), \ ++ INSN_3(JMP32, JSGT, X), \ ++ INSN_3(JMP32, JSLT, X), \ ++ INSN_3(JMP32, JSGE, X), \ ++ INSN_3(JMP32, JSLE, X), \ ++ INSN_3(JMP32, JSET, X), \ ++ /* Immediate based. */ \ ++ INSN_3(JMP32, JEQ, K), \ ++ INSN_3(JMP32, JNE, K), \ ++ INSN_3(JMP32, JGT, K), \ ++ INSN_3(JMP32, JLT, K), \ ++ INSN_3(JMP32, JGE, K), \ ++ INSN_3(JMP32, JLE, K), \ ++ INSN_3(JMP32, JSGT, K), \ ++ INSN_3(JMP32, JSLT, K), \ ++ INSN_3(JMP32, JSGE, K), \ ++ INSN_3(JMP32, JSLE, K), \ ++ INSN_3(JMP32, JSET, K), \ ++ /* Jump instructions. */ \ ++ /* Register based. */ \ ++ INSN_3(JMP, JEQ, X), \ ++ INSN_3(JMP, JNE, X), \ ++ INSN_3(JMP, JGT, X), \ ++ INSN_3(JMP, JLT, X), \ ++ INSN_3(JMP, JGE, X), \ ++ INSN_3(JMP, JLE, X), \ ++ INSN_3(JMP, JSGT, X), \ ++ INSN_3(JMP, JSLT, X), \ ++ INSN_3(JMP, JSGE, X), \ ++ INSN_3(JMP, JSLE, X), \ ++ INSN_3(JMP, JSET, X), \ ++ /* Immediate based. */ \ ++ INSN_3(JMP, JEQ, K), \ ++ INSN_3(JMP, JNE, K), \ ++ INSN_3(JMP, JGT, K), \ ++ INSN_3(JMP, JLT, K), \ ++ INSN_3(JMP, JGE, K), \ ++ INSN_3(JMP, JLE, K), \ ++ INSN_3(JMP, JSGT, K), \ ++ INSN_3(JMP, JSLT, K), \ ++ INSN_3(JMP, JSGE, K), \ ++ INSN_3(JMP, JSLE, K), \ ++ INSN_3(JMP, JSET, K), \ ++ INSN_2(JMP, JA), \ ++ /* Store instructions. */ \ ++ /* Register based. */ \ ++ INSN_3(STX, MEM, B), \ ++ INSN_3(STX, MEM, H), \ ++ INSN_3(STX, MEM, W), \ ++ INSN_3(STX, MEM, DW), \ ++ INSN_3(STX, XADD, W), \ ++ INSN_3(STX, XADD, DW), \ ++ /* Immediate based. */ \ ++ INSN_3(ST, MEM, B), \ ++ INSN_3(ST, MEM, H), \ ++ INSN_3(ST, MEM, W), \ ++ INSN_3(ST, MEM, DW), \ ++ /* Load instructions. */ \ ++ /* Register based. */ \ ++ INSN_3(LDX, MEM, B), \ ++ INSN_3(LDX, MEM, H), \ ++ INSN_3(LDX, MEM, W), \ ++ INSN_3(LDX, MEM, DW), \ ++ /* Immediate based. 
*/ \ ++ INSN_3(LD, IMM, DW) ++ ++bool bpf_opcode_in_insntable(u8 code) ++{ ++#define BPF_INSN_2_TBL(x, y) [BPF_##x | BPF_##y] = true ++#define BPF_INSN_3_TBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = true ++ static const bool public_insntable[256] = { ++ [0 ... 255] = false, ++ /* Now overwrite non-defaults ... */ ++ BPF_INSN_MAP(BPF_INSN_2_TBL, BPF_INSN_3_TBL), ++ /* UAPI exposed, but rewritten opcodes. cBPF carry-over. */ ++ [BPF_LD | BPF_ABS | BPF_B] = true, ++ [BPF_LD | BPF_ABS | BPF_H] = true, ++ [BPF_LD | BPF_ABS | BPF_W] = true, ++ [BPF_LD | BPF_IND | BPF_B] = true, ++ [BPF_LD | BPF_IND | BPF_H] = true, ++ [BPF_LD | BPF_IND | BPF_W] = true, ++ }; ++#undef BPF_INSN_3_TBL ++#undef BPF_INSN_2_TBL ++ return public_insntable[code]; ++} ++ ++#ifndef CONFIG_BPF_JIT_ALWAYS_ON + /** + * __bpf_prog_run - run eBPF program on a given context +- * @ctx: is the data we are operating on ++ * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers + * @insn: is the array of eBPF instructions ++ * @stack: is the eBPF storage stack + * + * Decode and execute eBPF instructions. + */ +-static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn) ++static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack) + { +- u64 stack[MAX_BPF_STACK / sizeof(u64)]; +- u64 regs[MAX_BPF_REG], tmp; +- static const void *jumptable[256] = { ++#define BPF_INSN_2_LBL(x, y) [BPF_##x | BPF_##y] = &&x##_##y ++#define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z ++ static const void * const jumptable[256] = { + [0 ... 255] = &&default_label, + /* Now overwrite non-defaults ... 
*/ +- /* 32 bit ALU operations */ +- [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X, +- [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K, +- [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X, +- [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K, +- [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X, +- [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K, +- [BPF_ALU | BPF_OR | BPF_X] = &&ALU_OR_X, +- [BPF_ALU | BPF_OR | BPF_K] = &&ALU_OR_K, +- [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X, +- [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K, +- [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X, +- [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K, +- [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X, +- [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K, +- [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X, +- [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K, +- [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X, +- [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K, +- [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X, +- [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K, +- [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X, +- [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K, +- [BPF_ALU | BPF_NEG] = &&ALU_NEG, +- [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE, +- [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE, +- /* 64 bit ALU operations */ +- [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X, +- [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K, +- [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X, +- [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K, +- [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X, +- [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K, +- [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X, +- [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K, +- [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X, +- [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K, +- [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X, +- [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K, +- [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X, +- [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K, +- [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X, +- [BPF_ALU64 | BPF_MUL | BPF_K] = 
&&ALU64_MUL_K, +- [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X, +- [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K, +- [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X, +- [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K, +- [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X, +- [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K, +- [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X, +- [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K, +- [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG, +- /* Call instruction */ +- [BPF_JMP | BPF_CALL] = &&JMP_CALL, +- [BPF_JMP | BPF_CALL | BPF_X] = &&JMP_TAIL_CALL, +- /* Jumps */ +- [BPF_JMP | BPF_JA] = &&JMP_JA, +- [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X, +- [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K, +- [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X, +- [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K, +- [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X, +- [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K, +- [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X, +- [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K, +- [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X, +- [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K, +- [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X, +- [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K, +- [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X, +- [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K, +- /* Program return */ +- [BPF_JMP | BPF_EXIT] = &&JMP_EXIT, +- /* Store instructions */ +- [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B, +- [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H, +- [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W, +- [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW, +- [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W, +- [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW, +- [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B, +- [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H, +- [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W, +- [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW, +- /* Load instructions */ +- [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B, +- [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H, +- [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W, +- 
[BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW, +- [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W, +- [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H, +- [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B, +- [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W, +- [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H, +- [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B, +- [BPF_LD | BPF_IMM | BPF_DW] = &&LD_IMM_DW, ++ BPF_INSN_MAP(BPF_INSN_2_LBL, BPF_INSN_3_LBL), ++ /* Non-UAPI available opcodes. */ ++ [BPF_JMP | BPF_CALL_ARGS] = &&JMP_CALL_ARGS, ++ [BPF_JMP | BPF_TAIL_CALL] = &&JMP_TAIL_CALL, + }; ++#undef BPF_INSN_3_LBL ++#undef BPF_INSN_2_LBL + u32 tail_call_cnt = 0; +- void *ptr; +- int off; + + #define CONT ({ insn++; goto select_insn; }) + #define CONT_JMP ({ insn++; goto select_insn; }) + +- FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; +- ARG1 = (u64) (unsigned long) ctx; +- +- /* Registers used in classic BPF programs need to be reset first. */ +- regs[BPF_REG_A] = 0; +- regs[BPF_REG_X] = 0; +- + select_insn: + goto *jumptable[insn->code]; + +- /* ALU */ +-#define ALU(OPCODE, OP) \ +- ALU64_##OPCODE##_X: \ +- DST = DST OP SRC; \ +- CONT; \ +- ALU_##OPCODE##_X: \ +- DST = (u32) DST OP (u32) SRC; \ +- CONT; \ +- ALU64_##OPCODE##_K: \ +- DST = DST OP IMM; \ +- CONT; \ +- ALU_##OPCODE##_K: \ +- DST = (u32) DST OP (u32) IMM; \ ++ /* Explicitly mask the register-based shift amounts with 63 or 31 ++ * to avoid undefined behavior. Normally this won't affect the ++ * generated code, for example, in case of native 64 bit archs such ++ * as x86-64 or arm64, the compiler is optimizing the AND away for ++ * the interpreter. In case of JITs, each of the JIT backends compiles ++ * the BPF shift operations to machine instructions which produce ++ * implementation-defined results in such a case; the resulting ++ * contents of the register may be arbitrary, but program behaviour ++ * as a whole remains defined. In other words, in case of JIT backends, ++ * the AND must /not/ be added to the emitted LSH/RSH/ARSH translation. 
++ */ ++ /* ALU (shifts) */ ++#define SHT(OPCODE, OP) \ ++ ALU64_##OPCODE##_X: \ ++ DST = DST OP (SRC & 63); \ ++ CONT; \ ++ ALU_##OPCODE##_X: \ ++ DST = (u32) DST OP ((u32) SRC & 31); \ ++ CONT; \ ++ ALU64_##OPCODE##_K: \ ++ DST = DST OP IMM; \ ++ CONT; \ ++ ALU_##OPCODE##_K: \ ++ DST = (u32) DST OP (u32) IMM; \ ++ CONT; ++ /* ALU (rest) */ ++#define ALU(OPCODE, OP) \ ++ ALU64_##OPCODE##_X: \ ++ DST = DST OP SRC; \ ++ CONT; \ ++ ALU_##OPCODE##_X: \ ++ DST = (u32) DST OP (u32) SRC; \ ++ CONT; \ ++ ALU64_##OPCODE##_K: \ ++ DST = DST OP IMM; \ ++ CONT; \ ++ ALU_##OPCODE##_K: \ ++ DST = (u32) DST OP (u32) IMM; \ + CONT; +- + ALU(ADD, +) + ALU(SUB, -) + ALU(AND, &) + ALU(OR, |) +- ALU(LSH, <<) +- ALU(RSH, >>) + ALU(XOR, ^) + ALU(MUL, *) ++ SHT(LSH, <<) ++ SHT(RSH, >>) ++#undef SHT + #undef ALU + ALU_NEG: + DST = (u32) -DST; +@@ -359,51 +1392,49 @@ select_insn: + DST = (u64) (u32) insn[0].imm | ((u64) (u32) insn[1].imm) << 32; + insn++; + CONT; ++ ALU_ARSH_X: ++ DST = (u64) (u32) (((s32) DST) >> (SRC & 31)); ++ CONT; ++ ALU_ARSH_K: ++ DST = (u64) (u32) (((s32) DST) >> IMM); ++ CONT; + ALU64_ARSH_X: +- (*(s64 *) &DST) >>= SRC; ++ (*(s64 *) &DST) >>= (SRC & 63); + CONT; + ALU64_ARSH_K: + (*(s64 *) &DST) >>= IMM; + CONT; + ALU64_MOD_X: +- if (unlikely(SRC == 0)) +- return 0; +- div64_u64_rem(DST, SRC, &tmp); +- DST = tmp; ++ div64_u64_rem(DST, SRC, &AX); ++ DST = AX; + CONT; + ALU_MOD_X: +- if (unlikely(SRC == 0)) +- return 0; +- tmp = (u32) DST; +- DST = do_div(tmp, (u32) SRC); ++ AX = (u32) DST; ++ DST = do_div(AX, (u32) SRC); + CONT; + ALU64_MOD_K: +- div64_u64_rem(DST, IMM, &tmp); +- DST = tmp; ++ div64_u64_rem(DST, IMM, &AX); ++ DST = AX; + CONT; + ALU_MOD_K: +- tmp = (u32) DST; +- DST = do_div(tmp, (u32) IMM); ++ AX = (u32) DST; ++ DST = do_div(AX, (u32) IMM); + CONT; + ALU64_DIV_X: +- if (unlikely(SRC == 0)) +- return 0; + DST = div64_u64(DST, SRC); + CONT; + ALU_DIV_X: +- if (unlikely(SRC == 0)) +- return 0; +- tmp = (u32) DST; +- do_div(tmp, (u32) SRC); +- DST = 
(u32) tmp; ++ AX = (u32) DST; ++ do_div(AX, (u32) SRC); ++ DST = (u32) AX; + CONT; + ALU64_DIV_K: + DST = div64_u64(DST, IMM); + CONT; + ALU_DIV_K: +- tmp = (u32) DST; +- do_div(tmp, (u32) IMM); +- DST = (u32) tmp; ++ AX = (u32) DST; ++ do_div(AX, (u32) IMM); ++ DST = (u32) AX; + CONT; + ALU_END_TO_BE: + switch (IMM) { +@@ -442,22 +1473,28 @@ select_insn: + BPF_R4, BPF_R5); + CONT; + ++ JMP_CALL_ARGS: ++ BPF_R0 = (__bpf_call_base_args + insn->imm)(BPF_R1, BPF_R2, ++ BPF_R3, BPF_R4, ++ BPF_R5, ++ insn + insn->off + 1); ++ CONT; ++ + JMP_TAIL_CALL: { + struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; + struct bpf_array *array = container_of(map, struct bpf_array, map); + struct bpf_prog *prog; +- u64 index = BPF_R3; ++ u32 index = BPF_R3; + + if (unlikely(index >= array->map.max_entries)) + goto out; +- + if (unlikely(tail_call_cnt > MAX_TAIL_CALL_CNT)) + goto out; + + tail_call_cnt++; + + prog = READ_ONCE(array->ptrs[index]); +- if (unlikely(!prog)) ++ if (!prog) + goto out; + + /* ARG1 at this point is guaranteed to point to CTX from +@@ -470,97 +1507,49 @@ select_insn: + out: + CONT; + } +- /* JMP */ + JMP_JA: + insn += insn->off; + CONT; +- JMP_JEQ_X: +- if (DST == SRC) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JEQ_K: +- if (DST == IMM) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JNE_X: +- if (DST != SRC) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JNE_K: +- if (DST != IMM) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JGT_X: +- if (DST > SRC) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JGT_K: +- if (DST > IMM) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JGE_X: +- if (DST >= SRC) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JGE_K: +- if (DST >= IMM) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JSGT_X: +- if (((s64) DST) > ((s64) SRC)) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JSGT_K: +- if (((s64) DST) > ((s64) IMM)) { +- 
insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JSGE_X: +- if (((s64) DST) >= ((s64) SRC)) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JSGE_K: +- if (((s64) DST) >= ((s64) IMM)) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JSET_X: +- if (DST & SRC) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; +- JMP_JSET_K: +- if (DST & IMM) { +- insn += insn->off; +- CONT_JMP; +- } +- CONT; + JMP_EXIT: + return BPF_R0; +- ++ /* JMP */ ++#define COND_JMP(SIGN, OPCODE, CMP_OP) \ ++ JMP_##OPCODE##_X: \ ++ if ((SIGN##64) DST CMP_OP (SIGN##64) SRC) { \ ++ insn += insn->off; \ ++ CONT_JMP; \ ++ } \ ++ CONT; \ ++ JMP32_##OPCODE##_X: \ ++ if ((SIGN##32) DST CMP_OP (SIGN##32) SRC) { \ ++ insn += insn->off; \ ++ CONT_JMP; \ ++ } \ ++ CONT; \ ++ JMP_##OPCODE##_K: \ ++ if ((SIGN##64) DST CMP_OP (SIGN##64) IMM) { \ ++ insn += insn->off; \ ++ CONT_JMP; \ ++ } \ ++ CONT; \ ++ JMP32_##OPCODE##_K: \ ++ if ((SIGN##32) DST CMP_OP (SIGN##32) IMM) { \ ++ insn += insn->off; \ ++ CONT_JMP; \ ++ } \ ++ CONT; ++ COND_JMP(u, JEQ, ==) ++ COND_JMP(u, JNE, !=) ++ COND_JMP(u, JGT, >) ++ COND_JMP(u, JLT, <) ++ COND_JMP(u, JGE, >=) ++ COND_JMP(u, JLE, <=) ++ COND_JMP(u, JSET, &) ++ COND_JMP(s, JSGT, >) ++ COND_JMP(s, JSLT, <) ++ COND_JMP(s, JSGE, >=) ++ COND_JMP(s, JSLE, <=) ++#undef COND_JMP + /* STX and ST and LDX*/ + #define LDST(SIZEOP, SIZE) \ + STX_MEM_##SIZEOP: \ +@@ -586,77 +1575,108 @@ out: + atomic64_add((u64) SRC, (atomic64_t *)(unsigned long) + (DST + insn->off)); + CONT; +- LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */ +- off = IMM; +-load_word: +- /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are +- * only appearing in the programs where ctx == +- * skb. All programs keep 'ctx' in regs[BPF_REG_CTX] +- * == BPF_R6, bpf_convert_filter() saves it in BPF_R6, +- * internal BPF verifier will check that BPF_R6 == +- * ctx. 
+- * +- * BPF_ABS and BPF_IND are wrappers of function calls, +- * so they scratch BPF_R1-BPF_R5 registers, preserve +- * BPF_R6-BPF_R9, and store return value into BPF_R0. +- * +- * Implicit input: +- * ctx == skb == BPF_R6 == CTX +- * +- * Explicit input: +- * SRC == any register +- * IMM == 32-bit immediate ++ ++ default_label: ++ /* If we ever reach this, we have a bug somewhere. Die hard here ++ * instead of just returning 0; we could be somewhere in a subprog, ++ * so execution could continue otherwise which we do /not/ want. + * +- * Output: +- * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness ++ * Note, verifier whitelists all opcodes in bpf_opcode_in_insntable(). + */ ++ pr_warn("BPF interpreter: unknown opcode %02x\n", insn->code); ++ BUG_ON(1); ++ return 0; ++} + +- ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp); +- if (likely(ptr != NULL)) { +- BPF_R0 = get_unaligned_be32(ptr); +- CONT; +- } ++#define PROG_NAME(stack_size) __bpf_prog_run##stack_size ++#define DEFINE_BPF_PROG_RUN(stack_size) \ ++static unsigned int PROG_NAME(stack_size)(const void *ctx, const struct bpf_insn *insn) \ ++{ \ ++ u64 stack[stack_size / sizeof(u64)]; \ ++ u64 regs[MAX_BPF_EXT_REG]; \ ++\ ++ FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; \ ++ ARG1 = (u64) (unsigned long) ctx; \ ++ return ___bpf_prog_run(regs, insn, stack); \ ++} + +- return 0; +- LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */ +- off = IMM; +-load_half: +- ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp); +- if (likely(ptr != NULL)) { +- BPF_R0 = get_unaligned_be16(ptr); +- CONT; +- } ++#define PROG_NAME_ARGS(stack_size) __bpf_prog_run_args##stack_size ++#define DEFINE_BPF_PROG_RUN_ARGS(stack_size) \ ++static u64 PROG_NAME_ARGS(stack_size)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, \ ++ const struct bpf_insn *insn) \ ++{ \ ++ u64 stack[stack_size / sizeof(u64)]; \ ++ u64 regs[MAX_BPF_EXT_REG]; \ ++\ ++ FP = (u64) (unsigned 
long) &stack[ARRAY_SIZE(stack)]; \ ++ BPF_R1 = r1; \ ++ BPF_R2 = r2; \ ++ BPF_R3 = r3; \ ++ BPF_R4 = r4; \ ++ BPF_R5 = r5; \ ++ return ___bpf_prog_run(regs, insn, stack); \ ++} + +- return 0; +- LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */ +- off = IMM; +-load_byte: +- ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp); +- if (likely(ptr != NULL)) { +- BPF_R0 = *(u8 *)ptr; +- CONT; +- } ++#define EVAL1(FN, X) FN(X) ++#define EVAL2(FN, X, Y...) FN(X) EVAL1(FN, Y) ++#define EVAL3(FN, X, Y...) FN(X) EVAL2(FN, Y) ++#define EVAL4(FN, X, Y...) FN(X) EVAL3(FN, Y) ++#define EVAL5(FN, X, Y...) FN(X) EVAL4(FN, Y) ++#define EVAL6(FN, X, Y...) FN(X) EVAL5(FN, Y) ++ ++EVAL6(DEFINE_BPF_PROG_RUN, 32, 64, 96, 128, 160, 192); ++EVAL6(DEFINE_BPF_PROG_RUN, 224, 256, 288, 320, 352, 384); ++EVAL4(DEFINE_BPF_PROG_RUN, 416, 448, 480, 512); ++ ++EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 32, 64, 96, 128, 160, 192); ++EVAL6(DEFINE_BPF_PROG_RUN_ARGS, 224, 256, 288, 320, 352, 384); ++EVAL4(DEFINE_BPF_PROG_RUN_ARGS, 416, 448, 480, 512); ++ ++#define PROG_NAME_LIST(stack_size) PROG_NAME(stack_size), ++ ++static unsigned int (*interpreters[])(const void *ctx, ++ const struct bpf_insn *insn) = { ++EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) ++EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) ++EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) ++}; ++#undef PROG_NAME_LIST ++#define PROG_NAME_LIST(stack_size) PROG_NAME_ARGS(stack_size), ++static u64 (*interpreters_args[])(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5, ++ const struct bpf_insn *insn) = { ++EVAL6(PROG_NAME_LIST, 32, 64, 96, 128, 160, 192) ++EVAL6(PROG_NAME_LIST, 224, 256, 288, 320, 352, 384) ++EVAL4(PROG_NAME_LIST, 416, 448, 480, 512) ++}; ++#undef PROG_NAME_LIST + +- return 0; +- LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */ +- off = IMM + SRC; +- goto load_word; +- LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */ +- off = IMM + SRC; +- goto load_half; +- LD_IND_B: /* 
BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */ +- off = IMM + SRC; +- goto load_byte; ++void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth) ++{ ++ stack_depth = max_t(u32, stack_depth, 1); ++ insn->off = (s16) insn->imm; ++ insn->imm = interpreters_args[(round_up(stack_depth, 32) / 32) - 1] - ++ __bpf_call_base_args; ++ insn->code = BPF_JMP | BPF_CALL_ARGS; ++} + +- default_label: +- /* If we ever reach this, we have a bug somewhere. */ +- WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code); +- return 0; ++#else ++static unsigned int __bpf_prog_ret0_warn(const void *ctx, ++ const struct bpf_insn *insn) ++{ ++ /* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON ++ * is not working properly, so warn about it! ++ */ ++ WARN_ON_ONCE(1); ++ return 0; + } ++#endif + + bool bpf_prog_array_compatible(struct bpf_array *array, + const struct bpf_prog *fp) + { ++ if (fp->kprobe_override) ++ return false; ++ + if (!array->owner_prog_type) { + /* There's no owner yet where we could check for + * compatibility. +@@ -691,18 +1711,62 @@ static int bpf_check_tail_call(const str + return 0; + } + ++static void bpf_prog_select_func(struct bpf_prog *fp) ++{ ++#ifndef CONFIG_BPF_JIT_ALWAYS_ON ++ u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1); ++ ++ fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1]; ++#else ++ fp->bpf_func = __bpf_prog_ret0_warn; ++#endif ++} ++ + /** + * bpf_prog_select_runtime - select exec runtime for BPF program + * @fp: bpf_prog populated with internal BPF program ++ * @err: pointer to error variable + * + * Try to JIT eBPF program, if JIT is not available, use interpreter. + * The BPF program will be executed via BPF_PROG_RUN() macro. + */ +-int bpf_prog_select_runtime(struct bpf_prog *fp) ++struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err) + { +- fp->bpf_func = (void *) __bpf_prog_run; ++ /* In case of BPF to BPF calls, verifier did all the prep ++ * work with regards to JITing, etc. 
++ */ ++ if (fp->bpf_func) ++ goto finalize; ++ ++ bpf_prog_select_func(fp); ++ ++ /* eBPF JITs can rewrite the program in case constant ++ * blinding is active. However, in case of error during ++ * blinding, bpf_int_jit_compile() must always return a ++ * valid program, which in this case would simply not ++ * be JITed, but falls back to the interpreter. ++ */ ++ if (!bpf_prog_is_dev_bound(fp->aux)) { ++ *err = bpf_prog_alloc_jited_linfo(fp); ++ if (*err) ++ return fp; ++ ++ fp = bpf_int_jit_compile(fp); ++ if (!fp->jited) { ++ bpf_prog_free_jited_linfo(fp); ++#ifdef CONFIG_BPF_JIT_ALWAYS_ON ++ *err = -ENOTSUPP; ++ return fp; ++#endif ++ } else { ++ bpf_prog_free_unused_jited_linfo(fp); ++ } ++ } else { ++ *err = -EINVAL; ++ return fp; ++ } + +- bpf_int_jit_compile(fp); ++finalize: + bpf_prog_lock_ro(fp); + + /* The tail call compatibility check can only be done at +@@ -710,16 +1774,238 @@ int bpf_prog_select_runtime(struct bpf_p + * with JITed or non JITed program concatenations and not + * all eBPF JITs might immediately support all features. 
+ */ +- return bpf_check_tail_call(fp); ++ *err = bpf_check_tail_call(fp); ++ ++ return fp; + } + EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); + ++static unsigned int __bpf_prog_ret1(const void *ctx, ++ const struct bpf_insn *insn) ++{ ++ return 1; ++} ++ ++static struct bpf_prog_dummy { ++ struct bpf_prog prog; ++} dummy_bpf_prog = { ++ .prog = { ++ .bpf_func = __bpf_prog_ret1, ++ }, ++}; ++ ++/* to avoid allocating empty bpf_prog_array for cgroups that ++ * don't have bpf program attached use one global 'empty_prog_array' ++ * It will not be modified the caller of bpf_prog_array_alloc() ++ * (since caller requested prog_cnt == 0) ++ * that pointer should be 'freed' by bpf_prog_array_free() ++ */ ++static struct { ++ struct bpf_prog_array hdr; ++ struct bpf_prog *null_prog; ++} empty_prog_array = { ++ .null_prog = NULL, ++}; ++ ++struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags) ++{ ++ if (prog_cnt) ++ return kzalloc(sizeof(struct bpf_prog_array) + ++ sizeof(struct bpf_prog_array_item) * ++ (prog_cnt + 1), ++ flags); ++ ++ return &empty_prog_array.hdr; ++} ++ ++void bpf_prog_array_free(struct bpf_prog_array *progs) ++{ ++ if (!progs || progs == &empty_prog_array.hdr) ++ return; ++ kfree_rcu(progs, rcu); ++} ++ ++int bpf_prog_array_length(struct bpf_prog_array *array) ++{ ++ struct bpf_prog_array_item *item; ++ u32 cnt = 0; ++ ++ for (item = array->items; item->prog; item++) ++ if (item->prog != &dummy_bpf_prog.prog) ++ cnt++; ++ return cnt; ++} ++ ++bool bpf_prog_array_is_empty(struct bpf_prog_array *array) ++{ ++ struct bpf_prog_array_item *item; ++ ++ for (item = array->items; item->prog; item++) ++ if (item->prog != &dummy_bpf_prog.prog) ++ return false; ++ return true; ++} ++ ++static bool bpf_prog_array_copy_core(struct bpf_prog_array *array, ++ u32 *prog_ids, ++ u32 request_cnt) ++{ ++ struct bpf_prog_array_item *item; ++ int i = 0; ++ ++ for (item = array->items; item->prog; item++) { ++ if (item->prog == &dummy_bpf_prog.prog) ++ 
continue; ++ prog_ids[i] = item->prog->aux->id; ++ if (++i == request_cnt) { ++ item++; ++ break; ++ } ++ } ++ ++ return !!(item->prog); ++} ++ ++int bpf_prog_array_copy_to_user(struct bpf_prog_array *array, ++ __u32 __user *prog_ids, u32 cnt) ++{ ++ unsigned long err = 0; ++ bool nospc; ++ u32 *ids; ++ ++ /* users of this function are doing: ++ * cnt = bpf_prog_array_length(); ++ * if (cnt > 0) ++ * bpf_prog_array_copy_to_user(..., cnt); ++ * so below kcalloc doesn't need extra cnt > 0 check. ++ */ ++ ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN); ++ if (!ids) ++ return -ENOMEM; ++ nospc = bpf_prog_array_copy_core(array, ids, cnt); ++ err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); ++ kfree(ids); ++ if (err) ++ return -EFAULT; ++ if (nospc) ++ return -ENOSPC; ++ return 0; ++} ++ ++void bpf_prog_array_delete_safe(struct bpf_prog_array *array, ++ struct bpf_prog *old_prog) ++{ ++ struct bpf_prog_array_item *item; ++ ++ for (item = array->items; item->prog; item++) ++ if (item->prog == old_prog) { ++ WRITE_ONCE(item->prog, &dummy_bpf_prog.prog); ++ break; ++ } ++} ++ ++int bpf_prog_array_copy(struct bpf_prog_array *old_array, ++ struct bpf_prog *exclude_prog, ++ struct bpf_prog *include_prog, ++ struct bpf_prog_array **new_array) ++{ ++ int new_prog_cnt, carry_prog_cnt = 0; ++ struct bpf_prog_array_item *existing; ++ struct bpf_prog_array *array; ++ bool found_exclude = false; ++ int new_prog_idx = 0; ++ ++ /* Figure out how many existing progs we need to carry over to ++ * the new array. ++ */ ++ if (old_array) { ++ existing = old_array->items; ++ for (; existing->prog; existing++) { ++ if (existing->prog == exclude_prog) { ++ found_exclude = true; ++ continue; ++ } ++ if (existing->prog != &dummy_bpf_prog.prog) ++ carry_prog_cnt++; ++ if (existing->prog == include_prog) ++ return -EEXIST; ++ } ++ } ++ ++ if (exclude_prog && !found_exclude) ++ return -ENOENT; ++ ++ /* How many progs (not NULL) will be in the new array? 
*/ ++ new_prog_cnt = carry_prog_cnt; ++ if (include_prog) ++ new_prog_cnt += 1; ++ ++ /* Do we have any prog (not NULL) in the new array? */ ++ if (!new_prog_cnt) { ++ *new_array = NULL; ++ return 0; ++ } ++ ++ /* +1 as the end of prog_array is marked with NULL */ ++ array = bpf_prog_array_alloc(new_prog_cnt + 1, GFP_KERNEL); ++ if (!array) ++ return -ENOMEM; ++ ++ /* Fill in the new prog array */ ++ if (carry_prog_cnt) { ++ existing = old_array->items; ++ for (; existing->prog; existing++) ++ if (existing->prog != exclude_prog && ++ existing->prog != &dummy_bpf_prog.prog) { ++ array->items[new_prog_idx++].prog = ++ existing->prog; ++ } ++ } ++ if (include_prog) ++ array->items[new_prog_idx++].prog = include_prog; ++ array->items[new_prog_idx].prog = NULL; ++ *new_array = array; ++ return 0; ++} ++ ++int bpf_prog_array_copy_info(struct bpf_prog_array *array, ++ u32 *prog_ids, u32 request_cnt, ++ u32 *prog_cnt) ++{ ++ u32 cnt = 0; ++ ++ if (array) ++ cnt = bpf_prog_array_length(array); ++ ++ *prog_cnt = cnt; ++ ++ /* return early if user requested only program count or nothing to copy */ ++ if (!request_cnt || !cnt) ++ return 0; ++ ++ /* this function is called under trace/bpf_trace.c: bpf_event_mutex */ ++ return bpf_prog_array_copy_core(array, prog_ids, request_cnt) ? 
-ENOSPC ++ : 0; ++} ++ + static void bpf_prog_free_deferred(struct work_struct *work) + { + struct bpf_prog_aux *aux; ++ int i; + + aux = container_of(work, struct bpf_prog_aux, work); +- bpf_jit_free(aux->prog); ++#ifdef CONFIG_PERF_EVENTS ++ if (aux->prog->has_callchain_buf) ++ put_callchain_buffers(); ++#endif ++ for (i = 0; i < aux->func_cnt; i++) ++ bpf_jit_free(aux->func[i]); ++ if (aux->func_cnt) { ++ kfree(aux->func); ++ bpf_prog_unlock_free(aux->prog); ++ } else { ++ bpf_jit_free(aux->prog); ++ } + } + + /* Free internal BPF program */ +@@ -740,7 +2026,7 @@ void bpf_user_rnd_init_once(void) + prandom_init_once(&bpf_user_rnd_state); + } + +-u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ++BPF_CALL_0(bpf_user_rnd_u32) + { + /* Should someone ever have the rather unwise idea to use some + * of the registers passed into this function, then note that +@@ -753,7 +2039,7 @@ u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 + + state = &get_cpu_var(bpf_user_rnd_state); + res = prandom_u32_state(state); +- put_cpu_var(state); ++ put_cpu_var(bpf_user_rnd_state); + + return res; + } +@@ -762,18 +2048,36 @@ u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 + const struct bpf_func_proto bpf_map_lookup_elem_proto __weak; + const struct bpf_func_proto bpf_map_update_elem_proto __weak; + const struct bpf_func_proto bpf_map_delete_elem_proto __weak; ++const struct bpf_func_proto bpf_map_push_elem_proto __weak; ++const struct bpf_func_proto bpf_map_pop_elem_proto __weak; ++const struct bpf_func_proto bpf_map_peek_elem_proto __weak; ++const struct bpf_func_proto bpf_spin_lock_proto __weak; ++const struct bpf_func_proto bpf_spin_unlock_proto __weak; + + const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; + const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; ++const struct bpf_func_proto bpf_get_numa_node_id_proto __weak; + const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; ++ + const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak; + 
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak; + const struct bpf_func_proto bpf_get_current_comm_proto __weak; ++const struct bpf_func_proto bpf_get_current_cgroup_id_proto __weak; ++const struct bpf_func_proto bpf_get_local_storage_proto __weak; ++ + const struct bpf_func_proto * __weak bpf_get_trace_printk_proto(void) + { + return NULL; + } + ++u64 __weak ++bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, ++ void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) ++{ ++ return -ENOTSUPP; ++} ++EXPORT_SYMBOL_GPL(bpf_event_output); ++ + /* Always built-in helper functions. */ + const struct bpf_func_proto bpf_tail_call_proto = { + .func = NULL, +@@ -784,9 +2088,34 @@ const struct bpf_func_proto bpf_tail_cal + .arg3_type = ARG_ANYTHING, + }; + +-/* For classic BPF JITs that don't implement bpf_int_jit_compile(). */ +-void __weak bpf_int_jit_compile(struct bpf_prog *prog) ++/* Stub for JITs that only support cBPF. eBPF programs are interpreted. ++ * It is encouraged to implement bpf_int_jit_compile() instead, so that ++ * eBPF and implicitly also cBPF can get JITed! ++ */ ++struct bpf_prog * __weak bpf_int_jit_compile(struct bpf_prog *prog) + { ++ return prog; ++} ++ ++/* Stub for JITs that support eBPF. All cBPF code gets transformed into ++ * eBPF by the kernel and is later compiled by bpf_int_jit_compile(). ++ */ ++void __weak bpf_jit_compile(struct bpf_prog *prog) ++{ ++} ++ ++bool __weak bpf_helper_changes_pkt_data(void *func) ++{ ++ return false; ++} ++ ++/* Return TRUE if the JIT backend wants verifier to enable sub-register usage ++ * analysis code and wants explicit zero extension inserted by verifier. ++ * Otherwise, return FALSE. 
++ */ ++bool __weak bpf_jit_needs_zext(void) ++{ ++ return false; + } + + /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call +@@ -797,3 +2126,13 @@ int __weak skb_copy_bits(const struct sk + { + return -EFAULT; + } ++ ++DEFINE_STATIC_KEY_FALSE(bpf_stats_enabled_key); ++EXPORT_SYMBOL(bpf_stats_enabled_key); ++ ++/* All definitions of tracepoints related to BPF. */ ++#define CREATE_TRACE_POINTS ++#include ++ ++EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception); ++EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_bulk_tx); +--- /dev/null ++++ b/kernel/bpf/devmap.c +@@ -0,0 +1,698 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2017 Covalent IO, Inc. http://covalent.io ++ */ ++ ++/* Devmaps primary use is as a backend map for XDP BPF helper call ++ * bpf_redirect_map(). Because XDP is mostly concerned with performance we ++ * spent some effort to ensure the datapath with redirect maps does not use ++ * any locking. This is a quick note on the details. ++ * ++ * We have three possible paths to get into the devmap control plane bpf ++ * syscalls, bpf programs, and driver side xmit/flush operations. A bpf syscall ++ * will invoke an update, delete, or lookup operation. To ensure updates and ++ * deletes appear atomic from the datapath side xchg() is used to modify the ++ * netdev_map array. Then because the datapath does a lookup into the netdev_map ++ * array (read-only) from an RCU critical section we use call_rcu() to wait for ++ * an rcu grace period before free'ing the old data structures. This ensures the ++ * datapath always has a valid copy. However, the datapath does a "flush" ++ * operation that pushes any pending packets in the driver outside the RCU ++ * critical section. Each bpf_dtab_netdev tracks these pending operations using ++ * a per-cpu flush list. The bpf_dtab_netdev object will not be destroyed until ++ * this list is empty, indicating outstanding flush operations have completed. 
++ * ++ * BPF syscalls may race with BPF program calls on any of the update, delete ++ * or lookup operations. As noted above the xchg() operation also keep the ++ * netdev_map consistent in this case. From the devmap side BPF programs ++ * calling into these operations are the same as multiple user space threads ++ * making system calls. ++ * ++ * Finally, any of the above may race with a netdev_unregister notifier. The ++ * unregister notifier must search for net devices in the map structure that ++ * contain a reference to the net device and remove them. This is a two step ++ * process (a) dereference the bpf_dtab_netdev object in netdev_map and (b) ++ * check to see if the ifindex is the same as the net_device being removed. ++ * When removing the dev a cmpxchg() is used to ensure the correct dev is ++ * removed, in the case of a concurrent update or delete operation it is ++ * possible that the initially referenced dev is no longer in the map. As the ++ * notifier hook walks the map we know that new dev references can not be ++ * added by the user because core infrastructure ensures dev_get_by_index() ++ * calls will fail at this point. ++ * ++ * The devmap_hash type is a map type which interprets keys as ifindexes and ++ * indexes these using a hashmap. This allows maps that use ifindex as key to be ++ * densely packed instead of having holes in the lookup array for unused ++ * ifindexes. The setup and packet enqueue/send code is shared between the two ++ * types of devmap; only the lookup and insertion is different. 
++ */ ++#include ++#include ++#include ++#include ++ ++#define DEV_CREATE_FLAG_MASK \ ++ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) ++ ++#define DEV_MAP_BULK_SIZE 16 ++struct bpf_dtab_netdev; ++ ++struct xdp_bulk_queue { ++ struct xdp_frame *q[DEV_MAP_BULK_SIZE]; ++ struct list_head flush_node; ++ struct net_device *dev_rx; ++ struct bpf_dtab_netdev *obj; ++ unsigned int count; ++}; ++ ++struct bpf_dtab_netdev { ++ struct net_device *dev; /* must be first member, due to tracepoint */ ++ struct hlist_node index_hlist; ++ struct bpf_dtab *dtab; ++ struct xdp_bulk_queue __percpu *bulkq; ++ struct rcu_head rcu; ++ unsigned int idx; /* keep track of map index for tracepoint */ ++}; ++ ++struct bpf_dtab { ++ struct bpf_map map; ++ struct bpf_dtab_netdev **netdev_map; /* DEVMAP type only */ ++ struct list_head __percpu *flush_list; ++ struct list_head list; ++ ++ /* these are only used for DEVMAP_HASH type maps */ ++ struct hlist_head *dev_index_head; ++ spinlock_t index_lock; ++ unsigned int items; ++ u32 n_buckets; ++}; ++ ++static DEFINE_SPINLOCK(dev_map_lock); ++static LIST_HEAD(dev_map_list); ++ ++static struct hlist_head *dev_map_create_hash(unsigned int entries, ++ int numa_node) ++{ ++ int i; ++ struct hlist_head *hash; ++ ++ hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node); ++ if (hash != NULL) ++ for (i = 0; i < entries; i++) ++ INIT_HLIST_HEAD(&hash[i]); ++ ++ return hash; ++} ++ ++static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab, ++ int idx) ++{ ++ return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)]; ++} ++ ++static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) ++{ ++ int err, cpu; ++ u64 cost; ++ ++ /* check sanity of attributes */ ++ if (attr->max_entries == 0 || attr->key_size != 4 || ++ attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK) ++ return -EINVAL; ++ ++ /* Lookup returns a pointer straight to dev->ifindex, so make sure the ++ * verifier prevents writes from the BPF 
side ++ */ ++ attr->map_flags |= BPF_F_RDONLY_PROG; ++ ++ ++ bpf_map_init_from_attr(&dtab->map, attr); ++ ++ /* make sure page count doesn't overflow */ ++ cost = (u64) sizeof(struct list_head) * num_possible_cpus(); ++ ++ if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { ++ dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); ++ ++ if (!dtab->n_buckets) /* Overflow check */ ++ return -EINVAL; ++ cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets; ++ } else { ++ cost += (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *); ++ } ++ ++ /* if map size is larger than memlock limit, reject it */ ++ err = bpf_map_charge_init(&dtab->map.memory, cost); ++ if (err) ++ return -EINVAL; ++ ++ dtab->flush_list = alloc_percpu(struct list_head); ++ if (!dtab->flush_list) ++ goto free_charge; ++ ++ for_each_possible_cpu(cpu) ++ INIT_LIST_HEAD(per_cpu_ptr(dtab->flush_list, cpu)); ++ ++ if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { ++ dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets, ++ dtab->map.numa_node); ++ if (!dtab->dev_index_head) ++ goto free_percpu; ++ ++ spin_lock_init(&dtab->index_lock); ++ } else { ++ dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries * ++ sizeof(struct bpf_dtab_netdev *), ++ dtab->map.numa_node); ++ if (!dtab->netdev_map) ++ goto free_percpu; ++ } ++ ++ return 0; ++ ++free_percpu: ++ free_percpu(dtab->flush_list); ++free_charge: ++ bpf_map_charge_finish(&dtab->map.memory); ++ return -ENOMEM; ++} ++ ++static struct bpf_map *dev_map_alloc(union bpf_attr *attr) ++{ ++ struct bpf_dtab *dtab; ++ int err; ++ ++ if (!capable(CAP_NET_ADMIN)) ++ return ERR_PTR(-EPERM); ++ ++ dtab = kzalloc(sizeof(*dtab), GFP_USER); ++ if (!dtab) ++ return ERR_PTR(-ENOMEM); ++ ++ err = dev_map_init_map(dtab, attr); ++ if (err) { ++ kfree(dtab); ++ return ERR_PTR(err); ++ } ++ ++ spin_lock(&dev_map_lock); ++ list_add_tail_rcu(&dtab->list, &dev_map_list); ++ spin_unlock(&dev_map_lock); ++ ++ return &dtab->map; ++} ++ ++static void 
dev_map_free(struct bpf_map *map) ++{ ++ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); ++ int i, cpu; ++ ++ /* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, ++ * so the programs (can be more than one that used this map) were ++ * disconnected from events. Wait for outstanding critical sections in ++ * these programs to complete. The rcu critical section only guarantees ++ * no further reads against netdev_map. It does __not__ ensure pending ++ * flush operations (if any) are complete. ++ */ ++ ++ spin_lock(&dev_map_lock); ++ list_del_rcu(&dtab->list); ++ spin_unlock(&dev_map_lock); ++ ++ bpf_clear_redirect_map(map); ++ synchronize_rcu(); ++ ++ /* Make sure prior __dev_map_entry_free() have completed. */ ++ rcu_barrier(); ++ ++ /* To ensure all pending flush operations have completed wait for flush ++ * list to empty on _all_ cpus. ++ * Because the above synchronize_rcu() ensures the map is disconnected ++ * from the program we can assume no new items will be added. 
++ */ ++ for_each_online_cpu(cpu) { ++ struct list_head *flush_list = per_cpu_ptr(dtab->flush_list, cpu); ++ ++ while (!list_empty(flush_list)) ++ cond_resched(); ++ } ++ ++ if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) { ++ for (i = 0; i < dtab->n_buckets; i++) { ++ struct bpf_dtab_netdev *dev; ++ struct hlist_head *head; ++ struct hlist_node *next; ++ ++ head = dev_map_index_hash(dtab, i); ++ ++ hlist_for_each_entry_safe(dev, next, head, index_hlist) { ++ hlist_del_rcu(&dev->index_hlist); ++ free_percpu(dev->bulkq); ++ dev_put(dev->dev); ++ kfree(dev); ++ } ++ } ++ ++ bpf_map_area_free(dtab->dev_index_head); ++ } else { ++ for (i = 0; i < dtab->map.max_entries; i++) { ++ struct bpf_dtab_netdev *dev; ++ ++ dev = dtab->netdev_map[i]; ++ if (!dev) ++ continue; ++ ++ free_percpu(dev->bulkq); ++ dev_put(dev->dev); ++ kfree(dev); ++ } ++ ++ bpf_map_area_free(dtab->netdev_map); ++ } ++ ++ free_percpu(dtab->flush_list); ++ kfree(dtab); ++} ++ ++static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key) ++{ ++ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); ++ u32 index = key ? 
*(u32 *)key : U32_MAX; ++ u32 *next = next_key; ++ ++ if (index >= dtab->map.max_entries) { ++ *next = 0; ++ return 0; ++ } ++ ++ if (index == dtab->map.max_entries - 1) ++ return -ENOENT; ++ *next = index + 1; ++ return 0; ++} ++ ++struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key) ++{ ++ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); ++ struct hlist_head *head = dev_map_index_hash(dtab, key); ++ struct bpf_dtab_netdev *dev; ++ ++ hlist_for_each_entry_rcu(dev, head, index_hlist) ++ if (dev->idx == key) ++ return dev; ++ ++ return NULL; ++} ++ ++static int dev_map_hash_get_next_key(struct bpf_map *map, void *key, ++ void *next_key) ++{ ++ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); ++ u32 idx, *next = next_key; ++ struct bpf_dtab_netdev *dev, *next_dev; ++ struct hlist_head *head; ++ int i = 0; ++ ++ if (!key) ++ goto find_first; ++ ++ idx = *(u32 *)key; ++ ++ dev = __dev_map_hash_lookup_elem(map, idx); ++ if (!dev) ++ goto find_first; ++ ++ next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&dev->index_hlist)), ++ struct bpf_dtab_netdev, index_hlist); ++ ++ if (next_dev) { ++ *next = next_dev->idx; ++ return 0; ++ } ++ ++ i = idx & (dtab->n_buckets - 1); ++ i++; ++ ++ find_first: ++ for (; i < dtab->n_buckets; i++) { ++ head = dev_map_index_hash(dtab, i); ++ ++ next_dev = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), ++ struct bpf_dtab_netdev, ++ index_hlist); ++ if (next_dev) { ++ *next = next_dev->idx; ++ return 0; ++ } ++ } ++ ++ return -ENOENT; ++} ++ ++/* __dev_map_flush is called from xdp_do_flush_map() which _must_ be signaled ++ * from the driver before returning from its napi->poll() routine. The poll() ++ * routine is called either from busy_poll context or net_rx_action signaled ++ * from NET_RX_SOFTIRQ. Either way the poll routine must complete before the ++ * net device can be torn down. 
On devmap tear down we ensure the flush list ++ * is empty before completing to ensure all flush operations have completed. ++ */ ++void __dev_map_flush(struct bpf_map *map) ++{ ++} ++ ++/* rcu_read_lock (from syscall and BPF contexts) ensures that if a delete and/or ++ * update happens in parallel here a dev_put wont happen until after reading the ++ * ifindex. ++ */ ++struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key) ++{ ++ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); ++ struct bpf_dtab_netdev *obj; ++ ++ if (key >= map->max_entries) ++ return NULL; ++ ++ obj = READ_ONCE(dtab->netdev_map[key]); ++ return obj; ++} ++ ++int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp, ++ struct net_device *dev_rx) ++{ ++ return -EOPNOTSUPP; ++} ++ ++int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, ++ struct bpf_prog *xdp_prog) ++{ ++ return -EOPNOTSUPP; ++} ++ ++static void *dev_map_lookup_elem(struct bpf_map *map, void *key) ++{ ++ struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key); ++ struct net_device *dev = obj ? obj->dev : NULL; ++ ++ return dev ? &dev->ifindex : NULL; ++} ++ ++static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key) ++{ ++ struct bpf_dtab_netdev *obj = __dev_map_hash_lookup_elem(map, ++ *(u32 *)key); ++ struct net_device *dev = obj ? obj->dev : NULL; ++ ++ return dev ? 
&dev->ifindex : NULL; ++} ++ ++static void __dev_map_entry_free(struct rcu_head *rcu) ++{ ++ struct bpf_dtab_netdev *dev; ++ ++ dev = container_of(rcu, struct bpf_dtab_netdev, rcu); ++ free_percpu(dev->bulkq); ++ dev_put(dev->dev); ++ kfree(dev); ++} ++ ++static int dev_map_delete_elem(struct bpf_map *map, void *key) ++{ ++ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); ++ struct bpf_dtab_netdev *old_dev; ++ int k = *(u32 *)key; ++ ++ if (k >= map->max_entries) ++ return -EINVAL; ++ ++ /* Use call_rcu() here to ensure any rcu critical sections have ++ * completed, but this does not guarantee a flush has happened ++ * yet. Because driver side rcu_read_lock/unlock only protects the ++ * running XDP program. However, for pending flush operations the ++ * dev and ctx are stored in another per cpu map. And additionally, ++ * the driver tear down ensures all soft irqs are complete before ++ * removing the net device in the case of dev_put equals zero. ++ */ ++ old_dev = xchg(&dtab->netdev_map[k], NULL); ++ if (old_dev) ++ call_rcu(&old_dev->rcu, __dev_map_entry_free); ++ return 0; ++} ++ ++static int dev_map_hash_delete_elem(struct bpf_map *map, void *key) ++{ ++ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); ++ struct bpf_dtab_netdev *old_dev; ++ int k = *(u32 *)key; ++ unsigned long flags; ++ int ret = -ENOENT; ++ ++ spin_lock_irqsave(&dtab->index_lock, flags); ++ ++ old_dev = __dev_map_hash_lookup_elem(map, k); ++ if (old_dev) { ++ dtab->items--; ++ hlist_del_init_rcu(&old_dev->index_hlist); ++ call_rcu(&old_dev->rcu, __dev_map_entry_free); ++ ret = 0; ++ } ++ spin_unlock_irqrestore(&dtab->index_lock, flags); ++ ++ return ret; ++} ++ ++static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net, ++ struct bpf_dtab *dtab, ++ u32 ifindex, ++ unsigned int idx) ++{ ++ gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; ++ struct bpf_dtab_netdev *dev; ++ struct xdp_bulk_queue *bq; ++ int cpu; ++ ++ dev = kmalloc_node(sizeof(*dev), gfp, 
dtab->map.numa_node); ++ if (!dev) ++ return ERR_PTR(-ENOMEM); ++ ++ dev->bulkq = __alloc_percpu_gfp(sizeof(*dev->bulkq), ++ sizeof(void *), gfp); ++ if (!dev->bulkq) { ++ kfree(dev); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ for_each_possible_cpu(cpu) { ++ bq = per_cpu_ptr(dev->bulkq, cpu); ++ bq->obj = dev; ++ } ++ ++ dev->dev = dev_get_by_index(net, ifindex); ++ if (!dev->dev) { ++ free_percpu(dev->bulkq); ++ kfree(dev); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ dev->idx = idx; ++ dev->dtab = dtab; ++ ++ return dev; ++} ++ ++static int __dev_map_update_elem(struct net *net, struct bpf_map *map, ++ void *key, void *value, u64 map_flags) ++{ ++ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); ++ struct bpf_dtab_netdev *dev, *old_dev; ++ u32 ifindex = *(u32 *)value; ++ u32 i = *(u32 *)key; ++ ++ if (unlikely(map_flags > BPF_EXIST)) ++ return -EINVAL; ++ if (unlikely(i >= dtab->map.max_entries)) ++ return -E2BIG; ++ if (unlikely(map_flags == BPF_NOEXIST)) ++ return -EEXIST; ++ ++ if (!ifindex) { ++ dev = NULL; ++ } else { ++ dev = __dev_map_alloc_node(net, dtab, ifindex, i); ++ if (IS_ERR(dev)) ++ return PTR_ERR(dev); ++ } ++ ++ /* Use call_rcu() here to ensure rcu critical sections have completed ++ * Remembering the driver side flush operation will happen before the ++ * net device is removed. 
++ */ ++ old_dev = xchg(&dtab->netdev_map[i], dev); ++ if (old_dev) ++ call_rcu(&old_dev->rcu, __dev_map_entry_free); ++ ++ return 0; ++} ++ ++static int dev_map_update_elem(struct bpf_map *map, void *key, void *value, ++ u64 map_flags) ++{ ++ return __dev_map_update_elem(current->nsproxy->net_ns, ++ map, key, value, map_flags); ++} ++ ++static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map, ++ void *key, void *value, u64 map_flags) ++{ ++ struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); ++ struct bpf_dtab_netdev *dev, *old_dev; ++ u32 ifindex = *(u32 *)value; ++ u32 idx = *(u32 *)key; ++ unsigned long flags; ++ int err = -EEXIST; ++ ++ if (unlikely(map_flags > BPF_EXIST || !ifindex)) ++ return -EINVAL; ++ ++ spin_lock_irqsave(&dtab->index_lock, flags); ++ ++ old_dev = __dev_map_hash_lookup_elem(map, idx); ++ if (old_dev && (map_flags & BPF_NOEXIST)) ++ goto out_err; ++ ++ dev = __dev_map_alloc_node(net, dtab, ifindex, idx); ++ if (IS_ERR(dev)) { ++ err = PTR_ERR(dev); ++ goto out_err; ++ } ++ ++ if (old_dev) { ++ hlist_del_rcu(&old_dev->index_hlist); ++ } else { ++ if (dtab->items >= dtab->map.max_entries) { ++ spin_unlock_irqrestore(&dtab->index_lock, flags); ++ call_rcu(&dev->rcu, __dev_map_entry_free); ++ return -E2BIG; ++ } ++ dtab->items++; ++ } ++ ++ hlist_add_head_rcu(&dev->index_hlist, ++ dev_map_index_hash(dtab, idx)); ++ spin_unlock_irqrestore(&dtab->index_lock, flags); ++ ++ if (old_dev) ++ call_rcu(&old_dev->rcu, __dev_map_entry_free); ++ ++ return 0; ++ ++out_err: ++ spin_unlock_irqrestore(&dtab->index_lock, flags); ++ return err; ++} ++ ++static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value, ++ u64 map_flags) ++{ ++ return __dev_map_hash_update_elem(current->nsproxy->net_ns, ++ map, key, value, map_flags); ++} ++ ++const struct bpf_map_ops dev_map_ops = { ++ .map_alloc = dev_map_alloc, ++ .map_free = dev_map_free, ++ .map_get_next_key = dev_map_get_next_key, ++ .map_lookup_elem = 
dev_map_lookup_elem, ++ .map_update_elem = dev_map_update_elem, ++ .map_delete_elem = dev_map_delete_elem, ++ .map_check_btf = map_check_no_btf, ++}; ++ ++const struct bpf_map_ops dev_map_hash_ops = { ++ .map_alloc = dev_map_alloc, ++ .map_free = dev_map_free, ++ .map_get_next_key = dev_map_hash_get_next_key, ++ .map_lookup_elem = dev_map_hash_lookup_elem, ++ .map_update_elem = dev_map_hash_update_elem, ++ .map_delete_elem = dev_map_hash_delete_elem, ++ .map_check_btf = map_check_no_btf, ++}; ++ ++static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab, ++ struct net_device *netdev) ++{ ++ unsigned long flags; ++ u32 i; ++ ++ spin_lock_irqsave(&dtab->index_lock, flags); ++ for (i = 0; i < dtab->n_buckets; i++) { ++ struct bpf_dtab_netdev *dev; ++ struct hlist_head *head; ++ struct hlist_node *next; ++ ++ head = dev_map_index_hash(dtab, i); ++ ++ hlist_for_each_entry_safe(dev, next, head, index_hlist) { ++ if (netdev != dev->dev) ++ continue; ++ ++ dtab->items--; ++ hlist_del_rcu(&dev->index_hlist); ++ call_rcu(&dev->rcu, __dev_map_entry_free); ++ } ++ } ++ spin_unlock_irqrestore(&dtab->index_lock, flags); ++} ++ ++static int dev_map_notification(struct notifier_block *notifier, ++ ulong event, void *ptr) ++{ ++ struct net_device *netdev = netdev_notifier_info_to_dev(ptr); ++ struct bpf_dtab *dtab; ++ int i; ++ ++ switch (event) { ++ case NETDEV_UNREGISTER: ++ /* This rcu_read_lock/unlock pair is needed because ++ * dev_map_list is an RCU list AND to ensure a delete ++ * operation does not free a netdev_map entry while we ++ * are comparing it against the netdev being unregistered. 
++ */ ++ rcu_read_lock(); ++ list_for_each_entry_rcu(dtab, &dev_map_list, list) { ++ if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) { ++ dev_map_hash_remove_netdev(dtab, netdev); ++ continue; ++ } ++ ++ for (i = 0; i < dtab->map.max_entries; i++) { ++ struct bpf_dtab_netdev *dev, *odev; ++ ++ dev = READ_ONCE(dtab->netdev_map[i]); ++ if (!dev || netdev != dev->dev) ++ continue; ++ odev = cmpxchg(&dtab->netdev_map[i], dev, NULL); ++ if (dev == odev) ++ call_rcu(&dev->rcu, ++ __dev_map_entry_free); ++ } ++ } ++ rcu_read_unlock(); ++ break; ++ default: ++ break; ++ } ++ return NOTIFY_OK; ++} ++ ++static struct notifier_block dev_map_notifier = { ++ .notifier_call = dev_map_notification, ++}; ++ ++static int __init dev_map_init(void) ++{ ++ /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */ ++ BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) != ++ offsetof(struct _bpf_dtab_netdev, dev)); ++ register_netdevice_notifier(&dev_map_notifier); ++ return 0; ++} ++ ++subsys_initcall(dev_map_init); +--- /dev/null ++++ b/kernel/bpf/disasm.c +@@ -0,0 +1,258 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com ++ * Copyright (c) 2016 Facebook ++ */ ++ ++#include ++ ++#include "disasm.h" ++ ++#define __BPF_FUNC_STR_FN(x) [BPF_FUNC_ ## x] = __stringify(bpf_ ## x) ++static const char * const func_id_str[] = { ++ __BPF_FUNC_MAPPER(__BPF_FUNC_STR_FN) ++}; ++#undef __BPF_FUNC_STR_FN ++ ++static const char *__func_get_name(const struct bpf_insn_cbs *cbs, ++ const struct bpf_insn *insn, ++ char *buff, size_t len) ++{ ++ BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID); ++ ++ if (insn->src_reg != BPF_PSEUDO_CALL && ++ insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID && ++ func_id_str[insn->imm]) ++ return func_id_str[insn->imm]; ++ ++ if (cbs && cbs->cb_call) ++ return cbs->cb_call(cbs->private_data, insn); ++ ++ if (insn->src_reg == BPF_PSEUDO_CALL) ++ snprintf(buff, len, "%+d", insn->imm); ++ ++ return 
buff; ++} ++ ++static const char *__func_imm_name(const struct bpf_insn_cbs *cbs, ++ const struct bpf_insn *insn, ++ u64 full_imm, char *buff, size_t len) ++{ ++ if (cbs && cbs->cb_imm) ++ return cbs->cb_imm(cbs->private_data, insn, full_imm); ++ ++ snprintf(buff, len, "0x%llx", (unsigned long long)full_imm); ++ return buff; ++} ++ ++const char *func_id_name(int id) ++{ ++ if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id]) ++ return func_id_str[id]; ++ else ++ return "unknown"; ++} ++ ++const char *const bpf_class_string[8] = { ++ [BPF_LD] = "ld", ++ [BPF_LDX] = "ldx", ++ [BPF_ST] = "st", ++ [BPF_STX] = "stx", ++ [BPF_ALU] = "alu", ++ [BPF_JMP] = "jmp", ++ [BPF_JMP32] = "jmp32", ++ [BPF_ALU64] = "alu64", ++}; ++ ++const char *const bpf_alu_string[16] = { ++ [BPF_ADD >> 4] = "+=", ++ [BPF_SUB >> 4] = "-=", ++ [BPF_MUL >> 4] = "*=", ++ [BPF_DIV >> 4] = "/=", ++ [BPF_OR >> 4] = "|=", ++ [BPF_AND >> 4] = "&=", ++ [BPF_LSH >> 4] = "<<=", ++ [BPF_RSH >> 4] = ">>=", ++ [BPF_NEG >> 4] = "neg", ++ [BPF_MOD >> 4] = "%=", ++ [BPF_XOR >> 4] = "^=", ++ [BPF_MOV >> 4] = "=", ++ [BPF_ARSH >> 4] = "s>>=", ++ [BPF_END >> 4] = "endian", ++}; ++ ++static const char *const bpf_ldst_string[] = { ++ [BPF_W >> 3] = "u32", ++ [BPF_H >> 3] = "u16", ++ [BPF_B >> 3] = "u8", ++ [BPF_DW >> 3] = "u64", ++}; ++ ++static const char *const bpf_jmp_string[16] = { ++ [BPF_JA >> 4] = "jmp", ++ [BPF_JEQ >> 4] = "==", ++ [BPF_JGT >> 4] = ">", ++ [BPF_JLT >> 4] = "<", ++ [BPF_JGE >> 4] = ">=", ++ [BPF_JLE >> 4] = "<=", ++ [BPF_JSET >> 4] = "&", ++ [BPF_JNE >> 4] = "!=", ++ [BPF_JSGT >> 4] = "s>", ++ [BPF_JSLT >> 4] = "s<", ++ [BPF_JSGE >> 4] = "s>=", ++ [BPF_JSLE >> 4] = "s<=", ++ [BPF_CALL >> 4] = "call", ++ [BPF_EXIT >> 4] = "exit", ++}; ++ ++static void print_bpf_end_insn(bpf_insn_print_t verbose, ++ void *private_data, ++ const struct bpf_insn *insn) ++{ ++ verbose(private_data, "(%02x) r%d = %s%d r%d\n", ++ insn->code, insn->dst_reg, ++ BPF_SRC(insn->code) == BPF_TO_BE ? 
"be" : "le", ++ insn->imm, insn->dst_reg); ++} ++ ++void print_bpf_insn(const struct bpf_insn_cbs *cbs, ++ const struct bpf_insn *insn, ++ bool allow_ptr_leaks) ++{ ++ const bpf_insn_print_t verbose = cbs->cb_print; ++ u8 class = BPF_CLASS(insn->code); ++ ++ if (class == BPF_ALU || class == BPF_ALU64) { ++ if (BPF_OP(insn->code) == BPF_END) { ++ if (class == BPF_ALU64) ++ verbose(cbs->private_data, "BUG_alu64_%02x\n", insn->code); ++ else ++ print_bpf_end_insn(verbose, cbs->private_data, insn); ++ } else if (BPF_OP(insn->code) == BPF_NEG) { ++ verbose(cbs->private_data, "(%02x) %c%d = -%c%d\n", ++ insn->code, class == BPF_ALU ? 'w' : 'r', ++ insn->dst_reg, class == BPF_ALU ? 'w' : 'r', ++ insn->dst_reg); ++ } else if (BPF_SRC(insn->code) == BPF_X) { ++ verbose(cbs->private_data, "(%02x) %c%d %s %c%d\n", ++ insn->code, class == BPF_ALU ? 'w' : 'r', ++ insn->dst_reg, ++ bpf_alu_string[BPF_OP(insn->code) >> 4], ++ class == BPF_ALU ? 'w' : 'r', ++ insn->src_reg); ++ } else { ++ verbose(cbs->private_data, "(%02x) %c%d %s %d\n", ++ insn->code, class == BPF_ALU ? 
'w' : 'r', ++ insn->dst_reg, ++ bpf_alu_string[BPF_OP(insn->code) >> 4], ++ insn->imm); ++ } ++ } else if (class == BPF_STX) { ++ if (BPF_MODE(insn->code) == BPF_MEM) ++ verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = r%d\n", ++ insn->code, ++ bpf_ldst_string[BPF_SIZE(insn->code) >> 3], ++ insn->dst_reg, ++ insn->off, insn->src_reg); ++ else if (BPF_MODE(insn->code) == BPF_XADD) ++ verbose(cbs->private_data, "(%02x) lock *(%s *)(r%d %+d) += r%d\n", ++ insn->code, ++ bpf_ldst_string[BPF_SIZE(insn->code) >> 3], ++ insn->dst_reg, insn->off, ++ insn->src_reg); ++ else ++ verbose(cbs->private_data, "BUG_%02x\n", insn->code); ++ } else if (class == BPF_ST) { ++ if (BPF_MODE(insn->code) != BPF_MEM) { ++ verbose(cbs->private_data, "BUG_st_%02x\n", insn->code); ++ return; ++ } ++ verbose(cbs->private_data, "(%02x) *(%s *)(r%d %+d) = %d\n", ++ insn->code, ++ bpf_ldst_string[BPF_SIZE(insn->code) >> 3], ++ insn->dst_reg, ++ insn->off, insn->imm); ++ } else if (class == BPF_LDX) { ++ if (BPF_MODE(insn->code) != BPF_MEM) { ++ verbose(cbs->private_data, "BUG_ldx_%02x\n", insn->code); ++ return; ++ } ++ verbose(cbs->private_data, "(%02x) r%d = *(%s *)(r%d %+d)\n", ++ insn->code, insn->dst_reg, ++ bpf_ldst_string[BPF_SIZE(insn->code) >> 3], ++ insn->src_reg, insn->off); ++ } else if (class == BPF_LD) { ++ if (BPF_MODE(insn->code) == BPF_ABS) { ++ verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[%d]\n", ++ insn->code, ++ bpf_ldst_string[BPF_SIZE(insn->code) >> 3], ++ insn->imm); ++ } else if (BPF_MODE(insn->code) == BPF_IND) { ++ verbose(cbs->private_data, "(%02x) r0 = *(%s *)skb[r%d + %d]\n", ++ insn->code, ++ bpf_ldst_string[BPF_SIZE(insn->code) >> 3], ++ insn->src_reg, insn->imm); ++ } else if (BPF_MODE(insn->code) == BPF_IMM && ++ BPF_SIZE(insn->code) == BPF_DW) { ++ /* At this point, we already made sure that the second ++ * part of the ldimm64 insn is accessible. 
++ */ ++ u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; ++ bool is_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD || ++ insn->src_reg == BPF_PSEUDO_MAP_VALUE; ++ char tmp[64]; ++ ++ if (is_ptr && !allow_ptr_leaks) ++ imm = 0; ++ ++ verbose(cbs->private_data, "(%02x) r%d = %s\n", ++ insn->code, insn->dst_reg, ++ __func_imm_name(cbs, insn, imm, ++ tmp, sizeof(tmp))); ++ } else { ++ verbose(cbs->private_data, "BUG_ld_%02x\n", insn->code); ++ return; ++ } ++ } else if (class == BPF_JMP32 || class == BPF_JMP) { ++ u8 opcode = BPF_OP(insn->code); ++ ++ if (opcode == BPF_CALL) { ++ char tmp[64]; ++ ++ if (insn->src_reg == BPF_PSEUDO_CALL) { ++ verbose(cbs->private_data, "(%02x) call pc%s\n", ++ insn->code, ++ __func_get_name(cbs, insn, ++ tmp, sizeof(tmp))); ++ } else { ++ strcpy(tmp, "unknown"); ++ verbose(cbs->private_data, "(%02x) call %s#%d\n", insn->code, ++ __func_get_name(cbs, insn, ++ tmp, sizeof(tmp)), ++ insn->imm); ++ } ++ } else if (insn->code == (BPF_JMP | BPF_JA)) { ++ verbose(cbs->private_data, "(%02x) goto pc%+d\n", ++ insn->code, insn->off); ++ } else if (insn->code == (BPF_JMP | BPF_EXIT)) { ++ verbose(cbs->private_data, "(%02x) exit\n", insn->code); ++ } else if (BPF_SRC(insn->code) == BPF_X) { ++ verbose(cbs->private_data, ++ "(%02x) if %c%d %s %c%d goto pc%+d\n", ++ insn->code, class == BPF_JMP32 ? 'w' : 'r', ++ insn->dst_reg, ++ bpf_jmp_string[BPF_OP(insn->code) >> 4], ++ class == BPF_JMP32 ? 'w' : 'r', ++ insn->src_reg, insn->off); ++ } else { ++ verbose(cbs->private_data, ++ "(%02x) if %c%d %s 0x%x goto pc%+d\n", ++ insn->code, class == BPF_JMP32 ? 
'w' : 'r', ++ insn->dst_reg, ++ bpf_jmp_string[BPF_OP(insn->code) >> 4], ++ insn->imm, insn->off); ++ } ++ } else { ++ verbose(cbs->private_data, "(%02x) %s\n", ++ insn->code, bpf_class_string[class]); ++ } ++} +--- /dev/null ++++ b/kernel/bpf/disasm.h +@@ -0,0 +1,40 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com ++ * Copyright (c) 2016 Facebook ++ */ ++ ++#ifndef __BPF_DISASM_H__ ++#define __BPF_DISASM_H__ ++ ++#include ++#include ++#include ++#ifndef __KERNEL__ ++#include ++#include ++#endif ++ ++extern const char *const bpf_alu_string[16]; ++extern const char *const bpf_class_string[8]; ++ ++const char *func_id_name(int id); ++ ++typedef __printf(2, 3) void (*bpf_insn_print_t)(void *private_data, ++ const char *, ...); ++typedef const char *(*bpf_insn_revmap_call_t)(void *private_data, ++ const struct bpf_insn *insn); ++typedef const char *(*bpf_insn_print_imm_t)(void *private_data, ++ const struct bpf_insn *insn, ++ __u64 full_imm); ++ ++struct bpf_insn_cbs { ++ bpf_insn_print_t cb_print; ++ bpf_insn_revmap_call_t cb_call; ++ bpf_insn_print_imm_t cb_imm; ++ void *private_data; ++}; ++ ++void print_bpf_insn(const struct bpf_insn_cbs *cbs, ++ const struct bpf_insn *insn, ++ bool allow_ptr_leaks); ++#endif +--- a/kernel/bpf/hashtab.c ++++ b/kernel/bpf/hashtab.c +@@ -1,147 +1,467 @@ ++// SPDX-License-Identifier: GPL-2.0-only + /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com +- * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of version 2 of the GNU General Public +- * License as published by the Free Software Foundation. +- * +- * This program is distributed in the hope that it will be useful, but +- * WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * General Public License for more details. 
++ * Copyright (c) 2016 Facebook + */ + #include ++#include + #include + #include +-#include ++#include ++#include ++#include ++#include "percpu_freelist.h" ++#include "bpf_lru_list.h" ++#include "map_in_map.h" ++ ++#define HTAB_CREATE_FLAG_MASK \ ++ (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \ ++ BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED) ++ ++struct bucket { ++ struct hlist_nulls_head head; ++ raw_spinlock_t lock; ++}; + + struct bpf_htab { + struct bpf_map map; +- struct hlist_head *buckets; +- raw_spinlock_t lock; +- u32 count; /* number of elements in this hashtable */ ++ struct bucket *buckets; ++ void *elems; ++ union { ++ struct pcpu_freelist freelist; ++ struct bpf_lru lru; ++ }; ++ struct htab_elem *__percpu *extra_elems; ++ atomic_t count; /* number of elements in this hashtable */ + u32 n_buckets; /* number of hash buckets */ + u32 elem_size; /* size of each element in bytes */ ++ u32 hashrnd; + }; + + /* each htab element is struct htab_elem + key + value */ + struct htab_elem { +- struct hlist_node hash_node; +- struct rcu_head rcu; ++ union { ++ struct hlist_nulls_node hash_node; ++ struct { ++ void *padding; ++ union { ++ struct bpf_htab *htab; ++ struct pcpu_freelist_node fnode; ++ }; ++ }; ++ }; ++ union { ++ struct rcu_head rcu; ++ struct bpf_lru_node lru_node; ++ }; + u32 hash; + char key[0] __aligned(8); + }; + ++static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node); ++ ++static bool htab_is_lru(const struct bpf_htab *htab) ++{ ++ return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH || ++ htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; ++} ++ ++static bool htab_is_percpu(const struct bpf_htab *htab) ++{ ++ return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH || ++ htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; ++} ++ ++static bool htab_is_prealloc(const struct bpf_htab *htab) ++{ ++ return !(htab->map.map_flags & BPF_F_NO_PREALLOC); ++} ++ ++static inline void htab_elem_set_ptr(struct htab_elem *l, u32 
key_size, ++ void __percpu *pptr) ++{ ++ *(void __percpu **)(l->key + key_size) = pptr; ++} ++ ++static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size) ++{ ++ return *(void __percpu **)(l->key + key_size); ++} ++ ++static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l) ++{ ++ return *(void **)(l->key + roundup(map->key_size, 8)); ++} ++ ++static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i) ++{ ++ return (struct htab_elem *) (htab->elems + i * htab->elem_size); ++} ++ ++static void htab_free_elems(struct bpf_htab *htab) ++{ ++ int i; ++ ++ if (!htab_is_percpu(htab)) ++ goto free_elems; ++ ++ for (i = 0; i < htab->map.max_entries; i++) { ++ void __percpu *pptr; ++ ++ pptr = htab_elem_get_ptr(get_htab_elem(htab, i), ++ htab->map.key_size); ++ free_percpu(pptr); ++ cond_resched(); ++ } ++free_elems: ++ bpf_map_area_free(htab->elems); ++} ++ ++static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, ++ u32 hash) ++{ ++ struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash); ++ struct htab_elem *l; ++ ++ if (node) { ++ l = container_of(node, struct htab_elem, lru_node); ++ memcpy(l->key, key, htab->map.key_size); ++ return l; ++ } ++ ++ return NULL; ++} ++ ++static int prealloc_init(struct bpf_htab *htab) ++{ ++ u32 num_entries = htab->map.max_entries; ++ int err = -ENOMEM, i; ++ ++ if (!htab_is_percpu(htab) && !htab_is_lru(htab)) ++ num_entries += num_possible_cpus(); ++ ++ htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries, ++ htab->map.numa_node); ++ if (!htab->elems) ++ return -ENOMEM; ++ ++ if (!htab_is_percpu(htab)) ++ goto skip_percpu_elems; ++ ++ for (i = 0; i < num_entries; i++) { ++ u32 size = round_up(htab->map.value_size, 8); ++ void __percpu *pptr; ++ ++ pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN); ++ if (!pptr) ++ goto free_elems; ++ htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, ++ pptr); ++ cond_resched(); ++ } ++ 
++skip_percpu_elems: ++ if (htab_is_lru(htab)) ++ err = bpf_lru_init(&htab->lru, ++ htab->map.map_flags & BPF_F_NO_COMMON_LRU, ++ offsetof(struct htab_elem, hash) - ++ offsetof(struct htab_elem, lru_node), ++ htab_lru_map_delete_node, ++ htab); ++ else ++ err = pcpu_freelist_init(&htab->freelist); ++ ++ if (err) ++ goto free_elems; ++ ++ if (htab_is_lru(htab)) ++ bpf_lru_populate(&htab->lru, htab->elems, ++ offsetof(struct htab_elem, lru_node), ++ htab->elem_size, num_entries); ++ else ++ pcpu_freelist_populate(&htab->freelist, ++ htab->elems + offsetof(struct htab_elem, fnode), ++ htab->elem_size, num_entries); ++ ++ return 0; ++ ++free_elems: ++ htab_free_elems(htab); ++ return err; ++} ++ ++static void prealloc_destroy(struct bpf_htab *htab) ++{ ++ htab_free_elems(htab); ++ ++ if (htab_is_lru(htab)) ++ bpf_lru_destroy(&htab->lru); ++ else ++ pcpu_freelist_destroy(&htab->freelist); ++} ++ ++static int alloc_extra_elems(struct bpf_htab *htab) ++{ ++ struct htab_elem *__percpu *pptr, *l_new; ++ struct pcpu_freelist_node *l; ++ int cpu; ++ ++ pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8, ++ GFP_USER | __GFP_NOWARN); ++ if (!pptr) ++ return -ENOMEM; ++ ++ for_each_possible_cpu(cpu) { ++ l = pcpu_freelist_pop(&htab->freelist); ++ /* pop will succeed, since prealloc_init() ++ * preallocated extra num_possible_cpus elements ++ */ ++ l_new = container_of(l, struct htab_elem, fnode); ++ *per_cpu_ptr(pptr, cpu) = l_new; ++ } ++ htab->extra_elems = pptr; ++ return 0; ++} ++ + /* Called from syscall */ +-static struct bpf_map *htab_map_alloc(union bpf_attr *attr) ++static int htab_map_alloc_check(union bpf_attr *attr) + { +- struct bpf_htab *htab; +- int err, i; ++ bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || ++ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); ++ bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || ++ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); ++ /* percpu_lru means each cpu has its own LRU list. 
++ * it is different from BPF_MAP_TYPE_PERCPU_HASH where ++ * the map's value itself is percpu. percpu_lru has ++ * nothing to do with the map's value. ++ */ ++ bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); ++ bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); ++ bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED); ++ int numa_node = bpf_map_attr_numa_node(attr); ++ ++ BUILD_BUG_ON(offsetof(struct htab_elem, htab) != ++ offsetof(struct htab_elem, hash_node.pprev)); ++ BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) != ++ offsetof(struct htab_elem, hash_node.pprev)); ++ ++ if (lru && !capable(CAP_SYS_ADMIN)) ++ /* LRU implementation is much complicated than other ++ * maps. Hence, limit to CAP_SYS_ADMIN for now. ++ */ ++ return -EPERM; + +- htab = kzalloc(sizeof(*htab), GFP_USER); +- if (!htab) +- return ERR_PTR(-ENOMEM); ++ if (zero_seed && !capable(CAP_SYS_ADMIN)) ++ /* Guard against local DoS, and discourage production use. */ ++ return -EPERM; + +- /* mandatory map attributes */ +- htab->map.key_size = attr->key_size; +- htab->map.value_size = attr->value_size; +- htab->map.max_entries = attr->max_entries; ++ if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK || ++ !bpf_map_flags_access_ok(attr->map_flags)) ++ return -EINVAL; ++ ++ if (!lru && percpu_lru) ++ return -EINVAL; ++ ++ if (lru && !prealloc) ++ return -ENOTSUPP; ++ ++ if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru)) ++ return -EINVAL; + + /* check sanity of attributes. 
+ * value_size == 0 may be allowed in the future to use map as a set + */ +- err = -EINVAL; +- if (htab->map.max_entries == 0 || htab->map.key_size == 0 || +- htab->map.value_size == 0) +- goto free_htab; +- +- /* hash table size must be power of 2 */ +- htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); ++ if (attr->max_entries == 0 || attr->key_size == 0 || ++ attr->value_size == 0) ++ return -EINVAL; + +- err = -E2BIG; +- if (htab->map.key_size > MAX_BPF_STACK) ++ if (attr->key_size > MAX_BPF_STACK) + /* eBPF programs initialize keys on stack, so they cannot be + * larger than max stack size + */ +- goto free_htab; ++ return -E2BIG; + +- if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) - ++ if (attr->value_size >= KMALLOC_MAX_SIZE - + MAX_BPF_STACK - sizeof(struct htab_elem)) + /* if value_size is bigger, the user space won't be able to + * access the elements via bpf syscall. This check also makes + * sure that the elem_size doesn't overflow and it's + * kmalloc-able later in htab_map_update_elem() + */ +- goto free_htab; ++ return -E2BIG; ++ ++ return 0; ++} ++ ++static struct bpf_map *htab_map_alloc(union bpf_attr *attr) ++{ ++ bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH || ++ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); ++ bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH || ++ attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH); ++ /* percpu_lru means each cpu has its own LRU list. ++ * it is different from BPF_MAP_TYPE_PERCPU_HASH where ++ * the map's value itself is percpu. percpu_lru has ++ * nothing to do with the map's value. ++ */ ++ bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU); ++ bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC); ++ struct bpf_htab *htab; ++ int err, i; ++ u64 cost; ++ ++ htab = kzalloc(sizeof(*htab), GFP_USER); ++ if (!htab) ++ return ERR_PTR(-ENOMEM); ++ ++ bpf_map_init_from_attr(&htab->map, attr); ++ ++ if (percpu_lru) { ++ /* ensure each CPU's lru list has >=1 elements. 
++ * since we are at it, make each lru list has the same ++ * number of elements. ++ */ ++ htab->map.max_entries = roundup(attr->max_entries, ++ num_possible_cpus()); ++ if (htab->map.max_entries < attr->max_entries) ++ htab->map.max_entries = rounddown(attr->max_entries, ++ num_possible_cpus()); ++ } ++ ++ /* hash table size must be power of 2 */ ++ htab->n_buckets = roundup_pow_of_two(htab->map.max_entries); + + htab->elem_size = sizeof(struct htab_elem) + +- round_up(htab->map.key_size, 8) + +- htab->map.value_size; ++ round_up(htab->map.key_size, 8); ++ if (percpu) ++ htab->elem_size += sizeof(void *); ++ else ++ htab->elem_size += round_up(htab->map.value_size, 8); + ++ err = -E2BIG; + /* prevent zero size kmalloc and check for u32 overflow */ + if (htab->n_buckets == 0 || +- htab->n_buckets > U32_MAX / sizeof(struct hlist_head)) ++ htab->n_buckets > U32_MAX / sizeof(struct bucket)) + goto free_htab; + +- if ((u64) htab->n_buckets * sizeof(struct hlist_head) + +- (u64) htab->elem_size * htab->map.max_entries >= +- U32_MAX - PAGE_SIZE) +- /* make sure page count doesn't overflow */ +- goto free_htab; ++ cost = (u64) htab->n_buckets * sizeof(struct bucket) + ++ (u64) htab->elem_size * htab->map.max_entries; + +- htab->map.pages = round_up(htab->n_buckets * sizeof(struct hlist_head) + +- htab->elem_size * htab->map.max_entries, +- PAGE_SIZE) >> PAGE_SHIFT; ++ if (percpu) ++ cost += (u64) round_up(htab->map.value_size, 8) * ++ num_possible_cpus() * htab->map.max_entries; ++ else ++ cost += (u64) htab->elem_size * num_possible_cpus(); ++ ++ /* if map size is larger than memlock limit, reject it */ ++ err = bpf_map_charge_init(&htab->map.memory, cost); ++ if (err) ++ goto free_htab; + + err = -ENOMEM; +- htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct hlist_head), +- GFP_USER | __GFP_NOWARN); ++ htab->buckets = bpf_map_area_alloc(htab->n_buckets * ++ sizeof(struct bucket), ++ htab->map.numa_node); ++ if (!htab->buckets) ++ goto free_charge; ++ ++ if 
(htab->map.map_flags & BPF_F_ZERO_SEED) ++ htab->hashrnd = 0; ++ else ++ htab->hashrnd = get_random_int(); + +- if (!htab->buckets) { +- htab->buckets = vmalloc(htab->n_buckets * sizeof(struct hlist_head)); +- if (!htab->buckets) +- goto free_htab; ++ for (i = 0; i < htab->n_buckets; i++) { ++ INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i); ++ raw_spin_lock_init(&htab->buckets[i].lock); + } + +- for (i = 0; i < htab->n_buckets; i++) +- INIT_HLIST_HEAD(&htab->buckets[i]); +- +- raw_spin_lock_init(&htab->lock); +- htab->count = 0; ++ if (prealloc) { ++ err = prealloc_init(htab); ++ if (err) ++ goto free_buckets; ++ ++ if (!percpu && !lru) { ++ /* lru itself can remove the least used element, so ++ * there is no need for an extra elem during map_update. ++ */ ++ err = alloc_extra_elems(htab); ++ if (err) ++ goto free_prealloc; ++ } ++ } + + return &htab->map; + ++free_prealloc: ++ prealloc_destroy(htab); ++free_buckets: ++ bpf_map_area_free(htab->buckets); ++free_charge: ++ bpf_map_charge_finish(&htab->map.memory); + free_htab: + kfree(htab); + return ERR_PTR(err); + } + +-static inline u32 htab_map_hash(const void *key, u32 key_len) ++static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd) + { +- return jhash(key, key_len, 0); ++ return jhash(key, key_len, hashrnd); + } + +-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash) ++static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash) + { + return &htab->buckets[hash & (htab->n_buckets - 1)]; + } + +-static struct htab_elem *lookup_elem_raw(struct hlist_head *head, u32 hash, ++static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash) ++{ ++ return &__select_bucket(htab, hash)->head; ++} ++ ++/* this lookup function can only be called with bucket lock taken */ ++static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash, + void *key, u32 key_size) + { ++ struct hlist_nulls_node *n; + struct htab_elem 
*l; + +- hlist_for_each_entry_rcu(l, head, hash_node) ++ hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) + if (l->hash == hash && !memcmp(&l->key, key, key_size)) + return l; + + return NULL; + } + +-/* Called from syscall or from eBPF program */ +-static void *htab_map_lookup_elem(struct bpf_map *map, void *key) ++/* can be called without bucket lock. it will repeat the loop in ++ * the unlikely event when elements moved from one bucket into another ++ * while link list is being walked ++ */ ++static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head, ++ u32 hash, void *key, ++ u32 key_size, u32 n_buckets) ++{ ++ struct hlist_nulls_node *n; ++ struct htab_elem *l; ++ ++again: ++ hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) ++ if (l->hash == hash && !memcmp(&l->key, key, key_size)) ++ return l; ++ ++ if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1)))) ++ goto again; ++ ++ return NULL; ++} ++ ++/* Called from syscall or from eBPF program directly, so ++ * arguments have to match bpf_map_lookup_elem() exactly. ++ * The return value is adjusted by BPF instructions ++ * in htab_map_gen_lookup(). 
++ */ ++static void *__htab_map_lookup_elem(struct bpf_map *map, void *key) + { + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); +- struct hlist_head *head; ++ struct hlist_nulls_head *head; + struct htab_elem *l; + u32 hash, key_size; + +@@ -150,11 +470,18 @@ static void *htab_map_lookup_elem(struct + + key_size = map->key_size; + +- hash = htab_map_hash(key, key_size); ++ hash = htab_map_hash(key, key_size, htab->hashrnd); + + head = select_bucket(htab, hash); + +- l = lookup_elem_raw(head, hash, key, key_size); ++ l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); ++ ++ return l; ++} ++ ++static void *htab_map_lookup_elem(struct bpf_map *map, void *key) ++{ ++ struct htab_elem *l = __htab_map_lookup_elem(map, key); + + if (l) + return l->key + round_up(map->key_size, 8); +@@ -162,33 +489,138 @@ static void *htab_map_lookup_elem(struct + return NULL; + } + ++/* inline bpf_map_lookup_elem() call. ++ * Instead of: ++ * bpf_prog ++ * bpf_map_lookup_elem ++ * map->ops->map_lookup_elem ++ * htab_map_lookup_elem ++ * __htab_map_lookup_elem ++ * do: ++ * bpf_prog ++ * __htab_map_lookup_elem ++ */ ++static u32 htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) ++{ ++ struct bpf_insn *insn = insn_buf; ++ const int ret = BPF_REG_0; ++ ++ BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, ++ (void *(*)(struct bpf_map *map, void *key))NULL)); ++ *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem)); ++ *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1); ++ *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, ++ offsetof(struct htab_elem, key) + ++ round_up(map->key_size, 8)); ++ return insn - insn_buf; ++} ++ ++static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, ++ void *key, const bool mark) ++{ ++ struct htab_elem *l = __htab_map_lookup_elem(map, key); ++ ++ if (l) { ++ if (mark) ++ bpf_lru_node_set_ref(&l->lru_node); ++ return l->key + round_up(map->key_size, 8); ++ } ++ ++ return NULL; ++} ++ ++static void 
*htab_lru_map_lookup_elem(struct bpf_map *map, void *key) ++{ ++ return __htab_lru_map_lookup_elem(map, key, true); ++} ++ ++static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) ++{ ++ return __htab_lru_map_lookup_elem(map, key, false); ++} ++ ++static u32 htab_lru_map_gen_lookup(struct bpf_map *map, ++ struct bpf_insn *insn_buf) ++{ ++ struct bpf_insn *insn = insn_buf; ++ const int ret = BPF_REG_0; ++ const int ref_reg = BPF_REG_1; ++ ++ BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, ++ (void *(*)(struct bpf_map *map, void *key))NULL)); ++ *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem)); ++ *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4); ++ *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret, ++ offsetof(struct htab_elem, lru_node) + ++ offsetof(struct bpf_lru_node, ref)); ++ *insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1); ++ *insn++ = BPF_ST_MEM(BPF_B, ret, ++ offsetof(struct htab_elem, lru_node) + ++ offsetof(struct bpf_lru_node, ref), ++ 1); ++ *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, ++ offsetof(struct htab_elem, key) + ++ round_up(map->key_size, 8)); ++ return insn - insn_buf; ++} ++ ++/* It is called from the bpf_lru_list when the LRU needs to delete ++ * older elements from the htab. 
++ */ ++static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node) ++{ ++ struct bpf_htab *htab = (struct bpf_htab *)arg; ++ struct htab_elem *l = NULL, *tgt_l; ++ struct hlist_nulls_head *head; ++ struct hlist_nulls_node *n; ++ unsigned long flags; ++ struct bucket *b; ++ ++ tgt_l = container_of(node, struct htab_elem, lru_node); ++ b = __select_bucket(htab, tgt_l->hash); ++ head = &b->head; ++ ++ raw_spin_lock_irqsave(&b->lock, flags); ++ ++ hlist_nulls_for_each_entry_rcu(l, n, head, hash_node) ++ if (l == tgt_l) { ++ hlist_nulls_del_rcu(&l->hash_node); ++ break; ++ } ++ ++ raw_spin_unlock_irqrestore(&b->lock, flags); ++ ++ return l == tgt_l; ++} ++ + /* Called from syscall */ + static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key) + { + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); +- struct hlist_head *head; ++ struct hlist_nulls_head *head; + struct htab_elem *l, *next_l; + u32 hash, key_size; +- int i; ++ int i = 0; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + key_size = map->key_size; + +- hash = htab_map_hash(key, key_size); ++ if (!key) ++ goto find_first_elem; ++ ++ hash = htab_map_hash(key, key_size, htab->hashrnd); + + head = select_bucket(htab, hash); + + /* lookup the key */ +- l = lookup_elem_raw(head, hash, key, key_size); ++ l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets); + +- if (!l) { +- i = 0; ++ if (!l) + goto find_first_elem; +- } + + /* key was found, get next key in the same bucket */ +- next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)), ++ next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)), + struct htab_elem, hash_node); + + if (next_l) { +@@ -207,7 +639,7 @@ find_first_elem: + head = select_bucket(htab, i); + + /* pick first element in the bucket */ +- next_l = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)), ++ next_l = 
hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)), + struct htab_elem, hash_node); + if (next_l) { + /* if it's not empty, just return it */ +@@ -216,90 +648,491 @@ find_first_elem: + } + } + +- /* itereated over all buckets and all elements */ ++ /* iterated over all buckets and all elements */ + return -ENOENT; + } + ++static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l) ++{ ++ if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH) ++ free_percpu(htab_elem_get_ptr(l, htab->map.key_size)); ++ kfree(l); ++} ++ ++static void htab_elem_free_rcu(struct rcu_head *head) ++{ ++ struct htab_elem *l = container_of(head, struct htab_elem, rcu); ++ struct bpf_htab *htab = l->htab; ++ ++ htab_elem_free(htab, l); ++} ++ ++static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l) ++{ ++ struct bpf_map *map = &htab->map; ++ void *ptr; ++ ++ if (map->ops->map_fd_put_ptr) { ++ ptr = fd_htab_map_get_ptr(map, l); ++ map->ops->map_fd_put_ptr(ptr); ++ } ++} ++ ++static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) ++{ ++ htab_put_fd_value(htab, l); ++ ++ if (htab_is_prealloc(htab)) { ++ __pcpu_freelist_push(&htab->freelist, &l->fnode); ++ } else { ++ atomic_dec(&htab->count); ++ l->htab = htab; ++ call_rcu(&l->rcu, htab_elem_free_rcu); ++ } ++} ++ ++static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, ++ void *value, bool onallcpus) ++{ ++ if (!onallcpus) { ++ /* copy true value_size bytes */ ++ memcpy(this_cpu_ptr(pptr), value, htab->map.value_size); ++ } else { ++ u32 size = round_up(htab->map.value_size, 8); ++ int off = 0, cpu; ++ ++ for_each_possible_cpu(cpu) { ++ bpf_long_memcpy(per_cpu_ptr(pptr, cpu), ++ value + off, size); ++ off += size; ++ } ++ } ++} ++ ++static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr, ++ void *value, bool onallcpus) ++{ ++ /* When using prealloc and not setting the initial value on all cpus, ++ * zero-fill element values for other cpus (just as 
what happens when ++ * not using prealloc). Otherwise, bpf program has no way to ensure ++ * known initial values for cpus other than current one ++ * (onallcpus=false always when coming from bpf prog). ++ */ ++ if (htab_is_prealloc(htab) && !onallcpus) { ++ u32 size = round_up(htab->map.value_size, 8); ++ int current_cpu = raw_smp_processor_id(); ++ int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ if (cpu == current_cpu) ++ bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value, ++ size); ++ else ++ memset(per_cpu_ptr(pptr, cpu), 0, size); ++ } ++ } else { ++ pcpu_copy_value(htab, pptr, value, onallcpus); ++ } ++} ++ ++static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab) ++{ ++ return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS && ++ BITS_PER_LONG == 64; ++} ++ ++static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, ++ void *value, u32 key_size, u32 hash, ++ bool percpu, bool onallcpus, ++ struct htab_elem *old_elem) ++{ ++ u32 size = htab->map.value_size; ++ bool prealloc = htab_is_prealloc(htab); ++ struct htab_elem *l_new, **pl_new; ++ void __percpu *pptr; ++ ++ if (prealloc) { ++ if (old_elem) { ++ /* if we're updating the existing element, ++ * use per-cpu extra elems to avoid freelist_pop/push ++ */ ++ pl_new = this_cpu_ptr(htab->extra_elems); ++ l_new = *pl_new; ++ htab_put_fd_value(htab, old_elem); ++ *pl_new = old_elem; ++ } else { ++ struct pcpu_freelist_node *l; ++ ++ l = __pcpu_freelist_pop(&htab->freelist); ++ if (!l) ++ return ERR_PTR(-E2BIG); ++ l_new = container_of(l, struct htab_elem, fnode); ++ } ++ } else { ++ if (atomic_inc_return(&htab->count) > htab->map.max_entries) ++ if (!old_elem) { ++ /* when map is full and update() is replacing ++ * old element, it's ok to allocate, since ++ * old element will be freed immediately. 
++ * Otherwise return an error ++ */ ++ l_new = ERR_PTR(-E2BIG); ++ goto dec_count; ++ } ++ l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN, ++ htab->map.numa_node); ++ if (!l_new) { ++ l_new = ERR_PTR(-ENOMEM); ++ goto dec_count; ++ } ++ check_and_init_map_lock(&htab->map, ++ l_new->key + round_up(key_size, 8)); ++ } ++ ++ memcpy(l_new->key, key, key_size); ++ if (percpu) { ++ size = round_up(size, 8); ++ if (prealloc) { ++ pptr = htab_elem_get_ptr(l_new, key_size); ++ } else { ++ /* alloc_percpu zero-fills */ ++ pptr = __alloc_percpu_gfp(size, 8, ++ GFP_ATOMIC | __GFP_NOWARN); ++ if (!pptr) { ++ kfree(l_new); ++ l_new = ERR_PTR(-ENOMEM); ++ goto dec_count; ++ } ++ } ++ ++ pcpu_init_value(htab, pptr, value, onallcpus); ++ ++ if (!prealloc) ++ htab_elem_set_ptr(l_new, key_size, pptr); ++ } else if (fd_htab_map_needs_adjust(htab)) { ++ size = round_up(size, 8); ++ memcpy(l_new->key + round_up(key_size, 8), value, size); ++ } else { ++ copy_map_value(&htab->map, ++ l_new->key + round_up(key_size, 8), ++ value); ++ } ++ ++ l_new->hash = hash; ++ return l_new; ++dec_count: ++ atomic_dec(&htab->count); ++ return l_new; ++} ++ ++static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old, ++ u64 map_flags) ++{ ++ if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST) ++ /* elem already exists */ ++ return -EEXIST; ++ ++ if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST) ++ /* elem doesn't exist, cannot update it */ ++ return -ENOENT; ++ ++ return 0; ++} ++ + /* Called from syscall or from eBPF program */ + static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, + u64 map_flags) + { + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); +- struct htab_elem *l_new, *l_old; +- struct hlist_head *head; ++ struct htab_elem *l_new = NULL, *l_old; ++ struct hlist_nulls_head *head; + unsigned long flags; +- u32 key_size; ++ struct bucket *b; ++ u32 key_size, hash; + int ret; + +- if (map_flags > BPF_EXIST) ++ if 
(unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST)) + /* unknown flags */ + return -EINVAL; + + WARN_ON_ONCE(!rcu_read_lock_held()); + +- /* allocate new element outside of lock */ +- l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN); +- if (!l_new) +- return -ENOMEM; +- + key_size = map->key_size; + +- memcpy(l_new->key, key, key_size); +- memcpy(l_new->key + round_up(key_size, 8), value, map->value_size); ++ hash = htab_map_hash(key, key_size, htab->hashrnd); ++ ++ b = __select_bucket(htab, hash); ++ head = &b->head; + +- l_new->hash = htab_map_hash(l_new->key, key_size); ++ if (unlikely(map_flags & BPF_F_LOCK)) { ++ if (unlikely(!map_value_has_spin_lock(map))) ++ return -EINVAL; ++ /* find an element without taking the bucket lock */ ++ l_old = lookup_nulls_elem_raw(head, hash, key, key_size, ++ htab->n_buckets); ++ ret = check_flags(htab, l_old, map_flags); ++ if (ret) ++ return ret; ++ if (l_old) { ++ /* grab the element lock and update value in place */ ++ copy_map_value_locked(map, ++ l_old->key + round_up(key_size, 8), ++ value, false); ++ return 0; ++ } ++ /* fall through, grab the bucket lock and lookup again. ++ * 99.9% chance that the element won't be found, ++ * but second lookup under lock has to be done. ++ */ ++ } + + /* bpf_map_update_elem() can be called in_irq() */ +- raw_spin_lock_irqsave(&htab->lock, flags); ++ raw_spin_lock_irqsave(&b->lock, flags); + +- head = select_bucket(htab, l_new->hash); ++ l_old = lookup_elem_raw(head, hash, key, key_size); + +- l_old = lookup_elem_raw(head, l_new->hash, key, key_size); ++ ret = check_flags(htab, l_old, map_flags); ++ if (ret) ++ goto err; + +- if (!l_old && unlikely(htab->count >= map->max_entries)) { +- /* if elem with this 'key' doesn't exist and we've reached +- * max_entries limit, fail insertion of new elem ++ if (unlikely(l_old && (map_flags & BPF_F_LOCK))) { ++ /* first lookup without the bucket lock didn't find the element, ++ * but second lookup with the bucket lock found it. 
++ * This case is highly unlikely, but has to be dealt with: ++ * grab the element lock in addition to the bucket lock ++ * and update element in place + */ +- ret = -E2BIG; ++ copy_map_value_locked(map, ++ l_old->key + round_up(key_size, 8), ++ value, false); ++ ret = 0; + goto err; + } + +- if (l_old && map_flags == BPF_NOEXIST) { +- /* elem already exists */ +- ret = -EEXIST; ++ l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, ++ l_old); ++ if (IS_ERR(l_new)) { ++ /* all pre-allocated elements are in use or memory exhausted */ ++ ret = PTR_ERR(l_new); + goto err; + } + +- if (!l_old && map_flags == BPF_EXIST) { +- /* elem doesn't exist, cannot update it */ +- ret = -ENOENT; +- goto err; ++ /* add new element to the head of the list, so that ++ * concurrent search will find it before old elem ++ */ ++ hlist_nulls_add_head_rcu(&l_new->hash_node, head); ++ if (l_old) { ++ hlist_nulls_del_rcu(&l_old->hash_node); ++ if (!htab_is_prealloc(htab)) ++ free_htab_elem(htab, l_old); + } ++ ret = 0; ++err: ++ raw_spin_unlock_irqrestore(&b->lock, flags); ++ return ret; ++} + +- /* add new element to the head of the list, so that concurrent +- * search will find it before old elem ++static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value, ++ u64 map_flags) ++{ ++ struct bpf_htab *htab = container_of(map, struct bpf_htab, map); ++ struct htab_elem *l_new, *l_old = NULL; ++ struct hlist_nulls_head *head; ++ unsigned long flags; ++ struct bucket *b; ++ u32 key_size, hash; ++ int ret; ++ ++ if (unlikely(map_flags > BPF_EXIST)) ++ /* unknown flags */ ++ return -EINVAL; ++ ++ WARN_ON_ONCE(!rcu_read_lock_held()); ++ ++ key_size = map->key_size; ++ ++ hash = htab_map_hash(key, key_size, htab->hashrnd); ++ ++ b = __select_bucket(htab, hash); ++ head = &b->head; ++ ++ /* For LRU, we need to alloc before taking bucket's ++ * spinlock because getting free nodes from LRU may need ++ * to remove older elements from htab and this removal ++ * 
operation will need a bucket lock. + */ +- hlist_add_head_rcu(&l_new->hash_node, head); ++ l_new = prealloc_lru_pop(htab, key, hash); ++ if (!l_new) ++ return -ENOMEM; ++ memcpy(l_new->key + round_up(map->key_size, 8), value, map->value_size); ++ ++ /* bpf_map_update_elem() can be called in_irq() */ ++ raw_spin_lock_irqsave(&b->lock, flags); ++ ++ l_old = lookup_elem_raw(head, hash, key, key_size); ++ ++ ret = check_flags(htab, l_old, map_flags); ++ if (ret) ++ goto err; ++ ++ /* add new element to the head of the list, so that ++ * concurrent search will find it before old elem ++ */ ++ hlist_nulls_add_head_rcu(&l_new->hash_node, head); ++ if (l_old) { ++ bpf_lru_node_set_ref(&l_new->lru_node); ++ hlist_nulls_del_rcu(&l_old->hash_node); ++ } ++ ret = 0; ++ ++err: ++ raw_spin_unlock_irqrestore(&b->lock, flags); ++ ++ if (ret) ++ bpf_lru_push_free(&htab->lru, &l_new->lru_node); ++ else if (l_old) ++ bpf_lru_push_free(&htab->lru, &l_old->lru_node); ++ ++ return ret; ++} ++ ++static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, ++ void *value, u64 map_flags, ++ bool onallcpus) ++{ ++ struct bpf_htab *htab = container_of(map, struct bpf_htab, map); ++ struct htab_elem *l_new = NULL, *l_old; ++ struct hlist_nulls_head *head; ++ unsigned long flags; ++ struct bucket *b; ++ u32 key_size, hash; ++ int ret; ++ ++ if (unlikely(map_flags > BPF_EXIST)) ++ /* unknown flags */ ++ return -EINVAL; ++ ++ WARN_ON_ONCE(!rcu_read_lock_held()); ++ ++ key_size = map->key_size; ++ ++ hash = htab_map_hash(key, key_size, htab->hashrnd); ++ ++ b = __select_bucket(htab, hash); ++ head = &b->head; ++ ++ /* bpf_map_update_elem() can be called in_irq() */ ++ raw_spin_lock_irqsave(&b->lock, flags); ++ ++ l_old = lookup_elem_raw(head, hash, key, key_size); ++ ++ ret = check_flags(htab, l_old, map_flags); ++ if (ret) ++ goto err; ++ + if (l_old) { +- hlist_del_rcu(&l_old->hash_node); +- kfree_rcu(l_old, rcu); ++ /* per-cpu hash map can update value in-place */ ++ 
pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), ++ value, onallcpus); + } else { +- htab->count++; ++ l_new = alloc_htab_elem(htab, key, value, key_size, ++ hash, true, onallcpus, NULL); ++ if (IS_ERR(l_new)) { ++ ret = PTR_ERR(l_new); ++ goto err; ++ } ++ hlist_nulls_add_head_rcu(&l_new->hash_node, head); + } +- raw_spin_unlock_irqrestore(&htab->lock, flags); ++ ret = 0; ++err: ++ raw_spin_unlock_irqrestore(&b->lock, flags); ++ return ret; ++} + +- return 0; ++static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, ++ void *value, u64 map_flags, ++ bool onallcpus) ++{ ++ struct bpf_htab *htab = container_of(map, struct bpf_htab, map); ++ struct htab_elem *l_new = NULL, *l_old; ++ struct hlist_nulls_head *head; ++ unsigned long flags; ++ struct bucket *b; ++ u32 key_size, hash; ++ int ret; ++ ++ if (unlikely(map_flags > BPF_EXIST)) ++ /* unknown flags */ ++ return -EINVAL; ++ ++ WARN_ON_ONCE(!rcu_read_lock_held()); ++ ++ key_size = map->key_size; ++ ++ hash = htab_map_hash(key, key_size, htab->hashrnd); ++ ++ b = __select_bucket(htab, hash); ++ head = &b->head; ++ ++ /* For LRU, we need to alloc before taking bucket's ++ * spinlock because LRU's elem alloc may need ++ * to remove older elem from htab and this removal ++ * operation will need a bucket lock. 
++ */ ++ if (map_flags != BPF_EXIST) { ++ l_new = prealloc_lru_pop(htab, key, hash); ++ if (!l_new) ++ return -ENOMEM; ++ } ++ ++ /* bpf_map_update_elem() can be called in_irq() */ ++ raw_spin_lock_irqsave(&b->lock, flags); ++ ++ l_old = lookup_elem_raw(head, hash, key, key_size); ++ ++ ret = check_flags(htab, l_old, map_flags); ++ if (ret) ++ goto err; ++ ++ if (l_old) { ++ bpf_lru_node_set_ref(&l_old->lru_node); ++ ++ /* per-cpu hash map can update value in-place */ ++ pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size), ++ value, onallcpus); ++ } else { ++ pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size), ++ value, onallcpus); ++ hlist_nulls_add_head_rcu(&l_new->hash_node, head); ++ l_new = NULL; ++ } ++ ret = 0; + err: +- raw_spin_unlock_irqrestore(&htab->lock, flags); +- kfree(l_new); ++ raw_spin_unlock_irqrestore(&b->lock, flags); ++ if (l_new) ++ bpf_lru_push_free(&htab->lru, &l_new->lru_node); + return ret; + } + ++static int htab_percpu_map_update_elem(struct bpf_map *map, void *key, ++ void *value, u64 map_flags) ++{ ++ return __htab_percpu_map_update_elem(map, key, value, map_flags, false); ++} ++ ++static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key, ++ void *value, u64 map_flags) ++{ ++ return __htab_lru_percpu_map_update_elem(map, key, value, map_flags, ++ false); ++} ++ + /* Called from syscall or from eBPF program */ + static int htab_map_delete_elem(struct bpf_map *map, void *key) + { + struct bpf_htab *htab = container_of(map, struct bpf_htab, map); +- struct hlist_head *head; ++ struct hlist_nulls_head *head; ++ struct bucket *b; + struct htab_elem *l; + unsigned long flags; + u32 hash, key_size; +@@ -309,22 +1142,54 @@ static int htab_map_delete_elem(struct b + + key_size = map->key_size; + +- hash = htab_map_hash(key, key_size); ++ hash = htab_map_hash(key, key_size, htab->hashrnd); ++ b = __select_bucket(htab, hash); ++ head = &b->head; + +- raw_spin_lock_irqsave(&htab->lock, flags); ++ 
raw_spin_lock_irqsave(&b->lock, flags); + +- head = select_bucket(htab, hash); ++ l = lookup_elem_raw(head, hash, key, key_size); ++ ++ if (l) { ++ hlist_nulls_del_rcu(&l->hash_node); ++ free_htab_elem(htab, l); ++ ret = 0; ++ } ++ ++ raw_spin_unlock_irqrestore(&b->lock, flags); ++ return ret; ++} ++ ++static int htab_lru_map_delete_elem(struct bpf_map *map, void *key) ++{ ++ struct bpf_htab *htab = container_of(map, struct bpf_htab, map); ++ struct hlist_nulls_head *head; ++ struct bucket *b; ++ struct htab_elem *l; ++ unsigned long flags; ++ u32 hash, key_size; ++ int ret = -ENOENT; ++ ++ WARN_ON_ONCE(!rcu_read_lock_held()); ++ ++ key_size = map->key_size; ++ ++ hash = htab_map_hash(key, key_size, htab->hashrnd); ++ b = __select_bucket(htab, hash); ++ head = &b->head; ++ ++ raw_spin_lock_irqsave(&b->lock, flags); + + l = lookup_elem_raw(head, hash, key, key_size); + + if (l) { +- hlist_del_rcu(&l->hash_node); +- htab->count--; +- kfree_rcu(l, rcu); ++ hlist_nulls_del_rcu(&l->hash_node); + ret = 0; + } + +- raw_spin_unlock_irqrestore(&htab->lock, flags); ++ raw_spin_unlock_irqrestore(&b->lock, flags); ++ if (l) ++ bpf_lru_push_free(&htab->lru, &l->lru_node); + return ret; + } + +@@ -333,14 +1198,13 @@ static void delete_all_elements(struct b + int i; + + for (i = 0; i < htab->n_buckets; i++) { +- struct hlist_head *head = select_bucket(htab, i); +- struct hlist_node *n; ++ struct hlist_nulls_head *head = select_bucket(htab, i); ++ struct hlist_nulls_node *n; + struct htab_elem *l; + +- hlist_for_each_entry_safe(l, n, head, hash_node) { +- hlist_del_rcu(&l->hash_node); +- htab->count--; +- kfree(l); ++ hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { ++ hlist_nulls_del_rcu(&l->hash_node); ++ htab_elem_free(htab, l); + } + } + } +@@ -357,31 +1221,320 @@ static void htab_map_free(struct bpf_map + */ + synchronize_rcu(); + +- /* some of kfree_rcu() callbacks for elements of this map may not have +- * executed. It's ok. 
Proceed to free residual elements and map itself ++ /* some of free_htab_elem() callbacks for elements of this map may ++ * not have executed. Wait for them. + */ +- delete_all_elements(htab); +- kvfree(htab->buckets); ++ rcu_barrier(); ++ if (!htab_is_prealloc(htab)) ++ delete_all_elements(htab); ++ else ++ prealloc_destroy(htab); ++ ++ free_percpu(htab->extra_elems); ++ bpf_map_area_free(htab->buckets); + kfree(htab); + } + +-static const struct bpf_map_ops htab_ops = { ++static void htab_map_seq_show_elem(struct bpf_map *map, void *key, ++ struct seq_file *m) ++{ ++ void *value; ++ ++ rcu_read_lock(); ++ ++ value = htab_map_lookup_elem(map, key); ++ if (!value) { ++ rcu_read_unlock(); ++ return; ++ } ++ ++ btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); ++ seq_puts(m, ": "); ++ btf_type_seq_show(map->btf, map->btf_value_type_id, value, m); ++ seq_puts(m, "\n"); ++ ++ rcu_read_unlock(); ++} ++ ++const struct bpf_map_ops htab_map_ops = { ++ .map_alloc_check = htab_map_alloc_check, + .map_alloc = htab_map_alloc, + .map_free = htab_map_free, + .map_get_next_key = htab_map_get_next_key, + .map_lookup_elem = htab_map_lookup_elem, + .map_update_elem = htab_map_update_elem, + .map_delete_elem = htab_map_delete_elem, ++ .map_gen_lookup = htab_map_gen_lookup, ++ .map_seq_show_elem = htab_map_seq_show_elem, + }; + +-static struct bpf_map_type_list htab_type __read_mostly = { +- .ops = &htab_ops, +- .type = BPF_MAP_TYPE_HASH, ++const struct bpf_map_ops htab_lru_map_ops = { ++ .map_alloc_check = htab_map_alloc_check, ++ .map_alloc = htab_map_alloc, ++ .map_free = htab_map_free, ++ .map_get_next_key = htab_map_get_next_key, ++ .map_lookup_elem = htab_lru_map_lookup_elem, ++ .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys, ++ .map_update_elem = htab_lru_map_update_elem, ++ .map_delete_elem = htab_lru_map_delete_elem, ++ .map_gen_lookup = htab_lru_map_gen_lookup, ++ .map_seq_show_elem = htab_map_seq_show_elem, + }; + +-static int __init 
register_htab_map(void) ++/* Called from eBPF program */ ++static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key) + { +- bpf_register_map_type(&htab_type); +- return 0; ++ struct htab_elem *l = __htab_map_lookup_elem(map, key); ++ ++ if (l) ++ return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); ++ else ++ return NULL; + } +-late_initcall(register_htab_map); ++ ++static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key) ++{ ++ struct htab_elem *l = __htab_map_lookup_elem(map, key); ++ ++ if (l) { ++ bpf_lru_node_set_ref(&l->lru_node); ++ return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size)); ++ } ++ ++ return NULL; ++} ++ ++int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value) ++{ ++ struct htab_elem *l; ++ void __percpu *pptr; ++ int ret = -ENOENT; ++ int cpu, off = 0; ++ u32 size; ++ ++ /* per_cpu areas are zero-filled and bpf programs can only ++ * access 'value_size' of them, so copying rounded areas ++ * will not leak any kernel data ++ */ ++ size = round_up(map->value_size, 8); ++ rcu_read_lock(); ++ l = __htab_map_lookup_elem(map, key); ++ if (!l) ++ goto out; ++ /* We do not mark LRU map element here in order to not mess up ++ * eviction heuristics when user space does a map walk. 
++ */ ++ pptr = htab_elem_get_ptr(l, map->key_size); ++ for_each_possible_cpu(cpu) { ++ bpf_long_memcpy(value + off, ++ per_cpu_ptr(pptr, cpu), size); ++ off += size; ++ } ++ ret = 0; ++out: ++ rcu_read_unlock(); ++ return ret; ++} ++ ++int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, ++ u64 map_flags) ++{ ++ struct bpf_htab *htab = container_of(map, struct bpf_htab, map); ++ int ret; ++ ++ rcu_read_lock(); ++ if (htab_is_lru(htab)) ++ ret = __htab_lru_percpu_map_update_elem(map, key, value, ++ map_flags, true); ++ else ++ ret = __htab_percpu_map_update_elem(map, key, value, map_flags, ++ true); ++ rcu_read_unlock(); ++ ++ return ret; ++} ++ ++static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key, ++ struct seq_file *m) ++{ ++ struct htab_elem *l; ++ void __percpu *pptr; ++ int cpu; ++ ++ rcu_read_lock(); ++ ++ l = __htab_map_lookup_elem(map, key); ++ if (!l) { ++ rcu_read_unlock(); ++ return; ++ } ++ ++ btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); ++ seq_puts(m, ": {\n"); ++ pptr = htab_elem_get_ptr(l, map->key_size); ++ for_each_possible_cpu(cpu) { ++ seq_printf(m, "\tcpu%d: ", cpu); ++ btf_type_seq_show(map->btf, map->btf_value_type_id, ++ per_cpu_ptr(pptr, cpu), m); ++ seq_puts(m, "\n"); ++ } ++ seq_puts(m, "}\n"); ++ ++ rcu_read_unlock(); ++} ++ ++const struct bpf_map_ops htab_percpu_map_ops = { ++ .map_alloc_check = htab_map_alloc_check, ++ .map_alloc = htab_map_alloc, ++ .map_free = htab_map_free, ++ .map_get_next_key = htab_map_get_next_key, ++ .map_lookup_elem = htab_percpu_map_lookup_elem, ++ .map_update_elem = htab_percpu_map_update_elem, ++ .map_delete_elem = htab_map_delete_elem, ++ .map_seq_show_elem = htab_percpu_map_seq_show_elem, ++}; ++ ++const struct bpf_map_ops htab_lru_percpu_map_ops = { ++ .map_alloc_check = htab_map_alloc_check, ++ .map_alloc = htab_map_alloc, ++ .map_free = htab_map_free, ++ .map_get_next_key = htab_map_get_next_key, ++ .map_lookup_elem = 
htab_lru_percpu_map_lookup_elem, ++ .map_update_elem = htab_lru_percpu_map_update_elem, ++ .map_delete_elem = htab_lru_map_delete_elem, ++ .map_seq_show_elem = htab_percpu_map_seq_show_elem, ++}; ++ ++static int fd_htab_map_alloc_check(union bpf_attr *attr) ++{ ++ if (attr->value_size != sizeof(u32)) ++ return -EINVAL; ++ return htab_map_alloc_check(attr); ++} ++ ++static void fd_htab_map_free(struct bpf_map *map) ++{ ++ struct bpf_htab *htab = container_of(map, struct bpf_htab, map); ++ struct hlist_nulls_node *n; ++ struct hlist_nulls_head *head; ++ struct htab_elem *l; ++ int i; ++ ++ for (i = 0; i < htab->n_buckets; i++) { ++ head = select_bucket(htab, i); ++ ++ hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { ++ void *ptr = fd_htab_map_get_ptr(map, l); ++ ++ map->ops->map_fd_put_ptr(ptr); ++ } ++ } ++ ++ htab_map_free(map); ++} ++ ++/* only called from syscall */ ++int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value) ++{ ++ void **ptr; ++ int ret = 0; ++ ++ if (!map->ops->map_fd_sys_lookup_elem) ++ return -ENOTSUPP; ++ ++ rcu_read_lock(); ++ ptr = htab_map_lookup_elem(map, key); ++ if (ptr) ++ *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr)); ++ else ++ ret = -ENOENT; ++ rcu_read_unlock(); ++ ++ return ret; ++} ++ ++/* only called from syscall */ ++int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, ++ void *key, void *value, u64 map_flags) ++{ ++ void *ptr; ++ int ret; ++ u32 ufd = *(u32 *)value; ++ ++ ptr = map->ops->map_fd_get_ptr(map, map_file, ufd); ++ if (IS_ERR(ptr)) ++ return PTR_ERR(ptr); ++ ++ ret = htab_map_update_elem(map, key, &ptr, map_flags); ++ if (ret) ++ map->ops->map_fd_put_ptr(ptr); ++ ++ return ret; ++} ++ ++static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr) ++{ ++ struct bpf_map *map, *inner_map_meta; ++ ++ inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd); ++ if (IS_ERR(inner_map_meta)) ++ return inner_map_meta; ++ ++ map = htab_map_alloc(attr); ++ if 
(IS_ERR(map)) { ++ bpf_map_meta_free(inner_map_meta); ++ return map; ++ } ++ ++ map->inner_map_meta = inner_map_meta; ++ ++ return map; ++} ++ ++static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key) ++{ ++ struct bpf_map **inner_map = htab_map_lookup_elem(map, key); ++ ++ if (!inner_map) ++ return NULL; ++ ++ return READ_ONCE(*inner_map); ++} ++ ++static u32 htab_of_map_gen_lookup(struct bpf_map *map, ++ struct bpf_insn *insn_buf) ++{ ++ struct bpf_insn *insn = insn_buf; ++ const int ret = BPF_REG_0; ++ ++ BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem, ++ (void *(*)(struct bpf_map *map, void *key))NULL)); ++ *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem)); ++ *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2); ++ *insn++ = BPF_ALU64_IMM(BPF_ADD, ret, ++ offsetof(struct htab_elem, key) + ++ round_up(map->key_size, 8)); ++ *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0); ++ ++ return insn - insn_buf; ++} ++ ++static void htab_of_map_free(struct bpf_map *map) ++{ ++ bpf_map_meta_free(map->inner_map_meta); ++ fd_htab_map_free(map); ++} ++ ++const struct bpf_map_ops htab_of_maps_map_ops = { ++ .map_alloc_check = fd_htab_map_alloc_check, ++ .map_alloc = htab_of_map_alloc, ++ .map_free = htab_of_map_free, ++ .map_get_next_key = htab_map_get_next_key, ++ .map_lookup_elem = htab_of_map_lookup_elem, ++ .map_delete_elem = htab_map_delete_elem, ++ .map_fd_get_ptr = bpf_map_fd_get_ptr, ++ .map_fd_put_ptr = bpf_map_fd_put_ptr, ++ .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem, ++ .map_gen_lookup = htab_of_map_gen_lookup, ++ .map_check_btf = map_check_no_btf, ++}; +--- a/kernel/bpf/helpers.c ++++ b/kernel/bpf/helpers.c +@@ -1,21 +1,18 @@ ++// SPDX-License-Identifier: GPL-2.0-only + /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com +- * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of version 2 of the GNU General Public +- * License as published by the Free Software Foundation. 
+- * +- * This program is distributed in the hope that it will be useful, but +- * WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * General Public License for more details. + */ + #include + #include + #include + #include ++#include + #include + #include + #include ++#include ++#include ++ ++#include "../../lib/kstrtox.h" + + /* If kernel subsystem is allowing eBPF programs to call this function, + * inside its own verifier_ops->get_func_proto() callback it should return +@@ -26,48 +23,32 @@ + * if program is allowed to access maps, so check rcu_read_lock_held in + * all three functions. + */ +-static u64 bpf_map_lookup_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ++BPF_CALL_2(bpf_map_lookup_elem, struct bpf_map *, map, void *, key) + { +- /* verifier checked that R1 contains a valid pointer to bpf_map +- * and R2 points to a program stack and map->key_size bytes were +- * initialized +- */ +- struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; +- void *key = (void *) (unsigned long) r2; +- void *value; +- + WARN_ON_ONCE(!rcu_read_lock_held()); +- +- value = map->ops->map_lookup_elem(map, key); +- +- /* lookup() returns either pointer to element value or NULL +- * which is the meaning of PTR_TO_MAP_VALUE_OR_NULL type +- */ +- return (unsigned long) value; ++ return (unsigned long) map->ops->map_lookup_elem(map, key); + } + + const struct bpf_func_proto bpf_map_lookup_elem_proto = { + .func = bpf_map_lookup_elem, + .gpl_only = false, ++ .pkt_access = true, + .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, + }; + +-static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ++BPF_CALL_4(bpf_map_update_elem, struct bpf_map *, map, void *, key, ++ void *, value, u64, flags) + { +- struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; +- void *key = (void *) (unsigned long) r2; +- void *value = 
(void *) (unsigned long) r3; +- + WARN_ON_ONCE(!rcu_read_lock_held()); +- +- return map->ops->map_update_elem(map, key, value, r4); ++ return map->ops->map_update_elem(map, key, value, flags); + } + + const struct bpf_func_proto bpf_map_update_elem_proto = { + .func = bpf_map_update_elem, + .gpl_only = false, ++ .pkt_access = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, +@@ -75,33 +56,71 @@ const struct bpf_func_proto bpf_map_upda + .arg4_type = ARG_ANYTHING, + }; + +-static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ++BPF_CALL_2(bpf_map_delete_elem, struct bpf_map *, map, void *, key) + { +- struct bpf_map *map = (struct bpf_map *) (unsigned long) r1; +- void *key = (void *) (unsigned long) r2; +- + WARN_ON_ONCE(!rcu_read_lock_held()); +- + return map->ops->map_delete_elem(map, key); + } + + const struct bpf_func_proto bpf_map_delete_elem_proto = { + .func = bpf_map_delete_elem, + .gpl_only = false, ++ .pkt_access = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_CONST_MAP_PTR, + .arg2_type = ARG_PTR_TO_MAP_KEY, + }; + ++BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags) ++{ ++ return map->ops->map_push_elem(map, value, flags); ++} ++ ++const struct bpf_func_proto bpf_map_push_elem_proto = { ++ .func = bpf_map_push_elem, ++ .gpl_only = false, ++ .pkt_access = true, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_CONST_MAP_PTR, ++ .arg2_type = ARG_PTR_TO_MAP_VALUE, ++ .arg3_type = ARG_ANYTHING, ++}; ++ ++BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value) ++{ ++ return map->ops->map_pop_elem(map, value); ++} ++ ++const struct bpf_func_proto bpf_map_pop_elem_proto = { ++ .func = bpf_map_pop_elem, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_CONST_MAP_PTR, ++ .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE, ++}; ++ ++BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) ++{ ++ return map->ops->map_peek_elem(map, 
value); ++} ++ ++const struct bpf_func_proto bpf_map_peek_elem_proto = { ++ .func = bpf_map_peek_elem, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_CONST_MAP_PTR, ++ .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE, ++}; ++ + const struct bpf_func_proto bpf_get_prandom_u32_proto = { + .func = bpf_user_rnd_u32, + .gpl_only = false, + .ret_type = RET_INTEGER, + }; + +-static u64 bpf_get_smp_processor_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ++BPF_CALL_0(bpf_get_smp_processor_id) + { +- return raw_smp_processor_id(); ++ return smp_processor_id(); + } + + const struct bpf_func_proto bpf_get_smp_processor_id_proto = { +@@ -110,7 +129,18 @@ const struct bpf_func_proto bpf_get_smp_ + .ret_type = RET_INTEGER, + }; + +-static u64 bpf_ktime_get_ns(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ++BPF_CALL_0(bpf_get_numa_node_id) ++{ ++ return numa_node_id(); ++} ++ ++const struct bpf_func_proto bpf_get_numa_node_id_proto = { ++ .func = bpf_get_numa_node_id, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++}; ++ ++BPF_CALL_0(bpf_ktime_get_ns) + { + /* NMI safe access to clock monotonic */ + return ktime_get_mono_fast_ns(); +@@ -122,11 +152,11 @@ const struct bpf_func_proto bpf_ktime_ge + .ret_type = RET_INTEGER, + }; + +-static u64 bpf_get_current_pid_tgid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ++BPF_CALL_0(bpf_get_current_pid_tgid) + { + struct task_struct *task = current; + +- if (!task) ++ if (unlikely(!task)) + return -EINVAL; + + return (u64) task->tgid << 32 | task->pid; +@@ -138,18 +168,18 @@ const struct bpf_func_proto bpf_get_curr + .ret_type = RET_INTEGER, + }; + +-static u64 bpf_get_current_uid_gid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ++BPF_CALL_0(bpf_get_current_uid_gid) + { + struct task_struct *task = current; + kuid_t uid; + kgid_t gid; + +- if (!task) ++ if (unlikely(!task)) + return -EINVAL; + + current_uid_gid(&uid, &gid); + return (u64) from_kgid(&init_user_ns, gid) << 32 | +- from_kuid(&init_user_ns, uid); ++ from_kuid(&init_user_ns, 
uid); + } + + const struct bpf_func_proto bpf_get_current_uid_gid_proto = { +@@ -158,22 +188,254 @@ const struct bpf_func_proto bpf_get_curr + .ret_type = RET_INTEGER, + }; + +-static u64 bpf_get_current_comm(u64 r1, u64 size, u64 r3, u64 r4, u64 r5) ++BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size) + { + struct task_struct *task = current; +- char *buf = (char *) (long) r1; + +- if (!task) +- return -EINVAL; ++ if (unlikely(!task)) ++ goto err_clear; ++ ++ strncpy(buf, task->comm, size); + +- strlcpy(buf, task->comm, min_t(size_t, size, sizeof(task->comm))); ++ /* Verifier guarantees that size > 0. For task->comm exceeding ++ * size, guarantee that buf is %NUL-terminated. Unconditionally ++ * done here to save the size test. ++ */ ++ buf[size - 1] = 0; + return 0; ++err_clear: ++ memset(buf, 0, size); ++ return -EINVAL; + } + + const struct bpf_func_proto bpf_get_current_comm_proto = { + .func = bpf_get_current_comm, + .gpl_only = false, + .ret_type = RET_INTEGER, +- .arg1_type = ARG_PTR_TO_STACK, +- .arg2_type = ARG_CONST_STACK_SIZE, ++ .arg1_type = ARG_PTR_TO_UNINIT_MEM, ++ .arg2_type = ARG_CONST_SIZE, ++}; ++ ++#if defined(CONFIG_QUEUED_SPINLOCKS) || defined(CONFIG_BPF_ARCH_SPINLOCK) ++ ++static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) ++{ ++ arch_spinlock_t *l = (void *)lock; ++ union { ++ __u32 val; ++ arch_spinlock_t lock; ++ } u = { .lock = __ARCH_SPIN_LOCK_UNLOCKED }; ++ ++ compiletime_assert(u.val == 0, "__ARCH_SPIN_LOCK_UNLOCKED not 0"); ++ BUILD_BUG_ON(sizeof(*l) != sizeof(__u32)); ++ BUILD_BUG_ON(sizeof(*lock) != sizeof(__u32)); ++ arch_spin_lock(l); ++} ++ ++static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) ++{ ++ arch_spinlock_t *l = (void *)lock; ++ ++ arch_spin_unlock(l); ++} ++ ++#else ++ ++static inline void __bpf_spin_lock(struct bpf_spin_lock *lock) ++{ ++ atomic_t *l = (void *)lock; ++ ++ BUILD_BUG_ON(sizeof(*l) != sizeof(*lock)); ++ do { ++ smp_cond_load_relaxed(&l->counter, !VAL); ++ } while 
(atomic_xchg(l, 1)); ++} ++ ++static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock) ++{ ++ atomic_t *l = (void *)lock; ++ ++ atomic_set_release(l, 0); ++} ++ ++#endif ++ ++static DEFINE_PER_CPU(unsigned long, irqsave_flags); ++ ++notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ __bpf_spin_lock(lock); ++ __this_cpu_write(irqsave_flags, flags); ++ return 0; ++} ++ ++const struct bpf_func_proto bpf_spin_lock_proto = { ++ .func = bpf_spin_lock, ++ .gpl_only = false, ++ .ret_type = RET_VOID, ++ .arg1_type = ARG_PTR_TO_SPIN_LOCK, ++}; ++ ++notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock) ++{ ++ unsigned long flags; ++ ++ flags = __this_cpu_read(irqsave_flags); ++ __bpf_spin_unlock(lock); ++ local_irq_restore(flags); ++ return 0; ++} ++ ++const struct bpf_func_proto bpf_spin_unlock_proto = { ++ .func = bpf_spin_unlock, ++ .gpl_only = false, ++ .ret_type = RET_VOID, ++ .arg1_type = ARG_PTR_TO_SPIN_LOCK, ++}; ++ ++void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, ++ bool lock_src) ++{ ++ struct bpf_spin_lock *lock; ++ ++ if (lock_src) ++ lock = src + map->spin_lock_off; ++ else ++ lock = dst + map->spin_lock_off; ++ preempt_disable(); ++ ____bpf_spin_lock(lock); ++ copy_map_value(map, dst, src); ++ ____bpf_spin_unlock(lock); ++ preempt_enable(); ++} ++ ++#define BPF_STRTOX_BASE_MASK 0x1F ++ ++static int __bpf_strtoull(const char *buf, size_t buf_len, u64 flags, ++ unsigned long long *res, bool *is_negative) ++{ ++ unsigned int base = flags & BPF_STRTOX_BASE_MASK; ++ const char *cur_buf = buf; ++ size_t cur_len = buf_len; ++ unsigned int consumed; ++ size_t val_len; ++ char str[64]; ++ ++ if (!buf || !buf_len || !res || !is_negative) ++ return -EINVAL; ++ ++ if (base != 0 && base != 8 && base != 10 && base != 16) ++ return -EINVAL; ++ ++ if (flags & ~BPF_STRTOX_BASE_MASK) ++ return -EINVAL; ++ ++ while (cur_buf < buf + buf_len && isspace(*cur_buf)) ++ 
++cur_buf; ++ ++ *is_negative = (cur_buf < buf + buf_len && *cur_buf == '-'); ++ if (*is_negative) ++ ++cur_buf; ++ ++ consumed = cur_buf - buf; ++ cur_len -= consumed; ++ if (!cur_len) ++ return -EINVAL; ++ ++ cur_len = min(cur_len, sizeof(str) - 1); ++ memcpy(str, cur_buf, cur_len); ++ str[cur_len] = '\0'; ++ cur_buf = str; ++ ++ cur_buf = _parse_integer_fixup_radix(cur_buf, &base); ++ val_len = _parse_integer(cur_buf, base, res); ++ ++ if (val_len & KSTRTOX_OVERFLOW) ++ return -ERANGE; ++ ++ if (val_len == 0) ++ return -EINVAL; ++ ++ cur_buf += val_len; ++ consumed += cur_buf - str; ++ ++ return consumed; ++} ++ ++static int __bpf_strtoll(const char *buf, size_t buf_len, u64 flags, ++ long long *res) ++{ ++ unsigned long long _res; ++ bool is_negative; ++ int err; ++ ++ err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); ++ if (err < 0) ++ return err; ++ if (is_negative) { ++ if ((long long)-_res > 0) ++ return -ERANGE; ++ *res = -_res; ++ } else { ++ if ((long long)_res < 0) ++ return -ERANGE; ++ *res = _res; ++ } ++ return err; ++} ++ ++BPF_CALL_4(bpf_strtol, const char *, buf, size_t, buf_len, u64, flags, ++ long *, res) ++{ ++ long long _res; ++ int err; ++ ++ err = __bpf_strtoll(buf, buf_len, flags, &_res); ++ if (err < 0) ++ return err; ++ if (_res != (long)_res) ++ return -ERANGE; ++ *res = _res; ++ return err; ++} ++ ++const struct bpf_func_proto bpf_strtol_proto = { ++ .func = bpf_strtol, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_CONST_SIZE, ++ .arg3_type = ARG_ANYTHING, ++ .arg4_type = ARG_PTR_TO_LONG, ++}; ++ ++BPF_CALL_4(bpf_strtoul, const char *, buf, size_t, buf_len, u64, flags, ++ unsigned long *, res) ++{ ++ unsigned long long _res; ++ bool is_negative; ++ int err; ++ ++ err = __bpf_strtoull(buf, buf_len, flags, &_res, &is_negative); ++ if (err < 0) ++ return err; ++ if (is_negative) ++ return -EINVAL; ++ if (_res != (unsigned long)_res) ++ return -ERANGE; ++ *res = _res; ++ 
return err; ++} ++ ++const struct bpf_func_proto bpf_strtoul_proto = { ++ .func = bpf_strtoul, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_MEM, ++ .arg2_type = ARG_CONST_SIZE, ++ .arg3_type = ARG_ANYTHING, ++ .arg4_type = ARG_PTR_TO_LONG, + }; +--- a/kernel/bpf/inode.c ++++ b/kernel/bpf/inode.c +@@ -1,3 +1,4 @@ ++// SPDX-License-Identifier: GPL-2.0-only + /* + * Minimal file system backend for holding eBPF maps and programs, + * used by bpf(2) object pinning. +@@ -5,21 +6,19 @@ + * Authors: + * + * Daniel Borkmann +- * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of the GNU General Public License +- * version 2 as published by the Free Software Foundation. + */ + +-#include ++#include + #include + #include + #include + #include + #include + #include ++#include + #include + #include ++#include + + enum bpf_type { + BPF_TYPE_UNSPEC = 0, +@@ -87,6 +86,7 @@ static struct inode *bpf_get_inode(struc + switch (mode & S_IFMT) { + case S_IFDIR: + case S_IFREG: ++ case S_IFLNK: + break; + default: + return ERR_PTR(-EINVAL); +@@ -119,18 +119,20 @@ static int bpf_inode_type(const struct i + return 0; + } + +-static bool bpf_dname_reserved(const struct dentry *dentry) ++static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode, ++ struct inode *dir) + { +- return strchr(dentry->d_name.name, '.'); ++ d_instantiate(dentry, inode); ++ dget(dentry); ++ ++ dir->i_mtime = CURRENT_TIME; ++ dir->i_ctime = dir->i_mtime; + } + + static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) + { + struct inode *inode; + +- if (bpf_dname_reserved(dentry)) +- return -EPERM; +- + inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR); + if (IS_ERR(inode)) + return PTR_ERR(inode); +@@ -141,30 +143,30 @@ static int bpf_mkdir(struct inode *dir, + inc_nlink(inode); + inc_nlink(dir); + +- d_instantiate(dentry, inode); +- dget(dentry); +- ++ bpf_dentry_finalize(dentry, inode, dir); + 
return 0; + } + +-static int bpf_mkobj_ops(struct inode *dir, struct dentry *dentry, +- umode_t mode, const struct inode_operations *iops) ++static int bpffs_obj_open(struct inode *inode, struct file *file) + { +- struct inode *inode; ++ return -EIO; ++} + +- if (bpf_dname_reserved(dentry)) +- return -EPERM; ++static const struct file_operations bpffs_obj_fops = { ++ .open = bpffs_obj_open, ++}; + +- inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFREG); ++static int bpf_mkobj_ops(struct inode *dir, struct dentry *dentry, ++ umode_t mode, const struct inode_operations *iops) ++{ ++ struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFREG); + if (IS_ERR(inode)) + return PTR_ERR(inode); + + inode->i_op = iops; + inode->i_private = dentry->d_fsdata; + +- d_instantiate(dentry, inode); +- dget(dentry); +- ++ bpf_dentry_finalize(dentry, inode, dir); + return 0; + } + +@@ -187,11 +189,48 @@ static int bpf_mkobj(struct inode *dir, + } + } + ++static struct dentry * ++bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags) ++{ ++ /* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future ++ * extensions. 
++ */ ++ if (strchr(dentry->d_name.name, '.')) ++ return ERR_PTR(-EPERM); ++ ++ return simple_lookup(dir, dentry, flags); ++} ++ ++static int bpf_symlink(struct inode *dir, struct dentry *dentry, ++ const char *target) ++{ ++ char *link = kstrdup(target, GFP_USER | __GFP_NOWARN); ++ struct inode *inode; ++ ++ if (!link) ++ return -ENOMEM; ++ ++ inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK); ++ if (IS_ERR(inode)) { ++ kfree(link); ++ return PTR_ERR(inode); ++ } ++ ++ inode->i_op = &simple_symlink_inode_operations; ++ inode->i_link = link; ++ ++ bpf_dentry_finalize(dentry, inode, dir); ++ return 0; ++} ++ + static const struct inode_operations bpf_dir_iops = { +- .lookup = simple_lookup, ++ .lookup = bpf_lookup, + .mknod = bpf_mkobj, + .mkdir = bpf_mkdir, ++ .symlink = bpf_symlink, + .rmdir = simple_rmdir, ++ .rename = simple_rename, ++ .link = simple_link, + .unlink = simple_unlink, + }; + +@@ -256,7 +295,7 @@ out: + } + + static void *bpf_obj_do_get(const struct filename *pathname, +- enum bpf_type *type) ++ enum bpf_type *type, int flags) + { + struct inode *inode; + struct path path; +@@ -268,7 +307,7 @@ static void *bpf_obj_do_get(const struct + return ERR_PTR(ret); + + inode = d_backing_inode(path.dentry); +- ret = inode_permission(inode, MAY_WRITE); ++ ret = inode_permission(inode, ACC_MODE(flags)); + if (ret) + goto out; + +@@ -287,18 +326,23 @@ out: + return ERR_PTR(ret); + } + +-int bpf_obj_get_user(const char __user *pathname) ++int bpf_obj_get_user(const char __user *pathname, int flags) + { + enum bpf_type type = BPF_TYPE_UNSPEC; + struct filename *pname; + int ret = -ENOENT; ++ int f_flags; + void *raw; + ++ f_flags = bpf_get_file_flag(flags); ++ if (f_flags < 0) ++ return f_flags; ++ + pname = getname(pathname); + if (IS_ERR(pname)) + return PTR_ERR(pname); + +- raw = bpf_obj_do_get(pname, &type); ++ raw = bpf_obj_do_get(pname, &type, f_flags); + if (IS_ERR(raw)) { + ret = PTR_ERR(raw); + goto out; +@@ -307,7 +351,7 @@ int 
bpf_obj_get_user(const char __user * + if (type == BPF_TYPE_PROG) + ret = bpf_prog_new_fd(raw); + else if (type == BPF_TYPE_MAP) +- ret = bpf_map_new_fd(raw); ++ ret = bpf_map_new_fd(raw, f_flags); + else + goto out; + +@@ -318,29 +362,131 @@ out: + return ret; + } + +-static void bpf_evict_inode(struct inode *inode) ++static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type) + { +- enum bpf_type type; ++ struct bpf_prog *prog; ++ int ret = inode_permission(inode, MAY_READ); ++ if (ret) ++ return ERR_PTR(ret); ++ ++ if (inode->i_op == &bpf_map_iops) ++ return ERR_PTR(-EINVAL); ++ if (inode->i_op != &bpf_prog_iops) ++ return ERR_PTR(-EACCES); + +- truncate_inode_pages_final(&inode->i_data); +- clear_inode(inode); ++ prog = inode->i_private; + ++ if (!bpf_prog_get_ok(prog, &type, false)) ++ return ERR_PTR(-EINVAL); ++ ++ return bpf_prog_inc(prog); ++} ++ ++struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type) ++{ ++ struct bpf_prog *prog; ++ struct path path; ++ int ret = kern_path(name, LOOKUP_FOLLOW, &path); ++ if (ret) ++ return ERR_PTR(ret); ++ prog = __get_prog_inode(d_backing_inode(path.dentry), type); ++ if (!IS_ERR(prog)) ++ touch_atime(&path); ++ path_put(&path); ++ return prog; ++} ++EXPORT_SYMBOL(bpf_prog_get_type_path); ++ ++/* ++ * Display the mount options in /proc/mounts. 
++ */ ++static int bpf_show_options(struct seq_file *m, struct dentry *root) ++{ ++ umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX; ++ ++ if (mode != S_IRWXUGO) ++ seq_printf(m, ",mode=%o", mode); ++ return 0; ++} ++ ++static void bpf_destroy_inode_deferred(struct rcu_head *head) ++{ ++ struct inode *inode = container_of(head, struct inode, i_rcu); ++ enum bpf_type type; ++ ++ if (S_ISLNK(inode->i_mode)) ++ kfree(inode->i_link); + if (!bpf_inode_type(inode, &type)) + bpf_any_put(inode->i_private, type); ++ free_inode_nonrcu(inode); ++} ++ ++static void bpf_destroy_inode(struct inode *inode) ++{ ++ call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred); + } + + static const struct super_operations bpf_super_ops = { + .statfs = simple_statfs, + .drop_inode = generic_delete_inode, +- .evict_inode = bpf_evict_inode, ++ .show_options = bpf_show_options, ++ .destroy_inode = bpf_destroy_inode, ++}; ++ ++enum { ++ OPT_MODE, ++ OPT_ERR, ++}; ++ ++static const match_table_t bpf_mount_tokens = { ++ { OPT_MODE, "mode=%o" }, ++ { OPT_ERR, NULL }, ++}; ++ ++struct bpf_mount_opts { ++ umode_t mode; + }; + ++static int bpf_parse_options(char *data, struct bpf_mount_opts *opts) ++{ ++ substring_t args[MAX_OPT_ARGS]; ++ int option, token; ++ char *ptr; ++ ++ opts->mode = S_IRWXUGO; ++ ++ while ((ptr = strsep(&data, ",")) != NULL) { ++ if (!*ptr) ++ continue; ++ ++ token = match_token(ptr, bpf_mount_tokens, args); ++ switch (token) { ++ case OPT_MODE: ++ if (match_octal(&args[0], &option)) ++ return -EINVAL; ++ opts->mode = option & S_IALLUGO; ++ break; ++ /* We might like to report bad mount options here, but ++ * traditionally we've ignored all mount options, so we'd ++ * better continue to ignore non-existing options for bpf. 
++ */ ++ } ++ } ++ ++ return 0; ++} ++ + static int bpf_fill_super(struct super_block *sb, void *data, int silent) + { + static struct tree_descr bpf_rfiles[] = { { "" } }; ++ struct bpf_mount_opts opts; + struct inode *inode; + int ret; + ++ ret = bpf_parse_options(data, &opts); ++ if (ret) ++ return ret; ++ + ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles); + if (ret) + return ret; +@@ -350,7 +496,7 @@ static int bpf_fill_super(struct super_b + inode = sb->s_root->d_inode; + inode->i_op = &bpf_dir_iops; + inode->i_mode &= ~S_IALLUGO; +- inode->i_mode |= S_ISVTX | S_IRWXUGO; ++ inode->i_mode |= S_ISVTX | opts.mode; + + return 0; + } +@@ -368,8 +514,6 @@ static struct file_system_type bpf_fs_ty + .kill_sb = kill_litter_super, + }; + +-MODULE_ALIAS_FS("bpf"); +- + static int __init bpf_init(void) + { + int ret; +--- /dev/null ++++ b/kernel/bpf/local_storage.c +@@ -0,0 +1,600 @@ ++//SPDX-License-Identifier: GPL-2.0 ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++DEFINE_PER_CPU(struct bpf_cgroup_storage*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]); ++ ++#ifdef CONFIG_CGROUP_BPF ++ ++#define LOCAL_STORAGE_CREATE_FLAG_MASK \ ++ (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK) ++ ++struct bpf_cgroup_storage_map { ++ struct bpf_map map; ++ ++ spinlock_t lock; ++ struct bpf_prog *prog; ++ struct rb_root root; ++ struct list_head list; ++}; ++ ++static struct bpf_cgroup_storage_map *map_to_storage(struct bpf_map *map) ++{ ++ return container_of(map, struct bpf_cgroup_storage_map, map); ++} ++ ++static int bpf_cgroup_storage_key_cmp( ++ const struct bpf_cgroup_storage_key *key1, ++ const struct bpf_cgroup_storage_key *key2) ++{ ++ if (key1->cgroup_inode_id < key2->cgroup_inode_id) ++ return -1; ++ else if (key1->cgroup_inode_id > key2->cgroup_inode_id) ++ return 1; ++ else if (key1->attach_type < key2->attach_type) ++ return -1; ++ else if (key1->attach_type > key2->attach_type) ++ return 1; ++ return 0; ++} ++ 
++static struct bpf_cgroup_storage *cgroup_storage_lookup( ++ struct bpf_cgroup_storage_map *map, struct bpf_cgroup_storage_key *key, ++ bool locked) ++{ ++ struct rb_root *root = &map->root; ++ struct rb_node *node; ++ ++ if (!locked) ++ spin_lock_bh(&map->lock); ++ ++ node = root->rb_node; ++ while (node) { ++ struct bpf_cgroup_storage *storage; ++ ++ storage = container_of(node, struct bpf_cgroup_storage, node); ++ ++ switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) { ++ case -1: ++ node = node->rb_left; ++ break; ++ case 1: ++ node = node->rb_right; ++ break; ++ default: ++ if (!locked) ++ spin_unlock_bh(&map->lock); ++ return storage; ++ } ++ } ++ ++ if (!locked) ++ spin_unlock_bh(&map->lock); ++ ++ return NULL; ++} ++ ++static int cgroup_storage_insert(struct bpf_cgroup_storage_map *map, ++ struct bpf_cgroup_storage *storage) ++{ ++ struct rb_root *root = &map->root; ++ struct rb_node **new = &(root->rb_node), *parent = NULL; ++ ++ while (*new) { ++ struct bpf_cgroup_storage *this; ++ ++ this = container_of(*new, struct bpf_cgroup_storage, node); ++ ++ parent = *new; ++ switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) { ++ case -1: ++ new = &((*new)->rb_left); ++ break; ++ case 1: ++ new = &((*new)->rb_right); ++ break; ++ default: ++ return -EEXIST; ++ } ++ } ++ ++ rb_link_node(&storage->node, parent, new); ++ rb_insert_color(&storage->node, root); ++ ++ return 0; ++} ++ ++static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *_key) ++{ ++ struct bpf_cgroup_storage_map *map = map_to_storage(_map); ++ struct bpf_cgroup_storage_key *key = _key; ++ struct bpf_cgroup_storage *storage; ++ ++ storage = cgroup_storage_lookup(map, key, false); ++ if (!storage) ++ return NULL; ++ ++ return &READ_ONCE(storage->buf)->data[0]; ++} ++ ++static int cgroup_storage_update_elem(struct bpf_map *map, void *_key, ++ void *value, u64 flags) ++{ ++ struct bpf_cgroup_storage_key *key = _key; ++ struct bpf_cgroup_storage *storage; ++ struct 
bpf_storage_buffer *new; ++ ++ if (unlikely(flags & ~(BPF_F_LOCK | BPF_EXIST | BPF_NOEXIST))) ++ return -EINVAL; ++ ++ if (unlikely(flags & BPF_NOEXIST)) ++ return -EINVAL; ++ ++ if (unlikely((flags & BPF_F_LOCK) && ++ !map_value_has_spin_lock(map))) ++ return -EINVAL; ++ ++ storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map, ++ key, false); ++ if (!storage) ++ return -ENOENT; ++ ++ if (flags & BPF_F_LOCK) { ++ copy_map_value_locked(map, storage->buf->data, value, false); ++ return 0; ++ } ++ ++ new = kmalloc_node(sizeof(struct bpf_storage_buffer) + ++ map->value_size, ++ __GFP_ZERO | GFP_ATOMIC | __GFP_NOWARN, ++ map->numa_node); ++ if (!new) ++ return -ENOMEM; ++ ++ memcpy(&new->data[0], value, map->value_size); ++ check_and_init_map_lock(map, new->data); ++ ++ new = xchg(&storage->buf, new); ++ kfree_rcu(new, rcu); ++ ++ return 0; ++} ++ ++int bpf_percpu_cgroup_storage_copy(struct bpf_map *_map, void *_key, ++ void *value) ++{ ++ struct bpf_cgroup_storage_map *map = map_to_storage(_map); ++ struct bpf_cgroup_storage_key *key = _key; ++ struct bpf_cgroup_storage *storage; ++ int cpu, off = 0; ++ u32 size; ++ ++ rcu_read_lock(); ++ storage = cgroup_storage_lookup(map, key, false); ++ if (!storage) { ++ rcu_read_unlock(); ++ return -ENOENT; ++ } ++ ++ /* per_cpu areas are zero-filled and bpf programs can only ++ * access 'value_size' of them, so copying rounded areas ++ * will not leak any kernel data ++ */ ++ size = round_up(_map->value_size, 8); ++ for_each_possible_cpu(cpu) { ++ bpf_long_memcpy(value + off, ++ per_cpu_ptr(storage->percpu_buf, cpu), size); ++ off += size; ++ } ++ rcu_read_unlock(); ++ return 0; ++} ++ ++int bpf_percpu_cgroup_storage_update(struct bpf_map *_map, void *_key, ++ void *value, u64 map_flags) ++{ ++ struct bpf_cgroup_storage_map *map = map_to_storage(_map); ++ struct bpf_cgroup_storage_key *key = _key; ++ struct bpf_cgroup_storage *storage; ++ int cpu, off = 0; ++ u32 size; ++ ++ if (map_flags != BPF_ANY && map_flags 
!= BPF_EXIST) ++ return -EINVAL; ++ ++ rcu_read_lock(); ++ storage = cgroup_storage_lookup(map, key, false); ++ if (!storage) { ++ rcu_read_unlock(); ++ return -ENOENT; ++ } ++ ++ /* the user space will provide round_up(value_size, 8) bytes that ++ * will be copied into per-cpu area. bpf programs can only access ++ * value_size of it. During lookup the same extra bytes will be ++ * returned or zeros which were zero-filled by percpu_alloc, ++ * so no kernel data leaks possible ++ */ ++ size = round_up(_map->value_size, 8); ++ for_each_possible_cpu(cpu) { ++ bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu), ++ value + off, size); ++ off += size; ++ } ++ rcu_read_unlock(); ++ return 0; ++} ++ ++static int cgroup_storage_get_next_key(struct bpf_map *_map, void *_key, ++ void *_next_key) ++{ ++ struct bpf_cgroup_storage_map *map = map_to_storage(_map); ++ struct bpf_cgroup_storage_key *key = _key; ++ struct bpf_cgroup_storage_key *next = _next_key; ++ struct bpf_cgroup_storage *storage; ++ ++ spin_lock_bh(&map->lock); ++ ++ if (list_empty(&map->list)) ++ goto enoent; ++ ++ if (key) { ++ storage = cgroup_storage_lookup(map, key, true); ++ if (!storage) ++ goto enoent; ++ ++ storage = list_next_entry(storage, list); ++ if (!storage) ++ goto enoent; ++ } else { ++ storage = list_first_entry(&map->list, ++ struct bpf_cgroup_storage, list); ++ } ++ ++ spin_unlock_bh(&map->lock); ++ next->attach_type = storage->key.attach_type; ++ next->cgroup_inode_id = storage->key.cgroup_inode_id; ++ return 0; ++ ++enoent: ++ spin_unlock_bh(&map->lock); ++ return -ENOENT; ++} ++ ++static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr) ++{ ++ int numa_node = bpf_map_attr_numa_node(attr); ++ struct bpf_cgroup_storage_map *map; ++ struct bpf_map_memory mem; ++ int ret; ++ ++ if (attr->key_size != sizeof(struct bpf_cgroup_storage_key)) ++ return ERR_PTR(-EINVAL); ++ ++ if (attr->value_size == 0) ++ return ERR_PTR(-EINVAL); ++ ++ if (attr->value_size > PAGE_SIZE) ++ return 
ERR_PTR(-E2BIG); ++ ++ if (attr->map_flags & ~LOCAL_STORAGE_CREATE_FLAG_MASK || ++ !bpf_map_flags_access_ok(attr->map_flags)) ++ return ERR_PTR(-EINVAL); ++ ++ if (attr->max_entries) ++ /* max_entries is not used and enforced to be 0 */ ++ return ERR_PTR(-EINVAL); ++ ++ ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map)); ++ if (ret < 0) ++ return ERR_PTR(ret); ++ ++ map = kmalloc_node(sizeof(struct bpf_cgroup_storage_map), ++ __GFP_ZERO | GFP_USER, numa_node); ++ if (!map) { ++ bpf_map_charge_finish(&mem); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ bpf_map_charge_move(&map->map.memory, &mem); ++ ++ /* copy mandatory map attributes */ ++ bpf_map_init_from_attr(&map->map, attr); ++ ++ spin_lock_init(&map->lock); ++ map->root = RB_ROOT; ++ INIT_LIST_HEAD(&map->list); ++ ++ return &map->map; ++} ++ ++static void cgroup_storage_map_free(struct bpf_map *_map) ++{ ++ struct bpf_cgroup_storage_map *map = map_to_storage(_map); ++ ++ WARN_ON(!RB_EMPTY_ROOT(&map->root)); ++ WARN_ON(!list_empty(&map->list)); ++ ++ kfree(map); ++} ++ ++static int cgroup_storage_delete_elem(struct bpf_map *map, void *key) ++{ ++ return -EINVAL; ++} ++ ++static int cgroup_storage_check_btf(const struct bpf_map *map, ++ const struct btf *btf, ++ const struct btf_type *key_type, ++ const struct btf_type *value_type) ++{ ++ struct btf_member *m; ++ u32 offset, size; ++ ++ /* Key is expected to be of struct bpf_cgroup_storage_key type, ++ * which is: ++ * struct bpf_cgroup_storage_key { ++ * __u64 cgroup_inode_id; ++ * __u32 attach_type; ++ * }; ++ */ ++ ++ /* ++ * Key_type must be a structure with two fields. ++ */ ++ if (BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT || ++ BTF_INFO_VLEN(key_type->info) != 2) ++ return -EINVAL; ++ ++ /* ++ * The first field must be a 64 bit integer at 0 offset. 
++ */ ++ m = (struct btf_member *)(key_type + 1); ++ size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id); ++ if (!btf_member_is_reg_int(btf, key_type, m, 0, size)) ++ return -EINVAL; ++ ++ /* ++ * The second field must be a 32 bit integer at 64 bit offset. ++ */ ++ m++; ++ offset = offsetof(struct bpf_cgroup_storage_key, attach_type); ++ size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type); ++ if (!btf_member_is_reg_int(btf, key_type, m, offset, size)) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++static void cgroup_storage_seq_show_elem(struct bpf_map *map, void *_key, ++ struct seq_file *m) ++{ ++ enum bpf_cgroup_storage_type stype = cgroup_storage_type(map); ++ struct bpf_cgroup_storage_key *key = _key; ++ struct bpf_cgroup_storage *storage; ++ int cpu; ++ ++ rcu_read_lock(); ++ storage = cgroup_storage_lookup(map_to_storage(map), key, false); ++ if (!storage) { ++ rcu_read_unlock(); ++ return; ++ } ++ ++ btf_type_seq_show(map->btf, map->btf_key_type_id, key, m); ++ stype = cgroup_storage_type(map); ++ if (stype == BPF_CGROUP_STORAGE_SHARED) { ++ seq_puts(m, ": "); ++ btf_type_seq_show(map->btf, map->btf_value_type_id, ++ &READ_ONCE(storage->buf)->data[0], m); ++ seq_puts(m, "\n"); ++ } else { ++ seq_puts(m, ": {\n"); ++ for_each_possible_cpu(cpu) { ++ seq_printf(m, "\tcpu%d: ", cpu); ++ btf_type_seq_show(map->btf, map->btf_value_type_id, ++ per_cpu_ptr(storage->percpu_buf, cpu), ++ m); ++ seq_puts(m, "\n"); ++ } ++ seq_puts(m, "}\n"); ++ } ++ rcu_read_unlock(); ++} ++ ++const struct bpf_map_ops cgroup_storage_map_ops = { ++ .map_alloc = cgroup_storage_map_alloc, ++ .map_free = cgroup_storage_map_free, ++ .map_get_next_key = cgroup_storage_get_next_key, ++ .map_lookup_elem = cgroup_storage_lookup_elem, ++ .map_update_elem = cgroup_storage_update_elem, ++ .map_delete_elem = cgroup_storage_delete_elem, ++ .map_check_btf = cgroup_storage_check_btf, ++ .map_seq_show_elem = cgroup_storage_seq_show_elem, ++}; ++ ++int 
bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) ++{ ++ enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); ++ struct bpf_cgroup_storage_map *map = map_to_storage(_map); ++ int ret = -EBUSY; ++ ++ spin_lock_bh(&map->lock); ++ ++ if (map->prog && map->prog != prog) ++ goto unlock; ++ if (prog->aux->cgroup_storage[stype] && ++ prog->aux->cgroup_storage[stype] != _map) ++ goto unlock; ++ ++ map->prog = prog; ++ prog->aux->cgroup_storage[stype] = _map; ++ ret = 0; ++unlock: ++ spin_unlock_bh(&map->lock); ++ ++ return ret; ++} ++ ++void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map) ++{ ++ enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map); ++ struct bpf_cgroup_storage_map *map = map_to_storage(_map); ++ ++ spin_lock_bh(&map->lock); ++ if (map->prog == prog) { ++ WARN_ON(prog->aux->cgroup_storage[stype] != _map); ++ map->prog = NULL; ++ prog->aux->cgroup_storage[stype] = NULL; ++ } ++ spin_unlock_bh(&map->lock); ++} ++ ++static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages) ++{ ++ size_t size; ++ ++ if (cgroup_storage_type(map) == BPF_CGROUP_STORAGE_SHARED) { ++ size = sizeof(struct bpf_storage_buffer) + map->value_size; ++ *pages = round_up(sizeof(struct bpf_cgroup_storage) + size, ++ PAGE_SIZE) >> PAGE_SHIFT; ++ } else { ++ size = map->value_size; ++ *pages = round_up(round_up(size, 8) * num_possible_cpus(), ++ PAGE_SIZE) >> PAGE_SHIFT; ++ } ++ ++ return size; ++} ++ ++struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, ++ enum bpf_cgroup_storage_type stype) ++{ ++ struct bpf_cgroup_storage *storage; ++ struct bpf_map *map; ++ gfp_t flags; ++ size_t size; ++ u32 pages; ++ ++ map = prog->aux->cgroup_storage[stype]; ++ if (!map) ++ return NULL; ++ ++ size = bpf_cgroup_storage_calculate_size(map, &pages); ++ ++ if (bpf_map_charge_memlock(map, pages)) ++ return ERR_PTR(-EPERM); ++ ++ storage = kmalloc_node(sizeof(struct bpf_cgroup_storage), ++ 
__GFP_ZERO | GFP_USER, map->numa_node); ++ if (!storage) ++ goto enomem; ++ ++ flags = __GFP_ZERO | GFP_USER; ++ ++ if (stype == BPF_CGROUP_STORAGE_SHARED) { ++ storage->buf = kmalloc_node(size, flags, map->numa_node); ++ if (!storage->buf) ++ goto enomem; ++ check_and_init_map_lock(map, storage->buf->data); ++ } else { ++ storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags); ++ if (!storage->percpu_buf) ++ goto enomem; ++ } ++ ++ storage->map = (struct bpf_cgroup_storage_map *)map; ++ ++ return storage; ++ ++enomem: ++ bpf_map_uncharge_memlock(map, pages); ++ kfree(storage); ++ return ERR_PTR(-ENOMEM); ++} ++ ++static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu) ++{ ++ struct bpf_cgroup_storage *storage = ++ container_of(rcu, struct bpf_cgroup_storage, rcu); ++ ++ kfree(storage->buf); ++ kfree(storage); ++} ++ ++static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu) ++{ ++ struct bpf_cgroup_storage *storage = ++ container_of(rcu, struct bpf_cgroup_storage, rcu); ++ ++ free_percpu(storage->percpu_buf); ++ kfree(storage); ++} ++ ++void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage) ++{ ++ enum bpf_cgroup_storage_type stype; ++ struct bpf_map *map; ++ u32 pages; ++ ++ if (!storage) ++ return; ++ ++ map = &storage->map->map; ++ ++ bpf_cgroup_storage_calculate_size(map, &pages); ++ bpf_map_uncharge_memlock(map, pages); ++ ++ stype = cgroup_storage_type(map); ++ if (stype == BPF_CGROUP_STORAGE_SHARED) ++ call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu); ++ else ++ call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu); ++} ++ ++void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, ++ struct cgroup *cgroup, ++ enum bpf_attach_type type) ++{ ++ struct bpf_cgroup_storage_map *map; ++ ++ if (!storage) ++ return; ++ ++ storage->key.attach_type = type; ++ storage->key.cgroup_inode_id = cgroup->kn->id.id; ++ ++ map = storage->map; ++ ++ spin_lock_bh(&map->lock); ++ WARN_ON(cgroup_storage_insert(map, storage)); ++ 
list_add(&storage->list, &map->list); ++ spin_unlock_bh(&map->lock); ++} ++ ++void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage) ++{ ++ struct bpf_cgroup_storage_map *map; ++ struct rb_root *root; ++ ++ if (!storage) ++ return; ++ ++ map = storage->map; ++ ++ spin_lock_bh(&map->lock); ++ root = &map->root; ++ rb_erase(&storage->node, root); ++ ++ list_del(&storage->list); ++ spin_unlock_bh(&map->lock); ++} ++ ++#endif +--- /dev/null ++++ b/kernel/bpf/lpm_trie.c +@@ -0,0 +1,746 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* ++ * Longest prefix match list implementation ++ * ++ * Copyright (c) 2016,2017 Daniel Mack ++ * Copyright (c) 2016 David Herrmann ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* Intermediate node */ ++#define LPM_TREE_NODE_FLAG_IM BIT(0) ++ ++struct lpm_trie_node; ++ ++struct lpm_trie_node { ++ struct rcu_head rcu; ++ struct lpm_trie_node __rcu *child[2]; ++ u32 prefixlen; ++ u32 flags; ++ u8 data[0]; ++}; ++ ++struct lpm_trie { ++ struct bpf_map map; ++ struct lpm_trie_node __rcu *root; ++ size_t n_entries; ++ size_t max_prefixlen; ++ size_t data_size; ++ raw_spinlock_t lock; ++}; ++ ++/* This trie implements a longest prefix match algorithm that can be used to ++ * match IP addresses to a stored set of ranges. ++ * ++ * Data stored in @data of struct bpf_lpm_key and struct lpm_trie_node is ++ * interpreted as big endian, so data[0] stores the most significant byte. ++ * ++ * Match ranges are internally stored in instances of struct lpm_trie_node ++ * which each contain their prefix length as well as two pointers that may ++ * lead to more nodes containing more specific matches. Each node also stores ++ * a value that is defined by and returned to userspace via the update_elem ++ * and lookup functions. 
++ * ++ * For instance, let's start with a trie that was created with a prefix length ++ * of 32, so it can be used for IPv4 addresses, and one single element that ++ * matches 192.168.0.0/16. The data array would hence contain ++ * [0xc0, 0xa8, 0x00, 0x00] in big-endian notation. This documentation will ++ * stick to IP-address notation for readability though. ++ * ++ * As the trie is empty initially, the new node (1) will be places as root ++ * node, denoted as (R) in the example below. As there are no other node, both ++ * child pointers are %NULL. ++ * ++ * +----------------+ ++ * | (1) (R) | ++ * | 192.168.0.0/16 | ++ * | value: 1 | ++ * | [0] [1] | ++ * +----------------+ ++ * ++ * Next, let's add a new node (2) matching 192.168.0.0/24. As there is already ++ * a node with the same data and a smaller prefix (ie, a less specific one), ++ * node (2) will become a child of (1). In child index depends on the next bit ++ * that is outside of what (1) matches, and that bit is 0, so (2) will be ++ * child[0] of (1): ++ * ++ * +----------------+ ++ * | (1) (R) | ++ * | 192.168.0.0/16 | ++ * | value: 1 | ++ * | [0] [1] | ++ * +----------------+ ++ * | ++ * +----------------+ ++ * | (2) | ++ * | 192.168.0.0/24 | ++ * | value: 2 | ++ * | [0] [1] | ++ * +----------------+ ++ * ++ * The child[1] slot of (1) could be filled with another node which has bit #17 ++ * (the next bit after the ones that (1) matches on) set to 1. For instance, ++ * 192.168.128.0/24: ++ * ++ * +----------------+ ++ * | (1) (R) | ++ * | 192.168.0.0/16 | ++ * | value: 1 | ++ * | [0] [1] | ++ * +----------------+ ++ * | | ++ * +----------------+ +------------------+ ++ * | (2) | | (3) | ++ * | 192.168.0.0/24 | | 192.168.128.0/24 | ++ * | value: 2 | | value: 3 | ++ * | [0] [1] | | [0] [1] | ++ * +----------------+ +------------------+ ++ * ++ * Let's add another node (4) to the game for 192.168.1.0/24. 
In order to place ++ * it, node (1) is looked at first, and because (4) of the semantics laid out ++ * above (bit #17 is 0), it would normally be attached to (1) as child[0]. ++ * However, that slot is already allocated, so a new node is needed in between. ++ * That node does not have a value attached to it and it will never be ++ * returned to users as result of a lookup. It is only there to differentiate ++ * the traversal further. It will get a prefix as wide as necessary to ++ * distinguish its two children: ++ * ++ * +----------------+ ++ * | (1) (R) | ++ * | 192.168.0.0/16 | ++ * | value: 1 | ++ * | [0] [1] | ++ * +----------------+ ++ * | | ++ * +----------------+ +------------------+ ++ * | (4) (I) | | (3) | ++ * | 192.168.0.0/23 | | 192.168.128.0/24 | ++ * | value: --- | | value: 3 | ++ * | [0] [1] | | [0] [1] | ++ * +----------------+ +------------------+ ++ * | | ++ * +----------------+ +----------------+ ++ * | (2) | | (5) | ++ * | 192.168.0.0/24 | | 192.168.1.0/24 | ++ * | value: 2 | | value: 5 | ++ * | [0] [1] | | [0] [1] | ++ * +----------------+ +----------------+ ++ * ++ * 192.168.1.1/32 would be a child of (5) etc. ++ * ++ * An intermediate node will be turned into a 'real' node on demand. In the ++ * example above, (4) would be re-used if 192.168.0.0/23 is added to the trie. ++ * ++ * A fully populated trie would have a height of 32 nodes, as the trie was ++ * created with a prefix length of 32. ++ * ++ * The lookup starts at the root node. If the current node matches and if there ++ * is a child that can be used to become more specific, the trie is traversed ++ * downwards. The last node in the traversal that is a non-intermediate one is ++ * returned. 
++ */ ++ ++static inline int extract_bit(const u8 *data, size_t index) ++{ ++ return !!(data[index / 8] & (1 << (7 - (index % 8)))); ++} ++ ++/** ++ * longest_prefix_match() - determine the longest prefix ++ * @trie: The trie to get internal sizes from ++ * @node: The node to operate on ++ * @key: The key to compare to @node ++ * ++ * Determine the longest prefix of @node that matches the bits in @key. ++ */ ++static size_t longest_prefix_match(const struct lpm_trie *trie, ++ const struct lpm_trie_node *node, ++ const struct bpf_lpm_trie_key *key) ++{ ++ u32 limit = min(node->prefixlen, key->prefixlen); ++ u32 prefixlen = 0, i = 0; ++ ++ BUILD_BUG_ON(offsetof(struct lpm_trie_node, data) % sizeof(u32)); ++ BUILD_BUG_ON(offsetof(struct bpf_lpm_trie_key, data) % sizeof(u32)); ++ ++#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && defined(CONFIG_64BIT) ++ ++ /* data_size >= 16 has very small probability. ++ * We do not use a loop for optimal code generation. ++ */ ++ if (trie->data_size >= 8) { ++ u64 diff = be64_to_cpu(*(__be64 *)node->data ^ ++ *(__be64 *)key->data); ++ ++ prefixlen = 64 - fls64(diff); ++ if (prefixlen >= limit) ++ return limit; ++ if (diff) ++ return prefixlen; ++ i = 8; ++ } ++#endif ++ ++ while (trie->data_size >= i + 4) { ++ u32 diff = be32_to_cpu(*(__be32 *)&node->data[i] ^ ++ *(__be32 *)&key->data[i]); ++ ++ prefixlen += 32 - fls(diff); ++ if (prefixlen >= limit) ++ return limit; ++ if (diff) ++ return prefixlen; ++ i += 4; ++ } ++ ++ if (trie->data_size >= i + 2) { ++ u16 diff = be16_to_cpu(*(__be16 *)&node->data[i] ^ ++ *(__be16 *)&key->data[i]); ++ ++ prefixlen += 16 - fls(diff); ++ if (prefixlen >= limit) ++ return limit; ++ if (diff) ++ return prefixlen; ++ i += 2; ++ } ++ ++ if (trie->data_size >= i + 1) { ++ prefixlen += 8 - fls(node->data[i] ^ key->data[i]); ++ ++ if (prefixlen >= limit) ++ return limit; ++ } ++ ++ return prefixlen; ++} ++ ++/* Called from syscall or from eBPF program */ ++static void *trie_lookup_elem(struct 
bpf_map *map, void *_key) ++{ ++ struct lpm_trie *trie = container_of(map, struct lpm_trie, map); ++ struct lpm_trie_node *node, *found = NULL; ++ struct bpf_lpm_trie_key *key = _key; ++ ++ /* Start walking the trie from the root node ... */ ++ ++ for (node = rcu_dereference(trie->root); node;) { ++ unsigned int next_bit; ++ size_t matchlen; ++ ++ /* Determine the longest prefix of @node that matches @key. ++ * If it's the maximum possible prefix for this trie, we have ++ * an exact match and can return it directly. ++ */ ++ matchlen = longest_prefix_match(trie, node, key); ++ if (matchlen == trie->max_prefixlen) { ++ found = node; ++ break; ++ } ++ ++ /* If the number of bits that match is smaller than the prefix ++ * length of @node, bail out and return the node we have seen ++ * last in the traversal (ie, the parent). ++ */ ++ if (matchlen < node->prefixlen) ++ break; ++ ++ /* Consider this node as return candidate unless it is an ++ * artificially added intermediate one. ++ */ ++ if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) ++ found = node; ++ ++ /* If the node match is fully satisfied, let's see if we can ++ * become more specific. Determine the next bit in the key and ++ * traverse down. 
++ */ ++ next_bit = extract_bit(key->data, node->prefixlen); ++ node = rcu_dereference(node->child[next_bit]); ++ } ++ ++ if (!found) ++ return NULL; ++ ++ return found->data + trie->data_size; ++} ++ ++static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie, ++ const void *value) ++{ ++ struct lpm_trie_node *node; ++ size_t size = sizeof(struct lpm_trie_node) + trie->data_size; ++ ++ if (value) ++ size += trie->map.value_size; ++ ++ node = kmalloc_node(size, GFP_ATOMIC | __GFP_NOWARN, ++ trie->map.numa_node); ++ if (!node) ++ return NULL; ++ ++ node->flags = 0; ++ ++ if (value) ++ memcpy(node->data + trie->data_size, value, ++ trie->map.value_size); ++ ++ return node; ++} ++ ++/* Called from syscall or from eBPF program */ ++static int trie_update_elem(struct bpf_map *map, ++ void *_key, void *value, u64 flags) ++{ ++ struct lpm_trie *trie = container_of(map, struct lpm_trie, map); ++ struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL; ++ struct lpm_trie_node __rcu **slot; ++ struct bpf_lpm_trie_key *key = _key; ++ unsigned long irq_flags; ++ unsigned int next_bit; ++ size_t matchlen = 0; ++ int ret = 0; ++ ++ if (unlikely(flags > BPF_EXIST)) ++ return -EINVAL; ++ ++ if (key->prefixlen > trie->max_prefixlen) ++ return -EINVAL; ++ ++ raw_spin_lock_irqsave(&trie->lock, irq_flags); ++ ++ /* Allocate and fill a new node */ ++ ++ if (trie->n_entries == trie->map.max_entries) { ++ ret = -ENOSPC; ++ goto out; ++ } ++ ++ new_node = lpm_trie_node_alloc(trie, value); ++ if (!new_node) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ trie->n_entries++; ++ ++ new_node->prefixlen = key->prefixlen; ++ RCU_INIT_POINTER(new_node->child[0], NULL); ++ RCU_INIT_POINTER(new_node->child[1], NULL); ++ memcpy(new_node->data, key->data, trie->data_size); ++ ++ /* Now find a slot to attach the new node. 
To do that, walk the tree ++ * from the root and match as many bits as possible for each node until ++ * we either find an empty slot or a slot that needs to be replaced by ++ * an intermediate node. ++ */ ++ slot = &trie->root; ++ ++ while ((node = rcu_dereference_protected(*slot, ++ lockdep_is_held(&trie->lock)))) { ++ matchlen = longest_prefix_match(trie, node, key); ++ ++ if (node->prefixlen != matchlen || ++ node->prefixlen == key->prefixlen || ++ node->prefixlen == trie->max_prefixlen) ++ break; ++ ++ next_bit = extract_bit(key->data, node->prefixlen); ++ slot = &node->child[next_bit]; ++ } ++ ++ /* If the slot is empty (a free child pointer or an empty root), ++ * simply assign the @new_node to that slot and be done. ++ */ ++ if (!node) { ++ rcu_assign_pointer(*slot, new_node); ++ goto out; ++ } ++ ++ /* If the slot we picked already exists, replace it with @new_node ++ * which already has the correct data array set. ++ */ ++ if (node->prefixlen == matchlen) { ++ new_node->child[0] = node->child[0]; ++ new_node->child[1] = node->child[1]; ++ ++ if (!(node->flags & LPM_TREE_NODE_FLAG_IM)) ++ trie->n_entries--; ++ ++ rcu_assign_pointer(*slot, new_node); ++ kfree_rcu(node, rcu); ++ ++ goto out; ++ } ++ ++ /* If the new node matches the prefix completely, it must be inserted ++ * as an ancestor. Simply insert it between @node and *@slot. 
++ */ ++ if (matchlen == key->prefixlen) { ++ next_bit = extract_bit(node->data, matchlen); ++ rcu_assign_pointer(new_node->child[next_bit], node); ++ rcu_assign_pointer(*slot, new_node); ++ goto out; ++ } ++ ++ im_node = lpm_trie_node_alloc(trie, NULL); ++ if (!im_node) { ++ ret = -ENOMEM; ++ goto out; ++ } ++ ++ im_node->prefixlen = matchlen; ++ im_node->flags |= LPM_TREE_NODE_FLAG_IM; ++ memcpy(im_node->data, node->data, trie->data_size); ++ ++ /* Now determine which child to install in which slot */ ++ if (extract_bit(key->data, matchlen)) { ++ rcu_assign_pointer(im_node->child[0], node); ++ rcu_assign_pointer(im_node->child[1], new_node); ++ } else { ++ rcu_assign_pointer(im_node->child[0], new_node); ++ rcu_assign_pointer(im_node->child[1], node); ++ } ++ ++ /* Finally, assign the intermediate node to the determined spot */ ++ rcu_assign_pointer(*slot, im_node); ++ ++out: ++ if (ret) { ++ if (new_node) ++ trie->n_entries--; ++ ++ kfree(new_node); ++ kfree(im_node); ++ } ++ ++ raw_spin_unlock_irqrestore(&trie->lock, irq_flags); ++ ++ return ret; ++} ++ ++/* Called from syscall or from eBPF program */ ++static int trie_delete_elem(struct bpf_map *map, void *_key) ++{ ++ struct lpm_trie *trie = container_of(map, struct lpm_trie, map); ++ struct bpf_lpm_trie_key *key = _key; ++ struct lpm_trie_node __rcu **trim, **trim2; ++ struct lpm_trie_node *node, *parent; ++ unsigned long irq_flags; ++ unsigned int next_bit; ++ size_t matchlen = 0; ++ int ret = 0; ++ ++ if (key->prefixlen > trie->max_prefixlen) ++ return -EINVAL; ++ ++ raw_spin_lock_irqsave(&trie->lock, irq_flags); ++ ++ /* Walk the tree looking for an exact key/length match and keeping ++ * track of the path we traverse. We will need to know the node ++ * we wish to delete, and the slot that points to the node we want ++ * to delete. We may also need to know the nodes parent and the ++ * slot that contains it. 
++ */ ++ trim = &trie->root; ++ trim2 = trim; ++ parent = NULL; ++ while ((node = rcu_dereference_protected( ++ *trim, lockdep_is_held(&trie->lock)))) { ++ matchlen = longest_prefix_match(trie, node, key); ++ ++ if (node->prefixlen != matchlen || ++ node->prefixlen == key->prefixlen) ++ break; ++ ++ parent = node; ++ trim2 = trim; ++ next_bit = extract_bit(key->data, node->prefixlen); ++ trim = &node->child[next_bit]; ++ } ++ ++ if (!node || node->prefixlen != key->prefixlen || ++ node->prefixlen != matchlen || ++ (node->flags & LPM_TREE_NODE_FLAG_IM)) { ++ ret = -ENOENT; ++ goto out; ++ } ++ ++ trie->n_entries--; ++ ++ /* If the node we are removing has two children, simply mark it ++ * as intermediate and we are done. ++ */ ++ if (rcu_access_pointer(node->child[0]) && ++ rcu_access_pointer(node->child[1])) { ++ node->flags |= LPM_TREE_NODE_FLAG_IM; ++ goto out; ++ } ++ ++ /* If the parent of the node we are about to delete is an intermediate ++ * node, and the deleted node doesn't have any children, we can delete ++ * the intermediate parent as well and promote its other child ++ * up the tree. Doing this maintains the invariant that all ++ * intermediate nodes have exactly 2 children and that there are no ++ * unnecessary intermediate nodes in the tree. ++ */ ++ if (parent && (parent->flags & LPM_TREE_NODE_FLAG_IM) && ++ !node->child[0] && !node->child[1]) { ++ if (node == rcu_access_pointer(parent->child[0])) ++ rcu_assign_pointer( ++ *trim2, rcu_access_pointer(parent->child[1])); ++ else ++ rcu_assign_pointer( ++ *trim2, rcu_access_pointer(parent->child[0])); ++ kfree_rcu(parent, rcu); ++ kfree_rcu(node, rcu); ++ goto out; ++ } ++ ++ /* The node we are removing has either zero or one child. If there ++ * is a child, move it into the removed node's slot then delete ++ * the node. Otherwise just clear the slot and delete the node. 
++ */ ++ if (node->child[0]) ++ rcu_assign_pointer(*trim, rcu_access_pointer(node->child[0])); ++ else if (node->child[1]) ++ rcu_assign_pointer(*trim, rcu_access_pointer(node->child[1])); ++ else ++ RCU_INIT_POINTER(*trim, NULL); ++ kfree_rcu(node, rcu); ++ ++out: ++ raw_spin_unlock_irqrestore(&trie->lock, irq_flags); ++ ++ return ret; ++} ++ ++#define LPM_DATA_SIZE_MAX 256 ++#define LPM_DATA_SIZE_MIN 1 ++ ++#define LPM_VAL_SIZE_MAX (KMALLOC_MAX_SIZE - LPM_DATA_SIZE_MAX - \ ++ sizeof(struct lpm_trie_node)) ++#define LPM_VAL_SIZE_MIN 1 ++ ++#define LPM_KEY_SIZE(X) (sizeof(struct bpf_lpm_trie_key) + (X)) ++#define LPM_KEY_SIZE_MAX LPM_KEY_SIZE(LPM_DATA_SIZE_MAX) ++#define LPM_KEY_SIZE_MIN LPM_KEY_SIZE(LPM_DATA_SIZE_MIN) ++ ++#define LPM_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE | \ ++ BPF_F_ACCESS_MASK) ++ ++static struct bpf_map *trie_alloc(union bpf_attr *attr) ++{ ++ struct lpm_trie *trie; ++ u64 cost = sizeof(*trie), cost_per_node; ++ int ret; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return ERR_PTR(-EPERM); ++ ++ /* check sanity of attributes */ ++ if (attr->max_entries == 0 || ++ !(attr->map_flags & BPF_F_NO_PREALLOC) || ++ attr->map_flags & ~LPM_CREATE_FLAG_MASK || ++ !bpf_map_flags_access_ok(attr->map_flags) || ++ attr->key_size < LPM_KEY_SIZE_MIN || ++ attr->key_size > LPM_KEY_SIZE_MAX || ++ attr->value_size < LPM_VAL_SIZE_MIN || ++ attr->value_size > LPM_VAL_SIZE_MAX) ++ return ERR_PTR(-EINVAL); ++ ++ trie = kzalloc(sizeof(*trie), GFP_USER | __GFP_NOWARN); ++ if (!trie) ++ return ERR_PTR(-ENOMEM); ++ ++ /* copy mandatory map attributes */ ++ bpf_map_init_from_attr(&trie->map, attr); ++ trie->data_size = attr->key_size - ++ offsetof(struct bpf_lpm_trie_key, data); ++ trie->max_prefixlen = trie->data_size * 8; ++ ++ cost_per_node = sizeof(struct lpm_trie_node) + ++ attr->value_size + trie->data_size; ++ cost += (u64) attr->max_entries * cost_per_node; ++ ++ ret = bpf_map_charge_init(&trie->map.memory, cost); ++ if (ret) ++ goto out_err; ++ ++ 
raw_spin_lock_init(&trie->lock); ++ ++ return &trie->map; ++out_err: ++ kfree(trie); ++ return ERR_PTR(ret); ++} ++ ++static void trie_free(struct bpf_map *map) ++{ ++ struct lpm_trie *trie = container_of(map, struct lpm_trie, map); ++ struct lpm_trie_node __rcu **slot; ++ struct lpm_trie_node *node; ++ ++ /* Wait for outstanding programs to complete ++ * update/lookup/delete/get_next_key and free the trie. ++ */ ++ synchronize_rcu(); ++ ++ /* Always start at the root and walk down to a node that has no ++ * children. Then free that node, nullify its reference in the parent ++ * and start over. ++ */ ++ ++ for (;;) { ++ slot = &trie->root; ++ ++ for (;;) { ++ node = rcu_dereference_protected(*slot, 1); ++ if (!node) ++ goto out; ++ ++ if (rcu_access_pointer(node->child[0])) { ++ slot = &node->child[0]; ++ continue; ++ } ++ ++ if (rcu_access_pointer(node->child[1])) { ++ slot = &node->child[1]; ++ continue; ++ } ++ ++ kfree(node); ++ RCU_INIT_POINTER(*slot, NULL); ++ break; ++ } ++ } ++ ++out: ++ kfree(trie); ++} ++ ++static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key) ++{ ++ struct lpm_trie_node *node, *next_node = NULL, *parent, *search_root; ++ struct lpm_trie *trie = container_of(map, struct lpm_trie, map); ++ struct bpf_lpm_trie_key *key = _key, *next_key = _next_key; ++ struct lpm_trie_node **node_stack = NULL; ++ int err = 0, stack_ptr = -1; ++ unsigned int next_bit; ++ size_t matchlen; ++ ++ /* The get_next_key follows postorder. For the 4 node example in ++ * the top of this file, the trie_get_next_key() returns the following ++ * one after another: ++ * 192.168.0.0/24 ++ * 192.168.1.0/24 ++ * 192.168.128.0/24 ++ * 192.168.0.0/16 ++ * ++ * The idea is to return more specific keys before less specific ones. 
++ */ ++ ++ /* Empty trie */ ++ search_root = rcu_dereference(trie->root); ++ if (!search_root) ++ return -ENOENT; ++ ++ /* For invalid key, find the leftmost node in the trie */ ++ if (!key || key->prefixlen > trie->max_prefixlen) ++ goto find_leftmost; ++ ++ node_stack = kmalloc_array(trie->max_prefixlen, ++ sizeof(struct lpm_trie_node *), ++ GFP_ATOMIC | __GFP_NOWARN); ++ if (!node_stack) ++ return -ENOMEM; ++ ++ /* Try to find the exact node for the given key */ ++ for (node = search_root; node;) { ++ node_stack[++stack_ptr] = node; ++ matchlen = longest_prefix_match(trie, node, key); ++ if (node->prefixlen != matchlen || ++ node->prefixlen == key->prefixlen) ++ break; ++ ++ next_bit = extract_bit(key->data, node->prefixlen); ++ node = rcu_dereference(node->child[next_bit]); ++ } ++ if (!node || node->prefixlen != key->prefixlen || ++ (node->flags & LPM_TREE_NODE_FLAG_IM)) ++ goto find_leftmost; ++ ++ /* The node with the exactly-matching key has been found, ++ * find the first node in postorder after the matched node. ++ */ ++ node = node_stack[stack_ptr]; ++ while (stack_ptr > 0) { ++ parent = node_stack[stack_ptr - 1]; ++ if (rcu_dereference(parent->child[0]) == node) { ++ search_root = rcu_dereference(parent->child[1]); ++ if (search_root) ++ goto find_leftmost; ++ } ++ if (!(parent->flags & LPM_TREE_NODE_FLAG_IM)) { ++ next_node = parent; ++ goto do_copy; ++ } ++ ++ node = parent; ++ stack_ptr--; ++ } ++ ++ /* did not find anything */ ++ err = -ENOENT; ++ goto free_stack; ++ ++find_leftmost: ++ /* Find the leftmost non-intermediate node, all intermediate nodes ++ * have exact two children, so this function will never return NULL. 
++ */ ++ for (node = search_root; node;) { ++ if (node->flags & LPM_TREE_NODE_FLAG_IM) { ++ node = rcu_dereference(node->child[0]); ++ } else { ++ next_node = node; ++ node = rcu_dereference(node->child[0]); ++ if (!node) ++ node = rcu_dereference(next_node->child[1]); ++ } ++ } ++do_copy: ++ next_key->prefixlen = next_node->prefixlen; ++ memcpy((void *)next_key + offsetof(struct bpf_lpm_trie_key, data), ++ next_node->data, trie->data_size); ++free_stack: ++ kfree(node_stack); ++ return err; ++} ++ ++static int trie_check_btf(const struct bpf_map *map, ++ const struct btf *btf, ++ const struct btf_type *key_type, ++ const struct btf_type *value_type) ++{ ++ /* Keys must have struct bpf_lpm_trie_key embedded. */ ++ return BTF_INFO_KIND(key_type->info) != BTF_KIND_STRUCT ? ++ -EINVAL : 0; ++} ++ ++const struct bpf_map_ops trie_map_ops = { ++ .map_alloc = trie_alloc, ++ .map_free = trie_free, ++ .map_get_next_key = trie_get_next_key, ++ .map_lookup_elem = trie_lookup_elem, ++ .map_update_elem = trie_update_elem, ++ .map_delete_elem = trie_delete_elem, ++ .map_check_btf = trie_check_btf, ++}; +--- a/kernel/bpf/Makefile ++++ b/kernel/bpf/Makefile +@@ -1,4 +1,23 @@ ++# SPDX-License-Identifier: GPL-2.0 + obj-y := core.o ++ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y) ++# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details ++cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse ++endif ++CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy) + +-obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o +-obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o ++obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o ++obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o ++obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ++obj-$(CONFIG_BPF_SYSCALL) += disasm.o ++obj-$(CONFIG_BPF_SYSCALL) += btf.o ++ifeq ($(CONFIG_NET),y) 
++obj-$(CONFIG_BPF_SYSCALL) += devmap.o ++endif ++ifeq ($(CONFIG_PERF_EVENTS),y) ++obj-$(CONFIG_BPF_SYSCALL) += stackmap.o ++endif ++obj-$(CONFIG_CGROUP_BPF) += cgroup.o ++ifeq ($(CONFIG_SYSFS),y) ++obj-$(CONFIG_DEBUG_INFO_BTF) += sysfs_btf.o ++endif +--- /dev/null ++++ b/kernel/bpf/map_in_map.c +@@ -0,0 +1,120 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2017 Facebook ++ */ ++#include ++#include ++ ++#include "map_in_map.h" ++ ++struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd) ++{ ++ struct bpf_map *inner_map, *inner_map_meta; ++ u32 inner_map_meta_size; ++ struct fd f; ++ ++ f = fdget(inner_map_ufd); ++ inner_map = __bpf_map_get(f); ++ if (IS_ERR(inner_map)) ++ return inner_map; ++ ++ /* prog_array->owner_prog_type and owner_jited ++ * is a runtime binding. Doing static check alone ++ * in the verifier is not enough. ++ */ ++ if (inner_map->map_type == BPF_MAP_TYPE_PROG_ARRAY || ++ inner_map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || ++ inner_map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) { ++ fdput(f); ++ return ERR_PTR(-ENOTSUPP); ++ } ++ ++ /* Does not support >1 level map-in-map */ ++ if (inner_map->inner_map_meta) { ++ fdput(f); ++ return ERR_PTR(-EINVAL); ++ } ++ ++ if (map_value_has_spin_lock(inner_map)) { ++ fdput(f); ++ return ERR_PTR(-ENOTSUPP); ++ } ++ ++ inner_map_meta_size = sizeof(*inner_map_meta); ++ /* In some cases verifier needs to access beyond just base map. 
*/ ++ if (inner_map->ops == &array_map_ops) ++ inner_map_meta_size = sizeof(struct bpf_array); ++ ++ inner_map_meta = kzalloc(inner_map_meta_size, GFP_USER); ++ if (!inner_map_meta) { ++ fdput(f); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ inner_map_meta->map_type = inner_map->map_type; ++ inner_map_meta->key_size = inner_map->key_size; ++ inner_map_meta->value_size = inner_map->value_size; ++ inner_map_meta->map_flags = inner_map->map_flags; ++ inner_map_meta->max_entries = inner_map->max_entries; ++ inner_map_meta->spin_lock_off = inner_map->spin_lock_off; ++ ++ /* Misc members not needed in bpf_map_meta_equal() check. */ ++ inner_map_meta->ops = inner_map->ops; ++ if (inner_map->ops == &array_map_ops) { ++ inner_map_meta->unpriv_array = inner_map->unpriv_array; ++ container_of(inner_map_meta, struct bpf_array, map)->index_mask = ++ container_of(inner_map, struct bpf_array, map)->index_mask; ++ } ++ ++ fdput(f); ++ return inner_map_meta; ++} ++ ++void bpf_map_meta_free(struct bpf_map *map_meta) ++{ ++ kfree(map_meta); ++} ++ ++bool bpf_map_meta_equal(const struct bpf_map *meta0, ++ const struct bpf_map *meta1) ++{ ++ /* No need to compare ops because it is covered by map_type */ ++ return meta0->map_type == meta1->map_type && ++ meta0->key_size == meta1->key_size && ++ meta0->value_size == meta1->value_size && ++ meta0->map_flags == meta1->map_flags && ++ meta0->max_entries == meta1->max_entries; ++} ++ ++void *bpf_map_fd_get_ptr(struct bpf_map *map, ++ struct file *map_file /* not used */, ++ int ufd) ++{ ++ struct bpf_map *inner_map; ++ struct fd f; ++ ++ f = fdget(ufd); ++ inner_map = __bpf_map_get(f); ++ if (IS_ERR(inner_map)) ++ return inner_map; ++ ++ if (bpf_map_meta_equal(map->inner_map_meta, inner_map)) ++ inner_map = bpf_map_inc(inner_map, false); ++ else ++ inner_map = ERR_PTR(-EINVAL); ++ ++ fdput(f); ++ return inner_map; ++} ++ ++void bpf_map_fd_put_ptr(void *ptr) ++{ ++ /* ptr->ops->map_free() has to go through one ++ * rcu grace period by itself. 
++ */ ++ bpf_map_put(ptr); ++} ++ ++u32 bpf_map_fd_sys_lookup_elem(void *ptr) ++{ ++ return ((struct bpf_map *)ptr)->id; ++} +--- /dev/null ++++ b/kernel/bpf/map_in_map.h +@@ -0,0 +1,21 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* Copyright (c) 2017 Facebook ++ */ ++#ifndef __MAP_IN_MAP_H__ ++#define __MAP_IN_MAP_H__ ++ ++#include ++ ++struct file; ++struct bpf_map; ++ ++struct bpf_map *bpf_map_meta_alloc(int inner_map_ufd); ++void bpf_map_meta_free(struct bpf_map *map_meta); ++bool bpf_map_meta_equal(const struct bpf_map *meta0, ++ const struct bpf_map *meta1); ++void *bpf_map_fd_get_ptr(struct bpf_map *map, struct file *map_file, ++ int ufd); ++void bpf_map_fd_put_ptr(void *ptr); ++u32 bpf_map_fd_sys_lookup_elem(void *ptr); ++ ++#endif +--- /dev/null ++++ b/kernel/bpf/percpu_freelist.c +@@ -0,0 +1,118 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2016 Facebook ++ */ ++#include "percpu_freelist.h" ++ ++int pcpu_freelist_init(struct pcpu_freelist *s) ++{ ++ int cpu; ++ ++ s->freelist = alloc_percpu(struct pcpu_freelist_head); ++ if (!s->freelist) ++ return -ENOMEM; ++ ++ for_each_possible_cpu(cpu) { ++ struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu); ++ ++ raw_spin_lock_init(&head->lock); ++ head->first = NULL; ++ } ++ return 0; ++} ++ ++void pcpu_freelist_destroy(struct pcpu_freelist *s) ++{ ++ free_percpu(s->freelist); ++} ++ ++static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head, ++ struct pcpu_freelist_node *node) ++{ ++ raw_spin_lock(&head->lock); ++ node->next = head->first; ++ head->first = node; ++ raw_spin_unlock(&head->lock); ++} ++ ++void __pcpu_freelist_push(struct pcpu_freelist *s, ++ struct pcpu_freelist_node *node) ++{ ++ struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); ++ ++ ___pcpu_freelist_push(head, node); ++} ++ ++void pcpu_freelist_push(struct pcpu_freelist *s, ++ struct pcpu_freelist_node *node) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ 
__pcpu_freelist_push(s, node); ++ local_irq_restore(flags); ++} ++ ++void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, ++ u32 nr_elems) ++{ ++ struct pcpu_freelist_head *head; ++ unsigned long flags; ++ int i, cpu, pcpu_entries; ++ ++ pcpu_entries = nr_elems / num_possible_cpus() + 1; ++ i = 0; ++ ++ /* disable irq to workaround lockdep false positive ++ * in bpf usage pcpu_freelist_populate() will never race ++ * with pcpu_freelist_push() ++ */ ++ local_irq_save(flags); ++ for_each_possible_cpu(cpu) { ++again: ++ head = per_cpu_ptr(s->freelist, cpu); ++ ___pcpu_freelist_push(head, buf); ++ i++; ++ buf += elem_size; ++ if (i == nr_elems) ++ break; ++ if (i % pcpu_entries) ++ goto again; ++ } ++ local_irq_restore(flags); ++} ++ ++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s) ++{ ++ struct pcpu_freelist_head *head; ++ struct pcpu_freelist_node *node; ++ int orig_cpu, cpu; ++ ++ orig_cpu = cpu = raw_smp_processor_id(); ++ while (1) { ++ head = per_cpu_ptr(s->freelist, cpu); ++ raw_spin_lock(&head->lock); ++ node = head->first; ++ if (node) { ++ head->first = node->next; ++ raw_spin_unlock(&head->lock); ++ return node; ++ } ++ raw_spin_unlock(&head->lock); ++ cpu = cpumask_next(cpu, cpu_possible_mask); ++ if (cpu >= nr_cpu_ids) ++ cpu = 0; ++ if (cpu == orig_cpu) ++ return NULL; ++ } ++} ++ ++struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) ++{ ++ struct pcpu_freelist_node *ret; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ ret = __pcpu_freelist_pop(s); ++ local_irq_restore(flags); ++ return ret; ++} +--- /dev/null ++++ b/kernel/bpf/percpu_freelist.h +@@ -0,0 +1,32 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* Copyright (c) 2016 Facebook ++ */ ++#ifndef __PERCPU_FREELIST_H__ ++#define __PERCPU_FREELIST_H__ ++#include ++#include ++ ++struct pcpu_freelist_head { ++ struct pcpu_freelist_node *first; ++ raw_spinlock_t lock; ++}; ++ ++struct pcpu_freelist { ++ struct 
pcpu_freelist_head __percpu *freelist; ++}; ++ ++struct pcpu_freelist_node { ++ struct pcpu_freelist_node *next; ++}; ++ ++/* pcpu_freelist_* do spin_lock_irqsave. */ ++void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); ++struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *); ++/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */ ++void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); ++struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *); ++void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, ++ u32 nr_elems); ++int pcpu_freelist_init(struct pcpu_freelist *); ++void pcpu_freelist_destroy(struct pcpu_freelist *s); ++#endif +--- /dev/null ++++ b/kernel/bpf/queue_stack_maps.c +@@ -0,0 +1,289 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * queue_stack_maps.c: BPF queue and stack maps ++ * ++ * Copyright (c) 2018 Politecnico di Torino ++ */ ++#include ++#include ++#include ++#include ++#include "percpu_freelist.h" ++ ++#define QUEUE_STACK_CREATE_FLAG_MASK \ ++ (BPF_F_NUMA_NODE | BPF_F_ACCESS_MASK) ++ ++struct bpf_queue_stack { ++ struct bpf_map map; ++ raw_spinlock_t lock; ++ u32 head, tail; ++ u32 size; /* max_entries + 1 */ ++ ++ char elements[0] __aligned(8); ++}; ++ ++static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map) ++{ ++ return container_of(map, struct bpf_queue_stack, map); ++} ++ ++static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs) ++{ ++ return qs->head == qs->tail; ++} ++ ++static bool queue_stack_map_is_full(struct bpf_queue_stack *qs) ++{ ++ u32 head = qs->head + 1; ++ ++ if (unlikely(head >= qs->size)) ++ head = 0; ++ ++ return head == qs->tail; ++} ++ ++/* Called from syscall */ ++static int queue_stack_map_alloc_check(union bpf_attr *attr) ++{ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ /* check sanity of attributes */ ++ if (attr->max_entries == 0 || attr->key_size != 0 || ++ 
attr->value_size == 0 || ++ attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK || ++ !bpf_map_flags_access_ok(attr->map_flags)) ++ return -EINVAL; ++ ++ if (attr->value_size > KMALLOC_MAX_SIZE) ++ /* if value_size is bigger, the user space won't be able to ++ * access the elements. ++ */ ++ return -E2BIG; ++ ++ return 0; ++} ++ ++static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr) ++{ ++ int ret, numa_node = bpf_map_attr_numa_node(attr); ++ struct bpf_map_memory mem = {0}; ++ struct bpf_queue_stack *qs; ++ u64 size, queue_size, cost; ++ ++ size = (u64) attr->max_entries + 1; ++ cost = queue_size = sizeof(*qs) + size * attr->value_size; ++ ++ ret = bpf_map_charge_init(&mem, cost); ++ if (ret < 0) ++ return ERR_PTR(ret); ++ ++ qs = bpf_map_area_alloc(queue_size, numa_node); ++ if (!qs) { ++ bpf_map_charge_finish(&mem); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ memset(qs, 0, sizeof(*qs)); ++ ++ bpf_map_init_from_attr(&qs->map, attr); ++ ++ bpf_map_charge_move(&qs->map.memory, &mem); ++ qs->size = size; ++ ++ raw_spin_lock_init(&qs->lock); ++ ++ return &qs->map; ++} ++ ++/* Called when map->refcnt goes to zero, either from workqueue or from syscall */ ++static void queue_stack_map_free(struct bpf_map *map) ++{ ++ struct bpf_queue_stack *qs = bpf_queue_stack(map); ++ ++ /* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0, ++ * so the programs (can be more than one that used this map) were ++ * disconnected from events. 
Wait for outstanding critical sections in ++ * these programs to complete ++ */ ++ synchronize_rcu(); ++ ++ bpf_map_area_free(qs); ++} ++ ++static int __queue_map_get(struct bpf_map *map, void *value, bool delete) ++{ ++ struct bpf_queue_stack *qs = bpf_queue_stack(map); ++ unsigned long flags; ++ int err = 0; ++ void *ptr; ++ ++ raw_spin_lock_irqsave(&qs->lock, flags); ++ ++ if (queue_stack_map_is_empty(qs)) { ++ memset(value, 0, qs->map.value_size); ++ err = -ENOENT; ++ goto out; ++ } ++ ++ ptr = &qs->elements[qs->tail * qs->map.value_size]; ++ memcpy(value, ptr, qs->map.value_size); ++ ++ if (delete) { ++ if (unlikely(++qs->tail >= qs->size)) ++ qs->tail = 0; ++ } ++ ++out: ++ raw_spin_unlock_irqrestore(&qs->lock, flags); ++ return err; ++} ++ ++ ++static int __stack_map_get(struct bpf_map *map, void *value, bool delete) ++{ ++ struct bpf_queue_stack *qs = bpf_queue_stack(map); ++ unsigned long flags; ++ int err = 0; ++ void *ptr; ++ u32 index; ++ ++ raw_spin_lock_irqsave(&qs->lock, flags); ++ ++ if (queue_stack_map_is_empty(qs)) { ++ memset(value, 0, qs->map.value_size); ++ err = -ENOENT; ++ goto out; ++ } ++ ++ index = qs->head - 1; ++ if (unlikely(index >= qs->size)) ++ index = qs->size - 1; ++ ++ ptr = &qs->elements[index * qs->map.value_size]; ++ memcpy(value, ptr, qs->map.value_size); ++ ++ if (delete) ++ qs->head = index; ++ ++out: ++ raw_spin_unlock_irqrestore(&qs->lock, flags); ++ return err; ++} ++ ++/* Called from syscall or from eBPF program */ ++static int queue_map_peek_elem(struct bpf_map *map, void *value) ++{ ++ return __queue_map_get(map, value, false); ++} ++ ++/* Called from syscall or from eBPF program */ ++static int stack_map_peek_elem(struct bpf_map *map, void *value) ++{ ++ return __stack_map_get(map, value, false); ++} ++ ++/* Called from syscall or from eBPF program */ ++static int queue_map_pop_elem(struct bpf_map *map, void *value) ++{ ++ return __queue_map_get(map, value, true); ++} ++ ++/* Called from syscall or from eBPF program 
*/ ++static int stack_map_pop_elem(struct bpf_map *map, void *value) ++{ ++ return __stack_map_get(map, value, true); ++} ++ ++/* Called from syscall or from eBPF program */ ++static int queue_stack_map_push_elem(struct bpf_map *map, void *value, ++ u64 flags) ++{ ++ struct bpf_queue_stack *qs = bpf_queue_stack(map); ++ unsigned long irq_flags; ++ int err = 0; ++ void *dst; ++ ++ /* BPF_EXIST is used to force making room for a new element in case the ++ * map is full ++ */ ++ bool replace = (flags & BPF_EXIST); ++ ++ /* Check supported flags for queue and stack maps */ ++ if (flags & BPF_NOEXIST || flags > BPF_EXIST) ++ return -EINVAL; ++ ++ raw_spin_lock_irqsave(&qs->lock, irq_flags); ++ ++ if (queue_stack_map_is_full(qs)) { ++ if (!replace) { ++ err = -E2BIG; ++ goto out; ++ } ++ /* advance tail pointer to overwrite oldest element */ ++ if (unlikely(++qs->tail >= qs->size)) ++ qs->tail = 0; ++ } ++ ++ dst = &qs->elements[qs->head * qs->map.value_size]; ++ memcpy(dst, value, qs->map.value_size); ++ ++ if (unlikely(++qs->head >= qs->size)) ++ qs->head = 0; ++ ++out: ++ raw_spin_unlock_irqrestore(&qs->lock, irq_flags); ++ return err; ++} ++ ++/* Called from syscall or from eBPF program */ ++static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key) ++{ ++ return NULL; ++} ++ ++/* Called from syscall or from eBPF program */ ++static int queue_stack_map_update_elem(struct bpf_map *map, void *key, ++ void *value, u64 flags) ++{ ++ return -EINVAL; ++} ++ ++/* Called from syscall or from eBPF program */ ++static int queue_stack_map_delete_elem(struct bpf_map *map, void *key) ++{ ++ return -EINVAL; ++} ++ ++/* Called from syscall */ ++static int queue_stack_map_get_next_key(struct bpf_map *map, void *key, ++ void *next_key) ++{ ++ return -EINVAL; ++} ++ ++const struct bpf_map_ops queue_map_ops = { ++ .map_alloc_check = queue_stack_map_alloc_check, ++ .map_alloc = queue_stack_map_alloc, ++ .map_free = queue_stack_map_free, ++ .map_lookup_elem = 
queue_stack_map_lookup_elem, ++ .map_update_elem = queue_stack_map_update_elem, ++ .map_delete_elem = queue_stack_map_delete_elem, ++ .map_push_elem = queue_stack_map_push_elem, ++ .map_pop_elem = queue_map_pop_elem, ++ .map_peek_elem = queue_map_peek_elem, ++ .map_get_next_key = queue_stack_map_get_next_key, ++}; ++ ++const struct bpf_map_ops stack_map_ops = { ++ .map_alloc_check = queue_stack_map_alloc_check, ++ .map_alloc = queue_stack_map_alloc, ++ .map_free = queue_stack_map_free, ++ .map_lookup_elem = queue_stack_map_lookup_elem, ++ .map_update_elem = queue_stack_map_update_elem, ++ .map_delete_elem = queue_stack_map_delete_elem, ++ .map_push_elem = queue_stack_map_push_elem, ++ .map_pop_elem = stack_map_pop_elem, ++ .map_peek_elem = stack_map_peek_elem, ++ .map_get_next_key = queue_stack_map_get_next_key, ++}; +--- /dev/null ++++ b/kernel/bpf/stackmap.c +@@ -0,0 +1,634 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* Copyright (c) 2016 Facebook ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "percpu_freelist.h" ++ ++#define STACK_CREATE_FLAG_MASK \ ++ (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | \ ++ BPF_F_STACK_BUILD_ID) ++ ++struct stack_map_bucket { ++ struct pcpu_freelist_node fnode; ++ u32 hash; ++ u32 nr; ++ u64 data[]; ++}; ++ ++struct bpf_stack_map { ++ struct bpf_map map; ++ void *elems; ++ struct pcpu_freelist freelist; ++ u32 n_buckets; ++ struct stack_map_bucket *buckets[]; ++}; ++ ++/* irq_work to run up_read() for build_id lookup in nmi context */ ++struct stack_map_irq_work { ++ struct irq_work irq_work; ++ struct rw_semaphore *sem; ++}; ++ ++static void do_up_read(struct irq_work *entry) ++{ ++ struct stack_map_irq_work *work; ++ ++ work = container_of(entry, struct stack_map_irq_work, irq_work); ++ up_read_non_owner(work->sem); ++ work->sem = NULL; ++} ++ ++static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work); ++ ++static inline bool stack_map_use_build_id(struct 
bpf_map *map) ++{ ++ return (map->map_flags & BPF_F_STACK_BUILD_ID); ++} ++ ++static inline int stack_map_data_size(struct bpf_map *map) ++{ ++ return stack_map_use_build_id(map) ? ++ sizeof(struct bpf_stack_build_id) : sizeof(u64); ++} ++ ++static int prealloc_elems_and_freelist(struct bpf_stack_map *smap) ++{ ++ u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size; ++ int err; ++ ++ smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries, ++ smap->map.numa_node); ++ if (!smap->elems) ++ return -ENOMEM; ++ ++ err = pcpu_freelist_init(&smap->freelist); ++ if (err) ++ goto free_elems; ++ ++ pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size, ++ smap->map.max_entries); ++ return 0; ++ ++free_elems: ++ bpf_map_area_free(smap->elems); ++ return err; ++} ++ ++/* Called from syscall */ ++static struct bpf_map *stack_map_alloc(union bpf_attr *attr) ++{ ++ u32 value_size = attr->value_size; ++ struct bpf_stack_map *smap; ++ struct bpf_map_memory mem; ++ u64 cost, n_buckets; ++ int err; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return ERR_PTR(-EPERM); ++ ++ if (attr->map_flags & ~STACK_CREATE_FLAG_MASK) ++ return ERR_PTR(-EINVAL); ++ ++ /* check sanity of attributes */ ++ if (attr->max_entries == 0 || attr->key_size != 4 || ++ value_size < 8 || value_size % 8) ++ return ERR_PTR(-EINVAL); ++ ++ BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64)); ++ if (attr->map_flags & BPF_F_STACK_BUILD_ID) { ++ if (value_size % sizeof(struct bpf_stack_build_id) || ++ value_size / sizeof(struct bpf_stack_build_id) ++ > sysctl_perf_event_max_stack) ++ return ERR_PTR(-EINVAL); ++ } else if (value_size / 8 > sysctl_perf_event_max_stack) ++ return ERR_PTR(-EINVAL); ++ ++ /* hash table size must be power of 2 */ ++ n_buckets = roundup_pow_of_two(attr->max_entries); ++ if (!n_buckets) ++ return ERR_PTR(-E2BIG); ++ ++ cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap); ++ cost += n_buckets * (value_size + sizeof(struct 
stack_map_bucket)); ++ err = bpf_map_charge_init(&mem, cost); ++ if (err) ++ return ERR_PTR(err); ++ ++ smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr)); ++ if (!smap) { ++ bpf_map_charge_finish(&mem); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ bpf_map_init_from_attr(&smap->map, attr); ++ smap->map.value_size = value_size; ++ smap->n_buckets = n_buckets; ++ ++ err = get_callchain_buffers(sysctl_perf_event_max_stack); ++ if (err) ++ goto free_charge; ++ ++ err = prealloc_elems_and_freelist(smap); ++ if (err) ++ goto put_buffers; ++ ++ bpf_map_charge_move(&smap->map.memory, &mem); ++ ++ return &smap->map; ++ ++put_buffers: ++ put_callchain_buffers(); ++free_charge: ++ bpf_map_charge_finish(&mem); ++ bpf_map_area_free(smap); ++ return ERR_PTR(err); ++} ++ ++#define BPF_BUILD_ID 3 ++/* ++ * Parse build id from the note segment. This logic can be shared between ++ * 32-bit and 64-bit system, because Elf32_Nhdr and Elf64_Nhdr are ++ * identical. ++ */ ++static inline int stack_map_parse_build_id(void *page_addr, ++ unsigned char *build_id, ++ void *note_start, ++ Elf32_Word note_size) ++{ ++ Elf32_Word note_offs = 0, new_offs; ++ ++ /* check for overflow */ ++ if (note_start < page_addr || note_start + note_size < note_start) ++ return -EINVAL; ++ ++ /* only supports note that fits in the first page */ ++ if (note_start + note_size > page_addr + PAGE_SIZE) ++ return -EINVAL; ++ ++ while (note_offs + sizeof(Elf32_Nhdr) < note_size) { ++ Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs); ++ ++ if (nhdr->n_type == BPF_BUILD_ID && ++ nhdr->n_namesz == sizeof("GNU") && ++ nhdr->n_descsz > 0 && ++ nhdr->n_descsz <= BPF_BUILD_ID_SIZE) { ++ memcpy(build_id, ++ note_start + note_offs + ++ ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), ++ nhdr->n_descsz); ++ memset(build_id + nhdr->n_descsz, 0, ++ BPF_BUILD_ID_SIZE - nhdr->n_descsz); ++ return 0; ++ } ++ new_offs = note_offs + sizeof(Elf32_Nhdr) + ++ ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4); ++ if 
(new_offs <= note_offs) /* overflow */ ++ break; ++ note_offs = new_offs; ++ } ++ return -EINVAL; ++} ++ ++/* Parse build ID from 32-bit ELF */ ++static int stack_map_get_build_id_32(void *page_addr, ++ unsigned char *build_id) ++{ ++ Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr; ++ Elf32_Phdr *phdr; ++ int i; ++ ++ /* only supports phdr that fits in one page */ ++ if (ehdr->e_phnum > ++ (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr)) ++ return -EINVAL; ++ ++ phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr)); ++ ++ for (i = 0; i < ehdr->e_phnum; ++i) ++ if (phdr[i].p_type == PT_NOTE) ++ return stack_map_parse_build_id(page_addr, build_id, ++ page_addr + phdr[i].p_offset, ++ phdr[i].p_filesz); ++ return -EINVAL; ++} ++ ++/* Parse build ID from 64-bit ELF */ ++static int stack_map_get_build_id_64(void *page_addr, ++ unsigned char *build_id) ++{ ++ Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr; ++ Elf64_Phdr *phdr; ++ int i; ++ ++ /* only supports phdr that fits in one page */ ++ if (ehdr->e_phnum > ++ (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr)) ++ return -EINVAL; ++ ++ phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr)); ++ ++ for (i = 0; i < ehdr->e_phnum; ++i) ++ if (phdr[i].p_type == PT_NOTE) ++ return stack_map_parse_build_id(page_addr, build_id, ++ page_addr + phdr[i].p_offset, ++ phdr[i].p_filesz); ++ return -EINVAL; ++} ++ ++/* Parse build ID of ELF file mapped to vma */ ++static int stack_map_get_build_id(struct vm_area_struct *vma, ++ unsigned char *build_id) ++{ ++ Elf32_Ehdr *ehdr; ++ struct page *page; ++ void *page_addr; ++ int ret; ++ ++ /* only works for page backed storage */ ++ if (!vma->vm_file) ++ return -EINVAL; ++ ++ page = find_get_page(vma->vm_file->f_mapping, 0); ++ if (!page) ++ return -EFAULT; /* page not mapped */ ++ ++ ret = -EINVAL; ++ page_addr = kmap_atomic(page); ++ ehdr = (Elf32_Ehdr *)page_addr; ++ ++ /* compare magic x7f "ELF" */ ++ if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) ++ goto out; ++ ++ /* only 
support executable file and shared object file */ ++ if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN) ++ goto out; ++ ++ if (ehdr->e_ident[EI_CLASS] == ELFCLASS32) ++ ret = stack_map_get_build_id_32(page_addr, build_id); ++ else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ++ ret = stack_map_get_build_id_64(page_addr, build_id); ++out: ++ kunmap_atomic(page_addr); ++ put_page(page); ++ return ret; ++} ++ ++static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, ++ u64 *ips, u32 trace_nr, bool user) ++{ ++ int i; ++ struct vm_area_struct *vma; ++ bool irq_work_busy = false; ++ struct stack_map_irq_work *work = NULL; ++ ++ if (irqs_disabled()) { ++ work = this_cpu_ptr(&up_read_work); ++ if (work->irq_work.flags & IRQ_WORK_BUSY) ++ /* cannot queue more up_read, fallback */ ++ irq_work_busy = true; ++ } ++ ++ /* ++ * We cannot do up_read() when the irq is disabled, because of ++ * risk to deadlock with rq_lock. To do build_id lookup when the ++ * irqs are disabled, we need to run up_read() in irq_work. We use ++ * a percpu variable to do the irq_work. If the irq_work is ++ * already used by another lookup, we fall back to report ips. ++ * ++ * Same fallback is used for kernel stack (!user) on a stackmap ++ * with build_id. 
++ */ ++ if (!user || !current || !current->mm || irq_work_busy || ++ down_read_trylock(¤t->mm->mmap_sem) == 0) { ++ /* cannot access current->mm, fall back to ips */ ++ for (i = 0; i < trace_nr; i++) { ++ id_offs[i].status = BPF_STACK_BUILD_ID_IP; ++ id_offs[i].ip = ips[i]; ++ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); ++ } ++ return; ++ } ++ ++ for (i = 0; i < trace_nr; i++) { ++ vma = find_vma(current->mm, ips[i]); ++ if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) { ++ /* per entry fall back to ips */ ++ id_offs[i].status = BPF_STACK_BUILD_ID_IP; ++ id_offs[i].ip = ips[i]; ++ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE); ++ continue; ++ } ++ id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i] ++ - vma->vm_start; ++ id_offs[i].status = BPF_STACK_BUILD_ID_VALID; ++ } ++ ++ if (!work) { ++ up_read(¤t->mm->mmap_sem); ++ } else { ++ work->sem = ¤t->mm->mmap_sem; ++ irq_work_queue(&work->irq_work); ++ /* ++ * The irq_work will release the mmap_sem with ++ * up_read_non_owner(). The rwsem_release() is called ++ * here to release the lock from lockdep's perspective. 
++ */ ++ rwsem_release(¤t->mm->mmap_sem.dep_map, 1, _RET_IP_); ++ } ++} ++ ++BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map, ++ u64, flags) ++{ ++ struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); ++ struct perf_callchain_entry *trace; ++ struct stack_map_bucket *bucket, *new_bucket, *old_bucket; ++ u32 max_depth = map->value_size / stack_map_data_size(map); ++ /* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */ ++ u32 init_nr = sysctl_perf_event_max_stack - max_depth; ++ u32 skip = flags & BPF_F_SKIP_FIELD_MASK; ++ u32 hash, id, trace_nr, trace_len; ++ bool user = flags & BPF_F_USER_STACK; ++ bool kernel = !user; ++ u64 *ips; ++ bool hash_matches; ++ ++ if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | ++ BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID))) ++ return -EINVAL; ++ ++ trace = get_perf_callchain(regs, init_nr, kernel, user, ++ sysctl_perf_event_max_stack, false, false); ++ ++ if (unlikely(!trace)) ++ /* couldn't fetch the stack trace */ ++ return -EFAULT; ++ ++ /* get_perf_callchain() guarantees that trace->nr >= init_nr ++ * and trace-nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth ++ */ ++ trace_nr = trace->nr - init_nr; ++ ++ if (trace_nr <= skip) ++ /* skipping more than usable stack trace */ ++ return -EFAULT; ++ ++ trace_nr -= skip; ++ trace_len = trace_nr * sizeof(u64); ++ ips = trace->ip + skip + init_nr; ++ hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0); ++ id = hash & (smap->n_buckets - 1); ++ bucket = READ_ONCE(smap->buckets[id]); ++ ++ hash_matches = bucket && bucket->hash == hash; ++ /* fast cmp */ ++ if (hash_matches && flags & BPF_F_FAST_STACK_CMP) ++ return id; ++ ++ if (stack_map_use_build_id(map)) { ++ /* for build_id+offset, pop a bucket before slow cmp */ ++ new_bucket = (struct stack_map_bucket *) ++ pcpu_freelist_pop(&smap->freelist); ++ if (unlikely(!new_bucket)) ++ return -ENOMEM; ++ new_bucket->nr = trace_nr; ++ 
stack_map_get_build_id_offset( ++ (struct bpf_stack_build_id *)new_bucket->data, ++ ips, trace_nr, user); ++ trace_len = trace_nr * sizeof(struct bpf_stack_build_id); ++ if (hash_matches && bucket->nr == trace_nr && ++ memcmp(bucket->data, new_bucket->data, trace_len) == 0) { ++ pcpu_freelist_push(&smap->freelist, &new_bucket->fnode); ++ return id; ++ } ++ if (bucket && !(flags & BPF_F_REUSE_STACKID)) { ++ pcpu_freelist_push(&smap->freelist, &new_bucket->fnode); ++ return -EEXIST; ++ } ++ } else { ++ if (hash_matches && bucket->nr == trace_nr && ++ memcmp(bucket->data, ips, trace_len) == 0) ++ return id; ++ if (bucket && !(flags & BPF_F_REUSE_STACKID)) ++ return -EEXIST; ++ ++ new_bucket = (struct stack_map_bucket *) ++ pcpu_freelist_pop(&smap->freelist); ++ if (unlikely(!new_bucket)) ++ return -ENOMEM; ++ memcpy(new_bucket->data, ips, trace_len); ++ } ++ ++ new_bucket->hash = hash; ++ new_bucket->nr = trace_nr; ++ ++ old_bucket = xchg(&smap->buckets[id], new_bucket); ++ if (old_bucket) ++ pcpu_freelist_push(&smap->freelist, &old_bucket->fnode); ++ return id; ++} ++ ++const struct bpf_func_proto bpf_get_stackid_proto = { ++ .func = bpf_get_stackid, ++ .gpl_only = true, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_CONST_MAP_PTR, ++ .arg3_type = ARG_ANYTHING, ++}; ++ ++BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size, ++ u64, flags) ++{ ++ u32 init_nr, trace_nr, copy_len, elem_size, num_elem; ++ bool user_build_id = flags & BPF_F_USER_BUILD_ID; ++ u32 skip = flags & BPF_F_SKIP_FIELD_MASK; ++ bool user = flags & BPF_F_USER_STACK; ++ struct perf_callchain_entry *trace; ++ bool kernel = !user; ++ int err = -EINVAL; ++ u64 *ips; ++ ++ if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK | ++ BPF_F_USER_BUILD_ID))) ++ goto clear; ++ if (kernel && user_build_id) ++ goto clear; ++ ++ elem_size = (user && user_build_id) ? 
sizeof(struct bpf_stack_build_id) ++ : sizeof(u64); ++ if (unlikely(size % elem_size)) ++ goto clear; ++ ++ num_elem = size / elem_size; ++ if (sysctl_perf_event_max_stack < num_elem) ++ init_nr = 0; ++ else ++ init_nr = sysctl_perf_event_max_stack - num_elem; ++ trace = get_perf_callchain(regs, init_nr, kernel, user, ++ sysctl_perf_event_max_stack, false, false); ++ if (unlikely(!trace)) ++ goto err_fault; ++ ++ trace_nr = trace->nr - init_nr; ++ if (trace_nr < skip) ++ goto err_fault; ++ ++ trace_nr -= skip; ++ trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem; ++ copy_len = trace_nr * elem_size; ++ ips = trace->ip + skip + init_nr; ++ if (user && user_build_id) ++ stack_map_get_build_id_offset(buf, ips, trace_nr, user); ++ else ++ memcpy(buf, ips, copy_len); ++ ++ if (size > copy_len) ++ memset(buf + copy_len, 0, size - copy_len); ++ return copy_len; ++ ++err_fault: ++ err = -EFAULT; ++clear: ++ memset(buf, 0, size); ++ return err; ++} ++ ++const struct bpf_func_proto bpf_get_stack_proto = { ++ .func = bpf_get_stack, ++ .gpl_only = true, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_PTR_TO_UNINIT_MEM, ++ .arg3_type = ARG_CONST_SIZE_OR_ZERO, ++ .arg4_type = ARG_ANYTHING, ++}; ++ ++/* Called from eBPF program */ ++static void *stack_map_lookup_elem(struct bpf_map *map, void *key) ++{ ++ return ERR_PTR(-EOPNOTSUPP); ++} ++ ++/* Called from syscall */ ++int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) ++{ ++ struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); ++ struct stack_map_bucket *bucket, *old_bucket; ++ u32 id = *(u32 *)key, trace_len; ++ ++ if (unlikely(id >= smap->n_buckets)) ++ return -ENOENT; ++ ++ bucket = xchg(&smap->buckets[id], NULL); ++ if (!bucket) ++ return -ENOENT; ++ ++ trace_len = bucket->nr * stack_map_data_size(map); ++ memcpy(value, bucket->data, trace_len); ++ memset(value + trace_len, 0, map->value_size - trace_len); ++ ++ old_bucket = xchg(&smap->buckets[id], 
bucket); ++ if (old_bucket) ++ pcpu_freelist_push(&smap->freelist, &old_bucket->fnode); ++ return 0; ++} ++ ++static int stack_map_get_next_key(struct bpf_map *map, void *key, ++ void *next_key) ++{ ++ struct bpf_stack_map *smap = container_of(map, ++ struct bpf_stack_map, map); ++ u32 id; ++ ++ WARN_ON_ONCE(!rcu_read_lock_held()); ++ ++ if (!key) { ++ id = 0; ++ } else { ++ id = *(u32 *)key; ++ if (id >= smap->n_buckets || !smap->buckets[id]) ++ id = 0; ++ else ++ id++; ++ } ++ ++ while (id < smap->n_buckets && !smap->buckets[id]) ++ id++; ++ ++ if (id >= smap->n_buckets) ++ return -ENOENT; ++ ++ *(u32 *)next_key = id; ++ return 0; ++} ++ ++static int stack_map_update_elem(struct bpf_map *map, void *key, void *value, ++ u64 map_flags) ++{ ++ return -EINVAL; ++} ++ ++/* Called from syscall or from eBPF program */ ++static int stack_map_delete_elem(struct bpf_map *map, void *key) ++{ ++ struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); ++ struct stack_map_bucket *old_bucket; ++ u32 id = *(u32 *)key; ++ ++ if (unlikely(id >= smap->n_buckets)) ++ return -E2BIG; ++ ++ old_bucket = xchg(&smap->buckets[id], NULL); ++ if (old_bucket) { ++ pcpu_freelist_push(&smap->freelist, &old_bucket->fnode); ++ return 0; ++ } else { ++ return -ENOENT; ++ } ++} ++ ++/* Called when map->refcnt goes to zero, either from workqueue or from syscall */ ++static void stack_map_free(struct bpf_map *map) ++{ ++ struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map); ++ ++ /* wait for bpf programs to complete before freeing stack map */ ++ synchronize_rcu(); ++ ++ bpf_map_area_free(smap->elems); ++ pcpu_freelist_destroy(&smap->freelist); ++ bpf_map_area_free(smap); ++ put_callchain_buffers(); ++} ++ ++const struct bpf_map_ops stack_trace_map_ops = { ++ .map_alloc = stack_map_alloc, ++ .map_free = stack_map_free, ++ .map_get_next_key = stack_map_get_next_key, ++ .map_lookup_elem = stack_map_lookup_elem, ++ .map_update_elem = stack_map_update_elem, ++ 
.map_delete_elem = stack_map_delete_elem, ++ .map_check_btf = map_check_no_btf, ++}; ++ ++static int __init stack_map_init(void) ++{ ++ int cpu; ++ struct stack_map_irq_work *work; ++ ++ for_each_possible_cpu(cpu) { ++ work = per_cpu_ptr(&up_read_work, cpu); ++ init_irq_work(&work->irq_work, do_up_read); ++ } ++ return 0; ++} ++subsys_initcall(stack_map_init); +--- a/kernel/bpf/syscall.c ++++ b/kernel/bpf/syscall.c +@@ -1,106 +1,333 @@ ++// SPDX-License-Identifier: GPL-2.0-only + /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com +- * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of version 2 of the GNU General Public +- * License as published by the Free Software Foundation. +- * +- * This program is distributed in the hope that it will be useful, but +- * WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * General Public License for more details. + */ + #include ++#include ++#include + #include + #include ++#include ++#include + #include ++#include + #include ++#include + #include + #include + #include ++#include ++#include ++#include ++#include ++#include ++ ++#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \ ++ (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ ++ (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \ ++ (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) ++#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) ++#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map)) ++ ++#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY) ++ ++DEFINE_PER_CPU(int, bpf_prog_active); ++static DEFINE_IDR(prog_idr); ++static DEFINE_SPINLOCK(prog_idr_lock); ++static DEFINE_IDR(map_idr); ++static DEFINE_SPINLOCK(map_idr_lock); + + int sysctl_unprivileged_bpf_disabled __read_mostly; + +-static LIST_HEAD(bpf_map_types); ++static const struct bpf_map_ops * const bpf_map_types[] = { 
++#define BPF_PROG_TYPE(_id, _ops) ++#define BPF_MAP_TYPE(_id, _ops) \ ++ [_id] = &_ops, ++#include ++#undef BPF_PROG_TYPE ++#undef BPF_MAP_TYPE ++}; ++ ++/* ++ * If we're handed a bigger struct than we know of, ensure all the unknown bits ++ * are 0 - i.e. new user-space does not rely on any kernel feature extensions ++ * we don't know about yet. ++ * ++ * There is a ToCToU between this function call and the following ++ * copy_from_user() call. However, this is not a concern since this function is ++ * meant to be a future-proofing of bits. ++ */ ++int bpf_check_uarg_tail_zero(void __user *uaddr, ++ size_t expected_size, ++ size_t actual_size) ++{ ++ unsigned char __user *addr; ++ unsigned char __user *end; ++ unsigned char val; ++ int err; ++ ++ if (unlikely(actual_size > PAGE_SIZE)) /* silly large */ ++ return -E2BIG; ++ ++ if (unlikely(!access_ok(VERIFY_READ, uaddr, actual_size))) ++ return -EFAULT; ++ ++ if (actual_size <= expected_size) ++ return 0; ++ ++ addr = uaddr + expected_size; ++ end = uaddr + actual_size; ++ ++ for (; addr < end; addr++) { ++ err = get_user(val, addr); ++ if (err) ++ return err; ++ if (val) ++ return -E2BIG; ++ } ++ ++ return 0; ++} + + static struct bpf_map *find_and_alloc_map(union bpf_attr *attr) + { +- struct bpf_map_type_list *tl; ++ const struct bpf_map_ops *ops; ++ u32 type = attr->map_type; + struct bpf_map *map; ++ int err; + +- list_for_each_entry(tl, &bpf_map_types, list_node) { +- if (tl->type == attr->map_type) { +- map = tl->ops->map_alloc(attr); +- if (IS_ERR(map)) +- return map; +- map->ops = tl->ops; +- map->map_type = attr->map_type; +- return map; +- } ++ if (type >= ARRAY_SIZE(bpf_map_types)) ++ return ERR_PTR(-EINVAL); ++ ops = bpf_map_types[type]; ++ if (!ops) ++ return ERR_PTR(-EINVAL); ++ ++ if (ops->map_alloc_check) { ++ err = ops->map_alloc_check(attr); ++ if (err) ++ return ERR_PTR(err); ++ } ++ map = ops->map_alloc(attr); ++ if (IS_ERR(map)) ++ return map; ++ map->ops = ops; ++ map->map_type = type; ++ 
return map; ++} ++ ++void *bpf_map_area_alloc(u64 size, int numa_node) ++{ ++ /* We really just want to fail instead of triggering OOM killer ++ * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, ++ * which is used for lower order allocation requests. ++ * ++ * It has been observed that higher order allocation requests done by ++ * vmalloc with __GFP_NORETRY being set might fail due to not trying ++ * to reclaim memory from the page cache, thus we set ++ * __GFP_RETRY_MAYFAIL to avoid such situations. ++ */ ++ ++ const gfp_t flags = __GFP_NOWARN | __GFP_ZERO; ++ void *area; ++ ++ if (size >= SIZE_MAX) ++ return NULL; ++ ++ if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { ++ area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags, ++ numa_node); ++ if (area != NULL) ++ return area; + } +- return ERR_PTR(-EINVAL); ++ ++ return __vmalloc_node_range(size, 1, ++ VMALLOC_START, VMALLOC_END, ++ GFP_KERNEL | flags, ++ PAGE_KERNEL, 0, numa_node, ++ __builtin_return_address(0)); + } + +-/* boot time registration of different map implementations */ +-void bpf_register_map_type(struct bpf_map_type_list *tl) ++void bpf_map_area_free(void *area) + { +- list_add(&tl->list_node, &bpf_map_types); ++ kvfree(area); + } + +-static int bpf_map_charge_memlock(struct bpf_map *map) ++static u32 bpf_map_flags_retain_permanent(u32 flags) + { +- struct user_struct *user = get_current_user(); +- unsigned long memlock_limit; ++ /* Some map creation flags are not tied to the map object but ++ * rather to the map fd instead, so they have no meaning upon ++ * map object inspection since multiple file descriptors with ++ * different (access) properties can exist here. Thus, given ++ * this has zero meaning for the map itself, lets clear these ++ * from here. 
++ */ ++ return flags & ~(BPF_F_RDONLY | BPF_F_WRONLY); ++} ++ ++void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr) ++{ ++ map->map_type = attr->map_type; ++ map->key_size = attr->key_size; ++ map->value_size = attr->value_size; ++ map->max_entries = attr->max_entries; ++ map->map_flags = bpf_map_flags_retain_permanent(attr->map_flags); ++ map->numa_node = bpf_map_attr_numa_node(attr); ++} ++ ++static int bpf_charge_memlock(struct user_struct *user, u32 pages) ++{ ++ unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; ++ ++ if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) { ++ atomic_long_sub(pages, &user->locked_vm); ++ return -EPERM; ++ } ++ return 0; ++} ++ ++static void bpf_uncharge_memlock(struct user_struct *user, u32 pages) ++{ ++ if (user) ++ atomic_long_sub(pages, &user->locked_vm); ++} + +- memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; ++int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size) ++{ ++ u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT; ++ struct user_struct *user; ++ int ret; + +- atomic_long_add(map->pages, &user->locked_vm); ++ if (size >= U32_MAX - PAGE_SIZE) ++ return -E2BIG; + +- if (atomic_long_read(&user->locked_vm) > memlock_limit) { +- atomic_long_sub(map->pages, &user->locked_vm); ++ user = get_current_user(); ++ ret = bpf_charge_memlock(user, pages); ++ if (ret) { + free_uid(user); +- return -EPERM; ++ return ret; + } +- map->user = user; ++ ++ mem->pages = pages; ++ mem->user = user; ++ + return 0; + } + +-static void bpf_map_uncharge_memlock(struct bpf_map *map) ++void bpf_map_charge_finish(struct bpf_map_memory *mem) + { +- struct user_struct *user = map->user; ++ bpf_uncharge_memlock(mem->user, mem->pages); ++ free_uid(mem->user); ++} + +- atomic_long_sub(map->pages, &user->locked_vm); +- free_uid(user); ++void bpf_map_charge_move(struct bpf_map_memory *dst, ++ struct bpf_map_memory *src) ++{ ++ *dst = *src; ++ ++ /* Make sure src will not be used for the 
redundant uncharging. */ ++ memset(src, 0, sizeof(struct bpf_map_memory)); ++} ++ ++int bpf_map_charge_memlock(struct bpf_map *map, u32 pages) ++{ ++ int ret; ++ ++ ret = bpf_charge_memlock(map->memory.user, pages); ++ if (ret) ++ return ret; ++ map->memory.pages += pages; ++ return ret; ++} ++ ++void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages) ++{ ++ bpf_uncharge_memlock(map->memory.user, pages); ++ map->memory.pages -= pages; ++} ++ ++static int bpf_map_alloc_id(struct bpf_map *map) ++{ ++ int id; ++ ++ idr_preload(GFP_KERNEL); ++ spin_lock_bh(&map_idr_lock); ++ id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC); ++ if (id > 0) ++ map->id = id; ++ spin_unlock_bh(&map_idr_lock); ++ idr_preload_end(); ++ ++ if (WARN_ON_ONCE(!id)) ++ return -ENOSPC; ++ ++ return id > 0 ? 0 : id; ++} ++ ++void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock) ++{ ++ unsigned long flags; ++ ++ /* Offloaded maps are removed from the IDR store when their device ++ * disappears - even if someone holds an fd to them they are unusable, ++ * the memory is gone, all ops will fail; they are simply waiting for ++ * refcnt to drop to be freed. 
++ */ ++ if (!map->id) ++ return; ++ ++ if (do_idr_lock) ++ spin_lock_irqsave(&map_idr_lock, flags); ++ else ++ __acquire(&map_idr_lock); ++ ++ idr_remove(&map_idr, map->id); ++ map->id = 0; ++ ++ if (do_idr_lock) ++ spin_unlock_irqrestore(&map_idr_lock, flags); ++ else ++ __release(&map_idr_lock); + } + + /* called from workqueue */ + static void bpf_map_free_deferred(struct work_struct *work) + { + struct bpf_map *map = container_of(work, struct bpf_map, work); ++ struct bpf_map_memory mem; + +- bpf_map_uncharge_memlock(map); ++ bpf_map_charge_move(&mem, &map->memory); + /* implementation dependent freeing */ + map->ops->map_free(map); ++ bpf_map_charge_finish(&mem); + } + + static void bpf_map_put_uref(struct bpf_map *map) + { + if (atomic_dec_and_test(&map->usercnt)) { +- if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) +- bpf_fd_array_map_clear(map); ++ if (map->ops->map_release_uref) ++ map->ops->map_release_uref(map); + } + } + + /* decrement map refcnt and schedule it for freeing via workqueue + * (unrelying map implementation ops->map_free() might sleep) + */ +-void bpf_map_put(struct bpf_map *map) ++static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock) + { + if (atomic_dec_and_test(&map->refcnt)) { ++ /* bpf_map_free_id() must be called first */ ++ bpf_map_free_id(map, do_idr_lock); ++ btf_put(map->btf); + INIT_WORK(&map->work, bpf_map_free_deferred); + schedule_work(&map->work); + } + } + ++void bpf_map_put(struct bpf_map *map) ++{ ++ __bpf_map_put(map, true); ++} ++EXPORT_SYMBOL_GPL(bpf_map_put); ++ + void bpf_map_put_with_uref(struct bpf_map *map) + { + bpf_map_put_uref(map); +@@ -109,18 +336,110 @@ void bpf_map_put_with_uref(struct bpf_ma + + static int bpf_map_release(struct inode *inode, struct file *filp) + { +- bpf_map_put_with_uref(filp->private_data); ++ struct bpf_map *map = filp->private_data; ++ ++ if (map->ops->map_release) ++ map->ops->map_release(map, filp); ++ ++ bpf_map_put_with_uref(map); + return 0; + } + +-static const struct 
file_operations bpf_map_fops = { +- .release = bpf_map_release, ++static fmode_t map_get_sys_perms(struct bpf_map *map, struct fd f) ++{ ++ fmode_t mode = f.file->f_mode; ++ ++ /* Our file permissions may have been overridden by global ++ * map permissions facing syscall side. ++ */ ++ if (READ_ONCE(map->frozen)) ++ mode &= ~FMODE_CAN_WRITE; ++ return mode; ++} ++ ++#ifdef CONFIG_PROC_FS ++static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) ++{ ++ const struct bpf_map *map = filp->private_data; ++ const struct bpf_array *array; ++ u32 owner_prog_type = 0; ++ u32 owner_jited = 0; ++ ++ if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) { ++ array = container_of(map, struct bpf_array, map); ++ owner_prog_type = array->owner_prog_type; ++ owner_jited = array->owner_jited; ++ } ++ ++ seq_printf(m, ++ "map_type:\t%u\n" ++ "key_size:\t%u\n" ++ "value_size:\t%u\n" ++ "max_entries:\t%u\n" ++ "map_flags:\t%#x\n" ++ "memlock:\t%llu\n" ++ "map_id:\t%u\n" ++ "frozen:\t%u\n", ++ map->map_type, ++ map->key_size, ++ map->value_size, ++ map->max_entries, ++ map->map_flags, ++ map->memory.pages * 1ULL << PAGE_SHIFT, ++ map->id, ++ READ_ONCE(map->frozen)); ++ ++ if (owner_prog_type) { ++ seq_printf(m, "owner_prog_type:\t%u\n", ++ owner_prog_type); ++ seq_printf(m, "owner_jited:\t%u\n", ++ owner_jited); ++ } ++} ++#endif ++ ++static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz, ++ loff_t *ppos) ++{ ++ /* We need this handler such that alloc_file() enables ++ * f_mode with FMODE_CAN_READ. ++ */ ++ return -EINVAL; ++} ++ ++static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf, ++ size_t siz, loff_t *ppos) ++{ ++ /* We need this handler such that alloc_file() enables ++ * f_mode with FMODE_CAN_WRITE. 
++ */ ++ return -EINVAL; ++} ++ ++const struct file_operations bpf_map_fops = { ++#ifdef CONFIG_PROC_FS ++ .show_fdinfo = bpf_map_show_fdinfo, ++#endif ++ .release = bpf_map_release, ++ .read = bpf_dummy_read, ++ .write = bpf_dummy_write, + }; + +-int bpf_map_new_fd(struct bpf_map *map) ++int bpf_map_new_fd(struct bpf_map *map, int flags) + { + return anon_inode_getfd("bpf-map", &bpf_map_fops, map, +- O_RDWR | O_CLOEXEC); ++ flags | O_CLOEXEC); ++} ++ ++int bpf_get_file_flag(int flags) ++{ ++ if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY)) ++ return -EINVAL; ++ if (flags & BPF_F_RDONLY) ++ return O_RDONLY; ++ if (flags & BPF_F_WRONLY) ++ return O_WRONLY; ++ return O_RDWR; + } + + /* helper macro to check that unused fields 'union bpf_attr' are zero */ +@@ -131,38 +450,171 @@ int bpf_map_new_fd(struct bpf_map *map) + offsetof(union bpf_attr, CMD##_LAST_FIELD) - \ + sizeof(attr->CMD##_LAST_FIELD)) != NULL + +-#define BPF_MAP_CREATE_LAST_FIELD max_entries ++/* dst and src must have at least BPF_OBJ_NAME_LEN number of bytes. ++ * Return 0 on success and < 0 on error. ++ */ ++static int bpf_obj_name_cpy(char *dst, const char *src) ++{ ++ const char *end = src + BPF_OBJ_NAME_LEN; ++ ++ memset(dst, 0, BPF_OBJ_NAME_LEN); ++ /* Copy all isalnum(), '_' and '.' chars. */ ++ while (src < end && *src) { ++ if (!isalnum(*src) && ++ *src != '_' && *src != '.') ++ return -EINVAL; ++ *dst++ = *src++; ++ } ++ ++ /* No '\0' found in BPF_OBJ_NAME_LEN number of bytes */ ++ if (src == end) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++int map_check_no_btf(const struct bpf_map *map, ++ const struct btf *btf, ++ const struct btf_type *key_type, ++ const struct btf_type *value_type) ++{ ++ return -ENOTSUPP; ++} ++ ++static int map_check_btf(struct bpf_map *map, const struct btf *btf, ++ u32 btf_key_id, u32 btf_value_id) ++{ ++ const struct btf_type *key_type, *value_type; ++ u32 key_size, value_size; ++ int ret = 0; ++ ++ /* Some maps allow key to be unspecified. 
*/ ++ if (btf_key_id) { ++ key_type = btf_type_id_size(btf, &btf_key_id, &key_size); ++ if (!key_type || key_size != map->key_size) ++ return -EINVAL; ++ } else { ++ key_type = btf_type_by_id(btf, 0); ++ if (!map->ops->map_check_btf) ++ return -EINVAL; ++ } ++ ++ value_type = btf_type_id_size(btf, &btf_value_id, &value_size); ++ if (!value_type || value_size != map->value_size) ++ return -EINVAL; ++ ++ map->spin_lock_off = btf_find_spin_lock(btf, value_type); ++ ++ if (map_value_has_spin_lock(map)) { ++ if (map->map_flags & BPF_F_RDONLY_PROG) ++ return -EACCES; ++ if (map->map_type != BPF_MAP_TYPE_HASH && ++ map->map_type != BPF_MAP_TYPE_ARRAY && ++ map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && ++ map->map_type != BPF_MAP_TYPE_SK_STORAGE) ++ return -ENOTSUPP; ++ if (map->spin_lock_off + sizeof(struct bpf_spin_lock) > ++ map->value_size) { ++ WARN_ONCE(1, ++ "verifier bug spin_lock_off %d value_size %d\n", ++ map->spin_lock_off, map->value_size); ++ return -EFAULT; ++ } ++ } ++ ++ if (map->ops->map_check_btf) ++ ret = map->ops->map_check_btf(map, btf, key_type, value_type); ++ ++ return ret; ++} ++ ++#define BPF_MAP_CREATE_LAST_FIELD btf_value_type_id + /* called via syscall */ + static int map_create(union bpf_attr *attr) + { ++ int numa_node = bpf_map_attr_numa_node(attr); ++ struct bpf_map_memory mem; + struct bpf_map *map; ++ int f_flags; + int err; + + err = CHECK_ATTR(BPF_MAP_CREATE); + if (err) + return -EINVAL; + ++ f_flags = bpf_get_file_flag(attr->map_flags); ++ if (f_flags < 0) ++ return f_flags; ++ ++ if (numa_node != NUMA_NO_NODE && ++ ((unsigned int)numa_node >= nr_node_ids || ++ !node_online(numa_node))) ++ return -EINVAL; ++ + /* find map type and init map: hashtable vs rbtree vs bloom vs ... 
*/ + map = find_and_alloc_map(attr); + if (IS_ERR(map)) + return PTR_ERR(map); + ++ err = bpf_obj_name_cpy(map->name, attr->map_name); ++ if (err) ++ goto free_map; ++ + atomic_set(&map->refcnt, 1); + atomic_set(&map->usercnt, 1); + +- err = bpf_map_charge_memlock(map); ++ if (attr->btf_key_type_id || attr->btf_value_type_id) { ++ struct btf *btf; ++ ++ if (!attr->btf_value_type_id) { ++ err = -EINVAL; ++ goto free_map; ++ } ++ ++ btf = btf_get_by_fd(attr->btf_fd); ++ if (IS_ERR(btf)) { ++ err = PTR_ERR(btf); ++ goto free_map; ++ } ++ ++ err = map_check_btf(map, btf, attr->btf_key_type_id, ++ attr->btf_value_type_id); ++ if (err) { ++ btf_put(btf); ++ goto free_map; ++ } ++ ++ map->btf = btf; ++ map->btf_key_type_id = attr->btf_key_type_id; ++ map->btf_value_type_id = attr->btf_value_type_id; ++ } else { ++ map->spin_lock_off = -EINVAL; ++ } ++ ++ err = bpf_map_alloc_id(map); + if (err) +- goto free_map; ++ goto free_map_sec; + +- err = bpf_map_new_fd(map); +- if (err < 0) +- /* failed to allocate fd */ +- goto free_map; ++ err = bpf_map_new_fd(map, f_flags); ++ if (err < 0) { ++ /* failed to allocate fd. ++ * bpf_map_put_with_uref() is needed because the above ++ * bpf_map_alloc_id() has published the map ++ * to the userspace and the userspace may ++ * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID. 
++ */ ++ bpf_map_put_with_uref(map); ++ return err; ++ } + + return err; + ++free_map_sec: + free_map: ++ btf_put(map->btf); ++ bpf_map_charge_move(&mem, &map->memory); + map->ops->map_free(map); ++ bpf_map_charge_finish(&mem); + return err; + } + +@@ -194,6 +646,7 @@ struct bpf_map *bpf_map_inc(struct bpf_m + atomic_inc(&map->usercnt); + return map; + } ++EXPORT_SYMBOL_GPL(bpf_map_inc); + + struct bpf_map *bpf_map_get_with_uref(u32 ufd) + { +@@ -210,59 +663,155 @@ struct bpf_map *bpf_map_get_with_uref(u3 + return map; + } + +-/* helper to convert user pointers passed inside __aligned_u64 fields */ +-static void __user *u64_to_ptr(__u64 val) ++/* map_idr_lock should have been held */ ++static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, ++ bool uref) ++{ ++ int refold; ++ ++ refold = atomic_fetch_add_unless(&map->refcnt, 1, 0); ++ ++ if (refold >= BPF_MAX_REFCNT) { ++ __bpf_map_put(map, false); ++ return ERR_PTR(-EBUSY); ++ } ++ ++ if (!refold) ++ return ERR_PTR(-ENOENT); ++ ++ if (uref) ++ atomic_inc(&map->usercnt); ++ ++ return map; ++} ++ ++struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref) ++{ ++ spin_lock_bh(&map_idr_lock); ++ map = __bpf_map_inc_not_zero(map, uref); ++ spin_unlock_bh(&map_idr_lock); ++ ++ return map; ++} ++EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero); ++ ++int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) ++{ ++ return -ENOTSUPP; ++} ++ ++static void *__bpf_copy_key(void __user *ukey, u64 key_size) + { +- return (void __user *) (unsigned long) val; ++ if (key_size) ++ return memdup_user(ukey, key_size); ++ ++ if (ukey) ++ return ERR_PTR(-EINVAL); ++ ++ return NULL; + } + + /* last field in 'union bpf_attr' used by this command */ +-#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value ++#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags + + static int map_lookup_elem(union bpf_attr *attr) + { +- void __user *ukey = u64_to_ptr(attr->key); +- void __user *uvalue = u64_to_ptr(attr->value); ++ void __user 
*ukey = u64_to_user_ptr(attr->key); ++ void __user *uvalue = u64_to_user_ptr(attr->value); + int ufd = attr->map_fd; + struct bpf_map *map; + void *key, *value, *ptr; ++ u32 value_size; + struct fd f; + int err; + + if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM)) + return -EINVAL; + ++ if (attr->flags & ~BPF_F_LOCK) ++ return -EINVAL; ++ + f = fdget(ufd); + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); ++ if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { ++ err = -EPERM; ++ goto err_put; ++ } + +- err = -ENOMEM; +- key = kmalloc(map->key_size, GFP_USER); +- if (!key) ++ if ((attr->flags & BPF_F_LOCK) && ++ !map_value_has_spin_lock(map)) { ++ err = -EINVAL; + goto err_put; ++ } + +- err = -EFAULT; +- if (copy_from_user(key, ukey, map->key_size) != 0) +- goto free_key; ++ key = __bpf_copy_key(ukey, map->key_size); ++ if (IS_ERR(key)) { ++ err = PTR_ERR(key); ++ goto err_put; ++ } ++ ++ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || ++ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || ++ map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) ++ value_size = round_up(map->value_size, 8) * num_possible_cpus(); ++ else if (IS_FD_MAP(map)) ++ value_size = sizeof(u32); ++ else ++ value_size = map->value_size; + + err = -ENOMEM; +- value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN); ++ value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); + if (!value) + goto free_key; + +- rcu_read_lock(); +- ptr = map->ops->map_lookup_elem(map, key); +- if (ptr) +- memcpy(value, ptr, map->value_size); +- rcu_read_unlock(); ++ preempt_disable(); ++ this_cpu_inc(bpf_prog_active); ++ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || ++ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { ++ err = bpf_percpu_hash_copy(map, key, value); ++ } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { ++ err = bpf_percpu_array_copy(map, key, value); ++ } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { ++ err = bpf_stackmap_copy(map, key, value); ++ } else if (IS_FD_ARRAY(map)) { ++ err = 
bpf_fd_array_map_lookup_elem(map, key, value); ++ } else if (IS_FD_HASH(map)) { ++ err = bpf_fd_htab_map_lookup_elem(map, key, value); ++ } else if (map->map_type == BPF_MAP_TYPE_QUEUE || ++ map->map_type == BPF_MAP_TYPE_STACK) { ++ err = map->ops->map_peek_elem(map, value); ++ } else { ++ rcu_read_lock(); ++ if (map->ops->map_lookup_elem_sys_only) ++ ptr = map->ops->map_lookup_elem_sys_only(map, key); ++ else ++ ptr = map->ops->map_lookup_elem(map, key); ++ if (IS_ERR(ptr)) { ++ err = PTR_ERR(ptr); ++ } else if (!ptr) { ++ err = -ENOENT; ++ } else { ++ err = 0; ++ if (attr->flags & BPF_F_LOCK) ++ /* lock 'ptr' and copy everything but lock */ ++ copy_map_value_locked(map, value, ptr, true); ++ else ++ copy_map_value(map, value, ptr); ++ /* mask lock, since value wasn't zero inited */ ++ check_and_init_map_lock(map, value); ++ } ++ rcu_read_unlock(); ++ } ++ this_cpu_dec(bpf_prog_active); ++ preempt_enable(); + +- err = -ENOENT; +- if (!ptr) ++ if (err) + goto free_value; + + err = -EFAULT; +- if (copy_to_user(uvalue, value, map->value_size) != 0) ++ if (copy_to_user(uvalue, value, value_size) != 0) + goto free_value; + + err = 0; +@@ -276,15 +825,27 @@ err_put: + return err; + } + ++static void maybe_wait_bpf_programs(struct bpf_map *map) ++{ ++ /* Wait for any running BPF programs to complete so that ++ * userspace, when we return to it, knows that all programs ++ * that could be running use the new map value. 
++ */ ++ if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS || ++ map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) ++ synchronize_rcu(); ++} ++ + #define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags + + static int map_update_elem(union bpf_attr *attr) + { +- void __user *ukey = u64_to_ptr(attr->key); +- void __user *uvalue = u64_to_ptr(attr->value); ++ void __user *ukey = u64_to_user_ptr(attr->key); ++ void __user *uvalue = u64_to_user_ptr(attr->value); + int ufd = attr->map_fd; + struct bpf_map *map; + void *key, *value; ++ u32 value_size; + struct fd f; + int err; + +@@ -295,32 +856,79 @@ static int map_update_elem(union bpf_att + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); ++ if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { ++ err = -EPERM; ++ goto err_put; ++ } + +- err = -ENOMEM; +- key = kmalloc(map->key_size, GFP_USER); +- if (!key) ++ if ((attr->flags & BPF_F_LOCK) && ++ !map_value_has_spin_lock(map)) { ++ err = -EINVAL; ++ goto err_put; ++ } ++ ++ key = __bpf_copy_key(ukey, map->key_size); ++ if (IS_ERR(key)) { ++ err = PTR_ERR(key); + goto err_put; ++ } + +- err = -EFAULT; +- if (copy_from_user(key, ukey, map->key_size) != 0) +- goto free_key; ++ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || ++ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH || ++ map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) ++ value_size = round_up(map->value_size, 8) * num_possible_cpus(); ++ else ++ value_size = map->value_size; + + err = -ENOMEM; +- value = kmalloc(map->value_size, GFP_USER | __GFP_NOWARN); ++ value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); + if (!value) + goto free_key; + + err = -EFAULT; +- if (copy_from_user(value, uvalue, map->value_size) != 0) ++ if (copy_from_user(value, uvalue, value_size) != 0) + goto free_value; + +- /* eBPF program that use maps are running under rcu_read_lock(), +- * therefore all map accessors rely on this fact, so do the same here +- */ +- rcu_read_lock(); +- err = map->ops->map_update_elem(map, key, value, attr->flags); 
+- rcu_read_unlock(); ++ /* Need to create a kthread, thus must support schedule */ ++ if (map->map_type == BPF_MAP_TYPE_CPUMAP || ++ map->map_type == BPF_MAP_TYPE_SOCKHASH || ++ map->map_type == BPF_MAP_TYPE_SOCKMAP) { ++ err = map->ops->map_update_elem(map, key, value, attr->flags); ++ goto out; ++ } + ++ /* must increment bpf_prog_active to avoid kprobe+bpf triggering from ++ * inside bpf map update or delete otherwise deadlocks are possible ++ */ ++ preempt_disable(); ++ __this_cpu_inc(bpf_prog_active); ++ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || ++ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { ++ err = bpf_percpu_hash_update(map, key, value, attr->flags); ++ } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { ++ err = bpf_percpu_array_update(map, key, value, attr->flags); ++ } else if (IS_FD_ARRAY(map)) { ++ rcu_read_lock(); ++ err = bpf_fd_array_map_update_elem(map, f.file, key, value, ++ attr->flags); ++ rcu_read_unlock(); ++ } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { ++ rcu_read_lock(); ++ err = bpf_fd_htab_map_update_elem(map, f.file, key, value, ++ attr->flags); ++ rcu_read_unlock(); ++ } else if (map->map_type == BPF_MAP_TYPE_QUEUE || ++ map->map_type == BPF_MAP_TYPE_STACK) { ++ err = map->ops->map_push_elem(map, value, attr->flags); ++ } else { ++ rcu_read_lock(); ++ err = map->ops->map_update_elem(map, key, value, attr->flags); ++ rcu_read_unlock(); ++ } ++ __this_cpu_dec(bpf_prog_active); ++ preempt_enable(); ++ maybe_wait_bpf_programs(map); ++out: + free_value: + kfree(value); + free_key: +@@ -334,7 +942,7 @@ err_put: + + static int map_delete_elem(union bpf_attr *attr) + { +- void __user *ukey = u64_to_ptr(attr->key); ++ void __user *ukey = u64_to_user_ptr(attr->key); + int ufd = attr->map_fd; + struct bpf_map *map; + struct fd f; +@@ -348,21 +956,25 @@ static int map_delete_elem(union bpf_att + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); +- +- err = -ENOMEM; +- key = kmalloc(map->key_size, 
GFP_USER); +- if (!key) ++ if (!(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { ++ err = -EPERM; + goto err_put; ++ } + +- err = -EFAULT; +- if (copy_from_user(key, ukey, map->key_size) != 0) +- goto free_key; ++ key = __bpf_copy_key(ukey, map->key_size); ++ if (IS_ERR(key)) { ++ err = PTR_ERR(key); ++ goto err_put; ++ } + ++ preempt_disable(); ++ __this_cpu_inc(bpf_prog_active); + rcu_read_lock(); + err = map->ops->map_delete_elem(map, key); + rcu_read_unlock(); +- +-free_key: ++ __this_cpu_dec(bpf_prog_active); ++ preempt_enable(); ++ maybe_wait_bpf_programs(map); + kfree(key); + err_put: + fdput(f); +@@ -374,8 +986,8 @@ err_put: + + static int map_get_next_key(union bpf_attr *attr) + { +- void __user *ukey = u64_to_ptr(attr->key); +- void __user *unext_key = u64_to_ptr(attr->next_key); ++ void __user *ukey = u64_to_user_ptr(attr->key); ++ void __user *unext_key = u64_to_user_ptr(attr->next_key); + int ufd = attr->map_fd; + struct bpf_map *map; + void *key, *next_key; +@@ -389,15 +1001,20 @@ static int map_get_next_key(union bpf_at + map = __bpf_map_get(f); + if (IS_ERR(map)) + return PTR_ERR(map); +- +- err = -ENOMEM; +- key = kmalloc(map->key_size, GFP_USER); +- if (!key) ++ if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ)) { ++ err = -EPERM; + goto err_put; ++ } + +- err = -EFAULT; +- if (copy_from_user(key, ukey, map->key_size) != 0) +- goto free_key; ++ if (ukey) { ++ key = __bpf_copy_key(ukey, map->key_size); ++ if (IS_ERR(key)) { ++ err = PTR_ERR(key); ++ goto err_put; ++ } ++ } else { ++ key = NULL; ++ } + + err = -ENOMEM; + next_key = kmalloc(map->key_size, GFP_USER); +@@ -425,77 +1042,126 @@ err_put: + return err; + } + +-static LIST_HEAD(bpf_prog_types); ++#define BPF_MAP_LOOKUP_AND_DELETE_ELEM_LAST_FIELD value + +-static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) ++static int map_lookup_and_delete_elem(union bpf_attr *attr) + { +- struct bpf_prog_type_list *tl; ++ void __user *ukey = u64_to_user_ptr(attr->key); ++ void __user 
*uvalue = u64_to_user_ptr(attr->value); ++ int ufd = attr->map_fd; ++ struct bpf_map *map; ++ void *key, *value; ++ u32 value_size; ++ struct fd f; ++ int err; + +- list_for_each_entry(tl, &bpf_prog_types, list_node) { +- if (tl->type == type) { +- prog->aux->ops = tl->ops; +- prog->type = type; +- return 0; +- } ++ if (CHECK_ATTR(BPF_MAP_LOOKUP_AND_DELETE_ELEM)) ++ return -EINVAL; ++ ++ f = fdget(ufd); ++ map = __bpf_map_get(f); ++ if (IS_ERR(map)) ++ return PTR_ERR(map); ++ if (!(map_get_sys_perms(map, f) & FMODE_CAN_READ) || ++ !(map_get_sys_perms(map, f) & FMODE_CAN_WRITE)) { ++ err = -EPERM; ++ goto err_put; + } + +- return -EINVAL; +-} ++ key = __bpf_copy_key(ukey, map->key_size); ++ if (IS_ERR(key)) { ++ err = PTR_ERR(key); ++ goto err_put; ++ } + +-void bpf_register_prog_type(struct bpf_prog_type_list *tl) +-{ +- list_add(&tl->list_node, &bpf_prog_types); ++ value_size = map->value_size; ++ ++ err = -ENOMEM; ++ value = kmalloc(value_size, GFP_USER | __GFP_NOWARN); ++ if (!value) ++ goto free_key; ++ ++ if (map->map_type == BPF_MAP_TYPE_QUEUE || ++ map->map_type == BPF_MAP_TYPE_STACK) { ++ err = map->ops->map_pop_elem(map, value); ++ } else { ++ err = -ENOTSUPP; ++ } ++ ++ if (err) ++ goto free_value; ++ ++ if (copy_to_user(uvalue, value, value_size) != 0) { ++ err = -EFAULT; ++ goto free_value; ++ } ++ ++ err = 0; ++ ++free_value: ++ kfree(value); ++free_key: ++ kfree(key); ++err_put: ++ fdput(f); ++ return err; + } + +-/* fixup insn->imm field of bpf_call instructions: +- * if (insn->imm == BPF_FUNC_map_lookup_elem) +- * insn->imm = bpf_map_lookup_elem - __bpf_call_base; +- * else if (insn->imm == BPF_FUNC_map_update_elem) +- * insn->imm = bpf_map_update_elem - __bpf_call_base; +- * else ... 
+- * +- * this function is called after eBPF program passed verification +- */ +-static void fixup_bpf_calls(struct bpf_prog *prog) ++#define BPF_MAP_FREEZE_LAST_FIELD map_fd ++ ++static int map_freeze(const union bpf_attr *attr) + { +- const struct bpf_func_proto *fn; +- int i; ++ int err = 0, ufd = attr->map_fd; ++ struct bpf_map *map; ++ struct fd f; + +- for (i = 0; i < prog->len; i++) { +- struct bpf_insn *insn = &prog->insnsi[i]; ++ if (CHECK_ATTR(BPF_MAP_FREEZE)) ++ return -EINVAL; + +- if (insn->code == (BPF_JMP | BPF_CALL)) { +- /* we reach here when program has bpf_call instructions +- * and it passed bpf_check(), means that +- * ops->get_func_proto must have been supplied, check it +- */ +- BUG_ON(!prog->aux->ops->get_func_proto); ++ f = fdget(ufd); ++ map = __bpf_map_get(f); ++ if (IS_ERR(map)) ++ return PTR_ERR(map); ++ if (READ_ONCE(map->frozen)) { ++ err = -EBUSY; ++ goto err_put; ++ } ++ if (!capable(CAP_SYS_ADMIN)) { ++ err = -EPERM; ++ goto err_put; ++ } + +- if (insn->imm == BPF_FUNC_get_route_realm) +- prog->dst_needed = 1; +- if (insn->imm == BPF_FUNC_get_prandom_u32) +- bpf_user_rnd_init_once(); +- if (insn->imm == BPF_FUNC_tail_call) { +- /* mark bpf_tail_call as different opcode +- * to avoid conditional branch in +- * interpeter for every normal call +- * and to prevent accidental JITing by +- * JIT compiler that doesn't support +- * bpf_tail_call yet +- */ +- insn->imm = 0; +- insn->code |= BPF_X; +- continue; +- } ++ WRITE_ONCE(map->frozen, true); ++err_put: ++ fdput(f); ++ return err; ++} + +- fn = prog->aux->ops->get_func_proto(insn->imm); +- /* all functions that have prototype and verifier allowed +- * programs to call them, must be real in-kernel functions +- */ +- BUG_ON(!fn->func); +- insn->imm = fn->func - __bpf_call_base; +- } +- } ++static const struct bpf_prog_ops * const bpf_prog_types[] = { ++#define BPF_PROG_TYPE(_id, _name) \ ++ [_id] = & _name ## _prog_ops, ++#define BPF_MAP_TYPE(_id, _ops) ++#include ++#undef 
BPF_PROG_TYPE ++#undef BPF_MAP_TYPE ++}; ++ ++static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog) ++{ ++ const struct bpf_prog_ops *ops; ++ ++ if (type >= ARRAY_SIZE(bpf_prog_types)) ++ return -EINVAL; ++ ops = bpf_prog_types[type]; ++ if (!ops) ++ return -EINVAL; ++ ++ if (!bpf_prog_is_dev_bound(prog->aux)) ++ prog->aux->ops = ops; ++ else ++ return -EINVAL; ++ prog->type = type; ++ return 0; + } + + /* drop refcnt on maps used by eBPF program and free auxilary data */ +@@ -509,19 +1175,39 @@ static void free_used_maps(struct bpf_pr + kfree(aux->used_maps); + } + ++int __bpf_prog_charge(struct user_struct *user, u32 pages) ++{ ++ unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; ++ unsigned long user_bufs; ++ ++ if (user) { ++ user_bufs = atomic_long_add_return(pages, &user->locked_vm); ++ if (user_bufs > memlock_limit) { ++ atomic_long_sub(pages, &user->locked_vm); ++ return -EPERM; ++ } ++ } ++ ++ return 0; ++} ++ ++void __bpf_prog_uncharge(struct user_struct *user, u32 pages) ++{ ++ if (user) ++ atomic_long_sub(pages, &user->locked_vm); ++} ++ + static int bpf_prog_charge_memlock(struct bpf_prog *prog) + { + struct user_struct *user = get_current_user(); +- unsigned long memlock_limit; ++ int ret; + +- memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; +- +- atomic_long_add(prog->pages, &user->locked_vm); +- if (atomic_long_read(&user->locked_vm) > memlock_limit) { +- atomic_long_sub(prog->pages, &user->locked_vm); ++ ret = __bpf_prog_charge(user, prog->pages); ++ if (ret) { + free_uid(user); +- return -EPERM; ++ return ret; + } ++ + prog->aux->user = user; + return 0; + } +@@ -530,30 +1216,87 @@ static void bpf_prog_uncharge_memlock(st + { + struct user_struct *user = prog->aux->user; + +- atomic_long_sub(prog->pages, &user->locked_vm); ++ __bpf_prog_uncharge(user, prog->pages); + free_uid(user); + } + +-static void __prog_put_common(struct rcu_head *rcu) ++static int bpf_prog_alloc_id(struct bpf_prog *prog) ++{ ++ 
int id; ++ ++ idr_preload(GFP_KERNEL); ++ spin_lock_bh(&prog_idr_lock); ++ id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC); ++ if (id > 0) ++ prog->aux->id = id; ++ spin_unlock_bh(&prog_idr_lock); ++ idr_preload_end(); ++ ++ /* id is in [1, INT_MAX) */ ++ if (WARN_ON_ONCE(!id)) ++ return -ENOSPC; ++ ++ return id > 0 ? 0 : id; ++} ++ ++void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock) ++{ ++ /* cBPF to eBPF migrations are currently not in the idr store. ++ * Offloaded programs are removed from the store when their device ++ * disappears - even if someone grabs an fd to them they are unusable, ++ * simply waiting for refcnt to drop to be freed. ++ */ ++ if (!prog->aux->id) ++ return; ++ ++ if (do_idr_lock) ++ spin_lock_bh(&prog_idr_lock); ++ else ++ __acquire(&prog_idr_lock); ++ ++ idr_remove(&prog_idr, prog->aux->id); ++ prog->aux->id = 0; ++ ++ if (do_idr_lock) ++ spin_unlock_bh(&prog_idr_lock); ++ else ++ __release(&prog_idr_lock); ++} ++ ++static void __bpf_prog_put_rcu(struct rcu_head *rcu) + { + struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu); + ++ kvfree(aux->func_info); + free_used_maps(aux); + bpf_prog_uncharge_memlock(aux->prog); + bpf_prog_free(aux->prog); + } + +-/* version of bpf_prog_put() that is called after a grace period */ +-void bpf_prog_put_rcu(struct bpf_prog *prog) ++static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred) ++{ ++ bpf_prog_kallsyms_del_all(prog); ++ btf_put(prog->aux->btf); ++ bpf_prog_free_linfo(prog); ++ ++ if (deferred) ++ call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); ++ else ++ __bpf_prog_put_rcu(&prog->aux->rcu); ++} ++ ++static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock) + { +- if (atomic_dec_and_test(&prog->aux->refcnt)) +- call_rcu(&prog->aux->rcu, __prog_put_common); ++ if (atomic_dec_and_test(&prog->aux->refcnt)) { ++ /* bpf_prog_free_id() must be called first */ ++ bpf_prog_free_id(prog, do_idr_lock); ++ __bpf_prog_put_noref(prog, 
true); ++ } + } + + void bpf_prog_put(struct bpf_prog *prog) + { +- if (atomic_dec_and_test(&prog->aux->refcnt)) +- __prog_put_common(&prog->aux->rcu); ++ __bpf_prog_put(prog, true); + } + EXPORT_SYMBOL_GPL(bpf_prog_put); + +@@ -561,12 +1304,68 @@ static int bpf_prog_release(struct inode + { + struct bpf_prog *prog = filp->private_data; + +- bpf_prog_put_rcu(prog); ++ bpf_prog_put(prog); + return 0; + } + +-static const struct file_operations bpf_prog_fops = { +- .release = bpf_prog_release, ++static void bpf_prog_get_stats(const struct bpf_prog *prog, ++ struct bpf_prog_stats *stats) ++{ ++ u64 nsecs = 0, cnt = 0; ++ int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ const struct bpf_prog_stats *st; ++ unsigned int start; ++ u64 tnsecs, tcnt; ++ ++ st = per_cpu_ptr(prog->aux->stats, cpu); ++ do { ++ start = u64_stats_fetch_begin_irq(&st->syncp); ++ tnsecs = st->nsecs; ++ tcnt = st->cnt; ++ } while (u64_stats_fetch_retry_irq(&st->syncp, start)); ++ nsecs += tnsecs; ++ cnt += tcnt; ++ } ++ stats->nsecs = nsecs; ++ stats->cnt = cnt; ++} ++ ++#ifdef CONFIG_PROC_FS ++static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) ++{ ++ const struct bpf_prog *prog = filp->private_data; ++ char prog_tag[sizeof(prog->tag) * 2 + 1] = { }; ++ struct bpf_prog_stats stats; ++ ++ bpf_prog_get_stats(prog, &stats); ++ bin2hex(prog_tag, prog->tag, sizeof(prog->tag)); ++ seq_printf(m, ++ "prog_type:\t%u\n" ++ "prog_jited:\t%u\n" ++ "prog_tag:\t%s\n" ++ "memlock:\t%llu\n" ++ "prog_id:\t%u\n" ++ "run_time_ns:\t%llu\n" ++ "run_cnt:\t%llu\n", ++ prog->type, ++ prog->jited, ++ prog_tag, ++ prog->pages * 1ULL << PAGE_SHIFT, ++ prog->aux->id, ++ stats.nsecs, ++ stats.cnt); ++} ++#endif ++ ++const struct file_operations bpf_prog_fops = { ++#ifdef CONFIG_PROC_FS ++ .show_fdinfo = bpf_prog_show_fdinfo, ++#endif ++ .release = bpf_prog_release, ++ .read = bpf_dummy_read, ++ .write = bpf_dummy_write, + }; + + int bpf_prog_new_fd(struct bpf_prog *prog) +@@ -575,7 +1374,7 @@ int 
bpf_prog_new_fd(struct bpf_prog *pro + O_RDWR | O_CLOEXEC); + } + +-static struct bpf_prog *__bpf_prog_get(struct fd f) ++static struct bpf_prog *____bpf_prog_get(struct fd f) + { + if (!f.file) + return ERR_PTR(-EBADF); +@@ -587,38 +1386,178 @@ static struct bpf_prog *__bpf_prog_get(s + return f.file->private_data; + } + ++struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i) ++{ ++ if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) { ++ atomic_sub(i, &prog->aux->refcnt); ++ return ERR_PTR(-EBUSY); ++ } ++ return prog; ++} ++EXPORT_SYMBOL_GPL(bpf_prog_add); ++ ++void bpf_prog_sub(struct bpf_prog *prog, int i) ++{ ++ /* Only to be used for undoing previous bpf_prog_add() in some ++ * error path. We still know that another entity in our call ++ * path holds a reference to the program, thus atomic_sub() can ++ * be safely used in such cases! ++ */ ++ WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0); ++} ++EXPORT_SYMBOL_GPL(bpf_prog_sub); ++ + struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog) + { +- if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) { +- atomic_dec(&prog->aux->refcnt); ++ return bpf_prog_add(prog, 1); ++} ++EXPORT_SYMBOL_GPL(bpf_prog_inc); ++ ++/* prog_idr_lock should have been held */ ++struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog) ++{ ++ int refold; ++ ++ refold = atomic_fetch_add_unless(&prog->aux->refcnt, 1, 0); ++ ++ if (refold >= BPF_MAX_REFCNT) { ++ __bpf_prog_put(prog, false); + return ERR_PTR(-EBUSY); + } ++ ++ if (!refold) ++ return ERR_PTR(-ENOENT); ++ + return prog; + } ++EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); + +-/* called by sockets/tracing/seccomp before attaching program to an event +- * pairs with bpf_prog_put() +- */ +-struct bpf_prog *bpf_prog_get(u32 ufd) ++bool bpf_prog_get_ok(struct bpf_prog *prog, ++ enum bpf_prog_type *attach_type, bool attach_drv) ++{ ++ /* not an attachment, just a refcount inc, always allow */ ++ if (!attach_type) ++ return true; ++ ++ if (prog->type != 
*attach_type) ++ return false; ++ if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) ++ return false; ++ ++ return true; ++} ++ ++static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, ++ bool attach_drv) + { + struct fd f = fdget(ufd); + struct bpf_prog *prog; + +- prog = __bpf_prog_get(f); ++ prog = ____bpf_prog_get(f); + if (IS_ERR(prog)) + return prog; ++ if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { ++ prog = ERR_PTR(-EINVAL); ++ goto out; ++ } + + prog = bpf_prog_inc(prog); ++out: + fdput(f); +- + return prog; + } +-EXPORT_SYMBOL_GPL(bpf_prog_get); ++ ++struct bpf_prog *bpf_prog_get(u32 ufd) ++{ ++ return __bpf_prog_get(ufd, NULL, false); ++} ++ ++struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, ++ bool attach_drv) ++{ ++ return __bpf_prog_get(ufd, &type, attach_drv); ++} ++EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); ++ ++/* Initially all BPF programs could be loaded w/o specifying ++ * expected_attach_type. Later for some of them specifying expected_attach_type ++ * at load time became required so that program could be validated properly. ++ * Programs of types that are allowed to be loaded both w/ and w/o (for ++ * backward compatibility) expected_attach_type, should have the default attach ++ * type assigned to expected_attach_type for the latter case, so that it can be ++ * validated later at attach time. ++ * ++ * bpf_prog_load_fixup_attach_type() sets expected_attach_type in @attr if ++ * prog type requires it but has some attach types that have to be backward ++ * compatible. ++ */ ++static void bpf_prog_load_fixup_attach_type(union bpf_attr *attr) ++{ ++ switch (attr->prog_type) { ++ case BPF_PROG_TYPE_CGROUP_SOCK: ++ /* Unfortunately BPF_ATTACH_TYPE_UNSPEC enumeration doesn't ++ * exist so checking for non-zero is the way to go here. 
++ */ ++ if (!attr->expected_attach_type) ++ attr->expected_attach_type = ++ BPF_CGROUP_INET_SOCK_CREATE; ++ break; ++ } ++} ++ ++static int ++bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type, ++ enum bpf_attach_type expected_attach_type) ++{ ++ switch (prog_type) { ++ case BPF_PROG_TYPE_CGROUP_SOCK: ++ switch (expected_attach_type) { ++ case BPF_CGROUP_INET_SOCK_CREATE: ++ case BPF_CGROUP_INET4_POST_BIND: ++ case BPF_CGROUP_INET6_POST_BIND: ++ return 0; ++ default: ++ return -EINVAL; ++ } ++ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: ++ switch (expected_attach_type) { ++ case BPF_CGROUP_INET4_BIND: ++ case BPF_CGROUP_INET6_BIND: ++ case BPF_CGROUP_INET4_CONNECT: ++ case BPF_CGROUP_INET6_CONNECT: ++ case BPF_CGROUP_UDP4_SENDMSG: ++ case BPF_CGROUP_UDP6_SENDMSG: ++ case BPF_CGROUP_UDP4_RECVMSG: ++ case BPF_CGROUP_UDP6_RECVMSG: ++ return 0; ++ default: ++ return -EINVAL; ++ } ++ case BPF_PROG_TYPE_CGROUP_SKB: ++ switch (expected_attach_type) { ++ case BPF_CGROUP_INET_INGRESS: ++ case BPF_CGROUP_INET_EGRESS: ++ return 0; ++ default: ++ return -EINVAL; ++ } ++ case BPF_PROG_TYPE_CGROUP_SOCKOPT: ++ switch (expected_attach_type) { ++ case BPF_CGROUP_SETSOCKOPT: ++ case BPF_CGROUP_GETSOCKOPT: ++ return 0; ++ default: ++ return -EINVAL; ++ } ++ default: ++ return 0; ++ } ++} + + /* last field in 'union bpf_attr' used by this command */ +-#define BPF_PROG_LOAD_LAST_FIELD kern_version ++#define BPF_PROG_LOAD_LAST_FIELD line_info_cnt + +-static int bpf_prog_load(union bpf_attr *attr) ++static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr) + { + enum bpf_prog_type type = attr->prog_type; + struct bpf_prog *prog; +@@ -629,8 +1568,19 @@ static int bpf_prog_load(union bpf_attr + if (CHECK_ATTR(BPF_PROG_LOAD)) + return -EINVAL; + ++ if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | ++ BPF_F_ANY_ALIGNMENT | ++ BPF_F_TEST_STATE_FREQ | ++ BPF_F_TEST_RND_HI32)) ++ return -EINVAL; ++ ++ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && ++ 
(attr->prog_flags & BPF_F_ANY_ALIGNMENT) && ++ !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ + /* copy eBPF program license from user space */ +- if (strncpy_from_user(license, u64_to_ptr(attr->license), ++ if (strncpy_from_user(license, u64_to_user_ptr(attr->license), + sizeof(license) - 1) < 0) + return -EFAULT; + license[sizeof(license) - 1] = 0; +@@ -638,30 +1588,36 @@ static int bpf_prog_load(union bpf_attr + /* eBPF programs must be GPL compatible to use GPL-ed functions */ + is_gpl = license_is_gpl_compatible(license); + +- if (attr->insn_cnt >= BPF_MAXINSNS) +- return -EINVAL; ++ if (attr->insn_cnt == 0 || ++ attr->insn_cnt > (capable(CAP_SYS_ADMIN) ? BPF_COMPLEXITY_LIMIT_INSNS : BPF_MAXINSNS)) ++ return -E2BIG; ++ if (type != BPF_PROG_TYPE_SOCKET_FILTER && ++ type != BPF_PROG_TYPE_CGROUP_SKB && ++ !capable(CAP_SYS_ADMIN)) ++ return -EPERM; + +- if (type == BPF_PROG_TYPE_KPROBE && +- attr->kern_version != LINUX_VERSION_CODE) ++ bpf_prog_load_fixup_attach_type(attr); ++ if (bpf_prog_load_check_attach_type(type, attr->expected_attach_type)) + return -EINVAL; + +- if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN)) +- return -EPERM; +- + /* plain bpf_prog allocation */ + prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER); + if (!prog) + return -ENOMEM; + ++ prog->expected_attach_type = attr->expected_attach_type; ++ ++ prog->aux->offload_requested = !!attr->prog_ifindex; ++ + err = bpf_prog_charge_memlock(prog); + if (err) +- goto free_prog_nouncharge; ++ goto free_prog_sec; + + prog->len = attr->insn_cnt; + + err = -EFAULT; +- if (copy_from_user(prog->insns, u64_to_ptr(attr->insns), +- prog->len * sizeof(struct bpf_insn)) != 0) ++ if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns), ++ bpf_prog_insn_size(prog)) != 0) + goto free_prog; + + prog->orig_prog = NULL; +@@ -675,91 +1631,720 @@ static int bpf_prog_load(union bpf_attr + if (err < 0) + goto free_prog; + ++ prog->aux->load_time = ktime_get_boot_ns(); ++ err = 
bpf_obj_name_cpy(prog->aux->name, attr->prog_name); ++ if (err) ++ goto free_prog; ++ + /* run eBPF verifier */ +- err = bpf_check(&prog, attr); ++ err = bpf_check(&prog, attr, uattr); + if (err < 0) + goto free_used_maps; + +- /* fixup BPF_CALL->imm field */ +- fixup_bpf_calls(prog); +- +- /* eBPF program is ready to be JITed */ +- err = bpf_prog_select_runtime(prog); ++ prog = bpf_prog_select_runtime(prog, &err); + if (err < 0) + goto free_used_maps; + +- err = bpf_prog_new_fd(prog); +- if (err < 0) +- /* failed to allocate fd */ ++ err = bpf_prog_alloc_id(prog); ++ if (err) + goto free_used_maps; + ++ /* Upon success of bpf_prog_alloc_id(), the BPF prog is ++ * effectively publicly exposed. However, retrieving via ++ * bpf_prog_get_fd_by_id() will take another reference, ++ * therefore it cannot be gone underneath us. ++ * ++ * Only for the time /after/ successful bpf_prog_new_fd() ++ * and before returning to userspace, we might just hold ++ * one reference and any parallel close on that fd could ++ * rip everything out. Hence, below notifications must ++ * happen before bpf_prog_new_fd(). ++ * ++ * Also, any failure handling from this point onwards must ++ * be using bpf_prog_put() given the program is exposed. ++ */ ++ bpf_prog_kallsyms_add(prog); ++ ++ err = bpf_prog_new_fd(prog); ++ if (err < 0) ++ bpf_prog_put(prog); + return err; + + free_used_maps: +- free_used_maps(prog->aux); ++ /* In case we have subprogs, we need to wait for a grace ++ * period before we can tear down JIT memory since symbols ++ * are already exposed under kallsyms. 
++ */ ++ __bpf_prog_put_noref(prog, prog->aux->func_cnt); ++ return err; + free_prog: + bpf_prog_uncharge_memlock(prog); +-free_prog_nouncharge: ++free_prog_sec: + bpf_prog_free(prog); + return err; + } + +-#define BPF_OBJ_LAST_FIELD bpf_fd ++#define BPF_OBJ_LAST_FIELD file_flags + + static int bpf_obj_pin(const union bpf_attr *attr) + { +- if (CHECK_ATTR(BPF_OBJ)) ++ if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0) + return -EINVAL; + +- return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname)); ++ return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); + } + + static int bpf_obj_get(const union bpf_attr *attr) + { +- if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0) ++ if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || ++ attr->file_flags & ~BPF_OBJ_FLAG_MASK) + return -EINVAL; + +- return bpf_obj_get_user(u64_to_ptr(attr->pathname)); ++ return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), ++ attr->file_flags); + } + +-SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) ++ ++#define BPF_PROG_ATTACH_LAST_FIELD attach_flags ++ ++#define BPF_F_ATTACH_MASK \ ++ (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI) ++ ++ ++#define BPF_PROG_DETACH_LAST_FIELD attach_type ++ ++ ++#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt ++ ++ ++#define BPF_PROG_TEST_RUN_LAST_FIELD test.ctx_out ++ ++static int bpf_prog_test_run(const union bpf_attr *attr, ++ union bpf_attr __user *uattr) + { +- union bpf_attr attr = {}; +- int err; ++ struct bpf_prog *prog; ++ int ret = -ENOTSUPP; + +- if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled) ++ if (!capable(CAP_SYS_ADMIN)) + return -EPERM; ++ if (CHECK_ATTR(BPF_PROG_TEST_RUN)) ++ return -EINVAL; ++ ++ if ((attr->test.ctx_size_in && !attr->test.ctx_in) || ++ (!attr->test.ctx_size_in && attr->test.ctx_in)) ++ return -EINVAL; ++ ++ if ((attr->test.ctx_size_out && !attr->test.ctx_out) || ++ (!attr->test.ctx_size_out && attr->test.ctx_out)) ++ return -EINVAL; ++ ++ prog = 
bpf_prog_get(attr->test.prog_fd); ++ if (IS_ERR(prog)) ++ return PTR_ERR(prog); ++ ++ if (prog->aux->ops->test_run) ++ ret = prog->aux->ops->test_run(prog, attr, uattr); ++ ++ bpf_prog_put(prog); ++ return ret; ++} ++ ++#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id ++ ++static int bpf_obj_get_next_id(const union bpf_attr *attr, ++ union bpf_attr __user *uattr, ++ struct idr *idr, ++ spinlock_t *lock) ++{ ++ u32 next_id = attr->start_id; ++ int err = 0; ++ ++ if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ next_id++; ++ spin_lock_bh(lock); ++ if (!idr_get_next(idr, &next_id)) ++ err = -ENOENT; ++ spin_unlock_bh(lock); ++ ++ if (!err) ++ err = put_user(next_id, &uattr->next_id); ++ ++ return err; ++} ++ ++#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id ++ ++static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) ++{ ++ struct bpf_prog *prog; ++ u32 id = attr->prog_id; ++ int fd; ++ ++ if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID)) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ spin_lock_bh(&prog_idr_lock); ++ prog = idr_find(&prog_idr, id); ++ if (prog) ++ prog = bpf_prog_inc_not_zero(prog); ++ else ++ prog = ERR_PTR(-ENOENT); ++ spin_unlock_bh(&prog_idr_lock); ++ ++ if (IS_ERR(prog)) ++ return PTR_ERR(prog); ++ ++ fd = bpf_prog_new_fd(prog); ++ if (fd < 0) ++ bpf_prog_put(prog); ++ ++ return fd; ++} ++ ++#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags ++ ++static int bpf_map_get_fd_by_id(const union bpf_attr *attr) ++{ ++ struct bpf_map *map; ++ u32 id = attr->map_id; ++ int f_flags; ++ int fd; ++ ++ if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || ++ attr->open_flags & ~BPF_OBJ_FLAG_MASK) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ f_flags = bpf_get_file_flag(attr->open_flags); ++ if (f_flags < 0) ++ return f_flags; ++ ++ spin_lock_bh(&map_idr_lock); ++ map = idr_find(&map_idr, id); ++ if (map) ++ map = 
__bpf_map_inc_not_zero(map, true); ++ else ++ map = ERR_PTR(-ENOENT); ++ spin_unlock_bh(&map_idr_lock); ++ ++ if (IS_ERR(map)) ++ return PTR_ERR(map); + +- if (!access_ok(VERIFY_READ, uattr, 1)) ++ fd = bpf_map_new_fd(map, f_flags); ++ if (fd < 0) ++ bpf_map_put_with_uref(map); ++ ++ return fd; ++} ++ ++static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog, ++ unsigned long addr, u32 *off, ++ u32 *type) ++{ ++ const struct bpf_map *map; ++ int i; ++ ++ for (i = 0, *off = 0; i < prog->aux->used_map_cnt; i++) { ++ map = prog->aux->used_maps[i]; ++ if (map == (void *)addr) { ++ *type = BPF_PSEUDO_MAP_FD; ++ return map; ++ } ++ if (!map->ops->map_direct_value_meta) ++ continue; ++ if (!map->ops->map_direct_value_meta(map, addr, off)) { ++ *type = BPF_PSEUDO_MAP_VALUE; ++ return map; ++ } ++ } ++ ++ return NULL; ++} ++ ++static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog, ++ const struct cred *f_cred) ++{ ++ const struct bpf_map *map; ++ struct bpf_insn *insns; ++ u32 off, type; ++ u64 imm; ++ int i; ++ ++ insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog), ++ GFP_USER); ++ if (!insns) ++ return insns; ++ ++ for (i = 0; i < prog->len; i++) { ++ if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) { ++ insns[i].code = BPF_JMP | BPF_CALL; ++ insns[i].imm = BPF_FUNC_tail_call; ++ /* fall-through */ ++ } ++ if (insns[i].code == (BPF_JMP | BPF_CALL) || ++ insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) { ++ if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) ++ insns[i].code = BPF_JMP | BPF_CALL; ++ if (!bpf_dump_raw_ok(f_cred)) ++ insns[i].imm = 0; ++ continue; ++ } ++ ++ if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW)) ++ continue; ++ ++ imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm; ++ map = bpf_map_from_imm(prog, imm, &off, &type); ++ if (map) { ++ insns[i].src_reg = type; ++ insns[i].imm = map->id; ++ insns[i + 1].imm = off; ++ continue; ++ } ++ } ++ ++ return insns; ++} ++ ++static int set_info_rec_size(struct bpf_prog_info 
*info) ++{ ++ /* ++ * Ensure info.*_rec_size is the same as kernel expected size ++ * ++ * or ++ * ++ * Only allow zero *_rec_size if both _rec_size and _cnt are ++ * zero. In this case, the kernel will set the expected ++ * _rec_size back to the info. ++ */ ++ ++ if ((info->nr_func_info || info->func_info_rec_size) && ++ info->func_info_rec_size != sizeof(struct bpf_func_info)) ++ return -EINVAL; ++ ++ if ((info->nr_line_info || info->line_info_rec_size) && ++ info->line_info_rec_size != sizeof(struct bpf_line_info)) ++ return -EINVAL; ++ ++ if ((info->nr_jited_line_info || info->jited_line_info_rec_size) && ++ info->jited_line_info_rec_size != sizeof(__u64)) ++ return -EINVAL; ++ ++ info->func_info_rec_size = sizeof(struct bpf_func_info); ++ info->line_info_rec_size = sizeof(struct bpf_line_info); ++ info->jited_line_info_rec_size = sizeof(__u64); ++ ++ return 0; ++} ++ ++static int bpf_prog_get_info_by_fd(struct file *file, ++ struct bpf_prog *prog, ++ const union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info); ++ struct bpf_prog_info info; ++ u32 info_len = attr->info.info_len; ++ struct bpf_prog_stats stats; ++ char __user *uinsns; ++ u32 ulen; ++ int err; ++ ++ err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); ++ if (err) ++ return err; ++ info_len = min_t(u32, sizeof(info), info_len); ++ ++ memset(&info, 0, sizeof(info)); ++ if (copy_from_user(&info, uinfo, info_len)) + return -EFAULT; + +- if (size > PAGE_SIZE) /* silly large */ +- return -E2BIG; ++ info.type = prog->type; ++ info.id = prog->aux->id; ++ info.load_time = prog->aux->load_time; ++ info.created_by_uid = from_kuid_munged(current_user_ns(), ++ prog->aux->user->uid); ++ info.gpl_compatible = prog->gpl_compatible; ++ ++ memcpy(info.tag, prog->tag, sizeof(prog->tag)); ++ memcpy(info.name, prog->aux->name, sizeof(prog->aux->name)); ++ ++ ulen = info.nr_map_ids; ++ info.nr_map_ids = prog->aux->used_map_cnt; ++ 
ulen = min_t(u32, info.nr_map_ids, ulen); ++ if (ulen) { ++ u32 __user *user_map_ids = u64_to_user_ptr(info.map_ids); ++ u32 i; ++ ++ for (i = 0; i < ulen; i++) ++ if (put_user(prog->aux->used_maps[i]->id, ++ &user_map_ids[i])) ++ return -EFAULT; ++ } ++ ++ err = set_info_rec_size(&info); ++ if (err) ++ return err; ++ ++ bpf_prog_get_stats(prog, &stats); ++ info.run_time_ns = stats.nsecs; ++ info.run_cnt = stats.cnt; ++ ++ if (!capable(CAP_SYS_ADMIN)) { ++ info.jited_prog_len = 0; ++ info.xlated_prog_len = 0; ++ info.nr_jited_ksyms = 0; ++ info.nr_jited_func_lens = 0; ++ info.nr_func_info = 0; ++ info.nr_line_info = 0; ++ info.nr_jited_line_info = 0; ++ goto done; ++ } ++ ++ ulen = info.xlated_prog_len; ++ info.xlated_prog_len = bpf_prog_insn_size(prog); ++ if (info.xlated_prog_len && ulen) { ++ struct bpf_insn *insns_sanitized; ++ bool fault; ++ ++ if (prog->blinded && !bpf_dump_raw_ok(file->f_cred)) { ++ info.xlated_prog_insns = 0; ++ goto done; ++ } ++ insns_sanitized = bpf_insn_prepare_dump(prog, file->f_cred); ++ if (!insns_sanitized) ++ return -ENOMEM; ++ uinsns = u64_to_user_ptr(info.xlated_prog_insns); ++ ulen = min_t(u32, info.xlated_prog_len, ulen); ++ fault = copy_to_user(uinsns, insns_sanitized, ulen); ++ kfree(insns_sanitized); ++ if (fault) ++ return -EFAULT; ++ } ++ ++ /* NOTE: the following code is supposed to be skipped for offload. ++ * bpf_prog_offload_info_fill() is the place to fill similar fields ++ * for offload. 
++ */ ++ ulen = info.jited_prog_len; ++ if (prog->aux->func_cnt) { ++ u32 i; ++ ++ info.jited_prog_len = 0; ++ for (i = 0; i < prog->aux->func_cnt; i++) ++ info.jited_prog_len += prog->aux->func[i]->jited_len; ++ } else { ++ info.jited_prog_len = prog->jited_len; ++ } ++ ++ if (info.jited_prog_len && ulen) { ++ if (bpf_dump_raw_ok(file->f_cred)) { ++ uinsns = u64_to_user_ptr(info.jited_prog_insns); ++ ulen = min_t(u32, info.jited_prog_len, ulen); ++ ++ /* for multi-function programs, copy the JITed ++ * instructions for all the functions ++ */ ++ if (prog->aux->func_cnt) { ++ u32 len, free, i; ++ u8 *img; ++ ++ free = ulen; ++ for (i = 0; i < prog->aux->func_cnt; i++) { ++ len = prog->aux->func[i]->jited_len; ++ len = min_t(u32, len, free); ++ img = (u8 *) prog->aux->func[i]->bpf_func; ++ if (copy_to_user(uinsns, img, len)) ++ return -EFAULT; ++ uinsns += len; ++ free -= len; ++ if (!free) ++ break; ++ } ++ } else { ++ if (copy_to_user(uinsns, prog->bpf_func, ulen)) ++ return -EFAULT; ++ } ++ } else { ++ info.jited_prog_insns = 0; ++ } ++ } ++ ++ ulen = info.nr_jited_ksyms; ++ info.nr_jited_ksyms = prog->aux->func_cnt ? : 1; ++ if (ulen) { ++ if (bpf_dump_raw_ok(file->f_cred)) { ++ unsigned long ksym_addr; ++ u64 __user *user_ksyms; ++ u32 i; ++ ++ /* copy the address of the kernel symbol ++ * corresponding to each function ++ */ ++ ulen = min_t(u32, info.nr_jited_ksyms, ulen); ++ user_ksyms = u64_to_user_ptr(info.jited_ksyms); ++ if (prog->aux->func_cnt) { ++ for (i = 0; i < ulen; i++) { ++ ksym_addr = (unsigned long) ++ prog->aux->func[i]->bpf_func; ++ if (put_user((u64) ksym_addr, ++ &user_ksyms[i])) ++ return -EFAULT; ++ } ++ } else { ++ ksym_addr = (unsigned long) prog->bpf_func; ++ if (put_user((u64) ksym_addr, &user_ksyms[0])) ++ return -EFAULT; ++ } ++ } else { ++ info.jited_ksyms = 0; ++ } ++ } ++ ++ ulen = info.nr_jited_func_lens; ++ info.nr_jited_func_lens = prog->aux->func_cnt ? 
: 1; ++ if (ulen) { ++ if (bpf_dump_raw_ok(file->f_cred)) { ++ u32 __user *user_lens; ++ u32 func_len, i; ++ ++ /* copy the JITed image lengths for each function */ ++ ulen = min_t(u32, info.nr_jited_func_lens, ulen); ++ user_lens = u64_to_user_ptr(info.jited_func_lens); ++ if (prog->aux->func_cnt) { ++ for (i = 0; i < ulen; i++) { ++ func_len = ++ prog->aux->func[i]->jited_len; ++ if (put_user(func_len, &user_lens[i])) ++ return -EFAULT; ++ } ++ } else { ++ func_len = prog->jited_len; ++ if (put_user(func_len, &user_lens[0])) ++ return -EFAULT; ++ } ++ } else { ++ info.jited_func_lens = 0; ++ } ++ } ++ ++ if (prog->aux->btf) ++ info.btf_id = btf_id(prog->aux->btf); ++ ++ ulen = info.nr_func_info; ++ info.nr_func_info = prog->aux->func_info_cnt; ++ if (info.nr_func_info && ulen) { ++ char __user *user_finfo; ++ ++ user_finfo = u64_to_user_ptr(info.func_info); ++ ulen = min_t(u32, info.nr_func_info, ulen); ++ if (copy_to_user(user_finfo, prog->aux->func_info, ++ info.func_info_rec_size * ulen)) ++ return -EFAULT; ++ } ++ ++ ulen = info.nr_line_info; ++ info.nr_line_info = prog->aux->nr_linfo; ++ if (info.nr_line_info && ulen) { ++ __u8 __user *user_linfo; ++ ++ user_linfo = u64_to_user_ptr(info.line_info); ++ ulen = min_t(u32, info.nr_line_info, ulen); ++ if (copy_to_user(user_linfo, prog->aux->linfo, ++ info.line_info_rec_size * ulen)) ++ return -EFAULT; ++ } + +- /* If we're handed a bigger struct than we know of, +- * ensure all the unknown bits are 0 - i.e. new +- * user-space does not rely on any kernel feature +- * extensions we dont know about yet. 
+- */ +- if (size > sizeof(attr)) { +- unsigned char __user *addr; +- unsigned char __user *end; +- unsigned char val; +- +- addr = (void __user *)uattr + sizeof(attr); +- end = (void __user *)uattr + size; +- +- for (; addr < end; addr++) { +- err = get_user(val, addr); +- if (err) +- return err; +- if (val) +- return -E2BIG; ++ ulen = info.nr_jited_line_info; ++ if (prog->aux->jited_linfo) ++ info.nr_jited_line_info = prog->aux->nr_linfo; ++ else ++ info.nr_jited_line_info = 0; ++ if (info.nr_jited_line_info && ulen) { ++ if (bpf_dump_raw_ok(file->f_cred)) { ++ __u64 __user *user_linfo; ++ u32 i; ++ ++ user_linfo = u64_to_user_ptr(info.jited_line_info); ++ ulen = min_t(u32, info.nr_jited_line_info, ulen); ++ for (i = 0; i < ulen; i++) { ++ if (put_user((__u64)(long)prog->aux->jited_linfo[i], ++ &user_linfo[i])) ++ return -EFAULT; ++ } ++ } else { ++ info.jited_line_info = 0; + } +- size = sizeof(attr); + } + ++ ulen = info.nr_prog_tags; ++ info.nr_prog_tags = prog->aux->func_cnt ? : 1; ++ if (ulen) { ++ __u8 __user (*user_prog_tags)[BPF_TAG_SIZE]; ++ u32 i; ++ ++ user_prog_tags = u64_to_user_ptr(info.prog_tags); ++ ulen = min_t(u32, info.nr_prog_tags, ulen); ++ if (prog->aux->func_cnt) { ++ for (i = 0; i < ulen; i++) { ++ if (copy_to_user(user_prog_tags[i], ++ prog->aux->func[i]->tag, ++ BPF_TAG_SIZE)) ++ return -EFAULT; ++ } ++ } else { ++ if (copy_to_user(user_prog_tags[0], ++ prog->tag, BPF_TAG_SIZE)) ++ return -EFAULT; ++ } ++ } ++ ++done: ++ if (copy_to_user(uinfo, &info, info_len) || ++ put_user(info_len, &uattr->info.info_len)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int bpf_map_get_info_by_fd(struct file *file, ++ struct bpf_map *map, ++ const union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info); ++ struct bpf_map_info info; ++ u32 info_len = attr->info.info_len; ++ int err; ++ ++ err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len); ++ if (err) ++ return 
err; ++ info_len = min_t(u32, sizeof(info), info_len); ++ ++ memset(&info, 0, sizeof(info)); ++ info.type = map->map_type; ++ info.id = map->id; ++ info.key_size = map->key_size; ++ info.value_size = map->value_size; ++ info.max_entries = map->max_entries; ++ info.map_flags = map->map_flags; ++ memcpy(info.name, map->name, sizeof(map->name)); ++ ++ if (map->btf) { ++ info.btf_id = btf_id(map->btf); ++ info.btf_key_type_id = map->btf_key_type_id; ++ info.btf_value_type_id = map->btf_value_type_id; ++ } ++ ++ if (copy_to_user(uinfo, &info, info_len) || ++ put_user(info_len, &uattr->info.info_len)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++static int bpf_btf_get_info_by_fd(struct file *file, ++ struct btf *btf, ++ const union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ struct bpf_btf_info __user *uinfo = u64_to_user_ptr(attr->info.info); ++ u32 info_len = attr->info.info_len; ++ int err; ++ ++ err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len); ++ if (err) ++ return err; ++ ++ return btf_get_info_by_fd(btf, attr, uattr); ++} ++ ++#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info ++ ++static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ int ufd = attr->info.bpf_fd; ++ struct fd f; ++ int err; ++ ++ if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD)) ++ return -EINVAL; ++ ++ f = fdget(ufd); ++ if (!f.file) ++ return -EBADFD; ++ ++ if (f.file->f_op == &bpf_prog_fops) ++ err = bpf_prog_get_info_by_fd(f.file, f.file->private_data, attr, ++ uattr); ++ else if (f.file->f_op == &bpf_map_fops) ++ err = bpf_map_get_info_by_fd(f.file, f.file->private_data, attr, ++ uattr); ++ else if (f.file->f_op == &btf_fops) ++ err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); ++ else ++ err = -EINVAL; ++ ++ fdput(f); ++ return err; ++} ++ ++#define BPF_BTF_LOAD_LAST_FIELD btf_log_level ++ ++static int bpf_btf_load(const union bpf_attr *attr) ++{ ++ if (CHECK_ATTR(BPF_BTF_LOAD)) ++ return -EINVAL; ++ ++ 
if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ return btf_new_fd(attr); ++} ++ ++#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id ++ ++static int bpf_btf_get_fd_by_id(const union bpf_attr *attr) ++{ ++ if (CHECK_ATTR(BPF_BTF_GET_FD_BY_ID)) ++ return -EINVAL; ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ return btf_get_fd_by_id(attr->btf_id); ++} ++ ++ ++#define BPF_TASK_FD_QUERY_LAST_FIELD task_fd_query.probe_addr ++ ++SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size) ++{ ++ union bpf_attr attr; ++ int err; ++ ++ if (sysctl_unprivileged_bpf_disabled && !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ err = bpf_check_uarg_tail_zero(uattr, sizeof(attr), size); ++ if (err) ++ return err; ++ size = min_t(u32, size, sizeof(attr)); ++ + /* copy attributes from user space, may be less than sizeof(bpf_attr) */ ++ memset(&attr, 0, sizeof(attr)); + if (copy_from_user(&attr, uattr, size) != 0) + return -EFAULT; + +@@ -779,8 +2364,11 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf + case BPF_MAP_GET_NEXT_KEY: + err = map_get_next_key(&attr); + break; ++ case BPF_MAP_FREEZE: ++ err = map_freeze(&attr); ++ break; + case BPF_PROG_LOAD: +- err = bpf_prog_load(&attr); ++ err = bpf_prog_load(&attr, uattr); + break; + case BPF_OBJ_PIN: + err = bpf_obj_pin(&attr); +@@ -788,6 +2376,39 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf + case BPF_OBJ_GET: + err = bpf_obj_get(&attr); + break; ++ case BPF_PROG_TEST_RUN: ++ err = bpf_prog_test_run(&attr, uattr); ++ break; ++ case BPF_PROG_GET_NEXT_ID: ++ err = bpf_obj_get_next_id(&attr, uattr, ++ &prog_idr, &prog_idr_lock); ++ break; ++ case BPF_MAP_GET_NEXT_ID: ++ err = bpf_obj_get_next_id(&attr, uattr, ++ &map_idr, &map_idr_lock); ++ break; ++ case BPF_BTF_GET_NEXT_ID: ++ err = bpf_obj_get_next_id(&attr, uattr, ++ &btf_idr, &btf_idr_lock); ++ break; ++ case BPF_PROG_GET_FD_BY_ID: ++ err = bpf_prog_get_fd_by_id(&attr); ++ break; ++ case BPF_MAP_GET_FD_BY_ID: ++ err = bpf_map_get_fd_by_id(&attr); ++ 
break; ++ case BPF_OBJ_GET_INFO_BY_FD: ++ err = bpf_obj_get_info_by_fd(&attr, uattr); ++ break; ++ case BPF_BTF_LOAD: ++ err = bpf_btf_load(&attr); ++ break; ++ case BPF_BTF_GET_FD_BY_ID: ++ err = bpf_btf_get_fd_by_id(&attr); ++ break; ++ case BPF_MAP_LOOKUP_AND_DELETE_ELEM: ++ err = map_lookup_and_delete_elem(&attr); ++ break; + default: + err = -EINVAL; + break; +--- /dev/null ++++ b/kernel/bpf/sysfs_btf.c +@@ -0,0 +1,45 @@ ++// SPDX-License-Identifier: GPL-2.0 ++/* ++ * Provide kernel BTF information for introspection and use by eBPF tools. ++ */ ++#include ++#include ++#include ++#include ++#include ++ ++/* See scripts/link-vmlinux.sh, gen_btf() func for details */ ++extern char __weak __start_BTF[]; ++extern char __weak __stop_BTF[]; ++ ++static ssize_t ++btf_vmlinux_read(struct file *file, struct kobject *kobj, ++ struct bin_attribute *bin_attr, ++ char *buf, loff_t off, size_t len) ++{ ++ memcpy(buf, __start_BTF + off, len); ++ return len; ++} ++ ++static struct bin_attribute bin_attr_btf_vmlinux __ro_after_init = { ++ .attr = { .name = "vmlinux", .mode = 0444, }, ++ .read = btf_vmlinux_read, ++}; ++ ++static struct kobject *btf_kobj; ++ ++static int __init btf_vmlinux_init(void) ++{ ++ bin_attr_btf_vmlinux.size = __stop_BTF - __start_BTF; ++ ++ if (!__start_BTF || bin_attr_btf_vmlinux.size == 0) ++ return 0; ++ ++ btf_kobj = kobject_create_and_add("btf", kernel_kobj); ++ if (!btf_kobj) ++ return -ENOMEM; ++ ++ return sysfs_create_bin_file(btf_kobj, &bin_attr_btf_vmlinux); ++} ++ ++subsys_initcall(btf_vmlinux_init); +--- /dev/null ++++ b/kernel/bpf/tnum.c +@@ -0,0 +1,196 @@ ++// SPDX-License-Identifier: GPL-2.0-only ++/* tnum: tracked (or tristate) numbers ++ * ++ * A tnum tracks knowledge about the bits of a value. Each bit can be either ++ * known (0 or 1), or unknown (x). Arithmetic operations on tnums will ++ * propagate the unknown bits such that the tnum result represents all the ++ * possible results for possible values of the operands. 
++ */ ++#include ++#include ++ ++#define TNUM(_v, _m) (struct tnum){.value = _v, .mask = _m} ++/* A completely unknown value */ ++const struct tnum tnum_unknown = { .value = 0, .mask = -1 }; ++ ++struct tnum tnum_const(u64 value) ++{ ++ return TNUM(value, 0); ++} ++ ++struct tnum tnum_range(u64 min, u64 max) ++{ ++ u64 chi = min ^ max, delta; ++ u8 bits = fls64(chi); ++ ++ /* special case, needed because 1ULL << 64 is undefined */ ++ if (bits > 63) ++ return tnum_unknown; ++ /* e.g. if chi = 4, bits = 3, delta = (1<<3) - 1 = 7. ++ * if chi = 0, bits = 0, delta = (1<<0) - 1 = 0, so we return ++ * constant min (since min == max). ++ */ ++ delta = (1ULL << bits) - 1; ++ return TNUM(min & ~delta, delta); ++} ++ ++struct tnum tnum_lshift(struct tnum a, u8 shift) ++{ ++ return TNUM(a.value << shift, a.mask << shift); ++} ++ ++struct tnum tnum_rshift(struct tnum a, u8 shift) ++{ ++ return TNUM(a.value >> shift, a.mask >> shift); ++} ++ ++struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness) ++{ ++ /* if a.value is negative, arithmetic shifting by minimum shift ++ * will have larger negative offset compared to more shifting. ++ * If a.value is nonnegative, arithmetic shifting by minimum shift ++ * will have larger positive offset compare to more shifting. 
++ */ ++ if (insn_bitness == 32) ++ return TNUM((u32)(((s32)a.value) >> min_shift), ++ (u32)(((s32)a.mask) >> min_shift)); ++ else ++ return TNUM((s64)a.value >> min_shift, ++ (s64)a.mask >> min_shift); ++} ++ ++struct tnum tnum_add(struct tnum a, struct tnum b) ++{ ++ u64 sm, sv, sigma, chi, mu; ++ ++ sm = a.mask + b.mask; ++ sv = a.value + b.value; ++ sigma = sm + sv; ++ chi = sigma ^ sv; ++ mu = chi | a.mask | b.mask; ++ return TNUM(sv & ~mu, mu); ++} ++ ++struct tnum tnum_sub(struct tnum a, struct tnum b) ++{ ++ u64 dv, alpha, beta, chi, mu; ++ ++ dv = a.value - b.value; ++ alpha = dv + a.mask; ++ beta = dv - b.mask; ++ chi = alpha ^ beta; ++ mu = chi | a.mask | b.mask; ++ return TNUM(dv & ~mu, mu); ++} ++ ++struct tnum tnum_and(struct tnum a, struct tnum b) ++{ ++ u64 alpha, beta, v; ++ ++ alpha = a.value | a.mask; ++ beta = b.value | b.mask; ++ v = a.value & b.value; ++ return TNUM(v, alpha & beta & ~v); ++} ++ ++struct tnum tnum_or(struct tnum a, struct tnum b) ++{ ++ u64 v, mu; ++ ++ v = a.value | b.value; ++ mu = a.mask | b.mask; ++ return TNUM(v, mu & ~v); ++} ++ ++struct tnum tnum_xor(struct tnum a, struct tnum b) ++{ ++ u64 v, mu; ++ ++ v = a.value ^ b.value; ++ mu = a.mask | b.mask; ++ return TNUM(v & ~mu, mu); ++} ++ ++/* half-multiply add: acc += (unknown * mask * value). ++ * An intermediate step in the multiply algorithm. ++ */ ++static struct tnum hma(struct tnum acc, u64 value, u64 mask) ++{ ++ while (mask) { ++ if (mask & 1) ++ acc = tnum_add(acc, TNUM(0, value)); ++ mask >>= 1; ++ value <<= 1; ++ } ++ return acc; ++} ++ ++struct tnum tnum_mul(struct tnum a, struct tnum b) ++{ ++ struct tnum acc; ++ u64 pi; ++ ++ pi = a.value * b.value; ++ acc = hma(TNUM(pi, 0), a.mask, b.mask | b.value); ++ return hma(acc, b.mask, a.value); ++} ++ ++/* Note that if a and b disagree - i.e. one has a 'known 1' where the other has ++ * a 'known 0' - this will return a 'known 1' for that bit. 
++ */ ++struct tnum tnum_intersect(struct tnum a, struct tnum b) ++{ ++ u64 v, mu; ++ ++ v = a.value | b.value; ++ mu = a.mask & b.mask; ++ return TNUM(v & ~mu, mu); ++} ++ ++struct tnum tnum_cast(struct tnum a, u8 size) ++{ ++ a.value &= (1ULL << (size * 8)) - 1; ++ a.mask &= (1ULL << (size * 8)) - 1; ++ return a; ++} ++ ++bool tnum_is_aligned(struct tnum a, u64 size) ++{ ++ if (!size) ++ return true; ++ return !((a.value | a.mask) & (size - 1)); ++} ++ ++bool tnum_in(struct tnum a, struct tnum b) ++{ ++ if (b.mask & ~a.mask) ++ return false; ++ b.value &= ~a.mask; ++ return a.value == b.value; ++} ++ ++int tnum_strn(char *str, size_t size, struct tnum a) ++{ ++ return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask); ++} ++EXPORT_SYMBOL_GPL(tnum_strn); ++ ++int tnum_sbin(char *str, size_t size, struct tnum a) ++{ ++ size_t n; ++ ++ for (n = 64; n; n--) { ++ if (n < size) { ++ if (a.mask & 1) ++ str[n - 1] = 'x'; ++ else if (a.value & 1) ++ str[n - 1] = '1'; ++ else ++ str[n - 1] = '0'; ++ } ++ a.mask >>= 1; ++ a.value >>= 1; ++ } ++ str[min(size - 1, (size_t)64)] = 0; ++ return 64; ++} +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -1,22 +1,36 @@ ++// SPDX-License-Identifier: GPL-2.0-only + /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com +- * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of version 2 of the GNU General Public +- * License as published by the Free Software Foundation. +- * +- * This program is distributed in the hope that it will be useful, but +- * WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +- * General Public License for more details. ++ * Copyright (c) 2016 Facebook ++ * Copyright (c) 2018 Covalent IO, Inc. 
http://covalent.io + */ ++#include + #include + #include + #include + #include ++#include ++#include + #include + #include + #include + #include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "disasm.h" ++ ++static const struct bpf_verifier_ops * const bpf_verifier_ops[] = { ++#define BPF_PROG_TYPE(_id, _name) \ ++ [_id] = & _name ## _verifier_ops, ++#define BPF_MAP_TYPE(_id, _ops) ++#include ++#undef BPF_PROG_TYPE ++#undef BPF_MAP_TYPE ++}; + + /* bpf_check() is a static code analyzer that walks eBPF program + * instruction by instruction and updates register/stack state. +@@ -30,7 +44,7 @@ + * - out of bounds or malformed jumps + * The second pass is all possible path descent from the 1st insn. + * Since it's analyzing all pathes through the program, the length of the +- * analysis is limited to 32k insn, which may be hit even if total number of ++ * analysis is limited to 64k insn, which may be hit even if total number of + * insn is less then 4K, but there are too many branches that change stack/regs. + * Number of 'branches to be analyzed' is limited to 1k + * +@@ -58,13 +72,13 @@ + * (and -20 constant is saved for further stack bounds checking). + * Meaning that this reg is a pointer to stack plus known immediate constant. + * +- * Most of the time the registers have UNKNOWN_VALUE type, which ++ * Most of the time the registers have SCALAR_VALUE type, which + * means the register has some value, but it's not a valid pointer. +- * (like pointer plus pointer becomes UNKNOWN_VALUE type) ++ * (like pointer plus pointer becomes SCALAR_VALUE type) + * + * When verifier sees load or store instructions the type of base register +- * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, FRAME_PTR. These are three pointer +- * types recognized by check_mem_access() function. ++ * can be: PTR_TO_MAP_VALUE, PTR_TO_CTX, PTR_TO_STACK, PTR_TO_SOCKET. These are ++ * four pointer types recognized by check_mem_access() function. 
+ * + * PTR_TO_MAP_VALUE means that this register is pointing to 'map element value' + * and the range of [ptr, ptr + map's value_size) is accessible. +@@ -123,346 +137,713 @@ + * + * After the call R0 is set to return type of the function and registers R1-R5 + * are set to NOT_INIT to indicate that they are no longer readable. ++ * ++ * The following reference types represent a potential reference to a kernel ++ * resource which, after first being allocated, must be checked and freed by ++ * the BPF program: ++ * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET ++ * ++ * When the verifier sees a helper call return a reference type, it allocates a ++ * pointer id for the reference and stores it in the current function state. ++ * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into ++ * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type ++ * passes through a NULL-check conditional. For the branch wherein the state is ++ * changed to CONST_IMM, the verifier releases the reference. ++ * ++ * For each helper function that allocates a reference, such as ++ * bpf_sk_lookup_tcp(), there is a corresponding release function, such as ++ * bpf_sk_release(). When a reference type passes into the release function, ++ * the verifier also releases the reference. If any unchecked or unreleased ++ * reference remains at the end of the program, the verifier rejects it. 
+ */ + +-/* types of values stored in eBPF registers */ +-enum bpf_reg_type { +- NOT_INIT = 0, /* nothing was written into register */ +- UNKNOWN_VALUE, /* reg doesn't contain a valid pointer */ +- PTR_TO_CTX, /* reg points to bpf_context */ +- CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ +- PTR_TO_MAP_VALUE, /* reg points to map element value */ +- PTR_TO_MAP_VALUE_OR_NULL,/* points to map elem value or NULL */ +- FRAME_PTR, /* reg == frame_pointer */ +- PTR_TO_STACK, /* reg == frame_pointer + imm */ +- CONST_IMM, /* constant integer value */ +-}; +- +-struct reg_state { +- enum bpf_reg_type type; +- union { +- /* valid when type == CONST_IMM | PTR_TO_STACK */ +- int imm; +- +- /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | +- * PTR_TO_MAP_VALUE_OR_NULL +- */ +- struct bpf_map *map_ptr; +- }; +-}; +- +-enum bpf_stack_slot_type { +- STACK_INVALID, /* nothing was stored in this stack slot */ +- STACK_SPILL, /* register spilled into stack */ +- STACK_MISC /* BPF program wrote some data into this slot */ +-}; +- +-#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ +- +-/* state of the program: +- * type of all registers and stack info +- */ +-struct verifier_state { +- struct reg_state regs[MAX_BPF_REG]; +- u8 stack_slot_type[MAX_BPF_STACK]; +- struct reg_state spilled_regs[MAX_BPF_STACK / BPF_REG_SIZE]; +-}; +- +-/* linked list of verifier states used to prune search */ +-struct verifier_state_list { +- struct verifier_state state; +- struct verifier_state_list *next; +-}; +- + /* verifier_state + insn_idx are pushed to stack when branch is encountered */ +-struct verifier_stack_elem { ++struct bpf_verifier_stack_elem { + /* verifer state is 'st' + * before processing instruction 'insn_idx' + * and after processing instruction 'prev_insn_idx' + */ +- struct verifier_state st; ++ struct bpf_verifier_state st; + int insn_idx; + int prev_insn_idx; +- struct verifier_stack_elem *next; ++ struct bpf_verifier_stack_elem *next; + }; + +-#define 
MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ ++#define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192 ++#define BPF_COMPLEXITY_LIMIT_STATES 64 + +-/* single container for all structs +- * one verifier_env per bpf_check() call +- */ +-struct verifier_env { +- struct bpf_prog *prog; /* eBPF program being verified */ +- struct verifier_stack_elem *head; /* stack of verifier states to be processed */ +- int stack_size; /* number of states to be processed */ +- struct verifier_state cur_state; /* current verifier state */ +- struct verifier_state_list **explored_states; /* search pruning optimization */ +- struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ +- u32 used_map_cnt; /* number of used maps */ +- bool allow_ptr_leaks; +-}; ++#define BPF_MAP_PTR_UNPRIV 1UL ++#define BPF_MAP_PTR_POISON ((void *)((0xeB9FUL << 1) + \ ++ POISON_POINTER_DELTA)) ++#define BPF_MAP_PTR(X) ((struct bpf_map *)((X) & ~BPF_MAP_PTR_UNPRIV)) + +-/* verbose verifier prints what it's seeing +- * bpf_check() is called under lock, so no race to access these global vars +- */ +-static u32 log_level, log_size, log_len; +-static char *log_buf; ++static bool bpf_map_ptr_poisoned(const struct bpf_insn_aux_data *aux) ++{ ++ return BPF_MAP_PTR(aux->map_state) == BPF_MAP_PTR_POISON; ++} ++ ++static bool bpf_map_ptr_unpriv(const struct bpf_insn_aux_data *aux) ++{ ++ return aux->map_state & BPF_MAP_PTR_UNPRIV; ++} ++ ++static void bpf_map_ptr_store(struct bpf_insn_aux_data *aux, ++ const struct bpf_map *map, bool unpriv) ++{ ++ BUILD_BUG_ON((unsigned long)BPF_MAP_PTR_POISON & BPF_MAP_PTR_UNPRIV); ++ unpriv |= bpf_map_ptr_unpriv(aux); ++ aux->map_state = (unsigned long)map | ++ (unpriv ? 
BPF_MAP_PTR_UNPRIV : 0UL); ++} ++ ++struct bpf_call_arg_meta { ++ struct bpf_map *map_ptr; ++ bool raw_mode; ++ bool pkt_access; ++ int regno; ++ int access_size; ++ u64 msize_max_value; ++ int ref_obj_id; ++ int func_id; ++}; + + static DEFINE_MUTEX(bpf_verifier_lock); + ++static const struct bpf_line_info * ++find_linfo(const struct bpf_verifier_env *env, u32 insn_off) ++{ ++ const struct bpf_line_info *linfo; ++ const struct bpf_prog *prog; ++ u32 i, nr_linfo; ++ ++ prog = env->prog; ++ nr_linfo = prog->aux->nr_linfo; ++ ++ if (!nr_linfo || insn_off >= prog->len) ++ return NULL; ++ ++ linfo = prog->aux->linfo; ++ for (i = 1; i < nr_linfo; i++) ++ if (insn_off < linfo[i].insn_off) ++ break; ++ ++ return &linfo[i - 1]; ++} ++ ++void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, ++ va_list args) ++{ ++ unsigned int n; ++ ++ n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args); ++ ++ WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1, ++ "verifier log line truncated - local buffer too short\n"); ++ ++ n = min(log->len_total - log->len_used - 1, n); ++ log->kbuf[n] = '\0'; ++ ++ if (!copy_to_user(log->ubuf + log->len_used, log->kbuf, n + 1)) ++ log->len_used += n; ++ else ++ log->ubuf = NULL; ++} ++ + /* log_level controls verbosity level of eBPF verifier. +- * verbose() is used to dump the verification trace to the log, so the user +- * can figure out what's wrong with the program ++ * bpf_verifier_log_write() is used to dump the verification trace to the log, ++ * so the user can figure out what's wrong with the program + */ +-static __printf(1, 2) void verbose(const char *fmt, ...) ++__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, ++ const char *fmt, ...) 
+ { + va_list args; + +- if (log_level == 0 || log_len >= log_size - 1) ++ if (!bpf_verifier_log_needed(&env->log)) + return; + + va_start(args, fmt); +- log_len += vscnprintf(log_buf + log_len, log_size - log_len, fmt, args); ++ bpf_verifier_vlog(&env->log, fmt, args); + va_end(args); + } ++EXPORT_SYMBOL_GPL(bpf_verifier_log_write); ++ ++__printf(2, 3) static void verbose(void *private_data, const char *fmt, ...) ++{ ++ struct bpf_verifier_env *env = private_data; ++ va_list args; ++ ++ if (!bpf_verifier_log_needed(&env->log)) ++ return; ++ ++ va_start(args, fmt); ++ bpf_verifier_vlog(&env->log, fmt, args); ++ va_end(args); ++} ++ ++static const char *ltrim(const char *s) ++{ ++ while (isspace(*s)) ++ s++; ++ ++ return s; ++} ++ ++__printf(3, 4) static void verbose_linfo(struct bpf_verifier_env *env, ++ u32 insn_off, ++ const char *prefix_fmt, ...) ++{ ++ const struct bpf_line_info *linfo; ++ ++ if (!bpf_verifier_log_needed(&env->log)) ++ return; ++ ++ linfo = find_linfo(env, insn_off); ++ if (!linfo || linfo == env->prev_linfo) ++ return; ++ ++ if (prefix_fmt) { ++ va_list args; ++ ++ va_start(args, prefix_fmt); ++ bpf_verifier_vlog(&env->log, prefix_fmt, args); ++ va_end(args); ++ } ++ ++ verbose(env, "%s\n", ++ ltrim(btf_name_by_offset(env->prog->aux->btf, ++ linfo->line_off))); ++ ++ env->prev_linfo = linfo; ++} ++ ++static bool type_is_pkt_pointer(enum bpf_reg_type type) ++{ ++ return type == PTR_TO_PACKET || ++ type == PTR_TO_PACKET_META; ++} ++ ++static bool type_is_sk_pointer(enum bpf_reg_type type) ++{ ++ return type == PTR_TO_SOCKET || ++ type == PTR_TO_SOCK_COMMON || ++ type == PTR_TO_TCP_SOCK || ++ type == PTR_TO_XDP_SOCK; ++} ++ ++static bool reg_type_may_be_null(enum bpf_reg_type type) ++{ ++ return type == PTR_TO_MAP_VALUE_OR_NULL || ++ type == PTR_TO_SOCKET_OR_NULL || ++ type == PTR_TO_SOCK_COMMON_OR_NULL || ++ type == PTR_TO_TCP_SOCK_OR_NULL; ++} ++ ++static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg) ++{ ++ return reg->type 
== PTR_TO_MAP_VALUE && ++ map_value_has_spin_lock(reg->map_ptr); ++} ++ ++static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type) ++{ ++ return type == PTR_TO_SOCKET || ++ type == PTR_TO_SOCKET_OR_NULL || ++ type == PTR_TO_TCP_SOCK || ++ type == PTR_TO_TCP_SOCK_OR_NULL; ++} ++ ++static bool arg_type_may_be_refcounted(enum bpf_arg_type type) ++{ ++ return type == ARG_PTR_TO_SOCK_COMMON; ++} ++ ++/* Determine whether the function releases some resources allocated by another ++ * function call. The first reference type argument will be assumed to be ++ * released by release_reference(). ++ */ ++static bool is_release_function(enum bpf_func_id func_id) ++{ ++ return func_id == BPF_FUNC_sk_release; ++} ++ ++static bool is_acquire_function(enum bpf_func_id func_id) ++{ ++ return func_id == BPF_FUNC_sk_lookup_tcp || ++ func_id == BPF_FUNC_sk_lookup_udp || ++ func_id == BPF_FUNC_skc_lookup_tcp; ++} ++ ++static bool is_ptr_cast_function(enum bpf_func_id func_id) ++{ ++ return func_id == BPF_FUNC_tcp_sock || ++ func_id == BPF_FUNC_sk_fullsock; ++} + + /* string representation of 'enum bpf_reg_type' */ + static const char * const reg_type_str[] = { + [NOT_INIT] = "?", +- [UNKNOWN_VALUE] = "inv", ++ [SCALAR_VALUE] = "inv", + [PTR_TO_CTX] = "ctx", + [CONST_PTR_TO_MAP] = "map_ptr", + [PTR_TO_MAP_VALUE] = "map_value", + [PTR_TO_MAP_VALUE_OR_NULL] = "map_value_or_null", +- [FRAME_PTR] = "fp", + [PTR_TO_STACK] = "fp", +- [CONST_IMM] = "imm", ++ [PTR_TO_PACKET] = "pkt", ++ [PTR_TO_PACKET_META] = "pkt_meta", ++ [PTR_TO_PACKET_END] = "pkt_end", ++ [PTR_TO_FLOW_KEYS] = "flow_keys", ++ [PTR_TO_SOCKET] = "sock", ++ [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", ++ [PTR_TO_SOCK_COMMON] = "sock_common", ++ [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null", ++ [PTR_TO_TCP_SOCK] = "tcp_sock", ++ [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null", ++ [PTR_TO_TP_BUFFER] = "tp_buffer", ++ [PTR_TO_XDP_SOCK] = "xdp_sock", + }; + +-static void print_verifier_state(struct verifier_env 
*env) ++static char slot_type_char[] = { ++ [STACK_INVALID] = '?', ++ [STACK_SPILL] = 'r', ++ [STACK_MISC] = 'm', ++ [STACK_ZERO] = '0', ++}; ++ ++static void print_liveness(struct bpf_verifier_env *env, ++ enum bpf_reg_liveness live) ++{ ++ if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE)) ++ verbose(env, "_"); ++ if (live & REG_LIVE_READ) ++ verbose(env, "r"); ++ if (live & REG_LIVE_WRITTEN) ++ verbose(env, "w"); ++ if (live & REG_LIVE_DONE) ++ verbose(env, "D"); ++} ++ ++static struct bpf_func_state *func(struct bpf_verifier_env *env, ++ const struct bpf_reg_state *reg) + { ++ struct bpf_verifier_state *cur = env->cur_state; ++ ++ return cur->frame[reg->frameno]; ++} ++ ++static void print_verifier_state(struct bpf_verifier_env *env, ++ const struct bpf_func_state *state) ++{ ++ const struct bpf_reg_state *reg; + enum bpf_reg_type t; + int i; + ++ if (state->frameno) ++ verbose(env, " frame%d:", state->frameno); + for (i = 0; i < MAX_BPF_REG; i++) { +- t = env->cur_state.regs[i].type; ++ reg = &state->regs[i]; ++ t = reg->type; + if (t == NOT_INIT) + continue; +- verbose(" R%d=%s", i, reg_type_str[t]); +- if (t == CONST_IMM || t == PTR_TO_STACK) +- verbose("%d", env->cur_state.regs[i].imm); +- else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || +- t == PTR_TO_MAP_VALUE_OR_NULL) +- verbose("(ks=%d,vs=%d)", +- env->cur_state.regs[i].map_ptr->key_size, +- env->cur_state.regs[i].map_ptr->value_size); +- } +- for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { +- if (env->cur_state.stack_slot_type[i] == STACK_SPILL) +- verbose(" fp%d=%s", -MAX_BPF_STACK + i, +- reg_type_str[env->cur_state.spilled_regs[i / BPF_REG_SIZE].type]); +- } +- verbose("\n"); +-} +- +-static const char *const bpf_class_string[] = { +- [BPF_LD] = "ld", +- [BPF_LDX] = "ldx", +- [BPF_ST] = "st", +- [BPF_STX] = "stx", +- [BPF_ALU] = "alu", +- [BPF_JMP] = "jmp", +- [BPF_RET] = "BUG", +- [BPF_ALU64] = "alu64", +-}; ++ verbose(env, " R%d", i); ++ print_liveness(env, reg->live); ++ 
verbose(env, "=%s", reg_type_str[t]); ++ if (t == SCALAR_VALUE && reg->precise) ++ verbose(env, "P"); ++ if ((t == SCALAR_VALUE || t == PTR_TO_STACK) && ++ tnum_is_const(reg->var_off)) { ++ /* reg->off should be 0 for SCALAR_VALUE */ ++ verbose(env, "%lld", reg->var_off.value + reg->off); ++ } else { ++ verbose(env, "(id=%d", reg->id); ++ if (reg_type_may_be_refcounted_or_null(t)) ++ verbose(env, ",ref_obj_id=%d", reg->ref_obj_id); ++ if (t != SCALAR_VALUE) ++ verbose(env, ",off=%d", reg->off); ++ if (type_is_pkt_pointer(t)) ++ verbose(env, ",r=%d", reg->range); ++ else if (t == CONST_PTR_TO_MAP || ++ t == PTR_TO_MAP_VALUE || ++ t == PTR_TO_MAP_VALUE_OR_NULL) ++ verbose(env, ",ks=%d,vs=%d", ++ reg->map_ptr->key_size, ++ reg->map_ptr->value_size); ++ if (tnum_is_const(reg->var_off)) { ++ /* Typically an immediate SCALAR_VALUE, but ++ * could be a pointer whose offset is too big ++ * for reg->off ++ */ ++ verbose(env, ",imm=%llx", reg->var_off.value); ++ } else { ++ if (reg->smin_value != reg->umin_value && ++ reg->smin_value != S64_MIN) ++ verbose(env, ",smin_value=%lld", ++ (long long)reg->smin_value); ++ if (reg->smax_value != reg->umax_value && ++ reg->smax_value != S64_MAX) ++ verbose(env, ",smax_value=%lld", ++ (long long)reg->smax_value); ++ if (reg->umin_value != 0) ++ verbose(env, ",umin_value=%llu", ++ (unsigned long long)reg->umin_value); ++ if (reg->umax_value != U64_MAX) ++ verbose(env, ",umax_value=%llu", ++ (unsigned long long)reg->umax_value); ++ if (!tnum_is_unknown(reg->var_off)) { ++ char tn_buf[48]; + +-static const char *const bpf_alu_string[16] = { +- [BPF_ADD >> 4] = "+=", +- [BPF_SUB >> 4] = "-=", +- [BPF_MUL >> 4] = "*=", +- [BPF_DIV >> 4] = "/=", +- [BPF_OR >> 4] = "|=", +- [BPF_AND >> 4] = "&=", +- [BPF_LSH >> 4] = "<<=", +- [BPF_RSH >> 4] = ">>=", +- [BPF_NEG >> 4] = "neg", +- [BPF_MOD >> 4] = "%=", +- [BPF_XOR >> 4] = "^=", +- [BPF_MOV >> 4] = "=", +- [BPF_ARSH >> 4] = "s>>=", +- [BPF_END >> 4] = "endian", +-}; ++ tnum_strn(tn_buf, 
sizeof(tn_buf), reg->var_off); ++ verbose(env, ",var_off=%s", tn_buf); ++ } ++ } ++ verbose(env, ")"); ++ } ++ } ++ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { ++ char types_buf[BPF_REG_SIZE + 1]; ++ bool valid = false; ++ int j; ++ ++ for (j = 0; j < BPF_REG_SIZE; j++) { ++ if (state->stack[i].slot_type[j] != STACK_INVALID) ++ valid = true; ++ types_buf[j] = slot_type_char[ ++ state->stack[i].slot_type[j]]; ++ } ++ types_buf[BPF_REG_SIZE] = 0; ++ if (!valid) ++ continue; ++ verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE); ++ print_liveness(env, state->stack[i].spilled_ptr.live); ++ if (state->stack[i].slot_type[0] == STACK_SPILL) { ++ reg = &state->stack[i].spilled_ptr; ++ t = reg->type; ++ verbose(env, "=%s", reg_type_str[t]); ++ if (t == SCALAR_VALUE && reg->precise) ++ verbose(env, "P"); ++ if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) ++ verbose(env, "%lld", reg->var_off.value + reg->off); ++ } else { ++ verbose(env, "=%s", types_buf); ++ } ++ } ++ if (state->acquired_refs && state->refs[0].id) { ++ verbose(env, " refs=%d", state->refs[0].id); ++ for (i = 1; i < state->acquired_refs; i++) ++ if (state->refs[i].id) ++ verbose(env, ",%d", state->refs[i].id); ++ } ++ verbose(env, "\n"); ++} + +-static const char *const bpf_ldst_string[] = { +- [BPF_W >> 3] = "u32", +- [BPF_H >> 3] = "u16", +- [BPF_B >> 3] = "u8", +- [BPF_DW >> 3] = "u64", +-}; ++#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \ ++static int copy_##NAME##_state(struct bpf_func_state *dst, \ ++ const struct bpf_func_state *src) \ ++{ \ ++ if (!src->FIELD) \ ++ return 0; \ ++ if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \ ++ /* internal bug, make state invalid to reject the program */ \ ++ memset(dst, 0, sizeof(*dst)); \ ++ return -EFAULT; \ ++ } \ ++ memcpy(dst->FIELD, src->FIELD, \ ++ sizeof(*src->FIELD) * (src->COUNT / SIZE)); \ ++ return 0; \ ++} ++/* copy_reference_state() */ ++COPY_STATE_FN(reference, acquired_refs, refs, 1) ++/* copy_stack_state() */ 
++COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) ++#undef COPY_STATE_FN ++ ++#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \ ++static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \ ++ bool copy_old) \ ++{ \ ++ u32 old_size = state->COUNT; \ ++ struct bpf_##NAME##_state *new_##FIELD; \ ++ int slot = size / SIZE; \ ++ \ ++ if (size <= old_size || !size) { \ ++ if (copy_old) \ ++ return 0; \ ++ state->COUNT = slot * SIZE; \ ++ if (!size && old_size) { \ ++ kfree(state->FIELD); \ ++ state->FIELD = NULL; \ ++ } \ ++ return 0; \ ++ } \ ++ new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \ ++ GFP_KERNEL); \ ++ if (!new_##FIELD) \ ++ return -ENOMEM; \ ++ if (copy_old) { \ ++ if (state->FIELD) \ ++ memcpy(new_##FIELD, state->FIELD, \ ++ sizeof(*new_##FIELD) * (old_size / SIZE)); \ ++ memset(new_##FIELD + old_size / SIZE, 0, \ ++ sizeof(*new_##FIELD) * (size - old_size) / SIZE); \ ++ } \ ++ state->COUNT = slot * SIZE; \ ++ kfree(state->FIELD); \ ++ state->FIELD = new_##FIELD; \ ++ return 0; \ ++} ++/* realloc_reference_state() */ ++REALLOC_STATE_FN(reference, acquired_refs, refs, 1) ++/* realloc_stack_state() */ ++REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) ++#undef REALLOC_STATE_FN ++ ++/* do_check() starts with zero-sized stack in struct bpf_verifier_state to ++ * make it consume minimal amount of memory. check_stack_write() access from ++ * the program calls into realloc_func_state() to grow the stack size. ++ * Note there is a non-zero 'parent' pointer inside bpf_verifier_state ++ * which realloc_stack_state() copies over. It points to previous ++ * bpf_verifier_state which is never reallocated. 
++ */ ++static int realloc_func_state(struct bpf_func_state *state, int stack_size, ++ int refs_size, bool copy_old) ++{ ++ int err = realloc_reference_state(state, refs_size, copy_old); ++ if (err) ++ return err; ++ return realloc_stack_state(state, stack_size, copy_old); ++} + +-static const char *const bpf_jmp_string[16] = { +- [BPF_JA >> 4] = "jmp", +- [BPF_JEQ >> 4] = "==", +- [BPF_JGT >> 4] = ">", +- [BPF_JGE >> 4] = ">=", +- [BPF_JSET >> 4] = "&", +- [BPF_JNE >> 4] = "!=", +- [BPF_JSGT >> 4] = "s>", +- [BPF_JSGE >> 4] = "s>=", +- [BPF_CALL >> 4] = "call", +- [BPF_EXIT >> 4] = "exit", +-}; ++/* Acquire a pointer id from the env and update the state->refs to include ++ * this new pointer reference. ++ * On success, returns a valid pointer id to associate with the register ++ * On failure, returns a negative errno. ++ */ ++static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) ++{ ++ struct bpf_func_state *state = cur_func(env); ++ int new_ofs = state->acquired_refs; ++ int id, err; + +-static void print_bpf_insn(const struct verifier_env *env, +- const struct bpf_insn *insn) ++ err = realloc_reference_state(state, state->acquired_refs + 1, true); ++ if (err) ++ return err; ++ id = ++env->id_gen; ++ state->refs[new_ofs].id = id; ++ state->refs[new_ofs].insn_idx = insn_idx; ++ ++ return id; ++} ++ ++/* release function corresponding to acquire_reference_state(). Idempotent. */ ++static int release_reference_state(struct bpf_func_state *state, int ptr_id) + { +- u8 class = BPF_CLASS(insn->code); ++ int i, last_idx; + +- if (class == BPF_ALU || class == BPF_ALU64) { +- if (BPF_SRC(insn->code) == BPF_X) +- verbose("(%02x) %sr%d %s %sr%d\n", +- insn->code, class == BPF_ALU ? "(u32) " : "", +- insn->dst_reg, +- bpf_alu_string[BPF_OP(insn->code) >> 4], +- class == BPF_ALU ? "(u32) " : "", +- insn->src_reg); +- else +- verbose("(%02x) %sr%d %s %s%d\n", +- insn->code, class == BPF_ALU ? 
"(u32) " : "", +- insn->dst_reg, +- bpf_alu_string[BPF_OP(insn->code) >> 4], +- class == BPF_ALU ? "(u32) " : "", +- insn->imm); +- } else if (class == BPF_STX) { +- if (BPF_MODE(insn->code) == BPF_MEM) +- verbose("(%02x) *(%s *)(r%d %+d) = r%d\n", +- insn->code, +- bpf_ldst_string[BPF_SIZE(insn->code) >> 3], +- insn->dst_reg, +- insn->off, insn->src_reg); +- else if (BPF_MODE(insn->code) == BPF_XADD) +- verbose("(%02x) lock *(%s *)(r%d %+d) += r%d\n", +- insn->code, +- bpf_ldst_string[BPF_SIZE(insn->code) >> 3], +- insn->dst_reg, insn->off, +- insn->src_reg); +- else +- verbose("BUG_%02x\n", insn->code); +- } else if (class == BPF_ST) { +- if (BPF_MODE(insn->code) != BPF_MEM) { +- verbose("BUG_st_%02x\n", insn->code); +- return; +- } +- verbose("(%02x) *(%s *)(r%d %+d) = %d\n", +- insn->code, +- bpf_ldst_string[BPF_SIZE(insn->code) >> 3], +- insn->dst_reg, +- insn->off, insn->imm); +- } else if (class == BPF_LDX) { +- if (BPF_MODE(insn->code) != BPF_MEM) { +- verbose("BUG_ldx_%02x\n", insn->code); +- return; ++ last_idx = state->acquired_refs - 1; ++ for (i = 0; i < state->acquired_refs; i++) { ++ if (state->refs[i].id == ptr_id) { ++ if (last_idx && i != last_idx) ++ memcpy(&state->refs[i], &state->refs[last_idx], ++ sizeof(*state->refs)); ++ memset(&state->refs[last_idx], 0, sizeof(*state->refs)); ++ state->acquired_refs--; ++ return 0; + } +- verbose("(%02x) r%d = *(%s *)(r%d %+d)\n", +- insn->code, insn->dst_reg, +- bpf_ldst_string[BPF_SIZE(insn->code) >> 3], +- insn->src_reg, insn->off); +- } else if (class == BPF_LD) { +- if (BPF_MODE(insn->code) == BPF_ABS) { +- verbose("(%02x) r0 = *(%s *)skb[%d]\n", +- insn->code, +- bpf_ldst_string[BPF_SIZE(insn->code) >> 3], +- insn->imm); +- } else if (BPF_MODE(insn->code) == BPF_IND) { +- verbose("(%02x) r0 = *(%s *)skb[r%d + %d]\n", +- insn->code, +- bpf_ldst_string[BPF_SIZE(insn->code) >> 3], +- insn->src_reg, insn->imm); +- } else if (BPF_MODE(insn->code) == BPF_IMM && +- BPF_SIZE(insn->code) == BPF_DW) { +- /* At 
this point, we already made sure that the second +- * part of the ldimm64 insn is accessible. +- */ +- u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; +- bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD; ++ } ++ return -EINVAL; ++} + +- if (map_ptr && !env->allow_ptr_leaks) +- imm = 0; ++static int transfer_reference_state(struct bpf_func_state *dst, ++ struct bpf_func_state *src) ++{ ++ int err = realloc_reference_state(dst, src->acquired_refs, false); ++ if (err) ++ return err; ++ err = copy_reference_state(dst, src); ++ if (err) ++ return err; ++ return 0; ++} + +- verbose("(%02x) r%d = 0x%llx\n", insn->code, +- insn->dst_reg, (unsigned long long)imm); +- } else { +- verbose("BUG_ld_%02x\n", insn->code); +- return; +- } +- } else if (class == BPF_JMP) { +- u8 opcode = BPF_OP(insn->code); ++static void free_func_state(struct bpf_func_state *state) ++{ ++ if (!state) ++ return; ++ kfree(state->refs); ++ kfree(state->stack); ++ kfree(state); ++} + +- if (opcode == BPF_CALL) { +- verbose("(%02x) call %d\n", insn->code, insn->imm); +- } else if (insn->code == (BPF_JMP | BPF_JA)) { +- verbose("(%02x) goto pc%+d\n", +- insn->code, insn->off); +- } else if (insn->code == (BPF_JMP | BPF_EXIT)) { +- verbose("(%02x) exit\n", insn->code); +- } else if (BPF_SRC(insn->code) == BPF_X) { +- verbose("(%02x) if r%d %s r%d goto pc%+d\n", +- insn->code, insn->dst_reg, +- bpf_jmp_string[BPF_OP(insn->code) >> 4], +- insn->src_reg, insn->off); +- } else { +- verbose("(%02x) if r%d %s 0x%x goto pc%+d\n", +- insn->code, insn->dst_reg, +- bpf_jmp_string[BPF_OP(insn->code) >> 4], +- insn->imm, insn->off); ++static void clear_jmp_history(struct bpf_verifier_state *state) ++{ ++ kfree(state->jmp_history); ++ state->jmp_history = NULL; ++ state->jmp_history_cnt = 0; ++} ++ ++static void free_verifier_state(struct bpf_verifier_state *state, ++ bool free_self) ++{ ++ int i; ++ ++ for (i = 0; i <= state->curframe; i++) { ++ free_func_state(state->frame[i]); ++ state->frame[i] = NULL; 
++ } ++ clear_jmp_history(state); ++ if (free_self) ++ kfree(state); ++} ++ ++/* copy verifier state from src to dst growing dst stack space ++ * when necessary to accommodate larger src stack ++ */ ++static int copy_func_state(struct bpf_func_state *dst, ++ const struct bpf_func_state *src) ++{ ++ int err; ++ ++ err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs, ++ false); ++ if (err) ++ return err; ++ memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs)); ++ err = copy_reference_state(dst, src); ++ if (err) ++ return err; ++ return copy_stack_state(dst, src); ++} ++ ++static int copy_verifier_state(struct bpf_verifier_state *dst_state, ++ const struct bpf_verifier_state *src) ++{ ++ struct bpf_func_state *dst; ++ u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt; ++ int i, err; ++ ++ if (dst_state->jmp_history_cnt < src->jmp_history_cnt) { ++ kfree(dst_state->jmp_history); ++ dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER); ++ if (!dst_state->jmp_history) ++ return -ENOMEM; ++ } ++ memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz); ++ dst_state->jmp_history_cnt = src->jmp_history_cnt; ++ ++ /* if dst has more stack frames then src frame, free them */ ++ for (i = src->curframe + 1; i <= dst_state->curframe; i++) { ++ free_func_state(dst_state->frame[i]); ++ dst_state->frame[i] = NULL; ++ } ++ dst_state->speculative = src->speculative; ++ dst_state->curframe = src->curframe; ++ dst_state->active_spin_lock = src->active_spin_lock; ++ dst_state->branches = src->branches; ++ dst_state->parent = src->parent; ++ dst_state->first_insn_idx = src->first_insn_idx; ++ dst_state->last_insn_idx = src->last_insn_idx; ++ for (i = 0; i <= src->curframe; i++) { ++ dst = dst_state->frame[i]; ++ if (!dst) { ++ dst = kzalloc(sizeof(*dst), GFP_KERNEL); ++ if (!dst) ++ return -ENOMEM; ++ dst_state->frame[i] = dst; + } +- } else { +- verbose("(%02x) %s\n", insn->code, bpf_class_string[class]); ++ err = copy_func_state(dst, 
src->frame[i]); ++ if (err) ++ return err; + } ++ return 0; + } + +-static int pop_stack(struct verifier_env *env, int *prev_insn_idx) ++static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) + { +- struct verifier_stack_elem *elem; +- int insn_idx; ++ while (st) { ++ u32 br = --st->branches; ++ ++ /* WARN_ON(br > 1) technically makes sense here, ++ * but see comment in push_stack(), hence: ++ */ ++ WARN_ONCE((int)br < 0, ++ "BUG update_branch_counts:branches_to_explore=%d\n", ++ br); ++ if (br) ++ break; ++ st = st->parent; ++ } ++} ++ ++static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx, ++ int *insn_idx) ++{ ++ struct bpf_verifier_state *cur = env->cur_state; ++ struct bpf_verifier_stack_elem *elem, *head = env->head; ++ int err; + + if (env->head == NULL) +- return -1; ++ return -ENOENT; + +- memcpy(&env->cur_state, &env->head->st, sizeof(env->cur_state)); +- insn_idx = env->head->insn_idx; ++ if (cur) { ++ err = copy_verifier_state(cur, &head->st); ++ if (err) ++ return err; ++ } ++ if (insn_idx) ++ *insn_idx = head->insn_idx; + if (prev_insn_idx) +- *prev_insn_idx = env->head->prev_insn_idx; +- elem = env->head->next; +- kfree(env->head); ++ *prev_insn_idx = head->prev_insn_idx; ++ elem = head->next; ++ free_verifier_state(&head->st, false); ++ kfree(head); + env->head = elem; + env->stack_size--; +- return insn_idx; ++ return 0; + } + +-static struct verifier_state *push_stack(struct verifier_env *env, int insn_idx, +- int prev_insn_idx) ++static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env, ++ int insn_idx, int prev_insn_idx, ++ bool speculative) + { +- struct verifier_stack_elem *elem; ++ struct bpf_verifier_state *cur = env->cur_state; ++ struct bpf_verifier_stack_elem *elem; ++ int err; + +- elem = kmalloc(sizeof(struct verifier_stack_elem), GFP_KERNEL); ++ elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL); + if (!elem) + goto err; + +- memcpy(&elem->st, 
&env->cur_state, sizeof(env->cur_state)); + elem->insn_idx = insn_idx; + elem->prev_insn_idx = prev_insn_idx; + elem->next = env->head; + env->head = elem; + env->stack_size++; +- if (env->stack_size > 1024) { +- verbose("BPF program is too complex\n"); ++ err = copy_verifier_state(&elem->st, cur); ++ if (err) ++ goto err; ++ elem->st.speculative |= speculative; ++ if (env->stack_size > BPF_COMPLEXITY_LIMIT_JMP_SEQ) { ++ verbose(env, "The sequence of %d jumps is too complex.\n", ++ env->stack_size); + goto err; + } ++ if (elem->st.parent) { ++ ++elem->st.parent->branches; ++ /* WARN_ON(branches > 2) technically makes sense here, ++ * but ++ * 1. speculative states will bump 'branches' for non-branch ++ * instructions ++ * 2. is_state_visited() heuristics may decide not to create ++ * a new state for a sequence of branches and all such current ++ * and cloned states will be pointing to a single parent state ++ * which might have large 'branches' count. ++ */ ++ } + return &elem->st; + err: ++ free_verifier_state(env->cur_state, true); ++ env->cur_state = NULL; + /* pop all elements and return */ +- while (pop_stack(env, NULL) >= 0); ++ while (!pop_stack(env, NULL, NULL)); + return NULL; + } + +@@ -471,29 +852,225 @@ static const int caller_saved[CALLER_SAV + BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 + }; + +-static void init_reg_state(struct reg_state *regs) ++static void __mark_reg_not_init(const struct bpf_verifier_env *env, ++ struct bpf_reg_state *reg); ++ ++/* Mark the unknown part of a register (variable offset or scalar value) as ++ * known to have the value @imm. 
++ */ ++static void __mark_reg_known(struct bpf_reg_state *reg, u64 imm) ++{ ++ /* Clear id, off, and union(map_ptr, range) */ ++ memset(((u8 *)reg) + sizeof(reg->type), 0, ++ offsetof(struct bpf_reg_state, var_off) - sizeof(reg->type)); ++ reg->var_off = tnum_const(imm); ++ reg->smin_value = (s64)imm; ++ reg->smax_value = (s64)imm; ++ reg->umin_value = imm; ++ reg->umax_value = imm; ++} ++ ++/* Mark the 'variable offset' part of a register as zero. This should be ++ * used only on registers holding a pointer type. ++ */ ++static void __mark_reg_known_zero(struct bpf_reg_state *reg) ++{ ++ __mark_reg_known(reg, 0); ++} ++ ++static void __mark_reg_const_zero(struct bpf_reg_state *reg) ++{ ++ __mark_reg_known(reg, 0); ++ reg->type = SCALAR_VALUE; ++} ++ ++static void mark_reg_known_zero(struct bpf_verifier_env *env, ++ struct bpf_reg_state *regs, u32 regno) ++{ ++ if (WARN_ON(regno >= MAX_BPF_REG)) { ++ verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); ++ /* Something bad happened, let's kill all regs */ ++ for (regno = 0; regno < MAX_BPF_REG; regno++) ++ __mark_reg_not_init(env, regs + regno); ++ return; ++ } ++ __mark_reg_known_zero(regs + regno); ++} ++ ++static bool reg_is_pkt_pointer(const struct bpf_reg_state *reg) ++{ ++ return type_is_pkt_pointer(reg->type); ++} ++ ++static bool reg_is_pkt_pointer_any(const struct bpf_reg_state *reg) ++{ ++ return reg_is_pkt_pointer(reg) || ++ reg->type == PTR_TO_PACKET_END; ++} ++ ++/* Unmodified PTR_TO_PACKET[_META,_END] register from ctx access. */ ++static bool reg_is_init_pkt_pointer(const struct bpf_reg_state *reg, ++ enum bpf_reg_type which) ++{ ++ /* The register can already have a range from prior markings. ++ * This is fine as long as it hasn't been advanced from its ++ * origin. 
++ */ ++ return reg->type == which && ++ reg->id == 0 && ++ reg->off == 0 && ++ tnum_equals_const(reg->var_off, 0); ++} ++ ++/* Attempts to improve min/max values based on var_off information */ ++static void __update_reg_bounds(struct bpf_reg_state *reg) ++{ ++ /* min signed is max(sign bit) | min(other bits) */ ++ reg->smin_value = max_t(s64, reg->smin_value, ++ reg->var_off.value | (reg->var_off.mask & S64_MIN)); ++ /* max signed is min(sign bit) | max(other bits) */ ++ reg->smax_value = min_t(s64, reg->smax_value, ++ reg->var_off.value | (reg->var_off.mask & S64_MAX)); ++ reg->umin_value = max(reg->umin_value, reg->var_off.value); ++ reg->umax_value = min(reg->umax_value, ++ reg->var_off.value | reg->var_off.mask); ++} ++ ++/* Uses signed min/max values to inform unsigned, and vice-versa */ ++static void __reg_deduce_bounds(struct bpf_reg_state *reg) + { ++ /* Learn sign from signed bounds. ++ * If we cannot cross the sign boundary, then signed and unsigned bounds ++ * are the same, so combine. This works even in the negative case, e.g. ++ * -3 s<= x s<= -1 implies 0xf...fd u<= x u<= 0xf...ff. ++ */ ++ if (reg->smin_value >= 0 || reg->smax_value < 0) { ++ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, ++ reg->umin_value); ++ reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, ++ reg->umax_value); ++ return; ++ } ++ /* Learn sign from unsigned bounds. Signed bounds cross the sign ++ * boundary, so we must be careful. ++ */ ++ if ((s64)reg->umax_value >= 0) { ++ /* Positive. We can't learn anything from the smin, but smax ++ * is positive, hence safe. ++ */ ++ reg->smin_value = reg->umin_value; ++ reg->smax_value = reg->umax_value = min_t(u64, reg->smax_value, ++ reg->umax_value); ++ } else if ((s64)reg->umin_value < 0) { ++ /* Negative. We can't learn anything from the smax, but smin ++ * is negative, hence safe. 
++ */ ++ reg->smin_value = reg->umin_value = max_t(u64, reg->smin_value, ++ reg->umin_value); ++ reg->smax_value = reg->umax_value; ++ } ++} ++ ++/* Attempts to improve var_off based on unsigned min/max information */ ++static void __reg_bound_offset(struct bpf_reg_state *reg) ++{ ++ reg->var_off = tnum_intersect(reg->var_off, ++ tnum_range(reg->umin_value, ++ reg->umax_value)); ++} ++ ++/* Reset the min/max bounds of a register */ ++static void __mark_reg_unbounded(struct bpf_reg_state *reg) ++{ ++ reg->smin_value = S64_MIN; ++ reg->smax_value = S64_MAX; ++ reg->umin_value = 0; ++ reg->umax_value = U64_MAX; ++} ++ ++/* Mark a register as having a completely unknown (scalar) value. */ ++static void __mark_reg_unknown(const struct bpf_verifier_env *env, ++ struct bpf_reg_state *reg) ++{ ++ /* ++ * Clear type, id, off, and union(map_ptr, range) and ++ * padding between 'type' and union ++ */ ++ memset(reg, 0, offsetof(struct bpf_reg_state, var_off)); ++ reg->type = SCALAR_VALUE; ++ reg->var_off = tnum_unknown; ++ reg->frameno = 0; ++ reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? 
++ true : false; ++ __mark_reg_unbounded(reg); ++} ++ ++static void mark_reg_unknown(struct bpf_verifier_env *env, ++ struct bpf_reg_state *regs, u32 regno) ++{ ++ if (WARN_ON(regno >= MAX_BPF_REG)) { ++ verbose(env, "mark_reg_unknown(regs, %u)\n", regno); ++ /* Something bad happened, let's kill all regs except FP */ ++ for (regno = 0; regno < BPF_REG_FP; regno++) ++ __mark_reg_not_init(env, regs + regno); ++ return; ++ } ++ __mark_reg_unknown(env, regs + regno); ++} ++ ++static void __mark_reg_not_init(const struct bpf_verifier_env *env, ++ struct bpf_reg_state *reg) ++{ ++ __mark_reg_unknown(env, reg); ++ reg->type = NOT_INIT; ++} ++ ++static void mark_reg_not_init(struct bpf_verifier_env *env, ++ struct bpf_reg_state *regs, u32 regno) ++{ ++ if (WARN_ON(regno >= MAX_BPF_REG)) { ++ verbose(env, "mark_reg_not_init(regs, %u)\n", regno); ++ /* Something bad happened, let's kill all regs except FP */ ++ for (regno = 0; regno < BPF_REG_FP; regno++) ++ __mark_reg_not_init(env, regs + regno); ++ return; ++ } ++ __mark_reg_not_init(env, regs + regno); ++} ++ ++#define DEF_NOT_SUBREG (0) ++static void init_reg_state(struct bpf_verifier_env *env, ++ struct bpf_func_state *state) ++{ ++ struct bpf_reg_state *regs = state->regs; + int i; + + for (i = 0; i < MAX_BPF_REG; i++) { +- regs[i].type = NOT_INIT; +- regs[i].imm = 0; +- regs[i].map_ptr = NULL; ++ mark_reg_not_init(env, regs, i); ++ regs[i].live = REG_LIVE_NONE; ++ regs[i].parent = NULL; ++ regs[i].subreg_def = DEF_NOT_SUBREG; + } + + /* frame pointer */ +- regs[BPF_REG_FP].type = FRAME_PTR; ++ regs[BPF_REG_FP].type = PTR_TO_STACK; ++ mark_reg_known_zero(env, regs, BPF_REG_FP); ++ regs[BPF_REG_FP].frameno = state->frameno; + + /* 1st arg to a function */ + regs[BPF_REG_1].type = PTR_TO_CTX; ++ mark_reg_known_zero(env, regs, BPF_REG_1); + } + +-static void mark_reg_unknown_value(struct reg_state *regs, u32 regno) ++#define BPF_MAIN_FUNC (-1) ++static void init_func_state(struct bpf_verifier_env *env, ++ struct 
bpf_func_state *state, ++ int callsite, int frameno, int subprogno) + { +- BUG_ON(regno >= MAX_BPF_REG); +- regs[regno].type = UNKNOWN_VALUE; +- regs[regno].imm = 0; +- regs[regno].map_ptr = NULL; ++ state->callsite = callsite; ++ state->frameno = frameno; ++ state->subprogno = subprogno; ++ init_reg_state(env, state); + } + + enum reg_arg_type { +@@ -502,44 +1079,760 @@ enum reg_arg_type { + DST_OP_NO_MARK /* same as above, check only, don't mark */ + }; + +-static int check_reg_arg(struct reg_state *regs, u32 regno, ++static int cmp_subprogs(const void *a, const void *b) ++{ ++ return ((struct bpf_subprog_info *)a)->start - ++ ((struct bpf_subprog_info *)b)->start; ++} ++ ++static int find_subprog(struct bpf_verifier_env *env, int off) ++{ ++ struct bpf_subprog_info *p; ++ ++ p = bsearch(&off, env->subprog_info, env->subprog_cnt, ++ sizeof(env->subprog_info[0]), cmp_subprogs); ++ if (!p) ++ return -ENOENT; ++ return p - env->subprog_info; ++ ++} ++ ++static int add_subprog(struct bpf_verifier_env *env, int off) ++{ ++ int insn_cnt = env->prog->len; ++ int ret; ++ ++ if (off >= insn_cnt || off < 0) { ++ verbose(env, "call to invalid destination\n"); ++ return -EINVAL; ++ } ++ ret = find_subprog(env, off); ++ if (ret >= 0) ++ return 0; ++ if (env->subprog_cnt >= BPF_MAX_SUBPROGS) { ++ verbose(env, "too many subprograms\n"); ++ return -E2BIG; ++ } ++ env->subprog_info[env->subprog_cnt++].start = off; ++ sort(env->subprog_info, env->subprog_cnt, ++ sizeof(env->subprog_info[0]), cmp_subprogs, NULL); ++ return 0; ++} ++ ++static int check_subprogs(struct bpf_verifier_env *env) ++{ ++ int i, ret, subprog_start, subprog_end, off, cur_subprog = 0; ++ struct bpf_subprog_info *subprog = env->subprog_info; ++ struct bpf_insn *insn = env->prog->insnsi; ++ int insn_cnt = env->prog->len; ++ ++ /* Add entry function. */ ++ ret = add_subprog(env, 0); ++ if (ret < 0) ++ return ret; ++ ++ /* determine subprog starts. 
The end is one before the next starts */ ++ for (i = 0; i < insn_cnt; i++) { ++ if (insn[i].code != (BPF_JMP | BPF_CALL)) ++ continue; ++ if (insn[i].src_reg != BPF_PSEUDO_CALL) ++ continue; ++ if (!env->allow_ptr_leaks) { ++ verbose(env, "function calls to other bpf functions are allowed for root only\n"); ++ return -EPERM; ++ } ++ ret = add_subprog(env, i + insn[i].imm + 1); ++ if (ret < 0) ++ return ret; ++ } ++ ++ /* Add a fake 'exit' subprog which could simplify subprog iteration ++ * logic. 'subprog_cnt' should not be increased. ++ */ ++ subprog[env->subprog_cnt].start = insn_cnt; ++ ++ if (env->log.level & BPF_LOG_LEVEL2) ++ for (i = 0; i < env->subprog_cnt; i++) ++ verbose(env, "func#%d @%d\n", i, subprog[i].start); ++ ++ /* now check that all jumps are within the same subprog */ ++ subprog_start = subprog[cur_subprog].start; ++ subprog_end = subprog[cur_subprog + 1].start; ++ for (i = 0; i < insn_cnt; i++) { ++ u8 code = insn[i].code; ++ ++ if (code == (BPF_JMP | BPF_CALL) && ++ insn[i].imm == BPF_FUNC_tail_call && ++ insn[i].src_reg != BPF_PSEUDO_CALL) ++ subprog[cur_subprog].has_tail_call = true; ++ if (BPF_CLASS(code) != BPF_JMP && BPF_CLASS(code) != BPF_JMP32) ++ goto next; ++ if (BPF_OP(code) == BPF_EXIT || BPF_OP(code) == BPF_CALL) ++ goto next; ++ off = i + insn[i].off + 1; ++ if (off < subprog_start || off >= subprog_end) { ++ verbose(env, "jump out of range from insn %d to %d\n", i, off); ++ return -EINVAL; ++ } ++next: ++ if (i == subprog_end - 1) { ++ /* to avoid fall-through from one subprog into another ++ * the last insn of the subprog should be either exit ++ * or unconditional jump back ++ */ ++ if (code != (BPF_JMP | BPF_EXIT) && ++ code != (BPF_JMP | BPF_JA)) { ++ verbose(env, "last insn is not an exit or jmp\n"); ++ return -EINVAL; ++ } ++ subprog_start = subprog_end; ++ cur_subprog++; ++ if (cur_subprog < env->subprog_cnt) ++ subprog_end = subprog[cur_subprog + 1].start; ++ } ++ } ++ return 0; ++} ++ ++/* Parentage chain of this 
register (or stack slot) should take care of all ++ * issues like callee-saved registers, stack slot allocation time, etc. ++ */ ++static int mark_reg_read(struct bpf_verifier_env *env, ++ const struct bpf_reg_state *state, ++ struct bpf_reg_state *parent, u8 flag) ++{ ++ bool writes = parent == state->parent; /* Observe write marks */ ++ int cnt = 0; ++ ++ while (parent) { ++ /* if read wasn't screened by an earlier write ... */ ++ if (writes && state->live & REG_LIVE_WRITTEN) ++ break; ++ if (parent->live & REG_LIVE_DONE) { ++ verbose(env, "verifier BUG type %s var_off %lld off %d\n", ++ reg_type_str[parent->type], ++ parent->var_off.value, parent->off); ++ return -EFAULT; ++ } ++ /* The first condition is more likely to be true than the ++ * second, checked it first. ++ */ ++ if ((parent->live & REG_LIVE_READ) == flag || ++ parent->live & REG_LIVE_READ64) ++ /* The parentage chain never changes and ++ * this parent was already marked as LIVE_READ. ++ * There is no need to keep walking the chain again and ++ * keep re-marking all parents as LIVE_READ. ++ * This case happens when the same register is read ++ * multiple times without writes into it in-between. ++ * Also, if parent has the stronger REG_LIVE_READ64 set, ++ * then no need to set the weak REG_LIVE_READ32. ++ */ ++ break; ++ /* ... then we depend on parent's value */ ++ parent->live |= flag; ++ /* REG_LIVE_READ64 overrides REG_LIVE_READ32. */ ++ if (flag == REG_LIVE_READ64) ++ parent->live &= ~REG_LIVE_READ32; ++ state = parent; ++ parent = state->parent; ++ writes = true; ++ cnt++; ++ } ++ ++ if (env->longest_mark_read_walk < cnt) ++ env->longest_mark_read_walk = cnt; ++ return 0; ++} ++ ++/* This function is supposed to be used by the following 32-bit optimization ++ * code only. It returns TRUE if the source or destination register operates ++ * on 64-bit, otherwise return FALSE. 
++ */ ++static bool is_reg64(struct bpf_verifier_env *env, struct bpf_insn *insn, ++ u32 regno, struct bpf_reg_state *reg, enum reg_arg_type t) ++{ ++ u8 code, class, op; ++ ++ code = insn->code; ++ class = BPF_CLASS(code); ++ op = BPF_OP(code); ++ if (class == BPF_JMP) { ++ /* BPF_EXIT for "main" will reach here. Return TRUE ++ * conservatively. ++ */ ++ if (op == BPF_EXIT) ++ return true; ++ if (op == BPF_CALL) { ++ /* BPF to BPF call will reach here because of marking ++ * caller saved clobber with DST_OP_NO_MARK for which we ++ * don't care the register def because they are anyway ++ * marked as NOT_INIT already. ++ */ ++ if (insn->src_reg == BPF_PSEUDO_CALL) ++ return false; ++ /* Helper call will reach here because of arg type ++ * check, conservatively return TRUE. ++ */ ++ if (t == SRC_OP) ++ return true; ++ ++ return false; ++ } ++ } ++ ++ if (class == BPF_ALU64 || class == BPF_JMP || ++ /* BPF_END always use BPF_ALU class. */ ++ (class == BPF_ALU && op == BPF_END && insn->imm == 64)) ++ return true; ++ ++ if (class == BPF_ALU || class == BPF_JMP32) ++ return false; ++ ++ if (class == BPF_LDX) { ++ if (t != SRC_OP) ++ return BPF_SIZE(code) == BPF_DW; ++ /* LDX source must be ptr. */ ++ return true; ++ } ++ ++ if (class == BPF_STX) { ++ if (reg->type != SCALAR_VALUE) ++ return true; ++ return BPF_SIZE(code) == BPF_DW; ++ } ++ ++ if (class == BPF_LD) { ++ u8 mode = BPF_MODE(code); ++ ++ /* LD_IMM64 */ ++ if (mode == BPF_IMM) ++ return true; ++ ++ /* Both LD_IND and LD_ABS return 32-bit data. */ ++ if (t != SRC_OP) ++ return false; ++ ++ /* Implicit ctx ptr. */ ++ if (regno == BPF_REG_6) ++ return true; ++ ++ /* Explicit source could be any width. */ ++ return true; ++ } ++ ++ if (class == BPF_ST) ++ /* The only source register for BPF_ST is a ptr. */ ++ return true; ++ ++ /* Conservatively return true at default. */ ++ return true; ++} ++ ++/* Return TRUE if INSN doesn't have explicit value define. 
*/ ++static bool insn_no_def(struct bpf_insn *insn) ++{ ++ u8 class = BPF_CLASS(insn->code); ++ ++ return (class == BPF_JMP || class == BPF_JMP32 || ++ class == BPF_STX || class == BPF_ST); ++} ++ ++/* Return TRUE if INSN has defined any 32-bit value explicitly. */ ++static bool insn_has_def32(struct bpf_verifier_env *env, struct bpf_insn *insn) ++{ ++ if (insn_no_def(insn)) ++ return false; ++ ++ return !is_reg64(env, insn, insn->dst_reg, NULL, DST_OP); ++} ++ ++static void mark_insn_zext(struct bpf_verifier_env *env, ++ struct bpf_reg_state *reg) ++{ ++ s32 def_idx = reg->subreg_def; ++ ++ if (def_idx == DEF_NOT_SUBREG) ++ return; ++ ++ env->insn_aux_data[def_idx - 1].zext_dst = true; ++ /* The dst will be zero extended, so won't be sub-register anymore. */ ++ reg->subreg_def = DEF_NOT_SUBREG; ++} ++ ++static int check_reg_arg(struct bpf_verifier_env *env, u32 regno, + enum reg_arg_type t) + { ++ struct bpf_verifier_state *vstate = env->cur_state; ++ struct bpf_func_state *state = vstate->frame[vstate->curframe]; ++ struct bpf_insn *insn = env->prog->insnsi + env->insn_idx; ++ struct bpf_reg_state *reg, *regs = state->regs; ++ bool rw64; ++ + if (regno >= MAX_BPF_REG) { +- verbose("R%d is invalid\n", regno); ++ verbose(env, "R%d is invalid\n", regno); + return -EINVAL; + } + ++ reg = ®s[regno]; ++ rw64 = is_reg64(env, insn, regno, reg, t); + if (t == SRC_OP) { + /* check whether register used as source operand can be read */ +- if (regs[regno].type == NOT_INIT) { +- verbose("R%d !read_ok\n", regno); ++ if (reg->type == NOT_INIT) { ++ verbose(env, "R%d !read_ok\n", regno); + return -EACCES; + } ++ /* We don't need to worry about FP liveness because it's read-only */ ++ if (regno == BPF_REG_FP) ++ return 0; ++ ++ if (rw64) ++ mark_insn_zext(env, reg); ++ ++ return mark_reg_read(env, reg, reg->parent, ++ rw64 ? 
REG_LIVE_READ64 : REG_LIVE_READ32); + } else { + /* check whether register used as dest operand can be written to */ + if (regno == BPF_REG_FP) { +- verbose("frame pointer is read only\n"); ++ verbose(env, "frame pointer is read only\n"); + return -EACCES; + } ++ reg->live |= REG_LIVE_WRITTEN; ++ reg->subreg_def = rw64 ? DEF_NOT_SUBREG : env->insn_idx + 1; + if (t == DST_OP) +- mark_reg_unknown_value(regs, regno); ++ mark_reg_unknown(env, regs, regno); + } + return 0; + } + +-static int bpf_size_to_bytes(int bpf_size) ++/* for any branch, call, exit record the history of jmps in the given state */ ++static int push_jmp_history(struct bpf_verifier_env *env, ++ struct bpf_verifier_state *cur) + { +- if (bpf_size == BPF_W) +- return 4; +- else if (bpf_size == BPF_H) +- return 2; +- else if (bpf_size == BPF_B) +- return 1; +- else if (bpf_size == BPF_DW) +- return 8; +- else +- return -EINVAL; ++ u32 cnt = cur->jmp_history_cnt; ++ struct bpf_idx_pair *p; ++ ++ cnt++; ++ p = krealloc(cur->jmp_history, cnt * sizeof(*p), GFP_USER); ++ if (!p) ++ return -ENOMEM; ++ p[cnt - 1].idx = env->insn_idx; ++ p[cnt - 1].prev_idx = env->prev_insn_idx; ++ cur->jmp_history = p; ++ cur->jmp_history_cnt = cnt; ++ return 0; ++} ++ ++/* Backtrack one insn at a time. If idx is not at the top of recorded ++ * history then previous instruction came from straight line execution. ++ */ ++static int get_prev_insn_idx(struct bpf_verifier_state *st, int i, ++ u32 *history) ++{ ++ u32 cnt = *history; ++ ++ if (cnt && st->jmp_history[cnt - 1].idx == i) { ++ i = st->jmp_history[cnt - 1].prev_idx; ++ (*history)--; ++ } else { ++ i--; ++ } ++ return i; ++} ++ ++/* For given verifier state backtrack_insn() is called from the last insn to ++ * the first insn. Its purpose is to compute a bitmask of registers and ++ * stack slots that needs precision in the parent verifier state. 
++ */ ++static int backtrack_insn(struct bpf_verifier_env *env, int idx, ++ u32 *reg_mask, u64 *stack_mask) ++{ ++ const struct bpf_insn_cbs cbs = { ++ .cb_print = verbose, ++ .private_data = env, ++ }; ++ struct bpf_insn *insn = env->prog->insnsi + idx; ++ u8 class = BPF_CLASS(insn->code); ++ u8 opcode = BPF_OP(insn->code); ++ u8 mode = BPF_MODE(insn->code); ++ u32 dreg = 1u << insn->dst_reg; ++ u32 sreg = 1u << insn->src_reg; ++ u32 spi; ++ ++ if (insn->code == 0) ++ return 0; ++ if (env->log.level & BPF_LOG_LEVEL) { ++ verbose(env, "regs=%x stack=%llx before ", *reg_mask, *stack_mask); ++ verbose(env, "%d: ", idx); ++ print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); ++ } ++ ++ if (class == BPF_ALU || class == BPF_ALU64) { ++ if (!(*reg_mask & dreg)) ++ return 0; ++ if (opcode == BPF_MOV) { ++ if (BPF_SRC(insn->code) == BPF_X) { ++ /* dreg = sreg ++ * dreg needs precision after this insn ++ * sreg needs precision before this insn ++ */ ++ *reg_mask &= ~dreg; ++ *reg_mask |= sreg; ++ } else { ++ /* dreg = K ++ * dreg needs precision after this insn. ++ * Corresponding register is already marked ++ * as precise=true in this verifier state. ++ * No further markings in parent are necessary ++ */ ++ *reg_mask &= ~dreg; ++ } ++ } else { ++ if (BPF_SRC(insn->code) == BPF_X) { ++ /* dreg += sreg ++ * both dreg and sreg need precision ++ * before this insn ++ */ ++ *reg_mask |= sreg; ++ } /* else dreg += K ++ * dreg still needs precision before this insn ++ */ ++ } ++ } else if (class == BPF_LDX) { ++ if (!(*reg_mask & dreg)) ++ return 0; ++ *reg_mask &= ~dreg; ++ ++ /* scalars can only be spilled into stack w/o losing precision. ++ * Load from any other memory can be zero extended. ++ * The desire to keep that precision is already indicated ++ * by 'precise' mark in corresponding register of this state. ++ * No further tracking necessary. 
++ */ ++ if (insn->src_reg != BPF_REG_FP) ++ return 0; ++ if (BPF_SIZE(insn->code) != BPF_DW) ++ return 0; ++ ++ /* dreg = *(u64 *)[fp - off] was a fill from the stack. ++ * that [fp - off] slot contains scalar that needs to be ++ * tracked with precision ++ */ ++ spi = (-insn->off - 1) / BPF_REG_SIZE; ++ if (spi >= 64) { ++ verbose(env, "BUG spi %d\n", spi); ++ WARN_ONCE(1, "verifier backtracking bug"); ++ return -EFAULT; ++ } ++ *stack_mask |= 1ull << spi; ++ } else if (class == BPF_STX || class == BPF_ST) { ++ if (*reg_mask & dreg) ++ /* stx & st shouldn't be using _scalar_ dst_reg ++ * to access memory. It means backtracking ++ * encountered a case of pointer subtraction. ++ */ ++ return -ENOTSUPP; ++ /* scalars can only be spilled into stack */ ++ if (insn->dst_reg != BPF_REG_FP) ++ return 0; ++ if (BPF_SIZE(insn->code) != BPF_DW) ++ return 0; ++ spi = (-insn->off - 1) / BPF_REG_SIZE; ++ if (spi >= 64) { ++ verbose(env, "BUG spi %d\n", spi); ++ WARN_ONCE(1, "verifier backtracking bug"); ++ return -EFAULT; ++ } ++ if (!(*stack_mask & (1ull << spi))) ++ return 0; ++ *stack_mask &= ~(1ull << spi); ++ if (class == BPF_STX) ++ *reg_mask |= sreg; ++ } else if (class == BPF_JMP || class == BPF_JMP32) { ++ if (opcode == BPF_CALL) { ++ if (insn->src_reg == BPF_PSEUDO_CALL) ++ return -ENOTSUPP; ++ /* regular helper call sets R0 */ ++ *reg_mask &= ~1; ++ if (*reg_mask & 0x3f) { ++ /* if backtracing was looking for registers R1-R5 ++ * they should have been found already. ++ */ ++ verbose(env, "BUG regs %x\n", *reg_mask); ++ WARN_ONCE(1, "verifier backtracking bug"); ++ return -EFAULT; ++ } ++ } else if (opcode == BPF_EXIT) { ++ return -ENOTSUPP; ++ } ++ } else if (class == BPF_LD) { ++ if (!(*reg_mask & dreg)) ++ return 0; ++ *reg_mask &= ~dreg; ++ /* It's ld_imm64 or ld_abs or ld_ind. 
++ * For ld_imm64 no further tracking of precision ++ * into parent is necessary ++ */ ++ if (mode == BPF_IND || mode == BPF_ABS) ++ /* to be analyzed */ ++ return -ENOTSUPP; ++ } ++ return 0; ++} ++ ++/* the scalar precision tracking algorithm: ++ * . at the start all registers have precise=false. ++ * . scalar ranges are tracked as normal through alu and jmp insns. ++ * . once precise value of the scalar register is used in: ++ * . ptr + scalar alu ++ * . if (scalar cond K|scalar) ++ * . helper_call(.., scalar, ...) where ARG_CONST is expected ++ * backtrack through the verifier states and mark all registers and ++ * stack slots with spilled constants that these scalar regisers ++ * should be precise. ++ * . during state pruning two registers (or spilled stack slots) ++ * are equivalent if both are not precise. ++ * ++ * Note the verifier cannot simply walk register parentage chain, ++ * since many different registers and stack slots could have been ++ * used to compute single precise scalar. ++ * ++ * The approach of starting with precise=true for all registers and then ++ * backtrack to mark a register as not precise when the verifier detects ++ * that program doesn't care about specific value (e.g., when helper ++ * takes register as ARG_ANYTHING parameter) is not safe. ++ * ++ * It's ok to walk single parentage chain of the verifier states. ++ * It's possible that this backtracking will go all the way till 1st insn. ++ * All other branches will be explored for needing precision later. ++ * ++ * The backtracking needs to deal with cases like: ++ * R8=map_value(id=0,off=0,ks=4,vs=1952,imm=0) R9_w=map_value(id=0,off=40,ks=4,vs=1952,imm=0) ++ * r9 -= r8 ++ * r5 = r9 ++ * if r5 > 0x79f goto pc+7 ++ * R5_w=inv(id=0,umax_value=1951,var_off=(0x0; 0x7ff)) ++ * r5 += 1 ++ * ... 
++ * call bpf_perf_event_output#25 ++ * where .arg5_type = ARG_CONST_SIZE_OR_ZERO ++ * ++ * and this case: ++ * r6 = 1 ++ * call foo // uses callee's r6 inside to compute r0 ++ * r0 += r6 ++ * if r0 == 0 goto ++ * ++ * to track above reg_mask/stack_mask needs to be independent for each frame. ++ * ++ * Also if parent's curframe > frame where backtracking started, ++ * the verifier need to mark registers in both frames, otherwise callees ++ * may incorrectly prune callers. This is similar to ++ * commit 7640ead93924 ("bpf: verifier: make sure callees don't prune with caller differences") ++ * ++ * For now backtracking falls back into conservative marking. ++ */ ++static void mark_all_scalars_precise(struct bpf_verifier_env *env, ++ struct bpf_verifier_state *st) ++{ ++ struct bpf_func_state *func; ++ struct bpf_reg_state *reg; ++ int i, j; ++ ++ /* big hammer: mark all scalars precise in this path. ++ * pop_stack may still get !precise scalars. ++ */ ++ for (; st; st = st->parent) ++ for (i = 0; i <= st->curframe; i++) { ++ func = st->frame[i]; ++ for (j = 0; j < BPF_REG_FP; j++) { ++ reg = &func->regs[j]; ++ if (reg->type != SCALAR_VALUE) ++ continue; ++ reg->precise = true; ++ } ++ for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { ++ if (func->stack[j].slot_type[0] != STACK_SPILL) ++ continue; ++ reg = &func->stack[j].spilled_ptr; ++ if (reg->type != SCALAR_VALUE) ++ continue; ++ reg->precise = true; ++ } ++ } ++} ++ ++static int __mark_chain_precision(struct bpf_verifier_env *env, int regno, ++ int spi) ++{ ++ struct bpf_verifier_state *st = env->cur_state; ++ int first_idx = st->first_insn_idx; ++ int last_idx = env->insn_idx; ++ struct bpf_func_state *func; ++ struct bpf_reg_state *reg; ++ u32 reg_mask = regno >= 0 ? 1u << regno : 0; ++ u64 stack_mask = spi >= 0 ? 
1ull << spi : 0; ++ bool skip_first = true; ++ bool new_marks = false; ++ int i, err; ++ ++ if (!env->allow_ptr_leaks) ++ /* backtracking is root only for now */ ++ return 0; ++ ++ func = st->frame[st->curframe]; ++ if (regno >= 0) { ++ reg = &func->regs[regno]; ++ if (reg->type != SCALAR_VALUE) { ++ WARN_ONCE(1, "backtracing misuse"); ++ return -EFAULT; ++ } ++ if (!reg->precise) ++ new_marks = true; ++ else ++ reg_mask = 0; ++ reg->precise = true; ++ } ++ ++ while (spi >= 0) { ++ if (func->stack[spi].slot_type[0] != STACK_SPILL) { ++ stack_mask = 0; ++ break; ++ } ++ reg = &func->stack[spi].spilled_ptr; ++ if (reg->type != SCALAR_VALUE) { ++ stack_mask = 0; ++ break; ++ } ++ if (!reg->precise) ++ new_marks = true; ++ else ++ stack_mask = 0; ++ reg->precise = true; ++ break; ++ } ++ ++ if (!new_marks) ++ return 0; ++ if (!reg_mask && !stack_mask) ++ return 0; ++ for (;;) { ++ DECLARE_BITMAP(mask, 64); ++ u32 history = st->jmp_history_cnt; ++ ++ if (env->log.level & BPF_LOG_LEVEL) ++ verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); ++ for (i = last_idx;;) { ++ if (skip_first) { ++ err = 0; ++ skip_first = false; ++ } else { ++ err = backtrack_insn(env, i, ®_mask, &stack_mask); ++ } ++ if (err == -ENOTSUPP) { ++ mark_all_scalars_precise(env, st); ++ return 0; ++ } else if (err) { ++ return err; ++ } ++ if (!reg_mask && !stack_mask) ++ /* Found assignment(s) into tracked register in this state. ++ * Since this state is already marked, just return. ++ * Nothing to be tracked further in the parent state. ++ */ ++ return 0; ++ if (i == first_idx) ++ break; ++ i = get_prev_insn_idx(st, i, &history); ++ if (i >= env->prog->len) { ++ /* This can happen if backtracking reached insn 0 ++ * and there are still reg_mask or stack_mask ++ * to backtrack. ++ * It means the backtracking missed the spot where ++ * particular register was initialized with a constant. 
++ */ ++ verbose(env, "BUG backtracking idx %d\n", i); ++ WARN_ONCE(1, "verifier backtracking bug"); ++ return -EFAULT; ++ } ++ } ++ st = st->parent; ++ if (!st) ++ break; ++ ++ new_marks = false; ++ func = st->frame[st->curframe]; ++ bitmap_from_u64(mask, reg_mask); ++ for_each_set_bit(i, mask, 32) { ++ reg = &func->regs[i]; ++ if (reg->type != SCALAR_VALUE) { ++ reg_mask &= ~(1u << i); ++ continue; ++ } ++ if (!reg->precise) ++ new_marks = true; ++ reg->precise = true; ++ } ++ ++ bitmap_from_u64(mask, stack_mask); ++ for_each_set_bit(i, mask, 64) { ++ if (i >= func->allocated_stack / BPF_REG_SIZE) { ++ /* the sequence of instructions: ++ * 2: (bf) r3 = r10 ++ * 3: (7b) *(u64 *)(r3 -8) = r0 ++ * 4: (79) r4 = *(u64 *)(r10 -8) ++ * doesn't contain jmps. It's backtracked ++ * as a single block. ++ * During backtracking insn 3 is not recognized as ++ * stack access, so at the end of backtracking ++ * stack slot fp-8 is still marked in stack_mask. ++ * However the parent state may not have accessed ++ * fp-8 and it's "unallocated" stack space. ++ * In such case fallback to conservative. ++ */ ++ mark_all_scalars_precise(env, st); ++ return 0; ++ } ++ ++ if (func->stack[i].slot_type[0] != STACK_SPILL) { ++ stack_mask &= ~(1ull << i); ++ continue; ++ } ++ reg = &func->stack[i].spilled_ptr; ++ if (reg->type != SCALAR_VALUE) { ++ stack_mask &= ~(1ull << i); ++ continue; ++ } ++ if (!reg->precise) ++ new_marks = true; ++ reg->precise = true; ++ } ++ if (env->log.level & BPF_LOG_LEVEL) { ++ print_verifier_state(env, func); ++ verbose(env, "parent %s regs=%x stack=%llx marks\n", ++ new_marks ? 
"didn't have" : "already had", ++ reg_mask, stack_mask); ++ } ++ ++ if (!reg_mask && !stack_mask) ++ break; ++ if (!new_marks) ++ break; ++ ++ last_idx = st->last_insn_idx; ++ first_idx = st->first_insn_idx; ++ } ++ return 0; ++} ++ ++static int mark_chain_precision(struct bpf_verifier_env *env, int regno) ++{ ++ return __mark_chain_precision(env, regno, -1); ++} ++ ++static int mark_chain_precision_stack(struct bpf_verifier_env *env, int spi) ++{ ++ return __mark_chain_precision(env, -1, spi); + } + + static bool is_spillable_regtype(enum bpf_reg_type type) +@@ -549,129 +1842,932 @@ static bool is_spillable_regtype(enum bp + case PTR_TO_MAP_VALUE_OR_NULL: + case PTR_TO_STACK: + case PTR_TO_CTX: +- case FRAME_PTR: ++ case PTR_TO_PACKET: ++ case PTR_TO_PACKET_META: ++ case PTR_TO_PACKET_END: ++ case PTR_TO_FLOW_KEYS: + case CONST_PTR_TO_MAP: ++ case PTR_TO_SOCKET: ++ case PTR_TO_SOCKET_OR_NULL: ++ case PTR_TO_SOCK_COMMON: ++ case PTR_TO_SOCK_COMMON_OR_NULL: ++ case PTR_TO_TCP_SOCK: ++ case PTR_TO_TCP_SOCK_OR_NULL: ++ case PTR_TO_XDP_SOCK: + return true; + default: + return false; + } + } + ++/* Does this register contain a constant zero? 
*/ ++static bool register_is_null(struct bpf_reg_state *reg) ++{ ++ return reg->type == SCALAR_VALUE && tnum_equals_const(reg->var_off, 0); ++} ++ ++static bool register_is_const(struct bpf_reg_state *reg) ++{ ++ return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off); ++} ++ ++static bool __is_pointer_value(bool allow_ptr_leaks, ++ const struct bpf_reg_state *reg) ++{ ++ if (allow_ptr_leaks) ++ return false; ++ ++ return reg->type != SCALAR_VALUE; ++} ++ ++static void save_register_state(struct bpf_func_state *state, ++ int spi, struct bpf_reg_state *reg) ++{ ++ int i; ++ ++ state->stack[spi].spilled_ptr = *reg; ++ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; ++ ++ for (i = 0; i < BPF_REG_SIZE; i++) ++ state->stack[spi].slot_type[i] = STACK_SPILL; ++} ++ + /* check_stack_read/write functions track spill/fill of registers, + * stack boundary and alignment are checked in check_mem_access() + */ +-static int check_stack_write(struct verifier_state *state, int off, int size, +- int value_regno) ++static int check_stack_write(struct bpf_verifier_env *env, ++ struct bpf_func_state *state, /* func where register points to */ ++ int off, int size, int value_regno, int insn_idx) + { +- int i; ++ struct bpf_func_state *cur; /* state of the current function */ ++ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err; ++ u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg; ++ struct bpf_reg_state *reg = NULL; ++ ++ err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), ++ state->acquired_refs, true); ++ if (err) ++ return err; + /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, + * so it's aligned access and [off, off + size) are within stack limits + */ ++ if (!env->allow_ptr_leaks && ++ state->stack[spi].slot_type[0] == STACK_SPILL && ++ size != BPF_REG_SIZE) { ++ verbose(env, "attempt to corrupt spilled pointer on stack\n"); ++ return -EACCES; ++ } + +- if (value_regno >= 0 && +- 
is_spillable_regtype(state->regs[value_regno].type)) { +- ++ cur = env->cur_state->frame[env->cur_state->curframe]; ++ if (value_regno >= 0) ++ reg = &cur->regs[value_regno]; ++ ++ if (reg && size == BPF_REG_SIZE && register_is_const(reg) && ++ !register_is_null(reg) && env->allow_ptr_leaks) { ++ if (dst_reg != BPF_REG_FP) { ++ /* The backtracking logic can only recognize explicit ++ * stack slot address like [fp - 8]. Other spill of ++ * scalar via different register has to be conervative. ++ * Backtrack from here and mark all registers as precise ++ * that contributed into 'reg' being a constant. ++ */ ++ err = mark_chain_precision(env, value_regno); ++ if (err) ++ return err; ++ } ++ save_register_state(state, spi, reg); ++ } else if (reg && is_spillable_regtype(reg->type)) { + /* register containing pointer is being spilled into stack */ + if (size != BPF_REG_SIZE) { +- verbose("invalid size of register spill\n"); ++ verbose_linfo(env, insn_idx, "; "); ++ verbose(env, "invalid size of register spill\n"); + return -EACCES; + } + +- /* save register state */ +- state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = +- state->regs[value_regno]; ++ if (state != cur && reg->type == PTR_TO_STACK) { ++ verbose(env, "cannot spill pointers to stack into stack frame of the caller\n"); ++ return -EINVAL; ++ } + +- for (i = 0; i < BPF_REG_SIZE; i++) +- state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_SPILL; ++ if (!env->allow_ptr_leaks) { ++ bool sanitize = false; ++ ++ if (state->stack[spi].slot_type[0] == STACK_SPILL && ++ register_is_const(&state->stack[spi].spilled_ptr)) ++ sanitize = true; ++ for (i = 0; i < BPF_REG_SIZE; i++) ++ if (state->stack[spi].slot_type[i] == STACK_MISC) { ++ sanitize = true; ++ break; ++ } ++ if (sanitize) { ++ int *poff = &env->insn_aux_data[insn_idx].sanitize_stack_off; ++ int soff = (-spi - 1) * BPF_REG_SIZE; ++ ++ /* detected reuse of integer stack slot with a pointer ++ * which means either llvm is reusing stack slot or ++ * 
an attacker is trying to exploit CVE-2018-3639 ++ * (speculative store bypass) ++ * Have to sanitize that slot with preemptive ++ * store of zero. ++ */ ++ if (*poff && *poff != soff) { ++ /* disallow programs where single insn stores ++ * into two different stack slots, since verifier ++ * cannot sanitize them ++ */ ++ verbose(env, ++ "insn %d cannot access two stack slots fp%d and fp%d", ++ insn_idx, *poff, soff); ++ return -EINVAL; ++ } ++ *poff = soff; ++ } ++ } ++ save_register_state(state, spi, reg); + } else { +- /* regular write of data into stack */ +- state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE] = +- (struct reg_state) {}; ++ u8 type = STACK_MISC; ++ ++ /* regular write of data into stack destroys any spilled ptr */ ++ state->stack[spi].spilled_ptr.type = NOT_INIT; ++ /* Mark slots as STACK_MISC if they belonged to spilled ptr. */ ++ if (state->stack[spi].slot_type[0] == STACK_SPILL) ++ for (i = 0; i < BPF_REG_SIZE; i++) ++ state->stack[spi].slot_type[i] = STACK_MISC; ++ ++ /* only mark the slot as written if all 8 bytes were written ++ * otherwise read propagation may incorrectly stop too soon ++ * when stack slots are partially written. ++ * This heuristic means that read propagation will be ++ * conservative, since it will add reg_live_read marks ++ * to stack slots all the way to first state when programs ++ * writes+reads less than 8 bytes ++ */ ++ if (size == BPF_REG_SIZE) ++ state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN; ++ ++ /* when we zero initialize stack slots mark them as such */ ++ if (reg && register_is_null(reg)) { ++ /* backtracking doesn't work for STACK_ZERO yet. */ ++ err = mark_chain_precision(env, value_regno); ++ if (err) ++ return err; ++ type = STACK_ZERO; ++ } + ++ /* Mark slots affected by this stack write. 
*/ + for (i = 0; i < size; i++) +- state->stack_slot_type[MAX_BPF_STACK + off + i] = STACK_MISC; ++ state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = ++ type; + } + return 0; + } + +-static int check_stack_read(struct verifier_state *state, int off, int size, +- int value_regno) ++static int check_stack_read(struct bpf_verifier_env *env, ++ struct bpf_func_state *reg_state /* func where register points to */, ++ int off, int size, int value_regno) + { +- u8 *slot_type; +- int i; +- +- slot_type = &state->stack_slot_type[MAX_BPF_STACK + off]; ++ struct bpf_verifier_state *vstate = env->cur_state; ++ struct bpf_func_state *state = vstate->frame[vstate->curframe]; ++ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE; ++ struct bpf_reg_state *reg; ++ u8 *stype; ++ ++ if (reg_state->allocated_stack <= slot) { ++ verbose(env, "invalid read from stack off %d+0 size %d\n", ++ off, size); ++ return -EACCES; ++ } ++ stype = reg_state->stack[spi].slot_type; ++ reg = ®_state->stack[spi].spilled_ptr; + +- if (slot_type[0] == STACK_SPILL) { ++ if (stype[0] == STACK_SPILL) { + if (size != BPF_REG_SIZE) { +- verbose("invalid size of register spill\n"); +- return -EACCES; ++ if (reg->type != SCALAR_VALUE) { ++ verbose_linfo(env, env->insn_idx, "; "); ++ verbose(env, "invalid size of register fill\n"); ++ return -EACCES; ++ } ++ if (value_regno >= 0) { ++ mark_reg_unknown(env, state->regs, value_regno); ++ state->regs[value_regno].live |= REG_LIVE_WRITTEN; ++ } ++ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); ++ return 0; + } + for (i = 1; i < BPF_REG_SIZE; i++) { +- if (slot_type[i] != STACK_SPILL) { +- verbose("corrupted spill memory\n"); ++ if (stype[(slot - i) % BPF_REG_SIZE] != STACK_SPILL) { ++ verbose(env, "corrupted spill memory\n"); + return -EACCES; + } + } + +- if (value_regno >= 0) ++ if (value_regno >= 0) { + /* restore register state from stack */ +- state->regs[value_regno] = +- state->spilled_regs[(MAX_BPF_STACK + off) / BPF_REG_SIZE]; +- return 0; ++ 
state->regs[value_regno] = *reg; ++ /* mark reg as written since spilled pointer state likely ++ * has its liveness marks cleared by is_state_visited() ++ * which resets stack/reg liveness for state transitions ++ */ ++ state->regs[value_regno].live |= REG_LIVE_WRITTEN; ++ } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) { ++ /* If value_regno==-1, the caller is asking us whether ++ * it is acceptable to use this value as a SCALAR_VALUE ++ * (e.g. for XADD). ++ * We must not allow unprivileged callers to do that ++ * with spilled pointers. ++ */ ++ verbose(env, "leaking pointer from stack off %d\n", ++ off); ++ return -EACCES; ++ } ++ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); + } else { ++ int zeros = 0; ++ + for (i = 0; i < size; i++) { +- if (slot_type[i] != STACK_MISC) { +- verbose("invalid read from stack off %d+%d size %d\n", +- off, i, size); +- return -EACCES; ++ if (stype[(slot - i) % BPF_REG_SIZE] == STACK_MISC) ++ continue; ++ if (stype[(slot - i) % BPF_REG_SIZE] == STACK_ZERO) { ++ zeros++; ++ continue; + } ++ verbose(env, "invalid read from stack off %d+%d size %d\n", ++ off, i, size); ++ return -EACCES; ++ } ++ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64); ++ if (value_regno >= 0) { ++ if (zeros == size) { ++ /* any size read into register is zero extended, ++ * so the whole register == const_zero ++ */ ++ __mark_reg_const_zero(&state->regs[value_regno]); ++ /* backtracking doesn't support STACK_ZERO yet, ++ * so mark it precise here, so that later ++ * backtracking can stop here. ++ * Backtracking may not need this if this register ++ * doesn't participate in pointer adjustment. ++ * Forward propagation of precise flag is not ++ * necessary either. This mark is only to stop ++ * backtracking. Any register that contributed ++ * to const 0 was marked precise before spill. 
++ */ ++ state->regs[value_regno].precise = true; ++ } else { ++ /* have read misc data from the stack */ ++ mark_reg_unknown(env, state->regs, value_regno); ++ } ++ state->regs[value_regno].live |= REG_LIVE_WRITTEN; + } +- if (value_regno >= 0) +- /* have read misc data from the stack */ +- mark_reg_unknown_value(state->regs, value_regno); +- return 0; + } ++ return 0; ++} ++ ++static int check_stack_access(struct bpf_verifier_env *env, ++ const struct bpf_reg_state *reg, ++ int off, int size) ++{ ++ /* Stack accesses must be at a fixed offset, so that we ++ * can determine what type of data were returned. See ++ * check_stack_read(). ++ */ ++ if (!tnum_is_const(reg->var_off)) { ++ char tn_buf[48]; ++ ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); ++ verbose(env, "variable stack access var_off=%s off=%d size=%d\n", ++ tn_buf, off, size); ++ return -EACCES; ++ } ++ ++ if (off >= 0 || off < -MAX_BPF_STACK) { ++ verbose(env, "invalid stack off=%d size=%d\n", off, size); ++ return -EACCES; ++ } ++ ++ return 0; ++} ++ ++static int check_map_access_type(struct bpf_verifier_env *env, u32 regno, ++ int off, int size, enum bpf_access_type type) ++{ ++ struct bpf_reg_state *regs = cur_regs(env); ++ struct bpf_map *map = regs[regno].map_ptr; ++ u32 cap = bpf_map_flags_to_cap(map); ++ ++ if (type == BPF_WRITE && !(cap & BPF_MAP_CAN_WRITE)) { ++ verbose(env, "write into map forbidden, value_size=%d off=%d size=%d\n", ++ map->value_size, off, size); ++ return -EACCES; ++ } ++ ++ if (type == BPF_READ && !(cap & BPF_MAP_CAN_READ)) { ++ verbose(env, "read from map forbidden, value_size=%d off=%d size=%d\n", ++ map->value_size, off, size); ++ return -EACCES; ++ } ++ ++ return 0; + } + + /* check read/write into map element returned by bpf_map_lookup_elem() */ +-static int check_map_access(struct verifier_env *env, u32 regno, int off, +- int size) ++static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off, ++ int size, bool zero_size_allowed) + { +- struct 
bpf_map *map = env->cur_state.regs[regno].map_ptr; ++ struct bpf_reg_state *regs = cur_regs(env); ++ struct bpf_map *map = regs[regno].map_ptr; + +- if (off < 0 || off + size > map->value_size) { +- verbose("invalid access to map value, value_size=%d off=%d size=%d\n", ++ if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || ++ off + size > map->value_size) { ++ verbose(env, "invalid access to map value, value_size=%d off=%d size=%d\n", + map->value_size, off, size); + return -EACCES; + } + return 0; + } + +-/* check access to 'struct bpf_context' fields */ +-static int check_ctx_access(struct verifier_env *env, int off, int size, +- enum bpf_access_type t) ++/* check read/write into a map element with possible variable offset */ ++static int check_map_access(struct bpf_verifier_env *env, u32 regno, ++ int off, int size, bool zero_size_allowed) ++{ ++ struct bpf_verifier_state *vstate = env->cur_state; ++ struct bpf_func_state *state = vstate->frame[vstate->curframe]; ++ struct bpf_reg_state *reg = &state->regs[regno]; ++ int err; ++ ++ /* We may have adjusted the register to this map value, so we ++ * need to try adding each of min_value and max_value to off ++ * to make sure our theoretical access will be safe. ++ */ ++ if (env->log.level & BPF_LOG_LEVEL) ++ print_verifier_state(env, state); ++ ++ /* The minimum value is only important with signed ++ * comparisons where we can't assume the floor of a ++ * value is 0. If we are using signed variables for our ++ * index'es we need to make sure that whatever we use ++ * will have a set floor within our range. 
++ */ ++ if (reg->smin_value < 0 && ++ (reg->smin_value == S64_MIN || ++ (off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) || ++ reg->smin_value + off < 0)) { ++ verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", ++ regno); ++ return -EACCES; ++ } ++ err = __check_map_access(env, regno, reg->smin_value + off, size, ++ zero_size_allowed); ++ if (err) { ++ verbose(env, "R%d min value is outside of the array range\n", ++ regno); ++ return err; ++ } ++ ++ /* If we haven't set a max value then we need to bail since we can't be ++ * sure we won't do bad things. ++ * If reg->umax_value + off could overflow, treat that as unbounded too. ++ */ ++ if (reg->umax_value >= BPF_MAX_VAR_OFF) { ++ verbose(env, "R%d unbounded memory access, make sure to bounds check any array access into a map\n", ++ regno); ++ return -EACCES; ++ } ++ err = __check_map_access(env, regno, reg->umax_value + off, size, ++ zero_size_allowed); ++ if (err) ++ verbose(env, "R%d max value is outside of the array range\n", ++ regno); ++ ++ if (map_value_has_spin_lock(reg->map_ptr)) { ++ u32 lock = reg->map_ptr->spin_lock_off; ++ ++ /* if any part of struct bpf_spin_lock can be touched by ++ * load/store reject this program. ++ * To check that [x1, x2) overlaps with [y1, y2) ++ * it is sufficient to check x1 < y2 && y1 < x2. ++ */ ++ if (reg->smin_value + off < lock + sizeof(struct bpf_spin_lock) && ++ lock < reg->umax_value + off + size) { ++ verbose(env, "bpf_spin_lock cannot be accessed directly by load/store\n"); ++ return -EACCES; ++ } ++ } ++ return err; ++} ++ ++#define MAX_PACKET_OFF 0xffff ++ ++static bool may_access_direct_pkt_data(struct bpf_verifier_env *env, ++ const struct bpf_call_arg_meta *meta, ++ enum bpf_access_type t) ++{ ++ switch (env->prog->type) { ++ /* Program types only with direct read access go here! 
*/ ++ case BPF_PROG_TYPE_LWT_IN: ++ case BPF_PROG_TYPE_LWT_OUT: ++ case BPF_PROG_TYPE_LWT_SEG6LOCAL: ++ case BPF_PROG_TYPE_SK_REUSEPORT: ++ case BPF_PROG_TYPE_FLOW_DISSECTOR: ++ case BPF_PROG_TYPE_CGROUP_SKB: ++ if (t == BPF_WRITE) ++ return false; ++ /* fallthrough */ ++ ++ /* Program types with direct read + write access go here! */ ++ case BPF_PROG_TYPE_SCHED_CLS: ++ case BPF_PROG_TYPE_SCHED_ACT: ++ case BPF_PROG_TYPE_XDP: ++ case BPF_PROG_TYPE_LWT_XMIT: ++ case BPF_PROG_TYPE_SK_SKB: ++ case BPF_PROG_TYPE_SK_MSG: ++ if (meta) ++ return meta->pkt_access; ++ ++ env->seen_direct_write = true; ++ return true; ++ ++ case BPF_PROG_TYPE_CGROUP_SOCKOPT: ++ if (t == BPF_WRITE) ++ env->seen_direct_write = true; ++ ++ return true; ++ ++ default: ++ return false; ++ } ++} ++ ++static int __check_packet_access(struct bpf_verifier_env *env, u32 regno, ++ int off, int size, bool zero_size_allowed) ++{ ++ struct bpf_reg_state *regs = cur_regs(env); ++ struct bpf_reg_state *reg = ®s[regno]; ++ ++ if (off < 0 || size < 0 || (size == 0 && !zero_size_allowed) || ++ (u64)off + size > reg->range) { ++ verbose(env, "invalid access to packet, off=%d size=%d, R%d(id=%d,off=%d,r=%d)\n", ++ off, size, regno, reg->id, reg->off, reg->range); ++ return -EACCES; ++ } ++ return 0; ++} ++ ++static int check_packet_access(struct bpf_verifier_env *env, u32 regno, int off, ++ int size, bool zero_size_allowed) ++{ ++ struct bpf_reg_state *regs = cur_regs(env); ++ struct bpf_reg_state *reg = ®s[regno]; ++ int err; ++ ++ /* We may have added a variable offset to the packet pointer; but any ++ * reg->range we have comes after that. We are only checking the fixed ++ * offset. ++ */ ++ ++ /* We don't allow negative numbers, because we aren't tracking enough ++ * detail to prove they're safe. 
++ */ ++ if (reg->smin_value < 0) { ++ verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", ++ regno); ++ return -EACCES; ++ } ++ err = __check_packet_access(env, regno, off, size, zero_size_allowed); ++ if (err) { ++ verbose(env, "R%d offset is outside of the packet\n", regno); ++ return err; ++ } ++ ++ /* __check_packet_access has made sure "off + size - 1" is within u16. ++ * reg->umax_value can't be bigger than MAX_PACKET_OFF which is 0xffff, ++ * otherwise find_good_pkt_pointers would have refused to set range info ++ * that __check_packet_access would have rejected this pkt access. ++ * Therefore, "off + reg->umax_value + size - 1" won't overflow u32. ++ */ ++ env->prog->aux->max_pkt_offset = ++ max_t(u32, env->prog->aux->max_pkt_offset, ++ off + reg->umax_value + size - 1); ++ ++ return err; ++} ++ ++/* check access to 'struct bpf_context' fields. Supports fixed offsets only */ ++static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off, int size, ++ enum bpf_access_type t, enum bpf_reg_type *reg_type) + { +- if (env->prog->aux->ops->is_valid_access && +- env->prog->aux->ops->is_valid_access(off, size, t)) ++ struct bpf_insn_access_aux info = { ++ .reg_type = *reg_type, ++ }; ++ ++ if (env->ops->is_valid_access && ++ env->ops->is_valid_access(off, size, t, env->prog, &info)) { ++ /* A non zero info.ctx_field_size indicates that this field is a ++ * candidate for later verifier transformation to load the whole ++ * field and then apply a mask when accessed with a narrower ++ * access than actual ctx access size. A zero info.ctx_field_size ++ * will only allow for whole field access and rejects any other ++ * type of narrower access. 
++ */ ++ *reg_type = info.reg_type; ++ ++ env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; ++ /* remember the offset of last byte accessed in ctx */ ++ if (env->prog->aux->max_ctx_offset < off + size) ++ env->prog->aux->max_ctx_offset = off + size; + return 0; ++ } + +- verbose("invalid bpf_context access off=%d size=%d\n", off, size); ++ verbose(env, "invalid bpf_context access off=%d size=%d\n", off, size); + return -EACCES; + } + +-static bool is_pointer_value(struct verifier_env *env, int regno) ++static int check_flow_keys_access(struct bpf_verifier_env *env, int off, ++ int size) + { +- if (env->allow_ptr_leaks) +- return false; ++ if (size < 0 || off < 0 || ++ (u64)off + size > sizeof(struct bpf_flow_keys)) { ++ verbose(env, "invalid access to flow keys off=%d size=%d\n", ++ off, size); ++ return -EACCES; ++ } ++ return 0; ++} + +- switch (env->cur_state.regs[regno].type) { +- case UNKNOWN_VALUE: +- case CONST_IMM: +- return false; ++static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, ++ u32 regno, int off, int size, ++ enum bpf_access_type t) ++{ ++ struct bpf_reg_state *regs = cur_regs(env); ++ struct bpf_reg_state *reg = ®s[regno]; ++ struct bpf_insn_access_aux info = {}; ++ bool valid; ++ ++ if (reg->smin_value < 0) { ++ verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", ++ regno); ++ return -EACCES; ++ } ++ ++ switch (reg->type) { ++ case PTR_TO_SOCK_COMMON: ++ valid = bpf_sock_common_is_valid_access(off, size, t, &info); ++ break; + default: +- return true; ++ valid = false; ++ } ++ ++ ++ if (valid) { ++ env->insn_aux_data[insn_idx].ctx_field_size = ++ info.ctx_field_size; ++ return 0; ++ } ++ ++ verbose(env, "R%d invalid %s access off=%d size=%d\n", ++ regno, reg_type_str[reg->type], off, size); ++ ++ return -EACCES; ++} ++ ++static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) ++{ ++ return cur_regs(env) + regno; ++} ++ ++static bool 
is_pointer_value(struct bpf_verifier_env *env, int regno) ++{ ++ return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); ++} ++ ++static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) ++{ ++ const struct bpf_reg_state *reg = reg_state(env, regno); ++ ++ return reg->type == PTR_TO_CTX; ++} ++ ++static bool is_sk_reg(struct bpf_verifier_env *env, int regno) ++{ ++ const struct bpf_reg_state *reg = reg_state(env, regno); ++ ++ return type_is_sk_pointer(reg->type); ++} ++ ++static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) ++{ ++ const struct bpf_reg_state *reg = reg_state(env, regno); ++ ++ return type_is_pkt_pointer(reg->type); ++} ++ ++static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) ++{ ++ const struct bpf_reg_state *reg = reg_state(env, regno); ++ ++ /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. */ ++ return reg->type == PTR_TO_FLOW_KEYS; ++} ++ ++static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, ++ const struct bpf_reg_state *reg, ++ int off, int size, bool strict) ++{ ++ struct tnum reg_off; ++ int ip_align; ++ ++ /* Byte size accesses are always allowed. */ ++ if (!strict || size == 1) ++ return 0; ++ ++ /* For platforms that do not have a Kconfig enabling ++ * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS the value of ++ * NET_IP_ALIGN is universally set to '2'. And on platforms ++ * that do set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, we get ++ * to this code only in strict mode where we want to emulate ++ * the NET_IP_ALIGN==2 checking. Therefore use an ++ * unconditional IP align value of '2'. 
++ */ ++ ip_align = 2; ++ ++ reg_off = tnum_add(reg->var_off, tnum_const(ip_align + reg->off + off)); ++ if (!tnum_is_aligned(reg_off, size)) { ++ char tn_buf[48]; ++ ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); ++ verbose(env, ++ "misaligned packet access off %d+%s+%d+%d size %d\n", ++ ip_align, tn_buf, reg->off, off, size); ++ return -EACCES; ++ } ++ ++ return 0; ++} ++ ++static int check_generic_ptr_alignment(struct bpf_verifier_env *env, ++ const struct bpf_reg_state *reg, ++ const char *pointer_desc, ++ int off, int size, bool strict) ++{ ++ struct tnum reg_off; ++ ++ /* Byte size accesses are always allowed. */ ++ if (!strict || size == 1) ++ return 0; ++ ++ reg_off = tnum_add(reg->var_off, tnum_const(reg->off + off)); ++ if (!tnum_is_aligned(reg_off, size)) { ++ char tn_buf[48]; ++ ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); ++ verbose(env, "misaligned %saccess off %s+%d+%d size %d\n", ++ pointer_desc, tn_buf, reg->off, off, size); ++ return -EACCES; ++ } ++ ++ return 0; ++} ++ ++static int check_ptr_alignment(struct bpf_verifier_env *env, ++ const struct bpf_reg_state *reg, int off, ++ int size, bool strict_alignment_once) ++{ ++ bool strict = env->strict_alignment || strict_alignment_once; ++ const char *pointer_desc = ""; ++ ++ switch (reg->type) { ++ case PTR_TO_PACKET: ++ case PTR_TO_PACKET_META: ++ /* Special case, because of NET_IP_ALIGN. Given metadata sits ++ * right in front, treat it the very same way. ++ */ ++ return check_pkt_ptr_alignment(env, reg, off, size, strict); ++ case PTR_TO_FLOW_KEYS: ++ pointer_desc = "flow keys "; ++ break; ++ case PTR_TO_MAP_VALUE: ++ pointer_desc = "value "; ++ break; ++ case PTR_TO_CTX: ++ pointer_desc = "context "; ++ break; ++ case PTR_TO_STACK: ++ pointer_desc = "stack "; ++ /* The stack spill tracking logic in check_stack_write() ++ * and check_stack_read() relies on stack accesses being ++ * aligned. 
++ */ ++ strict = true; ++ break; ++ case PTR_TO_SOCKET: ++ pointer_desc = "sock "; ++ break; ++ case PTR_TO_SOCK_COMMON: ++ pointer_desc = "sock_common "; ++ break; ++ case PTR_TO_TCP_SOCK: ++ pointer_desc = "tcp_sock "; ++ break; ++ case PTR_TO_XDP_SOCK: ++ pointer_desc = "xdp_sock "; ++ break; ++ default: ++ break; ++ } ++ return check_generic_ptr_alignment(env, reg, pointer_desc, off, size, ++ strict); ++} ++ ++static int update_stack_depth(struct bpf_verifier_env *env, ++ const struct bpf_func_state *func, ++ int off) ++{ ++ u16 stack = env->subprog_info[func->subprogno].stack_depth; ++ ++ if (stack >= -off) ++ return 0; ++ ++ /* update known max for given subprogram */ ++ env->subprog_info[func->subprogno].stack_depth = -off; ++ return 0; ++} ++ ++/* starting from main bpf function walk all instructions of the function ++ * and recursively walk all callees that given function can call. ++ * Ignore jump and exit insns. ++ * Since recursion is prevented by check_cfg() this algorithm ++ * only needs a local stack of MAX_CALL_FRAMES to remember callsites ++ */ ++static int check_max_stack_depth(struct bpf_verifier_env *env) ++{ ++ int depth = 0, frame = 0, idx = 0, i = 0, subprog_end; ++ struct bpf_subprog_info *subprog = env->subprog_info; ++ struct bpf_insn *insn = env->prog->insnsi; ++ int ret_insn[MAX_CALL_FRAMES]; ++ int ret_prog[MAX_CALL_FRAMES]; ++ ++process_func: ++ /* protect against potential stack overflow that might happen when ++ * bpf2bpf calls get combined with tailcalls. Limit the caller's stack ++ * depth for such case down to 256 so that the worst case scenario ++ * would result in 8k stack size (32 which is tailcall limit * 256 = ++ * 8k). 
++ * ++ * To get the idea what might happen, see an example: ++ * func1 -> sub rsp, 128 ++ * subfunc1 -> sub rsp, 256 ++ * tailcall1 -> add rsp, 256 ++ * func2 -> sub rsp, 192 (total stack size = 128 + 192 = 320) ++ * subfunc2 -> sub rsp, 64 ++ * subfunc22 -> sub rsp, 128 ++ * tailcall2 -> add rsp, 128 ++ * func3 -> sub rsp, 32 (total stack size 128 + 192 + 64 + 32 = 416) ++ * ++ * tailcall will unwind the current stack frame but it will not get rid ++ * of caller's stack as shown on the example above. ++ */ ++ if (idx && subprog[idx].has_tail_call && depth >= 256) { ++ verbose(env, ++ "tail_calls are not allowed when call stack of previous frames is %d bytes. Too large\n", ++ depth); ++ return -EACCES; ++ } ++ /* round up to 32-bytes, since this is granularity ++ * of interpreter stack size ++ */ ++ depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); ++ if (depth > MAX_BPF_STACK) { ++ verbose(env, "combined stack size of %d calls is %d. Too large\n", ++ frame + 1, depth); ++ return -EACCES; ++ } ++continue_func: ++ subprog_end = subprog[idx + 1].start; ++ for (; i < subprog_end; i++) { ++ if (insn[i].code != (BPF_JMP | BPF_CALL)) ++ continue; ++ if (insn[i].src_reg != BPF_PSEUDO_CALL) ++ continue; ++ /* remember insn and function to return to */ ++ ret_insn[frame] = i + 1; ++ ret_prog[frame] = idx; ++ ++ /* find the callee */ ++ i = i + insn[i].imm + 1; ++ idx = find_subprog(env, i); ++ if (idx < 0) { ++ WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", ++ i); ++ return -EFAULT; ++ } ++ frame++; ++ if (frame >= MAX_CALL_FRAMES) { ++ verbose(env, "the call stack of %d frames is too deep !\n", ++ frame); ++ return -E2BIG; ++ } ++ goto process_func; ++ } ++ /* end of for() loop means the last insn of the 'subprog' ++ * was reached. 
Doesn't matter whether it was JA or EXIT ++ */ ++ if (frame == 0) ++ return 0; ++ depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32); ++ frame--; ++ i = ret_insn[frame]; ++ idx = ret_prog[frame]; ++ goto continue_func; ++} ++ ++#ifndef CONFIG_BPF_JIT_ALWAYS_ON ++static int get_callee_stack_depth(struct bpf_verifier_env *env, ++ const struct bpf_insn *insn, int idx) ++{ ++ int start = idx + insn->imm + 1, subprog; ++ ++ subprog = find_subprog(env, start); ++ if (subprog < 0) { ++ WARN_ONCE(1, "verifier bug. No program starts at insn %d\n", ++ start); ++ return -EFAULT; + } ++ return env->subprog_info[subprog].stack_depth; ++} ++#endif ++ ++static int check_ctx_reg(struct bpf_verifier_env *env, ++ const struct bpf_reg_state *reg, int regno) ++{ ++ /* Access to ctx or passing it to a helper is only allowed in ++ * its original, unmodified form. ++ */ ++ ++ if (reg->off) { ++ verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n", ++ regno, reg->off); ++ return -EACCES; ++ } ++ ++ if (!tnum_is_const(reg->var_off) || reg->var_off.value) { ++ char tn_buf[48]; ++ ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); ++ verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf); ++ return -EACCES; ++ } ++ ++ return 0; ++} ++ ++static int check_tp_buffer_access(struct bpf_verifier_env *env, ++ const struct bpf_reg_state *reg, ++ int regno, int off, int size) ++{ ++ if (off < 0) { ++ verbose(env, ++ "R%d invalid tracepoint buffer access: off=%d, size=%d", ++ regno, off, size); ++ return -EACCES; ++ } ++ if (!tnum_is_const(reg->var_off) || reg->var_off.value) { ++ char tn_buf[48]; ++ ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); ++ verbose(env, ++ "R%d invalid variable buffer offset: off=%d, var_off=%s", ++ regno, off, tn_buf); ++ return -EACCES; ++ } ++ if (off + size > env->prog->aux->max_tp_access) ++ env->prog->aux->max_tp_access = off + size; ++ ++ return 0; ++} ++ ++ ++/* truncate register to smaller size (in bytes) ++ * must be 
called with size < BPF_REG_SIZE ++ */ ++static void coerce_reg_to_size(struct bpf_reg_state *reg, int size) ++{ ++ u64 mask; ++ ++ /* clear high bits in bit representation */ ++ reg->var_off = tnum_cast(reg->var_off, size); ++ ++ /* fix arithmetic bounds */ ++ mask = ((u64)1 << (size * 8)) - 1; ++ if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) { ++ reg->umin_value &= mask; ++ reg->umax_value &= mask; ++ } else { ++ reg->umin_value = 0; ++ reg->umax_value = mask; ++ } ++ reg->smin_value = reg->umin_value; ++ reg->smax_value = reg->umax_value; + } + + /* check whether memory at (regno + off) is accessible for t = (read | write) +@@ -680,225 +2776,692 @@ static bool is_pointer_value(struct veri + * if t==write && value_regno==-1, some unknown value is stored into memory + * if t==read && value_regno==-1, don't care what we read from memory + */ +-static int check_mem_access(struct verifier_env *env, u32 regno, int off, +- int bpf_size, enum bpf_access_type t, +- int value_regno) ++static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, ++ int off, int bpf_size, enum bpf_access_type t, ++ int value_regno, bool strict_alignment_once) + { +- struct verifier_state *state = &env->cur_state; ++ struct bpf_reg_state *regs = cur_regs(env); ++ struct bpf_reg_state *reg = regs + regno; ++ struct bpf_func_state *state; + int size, err = 0; + +- if (state->regs[regno].type == PTR_TO_STACK) +- off += state->regs[regno].imm; +- + size = bpf_size_to_bytes(bpf_size); + if (size < 0) + return size; + +- if (off % size != 0) { +- verbose("misaligned access off %d size %d\n", off, size); +- return -EACCES; +- } ++ /* alignment checks will add in reg->off themselves */ ++ err = check_ptr_alignment(env, reg, off, size, strict_alignment_once); ++ if (err) ++ return err; ++ ++ /* for access checks, reg->off is just part of off */ ++ off += reg->off; + +- if (state->regs[regno].type == PTR_TO_MAP_VALUE) { ++ if (reg->type == PTR_TO_MAP_VALUE) { + if (t 
== BPF_WRITE && value_regno >= 0 && + is_pointer_value(env, value_regno)) { +- verbose("R%d leaks addr into map\n", value_regno); ++ verbose(env, "R%d leaks addr into map\n", value_regno); + return -EACCES; + } +- err = check_map_access(env, regno, off, size); ++ err = check_map_access_type(env, regno, off, size, t); ++ if (err) ++ return err; ++ err = check_map_access(env, regno, off, size, false); + if (!err && t == BPF_READ && value_regno >= 0) +- mark_reg_unknown_value(state->regs, value_regno); ++ mark_reg_unknown(env, regs, value_regno); ++ ++ } else if (reg->type == PTR_TO_CTX) { ++ enum bpf_reg_type reg_type = SCALAR_VALUE; + +- } else if (state->regs[regno].type == PTR_TO_CTX) { + if (t == BPF_WRITE && value_regno >= 0 && + is_pointer_value(env, value_regno)) { +- verbose("R%d leaks addr into ctx\n", value_regno); ++ verbose(env, "R%d leaks addr into ctx\n", value_regno); + return -EACCES; + } +- err = check_ctx_access(env, off, size, t); +- if (!err && t == BPF_READ && value_regno >= 0) +- mark_reg_unknown_value(state->regs, value_regno); + +- } else if (state->regs[regno].type == FRAME_PTR || +- state->regs[regno].type == PTR_TO_STACK) { +- if (off >= 0 || off < -MAX_BPF_STACK) { +- verbose("invalid stack off=%d size=%d\n", off, size); ++ err = check_ctx_reg(env, reg, regno); ++ if (err < 0) ++ return err; ++ ++ err = check_ctx_access(env, insn_idx, off, size, t, ®_type); ++ if (!err && t == BPF_READ && value_regno >= 0) { ++ /* ctx access returns either a scalar, or a ++ * PTR_TO_PACKET[_META,_END]. In the latter ++ * case, we know the offset is zero. ++ */ ++ if (reg_type == SCALAR_VALUE) { ++ mark_reg_unknown(env, regs, value_regno); ++ } else { ++ mark_reg_known_zero(env, regs, ++ value_regno); ++ if (reg_type_may_be_null(reg_type)) ++ regs[value_regno].id = ++env->id_gen; ++ /* A load of ctx field could have different ++ * actual load size with the one encoded in the ++ * insn. When the dst is PTR, it is for sure not ++ * a sub-register. 
++ */ ++ regs[value_regno].subreg_def = DEF_NOT_SUBREG; ++ } ++ regs[value_regno].type = reg_type; ++ } ++ ++ } else if (reg->type == PTR_TO_STACK) { ++ off += reg->var_off.value; ++ err = check_stack_access(env, reg, off, size); ++ if (err) ++ return err; ++ ++ state = func(env, reg); ++ err = update_stack_depth(env, state, off); ++ if (err) ++ return err; ++ ++ if (t == BPF_WRITE) ++ err = check_stack_write(env, state, off, size, ++ value_regno, insn_idx); ++ else ++ err = check_stack_read(env, state, off, size, ++ value_regno); ++ } else if (reg_is_pkt_pointer(reg)) { ++ if (t == BPF_WRITE && !may_access_direct_pkt_data(env, NULL, t)) { ++ verbose(env, "cannot write into packet\n"); ++ return -EACCES; ++ } ++ if (t == BPF_WRITE && value_regno >= 0 && ++ is_pointer_value(env, value_regno)) { ++ verbose(env, "R%d leaks addr into packet\n", ++ value_regno); ++ return -EACCES; ++ } ++ err = check_packet_access(env, regno, off, size, false); ++ if (!err && t == BPF_READ && value_regno >= 0) ++ mark_reg_unknown(env, regs, value_regno); ++ } else if (reg->type == PTR_TO_FLOW_KEYS) { ++ if (t == BPF_WRITE && value_regno >= 0 && ++ is_pointer_value(env, value_regno)) { ++ verbose(env, "R%d leaks addr into flow keys\n", ++ value_regno); + return -EACCES; + } ++ ++ err = check_flow_keys_access(env, off, size); ++ if (!err && t == BPF_READ && value_regno >= 0) ++ mark_reg_unknown(env, regs, value_regno); ++ } else if (type_is_sk_pointer(reg->type)) { + if (t == BPF_WRITE) { +- if (!env->allow_ptr_leaks && +- state->stack_slot_type[MAX_BPF_STACK + off] == STACK_SPILL && +- size != BPF_REG_SIZE) { +- verbose("attempt to corrupt spilled pointer on stack\n"); +- return -EACCES; +- } +- err = check_stack_write(state, off, size, value_regno); +- } else { +- err = check_stack_read(state, off, size, value_regno); ++ verbose(env, "R%d cannot write into %s\n", ++ regno, reg_type_str[reg->type]); ++ return -EACCES; + } ++ err = check_sock_access(env, insn_idx, regno, off, size, t); ++ 
if (!err && value_regno >= 0) ++ mark_reg_unknown(env, regs, value_regno); ++ } else if (reg->type == PTR_TO_TP_BUFFER) { ++ err = check_tp_buffer_access(env, reg, regno, off, size); ++ if (!err && t == BPF_READ && value_regno >= 0) ++ mark_reg_unknown(env, regs, value_regno); + } else { +- verbose("R%d invalid mem access '%s'\n", +- regno, reg_type_str[state->regs[regno].type]); ++ verbose(env, "R%d invalid mem access '%s'\n", regno, ++ reg_type_str[reg->type]); + return -EACCES; + } ++ ++ if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && ++ regs[value_regno].type == SCALAR_VALUE) { ++ /* b/h/w load zero-extends, mark upper bits as known 0 */ ++ coerce_reg_to_size(®s[value_regno], size); ++ } + return err; + } + +-static int check_xadd(struct verifier_env *env, struct bpf_insn *insn) ++static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_insn *insn) + { +- struct reg_state *regs = env->cur_state.regs; + int err; + + if ((BPF_SIZE(insn->code) != BPF_W && BPF_SIZE(insn->code) != BPF_DW) || + insn->imm != 0) { +- verbose("BPF_XADD uses reserved fields\n"); ++ verbose(env, "BPF_XADD uses reserved fields\n"); + return -EINVAL; + } + + /* check src1 operand */ +- err = check_reg_arg(regs, insn->src_reg, SRC_OP); ++ err = check_reg_arg(env, insn->src_reg, SRC_OP); + if (err) + return err; + + /* check src2 operand */ +- err = check_reg_arg(regs, insn->dst_reg, SRC_OP); ++ err = check_reg_arg(env, insn->dst_reg, SRC_OP); + if (err) + return err; + ++ if (is_pointer_value(env, insn->src_reg)) { ++ verbose(env, "R%d leaks addr into mem\n", insn->src_reg); ++ return -EACCES; ++ } ++ ++ if (is_ctx_reg(env, insn->dst_reg) || ++ is_pkt_reg(env, insn->dst_reg) || ++ is_flow_key_reg(env, insn->dst_reg) || ++ is_sk_reg(env, insn->dst_reg)) { ++ verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", ++ insn->dst_reg, ++ reg_type_str[reg_state(env, insn->dst_reg)->type]); ++ return -EACCES; ++ } ++ + /* check whether atomic_add can 
read the memory */ +- err = check_mem_access(env, insn->dst_reg, insn->off, +- BPF_SIZE(insn->code), BPF_READ, -1); ++ err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off, ++ BPF_SIZE(insn->code), BPF_READ, -1, true); + if (err) + return err; + + /* check whether atomic_add can write into the same memory */ +- return check_mem_access(env, insn->dst_reg, insn->off, +- BPF_SIZE(insn->code), BPF_WRITE, -1); ++ return check_mem_access(env, insn_idx, insn->dst_reg, insn->off, ++ BPF_SIZE(insn->code), BPF_WRITE, -1, true); ++} ++ ++static int __check_stack_boundary(struct bpf_verifier_env *env, u32 regno, ++ int off, int access_size, ++ bool zero_size_allowed) ++{ ++ struct bpf_reg_state *reg = reg_state(env, regno); ++ ++ if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || ++ access_size < 0 || (access_size == 0 && !zero_size_allowed)) { ++ if (tnum_is_const(reg->var_off)) { ++ verbose(env, "invalid stack type R%d off=%d access_size=%d\n", ++ regno, off, access_size); ++ } else { ++ char tn_buf[48]; ++ ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); ++ verbose(env, "invalid stack type R%d var_off=%s access_size=%d\n", ++ regno, tn_buf, access_size); ++ } ++ return -EACCES; ++ } ++ return 0; + } + + /* when register 'regno' is passed into function that will read 'access_size' + * bytes from that pointer, make sure that it's within stack boundary +- * and all elements of stack are initialized ++ * and all elements of stack are initialized. ++ * Unlike most pointer bounds-checking functions, this one doesn't take an ++ * 'off' argument, so it has to add in reg->off itself. 
+ */ +-static int check_stack_boundary(struct verifier_env *env, +- int regno, int access_size) ++static int check_stack_boundary(struct bpf_verifier_env *env, int regno, ++ int access_size, bool zero_size_allowed, ++ struct bpf_call_arg_meta *meta) + { +- struct verifier_state *state = &env->cur_state; +- struct reg_state *regs = state->regs; +- int off, i; ++ struct bpf_reg_state *reg = reg_state(env, regno); ++ struct bpf_func_state *state = func(env, reg); ++ int err, min_off, max_off, i, j, slot, spi; ++ ++ if (reg->type != PTR_TO_STACK) { ++ /* Allow zero-byte read from NULL, regardless of pointer type */ ++ if (zero_size_allowed && access_size == 0 && ++ register_is_null(reg)) ++ return 0; + +- if (regs[regno].type != PTR_TO_STACK) ++ verbose(env, "R%d type=%s expected=%s\n", regno, ++ reg_type_str[reg->type], ++ reg_type_str[PTR_TO_STACK]); + return -EACCES; ++ } + +- off = regs[regno].imm; +- if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || +- access_size <= 0) { +- verbose("invalid stack type R%d off=%d access_size=%d\n", +- regno, off, access_size); ++ if (tnum_is_const(reg->var_off)) { ++ min_off = max_off = reg->var_off.value + reg->off; ++ err = __check_stack_boundary(env, regno, min_off, access_size, ++ zero_size_allowed); ++ if (err) ++ return err; ++ } else { ++ /* Variable offset is prohibited for unprivileged mode for ++ * simplicity since it requires corresponding support in ++ * Spectre masking for stack ALU. ++ * See also retrieve_ptr_limit(). ++ */ ++ if (!env->allow_ptr_leaks) { ++ char tn_buf[48]; ++ ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); ++ verbose(env, "R%d indirect variable offset stack access prohibited for !root, var_off=%s\n", ++ regno, tn_buf); ++ return -EACCES; ++ } ++ /* Only initialized buffer on stack is allowed to be accessed ++ * with variable offset. 
With uninitialized buffer it's hard to ++ * guarantee that whole memory is marked as initialized on ++ * helper return since specific bounds are unknown what may ++ * cause uninitialized stack leaking. ++ */ ++ if (meta && meta->raw_mode) ++ meta = NULL; ++ ++ if (reg->smax_value >= BPF_MAX_VAR_OFF || ++ reg->smax_value <= -BPF_MAX_VAR_OFF) { ++ verbose(env, "R%d unbounded indirect variable offset stack access\n", ++ regno); ++ return -EACCES; ++ } ++ min_off = reg->smin_value + reg->off; ++ max_off = reg->smax_value + reg->off; ++ err = __check_stack_boundary(env, regno, min_off, access_size, ++ zero_size_allowed); ++ if (err) { ++ verbose(env, "R%d min value is outside of stack bound\n", ++ regno); ++ return err; ++ } ++ err = __check_stack_boundary(env, regno, max_off, access_size, ++ zero_size_allowed); ++ if (err) { ++ verbose(env, "R%d max value is outside of stack bound\n", ++ regno); ++ return err; ++ } ++ } ++ ++ if (meta && meta->raw_mode) { ++ meta->access_size = access_size; ++ meta->regno = regno; ++ return 0; ++ } ++ ++ for (i = min_off; i < max_off + access_size; i++) { ++ u8 *stype; ++ ++ slot = -i - 1; ++ spi = slot / BPF_REG_SIZE; ++ if (state->allocated_stack <= slot) ++ goto err; ++ stype = &state->stack[spi].slot_type[slot % BPF_REG_SIZE]; ++ if (*stype == STACK_MISC) ++ goto mark; ++ if (*stype == STACK_ZERO) { ++ /* helper can write anything into the stack */ ++ *stype = STACK_MISC; ++ goto mark; ++ } ++ if (state->stack[spi].slot_type[0] == STACK_SPILL && ++ state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { ++ __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); ++ for (j = 0; j < BPF_REG_SIZE; j++) ++ state->stack[spi].slot_type[j] = STACK_MISC; ++ goto mark; ++ } ++ ++err: ++ if (tnum_is_const(reg->var_off)) { ++ verbose(env, "invalid indirect read from stack off %d+%d size %d\n", ++ min_off, i - min_off, access_size); ++ } else { ++ char tn_buf[48]; ++ ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); ++ verbose(env, "invalid 
indirect read from stack var_off %s+%d size %d\n", ++ tn_buf, i - min_off, access_size); ++ } + return -EACCES; ++mark: ++ /* reading any byte out of 8-byte 'spill_slot' will cause ++ * the whole slot to be marked as 'read' ++ */ ++ mark_reg_read(env, &state->stack[spi].spilled_ptr, ++ state->stack[spi].spilled_ptr.parent, ++ REG_LIVE_READ64); + } ++ return update_stack_depth(env, state, min_off); ++} ++ ++static int check_helper_mem_access(struct bpf_verifier_env *env, int regno, ++ int access_size, bool zero_size_allowed, ++ struct bpf_call_arg_meta *meta) ++{ ++ struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; + +- for (i = 0; i < access_size; i++) { +- if (state->stack_slot_type[MAX_BPF_STACK + off + i] != STACK_MISC) { +- verbose("invalid indirect read from stack off %d+%d size %d\n", +- off, i, access_size); ++ switch (reg->type) { ++ case PTR_TO_PACKET: ++ case PTR_TO_PACKET_META: ++ return check_packet_access(env, regno, reg->off, access_size, ++ zero_size_allowed); ++ case PTR_TO_MAP_VALUE: ++ if (check_map_access_type(env, regno, reg->off, access_size, ++ meta && meta->raw_mode ? BPF_WRITE : ++ BPF_READ)) + return -EACCES; ++ return check_map_access(env, regno, reg->off, access_size, ++ zero_size_allowed); ++ default: /* scalar_value|ptr_to_stack or invalid ptr */ ++ return check_stack_boundary(env, regno, access_size, ++ zero_size_allowed, meta); ++ } ++} ++ ++/* Implementation details: ++ * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL ++ * Two bpf_map_lookups (even with the same key) will have different reg->id. ++ * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after ++ * value_or_null->value transition, since the verifier only cares about ++ * the range of access to valid map value pointer and doesn't care about actual ++ * address of the map element. ++ * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps ++ * reg->id > 0 after value_or_null->value transition. 
By doing so ++ * two bpf_map_lookups will be considered two different pointers that ++ * point to different bpf_spin_locks. ++ * The verifier allows taking only one bpf_spin_lock at a time to avoid ++ * dead-locks. ++ * Since only one bpf_spin_lock is allowed the checks are simpler than ++ * reg_is_refcounted() logic. The verifier needs to remember only ++ * one spin_lock instead of array of acquired_refs. ++ * cur_state->active_spin_lock remembers which map value element got locked ++ * and clears it after bpf_spin_unlock. ++ */ ++static int process_spin_lock(struct bpf_verifier_env *env, int regno, ++ bool is_lock) ++{ ++ struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; ++ struct bpf_verifier_state *cur = env->cur_state; ++ bool is_const = tnum_is_const(reg->var_off); ++ struct bpf_map *map = reg->map_ptr; ++ u64 val = reg->var_off.value; ++ ++ if (reg->type != PTR_TO_MAP_VALUE) { ++ verbose(env, "R%d is not a pointer to map_value\n", regno); ++ return -EINVAL; ++ } ++ if (!is_const) { ++ verbose(env, ++ "R%d doesn't have constant offset. 
bpf_spin_lock has to be at the constant offset\n", ++ regno); ++ return -EINVAL; ++ } ++ if (!map->btf) { ++ verbose(env, ++ "map '%s' has to have BTF in order to use bpf_spin_lock\n", ++ map->name); ++ return -EINVAL; ++ } ++ if (!map_value_has_spin_lock(map)) { ++ if (map->spin_lock_off == -E2BIG) ++ verbose(env, ++ "map '%s' has more than one 'struct bpf_spin_lock'\n", ++ map->name); ++ else if (map->spin_lock_off == -ENOENT) ++ verbose(env, ++ "map '%s' doesn't have 'struct bpf_spin_lock'\n", ++ map->name); ++ else ++ verbose(env, ++ "map '%s' is not a struct type or bpf_spin_lock is mangled\n", ++ map->name); ++ return -EINVAL; ++ } ++ if (map->spin_lock_off != val + reg->off) { ++ verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock'\n", ++ val + reg->off); ++ return -EINVAL; ++ } ++ if (is_lock) { ++ if (cur->active_spin_lock) { ++ verbose(env, ++ "Locking two bpf_spin_locks are not allowed\n"); ++ return -EINVAL; ++ } ++ cur->active_spin_lock = reg->id; ++ } else { ++ if (!cur->active_spin_lock) { ++ verbose(env, "bpf_spin_unlock without taking a lock\n"); ++ return -EINVAL; + } ++ if (cur->active_spin_lock != reg->id) { ++ verbose(env, "bpf_spin_unlock of different lock\n"); ++ return -EINVAL; ++ } ++ cur->active_spin_lock = 0; + } + return 0; + } + +-static int check_func_arg(struct verifier_env *env, u32 regno, +- enum bpf_arg_type arg_type, struct bpf_map **mapp) ++static bool arg_type_is_mem_ptr(enum bpf_arg_type type) ++{ ++ return type == ARG_PTR_TO_MEM || ++ type == ARG_PTR_TO_MEM_OR_NULL || ++ type == ARG_PTR_TO_UNINIT_MEM; ++} ++ ++static bool arg_type_is_mem_size(enum bpf_arg_type type) ++{ ++ return type == ARG_CONST_SIZE || ++ type == ARG_CONST_SIZE_OR_ZERO; ++} ++ ++static bool arg_type_is_int_ptr(enum bpf_arg_type type) ++{ ++ return type == ARG_PTR_TO_INT || ++ type == ARG_PTR_TO_LONG; ++} ++ ++static int int_ptr_type_to_size(enum bpf_arg_type type) ++{ ++ if (type == ARG_PTR_TO_INT) ++ return sizeof(u32); ++ else if (type == 
ARG_PTR_TO_LONG) ++ return sizeof(u64); ++ ++ return -EINVAL; ++} ++ ++static int check_func_arg(struct bpf_verifier_env *env, u32 regno, ++ enum bpf_arg_type arg_type, ++ struct bpf_call_arg_meta *meta) + { +- struct reg_state *reg = env->cur_state.regs + regno; +- enum bpf_reg_type expected_type; ++ struct bpf_reg_state *regs = cur_regs(env), *reg = ®s[regno]; ++ enum bpf_reg_type expected_type, type = reg->type; + int err = 0; + + if (arg_type == ARG_DONTCARE) + return 0; + +- if (reg->type == NOT_INIT) { +- verbose("R%d !read_ok\n", regno); +- return -EACCES; +- } ++ err = check_reg_arg(env, regno, SRC_OP); ++ if (err) ++ return err; + + if (arg_type == ARG_ANYTHING) { + if (is_pointer_value(env, regno)) { +- verbose("R%d leaks addr into helper function\n", regno); ++ verbose(env, "R%d leaks addr into helper function\n", ++ regno); + return -EACCES; + } + return 0; + } + +- if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY || +- arg_type == ARG_PTR_TO_MAP_VALUE) { ++ if (type_is_pkt_pointer(type) && ++ !may_access_direct_pkt_data(env, meta, BPF_READ)) { ++ verbose(env, "helper access to the packet is not allowed\n"); ++ return -EACCES; ++ } ++ ++ if (arg_type == ARG_PTR_TO_MAP_KEY || ++ arg_type == ARG_PTR_TO_MAP_VALUE || ++ arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE || ++ arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) { + expected_type = PTR_TO_STACK; +- } else if (arg_type == ARG_CONST_STACK_SIZE) { +- expected_type = CONST_IMM; ++ if (register_is_null(reg) && ++ arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL) ++ /* final test in check_stack_boundary() */; ++ else if (!type_is_pkt_pointer(type) && ++ type != PTR_TO_MAP_VALUE && ++ type != expected_type) ++ goto err_type; ++ } else if (arg_type == ARG_CONST_SIZE || ++ arg_type == ARG_CONST_SIZE_OR_ZERO) { ++ expected_type = SCALAR_VALUE; ++ if (type != expected_type) ++ goto err_type; + } else if (arg_type == ARG_CONST_MAP_PTR) { + expected_type = CONST_PTR_TO_MAP; ++ if (type != expected_type) ++ goto 
err_type; + } else if (arg_type == ARG_PTR_TO_CTX) { + expected_type = PTR_TO_CTX; ++ if (type != expected_type) ++ goto err_type; ++ err = check_ctx_reg(env, reg, regno); ++ if (err < 0) ++ return err; ++ } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) { ++ expected_type = PTR_TO_SOCK_COMMON; ++ /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */ ++ if (!type_is_sk_pointer(type)) ++ goto err_type; ++ if (reg->ref_obj_id) { ++ if (meta->ref_obj_id) { ++ verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n", ++ regno, reg->ref_obj_id, ++ meta->ref_obj_id); ++ return -EFAULT; ++ } ++ meta->ref_obj_id = reg->ref_obj_id; ++ } ++ } else if (arg_type == ARG_PTR_TO_SOCKET) { ++ expected_type = PTR_TO_SOCKET; ++ if (type != expected_type) ++ goto err_type; ++ } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) { ++ if (meta->func_id == BPF_FUNC_spin_lock) { ++ if (process_spin_lock(env, regno, true)) ++ return -EACCES; ++ } else if (meta->func_id == BPF_FUNC_spin_unlock) { ++ if (process_spin_lock(env, regno, false)) ++ return -EACCES; ++ } else { ++ verbose(env, "verifier internal error\n"); ++ return -EFAULT; ++ } ++ } else if (arg_type_is_mem_ptr(arg_type)) { ++ expected_type = PTR_TO_STACK; ++ /* One exception here. In case function allows for NULL to be ++ * passed in as argument, it's a SCALAR_VALUE type. Final test ++ * happens during stack boundary checking. 
++ */ ++ if (register_is_null(reg) && ++ arg_type == ARG_PTR_TO_MEM_OR_NULL) ++ /* final test in check_stack_boundary() */; ++ else if (!type_is_pkt_pointer(type) && ++ type != PTR_TO_MAP_VALUE && ++ type != expected_type) ++ goto err_type; ++ meta->raw_mode = arg_type == ARG_PTR_TO_UNINIT_MEM; ++ } else if (arg_type_is_int_ptr(arg_type)) { ++ expected_type = PTR_TO_STACK; ++ if (!type_is_pkt_pointer(type) && ++ type != PTR_TO_MAP_VALUE && ++ type != expected_type) ++ goto err_type; + } else { +- verbose("unsupported arg_type %d\n", arg_type); ++ verbose(env, "unsupported arg_type %d\n", arg_type); + return -EFAULT; + } + +- if (reg->type != expected_type) { +- verbose("R%d type=%s expected=%s\n", regno, +- reg_type_str[reg->type], reg_type_str[expected_type]); +- return -EACCES; +- } +- + if (arg_type == ARG_CONST_MAP_PTR) { + /* bpf_map_xxx(map_ptr) call: remember that map_ptr */ +- *mapp = reg->map_ptr; +- ++ meta->map_ptr = reg->map_ptr; + } else if (arg_type == ARG_PTR_TO_MAP_KEY) { + /* bpf_map_xxx(..., map_ptr, ..., key) call: + * check that [key, key + map->key_size) are within + * stack limits and initialized + */ +- if (!*mapp) { ++ if (!meta->map_ptr) { + /* in function declaration map_ptr must come before + * map_key, so that it's verified and known before + * we have to check map_key here. 
Otherwise it means + * that kernel subsystem misconfigured verifier + */ +- verbose("invalid map_ptr to access map->key\n"); ++ verbose(env, "invalid map_ptr to access map->key\n"); + return -EACCES; + } +- err = check_stack_boundary(env, regno, (*mapp)->key_size); +- +- } else if (arg_type == ARG_PTR_TO_MAP_VALUE) { ++ err = check_helper_mem_access(env, regno, ++ meta->map_ptr->key_size, false, ++ NULL); ++ } else if (arg_type == ARG_PTR_TO_MAP_VALUE || ++ (arg_type == ARG_PTR_TO_MAP_VALUE_OR_NULL && ++ !register_is_null(reg)) || ++ arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE) { + /* bpf_map_xxx(..., map_ptr, ..., value) call: + * check [value, value + map->value_size) validity + */ +- if (!*mapp) { ++ if (!meta->map_ptr) { + /* kernel subsystem misconfigured verifier */ +- verbose("invalid map_ptr to access map->value\n"); ++ verbose(env, "invalid map_ptr to access map->value\n"); + return -EACCES; + } +- err = check_stack_boundary(env, regno, (*mapp)->value_size); ++ meta->raw_mode = (arg_type == ARG_PTR_TO_UNINIT_MAP_VALUE); ++ err = check_helper_mem_access(env, regno, ++ meta->map_ptr->value_size, false, ++ meta); ++ } else if (arg_type_is_mem_size(arg_type)) { ++ bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO); + +- } else if (arg_type == ARG_CONST_STACK_SIZE) { +- /* bpf_xxx(..., buf, len) call will access 'len' bytes +- * from stack pointer 'buf'. Check it +- * note: regno == len, regno - 1 == buf ++ /* remember the mem_size which may be used later ++ * to refine return values. + */ +- if (regno == 0) { +- /* kernel subsystem misconfigured verifier */ +- verbose("ARG_CONST_STACK_SIZE cannot be first argument\n"); ++ meta->msize_max_value = reg->umax_value; ++ ++ /* The register is SCALAR_VALUE; the access check ++ * happens using its boundaries. 
++ */ ++ if (!tnum_is_const(reg->var_off)) ++ /* For unprivileged variable accesses, disable raw ++ * mode so that the program is required to ++ * initialize all the memory that the helper could ++ * just partially fill up. ++ */ ++ meta = NULL; ++ ++ if (reg->smin_value < 0) { ++ verbose(env, "R%d min value is negative, either use unsigned or 'var &= const'\n", ++ regno); ++ return -EACCES; ++ } ++ ++ if (reg->umin_value == 0) { ++ err = check_helper_mem_access(env, regno - 1, 0, ++ zero_size_allowed, ++ meta); ++ if (err) ++ return err; ++ } ++ ++ if (reg->umax_value >= BPF_MAX_VAR_SIZ) { ++ verbose(env, "R%d unbounded memory access, use 'var &= const' or 'if (var < const)'\n", ++ regno); + return -EACCES; + } +- err = check_stack_boundary(env, regno - 1, reg->imm); ++ err = check_helper_mem_access(env, regno - 1, ++ reg->umax_value, ++ zero_size_allowed, meta); ++ if (!err) ++ err = mark_chain_precision(env, regno); ++ } else if (arg_type_is_int_ptr(arg_type)) { ++ int size = int_ptr_type_to_size(arg_type); ++ ++ err = check_helper_mem_access(env, regno, size, false, meta); ++ if (err) ++ return err; ++ err = check_ptr_alignment(env, reg, 0, size, true); + } + + return err; ++err_type: ++ verbose(env, "R%d type=%s expected=%s\n", regno, ++ reg_type_str[type], reg_type_str[expected_type]); ++ return -EACCES; + } + +-static int check_map_func_compatibility(struct bpf_map *map, int func_id) ++static int check_map_func_compatibility(struct bpf_verifier_env *env, ++ struct bpf_map *map, int func_id) + { + if (!map) + return 0; +@@ -911,7 +3474,74 @@ static int check_map_func_compatibility( + break; + case BPF_MAP_TYPE_PERF_EVENT_ARRAY: + if (func_id != BPF_FUNC_perf_event_read && +- func_id != BPF_FUNC_perf_event_output) ++ func_id != BPF_FUNC_perf_event_output && ++ func_id != BPF_FUNC_perf_event_read_value) ++ goto error; ++ break; ++ case BPF_MAP_TYPE_STACK_TRACE: ++ if (func_id != BPF_FUNC_get_stackid) ++ goto error; ++ break; ++ case BPF_MAP_TYPE_CGROUP_ARRAY: 
++ if (func_id != BPF_FUNC_skb_under_cgroup && ++ func_id != BPF_FUNC_current_task_under_cgroup) ++ goto error; ++ break; ++ case BPF_MAP_TYPE_CGROUP_STORAGE: ++ if (func_id != BPF_FUNC_get_local_storage) ++ goto error; ++ break; ++ case BPF_MAP_TYPE_DEVMAP: ++ case BPF_MAP_TYPE_DEVMAP_HASH: ++ if (func_id != BPF_FUNC_redirect_map && ++ func_id != BPF_FUNC_map_lookup_elem) ++ goto error; ++ break; ++ /* Restrict bpf side of cpumap and xskmap, open when use-cases ++ * appear. ++ */ ++ case BPF_MAP_TYPE_CPUMAP: ++ if (func_id != BPF_FUNC_redirect_map) ++ goto error; ++ break; ++ case BPF_MAP_TYPE_XSKMAP: ++ if (func_id != BPF_FUNC_redirect_map && ++ func_id != BPF_FUNC_map_lookup_elem) ++ goto error; ++ break; ++ case BPF_MAP_TYPE_ARRAY_OF_MAPS: ++ case BPF_MAP_TYPE_HASH_OF_MAPS: ++ if (func_id != BPF_FUNC_map_lookup_elem) ++ goto error; ++ break; ++ case BPF_MAP_TYPE_SOCKMAP: ++ if (func_id != BPF_FUNC_sk_redirect_map && ++ func_id != BPF_FUNC_sock_map_update && ++ func_id != BPF_FUNC_map_delete_elem && ++ func_id != BPF_FUNC_msg_redirect_map) ++ goto error; ++ break; ++ case BPF_MAP_TYPE_SOCKHASH: ++ if (func_id != BPF_FUNC_sk_redirect_hash && ++ func_id != BPF_FUNC_sock_hash_update && ++ func_id != BPF_FUNC_map_delete_elem && ++ func_id != BPF_FUNC_msg_redirect_hash) ++ goto error; ++ break; ++ case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY: ++ if (func_id != BPF_FUNC_sk_select_reuseport) ++ goto error; ++ break; ++ case BPF_MAP_TYPE_QUEUE: ++ case BPF_MAP_TYPE_STACK: ++ if (func_id != BPF_FUNC_map_peek_elem && ++ func_id != BPF_FUNC_map_pop_elem && ++ func_id != BPF_FUNC_map_push_elem) ++ goto error; ++ break; ++ case BPF_MAP_TYPE_SK_STORAGE: ++ if (func_id != BPF_FUNC_sk_storage_get && ++ func_id != BPF_FUNC_sk_storage_delete) + goto error; + break; + default: +@@ -923,109 +3553,1579 @@ static int check_map_func_compatibility( + case BPF_FUNC_tail_call: + if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) + goto error; ++ if (env->subprog_cnt > 1) { ++ verbose(env, 
"tail_calls are not allowed in programs with bpf-to-bpf calls\n"); ++ return -EINVAL; ++ } + break; + case BPF_FUNC_perf_event_read: + case BPF_FUNC_perf_event_output: ++ case BPF_FUNC_perf_event_read_value: + if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) + goto error; + break; ++ case BPF_FUNC_get_stackid: ++ if (map->map_type != BPF_MAP_TYPE_STACK_TRACE) ++ goto error; ++ break; ++ case BPF_FUNC_current_task_under_cgroup: ++ case BPF_FUNC_skb_under_cgroup: ++ if (map->map_type != BPF_MAP_TYPE_CGROUP_ARRAY) ++ goto error; ++ break; ++ case BPF_FUNC_redirect_map: ++ if (map->map_type != BPF_MAP_TYPE_DEVMAP && ++ map->map_type != BPF_MAP_TYPE_DEVMAP_HASH && ++ map->map_type != BPF_MAP_TYPE_CPUMAP && ++ map->map_type != BPF_MAP_TYPE_XSKMAP) ++ goto error; ++ break; ++ case BPF_FUNC_sk_redirect_map: ++ case BPF_FUNC_msg_redirect_map: ++ case BPF_FUNC_sock_map_update: ++ if (map->map_type != BPF_MAP_TYPE_SOCKMAP) ++ goto error; ++ break; ++ case BPF_FUNC_sk_redirect_hash: ++ case BPF_FUNC_msg_redirect_hash: ++ case BPF_FUNC_sock_hash_update: ++ if (map->map_type != BPF_MAP_TYPE_SOCKHASH) ++ goto error; ++ break; ++ case BPF_FUNC_get_local_storage: ++ if (map->map_type != BPF_MAP_TYPE_CGROUP_STORAGE && ++ map->map_type != BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) ++ goto error; ++ break; ++ case BPF_FUNC_sk_select_reuseport: ++ if (map->map_type != BPF_MAP_TYPE_REUSEPORT_SOCKARRAY) ++ goto error; ++ break; ++ case BPF_FUNC_map_peek_elem: ++ case BPF_FUNC_map_pop_elem: ++ case BPF_FUNC_map_push_elem: ++ if (map->map_type != BPF_MAP_TYPE_QUEUE && ++ map->map_type != BPF_MAP_TYPE_STACK) ++ goto error; ++ break; ++ case BPF_FUNC_sk_storage_get: ++ case BPF_FUNC_sk_storage_delete: ++ if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) ++ goto error; ++ break; + default: + break; + } + + return 0; + error: +- verbose("cannot pass map_type %d into func %d\n", +- map->map_type, func_id); ++ verbose(env, "cannot pass map_type %d into func %s#%d\n", ++ map->map_type, 
func_id_name(func_id), func_id); + return -EINVAL; + } + +-static int check_call(struct verifier_env *env, int func_id) ++static bool check_raw_mode_ok(const struct bpf_func_proto *fn) ++{ ++ int count = 0; ++ ++ if (fn->arg1_type == ARG_PTR_TO_UNINIT_MEM) ++ count++; ++ if (fn->arg2_type == ARG_PTR_TO_UNINIT_MEM) ++ count++; ++ if (fn->arg3_type == ARG_PTR_TO_UNINIT_MEM) ++ count++; ++ if (fn->arg4_type == ARG_PTR_TO_UNINIT_MEM) ++ count++; ++ if (fn->arg5_type == ARG_PTR_TO_UNINIT_MEM) ++ count++; ++ ++ /* We only support one arg being in raw mode at the moment, ++ * which is sufficient for the helper functions we have ++ * right now. ++ */ ++ return count <= 1; ++} ++ ++static bool check_args_pair_invalid(enum bpf_arg_type arg_curr, ++ enum bpf_arg_type arg_next) ++{ ++ return (arg_type_is_mem_ptr(arg_curr) && ++ !arg_type_is_mem_size(arg_next)) || ++ (!arg_type_is_mem_ptr(arg_curr) && ++ arg_type_is_mem_size(arg_next)); ++} ++ ++static bool check_arg_pair_ok(const struct bpf_func_proto *fn) ++{ ++ /* bpf_xxx(..., buf, len) call will access 'len' ++ * bytes from memory 'buf'. Both arg types need ++ * to be paired, so make sure there's no buggy ++ * helper function specification. 
++ */ ++ if (arg_type_is_mem_size(fn->arg1_type) || ++ arg_type_is_mem_ptr(fn->arg5_type) || ++ check_args_pair_invalid(fn->arg1_type, fn->arg2_type) || ++ check_args_pair_invalid(fn->arg2_type, fn->arg3_type) || ++ check_args_pair_invalid(fn->arg3_type, fn->arg4_type) || ++ check_args_pair_invalid(fn->arg4_type, fn->arg5_type)) ++ return false; ++ ++ return true; ++} ++ ++static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id) ++{ ++ int count = 0; ++ ++ if (arg_type_may_be_refcounted(fn->arg1_type)) ++ count++; ++ if (arg_type_may_be_refcounted(fn->arg2_type)) ++ count++; ++ if (arg_type_may_be_refcounted(fn->arg3_type)) ++ count++; ++ if (arg_type_may_be_refcounted(fn->arg4_type)) ++ count++; ++ if (arg_type_may_be_refcounted(fn->arg5_type)) ++ count++; ++ ++ /* A reference acquiring function cannot acquire ++ * another refcounted ptr. ++ */ ++ if (is_acquire_function(func_id) && count) ++ return false; ++ ++ /* We only support one arg being unreferenced at the moment, ++ * which is sufficient for the helper functions we have right now. ++ */ ++ return count <= 1; ++} ++ ++static int check_func_proto(const struct bpf_func_proto *fn, int func_id) ++{ ++ return check_raw_mode_ok(fn) && ++ check_arg_pair_ok(fn) && ++ check_refcount_ok(fn, func_id) ? 0 : -EINVAL; ++} ++ ++/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] ++ * are now invalid, so turn them into unknown SCALAR_VALUE. 
++ */ ++static void __clear_all_pkt_pointers(struct bpf_verifier_env *env, ++ struct bpf_func_state *state) ++{ ++ struct bpf_reg_state *regs = state->regs, *reg; ++ int i; ++ ++ for (i = 0; i < MAX_BPF_REG; i++) ++ if (reg_is_pkt_pointer_any(®s[i])) ++ mark_reg_unknown(env, regs, i); ++ ++ bpf_for_each_spilled_reg(i, state, reg) { ++ if (!reg) ++ continue; ++ if (reg_is_pkt_pointer_any(reg)) ++ __mark_reg_unknown(env, reg); ++ } ++} ++ ++static void clear_all_pkt_pointers(struct bpf_verifier_env *env) ++{ ++ struct bpf_verifier_state *vstate = env->cur_state; ++ int i; ++ ++ for (i = 0; i <= vstate->curframe; i++) ++ __clear_all_pkt_pointers(env, vstate->frame[i]); ++} ++ ++static void release_reg_references(struct bpf_verifier_env *env, ++ struct bpf_func_state *state, ++ int ref_obj_id) ++{ ++ struct bpf_reg_state *regs = state->regs, *reg; ++ int i; ++ ++ for (i = 0; i < MAX_BPF_REG; i++) ++ if (regs[i].ref_obj_id == ref_obj_id) ++ mark_reg_unknown(env, regs, i); ++ ++ bpf_for_each_spilled_reg(i, state, reg) { ++ if (!reg) ++ continue; ++ if (reg->ref_obj_id == ref_obj_id) ++ __mark_reg_unknown(env, reg); ++ } ++} ++ ++/* The pointer with the specified id has released its reference to kernel ++ * resources. Identify all copies of the same pointer and clear the reference. 
++ */ ++static int release_reference(struct bpf_verifier_env *env, ++ int ref_obj_id) ++{ ++ struct bpf_verifier_state *vstate = env->cur_state; ++ int err; ++ int i; ++ ++ err = release_reference_state(cur_func(env), ref_obj_id); ++ if (err) ++ return err; ++ ++ for (i = 0; i <= vstate->curframe; i++) ++ release_reg_references(env, vstate->frame[i], ref_obj_id); ++ ++ return 0; ++} ++ ++static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, ++ int *insn_idx) ++{ ++ struct bpf_verifier_state *state = env->cur_state; ++ struct bpf_func_state *caller, *callee; ++ int i, err, subprog, target_insn; ++ ++ if (state->curframe + 1 >= MAX_CALL_FRAMES) { ++ verbose(env, "the call stack of %d frames is too deep\n", ++ state->curframe + 2); ++ return -E2BIG; ++ } ++ ++ target_insn = *insn_idx + insn->imm; ++ subprog = find_subprog(env, target_insn + 1); ++ if (subprog < 0) { ++ verbose(env, "verifier bug. No program starts at insn %d\n", ++ target_insn + 1); ++ return -EFAULT; ++ } ++ ++ caller = state->frame[state->curframe]; ++ if (state->frame[state->curframe + 1]) { ++ verbose(env, "verifier bug. Frame %d already allocated\n", ++ state->curframe + 1); ++ return -EFAULT; ++ } ++ ++ callee = kzalloc(sizeof(*callee), GFP_KERNEL); ++ if (!callee) ++ return -ENOMEM; ++ state->frame[state->curframe + 1] = callee; ++ ++ /* callee cannot access r0, r6 - r9 for reading and has to write ++ * into its own stack before reading from it. ++ * callee can read/write into caller's stack ++ */ ++ init_func_state(env, callee, ++ /* remember the callsite, it will be used by bpf_exit */ ++ *insn_idx /* callsite */, ++ state->curframe + 1 /* frameno within this callchain */, ++ subprog /* subprog number within this prog */); ++ ++ /* Transfer references to the callee */ ++ err = transfer_reference_state(callee, caller); ++ if (err) ++ return err; ++ ++ /* copy r1 - r5 args that callee can access. 
The copy includes parent ++ * pointers, which connects us up to the liveness chain ++ */ ++ for (i = BPF_REG_1; i <= BPF_REG_5; i++) ++ callee->regs[i] = caller->regs[i]; ++ ++ /* after the call registers r0 - r5 were scratched */ ++ for (i = 0; i < CALLER_SAVED_REGS; i++) { ++ mark_reg_not_init(env, caller->regs, caller_saved[i]); ++ check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); ++ } ++ ++ /* only increment it after check_reg_arg() finished */ ++ state->curframe++; ++ ++ /* and go analyze first insn of the callee */ ++ *insn_idx = target_insn; ++ ++ if (env->log.level & BPF_LOG_LEVEL) { ++ verbose(env, "caller:\n"); ++ print_verifier_state(env, caller); ++ verbose(env, "callee:\n"); ++ print_verifier_state(env, callee); ++ } ++ return 0; ++} ++ ++static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx) ++{ ++ struct bpf_verifier_state *state = env->cur_state; ++ struct bpf_func_state *caller, *callee; ++ struct bpf_reg_state *r0; ++ int err; ++ ++ callee = state->frame[state->curframe]; ++ r0 = &callee->regs[BPF_REG_0]; ++ if (r0->type == PTR_TO_STACK) { ++ /* technically it's ok to return caller's stack pointer ++ * (or caller's caller's pointer) back to the caller, ++ * since these pointers are valid. 
Only current stack ++ * pointer will be invalid as soon as function exits, ++ * but let's be conservative ++ */ ++ verbose(env, "cannot return stack pointer to the caller\n"); ++ return -EINVAL; ++ } ++ ++ state->curframe--; ++ caller = state->frame[state->curframe]; ++ /* return to the caller whatever r0 had in the callee */ ++ caller->regs[BPF_REG_0] = *r0; ++ ++ /* Transfer references to the caller */ ++ err = transfer_reference_state(caller, callee); ++ if (err) ++ return err; ++ ++ *insn_idx = callee->callsite + 1; ++ if (env->log.level & BPF_LOG_LEVEL) { ++ verbose(env, "returning from callee:\n"); ++ print_verifier_state(env, callee); ++ verbose(env, "to caller at %d:\n", *insn_idx); ++ print_verifier_state(env, caller); ++ } ++ /* clear everything in the callee */ ++ free_func_state(callee); ++ state->frame[state->curframe + 1] = NULL; ++ return 0; ++} ++ ++static int do_refine_retval_range(struct bpf_verifier_env *env, ++ struct bpf_reg_state *regs, int ret_type, ++ int func_id, struct bpf_call_arg_meta *meta) ++{ ++ struct bpf_reg_state *ret_reg = ®s[BPF_REG_0]; ++ struct bpf_reg_state tmp_reg = *ret_reg; ++ bool ret; ++ ++ if (ret_type != RET_INTEGER || ++ (func_id != BPF_FUNC_get_stack && ++ func_id != BPF_FUNC_probe_read_str)) ++ return 0; ++ ++ /* Error case where ret is in interval [S32MIN, -1]. */ ++ ret_reg->smin_value = S32_MIN; ++ ret_reg->smax_value = -1; ++ ++ __reg_deduce_bounds(ret_reg); ++ __reg_bound_offset(ret_reg); ++ __update_reg_bounds(ret_reg); ++ ++ ret = push_stack(env, env->insn_idx + 1, env->insn_idx, false); ++ if (!ret) ++ return -EFAULT; ++ ++ *ret_reg = tmp_reg; ++ ++ /* Success case where ret is in range [0, msize_max_value]. 
*/ ++ ret_reg->smin_value = 0; ++ ret_reg->smax_value = meta->msize_max_value; ++ ret_reg->umin_value = ret_reg->smin_value; ++ ret_reg->umax_value = ret_reg->smax_value; ++ ++ __reg_deduce_bounds(ret_reg); ++ __reg_bound_offset(ret_reg); ++ __update_reg_bounds(ret_reg); ++ ++ return 0; ++} ++ ++static int ++record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta, ++ int func_id, int insn_idx) ++{ ++ struct bpf_insn_aux_data *aux = &env->insn_aux_data[insn_idx]; ++ struct bpf_map *map = meta->map_ptr; ++ ++ if (func_id != BPF_FUNC_tail_call && ++ func_id != BPF_FUNC_map_lookup_elem && ++ func_id != BPF_FUNC_map_update_elem && ++ func_id != BPF_FUNC_map_delete_elem && ++ func_id != BPF_FUNC_map_push_elem && ++ func_id != BPF_FUNC_map_pop_elem && ++ func_id != BPF_FUNC_map_peek_elem) ++ return 0; ++ ++ if (map == NULL) { ++ verbose(env, "kernel subsystem misconfigured verifier\n"); ++ return -EINVAL; ++ } ++ ++ /* In case of read-only, some additional restrictions ++ * need to be applied in order to prevent altering the ++ * state of the map from program side. ++ */ ++ if ((map->map_flags & BPF_F_RDONLY_PROG) && ++ (func_id == BPF_FUNC_map_delete_elem || ++ func_id == BPF_FUNC_map_update_elem || ++ func_id == BPF_FUNC_map_push_elem || ++ func_id == BPF_FUNC_map_pop_elem)) { ++ verbose(env, "write into map forbidden\n"); ++ return -EACCES; ++ } ++ ++ if (!BPF_MAP_PTR(aux->map_state)) ++ bpf_map_ptr_store(aux, meta->map_ptr, ++ meta->map_ptr->unpriv_array); ++ else if (BPF_MAP_PTR(aux->map_state) != meta->map_ptr) ++ bpf_map_ptr_store(aux, BPF_MAP_PTR_POISON, ++ meta->map_ptr->unpriv_array); ++ return 0; ++} ++ ++static int check_reference_leak(struct bpf_verifier_env *env) ++{ ++ struct bpf_func_state *state = cur_func(env); ++ int i; ++ ++ for (i = 0; i < state->acquired_refs; i++) { ++ verbose(env, "Unreleased reference id=%d alloc_insn=%d\n", ++ state->refs[i].id, state->refs[i].insn_idx); ++ } ++ return state->acquired_refs ? 
-EINVAL : 0; ++} ++ ++static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) + { +- struct verifier_state *state = &env->cur_state; + const struct bpf_func_proto *fn = NULL; +- struct reg_state *regs = state->regs; +- struct bpf_map *map = NULL; +- struct reg_state *reg; ++ struct bpf_reg_state *regs; ++ struct bpf_call_arg_meta meta; ++ bool changes_data; + int i, err; + + /* find function prototype */ + if (func_id < 0 || func_id >= __BPF_FUNC_MAX_ID) { +- verbose("invalid func %d\n", func_id); ++ verbose(env, "invalid func %s#%d\n", func_id_name(func_id), ++ func_id); + return -EINVAL; + } + +- if (env->prog->aux->ops->get_func_proto) +- fn = env->prog->aux->ops->get_func_proto(func_id); +- ++ if (env->ops->get_func_proto) ++ fn = env->ops->get_func_proto(func_id, env->prog); + if (!fn) { +- verbose("unknown func %d\n", func_id); ++ verbose(env, "unknown func %s#%d\n", func_id_name(func_id), ++ func_id); + return -EINVAL; + } + + /* eBPF programs must be GPL compatible to use GPL-ed functions */ + if (!env->prog->gpl_compatible && fn->gpl_only) { +- verbose("cannot call GPL only function from proprietary program\n"); ++ verbose(env, "cannot call GPL-restricted function from non-GPL compatible program\n"); ++ return -EINVAL; ++ } ++ ++ /* With LD_ABS/IND some JITs save/restore skb from r1. 
*/ ++ changes_data = bpf_helper_changes_pkt_data(fn->func); ++ if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) { ++ verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n", ++ func_id_name(func_id), func_id); + return -EINVAL; + } + ++ memset(&meta, 0, sizeof(meta)); ++ meta.pkt_access = fn->pkt_access; ++ ++ err = check_func_proto(fn, func_id); ++ if (err) { ++ verbose(env, "kernel subsystem misconfigured func %s#%d\n", ++ func_id_name(func_id), func_id); ++ return err; ++ } ++ ++ meta.func_id = func_id; + /* check args */ +- err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &map); ++ err = check_func_arg(env, BPF_REG_1, fn->arg1_type, &meta); + if (err) + return err; +- err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &map); ++ err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta); + if (err) + return err; +- err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &map); ++ err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta); + if (err) + return err; +- err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &map); ++ err = check_func_arg(env, BPF_REG_4, fn->arg4_type, &meta); + if (err) + return err; +- err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &map); ++ err = check_func_arg(env, BPF_REG_5, fn->arg5_type, &meta); + if (err) + return err; + ++ err = record_func_map(env, &meta, func_id, insn_idx); ++ if (err) ++ return err; ++ ++ /* Mark slots with STACK_MISC in case of raw mode, stack offset ++ * is inferred from register state. 
++ */ ++ for (i = 0; i < meta.access_size; i++) { ++ err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, ++ BPF_WRITE, -1, false); ++ if (err) ++ return err; ++ } ++ ++ if (func_id == BPF_FUNC_tail_call) { ++ err = check_reference_leak(env); ++ if (err) { ++ verbose(env, "tail_call would lead to reference leak\n"); ++ return err; ++ } ++ } else if (is_release_function(func_id)) { ++ err = release_reference(env, meta.ref_obj_id); ++ if (err) { ++ verbose(env, "func %s#%d reference has not been acquired before\n", ++ func_id_name(func_id), func_id); ++ return err; ++ } ++ } ++ ++ regs = cur_regs(env); ++ ++ /* check that flags argument in get_local_storage(map, flags) is 0, ++ * this is required because get_local_storage() can't return an error. ++ */ ++ if (func_id == BPF_FUNC_get_local_storage && ++ !register_is_null(®s[BPF_REG_2])) { ++ verbose(env, "get_local_storage() doesn't support non-zero flags\n"); ++ return -EINVAL; ++ } ++ + /* reset caller saved regs */ + for (i = 0; i < CALLER_SAVED_REGS; i++) { +- reg = regs + caller_saved[i]; +- reg->type = NOT_INIT; +- reg->imm = 0; ++ mark_reg_not_init(env, regs, caller_saved[i]); ++ check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); + } + +- /* update return register */ ++ /* helper call returns 64-bit value. 
*/ ++ regs[BPF_REG_0].subreg_def = DEF_NOT_SUBREG; ++ ++ /* update return register (already marked as written above) */ + if (fn->ret_type == RET_INTEGER) { +- regs[BPF_REG_0].type = UNKNOWN_VALUE; ++ /* sets type to SCALAR_VALUE */ ++ mark_reg_unknown(env, regs, BPF_REG_0); + } else if (fn->ret_type == RET_VOID) { + regs[BPF_REG_0].type = NOT_INIT; +- } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL) { +- regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; ++ } else if (fn->ret_type == RET_PTR_TO_MAP_VALUE_OR_NULL || ++ fn->ret_type == RET_PTR_TO_MAP_VALUE) { ++ /* There is no offset yet applied, variable or fixed */ ++ mark_reg_known_zero(env, regs, BPF_REG_0); + /* remember map_ptr, so that check_map_access() + * can check 'value_size' boundary of memory access + * to map element returned from bpf_map_lookup_elem() + */ +- if (map == NULL) { +- verbose("kernel subsystem misconfigured verifier\n"); ++ if (meta.map_ptr == NULL) { ++ verbose(env, ++ "kernel subsystem misconfigured verifier\n"); + return -EINVAL; + } +- regs[BPF_REG_0].map_ptr = map; ++ regs[BPF_REG_0].map_ptr = meta.map_ptr; ++ if (fn->ret_type == RET_PTR_TO_MAP_VALUE) { ++ regs[BPF_REG_0].type = PTR_TO_MAP_VALUE; ++ if (map_value_has_spin_lock(meta.map_ptr)) ++ regs[BPF_REG_0].id = ++env->id_gen; ++ } else { ++ regs[BPF_REG_0].type = PTR_TO_MAP_VALUE_OR_NULL; ++ regs[BPF_REG_0].id = ++env->id_gen; ++ } ++ } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { ++ mark_reg_known_zero(env, regs, BPF_REG_0); ++ regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; ++ regs[BPF_REG_0].id = ++env->id_gen; ++ } else if (fn->ret_type == RET_PTR_TO_SOCK_COMMON_OR_NULL) { ++ mark_reg_known_zero(env, regs, BPF_REG_0); ++ regs[BPF_REG_0].type = PTR_TO_SOCK_COMMON_OR_NULL; ++ regs[BPF_REG_0].id = ++env->id_gen; ++ } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { ++ mark_reg_known_zero(env, regs, BPF_REG_0); ++ regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; ++ regs[BPF_REG_0].id = ++env->id_gen; + } else 
{ +- verbose("unknown return type %d of func %d\n", +- fn->ret_type, func_id); ++ verbose(env, "unknown return type %d of func %s#%d\n", ++ fn->ret_type, func_id_name(func_id), func_id); + return -EINVAL; + } + +- err = check_map_func_compatibility(map, func_id); ++ if (is_ptr_cast_function(func_id)) { ++ /* For release_reference() */ ++ regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id; ++ } else if (is_acquire_function(func_id)) { ++ int id = acquire_reference_state(env, insn_idx); ++ ++ if (id < 0) ++ return id; ++ /* For mark_ptr_or_null_reg() */ ++ regs[BPF_REG_0].id = id; ++ /* For release_reference() */ ++ regs[BPF_REG_0].ref_obj_id = id; ++ } ++ ++ err = do_refine_retval_range(env, regs, fn->ret_type, func_id, &meta); ++ if (err) ++ return err; ++ ++ err = check_map_func_compatibility(env, meta.map_ptr, func_id); + if (err) + return err; + ++ if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) { ++ const char *err_str; ++ ++#ifdef CONFIG_PERF_EVENTS ++ err = get_callchain_buffers(sysctl_perf_event_max_stack); ++ err_str = "cannot get callchain buffer for func %s#%d\n"; ++#else ++ err = -ENOTSUPP; ++ err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n"; ++#endif ++ if (err) { ++ verbose(env, err_str, func_id_name(func_id), func_id); ++ return err; ++ } ++ ++ env->prog->has_callchain_buf = true; ++ } ++ ++ if (changes_data) ++ clear_all_pkt_pointers(env); + return 0; + } + ++static bool signed_add_overflows(s64 a, s64 b) ++{ ++ /* Do the add in u64, where overflow is well-defined */ ++ s64 res = (s64)((u64)a + (u64)b); ++ ++ if (b < 0) ++ return res > a; ++ return res < a; ++} ++ ++static bool signed_sub_overflows(s64 a, s64 b) ++{ ++ /* Do the sub in u64, where overflow is well-defined */ ++ s64 res = (s64)((u64)a - (u64)b); ++ ++ if (b < 0) ++ return res < a; ++ return res > a; ++} ++ ++static bool check_reg_sane_offset(struct bpf_verifier_env *env, ++ const struct bpf_reg_state *reg, ++ enum bpf_reg_type type) ++{ ++ bool known = 
tnum_is_const(reg->var_off); ++ s64 val = reg->var_off.value; ++ s64 smin = reg->smin_value; ++ ++ if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) { ++ verbose(env, "math between %s pointer and %lld is not allowed\n", ++ reg_type_str[type], val); ++ return false; ++ } ++ ++ if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) { ++ verbose(env, "%s pointer offset %d is not allowed\n", ++ reg_type_str[type], reg->off); ++ return false; ++ } ++ ++ if (smin == S64_MIN) { ++ verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n", ++ reg_type_str[type]); ++ return false; ++ } ++ ++ if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) { ++ verbose(env, "value %lld makes %s pointer be out of bounds\n", ++ smin, reg_type_str[type]); ++ return false; ++ } ++ ++ return true; ++} ++ ++static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env) ++{ ++ return &env->insn_aux_data[env->insn_idx]; ++} ++ ++enum { ++ REASON_BOUNDS = -1, ++ REASON_TYPE = -2, ++ REASON_PATHS = -3, ++ REASON_LIMIT = -4, ++ REASON_STACK = -5, ++}; ++ ++static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, ++ u32 *alu_limit, bool mask_to_left) ++{ ++ u32 max = 0, ptr_limit = 0; ++ ++ switch (ptr_reg->type) { ++ case PTR_TO_STACK: ++ /* Offset 0 is out-of-bounds, but acceptable start for the ++ * left direction, see BPF_REG_FP. Also, unknown scalar ++ * offset where we would need to deal with min/max bounds is ++ * currently prohibited for unprivileged. ++ */ ++ max = MAX_BPF_STACK + mask_to_left; ++ ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off); ++ break; ++ case PTR_TO_MAP_VALUE: ++ max = ptr_reg->map_ptr->value_size; ++ ptr_limit = (mask_to_left ? 
++ ptr_reg->smin_value : ++ ptr_reg->umax_value) + ptr_reg->off; ++ break; ++ default: ++ return REASON_TYPE; ++ } ++ ++ if (ptr_limit >= max) ++ return REASON_LIMIT; ++ *alu_limit = ptr_limit; ++ return 0; ++} ++ ++static bool can_skip_alu_sanitation(const struct bpf_verifier_env *env, ++ const struct bpf_insn *insn) ++{ ++ return env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K; ++} ++ ++static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux, ++ u32 alu_state, u32 alu_limit) ++{ ++ /* If we arrived here from different branches with different ++ * state or limits to sanitize, then this won't work. ++ */ ++ if (aux->alu_state && ++ (aux->alu_state != alu_state || ++ aux->alu_limit != alu_limit)) ++ return REASON_PATHS; ++ ++ /* Corresponding fixup done in fixup_bpf_calls(). */ ++ aux->alu_state = alu_state; ++ aux->alu_limit = alu_limit; ++ return 0; ++} ++ ++static int sanitize_val_alu(struct bpf_verifier_env *env, ++ struct bpf_insn *insn) ++{ ++ struct bpf_insn_aux_data *aux = cur_aux(env); ++ ++ if (can_skip_alu_sanitation(env, insn)) ++ return 0; ++ ++ return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0); ++} ++ ++static bool sanitize_needed(u8 opcode) ++{ ++ return opcode == BPF_ADD || opcode == BPF_SUB; ++} ++ ++struct bpf_sanitize_info { ++ struct bpf_insn_aux_data aux; ++ bool mask_to_left; ++}; ++ ++static struct bpf_verifier_state * ++sanitize_speculative_path(struct bpf_verifier_env *env, ++ const struct bpf_insn *insn, ++ u32 next_idx, u32 curr_idx) ++{ ++ struct bpf_verifier_state *branch; ++ struct bpf_reg_state *regs; ++ ++ branch = push_stack(env, next_idx, curr_idx, true); ++ if (branch && insn) { ++ regs = branch->frame[branch->curframe]->regs; ++ if (BPF_SRC(insn->code) == BPF_K) { ++ mark_reg_unknown(env, regs, insn->dst_reg); ++ } else if (BPF_SRC(insn->code) == BPF_X) { ++ mark_reg_unknown(env, regs, insn->dst_reg); ++ mark_reg_unknown(env, regs, insn->src_reg); ++ } ++ } ++ return branch; ++} ++ ++static int 
sanitize_ptr_alu(struct bpf_verifier_env *env, ++ struct bpf_insn *insn, ++ const struct bpf_reg_state *ptr_reg, ++ const struct bpf_reg_state *off_reg, ++ struct bpf_reg_state *dst_reg, ++ struct bpf_sanitize_info *info, ++ const bool commit_window) ++{ ++ struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux; ++ struct bpf_verifier_state *vstate = env->cur_state; ++ bool off_is_imm = tnum_is_const(off_reg->var_off); ++ bool off_is_neg = off_reg->smin_value < 0; ++ bool ptr_is_dst_reg = ptr_reg == dst_reg; ++ u8 opcode = BPF_OP(insn->code); ++ u32 alu_state, alu_limit; ++ struct bpf_reg_state tmp; ++ bool ret; ++ int err; ++ ++ if (can_skip_alu_sanitation(env, insn)) ++ return 0; ++ ++ /* We already marked aux for masking from non-speculative ++ * paths, thus we got here in the first place. We only care ++ * to explore bad access from here. ++ */ ++ if (vstate->speculative) ++ goto do_sim; ++ ++ if (!commit_window) { ++ if (!tnum_is_const(off_reg->var_off) && ++ (off_reg->smin_value < 0) != (off_reg->smax_value < 0)) ++ return REASON_BOUNDS; ++ ++ info->mask_to_left = (opcode == BPF_ADD && off_is_neg) || ++ (opcode == BPF_SUB && !off_is_neg); ++ } ++ ++ err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left); ++ if (err < 0) ++ return err; ++ ++ if (commit_window) { ++ /* In commit phase we narrow the masking window based on ++ * the observed pointer move after the simulated operation. ++ */ ++ alu_state = info->aux.alu_state; ++ alu_limit = abs(info->aux.alu_limit - alu_limit); ++ } else { ++ alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0; ++ alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0; ++ alu_state |= ptr_is_dst_reg ? ++ BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; ++ } ++ ++ err = update_alu_sanitation_state(aux, alu_state, alu_limit); ++ if (err < 0) ++ return err; ++do_sim: ++ /* If we're in commit phase, we're done here given we already ++ * pushed the truncated dst_reg into the speculative verification ++ * stack. 
++ * ++ * Also, when register is a known constant, we rewrite register-based ++ * operation to immediate-based, and thus do not need masking (and as ++ * a consequence, do not need to simulate the zero-truncation either). ++ */ ++ if (commit_window || off_is_imm) ++ return 0; ++ ++ /* Simulate and find potential out-of-bounds access under ++ * speculative execution from truncation as a result of ++ * masking when off was not within expected range. If off ++ * sits in dst, then we temporarily need to move ptr there ++ * to simulate dst (== 0) +/-= ptr. Needed, for example, ++ * for cases where we use K-based arithmetic in one direction ++ * and truncated reg-based in the other in order to explore ++ * bad access. ++ */ ++ if (!ptr_is_dst_reg) { ++ tmp = *dst_reg; ++ *dst_reg = *ptr_reg; ++ } ++ ret = sanitize_speculative_path(env, NULL, env->insn_idx + 1, ++ env->insn_idx); ++ if (!ptr_is_dst_reg && ret) ++ *dst_reg = tmp; ++ return !ret ? REASON_STACK : 0; ++} ++ ++static void sanitize_mark_insn_seen(struct bpf_verifier_env *env) ++{ ++ struct bpf_verifier_state *vstate = env->cur_state; ++ ++ /* If we simulate paths under speculation, we don't update the ++ * insn as 'seen' such that when we verify unreachable paths in ++ * the non-speculative domain, sanitize_dead_code() can still ++ * rewrite/sanitize them. ++ */ ++ if (!vstate->speculative) ++ env->insn_aux_data[env->insn_idx].seen = true; ++} ++ ++static int sanitize_err(struct bpf_verifier_env *env, ++ const struct bpf_insn *insn, int reason, ++ const struct bpf_reg_state *off_reg, ++ const struct bpf_reg_state *dst_reg) ++{ ++ static const char *err = "pointer arithmetic with it prohibited for !root"; ++ const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub"; ++ u32 dst = insn->dst_reg, src = insn->src_reg; ++ ++ switch (reason) { ++ case REASON_BOUNDS: ++ verbose(env, "R%d has unknown scalar with mixed signed bounds, %s\n", ++ off_reg == dst_reg ? 
dst : src, err); ++ break; ++ case REASON_TYPE: ++ verbose(env, "R%d has pointer with unsupported alu operation, %s\n", ++ off_reg == dst_reg ? src : dst, err); ++ break; ++ case REASON_PATHS: ++ verbose(env, "R%d tried to %s from different maps, paths or scalars, %s\n", ++ dst, op, err); ++ break; ++ case REASON_LIMIT: ++ verbose(env, "R%d tried to %s beyond pointer bounds, %s\n", ++ dst, op, err); ++ break; ++ case REASON_STACK: ++ verbose(env, "R%d could not be pushed for speculative verification, %s\n", ++ dst, err); ++ break; ++ default: ++ verbose(env, "verifier internal error: unknown reason (%d)\n", ++ reason); ++ break; ++ } ++ ++ return -EACCES; ++} ++ ++static int sanitize_check_bounds(struct bpf_verifier_env *env, ++ const struct bpf_insn *insn, ++ const struct bpf_reg_state *dst_reg) ++{ ++ u32 dst = insn->dst_reg; ++ ++ /* For unprivileged we require that resulting offset must be in bounds ++ * in order to be able to sanitize access later on. ++ */ ++ if (env->allow_ptr_leaks) ++ return 0; ++ ++ switch (dst_reg->type) { ++ case PTR_TO_STACK: ++ if (check_stack_access(env, dst_reg, dst_reg->off + ++ dst_reg->var_off.value, 1)) { ++ verbose(env, "R%d stack pointer arithmetic goes out of range, " ++ "prohibited for !root\n", dst); ++ return -EACCES; ++ } ++ break; ++ case PTR_TO_MAP_VALUE: ++ if (check_map_access(env, dst, dst_reg->off, 1, false)) { ++ verbose(env, "R%d pointer arithmetic of map value goes out of range, " ++ "prohibited for !root\n", dst); ++ return -EACCES; ++ } ++ break; ++ default: ++ break; ++ } ++ ++ return 0; ++} ++ ++/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. ++ * Caller should also handle BPF_MOV case separately. ++ * If we return -EACCES, caller may want to try again treating pointer as a ++ * scalar. So we only emit a diagnostic if !env->allow_ptr_leaks. 
++ */ ++static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, ++ struct bpf_insn *insn, ++ const struct bpf_reg_state *ptr_reg, ++ const struct bpf_reg_state *off_reg) ++{ ++ struct bpf_verifier_state *vstate = env->cur_state; ++ struct bpf_func_state *state = vstate->frame[vstate->curframe]; ++ struct bpf_reg_state *regs = state->regs, *dst_reg; ++ bool known = tnum_is_const(off_reg->var_off); ++ s64 smin_val = off_reg->smin_value, smax_val = off_reg->smax_value, ++ smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value; ++ u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value, ++ umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value; ++ struct bpf_sanitize_info info = {}; ++ u8 opcode = BPF_OP(insn->code); ++ u32 dst = insn->dst_reg; ++ int ret; ++ ++ dst_reg = ®s[dst]; ++ ++ if ((known && (smin_val != smax_val || umin_val != umax_val)) || ++ smin_val > smax_val || umin_val > umax_val) { ++ /* Taint dst register if offset had invalid bounds derived from ++ * e.g. dead branches. 
++ */ ++ __mark_reg_unknown(env, dst_reg); ++ return 0; ++ } ++ ++ if (BPF_CLASS(insn->code) != BPF_ALU64) { ++ /* 32-bit ALU ops on pointers produce (meaningless) scalars */ ++ verbose(env, ++ "R%d 32-bit pointer arithmetic prohibited\n", ++ dst); ++ return -EACCES; ++ } ++ ++ switch (ptr_reg->type) { ++ case PTR_TO_MAP_VALUE_OR_NULL: ++ verbose(env, "R%d pointer arithmetic on %s prohibited, null-check it first\n", ++ dst, reg_type_str[ptr_reg->type]); ++ return -EACCES; ++ case CONST_PTR_TO_MAP: ++ /* smin_val represents the known value */ ++ if (known && smin_val == 0 && opcode == BPF_ADD) ++ break; ++ /* fall-through */ ++ case PTR_TO_PACKET_END: ++ case PTR_TO_SOCKET: ++ case PTR_TO_SOCKET_OR_NULL: ++ case PTR_TO_SOCK_COMMON: ++ case PTR_TO_SOCK_COMMON_OR_NULL: ++ case PTR_TO_TCP_SOCK: ++ case PTR_TO_TCP_SOCK_OR_NULL: ++ case PTR_TO_XDP_SOCK: ++ verbose(env, "R%d pointer arithmetic on %s prohibited\n", ++ dst, reg_type_str[ptr_reg->type]); ++ return -EACCES; ++ default: ++ break; ++ } ++ ++ /* In case of 'scalar += pointer', dst_reg inherits pointer type and id. ++ * The id may be overwritten later if we create a new variable offset. ++ */ ++ dst_reg->type = ptr_reg->type; ++ dst_reg->id = ptr_reg->id; ++ ++ if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) || ++ !check_reg_sane_offset(env, ptr_reg, ptr_reg->type)) ++ return -EINVAL; ++ ++ if (sanitize_needed(opcode)) { ++ ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg, ++ &info, false); ++ if (ret < 0) ++ return sanitize_err(env, insn, ret, off_reg, dst_reg); ++ } ++ ++ switch (opcode) { ++ case BPF_ADD: ++ /* We can take a fixed offset as long as it doesn't overflow ++ * the s32 'off' field ++ */ ++ if (known && (ptr_reg->off + smin_val == ++ (s64)(s32)(ptr_reg->off + smin_val))) { ++ /* pointer += K. 
Accumulate it into fixed offset */ ++ dst_reg->smin_value = smin_ptr; ++ dst_reg->smax_value = smax_ptr; ++ dst_reg->umin_value = umin_ptr; ++ dst_reg->umax_value = umax_ptr; ++ dst_reg->var_off = ptr_reg->var_off; ++ dst_reg->off = ptr_reg->off + smin_val; ++ dst_reg->raw = ptr_reg->raw; ++ break; ++ } ++ /* A new variable offset is created. Note that off_reg->off ++ * == 0, since it's a scalar. ++ * dst_reg gets the pointer type and since some positive ++ * integer value was added to the pointer, give it a new 'id' ++ * if it's a PTR_TO_PACKET. ++ * this creates a new 'base' pointer, off_reg (variable) gets ++ * added into the variable offset, and we copy the fixed offset ++ * from ptr_reg. ++ */ ++ if (signed_add_overflows(smin_ptr, smin_val) || ++ signed_add_overflows(smax_ptr, smax_val)) { ++ dst_reg->smin_value = S64_MIN; ++ dst_reg->smax_value = S64_MAX; ++ } else { ++ dst_reg->smin_value = smin_ptr + smin_val; ++ dst_reg->smax_value = smax_ptr + smax_val; ++ } ++ if (umin_ptr + umin_val < umin_ptr || ++ umax_ptr + umax_val < umax_ptr) { ++ dst_reg->umin_value = 0; ++ dst_reg->umax_value = U64_MAX; ++ } else { ++ dst_reg->umin_value = umin_ptr + umin_val; ++ dst_reg->umax_value = umax_ptr + umax_val; ++ } ++ dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); ++ dst_reg->off = ptr_reg->off; ++ dst_reg->raw = ptr_reg->raw; ++ if (reg_is_pkt_pointer(ptr_reg)) { ++ dst_reg->id = ++env->id_gen; ++ /* something was added to pkt_ptr, set range to zero */ ++ dst_reg->raw = 0; ++ } ++ break; ++ case BPF_SUB: ++ if (dst_reg == off_reg) { ++ /* scalar -= pointer. Creates an unknown scalar */ ++ verbose(env, "R%d tried to subtract pointer from scalar\n", ++ dst); ++ return -EACCES; ++ } ++ /* We don't allow subtraction from FP, because (according to ++ * test_verifier.c test "invalid fp arithmetic", JITs might not ++ * be able to deal with it. 
++ */ ++ if (ptr_reg->type == PTR_TO_STACK) { ++ verbose(env, "R%d subtraction from stack pointer prohibited\n", ++ dst); ++ return -EACCES; ++ } ++ if (known && (ptr_reg->off - smin_val == ++ (s64)(s32)(ptr_reg->off - smin_val))) { ++ /* pointer -= K. Subtract it from fixed offset */ ++ dst_reg->smin_value = smin_ptr; ++ dst_reg->smax_value = smax_ptr; ++ dst_reg->umin_value = umin_ptr; ++ dst_reg->umax_value = umax_ptr; ++ dst_reg->var_off = ptr_reg->var_off; ++ dst_reg->id = ptr_reg->id; ++ dst_reg->off = ptr_reg->off - smin_val; ++ dst_reg->raw = ptr_reg->raw; ++ break; ++ } ++ /* A new variable offset is created. If the subtrahend is known ++ * nonnegative, then any reg->range we had before is still good. ++ */ ++ if (signed_sub_overflows(smin_ptr, smax_val) || ++ signed_sub_overflows(smax_ptr, smin_val)) { ++ /* Overflow possible, we know nothing */ ++ dst_reg->smin_value = S64_MIN; ++ dst_reg->smax_value = S64_MAX; ++ } else { ++ dst_reg->smin_value = smin_ptr - smax_val; ++ dst_reg->smax_value = smax_ptr - smin_val; ++ } ++ if (umin_ptr < umax_val) { ++ /* Overflow possible, we know nothing */ ++ dst_reg->umin_value = 0; ++ dst_reg->umax_value = U64_MAX; ++ } else { ++ /* Cannot overflow (as long as bounds are consistent) */ ++ dst_reg->umin_value = umin_ptr - umax_val; ++ dst_reg->umax_value = umax_ptr - umin_val; ++ } ++ dst_reg->var_off = tnum_sub(ptr_reg->var_off, off_reg->var_off); ++ dst_reg->off = ptr_reg->off; ++ dst_reg->raw = ptr_reg->raw; ++ if (reg_is_pkt_pointer(ptr_reg)) { ++ dst_reg->id = ++env->id_gen; ++ /* something was added to pkt_ptr, set range to zero */ ++ if (smin_val < 0) ++ dst_reg->raw = 0; ++ } ++ break; ++ case BPF_AND: ++ case BPF_OR: ++ case BPF_XOR: ++ /* bitwise ops on pointers are troublesome, prohibit. */ ++ verbose(env, "R%d bitwise operator %s on pointer prohibited\n", ++ dst, bpf_alu_string[opcode >> 4]); ++ return -EACCES; ++ default: ++ /* other operators (e.g. 
MUL,LSH) produce non-pointer results */ ++ verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", ++ dst, bpf_alu_string[opcode >> 4]); ++ return -EACCES; ++ } ++ ++ if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type)) ++ return -EINVAL; ++ ++ __update_reg_bounds(dst_reg); ++ __reg_deduce_bounds(dst_reg); ++ __reg_bound_offset(dst_reg); ++ ++ if (sanitize_check_bounds(env, insn, dst_reg) < 0) ++ return -EACCES; ++ if (sanitize_needed(opcode)) { ++ ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg, ++ &info, true); ++ if (ret < 0) ++ return sanitize_err(env, insn, ret, off_reg, dst_reg); ++ } ++ ++ return 0; ++} ++ ++/* WARNING: This function does calculations on 64-bit values, but the actual ++ * execution may occur on 32-bit values. Therefore, things like bitshifts ++ * need extra checks in the 32-bit case. ++ */ ++static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, ++ struct bpf_insn *insn, ++ struct bpf_reg_state *dst_reg, ++ struct bpf_reg_state src_reg) ++{ ++ struct bpf_reg_state *regs = cur_regs(env); ++ u8 opcode = BPF_OP(insn->code); ++ bool src_known, dst_known; ++ s64 smin_val, smax_val; ++ u64 umin_val, umax_val; ++ u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32; ++ int ret; ++ ++ if (insn_bitness == 32) { ++ /* Relevant for 32-bit RSH: Information can propagate towards ++ * LSB, so it isn't sufficient to only truncate the output to ++ * 32 bits. ++ */ ++ coerce_reg_to_size(dst_reg, 4); ++ coerce_reg_to_size(&src_reg, 4); ++ } ++ ++ smin_val = src_reg.smin_value; ++ smax_val = src_reg.smax_value; ++ umin_val = src_reg.umin_value; ++ umax_val = src_reg.umax_value; ++ src_known = tnum_is_const(src_reg.var_off); ++ dst_known = tnum_is_const(dst_reg->var_off); ++ ++ if ((src_known && (smin_val != smax_val || umin_val != umax_val)) || ++ smin_val > smax_val || umin_val > umax_val) { ++ /* Taint dst register if offset had invalid bounds derived from ++ * e.g. dead branches. 
++ */ ++ __mark_reg_unknown(env, dst_reg); ++ return 0; ++ } ++ ++ if (!src_known && ++ opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { ++ __mark_reg_unknown(env, dst_reg); ++ return 0; ++ } ++ ++ if (sanitize_needed(opcode)) { ++ ret = sanitize_val_alu(env, insn); ++ if (ret < 0) ++ return sanitize_err(env, insn, ret, NULL, NULL); ++ } ++ ++ switch (opcode) { ++ case BPF_ADD: ++ if (signed_add_overflows(dst_reg->smin_value, smin_val) || ++ signed_add_overflows(dst_reg->smax_value, smax_val)) { ++ dst_reg->smin_value = S64_MIN; ++ dst_reg->smax_value = S64_MAX; ++ } else { ++ dst_reg->smin_value += smin_val; ++ dst_reg->smax_value += smax_val; ++ } ++ if (dst_reg->umin_value + umin_val < umin_val || ++ dst_reg->umax_value + umax_val < umax_val) { ++ dst_reg->umin_value = 0; ++ dst_reg->umax_value = U64_MAX; ++ } else { ++ dst_reg->umin_value += umin_val; ++ dst_reg->umax_value += umax_val; ++ } ++ dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off); ++ break; ++ case BPF_SUB: ++ if (signed_sub_overflows(dst_reg->smin_value, smax_val) || ++ signed_sub_overflows(dst_reg->smax_value, smin_val)) { ++ /* Overflow possible, we know nothing */ ++ dst_reg->smin_value = S64_MIN; ++ dst_reg->smax_value = S64_MAX; ++ } else { ++ dst_reg->smin_value -= smax_val; ++ dst_reg->smax_value -= smin_val; ++ } ++ if (dst_reg->umin_value < umax_val) { ++ /* Overflow possible, we know nothing */ ++ dst_reg->umin_value = 0; ++ dst_reg->umax_value = U64_MAX; ++ } else { ++ /* Cannot overflow (as long as bounds are consistent) */ ++ dst_reg->umin_value -= umax_val; ++ dst_reg->umax_value -= umin_val; ++ } ++ dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off); ++ break; ++ case BPF_MUL: ++ dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off); ++ if (smin_val < 0 || dst_reg->smin_value < 0) { ++ /* Ain't nobody got time to multiply that sign */ ++ __mark_reg_unbounded(dst_reg); ++ __update_reg_bounds(dst_reg); ++ break; ++ } ++ /* Both values 
are positive, so we can work with unsigned and ++ * copy the result to signed (unless it exceeds S64_MAX). ++ */ ++ if (umax_val > U32_MAX || dst_reg->umax_value > U32_MAX) { ++ /* Potential overflow, we know nothing */ ++ __mark_reg_unbounded(dst_reg); ++ /* (except what we can learn from the var_off) */ ++ __update_reg_bounds(dst_reg); ++ break; ++ } ++ dst_reg->umin_value *= umin_val; ++ dst_reg->umax_value *= umax_val; ++ if (dst_reg->umax_value > S64_MAX) { ++ /* Overflow possible, we know nothing */ ++ dst_reg->smin_value = S64_MIN; ++ dst_reg->smax_value = S64_MAX; ++ } else { ++ dst_reg->smin_value = dst_reg->umin_value; ++ dst_reg->smax_value = dst_reg->umax_value; ++ } ++ break; ++ case BPF_AND: ++ if (src_known && dst_known) { ++ __mark_reg_known(dst_reg, dst_reg->var_off.value & ++ src_reg.var_off.value); ++ break; ++ } ++ /* We get our minimum from the var_off, since that's inherently ++ * bitwise. Our maximum is the minimum of the operands' maxima. ++ */ ++ dst_reg->var_off = tnum_and(dst_reg->var_off, src_reg.var_off); ++ dst_reg->umin_value = dst_reg->var_off.value; ++ dst_reg->umax_value = min(dst_reg->umax_value, umax_val); ++ if (dst_reg->smin_value < 0 || smin_val < 0) { ++ /* Lose signed bounds when ANDing negative numbers, ++ * ain't nobody got time for that. ++ */ ++ dst_reg->smin_value = S64_MIN; ++ dst_reg->smax_value = S64_MAX; ++ } else { ++ /* ANDing two positives gives a positive, so safe to ++ * cast result into s64. 
++ */ ++ dst_reg->smin_value = dst_reg->umin_value; ++ dst_reg->smax_value = dst_reg->umax_value; ++ } ++ /* We may learn something more from the var_off */ ++ __update_reg_bounds(dst_reg); ++ break; ++ case BPF_OR: ++ if (src_known && dst_known) { ++ __mark_reg_known(dst_reg, dst_reg->var_off.value | ++ src_reg.var_off.value); ++ break; ++ } ++ /* We get our maximum from the var_off, and our minimum is the ++ * maximum of the operands' minima ++ */ ++ dst_reg->var_off = tnum_or(dst_reg->var_off, src_reg.var_off); ++ dst_reg->umin_value = max(dst_reg->umin_value, umin_val); ++ dst_reg->umax_value = dst_reg->var_off.value | ++ dst_reg->var_off.mask; ++ if (dst_reg->smin_value < 0 || smin_val < 0) { ++ /* Lose signed bounds when ORing negative numbers, ++ * ain't nobody got time for that. ++ */ ++ dst_reg->smin_value = S64_MIN; ++ dst_reg->smax_value = S64_MAX; ++ } else { ++ /* ORing two positives gives a positive, so safe to ++ * cast result into s64. ++ */ ++ dst_reg->smin_value = dst_reg->umin_value; ++ dst_reg->smax_value = dst_reg->umax_value; ++ } ++ /* We may learn something more from the var_off */ ++ __update_reg_bounds(dst_reg); ++ break; ++ case BPF_LSH: ++ if (umax_val >= insn_bitness) { ++ /* Shifts greater than 31 or 63 are undefined. ++ * This includes shifts by a negative number. 
++ */ ++ mark_reg_unknown(env, regs, insn->dst_reg); ++ break; ++ } ++ /* We lose all sign bit information (except what we can pick ++ * up from var_off) ++ */ ++ dst_reg->smin_value = S64_MIN; ++ dst_reg->smax_value = S64_MAX; ++ /* If we might shift our top bit out, then we know nothing */ ++ if (dst_reg->umax_value > 1ULL << (63 - umax_val)) { ++ dst_reg->umin_value = 0; ++ dst_reg->umax_value = U64_MAX; ++ } else { ++ dst_reg->umin_value <<= umin_val; ++ dst_reg->umax_value <<= umax_val; ++ } ++ dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val); ++ /* We may learn something more from the var_off */ ++ __update_reg_bounds(dst_reg); ++ break; ++ case BPF_RSH: ++ if (umax_val >= insn_bitness) { ++ /* Shifts greater than 31 or 63 are undefined. ++ * This includes shifts by a negative number. ++ */ ++ mark_reg_unknown(env, regs, insn->dst_reg); ++ break; ++ } ++ /* BPF_RSH is an unsigned shift. If the value in dst_reg might ++ * be negative, then either: ++ * 1) src_reg might be zero, so the sign bit of the result is ++ * unknown, so we lose our signed bounds ++ * 2) it's known negative, thus the unsigned bounds capture the ++ * signed bounds ++ * 3) the signed bounds cross zero, so they tell us nothing ++ * about the result ++ * If the value in dst_reg is known nonnegative, then again the ++ * unsigned bounts capture the signed bounds. ++ * Thus, in all cases it suffices to blow away our signed bounds ++ * and rely on inferring new ones from the unsigned bounds and ++ * var_off of the result. ++ */ ++ dst_reg->smin_value = S64_MIN; ++ dst_reg->smax_value = S64_MAX; ++ dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val); ++ dst_reg->umin_value >>= umax_val; ++ dst_reg->umax_value >>= umin_val; ++ /* We may learn something more from the var_off */ ++ __update_reg_bounds(dst_reg); ++ break; ++ case BPF_ARSH: ++ if (umax_val >= insn_bitness) { ++ /* Shifts greater than 31 or 63 are undefined. ++ * This includes shifts by a negative number. 
++ */ ++ mark_reg_unknown(env, regs, insn->dst_reg); ++ break; ++ } ++ ++ /* Upon reaching here, src_known is true and ++ * umax_val is equal to umin_val. ++ */ ++ if (insn_bitness == 32) { ++ dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val); ++ dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val); ++ } else { ++ dst_reg->smin_value >>= umin_val; ++ dst_reg->smax_value >>= umin_val; ++ } ++ ++ dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val, ++ insn_bitness); ++ ++ /* blow away the dst_reg umin_value/umax_value and rely on ++ * dst_reg var_off to refine the result. ++ */ ++ dst_reg->umin_value = 0; ++ dst_reg->umax_value = U64_MAX; ++ __update_reg_bounds(dst_reg); ++ break; ++ default: ++ mark_reg_unknown(env, regs, insn->dst_reg); ++ break; ++ } ++ ++ if (BPF_CLASS(insn->code) != BPF_ALU64) { ++ /* 32-bit ALU ops are (32,32)->32 */ ++ coerce_reg_to_size(dst_reg, 4); ++ } ++ ++ __reg_deduce_bounds(dst_reg); ++ __reg_bound_offset(dst_reg); ++ return 0; ++} ++ ++/* Handles ALU ops other than BPF_END, BPF_NEG and BPF_MOV: computes new min/max ++ * and var_off. ++ */ ++static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, ++ struct bpf_insn *insn) ++{ ++ struct bpf_verifier_state *vstate = env->cur_state; ++ struct bpf_func_state *state = vstate->frame[vstate->curframe]; ++ struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; ++ struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; ++ u8 opcode = BPF_OP(insn->code); ++ int err; ++ ++ dst_reg = ®s[insn->dst_reg]; ++ src_reg = NULL; ++ if (dst_reg->type != SCALAR_VALUE) ++ ptr_reg = dst_reg; ++ if (BPF_SRC(insn->code) == BPF_X) { ++ src_reg = ®s[insn->src_reg]; ++ if (src_reg->type != SCALAR_VALUE) { ++ if (dst_reg->type != SCALAR_VALUE) { ++ /* Combining two pointers by any ALU op yields ++ * an arbitrary scalar. 
Disallow all math except ++ * pointer subtraction ++ */ ++ if (opcode == BPF_SUB && env->allow_ptr_leaks) { ++ mark_reg_unknown(env, regs, insn->dst_reg); ++ return 0; ++ } ++ verbose(env, "R%d pointer %s pointer prohibited\n", ++ insn->dst_reg, ++ bpf_alu_string[opcode >> 4]); ++ return -EACCES; ++ } else { ++ /* scalar += pointer ++ * This is legal, but we have to reverse our ++ * src/dest handling in computing the range ++ */ ++ err = mark_chain_precision(env, insn->dst_reg); ++ if (err) ++ return err; ++ return adjust_ptr_min_max_vals(env, insn, ++ src_reg, dst_reg); ++ } ++ } else if (ptr_reg) { ++ /* pointer += scalar */ ++ err = mark_chain_precision(env, insn->src_reg); ++ if (err) ++ return err; ++ return adjust_ptr_min_max_vals(env, insn, ++ dst_reg, src_reg); ++ } ++ } else { ++ /* Pretend the src is a reg with a known value, since we only ++ * need to be able to read from this state. ++ */ ++ off_reg.type = SCALAR_VALUE; ++ __mark_reg_known(&off_reg, insn->imm); ++ src_reg = &off_reg; ++ if (ptr_reg) /* pointer += K */ ++ return adjust_ptr_min_max_vals(env, insn, ++ ptr_reg, src_reg); ++ } ++ ++ /* Got here implies adding two SCALAR_VALUEs */ ++ if (WARN_ON_ONCE(ptr_reg)) { ++ print_verifier_state(env, state); ++ verbose(env, "verifier internal error: unexpected ptr_reg\n"); ++ return -EINVAL; ++ } ++ if (WARN_ON(!src_reg)) { ++ print_verifier_state(env, state); ++ verbose(env, "verifier internal error: no src_reg\n"); ++ return -EINVAL; ++ } ++ return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); ++} ++ + /* check validity of 32-bit and 64-bit arithmetic operations */ +-static int check_alu_op(struct verifier_env *env, struct bpf_insn *insn) ++static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) + { +- struct reg_state *regs = env->cur_state.regs; ++ struct bpf_reg_state *regs = cur_regs(env); + u8 opcode = BPF_OP(insn->code); + int err; + +@@ -1034,30 +5134,31 @@ static int check_alu_op(struct verifier_ + if 
(BPF_SRC(insn->code) != 0 || + insn->src_reg != BPF_REG_0 || + insn->off != 0 || insn->imm != 0) { +- verbose("BPF_NEG uses reserved fields\n"); ++ verbose(env, "BPF_NEG uses reserved fields\n"); + return -EINVAL; + } + } else { + if (insn->src_reg != BPF_REG_0 || insn->off != 0 || +- (insn->imm != 16 && insn->imm != 32 && insn->imm != 64)) { +- verbose("BPF_END uses reserved fields\n"); ++ (insn->imm != 16 && insn->imm != 32 && insn->imm != 64) || ++ BPF_CLASS(insn->code) == BPF_ALU64) { ++ verbose(env, "BPF_END uses reserved fields\n"); + return -EINVAL; + } + } + + /* check src operand */ +- err = check_reg_arg(regs, insn->dst_reg, SRC_OP); ++ err = check_reg_arg(env, insn->dst_reg, SRC_OP); + if (err) + return err; + + if (is_pointer_value(env, insn->dst_reg)) { +- verbose("R%d pointer arithmetic prohibited\n", ++ verbose(env, "R%d pointer arithmetic prohibited\n", + insn->dst_reg); + return -EACCES; + } + + /* check dest operand */ +- err = check_reg_arg(regs, insn->dst_reg, DST_OP); ++ err = check_reg_arg(env, insn->dst_reg, DST_OP); + if (err) + return err; + +@@ -1065,81 +5166,100 @@ static int check_alu_op(struct verifier_ + + if (BPF_SRC(insn->code) == BPF_X) { + if (insn->imm != 0 || insn->off != 0) { +- verbose("BPF_MOV uses reserved fields\n"); ++ verbose(env, "BPF_MOV uses reserved fields\n"); + return -EINVAL; + } + + /* check src operand */ +- err = check_reg_arg(regs, insn->src_reg, SRC_OP); ++ err = check_reg_arg(env, insn->src_reg, SRC_OP); + if (err) + return err; + } else { + if (insn->src_reg != BPF_REG_0 || insn->off != 0) { +- verbose("BPF_MOV uses reserved fields\n"); ++ verbose(env, "BPF_MOV uses reserved fields\n"); + return -EINVAL; + } + } + +- /* check dest operand */ +- err = check_reg_arg(regs, insn->dst_reg, DST_OP); ++ /* check dest operand, mark as required later */ ++ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); + if (err) + return err; + + if (BPF_SRC(insn->code) == BPF_X) { ++ struct bpf_reg_state *src_reg = regs + 
insn->src_reg; ++ struct bpf_reg_state *dst_reg = regs + insn->dst_reg; ++ + if (BPF_CLASS(insn->code) == BPF_ALU64) { + /* case: R1 = R2 + * copy register state to dest reg + */ +- regs[insn->dst_reg] = regs[insn->src_reg]; ++ *dst_reg = *src_reg; ++ dst_reg->live |= REG_LIVE_WRITTEN; ++ dst_reg->subreg_def = DEF_NOT_SUBREG; + } else { ++ /* R1 = (u32) R2 */ + if (is_pointer_value(env, insn->src_reg)) { +- verbose("R%d partial copy of pointer\n", ++ verbose(env, ++ "R%d partial copy of pointer\n", + insn->src_reg); + return -EACCES; ++ } else if (src_reg->type == SCALAR_VALUE) { ++ *dst_reg = *src_reg; ++ dst_reg->live |= REG_LIVE_WRITTEN; ++ dst_reg->subreg_def = env->insn_idx + 1; ++ } else { ++ mark_reg_unknown(env, regs, ++ insn->dst_reg); + } +- regs[insn->dst_reg].type = UNKNOWN_VALUE; +- regs[insn->dst_reg].map_ptr = NULL; ++ coerce_reg_to_size(dst_reg, 4); + } + } else { + /* case: R = imm + * remember the value we stored into this reg + */ +- regs[insn->dst_reg].type = CONST_IMM; +- regs[insn->dst_reg].imm = insn->imm; ++ /* clear any state __mark_reg_known doesn't set */ ++ mark_reg_unknown(env, regs, insn->dst_reg); ++ regs[insn->dst_reg].type = SCALAR_VALUE; ++ if (BPF_CLASS(insn->code) == BPF_ALU64) { ++ __mark_reg_known(regs + insn->dst_reg, ++ insn->imm); ++ } else { ++ __mark_reg_known(regs + insn->dst_reg, ++ (u32)insn->imm); ++ } + } + + } else if (opcode > BPF_END) { +- verbose("invalid BPF_ALU opcode %x\n", opcode); ++ verbose(env, "invalid BPF_ALU opcode %x\n", opcode); + return -EINVAL; + + } else { /* all other ALU ops: and, sub, xor, add, ... 
*/ + +- bool stack_relative = false; +- + if (BPF_SRC(insn->code) == BPF_X) { + if (insn->imm != 0 || insn->off != 0) { +- verbose("BPF_ALU uses reserved fields\n"); ++ verbose(env, "BPF_ALU uses reserved fields\n"); + return -EINVAL; + } + /* check src1 operand */ +- err = check_reg_arg(regs, insn->src_reg, SRC_OP); ++ err = check_reg_arg(env, insn->src_reg, SRC_OP); + if (err) + return err; + } else { + if (insn->src_reg != BPF_REG_0 || insn->off != 0) { +- verbose("BPF_ALU uses reserved fields\n"); ++ verbose(env, "BPF_ALU uses reserved fields\n"); + return -EINVAL; + } + } + + /* check src2 operand */ +- err = check_reg_arg(regs, insn->dst_reg, SRC_OP); ++ err = check_reg_arg(env, insn->dst_reg, SRC_OP); + if (err) + return err; + + if ((opcode == BPF_MOD || opcode == BPF_DIV) && + BPF_SRC(insn->code) == BPF_K && insn->imm == 0) { +- verbose("div by zero\n"); ++ verbose(env, "div by zero\n"); + return -EINVAL; + } + +@@ -1148,185 +5268,980 @@ static int check_alu_op(struct verifier_ + int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 
64 : 32; + + if (insn->imm < 0 || insn->imm >= size) { +- verbose("invalid shift %d\n", insn->imm); ++ verbose(env, "invalid shift %d\n", insn->imm); + return -EINVAL; + } + } + +- /* pattern match 'bpf_add Rx, imm' instruction */ +- if (opcode == BPF_ADD && BPF_CLASS(insn->code) == BPF_ALU64 && +- regs[insn->dst_reg].type == FRAME_PTR && +- BPF_SRC(insn->code) == BPF_K) { +- stack_relative = true; +- } else if (is_pointer_value(env, insn->dst_reg)) { +- verbose("R%d pointer arithmetic prohibited\n", +- insn->dst_reg); +- return -EACCES; +- } else if (BPF_SRC(insn->code) == BPF_X && +- is_pointer_value(env, insn->src_reg)) { +- verbose("R%d pointer arithmetic prohibited\n", +- insn->src_reg); +- return -EACCES; +- } +- + /* check dest operand */ +- err = check_reg_arg(regs, insn->dst_reg, DST_OP); ++ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); + if (err) + return err; + +- if (stack_relative) { +- regs[insn->dst_reg].type = PTR_TO_STACK; +- regs[insn->dst_reg].imm = insn->imm; +- } ++ return adjust_reg_min_max_vals(env, insn); + } + + return 0; + } + +-static int check_cond_jmp_op(struct verifier_env *env, ++static void __find_good_pkt_pointers(struct bpf_func_state *state, ++ struct bpf_reg_state *dst_reg, ++ enum bpf_reg_type type, u16 new_range) ++{ ++ struct bpf_reg_state *reg; ++ int i; ++ ++ for (i = 0; i < MAX_BPF_REG; i++) { ++ reg = &state->regs[i]; ++ if (reg->type == type && reg->id == dst_reg->id) ++ /* keep the maximum range already checked */ ++ reg->range = max(reg->range, new_range); ++ } ++ ++ bpf_for_each_spilled_reg(i, state, reg) { ++ if (!reg) ++ continue; ++ if (reg->type == type && reg->id == dst_reg->id) ++ reg->range = max(reg->range, new_range); ++ } ++} ++ ++static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, ++ struct bpf_reg_state *dst_reg, ++ enum bpf_reg_type type, ++ bool range_right_open) ++{ ++ u16 new_range; ++ int i; ++ ++ if (dst_reg->off < 0 || ++ (dst_reg->off == 0 && range_right_open)) ++ /* 
This doesn't give us any range */ ++ return; ++ ++ if (dst_reg->umax_value > MAX_PACKET_OFF || ++ dst_reg->umax_value + dst_reg->off > MAX_PACKET_OFF) ++ /* Risk of overflow. For instance, ptr + (1<<63) may be less ++ * than pkt_end, but that's because it's also less than pkt. ++ */ ++ return; ++ ++ new_range = dst_reg->off; ++ if (range_right_open) ++ new_range--; ++ ++ /* Examples for register markings: ++ * ++ * pkt_data in dst register: ++ * ++ * r2 = r3; ++ * r2 += 8; ++ * if (r2 > pkt_end) goto ++ * ++ * ++ * r2 = r3; ++ * r2 += 8; ++ * if (r2 < pkt_end) goto ++ * ++ * ++ * Where: ++ * r2 == dst_reg, pkt_end == src_reg ++ * r2=pkt(id=n,off=8,r=0) ++ * r3=pkt(id=n,off=0,r=0) ++ * ++ * pkt_data in src register: ++ * ++ * r2 = r3; ++ * r2 += 8; ++ * if (pkt_end >= r2) goto ++ * ++ * ++ * r2 = r3; ++ * r2 += 8; ++ * if (pkt_end <= r2) goto ++ * ++ * ++ * Where: ++ * pkt_end == dst_reg, r2 == src_reg ++ * r2=pkt(id=n,off=8,r=0) ++ * r3=pkt(id=n,off=0,r=0) ++ * ++ * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) ++ * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) ++ * and [r3, r3 + 8-1) respectively is safe to access depending on ++ * the check. ++ */ ++ ++ /* If our ids match, then we must have the same max_value. And we ++ * don't care about the other reg's fixed offset, since if it's too big ++ * the range won't allow anything. ++ * dst_reg->off is known < MAX_PACKET_OFF, therefore it fits in a u16. ++ */ ++ for (i = 0; i <= vstate->curframe; i++) ++ __find_good_pkt_pointers(vstate->frame[i], dst_reg, type, ++ new_range); ++} ++ ++/* compute branch direction of the expression "if (reg opcode val) goto target;" ++ * and return: ++ * 1 - branch will be taken and "goto target" will be executed ++ * 0 - branch will not be taken and fall-through to next insn ++ * -1 - unknown. 
Example: "if (reg < 5)" is unknown when register value range [0,10] ++ */ ++static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode, ++ bool is_jmp32) ++{ ++ struct bpf_reg_state reg_lo; ++ s64 sval; ++ ++ if (__is_pointer_value(false, reg)) ++ return -1; ++ ++ if (is_jmp32) { ++ reg_lo = *reg; ++ reg = ®_lo; ++ /* For JMP32, only low 32 bits are compared, coerce_reg_to_size ++ * could truncate high bits and update umin/umax according to ++ * information of low bits. ++ */ ++ coerce_reg_to_size(reg, 4); ++ /* smin/smax need special handling. For example, after coerce, ++ * if smin_value is 0x00000000ffffffffLL, the value is -1 when ++ * used as operand to JMP32. It is a negative number from s32's ++ * point of view, while it is a positive number when seen as ++ * s64. The smin/smax are kept as s64, therefore, when used with ++ * JMP32, they need to be transformed into s32, then sign ++ * extended back to s64. ++ * ++ * Also, smin/smax were copied from umin/umax. If umin/umax has ++ * different sign bit, then min/max relationship doesn't ++ * maintain after casting into s32, for this case, set smin/smax ++ * to safest range. 
++ */ ++ if ((reg->umax_value ^ reg->umin_value) & ++ (1ULL << 31)) { ++ reg->smin_value = S32_MIN; ++ reg->smax_value = S32_MAX; ++ } ++ reg->smin_value = (s64)(s32)reg->smin_value; ++ reg->smax_value = (s64)(s32)reg->smax_value; ++ ++ val = (u32)val; ++ sval = (s64)(s32)val; ++ } else { ++ sval = (s64)val; ++ } ++ ++ switch (opcode) { ++ case BPF_JEQ: ++ if (tnum_is_const(reg->var_off)) ++ return !!tnum_equals_const(reg->var_off, val); ++ break; ++ case BPF_JNE: ++ if (tnum_is_const(reg->var_off)) ++ return !tnum_equals_const(reg->var_off, val); ++ break; ++ case BPF_JSET: ++ if ((~reg->var_off.mask & reg->var_off.value) & val) ++ return 1; ++ if (!((reg->var_off.mask | reg->var_off.value) & val)) ++ return 0; ++ break; ++ case BPF_JGT: ++ if (reg->umin_value > val) ++ return 1; ++ else if (reg->umax_value <= val) ++ return 0; ++ break; ++ case BPF_JSGT: ++ if (reg->smin_value > sval) ++ return 1; ++ else if (reg->smax_value < sval) ++ return 0; ++ break; ++ case BPF_JLT: ++ if (reg->umax_value < val) ++ return 1; ++ else if (reg->umin_value >= val) ++ return 0; ++ break; ++ case BPF_JSLT: ++ if (reg->smax_value < sval) ++ return 1; ++ else if (reg->smin_value >= sval) ++ return 0; ++ break; ++ case BPF_JGE: ++ if (reg->umin_value >= val) ++ return 1; ++ else if (reg->umax_value < val) ++ return 0; ++ break; ++ case BPF_JSGE: ++ if (reg->smin_value >= sval) ++ return 1; ++ else if (reg->smax_value < sval) ++ return 0; ++ break; ++ case BPF_JLE: ++ if (reg->umax_value <= val) ++ return 1; ++ else if (reg->umin_value > val) ++ return 0; ++ break; ++ case BPF_JSLE: ++ if (reg->smax_value <= sval) ++ return 1; ++ else if (reg->smin_value > sval) ++ return 0; ++ break; ++ } ++ ++ return -1; ++} ++ ++/* Generate min value of the high 32-bit from TNUM info. */ ++static u64 gen_hi_min(struct tnum var) ++{ ++ return var.value & ~0xffffffffULL; ++} ++ ++/* Generate max value of the high 32-bit from TNUM info. 
*/ ++static u64 gen_hi_max(struct tnum var) ++{ ++ return (var.value | var.mask) & ~0xffffffffULL; ++} ++ ++/* Return true if VAL is compared with a s64 sign extended from s32, and they ++ * are with the same signedness. ++ */ ++static bool cmp_val_with_extended_s64(s64 sval, struct bpf_reg_state *reg) ++{ ++ return ((s32)sval >= 0 && ++ reg->smin_value >= 0 && reg->smax_value <= S32_MAX) || ++ ((s32)sval < 0 && ++ reg->smax_value <= 0 && reg->smin_value >= S32_MIN); ++} ++ ++/* Constrain the possible values of @reg with unsigned upper bound @bound. ++ * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive. ++ * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits ++ * of @reg. ++ */ ++static void set_upper_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32, ++ bool is_exclusive) ++{ ++ if (is_exclusive) { ++ /* There are no values for `reg` that make `reg<0` true. */ ++ if (bound == 0) ++ return; ++ bound--; ++ } ++ if (is_jmp32) { ++ /* Constrain the register's value in the tnum representation. ++ * For 64-bit comparisons this happens later in ++ * __reg_bound_offset(), but for 32-bit comparisons, we can be ++ * more precise than what can be derived from the updated ++ * numeric bounds. ++ */ ++ struct tnum t = tnum_range(0, bound); ++ ++ t.mask |= ~0xffffffffULL; /* upper half is unknown */ ++ reg->var_off = tnum_intersect(reg->var_off, t); ++ ++ /* Compute the 64-bit bound from the 32-bit bound. */ ++ bound += gen_hi_max(reg->var_off); ++ } ++ reg->umax_value = min(reg->umax_value, bound); ++} ++ ++/* Constrain the possible values of @reg with unsigned lower bound @bound. ++ * If @is_exclusive, @bound is an exclusive limit, otherwise it is inclusive. ++ * If @is_jmp32, @bound is a 32-bit value that only constrains the low 32 bits ++ * of @reg. 
++ */ ++static void set_lower_bound(struct bpf_reg_state *reg, u64 bound, bool is_jmp32, ++ bool is_exclusive) ++{ ++ if (is_exclusive) { ++ /* There are no values for `reg` that make `reg>MAX` true. */ ++ if (bound == (is_jmp32 ? U32_MAX : U64_MAX)) ++ return; ++ bound++; ++ } ++ if (is_jmp32) { ++ /* Constrain the register's value in the tnum representation. ++ * For 64-bit comparisons this happens later in ++ * __reg_bound_offset(), but for 32-bit comparisons, we can be ++ * more precise than what can be derived from the updated ++ * numeric bounds. ++ */ ++ struct tnum t = tnum_range(bound, U32_MAX); ++ ++ t.mask |= ~0xffffffffULL; /* upper half is unknown */ ++ reg->var_off = tnum_intersect(reg->var_off, t); ++ ++ /* Compute the 64-bit bound from the 32-bit bound. */ ++ bound += gen_hi_min(reg->var_off); ++ } ++ reg->umin_value = max(reg->umin_value, bound); ++} ++ ++/* Adjusts the register min/max values in the case that the dst_reg is the ++ * variable register that we are working on, and src_reg is a constant or we're ++ * simply doing a BPF_K check. ++ * In JEQ/JNE cases we also adjust the var_off values. ++ */ ++static void reg_set_min_max(struct bpf_reg_state *true_reg, ++ struct bpf_reg_state *false_reg, u64 val, ++ u8 opcode, bool is_jmp32) ++{ ++ s64 sval; ++ ++ /* If the dst_reg is a pointer, we can't learn anything about its ++ * variable offset from the compare (unless src_reg were a pointer into ++ * the same object, but we don't bother with that. ++ * Since false_reg and true_reg have the same type by construction, we ++ * only need to check one of them for pointerness. ++ */ ++ if (__is_pointer_value(false, false_reg)) ++ return; ++ ++ val = is_jmp32 ? (u32)val : val; ++ sval = is_jmp32 ? (s64)(s32)val : (s64)val; ++ ++ switch (opcode) { ++ case BPF_JEQ: ++ case BPF_JNE: ++ { ++ struct bpf_reg_state *reg = ++ opcode == BPF_JEQ ? 
true_reg : false_reg; ++ ++ /* For BPF_JEQ, if this is false we know nothing Jon Snow, but ++ * if it is true we know the value for sure. Likewise for ++ * BPF_JNE. ++ */ ++ if (is_jmp32) { ++ u64 old_v = reg->var_off.value; ++ u64 hi_mask = ~0xffffffffULL; ++ ++ reg->var_off.value = (old_v & hi_mask) | val; ++ reg->var_off.mask &= hi_mask; ++ } else { ++ __mark_reg_known(reg, val); ++ } ++ break; ++ } ++ case BPF_JSET: ++ false_reg->var_off = tnum_and(false_reg->var_off, ++ tnum_const(~val)); ++ if (is_power_of_2(val)) ++ true_reg->var_off = tnum_or(true_reg->var_off, ++ tnum_const(val)); ++ break; ++ case BPF_JGE: ++ case BPF_JGT: ++ { ++ set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JGE); ++ set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JGT); ++ break; ++ } ++ case BPF_JSGE: ++ case BPF_JSGT: ++ { ++ s64 false_smax = opcode == BPF_JSGT ? sval : sval - 1; ++ s64 true_smin = opcode == BPF_JSGT ? sval + 1 : sval; ++ ++ /* If the full s64 was not sign-extended from s32 then don't ++ * deduct further info. ++ */ ++ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) ++ break; ++ false_reg->smax_value = min(false_reg->smax_value, false_smax); ++ true_reg->smin_value = max(true_reg->smin_value, true_smin); ++ break; ++ } ++ case BPF_JLE: ++ case BPF_JLT: ++ { ++ set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JLE); ++ set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JLT); ++ break; ++ } ++ case BPF_JSLE: ++ case BPF_JSLT: ++ { ++ s64 false_smin = opcode == BPF_JSLT ? sval : sval + 1; ++ s64 true_smax = opcode == BPF_JSLT ? sval - 1 : sval; ++ ++ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) ++ break; ++ false_reg->smin_value = max(false_reg->smin_value, false_smin); ++ true_reg->smax_value = min(true_reg->smax_value, true_smax); ++ break; ++ } ++ default: ++ break; ++ } ++ ++ __reg_deduce_bounds(false_reg); ++ __reg_deduce_bounds(true_reg); ++ /* We might have learned some bits from the bounds. 
*/ ++ __reg_bound_offset(false_reg); ++ __reg_bound_offset(true_reg); ++ /* Intersecting with the old var_off might have improved our bounds ++ * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), ++ * then new var_off is (0; 0x7f...fc) which improves our umax. ++ */ ++ __update_reg_bounds(false_reg); ++ __update_reg_bounds(true_reg); ++} ++ ++/* Same as above, but for the case that dst_reg holds a constant and src_reg is ++ * the variable reg. ++ */ ++static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, ++ struct bpf_reg_state *false_reg, u64 val, ++ u8 opcode, bool is_jmp32) ++{ ++ s64 sval; ++ ++ if (__is_pointer_value(false, false_reg)) ++ return; ++ ++ val = is_jmp32 ? (u32)val : val; ++ sval = is_jmp32 ? (s64)(s32)val : (s64)val; ++ ++ switch (opcode) { ++ case BPF_JEQ: ++ case BPF_JNE: ++ { ++ struct bpf_reg_state *reg = ++ opcode == BPF_JEQ ? true_reg : false_reg; ++ ++ if (is_jmp32) { ++ u64 old_v = reg->var_off.value; ++ u64 hi_mask = ~0xffffffffULL; ++ ++ reg->var_off.value = (old_v & hi_mask) | val; ++ reg->var_off.mask &= hi_mask; ++ } else { ++ __mark_reg_known(reg, val); ++ } ++ break; ++ } ++ case BPF_JSET: ++ false_reg->var_off = tnum_and(false_reg->var_off, ++ tnum_const(~val)); ++ if (is_power_of_2(val)) ++ true_reg->var_off = tnum_or(true_reg->var_off, ++ tnum_const(val)); ++ break; ++ case BPF_JGE: ++ case BPF_JGT: ++ { ++ set_lower_bound(false_reg, val, is_jmp32, opcode == BPF_JGE); ++ set_upper_bound(true_reg, val, is_jmp32, opcode == BPF_JGT); ++ break; ++ } ++ case BPF_JSGE: ++ case BPF_JSGT: ++ { ++ s64 false_smin = opcode == BPF_JSGT ? sval : sval + 1; ++ s64 true_smax = opcode == BPF_JSGT ? 
sval - 1 : sval; ++ ++ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) ++ break; ++ false_reg->smin_value = max(false_reg->smin_value, false_smin); ++ true_reg->smax_value = min(true_reg->smax_value, true_smax); ++ break; ++ } ++ case BPF_JLE: ++ case BPF_JLT: ++ { ++ set_upper_bound(false_reg, val, is_jmp32, opcode == BPF_JLE); ++ set_lower_bound(true_reg, val, is_jmp32, opcode == BPF_JLT); ++ break; ++ } ++ case BPF_JSLE: ++ case BPF_JSLT: ++ { ++ s64 false_smax = opcode == BPF_JSLT ? sval : sval - 1; ++ s64 true_smin = opcode == BPF_JSLT ? sval + 1 : sval; ++ ++ if (is_jmp32 && !cmp_val_with_extended_s64(sval, false_reg)) ++ break; ++ false_reg->smax_value = min(false_reg->smax_value, false_smax); ++ true_reg->smin_value = max(true_reg->smin_value, true_smin); ++ break; ++ } ++ default: ++ break; ++ } ++ ++ __reg_deduce_bounds(false_reg); ++ __reg_deduce_bounds(true_reg); ++ /* We might have learned some bits from the bounds. */ ++ __reg_bound_offset(false_reg); ++ __reg_bound_offset(true_reg); ++ /* Intersecting with the old var_off might have improved our bounds ++ * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), ++ * then new var_off is (0; 0x7f...fc) which improves our umax. 
++ */ ++ __update_reg_bounds(false_reg); ++ __update_reg_bounds(true_reg); ++} ++ ++/* Regs are known to be equal, so intersect their min/max/var_off */ ++static void __reg_combine_min_max(struct bpf_reg_state *src_reg, ++ struct bpf_reg_state *dst_reg) ++{ ++ src_reg->umin_value = dst_reg->umin_value = max(src_reg->umin_value, ++ dst_reg->umin_value); ++ src_reg->umax_value = dst_reg->umax_value = min(src_reg->umax_value, ++ dst_reg->umax_value); ++ src_reg->smin_value = dst_reg->smin_value = max(src_reg->smin_value, ++ dst_reg->smin_value); ++ src_reg->smax_value = dst_reg->smax_value = min(src_reg->smax_value, ++ dst_reg->smax_value); ++ src_reg->var_off = dst_reg->var_off = tnum_intersect(src_reg->var_off, ++ dst_reg->var_off); ++ /* We might have learned new bounds from the var_off. */ ++ __update_reg_bounds(src_reg); ++ __update_reg_bounds(dst_reg); ++ /* We might have learned something about the sign bit. */ ++ __reg_deduce_bounds(src_reg); ++ __reg_deduce_bounds(dst_reg); ++ /* We might have learned some bits from the bounds. */ ++ __reg_bound_offset(src_reg); ++ __reg_bound_offset(dst_reg); ++ /* Intersecting with the old var_off might have improved our bounds ++ * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc), ++ * then new var_off is (0; 0x7f...fc) which improves our umax. 
++ */ ++ __update_reg_bounds(src_reg); ++ __update_reg_bounds(dst_reg); ++} ++ ++static void reg_combine_min_max(struct bpf_reg_state *true_src, ++ struct bpf_reg_state *true_dst, ++ struct bpf_reg_state *false_src, ++ struct bpf_reg_state *false_dst, ++ u8 opcode) ++{ ++ switch (opcode) { ++ case BPF_JEQ: ++ __reg_combine_min_max(true_src, true_dst); ++ break; ++ case BPF_JNE: ++ __reg_combine_min_max(false_src, false_dst); ++ break; ++ } ++} ++ ++static void mark_ptr_or_null_reg(struct bpf_func_state *state, ++ struct bpf_reg_state *reg, u32 id, ++ bool is_null) ++{ ++ if (reg_type_may_be_null(reg->type) && reg->id == id) { ++ /* Old offset (both fixed and variable parts) should ++ * have been known-zero, because we don't allow pointer ++ * arithmetic on pointers that might be NULL. ++ */ ++ if (WARN_ON_ONCE(reg->smin_value || reg->smax_value || ++ !tnum_equals_const(reg->var_off, 0) || ++ reg->off)) { ++ __mark_reg_known_zero(reg); ++ reg->off = 0; ++ } ++ if (is_null) { ++ reg->type = SCALAR_VALUE; ++ } else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) { ++ if (reg->map_ptr->inner_map_meta) { ++ reg->type = CONST_PTR_TO_MAP; ++ reg->map_ptr = reg->map_ptr->inner_map_meta; ++ } else if (reg->map_ptr->map_type == ++ BPF_MAP_TYPE_XSKMAP) { ++ reg->type = PTR_TO_XDP_SOCK; ++ } else { ++ reg->type = PTR_TO_MAP_VALUE; ++ } ++ } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { ++ reg->type = PTR_TO_SOCKET; ++ } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) { ++ reg->type = PTR_TO_SOCK_COMMON; ++ } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { ++ reg->type = PTR_TO_TCP_SOCK; ++ } ++ if (is_null) { ++ /* We don't need id and ref_obj_id from this point ++ * onwards anymore, thus we should better reset it, ++ * so that state pruning has chances to take effect. ++ */ ++ reg->id = 0; ++ reg->ref_obj_id = 0; ++ } else if (!reg_may_point_to_spin_lock(reg)) { ++ /* For not-NULL ptr, reg->ref_obj_id will be reset ++ * in release_reg_references(). 
++ * ++ * reg->id is still used by spin_lock ptr. Other ++ * than spin_lock ptr type, reg->id can be reset. ++ */ ++ reg->id = 0; ++ } ++ } ++} ++ ++static void __mark_ptr_or_null_regs(struct bpf_func_state *state, u32 id, ++ bool is_null) ++{ ++ struct bpf_reg_state *reg; ++ int i; ++ ++ for (i = 0; i < MAX_BPF_REG; i++) ++ mark_ptr_or_null_reg(state, &state->regs[i], id, is_null); ++ ++ bpf_for_each_spilled_reg(i, state, reg) { ++ if (!reg) ++ continue; ++ mark_ptr_or_null_reg(state, reg, id, is_null); ++ } ++} ++ ++/* The logic is similar to find_good_pkt_pointers(), both could eventually ++ * be folded together at some point. ++ */ ++static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, ++ bool is_null) ++{ ++ struct bpf_func_state *state = vstate->frame[vstate->curframe]; ++ struct bpf_reg_state *regs = state->regs; ++ u32 ref_obj_id = regs[regno].ref_obj_id; ++ u32 id = regs[regno].id; ++ int i; ++ ++ if (ref_obj_id && ref_obj_id == id && is_null) ++ /* regs[regno] is in the " == NULL" branch. ++ * No one could have freed the reference state before ++ * doing the NULL check. ++ */ ++ WARN_ON_ONCE(release_reference_state(state, id)); ++ ++ for (i = 0; i <= vstate->curframe; i++) ++ __mark_ptr_or_null_regs(vstate->frame[i], id, is_null); ++} ++ ++static bool try_match_pkt_pointers(const struct bpf_insn *insn, ++ struct bpf_reg_state *dst_reg, ++ struct bpf_reg_state *src_reg, ++ struct bpf_verifier_state *this_branch, ++ struct bpf_verifier_state *other_branch) ++{ ++ if (BPF_SRC(insn->code) != BPF_X) ++ return false; ++ ++ /* Pointers are always 64-bit. 
*/ ++ if (BPF_CLASS(insn->code) == BPF_JMP32) ++ return false; ++ ++ switch (BPF_OP(insn->code)) { ++ case BPF_JGT: ++ if ((dst_reg->type == PTR_TO_PACKET && ++ src_reg->type == PTR_TO_PACKET_END) || ++ (dst_reg->type == PTR_TO_PACKET_META && ++ reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { ++ /* pkt_data' > pkt_end, pkt_meta' > pkt_data */ ++ find_good_pkt_pointers(this_branch, dst_reg, ++ dst_reg->type, false); ++ } else if ((dst_reg->type == PTR_TO_PACKET_END && ++ src_reg->type == PTR_TO_PACKET) || ++ (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && ++ src_reg->type == PTR_TO_PACKET_META)) { ++ /* pkt_end > pkt_data', pkt_data > pkt_meta' */ ++ find_good_pkt_pointers(other_branch, src_reg, ++ src_reg->type, true); ++ } else { ++ return false; ++ } ++ break; ++ case BPF_JLT: ++ if ((dst_reg->type == PTR_TO_PACKET && ++ src_reg->type == PTR_TO_PACKET_END) || ++ (dst_reg->type == PTR_TO_PACKET_META && ++ reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { ++ /* pkt_data' < pkt_end, pkt_meta' < pkt_data */ ++ find_good_pkt_pointers(other_branch, dst_reg, ++ dst_reg->type, true); ++ } else if ((dst_reg->type == PTR_TO_PACKET_END && ++ src_reg->type == PTR_TO_PACKET) || ++ (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && ++ src_reg->type == PTR_TO_PACKET_META)) { ++ /* pkt_end < pkt_data', pkt_data > pkt_meta' */ ++ find_good_pkt_pointers(this_branch, src_reg, ++ src_reg->type, false); ++ } else { ++ return false; ++ } ++ break; ++ case BPF_JGE: ++ if ((dst_reg->type == PTR_TO_PACKET && ++ src_reg->type == PTR_TO_PACKET_END) || ++ (dst_reg->type == PTR_TO_PACKET_META && ++ reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { ++ /* pkt_data' >= pkt_end, pkt_meta' >= pkt_data */ ++ find_good_pkt_pointers(this_branch, dst_reg, ++ dst_reg->type, true); ++ } else if ((dst_reg->type == PTR_TO_PACKET_END && ++ src_reg->type == PTR_TO_PACKET) || ++ (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && ++ src_reg->type == PTR_TO_PACKET_META)) { ++ /* pkt_end >= 
pkt_data', pkt_data >= pkt_meta' */ ++ find_good_pkt_pointers(other_branch, src_reg, ++ src_reg->type, false); ++ } else { ++ return false; ++ } ++ break; ++ case BPF_JLE: ++ if ((dst_reg->type == PTR_TO_PACKET && ++ src_reg->type == PTR_TO_PACKET_END) || ++ (dst_reg->type == PTR_TO_PACKET_META && ++ reg_is_init_pkt_pointer(src_reg, PTR_TO_PACKET))) { ++ /* pkt_data' <= pkt_end, pkt_meta' <= pkt_data */ ++ find_good_pkt_pointers(other_branch, dst_reg, ++ dst_reg->type, false); ++ } else if ((dst_reg->type == PTR_TO_PACKET_END && ++ src_reg->type == PTR_TO_PACKET) || ++ (reg_is_init_pkt_pointer(dst_reg, PTR_TO_PACKET) && ++ src_reg->type == PTR_TO_PACKET_META)) { ++ /* pkt_end <= pkt_data', pkt_data <= pkt_meta' */ ++ find_good_pkt_pointers(this_branch, src_reg, ++ src_reg->type, true); ++ } else { ++ return false; ++ } ++ break; ++ default: ++ return false; ++ } ++ ++ return true; ++} ++ ++static int check_cond_jmp_op(struct bpf_verifier_env *env, + struct bpf_insn *insn, int *insn_idx) + { +- struct reg_state *regs = env->cur_state.regs; +- struct verifier_state *other_branch; ++ struct bpf_verifier_state *this_branch = env->cur_state; ++ struct bpf_verifier_state *other_branch; ++ struct bpf_reg_state *regs = this_branch->frame[this_branch->curframe]->regs; ++ struct bpf_reg_state *dst_reg, *other_branch_regs, *src_reg = NULL; + u8 opcode = BPF_OP(insn->code); ++ bool is_jmp32; ++ int pred = -1; + int err; + +- if (opcode > BPF_EXIT) { +- verbose("invalid BPF_JMP opcode %x\n", opcode); ++ /* Only conditional jumps are expected to reach here. 
*/ ++ if (opcode == BPF_JA || opcode > BPF_JSLE) { ++ verbose(env, "invalid BPF_JMP/JMP32 opcode %x\n", opcode); + return -EINVAL; + } + + if (BPF_SRC(insn->code) == BPF_X) { + if (insn->imm != 0) { +- verbose("BPF_JMP uses reserved fields\n"); ++ verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); + return -EINVAL; + } + + /* check src1 operand */ +- err = check_reg_arg(regs, insn->src_reg, SRC_OP); ++ err = check_reg_arg(env, insn->src_reg, SRC_OP); + if (err) + return err; + + if (is_pointer_value(env, insn->src_reg)) { +- verbose("R%d pointer comparison prohibited\n", ++ verbose(env, "R%d pointer comparison prohibited\n", + insn->src_reg); + return -EACCES; + } ++ src_reg = ®s[insn->src_reg]; + } else { + if (insn->src_reg != BPF_REG_0) { +- verbose("BPF_JMP uses reserved fields\n"); ++ verbose(env, "BPF_JMP/JMP32 uses reserved fields\n"); + return -EINVAL; + } + } + + /* check src2 operand */ +- err = check_reg_arg(regs, insn->dst_reg, SRC_OP); ++ err = check_reg_arg(env, insn->dst_reg, SRC_OP); + if (err) + return err; + +- /* detect if R == 0 where R was initialized to zero earlier */ +- if (BPF_SRC(insn->code) == BPF_K && +- (opcode == BPF_JEQ || opcode == BPF_JNE) && +- regs[insn->dst_reg].type == CONST_IMM && +- regs[insn->dst_reg].imm == insn->imm) { +- if (opcode == BPF_JEQ) { +- /* if (imm == imm) goto pc+off; +- * only follow the goto, ignore fall-through +- */ +- *insn_idx += insn->off; +- return 0; +- } else { +- /* if (imm != imm) goto pc+off; +- * only follow fall-through branch, since +- * that's where the program will go +- */ +- return 0; +- } ++ dst_reg = ®s[insn->dst_reg]; ++ is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; ++ ++ if (BPF_SRC(insn->code) == BPF_K) ++ pred = is_branch_taken(dst_reg, insn->imm, ++ opcode, is_jmp32); ++ else if (src_reg->type == SCALAR_VALUE && ++ tnum_is_const(src_reg->var_off)) ++ pred = is_branch_taken(dst_reg, src_reg->var_off.value, ++ opcode, is_jmp32); ++ if (pred >= 0) { ++ err = mark_chain_precision(env, 
insn->dst_reg); ++ if (BPF_SRC(insn->code) == BPF_X && !err) ++ err = mark_chain_precision(env, insn->src_reg); ++ if (err) ++ return err; ++ } ++ ++ if (pred == 1) { ++ /* Only follow the goto, ignore fall-through. If needed, push ++ * the fall-through branch for simulation under speculative ++ * execution. ++ */ ++ if (!env->allow_ptr_leaks && ++ !sanitize_speculative_path(env, insn, *insn_idx + 1, ++ *insn_idx)) ++ return -EFAULT; ++ *insn_idx += insn->off; ++ return 0; ++ } else if (pred == 0) { ++ /* Only follow the fall-through branch, since that's where the ++ * program will go. If needed, push the goto branch for ++ * simulation under speculative execution. ++ */ ++ if (!env->allow_ptr_leaks && ++ !sanitize_speculative_path(env, insn, ++ *insn_idx + insn->off + 1, ++ *insn_idx)) ++ return -EFAULT; ++ return 0; + } + +- other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx); ++ other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx, ++ false); + if (!other_branch) + return -EFAULT; ++ other_branch_regs = other_branch->frame[other_branch->curframe]->regs; + +- /* detect if R == 0 where R is returned value from bpf_map_lookup_elem() */ +- if (BPF_SRC(insn->code) == BPF_K && +- insn->imm == 0 && (opcode == BPF_JEQ || +- opcode == BPF_JNE) && +- regs[insn->dst_reg].type == PTR_TO_MAP_VALUE_OR_NULL) { +- if (opcode == BPF_JEQ) { +- /* next fallthrough insn can access memory via +- * this register +- */ +- regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; +- /* branch targer cannot access it, since reg == 0 */ +- other_branch->regs[insn->dst_reg].type = CONST_IMM; +- other_branch->regs[insn->dst_reg].imm = 0; +- } else { +- other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; +- regs[insn->dst_reg].type = CONST_IMM; +- regs[insn->dst_reg].imm = 0; +- } +- } else if (is_pointer_value(env, insn->dst_reg)) { +- verbose("R%d pointer comparison prohibited\n", insn->dst_reg); ++ /* detect if we are comparing against a constant value so we can 
adjust ++ * our min/max values for our dst register. ++ * this is only legit if both are scalars (or pointers to the same ++ * object, I suppose, but we don't support that right now), because ++ * otherwise the different base pointers mean the offsets aren't ++ * comparable. ++ */ ++ if (BPF_SRC(insn->code) == BPF_X) { ++ struct bpf_reg_state *src_reg = ®s[insn->src_reg]; ++ struct bpf_reg_state lo_reg0 = *dst_reg; ++ struct bpf_reg_state lo_reg1 = *src_reg; ++ struct bpf_reg_state *src_lo, *dst_lo; ++ ++ dst_lo = &lo_reg0; ++ src_lo = &lo_reg1; ++ coerce_reg_to_size(dst_lo, 4); ++ coerce_reg_to_size(src_lo, 4); ++ ++ if (dst_reg->type == SCALAR_VALUE && ++ src_reg->type == SCALAR_VALUE) { ++ if (tnum_is_const(src_reg->var_off) || ++ (is_jmp32 && tnum_is_const(src_lo->var_off))) ++ reg_set_min_max(&other_branch_regs[insn->dst_reg], ++ dst_reg, ++ is_jmp32 ++ ? src_lo->var_off.value ++ : src_reg->var_off.value, ++ opcode, is_jmp32); ++ else if (tnum_is_const(dst_reg->var_off) || ++ (is_jmp32 && tnum_is_const(dst_lo->var_off))) ++ reg_set_min_max_inv(&other_branch_regs[insn->src_reg], ++ src_reg, ++ is_jmp32 ++ ? dst_lo->var_off.value ++ : dst_reg->var_off.value, ++ opcode, is_jmp32); ++ else if (!is_jmp32 && ++ (opcode == BPF_JEQ || opcode == BPF_JNE)) ++ /* Comparing for equality, we can combine knowledge */ ++ reg_combine_min_max(&other_branch_regs[insn->src_reg], ++ &other_branch_regs[insn->dst_reg], ++ src_reg, dst_reg, opcode); ++ } ++ } else if (dst_reg->type == SCALAR_VALUE) { ++ reg_set_min_max(&other_branch_regs[insn->dst_reg], ++ dst_reg, insn->imm, opcode, is_jmp32); ++ } ++ ++ /* detect if R == 0 where R is returned from bpf_map_lookup_elem(). ++ * NOTE: these optimizations below are related with pointer comparison ++ * which will never be JMP32. 
++ */ ++ if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && ++ insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && ++ reg_type_may_be_null(dst_reg->type)) { ++ /* Mark all identical registers in each branch as either ++ * safe or unknown depending R == 0 or R != 0 conditional. ++ */ ++ mark_ptr_or_null_regs(this_branch, insn->dst_reg, ++ opcode == BPF_JNE); ++ mark_ptr_or_null_regs(other_branch, insn->dst_reg, ++ opcode == BPF_JEQ); ++ } else if (!try_match_pkt_pointers(insn, dst_reg, ®s[insn->src_reg], ++ this_branch, other_branch) && ++ is_pointer_value(env, insn->dst_reg)) { ++ verbose(env, "R%d pointer comparison prohibited\n", ++ insn->dst_reg); + return -EACCES; +- } else if (BPF_SRC(insn->code) == BPF_K && +- (opcode == BPF_JEQ || opcode == BPF_JNE)) { +- +- if (opcode == BPF_JEQ) { +- /* detect if (R == imm) goto +- * and in the target state recognize that R = imm +- */ +- other_branch->regs[insn->dst_reg].type = CONST_IMM; +- other_branch->regs[insn->dst_reg].imm = insn->imm; +- } else { +- /* detect if (R != imm) goto +- * and in the fall-through state recognize that R = imm +- */ +- regs[insn->dst_reg].type = CONST_IMM; +- regs[insn->dst_reg].imm = insn->imm; +- } + } +- if (log_level) +- print_verifier_state(env); ++ if (env->log.level & BPF_LOG_LEVEL) ++ print_verifier_state(env, this_branch->frame[this_branch->curframe]); + return 0; + } + +-/* return the map pointer stored inside BPF_LD_IMM64 instruction */ +-static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn) +-{ +- u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32; +- +- return (struct bpf_map *) (unsigned long) imm64; +-} +- + /* verify BPF_LD_IMM64 instruction */ +-static int check_ld_imm(struct verifier_env *env, struct bpf_insn *insn) ++static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn) + { +- struct reg_state *regs = env->cur_state.regs; ++ struct bpf_insn_aux_data *aux = cur_aux(env); ++ struct bpf_reg_state *regs = 
cur_regs(env); ++ struct bpf_map *map; + int err; + + if (BPF_SIZE(insn->code) != BPF_DW) { +- verbose("invalid BPF_LD_IMM insn\n"); ++ verbose(env, "invalid BPF_LD_IMM insn\n"); + return -EINVAL; + } + if (insn->off != 0) { +- verbose("BPF_LD_IMM64 uses reserved fields\n"); ++ verbose(env, "BPF_LD_IMM64 uses reserved fields\n"); + return -EINVAL; + } + +- err = check_reg_arg(regs, insn->dst_reg, DST_OP); ++ err = check_reg_arg(env, insn->dst_reg, DST_OP); + if (err) + return err; + +- if (insn->src_reg == 0) +- /* generic move 64-bit immediate into a register */ ++ if (insn->src_reg == 0) { ++ u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm; ++ ++ regs[insn->dst_reg].type = SCALAR_VALUE; ++ __mark_reg_known(®s[insn->dst_reg], imm); + return 0; ++ } + +- /* replace_map_fd_with_map_ptr() should have caught bad ld_imm64 */ +- BUG_ON(insn->src_reg != BPF_PSEUDO_MAP_FD); ++ map = env->used_maps[aux->map_index]; ++ mark_reg_known_zero(env, regs, insn->dst_reg); ++ regs[insn->dst_reg].map_ptr = map; ++ ++ if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) { ++ regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; ++ regs[insn->dst_reg].off = aux->map_off; ++ if (map_value_has_spin_lock(map)) ++ regs[insn->dst_reg].id = ++env->id_gen; ++ } else if (insn->src_reg == BPF_PSEUDO_MAP_FD) { ++ regs[insn->dst_reg].type = CONST_PTR_TO_MAP; ++ } else { ++ verbose(env, "bpf verifier is misconfigured\n"); ++ return -EINVAL; ++ } + +- regs[insn->dst_reg].type = CONST_PTR_TO_MAP; +- regs[insn->dst_reg].map_ptr = ld_imm64_to_map_ptr(insn); + return 0; + } + +@@ -1357,53 +6272,148 @@ static bool may_access_skb(enum bpf_prog + * Output: + * R0 - 8/16/32-bit skb data converted to cpu endianness + */ +-static int check_ld_abs(struct verifier_env *env, struct bpf_insn *insn) ++static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn) + { +- struct reg_state *regs = env->cur_state.regs; ++ struct bpf_reg_state *regs = cur_regs(env); ++ static const int ctx_reg = BPF_REG_6; + u8 
mode = BPF_MODE(insn->code); +- struct reg_state *reg; + int i, err; + + if (!may_access_skb(env->prog->type)) { +- verbose("BPF_LD_ABS|IND instructions not allowed for this program type\n"); ++ verbose(env, "BPF_LD_[ABS|IND] instructions not allowed for this program type\n"); ++ return -EINVAL; ++ } ++ ++ if (!env->ops->gen_ld_abs) { ++ verbose(env, "bpf verifier is misconfigured\n"); ++ return -EINVAL; ++ } ++ ++ if (env->subprog_cnt > 1) { ++ /* when program has LD_ABS insn JITs and interpreter assume ++ * that r1 == ctx == skb which is not the case for callees ++ * that can have arbitrary arguments. It's problematic ++ * for main prog as well since JITs would need to analyze ++ * all functions in order to make proper register save/restore ++ * decisions in the main prog. Hence disallow LD_ABS with calls ++ */ ++ verbose(env, "BPF_LD_[ABS|IND] instructions cannot be mixed with bpf-to-bpf calls\n"); + return -EINVAL; + } + + if (insn->dst_reg != BPF_REG_0 || insn->off != 0 || + BPF_SIZE(insn->code) == BPF_DW || + (mode == BPF_ABS && insn->src_reg != BPF_REG_0)) { +- verbose("BPF_LD_ABS uses reserved fields\n"); ++ verbose(env, "BPF_LD_[ABS|IND] uses reserved fields\n"); + return -EINVAL; + } + + /* check whether implicit source operand (register R6) is readable */ +- err = check_reg_arg(regs, BPF_REG_6, SRC_OP); ++ err = check_reg_arg(env, ctx_reg, SRC_OP); + if (err) + return err; + +- if (regs[BPF_REG_6].type != PTR_TO_CTX) { +- verbose("at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); ++ /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as ++ * gen_ld_abs() may terminate the program at runtime, leading to ++ * reference leak. 
++ */ ++ err = check_reference_leak(env); ++ if (err) { ++ verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n"); ++ return err; ++ } ++ ++ if (env->cur_state->active_spin_lock) { ++ verbose(env, "BPF_LD_[ABS|IND] cannot be used inside bpf_spin_lock-ed region\n"); ++ return -EINVAL; ++ } ++ ++ if (regs[ctx_reg].type != PTR_TO_CTX) { ++ verbose(env, ++ "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); + return -EINVAL; + } + + if (mode == BPF_IND) { + /* check explicit source operand */ +- err = check_reg_arg(regs, insn->src_reg, SRC_OP); ++ err = check_reg_arg(env, insn->src_reg, SRC_OP); + if (err) + return err; + } + ++ err = check_ctx_reg(env, ®s[ctx_reg], ctx_reg); ++ if (err < 0) ++ return err; ++ + /* reset caller saved regs to unreadable */ + for (i = 0; i < CALLER_SAVED_REGS; i++) { +- reg = regs + caller_saved[i]; +- reg->type = NOT_INIT; +- reg->imm = 0; ++ mark_reg_not_init(env, regs, caller_saved[i]); ++ check_reg_arg(env, caller_saved[i], DST_OP_NO_MARK); + } + + /* mark destination R0 register as readable, since it contains +- * the value fetched from the packet ++ * the value fetched from the packet. ++ * Already marked as written above. + */ +- regs[BPF_REG_0].type = UNKNOWN_VALUE; ++ mark_reg_unknown(env, regs, BPF_REG_0); ++ /* ld_abs load up to 32-bit skb data. 
*/ ++ regs[BPF_REG_0].subreg_def = env->insn_idx + 1; ++ return 0; ++} ++ ++static int check_return_code(struct bpf_verifier_env *env) ++{ ++ struct tnum enforce_attach_type_range = tnum_unknown; ++ struct bpf_reg_state *reg; ++ struct tnum range = tnum_range(0, 1); ++ ++ switch (env->prog->type) { ++ case BPF_PROG_TYPE_CGROUP_SOCK_ADDR: ++ if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG || ++ env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG) ++ range = tnum_range(1, 1); ++ break; ++ case BPF_PROG_TYPE_CGROUP_SKB: ++ if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) { ++ range = tnum_range(0, 3); ++ enforce_attach_type_range = tnum_range(2, 3); ++ } ++ break; ++ case BPF_PROG_TYPE_CGROUP_SOCK: ++ case BPF_PROG_TYPE_SOCK_OPS: ++ case BPF_PROG_TYPE_CGROUP_DEVICE: ++ case BPF_PROG_TYPE_CGROUP_SYSCTL: ++ case BPF_PROG_TYPE_CGROUP_SOCKOPT: ++ break; ++ default: ++ return 0; ++ } ++ ++ reg = cur_regs(env) + BPF_REG_0; ++ if (reg->type != SCALAR_VALUE) { ++ verbose(env, "At program exit the register R0 is not a known value (%s)\n", ++ reg_type_str[reg->type]); ++ return -EINVAL; ++ } ++ ++ if (!tnum_in(range, reg->var_off)) { ++ char tn_buf[48]; ++ ++ verbose(env, "At program exit the register R0 "); ++ if (!tnum_is_unknown(reg->var_off)) { ++ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); ++ verbose(env, "has value %s", tn_buf); ++ } else { ++ verbose(env, "has unknown scalar value"); ++ } ++ tnum_strn(tn_buf, sizeof(tn_buf), range); ++ verbose(env, " should have been in %s\n", tn_buf); ++ return -EINVAL; ++ } ++ ++ if (!tnum_is_unknown(enforce_attach_type_range) && ++ tnum_in(enforce_attach_type_range, reg->var_off)) ++ env->prog->enforce_expected_attach_type = 1; + return 0; + } + +@@ -1447,19 +6457,37 @@ enum { + BRANCH = 2, + }; + +-#define STATE_LIST_MARK ((struct verifier_state_list *) -1L) ++static u32 state_htab_size(struct bpf_verifier_env *env) ++{ ++ return env->prog->len; ++} ++ ++static struct bpf_verifier_state_list 
**explored_state( ++ struct bpf_verifier_env *env, ++ int idx) ++{ ++ struct bpf_verifier_state *cur = env->cur_state; ++ struct bpf_func_state *state = cur->frame[cur->curframe]; ++ ++ return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; ++} + +-static int *insn_stack; /* stack of insns to process */ +-static int cur_stack; /* current stack index */ +-static int *insn_state; ++static void init_explored_state(struct bpf_verifier_env *env, int idx) ++{ ++ env->insn_aux_data[idx].prune_point = true; ++} + + /* t, w, e - match pseudo-code above: + * t - index of current instruction + * w - next instruction + * e - edge + */ +-static int push_insn(int t, int w, int e, struct verifier_env *env) ++static int push_insn(int t, int w, int e, struct bpf_verifier_env *env, ++ bool loop_ok) + { ++ int *insn_stack = env->cfg.insn_stack; ++ int *insn_state = env->cfg.insn_state; ++ + if (e == FALLTHROUGH && insn_state[t] >= (DISCOVERED | FALLTHROUGH)) + return 0; + +@@ -1467,30 +6495,35 @@ static int push_insn(int t, int w, int e + return 0; + + if (w < 0 || w >= env->prog->len) { +- verbose("jump out of range from insn %d to %d\n", t, w); ++ verbose_linfo(env, t, "%d: ", t); ++ verbose(env, "jump out of range from insn %d to %d\n", t, w); + return -EINVAL; + } + + if (e == BRANCH) + /* mark branch target for state pruning */ +- env->explored_states[w] = STATE_LIST_MARK; ++ init_explored_state(env, w); + + if (insn_state[w] == 0) { + /* tree-edge */ + insn_state[t] = DISCOVERED | e; + insn_state[w] = DISCOVERED; +- if (cur_stack >= env->prog->len) ++ if (env->cfg.cur_stack >= env->prog->len) + return -E2BIG; +- insn_stack[cur_stack++] = w; ++ insn_stack[env->cfg.cur_stack++] = w; + return 1; + } else if ((insn_state[w] & 0xF0) == DISCOVERED) { +- verbose("back-edge from insn %d to %d\n", t, w); ++ if (loop_ok && env->allow_ptr_leaks) ++ return 0; ++ verbose_linfo(env, t, "%d: ", t); ++ verbose_linfo(env, w, "%d: ", w); ++ verbose(env, "back-edge from 
insn %d to %d\n", t, w); + return -EINVAL; + } else if (insn_state[w] == EXPLORED) { + /* forward- or cross-edge */ + insn_state[t] = DISCOVERED | e; + } else { +- verbose("insn state internal bug\n"); ++ verbose(env, "insn state internal bug\n"); + return -EFAULT; + } + return 0; +@@ -1499,43 +6532,56 @@ static int push_insn(int t, int w, int e + /* non-recursive depth-first-search to detect loops in BPF program + * loop == back-edge in directed graph + */ +-static int check_cfg(struct verifier_env *env) ++static int check_cfg(struct bpf_verifier_env *env) + { + struct bpf_insn *insns = env->prog->insnsi; + int insn_cnt = env->prog->len; ++ int *insn_stack, *insn_state; + int ret = 0; + int i, t; + +- insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); ++ insn_state = env->cfg.insn_state = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); + if (!insn_state) + return -ENOMEM; + +- insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); ++ insn_stack = env->cfg.insn_stack = kcalloc(insn_cnt, sizeof(int), GFP_KERNEL); + if (!insn_stack) { +- kfree(insn_state); ++ kvfree(insn_state); + return -ENOMEM; + } + + insn_state[0] = DISCOVERED; /* mark 1st insn as discovered */ + insn_stack[0] = 0; /* 0 is the first instruction */ +- cur_stack = 1; ++ env->cfg.cur_stack = 1; + + peek_stack: +- if (cur_stack == 0) ++ if (env->cfg.cur_stack == 0) + goto check_state; +- t = insn_stack[cur_stack - 1]; ++ t = insn_stack[env->cfg.cur_stack - 1]; + +- if (BPF_CLASS(insns[t].code) == BPF_JMP) { ++ if (BPF_CLASS(insns[t].code) == BPF_JMP || ++ BPF_CLASS(insns[t].code) == BPF_JMP32) { + u8 opcode = BPF_OP(insns[t].code); + + if (opcode == BPF_EXIT) { + goto mark_explored; + } else if (opcode == BPF_CALL) { +- ret = push_insn(t, t + 1, FALLTHROUGH, env); ++ ret = push_insn(t, t + 1, FALLTHROUGH, env, false); + if (ret == 1) + goto peek_stack; + else if (ret < 0) + goto err_free; ++ if (t + 1 < insn_cnt) ++ init_explored_state(env, t + 1); ++ if (insns[t].src_reg == BPF_PSEUDO_CALL) { ++ 
init_explored_state(env, t); ++ ret = push_insn(t, t + insns[t].imm + 1, BRANCH, ++ env, false); ++ if (ret == 1) ++ goto peek_stack; ++ else if (ret < 0) ++ goto err_free; ++ } + } else if (opcode == BPF_JA) { + if (BPF_SRC(insns[t].code) != BPF_K) { + ret = -EINVAL; +@@ -1543,25 +6589,31 @@ peek_stack: + } + /* unconditional jump with single edge */ + ret = push_insn(t, t + insns[t].off + 1, +- FALLTHROUGH, env); ++ FALLTHROUGH, env, true); + if (ret == 1) + goto peek_stack; + else if (ret < 0) + goto err_free; ++ /* unconditional jmp is not a good pruning point, ++ * but it's marked, since backtracking needs ++ * to record jmp history in is_state_visited(). ++ */ ++ init_explored_state(env, t + insns[t].off + 1); + /* tell verifier to check for equivalent states + * after every call and jump + */ + if (t + 1 < insn_cnt) +- env->explored_states[t + 1] = STATE_LIST_MARK; ++ init_explored_state(env, t + 1); + } else { + /* conditional jump with two edges */ +- ret = push_insn(t, t + 1, FALLTHROUGH, env); ++ init_explored_state(env, t); ++ ret = push_insn(t, t + 1, FALLTHROUGH, env, true); + if (ret == 1) + goto peek_stack; + else if (ret < 0) + goto err_free; + +- ret = push_insn(t, t + insns[t].off + 1, BRANCH, env); ++ ret = push_insn(t, t + insns[t].off + 1, BRANCH, env, true); + if (ret == 1) + goto peek_stack; + else if (ret < 0) +@@ -1571,7 +6623,7 @@ peek_stack: + /* all other non-branch instructions with single + * fall-through edge + */ +- ret = push_insn(t, t + 1, FALLTHROUGH, env); ++ ret = push_insn(t, t + 1, FALLTHROUGH, env, false); + if (ret == 1) + goto peek_stack; + else if (ret < 0) +@@ -1580,8 +6632,8 @@ peek_stack: + + mark_explored: + insn_state[t] = EXPLORED; +- if (cur_stack-- <= 0) { +- verbose("pop stack internal bug\n"); ++ if (env->cfg.cur_stack-- <= 0) { ++ verbose(env, "pop stack internal bug\n"); + ret = -EFAULT; + goto err_free; + } +@@ -1590,7 +6642,7 @@ mark_explored: + check_state: + for (i = 0; i < insn_cnt; i++) { + if 
(insn_state[i] != EXPLORED) { +- verbose("unreachable insn %d\n", i); ++ verbose(env, "unreachable insn %d\n", i); + ret = -EINVAL; + goto err_free; + } +@@ -1598,11 +6650,616 @@ check_state: + ret = 0; /* cfg looks good */ + + err_free: +- kfree(insn_state); +- kfree(insn_stack); ++ kvfree(insn_state); ++ kvfree(insn_stack); ++ env->cfg.insn_state = env->cfg.insn_stack = NULL; + return ret; + } + ++/* The minimum supported BTF func info size */ ++#define MIN_BPF_FUNCINFO_SIZE 8 ++#define MAX_FUNCINFO_REC_SIZE 252 ++ ++static int check_btf_func(struct bpf_verifier_env *env, ++ const union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ u32 i, nfuncs, urec_size, min_size; ++ u32 krec_size = sizeof(struct bpf_func_info); ++ struct bpf_func_info *krecord; ++ const struct btf_type *type; ++ struct bpf_prog *prog; ++ const struct btf *btf; ++ void __user *urecord; ++ u32 prev_offset = 0; ++ int ret = 0; ++ ++ nfuncs = attr->func_info_cnt; ++ if (!nfuncs) ++ return 0; ++ ++ if (nfuncs != env->subprog_cnt) { ++ verbose(env, "number of funcs in func_info doesn't match number of subprogs\n"); ++ return -EINVAL; ++ } ++ ++ urec_size = attr->func_info_rec_size; ++ if (urec_size < MIN_BPF_FUNCINFO_SIZE || ++ urec_size > MAX_FUNCINFO_REC_SIZE || ++ urec_size % sizeof(u32)) { ++ verbose(env, "invalid func info rec size %u\n", urec_size); ++ return -EINVAL; ++ } ++ ++ prog = env->prog; ++ btf = prog->aux->btf; ++ ++ urecord = u64_to_user_ptr(attr->func_info); ++ min_size = min_t(u32, krec_size, urec_size); ++ ++ krecord = kcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN); ++ if (!krecord) ++ return -ENOMEM; ++ ++ for (i = 0; i < nfuncs; i++) { ++ ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size); ++ if (ret) { ++ if (ret == -E2BIG) { ++ verbose(env, "nonzero tailing record in func info"); ++ /* set the size kernel expects so loader can zero ++ * out the rest of the record. 
++ */ ++ if (put_user(min_size, &uattr->func_info_rec_size)) ++ ret = -EFAULT; ++ } ++ goto err_free; ++ } ++ ++ if (copy_from_user(&krecord[i], urecord, min_size)) { ++ ret = -EFAULT; ++ goto err_free; ++ } ++ ++ /* check insn_off */ ++ if (i == 0) { ++ if (krecord[i].insn_off) { ++ verbose(env, ++ "nonzero insn_off %u for the first func info record", ++ krecord[i].insn_off); ++ ret = -EINVAL; ++ goto err_free; ++ } ++ } else if (krecord[i].insn_off <= prev_offset) { ++ verbose(env, ++ "same or smaller insn offset (%u) than previous func info record (%u)", ++ krecord[i].insn_off, prev_offset); ++ ret = -EINVAL; ++ goto err_free; ++ } ++ ++ if (env->subprog_info[i].start != krecord[i].insn_off) { ++ verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n"); ++ ret = -EINVAL; ++ goto err_free; ++ } ++ ++ /* check type_id */ ++ type = btf_type_by_id(btf, krecord[i].type_id); ++ if (!type || BTF_INFO_KIND(type->info) != BTF_KIND_FUNC) { ++ verbose(env, "invalid type id %d in func info", ++ krecord[i].type_id); ++ ret = -EINVAL; ++ goto err_free; ++ } ++ ++ prev_offset = krecord[i].insn_off; ++ urecord += urec_size; ++ } ++ ++ prog->aux->func_info = krecord; ++ prog->aux->func_info_cnt = nfuncs; ++ return 0; ++ ++err_free: ++ kvfree(krecord); ++ return ret; ++} ++ ++static void adjust_btf_func(struct bpf_verifier_env *env) ++{ ++ int i; ++ ++ if (!env->prog->aux->func_info) ++ return; ++ ++ for (i = 0; i < env->subprog_cnt; i++) ++ env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start; ++} ++ ++#define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \ ++ sizeof(((struct bpf_line_info *)(0))->line_col)) ++#define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE ++ ++static int check_btf_line(struct bpf_verifier_env *env, ++ const union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0; ++ struct bpf_subprog_info *sub; ++ struct bpf_line_info 
*linfo; ++ struct bpf_prog *prog; ++ const struct btf *btf; ++ void __user *ulinfo; ++ int err; ++ ++ nr_linfo = attr->line_info_cnt; ++ if (!nr_linfo) ++ return 0; ++ ++ rec_size = attr->line_info_rec_size; ++ if (rec_size < MIN_BPF_LINEINFO_SIZE || ++ rec_size > MAX_LINEINFO_REC_SIZE || ++ rec_size & (sizeof(u32) - 1)) ++ return -EINVAL; ++ ++ /* Need to zero it in case the userspace may ++ * pass in a smaller bpf_line_info object. ++ */ ++ linfo = kcalloc(nr_linfo, sizeof(struct bpf_line_info), ++ GFP_KERNEL | __GFP_NOWARN); ++ if (!linfo) ++ return -ENOMEM; ++ ++ prog = env->prog; ++ btf = prog->aux->btf; ++ ++ s = 0; ++ sub = env->subprog_info; ++ ulinfo = u64_to_user_ptr(attr->line_info); ++ expected_size = sizeof(struct bpf_line_info); ++ ncopy = min_t(u32, expected_size, rec_size); ++ for (i = 0; i < nr_linfo; i++) { ++ err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size); ++ if (err) { ++ if (err == -E2BIG) { ++ verbose(env, "nonzero tailing record in line_info"); ++ if (put_user(expected_size, ++ &uattr->line_info_rec_size)) ++ err = -EFAULT; ++ } ++ goto err_free; ++ } ++ ++ if (copy_from_user(&linfo[i], ulinfo, ncopy)) { ++ err = -EFAULT; ++ goto err_free; ++ } ++ ++ /* ++ * Check insn_off to ensure ++ * 1) strictly increasing AND ++ * 2) bounded by prog->len ++ * ++ * The linfo[0].insn_off == 0 check logically falls into ++ * the later "missing bpf_line_info for func..." case ++ * because the first linfo[0].insn_off must be the ++ * first sub also and the first sub must have ++ * subprog_info[0].start == 0. 
++ */ ++ if ((i && linfo[i].insn_off <= prev_offset) || ++ linfo[i].insn_off >= prog->len) { ++ verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n", ++ i, linfo[i].insn_off, prev_offset, ++ prog->len); ++ err = -EINVAL; ++ goto err_free; ++ } ++ ++ if (!prog->insnsi[linfo[i].insn_off].code) { ++ verbose(env, ++ "Invalid insn code at line_info[%u].insn_off\n", ++ i); ++ err = -EINVAL; ++ goto err_free; ++ } ++ ++ if (!btf_name_by_offset(btf, linfo[i].line_off) || ++ !btf_name_by_offset(btf, linfo[i].file_name_off)) { ++ verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i); ++ err = -EINVAL; ++ goto err_free; ++ } ++ ++ if (s != env->subprog_cnt) { ++ if (linfo[i].insn_off == sub[s].start) { ++ sub[s].linfo_idx = i; ++ s++; ++ } else if (sub[s].start < linfo[i].insn_off) { ++ verbose(env, "missing bpf_line_info for func#%u\n", s); ++ err = -EINVAL; ++ goto err_free; ++ } ++ } ++ ++ prev_offset = linfo[i].insn_off; ++ ulinfo += rec_size; ++ } ++ ++ if (s != env->subprog_cnt) { ++ verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n", ++ env->subprog_cnt - s, s); ++ err = -EINVAL; ++ goto err_free; ++ } ++ ++ prog->aux->linfo = linfo; ++ prog->aux->nr_linfo = nr_linfo; ++ ++ return 0; ++ ++err_free: ++ kvfree(linfo); ++ return err; ++} ++ ++static int check_btf_info(struct bpf_verifier_env *env, ++ const union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ struct btf *btf; ++ int err; ++ ++ if (!attr->func_info_cnt && !attr->line_info_cnt) ++ return 0; ++ ++ btf = btf_get_by_fd(attr->prog_btf_fd); ++ if (IS_ERR(btf)) ++ return PTR_ERR(btf); ++ env->prog->aux->btf = btf; ++ ++ err = check_btf_func(env, attr, uattr); ++ if (err) ++ return err; ++ ++ err = check_btf_line(env, attr, uattr); ++ if (err) ++ return err; ++ ++ return 0; ++} ++ ++/* check %cur's range satisfies %old's */ ++static bool range_within(struct bpf_reg_state *old, ++ struct bpf_reg_state *cur) ++{ ++ return old->umin_value <= 
cur->umin_value && ++ old->umax_value >= cur->umax_value && ++ old->smin_value <= cur->smin_value && ++ old->smax_value >= cur->smax_value; ++} ++ ++/* Maximum number of register states that can exist at once */ ++#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) ++struct idpair { ++ u32 old; ++ u32 cur; ++}; ++ ++/* If in the old state two registers had the same id, then they need to have ++ * the same id in the new state as well. But that id could be different from ++ * the old state, so we need to track the mapping from old to new ids. ++ * Once we have seen that, say, a reg with old id 5 had new id 9, any subsequent ++ * regs with old id 5 must also have new id 9 for the new state to be safe. But ++ * regs with a different old id could still have new id 9, we don't care about ++ * that. ++ * So we look through our idmap to see if this old id has been seen before. If ++ * so, we require the new id to match; otherwise, we add the id pair to the map. ++ */ ++static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < ID_MAP_SIZE; i++) { ++ if (!idmap[i].old) { ++ /* Reached an empty slot; haven't seen this id before */ ++ idmap[i].old = old_id; ++ idmap[i].cur = cur_id; ++ return true; ++ } ++ if (idmap[i].old == old_id) ++ return idmap[i].cur == cur_id; ++ } ++ /* We ran out of idmap slots, which should be impossible */ ++ WARN_ON_ONCE(1); ++ return false; ++} ++ ++static void clean_func_state(struct bpf_verifier_env *env, ++ struct bpf_func_state *st) ++{ ++ enum bpf_reg_liveness live; ++ int i, j; ++ ++ for (i = 0; i < BPF_REG_FP; i++) { ++ live = st->regs[i].live; ++ /* liveness must not touch this register anymore */ ++ st->regs[i].live |= REG_LIVE_DONE; ++ if (!(live & REG_LIVE_READ)) ++ /* since the register is unused, clear its state ++ * to make further comparison simpler ++ */ ++ __mark_reg_not_init(env, &st->regs[i]); ++ } ++ ++ for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { 
++ live = st->stack[i].spilled_ptr.live; ++ /* liveness must not touch this stack slot anymore */ ++ st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; ++ if (!(live & REG_LIVE_READ)) { ++ __mark_reg_not_init(env, &st->stack[i].spilled_ptr); ++ for (j = 0; j < BPF_REG_SIZE; j++) ++ st->stack[i].slot_type[j] = STACK_INVALID; ++ } ++ } ++} ++ ++static void clean_verifier_state(struct bpf_verifier_env *env, ++ struct bpf_verifier_state *st) ++{ ++ int i; ++ ++ if (st->frame[0]->regs[0].live & REG_LIVE_DONE) ++ /* all regs in this state in all frames were already marked */ ++ return; ++ ++ for (i = 0; i <= st->curframe; i++) ++ clean_func_state(env, st->frame[i]); ++} ++ ++/* the parentage chains form a tree. ++ * the verifier states are added to state lists at given insn and ++ * pushed into state stack for future exploration. ++ * when the verifier reaches bpf_exit insn some of the verifer states ++ * stored in the state lists have their final liveness state already, ++ * but a lot of states will get revised from liveness point of view when ++ * the verifier explores other branches. ++ * Example: ++ * 1: r0 = 1 ++ * 2: if r1 == 100 goto pc+1 ++ * 3: r0 = 2 ++ * 4: exit ++ * when the verifier reaches exit insn the register r0 in the state list of ++ * insn 2 will be seen as !REG_LIVE_READ. Then the verifier pops the other_branch ++ * of insn 2 and goes exploring further. At the insn 4 it will walk the ++ * parentage chain from insn 4 into insn 2 and will mark r0 as REG_LIVE_READ. ++ * ++ * Since the verifier pushes the branch states as it sees them while exploring ++ * the program the condition of walking the branch instruction for the second ++ * time means that all states below this branch were already explored and ++ * their final liveness markes are already propagated. 
++ * Hence when the verifier completes the search of state list in is_state_visited() ++ * we can call this clean_live_states() function to mark all liveness states ++ * as REG_LIVE_DONE to indicate that 'parent' pointers of 'struct bpf_reg_state' ++ * will not be used. ++ * This function also clears the registers and stack for states that !READ ++ * to simplify state merging. ++ * ++ * Important note here that walking the same branch instruction in the callee ++ * doesn't meant that the states are DONE. The verifier has to compare ++ * the callsites ++ */ ++static void clean_live_states(struct bpf_verifier_env *env, int insn, ++ struct bpf_verifier_state *cur) ++{ ++ struct bpf_verifier_state_list *sl; ++ int i; ++ ++ sl = *explored_state(env, insn); ++ while (sl) { ++ if (sl->state.branches) ++ goto next; ++ if (sl->state.insn_idx != insn || ++ sl->state.curframe != cur->curframe) ++ goto next; ++ for (i = 0; i <= cur->curframe; i++) ++ if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) ++ goto next; ++ clean_verifier_state(env, &sl->state); ++next: ++ sl = sl->next; ++ } ++} ++ ++/* Returns true if (rold safe implies rcur safe) */ ++static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, ++ struct idpair *idmap) ++{ ++ bool equal; ++ ++ if (!(rold->live & REG_LIVE_READ)) ++ /* explored state didn't use this */ ++ return true; ++ ++ equal = memcmp(rold, rcur, offsetof(struct bpf_reg_state, parent)) == 0; ++ ++ if (rold->type == PTR_TO_STACK) ++ /* two stack pointers are equal only if they're pointing to ++ * the same stack frame, since fp-8 in foo != fp-8 in bar ++ */ ++ return equal && rold->frameno == rcur->frameno; ++ ++ if (equal) ++ return true; ++ ++ if (rold->type == NOT_INIT) ++ /* explored state can't have used this */ ++ return true; ++ if (rcur->type == NOT_INIT) ++ return false; ++ switch (rold->type) { ++ case SCALAR_VALUE: ++ if (rcur->type == SCALAR_VALUE) { ++ if (!rold->precise && !rcur->precise) ++ return true; ++ 
/* new val must satisfy old val knowledge */ ++ return range_within(rold, rcur) && ++ tnum_in(rold->var_off, rcur->var_off); ++ } else { ++ /* We're trying to use a pointer in place of a scalar. ++ * Even if the scalar was unbounded, this could lead to ++ * pointer leaks because scalars are allowed to leak ++ * while pointers are not. We could make this safe in ++ * special cases if root is calling us, but it's ++ * probably not worth the hassle. ++ */ ++ return false; ++ } ++ case PTR_TO_MAP_VALUE: ++ /* If the new min/max/var_off satisfy the old ones and ++ * everything else matches, we are OK. ++ * 'id' is not compared, since it's only used for maps with ++ * bpf_spin_lock inside map element and in such cases if ++ * the rest of the prog is valid for one map element then ++ * it's valid for all map elements regardless of the key ++ * used in bpf_map_lookup() ++ */ ++ return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && ++ range_within(rold, rcur) && ++ tnum_in(rold->var_off, rcur->var_off); ++ case PTR_TO_MAP_VALUE_OR_NULL: ++ /* a PTR_TO_MAP_VALUE could be safe to use as a ++ * PTR_TO_MAP_VALUE_OR_NULL into the same map. ++ * However, if the old PTR_TO_MAP_VALUE_OR_NULL then got NULL- ++ * checked, doing so could have affected others with the same ++ * id, and we can't check for that because we lost the id when ++ * we converted to a PTR_TO_MAP_VALUE. ++ */ ++ if (rcur->type != PTR_TO_MAP_VALUE_OR_NULL) ++ return false; ++ if (memcmp(rold, rcur, offsetof(struct bpf_reg_state, id))) ++ return false; ++ /* Check our ids match any regs they're supposed to */ ++ return check_ids(rold->id, rcur->id, idmap); ++ case PTR_TO_PACKET_META: ++ case PTR_TO_PACKET: ++ if (rcur->type != rold->type) ++ return false; ++ /* We must have at least as much range as the old ptr ++ * did, so that any accesses which were safe before are ++ * still safe. 
This is true even if old range < old off, ++ * since someone could have accessed through (ptr - k), or ++ * even done ptr -= k in a register, to get a safe access. ++ */ ++ if (rold->range > rcur->range) ++ return false; ++ /* If the offsets don't match, we can't trust our alignment; ++ * nor can we be sure that we won't fall out of range. ++ */ ++ if (rold->off != rcur->off) ++ return false; ++ /* id relations must be preserved */ ++ if (rold->id && !check_ids(rold->id, rcur->id, idmap)) ++ return false; ++ /* new val must satisfy old val knowledge */ ++ return range_within(rold, rcur) && ++ tnum_in(rold->var_off, rcur->var_off); ++ case PTR_TO_CTX: ++ case CONST_PTR_TO_MAP: ++ case PTR_TO_PACKET_END: ++ case PTR_TO_FLOW_KEYS: ++ case PTR_TO_SOCKET: ++ case PTR_TO_SOCKET_OR_NULL: ++ case PTR_TO_SOCK_COMMON: ++ case PTR_TO_SOCK_COMMON_OR_NULL: ++ case PTR_TO_TCP_SOCK: ++ case PTR_TO_TCP_SOCK_OR_NULL: ++ case PTR_TO_XDP_SOCK: ++ /* Only valid matches are exact, which memcmp() above ++ * would have accepted ++ */ ++ default: ++ /* Don't know what's going on, just say it's not safe */ ++ return false; ++ } ++ ++ /* Shouldn't get here; if we do, say it's not safe */ ++ WARN_ON_ONCE(1); ++ return false; ++} ++ ++static bool stacksafe(struct bpf_func_state *old, ++ struct bpf_func_state *cur, ++ struct idpair *idmap) ++{ ++ int i, spi; ++ ++ /* walk slots of the explored stack and ignore any additional ++ * slots in the current stack, since explored(safe) state ++ * didn't use them ++ */ ++ for (i = 0; i < old->allocated_stack; i++) { ++ spi = i / BPF_REG_SIZE; ++ ++ if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { ++ i += BPF_REG_SIZE - 1; ++ /* explored state didn't use this */ ++ continue; ++ } ++ ++ if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID) ++ continue; ++ ++ /* explored stack has more populated slots than current stack ++ * and these slots were used ++ */ ++ if (i >= cur->allocated_stack) ++ return false; ++ ++ /* if old state was 
safe with misc data in the stack ++ * it will be safe with zero-initialized stack. ++ * The opposite is not true ++ */ ++ if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_MISC && ++ cur->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_ZERO) ++ continue; ++ if (old->stack[spi].slot_type[i % BPF_REG_SIZE] != ++ cur->stack[spi].slot_type[i % BPF_REG_SIZE]) ++ /* Ex: old explored (safe) state has STACK_SPILL in ++ * this stack slot, but current has has STACK_MISC -> ++ * this verifier states are not equivalent, ++ * return false to continue verification of this path ++ */ ++ return false; ++ if (i % BPF_REG_SIZE) ++ continue; ++ if (old->stack[spi].slot_type[0] != STACK_SPILL) ++ continue; ++ if (!regsafe(&old->stack[spi].spilled_ptr, ++ &cur->stack[spi].spilled_ptr, ++ idmap)) ++ /* when explored and current stack slot are both storing ++ * spilled registers, check that stored pointers types ++ * are the same as well. ++ * Ex: explored safe path could have stored ++ * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -8} ++ * but current path has stored: ++ * (bpf_reg_state) {.type = PTR_TO_STACK, .off = -16} ++ * such verifier states are not equivalent. 
++ * return false to continue verification of this path ++ */ ++ return false; ++ } ++ return true; ++} ++ ++static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur) ++{ ++ if (old->acquired_refs != cur->acquired_refs) ++ return false; ++ return !memcmp(old->refs, cur->refs, ++ sizeof(*old->refs) * old->acquired_refs); ++} ++ + /* compare two verifier states + * + * all states stored in state_list are known to be valid, since +@@ -1629,165 +7286,562 @@ err_free: + * whereas register type in current state is meaningful, it means that + * the current state will reach 'bpf_exit' instruction safely + */ +-static bool states_equal(struct verifier_state *old, struct verifier_state *cur) ++static bool func_states_equal(struct bpf_func_state *old, ++ struct bpf_func_state *cur) + { ++ struct idpair *idmap; ++ bool ret = false; + int i; + ++ idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL); ++ /* If we failed to allocate the idmap, just say it's not safe */ ++ if (!idmap) ++ return false; ++ + for (i = 0; i < MAX_BPF_REG; i++) { +- if (memcmp(&old->regs[i], &cur->regs[i], +- sizeof(old->regs[0])) != 0) { +- if (old->regs[i].type == NOT_INIT || +- (old->regs[i].type == UNKNOWN_VALUE && +- cur->regs[i].type != NOT_INIT)) +- continue; ++ if (!regsafe(&old->regs[i], &cur->regs[i], idmap)) ++ goto out_free; ++ } ++ ++ if (!stacksafe(old, cur, idmap)) ++ goto out_free; ++ ++ if (!refsafe(old, cur)) ++ goto out_free; ++ ret = true; ++out_free: ++ kfree(idmap); ++ return ret; ++} ++ ++static bool states_equal(struct bpf_verifier_env *env, ++ struct bpf_verifier_state *old, ++ struct bpf_verifier_state *cur) ++{ ++ int i; ++ ++ if (old->curframe != cur->curframe) ++ return false; ++ ++ /* Verification state from speculative execution simulation ++ * must never prune a non-speculative execution one. 
++ */ ++ if (old->speculative && !cur->speculative) ++ return false; ++ ++ if (old->active_spin_lock != cur->active_spin_lock) ++ return false; ++ ++ /* for states to be equal callsites have to be the same ++ * and all frame states need to be equivalent ++ */ ++ for (i = 0; i <= old->curframe; i++) { ++ if (old->frame[i]->callsite != cur->frame[i]->callsite) ++ return false; ++ if (!func_states_equal(old->frame[i], cur->frame[i])) + return false; ++ } ++ return true; ++} ++ ++/* Return 0 if no propagation happened. Return negative error code if error ++ * happened. Otherwise, return the propagated bit. ++ */ ++static int propagate_liveness_reg(struct bpf_verifier_env *env, ++ struct bpf_reg_state *reg, ++ struct bpf_reg_state *parent_reg) ++{ ++ u8 parent_flag = parent_reg->live & REG_LIVE_READ; ++ u8 flag = reg->live & REG_LIVE_READ; ++ int err; ++ ++ /* When comes here, read flags of PARENT_REG or REG could be any of ++ * REG_LIVE_READ64, REG_LIVE_READ32, REG_LIVE_NONE. There is no need ++ * of propagation if PARENT_REG has strongest REG_LIVE_READ64. ++ */ ++ if (parent_flag == REG_LIVE_READ64 || ++ /* Or if there is no read flag from REG. */ ++ !flag || ++ /* Or if the read flag from REG is the same as PARENT_REG. */ ++ parent_flag == flag) ++ return 0; ++ ++ err = mark_reg_read(env, reg, parent_reg, flag); ++ if (err) ++ return err; ++ ++ return flag; ++} ++ ++/* A write screens off any subsequent reads; but write marks come from the ++ * straight-line code between a state and its parent. When we arrive at an ++ * equivalent state (jump target or such) we didn't arrive by the straight-line ++ * code, so read marks in the state must propagate to the parent regardless ++ * of the state's write marks. That's what 'parent == state->parent' comparison ++ * in mark_reg_read() is for. 
++ */ ++static int propagate_liveness(struct bpf_verifier_env *env, ++ const struct bpf_verifier_state *vstate, ++ struct bpf_verifier_state *vparent) ++{ ++ struct bpf_reg_state *state_reg, *parent_reg; ++ struct bpf_func_state *state, *parent; ++ int i, frame, err = 0; ++ ++ if (vparent->curframe != vstate->curframe) { ++ WARN(1, "propagate_live: parent frame %d current frame %d\n", ++ vparent->curframe, vstate->curframe); ++ return -EFAULT; ++ } ++ /* Propagate read liveness of registers... */ ++ BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); ++ for (frame = 0; frame <= vstate->curframe; frame++) { ++ parent = vparent->frame[frame]; ++ state = vstate->frame[frame]; ++ parent_reg = parent->regs; ++ state_reg = state->regs; ++ /* We don't need to worry about FP liveness, it's read-only */ ++ for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) { ++ err = propagate_liveness_reg(env, &state_reg[i], ++ &parent_reg[i]); ++ if (err < 0) ++ return err; ++ if (err == REG_LIVE_READ64) ++ mark_insn_zext(env, &parent_reg[i]); ++ } ++ ++ /* Propagate stack slots. 
*/ ++ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE && ++ i < parent->allocated_stack / BPF_REG_SIZE; i++) { ++ parent_reg = &parent->stack[i].spilled_ptr; ++ state_reg = &state->stack[i].spilled_ptr; ++ err = propagate_liveness_reg(env, state_reg, ++ parent_reg); ++ if (err < 0) ++ return err; + } + } ++ return 0; ++} + +- for (i = 0; i < MAX_BPF_STACK; i++) { +- if (old->stack_slot_type[i] == STACK_INVALID) ++/* find precise scalars in the previous equivalent state and ++ * propagate them into the current state ++ */ ++static int propagate_precision(struct bpf_verifier_env *env, ++ const struct bpf_verifier_state *old) ++{ ++ struct bpf_reg_state *state_reg; ++ struct bpf_func_state *state; ++ int i, err = 0; ++ ++ state = old->frame[old->curframe]; ++ state_reg = state->regs; ++ for (i = 0; i < BPF_REG_FP; i++, state_reg++) { ++ if (state_reg->type != SCALAR_VALUE || ++ !state_reg->precise) + continue; +- if (old->stack_slot_type[i] != cur->stack_slot_type[i]) +- /* Ex: old explored (safe) state has STACK_SPILL in +- * this stack slot, but current has has STACK_MISC -> +- * this verifier states are not equivalent, +- * return false to continue verification of this path +- */ +- return false; +- if (i % BPF_REG_SIZE) ++ if (env->log.level & BPF_LOG_LEVEL2) ++ verbose(env, "propagating r%d\n", i); ++ err = mark_chain_precision(env, i); ++ if (err < 0) ++ return err; ++ } ++ ++ for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) { ++ if (state->stack[i].slot_type[0] != STACK_SPILL) + continue; +- if (memcmp(&old->spilled_regs[i / BPF_REG_SIZE], +- &cur->spilled_regs[i / BPF_REG_SIZE], +- sizeof(old->spilled_regs[0]))) +- /* when explored and current stack slot types are +- * the same, check that stored pointers types +- * are the same as well. 
+- * Ex: explored safe path could have stored +- * (struct reg_state) {.type = PTR_TO_STACK, .imm = -8} +- * but current path has stored: +- * (struct reg_state) {.type = PTR_TO_STACK, .imm = -16} +- * such verifier states are not equivalent. +- * return false to continue verification of this path +- */ +- return false; +- else ++ state_reg = &state->stack[i].spilled_ptr; ++ if (state_reg->type != SCALAR_VALUE || ++ !state_reg->precise) + continue; ++ if (env->log.level & BPF_LOG_LEVEL2) ++ verbose(env, "propagating fp%d\n", ++ (-i - 1) * BPF_REG_SIZE); ++ err = mark_chain_precision_stack(env, i); ++ if (err < 0) ++ return err; + } ++ return 0; ++} ++ ++static bool states_maybe_looping(struct bpf_verifier_state *old, ++ struct bpf_verifier_state *cur) ++{ ++ struct bpf_func_state *fold, *fcur; ++ int i, fr = cur->curframe; ++ ++ if (old->curframe != fr) ++ return false; ++ ++ fold = old->frame[fr]; ++ fcur = cur->frame[fr]; ++ for (i = 0; i < MAX_BPF_REG; i++) ++ if (memcmp(&fold->regs[i], &fcur->regs[i], ++ offsetof(struct bpf_reg_state, parent))) ++ return false; + return true; + } + +-static int is_state_visited(struct verifier_env *env, int insn_idx) ++ ++static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) + { +- struct verifier_state_list *new_sl; +- struct verifier_state_list *sl; ++ struct bpf_verifier_state_list *new_sl; ++ struct bpf_verifier_state_list *sl, **pprev; ++ struct bpf_verifier_state *cur = env->cur_state, *new; ++ int i, j, err, states_cnt = 0; ++ bool add_new_state = env->test_state_freq ? 
true : false; + +- sl = env->explored_states[insn_idx]; +- if (!sl) ++ cur->last_insn_idx = env->prev_insn_idx; ++ if (!env->insn_aux_data[insn_idx].prune_point) + /* this 'insn_idx' instruction wasn't marked, so we will not + * be doing state search here + */ + return 0; + +- while (sl != STATE_LIST_MARK) { +- if (states_equal(&sl->state, &env->cur_state)) ++ /* bpf progs typically have pruning point every 4 instructions ++ * http://vger.kernel.org/bpfconf2019.html#session-1 ++ * Do not add new state for future pruning if the verifier hasn't seen ++ * at least 2 jumps and at least 8 instructions. ++ * This heuristics helps decrease 'total_states' and 'peak_states' metric. ++ * In tests that amounts to up to 50% reduction into total verifier ++ * memory consumption and 20% verifier time speedup. ++ */ ++ if (env->jmps_processed - env->prev_jmps_processed >= 2 && ++ env->insn_processed - env->prev_insn_processed >= 8) ++ add_new_state = true; ++ ++ pprev = explored_state(env, insn_idx); ++ sl = *pprev; ++ ++ clean_live_states(env, insn_idx, cur); ++ ++ while (sl) { ++ states_cnt++; ++ if (sl->state.insn_idx != insn_idx) ++ goto next; ++ if (sl->state.branches) { ++ if (states_maybe_looping(&sl->state, cur) && ++ states_equal(env, &sl->state, cur)) { ++ verbose_linfo(env, insn_idx, "; "); ++ verbose(env, "infinite loop detected at insn %d\n", insn_idx); ++ return -EINVAL; ++ } ++ /* if the verifier is processing a loop, avoid adding new state ++ * too often, since different loop iterations have distinct ++ * states and may not help future pruning. ++ * This threshold shouldn't be too low to make sure that ++ * a loop with large bound will be rejected quickly. ++ * The most abusive loop will be: ++ * r1 += 1 ++ * if r1 < 1000000 goto pc-2 ++ * 1M insn_procssed limit / 100 == 10k peak states. ++ * This threshold shouldn't be too high either, since states ++ * at the end of the loop are likely to be useful in pruning. 
++ */ ++ if (env->jmps_processed - env->prev_jmps_processed < 20 && ++ env->insn_processed - env->prev_insn_processed < 100) ++ add_new_state = false; ++ goto miss; ++ } ++ if (states_equal(env, &sl->state, cur)) { ++ sl->hit_cnt++; + /* reached equivalent register/stack state, +- * prune the search ++ * prune the search. ++ * Registers read by the continuation are read by us. ++ * If we have any write marks in env->cur_state, they ++ * will prevent corresponding reads in the continuation ++ * from reaching our parent (an explored_state). Our ++ * own state will get the read marks recorded, but ++ * they'll be immediately forgotten as we're pruning ++ * this state and will pop a new one. + */ +- return 1; +- sl = sl->next; +- } ++ err = propagate_liveness(env, &sl->state, cur); + +- /* there were no equivalent states, remember current one. +- * technically the current state is not proven to be safe yet, +- * but it will either reach bpf_exit (which means it's safe) or +- * it will be rejected. Since there are no loops, we won't be +- * seeing this 'insn_idx' instruction again on the way to bpf_exit ++ /* if previous state reached the exit with precision and ++ * current state is equivalent to it (except precsion marks) ++ * the precision needs to be propagated back in ++ * the current state. ++ */ ++ err = err ? : push_jmp_history(env, cur); ++ err = err ? : propagate_precision(env, &sl->state); ++ if (err) ++ return err; ++ return 1; ++ } ++miss: ++ /* when new state is not going to be added do not increase miss count. ++ * Otherwise several loop iterations will remove the state ++ * recorded earlier. The goal of these heuristics is to have ++ * states from some iterations of the loop (some in the beginning ++ * and some at the end) to help pruning. ++ */ ++ if (add_new_state) ++ sl->miss_cnt++; ++ /* heuristic to determine whether this state is beneficial ++ * to keep checking from state equivalence point of view. 
++ * Higher numbers increase max_states_per_insn and verification time, ++ * but do not meaningfully decrease insn_processed. ++ */ ++ if (sl->miss_cnt > sl->hit_cnt * 3 + 3) { ++ /* the state is unlikely to be useful. Remove it to ++ * speed up verification ++ */ ++ *pprev = sl->next; ++ if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) { ++ u32 br = sl->state.branches; ++ ++ WARN_ONCE(br, ++ "BUG live_done but branches_to_explore %d\n", ++ br); ++ free_verifier_state(&sl->state, false); ++ kfree(sl); ++ env->peak_states--; ++ } else { ++ /* cannot free this state, since parentage chain may ++ * walk it later. Add it for free_list instead to ++ * be freed at the end of verification ++ */ ++ sl->next = env->free_list; ++ env->free_list = sl; ++ } ++ sl = *pprev; ++ continue; ++ } ++next: ++ pprev = &sl->next; ++ sl = *pprev; ++ } ++ ++ if (env->max_states_per_insn < states_cnt) ++ env->max_states_per_insn = states_cnt; ++ ++ if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES) ++ return push_jmp_history(env, cur); ++ ++ if (!add_new_state) ++ return push_jmp_history(env, cur); ++ ++ /* There were no equivalent states, remember the current one. ++ * Technically the current state is not proven to be safe yet, ++ * but it will either reach outer most bpf_exit (which means it's safe) ++ * or it will be rejected. When there are no loops the verifier won't be ++ * seeing this tuple (frame[0].callsite, frame[1].callsite, .. insn_idx) ++ * again on the way to bpf_exit. ++ * When looping the sl->state.branches will be > 0 and this state ++ * will not be considered for equivalence until branches == 0. 
+ */ +- new_sl = kmalloc(sizeof(struct verifier_state_list), GFP_USER); ++ new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL); + if (!new_sl) + return -ENOMEM; ++ env->total_states++; ++ env->peak_states++; ++ env->prev_jmps_processed = env->jmps_processed; ++ env->prev_insn_processed = env->insn_processed; + + /* add new state to the head of linked list */ +- memcpy(&new_sl->state, &env->cur_state, sizeof(env->cur_state)); +- new_sl->next = env->explored_states[insn_idx]; +- env->explored_states[insn_idx] = new_sl; ++ new = &new_sl->state; ++ err = copy_verifier_state(new, cur); ++ if (err) { ++ free_verifier_state(new, false); ++ kfree(new_sl); ++ return err; ++ } ++ new->insn_idx = insn_idx; ++ WARN_ONCE(new->branches != 1, ++ "BUG is_state_visited:branches_to_explore=%d insn %d\n", new->branches, insn_idx); ++ ++ cur->parent = new; ++ cur->first_insn_idx = insn_idx; ++ clear_jmp_history(cur); ++ new_sl->next = *explored_state(env, insn_idx); ++ *explored_state(env, insn_idx) = new_sl; ++ /* connect new state to parentage chain. Current frame needs all ++ * registers connected. Only r6 - r9 of the callers are alive (pushed ++ * to the stack implicitly by JITs) so in callers' frames connect just ++ * r6 - r9 as an optimization. Callers will have r1 - r5 connected to ++ * the state of the call instruction (with WRITTEN set), and r0 comes ++ * from callee with its full parentage chain, anyway. ++ */ ++ /* clear write marks in current state: the writes we did are not writes ++ * our child did, so they don't screen off its reads from us. ++ * (There are no read marks in current state, because reads always mark ++ * their parent and current state never has children yet. Only ++ * explored_states can get read marks.) ++ */ ++ for (j = 0; j <= cur->curframe; j++) { ++ for (i = j < cur->curframe ? 
BPF_REG_6 : 0; i < BPF_REG_FP; i++) ++ cur->frame[j]->regs[i].parent = &new->frame[j]->regs[i]; ++ for (i = 0; i < BPF_REG_FP; i++) ++ cur->frame[j]->regs[i].live = REG_LIVE_NONE; ++ } ++ ++ /* all stack frames are accessible from callee, clear them all */ ++ for (j = 0; j <= cur->curframe; j++) { ++ struct bpf_func_state *frame = cur->frame[j]; ++ struct bpf_func_state *newframe = new->frame[j]; ++ ++ for (i = 0; i < frame->allocated_stack / BPF_REG_SIZE; i++) { ++ frame->stack[i].spilled_ptr.live = REG_LIVE_NONE; ++ frame->stack[i].spilled_ptr.parent = ++ &newframe->stack[i].spilled_ptr; ++ } ++ } + return 0; + } + +-static int do_check(struct verifier_env *env) ++/* Return true if it's OK to have the same insn return a different type. */ ++static bool reg_type_mismatch_ok(enum bpf_reg_type type) + { +- struct verifier_state *state = &env->cur_state; ++ switch (type) { ++ case PTR_TO_CTX: ++ case PTR_TO_SOCKET: ++ case PTR_TO_SOCKET_OR_NULL: ++ case PTR_TO_SOCK_COMMON: ++ case PTR_TO_SOCK_COMMON_OR_NULL: ++ case PTR_TO_TCP_SOCK: ++ case PTR_TO_TCP_SOCK_OR_NULL: ++ case PTR_TO_XDP_SOCK: ++ return false; ++ default: ++ return true; ++ } ++} ++ ++/* If an instruction was previously used with particular pointer types, then we ++ * need to be careful to avoid cases such as the below, where it may be ok ++ * for one branch accessing the pointer, but not ok for the other branch: ++ * ++ * R1 = sock_ptr ++ * goto X; ++ * ... ++ * R1 = some_other_valid_ptr; ++ * goto X; ++ * ... 
++ * R2 = *(u32 *)(R1 + 0); ++ */ ++static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev) ++{ ++ return src != prev && (!reg_type_mismatch_ok(src) || ++ !reg_type_mismatch_ok(prev)); ++} ++ ++static int do_check(struct bpf_verifier_env *env) ++{ ++ struct bpf_verifier_state *state; + struct bpf_insn *insns = env->prog->insnsi; +- struct reg_state *regs = state->regs; ++ struct bpf_reg_state *regs; + int insn_cnt = env->prog->len; +- int insn_idx, prev_insn_idx = 0; +- int insn_processed = 0; + bool do_print_state = false; ++ int prev_insn_idx = -1; ++ ++ env->prev_linfo = NULL; ++ ++ state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL); ++ if (!state) ++ return -ENOMEM; ++ state->curframe = 0; ++ state->speculative = false; ++ state->branches = 1; ++ state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL); ++ if (!state->frame[0]) { ++ kfree(state); ++ return -ENOMEM; ++ } ++ env->cur_state = state; ++ init_func_state(env, state->frame[0], ++ BPF_MAIN_FUNC /* callsite */, ++ 0 /* frameno */, ++ 0 /* subprogno, zero == main subprog */); + +- init_reg_state(regs); +- insn_idx = 0; + for (;;) { + struct bpf_insn *insn; + u8 class; + int err; + +- if (insn_idx >= insn_cnt) { +- verbose("invalid insn idx %d insn_cnt %d\n", +- insn_idx, insn_cnt); ++ env->prev_insn_idx = prev_insn_idx; ++ if (env->insn_idx >= insn_cnt) { ++ verbose(env, "invalid insn idx %d insn_cnt %d\n", ++ env->insn_idx, insn_cnt); + return -EFAULT; + } + +- insn = &insns[insn_idx]; ++ insn = &insns[env->insn_idx]; + class = BPF_CLASS(insn->code); + +- if (++insn_processed > 32768) { +- verbose("BPF program is too large. Proccessed %d insn\n", +- insn_processed); ++ if (++env->insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) { ++ verbose(env, ++ "BPF program is too large. 
Processed %d insn\n", ++ env->insn_processed); + return -E2BIG; + } + +- err = is_state_visited(env, insn_idx); ++ err = is_state_visited(env, env->insn_idx); + if (err < 0) + return err; + if (err == 1) { + /* found equivalent state, can prune the search */ +- if (log_level) { ++ if (env->log.level & BPF_LOG_LEVEL) { + if (do_print_state) +- verbose("\nfrom %d to %d: safe\n", +- prev_insn_idx, insn_idx); ++ verbose(env, "\nfrom %d to %d%s: safe\n", ++ env->prev_insn_idx, env->insn_idx, ++ env->cur_state->speculative ? ++ " (speculative execution)" : ""); + else +- verbose("%d: safe\n", insn_idx); ++ verbose(env, "%d: safe\n", env->insn_idx); + } + goto process_bpf_exit; + } + +- if (log_level && do_print_state) { +- verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx); +- print_verifier_state(env); ++ if (signal_pending(current)) ++ return -EAGAIN; ++ ++ if (need_resched()) ++ cond_resched(); ++ ++ if (env->log.level & BPF_LOG_LEVEL2 || ++ (env->log.level & BPF_LOG_LEVEL && do_print_state)) { ++ if (env->log.level & BPF_LOG_LEVEL2) ++ verbose(env, "%d:", env->insn_idx); ++ else ++ verbose(env, "\nfrom %d to %d%s:", ++ env->prev_insn_idx, env->insn_idx, ++ env->cur_state->speculative ? 
++ " (speculative execution)" : ""); ++ print_verifier_state(env, state->frame[state->curframe]); + do_print_state = false; + } + +- if (log_level) { +- verbose("%d: ", insn_idx); +- print_bpf_insn(env, insn); ++ if (env->log.level & BPF_LOG_LEVEL) { ++ const struct bpf_insn_cbs cbs = { ++ .cb_print = verbose, ++ .private_data = env, ++ }; ++ ++ verbose_linfo(env, env->insn_idx, "; "); ++ verbose(env, "%d: ", env->insn_idx); ++ print_bpf_insn(&cbs, insn, env->allow_ptr_leaks); + } + ++ regs = cur_regs(env); ++ sanitize_mark_insn_seen(env); ++ prev_insn_idx = env->insn_idx; ++ + if (class == BPF_ALU || class == BPF_ALU64) { + err = check_alu_op(env, insn); + if (err) + return err; + + } else if (class == BPF_LDX) { +- enum bpf_reg_type src_reg_type; ++ enum bpf_reg_type *prev_src_type, src_reg_type; + + /* check for reserved fields is already done */ + + /* check src operand */ +- err = check_reg_arg(regs, insn->src_reg, SRC_OP); ++ err = check_reg_arg(env, insn->src_reg, SRC_OP); + if (err) + return err; + +- err = check_reg_arg(regs, insn->dst_reg, DST_OP_NO_MARK); ++ err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK); + if (err) + return err; + +@@ -1796,27 +7850,22 @@ static int do_check(struct verifier_env + /* check that memory (src_reg + off) is readable, + * the state of dst_reg will be updated by this func + */ +- err = check_mem_access(env, insn->src_reg, insn->off, +- BPF_SIZE(insn->code), BPF_READ, +- insn->dst_reg); ++ err = check_mem_access(env, env->insn_idx, insn->src_reg, ++ insn->off, BPF_SIZE(insn->code), ++ BPF_READ, insn->dst_reg, false); + if (err) + return err; + +- if (BPF_SIZE(insn->code) != BPF_W) { +- insn_idx++; +- continue; +- } ++ prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type; + +- if (insn->imm == 0) { ++ if (*prev_src_type == NOT_INIT) { + /* saw a valid insn + * dst_reg = *(u32 *)(src_reg + off) +- * use reserved 'imm' field to mark this insn ++ * save type to validate intersecting paths + */ +- insn->imm = 
src_reg_type; ++ *prev_src_type = src_reg_type; + +- } else if (src_reg_type != insn->imm && +- (src_reg_type == PTR_TO_CTX || +- insn->imm == PTR_TO_CTX)) { ++ } else if (reg_type_mismatch(src_reg_type, *prev_src_type)) { + /* ABuser program is trying to use the same insn + * dst_reg = *(u32*) (src_reg + off) + * with different pointer types: +@@ -1824,79 +7873,98 @@ static int do_check(struct verifier_env + * src_reg == stack|map in some other branch. + * Reject it. + */ +- verbose("same insn cannot be used with different pointers\n"); ++ verbose(env, "same insn cannot be used with different pointers\n"); + return -EINVAL; + } + + } else if (class == BPF_STX) { +- enum bpf_reg_type dst_reg_type; ++ enum bpf_reg_type *prev_dst_type, dst_reg_type; + + if (BPF_MODE(insn->code) == BPF_XADD) { +- err = check_xadd(env, insn); ++ err = check_xadd(env, env->insn_idx, insn); + if (err) + return err; +- insn_idx++; ++ env->insn_idx++; + continue; + } + + /* check src1 operand */ +- err = check_reg_arg(regs, insn->src_reg, SRC_OP); ++ err = check_reg_arg(env, insn->src_reg, SRC_OP); + if (err) + return err; + /* check src2 operand */ +- err = check_reg_arg(regs, insn->dst_reg, SRC_OP); ++ err = check_reg_arg(env, insn->dst_reg, SRC_OP); + if (err) + return err; + + dst_reg_type = regs[insn->dst_reg].type; + + /* check that memory (dst_reg + off) is writeable */ +- err = check_mem_access(env, insn->dst_reg, insn->off, +- BPF_SIZE(insn->code), BPF_WRITE, +- insn->src_reg); ++ err = check_mem_access(env, env->insn_idx, insn->dst_reg, ++ insn->off, BPF_SIZE(insn->code), ++ BPF_WRITE, insn->src_reg, false); + if (err) + return err; + +- if (insn->imm == 0) { +- insn->imm = dst_reg_type; +- } else if (dst_reg_type != insn->imm && +- (dst_reg_type == PTR_TO_CTX || +- insn->imm == PTR_TO_CTX)) { +- verbose("same insn cannot be used with different pointers\n"); ++ prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type; ++ ++ if (*prev_dst_type == NOT_INIT) { ++ *prev_dst_type = 
dst_reg_type; ++ } else if (reg_type_mismatch(dst_reg_type, *prev_dst_type)) { ++ verbose(env, "same insn cannot be used with different pointers\n"); + return -EINVAL; + } + + } else if (class == BPF_ST) { + if (BPF_MODE(insn->code) != BPF_MEM || + insn->src_reg != BPF_REG_0) { +- verbose("BPF_ST uses reserved fields\n"); ++ verbose(env, "BPF_ST uses reserved fields\n"); + return -EINVAL; + } + /* check src operand */ +- err = check_reg_arg(regs, insn->dst_reg, SRC_OP); ++ err = check_reg_arg(env, insn->dst_reg, SRC_OP); + if (err) + return err; + ++ if (is_ctx_reg(env, insn->dst_reg)) { ++ verbose(env, "BPF_ST stores into R%d %s is not allowed\n", ++ insn->dst_reg, ++ reg_type_str[reg_state(env, insn->dst_reg)->type]); ++ return -EACCES; ++ } ++ + /* check that memory (dst_reg + off) is writeable */ +- err = check_mem_access(env, insn->dst_reg, insn->off, +- BPF_SIZE(insn->code), BPF_WRITE, +- -1); ++ err = check_mem_access(env, env->insn_idx, insn->dst_reg, ++ insn->off, BPF_SIZE(insn->code), ++ BPF_WRITE, -1, false); + if (err) + return err; + +- } else if (class == BPF_JMP) { ++ } else if (class == BPF_JMP || class == BPF_JMP32) { + u8 opcode = BPF_OP(insn->code); + ++ env->jmps_processed++; + if (opcode == BPF_CALL) { + if (BPF_SRC(insn->code) != BPF_K || + insn->off != 0 || +- insn->src_reg != BPF_REG_0 || +- insn->dst_reg != BPF_REG_0) { +- verbose("BPF_CALL uses reserved fields\n"); ++ (insn->src_reg != BPF_REG_0 && ++ insn->src_reg != BPF_PSEUDO_CALL) || ++ insn->dst_reg != BPF_REG_0 || ++ class == BPF_JMP32) { ++ verbose(env, "BPF_CALL uses reserved fields\n"); + return -EINVAL; + } + +- err = check_call(env, insn->imm); ++ if (env->cur_state->active_spin_lock && ++ (insn->src_reg == BPF_PSEUDO_CALL || ++ insn->imm != BPF_FUNC_spin_unlock)) { ++ verbose(env, "function calls are not allowed while holding a lock\n"); ++ return -EINVAL; ++ } ++ if (insn->src_reg == BPF_PSEUDO_CALL) ++ err = check_func_call(env, insn, &env->insn_idx); ++ else ++ err = 
check_helper_call(env, insn->imm, env->insn_idx); + if (err) + return err; + +@@ -1904,48 +7972,75 @@ static int do_check(struct verifier_env + if (BPF_SRC(insn->code) != BPF_K || + insn->imm != 0 || + insn->src_reg != BPF_REG_0 || +- insn->dst_reg != BPF_REG_0) { +- verbose("BPF_JA uses reserved fields\n"); ++ insn->dst_reg != BPF_REG_0 || ++ class == BPF_JMP32) { ++ verbose(env, "BPF_JA uses reserved fields\n"); + return -EINVAL; + } + +- insn_idx += insn->off + 1; ++ env->insn_idx += insn->off + 1; + continue; + + } else if (opcode == BPF_EXIT) { + if (BPF_SRC(insn->code) != BPF_K || + insn->imm != 0 || + insn->src_reg != BPF_REG_0 || +- insn->dst_reg != BPF_REG_0) { +- verbose("BPF_EXIT uses reserved fields\n"); ++ insn->dst_reg != BPF_REG_0 || ++ class == BPF_JMP32) { ++ verbose(env, "BPF_EXIT uses reserved fields\n"); + return -EINVAL; + } + ++ if (env->cur_state->active_spin_lock) { ++ verbose(env, "bpf_spin_unlock is missing\n"); ++ return -EINVAL; ++ } ++ ++ if (state->curframe) { ++ /* exit from nested function */ ++ err = prepare_func_exit(env, &env->insn_idx); ++ if (err) ++ return err; ++ do_print_state = true; ++ continue; ++ } ++ ++ err = check_reference_leak(env); ++ if (err) ++ return err; ++ + /* eBPF calling convetion is such that R0 is used + * to return the value from eBPF program. 
+ * Make sure that it's readable at this time + * of bpf_exit, which means that program wrote + * something into it earlier + */ +- err = check_reg_arg(regs, BPF_REG_0, SRC_OP); ++ err = check_reg_arg(env, BPF_REG_0, SRC_OP); + if (err) + return err; + + if (is_pointer_value(env, BPF_REG_0)) { +- verbose("R0 leaks addr as return value\n"); ++ verbose(env, "R0 leaks addr as return value\n"); + return -EACCES; + } + ++ err = check_return_code(env); ++ if (err) ++ return err; + process_bpf_exit: +- insn_idx = pop_stack(env, &prev_insn_idx); +- if (insn_idx < 0) { ++ update_branch_counts(env, env->cur_state); ++ err = pop_stack(env, &prev_insn_idx, ++ &env->insn_idx); ++ if (err < 0) { ++ if (err != -ENOENT) ++ return err; + break; + } else { + do_print_state = true; + continue; + } + } else { +- err = check_cond_jmp_op(env, insn, &insn_idx); ++ err = check_cond_jmp_op(env, insn, &env->insn_idx); + if (err) + return err; + } +@@ -1962,83 +8057,194 @@ process_bpf_exit: + if (err) + return err; + +- insn_idx++; ++ env->insn_idx++; ++ sanitize_mark_insn_seen(env); + } else { +- verbose("invalid BPF_LD mode\n"); ++ verbose(env, "invalid BPF_LD mode\n"); + return -EINVAL; + } + } else { +- verbose("unknown insn class %d\n", class); ++ verbose(env, "unknown insn class %d\n", class); + return -EINVAL; + } + +- insn_idx++; ++ env->insn_idx++; + } + ++ env->prog->aux->stack_depth = env->subprog_info[0].stack_depth; + return 0; + } + ++static int check_map_prealloc(struct bpf_map *map) ++{ ++ return (map->map_type != BPF_MAP_TYPE_HASH && ++ map->map_type != BPF_MAP_TYPE_PERCPU_HASH && ++ map->map_type != BPF_MAP_TYPE_HASH_OF_MAPS) || ++ !(map->map_flags & BPF_F_NO_PREALLOC); ++} ++ ++static bool is_tracing_prog_type(enum bpf_prog_type type) ++{ ++ switch (type) { ++ case BPF_PROG_TYPE_KPROBE: ++ case BPF_PROG_TYPE_TRACEPOINT: ++ case BPF_PROG_TYPE_PERF_EVENT: ++ case BPF_PROG_TYPE_RAW_TRACEPOINT: ++ return true; ++ default: ++ return false; ++ } ++} ++ ++static int 
check_map_prog_compatibility(struct bpf_verifier_env *env, ++ struct bpf_map *map, ++ struct bpf_prog *prog) ++ ++{ ++ /* Make sure that BPF_PROG_TYPE_PERF_EVENT programs only use ++ * preallocated hash maps, since doing memory allocation ++ * in overflow_handler can crash depending on where nmi got ++ * triggered. ++ */ ++ if (prog->type == BPF_PROG_TYPE_PERF_EVENT) { ++ if (!check_map_prealloc(map)) { ++ verbose(env, "perf_event programs can only use preallocated hash map\n"); ++ return -EINVAL; ++ } ++ if (map->inner_map_meta && ++ !check_map_prealloc(map->inner_map_meta)) { ++ verbose(env, "perf_event programs can only use preallocated inner hash map\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if ((is_tracing_prog_type(prog->type) || ++ prog->type == BPF_PROG_TYPE_SOCKET_FILTER) && ++ map_value_has_spin_lock(map)) { ++ verbose(env, "tracing progs cannot use bpf_spin_lock yet\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static bool bpf_map_is_cgroup_storage(struct bpf_map *map) ++{ ++ return (map->map_type == BPF_MAP_TYPE_CGROUP_STORAGE || ++ map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE); ++} ++ + /* look for pseudo eBPF instructions that access map FDs and + * replace them with actual map pointers + */ +-static int replace_map_fd_with_map_ptr(struct verifier_env *env) ++static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) + { + struct bpf_insn *insn = env->prog->insnsi; + int insn_cnt = env->prog->len; +- int i, j; ++ int i, j, err; ++ ++ err = bpf_prog_calc_tag(env->prog); ++ if (err) ++ return err; + + for (i = 0; i < insn_cnt; i++, insn++) { + if (BPF_CLASS(insn->code) == BPF_LDX && + (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) { +- verbose("BPF_LDX uses reserved fields\n"); ++ verbose(env, "BPF_LDX uses reserved fields\n"); + return -EINVAL; + } + + if (BPF_CLASS(insn->code) == BPF_STX && + ((BPF_MODE(insn->code) != BPF_MEM && + BPF_MODE(insn->code) != BPF_XADD) || insn->imm != 0)) { +- verbose("BPF_STX uses reserved 
fields\n"); ++ verbose(env, "BPF_STX uses reserved fields\n"); + return -EINVAL; + } + + if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) { ++ struct bpf_insn_aux_data *aux; + struct bpf_map *map; + struct fd f; ++ u64 addr; + + if (i == insn_cnt - 1 || insn[1].code != 0 || + insn[1].dst_reg != 0 || insn[1].src_reg != 0 || + insn[1].off != 0) { +- verbose("invalid bpf_ld_imm64 insn\n"); ++ verbose(env, "invalid bpf_ld_imm64 insn\n"); + return -EINVAL; + } + +- if (insn->src_reg == 0) ++ if (insn[0].src_reg == 0) + /* valid generic load 64-bit imm */ + goto next_insn; + +- if (insn->src_reg != BPF_PSEUDO_MAP_FD) { +- verbose("unrecognized bpf_ld_imm64 insn\n"); ++ /* In final convert_pseudo_ld_imm64() step, this is ++ * converted into regular 64-bit imm load insn. ++ */ ++ if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD && ++ insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) || ++ (insn[0].src_reg == BPF_PSEUDO_MAP_FD && ++ insn[1].imm != 0)) { ++ verbose(env, ++ "unrecognized bpf_ld_imm64 insn\n"); + return -EINVAL; + } + +- f = fdget(insn->imm); ++ f = fdget(insn[0].imm); + map = __bpf_map_get(f); + if (IS_ERR(map)) { +- verbose("fd %d is not pointing to valid bpf_map\n", +- insn->imm); ++ verbose(env, "fd %d is not pointing to valid bpf_map\n", ++ insn[0].imm); + return PTR_ERR(map); + } + +- /* store map pointer inside BPF_LD_IMM64 instruction */ +- insn[0].imm = (u32) (unsigned long) map; +- insn[1].imm = ((u64) (unsigned long) map) >> 32; ++ err = check_map_prog_compatibility(env, map, env->prog); ++ if (err) { ++ fdput(f); ++ return err; ++ } ++ ++ aux = &env->insn_aux_data[i]; ++ if (insn->src_reg == BPF_PSEUDO_MAP_FD) { ++ addr = (unsigned long)map; ++ } else { ++ u32 off = insn[1].imm; ++ ++ if (off >= BPF_MAX_VAR_OFF) { ++ verbose(env, "direct value offset of %u is not allowed\n", off); ++ fdput(f); ++ return -EINVAL; ++ } ++ ++ if (!map->ops->map_direct_value_addr) { ++ verbose(env, "no direct value access support for this map type\n"); ++ fdput(f); ++ return -EINVAL; 
++ } ++ ++ err = map->ops->map_direct_value_addr(map, &addr, off); ++ if (err) { ++ verbose(env, "invalid access to map value pointer, value_size=%u off=%u\n", ++ map->value_size, off); ++ fdput(f); ++ return err; ++ } ++ ++ aux->map_off = off; ++ addr += off; ++ } ++ ++ insn[0].imm = (u32)addr; ++ insn[1].imm = addr >> 32; + + /* check whether we recorded this map already */ +- for (j = 0; j < env->used_map_cnt; j++) ++ for (j = 0; j < env->used_map_cnt; j++) { + if (env->used_maps[j] == map) { ++ aux->map_index = j; + fdput(f); + goto next_insn; + } ++ } + + if (env->used_map_cnt >= MAX_USED_MAPS) { + fdput(f); +@@ -2048,19 +8254,31 @@ static int replace_map_fd_with_map_ptr(s + /* hold the map. If the program is rejected by verifier, + * the map will be released by release_maps() or it + * will be used by the valid program until it's unloaded +- * and all maps are released in free_bpf_prog_info() ++ * and all maps are released in free_used_maps() + */ + map = bpf_map_inc(map, false); + if (IS_ERR(map)) { + fdput(f); + return PTR_ERR(map); + } ++ ++ aux->map_index = env->used_map_cnt; + env->used_maps[env->used_map_cnt++] = map; + ++ if (bpf_map_is_cgroup_storage(map)) ++ return -EINVAL; ++ + fdput(f); + next_insn: + insn++; + i++; ++ continue; ++ } ++ ++ /* Basic sanity check before we invest more work here. 
*/ ++ if (!bpf_opcode_in_insntable(insn->code)) { ++ verbose(env, "unknown opcode %02x\n", insn->code); ++ return -EINVAL; + } + } + +@@ -2072,7 +8290,7 @@ next_insn: + } + + /* drop refcnt of maps used by the rejected program */ +-static void release_maps(struct verifier_env *env) ++static void release_maps(struct bpf_verifier_env *env) + { + int i; + +@@ -2081,7 +8299,7 @@ static void release_maps(struct verifier + } + + /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ +-static void convert_pseudo_ld_imm64(struct verifier_env *env) ++static void convert_pseudo_ld_imm64(struct bpf_verifier_env *env) + { + struct bpf_insn *insn = env->prog->insnsi; + int insn_cnt = env->prog->len; +@@ -2092,201 +8310,1266 @@ static void convert_pseudo_ld_imm64(stru + insn->src_reg = 0; + } + +-static void adjust_branches(struct bpf_prog *prog, int pos, int delta) ++/* single env->prog->insni[off] instruction was replaced with the range ++ * insni[off, off + cnt). Adjust corresponding insn_aux_data by copying ++ * [0, off) and [off, end) to new locations, so the patched range stays zero ++ */ ++static int adjust_insn_aux_data(struct bpf_verifier_env *env, ++ struct bpf_prog *new_prog, u32 off, u32 cnt) + { +- struct bpf_insn *insn = prog->insnsi; +- int insn_cnt = prog->len; ++ struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; ++ struct bpf_insn *insn = new_prog->insnsi; ++ bool old_seen = old_data[off].seen; ++ u32 prog_len; + int i; + +- for (i = 0; i < insn_cnt; i++, insn++) { +- if (BPF_CLASS(insn->code) != BPF_JMP || +- BPF_OP(insn->code) == BPF_CALL || +- BPF_OP(insn->code) == BPF_EXIT) ++ /* aux info at OFF always needs adjustment, no matter fast path ++ * (cnt == 1) is taken or not. There is no guarantee INSN at OFF is the ++ * original insn at old prog. 
++ */ ++ old_data[off].zext_dst = insn_has_def32(env, insn + off + cnt - 1); ++ ++ if (cnt == 1) ++ return 0; ++ prog_len = new_prog->len; ++ new_data = vzalloc(array_size(prog_len, ++ sizeof(struct bpf_insn_aux_data))); ++ if (!new_data) ++ return -ENOMEM; ++ memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); ++ memcpy(new_data + off + cnt - 1, old_data + off, ++ sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); ++ for (i = off; i < off + cnt - 1; i++) { ++ /* Expand insni[off]'s seen count to the patched range. */ ++ new_data[i].seen = old_seen; ++ new_data[i].zext_dst = insn_has_def32(env, insn + i); ++ } ++ env->insn_aux_data = new_data; ++ vfree(old_data); ++ return 0; ++} ++ ++static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len) ++{ ++ int i; ++ ++ if (len == 1) ++ return; ++ /* NOTE: fake 'exit' subprog should be updated as well. */ ++ for (i = 0; i <= env->subprog_cnt; i++) { ++ if (env->subprog_info[i].start <= off) + continue; ++ env->subprog_info[i].start += len - 1; ++ } ++} ++ ++static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 off, ++ const struct bpf_insn *patch, u32 len) ++{ ++ struct bpf_prog *new_prog; ++ ++ new_prog = bpf_patch_insn_single(env->prog, off, patch, len); ++ if (IS_ERR(new_prog)) { ++ if (PTR_ERR(new_prog) == -ERANGE) ++ verbose(env, ++ "insn %d cannot be patched due to 16-bit range\n", ++ env->insn_aux_data[off].orig_idx); ++ return NULL; ++ } ++ if (adjust_insn_aux_data(env, new_prog, off, len)) ++ return NULL; ++ adjust_subprog_starts(env, off, len); ++ return new_prog; ++} + +- /* adjust offset of jmps if necessary */ +- if (i < pos && i + insn->off + 1 > pos) +- insn->off += delta; +- else if (i > pos + delta && i + insn->off + 1 <= pos + delta) +- insn->off -= delta; ++static int adjust_subprog_starts_after_remove(struct bpf_verifier_env *env, ++ u32 off, u32 cnt) ++{ ++ int i, j; ++ ++ /* find first prog starting at or after off (first to 
remove) */ ++ for (i = 0; i < env->subprog_cnt; i++) ++ if (env->subprog_info[i].start >= off) ++ break; ++ /* find first prog starting at or after off + cnt (first to stay) */ ++ for (j = i; j < env->subprog_cnt; j++) ++ if (env->subprog_info[j].start >= off + cnt) ++ break; ++ /* if j doesn't start exactly at off + cnt, we are just removing ++ * the front of previous prog ++ */ ++ if (env->subprog_info[j].start != off + cnt) ++ j--; ++ ++ if (j > i) { ++ struct bpf_prog_aux *aux = env->prog->aux; ++ int move; ++ ++ /* move fake 'exit' subprog as well */ ++ move = env->subprog_cnt + 1 - j; ++ ++ memmove(env->subprog_info + i, ++ env->subprog_info + j, ++ sizeof(*env->subprog_info) * move); ++ env->subprog_cnt -= j - i; ++ ++ /* remove func_info */ ++ if (aux->func_info) { ++ move = aux->func_info_cnt - j; ++ ++ memmove(aux->func_info + i, ++ aux->func_info + j, ++ sizeof(*aux->func_info) * move); ++ aux->func_info_cnt -= j - i; ++ /* func_info->insn_off is set after all code rewrites, ++ * in adjust_btf_func() - no need to adjust ++ */ ++ } ++ } else { ++ /* convert i from "first prog to remove" to "first to adjust" */ ++ if (env->subprog_info[i].start == off) ++ i++; + } ++ ++ /* update fake 'exit' subprog as well */ ++ for (; i <= env->subprog_cnt; i++) ++ env->subprog_info[i].start -= cnt; ++ ++ return 0; ++} ++ ++static int bpf_adj_linfo_after_remove(struct bpf_verifier_env *env, u32 off, ++ u32 cnt) ++{ ++ struct bpf_prog *prog = env->prog; ++ u32 i, l_off, l_cnt, nr_linfo; ++ struct bpf_line_info *linfo; ++ ++ nr_linfo = prog->aux->nr_linfo; ++ if (!nr_linfo) ++ return 0; ++ ++ linfo = prog->aux->linfo; ++ ++ /* find first line info to remove, count lines to be removed */ ++ for (i = 0; i < nr_linfo; i++) ++ if (linfo[i].insn_off >= off) ++ break; ++ ++ l_off = i; ++ l_cnt = 0; ++ for (; i < nr_linfo; i++) ++ if (linfo[i].insn_off < off + cnt) ++ l_cnt++; ++ else ++ break; ++ ++ /* First live insn doesn't match first live linfo, it needs to "inherit" ++ * 
last removed linfo. prog is already modified, so prog->len == off ++ * means no live instructions after (tail of the program was removed). ++ */ ++ if (prog->len != off && l_cnt && ++ (i == nr_linfo || linfo[i].insn_off != off + cnt)) { ++ l_cnt--; ++ linfo[--i].insn_off = off + cnt; ++ } ++ ++ /* remove the line info which refer to the removed instructions */ ++ if (l_cnt) { ++ memmove(linfo + l_off, linfo + i, ++ sizeof(*linfo) * (nr_linfo - i)); ++ ++ prog->aux->nr_linfo -= l_cnt; ++ nr_linfo = prog->aux->nr_linfo; ++ } ++ ++ /* pull all linfo[i].insn_off >= off + cnt in by cnt */ ++ for (i = l_off; i < nr_linfo; i++) ++ linfo[i].insn_off -= cnt; ++ ++ /* fix up all subprogs (incl. 'exit') which start >= off */ ++ for (i = 0; i <= env->subprog_cnt; i++) ++ if (env->subprog_info[i].linfo_idx > l_off) { ++ /* program may have started in the removed region but ++ * may not be fully removed ++ */ ++ if (env->subprog_info[i].linfo_idx >= l_off + l_cnt) ++ env->subprog_info[i].linfo_idx -= l_cnt; ++ else ++ env->subprog_info[i].linfo_idx = l_off; ++ } ++ ++ return 0; ++} ++ ++static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt) ++{ ++ struct bpf_insn_aux_data *aux_data = env->insn_aux_data; ++ unsigned int orig_prog_len = env->prog->len; ++ int err; ++ ++ err = bpf_remove_insns(env->prog, off, cnt); ++ if (err) ++ return err; ++ ++ err = adjust_subprog_starts_after_remove(env, off, cnt); ++ if (err) ++ return err; ++ ++ err = bpf_adj_linfo_after_remove(env, off, cnt); ++ if (err) ++ return err; ++ ++ memmove(aux_data + off, aux_data + off + cnt, ++ sizeof(*aux_data) * (orig_prog_len - off - cnt)); ++ ++ return 0; + } + +-/* convert load instructions that access fields of 'struct __sk_buff' +- * into sequence of instructions that access fields of 'struct sk_buff' ++/* The verifier does more data flow analysis than llvm and will not ++ * explore branches that are dead at run time. Malicious programs can ++ * have dead code too. 
Therefore replace all dead at-run-time code ++ * with 'ja -1'. ++ * ++ * Just nops are not optimal, e.g. if they would sit at the end of the ++ * program and through another bug we would manage to jump there, then ++ * we'd execute beyond program memory otherwise. Returning exception ++ * code also wouldn't work since we can have subprogs where the dead ++ * code could be located. + */ +-static int convert_ctx_accesses(struct verifier_env *env) ++static void sanitize_dead_code(struct bpf_verifier_env *env) + { ++ struct bpf_insn_aux_data *aux_data = env->insn_aux_data; ++ struct bpf_insn trap = BPF_JMP_IMM(BPF_JA, 0, 0, -1); + struct bpf_insn *insn = env->prog->insnsi; ++ const int insn_cnt = env->prog->len; ++ int i; ++ ++ for (i = 0; i < insn_cnt; i++) { ++ if (aux_data[i].seen) ++ continue; ++ memcpy(insn + i, &trap, sizeof(trap)); ++ } ++} ++ ++static bool insn_is_cond_jump(u8 code) ++{ ++ u8 op; ++ ++ if (BPF_CLASS(code) == BPF_JMP32) ++ return true; ++ ++ if (BPF_CLASS(code) != BPF_JMP) ++ return false; ++ ++ op = BPF_OP(code); ++ return op != BPF_JA && op != BPF_EXIT && op != BPF_CALL; ++} ++ ++static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env) ++{ ++ struct bpf_insn_aux_data *aux_data = env->insn_aux_data; ++ struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); ++ struct bpf_insn *insn = env->prog->insnsi; ++ const int insn_cnt = env->prog->len; ++ int i; ++ ++ for (i = 0; i < insn_cnt; i++, insn++) { ++ if (!insn_is_cond_jump(insn->code)) ++ continue; ++ ++ if (!aux_data[i + 1].seen) ++ ja.off = insn->off; ++ else if (!aux_data[i + 1 + insn->off].seen) ++ ja.off = 0; ++ else ++ continue; ++ ++ memcpy(insn, &ja, sizeof(ja)); ++ } ++} ++ ++static int opt_remove_dead_code(struct bpf_verifier_env *env) ++{ ++ struct bpf_insn_aux_data *aux_data = env->insn_aux_data; + int insn_cnt = env->prog->len; +- struct bpf_insn insn_buf[16]; ++ int i, err; ++ ++ for (i = 0; i < insn_cnt; i++) { ++ int j; ++ ++ j = 0; ++ while (i + j < insn_cnt && 
!aux_data[i + j].seen) ++ j++; ++ if (!j) ++ continue; ++ ++ err = verifier_remove_insns(env, i, j); ++ if (err) ++ return err; ++ insn_cnt = env->prog->len; ++ } ++ ++ return 0; ++} ++ ++static int opt_remove_nops(struct bpf_verifier_env *env) ++{ ++ const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0); ++ struct bpf_insn *insn = env->prog->insnsi; ++ int insn_cnt = env->prog->len; ++ int i, err; ++ ++ for (i = 0; i < insn_cnt; i++) { ++ if (memcmp(&insn[i], &ja, sizeof(ja))) ++ continue; ++ ++ err = verifier_remove_insns(env, i, 1); ++ if (err) ++ return err; ++ insn_cnt--; ++ i--; ++ } ++ ++ return 0; ++} ++ ++static int opt_subreg_zext_lo32_rnd_hi32(struct bpf_verifier_env *env, ++ const union bpf_attr *attr) ++{ ++ struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4]; ++ struct bpf_insn_aux_data *aux = env->insn_aux_data; ++ int i, patch_len, delta = 0, len = env->prog->len; ++ struct bpf_insn *insns = env->prog->insnsi; ++ struct bpf_prog *new_prog; ++ bool rnd_hi32; ++ ++ rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32; ++ zext_patch[1] = BPF_ZEXT_REG(0); ++ rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0); ++ rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32); ++ rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX); ++ for (i = 0; i < len; i++) { ++ int adj_idx = i + delta; ++ struct bpf_insn insn; ++ ++ insn = insns[adj_idx]; ++ if (!aux[adj_idx].zext_dst) { ++ u8 code, class; ++ u32 imm_rnd; ++ ++ if (!rnd_hi32) ++ continue; ++ ++ code = insn.code; ++ class = BPF_CLASS(code); ++ if (insn_no_def(&insn)) ++ continue; ++ ++ /* NOTE: arg "reg" (the fourth one) is only used for ++ * BPF_STX which has been ruled out in above ++ * check, it is safe to pass NULL here. ++ */ ++ if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) { ++ if (class == BPF_LD && ++ BPF_MODE(code) == BPF_IMM) ++ i++; ++ continue; ++ } ++ ++ /* ctx load could be transformed into wider load. 
*/ ++ if (class == BPF_LDX && ++ aux[adj_idx].ptr_type == PTR_TO_CTX) ++ continue; ++ ++ imm_rnd = get_random_int(); ++ rnd_hi32_patch[0] = insn; ++ rnd_hi32_patch[1].imm = imm_rnd; ++ rnd_hi32_patch[3].dst_reg = insn.dst_reg; ++ patch = rnd_hi32_patch; ++ patch_len = 4; ++ goto apply_patch_buffer; ++ } ++ ++ if (!bpf_jit_needs_zext()) ++ continue; ++ ++ zext_patch[0] = insn; ++ zext_patch[1].dst_reg = insn.dst_reg; ++ zext_patch[1].src_reg = insn.dst_reg; ++ patch = zext_patch; ++ patch_len = 2; ++apply_patch_buffer: ++ new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len); ++ if (!new_prog) ++ return -ENOMEM; ++ env->prog = new_prog; ++ insns = new_prog->insnsi; ++ aux = env->insn_aux_data; ++ delta += patch_len - 1; ++ } ++ ++ return 0; ++} ++ ++/* convert load instructions that access fields of a context type into a ++ * sequence of instructions that access fields of the underlying structure: ++ * struct __sk_buff -> struct sk_buff ++ * struct bpf_sock_ops -> struct sock ++ */ ++static int convert_ctx_accesses(struct bpf_verifier_env *env) ++{ ++ const struct bpf_verifier_ops *ops = env->ops; ++ int i, cnt, size, ctx_field_size, delta = 0; ++ const int insn_cnt = env->prog->len; ++ struct bpf_insn insn_buf[16], *insn; ++ u32 target_size, size_default, off; + struct bpf_prog *new_prog; +- u32 cnt; +- int i; + enum bpf_access_type type; ++ bool is_narrower_load; ++ ++ if (ops->gen_prologue || env->seen_direct_write) { ++ if (!ops->gen_prologue) { ++ verbose(env, "bpf verifier is misconfigured\n"); ++ return -EINVAL; ++ } ++ cnt = ops->gen_prologue(insn_buf, env->seen_direct_write, ++ env->prog); ++ if (cnt >= ARRAY_SIZE(insn_buf)) { ++ verbose(env, "bpf verifier is misconfigured\n"); ++ return -EINVAL; ++ } else if (cnt) { ++ new_prog = bpf_patch_insn_data(env, 0, insn_buf, cnt); ++ if (!new_prog) ++ return -ENOMEM; ++ ++ env->prog = new_prog; ++ delta += cnt - 1; ++ } ++ } + +- if (!env->prog->aux->ops->convert_ctx_access) ++ if 
(bpf_prog_is_dev_bound(env->prog->aux)) + return 0; + ++ insn = env->prog->insnsi + delta; ++ + for (i = 0; i < insn_cnt; i++, insn++) { +- if (insn->code == (BPF_LDX | BPF_MEM | BPF_W)) ++ bpf_convert_ctx_access_t convert_ctx_access; ++ ++ if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) || ++ insn->code == (BPF_LDX | BPF_MEM | BPF_H) || ++ insn->code == (BPF_LDX | BPF_MEM | BPF_W) || ++ insn->code == (BPF_LDX | BPF_MEM | BPF_DW)) + type = BPF_READ; +- else if (insn->code == (BPF_STX | BPF_MEM | BPF_W)) ++ else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) || ++ insn->code == (BPF_STX | BPF_MEM | BPF_H) || ++ insn->code == (BPF_STX | BPF_MEM | BPF_W) || ++ insn->code == (BPF_STX | BPF_MEM | BPF_DW)) + type = BPF_WRITE; + else + continue; + +- if (insn->imm != PTR_TO_CTX) { +- /* clear internal mark */ +- insn->imm = 0; ++ if (type == BPF_WRITE && ++ env->insn_aux_data[i + delta].sanitize_stack_off) { ++ struct bpf_insn patch[] = { ++ /* Sanitize suspicious stack slot with zero. ++ * There are no memory dependencies for this store, ++ * since it's only using frame pointer and immediate ++ * constant of zero ++ */ ++ BPF_ST_MEM(BPF_DW, BPF_REG_FP, ++ env->insn_aux_data[i + delta].sanitize_stack_off, ++ 0), ++ /* the original STX instruction will immediately ++ * overwrite the same stack slot with appropriate value ++ */ ++ *insn, ++ }; ++ ++ cnt = ARRAY_SIZE(patch); ++ new_prog = bpf_patch_insn_data(env, i + delta, patch, cnt); ++ if (!new_prog) ++ return -ENOMEM; ++ ++ delta += cnt - 1; ++ env->prog = new_prog; ++ insn = new_prog->insnsi + i + delta; ++ continue; ++ } ++ ++ switch (env->insn_aux_data[i + delta].ptr_type) { ++ case PTR_TO_CTX: ++ if (!ops->convert_ctx_access) ++ continue; ++ convert_ctx_access = ops->convert_ctx_access; ++ break; ++ default: + continue; + } + +- cnt = env->prog->aux->ops-> +- convert_ctx_access(type, insn->dst_reg, insn->src_reg, +- insn->off, insn_buf, env->prog); +- if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { +- verbose("bpf 
verifier is misconfigured\n"); ++ ctx_field_size = env->insn_aux_data[i + delta].ctx_field_size; ++ size = BPF_LDST_BYTES(insn); ++ ++ /* If the read access is a narrower load of the field, ++ * convert to a 4/8-byte load, to minimum program type specific ++ * convert_ctx_access changes. If conversion is successful, ++ * we will apply proper mask to the result. ++ */ ++ is_narrower_load = size < ctx_field_size; ++ size_default = bpf_ctx_off_adjust_machine(ctx_field_size); ++ off = insn->off; ++ if (is_narrower_load) { ++ u8 size_code; ++ ++ if (type == BPF_WRITE) { ++ verbose(env, "bpf verifier narrow ctx access misconfigured\n"); ++ return -EINVAL; ++ } ++ ++ size_code = BPF_H; ++ if (ctx_field_size == 4) ++ size_code = BPF_W; ++ else if (ctx_field_size == 8) ++ size_code = BPF_DW; ++ ++ insn->off = off & ~(size_default - 1); ++ insn->code = BPF_LDX | BPF_MEM | size_code; ++ } ++ ++ target_size = 0; ++ cnt = convert_ctx_access(type, insn, insn_buf, env->prog, ++ &target_size); ++ if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf) || ++ (ctx_field_size && !target_size)) { ++ verbose(env, "bpf verifier is misconfigured\n"); + return -EINVAL; + } + +- if (cnt == 1) { +- memcpy(insn, insn_buf, sizeof(*insn)); +- continue; ++ if (is_narrower_load && size < target_size) { ++ u8 shift = bpf_ctx_narrow_access_offset( ++ off, size, size_default) * 8; ++ if (ctx_field_size <= 4) { ++ if (shift) ++ insn_buf[cnt++] = BPF_ALU32_IMM(BPF_RSH, ++ insn->dst_reg, ++ shift); ++ insn_buf[cnt++] = BPF_ALU32_IMM(BPF_AND, insn->dst_reg, ++ (1 << size * 8) - 1); ++ } else { ++ if (shift) ++ insn_buf[cnt++] = BPF_ALU64_IMM(BPF_RSH, ++ insn->dst_reg, ++ shift); ++ insn_buf[cnt++] = BPF_ALU64_IMM(BPF_AND, insn->dst_reg, ++ (1ULL << size * 8) - 1); ++ } + } + +- /* several new insns need to be inserted. 
Make room for them */ +- insn_cnt += cnt - 1; +- new_prog = bpf_prog_realloc(env->prog, +- bpf_prog_size(insn_cnt), +- GFP_USER); ++ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); + if (!new_prog) + return -ENOMEM; + +- new_prog->len = insn_cnt; ++ delta += cnt - 1; + +- memmove(new_prog->insnsi + i + cnt, new_prog->insns + i + 1, +- sizeof(*insn) * (insn_cnt - i - cnt)); ++ /* keep walking new program and skip insns we just inserted */ ++ env->prog = new_prog; ++ insn = new_prog->insnsi + i + delta; ++ } + +- /* copy substitute insns in place of load instruction */ +- memcpy(new_prog->insnsi + i, insn_buf, sizeof(*insn) * cnt); ++ return 0; ++} + +- /* adjust branches in the whole program */ +- adjust_branches(new_prog, i, cnt - 1); ++static int jit_subprogs(struct bpf_verifier_env *env) ++{ ++ struct bpf_prog *prog = env->prog, **func, *tmp; ++ int i, j, subprog_start, subprog_end = 0, len, subprog; ++ struct bpf_insn *insn; ++ void *old_bpf_func; ++ int err; + +- /* keep walking new program and skip insns we just inserted */ +- env->prog = new_prog; +- insn = new_prog->insnsi + i + cnt - 1; +- i += cnt - 1; ++ if (env->subprog_cnt <= 1) ++ return 0; ++ ++ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { ++ if (insn->code != (BPF_JMP | BPF_CALL) || ++ insn->src_reg != BPF_PSEUDO_CALL) ++ continue; ++ /* Upon error here we cannot fall back to interpreter but ++ * need a hard reject of the program. Thus -EFAULT is ++ * propagated in any case. ++ */ ++ subprog = find_subprog(env, i + insn->imm + 1); ++ if (subprog < 0) { ++ WARN_ONCE(1, "verifier bug. 
No program starts at insn %d\n", ++ i + insn->imm + 1); ++ return -EFAULT; ++ } ++ /* temporarily remember subprog id inside insn instead of ++ * aux_data, since next loop will split up all insns into funcs ++ */ ++ insn->off = subprog; ++ /* remember original imm in case JIT fails and fallback ++ * to interpreter will be needed ++ */ ++ env->insn_aux_data[i].call_imm = insn->imm; ++ /* point imm to __bpf_call_base+1 from JITs point of view */ ++ insn->imm = 1; ++ } ++ ++ err = bpf_prog_alloc_jited_linfo(prog); ++ if (err) ++ goto out_undo_insn; ++ ++ err = -ENOMEM; ++ func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL); ++ if (!func) ++ goto out_undo_insn; ++ ++ for (i = 0; i < env->subprog_cnt; i++) { ++ subprog_start = subprog_end; ++ subprog_end = env->subprog_info[i + 1].start; ++ ++ len = subprog_end - subprog_start; ++ /* BPF_PROG_RUN doesn't call subprogs directly, ++ * hence main prog stats include the runtime of subprogs. ++ * subprogs don't have IDs and not reachable via prog_get_next_id ++ * func[i]->aux->stats will never be accessed and stays NULL ++ */ ++ func[i] = bpf_prog_alloc_no_stats(bpf_prog_size(len), GFP_USER); ++ if (!func[i]) ++ goto out_free; ++ memcpy(func[i]->insnsi, &prog->insnsi[subprog_start], ++ len * sizeof(struct bpf_insn)); ++ func[i]->type = prog->type; ++ func[i]->len = len; ++ if (bpf_prog_calc_tag(func[i])) ++ goto out_free; ++ func[i]->is_func = 1; ++ func[i]->aux->func_idx = i; ++ /* the btf and func_info will be freed only at prog->aux */ ++ func[i]->aux->btf = prog->aux->btf; ++ func[i]->aux->func_info = prog->aux->func_info; ++ ++ /* Use bpf_prog_F_tag to indicate functions in stack traces. 
++ * Long term would need debug info to populate names ++ */ ++ func[i]->aux->name[0] = 'F'; ++ func[i]->aux->stack_depth = env->subprog_info[i].stack_depth; ++ func[i]->jit_requested = 1; ++ func[i]->aux->linfo = prog->aux->linfo; ++ func[i]->aux->nr_linfo = prog->aux->nr_linfo; ++ func[i]->aux->jited_linfo = prog->aux->jited_linfo; ++ func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; ++ func[i] = bpf_int_jit_compile(func[i]); ++ if (!func[i]->jited) { ++ err = -ENOTSUPP; ++ goto out_free; ++ } ++ cond_resched(); ++ } ++ /* at this point all bpf functions were successfully JITed ++ * now populate all bpf_calls with correct addresses and ++ * run last pass of JIT ++ */ ++ for (i = 0; i < env->subprog_cnt; i++) { ++ insn = func[i]->insnsi; ++ for (j = 0; j < func[i]->len; j++, insn++) { ++ if (insn->code != (BPF_JMP | BPF_CALL) || ++ insn->src_reg != BPF_PSEUDO_CALL) ++ continue; ++ subprog = insn->off; ++ insn->imm = BPF_CAST_CALL(func[subprog]->bpf_func) - ++ __bpf_call_base; ++ } ++ ++ /* we use the aux data to keep a list of the start addresses ++ * of the JITed images for each function in the program ++ * ++ * for some architectures, such as powerpc64, the imm field ++ * might not be large enough to hold the offset of the start ++ * address of the callee's JITed image from __bpf_call_base ++ * ++ * in such cases, we can lookup the start address of a callee ++ * by using its subprog id, available from the off field of ++ * the call instruction, as an index for this list ++ */ ++ func[i]->aux->func = func; ++ func[i]->aux->func_cnt = env->subprog_cnt; ++ } ++ for (i = 0; i < env->subprog_cnt; i++) { ++ old_bpf_func = func[i]->bpf_func; ++ tmp = bpf_int_jit_compile(func[i]); ++ if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) { ++ verbose(env, "JIT doesn't support bpf-to-bpf calls\n"); ++ err = -ENOTSUPP; ++ goto out_free; ++ } ++ cond_resched(); ++ } ++ ++ /* finally lock prog and jit images for all functions and ++ * populate kallsysm ++ */ ++ 
for (i = 0; i < env->subprog_cnt; i++) { ++ bpf_prog_lock_ro(func[i]); ++ bpf_prog_kallsyms_add(func[i]); ++ } ++ ++ /* Last step: make now unused interpreter insns from main ++ * prog consistent for later dump requests, so they can ++ * later look the same as if they were interpreted only. ++ */ ++ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { ++ if (insn->code != (BPF_JMP | BPF_CALL) || ++ insn->src_reg != BPF_PSEUDO_CALL) ++ continue; ++ insn->off = env->insn_aux_data[i].call_imm; ++ subprog = find_subprog(env, i + insn->off + 1); ++ insn->imm = subprog; + } + ++ prog->jited = 1; ++ prog->bpf_func = func[0]->bpf_func; ++ prog->aux->func = func; ++ prog->aux->func_cnt = env->subprog_cnt; ++ bpf_prog_free_unused_jited_linfo(prog); + return 0; ++out_free: ++ for (i = 0; i < env->subprog_cnt; i++) ++ if (func[i]) ++ bpf_jit_free(func[i]); ++ kfree(func); ++out_undo_insn: ++ /* cleanup main prog to be interpreted */ ++ prog->jit_requested = 0; ++ for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) { ++ if (insn->code != (BPF_JMP | BPF_CALL) || ++ insn->src_reg != BPF_PSEUDO_CALL) ++ continue; ++ insn->off = 0; ++ insn->imm = env->insn_aux_data[i].call_imm; ++ } ++ bpf_prog_free_jited_linfo(prog); ++ return err; + } + +-static void free_states(struct verifier_env *env) ++static int fixup_call_args(struct bpf_verifier_env *env) + { +- struct verifier_state_list *sl, *sln; ++#ifndef CONFIG_BPF_JIT_ALWAYS_ON ++ struct bpf_prog *prog = env->prog; ++ struct bpf_insn *insn = prog->insnsi; ++ int i, depth; ++#endif ++ int err = 0; ++ ++ if (env->prog->jit_requested && ++ !bpf_prog_is_dev_bound(env->prog->aux)) { ++ err = jit_subprogs(env); ++ if (err == 0) ++ return 0; ++ if (err == -EFAULT) ++ return err; ++ } ++#ifndef CONFIG_BPF_JIT_ALWAYS_ON ++ for (i = 0; i < prog->len; i++, insn++) { ++ if (insn->code != (BPF_JMP | BPF_CALL) || ++ insn->src_reg != BPF_PSEUDO_CALL) ++ continue; ++ depth = get_callee_stack_depth(env, insn, i); ++ if (depth < 0) 
++ return depth; ++ bpf_patch_call_args(insn, depth); ++ } ++ err = 0; ++#endif ++ return err; ++} ++ ++/* fixup insn->imm field of bpf_call instructions ++ * and inline eligible helpers as explicit sequence of BPF instructions ++ * ++ * this function is called after eBPF program passed verification ++ */ ++static int fixup_bpf_calls(struct bpf_verifier_env *env) ++{ ++ struct bpf_prog *prog = env->prog; ++ struct bpf_insn *insn = prog->insnsi; ++ const struct bpf_func_proto *fn; ++ const int insn_cnt = prog->len; ++ const struct bpf_map_ops *ops; ++ struct bpf_insn_aux_data *aux; ++ struct bpf_insn insn_buf[16]; ++ struct bpf_prog *new_prog; ++ struct bpf_map *map_ptr; ++ int i, cnt, delta = 0; ++ ++ for (i = 0; i < insn_cnt; i++, insn++) { ++ if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) || ++ insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) || ++ insn->code == (BPF_ALU | BPF_MOD | BPF_X) || ++ insn->code == (BPF_ALU | BPF_DIV | BPF_X)) { ++ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; ++ bool isdiv = BPF_OP(insn->code) == BPF_DIV; ++ struct bpf_insn *patchlet; ++ struct bpf_insn chk_and_div[] = { ++ /* [R,W]x div 0 -> 0 */ ++ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | ++ BPF_JNE | BPF_K, insn->src_reg, ++ 0, 2, 0), ++ BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg), ++ BPF_JMP_IMM(BPF_JA, 0, 0, 1), ++ *insn, ++ }; ++ struct bpf_insn chk_and_mod[] = { ++ /* [R,W]x mod 0 -> [R,W]x */ ++ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) | ++ BPF_JEQ | BPF_K, insn->src_reg, ++ 0, 1 + (is64 ? 0 : 1), 0), ++ *insn, ++ BPF_JMP_IMM(BPF_JA, 0, 0, 1), ++ BPF_MOV32_REG(insn->dst_reg, insn->dst_reg), ++ }; ++ ++ patchlet = isdiv ? chk_and_div : chk_and_mod; ++ cnt = isdiv ? ARRAY_SIZE(chk_and_div) : ++ ARRAY_SIZE(chk_and_mod) - (is64 ? 
2 : 0); ++ ++ new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt); ++ if (!new_prog) ++ return -ENOMEM; ++ ++ delta += cnt - 1; ++ env->prog = prog = new_prog; ++ insn = new_prog->insnsi + i + delta; ++ continue; ++ } ++ ++ if (BPF_CLASS(insn->code) == BPF_LD && ++ (BPF_MODE(insn->code) == BPF_ABS || ++ BPF_MODE(insn->code) == BPF_IND)) { ++ cnt = env->ops->gen_ld_abs(insn, insn_buf); ++ if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { ++ verbose(env, "bpf verifier is misconfigured\n"); ++ return -EINVAL; ++ } ++ ++ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); ++ if (!new_prog) ++ return -ENOMEM; ++ ++ delta += cnt - 1; ++ env->prog = prog = new_prog; ++ insn = new_prog->insnsi + i + delta; ++ continue; ++ } ++ ++ if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) || ++ insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) { ++ const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X; ++ const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X; ++ struct bpf_insn insn_buf[16]; ++ struct bpf_insn *patch = &insn_buf[0]; ++ bool issrc, isneg, isimm; ++ u32 off_reg; ++ ++ aux = &env->insn_aux_data[i + delta]; ++ if (!aux->alu_state || ++ aux->alu_state == BPF_ALU_NON_POINTER) ++ continue; ++ ++ isneg = aux->alu_state & BPF_ALU_NEG_VALUE; ++ issrc = (aux->alu_state & BPF_ALU_SANITIZE) == ++ BPF_ALU_SANITIZE_SRC; ++ isimm = aux->alu_state & BPF_ALU_IMMEDIATE; ++ ++ off_reg = issrc ? 
insn->src_reg : insn->dst_reg; ++ if (isimm) { ++ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); ++ } else { ++ if (isneg) ++ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); ++ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); ++ *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); ++ *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); ++ *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); ++ *patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63); ++ *patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg); ++ } ++ if (!issrc) ++ *patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg); ++ insn->src_reg = BPF_REG_AX; ++ if (isneg) ++ insn->code = insn->code == code_add ? ++ code_sub : code_add; ++ *patch++ = *insn; ++ if (issrc && isneg && !isimm) ++ *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); ++ cnt = patch - insn_buf; ++ ++ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); ++ if (!new_prog) ++ return -ENOMEM; ++ ++ delta += cnt - 1; ++ env->prog = prog = new_prog; ++ insn = new_prog->insnsi + i + delta; ++ continue; ++ } ++ ++ if (insn->code != (BPF_JMP | BPF_CALL)) ++ continue; ++ if (insn->src_reg == BPF_PSEUDO_CALL) ++ continue; ++ ++ if (insn->imm == BPF_FUNC_get_route_realm) ++ prog->dst_needed = 1; ++ if (insn->imm == BPF_FUNC_get_prandom_u32) ++ bpf_user_rnd_init_once(); ++ if (insn->imm == BPF_FUNC_override_return) ++ prog->kprobe_override = 1; ++ if (insn->imm == BPF_FUNC_tail_call) { ++ /* If we tail call into other programs, we ++ * cannot make any assumptions since they can ++ * be replaced dynamically during runtime in ++ * the program array. 
++ */ ++ prog->cb_access = 1; ++ env->prog->aux->stack_depth = MAX_BPF_STACK; ++ env->prog->aux->max_pkt_offset = MAX_PACKET_OFF; ++ ++ /* mark bpf_tail_call as different opcode to avoid ++ * conditional branch in the interpeter for every normal ++ * call and to prevent accidental JITing by JIT compiler ++ * that doesn't support bpf_tail_call yet ++ */ ++ insn->imm = 0; ++ insn->code = BPF_JMP | BPF_TAIL_CALL; ++ ++ aux = &env->insn_aux_data[i + delta]; ++ if (!bpf_map_ptr_unpriv(aux)) ++ continue; ++ ++ /* instead of changing every JIT dealing with tail_call ++ * emit two extra insns: ++ * if (index >= max_entries) goto out; ++ * index &= array->index_mask; ++ * to avoid out-of-bounds cpu speculation ++ */ ++ if (bpf_map_ptr_poisoned(aux)) { ++ verbose(env, "tail_call abusing map_ptr\n"); ++ return -EINVAL; ++ } ++ ++ map_ptr = BPF_MAP_PTR(aux->map_state); ++ insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3, ++ map_ptr->max_entries, 2); ++ insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3, ++ container_of(map_ptr, ++ struct bpf_array, ++ map)->index_mask); ++ insn_buf[2] = *insn; ++ cnt = 3; ++ new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt); ++ if (!new_prog) ++ return -ENOMEM; ++ ++ delta += cnt - 1; ++ env->prog = prog = new_prog; ++ insn = new_prog->insnsi + i + delta; ++ continue; ++ } ++ ++ /* BPF_EMIT_CALL() assumptions in some of the map_gen_lookup ++ * and other inlining handlers are currently limited to 64 bit ++ * only. 
++ */ ++ if (prog->jit_requested && BITS_PER_LONG == 64 && ++ (insn->imm == BPF_FUNC_map_lookup_elem || ++ insn->imm == BPF_FUNC_map_update_elem || ++ insn->imm == BPF_FUNC_map_delete_elem || ++ insn->imm == BPF_FUNC_map_push_elem || ++ insn->imm == BPF_FUNC_map_pop_elem || ++ insn->imm == BPF_FUNC_map_peek_elem)) { ++ aux = &env->insn_aux_data[i + delta]; ++ if (bpf_map_ptr_poisoned(aux)) ++ goto patch_call_imm; ++ ++ map_ptr = BPF_MAP_PTR(aux->map_state); ++ ops = map_ptr->ops; ++ if (insn->imm == BPF_FUNC_map_lookup_elem && ++ ops->map_gen_lookup) { ++ cnt = ops->map_gen_lookup(map_ptr, insn_buf); ++ if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) { ++ verbose(env, "bpf verifier is misconfigured\n"); ++ return -EINVAL; ++ } ++ ++ new_prog = bpf_patch_insn_data(env, i + delta, ++ insn_buf, cnt); ++ if (!new_prog) ++ return -ENOMEM; ++ ++ delta += cnt - 1; ++ env->prog = prog = new_prog; ++ insn = new_prog->insnsi + i + delta; ++ continue; ++ } ++ ++ BUILD_BUG_ON(!__same_type(ops->map_lookup_elem, ++ (void *(*)(struct bpf_map *map, void *key))NULL)); ++ BUILD_BUG_ON(!__same_type(ops->map_delete_elem, ++ (int (*)(struct bpf_map *map, void *key))NULL)); ++ BUILD_BUG_ON(!__same_type(ops->map_update_elem, ++ (int (*)(struct bpf_map *map, void *key, void *value, ++ u64 flags))NULL)); ++ BUILD_BUG_ON(!__same_type(ops->map_push_elem, ++ (int (*)(struct bpf_map *map, void *value, ++ u64 flags))NULL)); ++ BUILD_BUG_ON(!__same_type(ops->map_pop_elem, ++ (int (*)(struct bpf_map *map, void *value))NULL)); ++ BUILD_BUG_ON(!__same_type(ops->map_peek_elem, ++ (int (*)(struct bpf_map *map, void *value))NULL)); ++ ++ switch (insn->imm) { ++ case BPF_FUNC_map_lookup_elem: ++ insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - ++ __bpf_call_base; ++ continue; ++ case BPF_FUNC_map_update_elem: ++ insn->imm = BPF_CAST_CALL(ops->map_update_elem) - ++ __bpf_call_base; ++ continue; ++ case BPF_FUNC_map_delete_elem: ++ insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - ++ __bpf_call_base; ++ 
continue; ++ case BPF_FUNC_map_push_elem: ++ insn->imm = BPF_CAST_CALL(ops->map_push_elem) - ++ __bpf_call_base; ++ continue; ++ case BPF_FUNC_map_pop_elem: ++ insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - ++ __bpf_call_base; ++ continue; ++ case BPF_FUNC_map_peek_elem: ++ insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - ++ __bpf_call_base; ++ continue; ++ } ++ ++ goto patch_call_imm; ++ } ++ ++patch_call_imm: ++ fn = env->ops->get_func_proto(insn->imm, env->prog); ++ /* all functions that have prototype and verifier allowed ++ * programs to call them, must be real in-kernel functions ++ */ ++ if (!fn->func) { ++ verbose(env, ++ "kernel subsystem misconfigured func %s#%d\n", ++ func_id_name(insn->imm), insn->imm); ++ return -EFAULT; ++ } ++ insn->imm = fn->func - __bpf_call_base; ++ } ++ ++ return 0; ++} ++ ++static void free_states(struct bpf_verifier_env *env) ++{ ++ struct bpf_verifier_state_list *sl, *sln; + int i; + ++ sl = env->free_list; ++ while (sl) { ++ sln = sl->next; ++ free_verifier_state(&sl->state, false); ++ kfree(sl); ++ sl = sln; ++ } ++ + if (!env->explored_states) + return; + +- for (i = 0; i < env->prog->len; i++) { ++ for (i = 0; i < state_htab_size(env); i++) { + sl = env->explored_states[i]; + +- if (sl) +- while (sl != STATE_LIST_MARK) { +- sln = sl->next; +- kfree(sl); +- sl = sln; +- } ++ while (sl) { ++ sln = sl->next; ++ free_verifier_state(&sl->state, false); ++ kfree(sl); ++ sl = sln; ++ } + } + +- kfree(env->explored_states); ++ kvfree(env->explored_states); + } + +-int bpf_check(struct bpf_prog **prog, union bpf_attr *attr) ++static void print_verification_stats(struct bpf_verifier_env *env) + { +- char __user *log_ubuf = NULL; +- struct verifier_env *env; +- int ret = -EINVAL; ++ int i; + +- if ((*prog)->len <= 0 || (*prog)->len > BPF_MAXINSNS) +- return -E2BIG; ++ if (env->log.level & BPF_LOG_STATS) { ++ verbose(env, "verification time %lld usec\n", ++ div_u64(env->verification_time, 1000)); ++ verbose(env, "stack depth "); ++ for 
(i = 0; i < env->subprog_cnt; i++) { ++ u32 depth = env->subprog_info[i].stack_depth; ++ ++ verbose(env, "%d", depth); ++ if (i + 1 < env->subprog_cnt) ++ verbose(env, "+"); ++ } ++ verbose(env, "\n"); ++ } ++ verbose(env, "processed %d insns (limit %d) max_states_per_insn %d " ++ "total_states %d peak_states %d mark_read %d\n", ++ env->insn_processed, BPF_COMPLEXITY_LIMIT_INSNS, ++ env->max_states_per_insn, env->total_states, ++ env->peak_states, env->longest_mark_read_walk); ++} ++ ++int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, ++ union bpf_attr __user *uattr) ++{ ++ u64 start_time = ktime_get_ns(); ++ struct bpf_verifier_env *env; ++ struct bpf_verifier_log *log; ++ int i, len, ret = -EINVAL; ++ bool is_priv; + +- /* 'struct verifier_env' can be global, but since it's not small, ++ /* no program is valid */ ++ if (ARRAY_SIZE(bpf_verifier_ops) == 0) ++ return -EINVAL; ++ ++ /* 'struct bpf_verifier_env' can be global, but since it's not small, + * allocate/free it every time bpf_check() is called + */ +- env = kzalloc(sizeof(struct verifier_env), GFP_KERNEL); ++ env = kzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL); + if (!env) + return -ENOMEM; ++ log = &env->log; + ++ len = (*prog)->len; ++ env->insn_aux_data = ++ vzalloc(array_size(sizeof(struct bpf_insn_aux_data), len)); ++ ret = -ENOMEM; ++ if (!env->insn_aux_data) ++ goto err_free_env; ++ for (i = 0; i < len; i++) ++ env->insn_aux_data[i].orig_idx = i; + env->prog = *prog; ++ env->ops = bpf_verifier_ops[env->prog->type]; ++ is_priv = capable(CAP_SYS_ADMIN); + + /* grab the mutex to protect few globals used by verifier */ +- mutex_lock(&bpf_verifier_lock); ++ if (!is_priv) ++ mutex_lock(&bpf_verifier_lock); + + if (attr->log_level || attr->log_buf || attr->log_size) { + /* user requested verbose verifier output + * and supplied buffer to store the verification trace + */ +- log_level = attr->log_level; +- log_ubuf = (char __user *) (unsigned long) attr->log_buf; +- log_size = 
attr->log_size; +- log_len = 0; ++ log->level = attr->log_level; ++ log->ubuf = (char __user *) (unsigned long) attr->log_buf; ++ log->len_total = attr->log_size; + + ret = -EINVAL; +- /* log_* values have to be sane */ +- if (log_size < 128 || log_size > UINT_MAX >> 8 || +- log_level == 0 || log_ubuf == NULL) +- goto free_env; +- +- ret = -ENOMEM; +- log_buf = vmalloc(log_size); +- if (!log_buf) +- goto free_env; +- } else { +- log_level = 0; +- } ++ /* log attributes have to be sane */ ++ if (log->len_total < 128 || log->len_total > UINT_MAX >> 2 || ++ !log->level || !log->ubuf || log->level & ~BPF_LOG_MASK) ++ goto err_unlock; ++ } ++ ++ env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT); ++ if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) ++ env->strict_alignment = true; ++ if (attr->prog_flags & BPF_F_ANY_ALIGNMENT) ++ env->strict_alignment = false; ++ ++ env->allow_ptr_leaks = is_priv; ++ ++ if (is_priv) ++ env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; + + ret = replace_map_fd_with_map_ptr(env); + if (ret < 0) + goto skip_full_check; + +- env->explored_states = kcalloc(env->prog->len, +- sizeof(struct verifier_state_list *), ++ env->explored_states = kcalloc(state_htab_size(env), ++ sizeof(struct bpf_verifier_state_list *), + GFP_USER); + ret = -ENOMEM; + if (!env->explored_states) + goto skip_full_check; + +- ret = check_cfg(env); ++ ret = check_subprogs(env); ++ if (ret < 0) ++ goto skip_full_check; ++ ++ ret = check_btf_info(env, attr, uattr); + if (ret < 0) + goto skip_full_check; + +- env->allow_ptr_leaks = capable(CAP_SYS_ADMIN); ++ ret = check_cfg(env); ++ if (ret < 0) ++ goto skip_full_check; + + ret = do_check(env); ++ if (env->cur_state) { ++ free_verifier_state(env->cur_state, true); ++ env->cur_state = NULL; ++ } + + skip_full_check: +- while (pop_stack(env, NULL) >= 0); ++ while (!pop_stack(env, NULL, NULL)); + free_states(env); + + if (ret == 0) ++ ret = check_max_stack_depth(env); ++ ++ /* 
instruction rewrites happen after this point */ ++ if (is_priv) { ++ if (ret == 0) ++ opt_hard_wire_dead_code_branches(env); ++ if (ret == 0) ++ ret = opt_remove_dead_code(env); ++ if (ret == 0) ++ ret = opt_remove_nops(env); ++ } else { ++ if (ret == 0) ++ sanitize_dead_code(env); ++ } ++ ++ if (ret == 0) + /* program is valid, convert *(u32*)(ctx + off) accesses */ + ret = convert_ctx_accesses(env); + +- if (log_level && log_len >= log_size - 1) { +- BUG_ON(log_len >= log_size); +- /* verifier log exceeded user supplied buffer */ +- ret = -ENOSPC; +- /* fall through to return what was recorded */ ++ if (ret == 0) ++ ret = fixup_bpf_calls(env); ++ ++ /* do 32-bit optimization after insn patching has done so those patched ++ * insns could be handled correctly. ++ */ ++ if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) { ++ ret = opt_subreg_zext_lo32_rnd_hi32(env, attr); ++ env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret ++ : false; + } + +- /* copy verifier log back to user space including trailing zero */ +- if (log_level && copy_to_user(log_ubuf, log_buf, log_len + 1) != 0) { ++ if (ret == 0) ++ ret = fixup_call_args(env); ++ ++ env->verification_time = ktime_get_ns() - start_time; ++ print_verification_stats(env); ++ ++ if (log->level && bpf_verifier_log_full(log)) ++ ret = -ENOSPC; ++ if (log->level && !log->ubuf) { + ret = -EFAULT; +- goto free_log_buf; ++ goto err_release_maps; + } + + if (ret == 0 && env->used_map_cnt) { +@@ -2297,7 +9580,7 @@ skip_full_check: + + if (!env->prog->aux->used_maps) { + ret = -ENOMEM; +- goto free_log_buf; ++ goto err_release_maps; + } + + memcpy(env->prog->aux->used_maps, env->used_maps, +@@ -2310,17 +9593,21 @@ skip_full_check: + convert_pseudo_ld_imm64(env); + } + +-free_log_buf: +- if (log_level) +- vfree(log_buf); +-free_env: ++ if (ret == 0) ++ adjust_btf_func(env); ++ ++err_release_maps: + if (!env->prog->aux->used_maps) + /* if we didn't copy map pointers into bpf_prog_info, release +- * them now. 
Otherwise free_bpf_prog_info() will release them. ++ * them now. Otherwise free_used_maps() will release them. + */ + release_maps(env); + *prog = env->prog; ++err_unlock: ++ if (!is_priv) ++ mutex_unlock(&bpf_verifier_lock); ++ vfree(env->insn_aux_data); ++err_free_env: + kfree(env); +- mutex_unlock(&bpf_verifier_lock); + return ret; + } +--- a/include/linux/filter.h ++++ b/include/linux/filter.h +@@ -1,3 +1,4 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ + /* + * Linux Socket Filter Data Structures + */ +@@ -7,16 +8,22 @@ + #include + + #include ++#include + #include + #include + #include + #include + #include + #include +-#include ++#include ++#include ++#include ++#include ++#include + +-#include ++#include + ++#include + #include + #include + +@@ -24,6 +31,11 @@ struct sk_buff; + struct sock; + struct seccomp_data; + struct bpf_prog_aux; ++struct xdp_rxq_info; ++struct xdp_buff; ++struct sock_reuseport; ++struct ctl_table; ++struct ctl_table_header; + + /* ArgX, context and stack frame pointer register positions. Note, + * Arg1, Arg2, Arg3, etc are used as argument mappings of function +@@ -40,7 +52,26 @@ struct bpf_prog_aux; + /* Additional register mappings for converted user programs. */ + #define BPF_REG_A BPF_REG_0 + #define BPF_REG_X BPF_REG_7 +-#define BPF_REG_TMP BPF_REG_8 ++#define BPF_REG_TMP BPF_REG_2 /* scratch reg */ ++#define BPF_REG_D BPF_REG_8 /* data, callee-saved */ ++#define BPF_REG_H BPF_REG_9 /* hlen, callee-saved */ ++ ++/* Kernel hidden auxiliary/helper register. */ ++#define BPF_REG_AX MAX_BPF_REG ++#define MAX_BPF_EXT_REG (MAX_BPF_REG + 1) ++#define MAX_BPF_JIT_REG MAX_BPF_EXT_REG ++ ++/* unused opcode to mark special call to bpf_tail_call() helper */ ++#define BPF_TAIL_CALL 0xf0 ++ ++/* unused opcode to mark call to interpreter with arguments */ ++#define BPF_CALL_ARGS 0xe0 ++ ++/* As per nm, we expose JITed images as text (code) section for ++ * kallsyms. That way, tools like perf can find it to match ++ * addresses. 
++ */ ++#define BPF_SYM_ELF_TYPE 't' + + /* BPF program can access up to 512 bytes of stack space. */ + #define MAX_BPF_STACK 512 +@@ -129,6 +160,20 @@ struct bpf_prog_aux; + .off = 0, \ + .imm = IMM }) + ++/* Special form of mov32, used for doing explicit zero extension on dst. */ ++#define BPF_ZEXT_REG(DST) \ ++ ((struct bpf_insn) { \ ++ .code = BPF_ALU | BPF_MOV | BPF_X, \ ++ .dst_reg = DST, \ ++ .src_reg = DST, \ ++ .off = 0, \ ++ .imm = 1 }) ++ ++static inline bool insn_is_zext(const struct bpf_insn *insn) ++{ ++ return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1; ++} ++ + /* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ + #define BPF_LD_IMM64(DST, IMM) \ + BPF_LD_IMM64_RAW(DST, 0, IMM) +@@ -249,8 +294,51 @@ struct bpf_prog_aux; + .off = OFF, \ + .imm = IMM }) + ++/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ ++ ++#define BPF_JMP32_REG(OP, DST, SRC, OFF) \ ++ ((struct bpf_insn) { \ ++ .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ ++ .dst_reg = DST, \ ++ .src_reg = SRC, \ ++ .off = OFF, \ ++ .imm = 0 }) ++ ++/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. 
*/ ++ ++#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ ++ ((struct bpf_insn) { \ ++ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ ++ .dst_reg = DST, \ ++ .src_reg = 0, \ ++ .off = OFF, \ ++ .imm = IMM }) ++ ++/* Unconditional jumps, goto pc + off16 */ ++ ++#define BPF_JMP_A(OFF) \ ++ ((struct bpf_insn) { \ ++ .code = BPF_JMP | BPF_JA, \ ++ .dst_reg = 0, \ ++ .src_reg = 0, \ ++ .off = OFF, \ ++ .imm = 0 }) ++ ++/* Relative call */ ++ ++#define BPF_CALL_REL(TGT) \ ++ ((struct bpf_insn) { \ ++ .code = BPF_JMP | BPF_CALL, \ ++ .dst_reg = 0, \ ++ .src_reg = BPF_PSEUDO_CALL, \ ++ .off = 0, \ ++ .imm = TGT }) ++ + /* Function call */ + ++#define BPF_CAST_CALL(x) \ ++ ((u64 (*)(u64, u64, u64, u64, u64))(x)) ++ + #define BPF_EMIT_CALL(FUNC) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_CALL, \ +@@ -303,6 +391,112 @@ struct bpf_prog_aux; + bpf_size; \ + }) + ++#define bpf_size_to_bytes(bpf_size) \ ++({ \ ++ int bytes = -EINVAL; \ ++ \ ++ if (bpf_size == BPF_B) \ ++ bytes = sizeof(u8); \ ++ else if (bpf_size == BPF_H) \ ++ bytes = sizeof(u16); \ ++ else if (bpf_size == BPF_W) \ ++ bytes = sizeof(u32); \ ++ else if (bpf_size == BPF_DW) \ ++ bytes = sizeof(u64); \ ++ \ ++ bytes; \ ++}) ++ ++#define BPF_SIZEOF(type) \ ++ ({ \ ++ const int __size = bytes_to_bpf_size(sizeof(type)); \ ++ BUILD_BUG_ON(__size < 0); \ ++ __size; \ ++ }) ++ ++#define BPF_FIELD_SIZEOF(type, field) \ ++ ({ \ ++ const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \ ++ BUILD_BUG_ON(__size < 0); \ ++ __size; \ ++ }) ++ ++#define BPF_LDST_BYTES(insn) \ ++ ({ \ ++ const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \ ++ WARN_ON(__size < 0); \ ++ __size; \ ++ }) ++ ++#define __BPF_MAP_0(m, v, ...) v ++#define __BPF_MAP_1(m, v, t, a, ...) m(t, a) ++#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__) ++#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__) ++#define __BPF_MAP_4(m, v, t, a, ...) 
m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__) ++#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__) ++ ++#define __BPF_REG_0(...) __BPF_PAD(5) ++#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4) ++#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3) ++#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2) ++#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1) ++#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__) ++ ++#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__) ++#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__) ++ ++#define __BPF_CAST(t, a) \ ++ (__force t) \ ++ (__force \ ++ typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \ ++ (unsigned long)0, (t)0))) a ++#define __BPF_V void ++#define __BPF_N ++ ++#define __BPF_DECL_ARGS(t, a) t a ++#define __BPF_DECL_REGS(t, a) u64 a ++ ++#define __BPF_PAD(n) \ ++ __BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \ ++ u64, __ur_3, u64, __ur_4, u64, __ur_5) ++ ++#define BPF_CALL_x(x, name, ...) \ ++ static __always_inline \ ++ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \ ++ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \ ++ u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \ ++ { \ ++ return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\ ++ } \ ++ static __always_inline \ ++ u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)) ++ ++#define BPF_CALL_0(name, ...) BPF_CALL_x(0, name, __VA_ARGS__) ++#define BPF_CALL_1(name, ...) BPF_CALL_x(1, name, __VA_ARGS__) ++#define BPF_CALL_2(name, ...) BPF_CALL_x(2, name, __VA_ARGS__) ++#define BPF_CALL_3(name, ...) BPF_CALL_x(3, name, __VA_ARGS__) ++#define BPF_CALL_4(name, ...) BPF_CALL_x(4, name, __VA_ARGS__) ++#define BPF_CALL_5(name, ...) BPF_CALL_x(5, name, __VA_ARGS__) ++ ++#define bpf_ctx_range(TYPE, MEMBER) \ ++ offsetof(TYPE, MEMBER) ... 
offsetofend(TYPE, MEMBER) - 1 ++#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \ ++ offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1 ++#if BITS_PER_LONG == 64 ++# define bpf_ctx_range_ptr(TYPE, MEMBER) \ ++ offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1 ++#else ++# define bpf_ctx_range_ptr(TYPE, MEMBER) \ ++ offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1 ++#endif /* BITS_PER_LONG == 64 */ ++ ++#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ ++ ({ \ ++ BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \ ++ *(PTR_SIZE) = (SIZE); \ ++ offsetof(TYPE, MEMBER); \ ++ }) ++ + #ifdef CONFIG_COMPAT + /* A struct sock_filter is architecture independent. */ + struct compat_sock_fprog { +@@ -317,24 +511,33 @@ struct sock_fprog_kern { + }; + + struct bpf_binary_header { +- unsigned int pages; +- u8 image[]; ++ u32 pages; ++ /* Some arches need word alignment for their instructions */ ++ u8 image[] __aligned(4); + }; + + struct bpf_prog { + u16 pages; /* Number of allocated pages */ +- kmemcheck_bitfield_begin(meta); + u16 jited:1, /* Is our filter JIT'ed? */ ++ jit_requested:1,/* archs need to JIT the prog */ ++ undo_set_mem:1, /* Passed set_memory_ro() checkpoint */ + gpl_compatible:1, /* Is filter GPL compatible? */ + cb_access:1, /* Is control block accessed? */ +- dst_needed:1; /* Do we need dst entry? */ +- kmemcheck_bitfield_end(meta); +- u32 len; /* Number of filter blocks */ ++ dst_needed:1, /* Do we need dst entry? */ ++ blinded:1, /* Was blinded */ ++ is_func:1, /* program is a bpf function */ ++ kprobe_override:1, /* Do we override a kprobe? */ ++ has_callchain_buf:1, /* callchain buffer allocated? 
*/ ++ enforce_expected_attach_type:1; /* Enforce expected_attach_type checking at attach time */ + enum bpf_prog_type type; /* Type of BPF program */ ++ enum bpf_attach_type expected_attach_type; /* For some prog types */ ++ u32 len; /* Number of filter blocks */ ++ u32 jited_len; /* Size of jited insns in bytes */ ++ u8 tag[BPF_TAG_SIZE]; + struct bpf_prog_aux *aux; /* Auxiliary fields */ + struct sock_fprog_kern *orig_prog; /* Original BPF program */ +- unsigned int (*bpf_func)(const struct sk_buff *skb, +- const struct bpf_insn *filter); ++ unsigned int (*bpf_func)(const void *ctx, ++ const struct bpf_insn *insn); + /* Instructions for interpreter */ + union { + struct sock_filter insns[0]; +@@ -343,44 +546,160 @@ struct bpf_prog { + }; + + struct sk_filter { +- atomic_t refcnt; ++ refcount_t refcnt; + struct rcu_head rcu; + struct bpf_prog *prog; + }; + +-#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) ++#define BPF_PROG_RUN(prog, ctx) ({ \ ++ u32 ret; \ ++ ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \ ++ ret; }) ++ ++#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN ++ ++struct bpf_skb_data_end { ++ struct qdisc_skb_cb qdisc_cb; ++ void *data_meta; ++ void *data_end; ++}; + +-static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, +- struct sk_buff *skb) ++struct bpf_redirect_info { ++ u32 flags; ++ u32 tgt_index; ++ void *tgt_value; ++ struct bpf_map *map; ++ struct bpf_map *map_to_flush; ++ u32 kern_flags; ++}; ++ ++DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); ++ ++/* flags for bpf_redirect_info kern_flags */ ++#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */ ++ ++/* Compute the linear packet data range [data, data_end) which ++ * will be accessed by various program types (cls_bpf, act_bpf, ++ * lwt, ...). Subsystems allowing direct data access must (!) ++ * ensure that cb[] area can be written to when BPF program is ++ * invoked (otherwise cb[] save/restore is necessary). 
++ */ ++static inline void bpf_compute_data_pointers(struct sk_buff *skb) + { +- u8 *cb_data = qdisc_skb_cb(skb)->data; +- u8 saved_cb[QDISC_CB_PRIV_LEN]; +- u32 res; ++ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; ++ ++ BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); ++ cb->data_meta = skb->data; ++ cb->data_end = skb->data + skb_headlen(skb); ++} + ++/* Similar to bpf_compute_data_pointers(), except that save orginal ++ * data in cb->data and cb->meta_data for restore. ++ */ ++static inline void bpf_compute_and_save_data_end( ++ struct sk_buff *skb, void **saved_data_end) ++{ ++ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; ++ ++ *saved_data_end = cb->data_end; ++ cb->data_end = skb->data + skb_headlen(skb); ++} ++ ++/* Restore data saved by bpf_compute_data_pointers(). */ ++static inline void bpf_restore_data_end( ++ struct sk_buff *skb, void *saved_data_end) ++{ ++ struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; ++ ++ cb->data_end = saved_data_end; ++} ++ ++static inline u8 *bpf_skb_cb(struct sk_buff *skb) ++{ ++ /* eBPF programs may read/write skb->cb[] area to transfer meta ++ * data between tail calls. Since this also needs to work with ++ * tc, that scratch memory is mapped to qdisc_skb_cb's data area. ++ * ++ * In some socket filter cases, the cb unfortunately needs to be ++ * saved/restored so that protocol specific skb->cb[] data won't ++ * be lost. In any case, due to unpriviledged eBPF programs ++ * attached to sockets, we need to clear the bpf_skb_cb() area ++ * to not leak previous contents to user space. 
++ */ ++ BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN); + BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != +- QDISC_CB_PRIV_LEN); ++ FIELD_SIZEOF(struct qdisc_skb_cb, data)); ++ ++ return qdisc_skb_cb(skb)->data; ++} ++ ++static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, ++ struct sk_buff *skb) ++{ ++ u8 *cb_data = bpf_skb_cb(skb); ++ u8 cb_saved[BPF_SKB_CB_LEN]; ++ u32 res; + + if (unlikely(prog->cb_access)) { +- memcpy(saved_cb, cb_data, sizeof(saved_cb)); +- memset(cb_data, 0, sizeof(saved_cb)); ++ memcpy(cb_saved, cb_data, sizeof(cb_saved)); ++ memset(cb_data, 0, sizeof(cb_saved)); + } + + res = BPF_PROG_RUN(prog, skb); + + if (unlikely(prog->cb_access)) +- memcpy(cb_data, saved_cb, sizeof(saved_cb)); ++ memcpy(cb_data, cb_saved, sizeof(cb_saved)); ++ ++ return res; ++} ++ ++static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, ++ struct sk_buff *skb) ++{ ++ u32 res; + ++ preempt_disable(); ++ res = __bpf_prog_run_save_cb(prog, skb); ++ preempt_enable(); + return res; + } + + static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, + struct sk_buff *skb) + { +- u8 *cb_data = qdisc_skb_cb(skb)->data; ++ u8 *cb_data = bpf_skb_cb(skb); ++ u32 res; + + if (unlikely(prog->cb_access)) +- memset(cb_data, 0, QDISC_CB_PRIV_LEN); +- return BPF_PROG_RUN(prog, skb); ++ memset(cb_data, 0, BPF_SKB_CB_LEN); ++ ++ preempt_disable(); ++ res = BPF_PROG_RUN(prog, skb); ++ preempt_enable(); ++ return res; ++} ++ ++static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, ++ struct xdp_buff *xdp) ++{ ++ /* Caller needs to hold rcu_read_lock() (!), otherwise program ++ * can be released while still running, or map elements could be ++ * freed early while still having concurrent users. XDP fastpath ++ * already takes rcu_read_lock() when fetching the program, so ++ * it's not necessary here anymore. 
++ */ ++ return BPF_PROG_RUN(prog, xdp); ++} ++ ++static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog) ++{ ++ return prog->len * sizeof(struct bpf_insn); ++} ++ ++static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog) ++{ ++ return round_up(bpf_prog_insn_size(prog) + ++ sizeof(__be64) + 1, SHA_MESSAGE_BYTES); + } + + static inline unsigned int bpf_prog_size(unsigned int proglen) +@@ -399,27 +718,77 @@ static inline bool bpf_prog_was_classic( + return prog->type == BPF_PROG_TYPE_UNSPEC; + } + ++static inline u32 bpf_ctx_off_adjust_machine(u32 size) ++{ ++ const u32 size_machine = sizeof(unsigned long); ++ ++ if (size > size_machine && size % size_machine == 0) ++ size = size_machine; ++ ++ return size; ++} ++ ++static inline bool ++bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default) ++{ ++ return size <= size_default && (size & (size - 1)) == 0; ++} ++ ++static inline u8 ++bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default) ++{ ++ u8 access_off = off & (size_default - 1); ++ ++#ifdef __LITTLE_ENDIAN ++ return access_off; ++#else ++ return size_default - (access_off + size); ++#endif ++} ++ ++#define bpf_ctx_wide_access_ok(off, size, type, field) \ ++ (size == sizeof(__u64) && \ ++ off >= offsetof(type, field) && \ ++ off + sizeof(__u64) <= offsetofend(type, field) && \ ++ off % sizeof(__u64) == 0) ++ + #define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) + +-#ifdef CONFIG_DEBUG_SET_MODULE_RONX + static inline void bpf_prog_lock_ro(struct bpf_prog *fp) + { +- set_memory_ro((unsigned long)fp, fp->pages); ++#ifndef CONFIG_BPF_JIT_ALWAYS_ON ++ if (!fp->jited) { ++ fp->undo_set_mem = 1; ++ set_memory_ro((unsigned long)fp, fp->pages); ++ } ++#endif + } + + static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) + { +- set_memory_rw((unsigned long)fp, fp->pages); ++ if (fp->undo_set_mem) ++ set_memory_rw((unsigned long)fp, fp->pages); + } +-#else +-static inline void bpf_prog_lock_ro(struct 
bpf_prog *fp) ++ ++static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr) + { ++ set_memory_ro((unsigned long)hdr, hdr->pages); ++ set_memory_x((unsigned long)hdr, hdr->pages); + } + +-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp) ++static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr) ++{ ++ set_memory_rw((unsigned long)hdr, hdr->pages); ++} ++ ++static inline struct bpf_binary_header * ++bpf_jit_binary_hdr(const struct bpf_prog *fp) + { ++ unsigned long real_start = (unsigned long)fp->bpf_func; ++ unsigned long addr = real_start & PAGE_MASK; ++ ++ return (void *)addr; + } +-#endif /* CONFIG_DEBUG_SET_MODULE_RONX */ + + int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap); + static inline int sk_filter(struct sock *sk, struct sk_buff *skb) +@@ -427,10 +796,20 @@ static inline int sk_filter(struct sock + return sk_filter_trim_cap(sk, skb, 1); + } + +-int bpf_prog_select_runtime(struct bpf_prog *fp); ++struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); + void bpf_prog_free(struct bpf_prog *fp); + ++bool bpf_opcode_in_insntable(u8 code); ++ ++void bpf_prog_free_linfo(struct bpf_prog *prog); ++void bpf_prog_fill_jited_linfo(struct bpf_prog *prog, ++ const u32 *insn_to_jit_off); ++int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog); ++void bpf_prog_free_jited_linfo(struct bpf_prog *prog); ++void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog); ++ + struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); ++struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags); + struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, + gfp_t gfp_extra_flags); + void __bpf_prog_free(struct bpf_prog *fp); +@@ -450,12 +829,11 @@ int bpf_prog_create_from_user(struct bpf + void bpf_prog_destroy(struct bpf_prog *fp); + + int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); +-int __sk_attach_filter(struct 
sock_fprog *fprog, struct sock *sk, +- bool locked); + int sk_attach_bpf(u32 ufd, struct sock *sk); ++int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk); ++int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk); ++void sk_reuseport_prog_free(struct bpf_prog *prog); + int sk_detach_filter(struct sock *sk); +-int __sk_detach_filter(struct sock *sk, bool locked); +- + int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, + unsigned int len); + +@@ -463,10 +841,100 @@ bool sk_filter_charge(struct sock *sk, s + void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); + + u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +-void bpf_int_jit_compile(struct bpf_prog *fp); +-bool bpf_helper_changes_skb_data(void *func); ++#define __bpf_call_base_args \ ++ ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ ++ (void *)__bpf_call_base) ++ ++struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); ++void bpf_jit_compile(struct bpf_prog *prog); ++bool bpf_jit_needs_zext(void); ++bool bpf_helper_changes_pkt_data(void *func); ++ ++static inline bool bpf_dump_raw_ok(const struct cred *cred) ++{ ++ /* Reconstruction of call-sites is dependent on kallsyms, ++ * thus make dump the same restriction. 
++ */ ++ return true; ++} ++ ++struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, ++ const struct bpf_insn *patch, u32 len); ++int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt); ++ ++void bpf_clear_redirect_map(struct bpf_map *map); ++ ++static inline bool xdp_return_frame_no_direct(void) ++{ ++ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); ++ ++ return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT; ++} ++ ++static inline void xdp_set_return_frame_no_direct(void) ++{ ++ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); ++ ++ ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT; ++} ++ ++static inline void xdp_clear_return_frame_no_direct(void) ++{ ++ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); ++ ++ ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT; ++} ++ ++static inline int xdp_ok_fwd_dev(const struct net_device *fwd, ++ unsigned int pktlen) ++{ ++ unsigned int len; ++ ++ if (unlikely(!(fwd->flags & IFF_UP))) ++ return -ENETDOWN; ++ ++ len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN; ++ if (pktlen > len) ++ return -EMSGSIZE; ++ ++ return 0; ++} ++ ++/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the ++ * same cpu context. Further for best results no more than a single map ++ * for the do_redirect/do_flush pair should be used. This limitation is ++ * because we only track one map and force a flush when the map changes. ++ * This does not appear to be a real limitation for existing software. 
++ */ ++int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, ++ struct xdp_buff *xdp, struct bpf_prog *prog); ++int xdp_do_redirect(struct net_device *dev, ++ struct xdp_buff *xdp, ++ struct bpf_prog *prog); ++void xdp_do_flush_map(void); ++ ++void bpf_warn_invalid_xdp_action(u32 act); ++ ++#ifdef CONFIG_INET ++struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, ++ struct bpf_prog *prog, struct sk_buff *skb, ++ u32 hash); ++#else ++static inline struct sock * ++bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, ++ struct bpf_prog *prog, struct sk_buff *skb, ++ u32 hash) ++{ ++ return NULL; ++} ++#endif + + #ifdef CONFIG_BPF_JIT ++extern int bpf_jit_enable; ++extern int bpf_jit_harden; ++extern int bpf_jit_kallsyms; ++extern long bpf_jit_limit; ++ + typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size); + + struct bpf_binary_header * +@@ -474,10 +942,18 @@ bpf_jit_binary_alloc(unsigned int progle + unsigned int alignment, + bpf_jit_fill_hole_t bpf_fill_ill_insns); + void bpf_jit_binary_free(struct bpf_binary_header *hdr); +- +-void bpf_jit_compile(struct bpf_prog *fp); ++u64 bpf_jit_alloc_exec_limit(void); ++void *bpf_jit_alloc_exec(unsigned long size); ++void bpf_jit_free_exec(void *addr); + void bpf_jit_free(struct bpf_prog *fp); + ++int bpf_jit_get_func_addr(const struct bpf_prog *prog, ++ const struct bpf_insn *insn, bool extra_pass, ++ u64 *func_addr, bool *func_addr_fixed); ++ ++struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp); ++void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other); ++ + static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, + u32 pass, void *image) + { +@@ -488,17 +964,144 @@ static inline void bpf_jit_dump(unsigned + print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET, + 16, 1, image, proglen, false); + } +-#else +-static inline void bpf_jit_compile(struct bpf_prog *fp) ++ ++static inline bool 
bpf_jit_is_ebpf(void) ++{ ++# ifdef CONFIG_HAVE_EBPF_JIT ++ return true; ++# else ++ return false; ++# endif ++} ++ ++static inline bool ebpf_jit_enabled(void) ++{ ++ return bpf_jit_enable && bpf_jit_is_ebpf(); ++} ++ ++static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) ++{ ++ return fp->jited && bpf_jit_is_ebpf(); ++} ++ ++static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) ++{ ++ /* These are the prerequisites, should someone ever have the ++ * idea to call blinding outside of them, we make sure to ++ * bail out. ++ */ ++ if (!bpf_jit_is_ebpf()) ++ return false; ++ if (!prog->jit_requested) ++ return false; ++ if (!bpf_jit_harden) ++ return false; ++ if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN)) ++ return false; ++ ++ return true; ++} ++ ++static inline bool bpf_jit_kallsyms_enabled(void) + { ++ /* There are a couple of corner cases where kallsyms should ++ * not be enabled f.e. on hardening. ++ */ ++ if (bpf_jit_harden) ++ return false; ++ if (!bpf_jit_kallsyms) ++ return false; ++ if (bpf_jit_kallsyms == 1) ++ return true; ++ ++ return false; ++} ++ ++const char *__bpf_address_lookup(unsigned long addr, unsigned long *size, ++ unsigned long *off, char *sym); ++bool is_bpf_text_address(unsigned long addr); ++int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type, ++ char *sym); ++ ++static inline const char * ++bpf_address_lookup(unsigned long addr, unsigned long *size, ++ unsigned long *off, char **modname, char *sym) ++{ ++ const char *ret = __bpf_address_lookup(addr, size, off, sym); ++ ++ if (ret && modname) ++ *modname = NULL; ++ return ret; ++} ++ ++void bpf_prog_kallsyms_add(struct bpf_prog *fp); ++void bpf_prog_kallsyms_del(struct bpf_prog *fp); ++void bpf_get_prog_name(const struct bpf_prog *prog, char *sym); ++ ++#else /* CONFIG_BPF_JIT */ ++ ++static inline bool ebpf_jit_enabled(void) ++{ ++ return false; ++} ++ ++static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) ++{ ++ return 
false; + } + + static inline void bpf_jit_free(struct bpf_prog *fp) + { + bpf_prog_unlock_free(fp); + } ++ ++static inline bool bpf_jit_kallsyms_enabled(void) ++{ ++ return false; ++} ++ ++static inline const char * ++__bpf_address_lookup(unsigned long addr, unsigned long *size, ++ unsigned long *off, char *sym) ++{ ++ return NULL; ++} ++ ++static inline bool is_bpf_text_address(unsigned long addr) ++{ ++ return false; ++} ++ ++static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value, ++ char *type, char *sym) ++{ ++ return -ERANGE; ++} ++ ++static inline const char * ++bpf_address_lookup(unsigned long addr, unsigned long *size, ++ unsigned long *off, char **modname, char *sym) ++{ ++ return NULL; ++} ++ ++static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) ++{ ++} ++ ++static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) ++{ ++} ++ ++static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym) ++{ ++ sym[0] = '\0'; ++} ++ + #endif /* CONFIG_BPF_JIT */ + ++void bpf_prog_kallsyms_del_all(struct bpf_prog *fp); ++ + #define BPF_ANC BIT(15) + + static inline bool bpf_needs_clear_a(const struct sock_filter *first) +@@ -571,4 +1174,59 @@ static inline int bpf_tell_extensions(vo + return SKF_AD_MAX; + } + ++struct bpf_sock_addr_kern { ++ struct sock *sk; ++ struct sockaddr *uaddr; ++ /* Temporary "register" to make indirect stores to nested structures ++ * defined above. We need three registers to make such a store, but ++ * only two (src and dst) are available at convert_ctx_access time ++ */ ++ u64 tmp_reg; ++ void *t_ctx; /* Attach type specific context. */ ++}; ++ ++struct bpf_sock_ops_kern { ++ struct sock *sk; ++ u32 op; ++ union { ++ u32 args[4]; ++ u32 reply; ++ u32 replylong[4]; ++ }; ++ u32 is_fullsock; ++ u64 temp; /* temp and everything after is not ++ * initialized to 0 before calling ++ * the BPF program. New fields that ++ * should be initialized to 0 should ++ * be inserted before temp. 
++ * temp is scratch storage used by ++ * sock_ops_convert_ctx_access ++ * as temporary storage of a register. ++ */ ++}; ++ ++struct bpf_sysctl_kern { ++ struct ctl_table_header *head; ++ struct ctl_table *table; ++ void *cur_val; ++ size_t cur_len; ++ void *new_val; ++ size_t new_len; ++ int new_updated; ++ int write; ++ loff_t *ppos; ++ /* Temporary "register" for indirect stores to ppos. */ ++ u64 tmp_reg; ++}; ++ ++struct bpf_sockopt_kern { ++ struct sock *sk; ++ u8 *optval; ++ u8 *optval_end; ++ s32 level; ++ s32 optname; ++ s32 optlen; ++ s32 retval; ++}; ++ + #endif /* __LINUX_FILTER_H__ */ +--- /dev/null ++++ b/include/linux/set_memory.h +@@ -0,0 +1,47 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* ++ * Copyright 2017, Michael Ellerman, IBM Corporation. ++ */ ++#ifndef _LINUX_SET_MEMORY_H_ ++#define _LINUX_SET_MEMORY_H_ ++ ++#include ++ ++#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP ++static inline int set_direct_map_invalid_noflush(struct page *page) ++{ ++ return 0; ++} ++static inline int set_direct_map_default_noflush(struct page *page) ++{ ++ return 0; ++} ++#endif ++ ++#ifndef set_mce_nospec ++static inline int set_mce_nospec(unsigned long pfn, bool unmap) ++{ ++ return 0; ++} ++#endif ++ ++#ifndef clear_mce_nospec ++static inline int clear_mce_nospec(unsigned long pfn) ++{ ++ return 0; ++} ++#endif ++ ++#ifndef CONFIG_ARCH_HAS_MEM_ENCRYPT ++static inline int set_memory_encrypted(unsigned long addr, int numpages) ++{ ++ return 0; ++} ++ ++static inline int set_memory_decrypted(unsigned long addr, int numpages) ++{ ++ return 0; ++} ++#endif /* CONFIG_ARCH_HAS_MEM_ENCRYPT */ ++ ++#endif /* _LINUX_SET_MEMORY_H_ */ +--- /dev/null ++++ b/include/trace/events/xdp.h +@@ -0,0 +1,407 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM xdp ++ ++#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_XDP_H ++ ++#include ++#include ++#include ++#include ++ ++#define __XDP_ACT_MAP(FN) \ ++ FN(ABORTED) 
\ ++ FN(DROP) \ ++ FN(PASS) \ ++ FN(TX) \ ++ FN(REDIRECT) ++ ++#define __XDP_ACT_TP_FN(x) \ ++ TRACE_DEFINE_ENUM(XDP_##x); ++#define __XDP_ACT_SYM_FN(x) \ ++ { XDP_##x, #x }, ++#define __XDP_ACT_SYM_TAB \ ++ __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 } ++__XDP_ACT_MAP(__XDP_ACT_TP_FN) ++ ++TRACE_EVENT(xdp_exception, ++ ++ TP_PROTO(const struct net_device *dev, ++ const struct bpf_prog *xdp, u32 act), ++ ++ TP_ARGS(dev, xdp, act), ++ ++ TP_STRUCT__entry( ++ __field(int, prog_id) ++ __field(u32, act) ++ __field(int, ifindex) ++ ), ++ ++ TP_fast_assign( ++ __entry->prog_id = xdp->aux->id; ++ __entry->act = act; ++ __entry->ifindex = dev->ifindex; ++ ), ++ ++ TP_printk("prog_id=%d action=%s ifindex=%d", ++ __entry->prog_id, ++ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), ++ __entry->ifindex) ++); ++ ++TRACE_EVENT(xdp_bulk_tx, ++ ++ TP_PROTO(const struct net_device *dev, ++ int sent, int drops, int err), ++ ++ TP_ARGS(dev, sent, drops, err), ++ ++ TP_STRUCT__entry( ++ __field(int, ifindex) ++ __field(u32, act) ++ __field(int, drops) ++ __field(int, sent) ++ __field(int, err) ++ ), ++ ++ TP_fast_assign( ++ __entry->ifindex = dev->ifindex; ++ __entry->act = XDP_TX; ++ __entry->drops = drops; ++ __entry->sent = sent; ++ __entry->err = err; ++ ), ++ ++ TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d", ++ __entry->ifindex, ++ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), ++ __entry->sent, __entry->drops, __entry->err) ++); ++ ++DECLARE_EVENT_CLASS(xdp_redirect_template, ++ ++ TP_PROTO(const struct net_device *dev, ++ const struct bpf_prog *xdp, ++ int to_ifindex, int err, ++ const struct bpf_map *map, u32 map_index), ++ ++ TP_ARGS(dev, xdp, to_ifindex, err, map, map_index), ++ ++ TP_STRUCT__entry( ++ __field(int, prog_id) ++ __field(u32, act) ++ __field(int, ifindex) ++ __field(int, err) ++ __field(int, to_ifindex) ++ __field(u32, map_id) ++ __field(int, map_index) ++ ), ++ ++ TP_fast_assign( ++ __entry->prog_id = xdp->aux->id; ++ __entry->act = XDP_REDIRECT; 
++ __entry->ifindex = dev->ifindex; ++ __entry->err = err; ++ __entry->to_ifindex = to_ifindex; ++ __entry->map_id = map ? map->id : 0; ++ __entry->map_index = map_index; ++ ), ++ ++ TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d", ++ __entry->prog_id, ++ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), ++ __entry->ifindex, __entry->to_ifindex, ++ __entry->err) ++); ++ ++DEFINE_EVENT(xdp_redirect_template, xdp_redirect, ++ TP_PROTO(const struct net_device *dev, ++ const struct bpf_prog *xdp, ++ int to_ifindex, int err, ++ const struct bpf_map *map, u32 map_index), ++ TP_ARGS(dev, xdp, to_ifindex, err, map, map_index) ++); ++ ++DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err, ++ TP_PROTO(const struct net_device *dev, ++ const struct bpf_prog *xdp, ++ int to_ifindex, int err, ++ const struct bpf_map *map, u32 map_index), ++ TP_ARGS(dev, xdp, to_ifindex, err, map, map_index) ++); ++ ++#define _trace_xdp_redirect(dev, xdp, to) \ ++ trace_xdp_redirect(dev, xdp, to, 0, NULL, 0); ++ ++#define _trace_xdp_redirect_err(dev, xdp, to, err) \ ++ trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0); ++ ++DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map, ++ TP_PROTO(const struct net_device *dev, ++ const struct bpf_prog *xdp, ++ int to_ifindex, int err, ++ const struct bpf_map *map, u32 map_index), ++ TP_ARGS(dev, xdp, to_ifindex, err, map, map_index), ++ TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d" ++ " map_id=%d map_index=%d", ++ __entry->prog_id, ++ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), ++ __entry->ifindex, __entry->to_ifindex, ++ __entry->err, ++ __entry->map_id, __entry->map_index) ++); ++ ++DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err, ++ TP_PROTO(const struct net_device *dev, ++ const struct bpf_prog *xdp, ++ int to_ifindex, int err, ++ const struct bpf_map *map, u32 map_index), ++ TP_ARGS(dev, xdp, to_ifindex, err, map, map_index), ++ TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d 
err=%d" ++ " map_id=%d map_index=%d", ++ __entry->prog_id, ++ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), ++ __entry->ifindex, __entry->to_ifindex, ++ __entry->err, ++ __entry->map_id, __entry->map_index) ++); ++ ++#ifndef __DEVMAP_OBJ_TYPE ++#define __DEVMAP_OBJ_TYPE ++struct _bpf_dtab_netdev { ++ struct net_device *dev; ++}; ++#endif /* __DEVMAP_OBJ_TYPE */ ++ ++#define devmap_ifindex(fwd, map) \ ++ ((map->map_type == BPF_MAP_TYPE_DEVMAP || \ ++ map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) ? \ ++ ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0) ++ ++#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \ ++ trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \ ++ 0, map, idx) ++ ++#define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err) \ ++ trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map), \ ++ err, map, idx) ++ ++TRACE_EVENT(xdp_cpumap_kthread, ++ ++ TP_PROTO(int map_id, unsigned int processed, unsigned int drops, ++ int sched), ++ ++ TP_ARGS(map_id, processed, drops, sched), ++ ++ TP_STRUCT__entry( ++ __field(int, map_id) ++ __field(u32, act) ++ __field(int, cpu) ++ __field(unsigned int, drops) ++ __field(unsigned int, processed) ++ __field(int, sched) ++ ), ++ ++ TP_fast_assign( ++ __entry->map_id = map_id; ++ __entry->act = XDP_REDIRECT; ++ __entry->cpu = smp_processor_id(); ++ __entry->drops = drops; ++ __entry->processed = processed; ++ __entry->sched = sched; ++ ), ++ ++ TP_printk("kthread" ++ " cpu=%d map_id=%d action=%s" ++ " processed=%u drops=%u" ++ " sched=%d", ++ __entry->cpu, __entry->map_id, ++ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), ++ __entry->processed, __entry->drops, ++ __entry->sched) ++); ++ ++TRACE_EVENT(xdp_cpumap_enqueue, ++ ++ TP_PROTO(int map_id, unsigned int processed, unsigned int drops, ++ int to_cpu), ++ ++ TP_ARGS(map_id, processed, drops, to_cpu), ++ ++ TP_STRUCT__entry( ++ __field(int, map_id) ++ __field(u32, act) ++ __field(int, cpu) ++ __field(unsigned int, drops) ++ 
__field(unsigned int, processed) ++ __field(int, to_cpu) ++ ), ++ ++ TP_fast_assign( ++ __entry->map_id = map_id; ++ __entry->act = XDP_REDIRECT; ++ __entry->cpu = smp_processor_id(); ++ __entry->drops = drops; ++ __entry->processed = processed; ++ __entry->to_cpu = to_cpu; ++ ), ++ ++ TP_printk("enqueue" ++ " cpu=%d map_id=%d action=%s" ++ " processed=%u drops=%u" ++ " to_cpu=%d", ++ __entry->cpu, __entry->map_id, ++ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), ++ __entry->processed, __entry->drops, ++ __entry->to_cpu) ++); ++ ++TRACE_EVENT(xdp_devmap_xmit, ++ ++ TP_PROTO(const struct bpf_map *map, u32 map_index, ++ int sent, int drops, ++ const struct net_device *from_dev, ++ const struct net_device *to_dev, int err), ++ ++ TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err), ++ ++ TP_STRUCT__entry( ++ __field(int, map_id) ++ __field(u32, act) ++ __field(u32, map_index) ++ __field(int, drops) ++ __field(int, sent) ++ __field(int, from_ifindex) ++ __field(int, to_ifindex) ++ __field(int, err) ++ ), ++ ++ TP_fast_assign( ++ __entry->map_id = map->id; ++ __entry->act = XDP_REDIRECT; ++ __entry->map_index = map_index; ++ __entry->drops = drops; ++ __entry->sent = sent; ++ __entry->from_ifindex = from_dev->ifindex; ++ __entry->to_ifindex = to_dev->ifindex; ++ __entry->err = err; ++ ), ++ ++ TP_printk("ndo_xdp_xmit" ++ " map_id=%d map_index=%d action=%s" ++ " sent=%d drops=%d" ++ " from_ifindex=%d to_ifindex=%d err=%d", ++ __entry->map_id, __entry->map_index, ++ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB), ++ __entry->sent, __entry->drops, ++ __entry->from_ifindex, __entry->to_ifindex, __entry->err) ++); ++ ++/* Expect users already include , but not xdp_priv.h */ ++#include ++ ++#define __MEM_TYPE_MAP(FN) \ ++ FN(PAGE_SHARED) \ ++ FN(PAGE_ORDER0) \ ++ FN(PAGE_POOL) \ ++ FN(ZERO_COPY) ++ ++#define __MEM_TYPE_TP_FN(x) \ ++ TRACE_DEFINE_ENUM(MEM_TYPE_##x); ++#define __MEM_TYPE_SYM_FN(x) \ ++ { MEM_TYPE_##x, #x }, ++#define __MEM_TYPE_SYM_TAB \ ++ 
__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 } ++__MEM_TYPE_MAP(__MEM_TYPE_TP_FN) ++ ++TRACE_EVENT(mem_disconnect, ++ ++ TP_PROTO(const struct xdp_mem_allocator *xa), ++ ++ TP_ARGS(xa), ++ ++ TP_STRUCT__entry( ++ __field(const struct xdp_mem_allocator *, xa) ++ __field(u32, mem_id) ++ __field(u32, mem_type) ++ __field(const void *, allocator) ++ ), ++ ++ TP_fast_assign( ++ __entry->xa = xa; ++ __entry->mem_id = xa->mem.id; ++ __entry->mem_type = xa->mem.type; ++ __entry->allocator = xa->allocator; ++ ), ++ ++ TP_printk("mem_id=%d mem_type=%s allocator=%p", ++ __entry->mem_id, ++ __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB), ++ __entry->allocator ++ ) ++); ++ ++TRACE_EVENT(mem_connect, ++ ++ TP_PROTO(const struct xdp_mem_allocator *xa, ++ const struct xdp_rxq_info *rxq), ++ ++ TP_ARGS(xa, rxq), ++ ++ TP_STRUCT__entry( ++ __field(const struct xdp_mem_allocator *, xa) ++ __field(u32, mem_id) ++ __field(u32, mem_type) ++ __field(const void *, allocator) ++ __field(const struct xdp_rxq_info *, rxq) ++ __field(int, ifindex) ++ ), ++ ++ TP_fast_assign( ++ __entry->xa = xa; ++ __entry->mem_id = xa->mem.id; ++ __entry->mem_type = xa->mem.type; ++ __entry->allocator = xa->allocator; ++ __entry->rxq = rxq; ++ __entry->ifindex = rxq->dev->ifindex; ++ ), ++ ++ TP_printk("mem_id=%d mem_type=%s allocator=%p" ++ " ifindex=%d", ++ __entry->mem_id, ++ __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB), ++ __entry->allocator, ++ __entry->ifindex ++ ) ++); ++ ++TRACE_EVENT(mem_return_failed, ++ ++ TP_PROTO(const struct xdp_mem_info *mem, ++ const struct page *page), ++ ++ TP_ARGS(mem, page), ++ ++ TP_STRUCT__entry( ++ __field(const struct page *, page) ++ __field(u32, mem_id) ++ __field(u32, mem_type) ++ ), ++ ++ TP_fast_assign( ++ __entry->page = page; ++ __entry->mem_id = mem->id; ++ __entry->mem_type = mem->type; ++ ), ++ ++ TP_printk("mem_id=%d mem_type=%s page=%p", ++ __entry->mem_id, ++ __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB), ++ __entry->page ++ ) 
++); ++ ++#endif /* _TRACE_XDP_H */ ++ ++#include +--- /dev/null ++++ b/include/net/xdp_priv.h +@@ -0,0 +1,20 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef __LINUX_NET_XDP_PRIV_H__ ++#define __LINUX_NET_XDP_PRIV_H__ ++ ++#include ++#include ++ ++/* Private to net/core/xdp.c, but used by trace/events/xdp.h */ ++struct xdp_mem_allocator { ++ struct xdp_mem_info mem; ++ union { ++ void *allocator; ++ struct page_pool *page_pool; ++ struct zero_copy_allocator *zc_alloc; ++ }; ++ struct rhash_head node; ++ struct rcu_head rcu; ++}; ++ ++#endif /* __LINUX_NET_XDP_PRIV_H__ */ +--- /dev/null ++++ b/include/net/xdp.h +@@ -0,0 +1,184 @@ ++/* SPDX-License-Identifier: GPL-2.0-only */ ++/* include/net/xdp.h ++ * ++ * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc. ++ */ ++#ifndef __LINUX_NET_XDP_H__ ++#define __LINUX_NET_XDP_H__ ++ ++/** ++ * DOC: XDP RX-queue information ++ * ++ * The XDP RX-queue info (xdp_rxq_info) is associated with the driver ++ * level RX-ring queues. It is information that is specific to how ++ * the driver have configured a given RX-ring queue. ++ * ++ * Each xdp_buff frame received in the driver carry a (pointer) ++ * reference to this xdp_rxq_info structure. This provides the XDP ++ * data-path read-access to RX-info for both kernel and bpf-side ++ * (limited subset). ++ * ++ * For now, direct access is only safe while running in NAPI/softirq ++ * context. Contents is read-mostly and must not be updated during ++ * driver NAPI/softirq poll. ++ * ++ * The driver usage API is a register and unregister API. ++ * ++ * The struct is not directly tied to the XDP prog. A new XDP prog ++ * can be attached as long as it doesn't change the underlying ++ * RX-ring. If the RX-ring does change significantly, the NIC driver ++ * naturally need to stop the RX-ring before purging and reallocating ++ * memory. In that process the driver MUST call unregistor (which ++ * also apply for driver shutdown and unload). 
The register API is ++ * also mandatory during RX-ring setup. ++ */ ++ ++enum xdp_mem_type { ++ MEM_TYPE_PAGE_SHARED = 0, /* Split-page refcnt based model */ ++ MEM_TYPE_PAGE_ORDER0, /* Orig XDP full page model */ ++ MEM_TYPE_PAGE_POOL, ++ MEM_TYPE_ZERO_COPY, ++ MEM_TYPE_MAX, ++}; ++ ++/* XDP flags for ndo_xdp_xmit */ ++#define XDP_XMIT_FLUSH (1U << 0) /* doorbell signal consumer */ ++#define XDP_XMIT_FLAGS_MASK XDP_XMIT_FLUSH ++ ++struct xdp_mem_info { ++ u32 type; /* enum xdp_mem_type, but known size type */ ++ u32 id; ++}; ++ ++struct page_pool; ++ ++struct zero_copy_allocator { ++ void (*free)(struct zero_copy_allocator *zca, unsigned long handle); ++}; ++ ++struct xdp_rxq_info { ++ struct net_device *dev; ++ u32 queue_index; ++ u32 reg_state; ++ struct xdp_mem_info mem; ++} ____cacheline_aligned; /* perf critical, avoid false-sharing */ ++ ++struct xdp_buff { ++ void *data; ++ void *data_end; ++ void *data_meta; ++ void *data_hard_start; ++ unsigned long handle; ++ struct xdp_rxq_info *rxq; ++}; ++ ++struct xdp_frame { ++ void *data; ++ u16 len; ++ u16 headroom; ++ u16 metasize; ++ /* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time, ++ * while mem info is valid on remote CPU. ++ */ ++ struct xdp_mem_info mem; ++ struct net_device *dev_rx; /* used by cpumap */ ++}; ++ ++/* Clear kernel pointers in xdp_frame */ ++static inline void xdp_scrub_frame(struct xdp_frame *frame) ++{ ++ frame->data = NULL; ++ frame->dev_rx = NULL; ++} ++ ++struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp); ++ ++/* Convert xdp_buff to xdp_frame */ ++static inline ++struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp) ++{ ++ struct xdp_frame *xdp_frame; ++ int metasize; ++ int headroom; ++ ++ if (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ++ return xdp_convert_zc_to_xdp_frame(xdp); ++ ++ /* Assure headroom is available for storing info */ ++ headroom = xdp->data - xdp->data_hard_start; ++ metasize = xdp->data - xdp->data_meta; ++ metasize = metasize > 0 ? 
metasize : 0; ++ if (unlikely((headroom - metasize) < sizeof(*xdp_frame))) ++ return NULL; ++ ++ /* Store info in top of packet */ ++ xdp_frame = xdp->data_hard_start; ++ ++ xdp_frame->data = xdp->data; ++ xdp_frame->len = xdp->data_end - xdp->data; ++ xdp_frame->headroom = headroom - sizeof(*xdp_frame); ++ xdp_frame->metasize = metasize; ++ ++ /* rxq only valid until napi_schedule ends, convert to xdp_mem_info */ ++ xdp_frame->mem = xdp->rxq->mem; ++ ++ return xdp_frame; ++} ++ ++void xdp_return_frame(struct xdp_frame *xdpf); ++void xdp_return_frame_rx_napi(struct xdp_frame *xdpf); ++void xdp_return_buff(struct xdp_buff *xdp); ++ ++/* When sending xdp_frame into the network stack, then there is no ++ * return point callback, which is needed to release e.g. DMA-mapping ++ * resources with page_pool. Thus, have explicit function to release ++ * frame resources. ++ */ ++void __xdp_release_frame(void *data, struct xdp_mem_info *mem); ++static inline void xdp_release_frame(struct xdp_frame *xdpf) ++{ ++ struct xdp_mem_info *mem = &xdpf->mem; ++ ++ /* Curr only page_pool needs this */ ++ if (mem->type == MEM_TYPE_PAGE_POOL) ++ __xdp_release_frame(xdpf->data, mem); ++} ++ ++int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq, ++ struct net_device *dev, u32 queue_index); ++void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq); ++void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq); ++bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq); ++int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq, ++ enum xdp_mem_type type, void *allocator); ++void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq); ++ ++/* Drivers not supporting XDP metadata can use this helper, which ++ * rejects any room expansion for metadata as a result. 
++ */ ++static __always_inline void ++xdp_set_data_meta_invalid(struct xdp_buff *xdp) ++{ ++ xdp->data_meta = xdp->data + 1; ++} ++ ++static __always_inline bool ++xdp_data_meta_unsupported(const struct xdp_buff *xdp) ++{ ++ return unlikely(xdp->data_meta > xdp->data); ++} ++ ++struct xdp_attachment_info { ++ struct bpf_prog *prog; ++ u32 flags; ++}; ++ ++struct netdev_bpf; ++int xdp_attachment_query(struct xdp_attachment_info *info, ++ struct netdev_bpf *bpf); ++bool xdp_attachment_flags_ok(struct xdp_attachment_info *info, ++ struct netdev_bpf *bpf); ++void xdp_attachment_setup(struct xdp_attachment_info *info, ++ struct netdev_bpf *bpf); ++ ++#endif /* __LINUX_NET_XDP_H__ */ +--- a/include/linux/atomic.h ++++ b/include/linux/atomic.h +@@ -437,6 +437,8 @@ static inline int atomic_add_unless(atom + return __atomic_add_unless(v, a, u) != u; + } + ++#define atomic_fetch_add_unless __atomic_add_unless ++ + /** + * atomic_inc_not_zero - increment unless the number is zero + * @v: pointer of type atomic_t +--- a/include/linux/kernel.h ++++ b/include/linux/kernel.h +@@ -45,6 +45,13 @@ + + #define STACK_MAGIC 0xdeadbeef + ++#define u64_to_user_ptr(x) ( \ ++ { \ ++ typecheck(u64, (x)); \ ++ (void __user *)(uintptr_t)(x); \ ++ } \ ++) ++ + #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) + + #define ALIGN(x, a) __ALIGN_KERNEL((x), (a)) +--- /dev/null ++++ b/include/linux/tnum.h +@@ -0,0 +1,89 @@ ++/* tnum: tracked (or tristate) numbers ++ * ++ * A tnum tracks knowledge about the bits of a value. Each bit can be either ++ * known (0 or 1), or unknown (x). Arithmetic operations on tnums will ++ * propagate the unknown bits such that the tnum result represents all the ++ * possible results for possible values of the operands. ++ */ ++ ++#ifndef _LINUX_TNUM_H ++#define _LINUX_TNUM_H ++ ++#include ++ ++struct tnum { ++ u64 value; ++ u64 mask; ++}; ++ ++/* Constructors */ ++/* Represent a known constant as a tnum. 
*/ ++struct tnum tnum_const(u64 value); ++/* A completely unknown value */ ++extern const struct tnum tnum_unknown; ++/* A value that's unknown except that @min <= value <= @max */ ++struct tnum tnum_range(u64 min, u64 max); ++ ++/* Arithmetic and logical ops */ ++/* Shift a tnum left (by a fixed shift) */ ++struct tnum tnum_lshift(struct tnum a, u8 shift); ++/* Shift (rsh) a tnum right (by a fixed shift) */ ++struct tnum tnum_rshift(struct tnum a, u8 shift); ++/* Shift (arsh) a tnum right (by a fixed min_shift) */ ++struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness); ++/* Add two tnums, return @a + @b */ ++struct tnum tnum_add(struct tnum a, struct tnum b); ++/* Subtract two tnums, return @a - @b */ ++struct tnum tnum_sub(struct tnum a, struct tnum b); ++/* Bitwise-AND, return @a & @b */ ++struct tnum tnum_and(struct tnum a, struct tnum b); ++/* Bitwise-OR, return @a | @b */ ++struct tnum tnum_or(struct tnum a, struct tnum b); ++/* Bitwise-XOR, return @a ^ @b */ ++struct tnum tnum_xor(struct tnum a, struct tnum b); ++/* Multiply two tnums, return @a * @b */ ++struct tnum tnum_mul(struct tnum a, struct tnum b); ++ ++/* Return a tnum representing numbers satisfying both @a and @b */ ++struct tnum tnum_intersect(struct tnum a, struct tnum b); ++ ++/* Return @a with all but the lowest @size bytes cleared */ ++struct tnum tnum_cast(struct tnum a, u8 size); ++ ++/* Returns true if @a is a known constant */ ++static inline bool tnum_is_const(struct tnum a) ++{ ++ return !a.mask; ++} ++ ++/* Returns true if @a == tnum_const(@b) */ ++static inline bool tnum_equals_const(struct tnum a, u64 b) ++{ ++ return tnum_is_const(a) && a.value == b; ++} ++ ++/* Returns true if @a is completely unknown */ ++static inline bool tnum_is_unknown(struct tnum a) ++{ ++ return !~a.mask; ++} ++ ++/* Returns true if @a is known to be a multiple of @size. ++ * @size must be a power of two. 
++ */ ++bool tnum_is_aligned(struct tnum a, u64 size); ++ ++/* Returns true if @b represents a subset of @a. */ ++bool tnum_in(struct tnum a, struct tnum b); ++ ++/* Formatting functions. These have snprintf-like semantics: they will write ++ * up to @size bytes (including the terminating NUL byte), and return the number ++ * of bytes (excluding the terminating NUL) which would have been written had ++ * sufficient space been available. (Thus tnum_sbin always returns 64.) ++ */ ++/* Format a tnum as a pair of hex numbers (value; mask) */ ++int tnum_strn(char *str, size_t size, struct tnum a); ++/* Format a tnum as tristate binary expansion */ ++int tnum_sbin(char *str, size_t size, struct tnum a); ++ ++#endif /* _LINUX_TNUM_H */ +--- a/include/linux/bitmap.h ++++ b/include/linux/bitmap.h +@@ -326,6 +326,24 @@ static inline int bitmap_parse(const cha + return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); + } + ++/** ++ * bitmap_from_u64 - Check and swap words within u64. ++ * @mask: source bitmap ++ * @dst: destination bitmap ++ * ++ * In 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]`` ++ * to read u64 mask, we will get the wrong word. ++ * That is ``(u32 *)(&val)[0]`` gets the upper 32 bits, ++ * but we expect the lower 32-bits of u64. ++ */ ++static inline void bitmap_from_u64(unsigned long *dst, u64 mask) ++{ ++ dst[0] = mask & ULONG_MAX; ++ ++ if (sizeof(mask) > sizeof(unsigned long)) ++ dst[1] = mask >> 32; ++} ++ + #endif /* __ASSEMBLY__ */ + + #endif /* __LINUX_BITMAP_H */ +--- /dev/null ++++ b/include/linux/overflow.h +@@ -0,0 +1,320 @@ ++/* SPDX-License-Identifier: GPL-2.0 OR MIT */ ++#ifndef __LINUX_OVERFLOW_H ++#define __LINUX_OVERFLOW_H ++ ++#include ++#include ++ ++/* ++ * In the fallback code below, we need to compute the minimum and ++ * maximum values representable in a given type. These macros may also ++ * be useful elsewhere, so we provide them outside the ++ * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block. 
++ * ++ * It would seem more obvious to do something like ++ * ++ * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0) ++ * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0) ++ * ++ * Unfortunately, the middle expressions, strictly speaking, have ++ * undefined behaviour, and at least some versions of gcc warn about ++ * the type_max expression (but not if -fsanitize=undefined is in ++ * effect; in that case, the warning is deferred to runtime...). ++ * ++ * The slightly excessive casting in type_min is to make sure the ++ * macros also produce sensible values for the exotic type _Bool. [The ++ * overflow checkers only almost work for _Bool, but that's ++ * a-feature-not-a-bug, since people shouldn't be doing arithmetic on ++ * _Bools. Besides, the gcc builtins don't allow _Bool* as third ++ * argument.] ++ * ++ * Idea stolen from ++ * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html - ++ * credit to Christian Biere. ++ */ ++#define is_signed_type(type) (((type)(-1)) < (type)1) ++#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type))) ++#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T))) ++#define type_min(T) ((T)((T)-type_max(T)-(T)1)) ++ ++/* ++ * Avoids triggering -Wtype-limits compilation warning, ++ * while using unsigned data types to check a < 0. ++ */ ++#define is_non_negative(a) ((a) > 0 || (a) == 0) ++#define is_negative(a) (!(is_non_negative(a))) ++ ++#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW ++/* ++ * For simplicity and code hygiene, the fallback code below insists on ++ * a, b and *d having the same type (similar to the min() and max() ++ * macros), whereas gcc's type-generic overflow checkers accept ++ * different types. Hence we don't just make check_add_overflow an ++ * alias for __builtin_add_overflow, but add type checks similar to ++ * below. 
++ */ ++#define check_add_overflow(a, b, d) ({ \ ++ typeof(a) __a = (a); \ ++ typeof(b) __b = (b); \ ++ typeof(d) __d = (d); \ ++ (void) (&__a == &__b); \ ++ (void) (&__a == __d); \ ++ __builtin_add_overflow(__a, __b, __d); \ ++}) ++ ++#define check_sub_overflow(a, b, d) ({ \ ++ typeof(a) __a = (a); \ ++ typeof(b) __b = (b); \ ++ typeof(d) __d = (d); \ ++ (void) (&__a == &__b); \ ++ (void) (&__a == __d); \ ++ __builtin_sub_overflow(__a, __b, __d); \ ++}) ++ ++#define check_mul_overflow(a, b, d) ({ \ ++ typeof(a) __a = (a); \ ++ typeof(b) __b = (b); \ ++ typeof(d) __d = (d); \ ++ (void) (&__a == &__b); \ ++ (void) (&__a == __d); \ ++ __builtin_mul_overflow(__a, __b, __d); \ ++}) ++ ++#else ++ ++ ++/* Checking for unsigned overflow is relatively easy without causing UB. */ ++#define __unsigned_add_overflow(a, b, d) ({ \ ++ typeof(a) __a = (a); \ ++ typeof(b) __b = (b); \ ++ typeof(d) __d = (d); \ ++ (void) (&__a == &__b); \ ++ (void) (&__a == __d); \ ++ *__d = __a + __b; \ ++ *__d < __a; \ ++}) ++#define __unsigned_sub_overflow(a, b, d) ({ \ ++ typeof(a) __a = (a); \ ++ typeof(b) __b = (b); \ ++ typeof(d) __d = (d); \ ++ (void) (&__a == &__b); \ ++ (void) (&__a == __d); \ ++ *__d = __a - __b; \ ++ __a < __b; \ ++}) ++/* ++ * If one of a or b is a compile-time constant, this avoids a division. ++ */ ++#define __unsigned_mul_overflow(a, b, d) ({ \ ++ typeof(a) __a = (a); \ ++ typeof(b) __b = (b); \ ++ typeof(d) __d = (d); \ ++ (void) (&__a == &__b); \ ++ (void) (&__a == __d); \ ++ *__d = __a * __b; \ ++ __builtin_constant_p(__b) ? \ ++ __b > 0 && __a > type_max(typeof(__a)) / __b : \ ++ __a > 0 && __b > type_max(typeof(__b)) / __a; \ ++}) ++ ++/* ++ * For signed types, detecting overflow is much harder, especially if ++ * we want to avoid UB. But the interface of these macros is such that ++ * we must provide a result in *d, and in fact we must produce the ++ * result promised by gcc's builtins, which is simply the possibly ++ * wrapped-around value. 
Fortunately, we can just formally do the ++ * operations in the widest relevant unsigned type (u64) and then ++ * truncate the result - gcc is smart enough to generate the same code ++ * with and without the (u64) casts. ++ */ ++ ++/* ++ * Adding two signed integers can overflow only if they have the same ++ * sign, and overflow has happened iff the result has the opposite ++ * sign. ++ */ ++#define __signed_add_overflow(a, b, d) ({ \ ++ typeof(a) __a = (a); \ ++ typeof(b) __b = (b); \ ++ typeof(d) __d = (d); \ ++ (void) (&__a == &__b); \ ++ (void) (&__a == __d); \ ++ *__d = (u64)__a + (u64)__b; \ ++ (((~(__a ^ __b)) & (*__d ^ __a)) \ ++ & type_min(typeof(__a))) != 0; \ ++}) ++ ++/* ++ * Subtraction is similar, except that overflow can now happen only ++ * when the signs are opposite. In this case, overflow has happened if ++ * the result has the opposite sign of a. ++ */ ++#define __signed_sub_overflow(a, b, d) ({ \ ++ typeof(a) __a = (a); \ ++ typeof(b) __b = (b); \ ++ typeof(d) __d = (d); \ ++ (void) (&__a == &__b); \ ++ (void) (&__a == __d); \ ++ *__d = (u64)__a - (u64)__b; \ ++ ((((__a ^ __b)) & (*__d ^ __a)) \ ++ & type_min(typeof(__a))) != 0; \ ++}) ++ ++/* ++ * Signed multiplication is rather hard. gcc always follows C99, so ++ * division is truncated towards 0. This means that we can write the ++ * overflow check like this: ++ * ++ * (a > 0 && (b > MAX/a || b < MIN/a)) || ++ * (a < -1 && (b > MIN/a || b < MAX/a) || ++ * (a == -1 && b == MIN) ++ * ++ * The redundant casts of -1 are to silence an annoying -Wtype-limits ++ * (included in -Wextra) warning: When the type is u8 or u16, the ++ * __b_c_e in check_mul_overflow obviously selects ++ * __unsigned_mul_overflow, but unfortunately gcc still parses this ++ * code and warns about the limited range of __b. 
++ */ ++ ++#define __signed_mul_overflow(a, b, d) ({ \ ++ typeof(a) __a = (a); \ ++ typeof(b) __b = (b); \ ++ typeof(d) __d = (d); \ ++ typeof(a) __tmax = type_max(typeof(a)); \ ++ typeof(a) __tmin = type_min(typeof(a)); \ ++ (void) (&__a == &__b); \ ++ (void) (&__a == __d); \ ++ *__d = (u64)__a * (u64)__b; \ ++ (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \ ++ (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \ ++ (__b == (typeof(__b))-1 && __a == __tmin); \ ++}) ++ ++ ++#define check_add_overflow(a, b, d) \ ++ __builtin_choose_expr(is_signed_type(typeof(a)), \ ++ __signed_add_overflow(a, b, d), \ ++ __unsigned_add_overflow(a, b, d)) ++ ++#define check_sub_overflow(a, b, d) \ ++ __builtin_choose_expr(is_signed_type(typeof(a)), \ ++ __signed_sub_overflow(a, b, d), \ ++ __unsigned_sub_overflow(a, b, d)) ++ ++#define check_mul_overflow(a, b, d) \ ++ __builtin_choose_expr(is_signed_type(typeof(a)), \ ++ __signed_mul_overflow(a, b, d), \ ++ __unsigned_mul_overflow(a, b, d)) ++ ++ ++#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */ ++ ++/** check_shl_overflow() - Calculate a left-shifted value and check overflow ++ * ++ * @a: Value to be shifted ++ * @s: How many bits left to shift ++ * @d: Pointer to where to store the result ++ * ++ * Computes *@d = (@a << @s) ++ * ++ * Returns true if '*d' cannot hold the result or when 'a << s' doesn't ++ * make sense. Example conditions: ++ * - 'a << s' causes bits to be lost when stored in *d. ++ * - 's' is garbage (e.g. negative) or so large that the result of ++ * 'a << s' is guaranteed to be 0. ++ * - 'a' is negative. ++ * - 'a << s' sets the sign bit, if any, in '*d'. ++ * ++ * '*d' will hold the results of the attempted shift, but is not ++ * considered "safe for use" if false is returned. 
++ */ ++#define check_shl_overflow(a, s, d) ({ \ ++ typeof(a) _a = a; \ ++ typeof(s) _s = s; \ ++ typeof(d) _d = d; \ ++ u64 _a_full = _a; \ ++ unsigned int _to_shift = \ ++ is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \ ++ *_d = (_a_full << _to_shift); \ ++ (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \ ++ (*_d >> _to_shift) != _a); \ ++}) ++ ++/** ++ * array_size() - Calculate size of 2-dimensional array. ++ * ++ * @a: dimension one ++ * @b: dimension two ++ * ++ * Calculates size of 2-dimensional array: @a * @b. ++ * ++ * Returns: number of bytes needed to represent the array or SIZE_MAX on ++ * overflow. ++ */ ++static inline __must_check size_t array_size(size_t a, size_t b) ++{ ++ size_t bytes; ++ ++ if (check_mul_overflow(a, b, &bytes)) ++ return SIZE_MAX; ++ ++ return bytes; ++} ++ ++/** ++ * array3_size() - Calculate size of 3-dimensional array. ++ * ++ * @a: dimension one ++ * @b: dimension two ++ * @c: dimension three ++ * ++ * Calculates size of 3-dimensional array: @a * @b * @c. ++ * ++ * Returns: number of bytes needed to represent the array or SIZE_MAX on ++ * overflow. ++ */ ++static inline __must_check size_t array3_size(size_t a, size_t b, size_t c) ++{ ++ size_t bytes; ++ ++ if (check_mul_overflow(a, b, &bytes)) ++ return SIZE_MAX; ++ if (check_mul_overflow(bytes, c, &bytes)) ++ return SIZE_MAX; ++ ++ return bytes; ++} ++ ++/* ++ * Compute a*b+c, returning SIZE_MAX on overflow. Internal helper for ++ * struct_size() below. ++ */ ++static inline __must_check size_t __ab_c_size(size_t a, size_t b, size_t c) ++{ ++ size_t bytes; ++ ++ if (check_mul_overflow(a, b, &bytes)) ++ return SIZE_MAX; ++ if (check_add_overflow(bytes, c, &bytes)) ++ return SIZE_MAX; ++ ++ return bytes; ++} ++ ++/** ++ * struct_size() - Calculate size of structure with trailing array. ++ * @p: Pointer to the structure. ++ * @member: Name of the array member. ++ * @n: Number of elements in the array. 
++ * ++ * Calculates size of memory needed for structure @p followed by an ++ * array of @n @member elements. ++ * ++ * Return: number of bytes needed or SIZE_MAX on overflow. ++ */ ++#define struct_size(p, member, n) \ ++ __ab_c_size(n, \ ++ sizeof(*(p)->member) + __must_be_array((p)->member),\ ++ sizeof(*(p))) ++ ++#endif /* __LINUX_OVERFLOW_H */ +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -1,3 +1,4 @@ ++// SPDX-License-Identifier: GPL-2.0-or-later + /* + * Linux Socket Filter - Kernel level socket filtering + * +@@ -12,11 +13,6 @@ + * Alexei Starovoitov + * Daniel Borkmann + * +- * This program is free software; you can redistribute it and/or +- * modify it under the terms of the GNU General Public License +- * as published by the Free Software Foundation; either version +- * 2 of the License, or (at your option) any later version. +- * + * Andi Kleen - Fix a few bad bugs and races. + * Kris Katterjohn - Added many additional checks in bpf_check_classic() + */ +@@ -26,11 +22,14 @@ + #include + #include + #include ++#include + #include + #include + #include + #include ++#include + #include ++#include + #include + #include + #include +@@ -39,17 +38,32 @@ + #include + #include + #include +-#include ++#include + #include ++#include + #include + #include + #include + #include + #include + #include +-#include + #include + #include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include + + /** + * sk_filter_trim_cap - run a packet through a socket filter +@@ -84,7 +98,12 @@ int sk_filter_trim_cap(struct sock *sk, + rcu_read_lock(); + filter = rcu_dereference(sk->sk_filter); + if (filter) { +- unsigned int pkt_len = bpf_prog_run_save_cb(filter->prog, skb); ++ struct sock *save_sk = skb->sk; ++ unsigned int pkt_len; ++ ++ skb->sk = sk; ++ pkt_len = bpf_prog_run_save_cb(filter->prog, skb); ++ skb->sk = save_sk; + err = pkt_len ? 
pskb_trim(skb, max(cap, pkt_len)) : -EPERM; + } + rcu_read_unlock(); +@@ -93,14 +112,13 @@ int sk_filter_trim_cap(struct sock *sk, + } + EXPORT_SYMBOL(sk_filter_trim_cap); + +-static u64 __skb_get_pay_offset(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) ++BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb) + { +- return skb_get_poff((struct sk_buff *)(unsigned long) ctx); ++ return skb_get_poff(skb); + } + +-static u64 __skb_get_nlattr(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) ++BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x) + { +- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx; + struct nlattr *nla; + + if (skb_is_nonlinear(skb)) +@@ -119,9 +137,8 @@ static u64 __skb_get_nlattr(u64 ctx, u64 + return 0; + } + +-static u64 __skb_get_nlattr_nest(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) ++BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x) + { +- struct sk_buff *skb = (struct sk_buff *)(unsigned long) ctx; + struct nlattr *nla; + + if (skb_is_nonlinear(skb)) +@@ -144,11 +161,98 @@ static u64 __skb_get_nlattr_nest(u64 ctx + return 0; + } + +-static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5) ++BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *, ++ data, int, headlen, int, offset) ++{ ++ u8 tmp, *ptr; ++ const int len = sizeof(tmp); ++ ++ if (offset >= 0) { ++ if (headlen - offset >= len) ++ return *(u8 *)(data + offset); ++ if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) ++ return tmp; ++ } else { ++ ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); ++ if (likely(ptr)) ++ return *(u8 *)ptr; ++ } ++ ++ return -EFAULT; ++} ++ ++BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb, ++ int, offset) ++{ ++ return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len, ++ offset); ++} ++ ++BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *, ++ data, int, headlen, int, offset) ++{ ++ u16 tmp, *ptr; ++ const int 
len = sizeof(tmp); ++ ++ if (offset >= 0) { ++ if (headlen - offset >= len) ++ return get_unaligned_be16(data + offset); ++ if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) ++ return be16_to_cpu(tmp); ++ } else { ++ ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); ++ if (likely(ptr)) ++ return get_unaligned_be16(ptr); ++ } ++ ++ return -EFAULT; ++} ++ ++BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb, ++ int, offset) ++{ ++ return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len, ++ offset); ++} ++ ++BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *, ++ data, int, headlen, int, offset) ++{ ++ u32 tmp, *ptr; ++ const int len = sizeof(tmp); ++ ++ if (likely(offset >= 0)) { ++ if (headlen - offset >= len) ++ return get_unaligned_be32(data + offset); ++ if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) ++ return be32_to_cpu(tmp); ++ } else { ++ ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); ++ if (likely(ptr)) ++ return get_unaligned_be32(ptr); ++ } ++ ++ return -EFAULT; ++} ++ ++BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb, ++ int, offset) ++{ ++ return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len, ++ offset); ++} ++ ++BPF_CALL_0(bpf_get_raw_cpu_id) + { + return raw_smp_processor_id(); + } + ++static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { ++ .func = bpf_get_raw_cpu_id, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++}; ++ + static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, + struct bpf_insn *insn_buf) + { +@@ -178,22 +282,18 @@ static u32 convert_skb_access(int skb_fi + break; + + case SKF_AD_VLAN_TAG: +- case SKF_AD_VLAN_TAG_PRESENT: + BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); +- BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); + + /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */ + *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, + offsetof(struct 
sk_buff, vlan_tci)); +- if (skb_field == SKF_AD_VLAN_TAG) { +- *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, +- ~VLAN_TAG_PRESENT); +- } else { +- /* dst_reg >>= 12 */ +- *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12); +- /* dst_reg &= 1 */ ++ break; ++ case SKF_AD_VLAN_TAG_PRESENT: ++ *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET()); ++ if (PKT_VLAN_PRESENT_BIT) ++ *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT); ++ if (PKT_VLAN_PRESENT_BIT < 7) + *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1); +- } + break; + } + +@@ -226,9 +326,8 @@ static bool convert_bpf_extensions(struc + case SKF_AD_OFF + SKF_AD_HATYPE: + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); + BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); +- BUILD_BUG_ON(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)) < 0); + +- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)), ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), + BPF_REG_TMP, BPF_REG_CTX, + offsetof(struct sk_buff, dev)); + /* if (tmp != 0) goto pc + 1 */ +@@ -295,16 +394,16 @@ static bool convert_bpf_extensions(struc + /* Emit call(arg1=CTX, arg2=A, arg3=X) */ + switch (fp->k) { + case SKF_AD_OFF + SKF_AD_PAY_OFFSET: +- *insn = BPF_EMIT_CALL(__skb_get_pay_offset); ++ *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset); + break; + case SKF_AD_OFF + SKF_AD_NLATTR: +- *insn = BPF_EMIT_CALL(__skb_get_nlattr); ++ *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr); + break; + case SKF_AD_OFF + SKF_AD_NLATTR_NEST: +- *insn = BPF_EMIT_CALL(__skb_get_nlattr_nest); ++ *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest); + break; + case SKF_AD_OFF + SKF_AD_CPU: +- *insn = BPF_EMIT_CALL(__get_raw_cpu_id); ++ *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id); + break; + case SKF_AD_OFF + SKF_AD_RANDOM: + *insn = BPF_EMIT_CALL(bpf_user_rnd_u32); +@@ -331,35 +430,101 @@ static bool convert_bpf_extensions(struc + return true; + } + ++static bool convert_bpf_ld_abs(struct sock_filter *fp, 
struct bpf_insn **insnp) ++{ ++ const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS); ++ int size = bpf_size_to_bytes(BPF_SIZE(fp->code)); ++ bool endian = BPF_SIZE(fp->code) == BPF_H || ++ BPF_SIZE(fp->code) == BPF_W; ++ bool indirect = BPF_MODE(fp->code) == BPF_IND; ++ const int ip_align = NET_IP_ALIGN; ++ struct bpf_insn *insn = *insnp; ++ int offset = fp->k; ++ ++ if (!indirect && ++ ((unaligned_ok && offset >= 0) || ++ (!unaligned_ok && offset >= 0 && ++ offset + ip_align >= 0 && ++ offset + ip_align % size == 0))) { ++ bool ldx_off_ok = offset <= S16_MAX; ++ ++ *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); ++ if (offset) ++ *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); ++ *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, ++ size, 2 + endian + (!ldx_off_ok * 2)); ++ if (ldx_off_ok) { ++ *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, ++ BPF_REG_D, offset); ++ } else { ++ *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D); ++ *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset); ++ *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, ++ BPF_REG_TMP, 0); ++ } ++ if (endian) ++ *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); ++ *insn++ = BPF_JMP_A(8); ++ } ++ ++ *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); ++ *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D); ++ *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H); ++ if (!indirect) { ++ *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset); ++ } else { ++ *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X); ++ if (fp->k) ++ *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset); ++ } ++ ++ switch (BPF_SIZE(fp->code)) { ++ case BPF_B: ++ *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8); ++ break; ++ case BPF_H: ++ *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16); ++ break; ++ case BPF_W: ++ *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32); ++ break; ++ default: ++ return false; ++ } ++ ++ *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2); ++ *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, 
BPF_REG_A); ++ *insn = BPF_EXIT_INSN(); ++ ++ *insnp = insn; ++ return true; ++} ++ + /** + * bpf_convert_filter - convert filter program + * @prog: the user passed filter program + * @len: the length of the user passed filter program +- * @new_prog: buffer where converted program will be stored ++ * @new_prog: allocated 'struct bpf_prog' or NULL + * @new_len: pointer to store length of converted program ++ * @seen_ld_abs: bool whether we've seen ld_abs/ind + * +- * Remap 'sock_filter' style BPF instruction set to 'sock_filter_ext' style. ++ * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn' ++ * style extended BPF (eBPF). + * Conversion workflow: + * + * 1) First pass for calculating the new program length: +- * bpf_convert_filter(old_prog, old_len, NULL, &new_len) ++ * bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs) + * + * 2) 2nd pass to remap in two passes: 1st pass finds new + * jump offsets, 2nd pass remapping: +- * new_prog = kmalloc(sizeof(struct bpf_insn) * new_len); +- * bpf_convert_filter(old_prog, old_len, new_prog, &new_len); +- * +- * User BPF's register A is mapped to our BPF register 6, user BPF +- * register X is mapped to BPF register 7; frame pointer is always +- * register 10; Context 'void *ctx' is stored in register 1, that is, +- * for socket filters: ctx == 'struct sk_buff *', for seccomp: +- * ctx == 'struct seccomp_data *'. 
++ * bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs) + */ + static int bpf_convert_filter(struct sock_filter *prog, int len, +- struct bpf_insn *new_prog, int *new_len) ++ struct bpf_prog *new_prog, int *new_len, ++ bool *seen_ld_abs) + { +- int new_flen = 0, pass = 0, target, i; +- struct bpf_insn *new_insn; ++ int new_flen = 0, pass = 0, target, i, stack_off; ++ struct bpf_insn *new_insn, *first_insn = NULL; + struct sock_filter *fp; + int *addrs = NULL; + u8 bpf_src; +@@ -371,6 +536,7 @@ static int bpf_convert_filter(struct soc + return -EINVAL; + + if (new_prog) { ++ first_insn = new_prog->insnsi; + addrs = kcalloc(len, sizeof(*addrs), + GFP_KERNEL | __GFP_NOWARN); + if (!addrs) +@@ -378,19 +544,47 @@ static int bpf_convert_filter(struct soc + } + + do_pass: +- new_insn = new_prog; ++ new_insn = first_insn; + fp = prog; + +- if (new_insn) +- *new_insn = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1); +- new_insn++; ++ /* Classic BPF related prologue emission. */ ++ if (new_prog) { ++ /* Classic BPF expects A and X to be reset first. These need ++ * to be guaranteed to be the first two instructions. ++ */ ++ *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); ++ *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X); ++ ++ /* All programs must keep CTX in callee saved BPF_REG_CTX. ++ * In eBPF case it's done by the compiler, here we need to ++ * do this ourself. Initial CTX is present in BPF_REG_ARG1. ++ */ ++ *new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1); ++ if (*seen_ld_abs) { ++ /* For packet access in classic BPF, cache skb->data ++ * in callee-saved BPF R8 and skb->len - skb->data_len ++ * (headlen) in BPF R9. Since classic BPF is read-only ++ * on CTX, we only need to cache it once. 
++ */ ++ *new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), ++ BPF_REG_D, BPF_REG_CTX, ++ offsetof(struct sk_buff, data)); ++ *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX, ++ offsetof(struct sk_buff, len)); ++ *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX, ++ offsetof(struct sk_buff, data_len)); ++ *new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP); ++ } ++ } else { ++ new_insn += 3; ++ } + + for (i = 0; i < len; fp++, i++) { +- struct bpf_insn tmp_insns[6] = { }; ++ struct bpf_insn tmp_insns[32] = { }; + struct bpf_insn *insn = tmp_insns; + + if (addrs) +- addrs[i] = new_insn - new_prog; ++ addrs[i] = new_insn - first_insn; + + switch (fp->code) { + /* All arithmetic insns and skb loads map as-is. */ +@@ -429,6 +623,22 @@ do_pass: + BPF_MODE(fp->code) == BPF_ABS && + convert_bpf_extensions(fp, &insn)) + break; ++ if (BPF_CLASS(fp->code) == BPF_LD && ++ convert_bpf_ld_abs(fp, &insn)) { ++ *seen_ld_abs = true; ++ break; ++ } ++ ++ if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) || ++ fp->code == (BPF_ALU | BPF_MOD | BPF_X)) { ++ *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X); ++ /* Error with exception code on div/mod by 0. ++ * For cBPF programs, this was always return 0. ++ */ ++ *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2); ++ *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); ++ *insn++ = BPF_EXIT_INSN(); ++ } + + *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k); + break; +@@ -441,11 +651,18 @@ do_pass: + + #define BPF_EMIT_JMP \ + do { \ ++ const s32 off_min = S16_MIN, off_max = S16_MAX; \ ++ s32 off; \ ++ \ + if (target >= len || target < 0) \ + goto err; \ +- insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \ ++ off = addrs ? addrs[target] - addrs[i] - 1 : 0; \ + /* Adjust pc relative offset for 2nd or 3rd insn. */ \ +- insn->off -= insn - tmp_insns; \ ++ off -= insn - tmp_insns; \ ++ /* Reject anything not fitting into insn->off. 
*/ \ ++ if (off < off_min || off > off_max) \ ++ goto err; \ ++ insn->off = off; \ + } while (0) + + case BPF_JMP | BPF_JA: +@@ -487,14 +704,27 @@ do_pass: + break; + } + +- /* Convert JEQ into JNE when 'jump_true' is next insn. */ +- if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) { +- insn->code = BPF_JMP | BPF_JNE | bpf_src; ++ /* Convert some jumps when 'jump_true' is next insn. */ ++ if (fp->jt == 0) { ++ switch (BPF_OP(fp->code)) { ++ case BPF_JEQ: ++ insn->code = BPF_JMP | BPF_JNE | bpf_src; ++ break; ++ case BPF_JGT: ++ insn->code = BPF_JMP | BPF_JLE | bpf_src; ++ break; ++ case BPF_JGE: ++ insn->code = BPF_JMP | BPF_JLT | bpf_src; ++ break; ++ default: ++ goto jmp_rest; ++ } ++ + target = i + fp->jf + 1; + BPF_EMIT_JMP; + break; + } +- ++jmp_rest: + /* Other jumps are mapped into two insns: Jxx and JA. */ + target = i + fp->jt + 1; + insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src; +@@ -507,44 +737,64 @@ do_pass: + break; + + /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */ +- case BPF_LDX | BPF_MSH | BPF_B: +- /* tmp = A */ +- *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A); ++ case BPF_LDX | BPF_MSH | BPF_B: { ++ struct sock_filter tmp = { ++ .code = BPF_LD | BPF_ABS | BPF_B, ++ .k = fp->k, ++ }; ++ ++ *seen_ld_abs = true; ++ ++ /* X = A */ ++ *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); + /* A = BPF_R0 = *(u8 *) (skb->data + K) */ +- *insn++ = BPF_LD_ABS(BPF_B, fp->k); ++ convert_bpf_ld_abs(&tmp, &insn); ++ insn++; + /* A &= 0xf */ + *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf); + /* A <<= 2 */ + *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2); ++ /* tmp = X */ ++ *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X); + /* X = A */ + *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A); + /* A = tmp */ + *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP); + break; +- +- /* RET_K, RET_A are remaped into 2 insns. */ ++ } ++ /* RET_K is remaped into 2 insns. RET_A case doesn't need an ++ * extra mov as BPF_REG_0 is already mapped into BPF_REG_A. 
++ */ + case BPF_RET | BPF_A: + case BPF_RET | BPF_K: +- *insn++ = BPF_MOV32_RAW(BPF_RVAL(fp->code) == BPF_K ? +- BPF_K : BPF_X, BPF_REG_0, +- BPF_REG_A, fp->k); ++ if (BPF_RVAL(fp->code) == BPF_K) ++ *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0, ++ 0, fp->k); + *insn = BPF_EXIT_INSN(); + break; + + /* Store to stack. */ + case BPF_ST: + case BPF_STX: ++ stack_off = fp->k * 4 + 4; + *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) == + BPF_ST ? BPF_REG_A : BPF_REG_X, +- -(BPF_MEMWORDS - fp->k) * 4); ++ -stack_off); ++ /* check_load_and_stores() verifies that classic BPF can ++ * load from stack only after write, so tracking ++ * stack_depth for ST|STX insns is enough ++ */ ++ if (new_prog && new_prog->aux->stack_depth < stack_off) ++ new_prog->aux->stack_depth = stack_off; + break; + + /* Load from stack. */ + case BPF_LD | BPF_MEM: + case BPF_LDX | BPF_MEM: ++ stack_off = fp->k * 4 + 4; + *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ? + BPF_REG_A : BPF_REG_X, BPF_REG_FP, +- -(BPF_MEMWORDS - fp->k) * 4); ++ -stack_off); + break; + + /* A = K or X = K */ +@@ -592,13 +842,15 @@ do_pass: + + if (!new_prog) { + /* Only calculating new length. */ +- *new_len = new_insn - new_prog; ++ *new_len = new_insn - first_insn; ++ if (*seen_ld_abs) ++ *new_len += 4; /* Prologue bits. 
*/ + return 0; + } + + pass++; +- if (new_flen != new_insn - new_prog) { +- new_flen = new_insn - new_prog; ++ if (new_flen != new_insn - first_insn) { ++ new_flen = new_insn - first_insn; + if (pass > 2) + goto err; + goto do_pass; +@@ -738,6 +990,17 @@ static bool chk_code_allowed(u16 code_to + return codes[code_to_probe]; + } + ++static bool bpf_check_basics_ok(const struct sock_filter *filter, ++ unsigned int flen) ++{ ++ if (filter == NULL) ++ return false; ++ if (flen == 0 || flen > BPF_MAXINSNS) ++ return false; ++ ++ return true; ++} ++ + /** + * bpf_check_classic - verify socket filter code + * @filter: filter to verify +@@ -758,9 +1021,6 @@ static int bpf_check_classic(const struc + bool anc_found; + int pc; + +- if (flen == 0 || flen > BPF_MAXINSNS) +- return -EINVAL; +- + /* Check the filter code now */ + for (pc = 0; pc < flen; pc++) { + const struct sock_filter *ftest = &filter[pc]; +@@ -901,7 +1161,7 @@ static void sk_filter_release_rcu(struct + */ + static void sk_filter_release(struct sk_filter *fp) + { +- if (atomic_dec_and_test(&fp->refcnt)) ++ if (refcount_dec_and_test(&fp->refcnt)) + call_rcu(&fp->rcu, sk_filter_release_rcu); + } + +@@ -916,25 +1176,37 @@ void sk_filter_uncharge(struct sock *sk, + /* try to charge the socket memory if there is space available + * return true on success + */ +-bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) ++static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp) + { + u32 filter_size = bpf_prog_size(fp->prog->len); + + /* same check as in sock_kmalloc() */ + if (filter_size <= sysctl_optmem_max && + atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) { +- atomic_inc(&fp->refcnt); + atomic_add(filter_size, &sk->sk_omem_alloc); + return true; + } + return false; + } + ++bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) ++{ ++ if (!refcount_inc_not_zero(&fp->refcnt)) ++ return false; ++ ++ if (!__sk_filter_charge(sk, fp)) { ++ sk_filter_release(fp); ++ return 
false; ++ } ++ return true; ++} ++ + static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) + { + struct sock_filter *old_prog; + struct bpf_prog *old_fp; + int err, new_len, old_len = fp->len; ++ bool seen_ld_abs = false; + + /* We are free to overwrite insns et al right here as it + * won't be used at this point in time anymore internally +@@ -956,7 +1228,8 @@ static struct bpf_prog *bpf_migrate_filt + } + + /* 1st pass: calculate the new program length. */ +- err = bpf_convert_filter(old_prog, old_len, NULL, &new_len); ++ err = bpf_convert_filter(old_prog, old_len, NULL, &new_len, ++ &seen_ld_abs); + if (err) + goto out_err_free; + +@@ -975,7 +1248,8 @@ static struct bpf_prog *bpf_migrate_filt + fp->len = new_len; + + /* 2nd pass: remap sock_filter insns into bpf_insn insns. */ +- err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len); ++ err = bpf_convert_filter(old_prog, old_len, fp, &new_len, ++ &seen_ld_abs); + if (err) + /* 2nd bpf_convert_filter() can fail only if it fails + * to allocate memory, remapping must succeed. Note, +@@ -984,7 +1258,9 @@ static struct bpf_prog *bpf_migrate_filt + */ + goto out_err_free; + +- bpf_prog_select_runtime(fp); ++ fp = bpf_prog_select_runtime(fp, &err); ++ if (err) ++ goto out_err_free; + + kfree(old_prog); + return fp; +@@ -1051,7 +1327,7 @@ int bpf_prog_create(struct bpf_prog **pf + struct bpf_prog *fp; + + /* Make sure new filter is there and in the right amounts. */ +- if (fprog->filter == NULL) ++ if (!bpf_check_basics_ok(fprog->filter, fprog->len)) + return -EINVAL; + + fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); +@@ -1098,7 +1374,7 @@ int bpf_prog_create_from_user(struct bpf + int err; + + /* Make sure new filter is there and in the right amounts. 
*/ +- if (fprog->filter == NULL) ++ if (!bpf_check_basics_ok(fprog->filter, fprog->len)) + return -EINVAL; + + fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); +@@ -1139,8 +1415,7 @@ void bpf_prog_destroy(struct bpf_prog *f + } + EXPORT_SYMBOL_GPL(bpf_prog_destroy); + +-static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk, +- bool locked) ++static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) + { + struct sk_filter *fp, *old_fp; + +@@ -1149,53 +1424,44 @@ static int __sk_attach_prog(struct bpf_p + return -ENOMEM; + + fp->prog = prog; +- atomic_set(&fp->refcnt, 0); + +- if (!sk_filter_charge(sk, fp)) { ++ if (!__sk_filter_charge(sk, fp)) { + kfree(fp); + return -ENOMEM; + } ++ refcount_set(&fp->refcnt, 1); + +- old_fp = rcu_dereference_protected(sk->sk_filter, locked); ++ old_fp = rcu_dereference_protected(sk->sk_filter, ++ lockdep_sock_is_held(sk)); + rcu_assign_pointer(sk->sk_filter, fp); ++ + if (old_fp) + sk_filter_uncharge(sk, old_fp); + + return 0; + } + +-/** +- * sk_attach_filter - attach a socket filter +- * @fprog: the filter program +- * @sk: the socket to use +- * +- * Attach the user's filter code. We first run some sanity checks on +- * it to make sure it does not explode on us later. If an error +- * occurs or there is insufficient memory for the filter a negative +- * errno code is returned. On success the return is zero. +- */ +-int __sk_attach_filter(struct sock_fprog *fprog, struct sock *sk, +- bool locked) ++static ++struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) + { + unsigned int fsize = bpf_classic_proglen(fprog); +- unsigned int bpf_fsize = bpf_prog_size(fprog->len); + struct bpf_prog *prog; + int err; + + if (sock_flag(sk, SOCK_FILTER_LOCKED)) +- return -EPERM; ++ return ERR_PTR(-EPERM); + + /* Make sure new filter is there and in the right amounts. 
*/ +- if (fprog->filter == NULL) +- return -EINVAL; ++ if (!bpf_check_basics_ok(fprog->filter, fprog->len)) ++ return ERR_PTR(-EINVAL); + +- prog = bpf_prog_alloc(bpf_fsize, 0); ++ prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); + if (!prog) +- return -ENOMEM; ++ return ERR_PTR(-ENOMEM); + + if (copy_from_user(prog->insns, fprog->filter, fsize)) { + __bpf_prog_free(prog); +- return -EFAULT; ++ return ERR_PTR(-EINVAL); + } + + prog->len = fprog->len; +@@ -1203,17 +1469,34 @@ int __sk_attach_filter(struct sock_fprog + err = bpf_prog_store_orig_filter(prog, fprog); + if (err) { + __bpf_prog_free(prog); +- return -ENOMEM; ++ return ERR_PTR(-ENOMEM); + } + + /* bpf_prepare_filter() already takes care of freeing + * memory in case something goes wrong. + */ +- prog = bpf_prepare_filter(prog, NULL); ++ return bpf_prepare_filter(prog, NULL); ++} ++ ++/** ++ * sk_attach_filter - attach a socket filter ++ * @fprog: the filter program ++ * @sk: the socket to use ++ * ++ * Attach the user's filter code. We first run some sanity checks on ++ * it to make sure it does not explode on us later. If an error ++ * occurs or there is insufficient memory for the filter a negative ++ * errno code is returned. On success the return is zero. 
++ */ ++int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) ++{ ++ struct bpf_prog *prog = __get_filter(fprog, sk); ++ int err; ++ + if (IS_ERR(prog)) + return PTR_ERR(prog); + +- err = __sk_attach_prog(prog, sk, locked); ++ err = __sk_attach_prog(prog, sk); + if (err < 0) { + __bpf_prog_release(prog); + return err; +@@ -1221,31 +1504,25 @@ int __sk_attach_filter(struct sock_fprog + + return 0; + } +-EXPORT_SYMBOL_GPL(__sk_attach_filter); ++EXPORT_SYMBOL_GPL(sk_attach_filter); + +-int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) ++static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk) + { +- return __sk_attach_filter(fprog, sk, sock_owned_by_user(sk)); ++ if (sock_flag(sk, SOCK_FILTER_LOCKED)) ++ return ERR_PTR(-EPERM); ++ ++ return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); + } + + int sk_attach_bpf(u32 ufd, struct sock *sk) + { +- struct bpf_prog *prog; ++ struct bpf_prog *prog = __get_bpf(ufd, sk); + int err; + +- if (sock_flag(sk, SOCK_FILTER_LOCKED)) +- return -EPERM; +- +- prog = bpf_prog_get(ufd); + if (IS_ERR(prog)) + return PTR_ERR(prog); + +- if (prog->type != BPF_PROG_TYPE_SOCKET_FILTER) { +- bpf_prog_put(prog); +- return -EINVAL; +- } +- +- err = __sk_attach_prog(prog, sk, sock_owned_by_user(sk)); ++ err = __sk_attach_prog(prog, sk); + if (err < 0) { + bpf_prog_put(prog); + return err; +@@ -1254,79 +1531,201 @@ int sk_attach_bpf(u32 ufd, struct sock * + return 0; + } + +-#define BPF_RECOMPUTE_CSUM(flags) ((flags) & 1) ++struct bpf_scratchpad { ++ union { ++ __be32 diff[MAX_BPF_STACK / sizeof(__be32)]; ++ u8 buff[MAX_BPF_STACK]; ++ }; ++}; ++ ++static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp); ++ ++static inline int __bpf_try_make_writable(struct sk_buff *skb, ++ unsigned int write_len) ++{ ++ return skb_ensure_writable(skb, write_len); ++} ++ ++static inline int bpf_try_make_writable(struct sk_buff *skb, ++ unsigned int write_len) ++{ ++ int err = __bpf_try_make_writable(skb, write_len); ++ ++ 
bpf_compute_data_pointers(skb); ++ return err; ++} ++ ++static int bpf_try_make_head_writable(struct sk_buff *skb) ++{ ++ return bpf_try_make_writable(skb, skb_headlen(skb)); ++} + +-static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags) ++static inline void bpf_push_mac_rcsum(struct sk_buff *skb) ++{ ++ if (skb_at_tc_ingress(skb)) ++ skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); ++} ++ ++static inline void bpf_pull_mac_rcsum(struct sk_buff *skb) ++{ ++ if (skb_at_tc_ingress(skb)) ++ skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len); ++} ++ ++BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset, ++ const void *, from, u32, len, u64, flags) + { +- struct sk_buff *skb = (struct sk_buff *) (long) r1; +- int offset = (int) r2; +- void *from = (void *) (long) r3; +- unsigned int len = (unsigned int) r4; +- char buf[16]; + void *ptr; + +- /* bpf verifier guarantees that: +- * 'from' pointer points to bpf program stack +- * 'len' bytes of it were initialized +- * 'len' > 0 +- * 'skb' is a valid pointer to 'struct sk_buff' +- * +- * so check for invalid 'offset' and too large 'len' +- */ +- if (unlikely((u32) offset > 0xffff || len > sizeof(buf))) ++ if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) ++ return -EINVAL; ++ if (unlikely(offset > 0xffff)) + return -EFAULT; +- if (unlikely(skb_try_make_writable(skb, offset + len))) ++ if (unlikely(bpf_try_make_writable(skb, offset + len))) + return -EFAULT; + +- ptr = skb_header_pointer(skb, offset, len, buf); +- if (unlikely(!ptr)) +- return -EFAULT; +- +- if (BPF_RECOMPUTE_CSUM(flags)) +- skb_postpull_rcsum(skb, ptr, len); ++ ptr = skb->data + offset; ++ if (flags & BPF_F_RECOMPUTE_CSUM) ++ __skb_postpull_rcsum(skb, ptr, len, offset); + + memcpy(ptr, from, len); + +- if (ptr == buf) +- /* skb_store_bits cannot return -EFAULT here */ +- skb_store_bits(skb, offset, ptr, len); ++ if (flags & BPF_F_RECOMPUTE_CSUM) ++ __skb_postpush_rcsum(skb, ptr, len, 
offset); ++ if (flags & BPF_F_INVALIDATE_HASH) ++ skb_clear_hash(skb); + +- if (BPF_RECOMPUTE_CSUM(flags) && skb->ip_summed == CHECKSUM_COMPLETE) +- skb->csum = csum_add(skb->csum, csum_partial(ptr, len, 0)); + return 0; + } + +-const struct bpf_func_proto bpf_skb_store_bytes_proto = { ++static const struct bpf_func_proto bpf_skb_store_bytes_proto = { + .func = bpf_skb_store_bytes, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, +- .arg3_type = ARG_PTR_TO_STACK, +- .arg4_type = ARG_CONST_STACK_SIZE, ++ .arg3_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_CONST_SIZE, + .arg5_type = ARG_ANYTHING, + }; + +-#define BPF_HEADER_FIELD_SIZE(flags) ((flags) & 0x0f) +-#define BPF_IS_PSEUDO_HEADER(flags) ((flags) & 0x10) ++BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset, ++ void *, to, u32, len) ++{ ++ void *ptr; ++ ++ if (unlikely(offset > 0xffff)) ++ goto err_clear; ++ ++ ptr = skb_header_pointer(skb, offset, len, to); ++ if (unlikely(!ptr)) ++ goto err_clear; ++ if (ptr != to) ++ memcpy(to, ptr, len); + +-static u64 bpf_l3_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) ++ return 0; ++err_clear: ++ memset(to, 0, len); ++ return -EFAULT; ++} ++ ++static const struct bpf_func_proto bpf_skb_load_bytes_proto = { ++ .func = bpf_skb_load_bytes, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_ANYTHING, ++ .arg3_type = ARG_PTR_TO_UNINIT_MEM, ++ .arg4_type = ARG_CONST_SIZE, ++}; ++ ++BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, ++ u32, offset, void *, to, u32, len, u32, start_header) + { +- struct sk_buff *skb = (struct sk_buff *) (long) r1; +- int offset = (int) r2; +- __sum16 sum, *ptr; ++ u8 *end = skb_tail_pointer(skb); ++ u8 *start, *ptr; + +- if (unlikely((u32) offset > 0xffff)) +- return -EFAULT; ++ if (unlikely(offset > 0xffff)) ++ goto err_clear; + +- if (unlikely(skb_try_make_writable(skb, offset + 
sizeof(sum)))) +- return -EFAULT; ++ switch (start_header) { ++ case BPF_HDR_START_MAC: ++ if (unlikely(!skb_mac_header_was_set(skb))) ++ goto err_clear; ++ start = skb_mac_header(skb); ++ break; ++ case BPF_HDR_START_NET: ++ start = skb_network_header(skb); ++ break; ++ default: ++ goto err_clear; ++ } + +- ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); +- if (unlikely(!ptr)) ++ ptr = start + offset; ++ ++ if (likely(ptr + len <= end)) { ++ memcpy(to, ptr, len); ++ return 0; ++ } ++ ++err_clear: ++ memset(to, 0, len); ++ return -EFAULT; ++} ++ ++static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = { ++ .func = bpf_skb_load_bytes_relative, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_ANYTHING, ++ .arg3_type = ARG_PTR_TO_UNINIT_MEM, ++ .arg4_type = ARG_CONST_SIZE, ++ .arg5_type = ARG_ANYTHING, ++}; ++ ++BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len) ++{ ++ /* Idea is the following: should the needed direct read/write ++ * test fail during runtime, we can pull in more data and redo ++ * again, since implicitly, we invalidate previous checks here. ++ * ++ * Or, since we know how much we need to make read/writeable, ++ * this can be done once at the program beginning for direct ++ * access case. By this we overcome limitations of only current ++ * headroom being accessible. ++ */ ++ return bpf_try_make_writable(skb, len ? 
: skb_headlen(skb)); ++} ++ ++static const struct bpf_func_proto bpf_skb_pull_data_proto = { ++ .func = bpf_skb_pull_data, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_ANYTHING, ++}; ++ ++BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, ++ u64, from, u64, to, u64, flags) ++{ ++ __sum16 *ptr; ++ ++ if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) ++ return -EINVAL; ++ if (unlikely(offset > 0xffff || offset & 1)) ++ return -EFAULT; ++ if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) + return -EFAULT; + +- switch (BPF_HEADER_FIELD_SIZE(flags)) { ++ ptr = (__sum16 *)(skb->data + offset); ++ switch (flags & BPF_F_HDR_FIELD_MASK) { ++ case 0: ++ if (unlikely(from != 0)) ++ return -EINVAL; ++ ++ csum_replace_by_diff(ptr, to); ++ break; + case 2: + csum_replace2(ptr, from, to); + break; +@@ -1337,14 +1736,10 @@ static u64 bpf_l3_csum_replace(u64 r1, u + return -EINVAL; + } + +- if (ptr == &sum) +- /* skb_store_bits guaranteed to not return -EFAULT here */ +- skb_store_bits(skb, offset, ptr, sizeof(sum)); +- + return 0; + } + +-const struct bpf_func_proto bpf_l3_csum_replace_proto = { ++static const struct bpf_func_proto bpf_l3_csum_replace_proto = { + .func = bpf_l3_csum_replace, + .gpl_only = false, + .ret_type = RET_INTEGER, +@@ -1355,23 +1750,33 @@ const struct bpf_func_proto bpf_l3_csum_ + .arg5_type = ARG_ANYTHING, + }; + +-static u64 bpf_l4_csum_replace(u64 r1, u64 r2, u64 from, u64 to, u64 flags) ++BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, ++ u64, from, u64, to, u64, flags) + { +- struct sk_buff *skb = (struct sk_buff *) (long) r1; +- bool is_pseudo = !!BPF_IS_PSEUDO_HEADER(flags); +- int offset = (int) r2; +- __sum16 sum, *ptr; ++ bool is_pseudo = flags & BPF_F_PSEUDO_HDR; ++ bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; ++ bool do_mforce = flags & BPF_F_MARK_ENFORCE; ++ __sum16 *ptr; + +- if (unlikely((u32) offset > 0xffff)) ++ if (unlikely(flags & 
~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE | ++ BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK))) ++ return -EINVAL; ++ if (unlikely(offset > 0xffff || offset & 1)) + return -EFAULT; +- if (unlikely(skb_try_make_writable(skb, offset + sizeof(sum)))) ++ if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) + return -EFAULT; + +- ptr = skb_header_pointer(skb, offset, sizeof(sum), &sum); +- if (unlikely(!ptr)) +- return -EFAULT; ++ ptr = (__sum16 *)(skb->data + offset); ++ if (is_mmzero && !do_mforce && !*ptr) ++ return 0; + +- switch (BPF_HEADER_FIELD_SIZE(flags)) { ++ switch (flags & BPF_F_HDR_FIELD_MASK) { ++ case 0: ++ if (unlikely(from != 0)) ++ return -EINVAL; ++ ++ inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo); ++ break; + case 2: + inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); + break; +@@ -1382,14 +1787,12 @@ static u64 bpf_l4_csum_replace(u64 r1, u + return -EINVAL; + } + +- if (ptr == &sum) +- /* skb_store_bits guaranteed to not return -EFAULT here */ +- skb_store_bits(skb, offset, ptr, sizeof(sum)); +- ++ if (is_mmzero && !*ptr) ++ *ptr = CSUM_MANGLED_0; + return 0; + } + +-const struct bpf_func_proto bpf_l4_csum_replace_proto = { ++static const struct bpf_func_proto bpf_l4_csum_replace_proto = { + .func = bpf_l4_csum_replace, + .gpl_only = false, + .ret_type = RET_INTEGER, +@@ -1400,30 +1803,172 @@ const struct bpf_func_proto bpf_l4_csum_ + .arg5_type = ARG_ANYTHING, + }; + +-#define BPF_IS_REDIRECT_INGRESS(flags) ((flags) & 1) ++BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size, ++ __be32 *, to, u32, to_size, __wsum, seed) ++{ ++ struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); ++ u32 diff_size = from_size + to_size; ++ int i, j = 0; ++ ++ /* This is quite flexible, some examples: ++ * ++ * from_size == 0, to_size > 0, seed := csum --> pushing data ++ * from_size > 0, to_size == 0, seed := csum --> pulling data ++ * from_size > 0, to_size > 0, seed := 0 --> diffing data ++ * ++ * Even for diffing, from_size and to_size 
don't need to be equal. ++ */ ++ if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) || ++ diff_size > sizeof(sp->diff))) ++ return -EINVAL; ++ ++ for (i = 0; i < from_size / sizeof(__be32); i++, j++) ++ sp->diff[j] = ~from[i]; ++ for (i = 0; i < to_size / sizeof(__be32); i++, j++) ++ sp->diff[j] = to[i]; ++ ++ return csum_partial(sp->diff, diff_size, seed); ++} ++ ++static const struct bpf_func_proto bpf_csum_diff_proto = { ++ .func = bpf_csum_diff, ++ .gpl_only = false, ++ .pkt_access = true, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_MEM_OR_NULL, ++ .arg2_type = ARG_CONST_SIZE_OR_ZERO, ++ .arg3_type = ARG_PTR_TO_MEM_OR_NULL, ++ .arg4_type = ARG_CONST_SIZE_OR_ZERO, ++ .arg5_type = ARG_ANYTHING, ++}; ++ ++BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum) ++{ ++ /* The interface is to be used in combination with bpf_csum_diff() ++ * for direct packet writes. csum rotation for alignment as well ++ * as emulating csum_sub() can be done from the eBPF program. 
++ */ ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ return (skb->csum = csum_add(skb->csum, csum)); ++ ++ return -ENOTSUPP; ++} ++ ++static const struct bpf_func_proto bpf_csum_update_proto = { ++ .func = bpf_csum_update, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_ANYTHING, ++}; ++ ++static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) ++{ ++ return dev_forward_skb(dev, skb); ++} ++ ++static inline int __bpf_rx_skb_no_mac(struct net_device *dev, ++ struct sk_buff *skb) ++{ ++ int ret = ____dev_forward_skb(dev, skb); ++ ++ if (likely(!ret)) { ++ skb->dev = dev; ++ ret = netif_rx(skb); ++ } ++ ++ return ret; ++} ++ ++static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) ++{ ++ int ret; ++ ++ skb->dev = dev; ++ skb->tstamp.tv64 = 0; ++ ++ ret = dev_queue_xmit(skb); ++ ++ return ret; ++} ++ ++static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, ++ u32 flags) ++{ ++ unsigned int mlen = skb_network_offset(skb); ++ ++ if (mlen) { ++ __skb_pull(skb, mlen); ++ ++ /* At ingress, the mac header has already been pulled once. ++ * At egress, skb_pospull_rcsum has to be done in case that ++ * the skb is originated from ingress (i.e. a forwarded skb) ++ * to ensure that rcsum starts at net header. ++ */ ++ if (!skb_at_tc_ingress(skb)) ++ skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); ++ } ++ skb_pop_mac_header(skb); ++ skb_reset_mac_len(skb); ++ return flags & BPF_F_INGRESS ? ++ __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); ++} + +-static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5) ++static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, ++ u32 flags) ++{ ++ /* Verify that a link layer header is carried */ ++ if (unlikely(skb->mac_header >= skb->network_header)) { ++ kfree_skb(skb); ++ return -ERANGE; ++ } ++ ++ bpf_push_mac_rcsum(skb); ++ return flags & BPF_F_INGRESS ? 
++ __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); ++} ++ ++static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, ++ u32 flags) ++{ ++ if (dev_is_mac_header_xmit(dev)) ++ return __bpf_redirect_common(skb, dev, flags); ++ else ++ return __bpf_redirect_no_mac(skb, dev, flags); ++} ++ ++BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) + { +- struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2; + struct net_device *dev; ++ struct sk_buff *clone; ++ int ret; ++ ++ if (unlikely(flags & ~(BPF_F_INGRESS))) ++ return -EINVAL; + + dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); + if (unlikely(!dev)) + return -EINVAL; + +- skb2 = skb_clone(skb, GFP_ATOMIC); +- if (unlikely(!skb2)) ++ clone = skb_clone(skb, GFP_ATOMIC); ++ if (unlikely(!clone)) + return -ENOMEM; + +- if (BPF_IS_REDIRECT_INGRESS(flags)) +- return dev_forward_skb(dev, skb2); ++ /* For direct write, we need to keep the invariant that the skbs ++ * we're dealing with need to be uncloned. Should uncloning fail ++ * here, we need to free the just generated clone to unclone once ++ * again. 
++ */ ++ ret = bpf_try_make_head_writable(skb); ++ if (unlikely(ret)) { ++ kfree_skb(clone); ++ return -ENOMEM; ++ } + +- skb2->dev = dev; +- skb_sender_cpu_clear(skb2); +- return dev_queue_xmit(skb2); ++ return __bpf_redirect(clone, dev, flags); + } + +-const struct bpf_func_proto bpf_clone_redirect_proto = { ++static const struct bpf_func_proto bpf_clone_redirect_proto = { + .func = bpf_clone_redirect, + .gpl_only = false, + .ret_type = RET_INTEGER, +@@ -1432,42 +1977,38 @@ const struct bpf_func_proto bpf_clone_re + .arg3_type = ARG_ANYTHING, + }; + +-struct redirect_info { +- u32 ifindex; +- u32 flags; +-}; ++DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); ++EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); + +-static DEFINE_PER_CPU(struct redirect_info, redirect_info); +-static u64 bpf_redirect(u64 ifindex, u64 flags, u64 r3, u64 r4, u64 r5) ++BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) + { +- struct redirect_info *ri = this_cpu_ptr(&redirect_info); ++ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); ++ ++ if (unlikely(flags & ~(BPF_F_INGRESS))) ++ return TC_ACT_SHOT; + +- ri->ifindex = ifindex; + ri->flags = flags; ++ ri->tgt_index = ifindex; ++ + return TC_ACT_REDIRECT; + } + + int skb_do_redirect(struct sk_buff *skb) + { +- struct redirect_info *ri = this_cpu_ptr(&redirect_info); ++ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct net_device *dev; + +- dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex); +- ri->ifindex = 0; ++ dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->tgt_index); ++ ri->tgt_index = 0; + if (unlikely(!dev)) { + kfree_skb(skb); + return -EINVAL; + } + +- if (BPF_IS_REDIRECT_INGRESS(ri->flags)) +- return dev_forward_skb(dev, skb); +- +- skb->dev = dev; +- skb_sender_cpu_clear(skb); +- return dev_queue_xmit(skb); ++ return __bpf_redirect(skb, dev, ri->flags); + } + +-const struct bpf_func_proto bpf_redirect_proto = { ++static const struct bpf_func_proto bpf_redirect_proto = 
{ + .func = bpf_redirect, + .gpl_only = false, + .ret_type = RET_INTEGER, +@@ -1475,50 +2016,75 @@ const struct bpf_func_proto bpf_redirect + .arg2_type = ARG_ANYTHING, + }; + +-static u64 bpf_get_cgroup_classid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ++BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb) + { +- return task_get_classid((struct sk_buff *) (unsigned long) r1); ++ /* If skb_clear_hash() was called due to mangling, we can ++ * trigger SW recalculation here. Later access to hash ++ * can then use the inline skb->hash via context directly ++ * instead of calling this helper again. ++ */ ++ return skb_get_hash(skb); + } + +-static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { +- .func = bpf_get_cgroup_classid, +- .gpl_only = false, +- .ret_type = RET_INTEGER, +- .arg1_type = ARG_PTR_TO_CTX, ++static const struct bpf_func_proto bpf_get_hash_recalc_proto = { ++ .func = bpf_get_hash_recalc, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, + }; + +-static u64 bpf_get_route_realm(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ++BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb) + { +-#ifdef CONFIG_IP_ROUTE_CLASSID +- const struct dst_entry *dst; ++ /* After all direct packet write, this can be used once for ++ * triggering a lazy recalc on next skb_get_hash() invocation. ++ */ ++ skb_clear_hash(skb); ++ return 0; ++} + +- dst = skb_dst((struct sk_buff *) (unsigned long) r1); +- if (dst) +- return dst->tclassid; +-#endif ++static const struct bpf_func_proto bpf_set_hash_invalid_proto = { ++ .func = bpf_set_hash_invalid, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++}; ++ ++BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash) ++{ ++ /* Set user specified hash as L4(+), so that it gets returned ++ * on skb_get_hash() call unless BPF prog later on triggers a ++ * skb_clear_hash(). 
++ */ ++ __skb_set_sw_hash(skb, hash, true); + return 0; + } + +-static const struct bpf_func_proto bpf_get_route_realm_proto = { +- .func = bpf_get_route_realm, +- .gpl_only = false, +- .ret_type = RET_INTEGER, +- .arg1_type = ARG_PTR_TO_CTX, ++static const struct bpf_func_proto bpf_set_hash_proto = { ++ .func = bpf_set_hash, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_ANYTHING, + }; + +-static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5) ++BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto, ++ u16, vlan_tci) + { +- struct sk_buff *skb = (struct sk_buff *) (long) r1; +- __be16 vlan_proto = (__force __be16) r2; ++ int ret; + + if (unlikely(vlan_proto != htons(ETH_P_8021Q) && + vlan_proto != htons(ETH_P_8021AD))) + vlan_proto = htons(ETH_P_8021Q); + +- return skb_vlan_push(skb, vlan_proto, vlan_tci); ++ bpf_push_mac_rcsum(skb); ++ ret = skb_vlan_push(skb, vlan_proto, vlan_tci); ++ bpf_pull_mac_rcsum(skb); ++ ++ bpf_compute_data_pointers(skb); ++ return ret; + } + +-const struct bpf_func_proto bpf_skb_vlan_push_proto = { ++static const struct bpf_func_proto bpf_skb_vlan_push_proto = { + .func = bpf_skb_vlan_push, + .gpl_only = false, + .ret_type = RET_INTEGER, +@@ -1526,116 +2092,401 @@ const struct bpf_func_proto bpf_skb_vlan + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, + }; +-EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto); + +-static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ++BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb) + { +- struct sk_buff *skb = (struct sk_buff *) (long) r1; ++ int ret; ++ ++ bpf_push_mac_rcsum(skb); ++ ret = skb_vlan_pop(skb); ++ bpf_pull_mac_rcsum(skb); + +- return skb_vlan_pop(skb); ++ bpf_compute_data_pointers(skb); ++ return ret; + } + +-const struct bpf_func_proto bpf_skb_vlan_pop_proto = { ++static const struct bpf_func_proto bpf_skb_vlan_pop_proto = { + .func = bpf_skb_vlan_pop, + .gpl_only = false, + 
.ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + }; +-EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto); + +-bool bpf_helper_changes_skb_data(void *func) ++BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type) + { +- if (func == bpf_skb_vlan_push) +- return true; +- if (func == bpf_skb_vlan_pop) +- return true; +- if (func == bpf_skb_store_bytes) +- return true; +- if (func == bpf_l3_csum_replace) +- return true; +- if (func == bpf_l4_csum_replace) +- return true; ++ /* We only allow a restricted subset to be changed for now. */ ++ if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || ++ !skb_pkt_type_ok(pkt_type))) ++ return -EINVAL; + +- return false; ++ skb->pkt_type = pkt_type; ++ return 0; ++} ++ ++static const struct bpf_func_proto bpf_skb_change_type_proto = { ++ .func = bpf_skb_change_type, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_ANYTHING, ++}; ++ ++#define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \ ++ BPF_F_ADJ_ROOM_ENCAP_L3_IPV6) ++ ++#define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \ ++ BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \ ++ BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \ ++ BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \ ++ BPF_F_ADJ_ROOM_ENCAP_L2( \ ++ BPF_ADJ_ROOM_ENCAP_L2_MASK)) ++ ++#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC ++ ++static u32 __bpf_skb_min_len(const struct sk_buff *skb) ++{ ++ u32 min_len = skb_network_offset(skb); ++ ++ if (skb_transport_header_was_set(skb)) ++ min_len = skb_transport_offset(skb); ++ if (skb->ip_summed == CHECKSUM_PARTIAL) ++ min_len = skb_checksum_start_offset(skb) + ++ skb->csum_offset + sizeof(__sum16); ++ return min_len; + } + +-static u64 bpf_skb_get_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) ++static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) + { +- struct sk_buff *skb = (struct sk_buff *) (long) r1; +- struct bpf_tunnel_key *to = (struct bpf_tunnel_key *) (long) r2; +- struct ip_tunnel_info *info = 
skb_tunnel_info(skb); ++ unsigned int old_len = skb->len; ++ int ret; + +- if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags || !info)) ++ ret = __skb_grow_rcsum(skb, new_len); ++ if (!ret) ++ memset(skb->data + old_len, 0, new_len - old_len); ++ return ret; ++} ++ ++static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) ++{ ++ return __skb_trim_rcsum(skb, new_len); ++} ++ ++static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len, ++ u64 flags) ++{ ++ u32 max_len = BPF_SKB_MAX_LEN; ++ u32 min_len = __bpf_skb_min_len(skb); ++ int ret; ++ ++ if (unlikely(flags || new_len > max_len || new_len < min_len)) + return -EINVAL; +- if (ip_tunnel_info_af(info) != AF_INET) ++ if (skb->encapsulation) ++ return -ENOTSUPP; ++ ++ /* The basic idea of this helper is that it's performing the ++ * needed work to either grow or trim an skb, and eBPF program ++ * rewrites the rest via helpers like bpf_skb_store_bytes(), ++ * bpf_lX_csum_replace() and others rather than passing a raw ++ * buffer here. This one is a slow path helper and intended ++ * for replies with control messages. ++ * ++ * Like in bpf_skb_change_proto(), we want to keep this rather ++ * minimal and without protocol specifics so that we are able ++ * to separate concerns as in bpf_skb_store_bytes() should only ++ * be the one responsible for writing buffers. ++ * ++ * It's really expected to be a slow path operation here for ++ * control message replies, so we're implicitly linearizing, ++ * uncloning and drop offloads from the skb by this. 
++ */ ++ ret = __bpf_try_make_writable(skb, skb->len); ++ if (!ret) { ++ if (new_len > skb->len) ++ ret = bpf_skb_grow_rcsum(skb, new_len); ++ else if (new_len < skb->len) ++ ret = bpf_skb_trim_rcsum(skb, new_len); ++ if (!ret && skb_is_gso(skb)) ++ skb_gso_reset(skb); ++ } ++ return ret; ++} ++ ++BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, ++ u64, flags) ++{ ++ int ret = __bpf_skb_change_tail(skb, new_len, flags); ++ ++ bpf_compute_data_pointers(skb); ++ return ret; ++} ++ ++static const struct bpf_func_proto bpf_skb_change_tail_proto = { ++ .func = bpf_skb_change_tail, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_ANYTHING, ++ .arg3_type = ARG_ANYTHING, ++}; ++ ++static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, ++ u64 flags) ++{ ++ u32 max_len = BPF_SKB_MAX_LEN; ++ u32 new_len = skb->len + head_room; ++ int ret; ++ ++ if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || ++ new_len < skb->len)) + return -EINVAL; + +- to->tunnel_id = be64_to_cpu(info->key.tun_id); +- to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); ++ ret = skb_cow(skb, head_room); ++ if (likely(!ret)) { ++ /* Idea for this helper is that we currently only ++ * allow to expand on mac header. This means that ++ * skb->protocol network header, etc, stay as is. ++ * Compared to bpf_skb_change_tail(), we're more ++ * flexible due to not needing to linearize or ++ * reset GSO. Intention for this helper is to be ++ * used by an L3 skb that needs to push mac header ++ * for redirection into L2 device. 
++ */ ++ __skb_push(skb, head_room); ++ memset(skb->data, 0, head_room); ++ skb_reset_mac_header(skb); ++ skb_reset_mac_len(skb); ++ } + +- return 0; ++ return ret; + } + +-const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { +- .func = bpf_skb_get_tunnel_key, ++BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, ++ u64, flags) ++{ ++ int ret = __bpf_skb_change_head(skb, head_room, flags); ++ ++ bpf_compute_data_pointers(skb); ++ return ret; ++} ++ ++static const struct bpf_func_proto bpf_skb_change_head_proto = { ++ .func = bpf_skb_change_head, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_STACK, +- .arg3_type = ARG_CONST_STACK_SIZE, +- .arg4_type = ARG_ANYTHING, ++ .arg2_type = ARG_ANYTHING, ++ .arg3_type = ARG_ANYTHING, + }; + +-static struct metadata_dst __percpu *md_dst; ++void bpf_clear_redirect_map(struct bpf_map *map) ++{ ++ struct bpf_redirect_info *ri; ++ int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ ri = per_cpu_ptr(&bpf_redirect_info, cpu); ++ /* Avoid polluting remote cacheline due to writes if ++ * not needed. Once we pass this test, we need the ++ * cmpxchg() to make sure it hasn't been changed in ++ * the meantime by remote CPU. 
++ */ ++ if (unlikely(READ_ONCE(ri->map) == map)) ++ cmpxchg(&ri->map, map, NULL); ++ } ++} ++ ++static unsigned long bpf_skb_copy(void *dst_buff, const void *skb, ++ unsigned long off, unsigned long len) ++{ ++ void *ptr = skb_header_pointer(skb, off, len, dst_buff); ++ ++ if (unlikely(!ptr)) ++ return len; ++ if (ptr != dst_buff) ++ memcpy(dst_buff, ptr, len); ++ ++ return 0; ++} + +-static u64 bpf_skb_set_tunnel_key(u64 r1, u64 r2, u64 size, u64 flags, u64 r5) ++BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map, ++ u64, flags, void *, meta, u64, meta_size) + { +- struct sk_buff *skb = (struct sk_buff *) (long) r1; +- struct bpf_tunnel_key *from = (struct bpf_tunnel_key *) (long) r2; +- struct metadata_dst *md = this_cpu_ptr(md_dst); +- struct ip_tunnel_info *info; ++ u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32; + +- if (unlikely(size != sizeof(struct bpf_tunnel_key) || flags)) ++ if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) + return -EINVAL; ++ if (unlikely(skb_size > skb->len)) ++ return -EFAULT; + +- skb_dst_drop(skb); +- dst_hold((struct dst_entry *) md); +- skb_dst_set(skb, (struct dst_entry *) md); ++ return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, ++ bpf_skb_copy); ++} + +- info = &md->u.tun_info; +- info->mode = IP_TUNNEL_INFO_TX; +- info->key.tun_flags = TUNNEL_KEY; +- info->key.tun_id = cpu_to_be64(from->tunnel_id); +- info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); ++static const struct bpf_func_proto bpf_skb_event_output_proto = { ++ .func = bpf_skb_event_output, ++ .gpl_only = true, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_CONST_MAP_PTR, ++ .arg3_type = ARG_ANYTHING, ++ .arg4_type = ARG_PTR_TO_MEM, ++ .arg5_type = ARG_CONST_SIZE_OR_ZERO, ++}; ++ ++ ++const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; ++EXPORT_SYMBOL_GPL(ipv6_bpf_stub); ++ ++#ifdef CONFIG_XFRM ++BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index, 
++ struct bpf_xfrm_state *, to, u32, size, u64, flags) ++{ ++ const struct sec_path *sp = skb_sec_path(skb); ++ const struct xfrm_state *x; ++ ++ if (!sp || unlikely(index >= sp->len || flags)) ++ goto err_clear; ++ ++ x = sp->xvec[index]; ++ ++ if (unlikely(size != sizeof(struct bpf_xfrm_state))) ++ goto err_clear; ++ ++ to->reqid = x->props.reqid; ++ to->spi = x->id.spi; ++ to->family = x->props.family; ++ to->ext = 0; ++ ++ if (to->family == AF_INET6) { ++ memcpy(to->remote_ipv6, x->props.saddr.a6, ++ sizeof(to->remote_ipv6)); ++ } else { ++ to->remote_ipv4 = x->props.saddr.a4; ++ memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); ++ } + + return 0; ++err_clear: ++ memset(to, 0, size); ++ return -EINVAL; + } + +-const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { +- .func = bpf_skb_set_tunnel_key, ++static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = { ++ .func = bpf_skb_get_xfrm_state, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, +- .arg2_type = ARG_PTR_TO_STACK, +- .arg3_type = ARG_CONST_STACK_SIZE, +- .arg4_type = ARG_ANYTHING, ++ .arg2_type = ARG_ANYTHING, ++ .arg3_type = ARG_PTR_TO_UNINIT_MEM, ++ .arg4_type = ARG_CONST_SIZE, ++ .arg5_type = ARG_ANYTHING, + }; ++#endif ++ + +-static const struct bpf_func_proto *bpf_get_skb_set_tunnel_key_proto(void) ++#if IS_ENABLED(CONFIG_LWTUNNEL_BPF) ++static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, ++ bool ingress) + { +- if (!md_dst) { +- /* race is not possible, since it's called from +- * verifier that is holding verifier mutex +- */ +- md_dst = metadata_dst_alloc_percpu(0, GFP_KERNEL); +- if (!md_dst) +- return NULL; ++ return bpf_lwt_push_ip_encap(skb, hdr, len, ingress); ++} ++#endif ++ ++BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr, ++ u32, len) ++{ ++ switch (type) { ++#if IS_ENABLED(CONFIG_LWTUNNEL_BPF) ++ case BPF_LWT_ENCAP_IP: ++ return bpf_push_ip_encap(skb, hdr, len, true /* ingress */); ++#endif 
++ default: ++ return -EINVAL; + } +- return &bpf_skb_set_tunnel_key_proto; ++} ++ ++BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type, ++ void *, hdr, u32, len) ++{ ++ switch (type) { ++#if IS_ENABLED(CONFIG_LWTUNNEL_BPF) ++ case BPF_LWT_ENCAP_IP: ++ return bpf_push_ip_encap(skb, hdr, len, false /* egress */); ++#endif ++ default: ++ return -EINVAL; ++ } ++} ++ ++static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = { ++ .func = bpf_lwt_in_push_encap, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_ANYTHING, ++ .arg3_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_CONST_SIZE ++}; ++ ++static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = { ++ .func = bpf_lwt_xmit_push_encap, ++ .gpl_only = false, ++ .ret_type = RET_INTEGER, ++ .arg1_type = ARG_PTR_TO_CTX, ++ .arg2_type = ARG_ANYTHING, ++ .arg3_type = ARG_PTR_TO_MEM, ++ .arg4_type = ARG_CONST_SIZE ++}; ++ ++bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++ ++u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, u32 *target_size) ++{ ++ return 0; ++} ++ ++BPF_CALL_1(bpf_tcp_sock, struct sock *, sk) ++{ ++ if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) ++ return (unsigned long)sk; ++ ++ return (unsigned long)NULL; ++} ++ ++const struct bpf_func_proto bpf_tcp_sock_proto = { ++ .func = bpf_tcp_sock, ++ .gpl_only = false, ++ .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL, ++ .arg1_type = ARG_PTR_TO_SOCK_COMMON, ++}; ++ ++bool bpf_helper_changes_pkt_data(void *func) ++{ ++ if (func == bpf_skb_vlan_push || ++ func == bpf_skb_vlan_pop || ++ func == bpf_skb_store_bytes || ++ func == bpf_skb_change_head || ++ func == bpf_skb_change_tail || ++ func == bpf_skb_pull_data || ++ func == bpf_clone_redirect || ++ func == bpf_l3_csum_replace || ++ func == 
bpf_l4_csum_replace || ++ func == bpf_lwt_in_push_encap || ++ func == bpf_lwt_xmit_push_encap) ++ return true; ++ ++ return false; + } + + static const struct bpf_func_proto * +-sk_filter_func_proto(enum bpf_func_id func_id) ++bpf_base_func_proto(enum bpf_func_id func_id) + { + switch (func_id) { + case BPF_FUNC_map_lookup_elem: +@@ -1644,283 +2495,1168 @@ sk_filter_func_proto(enum bpf_func_id fu + return &bpf_map_update_elem_proto; + case BPF_FUNC_map_delete_elem: + return &bpf_map_delete_elem_proto; ++ case BPF_FUNC_map_push_elem: ++ return &bpf_map_push_elem_proto; ++ case BPF_FUNC_map_pop_elem: ++ return &bpf_map_pop_elem_proto; ++ case BPF_FUNC_map_peek_elem: ++ return &bpf_map_peek_elem_proto; + case BPF_FUNC_get_prandom_u32: + return &bpf_get_prandom_u32_proto; + case BPF_FUNC_get_smp_processor_id: +- return &bpf_get_smp_processor_id_proto; ++ return &bpf_get_raw_smp_processor_id_proto; ++ case BPF_FUNC_get_numa_node_id: ++ return &bpf_get_numa_node_id_proto; + case BPF_FUNC_tail_call: + return &bpf_tail_call_proto; + case BPF_FUNC_ktime_get_ns: + return &bpf_ktime_get_ns_proto; ++ default: ++ break; ++ } ++ ++ if (!capable(CAP_SYS_ADMIN)) ++ return NULL; ++ ++ switch (func_id) { ++ case BPF_FUNC_spin_lock: ++ return &bpf_spin_lock_proto; ++ case BPF_FUNC_spin_unlock: ++ return &bpf_spin_unlock_proto; + case BPF_FUNC_trace_printk: +- if (capable(CAP_SYS_ADMIN)) +- return bpf_get_trace_printk_proto(); ++ return bpf_get_trace_printk_proto(); + default: + return NULL; + } + } + + static const struct bpf_func_proto * +-tc_cls_act_func_proto(enum bpf_func_id func_id) ++sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ return bpf_base_func_proto(func_id); ++} ++ ++static const struct bpf_func_proto * ++sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ return bpf_base_func_proto(func_id); ++} ++ ++const struct bpf_func_proto bpf_sk_storage_get_proto __weak; ++const struct bpf_func_proto 
bpf_sk_storage_delete_proto __weak; ++ ++static const struct bpf_func_proto * ++tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) + { + switch (func_id) { + case BPF_FUNC_skb_store_bytes: + return &bpf_skb_store_bytes_proto; ++ case BPF_FUNC_skb_load_bytes: ++ return &bpf_skb_load_bytes_proto; ++ case BPF_FUNC_skb_load_bytes_relative: ++ return &bpf_skb_load_bytes_relative_proto; ++ case BPF_FUNC_skb_pull_data: ++ return &bpf_skb_pull_data_proto; ++ case BPF_FUNC_csum_diff: ++ return &bpf_csum_diff_proto; ++ case BPF_FUNC_csum_update: ++ return &bpf_csum_update_proto; + case BPF_FUNC_l3_csum_replace: + return &bpf_l3_csum_replace_proto; + case BPF_FUNC_l4_csum_replace: + return &bpf_l4_csum_replace_proto; + case BPF_FUNC_clone_redirect: + return &bpf_clone_redirect_proto; +- case BPF_FUNC_get_cgroup_classid: +- return &bpf_get_cgroup_classid_proto; + case BPF_FUNC_skb_vlan_push: + return &bpf_skb_vlan_push_proto; + case BPF_FUNC_skb_vlan_pop: + return &bpf_skb_vlan_pop_proto; +- case BPF_FUNC_skb_get_tunnel_key: +- return &bpf_skb_get_tunnel_key_proto; +- case BPF_FUNC_skb_set_tunnel_key: +- return bpf_get_skb_set_tunnel_key_proto(); ++ case BPF_FUNC_skb_change_type: ++ return &bpf_skb_change_type_proto; ++ case BPF_FUNC_skb_change_tail: ++ return &bpf_skb_change_tail_proto; ++ case BPF_FUNC_redirect: ++ return &bpf_redirect_proto; ++ case BPF_FUNC_get_hash_recalc: ++ return &bpf_get_hash_recalc_proto; ++ case BPF_FUNC_set_hash_invalid: ++ return &bpf_set_hash_invalid_proto; ++ case BPF_FUNC_set_hash: ++ return &bpf_set_hash_proto; ++ case BPF_FUNC_perf_event_output: ++ return &bpf_skb_event_output_proto; ++ case BPF_FUNC_get_smp_processor_id: ++ return &bpf_get_smp_processor_id_proto; ++#ifdef CONFIG_XFRM ++ case BPF_FUNC_skb_get_xfrm_state: ++ return &bpf_skb_get_xfrm_state_proto; ++#endif ++ default: ++ return bpf_base_func_proto(func_id); ++ } ++} ++ ++static const struct bpf_func_proto * ++xdp_func_proto(enum bpf_func_id func_id, 
const struct bpf_prog *prog) ++{ ++ switch (func_id) { ++ default: ++ return bpf_base_func_proto(func_id); ++ } ++} ++ ++const struct bpf_func_proto bpf_sock_map_update_proto __weak; ++const struct bpf_func_proto bpf_sock_hash_update_proto __weak; ++ ++static const struct bpf_func_proto * ++sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ switch (func_id) { ++ default: ++ return bpf_base_func_proto(func_id); ++ } ++} ++ ++const struct bpf_func_proto bpf_msg_redirect_map_proto __weak; ++const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak; ++ ++static const struct bpf_func_proto * ++sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ switch (func_id) { ++ default: ++ return bpf_base_func_proto(func_id); ++ } ++} ++ ++const struct bpf_func_proto bpf_sk_redirect_map_proto __weak; ++const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak; ++ ++static const struct bpf_func_proto * ++sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ return bpf_base_func_proto(func_id); ++} ++ ++static const struct bpf_func_proto * ++flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ switch (func_id) { ++ default: ++ return bpf_base_func_proto(func_id); ++ } ++} ++ ++static const struct bpf_func_proto * ++lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ switch (func_id) { ++ case BPF_FUNC_skb_load_bytes: ++ return &bpf_skb_load_bytes_proto; ++ case BPF_FUNC_skb_pull_data: ++ return &bpf_skb_pull_data_proto; ++ case BPF_FUNC_csum_diff: ++ return &bpf_csum_diff_proto; ++ case BPF_FUNC_get_hash_recalc: ++ return &bpf_get_hash_recalc_proto; ++ case BPF_FUNC_perf_event_output: ++ return &bpf_skb_event_output_proto; ++ case BPF_FUNC_get_smp_processor_id: ++ return &bpf_get_smp_processor_id_proto; ++ default: ++ return bpf_base_func_proto(func_id); ++ } ++} ++ ++static const struct bpf_func_proto * ++lwt_in_func_proto(enum 
bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ switch (func_id) { ++ case BPF_FUNC_lwt_push_encap: ++ return &bpf_lwt_in_push_encap_proto; ++ default: ++ return lwt_out_func_proto(func_id, prog); ++ } ++} ++ ++static const struct bpf_func_proto * ++lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ++{ ++ switch (func_id) { + case BPF_FUNC_redirect: + return &bpf_redirect_proto; +- case BPF_FUNC_get_route_realm: +- return &bpf_get_route_realm_proto; ++ case BPF_FUNC_clone_redirect: ++ return &bpf_clone_redirect_proto; ++ case BPF_FUNC_skb_change_tail: ++ return &bpf_skb_change_tail_proto; ++ case BPF_FUNC_skb_change_head: ++ return &bpf_skb_change_head_proto; ++ case BPF_FUNC_skb_store_bytes: ++ return &bpf_skb_store_bytes_proto; ++ case BPF_FUNC_csum_update: ++ return &bpf_csum_update_proto; ++ case BPF_FUNC_l3_csum_replace: ++ return &bpf_l3_csum_replace_proto; ++ case BPF_FUNC_l4_csum_replace: ++ return &bpf_l4_csum_replace_proto; ++ case BPF_FUNC_set_hash_invalid: ++ return &bpf_set_hash_invalid_proto; ++ case BPF_FUNC_lwt_push_encap: ++ return &bpf_lwt_xmit_push_encap_proto; + default: +- return sk_filter_func_proto(func_id); ++ return lwt_out_func_proto(func_id, prog); + } + } + +-static bool __is_valid_access(int off, int size, enum bpf_access_type type) ++static const struct bpf_func_proto * ++lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) + { +- /* check bounds */ ++ switch (func_id) { ++ default: ++ return lwt_out_func_proto(func_id, prog); ++ } ++} ++ ++static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ const int size_default = sizeof(__u32); ++ + if (off < 0 || off >= sizeof(struct __sk_buff)) + return false; + +- /* disallow misaligned access */ ++ /* The verifier guarantees that size > 0. 
*/ + if (off % size != 0) + return false; + +- /* all __sk_buff fields are __u32 */ +- if (size != 4) ++ switch (off) { ++ case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): ++ if (off + size > offsetofend(struct __sk_buff, cb[4])) ++ return false; ++ break; ++ case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]): ++ case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]): ++ case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4): ++ case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4): ++ case bpf_ctx_range(struct __sk_buff, data): ++ case bpf_ctx_range(struct __sk_buff, data_meta): ++ case bpf_ctx_range(struct __sk_buff, data_end): ++ if (size != size_default) ++ return false; ++ break; ++ case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): + return false; ++ case bpf_ctx_range(struct __sk_buff, tstamp): ++ if (size != sizeof(__u64)) ++ return false; ++ break; ++ case offsetof(struct __sk_buff, sk): ++ if (type == BPF_WRITE || size != sizeof(__u64)) ++ return false; ++ info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; ++ break; ++ default: ++ /* Only narrow read access allowed for now. 
*/ ++ if (type == BPF_WRITE) { ++ if (size != size_default) ++ return false; ++ } else { ++ bpf_ctx_record_field_size(info, size_default); ++ if (!bpf_ctx_narrow_access_ok(off, size, size_default)) ++ return false; ++ } ++ } + + return true; + } + + static bool sk_filter_is_valid_access(int off, int size, +- enum bpf_access_type type) ++ enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) + { +- if (off == offsetof(struct __sk_buff, tc_classid)) ++ return false; ++} ++ ++static bool lwt_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ switch (off) { ++ case bpf_ctx_range(struct __sk_buff, tc_classid): ++ case bpf_ctx_range_till(struct __sk_buff, family, local_port): ++ case bpf_ctx_range(struct __sk_buff, data_meta): ++ case bpf_ctx_range(struct __sk_buff, tstamp): ++ case bpf_ctx_range(struct __sk_buff, wire_len): + return false; ++ } + + if (type == BPF_WRITE) { + switch (off) { +- case offsetof(struct __sk_buff, cb[0]) ... 
+- offsetof(struct __sk_buff, cb[4]): ++ case bpf_ctx_range(struct __sk_buff, mark): ++ case bpf_ctx_range(struct __sk_buff, priority): ++ case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): + break; + default: + return false; + } + } + +- return __is_valid_access(off, size, type); ++ switch (off) { ++ case bpf_ctx_range(struct __sk_buff, data): ++ info->reg_type = PTR_TO_PACKET; ++ break; ++ case bpf_ctx_range(struct __sk_buff, data_end): ++ info->reg_type = PTR_TO_PACKET_END; ++ break; ++ } ++ ++ return bpf_skb_is_valid_access(off, size, type, prog, info); + } + +-static bool tc_cls_act_is_valid_access(int off, int size, +- enum bpf_access_type type) ++ ++bool bpf_sock_common_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ struct bpf_insn_access_aux *info) ++{ ++ switch (off) { ++ case bpf_ctx_range_till(struct bpf_sock, type, priority): ++ return false; ++ default: ++ return bpf_sock_is_valid_access(off, size, type, info); ++ } ++} ++ ++bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++ ++static bool sock_filter_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++ ++static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write, ++ const struct bpf_prog *prog) ++{ ++ /* Neither direct read nor direct write requires any preliminary ++ * action. ++ */ ++ return 0; ++} ++ ++static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, ++ const struct bpf_prog *prog, int drop_verdict) ++{ ++ struct bpf_insn *insn = insn_buf; ++ ++ if (!direct_write) ++ return 0; ++ ++ /* if (!skb->cloned) ++ * goto start; ++ * ++ * (Fast-path, otherwise approximation that we might be ++ * a clone, do the rest in helper.) 
++ */ ++ *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET()); ++ *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); ++ *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); ++ ++ /* ret = bpf_skb_pull_data(skb, 0); */ ++ *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); ++ *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2); ++ *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, ++ BPF_FUNC_skb_pull_data); ++ /* if (!ret) ++ * goto restore; ++ * return TC_ACT_SHOT; ++ */ ++ *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2); ++ *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict); ++ *insn++ = BPF_EXIT_INSN(); ++ ++ /* restore: */ ++ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); ++ /* start: */ ++ *insn++ = prog->insnsi[0]; ++ ++ return insn - insn_buf; ++} ++ ++static int bpf_gen_ld_abs(const struct bpf_insn *orig, ++ struct bpf_insn *insn_buf) ++{ ++ bool indirect = BPF_MODE(orig->code) == BPF_IND; ++ struct bpf_insn *insn = insn_buf; ++ ++ if (!indirect) { ++ *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm); ++ } else { ++ *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg); ++ if (orig->imm) ++ *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm); ++ } ++ /* We're guaranteed here that CTX is in R6. */ ++ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); ++ ++ switch (BPF_SIZE(orig->code)) { ++ case BPF_B: ++ *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache); ++ break; ++ case BPF_H: ++ *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache); ++ break; ++ case BPF_W: ++ *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache); ++ break; ++ } ++ ++ *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2); ++ *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0); ++ *insn++ = BPF_EXIT_INSN(); ++ ++ return insn - insn_buf; ++} ++ ++static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write, ++ const struct bpf_prog *prog) + { +- if (off == offsetof(struct __sk_buff, tc_classid)) +- return type == BPF_WRITE ? 
true : false; ++ return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT); ++} + ++static bool tc_cls_act_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ + if (type == BPF_WRITE) { + switch (off) { +- case offsetof(struct __sk_buff, mark): +- case offsetof(struct __sk_buff, tc_index): +- case offsetof(struct __sk_buff, priority): +- case offsetof(struct __sk_buff, cb[0]) ... +- offsetof(struct __sk_buff, cb[4]): ++ case bpf_ctx_range(struct __sk_buff, mark): ++ case bpf_ctx_range(struct __sk_buff, tc_index): ++ case bpf_ctx_range(struct __sk_buff, priority): ++ case bpf_ctx_range(struct __sk_buff, tc_classid): ++ case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): ++ case bpf_ctx_range(struct __sk_buff, tstamp): ++ case bpf_ctx_range(struct __sk_buff, queue_mapping): + break; + default: + return false; + } + } +- return __is_valid_access(off, size, type); ++ ++ switch (off) { ++ case bpf_ctx_range(struct __sk_buff, data): ++ info->reg_type = PTR_TO_PACKET; ++ break; ++ case bpf_ctx_range(struct __sk_buff, data_meta): ++ info->reg_type = PTR_TO_PACKET_META; ++ break; ++ case bpf_ctx_range(struct __sk_buff, data_end): ++ info->reg_type = PTR_TO_PACKET_END; ++ break; ++ case bpf_ctx_range_till(struct __sk_buff, family, local_port): ++ return false; ++ } ++ ++ return bpf_skb_is_valid_access(off, size, type, prog, info); ++} ++ ++static bool xdp_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++ ++void bpf_warn_invalid_xdp_action(u32 act) ++{ ++} ++EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); ++ ++static bool sock_addr_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++ ++static bool sock_ops_is_valid_access(int off, int size, ++ enum 
bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++ ++static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write, ++ const struct bpf_prog *prog) ++{ ++ return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP); ++} ++ ++static bool sk_skb_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++ ++static bool sk_msg_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; ++} ++ ++static bool flow_dissector_is_valid_access(int off, int size, ++ enum bpf_access_type type, ++ const struct bpf_prog *prog, ++ struct bpf_insn_access_aux *info) ++{ ++ return false; + } + +-static u32 bpf_net_convert_ctx_access(enum bpf_access_type type, int dst_reg, +- int src_reg, int ctx_off, +- struct bpf_insn *insn_buf, +- struct bpf_prog *prog) ++static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, ++ u32 *target_size) ++ ++{ ++ return 0; ++} ++ ++static u32 bpf_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, u32 *target_size) + { + struct bpf_insn *insn = insn_buf; ++ int off; + +- switch (ctx_off) { ++ switch (si->off) { + case offsetof(struct __sk_buff, len): +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); +- +- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, +- offsetof(struct sk_buff, len)); ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, len, 4, ++ target_size)); + break; + + case offsetof(struct __sk_buff, protocol): +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); +- +- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, +- offsetof(struct 
sk_buff, protocol)); ++ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, protocol, 2, ++ target_size)); + break; + + case offsetof(struct __sk_buff, vlan_proto): +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2); +- +- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, +- offsetof(struct sk_buff, vlan_proto)); ++ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, vlan_proto, 2, ++ target_size)); + break; + + case offsetof(struct __sk_buff, priority): +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4); +- + if (type == BPF_WRITE) +- *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, +- offsetof(struct sk_buff, priority)); ++ *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, priority, 4, ++ target_size)); + else +- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, +- offsetof(struct sk_buff, priority)); ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, priority, 4, ++ target_size)); + break; + + case offsetof(struct __sk_buff, ingress_ifindex): +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4); +- +- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, +- offsetof(struct sk_buff, skb_iif)); ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, skb_iif, 4, ++ target_size)); + break; + + case offsetof(struct __sk_buff, ifindex): +- BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); +- +- *insn++ = BPF_LDX_MEM(bytes_to_bpf_size(FIELD_SIZEOF(struct sk_buff, dev)), +- dst_reg, src_reg, ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), ++ si->dst_reg, si->src_reg, + offsetof(struct sk_buff, dev)); +- *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1); +- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg, +- offsetof(struct net_device, ifindex)); ++ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, 
si->dst_reg, ++ bpf_target_off(struct net_device, ifindex, 4, ++ target_size)); + break; + + case offsetof(struct __sk_buff, hash): +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); +- +- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, +- offsetof(struct sk_buff, hash)); ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, hash, 4, ++ target_size)); + break; + + case offsetof(struct __sk_buff, mark): +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); +- + if (type == BPF_WRITE) +- *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, +- offsetof(struct sk_buff, mark)); ++ *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, mark, 4, ++ target_size)); + else +- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, +- offsetof(struct sk_buff, mark)); ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, mark, 4, ++ target_size)); + break; + + case offsetof(struct __sk_buff, pkt_type): +- return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn); ++ *target_size = 1; ++ *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, ++ PKT_TYPE_OFFSET()); ++ *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX); ++#ifdef __BIG_ENDIAN_BITFIELD ++ *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5); ++#endif ++ break; + + case offsetof(struct __sk_buff, queue_mapping): +- return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn); ++ if (type == BPF_WRITE) { ++ *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, USHRT_MAX, 1); ++ *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, ++ queue_mapping, ++ 2, target_size)); ++ } else { ++ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, ++ queue_mapping, ++ 2, target_size)); ++ } ++ break; + + case offsetof(struct __sk_buff, vlan_present): +- return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, +- dst_reg, src_reg, insn); ++ *target_size = 
1; ++ *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, ++ PKT_VLAN_PRESENT_OFFSET()); ++ if (PKT_VLAN_PRESENT_BIT) ++ *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT); ++ if (PKT_VLAN_PRESENT_BIT < 7) ++ *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1); ++ break; + + case offsetof(struct __sk_buff, vlan_tci): +- return convert_skb_access(SKF_AD_VLAN_TAG, +- dst_reg, src_reg, insn); ++ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, vlan_tci, 2, ++ target_size)); ++ break; + + case offsetof(struct __sk_buff, cb[0]) ... +- offsetof(struct __sk_buff, cb[4]): ++ offsetofend(struct __sk_buff, cb[4]) - 1: + BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); ++ BUILD_BUG_ON((offsetof(struct sk_buff, cb) + ++ offsetof(struct qdisc_skb_cb, data)) % ++ sizeof(__u64)); + + prog->cb_access = 1; +- ctx_off -= offsetof(struct __sk_buff, cb[0]); +- ctx_off += offsetof(struct sk_buff, cb); +- ctx_off += offsetof(struct qdisc_skb_cb, data); ++ off = si->off; ++ off -= offsetof(struct __sk_buff, cb[0]); ++ off += offsetof(struct sk_buff, cb); ++ off += offsetof(struct qdisc_skb_cb, data); + if (type == BPF_WRITE) +- *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off); ++ *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, ++ si->src_reg, off); + else +- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off); ++ *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, ++ si->src_reg, off); + break; + + case offsetof(struct __sk_buff, tc_classid): +- ctx_off -= offsetof(struct __sk_buff, tc_classid); +- ctx_off += offsetof(struct sk_buff, cb); +- ctx_off += offsetof(struct qdisc_skb_cb, tc_classid); +- WARN_ON(type != BPF_WRITE); +- *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off); ++ BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2); ++ ++ off = si->off; ++ off -= offsetof(struct __sk_buff, tc_classid); ++ off += offsetof(struct sk_buff, cb); ++ off += offsetof(struct qdisc_skb_cb, 
tc_classid); ++ *target_size = 2; ++ if (type == BPF_WRITE) ++ *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, ++ si->src_reg, off); ++ else ++ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, ++ si->src_reg, off); ++ break; ++ ++ case offsetof(struct __sk_buff, data): ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), ++ si->dst_reg, si->src_reg, ++ offsetof(struct sk_buff, data)); ++ break; ++ ++ case offsetof(struct __sk_buff, data_meta): ++ off = si->off; ++ off -= offsetof(struct __sk_buff, data_meta); ++ off += offsetof(struct sk_buff, cb); ++ off += offsetof(struct bpf_skb_data_end, data_meta); ++ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, ++ si->src_reg, off); ++ break; ++ ++ case offsetof(struct __sk_buff, data_end): ++ off = si->off; ++ off -= offsetof(struct __sk_buff, data_end); ++ off += offsetof(struct sk_buff, cb); ++ off += offsetof(struct bpf_skb_data_end, data_end); ++ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, ++ si->src_reg, off); + break; + + case offsetof(struct __sk_buff, tc_index): + #ifdef CONFIG_NET_SCHED +- BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2); +- + if (type == BPF_WRITE) +- *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, +- offsetof(struct sk_buff, tc_index)); ++ *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, tc_index, 2, ++ target_size)); + else +- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, +- offsetof(struct sk_buff, tc_index)); +- break; ++ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, tc_index, 2, ++ target_size)); + #else ++ *target_size = 2; + if (type == BPF_WRITE) +- *insn++ = BPF_MOV64_REG(dst_reg, dst_reg); ++ *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg); + else +- *insn++ = BPF_MOV64_IMM(dst_reg, 0); ++ *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); ++#endif ++ break; ++ ++ case offsetof(struct __sk_buff, napi_id): ++ *target_size = 4; ++ *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); ++ break; ++ case 
offsetof(struct __sk_buff, family): ++ BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); ++ ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), ++ si->dst_reg, si->src_reg, ++ offsetof(struct sk_buff, sk)); ++ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, ++ bpf_target_off(struct sock_common, ++ skc_family, ++ 2, target_size)); ++ break; ++ case offsetof(struct __sk_buff, remote_ip4): ++ BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); ++ ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), ++ si->dst_reg, si->src_reg, ++ offsetof(struct sk_buff, sk)); ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, ++ bpf_target_off(struct sock_common, ++ skc_daddr, ++ 4, target_size)); ++ break; ++ case offsetof(struct __sk_buff, local_ip4): ++ BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, ++ skc_rcv_saddr) != 4); ++ ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), ++ si->dst_reg, si->src_reg, ++ offsetof(struct sk_buff, sk)); ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, ++ bpf_target_off(struct sock_common, ++ skc_rcv_saddr, ++ 4, target_size)); ++ break; ++ case offsetof(struct __sk_buff, remote_ip6[0]) ... ++ offsetof(struct __sk_buff, remote_ip6[3]): ++#if IS_ENABLED(CONFIG_IPV6) ++ BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, ++ skc_v6_daddr.s6_addr32[0]) != 4); ++ ++ off = si->off; ++ off -= offsetof(struct __sk_buff, remote_ip6[0]); ++ ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), ++ si->dst_reg, si->src_reg, ++ offsetof(struct sk_buff, sk)); ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, ++ offsetof(struct sock_common, ++ skc_v6_daddr.s6_addr32[0]) + ++ off); ++#else ++ *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); ++#endif ++ break; ++ case offsetof(struct __sk_buff, local_ip6[0]) ... 
++ offsetof(struct __sk_buff, local_ip6[3]): ++#if IS_ENABLED(CONFIG_IPV6) ++ BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, ++ skc_v6_rcv_saddr.s6_addr32[0]) != 4); ++ ++ off = si->off; ++ off -= offsetof(struct __sk_buff, local_ip6[0]); ++ ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), ++ si->dst_reg, si->src_reg, ++ offsetof(struct sk_buff, sk)); ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, ++ offsetof(struct sock_common, ++ skc_v6_rcv_saddr.s6_addr32[0]) + ++ off); ++#else ++ *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); ++#endif ++ break; ++ ++ case offsetof(struct __sk_buff, remote_port): ++ BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); ++ ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), ++ si->dst_reg, si->src_reg, ++ offsetof(struct sk_buff, sk)); ++ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, ++ bpf_target_off(struct sock_common, ++ skc_dport, ++ 2, target_size)); ++#ifndef __BIG_ENDIAN_BITFIELD ++ *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); ++#endif + break; ++ ++ case offsetof(struct __sk_buff, local_port): ++ BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); ++ ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), ++ si->dst_reg, si->src_reg, ++ offsetof(struct sk_buff, sk)); ++ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, ++ bpf_target_off(struct sock_common, ++ skc_num, 2, target_size)); ++ break; ++ ++ case offsetof(struct __sk_buff, tstamp): ++ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8); ++ ++ if (type == BPF_WRITE) ++ *insn++ = BPF_STX_MEM(BPF_DW, ++ si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, ++ tstamp, 8, ++ target_size)); ++ else ++ *insn++ = BPF_LDX_MEM(BPF_DW, ++ si->dst_reg, si->src_reg, ++ bpf_target_off(struct sk_buff, ++ tstamp, 8, ++ target_size)); ++ break; ++ ++ case offsetof(struct __sk_buff, gso_segs): ++ /* si->dst_reg = skb_shinfo(SKB); */ ++#ifdef NET_SKBUFF_DATA_USES_OFFSET ++ *insn++ = 
BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), ++ BPF_REG_AX, si->src_reg, ++ offsetof(struct sk_buff, end)); ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head), ++ si->dst_reg, si->src_reg, ++ offsetof(struct sk_buff, head)); ++ *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX); ++#else ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), ++ si->dst_reg, si->src_reg, ++ offsetof(struct sk_buff, end)); + #endif ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs), ++ si->dst_reg, si->dst_reg, ++ bpf_target_off(struct skb_shared_info, ++ gso_segs, 2, ++ target_size)); ++ break; ++ case offsetof(struct __sk_buff, wire_len): ++ BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4); ++ ++ off = si->off; ++ off -= offsetof(struct __sk_buff, wire_len); ++ off += offsetof(struct sk_buff, cb); ++ off += offsetof(struct qdisc_skb_cb, pkt_len); ++ *target_size = 4; ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off); ++ break; ++ ++ case offsetof(struct __sk_buff, sk): ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), ++ si->dst_reg, si->src_reg, ++ offsetof(struct sk_buff, sk)); ++ break; + } + + return insn - insn_buf; + } + +-static const struct bpf_verifier_ops sk_filter_ops = { +- .get_func_proto = sk_filter_func_proto, +- .is_valid_access = sk_filter_is_valid_access, +- .convert_ctx_access = bpf_net_convert_ctx_access, ++u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, u32 *target_size) ++{ ++ return 0; ++} ++ ++static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, u32 *target_size) ++{ ++ struct bpf_insn *insn = insn_buf; ++ ++ switch (si->off) { ++ case offsetof(struct __sk_buff, ifindex): ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), ++ si->dst_reg, si->src_reg, ++ 
offsetof(struct sk_buff, dev)); ++ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, ++ bpf_target_off(struct net_device, ifindex, 4, ++ target_size)); ++ break; ++ default: ++ return bpf_convert_ctx_access(type, si, insn_buf, prog, ++ target_size); ++ } ++ ++ return insn - insn_buf; ++} ++ ++static u32 xdp_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, u32 *target_size) ++{ ++ return 0; ++} ++ ++/* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF where S is type of ++ * context Structure, F is Field in context structure that contains a pointer ++ * to Nested Structure of type NS that has the field NF. ++ * ++ * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to caller to make ++ * sure that SIZE is not greater than actual size of S.F.NF. ++ * ++ * If offset OFF is provided, the load happens from that offset relative to ++ * offset of NF. ++ */ ++#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF) \ ++ do { \ ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \ ++ si->src_reg, offsetof(S, F)); \ ++ *insn++ = BPF_LDX_MEM( \ ++ SIZE, si->dst_reg, si->dst_reg, \ ++ bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \ ++ target_size) \ ++ + OFF); \ ++ } while (0) ++ ++#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF) \ ++ SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, \ ++ BPF_FIELD_SIZEOF(NS, NF), 0) ++ ++/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantic similar to ++ * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operation. ++ * ++ * In addition it uses Temporary Field TF (member of struct S) as the 3rd ++ * "register" since two registers available in convert_ctx_access are not ++ * enough: we can't override neither SRC, since it contains value to store, nor ++ * DST since it contains pointer to context that may be used by later ++ * instructions. 
But we need a temporary place to save pointer to nested ++ * structure whose field we want to store to. ++ */ ++#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF) \ ++ do { \ ++ int tmp_reg = BPF_REG_9; \ ++ if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ ++ --tmp_reg; \ ++ if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ ++ --tmp_reg; \ ++ *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \ ++ offsetof(S, TF)); \ ++ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \ ++ si->dst_reg, offsetof(S, F)); \ ++ *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \ ++ bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \ ++ target_size) \ ++ + OFF); \ ++ *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \ ++ offsetof(S, TF)); \ ++ } while (0) ++ ++#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \ ++ TF) \ ++ do { \ ++ if (type == BPF_WRITE) { \ ++ SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, \ ++ OFF, TF); \ ++ } else { \ ++ SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \ ++ S, NS, F, NF, SIZE, OFF); \ ++ } \ ++ } while (0) ++ ++#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \ ++ SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \ ++ S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF) ++ ++static u32 sock_addr_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, u32 *target_size) ++{ ++ return 0; ++} ++ ++static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, ++ u32 *target_size) ++{ ++ return 0; ++} ++ ++static u32 sk_skb_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct bpf_prog *prog, u32 *target_size) ++{ ++ return 0; ++} ++ ++static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, ++ const struct bpf_insn *si, ++ struct bpf_insn *insn_buf, ++ struct 
bpf_prog *prog, u32 *target_size) ++{ ++ return 0; ++} ++ ++const struct bpf_verifier_ops sk_filter_verifier_ops = { ++ .is_valid_access = sk_filter_is_valid_access, ++ .convert_ctx_access = bpf_convert_ctx_access, ++ .gen_ld_abs = bpf_gen_ld_abs, + }; + +-static const struct bpf_verifier_ops tc_cls_act_ops = { +- .get_func_proto = tc_cls_act_func_proto, +- .is_valid_access = tc_cls_act_is_valid_access, +- .convert_ctx_access = bpf_net_convert_ctx_access, ++const struct bpf_prog_ops sk_filter_prog_ops = { + }; + +-static struct bpf_prog_type_list sk_filter_type __read_mostly = { +- .ops = &sk_filter_ops, +- .type = BPF_PROG_TYPE_SOCKET_FILTER, ++const struct bpf_verifier_ops tc_cls_act_verifier_ops = { ++ .get_func_proto = tc_cls_act_func_proto, ++ .is_valid_access = tc_cls_act_is_valid_access, ++ .convert_ctx_access = tc_cls_act_convert_ctx_access, ++ .gen_prologue = tc_cls_act_prologue, ++ .gen_ld_abs = bpf_gen_ld_abs, + }; + +-static struct bpf_prog_type_list sched_cls_type __read_mostly = { +- .ops = &tc_cls_act_ops, +- .type = BPF_PROG_TYPE_SCHED_CLS, ++const struct bpf_prog_ops tc_cls_act_prog_ops = { + }; + +-static struct bpf_prog_type_list sched_act_type __read_mostly = { +- .ops = &tc_cls_act_ops, +- .type = BPF_PROG_TYPE_SCHED_ACT, ++const struct bpf_verifier_ops xdp_verifier_ops = { ++ .get_func_proto = xdp_func_proto, ++ .is_valid_access = xdp_is_valid_access, ++ .convert_ctx_access = xdp_convert_ctx_access, ++ .gen_prologue = bpf_noop_prologue, + }; + +-static int __init register_sk_filter_ops(void) +-{ +- bpf_register_prog_type(&sk_filter_type); +- bpf_register_prog_type(&sched_cls_type); +- bpf_register_prog_type(&sched_act_type); ++const struct bpf_verifier_ops lwt_in_verifier_ops = { ++ .get_func_proto = lwt_in_func_proto, ++ .is_valid_access = lwt_is_valid_access, ++ .convert_ctx_access = bpf_convert_ctx_access, ++}; + +- return 0; +-} +-late_initcall(register_sk_filter_ops); ++const struct bpf_prog_ops lwt_in_prog_ops = { ++}; ++ ++const struct 
bpf_verifier_ops lwt_out_verifier_ops = { ++ .get_func_proto = lwt_out_func_proto, ++ .is_valid_access = lwt_is_valid_access, ++ .convert_ctx_access = bpf_convert_ctx_access, ++}; ++ ++const struct bpf_prog_ops lwt_out_prog_ops = { ++}; ++ ++const struct bpf_verifier_ops lwt_xmit_verifier_ops = { ++ .get_func_proto = lwt_xmit_func_proto, ++ .is_valid_access = lwt_is_valid_access, ++ .convert_ctx_access = bpf_convert_ctx_access, ++ .gen_prologue = tc_cls_act_prologue, ++}; ++ ++const struct bpf_prog_ops lwt_xmit_prog_ops = { ++}; ++ ++const struct bpf_verifier_ops lwt_seg6local_verifier_ops = { ++ .get_func_proto = lwt_seg6local_func_proto, ++ .is_valid_access = lwt_is_valid_access, ++ .convert_ctx_access = bpf_convert_ctx_access, ++}; ++ ++const struct bpf_prog_ops lwt_seg6local_prog_ops = { ++}; ++ ++const struct bpf_verifier_ops cg_sock_verifier_ops = { ++ .get_func_proto = sock_filter_func_proto, ++ .is_valid_access = sock_filter_is_valid_access, ++ .convert_ctx_access = bpf_sock_convert_ctx_access, ++}; ++ ++const struct bpf_prog_ops cg_sock_prog_ops = { ++}; ++ ++const struct bpf_verifier_ops cg_sock_addr_verifier_ops = { ++ .get_func_proto = sock_addr_func_proto, ++ .is_valid_access = sock_addr_is_valid_access, ++ .convert_ctx_access = sock_addr_convert_ctx_access, ++}; + +-int __sk_detach_filter(struct sock *sk, bool locked) ++const struct bpf_prog_ops cg_sock_addr_prog_ops = { ++}; ++ ++const struct bpf_verifier_ops sock_ops_verifier_ops = { ++ .get_func_proto = sock_ops_func_proto, ++ .is_valid_access = sock_ops_is_valid_access, ++ .convert_ctx_access = sock_ops_convert_ctx_access, ++}; ++ ++const struct bpf_prog_ops sock_ops_prog_ops = { ++}; ++ ++const struct bpf_verifier_ops sk_skb_verifier_ops = { ++ .get_func_proto = sk_skb_func_proto, ++ .is_valid_access = sk_skb_is_valid_access, ++ .convert_ctx_access = sk_skb_convert_ctx_access, ++ .gen_prologue = sk_skb_prologue, ++}; ++ ++const struct bpf_prog_ops sk_skb_prog_ops = { ++}; ++ ++const struct 
bpf_verifier_ops sk_msg_verifier_ops = { ++ .get_func_proto = sk_msg_func_proto, ++ .is_valid_access = sk_msg_is_valid_access, ++ .convert_ctx_access = sk_msg_convert_ctx_access, ++ .gen_prologue = bpf_noop_prologue, ++}; ++ ++const struct bpf_prog_ops sk_msg_prog_ops = { ++}; ++ ++const struct bpf_verifier_ops flow_dissector_verifier_ops = { ++ .get_func_proto = flow_dissector_func_proto, ++ .is_valid_access = flow_dissector_is_valid_access, ++ .convert_ctx_access = flow_dissector_convert_ctx_access, ++}; ++ ++const struct bpf_prog_ops flow_dissector_prog_ops = { ++}; ++ ++int sk_detach_filter(struct sock *sk) + { + int ret = -ENOENT; + struct sk_filter *filter; +@@ -1928,7 +3664,8 @@ int __sk_detach_filter(struct sock *sk, + if (sock_flag(sk, SOCK_FILTER_LOCKED)) + return -EPERM; + +- filter = rcu_dereference_protected(sk->sk_filter, locked); ++ filter = rcu_dereference_protected(sk->sk_filter, ++ lockdep_sock_is_held(sk)); + if (filter) { + RCU_INIT_POINTER(sk->sk_filter, NULL); + sk_filter_uncharge(sk, filter); +@@ -1937,12 +3674,7 @@ int __sk_detach_filter(struct sock *sk, + + return ret; + } +-EXPORT_SYMBOL_GPL(__sk_detach_filter); +- +-int sk_detach_filter(struct sock *sk) +-{ +- return __sk_detach_filter(sk, sock_owned_by_user(sk)); +-} ++EXPORT_SYMBOL_GPL(sk_detach_filter); + + int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, + unsigned int len) +@@ -1953,7 +3685,7 @@ int sk_get_filter(struct sock *sk, struc + + lock_sock(sk); + filter = rcu_dereference_protected(sk->sk_filter, +- sock_owned_by_user(sk)); ++ lockdep_sock_is_held(sk)); + if (!filter) + goto out; + +@@ -1987,3 +3719,5 @@ out: + release_sock(sk); + return ret; + } ++ ++ +--- a/include/asm-generic/barrier.h ++++ b/include/asm-generic/barrier.h +@@ -119,5 +119,29 @@ do { \ + ___p1; \ + }) + ++/** ++ * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees ++ * @ptr: pointer to the variable to wait on ++ * @cond: boolean expression to wait for ++ * ++ * 
Equivalent to using READ_ONCE() on the condition variable. ++ * ++ * Due to C lacking lambda expressions we load the value of *ptr into a ++ * pre-named variable @VAL to be used in @cond. ++ */ ++#ifndef smp_cond_load_relaxed ++#define smp_cond_load_relaxed(ptr, cond_expr) ({ \ ++ typeof(ptr) __PTR = (ptr); \ ++ typeof(*ptr) VAL; \ ++ for (;;) { \ ++ VAL = READ_ONCE(*__PTR); \ ++ if (cond_expr) \ ++ break; \ ++ cpu_relax(); \ ++ } \ ++ VAL; \ ++}) ++#endif ++ + #endif /* !__ASSEMBLY__ */ + #endif /* __ASM_GENERIC_BARRIER_H */ +--- a/arch/arm/include/asm/barrier.h ++++ b/arch/arm/include/asm/barrier.h +@@ -94,4 +94,6 @@ do { \ + #define smp_mb__after_atomic() smp_mb() + + #endif /* !__ASSEMBLY__ */ ++ ++#include + #endif /* __ASM_BARRIER_H */ +--- a/include/linux/list_nulls.h ++++ b/include/linux/list_nulls.h +@@ -1,3 +1,4 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ + #ifndef _LINUX_LIST_NULLS_H + #define _LINUX_LIST_NULLS_H + +@@ -29,6 +30,11 @@ struct hlist_nulls_node { + ((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls)) + + #define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member) ++ ++#define hlist_nulls_entry_safe(ptr, type, member) \ ++ ({ typeof(ptr) ____ptr = (ptr); \ ++ !is_a_nulls(____ptr) ? 
hlist_nulls_entry(____ptr, type, member) : NULL; \ ++ }) + /** + * ptr_is_a_nulls - Test if a ptr is a nulls + * @ptr: ptr to be tested +@@ -57,7 +63,7 @@ static inline int hlist_nulls_unhashed(c + + static inline int hlist_nulls_empty(const struct hlist_nulls_head *h) + { +- return is_a_nulls(h->first); ++ return is_a_nulls(READ_ONCE(h->first)); + } + + static inline void hlist_nulls_add_head(struct hlist_nulls_node *n, +@@ -66,10 +72,10 @@ static inline void hlist_nulls_add_head( + struct hlist_nulls_node *first = h->first; + + n->next = first; +- n->pprev = &h->first; ++ WRITE_ONCE(n->pprev, &h->first); + h->first = n; + if (!is_a_nulls(first)) +- first->pprev = &n->next; ++ WRITE_ONCE(first->pprev, &n->next); + } + + static inline void __hlist_nulls_del(struct hlist_nulls_node *n) +@@ -79,13 +85,13 @@ static inline void __hlist_nulls_del(str + + WRITE_ONCE(*pprev, next); + if (!is_a_nulls(next)) +- next->pprev = pprev; ++ WRITE_ONCE(next->pprev, pprev); + } + + static inline void hlist_nulls_del(struct hlist_nulls_node *n) + { + __hlist_nulls_del(n); +- n->pprev = LIST_POISON2; ++ WRITE_ONCE(n->pprev, LIST_POISON2); + } + + /** +--- a/include/linux/rculist_nulls.h ++++ b/include/linux/rculist_nulls.h +@@ -1,3 +1,4 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ + #ifndef _LINUX_RCULIST_NULLS_H + #define _LINUX_RCULIST_NULLS_H + +@@ -33,7 +34,7 @@ static inline void hlist_nulls_del_init_ + { + if (!hlist_nulls_unhashed(n)) { + __hlist_nulls_del(n); +- n->pprev = NULL; ++ WRITE_ONCE(n->pprev, NULL); + } + } + +@@ -65,7 +66,7 @@ static inline void hlist_nulls_del_init_ + static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n) + { + __hlist_nulls_del(n); +- n->pprev = LIST_POISON2; ++ WRITE_ONCE(n->pprev, LIST_POISON2); + } + + /** +@@ -93,11 +94,49 @@ static inline void hlist_nulls_add_head_ + struct hlist_nulls_node *first = h->first; + + n->next = first; +- n->pprev = &h->first; ++ WRITE_ONCE(n->pprev, &h->first); + 
rcu_assign_pointer(hlist_nulls_first_rcu(h), n); + if (!is_a_nulls(first)) +- first->pprev = &n->next; ++ WRITE_ONCE(first->pprev, &n->next); + } ++ ++/** ++ * hlist_nulls_add_tail_rcu ++ * @n: the element to add to the hash list. ++ * @h: the list to add to. ++ * ++ * Description: ++ * Adds the specified element to the specified hlist_nulls, ++ * while permitting racing traversals. ++ * ++ * The caller must take whatever precautions are necessary ++ * (such as holding appropriate locks) to avoid racing ++ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu() ++ * or hlist_nulls_del_rcu(), running on this same list. ++ * However, it is perfectly legal to run concurrently with ++ * the _rcu list-traversal primitives, such as ++ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency ++ * problems on Alpha CPUs. Regardless of the type of CPU, the ++ * list-traversal primitive must be guarded by rcu_read_lock(). ++ */ ++static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n, ++ struct hlist_nulls_head *h) ++{ ++ struct hlist_nulls_node *i, *last = NULL; ++ ++ /* Note: write side code, so rcu accessors are not needed. */ ++ for (i = h->first; !is_a_nulls(i); i = i->next) ++ last = i; ++ ++ if (last) { ++ n->next = last->next; ++ n->pprev = &last->next; ++ rcu_assign_pointer(hlist_next_rcu(last), n); ++ } else { ++ hlist_nulls_add_head_rcu(n, h); ++ } ++} ++ + /** + * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type + * @tpos: the type * to use as a loop cursor. 
+@@ -107,7 +146,7 @@ static inline void hlist_nulls_add_head_ + * + * The barrier() is needed to make sure compiler doesn't cache first element [1], + * as this loop can be restarted [2] +- * [1] Documentation/atomic_ops.txt around line 114 ++ * [1] Documentation/core-api/atomic_ops.rst around line 114 + * [2] Documentation/RCU/rculist_nulls.txt around line 146 + */ + #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ +@@ -117,5 +156,19 @@ static inline void hlist_nulls_add_head_ + ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ + pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) + ++/** ++ * hlist_nulls_for_each_entry_safe - ++ * iterate over list of given type safe against removal of list entry ++ * @tpos: the type * to use as a loop cursor. ++ * @pos: the &struct hlist_nulls_node to use as a loop cursor. ++ * @head: the head for your list. ++ * @member: the name of the hlist_nulls_node within the struct. ++ */ ++#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \ ++ for (({barrier();}), \ ++ pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ ++ (!is_a_nulls(pos)) && \ ++ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); \ ++ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });) + #endif + #endif +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -627,8 +627,9 @@ static int tun_attach(struct tun_struct + + /* Re-attach the filter to persist device */ + if (!skip_filter && (tun->filter_attached == true)) { +- err = __sk_attach_filter(&tun->fprog, tfile->socket.sk, +- lockdep_rtnl_is_held()); ++ lock_sock(tfile->socket.sk); ++ err = sk_attach_filter(&tun->fprog, tfile->socket.sk); ++ release_sock(tfile->socket.sk); + if (!err) + goto out; + } +@@ -1835,7 +1836,9 @@ static void tun_detach_filter(struct tun + + for (i = 0; i < n; i++) { + tfile = rtnl_dereference(tun->tfiles[i]); +- __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held()); ++ lock_sock(tfile->socket.sk); ++ 
sk_detach_filter(tfile->socket.sk); ++ release_sock(tfile->socket.sk); + } + + tun->filter_attached = false; +@@ -1848,8 +1851,9 @@ static int tun_attach_filter(struct tun_ + + for (i = 0; i < tun->numqueues; i++) { + tfile = rtnl_dereference(tun->tfiles[i]); +- ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk, +- lockdep_rtnl_is_held()); ++ lock_sock(tfile->socket.sk); ++ ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); ++ release_sock(tfile->socket.sk); + if (ret) { + tun_detach_filter(tun, i); + return ret; +--- a/include/linux/list.h ++++ b/include/linux/list.h +@@ -1,3 +1,4 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ + #ifndef _LINUX_LIST_H + #define _LINUX_LIST_H + +@@ -24,31 +25,46 @@ + + static inline void INIT_LIST_HEAD(struct list_head *list) + { +- list->next = list; ++ WRITE_ONCE(list->next, list); + list->prev = list; + } + ++#ifdef CONFIG_DEBUG_LIST ++extern bool __list_add_valid(struct list_head *new, ++ struct list_head *prev, ++ struct list_head *next); ++extern bool __list_del_entry_valid(struct list_head *entry); ++#else ++static inline bool __list_add_valid(struct list_head *new, ++ struct list_head *prev, ++ struct list_head *next) ++{ ++ return true; ++} ++static inline bool __list_del_entry_valid(struct list_head *entry) ++{ ++ return true; ++} ++#endif ++ + /* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! 
+ */ +-#ifndef CONFIG_DEBUG_LIST + static inline void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) + { ++ if (!__list_add_valid(new, prev, next)) ++ return; ++ + next->prev = new; + new->next = next; + new->prev = prev; +- prev->next = new; ++ WRITE_ONCE(prev->next, new); + } +-#else +-extern void __list_add(struct list_head *new, +- struct list_head *prev, +- struct list_head *next); +-#endif + + /** + * list_add - add a new entry +@@ -90,28 +106,40 @@ static inline void __list_del(struct lis + WRITE_ONCE(prev->next, next); + } + ++/* ++ * Delete a list entry and clear the 'prev' pointer. ++ * ++ * This is a special-purpose list clearing method used in the networking code ++ * for lists allocated as per-cpu, where we don't want to incur the extra ++ * WRITE_ONCE() overhead of a regular list_del_init(). The code that uses this ++ * needs to check the node 'prev' pointer instead of calling list_empty(). ++ */ ++static inline void __list_del_clearprev(struct list_head *entry) ++{ ++ __list_del(entry->prev, entry->next); ++ entry->prev = NULL; ++} ++ + /** + * list_del - deletes entry from list. + * @entry: the element to delete from the list. + * Note: list_empty() on entry does not return true after this, the entry is + * in an undefined state. 
+ */ +-#ifndef CONFIG_DEBUG_LIST + static inline void __list_del_entry(struct list_head *entry) + { ++ if (!__list_del_entry_valid(entry)) ++ return; ++ + __list_del(entry->prev, entry->next); + } + + static inline void list_del(struct list_head *entry) + { +- __list_del(entry->prev, entry->next); ++ __list_del_entry(entry); + entry->next = LIST_POISON1; + entry->prev = LIST_POISON2; + } +-#else +-extern void __list_del_entry(struct list_head *entry); +-extern void list_del(struct list_head *entry); +-#endif + + /** + * list_replace - replace old entry by new one +@@ -137,6 +165,23 @@ static inline void list_replace_init(str + } + + /** ++ * list_swap - replace entry1 with entry2 and re-add entry1 at entry2's position ++ * @entry1: the location to place entry2 ++ * @entry2: the location to place entry1 ++ */ ++static inline void list_swap(struct list_head *entry1, ++ struct list_head *entry2) ++{ ++ struct list_head *pos = entry2->prev; ++ ++ list_del(entry2); ++ list_replace(entry1, entry2); ++ if (pos == entry1) ++ pos = entry2; ++ list_add(entry1, pos); ++} ++ ++/** + * list_del_init - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. + */ +@@ -170,6 +215,40 @@ static inline void list_move_tail(struct + } + + /** ++ * list_bulk_move_tail - move a subsection of a list to its tail ++ * @head: the head that will follow our entry ++ * @first: first entry to move ++ * @last: last entry to move, can be the same as first ++ * ++ * Move all entries between @first and including @last before @head. ++ * All three entries must belong to the same linked list. 
++ */ ++static inline void list_bulk_move_tail(struct list_head *head, ++ struct list_head *first, ++ struct list_head *last) ++{ ++ first->prev->next = last->next; ++ last->next->prev = first->prev; ++ ++ head->prev->next = first; ++ first->prev = head->prev; ++ ++ last->next = head; ++ head->prev = last; ++} ++ ++/** ++ * list_is_first -- tests whether @list is the first entry in list @head ++ * @list: the entry to test ++ * @head: the head of the list ++ */ ++static inline int list_is_first(const struct list_head *list, ++ const struct list_head *head) ++{ ++ return list->prev == head; ++} ++ ++/** + * list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list +@@ -186,7 +265,7 @@ static inline int list_is_last(const str + */ + static inline int list_empty(const struct list_head *head) + { +- return head->next == head; ++ return READ_ONCE(head->next) == head; + } + + /** +@@ -223,6 +302,24 @@ static inline void list_rotate_left(stru + } + + /** ++ * list_rotate_to_front() - Rotate list to specific item. ++ * @list: The desired new front of the list. ++ * @head: The head of the list. ++ * ++ * Rotates list so that @list becomes the new front of the list. ++ */ ++static inline void list_rotate_to_front(struct list_head *list, ++ struct list_head *head) ++{ ++ /* ++ * Deletes the list head from the list denoted by @head and ++ * places it as the tail of @list, this effectively rotates the ++ * list so that @list is at the front. ++ */ ++ list_move_tail(head, list); ++} ++ ++/** + * list_is_singular - tests whether a list has just one entry. + * @head: the list to test. 
+ */ +@@ -271,6 +368,36 @@ static inline void list_cut_position(str + __list_cut_position(list, head, entry); + } + ++/** ++ * list_cut_before - cut a list into two, before given entry ++ * @list: a new list to add all removed entries ++ * @head: a list with entries ++ * @entry: an entry within head, could be the head itself ++ * ++ * This helper moves the initial part of @head, up to but ++ * excluding @entry, from @head to @list. You should pass ++ * in @entry an element you know is on @head. @list should ++ * be an empty list or a list you do not care about losing ++ * its data. ++ * If @entry == @head, all entries on @head are moved to ++ * @list. ++ */ ++static inline void list_cut_before(struct list_head *list, ++ struct list_head *head, ++ struct list_head *entry) ++{ ++ if (head->next == entry) { ++ INIT_LIST_HEAD(list); ++ return; ++ } ++ list->next = head->next; ++ list->next->prev = list; ++ list->prev = entry->prev; ++ list->prev->next = list; ++ head->next = entry; ++ entry->prev = head; ++} ++ + static inline void __list_splice(const struct list_head *list, + struct list_head *prev, + struct list_head *next) +@@ -381,8 +508,11 @@ static inline void list_splice_tail_init + * + * Note that if the list is empty, it returns NULL. + */ +-#define list_first_entry_or_null(ptr, type, member) \ +- (!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL) ++#define list_first_entry_or_null(ptr, type, member) ({ \ ++ struct list_head *head__ = (ptr); \ ++ struct list_head *pos__ = READ_ONCE(head__->next); \ ++ pos__ != head__ ? list_entry(pos__, type, member) : NULL; \ ++}) + + /** + * list_next_entry - get the next element in list +@@ -511,6 +641,19 @@ static inline void list_splice_tail_init + pos = list_next_entry(pos, member)) + + /** ++ * list_for_each_entry_from_reverse - iterate backwards over list of given type ++ * from the current point ++ * @pos: the type * to use as a loop cursor. ++ * @head: the head for your list. 
++ * @member: the name of the list_head within the struct. ++ * ++ * Iterate backwards over list of given type, continuing from current position. ++ */ ++#define list_for_each_entry_from_reverse(pos, head, member) \ ++ for (; &pos->member != (head); \ ++ pos = list_prev_entry(pos, member)) ++ ++/** + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage +@@ -608,7 +751,7 @@ static inline int hlist_unhashed(const s + + static inline int hlist_empty(const struct hlist_head *h) + { +- return !h->first; ++ return !READ_ONCE(h->first); + } + + static inline void __hlist_del(struct hlist_node *n) +@@ -642,7 +785,7 @@ static inline void hlist_add_head(struct + n->next = first; + if (first) + first->pprev = &n->next; +- h->first = n; ++ WRITE_ONCE(h->first, n); + n->pprev = &h->first; + } + +@@ -653,7 +796,7 @@ static inline void hlist_add_before(stru + n->pprev = next->pprev; + n->next = next; + next->pprev = &n->next; +- *(n->pprev) = n; ++ WRITE_ONCE(*(n->pprev), n); + } + + static inline void hlist_add_behind(struct hlist_node *n, +@@ -679,6 +822,16 @@ static inline bool hlist_fake(struct hli + } + + /* ++ * Check whether the node is the only node of the head without ++ * accessing head: ++ */ ++static inline bool ++hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) ++{ ++ return !n->next && n->pprev == &h->first; ++} ++ ++/* + * Move a list from one list head to another. Fixup the pprev + * reference of the first entry if it exists. + */ +--- /dev/null ++++ b/include/linux/ptr_ring.h +@@ -0,0 +1,673 @@ ++/* SPDX-License-Identifier: GPL-2.0-or-later */ ++/* ++ * Definitions for the 'struct ptr_ring' datastructure. ++ * ++ * Author: ++ * Michael S. Tsirkin ++ * ++ * Copyright (C) 2016 Red Hat, Inc. 
++ * ++ * This is a limited-size FIFO maintaining pointers in FIFO order, with ++ * one CPU producing entries and another consuming entries from a FIFO. ++ * ++ * This implementation tries to minimize cache-contention when there is a ++ * single producer and a single consumer CPU. ++ */ ++ ++#ifndef _LINUX_PTR_RING_H ++#define _LINUX_PTR_RING_H 1 ++ ++#ifdef __KERNEL__ ++#include ++#include ++#include ++#include ++#include ++#include ++#endif ++ ++struct ptr_ring { ++ int producer ____cacheline_aligned_in_smp; ++ spinlock_t producer_lock; ++ int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */ ++ int consumer_tail; /* next entry to invalidate */ ++ spinlock_t consumer_lock; ++ /* Shared consumer/producer data */ ++ /* Read-only by both the producer and the consumer */ ++ int size ____cacheline_aligned_in_smp; /* max entries in queue */ ++ int batch; /* number of entries to consume in a batch */ ++ void **queue; ++}; ++ ++/* Note: callers invoking this in a loop must use a compiler barrier, ++ * for example cpu_relax(). ++ * ++ * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock: ++ * see e.g. ptr_ring_full. 
++ */ ++static inline bool __ptr_ring_full(struct ptr_ring *r) ++{ ++ return r->queue[r->producer]; ++} ++ ++static inline bool ptr_ring_full(struct ptr_ring *r) ++{ ++ bool ret; ++ ++ spin_lock(&r->producer_lock); ++ ret = __ptr_ring_full(r); ++ spin_unlock(&r->producer_lock); ++ ++ return ret; ++} ++ ++static inline bool ptr_ring_full_irq(struct ptr_ring *r) ++{ ++ bool ret; ++ ++ spin_lock_irq(&r->producer_lock); ++ ret = __ptr_ring_full(r); ++ spin_unlock_irq(&r->producer_lock); ++ ++ return ret; ++} ++ ++static inline bool ptr_ring_full_any(struct ptr_ring *r) ++{ ++ unsigned long flags; ++ bool ret; ++ ++ spin_lock_irqsave(&r->producer_lock, flags); ++ ret = __ptr_ring_full(r); ++ spin_unlock_irqrestore(&r->producer_lock, flags); ++ ++ return ret; ++} ++ ++static inline bool ptr_ring_full_bh(struct ptr_ring *r) ++{ ++ bool ret; ++ ++ spin_lock_bh(&r->producer_lock); ++ ret = __ptr_ring_full(r); ++ spin_unlock_bh(&r->producer_lock); ++ ++ return ret; ++} ++ ++/* Note: callers invoking this in a loop must use a compiler barrier, ++ * for example cpu_relax(). Callers must hold producer_lock. ++ * Callers are responsible for making sure pointer that is being queued ++ * points to a valid data. ++ */ ++static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) ++{ ++ if (unlikely(!r->size) || r->queue[r->producer]) ++ return -ENOSPC; ++ ++ /* Make sure the pointer we are storing points to a valid data. */ ++ /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ ++ smp_wmb(); ++ ++ WRITE_ONCE(r->queue[r->producer++], ptr); ++ if (unlikely(r->producer >= r->size)) ++ r->producer = 0; ++ return 0; ++} ++ ++/* ++ * Note: resize (below) nests producer lock within consumer lock, so if you ++ * consume in interrupt or BH context, you must disable interrupts/BH when ++ * calling this. 
++ */ ++static inline int ptr_ring_produce(struct ptr_ring *r, void *ptr) ++{ ++ int ret; ++ ++ spin_lock(&r->producer_lock); ++ ret = __ptr_ring_produce(r, ptr); ++ spin_unlock(&r->producer_lock); ++ ++ return ret; ++} ++ ++static inline int ptr_ring_produce_irq(struct ptr_ring *r, void *ptr) ++{ ++ int ret; ++ ++ spin_lock_irq(&r->producer_lock); ++ ret = __ptr_ring_produce(r, ptr); ++ spin_unlock_irq(&r->producer_lock); ++ ++ return ret; ++} ++ ++static inline int ptr_ring_produce_any(struct ptr_ring *r, void *ptr) ++{ ++ unsigned long flags; ++ int ret; ++ ++ spin_lock_irqsave(&r->producer_lock, flags); ++ ret = __ptr_ring_produce(r, ptr); ++ spin_unlock_irqrestore(&r->producer_lock, flags); ++ ++ return ret; ++} ++ ++static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) ++{ ++ int ret; ++ ++ spin_lock_bh(&r->producer_lock); ++ ret = __ptr_ring_produce(r, ptr); ++ spin_unlock_bh(&r->producer_lock); ++ ++ return ret; ++} ++ ++static inline void *__ptr_ring_peek(struct ptr_ring *r) ++{ ++ if (likely(r->size)) ++ return READ_ONCE(r->queue[r->consumer_head]); ++ return NULL; ++} ++ ++/* ++ * Test ring empty status without taking any locks. ++ * ++ * NB: This is only safe to call if ring is never resized. ++ * ++ * However, if some other CPU consumes ring entries at the same time, the value ++ * returned is not guaranteed to be correct. ++ * ++ * In this case - to avoid incorrectly detecting the ring ++ * as empty - the CPU consuming the ring entries is responsible ++ * for either consuming all ring entries until the ring is empty, ++ * or synchronizing with some other CPU and causing it to ++ * re-test __ptr_ring_empty and/or consume the ring enteries ++ * after the synchronization point. ++ * ++ * Note: callers invoking this in a loop must use a compiler barrier, ++ * for example cpu_relax(). 
++ */ ++static inline bool __ptr_ring_empty(struct ptr_ring *r) ++{ ++ if (likely(r->size)) ++ return !r->queue[READ_ONCE(r->consumer_head)]; ++ return true; ++} ++ ++static inline bool ptr_ring_empty(struct ptr_ring *r) ++{ ++ bool ret; ++ ++ spin_lock(&r->consumer_lock); ++ ret = __ptr_ring_empty(r); ++ spin_unlock(&r->consumer_lock); ++ ++ return ret; ++} ++ ++static inline bool ptr_ring_empty_irq(struct ptr_ring *r) ++{ ++ bool ret; ++ ++ spin_lock_irq(&r->consumer_lock); ++ ret = __ptr_ring_empty(r); ++ spin_unlock_irq(&r->consumer_lock); ++ ++ return ret; ++} ++ ++static inline bool ptr_ring_empty_any(struct ptr_ring *r) ++{ ++ unsigned long flags; ++ bool ret; ++ ++ spin_lock_irqsave(&r->consumer_lock, flags); ++ ret = __ptr_ring_empty(r); ++ spin_unlock_irqrestore(&r->consumer_lock, flags); ++ ++ return ret; ++} ++ ++static inline bool ptr_ring_empty_bh(struct ptr_ring *r) ++{ ++ bool ret; ++ ++ spin_lock_bh(&r->consumer_lock); ++ ret = __ptr_ring_empty(r); ++ spin_unlock_bh(&r->consumer_lock); ++ ++ return ret; ++} ++ ++/* Must only be called after __ptr_ring_peek returned !NULL */ ++static inline void __ptr_ring_discard_one(struct ptr_ring *r) ++{ ++ /* Fundamentally, what we want to do is update consumer ++ * index and zero out the entry so producer can reuse it. ++ * Doing it naively at each consume would be as simple as: ++ * consumer = r->consumer; ++ * r->queue[consumer++] = NULL; ++ * if (unlikely(consumer >= r->size)) ++ * consumer = 0; ++ * r->consumer = consumer; ++ * but that is suboptimal when the ring is full as producer is writing ++ * out new entries in the same cache line. Defer these updates until a ++ * batch of entries has been consumed. ++ */ ++ /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty ++ * to work correctly. 
++ */ ++ int consumer_head = r->consumer_head; ++ int head = consumer_head++; ++ ++ /* Once we have processed enough entries invalidate them in ++ * the ring all at once so producer can reuse their space in the ring. ++ * We also do this when we reach end of the ring - not mandatory ++ * but helps keep the implementation simple. ++ */ ++ if (unlikely(consumer_head - r->consumer_tail >= r->batch || ++ consumer_head >= r->size)) { ++ /* Zero out entries in the reverse order: this way we touch the ++ * cache line that producer might currently be reading the last; ++ * producer won't make progress and touch other cache lines ++ * besides the first one until we write out all entries. ++ */ ++ while (likely(head >= r->consumer_tail)) ++ r->queue[head--] = NULL; ++ r->consumer_tail = consumer_head; ++ } ++ if (unlikely(consumer_head >= r->size)) { ++ consumer_head = 0; ++ r->consumer_tail = 0; ++ } ++ /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ ++ WRITE_ONCE(r->consumer_head, consumer_head); ++} ++ ++static inline void *__ptr_ring_consume(struct ptr_ring *r) ++{ ++ void *ptr; ++ ++ /* The READ_ONCE in __ptr_ring_peek guarantees that anyone ++ * accessing data through the pointer is up to date. Pairs ++ * with smp_wmb in __ptr_ring_produce. ++ */ ++ ptr = __ptr_ring_peek(r); ++ if (ptr) ++ __ptr_ring_discard_one(r); ++ ++ return ptr; ++} ++ ++static inline int __ptr_ring_consume_batched(struct ptr_ring *r, ++ void **array, int n) ++{ ++ void *ptr; ++ int i; ++ ++ for (i = 0; i < n; i++) { ++ ptr = __ptr_ring_consume(r); ++ if (!ptr) ++ break; ++ array[i] = ptr; ++ } ++ ++ return i; ++} ++ ++/* ++ * Note: resize (below) nests producer lock within consumer lock, so if you ++ * call this in interrupt or BH context, you must disable interrupts/BH when ++ * producing. 
++ */ ++static inline void *ptr_ring_consume(struct ptr_ring *r) ++{ ++ void *ptr; ++ ++ spin_lock(&r->consumer_lock); ++ ptr = __ptr_ring_consume(r); ++ spin_unlock(&r->consumer_lock); ++ ++ return ptr; ++} ++ ++static inline void *ptr_ring_consume_irq(struct ptr_ring *r) ++{ ++ void *ptr; ++ ++ spin_lock_irq(&r->consumer_lock); ++ ptr = __ptr_ring_consume(r); ++ spin_unlock_irq(&r->consumer_lock); ++ ++ return ptr; ++} ++ ++static inline void *ptr_ring_consume_any(struct ptr_ring *r) ++{ ++ unsigned long flags; ++ void *ptr; ++ ++ spin_lock_irqsave(&r->consumer_lock, flags); ++ ptr = __ptr_ring_consume(r); ++ spin_unlock_irqrestore(&r->consumer_lock, flags); ++ ++ return ptr; ++} ++ ++static inline void *ptr_ring_consume_bh(struct ptr_ring *r) ++{ ++ void *ptr; ++ ++ spin_lock_bh(&r->consumer_lock); ++ ptr = __ptr_ring_consume(r); ++ spin_unlock_bh(&r->consumer_lock); ++ ++ return ptr; ++} ++ ++static inline int ptr_ring_consume_batched(struct ptr_ring *r, ++ void **array, int n) ++{ ++ int ret; ++ ++ spin_lock(&r->consumer_lock); ++ ret = __ptr_ring_consume_batched(r, array, n); ++ spin_unlock(&r->consumer_lock); ++ ++ return ret; ++} ++ ++static inline int ptr_ring_consume_batched_irq(struct ptr_ring *r, ++ void **array, int n) ++{ ++ int ret; ++ ++ spin_lock_irq(&r->consumer_lock); ++ ret = __ptr_ring_consume_batched(r, array, n); ++ spin_unlock_irq(&r->consumer_lock); ++ ++ return ret; ++} ++ ++static inline int ptr_ring_consume_batched_any(struct ptr_ring *r, ++ void **array, int n) ++{ ++ unsigned long flags; ++ int ret; ++ ++ spin_lock_irqsave(&r->consumer_lock, flags); ++ ret = __ptr_ring_consume_batched(r, array, n); ++ spin_unlock_irqrestore(&r->consumer_lock, flags); ++ ++ return ret; ++} ++ ++static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r, ++ void **array, int n) ++{ ++ int ret; ++ ++ spin_lock_bh(&r->consumer_lock); ++ ret = __ptr_ring_consume_batched(r, array, n); ++ spin_unlock_bh(&r->consumer_lock); ++ ++ return ret; ++} ++ ++/* 
Cast to structure type and call a function without discarding from FIFO. ++ * Function must return a value. ++ * Callers must take consumer_lock. ++ */ ++#define __PTR_RING_PEEK_CALL(r, f) ((f)(__ptr_ring_peek(r))) ++ ++#define PTR_RING_PEEK_CALL(r, f) ({ \ ++ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ ++ \ ++ spin_lock(&(r)->consumer_lock); \ ++ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ ++ spin_unlock(&(r)->consumer_lock); \ ++ __PTR_RING_PEEK_CALL_v; \ ++}) ++ ++#define PTR_RING_PEEK_CALL_IRQ(r, f) ({ \ ++ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ ++ \ ++ spin_lock_irq(&(r)->consumer_lock); \ ++ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ ++ spin_unlock_irq(&(r)->consumer_lock); \ ++ __PTR_RING_PEEK_CALL_v; \ ++}) ++ ++#define PTR_RING_PEEK_CALL_BH(r, f) ({ \ ++ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ ++ \ ++ spin_lock_bh(&(r)->consumer_lock); \ ++ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ ++ spin_unlock_bh(&(r)->consumer_lock); \ ++ __PTR_RING_PEEK_CALL_v; \ ++}) ++ ++#define PTR_RING_PEEK_CALL_ANY(r, f) ({ \ ++ typeof((f)(NULL)) __PTR_RING_PEEK_CALL_v; \ ++ unsigned long __PTR_RING_PEEK_CALL_f;\ ++ \ ++ spin_lock_irqsave(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ ++ __PTR_RING_PEEK_CALL_v = __PTR_RING_PEEK_CALL(r, f); \ ++ spin_unlock_irqrestore(&(r)->consumer_lock, __PTR_RING_PEEK_CALL_f); \ ++ __PTR_RING_PEEK_CALL_v; \ ++}) ++ ++/* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See ++ * documentation for vmalloc for which of them are legal. 
++ */ ++static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) ++{ ++ if (size > KMALLOC_MAX_SIZE / sizeof(void *)) ++ return NULL; ++ return kmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); ++} ++ ++static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) ++{ ++ r->size = size; ++ r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue)); ++ /* We need to set batch at least to 1 to make logic ++ * in __ptr_ring_discard_one work correctly. ++ * Batching too much (because ring is small) would cause a lot of ++ * burstiness. Needs tuning, for now disable batching. ++ */ ++ if (r->batch > r->size / 2 || !r->batch) ++ r->batch = 1; ++} ++ ++static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) ++{ ++ r->queue = __ptr_ring_init_queue_alloc(size, gfp); ++ if (!r->queue) ++ return -ENOMEM; ++ ++ __ptr_ring_set_size(r, size); ++ r->producer = r->consumer_head = r->consumer_tail = 0; ++ spin_lock_init(&r->producer_lock); ++ spin_lock_init(&r->consumer_lock); ++ ++ return 0; ++} ++ ++/* ++ * Return entries into ring. Destroy entries that don't fit. ++ * ++ * Note: this is expected to be a rare slow path operation. ++ * ++ * Note: producer lock is nested within consumer lock, so if you ++ * resize you must make sure all uses nest correctly. ++ * In particular if you consume ring in interrupt or BH context, you must ++ * disable interrupts/BH when doing so. ++ */ ++static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, ++ void (*destroy)(void *)) ++{ ++ unsigned long flags; ++ int head; ++ ++ spin_lock_irqsave(&r->consumer_lock, flags); ++ spin_lock(&r->producer_lock); ++ ++ if (!r->size) ++ goto done; ++ ++ /* ++ * Clean out buffered entries (for simplicity). This way following code ++ * can test entries for NULL and if not assume they are valid. 
++ */ ++ head = r->consumer_head - 1; ++ while (likely(head >= r->consumer_tail)) ++ r->queue[head--] = NULL; ++ r->consumer_tail = r->consumer_head; ++ ++ /* ++ * Go over entries in batch, start moving head back and copy entries. ++ * Stop when we run into previously unconsumed entries. ++ */ ++ while (n) { ++ head = r->consumer_head - 1; ++ if (head < 0) ++ head = r->size - 1; ++ if (r->queue[head]) { ++ /* This batch entry will have to be destroyed. */ ++ goto done; ++ } ++ r->queue[head] = batch[--n]; ++ r->consumer_tail = head; ++ /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ ++ WRITE_ONCE(r->consumer_head, head); ++ } ++ ++done: ++ /* Destroy all entries left in the batch. */ ++ while (n) ++ destroy(batch[--n]); ++ spin_unlock(&r->producer_lock); ++ spin_unlock_irqrestore(&r->consumer_lock, flags); ++} ++ ++static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue, ++ int size, gfp_t gfp, ++ void (*destroy)(void *)) ++{ ++ int producer = 0; ++ void **old; ++ void *ptr; ++ ++ while ((ptr = __ptr_ring_consume(r))) ++ if (producer < size) ++ queue[producer++] = ptr; ++ else if (destroy) ++ destroy(ptr); ++ ++ if (producer >= size) ++ producer = 0; ++ __ptr_ring_set_size(r, size); ++ r->producer = producer; ++ r->consumer_head = 0; ++ r->consumer_tail = 0; ++ old = r->queue; ++ r->queue = queue; ++ ++ return old; ++} ++ ++/* ++ * Note: producer lock is nested within consumer lock, so if you ++ * resize you must make sure all uses nest correctly. ++ * In particular if you consume ring in interrupt or BH context, you must ++ * disable interrupts/BH when doing so. 
++ */ ++static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, ++ void (*destroy)(void *)) ++{ ++ unsigned long flags; ++ void **queue = __ptr_ring_init_queue_alloc(size, gfp); ++ void **old; ++ ++ if (!queue) ++ return -ENOMEM; ++ ++ spin_lock_irqsave(&(r)->consumer_lock, flags); ++ spin_lock(&(r)->producer_lock); ++ ++ old = __ptr_ring_swap_queue(r, queue, size, gfp, destroy); ++ ++ spin_unlock(&(r)->producer_lock); ++ spin_unlock_irqrestore(&(r)->consumer_lock, flags); ++ ++ kvfree(old); ++ ++ return 0; ++} ++ ++/* ++ * Note: producer lock is nested within consumer lock, so if you ++ * resize you must make sure all uses nest correctly. ++ * In particular if you consume ring in interrupt or BH context, you must ++ * disable interrupts/BH when doing so. ++ */ ++static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, ++ unsigned int nrings, ++ int size, ++ gfp_t gfp, void (*destroy)(void *)) ++{ ++ unsigned long flags; ++ void ***queues; ++ int i; ++ ++ queues = kmalloc_array(nrings, sizeof(*queues), gfp); ++ if (!queues) ++ goto noqueues; ++ ++ for (i = 0; i < nrings; ++i) { ++ queues[i] = __ptr_ring_init_queue_alloc(size, gfp); ++ if (!queues[i]) ++ goto nomem; ++ } ++ ++ for (i = 0; i < nrings; ++i) { ++ spin_lock_irqsave(&(rings[i])->consumer_lock, flags); ++ spin_lock(&(rings[i])->producer_lock); ++ queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], ++ size, gfp, destroy); ++ spin_unlock(&(rings[i])->producer_lock); ++ spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); ++ } ++ ++ for (i = 0; i < nrings; ++i) ++ kvfree(queues[i]); ++ ++ kfree(queues); ++ ++ return 0; ++ ++nomem: ++ while (--i >= 0) ++ kvfree(queues[i]); ++ ++ kfree(queues); ++ ++noqueues: ++ return -ENOMEM; ++} ++ ++static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) ++{ ++ void *ptr; ++ ++ if (destroy) ++ while ((ptr = ptr_ring_consume(r))) ++ destroy(ptr); ++ kvfree(r->queue); ++} ++ ++#endif /* _LINUX_PTR_RING_H */ +--- 
a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -37,6 +37,7 @@ + #include + #include + #include ++#include + #include + + /* A. Checksumming of received packets by device. +@@ -592,13 +593,23 @@ struct sk_buff { + */ + kmemcheck_bitfield_begin(flags1); + __u16 queue_mapping; ++ ++/* if you move cloned around you also must adapt those constants */ ++#ifdef __BIG_ENDIAN_BITFIELD ++#define CLONED_MASK (1 << 7) ++#else ++#define CLONED_MASK 1 ++#endif ++#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset) ++ ++ __u8 __cloned_offset[0]; + __u8 cloned:1, + nohdr:1, + fclone:2, + peeked:1, + head_frag:1, +- xmit_more:1; +- /* one bit hole */ ++ xmit_more:1, ++ __unused:1; /* one bit hole */ + kmemcheck_bitfield_end(flags1); + + /* fields enclosed in headers_start/headers_end are copied +@@ -639,6 +650,14 @@ struct sk_buff { + __u8 csum_level:2; + __u8 csum_bad:1; + ++#ifdef __BIG_ENDIAN_BITFIELD ++#define PKT_VLAN_PRESENT_BIT 7 ++#else ++#define PKT_VLAN_PRESENT_BIT 0 ++#endif ++#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset) ++ __u8 __pkt_vlan_present_offset[0]; ++ __u8 vlan_present:1; + #ifdef CONFIG_IPV6_NDISC_NODETYPE + __u8 ndisc_nodetype:2; + #endif +@@ -647,7 +666,7 @@ struct sk_buff { + __u8 remcsum_offload:1; + __u8 gro_skip:1; + __u8 fast_forwarded:1; +- /* 1 or 3 bit hole */ ++ /* 0 or 2 bit hole */ + + #ifdef CONFIG_NET_SCHED + __u16 tc_index; /* traffic control index */ +@@ -805,6 +824,15 @@ static inline struct rtable *skb_rtable( + return (struct rtable *)skb_dst(skb); + } + ++/* For mangling skb->pkt_type from user space side from applications ++ * such as nft, tc, etc, we only allow a conservative subset of ++ * possible pkt_types to be set. 
++*/ ++static inline bool skb_pkt_type_ok(u32 ptype) ++{ ++ return ptype <= PACKET_OTHERHOST; ++} ++ + void kfree_skb(struct sk_buff *skb); + void kfree_skb_list(struct sk_buff *segs); + void skb_tx_error(struct sk_buff *skb); +@@ -2127,6 +2155,11 @@ static inline unsigned char *skb_mac_hea + return skb->head + skb->mac_header; + } + ++static inline u32 skb_mac_header_len(const struct sk_buff *skb) ++{ ++ return skb->network_header - skb->mac_header; ++} ++ + static inline int skb_mac_header_was_set(const struct sk_buff *skb) + { + return skb->mac_header != (typeof(skb->mac_header))~0U; +@@ -2256,7 +2289,7 @@ static inline int pskb_network_may_pull( + + int ___pskb_trim(struct sk_buff *skb, unsigned int len); + +-static inline void __skb_trim(struct sk_buff *skb, unsigned int len) ++static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) + { + if (unlikely(skb_is_nonlinear(skb))) { + WARN_ON(1); +@@ -2266,6 +2299,11 @@ static inline void __skb_trim(struct sk_ + skb_set_tail_pointer(skb, len); + } + ++static inline void __skb_trim(struct sk_buff *skb, unsigned int len) ++{ ++ __skb_set_length(skb, len); ++} ++ + void skb_trim(struct sk_buff *skb, unsigned int len); + + static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) +@@ -2318,6 +2356,20 @@ static inline struct sk_buff *skb_reduce + return skb; + } + ++static inline int __skb_grow(struct sk_buff *skb, unsigned int len) ++{ ++ unsigned int diff = len - skb->len; ++ ++ if (skb_tailroom(skb) < diff) { ++ int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb), ++ GFP_ATOMIC); ++ if (ret) ++ return ret; ++ } ++ __skb_set_length(skb, len); ++ return 0; ++} ++ + /** + * skb_orphan - orphan a buffer + * @skb: buffer to orphan +@@ -2818,6 +2870,18 @@ static inline int skb_linearize_cow(stru + __skb_linearize(skb) : 0; + } + ++static __always_inline void ++__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len, ++ unsigned int off) ++{ ++ if (skb->ip_summed == 
CHECKSUM_COMPLETE) ++ skb->csum = csum_block_sub(skb->csum, ++ csum_partial(start, len, 0), off); ++ else if (skb->ip_summed == CHECKSUM_PARTIAL && ++ skb_checksum_start_offset(skb) < 0) ++ skb->ip_summed = CHECKSUM_NONE; ++} ++ + /** + * skb_postpull_rcsum - update checksum for received skb after pull + * @skb: buffer to update +@@ -2828,36 +2892,38 @@ static inline int skb_linearize_cow(stru + * update the CHECKSUM_COMPLETE checksum, or set ip_summed to + * CHECKSUM_NONE so that it can be recomputed from scratch. + */ +- + static inline void skb_postpull_rcsum(struct sk_buff *skb, + const void *start, unsigned int len) + { +- if (skb->ip_summed == CHECKSUM_COMPLETE) +- skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0)); +- else if (skb->ip_summed == CHECKSUM_PARTIAL && +- skb_checksum_start_offset(skb) < 0) +- skb->ip_summed = CHECKSUM_NONE; ++ __skb_postpull_rcsum(skb, start, len, 0); + } + +-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); ++static __always_inline void ++__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len, ++ unsigned int off) ++{ ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ skb->csum = csum_block_add(skb->csum, ++ csum_partial(start, len, 0), off); ++} + ++/** ++ * skb_postpush_rcsum - update checksum for received skb after push ++ * @skb: buffer to update ++ * @start: start of data after push ++ * @len: length of data pushed ++ * ++ * After doing a push on a received packet, you need to call this to ++ * update the CHECKSUM_COMPLETE checksum. ++ */ + static inline void skb_postpush_rcsum(struct sk_buff *skb, + const void *start, unsigned int len) + { +- /* For performing the reverse operation to skb_postpull_rcsum(), +- * we can instead of ... +- * +- * skb->csum = csum_add(skb->csum, csum_partial(start, len, 0)); +- * +- * ... just use this equivalent version here to save a few +- * instructions. 
Feeding csum of 0 in csum_partial() and later +- * on adding skb->csum is equivalent to feed skb->csum in the +- * first place. +- */ +- if (skb->ip_summed == CHECKSUM_COMPLETE) +- skb->csum = csum_partial(start, len, skb->csum); ++ __skb_postpush_rcsum(skb, start, len, 0); + } + ++unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len); ++ + /** + * skb_push_rcsum - push skb and update receive checksum + * @skb: buffer to update +@@ -2901,6 +2967,21 @@ static inline int pskb_trim_rcsum(struct + #define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode)) + #define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode)) + ++static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len) ++{ ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ skb->ip_summed = CHECKSUM_NONE; ++ __skb_trim(skb, len); ++ return 0; ++} ++ ++static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) ++{ ++ if (skb->ip_summed == CHECKSUM_COMPLETE) ++ skb->ip_summed = CHECKSUM_NONE; ++ return __skb_grow(skb, len); ++} ++ + #define skb_queue_walk(queue, skb) \ + for (skb = (queue)->next; \ + skb != (struct sk_buff *)(queue); \ +@@ -3662,6 +3743,13 @@ static inline bool skb_is_gso_v6(const s + return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; + } + ++static inline void skb_gso_reset(struct sk_buff *skb) ++{ ++ skb_shinfo(skb)->gso_size = 0; ++ skb_shinfo(skb)->gso_segs = 0; ++ skb_shinfo(skb)->gso_type = 0; ++} ++ + void __skb_warn_lro_forwarding(const struct sk_buff *skb); + + static inline bool skb_warn_if_lro(const struct sk_buff *skb) +--- a/include/linux/if_arp.h ++++ b/include/linux/if_arp.h +@@ -44,4 +44,21 @@ static inline int arp_hdr_len(struct net + return sizeof(struct arphdr) + (dev->addr_len + sizeof(u32)) * 2; + } + } ++ ++static inline bool dev_is_mac_header_xmit(const struct net_device *dev) ++{ ++ switch (dev->type) { ++ case ARPHRD_TUNNEL: ++ case ARPHRD_TUNNEL6: ++ case ARPHRD_SIT: ++ case ARPHRD_IPGRE: ++ case ARPHRD_VOID: ++ case ARPHRD_NONE: 
++ case ARPHRD_RAWIP: ++ return false; ++ default: ++ return true; ++ } ++} ++ + #endif /* _LINUX_IF_ARP_H */ +--- a/include/linux/if_vlan.h ++++ b/include/linux/if_vlan.h +@@ -66,7 +66,6 @@ static inline struct vlan_ethhdr *vlan_e + #define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ + #define VLAN_PRIO_SHIFT 13 + #define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ +-#define VLAN_TAG_PRESENT VLAN_CFI_MASK + #define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ + #define VLAN_N_VID 4096 + +@@ -78,8 +77,8 @@ static inline bool is_vlan_dev(struct ne + return dev->priv_flags & IFF_802_1Q_VLAN; + } + +-#define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) +-#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) ++#define skb_vlan_tag_present(__skb) ((__skb)->vlan_present) ++#define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci) + #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) + + /** +@@ -376,6 +375,31 @@ static inline struct sk_buff *vlan_inser + return skb; + } + ++/** ++ * __vlan_hwaccel_clear_tag - clear hardware accelerated VLAN info ++ * @skb: skbuff to clear ++ * ++ * Clears the VLAN information from @skb ++ */ ++static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb) ++{ ++ skb->vlan_present = 0; ++} ++ ++/** ++ * __vlan_hwaccel_copy_tag - copy hardware accelerated VLAN info from another skb ++ * @dst: skbuff to copy to ++ * @src: skbuff to copy from ++ * ++ * Copies VLAN information from @src to @dst (for branchless code) ++ */ ++static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src) ++{ ++ dst->vlan_present = src->vlan_present; ++ dst->vlan_proto = src->vlan_proto; ++ dst->vlan_tci = src->vlan_tci; ++} ++ + /* + * __vlan_hwaccel_push_inside - pushes vlan tag to the payload + * @skb: skbuff to tag +@@ -390,7 +414,7 @@ static inline struct sk_buff *__vlan_hwa + skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto, + skb_vlan_tag_get(skb)); + if 
(likely(skb)) +- skb->vlan_tci = 0; ++ __vlan_hwaccel_clear_tag(skb); + return skb; + } + /* +@@ -422,7 +446,8 @@ static inline void __vlan_hwaccel_put_ta + __be16 vlan_proto, u16 vlan_tci) + { + skb->vlan_proto = vlan_proto; +- skb->vlan_tci = VLAN_TAG_PRESENT | vlan_tci; ++ skb->vlan_tci = vlan_tci; ++ skb->vlan_present = 1; + } + + /** +--- a/include/net/checksum.h ++++ b/include/net/checksum.h +@@ -120,6 +120,11 @@ static inline __wsum csum_partial_ext(co + + #define CSUM_MANGLED_0 ((__force __sum16)0xffff) + ++static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) ++{ ++ *sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); ++} ++ + static inline void csum_replace4(__sum16 *sum, __be32 from, __be32 to) + { + __wsum tmp = csum_sub(~csum_unfold(*sum), (__force __wsum)from); +--- a/lib/test_bpf.c ++++ b/lib/test_bpf.c +@@ -38,6 +38,7 @@ + #define SKB_HASH 0x1234aaab + #define SKB_QUEUE_MAP 123 + #define SKB_VLAN_TCI 0xffff ++#define SKB_VLAN_PRESENT 1 + #define SKB_DEV_IFINDEX 577 + #define SKB_DEV_TYPE 588 + +@@ -691,8 +692,8 @@ static struct bpf_test tests[] = { + CLASSIC, + { }, + { +- { 1, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT }, +- { 10, SKB_VLAN_TCI & ~VLAN_TAG_PRESENT } ++ { 1, SKB_VLAN_TCI }, ++ { 10, SKB_VLAN_TCI } + }, + }, + { +@@ -705,8 +706,8 @@ static struct bpf_test tests[] = { + CLASSIC, + { }, + { +- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, +- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } ++ { 1, SKB_VLAN_PRESENT }, ++ { 10, SKB_VLAN_PRESENT } + }, + }, + { +@@ -4432,8 +4433,8 @@ static struct bpf_test tests[] = { + CLASSIC, + { }, + { +- { 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }, +- { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) } ++ { 1, SKB_VLAN_PRESENT }, ++ { 10, SKB_VLAN_PRESENT } + }, + .fill_helper = bpf_fill_maxinsns6, + }, +@@ -5144,6 +5145,7 @@ static struct sk_buff *populate_skb(char + skb->hash = SKB_HASH; + skb->queue_mapping = SKB_QUEUE_MAP; + skb->vlan_tci = SKB_VLAN_TCI; ++ skb->vlan_present = SKB_VLAN_PRESENT; + skb->dev = 
&dev; + skb->dev->ifindex = SKB_DEV_IFINDEX; + skb->dev->type = SKB_DEV_TYPE; +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -3171,6 +3171,21 @@ int __dev_forward_skb(struct net_device + int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); + bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb); + ++static __always_inline int ____dev_forward_skb(struct net_device *dev, ++ struct sk_buff *skb) ++{ ++ if (skb_orphan_frags(skb, GFP_ATOMIC) || ++ unlikely(!is_skb_forwardable(dev, skb))) { ++ atomic_long_inc(&dev->rx_dropped); ++ kfree_skb(skb); ++ return NET_RX_DROP; ++ } ++ ++ skb_scrub_packet(skb, true); ++ skb->priority = 0; ++ return 0; ++} ++ + extern int netdev_budget; + + /* Called by rtnetlink.c:rtnl_unlock() */ +--- a/net/openvswitch/actions.c ++++ b/net/openvswitch/actions.c +@@ -246,7 +246,7 @@ static int push_vlan(struct sk_buff *skb + else + key->eth.tci = vlan->vlan_tci; + return skb_vlan_push(skb, vlan->vlan_tpid, +- ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); ++ ntohs(vlan->vlan_tci) & ~VLAN_CFI_MASK); + } + + /* 'src' is already properly masked. */ +--- a/net/openvswitch/flow.c ++++ b/net/openvswitch/flow.c +@@ -318,7 +318,7 @@ static int parse_vlan(struct sk_buff *sk + return -ENOMEM; + + qp = (struct qtag_prefix *) skb->data; +- key->eth.tci = qp->tci | htons(VLAN_TAG_PRESENT); ++ key->eth.tci = qp->tci | htons(VLAN_CFI_MASK); + __skb_pull(skb, sizeof(struct qtag_prefix)); + + return 0; +--- a/net/openvswitch/flow.h ++++ b/net/openvswitch/flow.h +@@ -69,7 +69,7 @@ struct sw_flow_key { + struct { + u8 src[ETH_ALEN]; /* Ethernet source address. */ + u8 dst[ETH_ALEN]; /* Ethernet destination address. */ +- __be16 tci; /* 0 if no VLAN, VLAN_TAG_PRESENT set otherwise. */ ++ __be16 tci; /* 0 if no VLAN, VLAN_CFI_MASK set otherwise. */ + __be16 type; /* Ethernet frame type. 
*/ + } eth; + union { +--- a/net/openvswitch/flow_netlink.c ++++ b/net/openvswitch/flow_netlink.c +@@ -925,11 +925,11 @@ static int ovs_key_from_nlattrs(struct n + __be16 tci; + + tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); +- if (!(tci & htons(VLAN_TAG_PRESENT))) { ++ if (!(tci & htons(VLAN_CFI_MASK))) { + if (is_mask) +- OVS_NLERR(log, "VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit."); ++ OVS_NLERR(log, "VLAN TCI mask does not have exact match for VLAN_CFI_MASK bit."); + else +- OVS_NLERR(log, "VLAN TCI does not have VLAN_TAG_PRESENT bit set."); ++ OVS_NLERR(log, "VLAN TCI does not have VLAN_CFI_MASK bit set."); + + return -EINVAL; + } +@@ -1209,7 +1209,7 @@ int ovs_nla_get_match(struct net *net, s + key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP); + encap_valid = true; + +- if (tci & htons(VLAN_TAG_PRESENT)) { ++ if (tci & htons(VLAN_CFI_MASK)) { + err = parse_flow_nlattrs(encap, a, &key_attrs, log); + if (err) + return err; +@@ -1297,7 +1297,7 @@ int ovs_nla_get_match(struct net *net, s + if (a[OVS_KEY_ATTR_VLAN]) + tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]); + +- if (!(tci & htons(VLAN_TAG_PRESENT))) { ++ if (!(tci & htons(VLAN_CFI_MASK))) { + OVS_NLERR(log, "VLAN tag present bit must have an exact match (tci_mask=%x).", + ntohs(tci)); + err = -EINVAL; +@@ -2272,7 +2272,7 @@ static int __ovs_nla_copy_actions(struct + vlan = nla_data(a); + if (vlan->vlan_tpid != htons(ETH_P_8021Q)) + return -EINVAL; +- if (!(vlan->vlan_tci & htons(VLAN_TAG_PRESENT))) ++ if (!(vlan->vlan_tci & htons(VLAN_CFI_MASK))) + return -EINVAL; + vlan_tci = vlan->vlan_tci; + break; +@@ -2288,7 +2288,7 @@ static int __ovs_nla_copy_actions(struct + /* Prohibit push MPLS other than to a white list + * for packets that have a known tag order. 
+ */ +- if (vlan_tci & htons(VLAN_TAG_PRESENT) || ++ if (vlan_tci & htons(VLAN_CFI_MASK) || + (eth_type != htons(ETH_P_IP) && + eth_type != htons(ETH_P_IPV6) && + eth_type != htons(ETH_P_ARP) && +@@ -2300,7 +2300,7 @@ static int __ovs_nla_copy_actions(struct + } + + case OVS_ACTION_ATTR_POP_MPLS: +- if (vlan_tci & htons(VLAN_TAG_PRESENT) || ++ if (vlan_tci & htons(VLAN_CFI_MASK) || + !eth_p_mpls(eth_type)) + return -EINVAL; + +--- a/net/sched/act_bpf.c ++++ b/net/sched/act_bpf.c +@@ -220,7 +220,7 @@ static int tcf_bpf_init_from_efd(struct + + bpf_fd = nla_get_u32(tb[TCA_ACT_BPF_FD]); + +- fp = bpf_prog_get(bpf_fd); ++ fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_ACT, false); + if (IS_ERR(fp)) + return PTR_ERR(fp); + +--- a/net/sched/cls_bpf.c ++++ b/net/sched/cls_bpf.c +@@ -267,7 +267,7 @@ static int cls_bpf_prog_from_efd(struct + + bpf_fd = nla_get_u32(tb[TCA_BPF_FD]); + +- fp = bpf_prog_get(bpf_fd); ++ fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, false); + if (IS_ERR(fp)) + return PTR_ERR(fp); + +--- a/net/8021q/vlan_core.c ++++ b/net/8021q/vlan_core.c +@@ -50,7 +50,7 @@ bool vlan_do_receive(struct sk_buff **sk + } + + skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci); +- skb->vlan_tci = 0; ++ __vlan_hwaccel_clear_tag(skb); + + rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats); + +--- a/net/ipv4/ip_tunnel_core.c ++++ b/net/ipv4/ip_tunnel_core.c +@@ -128,7 +128,7 @@ int iptunnel_pull_header(struct sk_buff + secpath_reset(skb); + skb_clear_hash_if_not_l4(skb); + skb_dst_drop(skb); +- skb->vlan_tci = 0; ++ __vlan_hwaccel_clear_tag(skb); + skb_set_queue_mapping(skb, 0); + skb->pkt_type = PACKET_HOST; + +--- a/net/bridge/br_netfilter_hooks.c ++++ b/net/bridge/br_netfilter_hooks.c +@@ -673,10 +673,8 @@ static int br_nf_push_frag_xmit(struct n + return 0; + } + +- if (data->vlan_tci) { +- skb->vlan_tci = data->vlan_tci; +- skb->vlan_proto = data->vlan_proto; +- } ++ if (data->vlan_proto) ++ 
__vlan_hwaccel_put_tag(skb, data->vlan_proto, data->vlan_tci); + + skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size); + __skb_push(skb, data->encap_size); +@@ -740,8 +738,13 @@ static int br_nf_dev_queue_xmit(struct n + + data = this_cpu_ptr(&brnf_frag_data_storage); + +- data->vlan_tci = skb->vlan_tci; +- data->vlan_proto = skb->vlan_proto; ++ if (skb_vlan_tag_present(skb)) { ++ data->vlan_tci = skb->vlan_tci; ++ data->vlan_proto = skb->vlan_proto; ++ } else { ++ data->vlan_proto = 0; ++ } ++ + data->encap_size = nf_bridge_encap_header_len(skb); + data->size = ETH_HLEN + data->encap_size; + +--- a/net/bridge/br_private.h ++++ b/net/bridge/br_private.h +@@ -751,7 +751,7 @@ static inline int br_vlan_get_tag(const + int err = 0; + + if (skb_vlan_tag_present(skb)) { +- *vid = skb_vlan_tag_get(skb) & VLAN_VID_MASK; ++ *vid = skb_vlan_tag_get_id(skb); + } else { + *vid = 0; + err = -EINVAL; +--- a/net/bridge/br_vlan.c ++++ b/net/bridge/br_vlan.c +@@ -354,7 +354,7 @@ struct sk_buff *br_handle_vlan(struct ne + } + } + if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED) +- skb->vlan_tci = 0; ++ __vlan_hwaccel_clear_tag(skb); + + out: + return skb; +@@ -420,8 +420,8 @@ static bool __allowed_ingress(struct net + __vlan_hwaccel_put_tag(skb, proto, pvid); + else + /* Priority-tagged Frame. +- * At this point, We know that skb->vlan_tci had +- * VLAN_TAG_PRESENT bit and its VID field was 0x000. ++ * At this point, we know that skb->vlan_tci VID ++ * field was 0. + * We update only VID field and preserve PCP field. 
+ */ + skb->vlan_tci |= pvid; +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3988,7 +3988,7 @@ ncls: + * and set skb->priority like in vlan_do_receive() + * For the time being, just ignore Priority Code Point + */ +- skb->vlan_tci = 0; ++ __vlan_hwaccel_clear_tag(skb); + } + + type = skb->protocol; +@@ -4211,7 +4211,9 @@ static void gro_list_prepare(struct napi + } + + diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; +- diffs |= p->vlan_tci ^ skb->vlan_tci; ++ diffs |= skb_vlan_tag_present(p) ^ skb_vlan_tag_present(skb); ++ if (skb_vlan_tag_present(p)) ++ diffs |= p->vlan_tci ^ skb->vlan_tci; + diffs |= skb_metadata_dst_cmp(p, skb); + if (maclen == ETH_HLEN) + diffs |= compare_ether_header(skb_mac_header(p), +@@ -4452,7 +4454,7 @@ static void napi_reuse_skb(struct napi_s + __skb_pull(skb, skb_headlen(skb)); + /* restore the reserve we had after netdev_alloc_skb_ip_align() */ + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); +- skb->vlan_tci = 0; ++ __vlan_hwaccel_clear_tag(skb); + skb->dev = napi->dev; + skb->skb_iif = 0; + skb->encapsulation = 0; +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -4491,7 +4491,7 @@ int skb_vlan_pop(struct sk_buff *skb) + int err; + + if (likely(skb_vlan_tag_present(skb))) { +- skb->vlan_tci = 0; ++ __vlan_hwaccel_clear_tag(skb); + } else { + if (unlikely((skb->protocol != htons(ETH_P_8021Q) && + skb->protocol != htons(ETH_P_8021AD)) || +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -1484,6 +1484,14 @@ do { \ + lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0); \ + } while (0) + ++#ifdef CONFIG_LOCKDEP ++static inline bool lockdep_sock_is_held(struct sock *sk) ++{ ++ return lockdep_is_held(&sk->sk_lock) || ++ lockdep_is_held(&sk->sk_lock.slock); ++} ++#endif ++ + void lock_sock_nested(struct sock *sk, int subclass); + + static inline void lock_sock(struct sock *sk) diff --git a/feeds/ipq807x/ipq807x/patches/200-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch 
b/feeds/ipq807x/ipq807x/patches/210-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch similarity index 89% rename from feeds/ipq807x/ipq807x/patches/200-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch rename to feeds/ipq807x/ipq807x/patches/210-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch index 44a781b70..961140aab 100644 --- a/feeds/ipq807x/ipq807x/patches/200-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch +++ b/feeds/ipq807x/ipq807x/patches/210-v5.12-net-extract-napi-poll-functionality-to-__napi_poll.patch @@ -18,7 +18,7 @@ Signed-off-by: David S. Miller --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -6352,15 +6352,10 @@ void netif_napi_del(struct napi_struct * +@@ -6322,15 +6322,10 @@ void netif_napi_del(struct napi_struct * } EXPORT_SYMBOL(netif_napi_del); @@ -35,7 +35,7 @@ Signed-off-by: David S. Miller weight = n->weight; /* This NAPI_STATE_SCHED test is for avoiding a race -@@ -6378,7 +6373,7 @@ static int napi_poll(struct napi_struct +@@ -6348,7 +6343,7 @@ static int napi_poll(struct napi_struct WARN_ON_ONCE(work > weight); if (likely(work < weight)) @@ -44,7 +44,7 @@ Signed-off-by: David S. Miller /* Drivers must not modify the NAPI state if they * consume the entire weight. In such cases this code -@@ -6387,7 +6382,7 @@ static int napi_poll(struct napi_struct +@@ -6357,7 +6352,7 @@ static int napi_poll(struct napi_struct */ if (unlikely(napi_disable_pending(n))) { napi_complete(n); @@ -53,7 +53,7 @@ Signed-off-by: David S. Miller } if (n->gro_bitmask) { -@@ -6405,12 +6400,29 @@ static int napi_poll(struct napi_struct +@@ -6375,12 +6370,29 @@ static int napi_poll(struct napi_struct if (unlikely(!list_empty(&n->poll_list))) { pr_warn_once("%s: Budget exhausted after napi rescheduled\n", n->dev ? 
n->dev->name : "backlog"); diff --git a/feeds/ipq807x/ipq807x/patches/201-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch b/feeds/ipq807x/ipq807x/patches/211-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch similarity index 72% rename from feeds/ipq807x/ipq807x/patches/201-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch rename to feeds/ipq807x/ipq807x/patches/211-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch index a99d62eb3..e1268d96b 100644 --- a/feeds/ipq807x/ipq807x/patches/201-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch +++ b/feeds/ipq807x/ipq807x/patches/211-v5.12-net-implement-threaded-able-napi-poll-loop-support.patch @@ -30,7 +30,7 @@ Signed-off-by: David S. Miller --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -338,6 +338,7 @@ struct napi_struct { +@@ -319,6 +319,7 @@ struct napi_struct { struct list_head dev_list; struct hlist_node napi_hash_node; unsigned int napi_id; @@ -38,33 +38,18 @@ Signed-off-by: David S. 
Miller }; enum { -@@ -348,6 +349,7 @@ enum { - NAPI_STATE_HASHED, /* In NAPI hash (busy polling possible) */ - NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ - NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ -+ NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ - }; - - enum { -@@ -358,6 +360,7 @@ enum { - NAPIF_STATE_HASHED = BIT(NAPI_STATE_HASHED), - NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), - NAPIF_STATE_IN_BUSY_POLL = BIT(NAPI_STATE_IN_BUSY_POLL), -+ NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), +@@ -326,6 +327,7 @@ enum { + NAPI_STATE_DISABLE, /* Disable pending */ + NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ + NAPI_STATE_HASHED, /* In NAPI hash */ ++ NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ }; enum gro_result { -@@ -502,20 +505,7 @@ bool napi_hash_del(struct napi_struct *n +@@ -501,13 +503,7 @@ void napi_disable(struct napi_struct *n) + * Resume NAPI from being scheduled on this context. + * Must be paired with napi_disable. */ - void napi_disable(struct napi_struct *n); - --/** -- * napi_enable - enable NAPI scheduling -- * @n: NAPI context -- * -- * Resume NAPI from being scheduled on this context. -- * Must be paired with napi_disable. -- */ -static inline void napi_enable(struct napi_struct *n) -{ - BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state)); @@ -76,34 +61,34 @@ Signed-off-by: David S. Miller /** * napi_synchronize - wait until NAPI is not running -@@ -1834,6 +1824,8 @@ enum netdev_ml_priv_type { - * - * @wol_enabled: Wake-on-LAN is enabled +@@ -1573,6 +1569,8 @@ enum netdev_priv_flags_ext { + * switch driver and used to set the phys state of the + * switch port. * + * @threaded: napi threaded mode is enabled + * * FIXME: cleanup struct net_device such that network protocol info * moves out. 
*/ -@@ -2137,6 +2129,7 @@ struct net_device { - struct lock_class_key addr_list_lock_key; - bool proto_down; - unsigned wol_enabled:1; +@@ -1852,6 +1850,7 @@ struct net_device { + struct phy_device *phydev; + struct lock_class_key *qdisc_tx_busylock; + bool proto_down; + unsigned threaded:1; }; #define to_net_dev(d) container_of(d, struct net_device, dev) --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -91,6 +91,7 @@ - #include +@@ -94,6 +94,7 @@ #include + #include #include +#include - #include - #include #include -@@ -1286,6 +1287,27 @@ void netdev_notify_peers(struct net_devi + #include + #include +@@ -1304,6 +1305,27 @@ void netdev_notify_peers(struct net_devi } EXPORT_SYMBOL(netdev_notify_peers); @@ -128,10 +113,10 @@ Signed-off-by: David S. Miller + return err; +} + - static int __dev_open(struct net_device *dev, struct netlink_ext_ack *extack) + static int __dev_open(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; -@@ -3898,6 +3920,21 @@ int gro_normal_batch __read_mostly = 8; +@@ -3248,6 +3270,21 @@ int weight_p __read_mostly = 64; static inline void ____napi_schedule(struct softnet_data *sd, struct napi_struct *napi) { @@ -153,10 +138,10 @@ Signed-off-by: David S. Miller list_add_tail(&napi->poll_list, &sd->poll_list); __raise_softirq_irqoff(NET_RX_SOFTIRQ); } -@@ -6306,6 +6343,12 @@ void netif_napi_add(struct net_device *d - set_bit(NAPI_STATE_NPSVC, &napi->state); - list_add_rcu(&napi->dev_list, &dev->napi_list); - napi_hash_add(napi); +@@ -4828,9 +4865,33 @@ void netif_napi_add(struct net_device *d + napi->poll_owner = -1; + #endif + set_bit(NAPI_STATE_SCHED, &napi->state); + /* Create kthread for this napi if dev->threaded is set. + * Clear dev->threaded if kthread creation failed so that + * threaded mode will not be enabled in napi_enable(). @@ -166,14 +151,6 @@ Signed-off-by: David S. 
Miller } EXPORT_SYMBOL(netif_napi_add); -@@ -6322,9 +6365,28 @@ void napi_disable(struct napi_struct *n) - hrtimer_cancel(&n->timer); - - clear_bit(NAPI_STATE_DISABLE, &n->state); -+ clear_bit(NAPI_STATE_THREADED, &n->state); - } - EXPORT_SYMBOL(napi_disable); - +/** + * napi_enable - enable NAPI scheduling + * @n: NAPI context @@ -192,13 +169,21 @@ Signed-off-by: David S. Miller +} +EXPORT_SYMBOL(napi_enable); + - static void flush_gro_hash(struct napi_struct *napi) + void napi_disable(struct napi_struct *n) { - int i; -@@ -6349,6 +6411,11 @@ void netif_napi_del(struct napi_struct * + might_sleep(); +@@ -4844,6 +4905,7 @@ void napi_disable(struct napi_struct *n) + hrtimer_cancel(&n->timer); - flush_gro_hash(napi); - napi->gro_bitmask = 0; + clear_bit(NAPI_STATE_DISABLE, &n->state); ++ clear_bit(NAPI_STATE_THREADED, &n->state); + } + EXPORT_SYMBOL(napi_disable); + +@@ -4855,6 +4917,11 @@ void netif_napi_del(struct napi_struct * + kfree_skb_list(napi->gro_list); + napi->gro_list = NULL; + napi->gro_count = 0; + + if (napi->thread) { + kthread_stop(napi->thread); @@ -207,7 +192,7 @@ Signed-off-by: David S. Miller } EXPORT_SYMBOL(netif_napi_del); -@@ -6428,6 +6495,51 @@ static int napi_poll(struct napi_struct +@@ -4940,6 +5007,50 @@ static int napi_poll(struct napi_struct return work; } @@ -244,7 +229,6 @@ Signed-off-by: David S. Miller + __napi_poll(napi, &repoll); + netpoll_poll_unlock(have); + -+ __kfree_skb_flush(); + local_bh_enable(); + + if (!repoll) @@ -256,6 +240,6 @@ Signed-off-by: David S. 
Miller + return 0; +} + - static __latent_entropy void net_rx_action(struct softirq_action *h) + static void net_rx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); diff --git a/feeds/ipq807x/ipq807x/patches/202-v5.12-net-add-sysfs-attribute-to-control-napi-threaded-mod.patch b/feeds/ipq807x/ipq807x/patches/212-v5.12-net-add-sysfs-attribute-to-control-napi-threaded-mod.patch similarity index 75% rename from feeds/ipq807x/ipq807x/patches/202-v5.12-net-add-sysfs-attribute-to-control-napi-threaded-mod.patch rename to feeds/ipq807x/ipq807x/patches/212-v5.12-net-add-sysfs-attribute-to-control-napi-threaded-mod.patch index d1982a85d..5db5c8171 100644 --- a/feeds/ipq807x/ipq807x/patches/202-v5.12-net-add-sysfs-attribute-to-control-napi-threaded-mod.patch +++ b/feeds/ipq807x/ipq807x/patches/212-v5.12-net-add-sysfs-attribute-to-control-napi-threaded-mod.patch @@ -23,41 +23,20 @@ Reviewed-by: Alexander Duyck Signed-off-by: David S. Miller --- ---- a/Documentation/ABI/testing/sysfs-class-net -+++ b/Documentation/ABI/testing/sysfs-class-net -@@ -301,3 +301,18 @@ Contact: netdev@vger.kernel.org - Description: - 32-bit unsigned integer counting the number of times the link has - been down -+ -+What: /sys/class/net//threaded -+Date: Jan 2021 -+KernelVersion: 5.12 -+Contact: netdev@vger.kernel.org -+Description: -+ Boolean value to control the threaded mode per device. User could -+ set this value to enable/disable threaded mode for all napi -+ belonging to this device, without the need to do device up/down. 
-+ -+ Possible values: -+ == ================================== -+ 0 threaded mode disabled for this dev -+ 1 threaded mode enabled for this dev -+ == ================================== --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -496,6 +496,8 @@ static inline bool napi_complete(struct +@@ -496,6 +496,8 @@ void napi_hash_del(struct napi_struct *n */ - bool napi_hash_del(struct napi_struct *napi); + void napi_disable(struct napi_struct *n); +int dev_set_threaded(struct net_device *dev, bool threaded); + /** - * napi_disable - prevent NAPI from scheduling - * @n: NAPI context + * napi_enable - enable NAPI scheduling + * @n: napi context --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -3924,8 +3924,9 @@ static inline void ____napi_schedule(str +@@ -3274,8 +3274,9 @@ static inline void ____napi_schedule(str if (test_bit(NAPI_STATE_THREADED, &napi->state)) { /* Paired with smp_mb__before_atomic() in @@ -69,8 +48,8 @@ Signed-off-by: David S. Miller * wake_up_process() when it's not NULL. */ thread = READ_ONCE(napi->thread); -@@ -6320,6 +6321,49 @@ static void init_gro_hash(struct napi_st - napi->gro_bitmask = 0; +@@ -4844,6 +4845,49 @@ static enum hrtimer_restart napi_watchdo + return HRTIMER_NORESTART; } +int dev_set_threaded(struct net_device *dev, bool threaded) @@ -121,7 +100,7 @@ Signed-off-by: David S. Miller { --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c -@@ -557,6 +557,45 @@ static ssize_t phys_switch_id_show(struc +@@ -486,6 +486,45 @@ static ssize_t phys_switch_id_show(struc } static DEVICE_ATTR_RO(phys_switch_id); @@ -164,13 +143,13 @@ Signed-off-by: David S. 
Miller +} +static DEVICE_ATTR_RW(threaded); + - static struct attribute *net_class_attrs[] __ro_after_init = { + static struct attribute *net_class_attrs[] = { &dev_attr_netdev_group.attr, &dev_attr_type.attr, -@@ -587,6 +626,7 @@ static struct attribute *net_class_attrs +@@ -514,6 +553,7 @@ static struct attribute *net_class_attrs + &dev_attr_phys_port_name.attr, + &dev_attr_phys_switch_id.attr, &dev_attr_proto_down.attr, - &dev_attr_carrier_up_count.attr, - &dev_attr_carrier_down_count.attr, + &dev_attr_threaded.attr, NULL, }; diff --git a/feeds/ipq807x/ipq807x/patches/203-v5.12-net-fix-race-between-napi-kthread-mode-and-busy-poll.patch b/feeds/ipq807x/ipq807x/patches/213-v5.12-net-fix-race-between-napi-kthread-mode-and-busy-poll.patch similarity index 63% rename from feeds/ipq807x/ipq807x/patches/203-v5.12-net-fix-race-between-napi-kthread-mode-and-busy-poll.patch rename to feeds/ipq807x/ipq807x/patches/213-v5.12-net-fix-race-between-napi-kthread-mode-and-busy-poll.patch index 456859f46..82f4a352d 100644 --- a/feeds/ipq807x/ipq807x/patches/203-v5.12-net-fix-race-between-napi-kthread-mode-and-busy-poll.patch +++ b/feeds/ipq807x/ipq807x/patches/213-v5.12-net-fix-race-between-napi-kthread-mode-and-busy-poll.patch @@ -27,25 +27,17 @@ Cc: Hannes Frederic Sowa --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -350,6 +350,7 @@ enum { - NAPI_STATE_NO_BUSY_POLL,/* Do not add in napi_hash, no busy polling */ - NAPI_STATE_IN_BUSY_POLL,/* sk_busy_loop() owns this NAPI */ - NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ +@@ -328,6 +328,7 @@ enum { + NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */ + NAPI_STATE_HASHED, /* In NAPI hash */ + NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/ + NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */ }; - enum { -@@ -361,6 +362,7 @@ enum { - NAPIF_STATE_NO_BUSY_POLL = BIT(NAPI_STATE_NO_BUSY_POLL), - NAPIF_STATE_IN_BUSY_POLL = 
BIT(NAPI_STATE_IN_BUSY_POLL), - NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED), -+ NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED), - }; - enum gro_result { --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -3931,6 +3931,8 @@ static inline void ____napi_schedule(str +@@ -3281,6 +3281,8 @@ static inline void ____napi_schedule(str */ thread = READ_ONCE(napi->thread); if (thread) { @@ -54,17 +46,50 @@ Cc: Hannes Frederic Sowa wake_up_process(thread); return; } -@@ -6108,7 +6110,8 @@ bool napi_complete_done(struct napi_stru +@@ -4745,12 +4747,14 @@ void __napi_complete(struct napi_struct + list_del_init(&n->poll_list); + smp_mb__before_atomic(); + clear_bit(NAPI_STATE_SCHED, &n->state); ++ clear_bit(NAPI_STATE_SCHED_THREADED, &n->state); + } + EXPORT_SYMBOL(__napi_complete); - WARN_ON_ONCE(!(val & NAPIF_STATE_SCHED)); + void napi_complete_done(struct napi_struct *n, int work_done) + { + unsigned long flags; ++ unsigned long val, new; -- new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED); -+ new = val & ~(NAPIF_STATE_MISSED | NAPIF_STATE_SCHED | -+ NAPIF_STATE_SCHED_THREADED); + /* + * don't let napi dequeue from the cpu poll list +@@ -4771,14 +4775,19 @@ void napi_complete_done(struct napi_stru + else + napi_gro_flush(n, false); + } +- if (likely(list_empty(&n->poll_list))) { +- WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state)); +- } else { +- /* If n->poll_list is not empty, we need to mask irqs */ +- local_irq_save(flags); +- __napi_complete(n); +- local_irq_restore(flags); +- } ++ ++ /* If n->poll_list is not empty, we need to mask irqs */ ++ local_irq_save(flags); ++ list_del_init(&n->poll_list); ++ local_irq_restore(flags); ++ ++ do { ++ val = READ_ONCE(n->state); ++ ++ WARN_ON_ONCE(!(val & BIT(NAPI_STATE_SCHED))); ++ new = val & ~(BIT(NAPI_STATE_SCHED) | ++ BIT(NAPI_STATE_SCHED_THREADED)); ++ } while (cmpxchg(&n->state, val, new) != val); + } + EXPORT_SYMBOL(napi_complete_done); - /* If STATE_MISSED was set, leave STATE_SCHED set, - * 
because we will call napi->poll() one more time. -@@ -6541,16 +6544,25 @@ static int napi_poll(struct napi_struct +@@ -5053,16 +5062,25 @@ static int napi_poll(struct napi_struct static int napi_thread_wait(struct napi_struct *napi) { diff --git a/feeds/ipq807x/ipq807x/patches/204-v5.12-net-fix-hangup-on-napi_disable-for-threaded-napi.patch b/feeds/ipq807x/ipq807x/patches/214-v5.12-net-fix-hangup-on-napi_disable-for-threaded-napi.patch similarity index 93% rename from feeds/ipq807x/ipq807x/patches/204-v5.12-net-fix-hangup-on-napi_disable-for-threaded-napi.patch rename to feeds/ipq807x/ipq807x/patches/214-v5.12-net-fix-hangup-on-napi_disable-for-threaded-napi.patch index 8383c27fe..6ad3ef3a9 100644 --- a/feeds/ipq807x/ipq807x/patches/204-v5.12-net-fix-hangup-on-napi_disable-for-threaded-napi.patch +++ b/feeds/ipq807x/ipq807x/patches/214-v5.12-net-fix-hangup-on-napi_disable-for-threaded-napi.patch @@ -34,7 +34,7 @@ Signed-off-by: Jakub Kicinski --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -6548,7 +6548,7 @@ static int napi_thread_wait(struct napi_ +@@ -5066,7 +5066,7 @@ static int napi_thread_wait(struct napi_ set_current_state(TASK_INTERRUPTIBLE); @@ -43,7 +43,7 @@ Signed-off-by: Jakub Kicinski /* Testing SCHED_THREADED bit here to make sure the current * kthread owns this napi and could poll on this napi. 
* Testing SCHED bit is not enough because SCHED bit might be -@@ -6566,6 +6566,7 @@ static int napi_thread_wait(struct napi_ +@@ -5084,6 +5084,7 @@ static int napi_thread_wait(struct napi_ set_current_state(TASK_INTERRUPTIBLE); } __set_current_state(TASK_RUNNING); diff --git a/feeds/ipq807x/ipq807x/patches/220-net-sched-add-clsact-qdisc.patch b/feeds/ipq807x/ipq807x/patches/220-net-sched-add-clsact-qdisc.patch new file mode 100644 index 000000000..b5aec6a16 --- /dev/null +++ b/feeds/ipq807x/ipq807x/patches/220-net-sched-add-clsact-qdisc.patch @@ -0,0 +1,439 @@ +From: Daniel Borkmann +Date: Thu, 7 Jan 2016 22:29:47 +0100 +Subject: [PATCH] net, sched: add clsact qdisc + +This work adds a generalization of the ingress qdisc as a qdisc holding +only classifiers. The clsact qdisc works on ingress, but also on egress. +In both cases, it's execution happens without taking the qdisc lock, and +the main difference for the egress part compared to prior version of [1] +is that this can be applied with _any_ underlying real egress qdisc (also +classless ones). + +Besides solving the use-case of [1], that is, allowing for more programmability +on assigning skb->priority for the mqprio case that is supported by most +popular 10G+ NICs, it also opens up a lot more flexibility for other tc +applications. The main work on classification can already be done at clsact +egress time if the use-case allows and state stored for later retrieval +f.e. again in skb->priority with major/minors (which is checked by most +classful qdiscs before consulting tc_classify()) and/or in other skb fields +like skb->tc_index for some light-weight post-processing to get to the +eventual classid in case of a classful qdisc. Another use case is that +the clsact egress part allows to have a central egress counterpart to +the ingress classifiers, so that classifiers can easily share state (e.g. +in cls_bpf via eBPF maps) for ingress and egress. 
+ +Currently, default setups like mq + pfifo_fast would require for this to +use, for example, prio qdisc instead (to get a tc_classify() run) and to +duplicate the egress classifier for each queue. With clsact, it allows +for leaving the setup as is, it can additionally assign skb->priority to +put the skb in one of pfifo_fast's bands and it can share state with maps. +Moreover, we can access the skb's dst entry (f.e. to retrieve tclassid) +w/o the need to perform a skb_dst_force() to hold on to it any longer. In +lwt case, we can also use this facility to setup dst metadata via cls_bpf +(bpf_skb_set_tunnel_key()) without needing a real egress qdisc just for +that (case of IFF_NO_QUEUE devices, for example). + +The realization can be done without any changes to the scheduler core +framework. All it takes is that we have two a-priori defined minors/child +classes, where we can mux between ingress and egress classifier list +(dev->ingress_cl_list and dev->egress_cl_list, latter stored close to +dev->_tx to avoid extra cacheline miss for moderate loads). The egress +part is a bit similar modelled to handle_ing() and patched to a noop in +case the functionality is not used. Both handlers are now called +sch_handle_ingress() and sch_handle_egress(), code sharing among the two +doesn't seem practical as there are various minor differences in both +paths, so that making them conditional in a single handler would rather +slow things down. + +Full compatibility to ingress qdisc is provided as well. Since both +piggyback on TC_H_CLSACT, only one of them (ingress/clsact) can exist +per netdevice, and thus ingress qdisc specific behaviour can be retained +for user space. This means, either a user does 'tc qdisc add dev foo ingress' +and configures ingress qdisc as usual, or the 'tc qdisc add dev foo clsact' +alternative, where both, ingress and egress classifier can be configured +as in the below example. 
ingress qdisc supports attaching classifier to any +minor number whereas clsact has two fixed minors for muxing between the +lists, therefore to not break user space setups, they are better done as +two separate qdiscs. + +I decided to extend the sch_ingress module with clsact functionality so +that commonly used code can be reused, the module is being aliased with +sch_clsact so that it can be auto-loaded properly. Alternative would have been +to add a flag when initializing ingress to alter its behaviour plus aliasing +to a different name (as it's more than just ingress). However, the first would +end up, based on the flag, choosing the new/old behaviour by calling different +function implementations to handle each anyway, the latter would require to +register ingress qdisc once again under different alias. So, this really begs +to provide a minimal, cleaner approach to have Qdisc_ops and Qdisc_class_ops +by its own that share callbacks used by both. + +Example, adding qdisc: + + # tc qdisc add dev foo clsact + # tc qdisc show dev foo + qdisc mq 0: root + qdisc pfifo_fast 0: parent :1 bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1 + qdisc pfifo_fast 0: parent :2 bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1 + qdisc pfifo_fast 0: parent :3 bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1 + qdisc pfifo_fast 0: parent :4 bands 3 priomap 1 2 2 2 1 2 0 0 1 1 1 1 1 1 1 1 + qdisc clsact ffff: parent ffff:fff1 + +Adding filters (deleting, etc works analogous by specifying ingress/egress): + + # tc filter add dev foo ingress bpf da obj bar.o sec ingress + # tc filter add dev foo egress bpf da obj bar.o sec egress + # tc filter show dev foo ingress + filter protocol all pref 49152 bpf + filter protocol all pref 49152 bpf handle 0x1 bar.o:[ingress] direct-action + # tc filter show dev foo egress + filter protocol all pref 49152 bpf + filter protocol all pref 49152 bpf handle 0x1 bar.o:[egress] direct-action + +A 'tc filter show dev foo' or 'tc filter show dev foo parent 
ffff:' will +show an empty list for clsact. Either using the parent names (ingress/egress) +or specifying the full major/minor will then show the related filter lists. + +Prior work on a mqprio prequeue() facility [1] was done mainly by John Fastabend. + + [1] http://patchwork.ozlabs.org/patch/512949/ + +Signed-off-by: Daniel Borkmann +Acked-by: John Fastabend +Signed-off-by: David S. Miller +--- + +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1770,7 +1770,9 @@ struct net_device { + #ifdef CONFIG_XPS + struct xps_dev_maps __rcu *xps_maps; + #endif +- ++#ifdef CONFIG_NET_CLS_ACT ++ struct tcf_proto __rcu *egress_cl_list; ++#endif + #ifdef CONFIG_NET_SWITCHDEV + u32 offload_fwd_mark; + #endif +--- a/include/linux/rtnetlink.h ++++ b/include/linux/rtnetlink.h +@@ -84,6 +84,11 @@ void net_inc_ingress_queue(void); + void net_dec_ingress_queue(void); + #endif + ++#ifdef CONFIG_NET_EGRESS ++void net_inc_egress_queue(void); ++void net_dec_egress_queue(void); ++#endif ++ + extern void rtnetlink_init(void); + extern void __rtnl_unlock(void); + +--- a/include/uapi/linux/pkt_sched.h ++++ b/include/uapi/linux/pkt_sched.h +@@ -72,6 +72,10 @@ struct tc_estimator { + #define TC_H_UNSPEC (0U) + #define TC_H_ROOT (0xFFFFFFFFU) + #define TC_H_INGRESS (0xFFFFFFF1U) ++#define TC_H_CLSACT TC_H_INGRESS ++ ++#define TC_H_MIN_INGRESS 0xFFF2U ++#define TC_H_MIN_EGRESS 0xFFF3U + + /* Need to corrospond to iproute2 tc/tc_core.h "enum link_layer" */ + enum tc_link_layer { +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -1697,6 +1697,22 @@ void net_dec_ingress_queue(void) + EXPORT_SYMBOL_GPL(net_dec_ingress_queue); + #endif + ++#ifdef CONFIG_NET_EGRESS ++static struct static_key egress_needed __read_mostly; ++ ++void net_inc_egress_queue(void) ++{ ++ static_key_slow_inc(&egress_needed); ++} ++EXPORT_SYMBOL_GPL(net_inc_egress_queue); ++ ++void net_dec_egress_queue(void) ++{ ++ static_key_slow_dec(&egress_needed); ++} ++EXPORT_SYMBOL_GPL(net_dec_egress_queue); ++#endif ++ 
+ static struct static_key netstamp_needed __read_mostly; + #ifdef HAVE_JUMP_LABEL + static atomic_t netstamp_needed_deferred; +@@ -2936,7 +2952,6 @@ static inline int __dev_xmit_skb(struct + bool contended; + int rc; + +- qdisc_pkt_len_init(skb); + qdisc_calculate_pkt_len(skb, q); + /* + * Heuristic to force contended enqueues to serialize on a +@@ -3028,6 +3043,49 @@ int dev_loopback_xmit(struct net *net, s + } + EXPORT_SYMBOL(dev_loopback_xmit); + ++#ifdef CONFIG_NET_EGRESS ++static struct sk_buff * ++sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) ++{ ++ struct tcf_proto *cl = rcu_dereference_bh(dev->egress_cl_list); ++ struct tcf_result cl_res; ++ ++ if (!cl) ++ return skb; ++ ++ /* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set ++ * earlier by the caller. ++ */ ++ qdisc_bstats_cpu_update(cl->q, skb); ++ ++ switch (tc_classify(skb, cl, &cl_res, false)) { ++ case TC_ACT_OK: ++ case TC_ACT_RECLASSIFY: ++ skb->tc_index = TC_H_MIN(cl_res.classid); ++ break; ++ case TC_ACT_SHOT: ++ qdisc_qstats_cpu_drop(cl->q); ++ *ret = NET_XMIT_DROP; ++ goto drop; ++ case TC_ACT_STOLEN: ++ case TC_ACT_QUEUED: ++ *ret = NET_XMIT_SUCCESS; ++drop: ++ kfree_skb(skb); ++ return NULL; ++ case TC_ACT_REDIRECT: ++ /* No need to push/pop skb's mac_header here on egress! 
*/ ++ skb_do_redirect(skb); ++ *ret = NET_XMIT_SUCCESS; ++ return NULL; ++ default: ++ break; ++ } ++ ++ return skb; ++} ++#endif /* CONFIG_NET_EGRESS */ ++ + static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb) + { + #ifdef CONFIG_XPS +@@ -3152,6 +3210,17 @@ static int __dev_queue_xmit(struct sk_bu + + skb_update_prio(skb); + ++ qdisc_pkt_len_init(skb); ++#ifdef CONFIG_NET_CLS_ACT ++ skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); ++# ifdef CONFIG_NET_EGRESS ++ if (static_key_false(&egress_needed)) { ++ skb = sch_handle_egress(skb, &rc, dev); ++ if (!skb) ++ goto out; ++ } ++# endif ++#endif + /* If device/qdisc don't need skb->dst, release it right now while + * its hot in this cpu cache. + */ +@@ -3173,9 +3242,6 @@ static int __dev_queue_xmit(struct sk_bu + txq = netdev_pick_tx(dev, skb, accel_priv); + q = rcu_dereference_bh(txq->qdisc); + +-#ifdef CONFIG_NET_CLS_ACT +- skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS); +-#endif + trace_net_dev_queue(skb); + if (q->enqueue) { + rc = __dev_xmit_skb(skb, q, dev, txq); +@@ -3750,9 +3816,9 @@ int (*br_fdb_test_addr_hook)(struct net_ + EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook); + #endif + +-static inline struct sk_buff *handle_ing(struct sk_buff *skb, +- struct packet_type **pt_prev, +- int *ret, struct net_device *orig_dev) ++static inline struct sk_buff * ++sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, ++ struct net_device *orig_dev) + { + #ifdef CONFIG_NET_CLS_ACT + struct tcf_proto *cl = rcu_dereference_bh(skb->dev->ingress_cl_list); +@@ -3974,7 +4040,7 @@ another_round: + skip_taps: + #ifdef CONFIG_NET_INGRESS + if (static_key_false(&ingress_needed)) { +- skb = handle_ing(skb, &pt_prev, &ret, orig_dev); ++ skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev); + if (!skb) + goto out; + +--- a/net/Kconfig ++++ b/net/Kconfig +@@ -54,6 +54,9 @@ config COMPAT_NETLINK_MESSAGES + config NET_INGRESS + bool + ++config NET_EGRESS ++ bool ++ + menu "Networking 
options" + + source "net/packet/Kconfig" +--- a/net/sched/cls_bpf.c ++++ b/net/sched/cls_bpf.c +@@ -291,7 +291,7 @@ static int cls_bpf_prog_from_efd(struct + prog->bpf_name = name; + prog->filter = fp; + +- if (fp->dst_needed) ++ if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS)) + netif_keep_dst(qdisc_dev(tp->q)); + + return 0; +--- a/net/sched/Kconfig ++++ b/net/sched/Kconfig +@@ -342,15 +342,21 @@ config NET_SCH_PIE + If unsure, say N. + + config NET_SCH_INGRESS +- tristate "Ingress Qdisc" ++ tristate "Ingress/classifier-action Qdisc" + depends on NET_CLS_ACT + select NET_INGRESS ++ select NET_EGRESS + ---help--- +- Say Y here if you want to use classifiers for incoming packets. ++ Say Y here if you want to use classifiers for incoming and/or outgoing ++ packets. This qdisc doesn't do anything else besides running classifiers, ++ which can also have actions attached to them. In case of outgoing packets, ++ classifiers that this qdisc holds are executed in the transmit path ++ before real enqueuing to an egress qdisc happens. ++ + If unsure, say Y. + +- To compile this code as a module, choose M here: the +- module will be called sch_ingress. ++ To compile this code as a module, choose M here: the module will be ++ called sch_ingress with alias of sch_clsact. 
+ + config NET_SCH_PLUG + tristate "Plug network traffic until release (PLUG)" +--- a/net/sched/sch_ingress.c ++++ b/net/sched/sch_ingress.c +@@ -1,4 +1,5 @@ +-/* net/sched/sch_ingress.c - Ingress qdisc ++/* net/sched/sch_ingress.c - Ingress and clsact qdisc ++ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version +@@ -98,17 +99,100 @@ static struct Qdisc_ops ingress_qdisc_op + .owner = THIS_MODULE, + }; + ++static unsigned long clsact_get(struct Qdisc *sch, u32 classid) ++{ ++ switch (TC_H_MIN(classid)) { ++ case TC_H_MIN(TC_H_MIN_INGRESS): ++ case TC_H_MIN(TC_H_MIN_EGRESS): ++ return TC_H_MIN(classid); ++ default: ++ return 0; ++ } ++} ++ ++static unsigned long clsact_bind_filter(struct Qdisc *sch, ++ unsigned long parent, u32 classid) ++{ ++ return clsact_get(sch, classid); ++} ++ ++static struct tcf_proto __rcu **clsact_find_tcf(struct Qdisc *sch, ++ unsigned long cl) ++{ ++ struct net_device *dev = qdisc_dev(sch); ++ ++ switch (cl) { ++ case TC_H_MIN(TC_H_MIN_INGRESS): ++ return &dev->ingress_cl_list; ++ case TC_H_MIN(TC_H_MIN_EGRESS): ++ return &dev->egress_cl_list; ++ default: ++ return NULL; ++ } ++} ++ ++static int clsact_init(struct Qdisc *sch, struct nlattr *opt) ++{ ++ net_inc_ingress_queue(); ++ net_inc_egress_queue(); ++ ++ sch->flags |= TCQ_F_CPUSTATS; ++ ++ return 0; ++} ++ ++static void clsact_destroy(struct Qdisc *sch) ++{ ++ struct net_device *dev = qdisc_dev(sch); ++ ++ tcf_destroy_chain(&dev->ingress_cl_list); ++ tcf_destroy_chain(&dev->egress_cl_list); ++ ++ net_dec_ingress_queue(); ++ net_dec_egress_queue(); ++} ++ ++static const struct Qdisc_class_ops clsact_class_ops = { ++ .leaf = ingress_leaf, ++ .get = clsact_get, ++ .put = ingress_put, ++ .walk = ingress_walk, ++ .tcf_chain = clsact_find_tcf, ++ .bind_tcf = clsact_bind_filter, ++ .unbind_tcf = ingress_put, ++}; ++ ++static struct Qdisc_ops 
clsact_qdisc_ops __read_mostly = { ++ .cl_ops = &clsact_class_ops, ++ .id = "clsact", ++ .init = clsact_init, ++ .destroy = clsact_destroy, ++ .dump = ingress_dump, ++ .owner = THIS_MODULE, ++}; ++ + static int __init ingress_module_init(void) + { +- return register_qdisc(&ingress_qdisc_ops); ++ int ret; ++ ++ ret = register_qdisc(&ingress_qdisc_ops); ++ if (!ret) { ++ ret = register_qdisc(&clsact_qdisc_ops); ++ if (ret) ++ unregister_qdisc(&ingress_qdisc_ops); ++ } ++ ++ return ret; + } + + static void __exit ingress_module_exit(void) + { + unregister_qdisc(&ingress_qdisc_ops); ++ unregister_qdisc(&clsact_qdisc_ops); + } + + module_init(ingress_module_init); + module_exit(ingress_module_exit); + ++MODULE_ALIAS("sch_clsact"); + MODULE_LICENSE("GPL"); diff --git a/feeds/ipq807x/kmod-sched-cake/Makefile b/feeds/ipq807x/kmod-sched-cake/Makefile new file mode 100644 index 000000000..cf859745b --- /dev/null +++ b/feeds/ipq807x/kmod-sched-cake/Makefile @@ -0,0 +1,43 @@ +# +# Copyright (C) 2016 LEDE +# +# This is free software, licensed under the GNU General Public License v2. +# See /LICENSE for more information. 
+# + +include $(TOPDIR)/rules.mk +include $(INCLUDE_DIR)/kernel.mk + +PKG_NAME:=sched-cake +PKG_RELEASE:=1 + +PKG_SOURCE_PROTO:=git +PKG_SOURCE_URL:=https://github.com/dtaht/sch_cake.git +PKG_MIRROR_HASH:=8bb4fa43368be5b5839a350419701b0bb3881b1641e037affea42630d75e56e6 +PKG_SOURCE_DATE:=2021-07-09 +PKG_SOURCE_VERSION:=d9e1398cc9091e9e7c7a740361e4617b75c24427 +#PKG_MIRROR_HASH:=5bf06a804824db36ae393fc174aeec7b12633176e05a765c0931b39df5bd34df +PKG_MAINTAINER:=Kevin Darbyshire-Bryant + +include $(INCLUDE_DIR)/package.mk + +define KernelPackage/sched-cake + SUBMENU:=Network Support + TITLE:=Cake fq_codel/blue derived shaper + URL:=https://github.com/dtaht/sch_cake + FILES:=$(PKG_BUILD_DIR)/sch_cake.ko + AUTOLOAD:=$(call AutoLoad,75,sch_cake) + DEPENDS:=+kmod-ipt-conntrack +endef + +include $(INCLUDE_DIR)/kernel-defaults.mk + +define KernelPackage/sched-cake/description + Common Applications Kept Enhanced fq_codel/blue derived shaper +endef + +define Build/Compile + $(KERNEL_MAKE) SUBDIRS="$(PKG_BUILD_DIR)" modules +endef + +$(eval $(call KernelPackage,sched-cake)) diff --git a/feeds/ipq807x/kmod-sched-cake/patches/100-compat.patch b/feeds/ipq807x/kmod-sched-cake/patches/100-compat.patch new file mode 100644 index 000000000..aaa1272b9 --- /dev/null +++ b/feeds/ipq807x/kmod-sched-cake/patches/100-compat.patch @@ -0,0 +1,20 @@ +Index: sched-cake-2021-07-09-d9e1398c/cobalt_compat.h +=================================================================== +--- sched-cake-2021-07-09-d9e1398c.orig/cobalt_compat.h ++++ sched-cake-2021-07-09-d9e1398c/cobalt_compat.h +@@ -95,15 +95,6 @@ static inline unsigned int __tcp_hdrlen( + } + #endif + +-#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE +-static inline int skb_try_make_writable(struct sk_buff *skb, +- unsigned int write_len) +-{ +- return skb_cloned(skb) && !skb_clone_writable(skb, write_len) && +- pskb_expand_head(skb, 0, 0, GFP_ATOMIC); +-} +-#endif +- + #if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE + static inline int 
skb_mac_offset(const struct sk_buff *skb) + { diff --git a/feeds/ipq807x/qca-nss-clients/Makefile b/feeds/ipq807x/qca-nss-clients/Makefile index be2756a27..683baf2de 100644 --- a/feeds/ipq807x/qca-nss-clients/Makefile +++ b/feeds/ipq807x/qca-nss-clients/Makefile @@ -5,9 +5,9 @@ PKG_NAME:=qca-nss-clients PKG_SOURCE_PROTO:=git PKG_BRANCH:=master PKG_RELEASE:=2 -PKG_SOURCE_URL:=https://git.codelinaro.org/clo/qsdk/oss/lklm/nss-clients -PKG_MIRROR_HASH:=95bae8cc23fe950aae5d6b6fd402b6ab4339782ffb08eb29d249105fcb9022bf -PKG_VERSION:=30742bb1decd5fe7b4e01be8081ab0a99c1f6888 +PKG_SOURCE_URL:=https://source.codeaurora.org/quic/qsdk/oss/lklm/nss-clients/ +PKG_MIRROR_HASH:=802bf8b2dac8da0549e108b873afd982d127370c07d6574ece71f902eafe7698 +PKG_VERSION:=153998d70fdba508a59a28c13a606032cbf32686 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION) @@ -602,7 +602,6 @@ define Build/Compile SoC="$(subtarget)" \ DTLSMGR_DIR="$(DTLSMGR_DIR)" \ IPSECMGR_DIR="$(IPSECMGR_DIR)" \ - KBUILD_MODPOST_WARN=1 \ modules endef diff --git a/feeds/ipq807x/qca-nss-dp/Makefile b/feeds/ipq807x/qca-nss-dp/Makefile index 547452ea3..60df6dc53 100644 --- a/feeds/ipq807x/qca-nss-dp/Makefile +++ b/feeds/ipq807x/qca-nss-dp/Makefile @@ -5,13 +5,6 @@ PKG_NAME:=qca-nss-dp PKG_SOURCE_PROTO:=git PKG_BRANCH:=master PKG_RELEASE:=1 -PKG_SOURCE_URL:=https://git.codelinaro.org/clo/qsdk/oss/lklm/nss-dp -PKG_MIRROR_HASH:=dc5e870bf781d052399e8bbc0aa3d6593abeeff29304b64c685584f09fd29519 -PKG_VERSION:=480f036cc96d4e5faa426cfcf90fa7e64dff87e8 - -PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION) -PKG_SOURCE_VERSION:=$(PKG_VERSION) include $(INCLUDE_DIR)/package.mk @@ -19,8 +12,7 @@ define KernelPackage/qca-nss-dp SECTION:=kernel CATEGORY:=Kernel modules SUBMENU:=Network Devices - 
DEPENDS:=@TARGET_ipq807x||TARGET_ipq_ipq807x_64||TARGET_ipq807x||TARGET_ipq_ipq60xx||TARGET_ipq_ipq60xx_64||TARGET_ipq60xx||TARGET_ipq_ipq50xx||TARGET_ipq_ipq50xx_64||TARGET_ipq50xx\ - +kmod-qca-ssdk + DEPENDS:=@TARGET_ipq807x +kmod-qca-ssdk TITLE:=Kernel driver for NSS data plane FILES:=$(PKG_BUILD_DIR)/qca-nss-dp.ko AUTOLOAD:=$(call AutoLoad,31,qca-nss-dp) @@ -44,7 +36,7 @@ NSS_DP_HAL_DIR:=$(PKG_BUILD_DIR)/hal hal_arch:=$(subtarget) define Build/Configure - $(LN) $(NSS_DP_HAL_DIR)/soc_ops/$(hal_arch)/nss_$(hal_arch).h \ + $(LN) $(NSS_DP_HAL_DIR)/arch/$(hal_arch)/nss_$(hal_arch).h \ $(PKG_BUILD_DIR)/exports/nss_dp_arch.h endef @@ -53,8 +45,7 @@ define Build/Compile CROSS_COMPILE="$(TARGET_CROSS)" \ ARCH="$(LINUX_KARCH)" \ M="$(PKG_BUILD_DIR)" \ - EXTRA_CFLAGS="$(EXTRA_CFLAGS)" SoC="$(hal_arch)" \ - KBUILD_MODPOST_WARN=1 \ + EXTRA_CFLAGS="$(EXTRA_CFLAGS)" SoC="$(subtarget)" \ modules endef diff --git a/feeds/ipq807x/qca-nss-dp/Makefile.orig b/feeds/ipq807x/qca-nss-dp/Makefile.orig deleted file mode 100644 index 976ffc4bc..000000000 --- a/feeds/ipq807x/qca-nss-dp/Makefile.orig +++ /dev/null @@ -1,59 +0,0 @@ -include $(TOPDIR)/rules.mk -include $(INCLUDE_DIR)/kernel.mk - -PKG_NAME:=qca-nss-dp -PKG_SOURCE_PROTO:=git -PKG_BRANCH:=master -PKG_RELEASE:=1 -PKG_SOURCE_URL:=https://git.codelinaro.org/clo/qsdk/oss/lklm/nss-dp -PKG_MIRROR_HASH:=dc5e870bf781d052399e8bbc0aa3d6593abeeff29304b64c685584f09fd29519 -PKG_VERSION:=480f036cc96d4e5faa426cfcf90fa7e64dff87e8 - -PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION) -PKG_SOURCE_VERSION:=$(PKG_VERSION) - -include $(INCLUDE_DIR)/package.mk - -define KernelPackage/qca-nss-dp - SECTION:=kernel - CATEGORY:=Kernel modules - SUBMENU:=Network Devices - DEPENDS:=@TARGET_ipq807x +kmod-qca-ssdk - TITLE:=Kernel driver for NSS data plane - FILES:=$(PKG_BUILD_DIR)/qca-nss-dp.ko - AUTOLOAD:=$(call AutoLoad,31,qca-nss-dp) -endef - -define KernelPackage/qca-nss-dp/Description -This package contains a 
NSS data plane driver for QCA chipset -endef - -define Build/InstallDev - mkdir -p $(1)/usr/include/qca-nss-dp - $(CP) $(PKG_BUILD_DIR)/exports/* $(1)/usr/include/qca-nss-dp/ -endef - -EXTRA_CFLAGS+= \ - -I$(STAGING_DIR)/usr/include/qca-ssdk - -subtarget:=$(SUBTARGET) - -NSS_DP_HAL_DIR:=$(PKG_BUILD_DIR)/hal -hal_arch:=$(subtarget) - -define Build/Configure - $(LN) $(NSS_DP_HAL_DIR)/soc_ops/$(hal_arch)/nss_$(hal_arch).h \ - $(PKG_BUILD_DIR)/exports/nss_dp_arch.h -endef - -define Build/Compile - $(MAKE) -C "$(LINUX_DIR)" \ - CROSS_COMPILE="$(TARGET_CROSS)" \ - ARCH="$(LINUX_KARCH)" \ - M="$(PKG_BUILD_DIR)" \ - EXTRA_CFLAGS="$(EXTRA_CFLAGS)" SoC=""$(subtarget)"" \ - modules -endef - -$(eval $(call KernelPackage,qca-nss-dp)) diff --git a/feeds/ipq807x/qca-nss-dp/src/Makefile b/feeds/ipq807x/qca-nss-dp/src/Makefile new file mode 100644 index 000000000..04e6a4531 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/Makefile @@ -0,0 +1,56 @@ +################################################### +# Makefile for the NSS data plane driver +################################################### + +obj ?= . 
+ +obj-m += qca-nss-dp.o + +qca-nss-dp-objs += nss_dp_attach.o \ + nss_dp_ethtools.o \ + nss_dp_main.o + +ifneq ($(CONFIG_NET_SWITCHDEV),) +qca-nss-dp-objs += nss_dp_switchdev.o +endif + +ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64 ipq60xx ipq60xx_64)) +qca-nss-dp-objs += hal/edma/edma_cfg.o \ + hal/edma/edma_data_plane.o \ + hal/edma/edma_tx_rx.o \ + hal/gmac_hal_ops/qcom/qcom_if.o \ + hal/gmac_hal_ops/syn/xgmac/syn_if.o +endif + +NSS_DP_INCLUDE = -I$(obj)/include -I$(obj)/exports -I$(obj)/gmac_hal_ops/include \ + -I$(obj)/hal/include + +ifeq ($(SoC),$(filter $(SoC),ipq50xx ipq50xx_64)) +NSS_DP_INCLUDE += -I$(obj)/hal/gmac_hal_ops/syn/gmac +endif + +ccflags-y += $(NSS_DP_INCLUDE) +ccflags-y += -Wall -Werror + +ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64 ipq60xx ipq60xx_64)) +ccflags-y += -DNSS_DP_PPE_SUPPORT +endif + +ifeq ($(SoC),$(filter $(SoC),ipq60xx ipq60xx_64)) +qca-nss-dp-objs += hal/arch/ipq60xx/nss_ipq60xx.o +ccflags-y += -DNSS_DP_IPQ60XX +endif + +ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64)) +qca-nss-dp-objs += hal/arch/ipq807x/nss_ipq807x.o +ccflags-y += -DNSS_DP_IPQ807X -DNSS_DP_EDMA_TX_SMALL_PKT_WAR +endif + +ifeq ($(SoC),$(filter $(SoC),ipq50xx ipq50xx_64)) +qca-nss-dp-objs += hal/arch/ipq50xx/nss_ipq50xx.o \ + hal/gmac_hal_ops/syn/gmac/syn_if.o \ + hal/syn_gmac_dp/syn_data_plane.o \ + hal/syn_gmac_dp/syn_dp_tx_rx.o \ + hal/syn_gmac_dp/syn_dp_cfg.o +ccflags-y += -DNSS_DP_IPQ50XX +endif diff --git a/feeds/ipq807x/qca-nss-dp/src/exports/nss_dp_api_if.h b/feeds/ipq807x/qca-nss-dp/src/exports/nss_dp_api_if.h new file mode 100644 index 000000000..2710b790e --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/exports/nss_dp_api_if.h @@ -0,0 +1,219 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_dp_api_if.h + * nss-dp exported structures/apis. + * + * This file declares all the public interfaces + * for NSS data-plane driver. + */ + +#ifndef __NSS_DP_API_IF_H +#define __NSS_DP_API_IF_H + +#include "nss_dp_arch.h" + +/** + * @addtogroup nss_dp_subsystem + * @{ + */ + +/* + * NSS DP status + */ +#define NSS_DP_SUCCESS 0 +#define NSS_DP_FAILURE -1 + +/* + * NSS DP platform specific defines + */ +#define NSS_DP_START_IFNUM NSS_DP_HAL_START_IFNUM + /**< First GMAC interface number (0/1) depending on SoC. */ +#define NSS_DP_MAX_MTU_SIZE NSS_DP_HAL_MAX_MTU_SIZE +#define NSS_DP_MAX_PACKET_LEN NSS_DP_HAL_MAX_PACKET_LEN +#define NSS_DP_MAX_INTERFACES (NSS_DP_HAL_MAX_PORTS + NSS_DP_HAL_START_IFNUM) + /**< Last interface index for the SoC, to be used by qca-nss-drv. */ + +/* + * NSS PTP service code + */ +#define NSS_PTP_EVENT_SERVICE_CODE 0x9 + +/** + * nss_dp_data_plane_ctx + * Data plane context base class. + */ +struct nss_dp_data_plane_ctx { + struct net_device *dev; +}; + +/** + * nss_dp_gmac_stats + * The per-GMAC statistics structure. 
+ */ +struct nss_dp_gmac_stats { + struct nss_dp_hal_gmac_stats stats; +}; + +/** + * nss_dp_data_plane_ops + * Per data-plane ops structure. + * + * Default would be slowpath and can be overridden by nss-drv + */ +struct nss_dp_data_plane_ops { + int (*init)(struct nss_dp_data_plane_ctx *dpc); + int (*open)(struct nss_dp_data_plane_ctx *dpc, uint32_t tx_desc_ring, + uint32_t rx_desc_ring, uint32_t mode); + int (*close)(struct nss_dp_data_plane_ctx *dpc); + int (*link_state)(struct nss_dp_data_plane_ctx *dpc, + uint32_t link_state); + int (*mac_addr)(struct nss_dp_data_plane_ctx *dpc, uint8_t *addr); + int (*change_mtu)(struct nss_dp_data_plane_ctx *dpc, uint32_t mtu); + netdev_tx_t (*xmit)(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *os_buf); + void (*set_features)(struct nss_dp_data_plane_ctx *dpc); + int (*pause_on_off)(struct nss_dp_data_plane_ctx *dpc, + uint32_t pause_on); + int (*vsi_assign)(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi); + int (*vsi_unassign)(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi); + int (*rx_flow_steer)(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb, + uint32_t cpu, bool is_add); + void (*get_stats)(struct nss_dp_data_plane_ctx *dpc, struct nss_dp_gmac_stats *stats); + int (*deinit)(struct nss_dp_data_plane_ctx *dpc); +}; + +/** + * nss_dp_receive + * Called by overlay drivers to deliver packets to nss-dp. + * + * @datatypes + * net_device + * sk_buff + * napi_struct + * + * @param[in] netdev Pointer to netdev structure on which packet is received. + * @param[in] skb Pointer to the received packet. + * @param[in] napi Pointer to napi context. + */ +void nss_dp_receive(struct net_device *netdev, struct sk_buff *skb, + struct napi_struct *napi); + +/** + * nss_dp_is_in_open_state + * Returns if a data plane is opened or not. + * + * @datatypes + * net_device + * + * @param[in] netdev Pointer to netdev structure. 
+ * + * @return + * bool + */ +bool nss_dp_is_in_open_state(struct net_device *netdev); + +/** + * nss_dp_override_data_palne + * API to allow overlay drivers to override the data plane. + * + * @datatypes + * net_device + * nss_dp_data_plane_ops + * nss_dp_data_plane_ctx + * + * @param[in] netdev Pointer to netdev structure. + * @param[in] dp_ops Pointer to respective data plane ops structure. + * @param[in] dpc Pointer to data plane context. + * + * @return + * int + */ +int nss_dp_override_data_plane(struct net_device *netdev, + struct nss_dp_data_plane_ops *dp_ops, + struct nss_dp_data_plane_ctx *dpc); + +/** + * nss_dp_start_data_plane + * Dataplane API to inform netdev when it is ready to start. + * + * @datatypes + * net_device + * nss_dp_data_plane_ctx + * + * @param[in] netdev Pointer to netdev structure. + * @param[in] dpc Pointer to data plane context. + */ +void nss_dp_start_data_plane(struct net_device *netdev, + struct nss_dp_data_plane_ctx *dpc); + +/** + * nss_dp_restore_data_plane + * Called by overlay drivers to detach itself from nss-dp. + * + * @datatypes + * net_device + * + * @param[in] netdev Pointer to netdev structure. + */ +void nss_dp_restore_data_plane(struct net_device *netdev); + +/** + * nss_dp_get_netdev_by_nss_if_num + * Returns the net device of the corresponding id if it exists. + * + * @datatypes + * int + * + * @param[in] interface ID of the physical mac port. + * + * @return + * Pointer to netdev structure. + */ +struct net_device *nss_dp_get_netdev_by_nss_if_num(int if_num); + +/** + * nss_phy_tstamp_rx_buf + * Receive timestamp packet. + * + * @datatypes + * sk_buff + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] skb Pointer to the packet. + */ +void nss_phy_tstamp_rx_buf(void *app_data, struct sk_buff *skb); + +/** + * nss_phy_tstamp_tx_buf + * Transmit timestamp packet + * + * @datatypes + * net_device + * sk_buff + * + * @param[in] net_device Pointer to netdev structure. 
+ * @param[in] skb Pointer to the packet. + */ +void nss_phy_tstamp_tx_buf(struct net_device *ndev, struct sk_buff *skb); + +/** + *@} + */ + +#endif /** __NSS_DP_API_IF_H */ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq50xx/nss_ipq50xx.c b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq50xx/nss_ipq50xx.c new file mode 100644 index 000000000..8ddfee8d9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq50xx/nss_ipq50xx.c @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include +#include "nss_dp_hal.h" + +/* + * nss_dp_hal_tcsr_base_get + * Reads TCSR base address from DTS + */ +static uint32_t nss_dp_hal_tcsr_base_get(void) +{ + uint32_t tcsr_base_addr = 0; + struct device_node *dp_cmn; + + /* + * Get reference to NSS dp common device node + */ + dp_cmn = of_find_node_by_name(NULL, "nss-dp-common"); + if (!dp_cmn) { + pr_info("%s: NSS DP common node not found\n", __func__); + return 0; + } + + if (of_property_read_u32(dp_cmn, "qcom,tcsr-base", &tcsr_base_addr)) { + pr_err("%s: error reading TCSR base\n", __func__); + } + of_node_put(dp_cmn); + + return tcsr_base_addr; +} + +/* + * nss_dp_hal_tcsr_set + * Sets the TCSR axi cache override register + */ +static void nss_dp_hal_tcsr_set(void) +{ + void __iomem *tcsr_addr = NULL; + uint32_t tcsr_base; + int err; + + tcsr_base = nss_dp_hal_tcsr_base_get(); + if (!tcsr_base) { + pr_err("%s: Unable to get TCSR base address\n", __func__); + return; + } + + /* + * Check if Trust Zone is enabled in the system. + * If yes, we need to go through SCM API call to program TCSR register. + * If TZ is not enabled, we can write to the register directly. + */ + if (qcom_scm_is_available()) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + err = qcom_scm_tcsr_reg_write((tcsr_base + TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET), + TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE); +#else + err = qti_scm_tcsr_reg_write((tcsr_base + TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET), + TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE); +#endif + if (err) { + pr_err("%s: SCM TCSR write error: %d\n", __func__, err); + } + } else { + tcsr_addr = ioremap_nocache((tcsr_base + TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET), + TCSR_GMAC_AXI_CACHE_OVERRIDE_REG_SIZE); + if (!tcsr_addr) { + pr_err("%s: ioremap failed\n", __func__); + return; + } + writel(TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE, tcsr_addr); + iounmap(tcsr_addr); + } +} + +/* + * nss_dp_hal_get_data_plane_ops + * Return the data plane ops for GMAC data plane. 
+ */ +struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void) +{ + return &nss_dp_gmac_ops; +} + +/* + * nss_dp_hal_clk_enable + * Function to enable GCC_SNOC_GMAC_AXI_CLK. + * + * These clocks are required for GMAC operations. + */ +void nss_dp_hal_clk_enable(struct nss_dp_dev *dp_priv) +{ + struct platform_device *pdev = dp_priv->pdev; + struct device *dev = &pdev->dev; + struct clk *gmac_clk = NULL; + int err; + + gmac_clk = devm_clk_get(dev, NSS_SNOC_GMAC_AXI_CLK); + if (IS_ERR(gmac_clk)) { + pr_err("%s: cannot get clock: %s\n", __func__, + NSS_SNOC_GMAC_AXI_CLK); + return; + } + + err = clk_prepare_enable(gmac_clk); + if (err) { + pr_err("%s: cannot enable clock: %s, err: %d\n", __func__, + NSS_SNOC_GMAC_AXI_CLK, err); + return; + } +} + +/* + * nss_dp_hal_init + * Sets the gmac ops based on the GMAC type. + */ +bool nss_dp_hal_init(void) +{ + nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_GMAC); + + /* + * Program the global GMAC AXI Cache override register + * for optimized AXI DMA operation. + */ + nss_dp_hal_tcsr_set(); + return true; +} + +/* + * nss_dp_hal_cleanup + * Sets the gmac ops to NULL. + */ +void nss_dp_hal_cleanup(void) +{ + nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_GMAC); +} diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq50xx/nss_ipq50xx.h b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq50xx/nss_ipq50xx.h new file mode 100644 index 000000000..cae6407ce --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq50xx/nss_ipq50xx.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __NSS_DP_ARCH_H__ +#define __NSS_DP_ARCH_H__ + +#define NSS_DP_HAL_MAX_PORTS 2 +#define NSS_DP_HAL_CPU_NUM 2 +#define NSS_DP_HAL_START_IFNUM 0 +#define NSS_DP_GMAC_NORMAL_FRAME_MTU 1500 +#define NSS_DP_GMAC_MINI_JUMBO_FRAME_MTU 1978 +#define NSS_DP_GMAC_FULL_JUMBO_FRAME_MTU 9000 +#define NSS_DP_HAL_MAX_MTU_SIZE NSS_DP_GMAC_FULL_JUMBO_FRAME_MTU +#define NSS_DP_HAL_MAX_PACKET_LEN 65535 + +/* + * TCSR_GMAC_AXI_CACHE_OVERRIDE register size + */ +#define TCSR_GMAC_AXI_CACHE_OVERRIDE_REG_SIZE 4 + +/* + * TCSR_GMAC_AXI_CACHE_OVERRIDE Register offset + */ +#define TCSR_GMAC_AXI_CACHE_OVERRIDE_OFFSET 0x6224 + +/* + * Value for TCSR_GMAC_AXI_CACHE_OVERRIDE register + */ +#define TCSR_GMAC_AXI_CACHE_OVERRIDE_VALUE 0x05050505 + +/* + * GCC_SNOC_GMAC_AXI_CLOCK + */ +#define NSS_SNOC_GMAC_AXI_CLK "nss-snoc-gmac-axi-clk" + +/** + * nss_dp_hal_gmac_stats + * The per-GMAC statistics structure. 
+ */ +struct nss_dp_hal_gmac_stats { + uint64_t rx_bytes; /**< Number of RX bytes */ + uint64_t rx_packets; /**< Number of RX packets */ + uint64_t rx_errors; /**< Number of RX errors */ + uint64_t rx_receive_errors; /**< Number of RX receive errors */ + uint64_t rx_descriptor_errors; /**< Number of RX descriptor errors */ + uint64_t rx_late_collision_errors; + /**< Number of RX late collision errors */ + uint64_t rx_dribble_bit_errors; /**< Number of RX dribble bit errors */ + uint64_t rx_length_errors; /**< Number of RX length errors */ + uint64_t rx_ip_header_errors; /**< Number of RX IP header errors read from rxdec */ + uint64_t rx_ip_payload_errors; /**< Number of RX IP payload errors */ + uint64_t rx_no_buffer_errors; /**< Number of RX no-buffer errors */ + uint64_t rx_transport_csum_bypassed; + /**< Number of RX packets where the transport checksum was bypassed */ + uint64_t tx_bytes; /**< Number of TX bytes */ + uint64_t tx_packets; /**< Number of TX packets */ + uint64_t tx_collisions; /**< Number of TX collisions */ + uint64_t tx_errors; /**< Number of TX errors */ + uint64_t tx_jabber_timeout_errors; + /**< Number of TX jabber timeout errors */ + uint64_t tx_frame_flushed_errors; + /**< Number of TX frame flushed errors */ + uint64_t tx_loss_of_carrier_errors; + /**< Number of TX loss of carrier errors */ + uint64_t tx_no_carrier_errors; /**< Number of TX no carrier errors */ + uint64_t tx_late_collision_errors; + /**< Number of TX late collision errors */ + uint64_t tx_excessive_collision_errors; + /**< Number of TX excessive collision errors */ + uint64_t tx_excessive_deferral_errors; + /**< Number of TX excessive deferral errors */ + uint64_t tx_underflow_errors; /**< Number of TX underflow errors */ + uint64_t tx_ip_header_errors; /**< Number of TX IP header errors */ + uint64_t tx_ip_payload_errors; /**< Number of TX IP payload errors */ + uint64_t tx_dropped; /**< Number of TX dropped packets */ + uint64_t hw_errs[10]; /**< GMAC DMA error counters 
*/ + uint64_t rx_missed; /**< Number of RX packets missed by the DMA */ + uint64_t fifo_overflows; /**< Number of RX FIFO overflows signalled by the DMA */ + uint64_t rx_scatter_errors; /**< Number of scattered frames received by the DMA */ + uint64_t tx_ts_create_errors; /**< Number of tx timestamp creation errors */ + uint64_t gmac_total_ticks; /**< Total clock ticks spend inside the GMAC */ + uint64_t gmac_worst_case_ticks; /**< Worst case iteration of the GMAC in ticks */ + uint64_t gmac_iterations; /**< Number of iterations around the GMAC */ + uint64_t tx_pause_frames; /**< Number of pause frames sent by the GMAC */ + uint64_t mmc_rx_overflow_errors; + /**< Number of RX overflow errors */ + uint64_t mmc_rx_watchdog_timeout_errors; + /**< Number of RX watchdog timeout errors */ + uint64_t mmc_rx_crc_errors; /**< Number of RX CRC errors */ + uint64_t mmc_rx_ip_header_errors; + /**< Number of RX IP header errors read from MMC counter*/ + uint64_t mmc_rx_octets_g; + /**< Number of good octets received */ + uint64_t mmc_rx_ucast_frames; /**< Number of Unicast frames received */ + uint64_t mmc_rx_bcast_frames; /**< Number of Bcast frames received */ + uint64_t mmc_rx_mcast_frames; /**< Number of Mcast frames received */ + uint64_t mmc_rx_undersize; + /**< Number of RX undersize frames */ + uint64_t mmc_rx_oversize; + /**< Number of RX oversize frames */ + uint64_t mmc_rx_jabber; /**< Number of jabber frames */ + uint64_t mmc_rx_octets_gb; + /**< Number of good/bad octets */ + uint64_t mmc_rx_frag_frames_g; /**< Number of good ipv4 frag frames */ + uint64_t mmc_tx_octets_g; /**< Number of good octets sent */ + uint64_t mmc_tx_ucast_frames; /**< Number of Unicast frames sent*/ + uint64_t mmc_tx_bcast_frames; /**< Number of Broadcast frames sent */ + uint64_t mmc_tx_mcast_frames; /**< Number of Multicast frames sent */ + uint64_t mmc_tx_deferred; /**< Number of Deferred frames sent */ + uint64_t mmc_tx_single_col; /**< Number of single collisions */ + uint64_t 
mmc_tx_multiple_col; /**< Number of multiple collisions */ + uint64_t mmc_tx_octets_gb; /**< Number of good/bad octets sent*/ +}; + +extern struct nss_dp_data_plane_ops nss_dp_gmac_ops; + +#endif /* __NSS_DP_ARCH_H__ */ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq60xx/nss_ipq60xx.c b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq60xx/nss_ipq60xx.c new file mode 100644 index 000000000..dab4276e4 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq60xx/nss_ipq60xx.c @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "nss_dp_hal.h" +#include "edma.h" + +/* + * nss_dp_hal_get_data_plane_ops() + * Return the data plane ops for edma data plane. + */ +struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void) +{ + return &nss_dp_edma_ops; +} + +/* + * nss_dp_hal_init() + * Initialize EDMA and set gmac ops. + */ +bool nss_dp_hal_init(void) +{ + nss_dp_hal_set_gmac_ops(&qcom_hal_ops, GMAC_HAL_TYPE_QCOM); + nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_XGMAC); + + if (edma_init()) { + return false; + } + return true; +} + +/* + * nss_dp_hal_cleanup() + * Cleanup EDMA and set gmac ops to NULL. 
+ */ +void nss_dp_hal_cleanup(void) +{ + nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_QCOM); + nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_XGMAC); + edma_cleanup(false); +} diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq60xx/nss_ipq60xx.h b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq60xx/nss_ipq60xx.h new file mode 100644 index 000000000..26dc76726 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq60xx/nss_ipq60xx.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __NSS_DP_ARCH_H__ +#define __NSS_DP_ARCH_H__ + +#define NSS_DP_HAL_MAX_PORTS 5 +#define NSS_DP_HAL_CPU_NUM 4 +#define NSS_DP_HAL_START_IFNUM 1 +#define NSS_DP_HAL_MAX_MTU_SIZE 9216 +#define NSS_DP_HAL_MAX_PACKET_LEN 65535 +#define NSS_DP_PREHEADER_SIZE 32 + +/** + * nss_dp_hal_gmac_stats + * The per-GMAC statistics structure. 
+ */ +struct nss_dp_hal_gmac_stats { +}; + +#endif /* __NSS_DP_ARCH_H__ */ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq807x/nss_ipq807x.c b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq807x/nss_ipq807x.c new file mode 100644 index 000000000..dab4276e4 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq807x/nss_ipq807x.c @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "nss_dp_hal.h" +#include "edma.h" + +/* + * nss_dp_hal_get_data_plane_ops() + * Return the data plane ops for edma data plane. + */ +struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void) +{ + return &nss_dp_edma_ops; +} + +/* + * nss_dp_hal_init() + * Initialize EDMA and set gmac ops. + */ +bool nss_dp_hal_init(void) +{ + nss_dp_hal_set_gmac_ops(&qcom_hal_ops, GMAC_HAL_TYPE_QCOM); + nss_dp_hal_set_gmac_ops(&syn_hal_ops, GMAC_HAL_TYPE_SYN_XGMAC); + + if (edma_init()) { + return false; + } + return true; +} + +/* + * nss_dp_hal_cleanup() + * Cleanup EDMA and set gmac ops to NULL. 
+ */ +void nss_dp_hal_cleanup(void) +{ + nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_QCOM); + nss_dp_hal_set_gmac_ops(NULL, GMAC_HAL_TYPE_SYN_XGMAC); + edma_cleanup(false); +} diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq807x/nss_ipq807x.h b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq807x/nss_ipq807x.h new file mode 100644 index 000000000..6926e562e --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/arch/ipq807x/nss_ipq807x.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __NSS_DP_ARCH_H__ +#define __NSS_DP_ARCH_H__ + +#define NSS_DP_HAL_MAX_PORTS 6 +#define NSS_DP_HAL_CPU_NUM 4 +#define NSS_DP_HAL_START_IFNUM 1 +#define NSS_DP_HAL_MAX_MTU_SIZE 9216 +#define NSS_DP_HAL_MAX_PACKET_LEN 65535 +#define NSS_DP_PREHEADER_SIZE 32 + +/** + * nss_dp_hal_gmac_stats + * The per-GMAC statistics structure. + */ +struct nss_dp_hal_gmac_stats { +}; + +#endif /* __NSS_DP_ARCH_H__ */ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_cfg.c b/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_cfg.c new file mode 100644 index 000000000..d2563c860 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_cfg.c @@ -0,0 +1,967 @@ +/* + * Copyright (c) 2016-2020, The Linux Foundation. 
All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER + * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE + * USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include +#include +#include +#include +#include +#include + +#include "nss_dp_dev.h" +#include "edma_regs.h" +#include "edma_data_plane.h" + +#define EDMA_HW_RESET_ID "edma_rst" + +/* + * edma_cleanup_rxfill_ring_res() + * Cleanup resources for one RxFill ring + */ +static void edma_cleanup_rxfill_ring_res(struct edma_hw *ehw, + struct edma_rxfill_ring *rxfill_ring) +{ + struct platform_device *pdev = ehw->pdev; + struct sk_buff *skb; + uint16_t cons_idx, curr_idx; + struct edma_rxfill_desc *rxfill_desc; + uint32_t reg_data = 0; + struct edma_rx_preheader *rxph = NULL; + int store_idx; + + /* + * Read RXFILL ring producer index + */ + reg_data = edma_reg_read(EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->id)); + curr_idx = reg_data & EDMA_RXFILL_PROD_IDX_MASK; + + /* + * Read RXFILL ring consumer index + */ + reg_data = edma_reg_read(EDMA_REG_RXFILL_CONS_IDX(rxfill_ring->id)); + cons_idx = reg_data & EDMA_RXFILL_CONS_IDX_MASK; + + while (curr_idx != cons_idx) { + /* + * Get RXFILL descriptor + */ + rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, cons_idx); + + /* + * Get Rx preheader + */ + rxph = (struct edma_rx_preheader *) + phys_to_virt(rxfill_desc->buffer_addr); + + dma_unmap_single(&pdev->dev, 
rxfill_desc->buffer_addr, + EDMA_RX_BUFF_SIZE, DMA_FROM_DEVICE); + + /* + * Get sk_buff and free it + */ + store_idx = rxph->opaque; + skb = ehw->rx_skb_store[store_idx]; + ehw->rx_skb_store[store_idx] = NULL; + dev_kfree_skb_any(skb); + cons_idx++; + if (cons_idx == rxfill_ring->count) + cons_idx = 0; + } + + /* + * Free RXFILL ring descriptors + */ + dma_free_coherent(&pdev->dev, + (sizeof(struct edma_rxfill_desc) + * rxfill_ring->count), + rxfill_ring->desc, rxfill_ring->dma); +} + +/* + * edma_setup_rxfill_ring_res() + * Setup resources for one RxFill ring + */ +static int edma_setup_rxfill_ring_res(struct edma_hw *ehw, + struct edma_rxfill_ring *rxfill_ring) +{ + struct platform_device *pdev = ehw->pdev; + + /* + * Allocate RxFill ring descriptors + */ + rxfill_ring->desc = dma_alloc_coherent(&pdev->dev, + (sizeof(struct edma_rxfill_desc) + * rxfill_ring->count), + &rxfill_ring->dma, GFP_KERNEL); + if (!rxfill_ring->desc) { + pr_warn("Descriptor alloc for RXFILL ring %u failed\n", + rxfill_ring->id); + return -ENOMEM; + } + + spin_lock_init(&rxfill_ring->lock); + return 0; +} + +/* + * edma_setup_rxdesc_ring_res() + * Setup resources for one RxDesc ring + */ +static int edma_setup_rxdesc_ring_res(struct edma_hw *ehw, + struct edma_rxdesc_ring *rxdesc_ring) +{ + struct platform_device *pdev = ehw->pdev; + + /* + * Allocate RxDesc ring descriptors + */ + rxdesc_ring->desc = dma_alloc_coherent(&pdev->dev, + (sizeof(struct edma_rxdesc_desc) + * rxdesc_ring->count), + &rxdesc_ring->dma, GFP_KERNEL); + if (!rxdesc_ring->desc) { + pr_warn("Descriptor alloc for RXDESC ring %u failed\n", + rxdesc_ring->id); + return -ENOMEM; + } + + return 0; +} + +/* + * edma_cleanup_rxdesc_ring_res() + * Cleanup resources for RxDesc ring + */ +static void edma_cleanup_rxdesc_ring_res(struct edma_hw *ehw, + struct edma_rxdesc_ring *rxdesc_ring) +{ + struct platform_device *pdev = ehw->pdev; + struct sk_buff *skb; + struct edma_rxdesc_desc *rxdesc_desc; + struct edma_rx_preheader *rxph 
= NULL; + uint16_t prod_idx = 0; + uint16_t cons_idx = 0; + int store_idx; + + cons_idx = edma_reg_read(EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->id)) + & EDMA_RXDESC_CONS_IDX_MASK; + + prod_idx = edma_reg_read(EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->id)) + & EDMA_RXDESC_PROD_IDX_MASK; + + /* + * Free any buffers assigned to any descriptors + */ + while (cons_idx != prod_idx) { + rxdesc_desc = EDMA_RXDESC_DESC(rxdesc_ring, cons_idx); + + rxph = (struct edma_rx_preheader *) + phys_to_virt(rxdesc_desc->buffer_addr); + + dma_unmap_single(&pdev->dev, rxdesc_desc->buffer_addr, + EDMA_RX_BUFF_SIZE, DMA_FROM_DEVICE); + store_idx = rxph->opaque; + skb = ehw->rx_skb_store[store_idx]; + ehw->rx_skb_store[store_idx] = NULL; + dev_kfree_skb_any(skb); + + /* + * Update consumer index + */ + if (++cons_idx == rxdesc_ring->count) + cons_idx = 0; + } + + /* + * Free RXDESC ring descriptors + */ + dma_free_coherent(&pdev->dev, + (sizeof(struct edma_rxdesc_desc) + * rxdesc_ring->count), + rxdesc_ring->desc, rxdesc_ring->dma); +} + +/* + * edma_cleanup_txcmpl_ring_res() + * Cleanup resources for one TxCmpl ring + */ +static void edma_cleanup_txcmpl_ring_res(struct edma_hw *ehw, + struct edma_txcmpl_ring *txcmpl_ring) +{ + struct platform_device *pdev = ehw->pdev; + + /* + * Free any buffers assigned to any descriptors + */ + edma_clean_tx(ehw, txcmpl_ring); + + /* + * Free TxCmpl ring descriptors + */ + dma_free_coherent(&pdev->dev, + (sizeof(struct edma_txcmpl_desc) + * txcmpl_ring->count), + txcmpl_ring->desc, txcmpl_ring->dma); +} + +/* + * edma_setup_txcmpl_ring_res() + * Setup resources for one TxCmpl ring + */ +static int edma_setup_txcmpl_ring_res(struct edma_hw *ehw, + struct edma_txcmpl_ring *txcmpl_ring) +{ + struct platform_device *pdev = ehw->pdev; + + /* + * Allocate TxCmpl ring descriptors + */ + txcmpl_ring->desc = dma_alloc_coherent(&pdev->dev, + (sizeof(struct edma_txcmpl_desc) + * txcmpl_ring->count), + &txcmpl_ring->dma, GFP_KERNEL); + + if (!txcmpl_ring->desc) { + 
pr_warn("Descriptor alloc for TXCMPL ring %u failed\n", + txcmpl_ring->id); + + return -ENOMEM; + } + + return 0; +} + +/* + * edma_cleanup_txdesc_ring_res() + * Cleanup resources for one TxDesc ring + */ +static void edma_cleanup_txdesc_ring_res(struct edma_hw *ehw, + struct edma_txdesc_ring *txdesc_ring) +{ + struct platform_device *pdev = ehw->pdev; + struct sk_buff *skb = NULL; + struct edma_txdesc_desc *txdesc = NULL; + uint16_t prod_idx, cons_idx; + size_t buf_len; + uint32_t data; + int store_idx; + + /* + * Free any buffers assigned to any descriptors + */ + data = edma_reg_read(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id)); + prod_idx = data & EDMA_TXDESC_PROD_IDX_MASK; + + data = edma_reg_read(EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id)); + cons_idx = data & EDMA_TXDESC_CONS_IDX_MASK; + + while (cons_idx != prod_idx) { + txdesc = EDMA_TXDESC_DESC(txdesc_ring, cons_idx); + store_idx = txdesc->buffer_addr; + skb = ehw->tx_skb_store[store_idx]; + ehw->tx_skb_store[store_idx] = NULL; + + buf_len = (txdesc->word1 & EDMA_TXDESC_DATA_LENGTH_MASK) >> + EDMA_TXDESC_DATA_LENGTH_SHIFT; + + dma_unmap_single(&pdev->dev, (dma_addr_t)skb->data, + buf_len + EDMA_TX_PREHDR_SIZE, DMA_TO_DEVICE); + + dev_kfree_skb_any(skb); + cons_idx = (cons_idx + 1) & (txdesc_ring->count - 1); + cons_idx++; + if (cons_idx == txdesc_ring->count) + cons_idx = 0; + + } + + /* + * Free Tx ring descriptors + */ + dma_free_coherent(&pdev->dev, + (sizeof(struct edma_txdesc_desc) + * txdesc_ring->count), + txdesc_ring->desc, txdesc_ring->dma); + +} + +/* + * edma_setup_txdesc_ring_res() + * Setup resources for one TxDesc ring + */ +static int edma_setup_txdesc_ring_res(struct edma_hw *ehw, + struct edma_txdesc_ring *txdesc_ring) +{ + struct platform_device *pdev = ehw->pdev; + + /* + * Allocate Tx ring descriptors + */ + txdesc_ring->desc = dma_alloc_coherent(&pdev->dev, + (sizeof(struct edma_txdesc_desc) + * txdesc_ring->count), + &txdesc_ring->dma, GFP_KERNEL); + if (!txdesc_ring->desc) { + 
pr_warn("Descriptor alloc for TXDESC ring %u failed\n", + txdesc_ring->id); + return -ENOMEM; + } + + spin_lock_init(&txdesc_ring->tx_lock); + + return 0; +} + +/* + * edma_setup_ring_resources() + * Allocate/setup resources for EDMA rings + */ +static int edma_setup_ring_resources(struct edma_hw *ehw) +{ + struct edma_txcmpl_ring *txcmpl_ring = NULL; + struct edma_txdesc_ring *txdesc_ring = NULL; + struct edma_rxfill_ring *rxfill_ring = NULL; + struct edma_rxdesc_ring *rxdesc_ring = NULL; + int i; + int ret; + int index; + + /* + * Allocate TxDesc ring descriptors + */ + for (i = 0; i < ehw->txdesc_rings; i++) { + txdesc_ring = &ehw->txdesc_ring[i]; + txdesc_ring->count = EDMA_RING_SIZE; + txdesc_ring->id = ehw->txdesc_ring_start + i; + + ret = edma_setup_txdesc_ring_res(ehw, txdesc_ring); + if (ret != 0) { + while (i-- >= 0) + edma_cleanup_txdesc_ring_res(ehw, + &ehw->txdesc_ring[i]); + + return -ENOMEM; + + } + } + + /* + * Allocate TxCmpl ring descriptors + */ + for (i = 0; i < ehw->txcmpl_rings; i++) { + txcmpl_ring = &ehw->txcmpl_ring[i]; + txcmpl_ring->count = EDMA_RING_SIZE; + txcmpl_ring->id = ehw->txcmpl_ring_start + i; + + ret = edma_setup_txcmpl_ring_res(ehw, txcmpl_ring); + + if (ret != 0) { + while (i-- >= 0) + edma_cleanup_txcmpl_ring_res(ehw, + &ehw->txcmpl_ring[i]); + + goto txcmpl_mem_alloc_fail; + } + } + + /* + * Allocate Rx fill ring descriptors + */ + for (i = 0; i < ehw->rxfill_rings; i++) { + rxfill_ring = &ehw->rxfill_ring[i]; + rxfill_ring->count = EDMA_RING_SIZE; + rxfill_ring->id = ehw->rxfill_ring_start + i; + + ret = edma_setup_rxfill_ring_res(ehw, rxfill_ring); + if (ret != 0) { + while (--i >= 0) + edma_cleanup_rxfill_ring_res(ehw, + &ehw->rxfill_ring[i]); + + goto rxfill_mem_alloc_fail; + } + } + + /* + * Allocate RxDesc ring descriptors + */ + for (i = 0; i < ehw->rxdesc_rings; i++) { + rxdesc_ring = &ehw->rxdesc_ring[i]; + rxdesc_ring->count = EDMA_RING_SIZE; + rxdesc_ring->id = ehw->rxdesc_ring_start + i; + + /* + * Create a 
mapping between RX Desc ring and Rx fill ring. + * Number of fill rings are lesser than the descriptor rings + * Share the fill rings across descriptor rings. + */ + + index = ehw->rxfill_ring_start + (i % ehw->rxfill_rings); + rxdesc_ring->rxfill = + &ehw->rxfill_ring[index - ehw->rxfill_ring_start]; + + ret = edma_setup_rxdesc_ring_res(ehw, rxdesc_ring); + if (ret != 0) { + while (--i >= 0) + edma_cleanup_rxdesc_ring_res(ehw, + &ehw->rxdesc_ring[i]); + + goto rxdesc_mem_alloc_fail; + } + } + + return 0; + +rxdesc_mem_alloc_fail: + for (i = 0; i < ehw->rxfill_rings; i++) + edma_cleanup_rxfill_ring_res(ehw, &ehw->rxfill_ring[i]); + +rxfill_mem_alloc_fail: + for (i = 0; i < ehw->txcmpl_rings; i++) + edma_cleanup_txcmpl_ring_res(ehw, &ehw->txcmpl_ring[i]); + +txcmpl_mem_alloc_fail: + for (i = 0; i < ehw->txdesc_rings; i++) + edma_cleanup_txdesc_ring_res(ehw, &ehw->txdesc_ring[i]); + + return -ENOMEM; +} + +/* + * edma_free_rings() + * Free EDMA software rings + */ +static void edma_free_rings(struct edma_hw *ehw) +{ + kfree(ehw->rxfill_ring); + kfree(ehw->rxdesc_ring); + kfree(ehw->txdesc_ring); + kfree(ehw->txcmpl_ring); +} + +/* + * edma_alloc_rings() + * Allocate EDMA software rings + */ +static int edma_alloc_rings(struct edma_hw *ehw) +{ + ehw->rxfill_ring = kzalloc((sizeof(struct edma_rxfill_ring) * + ehw->rxfill_rings), GFP_KERNEL); + if (!ehw->rxfill_ring) + return -ENOMEM; + + ehw->rxdesc_ring = kzalloc((sizeof(struct edma_rxdesc_ring) * + ehw->rxdesc_rings), GFP_KERNEL); + if (!ehw->rxdesc_ring) + goto rxdesc_ring_alloc_fail; + + ehw->txdesc_ring = kzalloc((sizeof(struct edma_txdesc_ring) * + ehw->txdesc_rings), GFP_KERNEL); + if (!ehw->txdesc_ring) + goto txdesc_ring_alloc_fail; + + ehw->txcmpl_ring = kzalloc((sizeof(struct edma_txcmpl_ring) * + ehw->txcmpl_rings), GFP_KERNEL); + if (!ehw->txcmpl_ring) + goto txcmpl_ring_alloc_fail; + + pr_info("Num rings - TxDesc:%u (%u-%u) TxCmpl:%u (%u-%u)\n", + ehw->txdesc_rings, ehw->txdesc_ring_start, + 
(ehw->txdesc_ring_start + ehw->txdesc_rings - 1), + ehw->txcmpl_rings, ehw->txcmpl_ring_start, + (ehw->txcmpl_ring_start + ehw->txcmpl_rings - 1)); + + pr_info("RxDesc:%u (%u-%u) RxFill:%u (%u-%u)\n", + ehw->rxdesc_rings, ehw->rxdesc_ring_start, + (ehw->rxdesc_ring_start + ehw->rxdesc_rings - 1), + ehw->rxfill_rings, ehw->rxfill_ring_start, + (ehw->rxfill_ring_start + ehw->rxfill_rings - 1)); + + return 0; +txcmpl_ring_alloc_fail: + kfree(ehw->txdesc_ring); +txdesc_ring_alloc_fail: + kfree(ehw->rxdesc_ring); +rxdesc_ring_alloc_fail: + kfree(ehw->rxfill_ring); + return -ENOMEM; +} + +/* + * edma_cleanup_rings() + * Cleanup EDMA rings + */ +void edma_cleanup_rings(struct edma_hw *ehw) +{ + int i; + + /* + * Free any buffers assigned to any descriptors + */ + for (i = 0; i < ehw->txdesc_rings; i++) + edma_cleanup_txdesc_ring_res(ehw, &ehw->txdesc_ring[i]); + + /* + * Free Tx completion descriptors + */ + for (i = 0; i < ehw->txcmpl_rings; i++) + edma_cleanup_txcmpl_ring_res(ehw, &ehw->txcmpl_ring[i]); + + /* + * Free Rx fill ring descriptors + */ + for (i = 0; i < ehw->rxfill_rings; i++) + edma_cleanup_rxfill_ring_res(ehw, &ehw->rxfill_ring[i]); + + /* + * Free Rx completion ring descriptors + */ + for (i = 0; i < ehw->rxdesc_rings; i++) + edma_cleanup_rxdesc_ring_res(ehw, &ehw->rxdesc_ring[i]); + + edma_free_rings(ehw); +} + +/* + * edma_init_rings() + * Initialize EDMA rings + */ +static int edma_init_rings(struct edma_hw *ehw) +{ + int ret = 0; + + ret = edma_alloc_rings(ehw); + if (ret) + return ret; + + ret = edma_setup_ring_resources(ehw); + if (ret) + return ret; + + return 0; +} + +/* + * edma_configure_txdesc_ring() + * Configure one TxDesc ring + */ +static void edma_configure_txdesc_ring(struct edma_hw *ehw, + struct edma_txdesc_ring *txdesc_ring) +{ + uint32_t data = 0; + uint16_t hw_cons_idx = 0; + + /* + * Configure TXDESC ring + */ + edma_reg_write(EDMA_REG_TXDESC_BA(txdesc_ring->id), + (uint32_t)(txdesc_ring->dma & + EDMA_RING_DMA_MASK)); + + 
edma_reg_write(EDMA_REG_TXDESC_RING_SIZE(txdesc_ring->id), + (uint32_t)(txdesc_ring->count & + EDMA_TXDESC_RING_SIZE_MASK)); + + data = edma_reg_read(EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id)); + data &= ~(EDMA_TXDESC_CONS_IDX_MASK); + hw_cons_idx = data; + + data = edma_reg_read(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id)); + data &= ~(EDMA_TXDESC_PROD_IDX_MASK); + data |= hw_cons_idx & EDMA_TXDESC_PROD_IDX_MASK; + edma_reg_write(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id), data); +} + +/* + * edma_configure_txcmpl_ring() + * Configure one TxCmpl ring + */ +static void edma_configure_txcmpl_ring(struct edma_hw *ehw, + struct edma_txcmpl_ring *txcmpl_ring) +{ + uint32_t tx_mod_timer; + + /* + * Configure TxCmpl ring base address + */ + edma_reg_write(EDMA_REG_TXCMPL_BA(txcmpl_ring->id), + (uint32_t)(txcmpl_ring->dma & EDMA_RING_DMA_MASK)); + edma_reg_write(EDMA_REG_TXCMPL_RING_SIZE(txcmpl_ring->id), + (uint32_t)(txcmpl_ring->count + & EDMA_TXDESC_RING_SIZE_MASK)); + + /* + * Set TxCmpl ret mode to opaque + */ + edma_reg_write(EDMA_REG_TXCMPL_CTRL(txcmpl_ring->id), + EDMA_TXCMPL_RETMODE_OPAQUE); + + tx_mod_timer = (EDMA_TX_MOD_TIMER & EDMA_TX_MOD_TIMER_INIT_MASK) + << EDMA_TX_MOD_TIMER_INIT_SHIFT; + edma_reg_write(EDMA_REG_TX_MOD_TIMER(txcmpl_ring->id), + tx_mod_timer); + + edma_reg_write(EDMA_REG_TX_INT_CTRL(txcmpl_ring->id), 0x2); +} + +/* + * edma_configure_rxdesc_ring() + * Configure one RxDesc ring + */ +static void edma_configure_rxdesc_ring(struct edma_hw *ehw, + struct edma_rxdesc_ring *rxdesc_ring) +{ + uint32_t data; + + edma_reg_write(EDMA_REG_RXDESC_BA(rxdesc_ring->id), + (uint32_t)(rxdesc_ring->dma & 0xffffffff)); + + data = rxdesc_ring->count & EDMA_RXDESC_RING_SIZE_MASK; + data |= (ehw->rx_payload_offset & EDMA_RXDESC_PL_OFFSET_MASK) + << EDMA_RXDESC_PL_OFFSET_SHIFT; + edma_reg_write(EDMA_REG_RXDESC_RING_SIZE(rxdesc_ring->id), data); + + data = (EDMA_RX_MOD_TIMER_INIT & EDMA_RX_MOD_TIMER_INIT_MASK) + << EDMA_RX_MOD_TIMER_INIT_SHIFT; + 
edma_reg_write(EDMA_REG_RX_MOD_TIMER(rxdesc_ring->id), data); + + /* + * Enable ring. Set ret mode to 'opaque'. + */ + edma_reg_write(EDMA_REG_RX_INT_CTRL(rxdesc_ring->id), 0x2); +} + +/* + * edma_configure_rxfill_ring() + * Configure one RxFill ring + */ +static void edma_configure_rxfill_ring(struct edma_hw *ehw, + struct edma_rxfill_ring *rxfill_ring) +{ + uint32_t data = 0; + + edma_reg_write(EDMA_REG_RXFILL_BA(rxfill_ring->id), + (uint32_t)(rxfill_ring->dma & EDMA_RING_DMA_MASK)); + + data = rxfill_ring->count & EDMA_RXFILL_RING_SIZE_MASK; + edma_reg_write(EDMA_REG_RXFILL_RING_SIZE(rxfill_ring->id), data); + + /* + * Alloc Rx buffers + */ + edma_alloc_rx_buffer(ehw, rxfill_ring); +} + +/* + * edma_configure_rings() + * Configure EDMA rings + */ +static void edma_configure_rings(struct edma_hw *ehw) +{ + int i = 0; + + /* + * Initialize the store + */ + for (i = 0; i < EDMA_RING_SIZE; i++) { + ehw->tx_skb_store[i] = NULL; + ehw->rx_skb_store[i] = NULL; + } + + /* + * Configure TXDESC ring + */ + for (i = 0; i < ehw->txdesc_rings; i++) + edma_configure_txdesc_ring(ehw, &ehw->txdesc_ring[i]); + + /* + * Configure TXCMPL ring + */ + for (i = 0; i < ehw->txcmpl_rings; i++) + edma_configure_txcmpl_ring(ehw, &ehw->txcmpl_ring[i]); + + /* + * Configure RXFILL rings + */ + for (i = 0; i < ehw->rxfill_rings; i++) + edma_configure_rxfill_ring(ehw, &ehw->rxfill_ring[i]); + + /* + * Configure RXDESC ring + */ + for (i = 0; i < ehw->rxdesc_rings; i++) + edma_configure_rxdesc_ring(ehw, &ehw->rxdesc_ring[i]); +} + +/* + * edma_hw_reset() + * Reset EDMA Hardware during initialization + */ +int edma_hw_reset(struct edma_hw *ehw) +{ + struct reset_control *rst; + struct platform_device *pdev = ehw->pdev; + + rst = devm_reset_control_get(&pdev->dev, EDMA_HW_RESET_ID); + if (IS_ERR(rst)) { + pr_warn("DTS Node: %s does not exist\n", EDMA_HW_RESET_ID); + return -EINVAL; + } + + reset_control_assert(rst); + udelay(100); + + reset_control_deassert(rst); + udelay(100); + + 
pr_info("EDMA HW Reset completed succesfully\n"); + + return 0; +} + +/* + * edma_hw_init() + * EDMA hw init + */ +int edma_hw_init(struct edma_hw *ehw) +{ + int ret = 0; + int desc_index; + uint32_t i, data, reg = 0; + struct edma_rxdesc_ring *rxdesc_ring = NULL; + + data = edma_reg_read(EDMA_REG_MAS_CTRL); + pr_info("EDMA ver %d hw init\n", data); + + /* + * Setup private data structure + */ + ehw->misc_intr_mask = 0x0; + ehw->rxfill_intr_mask = EDMA_RXFILL_INT_MASK; + ehw->rxdesc_intr_mask = EDMA_RXDESC_INT_MASK_PKT_INT; + ehw->txcmpl_intr_mask = EDMA_TX_INT_MASK_PKT_INT | + EDMA_TX_INT_MASK_UGT_INT; + ehw->rx_payload_offset = EDMA_RX_PREHDR_SIZE; + ehw->active = 0; + ehw->edma_initialized = false; + + /* Reset EDMA */ + ret = edma_hw_reset(ehw); + if (ret) + return ret; + + /* + * Disable interrupts + */ + for (i = 0; i < EDMA_MAX_TXCMPL_RINGS; i++) + edma_reg_write(EDMA_REG_TX_INT_MASK(i), 0); + + for (i = 0; i < EDMA_MAX_RXFILL_RINGS; i++) + edma_reg_write(EDMA_REG_RXFILL_INT_MASK(i), 0); + + for (i = 0; i < EDMA_MAX_RXDESC_RINGS; i++) + edma_reg_write(EDMA_REG_RX_INT_CTRL(i), 0); + + /* + * Disable Rx rings + */ + for (i = 0; i < EDMA_MAX_RXDESC_RINGS; i++) { + data = edma_reg_read(EDMA_REG_RXDESC_CTRL(i)); + data &= ~EDMA_RXDESC_RX_EN; + edma_reg_write(EDMA_REG_RXDESC_CTRL(i), data); + } + + /* + * Disable RxFill Rings + */ + for (i = 0; i < EDMA_MAX_RXFILL_RINGS; i++) { + data = edma_reg_read(EDMA_REG_RXFILL_RING_EN(i)); + data &= ~EDMA_RXFILL_RING_EN; + edma_reg_write(EDMA_REG_RXFILL_RING_EN(i), data); + } + + /* + * Disable Tx rings + */ + for (desc_index = 0; desc_index < EDMA_MAX_TXDESC_RINGS; desc_index++) { + data = edma_reg_read(EDMA_REG_TXDESC_CTRL(desc_index)); + data &= ~EDMA_TXDESC_TX_EN; + edma_reg_write(EDMA_REG_TXDESC_CTRL(desc_index), data); + } + +#if defined(NSS_DP_IPQ807X) + /* + * Clear the TXDESC2CMPL_MAP_xx reg before setting up + * the mapping. This register holds TXDESC to TXFILL ring + * mapping. 
+ */ + edma_reg_write(EDMA_REG_TXDESC2CMPL_MAP_0, 0); + edma_reg_write(EDMA_REG_TXDESC2CMPL_MAP_1, 0); + edma_reg_write(EDMA_REG_TXDESC2CMPL_MAP_2, 0); + desc_index = ehw->txcmpl_ring_start; + + /* + * 3 registers to hold the completion mapping for total 24 + * TX desc rings (0-9,10-19 and rest). In each entry 3 bits hold + * the mapping for a particular TX desc ring. + */ + for (i = ehw->txdesc_ring_start; + i < ehw->txdesc_ring_end; i++) { + if (i >= 0 && i <= 9) + reg = EDMA_REG_TXDESC2CMPL_MAP_0; + else if (i >= 10 && i <= 19) + reg = EDMA_REG_TXDESC2CMPL_MAP_1; + else + reg = EDMA_REG_TXDESC2CMPL_MAP_2; + + pr_debug("Configure TXDESC:%u to use TXCMPL:%u\n", + i, desc_index); + + data = edma_reg_read(reg); + data |= (desc_index & 0x7) << ((i % 10) * 3); + edma_reg_write(reg, data); + + desc_index++; + if (desc_index == ehw->txcmpl_ring_end) + desc_index = ehw->txcmpl_ring_start; + } +#endif + + /* + * Set PPE QID to EDMA Rx ring mapping. + * When coming up use only queue 0. + * HOST EDMA rings. FW EDMA comes up and overwrites as required. + * Each entry can hold mapping for 8 PPE queues and entry size is + * 4 bytes + */ + desc_index = ehw->rxdesc_ring_start; + data = 0; + data |= (desc_index & 0xF); + edma_reg_write(EDMA_QID2RID_TABLE_MEM(0), data); + pr_debug("Configure QID2RID reg:0x%x to 0x%x\n", reg, data); + + ret = edma_init_rings(ehw); + if (ret) + return ret; + + edma_configure_rings(ehw); + + /* + * Set RXDESC2FILL_MAP_xx reg. + * There are two registers RXDESC2FILL_0 and RXDESC2FILL_1 + * 3 bits holds the rx fill ring mapping for each of the + * rx descriptor ring. 
+ */ + edma_reg_write(EDMA_REG_RXDESC2FILL_MAP_0, 0); + edma_reg_write(EDMA_REG_RXDESC2FILL_MAP_1, 0); + for (i = ehw->rxdesc_ring_start; + i < ehw->rxdesc_ring_end; i++) { + if ((i >= 0) && (i <= 9)) + reg = EDMA_REG_RXDESC2FILL_MAP_0; + else + reg = EDMA_REG_RXDESC2FILL_MAP_1; + + rxdesc_ring = &ehw->rxdesc_ring[i - ehw->rxdesc_ring_start]; + + pr_debug("Configure RXDESC:%u to use RXFILL:%u\n", + rxdesc_ring->id, rxdesc_ring->rxfill->id); + + data = edma_reg_read(reg); + data |= (rxdesc_ring->rxfill->id & 0x7) << ((i % 10) * 3); + edma_reg_write(reg, data); + } + + reg = EDMA_REG_RXDESC2FILL_MAP_0; + pr_debug("EDMA_REG_RXDESC2FILL_MAP_0: 0x%x\n", edma_reg_read(reg)); + reg = EDMA_REG_RXDESC2FILL_MAP_1; + pr_debug("EDMA_REG_RXDESC2FILL_MAP_1: 0x%x\n", edma_reg_read(reg)); + +#if defined(NSS_DP_IPQ807X) + reg = EDMA_REG_TXDESC2CMPL_MAP_0; + pr_debug("EDMA_REG_TXDESC2CMPL_MAP_0: 0x%x\n", edma_reg_read(reg)); + reg = EDMA_REG_TXDESC2CMPL_MAP_1; + pr_debug("EDMA_REG_TXDESC2CMPL_MAP_1: 0x%x\n", edma_reg_read(reg)); + reg = EDMA_REG_TXDESC2CMPL_MAP_2; + pr_debug("EDMA_REG_TXDESC2CMPL_MAP_2: 0x%x\n", edma_reg_read(reg)); +#endif + + /* + * Configure DMA request priority, DMA read burst length, + * and AXI write size. 
+ */ + data = EDMA_DMAR_BURST_LEN_SET(EDMA_BURST_LEN_ENABLE) + | EDMA_DMAR_REQ_PRI_SET(0) + | EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SET(31) + | EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SET(7) + | EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SET(7); + edma_reg_write(EDMA_REG_DMAR_CTRL, data); +#if defined(NSS_DP_IPQ60XX) + data = edma_reg_read(EDMA_REG_AXIW_CTRL); + data |= EDMA_AXIW_MAX_WR_SIZE_EN; + edma_reg_write(EDMA_REG_AXIW_CTRL, data); +#endif + + /* + * Misc error mask + */ + data = EDMA_MISC_AXI_RD_ERR_MASK_EN | + EDMA_MISC_AXI_WR_ERR_MASK_EN | + EDMA_MISC_RX_DESC_FIFO_FULL_MASK_EN | + EDMA_MISC_RX_ERR_BUF_SIZE_MASK_EN | + EDMA_MISC_TX_SRAM_FULL_MASK_EN | + EDMA_MISC_TX_CMPL_BUF_FULL_MASK_EN | + EDMA_MISC_DATA_LEN_ERR_MASK_EN; +#if defined(NSS_DP_IPQ807X) + data |= EDMA_MISC_PKT_LEN_LA_64K_MASK_EN | + EDMA_MISC_PKT_LEN_LE_40_MASK_EN; +#else + data |= EDMA_MISC_TX_TIMEOUT_MASK_EN; +#endif + edma_reg_write(EDMA_REG_MISC_INT_MASK, data); + + /* + * Global EDMA enable and padding enable + */ + data = EDMA_PORT_PAD_EN | EDMA_PORT_EDMA_EN; + edma_reg_write(EDMA_REG_PORT_CTRL, data); + + /* + * Enable Rx rings + */ + for (i = ehw->rxdesc_ring_start; i < ehw->rxdesc_ring_end; i++) { + data = edma_reg_read(EDMA_REG_RXDESC_CTRL(i)); + data |= EDMA_RXDESC_RX_EN; + edma_reg_write(EDMA_REG_RXDESC_CTRL(i), data); + } + + for (i = ehw->rxfill_ring_start; i < ehw->rxfill_ring_end; i++) { + data = edma_reg_read(EDMA_REG_RXFILL_RING_EN(i)); + data |= EDMA_RXFILL_RING_EN; + edma_reg_write(EDMA_REG_RXFILL_RING_EN(i), data); + } + + /* + * Enable Tx rings + */ + for (i = ehw->txdesc_ring_start; i < ehw->txdesc_ring_end; i++) { + data = edma_reg_read(EDMA_REG_TXDESC_CTRL(i)); + data |= EDMA_TXDESC_TX_EN; + edma_reg_write(EDMA_REG_TXDESC_CTRL(i), data); + } + + ehw->edma_initialized = true; + + return 0; +} diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_data_plane.c b/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_data_plane.c new file mode 100644 index 000000000..cbaee4fc4 --- /dev/null +++ 
b/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_data_plane.c @@ -0,0 +1,962 @@ +/* + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER + * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE + * USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include +#include +#include +#include +#include +#include + +#include "nss_dp_dev.h" +#include "edma_regs.h" +#include "edma_data_plane.h" + +/* + * EDMA hardware instance + */ +struct edma_hw edma_hw; + +/* + * edma_get_port_num_from_netdev() + * Get port number from net device + */ +static int edma_get_port_num_from_netdev(struct net_device *netdev) +{ + int i; + + for (i = 0; i < EDMA_MAX_GMACS; i++) { + /* In the port-id to netdev mapping table, port-id + * starts from 1 and table index starts from 0. + * So we return index + 1 for port-id + */ + if (edma_hw.netdev_arr[i] == netdev) + return i+1; + } + + return -1; +} + +/* + * edma_reg_read() + * Read EDMA register + */ +uint32_t edma_reg_read(uint32_t reg_off) +{ + return (uint32_t)readl(edma_hw.reg_base + reg_off); +} + +/* + * edma_reg_write() + * Write EDMA register + */ +void edma_reg_write(uint32_t reg_off, uint32_t val) +{ + writel(val, edma_hw.reg_base + reg_off); +} + +/* + * edma_disable_interrupts() + * Disable EDMA RX/TX interrupt masks. 
+ */ +static void edma_disable_interrupts(void) +{ + struct edma_rxdesc_ring *rxdesc_ring = NULL; + struct edma_rxfill_ring *rxfill_ring = NULL; + struct edma_txcmpl_ring *txcmpl_ring = NULL; + int i; + + for (i = 0; i < edma_hw.rxdesc_rings; i++) { + rxdesc_ring = &edma_hw.rxdesc_ring[i]; + edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id), + EDMA_MASK_INT_CLEAR); + } + + for (i = 0; i < edma_hw.txcmpl_rings; i++) { + txcmpl_ring = &edma_hw.txcmpl_ring[i]; + edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id), + EDMA_MASK_INT_CLEAR); + } + + for (i = 0; i < edma_hw.rxfill_rings; i++) { + rxfill_ring = &edma_hw.rxfill_ring[i]; + edma_reg_write(EDMA_REG_RXFILL_INT_MASK(rxfill_ring->id), + EDMA_MASK_INT_CLEAR); + } + + /* + * Clear MISC interrupt mask. + */ + edma_reg_write(EDMA_REG_MISC_INT_MASK, EDMA_MASK_INT_CLEAR); +} + +/* + * edma_enable_interrupts() + * Enable RX/TX EDMA interrupt masks. + */ +static void edma_enable_interrupts(void) +{ + struct edma_rxdesc_ring *rxdesc_ring = NULL; + struct edma_rxfill_ring *rxfill_ring = NULL; + struct edma_txcmpl_ring *txcmpl_ring = NULL; + int i; + + for (i = 0; i < edma_hw.rxfill_rings; i++) { + rxfill_ring = &edma_hw.rxfill_ring[i]; + edma_reg_write(EDMA_REG_RXFILL_INT_MASK(rxfill_ring->id), + edma_hw.rxfill_intr_mask); + } + + for (i = 0; i < edma_hw.txcmpl_rings; i++) { + txcmpl_ring = &edma_hw.txcmpl_ring[i]; + edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id), + edma_hw.txcmpl_intr_mask); + } + + for (i = 0; i < edma_hw.rxdesc_rings; i++) { + rxdesc_ring = &edma_hw.rxdesc_ring[i]; + edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id), + edma_hw.rxdesc_intr_mask); + } + + /* + * Enable MISC interrupt mask. 
+ */ + edma_reg_write(EDMA_REG_MISC_INT_MASK, edma_hw.misc_intr_mask); +} + +/* + * nss_dp_edma_if_open() + * Do slow path data plane open + */ +static int edma_if_open(struct nss_dp_data_plane_ctx *dpc, + uint32_t tx_desc_ring, uint32_t rx_desc_ring, + uint32_t mode) +{ + if (!dpc->dev) + return NSS_DP_FAILURE; + + /* + * Enable NAPI + */ + if (edma_hw.active++ != 0) + return NSS_DP_SUCCESS; + + napi_enable(&edma_hw.napi); + + /* + * Enable the interrupt masks. + */ + edma_enable_interrupts(); + + return NSS_DP_SUCCESS; +} + +/* + * edma_if_close() + * Do slow path data plane close + */ +static int edma_if_close(struct nss_dp_data_plane_ctx *dpc) +{ + if (--edma_hw.active != 0) + return NSS_DP_SUCCESS; + + /* + * Disable the interrupt masks. + */ + edma_disable_interrupts(); + + /* + * Disable NAPI + */ + napi_disable(&edma_hw.napi); + return NSS_DP_SUCCESS; +} + +/* + * edma_if_link_state() + */ +static int edma_if_link_state(struct nss_dp_data_plane_ctx *dpc, + uint32_t link_state) +{ + return NSS_DP_SUCCESS; +} + +/* + * edma_if_mac_addr() + */ +static int edma_if_mac_addr(struct nss_dp_data_plane_ctx *dpc, uint8_t *addr) +{ + return NSS_DP_SUCCESS; +} + +/* + * edma_if_change_mtu() + */ +static int edma_if_change_mtu(struct nss_dp_data_plane_ctx *dpc, uint32_t mtu) +{ + return NSS_DP_SUCCESS; +} + +/* + * edma_if_xmit() + * Transmit a packet using EDMA + */ +static netdev_tx_t edma_if_xmit(struct nss_dp_data_plane_ctx *dpc, + struct sk_buff *skb) +{ + struct net_device *netdev = dpc->dev; + int ret; + uint32_t tx_ring, skbq, nhead, ntail; + bool expand_skb = false; + + if (skb->len < ETH_HLEN) { + netdev_dbg(netdev, "skb->len < ETH_HLEN\n"); + goto drop; + } + + /* + * Select a Tx ring + */ + skbq = skb_get_queue_mapping(skb); + tx_ring = 0; + if ((edma_hw.txdesc_rings > 1) && (skbq > 0)) + tx_ring = edma_hw.txdesc_rings % skbq; + + /* + * Check for non-linear skb + */ + if (skb_is_nonlinear(skb)) { + netdev_dbg(netdev, "cannot Tx non-linear skb:%px\n", skb); 
+ goto drop; + } + + /* + * Check for headroom/tailroom and clone + */ + nhead = netdev->needed_headroom; + ntail = netdev->needed_tailroom; + + if (skb_cloned(skb) || + (skb_headroom(skb) < nhead) || + (skb_headroom(skb) < ntail)) { + expand_skb = true; + } + + /* + * Expand the skb. This also unclones a cloned skb. + */ + if (expand_skb && pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC)) { + netdev_dbg(netdev, "cannot expand skb:%px\n", skb); + goto drop; + } + + /* + * Transmit the packet + */ + ret = edma_ring_xmit(&edma_hw, netdev, skb, + &edma_hw.txdesc_ring[tx_ring]); + if (ret == EDMA_TX_OK) + return NETDEV_TX_OK; + + /* + * Not enough descriptors. Stop netdev Tx queue. + */ + if (ret == EDMA_TX_DESC) { + netif_stop_queue(netdev); + return NETDEV_TX_BUSY; + } + +drop: + dev_kfree_skb_any(skb); + netdev->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + +/* + * edma_if_set_features() + * Set the supported net_device features + */ +static void edma_if_set_features(struct nss_dp_data_plane_ctx *dpc) +{ + /* + * TODO - add flags to support HIGHMEM/cksum offload VLAN + * the features are enabled. + */ +} + +/* TODO - check if this is needed */ +/* + * edma_if_pause_on_off() + * Set pause frames on or off + * + * No need to send a message if we defaulted to slow path. 
+ */ +static int edma_if_pause_on_off(struct nss_dp_data_plane_ctx *dpc, + uint32_t pause_on) +{ + return NSS_DP_SUCCESS; +} + +/* + * edma_if_vsi_assign() + * assign vsi of the data plane + * + */ +static int edma_if_vsi_assign(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi) +{ + struct net_device *netdev = dpc->dev; + int32_t port_num; + + port_num = edma_get_port_num_from_netdev(netdev); + + if (port_num < 0) + return NSS_DP_FAILURE; + + if (fal_port_vsi_set(0, port_num, vsi) < 0) + return NSS_DP_FAILURE; + + return NSS_DP_SUCCESS; +} + +/* + * edma_if_vsi_unassign() + * unassign vsi of the data plane + * + */ +static int edma_if_vsi_unassign(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi) +{ + struct net_device *netdev = dpc->dev; + uint32_t port_num; + + port_num = edma_get_port_num_from_netdev(netdev); + + if (port_num < 0) + return NSS_DP_FAILURE; + + if (fal_port_vsi_set(0, port_num, 0xffff) < 0) + return NSS_DP_FAILURE; + + return NSS_DP_SUCCESS; +} + +#ifdef CONFIG_RFS_ACCEL +/* + * edma_if_rx_flow_steer() + * Flow steer of the data plane + * + * Initial receive flow steering function for data plane operation. 
+ */ +static int edma_if_rx_flow_steer(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb, + uint32_t cpu, bool is_add) +{ + return NSS_DP_SUCCESS; +} +#endif + +/* + * edma_if_deinit() + * Free edma resources + */ +static int edma_if_deinit(struct nss_dp_data_plane_ctx *dpc) +{ + /* + * Free up resources used by EDMA if all the + * interfaces have been overridden + * */ + if (edma_hw.dp_override_cnt == EDMA_MAX_GMACS - 1) { + edma_cleanup(true); + } else { + edma_hw.dp_override_cnt++; + } + + return NSS_DP_SUCCESS; +} + +/* + * edma_irq_init() + * Initialize interrupt handlers for the driver + */ +static int edma_irq_init(void) +{ + int err; + uint32_t entry_num, i; + + /* + * Get TXCMPL rings IRQ numbers + */ + entry_num = 0; + for (i = 0; i < edma_hw.txcmpl_rings; i++, entry_num++) { + edma_hw.txcmpl_intr[i] = + platform_get_irq(edma_hw.pdev, entry_num); + if (edma_hw.txcmpl_intr[i] < 0) { + pr_warn("%s: txcmpl_intr[%u] irq get failed\n", + (edma_hw.device_node)->name, i); + return -1; + } + + pr_debug("%s: txcmpl_intr[%u] = %u\n", + (edma_hw.device_node)->name, + i, edma_hw.txcmpl_intr[i]); + } + + /* + * Get RXFILL rings IRQ numbers + */ + for (i = 0; i < edma_hw.rxfill_rings; i++, entry_num++) { + edma_hw.rxfill_intr[i] = + platform_get_irq(edma_hw.pdev, entry_num); + if (edma_hw.rxfill_intr[i] < 0) { + pr_warn("%s: rxfill_intr[%u] irq get failed\n", + (edma_hw.device_node)->name, i); + return -1; + } + + pr_debug("%s: rxfill_intr[%u] = %u\n", + (edma_hw.device_node)->name, + i, edma_hw.rxfill_intr[i]); + } + + /* + * Get RXDESC rings IRQ numbers + * + */ + for (i = 0; i < edma_hw.rxdesc_rings; i++, entry_num++) { + edma_hw.rxdesc_intr[i] = + platform_get_irq(edma_hw.pdev, entry_num); + if (edma_hw.rxdesc_intr[i] < 0) { + pr_warn("%s: rxdesc_intr[%u] irq get failed\n", + (edma_hw.device_node)->name, i); + return -1; + } + + pr_debug("%s: rxdesc_intr[%u] = %u\n", + (edma_hw.device_node)->name, + i, edma_hw.rxdesc_intr[i]); + } + + /* + * Get misc IRQ 
number + */ + edma_hw.misc_intr = platform_get_irq(edma_hw.pdev, entry_num); + pr_debug("%s: misc IRQ:%u\n", + (edma_hw.device_node)->name, + edma_hw.misc_intr); + + /* + * Request IRQ for TXCMPL rings + */ + for (i = 0; i < edma_hw.txcmpl_rings; i++) { + err = request_irq(edma_hw.txcmpl_intr[i], + edma_handle_irq, IRQF_SHARED, + "edma_txcmpl", (void *)edma_hw.pdev); + if (err) { + pr_debug("TXCMPL ring IRQ:%d request failed\n", + edma_hw.txcmpl_intr[i]); + return -1; + + } + } + + /* + * Request IRQ for RXFILL rings + */ + for (i = 0; i < edma_hw.rxfill_rings; i++) { + err = request_irq(edma_hw.rxfill_intr[i], + edma_handle_irq, IRQF_SHARED, + "edma_rxfill", (void *)edma_hw.pdev); + if (err) { + pr_debug("RXFILL ring IRQ:%d request failed\n", + edma_hw.rxfill_intr[i]); + goto rx_fill_ring_intr_req_fail; + } + } + + /* + * Request IRQ for RXDESC rings + */ + for (i = 0; i < edma_hw.rxdesc_rings; i++) { + err = request_irq(edma_hw.rxdesc_intr[i], + edma_handle_irq, IRQF_SHARED, + "edma_rxdesc", (void *)edma_hw.pdev); + if (err) { + pr_debug("RXDESC ring IRQ:%d request failed\n", + edma_hw.rxdesc_intr[i]); + goto rx_desc_ring_intr_req_fail; + } + } + + /* + * Request Misc IRQ + */ + err = request_irq(edma_hw.misc_intr, edma_handle_misc_irq, + IRQF_SHARED, "edma_misc", + (void *)edma_hw.pdev); + if (err) { + pr_debug("MISC IRQ:%d request failed\n", + edma_hw.misc_intr); + goto misc_intr_req_fail; + } + + return 0; + +misc_intr_req_fail: + + /* + * Free IRQ for RXDESC rings + */ + for (i = 0; i < edma_hw.rxdesc_rings; i++) { + synchronize_irq(edma_hw.rxdesc_intr[i]); + free_irq(edma_hw.rxdesc_intr[i], + (void *)&(edma_hw.pdev)->dev); + } + +rx_desc_ring_intr_req_fail: + + /* + * Free IRQ for RXFILL rings + */ + for (i = 0; i < edma_hw.rxfill_rings; i++) { + synchronize_irq(edma_hw.rxfill_intr[i]); + free_irq(edma_hw.rxfill_intr[i], + (void *)&(edma_hw.pdev)->dev); + } + +rx_fill_ring_intr_req_fail: + + /* + * Free IRQ for TXCMPL rings + */ + for (i = 0; i < 
edma_hw.txcmpl_rings; i++) { + + synchronize_irq(edma_hw.txcmpl_intr[i]); + free_irq(edma_hw.txcmpl_intr[i], + (void *)&(edma_hw.pdev)->dev); + } + + return -1; +} + +/* + * edma_register_netdevice() + * Register netdevice with EDMA + */ +static int edma_register_netdevice(struct net_device *netdev, uint32_t macid) +{ + if (!netdev) { + pr_info("nss_dp_edma: Invalid netdev pointer %px\n", netdev); + return -EINVAL; + } + + if ((macid < EDMA_START_GMACS) || (macid > EDMA_MAX_GMACS)) { + netdev_dbg(netdev, "nss_dp_edma: Invalid macid(%d) for %s\n", + macid, netdev->name); + return -EINVAL; + } + + netdev_info(netdev, "nss_dp_edma: Registering netdev %s(qcom-id:%d) with EDMA\n", + netdev->name, macid); + + /* + * We expect 'macid' to correspond to ports numbers on + * IPQ807x. These begin from '1' and hence we subtract + * one when using it as an array index. + */ + edma_hw.netdev_arr[macid - 1] = netdev; + + /* + * NAPI add + */ + if (!edma_hw.napi_added) { + netif_napi_add(netdev, &edma_hw.napi, edma_napi, + EDMA_NAPI_WORK); + /* + * Register the interrupt handlers and enable interrupts + */ + if (edma_irq_init() < 0) + return -EINVAL; + + edma_hw.napi_added = 1; + } + + return 0; +} + +/* + * edma_if_init() + */ + +static int edma_if_init(struct nss_dp_data_plane_ctx *dpc) +{ + + struct net_device *netdev = dpc->dev; + struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev); + int ret = 0; + + /* + * Register the netdev + */ + ret = edma_register_netdevice(netdev, dp_dev->macid); + if (ret) { + netdev_dbg(netdev, + "Error registering netdevice with EDMA %s\n", + netdev->name); + return NSS_DP_FAILURE; + } + + /* + * Headroom needed for Tx preheader + */ + netdev->needed_headroom += EDMA_TX_PREHDR_SIZE; + + return NSS_DP_SUCCESS; +} + +/* + * nss_dp_edma_ops + */ +struct nss_dp_data_plane_ops nss_dp_edma_ops = { + .init = edma_if_init, + .open = edma_if_open, + .close = edma_if_close, + .link_state = edma_if_link_state, + .mac_addr = edma_if_mac_addr, + 
.change_mtu = edma_if_change_mtu, + .xmit = edma_if_xmit, + .set_features = edma_if_set_features, + .pause_on_off = edma_if_pause_on_off, + .vsi_assign = edma_if_vsi_assign, + .vsi_unassign = edma_if_vsi_unassign, +#ifdef CONFIG_RFS_ACCEL + .rx_flow_steer = edma_if_rx_flow_steer, +#endif + .deinit = edma_if_deinit, +}; + +/* + * edma_of_get_pdata() + * Read the device tree details for EDMA + */ +static int edma_of_get_pdata(struct resource *edma_res) +{ + /* + * Find EDMA node in device tree + */ + edma_hw.device_node = of_find_node_by_name(NULL, + EDMA_DEVICE_NODE_NAME); + if (!edma_hw.device_node) { + pr_warn("EDMA device tree node (%s) not found\n", + EDMA_DEVICE_NODE_NAME); + return -EINVAL; + } + + /* + * Get EDMA device node + */ + edma_hw.pdev = of_find_device_by_node(edma_hw.device_node); + if (!edma_hw.pdev) { + pr_warn("Platform device for node %px(%s) not found\n", + edma_hw.device_node, + (edma_hw.device_node)->name); + return -EINVAL; + } + + /* + * Get EDMA register resource + */ + if (of_address_to_resource(edma_hw.device_node, 0, edma_res) != 0) { + pr_warn("Unable to get register address for edma device: " + EDMA_DEVICE_NODE_NAME"\n"); + return -EINVAL; + } + + /* + * Get id of first TXDESC ring + */ + if (of_property_read_u32(edma_hw.device_node, "qcom,txdesc-ring-start", + &edma_hw.txdesc_ring_start) != 0) { + pr_warn("Read error 1st TXDESC ring (txdesc_ring_start)\n"); + return -EINVAL; + } + + /* + * Get number of TXDESC rings + */ + if (of_property_read_u32(edma_hw.device_node, "qcom,txdesc-rings", + &edma_hw.txdesc_rings) != 0) { + pr_warn("Unable to read number of txdesc rings.\n"); + return -EINVAL; + } + edma_hw.txdesc_ring_end = edma_hw.txdesc_ring_start + + edma_hw.txdesc_rings; + + /* + * Get id of first TXCMPL ring + */ + if (of_property_read_u32(edma_hw.device_node, "qcom,txcmpl-ring-start", + &edma_hw.txcmpl_ring_start) != 0) { + pr_warn("Read error 1st TXCMPL ring (txcmpl_ring_start)\n"); + return -EINVAL; + } + + /* + * Get number 
of TXCMPL rings + */ + if (of_property_read_u32(edma_hw.device_node, "qcom,txcmpl-rings", + &edma_hw.txcmpl_rings) != 0) { + pr_warn("Unable to read number of txcmpl rings.\n"); + return -EINVAL; + } + edma_hw.txcmpl_ring_end = edma_hw.txcmpl_ring_start + + edma_hw.txcmpl_rings; + + /* + * Get id of first RXFILL ring + */ + if (of_property_read_u32(edma_hw.device_node, "qcom,rxfill-ring-start", + &edma_hw.rxfill_ring_start) != 0) { + pr_warn("Read error 1st RXFILL ring (rxfill-ring-start)\n"); + return -EINVAL; + } + + /* + * Get number of RXFILL rings + */ + if (of_property_read_u32(edma_hw.device_node, "qcom,rxfill-rings", + &edma_hw.rxfill_rings) != 0) { + pr_warn("Unable to read number of rxfill rings.\n"); + return -EINVAL; + } + edma_hw.rxfill_ring_end = edma_hw.rxfill_ring_start + + edma_hw.rxfill_rings; + + /* + * Get id of first RXDESC ring + */ + if (of_property_read_u32(edma_hw.device_node, "qcom,rxdesc-ring-start", + &edma_hw.rxdesc_ring_start) != 0) { + pr_warn("Read error 1st RXDESC ring (rxdesc-ring-start)\n"); + return -EINVAL; + } + + /* + * Get number of RXDESC rings + */ + if (of_property_read_u32(edma_hw.device_node, "qcom,rxdesc-rings", + &edma_hw.rxdesc_rings) != 0) { + pr_warn("Unable to read number of rxdesc rings.\n"); + return -EINVAL; + } + edma_hw.rxdesc_ring_end = edma_hw.rxdesc_ring_start + + edma_hw.rxdesc_rings; + + return 0; +} + +/* + * edma_init() + * EDMA init + */ +int edma_init(void) +{ + int ret = 0; + struct resource res_edma; + + /* + * Get all the DTS data needed + */ + if (edma_of_get_pdata(&res_edma) < 0) { + pr_warn("Unable to get EDMA DTS data.\n"); + return -EINVAL; + } + + /* + * Request memory region for EDMA registers + */ + edma_hw.reg_resource = request_mem_region(res_edma.start, + resource_size(&res_edma), + EDMA_DEVICE_NODE_NAME); + if (!edma_hw.reg_resource) { + pr_warn("Unable to request EDMA register memory.\n"); + return -EFAULT; + } + + /* + * Remap register resource + */ + edma_hw.reg_base = 
ioremap_nocache((edma_hw.reg_resource)->start, + resource_size(edma_hw.reg_resource)); + if (!edma_hw.reg_base) { + pr_warn("Unable to remap EDMA register memory.\n"); + ret = -EFAULT; + goto edma_init_remap_fail; + } + + if (edma_hw_init(&edma_hw) != 0) { + ret = -EFAULT; + goto edma_init_hw_init_fail; + } + + platform_set_drvdata(edma_hw.pdev, (void *)&edma_hw); + + edma_hw.napi_added = 0; + + return 0; + +edma_init_hw_init_fail: + iounmap(edma_hw.reg_base); + +edma_init_remap_fail: + release_mem_region((edma_hw.reg_resource)->start, + resource_size(edma_hw.reg_resource)); + return ret; +} + +/* + * edma_disable_port() + * EDMA disable port + */ +static void edma_disable_port(void) +{ + edma_reg_write(EDMA_REG_PORT_CTRL, EDMA_DISABLE); +} + +/* + * edma_cleanup() + * EDMA cleanup + */ +void edma_cleanup(bool is_dp_override) +{ + int i; + struct edma_txcmpl_ring *txcmpl_ring = NULL; + struct edma_rxdesc_ring *rxdesc_ring = NULL; + + /* + * The cleanup can happen from data plane override + * or from module_exit, we want to cleanup only once + */ + if (!edma_hw.edma_initialized) { + /* + * Disable EDMA only at module exit time, since NSS firmware + * depends on this setting. 
+ */ + if (!is_dp_override) { + edma_disable_port(); + } + return; + } + + /* + * Disable Rx rings used by this driver + */ + for (i = edma_hw.rxdesc_ring_start; i < edma_hw.rxdesc_ring_end; i++) + edma_reg_write(EDMA_REG_RXDESC_CTRL(i), EDMA_RING_DISABLE); + + /* + * Disable Tx rings used by this driver + */ + for (i = edma_hw.txdesc_ring_start; i < edma_hw.txdesc_ring_end; i++) { + txcmpl_ring = &edma_hw.txcmpl_ring[i]; + edma_reg_write(EDMA_REG_TXDESC_CTRL(i), + EDMA_RING_DISABLE); + } + + /* + * Disable RxFill Rings used by this driver + */ + for (i = edma_hw.rxfill_ring_start; i < edma_hw.rxfill_ring_end; i++) + edma_reg_write(EDMA_REG_RXFILL_RING_EN(i), EDMA_RING_DISABLE); + + /* + * Clear interrupt mask + */ + for (i = 0; i < edma_hw.rxdesc_rings; i++) { + rxdesc_ring = &edma_hw.rxdesc_ring[i]; + edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id), + EDMA_MASK_INT_CLEAR); + } + + for (i = 0; i < edma_hw.txcmpl_rings; i++) { + txcmpl_ring = &edma_hw.txcmpl_ring[i]; + edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id), + EDMA_MASK_INT_CLEAR); + } + + edma_reg_write(EDMA_REG_MISC_INT_MASK, EDMA_MASK_INT_CLEAR); + /* + * Remove interrupt handlers and NAPI + */ + if (edma_hw.napi_added) { + + /* + * Free IRQ for TXCMPL rings + */ + for (i = 0; i < edma_hw.txcmpl_rings; i++) { + synchronize_irq(edma_hw.txcmpl_intr[i]); + free_irq(edma_hw.txcmpl_intr[i], + (void *)(edma_hw.pdev)); + } + + /* + * Free IRQ for RXFILL rings + */ + for (i = 0; i < edma_hw.rxfill_rings; i++) { + synchronize_irq(edma_hw.rxfill_intr[i]); + free_irq(edma_hw.rxfill_intr[i], + (void *)(edma_hw.pdev)); + } + + /* + * Free IRQ for RXDESC rings + */ + for (i = 0; i < edma_hw.rxdesc_rings; i++) { + synchronize_irq(edma_hw.rxdesc_intr[i]); + free_irq(edma_hw.rxdesc_intr[i], + (void *)(edma_hw.pdev)); + } + + /* + * Free Misc IRQ + */ + synchronize_irq(edma_hw.misc_intr); + free_irq(edma_hw.misc_intr, (void *)(edma_hw.pdev)); + + netif_napi_del(&edma_hw.napi); + edma_hw.napi_added = 0; + } 
+ + /* + * Disable EDMA only at module exit time, since NSS firmware + * depends on this setting. + */ + if (!is_dp_override) { + edma_disable_port(); + } + + /* + * cleanup rings and free + */ + edma_cleanup_rings(&edma_hw); + iounmap(edma_hw.reg_base); + release_mem_region((edma_hw.reg_resource)->start, + resource_size(edma_hw.reg_resource)); + + /* + * Mark initialize false, so that we do not + * try to cleanup again + */ + edma_hw.edma_initialized = false; +} diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_data_plane.h b/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_data_plane.h new file mode 100644 index 000000000..ac6593ac2 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_data_plane.h @@ -0,0 +1,287 @@ +/* + ************************************************************************** + * Copyright (c) 2016, 2018-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** +*/ + +#include "nss_dp_dev.h" + +#ifndef __NSS_DP_EDMA_DATAPLANE__ +#define __NSS_DP_EDMA_DATAPLANE__ + +#define EDMA_BUF_SIZE 2000 +#define EDMA_DEVICE_NODE_NAME "edma" +#define EDMA_RX_BUFF_SIZE (EDMA_BUF_SIZE + EDMA_RX_PREHDR_SIZE) +#define EDMA_RX_PREHDR_SIZE (sizeof(struct edma_rx_preheader)) +#define EDMA_TX_PREHDR_SIZE (sizeof(struct edma_tx_preheader)) +#define EDMA_RING_SIZE 128 +#define EDMA_NAPI_WORK 100 +#define EDMA_START_GMACS NSS_DP_START_IFNUM +#define EDMA_MAX_GMACS NSS_DP_HAL_MAX_PORTS +#define EDMA_TX_PKT_MIN_SIZE 33 /* IPQ807x EDMA needs a minimum packet size of 33 bytes */ +#if defined(NSS_DP_IPQ60XX) +#define EDMA_MAX_TXCMPL_RINGS 24 /* Max TxCmpl rings */ +#else +#define EDMA_MAX_TXCMPL_RINGS 8 /* Max TxCmpl rings */ +#endif +#define EDMA_MAX_RXDESC_RINGS 16 /* Max RxDesc rings */ +#define EDMA_MAX_RXFILL_RINGS 8 /* Max RxFill rings */ +#define EDMA_MAX_TXDESC_RINGS 24 /* Max TxDesc rings */ +#define EDMA_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) +#define EDMA_RXFILL_DESC(R, i) EDMA_GET_DESC(R, i, struct edma_rxfill_desc) +#define EDMA_RXDESC_DESC(R, i) EDMA_GET_DESC(R, i, struct edma_rxdesc_desc) +#define EDMA_TXDESC_DESC(R, i) EDMA_GET_DESC(R, i, struct edma_txdesc_desc) +#define EDMA_RXPH_SRC_INFO_TYPE_GET(rxph) (((rxph)->src_info >> 8) & 0xf0) +#define EDMA_RXPH_SERVICE_CODE_GET(rxph) (((rxph)->rx_pre4) & 0xff) + +/* + * Tx descriptor + */ +struct edma_txdesc_desc { + uint32_t buffer_addr; + /* buffer address */ + uint32_t word1; + /* more bit, TSO, preheader, pool, offset and length */ +}; + +/* + * TxCmpl descriptor + */ +struct edma_txcmpl_desc { + uint32_t buffer_addr; /* buffer address/opaque */ + uint32_t status; /* status */ +}; + +/* + * Rx descriptor + */ +struct edma_rxdesc_desc { + uint32_t buffer_addr; /* buffer address */ + uint32_t status; /* status */ +}; + +/* + * RxFill descriptor + */ +struct edma_rxfill_desc { + uint32_t buffer_addr; 
/* Buffer address */ + uint32_t word1; /* opaque_ind and buffer size */ +}; + +/* + * Tx descriptor ring + */ +struct edma_txdesc_ring { + uint32_t id; /* TXDESC ring number */ + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + spinlock_t tx_lock; /* Tx ring lock */ + uint16_t count; /* number of descriptors */ +}; + +/* + * TxCmpl ring + */ +struct edma_txcmpl_ring { + uint32_t id; /* TXCMPL ring number */ + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + uint16_t count; /* number of descriptors in the ring */ +}; + +/* + * RxFill ring + */ +struct edma_rxfill_ring { + uint32_t id; /* RXFILL ring number */ + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + spinlock_t lock; /* Rx ring lock */ + uint16_t count; /* number of descriptors in the ring */ +}; + +/* + * RxDesc ring + */ +struct edma_rxdesc_ring { + uint32_t id; /* RXDESC ring number */ + struct edma_rxfill_ring *rxfill; /* RXFILL ring used */ + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + uint16_t count; /* number of descriptors in the ring */ +}; + +/* + * EDMA Tx Preheader + */ +struct edma_tx_preheader { + uint32_t opaque; /* Opaque, contains skb pointer */ + uint16_t src_info; /* Src information */ + uint16_t dst_info; /* Dest information */ + uint32_t tx_pre2; /* SVLAN & CVLAN flag, drop prec, hash value */ + uint32_t tx_pre3; /* STAG, CTAG */ + uint32_t tx_pre4; /* CPU code, L3 & L4 offset, service code */ + uint32_t tx_pre5; /* IP addr index, ACL index */ + uint32_t tx_pre6; /* IP payload checksum, copy2cpu, timestamp, dscp */ + uint32_t tx_pre7; /* Timestamp, QoS TAG */ +}; + +/* + * EDMA Rx Preheader + */ +struct edma_rx_preheader { + uint32_t opaque; + /* Opaque, contains skb pointer*/ + uint16_t src_info; + /* Src information */ + uint16_t dst_info; + /* 
Dest information */ + uint32_t rx_pre2; + /* SVLAN & CVLAN flag, drop prec, hash value */ + uint32_t rx_pre3; + /* STAG, CTAG */ + uint32_t rx_pre4; + /* CPU code, L3 & L4 offset, service code */ + uint32_t rx_pre5; + /* IP addr index, ACL index */ + uint32_t rx_pre6; + /* IP payload checksum, copy2cpu, timestamp, dscp */ + uint32_t rx_pre7; + /* Timestamp, QoS TAG */ +}; + +enum edma_tx { + EDMA_TX_OK = 0, /* Tx success */ + EDMA_TX_DESC = 1, /* Not enough descriptors */ + EDMA_TX_FAIL = 2, /* Tx failure */ +}; + +/* + * EDMA private data structure + */ +struct edma_hw { + struct napi_struct napi; + /* napi structure */ + struct net_device *netdev_arr[EDMA_MAX_GMACS]; + /* netdev for each gmac port */ + struct device_node *device_node; + /* Device tree node */ + struct platform_device *pdev; + /* Platform device */ + void __iomem *reg_base; + /* Base register address */ + struct resource *reg_resource; + /* Memory resource */ + uint16_t rx_payload_offset; + /* start of the payload offset */ + uint32_t flags; + /* internal flags */ + int active; + /* status */ + int napi_added; + /* flag to indicate napi add status */ + + /* + * Debugfs entries + */ + struct dentry *edma_dentry; + struct dentry *txdesc_dentry; + struct dentry *txcmpl_dentry; + struct dentry *rxdesc_dentry; + + /* + * Store for tx and rx skbs + */ + struct sk_buff *rx_skb_store[EDMA_RING_SIZE]; + struct sk_buff *tx_skb_store[EDMA_RING_SIZE]; + + struct edma_rxfill_ring *rxfill_ring; + /* Rx Fill Ring, SW is producer */ + struct edma_rxdesc_ring *rxdesc_ring; + /* Rx Descriptor Ring, SW is consumer */ + struct edma_txdesc_ring *txdesc_ring; + /* Tx Descriptor Ring, SW is producer */ + struct edma_txcmpl_ring *txcmpl_ring; + /* Tx Completion Ring, SW is consumer */ + + uint32_t txdesc_rings; + /* Number of TxDesc rings */ + uint32_t txdesc_ring_start; + /* Id of first TXDESC ring */ + uint32_t txdesc_ring_end; + /* Id of the last TXDESC ring */ + uint32_t txcmpl_rings; + /* Number of TxCmpl rings */ + 
uint32_t txcmpl_ring_start; + /* Id of first TXCMPL ring */ + uint32_t txcmpl_ring_end; + /* Id of last TXCMPL ring */ + uint32_t rxfill_rings; + /* Number of RxFill rings */ + uint32_t rxfill_ring_start; + /* Id of first RxFill ring */ + uint32_t rxfill_ring_end; + /* Id of last RxFill ring */ + uint32_t rxdesc_rings; + /* Number of RxDesc rings */ + uint32_t rxdesc_ring_start; + /* Id of first RxDesc ring */ + uint32_t rxdesc_ring_end; + /* Id of last RxDesc ring */ + uint32_t txcmpl_intr[EDMA_MAX_TXCMPL_RINGS]; + /* TxCmpl ring IRQ numbers */ + uint32_t rxfill_intr[EDMA_MAX_RXFILL_RINGS]; + /* Rx fill ring IRQ numbers */ + uint32_t rxdesc_intr[EDMA_MAX_RXDESC_RINGS]; + /* Rx desc ring IRQ numbers */ + uint32_t misc_intr; + /* Misc IRQ number */ + + uint32_t tx_intr_mask; + /* Tx interrupt mask */ + uint32_t rxfill_intr_mask; + /* Rx fill ring interrupt mask */ + uint32_t rxdesc_intr_mask; + /* Rx Desc ring interrupt mask */ + uint32_t txcmpl_intr_mask; + /* Tx Cmpl ring interrupt mask */ + uint32_t misc_intr_mask; + /* misc interrupt interrupt mask */ + uint32_t dp_override_cnt; + /* number of interfaces overriden */ + bool edma_initialized; + /* flag to check initialization status */ +}; + +extern struct edma_hw edma_hw; + +uint32_t edma_reg_read(uint32_t reg_off); +void edma_reg_write(uint32_t reg_off, uint32_t val); + +int edma_alloc_rx_buffer(struct edma_hw *ehw, + struct edma_rxfill_ring *rxfill_ring); +enum edma_tx edma_ring_xmit(struct edma_hw *ehw, + struct net_device *netdev, + struct sk_buff *skb, + struct edma_txdesc_ring *txdesc_ring); +uint32_t edma_clean_tx(struct edma_hw *ehw, + struct edma_txcmpl_ring *txcmpl_ring); +irqreturn_t edma_handle_irq(int irq, void *ctx); +irqreturn_t edma_handle_misc_irq(int irq, void *ctx); +int edma_napi(struct napi_struct *napi, int budget); +void edma_cleanup_rings(struct edma_hw *ehw); +void edma_cleanup(bool is_dp_override); +int edma_hw_init(struct edma_hw *ehw); +#endif /* __NSS_DP_EDMA_DATAPLANE__ */ diff 
--git a/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_regs.h b/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_regs.h new file mode 100644 index 000000000..e724cc7cf --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_regs.h @@ -0,0 +1,454 @@ +/* + ************************************************************************** + * Copyright (c) 2016,2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** +*/ + +#ifndef __EDMA_REGS__ +#define __EDMA_REGS__ + +/* + * IPQ807x EDMA register offsets + */ +#define EDMA_REG_MAS_CTRL 0x0 +#define EDMA_REG_PORT_CTRL 0x4 +#define EDMA_REG_VLAN_CTRL 0x8 +#define EDMA_REG_RXDESC2FILL_MAP_0 0x18 +#define EDMA_REG_RXDESC2FILL_MAP_1 0x1c +#define EDMA_REG_TXQ_CTRL 0x20 +#define EDMA_REG_TXQ_CTRL_2 0x24 +#define EDMA_REG_TXQ_FC_0 0x28 +#define EDMA_REG_TXQ_FC_1 0x30 +#define EDMA_REG_TXQ_FC_2 0x34 +#define EDMA_REG_TXQ_FC_3 0x38 +#define EDMA_REG_RXQ_CTRL 0x3c +#define EDMA_REG_RX_TX_FULL_QID 0x40 +#define EDMA_REG_RXQ_FC_THRE 0x44 +#define EDMA_REG_DMAR_CTRL 0x48 +#define EDMA_REG_AXIR_CTRL 0x4c +#define EDMA_REG_AXIW_CTRL 0x50 +#define EDMA_REG_MIN_MSS 0x54 +#define EDMA_REG_LOOPBACK_CTRL 0x58 +#define EDMA_REG_MISC_INT_STAT 0x5c +#define EDMA_REG_MISC_INT_MASK 0x60 +#define EDMA_REG_DBG_CTRL 0x64 +#define EDMA_REG_DBG_DATA 0x68 +#define EDMA_REG_TXDESC_BA(n) (0x1000 + (0x1000 * n)) +#define EDMA_REG_TXDESC_PROD_IDX(n) (0x1004 + (0x1000 * n)) +#define EDMA_REG_TXDESC_CONS_IDX(n) (0x1008 + (0x1000 * n)) +#define EDMA_REG_TXDESC_RING_SIZE(n) (0x100c + (0x1000 * n)) +#define EDMA_REG_TXDESC_CTRL(n) (0x1010 + (0x1000 * n)) +#if defined(NSS_DP_IPQ807X) +#define EDMA_REG_TXDESC2CMPL_MAP_0 0xc +#define EDMA_REG_TXDESC2CMPL_MAP_1 0x10 +#define EDMA_REG_TXDESC2CMPL_MAP_2 0x14 +#define EDMA_REG_TXCMPL_BASE 0x19000 +#define EDMA_REG_TX_BASE 0x21000 +#else +#define EDMA_REG_TXCMPL_BASE 0x79000 +#define EDMA_REG_TX_BASE 0x91000 +#endif +#define EDMA_REG_TXCMPL_BA_OFFSET 0x00000 +#define EDMA_REG_TXCMPL_PROD_IDX_OFFSET 0x00004 +#define EDMA_REG_TXCMPL_CONS_IDX_OFFSET 0x00008 +#define EDMA_REG_TXCMPL_RING_SIZE_OFFSET 0x0000c +#define EDMA_REG_TXCMPL_UGT_THRE_OFFSET 0x00010 +#define EDMA_REG_TXCMPL_CTRL_OFFSET 0x00014 +#define EDMA_REG_TXCMPL_BPC_OFFSET 0x00018 +#define EDMA_REG_TX_INT_STAT_OFFSET 0x00000 +#define EDMA_REG_TX_INT_MASK_OFFSET 0x00004 +#define 
EDMA_REG_TX_MOD_TIMER_OFFSET 0x00008 +#define EDMA_REG_TX_INT_CTRL_OFFSET 0x0000c +#define EDMA_REG_TXCMPL_BA(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_BA_OFFSET + (0x1000 * n)) +#define EDMA_REG_TXCMPL_PROD_IDX(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_PROD_IDX_OFFSET + (0x1000 * n)) +#define EDMA_REG_TXCMPL_CONS_IDX(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_CONS_IDX_OFFSET + (0x1000 * n)) +#define EDMA_REG_TXCMPL_RING_SIZE(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_RING_SIZE_OFFSET + (0x1000 * n)) +#define EDMA_REG_TXCMPL_UGT_THRE(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_UGT_THRE_OFFSET + (0x1000 * n)) +#define EDMA_REG_TXCMPL_CTRL(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_CTRL_OFFSET + (0x1000 * n)) +#define EDMA_REG_TXCMPL_BPC(n) (EDMA_REG_TXCMPL_BASE + EDMA_REG_TXCMPL_BPC_OFFSET + (0x1000 * n)) +#define EDMA_REG_TX_INT_STAT(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_INT_STAT_OFFSET + (0x1000 * n)) +#define EDMA_REG_TX_INT_MASK(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_INT_MASK_OFFSET + (0x1000 * n)) +#define EDMA_REG_TX_MOD_TIMER(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_MOD_TIMER_OFFSET + (0x1000 * n)) +#define EDMA_REG_TX_INT_CTRL(n) (EDMA_REG_TX_BASE + EDMA_REG_TX_INT_CTRL_OFFSET + (0x1000 * n)) +#define EDMA_REG_RXFILL_BA(n) (0x29000 + (0x1000 * n)) +#define EDMA_REG_RXFILL_PROD_IDX(n) (0x29004 + (0x1000 * n)) +#define EDMA_REG_RXFILL_CONS_IDX(n) (0x29008 + (0x1000 * n)) +#define EDMA_REG_RXFILL_RING_SIZE(n) (0x2900c + (0x1000 * n)) +#define EDMA_REG_RXFILL_BUFFER1_SIZE(n) (0x29010 + (0x1000 * n)) +#define EDMA_REG_RXFILL_FC_THRE(n) (0x29014 + (0x1000 * n)) +#define EDMA_REG_RXFILL_UGT_THRE(n) (0x29018 + (0x1000 * n)) +#define EDMA_REG_RXFILL_RING_EN(n) (0x2901c + (0x1000 * n)) +#define EDMA_REG_RXFILL_DISABLE(n) (0x29020 + (0x1000 * n)) +#define EDMA_REG_RXFILL_DISABLE_DONE(n) (0x29024 + (0x1000 * n)) +#define EDMA_REG_RXFILL_INT_STAT(n) (0x31000 + (0x1000 * n)) +#define EDMA_REG_RXFILL_INT_MASK(n) (0x31004 + (0x1000 * n)) +#define EDMA_REG_RXDESC_BA(n) (0x39000 + 
(0x1000 * n)) +#define EDMA_REG_RXDESC_PROD_IDX(n) (0x39004 + (0x1000 * n)) +#define EDMA_REG_RXDESC_CONS_IDX(n) (0x39008 + (0x1000 * n)) +#define EDMA_REG_RXDESC_RING_SIZE(n) (0x3900c + (0x1000 * n)) +#define EDMA_REG_RXDESC_FC_THRE(n) (0x39010 + (0x1000 * n)) +#define EDMA_REG_RXDESC_UGT_THRE(n) (0x39014 + (0x1000 * n)) +#define EDMA_REG_RXDESC_CTRL(n) (0x39018 + (0x1000 * n)) +#define EDMA_REG_RXDESC_BPC(n) (0x3901c + (0x1000 * n)) +#define EDMA_REG_RXDESC_INT_STAT(n) (0x49000 + (0x1000 * n)) +#define EDMA_REG_RXDESC_INT_MASK(n) (0x49004 + (0x1000 * n)) +#define EDMA_REG_RX_MOD_TIMER(n) (0x49008 + (0x1000 * n)) +#define EDMA_REG_RX_INT_CTRL(n) (0x4900c + (0x1000 * n)) +#define EDMA_QID2RID_TABLE_MEM(q) (0x5a000 + (0x4 * q)) +#define EDMA_REG_RXRING_PC(n) (0x5A200 + (0x10 * n)) +#define EDMA_REG_RXRING_BC_0(n) (0x5A204 + (0x10 * n)) +#define EDMA_REG_RXRING_BC_1(n) (0x5A208 + (0x10 * n)) +#define EDMA_REG_TXRING_PC(n) (0x74000 + (0x10 * n)) +#define EDMA_REG_TXRING_BC_0(n) (0x74004 + (0x10 * n)) +#define EDMA_REG_TXRING_BC_1(n) (0x74008 + (0x10 * n)) + +/* + * EDMA_REG_PORT_CTRL register + */ +#define EDMA_PORT_PAD_EN 0x1 +#define EDMA_PORT_EDMA_EN 0x2 + +/* + * EDMA_REG_TXQ_CTRL register + */ +#define EDMA_TXDESC_PF_THRE_MASK 0xf +#define EDMA_TXDESC_PF_THRE_SHIFT 0 +#define EDMA_TXCMPL_WB_THRE_MASK 0xf +#define EDMA_TXCMPL_WB_THRE_SHIFT 4 +#define EDMA_TXDESC_PKT_SRAM_THRE_MASK 0xff +#define EDMA_TXDESC_PKT_SRAM_THRE_SHIFT 8 +#define EDMA_TXCMPL_WB_TIMER_MASK 0xffff +#define EDMA_TXCMPL_WB_TIMER_SHIFT 16 + +/* + * EDMA_REG_RXQ_CTRL register + */ +#define EDMA_RXFILL_PF_THRE_MASK 0xf +#define EDMA_RXFILL_PF_THRE_SHIFT 0 +#define EDMA_RXDESC_WB_THRE_MASK 0xf +#define EDMA_RXDESC_WB_THRE_SHIFT 4 +#define EDMA_RXDESC_WB_TIMER_MASK 0xffff +#define EDMA_RXDESC_WB_TIMER_SHIFT 16 + +/* + * EDMA_REG_RX_TX_FULL_QID register + */ +#define EDMA_RX_DESC_FULL_QID_MASK 0xff +#define EDMA_RX_DESC_FULL_QID_SHIFT 0 +#define EDMA_TX_CMPL_BUF_FULL_QID_MASK 0xff +#define 
EDMA_TX_CMPL_BUF_FULL_QID_SHIFT 8 +#define EDMA_TX_SRAM_FULL_QID_MASK 0x1f +#define EDMA_TX_SRAM_FULL_QID_SHIFT 16 + +/* + * EDMA_REG_RXQ_FC_THRE reister + */ +#define EDMA_RXFILL_FIFO_XOFF_THRE_MASK 0x1f +#define EDMA_RXFILL_FIFO_XOFF_THRE_SHIFT 0 +#define EDMA_DESC_FIFO_XOFF_THRE_MASK 0x3f +#define EDMA_DESC_FIFO_XOFF_THRE_SHIFT 16 + +/* + * EDMA_REG_DMAR_CTRL register + */ +#define EDMA_DMAR_REQ_PRI_MASK 0x7 +#define EDMA_DMAR_REQ_PRI_SHIFT 0 +#define EDMA_DMAR_BURST_LEN_MASK 0x1 +#define EDMA_DMAR_BURST_LEN_SHIFT 3 +#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK 0x1f +#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SHIFT 4 +#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK 0x7 +#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SHIFT 9 +#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK 0x7 +#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SHIFT 12 + +#define EDMA_DMAR_REQ_PRI_SET(x) (((x) & EDMA_DMAR_REQ_PRI_MASK) << EDMA_DMAR_REQ_PRI_SHIFT) +#define EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SET(x) (((x) & EDMA_DMAR_TXDATA_OUTSTANDING_NUM_MASK) << EDMA_DMAR_TXDATA_OUTSTANDING_NUM_SHIFT) +#define EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SET(x) (((x) & EDMA_DMAR_TXDESC_OUTSTANDING_NUM_MASK) << EDMA_DMAR_TXDESC_OUTSTANDING_NUM_SHIFT) +#define EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SET(x) (((x) & EDMA_DMAR_RXFILL_OUTSTANDING_NUM_MASK) << EDMA_DMAR_RXFILL_OUTSTANDING_NUM_SHIFT) +#define EDMA_DMAR_BURST_LEN_SET(x) (((x) & EDMA_DMAR_BURST_LEN_MASK) << EDMA_DMAR_BURST_LEN_SHIFT) + +/* + * Enable 128 byte EDMA burts for IPQ60xx + */ +#if defined(NSS_DP_IPQ60XX) +#define EDMA_BURST_LEN_ENABLE 1 +#else +#define EDMA_BURST_LEN_ENABLE 0 +#endif + +/* + * EDMA_REG_AXIW_CTRL_REG + */ +#define EDMA_AXIW_MAX_WR_SIZE_EN 0x400 + +/* + * EDMA DISABLE + */ +#define EDMA_DISABLE 0 + +/* + * EDMA_REG_TXDESC_PROD_IDX register + */ +#define EDMA_TXDESC_PROD_IDX_MASK 0xffff + +/* + * EDMA_REG_TXDESC_CONS_IDX register + */ +#define EDMA_TXDESC_CONS_IDX_MASK 0xffff + +/* + * EDMA_REG_TXDESC_RING_SIZE register + */ +#define 
EDMA_TXDESC_RING_SIZE_MASK 0xffff + +/* + * EDMA_REG_TXDESC_CTRL register + */ +#define EDMA_TXDESC_ARB_GRP_ID_MASK 0x3 +#define EDMA_TXDESC_ARB_GRP_ID_SHIFT 4 +#define EDMA_TXDESC_FC_GRP_ID_MASK 0x7 +#define EDMA_TXDESC_FC_GRP_ID_SHIFT 1 +#define EDMA_TXDESC_TX_EN 0x1 + +/* + * EDMA_REG_TXCMPL_PROD_IDX register + */ +#define EDMA_TXCMPL_PROD_IDX_MASK 0xffff + +/* + * EDMA_REG_TXCMPL_CONS_IDX register + */ +#define EDMA_TXCMPL_CONS_IDX_MASK 0xffff + +/* + * EDMA_REG_TXCMPL_RING_SIZE register + */ +#define EDMA_TXCMPL_RING_SIZE_MASK 0xffff + +/* + * EDMA_REG_TXCMPL_UGT_THRE register + */ +#define EDMA_TXCMPL_LOW_THRE_MASK 0xffff +#define EDMA_TXCMPL_LOW_THRE_SHIFT 0 +#define EDMA_TXCMPL_FC_THRE_MASK 0x3f +#define EDMA_TXCMPL_FC_THRE_SHIFT 16 + +/* + * EDMA_REG_TXCMPL_CTRL register + */ +#define EDMA_TXCMPL_RET_MODE_BUFF_ADDR 0x0 +#define EDMA_TXCMPL_RET_MODE_OPAQUE 0x1 + +/* + * EDMA_REG_TX_MOD_TIMER register + */ +#define EDMA_TX_MOD_TIMER_INIT_MASK 0xffff +#define EDMA_TX_MOD_TIMER_INIT_SHIFT 0 + +/* + * EDMA_REG_TX_INT_CTRL register + */ +#define EDMA_TX_INT_MASK 0x3 + +/* + * EDMA_REG_RXFILL_PROD_IDX register + */ +#define EDMA_RXFILL_PROD_IDX_MASK 0xffff + +/* + * EDMA_REG_RXFILL_CONS_IDX register + */ +#define EDMA_RXFILL_CONS_IDX_MASK 0xffff + +/* + * EDMA_REG_RXFILL_RING_SIZE register + */ +#define EDMA_RXFILL_RING_SIZE_MASK 0xffff +#define EDMA_RXFILL_BUF_SIZE_MASK 0x3fff +#define EDMA_RXFILL_BUF_SIZE_SHIFT 16 + +/* + * EDMA_REG_RXFILL_FC_THRE register + */ +#define EDMA_RXFILL_FC_XON_THRE_MASK 0x7ff +#define EDMA_RXFILL_FC_XON_THRE_SHIFT 12 +#define EDMA_RXFILL_FC_XOFF_THRE_MASK 0x7ff +#define EDMA_RXFILL_FC_XOFF_THRE_SHIFT 0 + +/* + * EDMA_REG_RXFILL_UGT_THRE register + */ +#define EDMA_RXFILL_LOW_THRE_MASK 0xffff +#define EDMA_RXFILL_LOW_THRE_SHIFT 0 + +/* + * EDMA_REG_RXFILL_RING_EN register + */ +#define EDMA_RXFILL_RING_EN 0x1 + +/* + * EDMA_REG_RXFILL_INT_MASK register + */ +#define EDMA_RXFILL_INT_MASK 0x1 + +/* + * EDMA_REG_RXDESC_PROD_IDX register 
+ */ +#define EDMA_RXDESC_PROD_IDX_MASK 0xffff + +/* + * EDMA_REG_RXDESC_CONS_IDX register + */ +#define EDMA_RXDESC_CONS_IDX_MASK 0xffff + +/* + * EDMA_REG_RXDESC_RING_SIZE register + */ +#define EDMA_RXDESC_RING_SIZE_MASK 0xffff +#define EDMA_RXDESC_PL_OFFSET_MASK 0x1ff +#define EDMA_RXDESC_PL_OFFSET_SHIFT 16 + +/* + * EDMA_REG_RXDESC_FC_THRE register + */ +#define EDMA_RXDESC_FC_XON_THRE_MASK 0x7ff +#define EDMA_RXDESC_FC_XON_THRE_SHIFT 12 +#define EDMA_RXDESC_FC_XOFF_THRE_MASK 0x7ff +#define EDMA_RXDESC_FC_XOFF_THRE_SHIFT 0 + +/* + * EDMA_REG_RXDESC_UGT_THRE register + */ +#define EDMA_RXDESC_LOW_THRE_MASK 0xffff +#define EDMA_RXDESC_LOW_THRE_SHIFT 0 + +/* + * EDMA_REG_RXDESC_CTRL register + */ +#define EDMA_RXDESC_STAG_REMOVE_EN 0x8 +#define EDMA_RXDESC_CTAG_REMOVE_EN 0x4 +#define EDMA_RXDESC_QDISC_EN 0x2 +#define EDMA_RXDESC_RX_EN 0x1 + +/* + * EDMA_REG_TX_INT_MASK register + */ +#define EDMA_TX_INT_MASK_PKT_INT 0x1 +#define EDMA_TX_INT_MASK_UGT_INT 0x2 + +/* + * EDMA_REG_RXDESC_INT_STAT register + */ +#define EDMA_RXDESC_INT_STAT_PKT_INT 0x1 +#define EDMA_RXDESC_INT_STAT_UGT_INT 0x2 + +/* + * EDMA_REG_RXDESC_INT_MASK register + */ +#define EDMA_RXDESC_INT_MASK_PKT_INT 0x1 +#define EDMA_RXDESC_INT_MASK_TIMER_INT_DIS 0x2 + +#define EDMA_MASK_INT_DISABLE 0x0 +#define EDMA_MASK_INT_CLEAR 0x0 + +/* + * EDMA_REG_RX_MOD_TIMER register + */ +#define EDMA_RX_MOD_TIMER_INIT_MASK 0xffff +#define EDMA_RX_MOD_TIMER_INIT_SHIFT 0 + +/* + * EDMA QID2RID register sizes + */ +#define EDMA_QID2RID_DEPTH 0x40 +#define EDMA_QID2RID_QUEUES_PER_ENTRY 8 + +/* + * TXDESC shift values + */ +#define EDMA_TXDESC_MORE_SHIFT 31 +#define EDMA_TXDESC_TSO_EN_SHIFT 30 +#define EDMA_TXDESC_PREHEADER_SHIFT 29 +#define EDMA_TXDESC_POOL_ID_SHIFT 24 +#define EDMA_TXDESC_POOL_ID_MASK 0x1f +#define EDMA_TXDESC_DATA_OFFSET_SHIFT 16 +#define EDMA_TXDESC_DATA_OFFSET_MASK 0xff +#define EDMA_TXDESC_DATA_LENGTH_SHIFT 0 +#define EDMA_TXDESC_DATA_LENGTH_MASK 0xffff + +#define EDMA_PREHDR_DSTINFO_PORTID_IND 
0x20 +#define EDMA_PREHDR_PORTNUM_BITS 0x0fff +#define EDMA_RING_DMA_MASK 0xffffffff +/* + * RXDESC shift values + */ +#define EDMA_RXDESC_RX_RXFILL_CNT_MASK 0x000f +#define EDMA_RXDESC_RX_RXFILL_CNT_SHIFT 16 + +#define EDMA_RXDESC_PKT_SIZE_MASK 0x3fff +#define EDMA_RXDESC_PKT_SIZE_SHIFT 0 + +#define EDMA_RXDESC_RXD_VALID_MASK 0x1 +#define EDMA_RXDESC_RXD_VALID_SHIFT 31 + +#define EDMA_RXDESC_PACKET_LEN_MASK 0x3fff +#define EDMA_RXDESC_RING_INT_STATUS_MASK 0x3 + +#define EDMA_RING_DISABLE 0 +#define EDMA_TXCMPL_RING_INT_STATUS_MASK 0x3 +#define EDMA_TXCMPL_RETMODE_OPAQUE 0x0 +#define EDMA_RXFILL_RING_INT_STATUS_MASK 0x1 + +/* + * TODO tune the timer and threshold values + */ +#define EDMA_RXFILL_FIFO_XOFF_THRE 0x3 +#define EDMA_RXFILL_PF_THRE 0x3 +#define EDMA_RXDESC_WB_THRE 0x0 +#define EDMA_RXDESC_WB_TIMER 0x2 + +#define EDMA_RXDESC_XON_THRE 50 +#define EDMA_RXDESC_XOFF_THRE 30 +#define EDMA_RXDESC_LOW_THRE 0 +#define EDMA_RX_MOD_TIMER_INIT 1000 + +#define EDMA_TXDESC_PF_THRE 0x3 +#define EDMA_TXCMPL_WB_THRE 0X0 +#define EDMA_TXDESC_PKT_SRAM_THRE 0x20 +#define EDMA_TXCMPL_WB_TIMER 0x2 + +#define EDMA_TX_MOD_TIMER 150 + +/* + * EDMA misc error mask + */ +#define EDMA_MISC_AXI_RD_ERR_MASK_EN 0x1 +#define EDMA_MISC_AXI_WR_ERR_MASK_EN 0x2 +#define EDMA_MISC_RX_DESC_FIFO_FULL_MASK_EN 0x4 +#define EDMA_MISC_RX_ERR_BUF_SIZE_MASK_EN 0x8 +#define EDMA_MISC_TX_SRAM_FULL_MASK_EN 0x10 +#define EDMA_MISC_TX_CMPL_BUF_FULL_MASK_EN 0x20 + +#if defined(NSS_DP_IPQ807X) +#define EDMA_MISC_PKT_LEN_LA_64K_MASK_EN 0x40 +#define EDMA_MISC_PKT_LEN_LE_40_MASK_EN 0x80 +#define EDMA_MISC_DATA_LEN_ERR_MASK_EN 0x100 +#else +#define EDMA_MISC_DATA_LEN_ERR_MASK_EN 0x40 +#define EDMA_MISC_TX_TIMEOUT_MASK_EN 0x80 +#endif + +#endif /* __EDMA_REGS__ */ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_tx_rx.c b/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_tx_rx.c new file mode 100644 index 000000000..0f42a7e50 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/edma/edma_tx_rx.c @@ -0,0 
+1,795 @@ +/* + * Copyright (c) 2016-2018, 2020-21, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER + * RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE + * USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include + +#include "nss_dp_dev.h" +#include "edma_regs.h" +#include "edma_data_plane.h" + +/* + * edma_alloc_rx_buffer() + * Alloc Rx buffers for one RxFill ring + */ +int edma_alloc_rx_buffer(struct edma_hw *ehw, + struct edma_rxfill_ring *rxfill_ring) +{ + struct platform_device *pdev = ehw->pdev; + struct sk_buff *skb; + uint16_t num_alloc = 0; + uint16_t cons, next, counter; + struct edma_rxfill_desc *rxfill_desc; + uint32_t reg_data = 0; + uint32_t store_index = 0; + struct edma_rx_preheader *rxph = NULL; + + /* + * Read RXFILL ring producer index + */ + reg_data = edma_reg_read(EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->id)); + next = reg_data & EDMA_RXFILL_PROD_IDX_MASK & (rxfill_ring->count - 1); + + /* + * Read RXFILL ring consumer index + */ + reg_data = edma_reg_read(EDMA_REG_RXFILL_CONS_IDX(rxfill_ring->id)); + cons = reg_data & EDMA_RXFILL_CONS_IDX_MASK; + + while (1) { + counter = next; + if (++counter == rxfill_ring->count) + counter = 0; + + if (counter == cons) + break; + + /* + * Allocate buffer + */ + skb = dev_alloc_skb(EDMA_RX_BUFF_SIZE); + if (unlikely(!skb)) + break; + + /* + 
* Get RXFILL descriptor + */ + rxfill_desc = EDMA_RXFILL_DESC(rxfill_ring, next); + + /* + * Make room for Rx preheader + */ + rxph = (struct edma_rx_preheader *) + skb_push(skb, EDMA_RX_PREHDR_SIZE); + + /* + * Store the skb in the rx store + */ + store_index = next; + if (ehw->rx_skb_store[store_index] != NULL) { + dev_kfree_skb_any(skb); + break; + } + ehw->rx_skb_store[store_index] = skb; + memcpy((uint8_t *)&rxph->opaque, (uint8_t *)&store_index, 4); + /* + * Save buffer size in RXFILL descriptor + */ + rxfill_desc->word1 = cpu_to_le32(EDMA_RX_BUFF_SIZE + & EDMA_RXFILL_BUF_SIZE_MASK); + + /* + * Map Rx buffer for DMA + */ + rxfill_desc->buffer_addr = cpu_to_le32(dma_map_single( + &pdev->dev, + skb->data, + EDMA_RX_BUFF_SIZE, + DMA_FROM_DEVICE)); + + if (!rxfill_desc->buffer_addr) { + dev_kfree_skb_any(skb); + ehw->rx_skb_store[store_index] = NULL; + break; + } + + num_alloc++; + next = counter; + } + + if (num_alloc) { + /* + * Update RXFILL ring producer index + */ + reg_data = next & EDMA_RXFILL_PROD_IDX_MASK; + + /* + * make sure the producer index updated before + * updating the hardware + */ + wmb(); + + edma_reg_write(EDMA_REG_RXFILL_PROD_IDX(rxfill_ring->id), + reg_data); + } + + return num_alloc; +} + +/* + * edma_clean_tx() + * Reap Tx descriptors + */ +uint32_t edma_clean_tx(struct edma_hw *ehw, + struct edma_txcmpl_ring *txcmpl_ring) +{ + struct platform_device *pdev = ehw->pdev; + struct edma_txcmpl_desc *txcmpl = NULL; + uint16_t prod_idx = 0; + uint16_t cons_idx = 0; + uint32_t data = 0; + uint32_t txcmpl_consumed = 0; + struct sk_buff *skb; + uint32_t len; + int store_index; + dma_addr_t daddr; + + /* + * Get TXCMPL ring producer index + */ + data = edma_reg_read(EDMA_REG_TXCMPL_PROD_IDX(txcmpl_ring->id)); + prod_idx = data & EDMA_TXCMPL_PROD_IDX_MASK; + + /* + * Get TXCMPL ring consumer index + */ + data = edma_reg_read(EDMA_REG_TXCMPL_CONS_IDX(txcmpl_ring->id)); + cons_idx = data & EDMA_TXCMPL_CONS_IDX_MASK; + + while (cons_idx != prod_idx) { 
+ txcmpl = &(((struct edma_txcmpl_desc *) + (txcmpl_ring->desc))[cons_idx]); + + /* + * skb for this is stored in tx store and + * tx header contains the index in the field + * buffer address (opaque) of txcmpl + */ + store_index = txcmpl->buffer_addr; + skb = ehw->tx_skb_store[store_index]; + ehw->tx_skb_store[store_index] = NULL; + + if (unlikely(!skb)) { + pr_warn("Invalid skb: cons_idx:%u prod_idx:%u status %x\n", + cons_idx, prod_idx, txcmpl->status); + goto next_txcmpl_desc; + } + + len = skb_headlen(skb); + daddr = (dma_addr_t)virt_to_phys(skb->data); + + pr_debug("skb:%px cons_idx:%d prod_idx:%d word1:0x%x\n", + skb, cons_idx, prod_idx, txcmpl->status); + + dma_unmap_single(&pdev->dev, daddr, + len, DMA_TO_DEVICE); + dev_kfree_skb_any(skb); + +next_txcmpl_desc: + if (++cons_idx == txcmpl_ring->count) + cons_idx = 0; + + txcmpl_consumed++; + } + + if (txcmpl_consumed == 0) + return 0; + + pr_debug("TXCMPL:%u txcmpl_consumed:%u prod_idx:%u cons_idx:%u\n", + txcmpl_ring->id, txcmpl_consumed, prod_idx, cons_idx); + + /* + * Update TXCMPL ring consumer index + */ + wmb(); + edma_reg_write(EDMA_REG_TXCMPL_CONS_IDX(txcmpl_ring->id), cons_idx); + + return txcmpl_consumed; +} + +/* + * nss_phy_tstamp_rx_buf() + * Receive timestamp packet + */ +void nss_phy_tstamp_rx_buf(__attribute__((unused))void *app_data, struct sk_buff *skb) +{ + struct net_device *ndev = skb->dev; + + /* + * The PTP_CLASS_ value 0 is passed to phy driver, which will be + * set to the correct PTP class value by calling ptp_classify_raw + * in drv->rxtstamp function. 
+ */ + if (ndev && ndev->phydev && ndev->phydev->drv && + ndev->phydev->drv->rxtstamp) + if(ndev->phydev->drv->rxtstamp(ndev->phydev, skb, 0)) + return; + + netif_receive_skb(skb); +} +EXPORT_SYMBOL(nss_phy_tstamp_rx_buf); + +/* + * nss_phy_tstamp_tx_buf() + * Transmit timestamp packet + */ +void nss_phy_tstamp_tx_buf(struct net_device *ndev, struct sk_buff *skb) +{ + /* + * Function drv->txtstamp will create a clone of skb if necessary, + * the PTP_CLASS_ value 0 is passed to phy driver, which will be + * set to the correct PTP class value by calling ptp_classify_raw + * in the drv->txtstamp function. + */ + if (ndev && ndev->phydev && ndev->phydev->drv && + ndev->phydev->drv->txtstamp) + ndev->phydev->drv->txtstamp(ndev->phydev, skb, 0); +} +EXPORT_SYMBOL(nss_phy_tstamp_tx_buf); + +/* + * edma_clean_rx() + * Reap Rx descriptors + */ +static uint32_t edma_clean_rx(struct edma_hw *ehw, + int work_to_do, + struct edma_rxdesc_ring *rxdesc_ring) +{ + struct platform_device *pdev = ehw->pdev; + struct net_device *ndev; + struct sk_buff *skb = NULL; + struct edma_rxdesc_desc *rxdesc_desc; + struct edma_rx_preheader *rxph = NULL; + uint16_t prod_idx = 0; + int src_port_num = 0; + int pkt_length = 0; + uint16_t cons_idx = 0; + uint32_t work_done = 0; + int store_index; + + /* + * Read Rx ring consumer index + */ + cons_idx = edma_reg_read(EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->id)) + & EDMA_RXDESC_CONS_IDX_MASK; + + while (1) { + /* + * Read Rx ring producer index + */ + prod_idx = edma_reg_read( + EDMA_REG_RXDESC_PROD_IDX(rxdesc_ring->id)) + & EDMA_RXDESC_PROD_IDX_MASK; + + if (cons_idx == prod_idx) + break; + + if (work_done >= work_to_do) + break; + + rxdesc_desc = EDMA_RXDESC_DESC(rxdesc_ring, cons_idx); + + /* + * Get Rx preheader + */ + rxph = (struct edma_rx_preheader *) + phys_to_virt(rxdesc_desc->buffer_addr); + + /* + * DMA unmap Rx buffer + */ + dma_unmap_single(&pdev->dev, + rxdesc_desc->buffer_addr, + EDMA_RX_BUFF_SIZE, + DMA_FROM_DEVICE); + + store_index = 
rxph->opaque; + skb = ehw->rx_skb_store[store_index]; + ehw->rx_skb_store[store_index] = NULL; + if (unlikely(!skb)) { + pr_warn("WARN: empty skb reference in rx_store:%d\n", + cons_idx); + goto next_rx_desc; + } + + /* + * Check src_info from Rx preheader + */ + if (EDMA_RXPH_SRC_INFO_TYPE_GET(rxph) == + EDMA_PREHDR_DSTINFO_PORTID_IND) { + src_port_num = rxph->src_info & + EDMA_PREHDR_PORTNUM_BITS; + } else { + pr_warn("WARN: src_info_type:0x%x. Drop skb:%px\n", + EDMA_RXPH_SRC_INFO_TYPE_GET(rxph), skb); + dev_kfree_skb_any(skb); + goto next_rx_desc; + } + + /* + * Get packet length + */ + pkt_length = rxdesc_desc->status & EDMA_RXDESC_PACKET_LEN_MASK; + + if (unlikely((src_port_num < NSS_DP_START_IFNUM) || + (src_port_num > NSS_DP_HAL_MAX_PORTS))) { + pr_warn("WARN: Port number error :%d. Drop skb:%px\n", + src_port_num, skb); + dev_kfree_skb_any(skb); + goto next_rx_desc; + } + + /* + * Get netdev for this port using the source port + * number as index into the netdev array. We need to + * subtract one since the indices start form '0' and + * port numbers start from '1'. + */ + ndev = ehw->netdev_arr[src_port_num - 1]; + if (unlikely(!ndev)) { + pr_warn("WARN: netdev Null src_info_type:0x%x. 
Drop skb:%px\n", + src_port_num, skb); + dev_kfree_skb_any(skb); + goto next_rx_desc; + } + + if (unlikely(!netif_running(ndev))) { + dev_kfree_skb_any(skb); + goto next_rx_desc; + } + + /* + * Remove Rx preheader + */ + skb_pull(skb, EDMA_RX_PREHDR_SIZE); + + /* + * Update skb fields and indicate packet to stack + */ + skb->dev = ndev; + skb->skb_iif = ndev->ifindex; + skb_put(skb, pkt_length); + skb->protocol = eth_type_trans(skb, skb->dev); +#ifdef CONFIG_NET_SWITCHDEV +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + skb->offload_fwd_mark = ndev->offload_fwd_mark; +#else + /* + * TODO: Implement ndo_get_devlink_port() + */ + skb->offload_fwd_mark = 0; +#endif + pr_debug("skb:%px ring_idx:%u pktlen:%d proto:0x%x mark:%u\n", + skb, cons_idx, pkt_length, skb->protocol, + skb->offload_fwd_mark); +#else + pr_debug("skb:%px ring_idx:%u pktlen:%d proto:0x%x\n", + skb, cons_idx, pkt_length, skb->protocol); +#endif + /* + * Deliver the ptp packet to phy driver for RX timestamping + */ + if (unlikely(EDMA_RXPH_SERVICE_CODE_GET(rxph) == + NSS_PTP_EVENT_SERVICE_CODE)) + nss_phy_tstamp_rx_buf(ndev, skb); + else + netif_receive_skb(skb); + +next_rx_desc: + /* + * Update consumer index + */ + if (++cons_idx == rxdesc_ring->count) + cons_idx = 0; + + /* + * Update work done + */ + work_done++; + } + + edma_alloc_rx_buffer(ehw, rxdesc_ring->rxfill); + + /* + * make sure the consumer index is updated + * before updating the hardware + */ + wmb(); + edma_reg_write(EDMA_REG_RXDESC_CONS_IDX(rxdesc_ring->id), cons_idx); + return work_done; +} + +/* + * edma_napi() + * EDMA NAPI handler + */ +int edma_napi(struct napi_struct *napi, int budget) +{ + struct edma_hw *ehw = container_of(napi, struct edma_hw, napi); + struct edma_txcmpl_ring *txcmpl_ring = NULL; + struct edma_rxdesc_ring *rxdesc_ring = NULL; + struct edma_rxfill_ring *rxfill_ring = NULL; + + struct net_device *ndev; + int work_done = 0; + int i; + + for (i = 0; i < ehw->rxdesc_rings; i++) { + rxdesc_ring = 
&ehw->rxdesc_ring[i]; + work_done += edma_clean_rx(ehw, budget, rxdesc_ring); + } + + for (i = 0; i < ehw->txcmpl_rings; i++) { + txcmpl_ring = &ehw->txcmpl_ring[i]; + work_done += edma_clean_tx(ehw, txcmpl_ring); + } + + for (i = 0; i < ehw->rxfill_rings; i++) { + rxfill_ring = &ehw->rxfill_ring[i]; + work_done += edma_alloc_rx_buffer(ehw, rxfill_ring); + } + + /* + * Resume netdev Tx queue + */ + /* + * TODO works currently since we have a single queue. + * Need to make sure we have support in place when there is + * support for multiple queues + */ + for (i = 0; i < EDMA_MAX_GMACS; i++) { + ndev = ehw->netdev_arr[i]; + if (!ndev) + continue; + + if (netif_queue_stopped(ndev) && netif_carrier_ok(ndev)) + netif_start_queue(ndev); + } + + /* + * TODO - rework and fix the budget control + */ + if (work_done < budget) { + /* + * TODO per core NAPI + */ + napi_complete(napi); + + /* + * Set RXDESC ring interrupt mask + */ + for (i = 0; i < ehw->rxdesc_rings; i++) { + rxdesc_ring = &ehw->rxdesc_ring[i]; + edma_reg_write( + EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id), + ehw->rxdesc_intr_mask); + } + + /* + * Set TXCMPL ring interrupt mask + */ + for (i = 0; i < ehw->txcmpl_rings; i++) { + txcmpl_ring = &ehw->txcmpl_ring[i]; + edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id), + ehw->txcmpl_intr_mask); + } + + /* + * Set RXFILL ring interrupt mask + */ + for (i = 0; i < ehw->rxfill_rings; i++) { + rxfill_ring = &ehw->rxfill_ring[i]; + edma_reg_write(EDMA_REG_RXFILL_INT_MASK( + rxfill_ring->id), + edma_hw.rxfill_intr_mask); + } + } + return work_done; +} + +/* + * edma_ring_xmit() + * Transmit a packet using an EDMA ring + */ +enum edma_tx edma_ring_xmit(struct edma_hw *ehw, + struct net_device *netdev, + struct sk_buff *skb, + struct edma_txdesc_ring *txdesc_ring) +{ + struct nss_dp_dev *dp_dev = netdev_priv(netdev); + struct edma_txdesc_desc *txdesc = NULL; + uint16_t buf_len; + uint16_t hw_next_to_use, hw_next_to_clean, chk_idx; + uint32_t data; + uint32_t store_index 
= 0; + struct edma_tx_preheader *txph = NULL; + + /* + * TODO - revisit locking + */ + spin_lock_bh(&txdesc_ring->tx_lock); + + /* + * Read TXDESC ring producer index + */ + data = edma_reg_read(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id)); + hw_next_to_use = data & EDMA_TXDESC_PROD_IDX_MASK; + + /* + * Read TXDESC ring consumer index + */ + /* + * TODO - read to local variable to optimize uncached access + */ + data = edma_reg_read(EDMA_REG_TXDESC_CONS_IDX(txdesc_ring->id)); + hw_next_to_clean = data & EDMA_TXDESC_CONS_IDX_MASK; + + /* + * Check for available Tx descriptor + */ + chk_idx = (hw_next_to_use + 1) & (txdesc_ring->count-1); + + if (chk_idx == hw_next_to_clean) { + spin_unlock_bh(&txdesc_ring->tx_lock); + return EDMA_TX_DESC; + } + +#if defined(NSS_DP_EDMA_TX_SMALL_PKT_WAR) + /* + * IPQ807x EDMA hardware can't process the packet if the packet size is + * less than EDMA_TX_PKT_MIN_SIZE (33 Byte). So, if the packet size + * is indeed less than EDMA_TX_PKT_MIN_SIZE, perform padding + * (if possible), otherwise drop the packet. + * Using skb_padto() API for padding the packet. This API will drop + * the packet if the padding is not possible. 
+ */ + if (unlikely(skb->len < EDMA_TX_PKT_MIN_SIZE)) { + if (skb_padto(skb, EDMA_TX_PKT_MIN_SIZE)) { + netdev_dbg(netdev, "padding couldn't happen, skb is freed.\n"); + netdev->stats.tx_dropped++; + spin_unlock_bh(&txdesc_ring->tx_lock); + return EDMA_TX_OK; + } + skb->len = EDMA_TX_PKT_MIN_SIZE; + } +#endif + + buf_len = skb_headlen(skb); + + /* + * Deliver the ptp packet to phy driver for TX timestamping + */ + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) + nss_phy_tstamp_tx_buf(netdev, skb); + + /* + * Make room for Tx preheader + */ + txph = (struct edma_tx_preheader *)skb_push(skb, + EDMA_TX_PREHDR_SIZE); + memset((void *)txph, 0, EDMA_TX_PREHDR_SIZE); + + /* + * Populate Tx preheader dst info, port id is macid in dp_dev + */ + txph->dst_info = (EDMA_PREHDR_DSTINFO_PORTID_IND << 8) | + (dp_dev->macid & 0x0fff); + + /* + * Store the skb in tx_store + */ + store_index = hw_next_to_use & (txdesc_ring->count - 1); + if (unlikely(ehw->tx_skb_store[store_index] != NULL)) { + spin_unlock_bh(&txdesc_ring->tx_lock); + return EDMA_TX_DESC; + } + + ehw->tx_skb_store[store_index] = skb; + memcpy(skb->data, &store_index, 4); + + /* + * Get Tx descriptor + */ + txdesc = EDMA_TXDESC_DESC(txdesc_ring, hw_next_to_use); + memset(txdesc, 0, sizeof(struct edma_txdesc_desc)); + + /* + * Map buffer to DMA address + */ + txdesc->buffer_addr = cpu_to_le32(dma_map_single(&(ehw->pdev)->dev, + skb->data, + buf_len + EDMA_TX_PREHDR_SIZE, + DMA_TO_DEVICE)); + if (!txdesc->buffer_addr) { + /* + * DMA map failed for this address. 
Drop it + * and make sure does not got to stack again + */ + dev_kfree_skb_any(skb); + + ehw->tx_skb_store[store_index] = NULL; + spin_unlock_bh(&txdesc_ring->tx_lock); + return EDMA_TX_OK; + } + + /* + * Populate Tx descriptor + */ + txdesc->word1 |= (1 << EDMA_TXDESC_PREHEADER_SHIFT) + | ((EDMA_TX_PREHDR_SIZE & EDMA_TXDESC_DATA_OFFSET_MASK) + << EDMA_TXDESC_DATA_OFFSET_SHIFT); + txdesc->word1 |= ((buf_len & EDMA_TXDESC_DATA_LENGTH_MASK) + << EDMA_TXDESC_DATA_LENGTH_SHIFT); + + netdev_dbg(netdev, "skb:%px tx_ring:%u proto:0x%x\n", + skb, txdesc_ring->id, ntohs(skb->protocol)); + netdev_dbg(netdev, "port:%u prod_idx:%u cons_idx:%u\n", + dp_dev->macid, hw_next_to_use, hw_next_to_clean); + + /* + * Update producer index + */ + hw_next_to_use = (hw_next_to_use + 1) & (txdesc_ring->count - 1); + + /* + * make sure the hw_next_to_use is updated before the + * write to hardware + */ + wmb(); + + edma_reg_write(EDMA_REG_TXDESC_PROD_IDX(txdesc_ring->id), + hw_next_to_use & EDMA_TXDESC_PROD_IDX_MASK); + spin_unlock_bh(&txdesc_ring->tx_lock); + return EDMA_TX_OK; +} + +/* + * edma_handle_misc_irq() + * Process IRQ + */ +irqreturn_t edma_handle_misc_irq(int irq, void *ctx) +{ + uint32_t misc_intr_status = 0; + uint32_t reg_data = 0; + struct edma_hw *ehw = NULL; + struct platform_device *pdev = (struct platform_device *)ctx; + + ehw = platform_get_drvdata(pdev); + + /* + * Read Misc intr status + */ + reg_data = edma_reg_read(EDMA_REG_MISC_INT_STAT); + misc_intr_status = reg_data & ehw->misc_intr_mask; + + /* + * TODO - error logging + */ + if (misc_intr_status == 0) + return IRQ_NONE; + else + edma_reg_write(EDMA_REG_MISC_INT_MASK, EDMA_MASK_INT_DISABLE); + + return IRQ_HANDLED; +} + +/* + * edma_handle_irq() + * Process IRQ and schedule napi + */ +irqreturn_t edma_handle_irq(int irq, void *ctx) +{ + uint32_t reg_data = 0; + uint32_t rxdesc_intr_status = 0; + uint32_t txcmpl_intr_status = 0; + uint32_t rxfill_intr_status = 0; + int i; + struct edma_txcmpl_ring *txcmpl_ring = 
NULL; + struct edma_rxdesc_ring *rxdesc_ring = NULL; + struct edma_rxfill_ring *rxfill_ring = NULL; + struct edma_hw *ehw = NULL; + struct platform_device *pdev = (struct platform_device *)ctx; + + ehw = platform_get_drvdata(pdev); + if (!ehw) { + pr_info("Unable to retrieve platrofm data"); + return IRQ_HANDLED; + } + + /* + * Read RxDesc intr status + */ + for (i = 0; i < ehw->rxdesc_rings; i++) { + rxdesc_ring = &ehw->rxdesc_ring[i]; + reg_data = edma_reg_read( + EDMA_REG_RXDESC_INT_STAT(rxdesc_ring->id)); + rxdesc_intr_status |= reg_data & + EDMA_RXDESC_RING_INT_STATUS_MASK; + + /* + * Disable RxDesc intr + */ + edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id), + EDMA_MASK_INT_DISABLE); + } + + /* + * Read TxCmpl intr status + */ + for (i = 0; i < ehw->txcmpl_rings; i++) { + txcmpl_ring = &ehw->txcmpl_ring[i]; + reg_data = edma_reg_read( + EDMA_REG_TX_INT_STAT(txcmpl_ring->id)); + txcmpl_intr_status |= reg_data & + EDMA_TXCMPL_RING_INT_STATUS_MASK; + + /* + * Disable TxCmpl intr + */ + edma_reg_write(EDMA_REG_TX_INT_MASK(txcmpl_ring->id), + EDMA_MASK_INT_DISABLE); + } + + /* + * Read RxFill intr status + */ + for (i = 0; i < ehw->rxfill_rings; i++) { + rxfill_ring = &ehw->rxfill_ring[i]; + reg_data = edma_reg_read( + EDMA_REG_RXFILL_INT_STAT(rxfill_ring->id)); + rxfill_intr_status |= reg_data & + EDMA_RXFILL_RING_INT_STATUS_MASK; + + /* + * Disable RxFill intr + */ + edma_reg_write(EDMA_REG_RXFILL_INT_MASK(rxfill_ring->id), + EDMA_MASK_INT_DISABLE); + + } + + if ((rxdesc_intr_status == 0) && (txcmpl_intr_status == 0) && + (rxfill_intr_status == 0)) + return IRQ_NONE; + + for (i = 0; i < ehw->rxdesc_rings; i++) { + rxdesc_ring = &ehw->rxdesc_ring[i]; + edma_reg_write(EDMA_REG_RXDESC_INT_MASK(rxdesc_ring->id), + EDMA_MASK_INT_DISABLE); + } + + /* + *TODO - per core NAPI + */ + if (rxdesc_intr_status || txcmpl_intr_status || rxfill_intr_status) + if (likely(napi_schedule_prep(&ehw->napi))) + __napi_schedule(&ehw->napi); + + return IRQ_HANDLED; +} diff 
--git a/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_dev.h b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_dev.h new file mode 100644 index 000000000..79da08661 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_dev.h @@ -0,0 +1,697 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017,2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __QCOM_DEV_H__ +#define __QCOM_DEV_H__ + +#include +#include "qcom_reg.h" +#include +#include + +/* + * Subclass for base nss_gmac_haldev + */ +struct qcom_hal_dev { + struct nss_gmac_hal_dev nghd; /* Base class */ + fal_mib_counter_t stats; /* Stats structure */ +}; +/* + * qcom_set_rx_flow_ctrl() + */ +static inline void qcom_set_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_FLOW_ENABLE); +} + +/* + * qcom_clear_rx_flow_ctrl() + */ +static inline void qcom_clear_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_FLOW_ENABLE); +} + +/* + * qcom_set_tx_flow_ctrl() + */ +static inline void qcom_set_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_FLOW_ENABLE); +} + +/* + * qcom_clear_tx_flow_ctrl() + */ +static inline void qcom_clear_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_FLOW_ENABLE); +} + +/* + * qcom_clear_mac_ctrl0() + */ +static inline void qcom_clear_mac_ctrl0(struct nss_gmac_hal_dev *nghd) +{ + hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL0, 0); +} + +/* + * qcom_rx_enable() + */ +static inline void qcom_rx_enable(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_MAC_ENABLE); +} + +/* + * qcom_rx_disable() + * Disable the reception of frames on GMII/MII. + * GMAC receive state machine is disabled after completion of reception of + * current frame. + */ +static inline void qcom_rx_disable(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_RX_MAC_ENABLE); +} + +/* + * qcom_tx_enable() + */ +static inline void qcom_tx_enable(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_MAC_ENABLE); +} + +/* + * qcom_tx_disable() + * Disable the transmission of frames on GMII/MII. 
+ * GMAC transmit state machine is disabled after completion of + * transmission of current frame. + */ +static inline void qcom_tx_disable(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_TX_MAC_ENABLE); +} + +/* + * qcom_set_full_duplex() + */ +static inline void qcom_set_full_duplex(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_DUPLEX); +} + +/* + * qcom_set_half_duplex() + */ +static inline void qcom_set_half_duplex(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_ENABLE, QCOM_DUPLEX); +} + +/* + * qcom_set_ipgt() + */ +static inline void qcom_set_ipgt(struct nss_gmac_hal_dev *nghd, uint32_t ipgt) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL0); + data &= ~QCOM_IPGT_POS; + ipgt = ipgt << QCOM_IPGT_LSB; + data |= ipgt; + hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL0, data); +} + +/* + * qcom_set_ipgr() + */ +static inline void qcom_set_ipgr(struct nss_gmac_hal_dev *nghd, uint32_t ipgr) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL0); + data &= ~QCOM_IPGR2_POS; + ipgr = ipgr << QCOM_IPGR2_LSB; + data |= ipgr; + hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL0, data); +} + +/* + * qcom_set_half_thdf_ctrl() + */ +static inline void qcom_set_half_thdf_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_HALF_THDF_CTRL); +} + +/* + * qcom_reset_half_thdf_ctrl() + */ +static inline void qcom_reset_half_thdf_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_HALF_THDF_CTRL); +} + +/* + * qcom_set_frame_len_chk() + */ +static inline void qcom_set_frame_len_chk(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_FLCHK); +} + +/* + * qcom_reset_frame_len_chk() + */ +static inline void qcom_reset_frame_len_chk(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_FLCHK); +} + +/* + * qcom_set_abebe() + */ +static 
inline void qcom_set_abebe(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_ABEBE); +} + +/* + * qcom_reset_abebe() + */ +static inline void qcom_reset_abebe(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_ABEBE); +} + +/* + * qcom_set_amaxe() + */ +static inline void qcom_set_amaxe(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_AMAXE); +} + +/* + * qcom_reset_amaxe() + */ +static inline void qcom_reset_amaxe(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_AMAXE); +} + +/* + * qcom_set_bpnb() + */ +static inline void qcom_set_bpnb(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_BPNB); +} + +/* + * qcom_reset_bpnb() + */ +static inline void qcom_reset_bpnb(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_BPNB); +} + +/* + * qcom_set_nobo() + */ +static inline void qcom_set_nobo(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_NOBO); +} + +/* + * qcom_reset_nobo() + */ +static inline void qcom_reset_nobo(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_NOBO); +} + +/* + * qcom_set_drbnib_rxok() + */ +static inline void qcom_set_drbnib_rxok(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_DRBNIB_RXOK); +} + +/* + * qcom_reset_drbnib_rxok() + */ +static inline void qcom_reset_drbnib_rxok(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL0, QCOM_DRBNIB_RXOK); +} + +/* + * qcom_set_jam_ipg() + */ +static inline void qcom_set_jam_ipg(struct nss_gmac_hal_dev *nghd, + uint32_t jam_ipg) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1); + data &= ~QCOM_JAM_IPG_POS; + jam_ipg = jam_ipg << QCOM_JAM_IPG_LSB; + data |= jam_ipg; + hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data); +} + +/* + * qcom_set_ctrl1_test_pause() + */ +static inline 
void qcom_set_ctrl1_test_pause(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TPAUSE); +} + +/* + * qcom_reset_ctrl1_test_pause() + */ +static inline void qcom_reset_ctrl1_test_pause(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TPAUSE); +} + +/* + * qcom_reset_ctrl1_test_pause() + */ +static inline void qcom_set_tctl(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TCTL); +} + +/* + * qcom_reset_tctl() + */ +static inline void qcom_reset_tctl(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_TCTL); +} + +/* + * qcom_set_sstct() + */ +static inline void qcom_set_sstct(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SSTCT); +} + +/* + * qcom_reset_sstct() + */ +static inline void qcom_reset_sstct(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SSTCT); +} + +/* + * qcom_set_simr() + */ +static inline void qcom_set_simr(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SIMR); +} + +/* + * qcom_reset_simr() + */ +static inline void qcom_reset_simr(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_SIMR); +} + +/* + * qcom_set_retry() + */ +static inline void qcom_set_retry(struct nss_gmac_hal_dev *nghd, uint32_t retry) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1); + data &= ~QCOM_RETRY_POS; + retry = retry << QCOM_RETRY_LSB; + data |= retry; + hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data); +} + +/* + * qcom_set_prlen() + */ +static inline void qcom_set_prlen(struct nss_gmac_hal_dev *nghd, uint32_t prlen) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1); + data &= ~QCOM_PRLEN_POS; + prlen = prlen << QCOM_PRLEN_LSB; + data |= prlen; + hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data); +} + +/* + * qcom_set_ppad() + */ +static inline void 
qcom_set_ppad(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PPAD); +} + +/* + * qcom_reset_ppad() + */ +static inline void qcom_reset_ppad(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PPAD); +} + +/* + * qcom_set_povr() + */ +static inline void qcom_set_povr(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_POVR); +} + +/* + * qcom_reset_povr() + */ +static inline void qcom_reset_povr(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_POVR); +} + +/* + * qcom_set_phug() + */ +static inline void qcom_set_phug(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PHUG); +} + +/* + * qcom_reset_phug() + */ +static inline void qcom_reset_phug(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_PHUG); +} + +/* + * qcom_set_mbof() + */ +static inline void qcom_set_mbof(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_MBOF); +} + +/* + * qcom_reset_mbof() + */ +static inline void qcom_reset_mbof(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_MBOF); +} + +/* + * qcom_set_lcol() + */ +static inline void qcom_set_lcol(struct nss_gmac_hal_dev *nghd, uint32_t lcol) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, QCOM_MAC_CTRL1); + data &= ~QCOM_LCOL_POS; + lcol = lcol << QCOM_LCOL_LSB; + data |= lcol; + hal_write_reg(nghd->mac_base, QCOM_MAC_CTRL1, data); +} + +/* + * qcom_set_long_jam() + */ +static inline void qcom_set_long_jam(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_LONG_JAM); +} + +/* + * qcom_reset_long_jam() + */ +static inline void qcom_reset_long_jam(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL1, QCOM_LONG_JAM); +} + +/* + * qcom_set_ipg_dec_len() + */ +static inline void qcom_set_ipg_dec_len(struct nss_gmac_hal_dev *nghd) +{ + 
hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC_LEN); +} + +/* + * qcom_reset_ipg_dec_len() + */ +static inline void qcom_reset_ipg_dec_len(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC_LEN); +} + +/* + * qcom_set_ctrl2_test_pause() + */ +static inline void qcom_set_ctrl2_test_pause(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_TEST_PAUSE); +} + +/* + * qcom_reset_ctrl2_test_pause() + */ +static inline void qcom_reset_ctrl2_test_pause(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_TEST_PAUSE); +} + +/* + * qcom_set_mac_loopback() + */ +static inline void qcom_set_mac_loopback(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_MAC_LOOPBACK); +} + +/* + * qcom_reset_mac_loopback() + */ +static inline void qcom_reset_mac_loopback(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_MAC_LOOPBACK); +} + +/* + * qcom_set_ipg_dec() + */ +static inline void qcom_set_ipg_dec(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC); +} + +/* + * qcom_reset_ipg_dec() + */ +static inline void qcom_reset_ipg_dec(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_IPG_DEC); +} + +/* + * qcom_set_crs_sel() + */ +static inline void qcom_set_crs_sel(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_SRS_SEL); +} + +/* + * qcom_reset_crs_sel() + */ +static inline void qcom_reset_crs_sel(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_SRS_SEL); +} + +/* + * qcom_set_crc_rsv() + */ +static inline void qcom_set_crc_rsv(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_CRC_RSV); +} + +/* + * qcom_reset_crc_rsv() + */ +static inline void qcom_reset_crc_rsv(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_CTRL2, QCOM_CRC_RSV); +} + +/* + * 
qcom_set_ipgr1() + */ +static inline void qcom_set_ipgr1(struct nss_gmac_hal_dev *nghd, uint32_t ipgr1) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL); + data &= ~QCOM_DBG_IPGR1_POS; + ipgr1 = ipgr1 << QCOM_DBG_IPGR1_LSB; + data |= ipgr1; + hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL, data); +} + +/* + * qcom_set_hihg_ipg() + */ +static inline void qcom_set_hihg_ipg(struct nss_gmac_hal_dev *nghd, + uint32_t hihg_ipg) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL); + data &= ~QCOM_DBG_HIHG_IPG_POS; + data |= hihg_ipg << QCOM_DBG_HIHG_IPG_LSB; + hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL, data); +} + +/* + * qcom_set_mac_ipg_ctrl() + */ +static inline void qcom_set_mac_ipg_ctrl(struct nss_gmac_hal_dev *nghd, + uint32_t mac_ipg_ctrl) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL); + data &= ~QCOM_DBG_MAC_IPG_CTRL_POS; + data |= mac_ipg_ctrl << QCOM_DBG_MAC_IPG_CTRL_LSB; + hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_CTRL, data); +} + +/* + * qcom_set_mac_len_ctrl() + */ +static inline void qcom_set_mac_len_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_MAC_LEN_CTRL); +} + +/* + * qcom_reset_mac_len_ctrl() + */ +static inline void qcom_reset_mac_len_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_MAC_LEN_CTRL); +} + +/* + * qcom_set_edxsdfr_transmit() + */ +static inline void qcom_set_edxsdfr_transmit(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_EDxSDFR_TRANS); +} + +/* + * qcom_reset_edxsdfr_transmit() + */ +static inline void qcom_reset_edxsdfr_transmit(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, QCOM_MAC_DBG_CTRL, QCOM_DBG_EDxSDFR_TRANS); +} + +/* + * qcom_set_mac_dbg_addr() + */ +static inline void qcom_set_mac_dbg_addr(struct nss_gmac_hal_dev *nghd, + uint8_t mac_dbg_addr) +{ + hal_write_reg(nghd->mac_base, 
QCOM_MAC_DBG_ADDR, mac_dbg_addr); +} + +/* + * qcom_set_mac_dbg_data() + */ +static inline void qcom_set_mac_dbg_data(struct nss_gmac_hal_dev *nghd, + uint32_t mac_dbg_data) +{ + hal_write_reg(nghd->mac_base, QCOM_MAC_DBG_DATA, mac_dbg_data); +} + +/* + * qcom_set_mac_jumbosize() + */ +static inline void qcom_set_mac_jumbosize(struct nss_gmac_hal_dev *nghd, + uint16_t mac_jumbo_size) +{ + hal_write_reg(nghd->mac_base, QCOM_MAC_JMB_SIZE, mac_jumbo_size); +} + +/* + * qcom_clear_mib_ctrl() + */ +static inline void qcom_clear_mib_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_write_reg(nghd->mac_base, QCOM_MAC_MIB_CTRL, 0); +} + +/* + * qcom_set_mib_ctrl() + */ +static inline void qcom_set_mib_ctrl(struct nss_gmac_hal_dev *nghd, + int mib_settings) +{ + hal_set_reg_bits(nghd, QCOM_MAC_MIB_CTRL, + mib_settings); +} + +/* + * qcom_get_stats() + */ +static int qcom_get_stats(struct nss_gmac_hal_dev *nghd) +{ + struct qcom_hal_dev *qhd = (struct qcom_hal_dev *)nghd; + fal_mib_counter_t *stats = &(qhd->stats); + + if (fal_mib_counter_get(0, nghd->mac_id, stats) < 0) + return -1; + + return 0; +} +#endif /* __QCOM_DEV_H__ */ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_if.c b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_if.c new file mode 100644 index 000000000..b9b5968bf --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_if.c @@ -0,0 +1,479 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include +#include +#include +#include +#include +#include +#include +#include "qcom_dev.h" + +#define QCOM_STAT(m) offsetof(fal_mib_counter_t, m) + +/* + * Ethtool stats pointer structure + */ +struct qcom_ethtool_stats { + uint8_t stat_string[ETH_GSTRING_LEN]; + uint32_t stat_offset; +}; + +/* + * Array of strings describing statistics + */ +static const struct qcom_ethtool_stats qcom_gstrings_stats[] = { + {"rx_broadcast", QCOM_STAT(RxBroad)}, + {"rx_pause", QCOM_STAT(RxPause)}, + {"rx_unicast", QCOM_STAT(RxUniCast)}, + {"rx_multicast", QCOM_STAT(RxMulti)}, + {"rx_fcserr", QCOM_STAT(RxFcsErr)}, + {"rx_alignerr", QCOM_STAT(RxAllignErr)}, + {"rx_runt", QCOM_STAT(RxRunt)}, + {"rx_frag", QCOM_STAT(RxFragment)}, + {"rx_jmbfcserr", QCOM_STAT(RxJumboFcsErr)}, + {"rx_jmbalignerr", QCOM_STAT(RxJumboAligenErr)}, + {"rx_pkt64", QCOM_STAT(Rx64Byte)}, + {"rx_pkt65to127", QCOM_STAT(Rx128Byte)}, + {"rx_pkt128to255", QCOM_STAT(Rx256Byte)}, + {"rx_pkt256to511", QCOM_STAT(Rx512Byte)}, + {"rx_pkt512to1023", QCOM_STAT(Rx1024Byte)}, + {"rx_pkt1024to1518", QCOM_STAT(Rx1518Byte)}, + {"rx_pkt1519tox", QCOM_STAT(RxMaxByte)}, + {"rx_toolong", QCOM_STAT(RxTooLong)}, + {"rx_pktgoodbyte", QCOM_STAT(RxGoodByte)}, + {"rx_pktbadbyte", QCOM_STAT(RxBadByte)}, + {"rx_overflow", QCOM_STAT(RxOverFlow)}, + {"tx_broadcast", QCOM_STAT(TxBroad)}, + {"tx_pause", QCOM_STAT(TxPause)}, + {"tx_multicast", QCOM_STAT(TxMulti)}, + {"tx_underrun", QCOM_STAT(TxUnderRun)}, + {"tx_pkt64", QCOM_STAT(Tx64Byte)}, + {"tx_pkt65to127", QCOM_STAT(Tx128Byte)}, + {"tx_pkt128to255", QCOM_STAT(Tx256Byte)}, + 
{"tx_pkt256to511", QCOM_STAT(Tx512Byte)}, + {"tx_pkt512to1023", QCOM_STAT(Tx1024Byte)}, + {"tx_pkt1024to1518", QCOM_STAT(Tx1518Byte)}, + {"tx_pkt1519tox", QCOM_STAT(TxMaxByte)}, + {"tx_oversize", QCOM_STAT(TxOverSize)}, + {"tx_pktbyte_h", QCOM_STAT(TxByte)}, + {"tx_collisions", QCOM_STAT(TxCollision)}, + {"tx_abortcol", QCOM_STAT(TxAbortCol)}, + {"tx_multicol", QCOM_STAT(TxMultiCol)}, + {"tx_singlecol", QCOM_STAT(TxSingalCol)}, + {"tx_exesdeffer", QCOM_STAT(TxExcDefer)}, + {"tx_deffer", QCOM_STAT(TxDefer)}, + {"tx_latecol", QCOM_STAT(TxLateCol)}, + {"tx_unicast", QCOM_STAT(TxUniCast)}, +}; + +/* + * Array of strings describing private flag names + */ +static const char * const qcom_strings_priv_flags[] = { + "linkpoll", + "tstamp", + "tsmode", +}; + +#define QCOM_STATS_LEN ARRAY_SIZE(qcom_gstrings_stats) +#define QCOM_PRIV_FLAGS_LEN ARRAY_SIZE(qcom_strings_priv_flags) + +/* + * qcom_set_mac_speed() + */ +static int32_t qcom_set_mac_speed(struct nss_gmac_hal_dev *nghd, + uint32_t mac_speed) +{ + struct net_device *netdev = nghd->netdev; + + netdev_warn(netdev, "API deprecated\n"); + return 0; +} + +/* + * qcom_get_mac_speed() + */ +static uint32_t qcom_get_mac_speed(struct nss_gmac_hal_dev *nghd) +{ + struct net_device *netdev = nghd->netdev; + + netdev_warn(netdev, "API deprecated\n"); + return 0; +} + +/* + * qcom_set_duplex_mode() + */ +static void qcom_set_duplex_mode(struct nss_gmac_hal_dev *nghd, + uint8_t duplex_mode) +{ + struct net_device *netdev = nghd->netdev; + + netdev_warn(netdev, "This API deprecated\n"); +} + +/* + * qcom_get_duplex_mode() + */ +static uint8_t qcom_get_duplex_mode(struct nss_gmac_hal_dev *nghd) +{ + struct net_device *netdev = nghd->netdev; + + netdev_warn(netdev, "API deprecated\n"); + return 0; +} + +/* + * qcom_rx_flow_control() + */ +static void qcom_rx_flow_control(struct nss_gmac_hal_dev *nghd, bool enabled) +{ + if (enabled) + qcom_set_rx_flow_ctrl(nghd); + else + qcom_clear_rx_flow_ctrl(nghd); +} + +/* + * 
qcom_tx_flow_control() + */ +static void qcom_tx_flow_control(struct nss_gmac_hal_dev *nghd, bool enabled) +{ + if (enabled) + qcom_set_tx_flow_ctrl(nghd); + else + qcom_clear_tx_flow_ctrl(nghd); +} + +/* + * qcom_get_mib_stats() + */ +static int32_t qcom_get_mib_stats(struct nss_gmac_hal_dev *nghd) +{ + if (qcom_get_stats(nghd)) + return -1; + + return 0; +} + +/* + * qcom_set_maxframe() + */ +static int32_t qcom_set_maxframe(struct nss_gmac_hal_dev *nghd, + uint32_t maxframe) +{ + return fal_port_max_frame_size_set(0, nghd->mac_id, maxframe); +} + +/* + * qcom_get_maxframe() + */ +static int32_t qcom_get_maxframe(struct nss_gmac_hal_dev *nghd) +{ + int ret; + uint32_t mtu; + + ret = fal_port_max_frame_size_get(0, nghd->mac_id, &mtu); + + if (!ret) + return mtu; + + return ret; +} + +/* + * qcom_get_netdev_stats() + */ +static int32_t qcom_get_netdev_stats(struct nss_gmac_hal_dev *nghd, + struct rtnl_link_stats64 *stats) +{ + struct qcom_hal_dev *qhd = (struct qcom_hal_dev *)nghd; + fal_mib_counter_t *hal_stats = &(qhd->stats); + + if (qcom_get_mib_stats(nghd)) + return -1; + + stats->rx_packets = hal_stats->RxUniCast + hal_stats->RxBroad + + hal_stats->RxMulti; + stats->tx_packets = hal_stats->TxUniCast + hal_stats->TxBroad + + hal_stats->TxMulti; + stats->rx_bytes = hal_stats->RxGoodByte; + stats->tx_bytes = hal_stats->TxByte; + + /* RX errors */ + stats->rx_crc_errors = hal_stats->RxFcsErr + hal_stats->RxJumboFcsErr; + stats->rx_frame_errors = hal_stats->RxAllignErr + + hal_stats->RxJumboAligenErr; + stats->rx_fifo_errors = hal_stats->RxRunt; + stats->rx_errors = stats->rx_crc_errors + stats->rx_frame_errors + + stats->rx_fifo_errors; + + stats->rx_dropped = hal_stats->RxTooLong + stats->rx_errors; + + /* TX errors */ + stats->tx_fifo_errors = hal_stats->TxUnderRun; + stats->tx_aborted_errors = hal_stats->TxAbortCol; + stats->tx_errors = stats->tx_fifo_errors + stats->tx_aborted_errors; + + stats->collisions = hal_stats->TxCollision; + stats->multicast = 
hal_stats->RxMulti; + + return 0; +} + +/* + * qcom_get_strset_count() + * Get string set count for ethtool operations + */ +int32_t qcom_get_strset_count(struct nss_gmac_hal_dev *nghd, int32_t sset) +{ + struct net_device *netdev = nghd->netdev; + + switch (sset) { + case ETH_SS_STATS: + return QCOM_STATS_LEN; + case ETH_SS_PRIV_FLAGS: + return QCOM_PRIV_FLAGS_LEN; + } + + netdev_dbg(netdev, "%s: Invalid string set\n", __func__); + return -EPERM; +} + +/* + * qcom_get_strings() + * Get strings + */ +int32_t qcom_get_strings(struct nss_gmac_hal_dev *nghd, int32_t sset, + uint8_t *data) +{ + struct net_device *netdev = nghd->netdev; + int i; + + switch (sset) { + case ETH_SS_STATS: + for (i = 0; i < QCOM_STATS_LEN; i++) { + memcpy(data, qcom_gstrings_stats[i].stat_string, + strlen(qcom_gstrings_stats[i].stat_string)); + data += ETH_GSTRING_LEN; + } + break; + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < QCOM_PRIV_FLAGS_LEN; i++) { + memcpy(data, qcom_strings_priv_flags[i], + strlen(qcom_strings_priv_flags[i])); + data += ETH_GSTRING_LEN; + } + break; + default: + netdev_dbg(netdev, "%s: Invalid string set\n", __func__); + return -EPERM; + } + + return 0; +} + +/* + * qcom_get_eth_stats() + */ +static int32_t qcom_get_eth_stats(struct nss_gmac_hal_dev *nghd, uint64_t *data) +{ + struct qcom_hal_dev *qhd = (struct qcom_hal_dev *)nghd; + fal_mib_counter_t *stats = &(qhd->stats); + uint8_t *p; + int i; + + if (qcom_get_mib_stats(nghd)) + return -1; + + for (i = 0; i < QCOM_STATS_LEN; i++) { + p = (uint8_t *)stats + qcom_gstrings_stats[i].stat_offset; + data[i] = *(uint32_t *)p; + } + + return 0; +} + +/* + * qcom_send_pause_frame() + */ +static void qcom_send_pause_frame(struct nss_gmac_hal_dev *nghd) +{ + qcom_set_ctrl2_test_pause(nghd); +} + +/* + * qcom_stop_pause_frame() + */ +static void qcom_stop_pause_frame(struct nss_gmac_hal_dev *nghd) +{ + qcom_reset_ctrl2_test_pause(nghd); +} + +/* + * qcom_start() + */ +static int32_t qcom_start(struct nss_gmac_hal_dev *nghd) 
+{ + qcom_set_full_duplex(nghd); + + /* TODO: Read speed from dts */ + + if (qcom_set_mac_speed(nghd, SPEED_1000)) + return -1; + + qcom_tx_enable(nghd); + qcom_rx_enable(nghd); + + netdev_dbg(nghd->netdev, "%s: mac_base:0x%px mac_enable:0x%x\n", + __func__, nghd->mac_base, + hal_read_reg(nghd->mac_base, QCOM_MAC_ENABLE)); + + return 0; +} + +/* + * qcom_stop() + */ +static int32_t qcom_stop(struct nss_gmac_hal_dev *nghd) +{ + qcom_tx_disable(nghd); + qcom_rx_disable(nghd); + + netdev_dbg(nghd->netdev, "%s: mac_base:0x%px mac_enable:0x%x\n", + __func__, nghd->mac_base, + hal_read_reg(nghd->mac_base, QCOM_MAC_ENABLE)); + return 0; +} + +/* + * qcom_init() + */ +static void *qcom_init(struct gmac_hal_platform_data *gmacpdata) +{ + struct qcom_hal_dev *qhd = NULL; + struct net_device *ndev = NULL; + struct nss_dp_dev *dp_priv = NULL; + struct resource *res; + + ndev = gmacpdata->netdev; + dp_priv = netdev_priv(ndev); + + res = platform_get_resource(dp_priv->pdev, IORESOURCE_MEM, 0); + if (!res) { + netdev_dbg(ndev, "Resource get failed.\n"); + return NULL; + } + + if (!devm_request_mem_region(&dp_priv->pdev->dev, res->start, + resource_size(res), ndev->name)) { + netdev_dbg(ndev, "Request mem region failed. Returning...\n"); + return NULL; + } + + qhd = (struct qcom_hal_dev *)devm_kzalloc(&dp_priv->pdev->dev, + sizeof(struct qcom_hal_dev), GFP_KERNEL); + if (!qhd) { + netdev_dbg(ndev, "kzalloc failed. 
Returning...\n"); + return NULL; + } + + /* Save netdev context in QCOM HAL context */ + qhd->nghd.netdev = gmacpdata->netdev; + qhd->nghd.mac_id = gmacpdata->macid; + + /* Populate the mac base addresses */ + qhd->nghd.mac_base = devm_ioremap_nocache(&dp_priv->pdev->dev, + res->start, resource_size(res)); + if (!qhd->nghd.mac_base) { + netdev_dbg(ndev, "ioremap fail.\n"); + return NULL; + } + + spin_lock_init(&qhd->nghd.slock); + + netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n", + gmacpdata->reg_len, + ndev->base_addr, + qhd->nghd.mac_base); + + /* Reset MIB Stats */ + if (fal_mib_port_flush_counters(0, qhd->nghd.mac_id)) { + netdev_dbg(ndev, "MIB stats Reset fail.\n"); + } + + return (struct nss_gmac_hal_dev *)qhd; +} + +/* + * qcom_get_mac_address() + */ +static void qcom_get_mac_address(struct nss_gmac_hal_dev *nghd, + uint8_t *macaddr) +{ + uint32_t data = hal_read_reg(nghd->mac_base, QCOM_MAC_ADDR0); + macaddr[5] = (data >> 8) & 0xff; + macaddr[4] = (data) & 0xff; + + data = hal_read_reg(nghd->mac_base, QCOM_MAC_ADDR1); + macaddr[0] = (data >> 24) & 0xff; + macaddr[1] = (data >> 16) & 0xff; + macaddr[2] = (data >> 8) & 0xff; + macaddr[3] = (data) & 0xff; +} + +/* + * qcom_set_mac_address() + */ +static void qcom_set_mac_address(struct nss_gmac_hal_dev *nghd, + uint8_t *macaddr) +{ + uint32_t data = (macaddr[5] << 8) | macaddr[4]; + hal_write_reg(nghd->mac_base, QCOM_MAC_ADDR0, data); + data = (macaddr[0] << 24) | (macaddr[1] << 16) + | (macaddr[2] << 8) | macaddr[3]; + hal_write_reg(nghd->mac_base, QCOM_MAC_ADDR1, data); +} + +/* + * MAC hal_ops base structure + */ +struct nss_gmac_hal_ops qcom_hal_ops = { + .init = &qcom_init, + .start = &qcom_start, + .stop = &qcom_stop, + .setmacaddr = &qcom_set_mac_address, + .getmacaddr = &qcom_get_mac_address, + .rxflowcontrol = &qcom_rx_flow_control, + .txflowcontrol = &qcom_tx_flow_control, + .setspeed = &qcom_set_mac_speed, + .getspeed = &qcom_get_mac_speed, + .setduplex = 
&qcom_set_duplex_mode, + .getduplex = &qcom_get_duplex_mode, + .getstats = &qcom_get_mib_stats, + .setmaxframe = &qcom_set_maxframe, + .getmaxframe = &qcom_get_maxframe, + .getndostats = &qcom_get_netdev_stats, + .getssetcount = &qcom_get_strset_count, + .getstrings = &qcom_get_strings, + .getethtoolstats = &qcom_get_eth_stats, + .sendpause = &qcom_send_pause_frame, + .stoppause = &qcom_stop_pause_frame, +}; diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_reg.h b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_reg.h new file mode 100644 index 000000000..9210c2a50 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/qcom/qcom_reg.h @@ -0,0 +1,156 @@ +/* + ************************************************************************** + * Copyright (c) 2016,2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __QCOM_REG_H__ +#define __QCOM_REG_H__ + +/* Register Offsets */ +/* Offsets of GMAC config and status registers within NSS_GMAC_QCOM_MAC_BASE */ +#define QCOM_MAC_ENABLE 0x0000 +#define QCOM_MAC_SPEED 0x0004 +#define QCOM_MAC_ADDR0 0x0008 +#define QCOM_MAC_ADDR1 0x000c +#define QCOM_MAC_CTRL0 0x0010 +#define QCOM_MAC_CTRL1 0x0014 +#define QCOM_MAC_CTRL2 0x0018 +#define QCOM_MAC_DBG_CTRL 0x001c +#define QCOM_MAC_DBG_ADDR 0x0020 +#define QCOM_MAC_DBG_DATA 0x0024 +#define QCOM_MAC_JMB_SIZE 0x0030 +#define QCOM_MAC_MIB_CTRL 0x0034 + +/* RX stats */ +#define QCOM_RXBROAD 0x0040 +#define QCOM_RXPAUSE 0x0044 +#define QCOM_RXMULTI 0x0048 +#define QCOM_RXFCSERR 0x004c +#define QCOM_RXALIGNERR 0x0050 +#define QCOM_RXRUNT 0x0054 +#define QCOM_RXFRAG 0x0058 +#define QCOM_RXJMBFCSERR 0x005c +#define QCOM_RXJMBALIGNERR 0x0060 +#define QCOM_RXPKT64 0x0064 +#define QCOM_RXPKT65TO127 0x0068 +#define QCOM_RXPKT128TO255 0x006c +#define QCOM_RXPKT256TO511 0x0070 +#define QCOM_RXPKT512TO1023 0x0074 +#define QCOM_RXPKT1024TO1518 0x0078 +#define QCOM_RXPKT1519TOX 0x007c +#define QCOM_RXPKTTOOLONG 0x0080 +#define QCOM_RXPKTGOODBYTE_L 0x0084 +#define QCOM_RXPKTGOODBYTE_H 0x0088 +#define QCOM_RXPKTBADBYTE_L 0x008c +#define QCOM_RXPKTBADBYTE_H 0x0090 +#define QCOM_RXUNI 0x0094 + +/* TX stats */ +#define QCOM_TXBROAD 0x00a0 +#define QCOM_TXPAUSE 0x00a4 +#define QCOM_TXMULTI 0x00a8 +#define QCOM_TXUNDERUN 0x00aC +#define QCOM_TXPKT64 0x00b0 +#define QCOM_TXPKT65TO127 0x00b4 +#define QCOM_TXPKT128TO255 0x00b8 +#define QCOM_TXPKT256TO511 0x00bc +#define QCOM_TXPKT512TO1023 0x00c0 +#define QCOM_TXPKT1024TO1518 0x00c4 +#define QCOM_TXPKT1519TOX 0x00c8 +#define QCOM_TXPKTBYTE_L 0x00cc +#define QCOM_TXPKTBYTE_H 0x00d0 +#define QCOM_TXCOLLISIONS 0x00d4 +#define QCOM_TXABORTCOL 0x00d8 +#define QCOM_TXMULTICOL 0x00dc +#define QCOM_TXSINGLECOL 0x00e0 +#define QCOM_TXEXCESSIVEDEFER 0x00e4 +#define QCOM_TXDEFER 
0x00e8 +#define QCOM_TXLATECOL 0x00ec +#define QCOM_TXUNI 0x00f0 + +/* Bit Masks */ +/* GMAC BITs */ +#define QCOM_RX_MAC_ENABLE 1 +#define QCOM_TX_MAC_ENABLE 0x2 +#define QCOM_DUPLEX 0x10 +#define QCOM_RX_FLOW_ENABLE 0x20 +#define QCOM_TX_FLOW_ENABLE 0x40 + +#define QCOM_MAC_SPEED_10 0 +#define QCOM_MAC_SPEED_100 1 +#define QCOM_MAC_SPEED_1000 2 + +/* MAC CTRL0 */ +#define QCOM_IPGT_POS 0x0000007f +#define QCOM_IPGT_LSB 0 +#define QCOM_IPGR2_POS 0x00007f00 +#define QCOM_IPGR2_LSB 8 +#define QCOM_HALF_THDF_CTRL 0x8000 +#define QCOM_HUGE_RECV 0x10000 +#define QCOM_HUGE_TRANS 0x20000 +#define QCOM_FLCHK 0x40000 +#define QCOM_ABEBE 0x80000 +#define QCOM_AMAXE 0x10000000 +#define QCOM_BPNB 0x20000000 +#define QCOM_NOBO 0x40000000 +#define QCOM_DRBNIB_RXOK 0x80000000 + +/* MAC CTRL1 */ +#define QCOM_JAM_IPG_POS 0x0000000f +#define QCOM_JAM_IPG_LSB 0 +#define QCOM_TPAUSE 0x10 +#define QCOM_TCTL 0x20 +#define QCOM_SSTCT 0x40 +#define QCOM_SIMR 0x80 +#define QCOM_RETRY_POS 0x00000f00 +#define QCOM_RETRY_LSB 8 +#define QCOM_PRLEN_POS 0x0000f000 +#define QCOM_PRLEN_LSB 8 +#define QCOM_PPAD 0x10000 +#define QCOM_POVR 0x20000 +#define QCOM_PHUG 0x40000 +#define QCOM_MBOF 0x80000 +#define QCOM_LCOL_POS 0x0ff00000 +#define QCOM_LCOL_LSB 20 +#define QCOM_LONG_JAM 0x10000000 + +/* MAC CTRL2 */ +#define QCOM_IPG_DEC_LEN 0x2 +#define QCOM_TEST_PAUSE 0x4 +#define QCOM_MAC_LPI_TX_IDLE 0x8 +#define QCOM_MAC_LOOPBACK 0x10 +#define QCOM_IPG_DEC 0x20 +#define QCOM_SRS_SEL 0x40 +#define QCOM_CRC_RSV 0x80 +#define QCOM_MAXFR_POS 0x003fff00 +#define QCOM_MAXFR_LSB 8 + +/* MAC DEBUG_CTRL */ +#define QCOM_DBG_IPGR1_POS 0x0000007f +#define QCOM_DBG_IPGR1_LSB 0 +#define QCOM_DBG_HIHG_IPG_POS 0x0000ff00 +#define QCOM_DBG_HIHG_IPG_LSB 8 +#define QCOM_DBG_MAC_IPG_CTRL_POS 0x0000ff00 +#define QCOM_DBG_MAC_IPG_CTRL_LSB 20 +#define QCOM_DBG_MAC_LEN_CTRL 0x40000000 +#define QCOM_DBG_EDxSDFR_TRANS 0x80000000 + +/* MAC MIB-CTRL*/ +#define QCOM_MIB_ENABLE 1 +#define QCOM_MIB_RESET 0x2 +#define 
QCOM_MIB_RD_CLR 0x4 + +#endif /*__QCOM_REG_H__*/ diff --git a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq5018-q14.dts b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_dev.h similarity index 68% rename from feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq5018-q14.dts rename to feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_dev.h index b510ea6e8..0bfec1b99 100644 --- a/feeds/ipq807x/ipq807x/files/arch/arm/boot/dts/qcom-ipq5018-q14.dts +++ b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_dev.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2020, The Linux Foundation. All rights reserved. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -14,5 +14,17 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#include "../../../arm64/boot/dts/qcom/qcom-ipq5018-q14.dts" -#include "ipq5018.dtsi" +#ifndef __SYN_DEV_H__ +#define __SYN_DEV_H__ + +#include + +/* + * Subclass for base nss_gmac_hal_dev + */ +struct syn_hal_dev { + struct nss_gmac_hal_dev nghd; /* Base class */ + struct nss_dp_gmac_stats stats; /* Stats structure */ +}; + +#endif /*__SYN_DEV_H__*/ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_if.c b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_if.c new file mode 100644 index 000000000..2601ff204 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_if.c @@ -0,0 +1,959 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "syn_dev.h" +#include "syn_reg.h" + +#define SYN_STAT(m) offsetof(struct nss_dp_hal_gmac_stats, m) +#define HW_ERR_SIZE sizeof(uint64_t) + +/* + * Array to store ethtool statistics + */ +struct syn_ethtool_stats { + uint8_t stat_string[ETH_GSTRING_LEN]; + uint64_t stat_offset; +}; + +/* + * Array of strings describing statistics + */ +static const struct syn_ethtool_stats syn_gstrings_stats[] = { + {"rx_bytes", SYN_STAT(rx_bytes)}, + {"rx_packets", SYN_STAT(rx_packets)}, + {"rx_errors", SYN_STAT(rx_errors)}, + {"rx_receive_errors", SYN_STAT(rx_receive_errors)}, + {"rx_descriptor_errors", SYN_STAT(rx_descriptor_errors)}, + {"rx_late_collision_errors", SYN_STAT(rx_late_collision_errors)}, + {"rx_dribble_bit_errors", SYN_STAT(rx_dribble_bit_errors)}, + {"rx_length_errors", SYN_STAT(rx_length_errors)}, + {"rx_ip_header_errors", SYN_STAT(rx_ip_header_errors)}, + {"rx_ip_payload_errors", SYN_STAT(rx_ip_payload_errors)}, + {"rx_no_buffer_errors", SYN_STAT(rx_no_buffer_errors)}, + {"rx_transport_csum_bypassed", SYN_STAT(rx_transport_csum_bypassed)}, + {"tx_bytes", SYN_STAT(tx_bytes)}, + {"tx_packets", SYN_STAT(tx_packets)}, + {"tx_collisions", SYN_STAT(tx_collisions)}, + {"tx_errors", SYN_STAT(tx_errors)}, + {"tx_jabber_timeout_errors", SYN_STAT(tx_jabber_timeout_errors)}, + {"tx_frame_flushed_errors", SYN_STAT(tx_frame_flushed_errors)}, + {"tx_loss_of_carrier_errors", 
SYN_STAT(tx_loss_of_carrier_errors)}, + {"tx_no_carrier_errors", SYN_STAT(tx_no_carrier_errors)}, + {"tx_late_collision_errors", SYN_STAT(tx_late_collision_errors)}, + {"tx_excessive_collision_errors", SYN_STAT(tx_excessive_collision_errors)}, + {"tx_excessive_deferral_errors", SYN_STAT(tx_excessive_deferral_errors)}, + {"tx_underflow_errors", SYN_STAT(tx_underflow_errors)}, + {"tx_ip_header_errors", SYN_STAT(tx_ip_header_errors)}, + {"tx_ip_payload_errors", SYN_STAT(tx_ip_payload_errors)}, + {"tx_dropped", SYN_STAT(tx_dropped)}, + {"rx_missed", SYN_STAT(rx_missed)}, + {"fifo_overflows", SYN_STAT(fifo_overflows)}, + {"rx_scatter_errors", SYN_STAT(rx_scatter_errors)}, + {"tx_ts_create_errors", SYN_STAT(tx_ts_create_errors)}, + {"pmt_interrupts", SYN_STAT(hw_errs[0])}, + {"mmc_interrupts", SYN_STAT(hw_errs[0]) + (1 * HW_ERR_SIZE)}, + {"line_interface_interrupts", SYN_STAT(hw_errs[0]) + (2 * HW_ERR_SIZE)}, + {"fatal_bus_error_interrupts", SYN_STAT(hw_errs[0]) + (3 * HW_ERR_SIZE)}, + {"rx_buffer_unavailable_interrupts", SYN_STAT(hw_errs[0]) + (4 * HW_ERR_SIZE)}, + {"rx_process_stopped_interrupts", SYN_STAT(hw_errs[0]) + (5 * HW_ERR_SIZE)}, + {"tx_underflow_interrupts", SYN_STAT(hw_errs[0]) + (6 * HW_ERR_SIZE)}, + {"rx_overflow_interrupts", SYN_STAT(hw_errs[0]) + (7 * HW_ERR_SIZE)}, + {"tx_jabber_timeout_interrutps", SYN_STAT(hw_errs[0]) + (8 * HW_ERR_SIZE)}, + {"tx_process_stopped_interrutps", SYN_STAT(hw_errs[0]) + (9 * HW_ERR_SIZE)}, + {"gmac_total_ticks", SYN_STAT(gmac_total_ticks)}, + {"gmac_worst_case_ticks", SYN_STAT(gmac_worst_case_ticks)}, + {"gmac_iterations", SYN_STAT(gmac_iterations)}, + {"tx_pause_frames", SYN_STAT(tx_pause_frames)}, + {"mmc_rx_overflow_errors", SYN_STAT(mmc_rx_overflow_errors)}, + {"mmc_rx_watchdog_timeout_errors", SYN_STAT(mmc_rx_watchdog_timeout_errors)}, + {"mmc_rx_crc_errors", SYN_STAT(mmc_rx_crc_errors)}, + {"mmc_rx_ip_header_errors", SYN_STAT(mmc_rx_ip_header_errors)}, + {"mmc_rx_octets_g", SYN_STAT(mmc_rx_octets_g)}, + 
{"mmc_rx_ucast_frames", SYN_STAT(mmc_rx_ucast_frames)}, + {"mmc_rx_bcast_frames", SYN_STAT(mmc_rx_bcast_frames)}, + {"mmc_rx_mcast_frames", SYN_STAT(mmc_rx_mcast_frames)}, + {"mmc_rx_undersize", SYN_STAT(mmc_rx_undersize)}, + {"mmc_rx_oversize", SYN_STAT(mmc_rx_oversize)}, + {"mmc_rx_jabber", SYN_STAT(mmc_rx_jabber)}, + {"mmc_rx_octets_gb", SYN_STAT(mmc_rx_octets_gb)}, + {"mmc_rx_frag_frames_g", SYN_STAT(mmc_rx_frag_frames_g)}, + {"mmc_tx_octets_g", SYN_STAT(mmc_tx_octets_g)}, + {"mmc_tx_ucast_frames", SYN_STAT(mmc_tx_ucast_frames)}, + {"mmc_tx_bcast_frames", SYN_STAT(mmc_tx_bcast_frames)}, + {"mmc_tx_mcast_frames", SYN_STAT(mmc_tx_mcast_frames)}, + {"mmc_tx_deferred", SYN_STAT(mmc_tx_deferred)}, + {"mmc_tx_single_col", SYN_STAT(mmc_tx_single_col)}, + {"mmc_tx_multiple_col", SYN_STAT(mmc_tx_multiple_col)}, + {"mmc_tx_octets_gb", SYN_STAT(mmc_tx_octets_gb)}, +}; + +#define SYN_STATS_LEN ARRAY_SIZE(syn_gstrings_stats) + +/* + * syn_set_rx_flow_ctrl() + */ +static inline void syn_set_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL, + SYN_MAC_FC_RX_FLOW_CONTROL); +} + +/* + * syn_clear_rx_flow_ctrl() + */ +static inline void syn_clear_rx_flow_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, SYN_MAC_FLOW_CONTROL, + SYN_MAC_FC_RX_FLOW_CONTROL); + +} + +/* + * syn_set_tx_flow_ctrl() + */ +static inline void syn_set_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL, + SYN_MAC_FC_TX_FLOW_CONTROL); +} + +/* + * syn_send_tx_pause_frame() + */ +static inline void syn_send_tx_pause_frame(struct nss_gmac_hal_dev *nghd) +{ + syn_set_tx_flow_ctrl(nghd); + hal_set_reg_bits(nghd, SYN_MAC_FLOW_CONTROL, + SYN_MAC_FC_SEND_PAUSE_FRAME); +} + +/* + * syn_clear_tx_flow_ctrl() + */ +static inline void syn_clear_tx_flow_ctrl(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, SYN_MAC_FLOW_CONTROL, + SYN_MAC_FC_TX_FLOW_CONTROL); +} + +/* + * syn_rx_enable() + */ +static inline 
void syn_rx_enable(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_CONFIGURATION, SYN_MAC_RX); + hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_FILTER_OFF); +} + +/* + * syn_tx_enable() + */ +static inline void syn_tx_enable(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_CONFIGURATION, SYN_MAC_TX); +} + +/************Ip checksum offloading APIs*************/ + +/* + * syn_enable_rx_chksum_offload() + * Enable IPv4 header and IPv4/IPv6 TCP/UDP checksum calculation by GMAC. + */ +static inline void syn_enable_rx_chksum_offload(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, + SYN_MAC_CONFIGURATION, SYN_MAC_RX_IPC_OFFLOAD); +} + +/* + * syn_disable_rx_chksum_offload() + * Disable the IP checksum offloading in receive path. + */ +static inline void syn_disable_rx_chksum_offload(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, + SYN_MAC_CONFIGURATION, SYN_MAC_RX_IPC_OFFLOAD); +} + +/* + * syn_rx_tcpip_chksum_drop_enable() + * Instruct the DMA to drop the packets that fail TCP/IP checksum. + * + * This is to instruct the receive DMA engine to drop the recevied + * packet if they fails the tcp/ip checksum in hardware. Valid only when + * full checksum offloading is enabled(type-2). + */ +static inline void syn_rx_tcpip_chksum_drop_enable(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, + SYN_DMA_OPERATION_MODE, SYN_DMA_DISABLE_DROP_TCP_CS); +} + +/*******************Ip checksum offloading APIs**********************/ + +/* + * syn_ipc_offload_init() + * Initialize IPC Checksum offloading. + */ +static inline void syn_ipc_offload_init(struct nss_gmac_hal_dev *nghd) +{ + struct nss_dp_dev *dp_priv; + dp_priv = netdev_priv(nghd->netdev); + + if (test_bit(__NSS_DP_RXCSUM, &dp_priv->flags)) { + /* + * Enable the offload engine in the receive path + */ + syn_enable_rx_chksum_offload(nghd); + + /* + * DMA drops the packets if error in encapsulated ethernet + * payload. 
+ */ + syn_rx_tcpip_chksum_drop_enable(nghd); + netdev_dbg(nghd->netdev, "%s: enable Rx checksum\n", __func__); + } else { + syn_disable_rx_chksum_offload(nghd); + netdev_dbg(nghd->netdev, "%s: disable Rx checksum\n", __func__); + } +} + +/* + * syn_disable_mac_interrupt() + * Disable all the interrupts. + */ +static inline void syn_disable_mac_interrupt(struct nss_gmac_hal_dev *nghd) +{ + hal_write_reg(nghd->mac_base, SYN_INTERRUPT_MASK, 0xffffffff); +} + +/* + * syn_disable_mmc_tx_interrupt() + * Disable the MMC Tx interrupt. + * + * The MMC tx interrupts are masked out as per the mask specified. + */ +static inline void syn_disable_mmc_tx_interrupt(struct nss_gmac_hal_dev *nghd, + uint32_t mask) +{ + hal_set_reg_bits(nghd, SYN_MMC_TX_INTERRUPT_MASK, mask); +} + +/* + * syn_disable_mmc_rx_interrupt() + * Disable the MMC Rx interrupt. + * + * The MMC rx interrupts are masked out as per the mask specified. + */ +static inline void syn_disable_mmc_rx_interrupt(struct nss_gmac_hal_dev *nghd, + uint32_t mask) +{ + hal_set_reg_bits(nghd, SYN_MMC_RX_INTERRUPT_MASK, mask); +} + +/* + * syn_disable_mmc_ipc_rx_interrupt() + * Disable the MMC ipc rx checksum offload interrupt. + * + * The MMC ipc rx checksum offload interrupts are masked out as + * per the mask specified. + */ +static inline void syn_disable_mmc_ipc_rx_interrupt(struct nss_gmac_hal_dev *nghd, + uint32_t mask) +{ + hal_set_reg_bits(nghd, SYN_MMC_IPC_RX_INTR_MASK, mask); +} + +/* + * syn_disable_dma_interrupt() + * Disables all DMA interrupts. + */ +void syn_disable_dma_interrupt(struct nss_gmac_hal_dev *nghd) +{ + hal_write_reg(nghd->mac_base, SYN_DMA_INT_ENABLE, SYN_DMA_INT_DISABLE); +} + +/* + * syn_enable_dma_interrupt() + * Enables all DMA interrupts. + */ +void syn_enable_dma_interrupt(struct nss_gmac_hal_dev *nghd) +{ + hal_write_reg(nghd->mac_base, SYN_DMA_INT_ENABLE, SYN_DMA_INT_EN); +} + +/* + * syn_disable_interrupt_all() + * Disable all the interrupts. 
+ */ +static inline void syn_disable_interrupt_all(struct nss_gmac_hal_dev *nghd) +{ + syn_disable_mac_interrupt(nghd); + syn_disable_dma_interrupt(nghd); + syn_disable_mmc_tx_interrupt(nghd, 0xFFFFFFFF); + syn_disable_mmc_rx_interrupt(nghd, 0xFFFFFFFF); + syn_disable_mmc_ipc_rx_interrupt(nghd, 0xFFFFFFFF); +} + +/* + * syn_dma_bus_mode_init() + * Function to program DMA bus mode register. + */ +static inline void syn_dma_bus_mode_init(struct nss_gmac_hal_dev *nghd) +{ + hal_write_reg(nghd->mac_base, SYN_DMA_BUS_MODE, SYN_DMA_BUS_MODE_VAL); +} + +/* + * syn_clear_dma_status() + * Clear all the pending dma interrupts. + */ +void syn_clear_dma_status(struct nss_gmac_hal_dev *nghd) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, SYN_DMA_STATUS); + hal_write_reg(nghd->mac_base, SYN_DMA_STATUS, data); +} + +/* + * syn_enable_dma_rx() + * Enable Rx GMAC operation + */ +void syn_enable_dma_rx(struct nss_gmac_hal_dev *nghd) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE); + data |= SYN_DMA_RX_START; + hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data); +} + +/* + * syn_disable_dma_rx() + * Disable Rx GMAC operation + */ +void syn_disable_dma_rx(struct nss_gmac_hal_dev *nghd) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE); + data &= ~SYN_DMA_RX_START; + hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data); +} + +/* + * syn_enable_dma_tx() + * Enable Rx GMAC operation + */ +void syn_enable_dma_tx(struct nss_gmac_hal_dev *nghd) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE); + data |= SYN_DMA_TX_START; + hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, data); +} + +/* + * syn_disable_dma_tx() + * Disable Rx GMAC operation + */ +void syn_disable_dma_tx(struct nss_gmac_hal_dev *nghd) +{ + uint32_t data; + + data = hal_read_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE); + data &= ~SYN_DMA_TX_START; + hal_write_reg(nghd->mac_base, 
SYN_DMA_OPERATION_MODE, data); +} + +/* + * syn_resume_dma_tx + * Resumes the DMA Transmission. + */ +void syn_resume_dma_tx(struct nss_gmac_hal_dev *nghd) +{ + hal_write_reg(nghd->mac_base, SYN_DMA_TX_POLL_DEMAND, 0); +} + +/* + * syn_get_rx_missed + * Get Rx missed errors + */ +uint32_t syn_get_rx_missed(struct nss_gmac_hal_dev *nghd) +{ + uint32_t missed_frame_buff_overflow; + missed_frame_buff_overflow = hal_read_reg(nghd->mac_base, SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER); + return missed_frame_buff_overflow & 0xFFFF; +} + +/* + * syn_get_fifo_overflows + * Get FIFO overflows + */ +uint32_t syn_get_fifo_overflows(struct nss_gmac_hal_dev *nghd) +{ + uint32_t missed_frame_buff_overflow; + missed_frame_buff_overflow = hal_read_reg(nghd->mac_base, SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER); + return (missed_frame_buff_overflow >> 17) & 0x7ff; +} + +/* + * syn_init_tx_desc_base() + * Programs the Dma Tx Base address with the starting address of the descriptor ring or chain. + */ +void syn_init_tx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t tx_desc_dma) +{ + hal_write_reg(nghd->mac_base, SYN_DMA_TX_DESCRIPTOR_LIST_ADDRESS, tx_desc_dma); +} + +/* + * syn_init_rx_desc_base() + * Programs the Dma Rx Base address with the starting address of the descriptor ring or chain. + */ +void syn_init_rx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t rx_desc_dma) +{ + hal_write_reg(nghd->mac_base, SYN_DMA_RX_DESCRIPTOR_LIST_ADDRESS, rx_desc_dma); +} + +/* + * syn_dma_axi_bus_mode_init() + * Function to program DMA AXI bus mode register. + */ +static inline void syn_dma_axi_bus_mode_init(struct nss_gmac_hal_dev *nghd) +{ + hal_write_reg(nghd->mac_base, SYN_DMA_AXI_BUS_MODE, + SYN_DMA_AXI_BUS_MODE_VAL); +} + +/* + * syn_dma_operation_mode_init() + * Function to program DMA Operation Mode register. 
+ */ +static inline void syn_dma_operation_mode_init(struct nss_gmac_hal_dev *nghd) +{ + hal_write_reg(nghd->mac_base, SYN_DMA_OPERATION_MODE, SYN_DMA_OMR); +} + +/* + * syn_broadcast_enable() + * Enables Broadcast frames. + * + * When enabled Address filtering module passes all incoming broadcast frames. + */ +static inline void syn_broadcast_enable(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_BROADCAST); +} + +/* + * syn_multicast_enable() + * Enables Multicast frames. + * + * When enabled all multicast frames are passed. + */ +static inline void syn_multicast_enable(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_MULTICAST_FILTER); +} + +/* + * syn_promisc_enable() + * Enables promiscous mode. + * + * When enabled Address filter modules pass all incoming frames + * regardless of their Destination and source addresses. + */ +static inline void syn_promisc_enable(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, SYN_MAC_FILTER_OFF); + hal_set_reg_bits(nghd, SYN_MAC_FRAME_FILTER, + SYN_MAC_PROMISCUOUS_MODE_ON); +} + +/* + * syn_get_stats() + */ +static int syn_get_stats(struct nss_gmac_hal_dev *nghd) +{ + struct nss_dp_dev *dp_priv; + struct syn_hal_dev *shd; + struct nss_dp_gmac_stats *stats; + + BUG_ON(nghd == NULL); + + shd = (struct syn_hal_dev *)nghd; + stats = &(shd->stats); + + dp_priv = netdev_priv(nghd->netdev); + if (!dp_priv->data_plane_ops) + return -1; + + dp_priv->data_plane_ops->get_stats(dp_priv->dpc, stats); + + return 0; +} + +/* + * syn_rx_flow_control() + */ +static void syn_rx_flow_control(struct nss_gmac_hal_dev *nghd, + bool enabled) +{ + BUG_ON(nghd == NULL); + + if (enabled) + syn_set_rx_flow_ctrl(nghd); + else + syn_clear_rx_flow_ctrl(nghd); +} + +/* + * syn_tx_flow_control() + */ +static void syn_tx_flow_control(struct nss_gmac_hal_dev *nghd, + bool enabled) +{ + BUG_ON(nghd == NULL); + + if (enabled) + 
syn_set_tx_flow_ctrl(nghd); + else + syn_clear_tx_flow_ctrl(nghd); +} + +/* + * syn_get_max_frame_size() + */ +static int32_t syn_get_max_frame_size(struct nss_gmac_hal_dev *nghd) +{ + int ret; + uint32_t mtu; + + BUG_ON(nghd == NULL); + + ret = fal_port_max_frame_size_get(0, nghd->mac_id, &mtu); + + if (!ret) + return mtu; + + return ret; +} + +/* + * syn_set_max_frame_size() + */ +static int32_t syn_set_max_frame_size(struct nss_gmac_hal_dev *nghd, + uint32_t val) +{ + BUG_ON(nghd == NULL); + + return fal_port_max_frame_size_set(0, nghd->mac_id, val); +} + +/* + * syn_set_mac_speed() + */ +static int32_t syn_set_mac_speed(struct nss_gmac_hal_dev *nghd, + uint32_t mac_speed) +{ + struct net_device *netdev; + BUG_ON(nghd == NULL); + + netdev = nghd->netdev; + + netdev_warn(netdev, "API deprecated\n"); + return 0; +} + +/* + * syn_get_mac_speed() + */ +static uint32_t syn_get_mac_speed(struct nss_gmac_hal_dev *nghd) +{ + struct net_device *netdev; + BUG_ON(nghd == NULL); + + netdev = nghd->netdev; + + netdev_warn(netdev, "API deprecated\n"); + return 0; +} + +/* + * syn_set_duplex_mode() + */ +static void syn_set_duplex_mode(struct nss_gmac_hal_dev *nghd, + uint8_t duplex_mode) +{ + struct net_device *netdev; + BUG_ON(nghd == NULL); + + netdev = nghd->netdev; + + netdev_warn(netdev, "API deprecated\n"); +} + +/* + * syn_get_duplex_mode() + */ +static uint8_t syn_get_duplex_mode(struct nss_gmac_hal_dev *nghd) +{ + struct net_device *netdev; + BUG_ON(nghd == NULL); + + netdev = nghd->netdev; + + netdev_warn(netdev, "API deprecated\n"); + return 0; +} + +/* + * syn_get_netdev_stats() + */ +static int syn_get_netdev_stats(struct nss_gmac_hal_dev *nghd, + struct rtnl_link_stats64 *stats) +{ + struct syn_hal_dev *shd; + struct nss_dp_hal_gmac_stats *ndo_stats; + + BUG_ON(nghd == NULL); + + shd = (struct syn_hal_dev *)nghd; + ndo_stats = &(shd->stats.stats); + + /* + * Read stats from the registered dataplane. 
+ */ + if (syn_get_stats(nghd)) + return -1; + + stats->rx_packets = ndo_stats->rx_packets; + stats->rx_bytes = ndo_stats->rx_bytes; + stats->rx_errors = ndo_stats->rx_errors; + stats->rx_dropped = ndo_stats->rx_errors; + stats->rx_length_errors = ndo_stats->rx_length_errors; + stats->rx_over_errors = ndo_stats->mmc_rx_overflow_errors; + stats->rx_crc_errors = ndo_stats->mmc_rx_crc_errors; + stats->rx_frame_errors = ndo_stats->rx_dribble_bit_errors; + stats->rx_fifo_errors = ndo_stats->fifo_overflows; + stats->rx_missed_errors = ndo_stats->rx_missed; + stats->collisions = ndo_stats->tx_collisions + ndo_stats->rx_late_collision_errors; + stats->tx_packets = ndo_stats->tx_packets; + stats->tx_bytes = ndo_stats->tx_bytes; + stats->tx_errors = ndo_stats->tx_errors; + stats->tx_dropped = ndo_stats->tx_dropped; + stats->tx_carrier_errors = ndo_stats->tx_loss_of_carrier_errors + ndo_stats->tx_no_carrier_errors; + stats->tx_fifo_errors = ndo_stats->tx_underflow_errors; + stats->tx_window_errors = ndo_stats->tx_late_collision_errors; + + return 0; +} + +/* + * syn_get_eth_stats() + */ +static int32_t syn_get_eth_stats(struct nss_gmac_hal_dev *nghd, + uint64_t *data) +{ + struct syn_hal_dev *shd; + struct nss_dp_gmac_stats *stats; + uint8_t *p = NULL; + int i; + + BUG_ON(nghd == NULL); + + shd = (struct syn_hal_dev *)nghd; + stats = &(shd->stats); + + /* + * Read stats from the registered dataplane. 
+ */ + if (syn_get_stats(nghd)) + return -1; + + for (i = 0; i < SYN_STATS_LEN; i++) { + p = ((uint8_t *)(stats) + + syn_gstrings_stats[i].stat_offset); + data[i] = *(uint32_t *)p; + } + + return 0; +} + +/* + * syn_get_strset_count() + */ +static int32_t syn_get_strset_count(struct nss_gmac_hal_dev *nghd, + int32_t sset) +{ + struct net_device *netdev; + BUG_ON(nghd == NULL); + + netdev = nghd->netdev; + + switch (sset) { + case ETH_SS_STATS: + return SYN_STATS_LEN; + } + + netdev_dbg(netdev, "%s: Invalid string set\n", __func__); + return -EPERM; +} + +/* + * syn_get_strings() + */ +static int32_t syn_get_strings(struct nss_gmac_hal_dev *nghd, + int32_t stringset, uint8_t *data) +{ + struct net_device *netdev; + int i; + + BUG_ON(nghd == NULL); + + netdev = nghd->netdev; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < SYN_STATS_LEN; i++) { + memcpy(data, syn_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + break; + + default: + netdev_dbg(netdev, "%s: Invalid string set\n", __func__); + return -EPERM; + } + + return 0; +} + +/* + * syn_send_pause_frame() + */ +static void syn_send_pause_frame(struct nss_gmac_hal_dev *nghd) +{ + BUG_ON(nghd == NULL); + + syn_send_tx_pause_frame(nghd); +} + +/* + * syn_set_mac_address() + */ +static void syn_set_mac_address(struct nss_gmac_hal_dev *nghd, + uint8_t *macaddr) +{ + uint32_t data; + + BUG_ON(nghd == NULL); + + if (!macaddr) { + netdev_warn(nghd->netdev, "macaddr is not valid.\n"); + return; + } + + data = (macaddr[5] << 8) | macaddr[4] | SYN_MAC_ADDR_HIGH_AE; + hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH, data); + data = (macaddr[3] << 24) | (macaddr[2] << 16) | (macaddr[1] << 8) + | macaddr[0]; + hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW, data); +} + +/* + * syn_get_mac_address() + */ +static void syn_get_mac_address(struct nss_gmac_hal_dev *nghd, + uint8_t *macaddr) +{ + uint32_t data; + + BUG_ON(nghd == NULL); + + if (!macaddr) { + 
netdev_warn(nghd->netdev, "macaddr is not valid.\n"); + return; + } + + data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH); + macaddr[5] = (data >> 8) & 0xff; + macaddr[4] = (data) & 0xff; + + data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW); + macaddr[3] = (data >> 24) & 0xff; + macaddr[2] = (data >> 16) & 0xff; + macaddr[1] = (data >> 8) & 0xff; + macaddr[0] = (data) & 0xff; +} + +/* + * syn_dma_init() + * Initialize settings for GMAC DMA and AXI bus. + */ +static void syn_dma_init(struct nss_gmac_hal_dev *nghd) +{ + struct net_device *ndev = nghd->netdev; + struct nss_dp_dev *dp_priv = netdev_priv(ndev); + + /* + * Enable SoC specific GMAC clocks. + */ + nss_dp_hal_clk_enable(dp_priv); + + /* + * Configure DMA registers. + */ + syn_dma_bus_mode_init(nghd); + syn_dma_axi_bus_mode_init(nghd); + syn_dma_operation_mode_init(nghd); +} + +/* + * syn_init() + */ +static void *syn_init(struct gmac_hal_platform_data *gmacpdata) +{ + struct syn_hal_dev *shd = NULL; + struct net_device *ndev = NULL; + struct nss_dp_dev *dp_priv = NULL; + struct resource *res; + + ndev = gmacpdata->netdev; + dp_priv = netdev_priv(ndev); + + res = platform_get_resource(dp_priv->pdev, IORESOURCE_MEM, 0); + if (!res) { + netdev_dbg(ndev, "Resource get failed.\n"); + return NULL; + } + + shd = (struct syn_hal_dev *)devm_kzalloc(&dp_priv->pdev->dev, + sizeof(struct syn_hal_dev), + GFP_KERNEL); + if (!shd) { + netdev_dbg(ndev, "kzalloc failed. Returning...\n"); + return NULL; + } + + shd->nghd.mac_reg_len = resource_size(res); + shd->nghd.memres = devm_request_mem_region(&dp_priv->pdev->dev, + res->start, + resource_size(res), + ndev->name); + if (!shd->nghd.memres) { + netdev_dbg(ndev, "Request mem region failed. 
Returning...\n"); + devm_kfree(&dp_priv->pdev->dev, shd); + return NULL; + } + + /* + * Save netdev context in syn HAL context + */ + shd->nghd.netdev = gmacpdata->netdev; + shd->nghd.mac_id = gmacpdata->macid; + shd->nghd.duplex_mode = DUPLEX_FULL; + + set_bit(__NSS_DP_RXCSUM, &dp_priv->flags); + + /* + * Populate the mac base addresses + */ + shd->nghd.mac_base = + devm_ioremap_nocache(&dp_priv->pdev->dev, res->start, + resource_size(res)); + if (!shd->nghd.mac_base) { + netdev_dbg(ndev, "ioremap fail.\n"); + devm_kfree(&dp_priv->pdev->dev, shd); + return NULL; + } + + spin_lock_init(&shd->nghd.slock); + + netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n", + gmacpdata->reg_len, + ndev->base_addr, + shd->nghd.mac_base); + + syn_disable_interrupt_all(&shd->nghd); + syn_dma_init(&shd->nghd); + syn_ipc_offload_init(&shd->nghd); + syn_promisc_enable(&shd->nghd); + syn_broadcast_enable(&shd->nghd); + syn_multicast_enable(&shd->nghd); + syn_rx_enable(&shd->nghd); + syn_tx_enable(&shd->nghd); + + /* + * Reset MIB Stats + */ + if (fal_mib_port_flush_counters(0, shd->nghd.mac_id)) { + netdev_dbg(ndev, "MIB stats Reset fail.\n"); + } + + return (struct nss_gmac_hal_dev *)shd; +} + +/* + * syn_exit() + */ +static void syn_exit(struct nss_gmac_hal_dev *nghd) +{ + struct nss_dp_dev *dp_priv = NULL; + + dp_priv = netdev_priv(nghd->netdev); + devm_iounmap(&dp_priv->pdev->dev, + (void *)nghd->mac_base); + devm_release_mem_region(&dp_priv->pdev->dev, + (nghd->memres)->start, + nghd->mac_reg_len); + + nghd->memres = NULL; + nghd->mac_base = NULL; +} + +struct nss_gmac_hal_ops syn_hal_ops = { + .init = &syn_init, + .start = NULL, + .stop = NULL, + .exit = &syn_exit, + .setmacaddr = &syn_set_mac_address, + .getmacaddr = &syn_get_mac_address, + .rxflowcontrol = &syn_rx_flow_control, + .txflowcontrol = &syn_tx_flow_control, + .setspeed = &syn_set_mac_speed, + .getspeed = &syn_get_mac_speed, + .setduplex = &syn_set_duplex_mode, + .getduplex = &syn_get_duplex_mode, 
+ .setmaxframe = &syn_set_max_frame_size, + .getmaxframe = &syn_get_max_frame_size, + .getndostats = &syn_get_netdev_stats, + .getssetcount = &syn_get_strset_count, + .getstrings = &syn_get_strings, + .getethtoolstats = &syn_get_eth_stats, + .sendpause = &syn_send_pause_frame, +}; diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_reg.h b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_reg.h new file mode 100644 index 000000000..aba916e4e --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/gmac/syn_reg.h @@ -0,0 +1,531 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __SYN_REG_H__ +#define __SYN_REG_H__ + +/* + * MAC register offset + */ +#define SYN_MAC_CONFIGURATION 0x0000 +#define SYN_MAC_FRAME_FILTER 0x0004 +#define SYN_MAC_FLOW_CONTROL 0x0018 +#define SYN_VLAN_TAG 0x001C +#define SYN_VERSION 0x0020 +#define SYN_DEBUG 0x0024 +#define SYN_REMOTE_WAKE_UP_FRAME_FILTER 0x0028 +#define SYN_PMT_CONTROL_STATUS 0x002C +#define SYN_LPI_CONTROL_STATUS 0x0030 +#define SYN_LPI_TIMERS_CONTROL 0x0034 +#define SYN_INTERRUPT_STATUS 0x0038 +#define SYN_INTERRUPT_MASK 0x003C + +/* + * MAC address register offset + */ +#define SYN_MAC_ADDR0_HIGH 0x0040 +#define SYN_MAC_ADDR0_LOW 0x0044 +#define SYN_MAC_ADDR1_HIGH 0x0048 +#define SYN_MAC_ADDR1_LOW 0x004C +#define SYN_MAC_ADDR2_HIGH 0x0050 +#define SYN_MAC_ADDR2_LOW 0x0054 +#define SYN_MAC_ADDR3_HIGH 0x0058 +#define SYN_MAC_ADDR3_LOW 0x005C +#define SYN_MAC_ADDR4_HIGH 0x0060 +#define SYN_MAC_ADDR4_LOW 0x0064 + +/* + * Watchdog timeout register + */ +#define SYN_WDOG_TIMEOUT 0x00DC + +/* + * Mac Management Counters (MMC) register offset + */ +#define SYN_MMC_CONTROL 0x0100 +#define SYN_MMC_RX_INTERRUPT 0x0104 +#define SYN_MMC_TX_INTERRUPT 0x0108 +#define SYN_MMC_RX_INTERRUPT_MASK 0x010C +#define SYN_MMC_TX_INTERRUPT_MASK 0x0110 +#define SYN_MMC_IPC_RX_INTR_MASK 0x0200 + +/* + * DMA Register offset + */ +#define SYN_DMA_BUS_MODE 0x1000 +#define SYN_DMA_TX_POLL_DEMAND 0x1004 +#define SYN_DMA_RX_POLL_DEMAND 0x1008 +#define SYN_DMA_RX_DESCRIPTOR_LIST_ADDRESS 0x100C +#define SYN_DMA_TX_DESCRIPTOR_LIST_ADDRESS 0x1010 +#define SYN_DMA_STATUS 0x1014 +#define SYN_DMA_OPERATION_MODE 0x1018 +#define SYN_DMA_INT_ENABLE 0x101C +#define SYN_DMA_MISSED_FRAME_AND_BUFF_OVERFLOW_COUNTER 0x1020 +#define SYN_DMA_RX_INTERRUPT_WATCHDOG_TIMER 0x1024 +#define SYN_DMA_AXI_BUS_MODE 0x1028 +#define SYN_DMA_AHB_OR_AXI_STATUS 0x102C +#define SYN_DMA_CURRENT_HOST_TX_DESCRIPTOR 0x1048 +#define SYN_DMA_CURRENT_HOST_RX_DESCRIPTOR 0x104C +#define SYN_DMA_CURRENT_HOST_TX_BUFFER_ADDRESS 0x1050 +#define 
SYN_DMA_CURRENT_HOST_RX_BUFFER_ADDRESS 0x1054 + +/* + * Optional HW feature register + */ +#define SYN_HW_FEATURE 0x1058 + +/* + * Register Bit Definitions + */ + +/* + * SYN_MAC_CONFIGURATION = 0x0000, MAC config Register Layout + */ +enum syn_mac_config_reg { + SYN_MAC_TWOKPE = 0x08000000, /* Support for 2K packets */ + SYN_MAC_TWOKPE_ENABLE = 0x08000000, + SYN_MAC_TWOKPE_DISABLE = 0x00000000, + SYN_MAC_CST = 0x02000000, /* (CST) CRC Stripping for Type Frames */ + SYN_MAC_CST_ENABLE = 0x02000000, + SYN_MAC_CST_DISABLE = 0x02000000, + SYN_MAC_TC = 0x01000000, /* (TC) Transmit configuration */ + SYN_MAC_WATCHDOG = 0x00800000, + SYN_MAC_WATCHDOG_ENABLE = 0x00000000, /* Enable watchdog timer */ + SYN_MAC_WATCHDOG_DISABLE = 0x00800000, /* (WD)Disable watchdog timer on Rx */ + SYN_MAC_JABBER = 0x00400000, + SYN_MAC_JABBER_ENABLE = 0x00000000, /* Enable jabber timer */ + SYN_MAC_JABBER_DISABLE = 0x00400000, /* (JD)Disable jabber timer on Tx */ + SYN_MAC_FRAME_BURST = 0x00200000, + SYN_MAC_FRAME_BURST_ENABLE = 0x00200000, /* (BE)Enable frame bursting + during Tx */ + SYN_MAC_FRAME_BURST_DISABLE = 0x00000000, /* Disable frame bursting */ + SYN_MAC_JUMBO_FRAME = 0x00100000, + SYN_MAC_JUMBO_FRAME_ENABLE = 0x00100000, /* (JE)Enable jumbo frame for Rx */ + SYN_MAC_JUMBO_FRAME_DISABLE = 0x00000000, /* Disable jumbo frame */ + SYN_MAC_INTER_FRAME_GAP7 = 0x000E0000, /* (IFG) Config7 - 40bit times */ + SYN_MAC_INTER_FRAME_GAP6 = 0x000C0000, /* (IFG) Config6 - 48bit times */ + SYN_MAC_INTER_FRAME_GAP5 = 0x000A0000, /* (IFG) Config5 - 56bit times */ + SYN_MAC_INTER_FRAME_GAP4 = 0x00080000, /* (IFG) Config4 - 64bit times */ + SYN_MAC_INTER_FRAME_GAP3 = 0x00060000, /* (IFG) Config3 - 72bit times */ + SYN_MAC_INTER_FRAME_GAP2 = 0x00040000, /* (IFG) Config2 - 80bit times */ + SYN_MAC_INTER_FRAME_GAP1 = 0x00020000, /* (IFG) Config1 - 88bit times */ + SYN_MAC_INTER_FRAME_GAP0 = 0x00000000, /* (IFG) Config0 - 96bit times */ + SYN_MAC_DISABLE_CRS = 0x00010000, /* (DCRS) Disable Carrier 
Sense During Transmission */ + SYN_MAC_MII_GMII = 0x00008000, + SYN_MAC_SELECT_MII = 0x00008000, /* (PS)Port Select-MII mode */ + SYN_MAC_SELECT_GMII = 0x00000000, /* GMII mode */ + SYN_MAC_FE_SPEED100 = 0x00004000, /* (FES)Fast Ethernet speed 100Mbps */ + SYN_MAC_FE_SPEED = 0x00004000, /* (FES)Fast Ethernet speed 100Mbps */ + SYN_MAC_FE_SPEED10 = 0x00000000, /* (FES)Fast Ethernet speed 10Mbps */ + SYN_MAC_RX_OWN = 0x00002000, + SYN_MAC_DISABLE_RX_OWN = 0x00002000, /* (DO)Disable receive own packets */ + SYN_MAC_ENABLE_RX_OWN = 0x00000000, /* Enable receive own packets */ + SYN_MAC_LOOPBACK = 0x00001000, + SYN_MAC_LOOPBACK_ON = 0x00001000, /* (LM)Loopback mode for GMII/MII */ + SYN_MAC_LOOPBACK_OFF = 0x00000000, /* Normal mode */ + SYN_MAC_DUPLEX = 0x00000800, + SYN_MAC_FULL_DUPLEX = 0x00000800, /* (DM)Full duplex mode */ + SYN_MAC_HALF_DUPLEX = 0x00000000, /* Half duplex mode */ + SYN_MAC_RX_IPC_OFFLOAD = 0x00000400, /* IPC checksum offload */ + SYN_MAC_RX_IPC_OFFLOAD_ENABLE = 0x00000400, + SYN_MAC_RX_IPC_OFFLOAD_DISABLE = 0x00000000, + SYN_MAC_RETRY = 0x00000200, + SYN_MAC_RETRY_DISABLE = 0x00000200, /* (DR)Disable Retry */ + SYN_MAC_RETRY_ENABLE = 0x00000000, /* Enable retransmission as per BL */ + SYN_MAC_LINK_UP = 0x00000100, /* (LUD)Link UP */ + SYN_MAC_LINK_DOWN = 0x00000100, /* Link Down */ + SYN_MAC_PAD_CRC_STRIP = 0x00000080, + SYN_MAC_PAD_CRC_STRIP_ENABLE = 0x00000080, /* (ACS) Automatic Pad/Crc strip enable */ + SYN_MAC_PAD_CRC_STRIP_DISABLE = 0x00000000, /* Automatic Pad/Crc stripping disable */ + SYN_MAC_BACKOFF_LIMIT = 0x00000060, + SYN_MAC_BACKOFF_LIMIT3 = 0x00000060, /* (BL)Back-off limit in HD mode */ + SYN_MAC_BACKOFF_LIMIT2 = 0x00000040, + SYN_MAC_BACKOFF_LIMIT1 = 0x00000020, + SYN_MAC_BACKOFF_LIMIT0 = 0x00000000, + SYN_MAC_DEFERRAL_CHECK = 0x00000010, + SYN_MAC_DEFERRAL_CHECK_ENABLE = 0x00000010, /* (DC)Deferral check enable in HD mode */ + SYN_MAC_DEFERRAL_CHECK_DISABLE = 0x00000000, /* Deferral check disable */ + SYN_MAC_TX = 0x00000008, + 
SYN_MAC_TX_ENABLE = 0x00000008, /* (TE)Transmitter enable */ + SYN_MAC_TX_DISABLE = 0x00000000, /* Transmitter disable */ + SYN_MAC_RX = 0x00000004, + SYN_MAC_RX_ENABLE = 0x00000004, /* (RE)Receiver enable */ + SYN_MAC_RX_DISABLE = 0x00000000, /* Receiver disable */ + SYN_MAC_PRELEN_RESERVED = 0x00000003, /* Preamble Length for Transmit Frames */ + SYN_MAC_PRELEN_3B = 0x00000002, + SYN_MAC_PRELEN_5B = 0x00000001, + SYN_MAC_PRELEN_7B = 0x00000000, +}; + +/* + * SYN_MAC_FRAME_FILTER = 0x0004, Mac frame filtering controls Register + */ +enum syn_mac_frame_filter_reg { + SYN_MAC_FILTER = 0x80000000, + SYN_MAC_FILTER_OFF = 0x80000000, /* (RA)Receive all incoming packets */ + SYN_MAC_FILTER_ON = 0x00000000, /* Receive filtered pkts only */ + SYN_MAC_HASH_PERFECT_FILTER = 0x00000400, /* Hash or Perfect Filter enable */ + SYN_MAC_SRC_ADDR_FILTER = 0x00000200, + SYN_MAC_SRC_ADDR_FILTER_ENABLE = 0x00000200, /* (SAF)Source Address Filter enable */ + SYN_MAC_SRC_ADDR_FILTER_DISABLE = 0x00000000, + SYN_MAC_SRC_INVA_ADDR_FILTER = 0x00000100, + SYN_MAC_SRC_INV_ADDR_FILTER_EN = 0x00000100, /* (SAIF)Inv Src Addr Filter enable */ + SYN_MAC_SRC_INV_ADDR_FILTER_DIS = 0x00000000, + SYN_MAC_PASS_CONTROL = 0x000000C0, + SYN_MAC_PASS_CONTROL3 = 0x000000C0, /* (PCF)Forwards ctrl frames that pass AF */ + SYN_MAC_PASS_CONTROL2 = 0x00000080, /* Forwards all control frames + even if they fail the AF */ + SYN_MAC_PASS_CONTROL1 = 0x00000040, /* Forwards all control frames except + PAUSE control frames to application + even if they fail the AF */ + SYN_MAC_PASS_CONTROL0 = 0x00000000, /* Don't pass control frames */ + SYN_MAC_BROADCAST = 0x00000020, + SYN_MAC_BROADCAST_DISABLE = 0x00000020, /* (DBF)Disable Rx of broadcast frames */ + SYN_MAC_BROADCAST_ENABLE = 0x00000000, /* Enable broadcast frames */ + SYN_MAC_MULTICAST_FILTER = 0x00000010, + SYN_MAC_MULTICAST_FILTER_OFF = 0x00000010, /* (PM) Pass all multicast packets */ + SYN_MAC_MULTICAST_FILTER_ON = 0x00000000, /* Pass filtered multicast 
packets */ + SYN_MAC_DEST_ADDR_FILTER = 0x00000008, + SYN_MAC_DEST_ADDR_FILTER_INV = 0x00000008, /* (DAIF)Inverse filtering for DA */ + SYN_MAC_DEST_ADDR_FILTER_NOR = 0x00000000, /* Normal filtering for DA */ + SYN_MAC_MCAST_HASH_FILTER = 0x00000004, + SYN_MAC_MCAST_HASH_FILTER_ON = 0x00000004, /* (HMC)perfom multicast hash filtering */ + SYN_MAC_MCAST_HASH_FILTER_OFF = 0x00000000, /* perfect filtering only */ + SYN_MAC_UCAST_HASH_FILTER = 0x00000002, + SYN_MAC_UCAST_HASH_FILTER_ON = 0x00000002, /* (HUC)Unicast Hash filtering only */ + SYN_MAC_UCAST_HASH_FILTER_OFF = 0x00000000, /* perfect filtering only */ + SYN_MAC_PROMISCUOUS_MODE = 0x00000001, + SYN_MAC_PROMISCUOUS_MODE_ON = 0x00000001, /* Receive all frames */ + SYN_MAC_PROMISCUOUS_MODE_OFF = 0x00000000, /* Receive filtered packets only */ +}; + +/* + * SYN_MAC_FLOW_CONTROL = 0x0018, Flow control Register Layout + */ +enum syn_mac_flow_control_reg { + SYN_MAC_FC_PAUSE_TIME_MASK = 0xFFFF0000, /* (PT) PAUSE TIME field + in the control frame */ + SYN_MAC_FC_PAUSE_TIME_SHIFT = 16, + SYN_MAC_FC_PAUSE_LOW_THRESH = 0x00000030, + SYN_MAC_FC_PAUSE_LOW_THRESH3 = 0x00000030, /* (PLT)thresh for pause + tmr 256 slot time */ + SYN_MAC_FC_PAUSE_LOW_THRESH2 = 0x00000020, /* 144 slot time */ + SYN_MAC_FC_PAUSE_LOW_THRESH1 = 0x00000010, /* 28 slot time */ + SYN_MAC_FC_PAUSE_LOW_THRESH0 = 0x00000000, /* 4 slot time */ + SYN_MAC_FC_UNICAST_PAUSE_FRAME = 0x00000008, + SYN_MAC_FC_UNICAST_PAUSE_FRAME_ON = 0x00000008, /* (UP)Detect pause frame + with unicast addr. */ + SYN_MAC_FC_UNICAST_PAUSE_FRAME_OFF = 0x00000000,/* Detect only pause frame + with multicast addr. 
*/ + SYN_MAC_FC_RX_FLOW_CONTROL = 0x00000004, + SYN_MAC_FC_RX_FLOW_CONTROL_ENABLE = 0x00000004, /* (RFE)Enable Rx flow control */ + SYN_MAC_FC_RX_FLOW_CONTROL_DISABLE = 0x00000000,/* Disable Rx flow control */ + SYN_MAC_FC_TX_FLOW_CONTROL = 0x00000002, + SYN_MAC_FC_TX_FLOW_CONTROL_ENABLE = 0x00000002, /* (TFE)Enable Tx flow control */ + SYN_MAC_FC_TX_FLOW_CONTROL_DISABLE = 0x00000000,/* Disable flow control */ + SYN_MAC_FC_FLOW_CONTROL_BACK_PRESSURE = 0x00000001, + SYN_MAC_FC_SEND_PAUSE_FRAME = 0x00000001, /* (FCB/PBA)send pause frm/Apply + back pressure */ +}; + +/* + * SYN_MAC_ADDR_HIGH Register + */ +enum syn_mac_addr_high { + SYN_MAC_ADDR_HIGH_AE = 0x80000000, +}; + +/* + * SYN_DMA_BUS_MODE = 0x0000, CSR0 - Bus Mode + */ +enum syn_dma_bus_mode_reg { + SYN_DMA_FIXED_BURST_ENABLE = 0x00010000, /* (FB)Fixed Burst SINGLE, INCR4, + INCR8 or INCR16 */ + SYN_DMA_FIXED_BURST_DISABLE = 0x00000000, /* SINGLE, INCR */ + SYN_DMA_TX_PRIORITY_RATIO11 = 0x00000000, /* (PR)TX:RX DMA priority ratio 1:1 */ + SYN_DMA_TX_PRIORITY_RATIO21 = 0x00004000, /* (PR)TX:RX DMA priority ratio 2:1 */ + SYN_DMA_TX_PRIORITY_RATIO31 = 0x00008000, /* (PR)TX:RX DMA priority ratio 3:1 */ + SYN_DMA_TX_PRIORITY_RATIO41 = 0x0000C000, /* (PR)TX:RX DMA priority ratio 4:1 */ + SYN_DMA_ADDRESS_ALIGNED_BEATS = 0x02000000, /* Address Aligned beats */ + SYN_DMA_BURST_LENGTHX8 = 0x01000000, /* When set mutiplies the PBL by 8 */ + SYN_DMA_BURST_LENGTH256 = 0x01002000, /* (dma_burst_lengthx8 | + dma_burst_length32) = 256 */ + SYN_DMA_BURST_LENGTH128 = 0x01001000, /* (dma_burst_lengthx8 | + dma_burst_length16) = 128 */ + SYN_DMA_BURST_LENGTH64 = 0x01000800, /* (dma_burst_lengthx8 | + dma_burst_length8) = 64 */ + /* (PBL) programmable burst length */ + SYN_DMA_BURST_LENGTH32 = 0x00002000, /* Dma burst length = 32 */ + SYN_DMA_BURST_LENGTH16 = 0x00001000, /* Dma burst length = 16 */ + SYN_DMA_BURST_LENGTH8 = 0x00000800, /* Dma burst length = 8 */ + SYN_DMA_BURST_LENGTH4 = 0x00000400, /* Dma burst length = 4 */ + 
SYN_DMA_BURST_LENGTH2 = 0x00000200, /* Dma burst length = 2 */ + SYN_DMA_BURST_LENGTH1 = 0x00000100, /* Dma burst length = 1 */ + SYN_DMA_BURST_LENGTH0 = 0x00000000, /* Dma burst length = 0 */ + + SYN_DMA_DESCRIPTOR8_WORDS = 0x00000080, /* Enh Descriptor works 1=> + 8 word descriptor */ + SYN_DMA_DESCRIPTOR4_WORDS = 0x00000000, /* Enh Descriptor works 0=> + 4 word descriptor */ + SYN_DMA_DESCRIPTOR_SKIP16 = 0x00000040, /* (DSL)Descriptor skip length (no.of dwords) */ + SYN_DMA_DESCRIPTOR_SKIP8 = 0x00000020, /* between two unchained descriptors */ + SYN_DMA_DESCRIPTOR_SKIP4 = 0x00000010, + SYN_DMA_DESCRIPTOR_SKIP2 = 0x00000008, + SYN_DMA_DESCRIPTOR_SKIP1 = 0x00000004, + SYN_DMA_DESCRIPTOR_SKIP0 = 0x00000000, + SYN_DMA_ARBIT_RR = 0x00000000, /* (DA) DMA RR arbitration */ + SYN_DMA_ARBIT_PR = 0x00000002, /* Rx has priority over Tx */ + SYN_DMA_RESET_ON = 0x00000001, /* (SWR)Software Reset DMA engine */ + SYN_DMA_RESET_OFF = 0x00000000, +}; + +/* + * SYN_DMA_STATUS = 0x0014, CSR5 - Dma status Register + */ +enum syn_dma_status_reg { + SYN_DMA_GMAC_PMT_INTR = 0x10000000, /* (GPI)Gmac subsystem interrupt */ + SYN_DMA_GMAC_MMC_INTR = 0x08000000, /* (GMI)Gmac MMC subsystem interrupt */ + SYN_DMA_GMAC_LINE_INTF_INTR = 0x04000000, /* Line interface interrupt */ + SYN_DMA_ERROR_BIT2 = 0x02000000, /* (EB)Error bits 0-data buffer, 1-desc access */ + SYN_DMA_ERROR_BIT1 = 0x01000000, /* (EB)Error bits 0-write trnsf, 1-read transfer */ + SYN_DMA_ERROR_BIT0 = 0x00800000, /* (EB)Error bits 0-Rx DMA, 1-Tx DMA */ + SYN_DMA_TX_STATE = 0x00700000, /* (TS)Transmit process state */ + SYN_DMA_TX_STOPPED = 0x00000000, /* Stopped - Reset or Stop Tx Command issued */ + SYN_DMA_TX_FETCHING = 0x00100000, /* Running - fetching the Tx descriptor */ + SYN_DMA_TX_WAITING = 0x00200000, /* Running - waiting for status */ + SYN_DMA_TX_READING = 0x00300000, /* Running - reading the data from host memory */ + SYN_DMA_TX_SUSPENDED = 0x00600000, /* Suspended - Tx Descriptor unavailabe */ + 
SYN_DMA_TX_CLOSING = 0x00700000, /* Running - closing Rx descriptor */ + SYN_DMA_RX_STATE = 0x000E0000, /* (RS)Receive process state */ + SYN_DMA_RX_STOPPED = 0x00000000, /* Stopped - Reset or Stop Rx Command issued */ + SYN_DMA_RX_FETCHING = 0x00020000, /* Running - fetching the Rx descriptor */ + SYN_DMA_RX_WAITING = 0x00060000, /* Running - waiting for packet */ + SYN_DMA_RX_SUSPENDED = 0x00080000, /* Suspended - Rx Descriptor unavailable */ + SYN_DMA_RX_CLOSING = 0x000A0000, /* Running - closing descriptor */ + SYN_DMA_RX_QUEUING = 0x000E0000, /* Running - queuing the receive frame into host memory */ + SYN_DMA_INT_NORMAL = 0x00010000, /* (NIS)Normal interrupt summary */ + SYN_DMA_INT_ABNORMAL = 0x00008000, /* (AIS)Abnormal interrupt summary */ + SYN_DMA_INT_EARLY_RX = 0x00004000, /* Early receive interrupt (Normal) */ + SYN_DMA_INT_BUS_ERROR = 0x00002000, /* Fatal bus error (Abnormal) */ + SYN_DMA_INT_EARLY_TX = 0x00000400, /* Early transmit interrupt (Abnormal) */ + SYN_DMA_INT_RX_WDOG_TO = 0x00000200, /* Receive Watchdog Timeout (Abnormal) */ + SYN_DMA_INT_RX_STOPPED = 0x00000100, /* Receive process stopped (Abnormal) */ + SYN_DMA_INT_RX_NO_BUFFER = 0x00000080, /* RX buffer unavailable (Abnormal) */ + SYN_DMA_INT_RX_COMPLETED = 0x00000040, /* Completion of frame RX (Normal) */ + SYN_DMA_INT_TX_UNDERFLOW = 0x00000020, /* Transmit underflow (Abnormal) */ + SYN_DMA_INT_RCV_OVERFLOW = 0x00000010, /* RX Buffer overflow interrupt */ + SYN_DMA_INT_TX_JABBER_TO = 0x00000008, /* TX Jabber Timeout (Abnormal) */ + SYN_DMA_INT_TX_NO_BUFFER = 0x00000004, /* TX buffer unavailable (Normal) */ + SYN_DMA_INT_TX_STOPPED = 0x00000002, /* TX process stopped (Abnormal) */ + SYN_DMA_INT_TX_COMPLETED = 0x00000001, /* Transmit completed (Normal) */ +}; + +/* + * SYN_DMA_OPERATION_MODE = 0x0018, CSR6 - Dma Operation Mode Register + */ +enum syn_dma_operation_mode_reg { + SYN_DMA_DISABLE_DROP_TCP_CS = 0x04000000, /* (DT) Dis. drop. 
of tcp/ip + CS error frames */ + SYN_DMA_RX_STORE_AND_FORWARD = 0x02000000, /* Rx (SF)Store and forward */ + SYN_DMA_RX_FRAME_FLUSH = 0x01000000, /* Disable Receive Frame Flush*/ + SYN_DMA_TX_STORE_AND_FORWARD = 0x00200000, /* Tx (SF)Store and forward */ + SYN_DMA_FLUSH_TX_FIFO = 0x00100000, /* (FTF)Tx FIFO controller + is reset to default */ + SYN_DMA_TX_THRESH_CTRL = 0x0001C000, /* (TTC)Controls thre Thresh of + MTL tx Fifo */ + SYN_DMA_TX_THRESH_CTRL16 = 0x0001C000, /* (TTC)Controls thre Thresh of + MTL tx Fifo 16 */ + SYN_DMA_TX_THRESH_CTRL24 = 0x00018000, /* (TTC)Controls thre Thresh of + MTL tx Fifo 24 */ + SYN_DMA_TX_THRESH_CTRL32 = 0x00014000, /* (TTC)Controls thre Thresh of + MTL tx Fifo 32 */ + SYN_DMA_TX_THRESH_CTRL40 = 0x00010000, /* (TTC)Controls thre Thresh of + MTL tx Fifo 40 */ + SYN_DMA_TX_THRESH_CTRL256 = 0x0000c000, /* (TTC)Controls thre Thresh of + MTL tx Fifo 256 */ + SYN_DMA_TX_THRESH_CTRL192 = 0x00008000, /* (TTC)Controls thre Thresh of + MTL tx Fifo 192 */ + SYN_DMA_TX_THRESH_CTRL128 = 0x00004000, /* (TTC)Controls thre Thresh of + MTL tx Fifo 128 */ + SYN_DMA_TX_THRESH_CTRL64 = 0x00000000, /* (TTC)Controls thre Thresh of + MTL tx Fifo 64 */ + SYN_DMA_TX_START = 0x00002000, /* (ST)Start/Stop transmission*/ + SYN_DMA_RX_FLOW_CTRL_DEACT = 0x00401800, /* (RFD)Rx flow control + deact. Threshold */ + SYN_DMA_RX_FLOW_CTRL_DEACT1K = 0x00000000, /* (RFD)Rx flow control + deact. Threshold (1kbytes) */ + SYN_DMA_RX_FLOW_CTRL_DEACT2K = 0x00000800, /* (RFD)Rx flow control + deact. Threshold (2kbytes) */ + SYN_DMA_RX_FLOW_CTRL_DEACT3K = 0x00001000, /* (RFD)Rx flow control + deact. Threshold (3kbytes) */ + SYN_DMA_RX_FLOW_CTRL_DEACT4K = 0x00001800, /* (RFD)Rx flow control + deact. Threshold (4kbytes) */ + SYN_DMA_RX_FLOW_CTRL_DEACT5K = 0x00400000, /* (RFD)Rx flow control + deact. Threshold (4kbytes) */ + SYN_DMA_RX_FLOW_CTRL_DEACT6K = 0x00400800, /* (RFD)Rx flow control + deact. 
Threshold (4kbytes) */ + SYN_DMA_RX_FLOW_CTRL_DEACT7K = 0x00401000, /* (RFD)Rx flow control + deact. Threshold (4kbytes) */ + SYN_DMA_RX_FLOW_CTRL_ACT = 0x00800600, /* (RFA)Rx flow control + Act. Threshold */ + SYN_DMA_RX_FLOW_CTRL_ACT1K = 0x00000000, /* (RFA)Rx flow control + Act. Threshold (1kbytes) */ + SYN_DMA_RX_FLOW_CTRL_ACT2K = 0x00000200, /* (RFA)Rx flow control + Act. Threshold (2kbytes) */ + SYN_DMA_RX_FLOW_CTRL_ACT3K = 0x00000400, /* (RFA)Rx flow control + Act. Threshold (3kbytes) */ + SYN_DMA_RX_FLOW_CTRL_ACT4K = 0x00000600, /* (RFA)Rx flow control + Act. Threshold (4kbytes) */ + SYN_DMA_RX_FLOW_CTRL_ACT5K = 0x00800000, /* (RFA)Rx flow control + Act. Threshold (5kbytes) */ + SYN_DMA_RX_FLOW_CTRL_ACT6K = 0x00800200, /* (RFA)Rx flow control + Act. Threshold (6kbytes) */ + SYN_DMA_RX_FLOW_CTRL_ACT7K = 0x00800400, /* (RFA)Rx flow control + Act. Threshold (7kbytes) */ + SYN_DMA_RX_THRESH_CTRL = 0x00000018, /* (RTC)Controls thre + Thresh of MTL rx Fifo */ + SYN_DMA_RX_THRESH_CTRL64 = 0x00000000, /* (RTC)Controls thre + Thresh of MTL tx Fifo 64 */ + SYN_DMA_RX_THRESH_CTRL32 = 0x00000008, /* (RTC)Controls thre + Thresh of MTL tx Fifo 32 */ + SYN_DMA_RX_THRESH_CTRL96 = 0x00000010, /* (RTC)Controls thre + Thresh of MTL tx Fifo 96 */ + SYN_DMA_RX_THRESH_CTRL128 = 0x00000018, /* (RTC)Controls thre + Thresh of MTL tx Fifo 128 */ + SYN_DMA_EN_HW_FLOW_CTRL = 0x00000100, /* (EFC)Enable HW flow control*/ + SYN_DMA_DIS_HW_FLOW_CTRL = 0x00000000, /* Disable HW flow control */ + SYN_DMA_FWD_ERROR_FRAMES = 0x00000080, /* (FEF)Forward error frames */ + SYN_DMA_FWD_UNDER_SZ_FRAMES = 0x00000040, /* (FUF)Forward undersize + frames */ + SYN_DMA_TX_SECOND_FRAME = 0x00000004, /* (OSF)Operate on 2nd frame */ + SYN_DMA_RX_START = 0x00000002, /* (SR)Start/Stop reception */ +}; + +/* + * SYN_DMA_INT_ENABLE = 0x101C, CSR7 - Interrupt enable Register Layout + */ +enum syn_dma_interrupt_reg { + SYN_DMA_IE_NORMAL = SYN_DMA_INT_NORMAL, /* Normal interrupt enable */ + SYN_DMA_IE_ABNORMAL = 
SYN_DMA_INT_ABNORMAL, /* Abnormal interrupt enable */ + SYN_DMA_IE_EARLY_RX = SYN_DMA_INT_EARLY_RX, /* Early RX interrupt enable */ + SYN_DMA_IE_BUS_ERROR = SYN_DMA_INT_BUS_ERROR, /* Fatal bus error enable */ + SYN_DMA_IE_EARLY_TX = SYN_DMA_INT_EARLY_TX, /* Early TX interrupt enable */ + SYN_DMA_IE_RX_WDOG_TO = SYN_DMA_INT_RX_WDOG_TO, /* RX Watchdog Timeout enable */ + SYN_DMA_IE_RX_STOPPED = SYN_DMA_INT_RX_STOPPED, /* RX process stopped enable */ + SYN_DMA_IE_RX_NO_BUFFER = SYN_DMA_INT_RX_NO_BUFFER, + /* Receive buffer unavailable enable */ + SYN_DMA_IE_RX_COMPLETED = SYN_DMA_INT_RX_COMPLETED, + /* Completion of frame reception enable */ + SYN_DMA_IE_TX_UNDERFLOW = SYN_DMA_INT_TX_UNDERFLOW, + /* TX underflow enable */ + SYN_DMA_IE_RX_OVERFLOW = SYN_DMA_INT_RCV_OVERFLOW, + /* RX Buffer overflow interrupt */ + SYN_DMA_IE_TX_JABBER_TO = SYN_DMA_INT_TX_JABBER_TO, + /* TX Jabber Timeout enable */ + SYN_DMA_IE_TX_NO_BUFFER = SYN_DMA_INT_TX_NO_BUFFER, + /* TX buffer unavailable enable */ + SYN_DMA_IE_TX_STOPPED = SYN_DMA_INT_TX_STOPPED, + /* TX process stopped enable */ + SYN_DMA_IE_TX_COMPLETED = SYN_DMA_INT_TX_COMPLETED, + /* TX completed enable */ +}; + +/* + * SYN_DMA_AXI_BUS_MODE = 0x1028 + */ +enum syn_dma_axi_bus_mode_reg { + SYN_DMA_EN_LPI = 0x80000000, + SYN_DMA_LPI_XIT_FRM = 0x40000000, + SYN_DMA_WR_OSR_NUM_REQS16 = 0x00F00000, + SYN_DMA_WR_OSR_NUM_REQS8 = 0x00700000, + SYN_DMA_WR_OSR_NUM_REQS4 = 0x00300000, + SYN_DMA_WR_OSR_NUM_REQS2 = 0x00100000, + SYN_DMA_WR_OSR_NUM_REQS1 = 0x00000000, + SYN_DMA_RD_OSR_NUM_REQS16 = 0x000F0000, + SYN_DMA_RD_OSR_NUM_REQS8 = 0x00070000, + SYN_DMA_RD_OSR_NUM_REQS4 = 0x00030000, + SYN_DMA_RD_OSR_NUM_REQS2 = 0x00010000, + SYN_DMA_RD_OSR_NUM_REQS1 = 0x00000000, + SYN_DMA_ONEKBBE = 0x00002000, + SYN_DMA_AXI_AAL = 0x00001000, + SYN_DMA_AXI_BLEN256 = 0x00000080, + SYN_DMA_AXI_BLEN128 = 0x00000040, + SYN_DMA_AXI_BLEN64 = 0x00000020, + SYN_DMA_AXI_BLEN32 = 0x00000010, + SYN_DMA_AXI_BLEN16 = 0x00000008, + SYN_DMA_AXI_BLEN8 = 0x00000004, 
+ SYN_DMA_AXI_BLEN4 = 0x00000002, + SYN_DMA_UNDEFINED = 0x00000001, +}; + +/* + * Values to initialize DMA registers + */ +enum syn_dma_init_values { + /* + * Interrupt groups + */ + SYN_DMA_INT_ERROR_MASK = SYN_DMA_INT_BUS_ERROR, /* Error */ + SYN_DMA_INT_RX_ABN_MASK = SYN_DMA_INT_RX_NO_BUFFER, /* RX abnormal intr */ + SYN_DMA_INT_RX_NORM_MASK = SYN_DMA_INT_RX_COMPLETED, /* RXnormal intr */ + SYN_DMA_INT_RX_STOPPED_MASK = SYN_DMA_INT_RX_STOPPED, /* RXstopped */ + SYN_DMA_INT_TX_ABN_MASK = SYN_DMA_INT_TX_UNDERFLOW, /* TX abnormal intr */ + SYN_DMA_INT_TX_NORM_MASK = SYN_DMA_INT_TX_COMPLETED, /* TX normal intr */ + SYN_DMA_INT_TX_STOPPED_MASK = SYN_DMA_INT_TX_STOPPED, /* TX stopped */ + + SYN_DMA_BUS_MODE_INIT = SYN_DMA_FIXED_BURST_ENABLE | SYN_DMA_BURST_LENGTH8 + | SYN_DMA_DESCRIPTOR_SKIP2 | SYN_DMA_RESET_OFF, + + SYN_DMA_BUS_MODE_VAL = SYN_DMA_BURST_LENGTH32 + | SYN_DMA_BURST_LENGTHX8 | SYN_DMA_DESCRIPTOR_SKIP0 + | SYN_DMA_DESCRIPTOR8_WORDS | SYN_DMA_ARBIT_PR | SYN_DMA_ADDRESS_ALIGNED_BEATS, + + SYN_DMA_OMR = SYN_DMA_TX_STORE_AND_FORWARD | SYN_DMA_RX_STORE_AND_FORWARD + | SYN_DMA_RX_THRESH_CTRL128 | SYN_DMA_TX_SECOND_FRAME, + + SYN_DMA_INT_EN = SYN_DMA_IE_NORMAL | SYN_DMA_IE_ABNORMAL | SYN_DMA_INT_ERROR_MASK + | SYN_DMA_INT_RX_ABN_MASK | SYN_DMA_INT_RX_NORM_MASK + | SYN_DMA_INT_RX_STOPPED_MASK | SYN_DMA_INT_TX_ABN_MASK + | SYN_DMA_INT_TX_NORM_MASK | SYN_DMA_INT_TX_STOPPED_MASK, + SYN_DMA_INT_DISABLE = 0, + SYN_DMA_AXI_BUS_MODE_VAL = SYN_DMA_AXI_BLEN16 | SYN_DMA_RD_OSR_NUM_REQS8 + | SYN_DMA_WR_OSR_NUM_REQS8, +}; + +/* + * desc_mode + * GMAC descriptors mode + */ +enum desc_mode { + RINGMODE = 0x00000001, + CHAINMODE = 0x00000002, +}; + +extern void syn_disable_dma_interrupt(struct nss_gmac_hal_dev *nghd); +extern void syn_enable_dma_interrupt(struct nss_gmac_hal_dev *nghd); +extern void syn_enable_dma_rx(struct nss_gmac_hal_dev *nghd); +extern void syn_disable_dma_rx(struct nss_gmac_hal_dev *nghd); +extern void syn_enable_dma_tx(struct nss_gmac_hal_dev *nghd); 
+extern void syn_disable_dma_tx(struct nss_gmac_hal_dev *nghd); +extern void syn_clear_dma_status(struct nss_gmac_hal_dev *nghd); +extern void syn_resume_dma_tx(struct nss_gmac_hal_dev *nghd); +extern uint32_t syn_get_rx_missed(struct nss_gmac_hal_dev *nghd); +extern uint32_t syn_get_fifo_overflows(struct nss_gmac_hal_dev *nghd); + +extern void syn_init_tx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t tx_desc_dma); +extern void syn_init_rx_desc_base(struct nss_gmac_hal_dev *nghd, uint32_t rx_desc_dma); + +#endif /*__SYN_REG_H__*/ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_dev.h b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_dev.h new file mode 100644 index 000000000..bdccd09eb --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_dev.h @@ -0,0 +1,189 @@ +/* + ************************************************************************** + * Copyright (c) 2016,2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF0 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __SYN_DEV_H__ +#define __SYN_DEV_H__ + +#include "syn_reg.h" +#include +#include + +/* + * Subclass for base nss_gmac_haldev + */ +struct syn_hal_dev { + struct nss_gmac_hal_dev nghd; /* Base class */ + fal_xgmib_info_t stats; /* Stats structure */ +}; + +/* + * syn_set_rx_flow_ctrl() + */ +static inline void syn_set_rx_flow_ctrl( + struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_RX_FLOW_CTL, + SYN_MAC_RX_FLOW_ENABLE); +} + +/* + * syn_clear_rx_flow_ctrl() + */ +static inline void syn_clear_rx_flow_ctrl( + struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, SYN_MAC_RX_FLOW_CTL, + SYN_MAC_RX_FLOW_ENABLE); +} + +/* + * syn_set_tx_flow_ctrl() + */ +static inline void syn_set_tx_flow_ctrl( + struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL, + SYN_MAC_TX_FLOW_ENABLE); +} + +/* + * syn_send_tx_pause_frame() + */ +static inline void syn_send_tx_pause_frame( + struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL, + SYN_MAC_TX_FLOW_ENABLE); + hal_set_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL, + SYN_MAC_TX_PAUSE_SEND); +} + +/* + * syn_clear_tx_flow_ctrl() + */ +static inline void syn_clear_tx_flow_ctrl( + struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, SYN_MAC_Q0_TX_FLOW_CTL, + SYN_MAC_TX_FLOW_ENABLE); +} + +/* + * syn_clear_mac_ctrl() + */ +static inline void syn_clear_mac_ctrl( + struct nss_gmac_hal_dev *nghd) +{ + hal_write_reg(nghd->mac_base, SYN_MAC_TX_CONFIG, 0); + hal_write_reg(nghd->mac_base, SYN_MAC_RX_CONFIG, 0); +} + +/* + * syn_rx_enable() + */ +static inline void syn_rx_enable(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_RX_CONFIG, SYN_MAC_RX_ENABLE); + hal_set_reg_bits(nghd, SYN_MAC_PACKET_FILTER, SYN_MAC_RX_ENABLE); +} + +/* + * syn_rx_disable() + */ +static inline void syn_rx_disable(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, 
SYN_MAC_RX_CONFIG, SYN_MAC_RX_ENABLE); +} + +/* + * syn_tx_enable() + */ +static inline void syn_tx_enable(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_TX_CONFIG, SYN_MAC_TX_ENABLE); +} + +/* + * syn_tx_disable() + */ +static inline void syn_tx_disable(struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, SYN_MAC_TX_CONFIG, + SYN_MAC_TX_ENABLE); +} + +/* + * syn_set_mmc_stats() + */ +static inline void syn_set_mmc_stats(struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_MMC_CTL, + SYN_MAC_MMC_RSTONRD); +} + +/* + * syn_rx_jumbo_frame_enable() + */ +static inline void syn_rx_jumbo_frame_enable( + struct nss_gmac_hal_dev *nghd) +{ + hal_set_reg_bits(nghd, SYN_MAC_RX_CONFIG, + SYN_MAC_JUMBO_FRAME_ENABLE); +} + +/* + * syn_rx_jumbo_frame_disable() + */ +static inline void syn_rx_jumbo_frame_disable( + struct nss_gmac_hal_dev *nghd) +{ + hal_clear_reg_bits(nghd, SYN_MAC_RX_CONFIG, + SYN_MAC_JUMBO_FRAME_ENABLE); +} + +/* + * syn_set_full_duplex() + */ +static inline void syn_set_full_duplex( + struct nss_gmac_hal_dev *nghd) +{ + /* TBD */ + return; +} + +/* + * syn_set_half_duplex() + */ +static inline void syn_set_half_duplex( + struct nss_gmac_hal_dev *nghd) +{ + /* TBD */ + return; +} + +static int syn_get_stats(struct nss_gmac_hal_dev *nghd) +{ + struct syn_hal_dev *shd = (struct syn_hal_dev *)nghd; + fal_xgmib_info_t *stats = &(shd->stats); + + if (fal_get_xgmib_info(0, nghd->mac_id, stats)) + return -1; + + return 0; +} +#endif /*__SYN_DEV_H__*/ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_if.c b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_if.c new file mode 100644 index 000000000..1ab621a6f --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_if.c @@ -0,0 +1,505 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ +#include +#include +#include +#include +#include +#include +#include +#include "syn_dev.h" + +#define SYN_STAT(m) offsetof(fal_xgmib_info_t, m) + +struct syn_ethtool_stats { + uint8_t stat_string[ETH_GSTRING_LEN]; + uint64_t stat_offset; +}; + +/* + * Array of strings describing statistics + */ +static const struct syn_ethtool_stats syn_gstrings_stats[] = { + {"rx_frame", SYN_STAT(RxFrame)}, + {"rx_bytes", SYN_STAT(RxByte)}, + {"rx_bytes_g", SYN_STAT(RxByteGood)}, + {"rx_broadcast", SYN_STAT(RxBroadGood)}, + {"rx_multicast", SYN_STAT(RxMultiGood)}, + {"rx_crc_err", SYN_STAT(RxFcsErr)}, + {"rx_runt_err", SYN_STAT(RxRuntErr)}, + {"rx_jabber_err", SYN_STAT(RxJabberError)}, + {"rx_undersize", SYN_STAT(RxUndersizeGood)}, + {"rx_oversize", SYN_STAT(RxOversizeGood)}, + {"rx_pkt64", SYN_STAT(Rx64Byte)}, + {"rx_pkt65to127", SYN_STAT(Rx128Byte)}, + {"rx_pkt128to255", SYN_STAT(Rx256Byte)}, + {"rx_pkt256to511", SYN_STAT(Rx512Byte)}, + {"rx_pkt512to1023", SYN_STAT(Rx1024Byte)}, + {"rx_pkt1024tomax", SYN_STAT(RxMaxByte)}, + {"rx_unicast", SYN_STAT(RxUnicastGood)}, + {"rx_len_err", SYN_STAT(RxLengthError)}, + {"rx_outofrange_err_ctr", SYN_STAT(RxOutOfRangeError)}, + 
{"rx_pause", SYN_STAT(RxPause)}, + {"rx_fifo_overflow", SYN_STAT(RxOverFlow)}, + {"rx_vlan", SYN_STAT(RxVLANFrameGoodBad)}, + {"rx_wdog", SYN_STAT(RxWatchDogError)}, + {"rx_lpi_usec_ctr", SYN_STAT(RxLPIUsec)}, + {"rx_lpi_tran_ctr", SYN_STAT(RxLPITran)}, + {"rx_drop_frame_ctr", SYN_STAT(RxDropFrameGoodBad)}, + {"rx_drop_byte_ctr", SYN_STAT(RxDropByteGoodBad)}, + {"tx_bytes", SYN_STAT(TxByte)}, + {"tx_frame", SYN_STAT(TxFrame)}, + {"tx_broadcast", SYN_STAT(TxBroadGood)}, + {"tx_broadcast_gb", SYN_STAT(TxBroad)}, + {"tx_multicast", SYN_STAT(TxMultiGood)}, + {"tx_multicast_gb", SYN_STAT(TxMulti)}, + {"tx_pkt64", SYN_STAT(Tx64Byte)}, + {"tx_pkt65to127", SYN_STAT(Tx128Byte)}, + {"tx_pkt128to255", SYN_STAT(Tx256Byte)}, + {"tx_pkt256to511", SYN_STAT(Tx512Byte)}, + {"tx_pkt512to1023", SYN_STAT(Tx1024Byte)}, + {"tx_pkt1024tomax", SYN_STAT(TxMaxByte)}, + {"tx_unicast", SYN_STAT(TxUnicast)}, + {"tx_underflow_err", SYN_STAT(TxUnderFlowError)}, + {"tx_bytes_g", SYN_STAT(TxByteGood)}, + {"tx_frame_g", SYN_STAT(TxFrameGood)}, + {"tx_pause", SYN_STAT(TxPause)}, + {"tx_vlan", SYN_STAT(TxVLANFrameGood)}, + {"tx_lpi_usec_ctr", SYN_STAT(TxLPIUsec)}, + {"tx_lpi_tran_ctr", SYN_STAT(TxLPITran)}, +}; + +/* + * Array of strings describing private flag names + */ +static const char *const syn_strings_priv_flags[] = { + "test", +}; + +#define SYN_STATS_LEN ARRAY_SIZE(syn_gstrings_stats) +#define SYN_PRIV_FLAGS_LEN ARRAY_SIZE(syn_strings_priv_flags) + +/* + * syn_rx_flow_control() + */ +static void syn_rx_flow_control(struct nss_gmac_hal_dev *nghd, + bool enabled) +{ + BUG_ON(nghd == NULL); + + if (enabled) + syn_set_rx_flow_ctrl(nghd); + else + syn_clear_rx_flow_ctrl(nghd); +} + +/* + * syn_tx_flow_control() + */ +static void syn_tx_flow_control(struct nss_gmac_hal_dev *nghd, + bool enabled) +{ + BUG_ON(nghd == NULL); + + if (enabled) + syn_set_tx_flow_ctrl(nghd); + else + syn_clear_tx_flow_ctrl(nghd); +} + +/* + * syn_get_mmc_stats() + */ +static int32_t syn_get_mmc_stats(struct 
nss_gmac_hal_dev *nghd) +{ + BUG_ON(nghd == NULL); + + if (syn_get_stats(nghd)) + return -1; + + return 0; +} + +/* + * syn_get_max_frame_size() + */ +static int32_t syn_get_max_frame_size(struct nss_gmac_hal_dev *nghd) +{ + int ret; + uint32_t mtu; + + ret = fal_port_max_frame_size_get(0, nghd->mac_id, &mtu); + + if (!ret) + return mtu; + + return ret; +} + +/* + * syn_set_max_frame_size() + */ +static int32_t syn_set_max_frame_size(struct nss_gmac_hal_dev *nghd, + uint32_t val) +{ + return fal_port_max_frame_size_set(0, nghd->mac_id, val); +} + +/* + * syn_set_mac_speed() + */ +static int32_t syn_set_mac_speed(struct nss_gmac_hal_dev *nghd, + uint32_t mac_speed) +{ + struct net_device *netdev = nghd->netdev; + + netdev_warn(netdev, "API deprecated\n"); + return 0; +} + +/* + * syn_get_mac_speed() + */ +static uint32_t syn_get_mac_speed(struct nss_gmac_hal_dev *nghd) +{ + struct net_device *netdev = nghd->netdev; + + netdev_warn(netdev, "API deprecated\n"); + return 0; +} + +/* + * syn_set_duplex_mode() + */ +static void syn_set_duplex_mode(struct nss_gmac_hal_dev *nghd, + uint8_t duplex_mode) +{ + struct net_device *netdev = nghd->netdev; + + netdev_warn(netdev, "API deprecated\n"); +} + +/* + * syn_get_duplex_mode() + */ +static uint8_t syn_get_duplex_mode(struct nss_gmac_hal_dev *nghd) +{ + struct net_device *netdev = nghd->netdev; + + netdev_warn(netdev, "API deprecated\n"); + return 0; +} + +/* + * syn_get_netdev_stats() + */ +static int syn_get_netdev_stats(struct nss_gmac_hal_dev *nghd, + struct rtnl_link_stats64 *stats) +{ + struct syn_hal_dev *shd; + fal_xgmib_info_t *hal_stats; + + BUG_ON(nghd == NULL); + + shd = (struct syn_hal_dev *)nghd; + hal_stats = &(shd->stats); + + if (syn_get_stats(nghd)) + return -1; + + stats->rx_packets = hal_stats->RxUnicastGood + + hal_stats->RxBroadGood + hal_stats->RxMultiGood; + stats->tx_packets = hal_stats->TxUnicast + + hal_stats->TxBroadGood + hal_stats->TxMultiGood; + stats->rx_bytes = hal_stats->RxByte; + 
stats->tx_bytes = hal_stats->TxByte; + stats->multicast = + hal_stats->RxMultiGood; + stats->rx_dropped = + hal_stats->RxDropFrameGoodBad; + stats->rx_length_errors = + hal_stats->RxLengthError; + stats->rx_crc_errors = + hal_stats->RxFcsErr; + stats->rx_fifo_errors = + hal_stats->RxOverFlow; + + return 0; +} + +/* + * syn_get_eth_stats() + */ +static int32_t syn_get_eth_stats(struct nss_gmac_hal_dev *nghd, + uint64_t *data) +{ + struct syn_hal_dev *shd; + fal_xgmib_info_t *stats; + uint8_t *p = NULL; + int i; + + BUG_ON(nghd == NULL); + + shd = (struct syn_hal_dev *)nghd; + stats = &(shd->stats); + + if (syn_get_stats(nghd)) + return -1; + + for (i = 0; i < SYN_STATS_LEN; i++) { + p = ((uint8_t *)(stats) + + syn_gstrings_stats[i].stat_offset); + data[i] = *(uint32_t *)p; + } + + return 0; +} + +/* + * syn_get_strset_count() + */ +static int32_t syn_get_strset_count(struct nss_gmac_hal_dev *nghd, + int32_t sset) +{ + struct net_device *netdev; + + BUG_ON(nghd == NULL); + + netdev = nghd->netdev; + + switch (sset) { + case ETH_SS_STATS: + return SYN_STATS_LEN; + + case ETH_SS_PRIV_FLAGS: + return SYN_PRIV_FLAGS_LEN; + } + + netdev_dbg(netdev, "%s: Invalid string set\n", __func__); + return -EPERM; +} + +/* + * syn_get_strings() + */ +static int32_t syn_get_strings(struct nss_gmac_hal_dev *nghd, + int32_t stringset, uint8_t *data) +{ + struct net_device *netdev; + int i; + + BUG_ON(nghd == NULL); + + netdev = nghd->netdev; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < SYN_STATS_LEN; i++) { + memcpy(data, syn_gstrings_stats[i].stat_string, + strlen(syn_gstrings_stats[i].stat_string)); + data += ETH_GSTRING_LEN; + } + break; + + case ETH_SS_PRIV_FLAGS: + for (i = 0; i < SYN_PRIV_FLAGS_LEN; i++) { + memcpy(data, syn_strings_priv_flags[i], + strlen(syn_strings_priv_flags[i])); + data += ETH_GSTRING_LEN; + } + + break; + default: + netdev_dbg(netdev, "%s: Invalid string set\n", __func__); + return -EPERM; + } + + return 0; +} + +/* + * 
syn_send_pause_frame() + */ +static void syn_send_pause_frame(struct nss_gmac_hal_dev *nghd) +{ + BUG_ON(nghd == NULL); + + syn_send_tx_pause_frame(nghd); +} + +/* + * syn_start + */ +static int32_t syn_start(struct nss_gmac_hal_dev *nghd) +{ + BUG_ON(nghd == NULL); + + syn_tx_enable(nghd); + syn_rx_enable(nghd); + syn_set_full_duplex(nghd); + if (syn_set_mac_speed(nghd, SPEED_10000)) + return -1; + + netdev_dbg(nghd->netdev, + "%s: mac_base:0x%px tx_enable:0x%x rx_enable:0x%x\n", + __func__, + nghd->mac_base, + hal_read_reg(nghd->mac_base, + SYN_MAC_TX_CONFIG), + hal_read_reg(nghd->mac_base, + SYN_MAC_RX_CONFIG)); + + return 0; +} + +/* + * syn_stop + */ +static int32_t syn_stop(struct nss_gmac_hal_dev *nghd) +{ + BUG_ON(nghd == NULL); + + syn_tx_disable(nghd); + syn_rx_disable(nghd); + + netdev_dbg(nghd->netdev, "%s: Stopping mac_base:0x%px\n", __func__, + nghd->mac_base); + + return 0; +} + +/* + * syn_init() + */ +static void *syn_init(struct gmac_hal_platform_data *gmacpdata) +{ + struct syn_hal_dev *shd = NULL; + struct net_device *ndev = NULL; + struct nss_dp_dev *dp_priv = NULL; + struct resource *res; + + ndev = gmacpdata->netdev; + dp_priv = netdev_priv(ndev); + + res = platform_get_resource(dp_priv->pdev, IORESOURCE_MEM, 0); + if (!res) { + netdev_dbg(ndev, "Resource get failed.\n"); + return NULL; + } + + if (!devm_request_mem_region(&dp_priv->pdev->dev, res->start, + resource_size(res), ndev->name)) { + netdev_dbg(ndev, "Request mem region failed. Returning...\n"); + return NULL; + } + + shd = (struct syn_hal_dev *)devm_kzalloc(&dp_priv->pdev->dev, + sizeof(struct syn_hal_dev), + GFP_KERNEL); + if (!shd) { + netdev_dbg(ndev, "kzalloc failed. 
Returning...\n"); + return NULL; + } + + /* Save netdev context in syn HAL context */ + shd->nghd.netdev = gmacpdata->netdev; + shd->nghd.mac_id = gmacpdata->macid; + + /* Populate the mac base addresses */ + shd->nghd.mac_base = + devm_ioremap_nocache(&dp_priv->pdev->dev, res->start, + resource_size(res)); + if (!shd->nghd.mac_base) { + netdev_dbg(ndev, "ioremap fail.\n"); + return NULL; + } + + spin_lock_init(&shd->nghd.slock); + + netdev_dbg(ndev, "ioremap OK.Size 0x%x Ndev base 0x%lx macbase 0x%px\n", + gmacpdata->reg_len, + ndev->base_addr, + shd->nghd.mac_base); + + /* Reset MIB Stats */ + if (fal_mib_port_flush_counters(0, shd->nghd.mac_id)) { + netdev_dbg(ndev, "MIB stats Reset fail.\n"); + } + + return (struct nss_gmac_hal_dev *)shd; +} + +/* + * syn_set_mac_address() + */ +static void syn_set_mac_address(struct nss_gmac_hal_dev *nghd, + uint8_t *macaddr) +{ + uint32_t data; + + BUG_ON(nghd == NULL); + + data = (macaddr[5] << 8) | macaddr[4] | SYN_MAC_ADDR_RSVD_BIT; + hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH, data); + data = (macaddr[3] << 24) | (macaddr[2] << 16) | (macaddr[1] << 8) + | macaddr[0]; + hal_write_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW, data); +} + +/* + * syn_get_mac_address() + */ +static void syn_get_mac_address(struct nss_gmac_hal_dev *nghd, + uint8_t *macaddr) +{ + uint32_t data; + + BUG_ON(nghd == NULL); + + data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_HIGH); + macaddr[5] = (data >> 8) & 0xff; + macaddr[4] = (data) & 0xff; + + data = hal_read_reg(nghd->mac_base, SYN_MAC_ADDR0_LOW); + macaddr[3] = (data >> 24) & 0xff; + macaddr[2] = (data >> 16) & 0xff; + macaddr[1] = (data >> 8) & 0xff; + macaddr[0] = (data) & 0xff; +} + +struct nss_gmac_hal_ops syn_hal_ops = { + .init = &syn_init, + .start = &syn_start, + .stop = &syn_stop, + .setmacaddr = &syn_set_mac_address, + .getmacaddr = &syn_get_mac_address, + .rxflowcontrol = &syn_rx_flow_control, + .txflowcontrol = &syn_tx_flow_control, + .setspeed = &syn_set_mac_speed, + .getspeed 
= &syn_get_mac_speed, + .setduplex = &syn_set_duplex_mode, + .getduplex = &syn_get_duplex_mode, + .getstats = &syn_get_mmc_stats, + .setmaxframe = &syn_set_max_frame_size, + .getmaxframe = &syn_get_max_frame_size, + .getndostats = &syn_get_netdev_stats, + .getssetcount = &syn_get_strset_count, + .getstrings = &syn_get_strings, + .getethtoolstats = &syn_get_eth_stats, + .sendpause = &syn_send_pause_frame, +}; diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_reg.h b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_reg.h new file mode 100644 index 000000000..f76fce1a0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/gmac_hal_ops/syn/xgmac/syn_reg.h @@ -0,0 +1,255 @@ +/* + ************************************************************************** + * Copyright (c) 2016,2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF0 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __SYN_REG_H__ +#define __SYN_REG_H__ + +/* + * + MAC Register Offset + * + */ +#define SYN_MAC_TX_CONFIG 0x0000 +#define SYN_MAC_RX_CONFIG 0x0004 +#define SYN_MAC_PACKET_FILTER 0x0008 +#define SYN_MAC_WDOG_TIMEOUT 0x000c +#define SYN_MAC_HASH_TBL_REG0 0x0010 +#define SYN_MAC_VLAN_TAG 0x0050 +#define SYN_MAC_VLAN_HASH_TBL 0x0058 +#define SYN_MAC_VLAN_INCL 0x0060 +#define SYN_MAC_INNER_VLAN_INCL 0x0064 +#define SYN_MAC_RX_ETH_TYP_MATCH 0x006c +#define SYN_MAC_Q0_TX_FLOW_CTL 0x0070 +#define SYN_MAC_Q1_TX_FLOW_CTL 0x0074 +#define SYN_MAC_Q2_TX_FLOW_CTL 0x0078 +#define SYN_MAC_Q3_TX_FLOW_CTL 0x007c +#define SYN_MAC_Q4_TX_FLOW_CTL 0x0080 +#define SYN_MAC_Q5_TX_FLOW_CTL 0x0084 +#define SYN_MAC_Q6_TX_FLOW_CTL 0x0088 +#define SYN_MAC_Q7_TX_FLOW_CTL 0x008c +#define SYN_MAC_RX_FLOW_CTL 0x0090 +#define SYN_MAC_RXQ_CTL0 0x00a0 +#define SYN_MAC_RXQ_CTL1 0x00a4 +#define SYN_MAC_RXQ_CTL2 0x00a8 +#define SYN_MAC_RXQ_CTL3 0x00ac +#define SYN_MAC_INT_STATUS 0x00b0 +#define SYN_MAC_INT_ENABLE 0x00b4 +#define SYN_MAC_TX_RX_STATUS 0x00b8 +#define SYN_MAC_PMT_CTL_STATUS 0x00c0 +#define SYN_MAC_RWK_PACKET_FILTER 0x00c4 +#define SYN_MAC_LPI_CTL_STATUS 0x00d0 +#define SYN_MAC_LPI_TIMER_STATUS 0x00d4 +#define SYN_MAC_VERSION 0x0110 +#define SYN_MAC_DEBUG 0x0114 +#define SYN_MAC_FW_FEATURE0 0x011c +#define SYN_MAC_FW_FEATURE1 0x0120 +#define SYN_MAC_FW_FEATURE2 0x0124 +#define SYN_MAC_GPIO_CTL 0x0278 +#define SYN_MAC_GPIO_STATUS 0x027c +#define SYN_MAC_ADDR0_HIGH 0x0300 +#define SYN_MAC_ADDR0_LOW 0x0304 +#define SYN_MAC_ADDR1_HIGH 0x0308 +#define SYN_MAC_ADDR1_LOW 0x030c +#define SYN_MAC_TS_CTL 0x0d00 +#define SYN_MAC_SUB_SEC_INCR 0x0d04 +#define SYN_MAC_SYS_TIME_SECS 0x0d08 +#define SYN_MAC_SYS_TIME_NSECS 0x0d0c +#define SYN_MAC_SYS_TIME_SECS_UPDATE 0x0d10 +#define SYN_MAC_SYS_TIME_NSECS_UPDATE 0x0d14 +#define SYN_MAC_TS_ADDEND 0x0d18 +#define SYN_MAC_TS_STATUS 0x0d20 +#define 
SYN_MAC_TX_TS_STATUS_NSECS 0x0d30 +#define SYN_MAC_TX_TS_STATUS_SECS 0x0d34 +#define SYN_MAC_PPS_CTL 0x0d70 +#define SYN_MAC_MMC_CTL 0x0800 +#define SYN_MAC_MMC_RX_INT 0x0804 +#define SYN_MAC_MMC_TX_INT 0x0808 +#define SYN_MAC_MMC_RX_INT_EN 0x080c +#define SYN_MAC_MMC_TX_INT_EN 0x0810 + +/* MAC TX MMC Counters */ +#define SYN_MAC_MMC_TX_BCAST_LO 0x0824 +#define SYN_MAC_MMC_TX_BCAST_HI 0x0828 +#define SYN_MAC_MMC_TX_FRAME_LO 0x0894 +#define SYN_MAC_MMC_TX_FRAME_HI 0x0898 +#define SYN_MAC_MMC_TX_MCAST_LO 0x082c +#define SYN_MAC_MMC_TX_MCAST_HI 0x0830 +#define SYN_MAC_MMC_TX_PKT64_LO 0x0834 +#define SYN_MAC_MMC_TX_PKT64_HI 0x0838 +#define SYN_MAC_MMC_TX_PKT65TO127_LO 0x083c +#define SYN_MAC_MMC_TX_PKT65TO127_HI 0x0840 +#define SYN_MAC_MMC_TX_PKT128TO255_LO 0x0844 +#define SYN_MAC_MMC_TX_PKT128TO255_HI 0x0848 +#define SYN_MAC_MMC_TX_PKT256TO511_LO 0x084c +#define SYN_MAC_MMC_TX_PKT256TO511_HI 0x0850 +#define SYN_MAC_MMC_TX_PKT512TO1023_LO 0x0854 +#define SYN_MAC_MMC_TX_PKT512TO1023_HI 0x0858 +#define SYN_MAC_MMC_TX_PKT1024TOMAX_LO 0x085c +#define SYN_MAC_MMC_TX_PKT1024TOMAX_HI 0x0860 +#define SYN_MAC_MMC_TX_UNICAST_LO 0x0864 +#define SYN_MAC_MMC_TX_UNICAST_HI 0x0868 +#define SYN_MAC_MMC_TX_MCAST_GB_LO 0x086c +#define SYN_MAC_MMC_TX_MCAST_GB_HI 0x0870 +#define SYN_MAC_MMC_TX_BCAST_GB_LO 0x0874 +#define SYN_MAC_MMC_TX_BCAST_GB_HI 0x0878 +#define SYN_MAC_MMC_TX_UNDERFLOW_ERR_LO 0x087c +#define SYN_MAC_MMC_TX_UNDERFLOW_ERR_HI 0x0880 +#define SYN_MAC_MMC_TX_BYTES_LO 0x0884 +#define SYN_MAC_MMC_TX_BYTES_HI 0x0888 +#define SYN_MAC_MMC_TX_PAUSE_FRAME_LO 0x0894 +#define SYN_MAC_MMC_TX_PAUSE_FRAME_HI 0x0898 +#define SYN_MAC_MMC_TX_VLAN_LO 0x089c +#define SYN_MAC_MMC_TX_VLAN_HI 0x08a0 +#define SYN_MAC_MMC_TX_LPI_USEC_CTR_LO 0x08a4 +#define SYN_MAC_MMC_TX_LPI_USEC_CTR_HI 0x08a8 + +/* MAC RX MMC Counters */ +#define SYN_MAC_MMC_RX_FRAME_LO 0x0900 +#define SYN_MAC_MMC_RX_FRAME_HI 0x0904 +#define SYN_MAC_MMC_RX_BYTES_LO 0x0910 +#define SYN_MAC_MMC_RX_BYTES_HI 0x0914 +#define 
SYN_MAC_MMC_RX_BCAST_LO 0x0918 +#define SYN_MAC_MMC_RX_BCAST_HI 0x091c +#define SYN_MAC_MMC_RX_MCAST_LO 0x0920 +#define SYN_MAC_MMC_RX_MCAST_HI 0x0924 +#define SYN_MAC_MMC_RX_CRC_ERR_LO 0x0928 +#define SYN_MAC_MMC_RX_CRC_ERR_HI 0x092c +#define SYN_MAC_MMC_RX_RUNT_ERR 0x0930 +#define SYN_MAC_MMC_RX_JABBER_ERR 0x0934 +#define SYN_MAC_MMC_RX_UNDERSIZE 0x0938 +#define SYN_MAC_MMC_RX_OVERSIZE 0x093c +#define SYN_MAC_MMC_RX_PKT64_LO 0x0940 +#define SYN_MAC_MMC_RX_PKT64_HI 0x0944 +#define SYN_MAC_MMC_RX_PKT65TO127_LO 0x0948 +#define SYN_MAC_MMC_RX_PKT65TO127_HI 0x094c +#define SYN_MAC_MMC_RX_PKT128TO255_LO 0x0950 +#define SYN_MAC_MMC_RX_PKT128TO255_HI 0x0954 +#define SYN_MAC_MMC_RX_PKT256TO511_LO 0x0958 +#define SYN_MAC_MMC_RX_PKT256TO511_HI 0x095c +#define SYN_MAC_MMC_RX_PKT512TO1023_LO 0x0960 +#define SYN_MAC_MMC_RX_PKT512TO1023_HI 0x0964 +#define SYN_MAC_MMC_RX_PKT1024TOMAX_LO 0x0968 +#define SYN_MAC_MMC_RX_PKT1024TOMAX_HI 0x096c +#define SYN_MAC_MMC_RX_UNICAST_LO 0x0970 +#define SYN_MAC_MMC_RX_UNICAST_HI 0x0974 +#define SYN_MAC_MMC_RX_LEN_ERR_LO 0x0978 +#define SYN_MAC_MMC_RX_LEN_ERR_HI 0x097c +#define SYN_MAC_MMC_RX_PAUSE_FRAME_LO 0x0988 +#define SYN_MAC_MMC_RX_PAUSE_FRAME_HI 0x098c +#define SYN_MAC_MMC_RX_FIFO_OVERFLOW_LO 0x0990 +#define SYN_MAC_MMC_RX_FIFO_OVERFLOW_HI 0x0994 +#define SYN_MAC_MMC_RX_VLAN_FRAME_LO 0x0998 +#define SYN_MAC_MMC_RX_VLAN_FRAME_HI 0x099c +#define SYN_MAC_MMC_RX_LPI_USEC_CTR_LO 0x09a4 +#define SYN_MAC_MMC_RX_LPI_USEC_CTR_HI 0x09a8 +#define SYN_MAC_MMC_RX_DISCARD_FRAME_LO 0x09ac +#define SYN_MAC_MMC_RX_DISCARD_FRAME_HI 0x09b0 + +/* MAC Register Bit Definitions*/ + +/* SYN_MAC_Q0_TX_FLOW_CTL Bit definitions */ +#define SYN_MAC_TX_PAUSE_SEND 0x00000001 +#define SYN_MAC_TX_FLOW_ENABLE 0x00000002 +#define SYN_MAC_TX_PAUSE_LOW_THRESHOLD 0x00000070 +#define SYN_MAC_ADDR_RSVD_BIT 0x80000000 + +/* SYN_MAC_RX_FLOW_CTL Bit definitions */ +#define SYN_MAC_RX_FLOW_ENABLE 0x00000001 + +/* SYN_MAC_TX_CONFIG Bit definitions */ +#define SYN_MAC_TX_ENABLE 
0x00000001 +#define SYN_MAC_TX_SPEED_SELECT 0x60000000 + +/* SYN_MAC_RX_CONFIG Bit definitions */ +#define SYN_MAC_RX_ENABLE 0x00000001 +#define SYN_MAC_JUMBO_FRAME_ENABLE 0x00000100 + +#define SYN_MAC_SPEED_10G 0x0 +#define SYN_MAC_SPEED_2_5G 0x2 +#define SYN_MAC_SPEED_1G 0x3 +#define SYN_MAC_SPEED_BITPOS 29 +#define SYN_MAC_SPEED_BITMASK 0x3 + +#define SYN_MAC_DEFAULT_MAX_FRAME_SIZE 1518 +#define SYN_MAC_MAX_FRAME_SIZE_BITPOS 16 +#define SYN_MAC_MAX_FRAME_SIZE_BITMASK 0x3fff + +/* SYN_MAC_MMC_CTL Bit definitions */ +#define SYN_MAC_MMC_RSTONRD 0x00000004 + +/* + * + MTL Register Offset + * + */ +#define SYN_MTL_OPER_MODE 0x1000 +#define SYN_MTL_DEBUG_CTL 0x1008 +#define SYN_MTL_DEBUG_STATUS 0x100c +#define SYN_MTL_DEBUG_DATA 0x1010 +#define SYN_MTL_INT_STATUS 0x1020 +#define SYN_MTL_RXQ_DMA_MAP0 0x1030 +#define SYN_MTL_RXQ_DMA_MAP1 0x1034 +#define SYN_MTL_RXQ_DMA_MAP2 0x1038 +#define SYN_MTL_TC_PRIO_MAP0 0x1040 +#define SYN_MTL_TC_PRIO_MAP1 0x1044 +#define SYN_MTL_TXQ0_OPER_MODE 0x1100 +#define SYN_MTL_TXQ0_UNDERFLOW 0x1104 +#define SYN_MTL_TXQ0_DEBUG 0x1108 +#define SYN_MTL_TC0_ETS_CTL 0x1110 +#define SYN_MTL_TC0_ETS_STATUS 0x1114 +#define SYN_MTL_TC0_QUANTUM_WEIGHT 0x1118 +#define SYN_MTL_RXQ0_DEBUG 0x1148 +#define SYN_MTL_RXQ0_CTL 0x114c +#define SYN_MTL_RXQ0_FLOW_CTL 0x1150 +#define SYN_MTL_Q0_INT_ENABLE 0x1170 +#define SYN_MTL_Q0_INT_STATUS 0x1174 + +/* MTL Register Bit definitions */ + +/* + * + DMA Register Offset + * + */ +#define SYN_DMA_MODE 0x3000 +#define SYN_DMA_SYSBUS_MODE 0x3004 +#define SYN_DMA_INT_STATUS 0x3008 +#define SYN_DMA_AXI_TX_AR_ACE_CTL 0x3010 +#define SYN_DMA_AXI_RX_AW_ACE_CTL 0x3018 +#define SYN_DMA_AXI_TXRX_AWAR_ACE_CTL 0x301c +#define SYN_DMA_DEBUG_STATUS0 0x3020 +#define SYN_DMA_DEBUG_STATUS1 0x3024 +#define SYN_DMA_TX_EDMA_CTL 0x3040 +#define SYN_DMA_RX_EDMA_CTL 0x3044 +#define SYN_DMA_CH0_CTL 0x3100 +#define SYN_DMA_CH0_TX_CTL 0x3104 +#define SYN_DMA_CH0_RX_CTL 0x3108 +#define SYN_DMA_CH0_TXDESC_LIST_HADDR 0x3110 +#define 
SYN_DMA_CH0_TXDESC_LIST_LADDR 0x3114 +#define SYN_DMA_CH0_RXDESC_LIST_HADDR 0x3118 +#define SYN_DMA_CH0_RXDESC_LIST_LADDR 0x311c +#define SYN_DMA_CH0_TXDESC_TAIL_LPTR 0x3124 +#define SYN_DMA_CH0_RXDESC_TAIL_LPTR 0x312c +#define SYN_DMA_CH0_TXDESC_RING_LEN 0x3130 +#define SYN_DMA_CH0_RXDESC_RING_LEN 0x3134 +#define SYN_DMA_INT_ENABLE 0x3138 +#define SYN_DMA_RX_INT_WDOG_TIMER 0x313c + +/* DMA Register Bit definitions */ + +#endif /*__SYN_REG_H__*/ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/include/edma.h b/feeds/ipq807x/qca-nss-dp/src/hal/include/edma.h new file mode 100644 index 000000000..9ed0c3840 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/include/edma.h @@ -0,0 +1,31 @@ +/* + ************************************************************************** + * Copyright (c) 2016, 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** +*/ + +/* + * This file includes declarations defined by the EDMA + * dataplane and used by other layers of this driver. 
+ */ + +#ifndef __NSS_DP_EDMA__ +#define __NSS_DP_EDMA__ + +extern int edma_init(void); +extern void edma_cleanup(bool is_dp_override); +extern struct nss_dp_data_plane_ops nss_dp_edma_ops; + +#endif /*__NSS_DP_EDMA__ */ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/include/nss_dp_hal.h b/feeds/ipq807x/qca-nss-dp/src/hal/include/nss_dp_hal.h new file mode 100644 index 000000000..89cdb1abe --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/include/nss_dp_hal.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __NSS_DP_HAL_H__ +#define __NSS_DP_HAL_H__ + +#include "nss_dp_dev.h" + +/* + * nss_dp_hal_get_gmac_ops() + * Returns gmac hal ops based on the GMAC type. + */ +static inline struct nss_gmac_hal_ops *nss_dp_hal_get_gmac_ops(uint32_t gmac_type) +{ + return dp_global_ctx.gmac_hal_ops[gmac_type]; +} + +/* + * nss_dp_hal_set_gmac_ops() + * Sets dp global gmac hal ops based on the GMAC type. + */ +static inline void nss_dp_hal_set_gmac_ops(struct nss_gmac_hal_ops *hal_ops, uint32_t gmac_type) +{ + dp_global_ctx.gmac_hal_ops[gmac_type] = hal_ops; +} + +/* + * HAL functions implemented by SoC specific source files. 
+ */ +extern bool nss_dp_hal_init(void); +extern void nss_dp_hal_cleanup(void); +extern void nss_dp_hal_clk_enable(struct nss_dp_dev *dp_priv); +extern struct nss_dp_data_plane_ops *nss_dp_hal_get_data_plane_ops(void); + +#endif /* __NSS_DP_HAL_H__ */ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/include/nss_dp_hal_if.h b/feeds/ipq807x/qca-nss-dp/src/hal/include/nss_dp_hal_if.h new file mode 100644 index 000000000..68fc2da3e --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/include/nss_dp_hal_if.h @@ -0,0 +1,162 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017,2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF0 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_DP_HAL_IF_H__ +#define __NSS_DP_HAL_IF_H__ + +#include +#include +#include + +enum gmac_device_type { + GMAC_HAL_TYPE_QCOM = 0, /* 1G GMAC type */ + GMAC_HAL_TYPE_SYN_XGMAC,/* Synopsys XGMAC type */ + GMAC_HAL_TYPE_SYN_GMAC, /* Synopsys 1G GMAC type */ + GMAC_HAL_TYPE_MAX +}; + +/* + * gmac_hal_platform_data + */ +struct gmac_hal_platform_data { + struct net_device *netdev; /* Net device */ + uint32_t reg_len; /* Register space length */ + uint32_t mactype; /* MAC chip type */ + uint32_t macid; /* MAC sequence id on the Chip */ +}; + +/* + * NSS GMAC HAL device data + */ +struct nss_gmac_hal_dev { + void __iomem *mac_base; /* Base address of MAC registers */ + uint32_t version; /* GMAC Revision version */ + uint32_t drv_flags; /* Driver specific feature flags */ + + /* + * Phy related stuff + */ + uint32_t link_state; /* Link status as reported by the Phy */ + uint32_t duplex_mode; /* Duplex mode of the Phy */ + uint32_t speed; /* Speed of the Phy */ + uint32_t loop_back_mode;/* Loopback status of the Phy */ + uint32_t phy_mii_type; /* RGMII/SGMII/XSGMII */ + + struct net_device *netdev; + struct resource *memres; + uint32_t mac_reg_len; /* MAC Register block length */ + uint32_t mac_id; /* MAC sequence id on the Chip */ + spinlock_t slock; /* lock to protect concurrent reg access */ +}; + +/* + * nss_gmac_hal_ops + */ +struct nss_gmac_hal_ops { + void* (*init)(struct gmac_hal_platform_data *); + void (*exit)(struct nss_gmac_hal_dev *); + int32_t (*start)(struct nss_gmac_hal_dev *); + int32_t (*stop)(struct nss_gmac_hal_dev *); + void (*setmacaddr)(struct nss_gmac_hal_dev *, uint8_t *); + void (*getmacaddr)(struct nss_gmac_hal_dev *, uint8_t *); + void (*promisc)(struct nss_gmac_hal_dev *, bool enabled); + void (*multicast)(struct nss_gmac_hal_dev *, bool enabled); + void (*broadcast)(struct nss_gmac_hal_dev *, bool enabled); + void (*rxcsumoffload)(struct 
nss_gmac_hal_dev *, bool enabled); + void (*txcsumoffload)(struct nss_gmac_hal_dev *, bool enabled); + void (*rxflowcontrol)(struct nss_gmac_hal_dev *, bool enabled); + void (*txflowcontrol)(struct nss_gmac_hal_dev *, bool enabled); + int32_t (*setspeed)(struct nss_gmac_hal_dev *, uint32_t); + uint32_t (*getspeed)(struct nss_gmac_hal_dev *); + void (*setduplex)(struct nss_gmac_hal_dev *, uint8_t); + uint8_t (*getduplex)(struct nss_gmac_hal_dev *); + int32_t (*getstats)(struct nss_gmac_hal_dev *); + int32_t (*setmaxframe)(struct nss_gmac_hal_dev *, uint32_t); + int32_t (*getmaxframe)(struct nss_gmac_hal_dev *); + int32_t (*getndostats)(struct nss_gmac_hal_dev *, + struct rtnl_link_stats64 *); + void (*sendpause)(struct nss_gmac_hal_dev *); + void (*stoppause)(struct nss_gmac_hal_dev *); + int32_t (*getssetcount)(struct nss_gmac_hal_dev *, int32_t); + int32_t (*getstrings)(struct nss_gmac_hal_dev *, int32_t, uint8_t *); + int32_t (*getethtoolstats)(struct nss_gmac_hal_dev *, uint64_t *); +}; + +extern struct nss_gmac_hal_ops qcom_hal_ops; +extern struct nss_gmac_hal_ops syn_hal_ops; + +/********************************************************** + * Common functions + **********************************************************/ +/* + * hal_read_reg() + */ +static inline uint32_t hal_read_reg(void __iomem *regbase, uint32_t regoffset) +{ + return readl_relaxed(regbase + regoffset); +} + +/* + * hal_write_reg() + */ +static inline void hal_write_reg(void __iomem *regbase, uint32_t regoffset, + uint32_t regdata) +{ + writel_relaxed(regdata, regbase + regoffset); +} + +/* + * hal_set_reg_bits() + */ +static inline void hal_set_reg_bits(struct nss_gmac_hal_dev *nghd, + uint32_t regoffset, + uint32_t bitpos) +{ + uint32_t data; + + spin_lock(&nghd->slock); + data = bitpos | hal_read_reg(nghd->mac_base, regoffset); + hal_write_reg(nghd->mac_base, regoffset, data); + spin_unlock(&nghd->slock); +} + +/* + * hal_clear_reg_bits() + */ +static inline void hal_clear_reg_bits(struct 
nss_gmac_hal_dev *nghd, + uint32_t regoffset, + uint32_t bitpos) +{ + uint32_t data; + + spin_lock(&nghd->slock); + data = ~bitpos & hal_read_reg(nghd->mac_base, regoffset); + hal_write_reg(nghd->mac_base, regoffset, data); + spin_unlock(&nghd->slock); +} + +/* + * hal_check_reg_bits() + */ +static inline bool hal_check_reg_bits(void __iomem *regbase, + uint32_t regoffset, + uint32_t bitpos) +{ + return (bitpos & hal_read_reg(regbase, regoffset)) != 0; +} +#endif /* __NSS_DP_HAL_IF_H__ */ diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_data_plane.c b/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_data_plane.c new file mode 100644 index 000000000..0b9bdce1f --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_data_plane.c @@ -0,0 +1,336 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include "syn_data_plane.h" +#include "syn_reg.h" + +#define SYN_DP_NAPI_BUDGET 64 + +/* + * GMAC Ring info + */ +struct syn_dp_info dp_info[NSS_DP_HAL_MAX_PORTS]; + +/* + * syn_dp_napi_poll() + * Scheduled by napi to process RX and TX complete + */ +static int syn_dp_napi_poll(struct napi_struct *napi, int budget) +{ + struct nss_dp_dev *gmac_dev = container_of(napi, struct nss_dp_dev, napi); + struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1]; + int work_done; + + /* + * Update GMAC stats + */ + spin_lock_bh(&dp_info->stats_lock); + dp_info->stats.stats.rx_missed += syn_get_rx_missed(gmac_dev->gmac_hal_ctx); + dp_info->stats.stats.rx_missed += syn_get_fifo_overflows(gmac_dev->gmac_hal_ctx); + spin_unlock_bh(&dp_info->stats_lock); + + syn_dp_process_tx_complete(gmac_dev, dev_info); + work_done = syn_dp_rx(gmac_dev, dev_info, budget); + syn_dp_rx_refill(gmac_dev, dev_info); + + if (work_done < budget) { + napi_complete(napi); + syn_enable_dma_interrupt(gmac_dev->gmac_hal_ctx); + } + + return work_done; +} + +/* + * syn_dp_handle_irq() + * Process IRQ and schedule napi + */ +static irqreturn_t syn_dp_handle_irq(int irq, void *ctx) +{ + struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)ctx; + struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx; + + syn_clear_dma_status(nghd); + syn_disable_dma_interrupt(nghd); + + /* + * Schedule NAPI + */ + napi_schedule(&gmac_dev->napi); + + return IRQ_HANDLED; +} + +/* + * syn_dp_if_init() + * Initialize the GMAC data plane operations + */ +static int syn_dp_if_init(struct nss_dp_data_plane_ctx *dpc) +{ + struct net_device *netdev = dpc->dev; + struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev); + uint32_t macid = gmac_dev->macid; + struct syn_dp_info *dev_info = &dp_info[macid - 1]; + struct device *dev = &gmac_dev->pdev->dev; + int err; + + if (!netdev) { + netdev_dbg(netdev, "nss_dp_gmac: Invalid netdev pointer %px\n", netdev); + return NSS_DP_FAILURE; + } + + 
netdev_info(netdev, "nss_dp_gmac: Registering netdev %s(qcom-id:%d) with GMAC\n", netdev->name, macid); + + if (!dev_info->napi_added) { + netif_napi_add(netdev, &gmac_dev->napi, syn_dp_napi_poll, SYN_DP_NAPI_BUDGET); + + /* + * Requesting irq + */ + netdev->irq = platform_get_irq(gmac_dev->pdev, 0); + err = request_irq(netdev->irq, syn_dp_handle_irq, 0, "nss-dp-gmac", gmac_dev); + if (err) { + netdev_dbg(netdev, "err_code:%d, Mac %d IRQ %d request failed\n", err, + gmac_dev->macid, netdev->irq); + return NSS_DP_FAILURE; + } + + gmac_dev->drv_flags |= NSS_DP_PRIV_FLAG(IRQ_REQUESTED); + dev_info->napi_added = 1; + } + + /* + * Forcing the kernel to use 32-bit DMA addressing + */ + dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); + + /* + * Initialize the Tx/Rx ring + */ + if (syn_dp_setup_rings(gmac_dev, netdev, dev, dev_info)) { + netdev_dbg(netdev, "nss_dp_gmac: Error initializing GMAC rings %px\n", netdev); + return NSS_DP_FAILURE; + } + + spin_lock_init(&dev_info->data_lock); + spin_lock_init(&dev_info->stats_lock); + + netdev_dbg(netdev,"Synopsys GMAC dataplane initialized\n"); + + return NSS_DP_SUCCESS; +} + +/* + * syn_dp_if_open() + * Open the GMAC data plane operations + */ +static int syn_dp_if_open(struct nss_dp_data_plane_ctx *dpc, uint32_t tx_desc_ring, + uint32_t rx_desc_ring, uint32_t mode) +{ + struct net_device *netdev = dpc->dev; + struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev); + struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx; + + syn_enable_dma_rx(nghd); + syn_enable_dma_tx(nghd); + + napi_enable(&gmac_dev->napi); + syn_enable_dma_interrupt(nghd); + + netdev_dbg(netdev, "Synopsys GMAC dataplane opened\n"); + + return NSS_DP_SUCCESS; +} + +/* + * syn_dp_if_close() + * Close the GMAC data plane operations + */ +static int syn_dp_if_close(struct nss_dp_data_plane_ctx *dpc) +{ + struct net_device *netdev = dpc->dev; + struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev); + struct nss_gmac_hal_dev *nghd = 
gmac_dev->gmac_hal_ctx; + + syn_disable_dma_rx(nghd); + syn_disable_dma_tx(nghd); + + syn_disable_dma_interrupt(nghd); + napi_disable(&gmac_dev->napi); + + netdev_dbg(netdev, "Synopsys GMAC dataplane closed\n"); + + return NSS_DP_SUCCESS; +} + +/* + * syn_dp_if_link_state() + * Change of link for the dataplane + */ +static int syn_dp_if_link_state(struct nss_dp_data_plane_ctx *dpc, uint32_t link_state) +{ + struct net_device *netdev = dpc->dev; + + /* + * Switch interrupt based on the link state + */ + if (link_state) { + netdev_dbg(netdev, "Data plane link up\n"); + } else { + netdev_dbg(netdev, "Data plane link down\n"); + } + + return NSS_DP_SUCCESS; +} + +/* + * syn_dp_if_mac_addr() + */ +static int syn_dp_if_mac_addr(struct nss_dp_data_plane_ctx *dpc, uint8_t *addr) +{ + return NSS_DP_SUCCESS; +} + +/* + * syn_dp_if_change_mtu() + */ +static int syn_dp_if_change_mtu(struct nss_dp_data_plane_ctx *dpc, uint32_t mtu) +{ + /* + * TODO: Work on MTU fix along with register update for frame length + */ + return NSS_DP_SUCCESS; +} + +/* + * syn_dp_if_set_features() + * Set the supported net_device features + */ +static void syn_dp_if_set_features(struct nss_dp_data_plane_ctx *dpc) +{ + struct net_device *netdev = dpc->dev; + + netdev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + netdev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + netdev->vlan_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; + netdev->wanted_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM; +} + +/* + * syn_dp_if_xmit() + * Dataplane method to transmit the packet + */ +static netdev_tx_t syn_dp_if_xmit(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb) +{ + struct net_device *netdev = dpc->dev; + struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev); + struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1]; + int nfrags = skb_shinfo(skb)->nr_frags; + + /* + * Most likely, it is not a fragmented pkt, optimize for that + */ + if (likely(nfrags == 0)) { + if 
(syn_dp_tx(gmac_dev, dev_info, skb)) { + goto drop; + } + + return NETDEV_TX_OK; + } + +drop: + dev_kfree_skb_any(skb); + dev_info->stats.stats.tx_dropped++; + + return NETDEV_TX_BUSY; +} + +/* + * syn_dp_if_pause_on_off() + */ +static int syn_dp_if_pause_on_off(struct nss_dp_data_plane_ctx *dpc, uint32_t pause_on) +{ + return NSS_DP_SUCCESS; +} + +/* + * syn_dp_if_get_stats + * Get Synopsys GMAC data plane stats + */ +static void syn_dp_if_get_stats(struct nss_dp_data_plane_ctx *dpc, struct nss_dp_gmac_stats *stats) +{ + struct net_device *netdev = dpc->dev; + struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev); + struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1]; + + spin_lock_bh(&dev_info->stats_lock); + netdev_dbg(netdev, "GETTING stats: rx_packets:%llu rx_bytes:%llu mmc_rx_crc_errors:%llu", dev_info->stats.stats.rx_packets, + dev_info->stats.stats.rx_bytes, dev_info->stats.stats.mmc_rx_crc_errors); + memcpy(stats, &dev_info->stats, sizeof(*stats)); + spin_unlock_bh(&dev_info->stats_lock); +} + +/* + * syn_dp_if_deinit() + * Free all the Synopsys GMAC resources + */ +static int syn_dp_if_deinit(struct nss_dp_data_plane_ctx *dpc) +{ + struct net_device *netdev = dpc->dev; + struct nss_dp_dev *gmac_dev = (struct nss_dp_dev *)netdev_priv(netdev); + struct syn_dp_info *dev_info = &dp_info[gmac_dev->macid - 1]; + + if (dev_info->napi_added) { + /* + * Remove interrupt handlers and NAPI + */ + if (gmac_dev->drv_flags & NSS_DP_PRIV_FLAG(IRQ_REQUESTED)) { + netdev_dbg(netdev, "Freeing IRQ %d for Mac %d\n", netdev->irq, gmac_dev->macid); + synchronize_irq(netdev->irq); + free_irq(netdev->irq, gmac_dev); + gmac_dev->drv_flags &= ~NSS_DP_PRIV_FLAG(IRQ_REQUESTED); + } + + netif_napi_del(&gmac_dev->napi); + dev_info->napi_added = 0; + } + + /* + * Cleanup and free the rings + */ + syn_dp_cleanup_rings(gmac_dev, netdev, dev_info); + + return NSS_DP_SUCCESS; +} + +/* + * nss_dp_gmac_ops + * Data plane operations for Synopsys GMAC + */ +struct 
nss_dp_data_plane_ops nss_dp_gmac_ops = { + .init = syn_dp_if_init, + .open = syn_dp_if_open, + .close = syn_dp_if_close, + .link_state = syn_dp_if_link_state, + .mac_addr = syn_dp_if_mac_addr, + .change_mtu = syn_dp_if_change_mtu, + .xmit = syn_dp_if_xmit, + .set_features = syn_dp_if_set_features, + .pause_on_off = syn_dp_if_pause_on_off, + .get_stats = syn_dp_if_get_stats, + .deinit = syn_dp_if_deinit, +}; diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_data_plane.h b/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_data_plane.h new file mode 100644 index 000000000..c96309599 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_data_plane.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +#ifndef __NSS_DP_SYN_DATAPLANE__ +#define __NSS_DP_SYN_DATAPLANE__ + +#include "nss_dp_dev.h" +#include "syn_dma_desc.h" + +#define SYN_DP_TX_DESC_SIZE 128 /* Tx Descriptors needed in the descriptor pool/queue */ +#define SYN_DP_RX_DESC_SIZE 128 /* Rx Descriptors needed in the descriptor pool/queue */ +#define SYN_DP_MINI_JUMBO_FRAME_MTU 1978 +#define SYN_DP_MAX_DESC_BUFF 0x1FFF /* Max size of buffer that can be programed into one field of desc */ + +/* + * syn_dp_info + * Synopysys GMAC Dataplane information + */ +struct syn_dp_info { + struct nss_dp_gmac_stats stats; /* GMAC driver stats */ + + struct sk_buff *rx_skb_list[SYN_DP_RX_DESC_SIZE]; /* Rx skb pool helping RX DMA descriptors*/ + + dma_addr_t rx_desc_dma; /* Dma-albe address of first rx descriptor + either in ring or chain mode, this is + used by the GMAC device */ + + struct dma_desc *rx_desc; /* start address of RX descriptors ring or + chain, this is used by the driver */ + + uint32_t busy_rx_desc; /* Number of Rx Descriptors owned by + DMA at any given time */ + + uint32_t rx_desc_count; /* number of rx descriptors in the + tx descriptor queue/pool */ + + uint32_t rx_busy; /* index of the rx descriptor owned by DMA, + obtained by nss_gmac_get_rx_qptr() */ + + uint32_t rx_next; /* index of the rx descriptor next available + with driver, given to DMA by + nss_gmac_set_rx_qptr()*/ + + struct dma_desc *rx_busy_desc; /* Rx Descriptor address corresponding + to the index tx_busy */ + + struct dma_desc *rx_next_desc; /* Rx Descriptor address corresponding + to the index rx_next */ + + struct sk_buff *tx_skb_list[SYN_DP_RX_DESC_SIZE]; /* Tx skb pool helping RX DMA descriptors*/ + + dma_addr_t tx_desc_dma; /* Dma-able address of first tx descriptor + either in ring or chain mode, this is used + by the GMAC device */ + + struct dma_desc *tx_desc; /* start address of TX descriptors ring or + chain, this is used by the driver */ + + uint32_t busy_tx_desc; /* Number of Tx Descriptors owned by + DMA at 
any given time */ + + uint32_t tx_desc_count; /* number of tx descriptors in the + rx descriptor queue/pool */ + + uint32_t tx_busy; /* index of the tx descriptor owned by DMA, + is obtained by nss_gmac_get_tx_qptr() */ + + uint32_t tx_next; /* index of the tx descriptor next available + with driver, given to DMA by + nss_gmac_set_tx_qptr() */ + + struct dma_desc *tx_busy_desc; /* Tx Descriptor address corresponding + to the index tx_busy */ + + struct dma_desc *tx_next_desc; /* Tx Descriptor address corresponding + to the index tx_next */ + + spinlock_t data_lock; /* Lock to protect datapath */ + spinlock_t stats_lock; /* Lock to protect datapath */ + int napi_added; /* flag to indicate napi add status */ +}; + +/* + * GMAC Tx/Tx APIs + */ +int syn_dp_setup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info); +int syn_dp_cleanup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct syn_dp_info *dev_info); + +int syn_dp_rx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, int budget); +void syn_dp_rx_refill(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info); + +int syn_dp_tx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb); +void syn_dp_process_tx_complete(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info); + +#endif /* __NSS_DP_SYN_DATAPLANE__ */ \ No newline at end of file diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dma_desc.h b/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dma_desc.h new file mode 100644 index 000000000..5b50d388f --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dma_desc.h @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __SYN_DESC__ +#define __SYN_DESC__ + +/********************************************************** + * DMA Engine descriptors + **********************************************************/ +/* +******Enhanced Descritpor structure to support 8K buffer per buffer ******* + +dma_rx_base_addr = 0x000C, CSR3 - Receive Descriptor list base address +dma_rx_base_addr is the pointer to the first Rx Descriptors. +The Descriptor format in Little endian with a 32 bit Data bus is as shown below. + +Similarly +dma_tx_base_addr = 0x0010, CSR4 - Transmit Descriptor list base address +dma_tx_base_addr is the pointer to the first Tx Descriptors. +The Descriptor format in Little endian with a 32 bit Data bus is as shown below. 
+ ------------------------------------------------------------------------- + RDES0 |OWN (31)| Status | + ------------------------------------------------------------------------- + RDES1 | Ctrl | Res | Byte Count Buffer 2 | Ctrl | Res | Byte Count Buffer 1 | + ------------------------------------------------------------------------- + RDES2 | Buffer 1 Address | + ------------------------------------------------------------------------- + RDES3 | Buffer 2 Address / Next Descriptor Address | + ------------------------------------------------------------------------- + RDES4 | Extended Status | + ------------------------------------------------------------------------- + RDES5 | Reserved | + ------------------------------------------------------------------------- + RDES6 | Receive Timestamp Low | + ------------------------------------------------------------------------- + RDES7 | Receive Timestamp High | + ------------------------------------------------------------------------- + + ------------------------------------------------------------------------ + TDES0 |OWN (31)| Ctrl | Res | Ctrl | Res | Status | + ------------------------------------------------------------------------ + TDES1 | Res | Byte Count Buffer 2 | Res | Byte Count Buffer 1 | + ------------------------------------------------------------------------ + TDES2 | Buffer 1 Address | + ------------------------------------------------------------------------ + TDES3 | Buffer 2 Address / Next Descriptor Address | + ------------------------------------------------------------------------ + TDES4 | Reserved | + ------------------------------------------------------------------------ + TDES5 | Reserved | + ------------------------------------------------------------------------ + TDES6 | Transmit Timestamp Low | + ------------------------------------------------------------------------ + TDES7 | Transmit Timestamp Higher | + ------------------------------------------------------------------------ +*/ + +/* 
+ * dma_descriptor_status + * status word of DMA descriptor + */ +enum dma_descriptor_status { + desc_own_by_dma = 0x80000000, /* (OWN)Descriptor is + owned by DMA engine */ + desc_rx_da_filter_fail = 0x40000000, /* (AFM)Rx - DA Filter + Fail for the rx frame */ + desc_rx_frame_length_mask = 0x3FFF0000, /* (FL)Receive descriptor + frame length */ + desc_rx_frame_length_shift = 16, + desc_rx_error = 0x00008000, /* (ES)Error summary bit + - OR of the following bits: + DE || OE || IPC || GF || LC || RWT + || RE || CE */ + desc_rx_truncated = 0x00004000, /* (DE)Rx - no more descriptors + for receive frame */ + desc_sa_filter_fail = 0x00002000, /* (SAF)Rx - SA Filter Fail for + the received frame */ + desc_rx_length_error = 0x00001000, /* (LE)Rx - frm size not + matching with len field */ + desc_rx_overflow = 0x00000800, /* (OE)Rx - frm was damaged due + to buffer overflow */ + desc_rx_vlan_tag = 0x00000400, /* (VLAN)Rx - received frame + is a VLAN frame */ + desc_rx_first = 0x00000200, /* (FS)Rx - first + descriptor of the frame */ + desc_rx_last = 0x00000100, /* (LS)Rx - last + descriptor of the frame */ + desc_rx_long_frame = 0x00000080, /* (Giant Frame)Rx - frame is + longer than 1518/1522 */ + desc_rx_collision = 0x00000040, /* (LC)Rx - late collision + occurred during reception */ + desc_rx_frame_ether = 0x00000020, /* (FT)Rx - Frame type - Ether, + otherwise 802.3 */ + desc_rx_watchdog = 0x00000010, /* (RWT)Rx - watchdog timer + expired during reception */ + desc_rx_mii_error = 0x00000008, /* (RE)Rx - error reported + by MII interface */ + desc_rx_dribbling = 0x00000004, /* (DE)Rx - frame contains non + int multiple of 8 bits */ + desc_rx_crc = 0x00000002, /* (CE)Rx - CRC error */ + desc_rx_ext_sts = 0x00000001, /* Extended Status Available + in RDES4 */ + desc_tx_error = 0x00008000, /* (ES)Error summary Bits */ + desc_tx_int_enable = 0x40000000, /* (IC)Tx - interrupt on + completion */ + desc_tx_last = 0x20000000, /* (LS)Tx - Last segment of the + frame */ + 
desc_tx_first = 0x10000000, /* (FS)Tx - First segment of the + frame */ + desc_tx_disable_crc = 0x08000000, /* (DC)Tx - Add CRC disabled + (first segment only) */ + desc_tx_disable_padd = 0x04000000, /* (DP)disable padding, + added by - reyaz */ + desc_tx_cis_mask = 0x00c00000, /* Tx checksum offloading + control mask */ + desc_tx_cis_bypass = 0x00000000, /* Checksum bypass */ + desc_tx_cis_ipv4_hdr_cs = 0x00400000, /* IPv4 header checksum */ + desc_tx_cis_tcp_only_cs = 0x00800000, /* TCP/UDP/ICMP checksum. + Pseudo header checksum + is assumed to be present */ + desc_tx_cis_tcp_pseudo_cs = 0x00c00000, /* TCP/UDP/ICMP checksum fully + in hardware including + pseudo header */ + desc_tx_desc_end_of_ring = 0x00200000, /* (TER)End of descriptor ring*/ + desc_tx_desc_chain = 0x00100000, /* (TCH)Second buffer address + is chain address */ + desc_rx_chk_bit0 = 0x00000001, /* Rx Payload Checksum Error */ + desc_rx_chk_bit7 = 0x00000080, /* (IPC CS ERROR)Rx - Ipv4 + header checksum error */ + desc_rx_chk_bit5 = 0x00000020, /* (FT)Rx - Frame type - Ether, + otherwise 802.3 */ + desc_rx_ts_avail = 0x00000080, /* Time stamp available */ + desc_rx_frame_type = 0x00000020, /* (FT)Rx - Frame type - Ether, + otherwise 802.3 */ + desc_tx_ipv4_chk_error = 0x00010000, /* (IHE) Tx Ip header error */ + desc_tx_timeout = 0x00004000, /* (JT)Tx - Transmit + jabber timeout */ + desc_tx_frame_flushed = 0x00002000, /* (FF)Tx - DMA/MTL flushed + the frame due to SW flush */ + desc_tx_pay_chk_error = 0x00001000, /* (PCE) Tx Payload checksum + Error */ + desc_tx_lost_carrier = 0x00000800, /* (LC)Tx - carrier lost + during tramsmission */ + desc_tx_no_carrier = 0x00000400, /* (NC)Tx - no carrier signal + from the tranceiver */ + desc_tx_late_collision = 0x00000200, /* (LC)Tx - transmission aborted + due to collision */ + desc_tx_exc_collisions = 0x00000100, /* (EC)Tx - transmission aborted + after 16 collisions */ + desc_tx_vlan_frame = 0x00000080, /* (VF)Tx - VLAN-type frame */ + 
desc_tx_coll_mask = 0x00000078, /* (CC)Tx - Collision count */ + desc_tx_coll_shift = 3, + desc_tx_exc_deferral = 0x00000004, /* (ED)Tx - excessive deferral */ + desc_tx_underflow = 0x00000002, /* (UF)Tx - late data arrival + from the memory */ + desc_tx_deferred = 0x00000001, /* (DB)Tx - frame + transmision deferred */ + + /* + * This explains the RDES1/TDES1 bits layout + * ------------------------------------------------------ + * RDES1/TDES1 | Control Bits | Byte Count Buf 2 | Byte Count Buf 1 | + * ------------------------------------------------------ + */ + + /* dma_descriptor_length */ /* length word of DMA descriptor */ + desc_rx_dis_int_compl = 0x80000000, /* (Disable Rx int on completion) */ + desc_rx_desc_end_of_ring = 0x00008000, /* (RER)End of descriptor ring */ + desc_rx_desc_chain = 0x00004000, /* (RCH)Second buffer address + is chain address */ + desc_size2_mask = 0x1FFF0000, /* (RBS2/TBS2) Buffer 2 size */ + desc_size2_shift = 16, + desc_size1_mask = 0x00001FFF, /* (RBS1/TBS1) Buffer 1 size */ + desc_size1_shift = 0, + + /* + * This explains the RDES4 Extended Status bits layout + * -------------------------------------------------------- + * RDES4 | Extended Status | + * -------------------------------------------------------- + */ + desc_rx_ts_dropped = 0x00004000, /* PTP snapshot available */ + desc_rx_ptp_ver = 0x00002000, /* When set indicates IEEE1584 + Version 2 (else Ver1) */ + desc_rx_ptp_frame_type = 0x00001000, /* PTP frame type Indicates PTP + sent over ethernet */ + desc_rx_ptp_message_type = 0x00000F00, /* Message Type */ + desc_rx_ptp_no = 0x00000000, /* 0000 => No PTP message rcvd */ + desc_rx_ptp_sync = 0x00000100, /* 0001 => Sync (all clock + types) received */ + desc_rx_ptp_follow_up = 0x00000200, /* 0010 => Follow_Up (all clock + types) received */ + desc_rx_ptp_delay_req = 0x00000300, /* 0011 => Delay_Req (all clock + types) received */ + desc_rx_ptp_delay_resp = 0x00000400, /* 0100 => Delay_Resp (all clock + types) received 
*/ + desc_rx_ptp_pdelay_req = 0x00000500, /* 0101 => Pdelay_Req (in P + to P tras clk) or Announce + in Ord and Bound clk */ + desc_rx_ptp_pdelay_resp = 0x00000600, /* 0110 => Pdealy_Resp(in P to + P trans clk) or Management in + Ord and Bound clk */ + desc_rx_ptp_pdelay_resp_fp = 0x00000700,/* 0111 => Pdelay_Resp_Follow_Up + (in P to P trans clk) or + Signaling in Ord and Bound + clk */ + desc_rx_ptp_ipv6 = 0x00000080, /* Received Packet is in IPV6 */ + desc_rx_ptp_ipv4 = 0x00000040, /* Received Packet is in IPV4 */ + desc_rx_chk_sum_bypass = 0x00000020, /* When set indicates checksum + offload engine is bypassed */ + desc_rx_ip_payload_error = 0x00000010, /* When set indicates 16bit IP + payload CS is in error */ + desc_rx_ip_header_error = 0x00000008, /* When set indicates 16bit IPV4 + hdr CS is err or IP datagram + version is not consistent + with Ethernet type value */ + desc_rx_ip_payload_type = 0x00000007, /* Indicate the type of payload + encapsulated in IPdatagram + processed by COE (Rx) */ + desc_rx_ip_payload_unknown = 0x00000000,/* Unknown or didnot process + IP payload */ + desc_rx_ip_payload_udp = 0x00000001, /* UDP */ + desc_rx_ip_payload_tcp = 0x00000002, /* TCP */ + desc_rx_ip_payload_icmp = 0x00000003, /* ICMP */ +}; + +/* + * dma_desc + * DMA Descriptor Structure + * + * The structure is common for both receive and transmit descriptors. 
+ */ +struct dma_desc { + uint32_t status; /* Status */ + uint32_t length; /* Buffer 1 and Buffer 2 length */ + uint32_t buffer1; /* Network Buffer 1 pointer (DMA-able)*/ + uint32_t data1; /* This holds virtual address of + buffer1, not used by DMA */ + + /* This data below is used only by driver */ + uint32_t extstatus; /* Extended status of a Rx Descriptor */ + uint32_t reserved1; /* Reserved word */ + uint32_t timestamplow; /* Lower 32 bits of the 64 + bit timestamp value */ + uint32_t timestamphigh; /* Higher 32 bits of the 64 + bit timestamp value */ +}; + +/* + * syn_dp_gmac_tx_checksum_offload_tcp_pseudo + * The checksum offload engine is enabled to do complete checksum computation. + */ +static inline void syn_dp_gmac_tx_checksum_offload_tcp_pseudo(struct dma_desc *desc) +{ + desc->status = ((desc->status & (~desc_tx_cis_mask)) | desc_tx_cis_tcp_pseudo_cs); +} + +/* + * syn_dp_gmac_tx_desc_init_ring + * Initialize the tx descriptors for ring or chain mode operation. + */ +static inline void syn_dp_gmac_tx_desc_init_ring(struct dma_desc *desc, uint32_t no_of_desc) +{ + struct dma_desc *last_desc = desc + no_of_desc - 1; + memset(desc, 0, no_of_desc * sizeof(struct dma_desc)); + last_desc->status = desc_tx_desc_end_of_ring; +} + +/* + * syn_dp_gmac_rx_desc_init_ring + * Initialize the rx descriptors for ring or chain mode operation. + */ +static inline void syn_dp_gmac_rx_desc_init_ring(struct dma_desc *desc, uint32_t no_of_desc) +{ + struct dma_desc *last_desc = desc + no_of_desc - 1; + memset(desc, 0, no_of_desc * sizeof(struct dma_desc)); + last_desc->length = desc_rx_desc_end_of_ring; +} + +/* + * syn_dp_gmac_is_rx_desc_valid + * Checks whether the rx descriptor is valid. + */ +static inline bool syn_dp_gmac_is_rx_desc_valid(uint32_t status) +{ + return (status & (desc_rx_error | desc_rx_first | desc_rx_last)) == + (desc_rx_first | desc_rx_last); +} + +/* + * syn_dp_gmac_get_rx_desc_frame_length + * Returns the byte length of received frame including CRC. 
+ */ +static inline uint32_t syn_dp_gmac_get_rx_desc_frame_length(uint32_t status) +{ + return (status & desc_rx_frame_length_mask) >> desc_rx_frame_length_shift; +} + +/* + * syn_dp_gmac_is_desc_owned_by_dma + * Checks whether the descriptor is owned by DMA. + */ +static inline bool syn_dp_gmac_is_desc_owned_by_dma(struct dma_desc *desc) +{ + return (desc->status & desc_own_by_dma) == desc_own_by_dma; +} + +/* + * syn_dp_gmac_is_desc_empty + * Checks whether the descriptor is empty. + */ +static inline bool syn_dp_gmac_is_desc_empty(struct dma_desc *desc) +{ + /* + * If length of both buffer1 & buffer2 are zero then desc is empty + */ + return (desc->length & desc_size1_mask) == 0; +} + +/* + * syn_dp_gmac_get_tx_collision_count + * Gives the transmission collision count. + */ +static inline uint32_t syn_dp_gmac_get_tx_collision_count(uint32_t status) +{ + return (status & desc_tx_coll_mask) >> desc_tx_coll_shift; +} + +#endif /* __SYN_DESC__ */ \ No newline at end of file diff --git a/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dp_cfg.c b/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dp_cfg.c new file mode 100644 index 000000000..ff1869990 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dp_cfg.c @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include "nss_dp_dev.h" +#include "syn_data_plane.h" +#include "syn_reg.h" + +/* + * syn_dp_setup_rx_desc_queue + * This sets up the receive Descriptor queue in ring or chain mode. + */ +static int syn_dp_setup_rx_desc_queue(struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info, + uint32_t no_of_desc, uint32_t desc_mode) +{ + struct dma_desc *first_desc = NULL; + dma_addr_t dma_addr; + + dev_info->rx_desc_count = 0; + + BUG_ON(desc_mode != RINGMODE); + BUG_ON((no_of_desc & (no_of_desc - 1)) != 0); + + netdev_dbg(netdev, "total size of memory required for Rx Descriptors in Ring Mode = %u\n", (uint32_t)((sizeof(struct dma_desc) * no_of_desc))); + + first_desc = dma_alloc_coherent(dev, sizeof(struct dma_desc) * no_of_desc, &dma_addr, GFP_KERNEL); + if (first_desc == NULL) { + netdev_dbg(netdev, "Error in Rx Descriptor Memory allocation in Ring mode\n"); + return -ENOMEM; + } + + dev_info->rx_desc_count = no_of_desc; + dev_info->rx_desc = first_desc; + dev_info->rx_desc_dma = dma_addr; + + netdev_dbg(netdev, "Rx Descriptors in Ring Mode: No. of descriptors = %d base = 0x%px dma = 0x%px\n", + no_of_desc, first_desc, (void *)dma_addr); + + syn_dp_gmac_rx_desc_init_ring(dev_info->rx_desc, no_of_desc); + + dev_info->rx_next = 0; + dev_info->rx_busy = 0; + dev_info->rx_next_desc = first_desc; + dev_info->rx_busy_desc = first_desc; + dev_info->busy_rx_desc = 0; + + return 0; +} + +/* + * syn_dp_setup_tx_desc_queue + * This sets up the transmit Descriptor queue in ring or chain mode. 
+ */ +static int syn_dp_setup_tx_desc_queue(struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info, + uint32_t no_of_desc, uint32_t desc_mode) +{ + struct dma_desc *first_desc = NULL; + dma_addr_t dma_addr; + + dev_info->tx_desc_count = 0; + + BUG_ON(desc_mode != RINGMODE); + BUG_ON((no_of_desc & (no_of_desc - 1)) != 0); + + netdev_dbg(netdev, "Total size of memory required for Tx Descriptors in Ring Mode = %u\n", (uint32_t)((sizeof(struct dma_desc) * no_of_desc))); + + first_desc = dma_alloc_coherent(dev, sizeof(struct dma_desc) * no_of_desc, &dma_addr, GFP_KERNEL); + if (first_desc == NULL) { + netdev_dbg(netdev, "Error in Tx Descriptors memory allocation\n"); + return -ENOMEM; + } + + dev_info->tx_desc_count = no_of_desc; + dev_info->tx_desc = first_desc; + dev_info->tx_desc_dma = dma_addr; + netdev_dbg(netdev, "Tx Descriptors in Ring Mode: No. of descriptors = %d base = 0x%px dma = 0x%px\n" + , no_of_desc, first_desc, (void *)dma_addr); + + syn_dp_gmac_tx_desc_init_ring(dev_info->tx_desc, dev_info->tx_desc_count); + + dev_info->tx_next = 0; + dev_info->tx_busy = 0; + dev_info->tx_next_desc = first_desc; + dev_info->tx_busy_desc = first_desc; + dev_info->busy_tx_desc = 0; + + return 0; +} + +/* + * syn_dp_setup_rings + * Perform initial setup of Tx/Rx rings + */ +int syn_dp_setup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct device *dev, struct syn_dp_info *dev_info) +{ + struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx; + int err; + + err = syn_dp_setup_rx_desc_queue(netdev, dev, dev_info, SYN_DP_RX_DESC_SIZE, RINGMODE); + if (err) { + netdev_dbg(netdev, "nss_dp_gmac: rx descriptor setup unsuccessfull, err code: %d", err); + return NSS_DP_FAILURE; + } + + err = syn_dp_setup_tx_desc_queue(netdev, dev, dev_info, SYN_DP_TX_DESC_SIZE, RINGMODE); + if (err) { + netdev_dbg(netdev, "nss_dp_gmac: tx descriptor setup unsuccessfull, err code: %d", err); + return NSS_DP_FAILURE; + } + + syn_dp_rx_refill(gmac_dev, 
dev_info); + + syn_init_tx_desc_base(nghd, dev_info->tx_desc_dma); + syn_init_rx_desc_base(nghd, dev_info->rx_desc_dma); + + return NSS_DP_SUCCESS; +} + +/* + * syn_dp_cleanup_rings + * Cleanup Synopsys GMAC rings + */ +int syn_dp_cleanup_rings(struct nss_dp_dev *gmac_dev, struct net_device *netdev, struct syn_dp_info *dev_info) +{ + uint32_t rx_skb_index; + struct dma_desc *rxdesc; + + uint32_t tx_skb_index; + struct dma_desc *txdesc; + int i; + struct sk_buff *skb; + + /* + * Rx Ring cleaning + * We are assuming that the NAPI poll was already completed. + * No need of a lock here since the NAPI and interrupts have been disabled now + */ + rx_skb_index = dev_info->rx_busy; + for (i = 0; i < dev_info->busy_rx_desc; i++) { + rx_skb_index = rx_skb_index & (dev_info->rx_desc_count - 1); + rxdesc = dev_info->rx_busy_desc; + + dma_unmap_single(&(gmac_dev->netdev->dev), rxdesc->buffer1, + SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE); + + skb = dev_info->rx_skb_list[rx_skb_index]; + if (unlikely(skb != NULL)) { + dev_kfree_skb(skb); + dev_info->rx_skb_list[rx_skb_index] = NULL; + } + } + + dma_free_coherent(&(gmac_dev->netdev->dev), (sizeof(struct dma_desc) * SYN_DP_RX_DESC_SIZE), + dev_info->rx_desc, dev_info->rx_desc_dma); + + /* + * Tx Ring cleaning + */ + spin_lock_bh(&dev_info->data_lock); + + tx_skb_index = dev_info->tx_busy; + for (i = 0; i < dev_info->busy_tx_desc; i++) { + tx_skb_index = tx_skb_index & (dev_info->tx_desc_count - 1); + txdesc = dev_info->tx_busy_desc; + + dma_unmap_single(&(gmac_dev->netdev->dev), txdesc->buffer1, + SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE); + + skb = dev_info->tx_skb_list[tx_skb_index]; + if (unlikely(skb != NULL)) { + dev_kfree_skb(skb); + dev_info->tx_skb_list[tx_skb_index] = NULL; + } + } + + spin_unlock_bh(&dev_info->data_lock); + + dma_free_coherent(&(gmac_dev->netdev->dev), (sizeof(struct dma_desc) * SYN_DP_TX_DESC_SIZE), + dev_info->tx_desc, dev_info->tx_desc_dma); + + return 0; +} \ No newline at end of file diff 
--git a/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dp_tx_rx.c b/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dp_tx_rx.c new file mode 100644 index 000000000..ea01884d8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/hal/syn_gmac_dp/syn_dp_tx_rx.c @@ -0,0 +1,425 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include + +#include +#include +#include + +#include "syn_data_plane.h" +#include "syn_reg.h" + +/* + * syn_dp_reset_rx_qptr + * Reset the descriptor after Rx is over. + */ +static inline void syn_dp_reset_rx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info) +{ + + /* Index of descriptor the DMA just completed. 
+ * May be useful when data is spread over multiple buffers/descriptors + */ + uint32_t rxnext = dev_info->rx_busy; + struct dma_desc *rxdesc = dev_info->rx_busy_desc; + + BUG_ON(rxdesc != (dev_info->rx_desc + rxnext)); + dev_info->rx_busy = (rxnext + 1) & (dev_info->rx_desc_count - 1); + dev_info->rx_busy_desc = dev_info->rx_desc + dev_info->rx_busy; + + dev_info->rx_skb_list[rxnext] = NULL; + rxdesc->status = 0; + rxdesc->length &= desc_rx_desc_end_of_ring; + rxdesc->buffer1 = 0; + rxdesc->data1 = 0; + rxdesc->reserved1 = 0; + + /* + * This returns one descriptor to processor. So busy count will be decremented by one. + */ + dev_info->busy_rx_desc--; +} + +/* + * syn_dp_set_rx_qptr + * Prepares the descriptor to receive packets. + */ +static inline int32_t syn_dp_set_rx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, + uint32_t Buffer1, uint32_t Length1, struct sk_buff *skb) +{ + uint32_t rxnext = dev_info->rx_next; + struct dma_desc *rxdesc = dev_info->rx_next_desc; + uint32_t rx_skb_index = rxnext; + + BUG_ON(dev_info->busy_rx_desc >= dev_info->rx_desc_count); + BUG_ON(rxdesc != (dev_info->rx_desc + rxnext)); + BUG_ON(!syn_dp_gmac_is_desc_empty(rxdesc)); + BUG_ON(syn_dp_gmac_is_desc_owned_by_dma(rxdesc)); + + if (Length1 > SYN_DP_MAX_DESC_BUFF) { + rxdesc->length |= (SYN_DP_MAX_DESC_BUFF << desc_size1_shift) & desc_size1_mask; + rxdesc->length |= ((Length1 - SYN_DP_MAX_DESC_BUFF) << desc_size2_shift) & desc_size2_mask; + } else { + rxdesc->length |= ((Length1 << desc_size1_shift) & desc_size1_mask); + } + + rxdesc->buffer1 = Buffer1; + dev_info->rx_skb_list[rx_skb_index] = skb; + + /* Program second buffer address if using two buffers. 
*/ + if (Length1 > SYN_DP_MAX_DESC_BUFF) + rxdesc->data1 = Buffer1 + SYN_DP_MAX_DESC_BUFF; + else + rxdesc->data1 = 0; + + rxdesc->extstatus = 0; + rxdesc->timestamplow = 0; + rxdesc->timestamphigh = 0; + + /* + * Ensure all write completed before setting own by dma bit so when gmac + * HW takeover this descriptor, all the fields are filled correctly + */ + wmb(); + rxdesc->status = desc_own_by_dma; + + dev_info->rx_next = (rxnext + 1) & (dev_info->rx_desc_count - 1); + dev_info->rx_next_desc = dev_info->rx_desc + dev_info->rx_next; + + /* + * 1 descriptor will be given to HW. So busy count incremented by 1. + */ + dev_info->busy_rx_desc++; + + return rxnext; +} + +/* + * syn_dp_rx_refill + * Refill the RX descrptor + */ +void syn_dp_rx_refill(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info) +{ + struct net_device *netdev = gmac_dev->netdev; + struct device *dev = &gmac_dev->pdev->dev; + int empty_count = SYN_DP_RX_DESC_SIZE - dev_info->busy_rx_desc; + + dma_addr_t dma_addr; + int i; + struct sk_buff *skb; + + for (i = 0; i < empty_count; i++) { + skb = __netdev_alloc_skb(netdev, SYN_DP_MINI_JUMBO_FRAME_MTU, GFP_ATOMIC); + if (unlikely(skb == NULL)) { + netdev_dbg(netdev, "Unable to allocate skb, will try next time\n"); + break; + } + + skb_reserve(skb, NET_IP_ALIGN); + + dma_addr = dma_map_single(dev, skb->data, SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, dma_addr))) { + dev_kfree_skb(skb); + netdev_dbg(netdev, "DMA mapping failed for empty buffer\n"); + break; + } + + syn_dp_set_rx_qptr(gmac_dev, dev_info, dma_addr, SYN_DP_MINI_JUMBO_FRAME_MTU, skb); + } +} + +/* + * syn_dp_rx() + * Process RX packets + */ +int syn_dp_rx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, int budget) +{ + struct dma_desc *desc = NULL; + int frame_length, busy; + uint32_t status; + struct sk_buff *rx_skb; + uint32_t rx_skb_index; + + if (!dev_info->busy_rx_desc) { + /* no desc are held by gmac dma, we are done */ + return 
0; + } + + busy = dev_info->busy_rx_desc; + if (busy > budget) + busy = budget; + + do { + desc = dev_info->rx_busy_desc; + if (syn_dp_gmac_is_desc_owned_by_dma(desc)) { + /* desc still hold by gmac dma, so we are done */ + break; + } + + status = desc->status; + + rx_skb_index = dev_info->rx_busy; + rx_skb = dev_info->rx_skb_list[rx_skb_index]; + + dma_unmap_single(&(gmac_dev->netdev->dev), desc->buffer1, + SYN_DP_MINI_JUMBO_FRAME_MTU, DMA_FROM_DEVICE); + + spin_lock_bh(&dev_info->stats_lock); + if (likely(syn_dp_gmac_is_rx_desc_valid(status))) { + /* We have a pkt to process get the frame length */ + frame_length = syn_dp_gmac_get_rx_desc_frame_length(status); + /* Get rid of FCS: 4 */ + frame_length -= ETH_FCS_LEN; + + /* Valid packet, collect stats */ + dev_info->stats.stats.rx_packets++; + dev_info->stats.stats.rx_bytes += frame_length; + + /* type_trans and deliver to linux */ + skb_put(rx_skb, frame_length); + rx_skb->protocol = eth_type_trans(rx_skb, gmac_dev->netdev); + rx_skb->ip_summed = CHECKSUM_UNNECESSARY; + napi_gro_receive(&gmac_dev->napi, rx_skb); + + } else { + dev_info->stats.stats.rx_errors++; + dev_kfree_skb(rx_skb); + + if (status & (desc_rx_crc | desc_rx_collision | + desc_rx_overflow | desc_rx_dribbling | + desc_rx_length_error)) { + dev_info->stats.stats.mmc_rx_crc_errors += (status & desc_rx_crc) ? 1 : 0; + dev_info->stats.stats.rx_late_collision_errors += (status & desc_rx_collision) ? 1 : 0; + dev_info->stats.stats.mmc_rx_overflow_errors += (status & desc_rx_overflow) ? 1 : 0; + dev_info->stats.stats.rx_dribble_bit_errors += (status & desc_rx_dribbling) ? 1 : 0; + dev_info->stats.stats.rx_length_errors += (status & desc_rx_length_error) ? 1 : 0; + } + } + + spin_unlock_bh(&dev_info->stats_lock); + + syn_dp_reset_rx_qptr(gmac_dev, dev_info); + busy--; + } while (busy > 0); + return budget - busy; +} + +/* + * syn_dp_reset_tx_qptr + * Reset the descriptor after Tx is over. 
+ */ +static inline void syn_dp_reset_tx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info) +{ + uint32_t txover = dev_info->tx_busy; + struct dma_desc *txdesc = dev_info->tx_busy_desc; + + BUG_ON(txdesc != (dev_info->tx_desc + txover)); + dev_info->tx_busy = (txover + 1) & (dev_info->tx_desc_count - 1); + dev_info->tx_busy_desc = dev_info->tx_desc + dev_info->tx_busy; + + dev_info->tx_skb_list[txover] = NULL; + txdesc->status &= desc_tx_desc_end_of_ring; + txdesc->length = 0; + txdesc->buffer1 = 0; + txdesc->data1 = 0; + txdesc->reserved1 = 0; + + /* + * Busy tx descriptor is reduced by one as + * it will be handed over to Processor now. + */ + dev_info->busy_tx_desc--; +} + +/* + * syn_dp_set_tx_qptr + * Populate the tx desc structure with the buffer address. + */ +static inline struct dma_desc *syn_dp_set_tx_qptr(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, + uint32_t Buffer1, uint32_t Length1, struct sk_buff *skb, uint32_t offload_needed, + uint32_t tx_cntl, uint32_t set_dma) +{ + uint32_t txnext = dev_info->tx_next; + struct dma_desc *txdesc = dev_info->tx_next_desc; + uint32_t tx_skb_index = txnext; + + BUG_ON(dev_info->busy_tx_desc > dev_info->tx_desc_count); + BUG_ON(txdesc != (dev_info->tx_desc + txnext)); + BUG_ON(!syn_dp_gmac_is_desc_empty(txdesc)); + BUG_ON(syn_dp_gmac_is_desc_owned_by_dma(txdesc)); + + if (Length1 > SYN_DP_MAX_DESC_BUFF) { + txdesc->length |= (SYN_DP_MAX_DESC_BUFF << desc_size1_shift) & desc_size1_mask; + txdesc->length |= + ((Length1 - SYN_DP_MAX_DESC_BUFF) << desc_size2_shift) & desc_size2_mask; + } else { + txdesc->length |= ((Length1 << desc_size1_shift) & desc_size1_mask); + } + + txdesc->status |= tx_cntl; + txdesc->buffer1 = Buffer1; + + dev_info->tx_skb_list[tx_skb_index] = skb; + + /* Program second buffer address if using two buffers. 
*/ + if (Length1 > SYN_DP_MAX_DESC_BUFF) + txdesc->data1 = Buffer1 + SYN_DP_MAX_DESC_BUFF; + else + txdesc->data1 = 0; + + if (likely(offload_needed)) { + syn_dp_gmac_tx_checksum_offload_tcp_pseudo(txdesc); + } + + /* + * Ensure all write completed before setting own by dma bit so when gmac + * HW takeover this descriptor, all the fields are filled correctly + */ + wmb(); + txdesc->status |= set_dma; + + dev_info->tx_next = (txnext + 1) & (dev_info->tx_desc_count - 1); + dev_info->tx_next_desc = dev_info->tx_desc + dev_info->tx_next; + + return txdesc; +} + +/* + * syn_dp_tx_queue_desc + * Queue TX descriptor to the TX ring + */ +static void syn_dp_tx_desc_queue(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb, dma_addr_t dma_addr) +{ + unsigned int len = skb->len; + + spin_lock_bh(&dev_info->data_lock); + + syn_dp_set_tx_qptr(gmac_dev, dev_info, dma_addr, len, skb, (skb->ip_summed == CHECKSUM_PARTIAL), + (desc_tx_last | desc_tx_first | desc_tx_int_enable), desc_own_by_dma); + dev_info->busy_tx_desc++; + + spin_unlock_bh(&dev_info->data_lock); +} + +/* + * syn_dp_process_tx_complete + * Xmit complete, clear descriptor and free the skb + */ +void syn_dp_process_tx_complete(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info) +{ + int busy, len; + uint32_t status; + struct dma_desc *desc = NULL; + struct sk_buff *skb; + uint32_t tx_skb_index; + + spin_lock_bh(&dev_info->data_lock); + busy = dev_info->busy_tx_desc; + + if (!busy) { + /* No desc are hold by gmac dma, we are done */ + spin_unlock_bh(&dev_info->data_lock); + return; + } + + do { + desc = dev_info->tx_busy_desc; + if (syn_dp_gmac_is_desc_owned_by_dma(desc)) { + /* desc still hold by gmac dma, so we are done */ + break; + } + + len = (desc->length & desc_size1_mask) >> desc_size1_shift; + dma_unmap_single(&(gmac_dev->pdev->dev), desc->buffer1, len, DMA_TO_DEVICE); + + status = desc->status; + if (status & desc_tx_last) { + /* TX is done for this whole skb, we can free it 
*/ + /* Get the skb from the tx skb pool */ + tx_skb_index = dev_info->tx_busy; + skb = dev_info->tx_skb_list[tx_skb_index]; + + BUG_ON(!skb); + dev_kfree_skb(skb); + + spin_lock_bh(&dev_info->stats_lock); + + if (unlikely(status & desc_tx_error)) { + /* Some error happen, collect statistics */ + dev_info->stats.stats.tx_errors++; + dev_info->stats.stats.tx_jabber_timeout_errors += (status & desc_tx_timeout) ? 1 : 0; + dev_info->stats.stats.tx_frame_flushed_errors += (status & desc_tx_frame_flushed) ? 1 : 0; + dev_info->stats.stats.tx_loss_of_carrier_errors += (status & desc_tx_lost_carrier) ? 1 : 0; + dev_info->stats.stats.tx_no_carrier_errors += (status & desc_tx_no_carrier) ? 1 : 0; + dev_info->stats.stats.tx_late_collision_errors += (status & desc_tx_late_collision) ? 1 : 0; + dev_info->stats.stats.tx_excessive_collision_errors += (status & desc_tx_exc_collisions) ? 1 : 0; + dev_info->stats.stats.tx_excessive_deferral_errors += (status & desc_tx_exc_deferral) ? 1 : 0; + dev_info->stats.stats.tx_underflow_errors += (status & desc_tx_underflow) ? 1 : 0; + dev_info->stats.stats.tx_ip_header_errors += (status & desc_tx_ipv4_chk_error) ? 1 : 0; + dev_info->stats.stats.tx_ip_payload_errors += (status & desc_tx_pay_chk_error) ? 
1 : 0; + } else { + /* No error, recored tx pkts/bytes and + * collision + */ + dev_info->stats.stats.tx_packets++; + dev_info->stats.stats.tx_collisions += syn_dp_gmac_get_tx_collision_count(status); + dev_info->stats.stats.tx_bytes += len; + } + + spin_unlock_bh(&dev_info->stats_lock); + } + syn_dp_reset_tx_qptr(gmac_dev, dev_info); + busy--; + } while (busy > 0); + + spin_unlock_bh(&dev_info->data_lock); +} + +/* + * syn_dp_tx + * TX routine for Synopsys GMAC + */ +int syn_dp_tx(struct nss_dp_dev *gmac_dev, struct syn_dp_info *dev_info, struct sk_buff *skb) +{ + struct net_device *netdev = gmac_dev->netdev; + struct nss_gmac_hal_dev *nghd = gmac_dev->gmac_hal_ctx; + unsigned len = skb->len; + dma_addr_t dma_addr; + + /* + * If we don't have enough tx descriptor for this pkt, return busy. + */ + if ((SYN_DP_TX_DESC_SIZE - dev_info->busy_tx_desc) < 1) { + netdev_dbg(netdev, "Not enough descriptors available"); + return -1; + } + + dma_addr = dma_map_single(&gmac_dev->pdev->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&gmac_dev->pdev->dev, dma_addr))) { + netdev_dbg(netdev, "DMA mapping failed for empty buffer\n"); + return -1; + } + + /* + * Queue packet to the GMAC rings + */ + syn_dp_tx_desc_queue(gmac_dev, dev_info, skb, dma_addr); + + syn_resume_dma_tx(nghd); + + return 0; +} \ No newline at end of file diff --git a/feeds/ipq807x/qca-nss-dp/src/include/nss_dp_dev.h b/feeds/ipq807x/qca-nss-dp/src/include/nss_dp_dev.h new file mode 100644 index 000000000..439777349 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/include/nss_dp_dev.h @@ -0,0 +1,132 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_DP_DEV_H__ +#define __NSS_DP_DEV_H__ + +#include +#include +#include +#include +#include +#include +#include + +#include "nss_dp_api_if.h" +#include "nss_dp_hal_if.h" + +#define NSS_DP_ACL_DEV_ID 0 + +struct nss_dp_global_ctx; + +/* + * nss data plane device structure + */ +struct nss_dp_dev { + uint32_t macid; /* Sequence# of Mac on the platform */ + uint32_t vsi; /* vsi number */ + unsigned long flags; /* Status flags */ + unsigned long drv_flags; /* Driver specific feature flags */ + + /* Phy related stuff */ + struct phy_device *phydev; /* Phy device */ + struct mii_bus *miibus; /* MII bus */ + uint32_t phy_mii_type; /* RGMII/SGMII/QSGMII */ + uint32_t phy_mdio_addr; /* Mdio address */ + bool link_poll; /* Link polling enable? */ + uint32_t forced_speed; /* Forced speed? */ + uint32_t forced_duplex; /* Forced duplex? 
*/ + uint32_t link_state; /* Current link state */ + uint32_t pause; /* Current flow control settings */ + + struct net_device *netdev; + struct platform_device *pdev; + struct napi_struct napi; + + struct nss_dp_data_plane_ctx *dpc; + /* context when NSS owns GMACs */ + struct nss_dp_data_plane_ops *data_plane_ops; + /* ops for each data plane */ + struct nss_dp_global_ctx *ctx; /* Global NSS DP context */ + struct nss_gmac_hal_dev *gmac_hal_ctx; /* context of gmac hal */ + struct nss_gmac_hal_ops *gmac_hal_ops; /* GMAC HAL OPS */ + + /* switchdev related attributes */ +#ifdef CONFIG_NET_SWITCHDEV + u8 stp_state; /* STP state of this physical port */ + unsigned long brport_flags; /* bridge port flags */ +#endif +}; + +/* + * nss data plane global context + */ +struct nss_dp_global_ctx { + struct nss_dp_dev *nss_dp[NSS_DP_HAL_MAX_PORTS]; + struct nss_gmac_hal_ops *gmac_hal_ops[GMAC_HAL_TYPE_MAX]; + /* GMAC HAL OPS */ + bool common_init_done; /* Flag to hold common init state */ + uint8_t slowproto_acl_bm; /* Port bitmap to allow slow protocol packets */ +}; + +/* Global data */ +extern struct nss_dp_global_ctx dp_global_ctx; +extern struct nss_dp_data_plane_ctx dp_global_data_plane_ctx[NSS_DP_HAL_MAX_PORTS]; + +/* + * nss data plane link state + */ +enum nss_dp_link_state { + __NSS_DP_LINK_UP, /* Indicate link is UP */ + __NSS_DP_LINK_DOWN /* Indicate link is down */ +}; + +/* + * nss data plane status + */ +enum nss_dp_state { + __NSS_DP_UP, /* set to indicate the interface is UP */ + __NSS_DP_RXCSUM, /* Rx checksum enabled */ + __NSS_DP_AUTONEG, /* Autonegotiation Enabled */ + __NSS_DP_LINKPOLL, /* Poll link status */ +}; + +/* + * nss data plane private flags + */ +enum nss_dp_priv_flags { + __NSS_DP_PRIV_FLAG_INIT_DONE, + __NSS_DP_PRIV_FLAG_IRQ_REQUESTED, + __NSS_DP_PRIV_FLAG_MAX, +}; +#define NSS_DP_PRIV_FLAG(x) (1 << __NSS_DP_PRIV_FLAG_ ## x) + +/* + * nss_dp_set_ethtool_ops() + */ +void nss_dp_set_ethtool_ops(struct net_device *netdev); + +/* + * nss data 
plane switchdev helpers + */ +#ifdef CONFIG_NET_SWITCHDEV +void nss_dp_switchdev_setup(struct net_device *dev); +bool nss_dp_is_phy_dev(struct net_device *dev); +#endif + +#endif /* __NSS_DP_DEV_H__ */ diff --git a/feeds/ipq807x/qca-nss-dp/src/nss_dp_attach.c b/feeds/ipq807x/qca-nss-dp/src/nss_dp_attach.c new file mode 100644 index 000000000..94e8f6900 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/nss_dp_attach.c @@ -0,0 +1,192 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include +#include "nss_dp_hal.h" + +/* + * nss_dp_reset_netdev_features() + * Resets the netdev features + */ +static inline void nss_dp_reset_netdev_features(struct net_device *netdev) +{ + netdev->features = 0; + netdev->hw_features = 0; + netdev->vlan_features = 0; + netdev->wanted_features = 0; +} + +/* + * nss_dp_receive() + * Called by overlay drivers to deliver packets to nss-dp + */ +void nss_dp_receive(struct net_device *netdev, struct sk_buff *skb, + struct napi_struct *napi) +{ + struct nss_dp_dev *dp_dev = netdev_priv(netdev); + + skb->dev = netdev; + skb->protocol = eth_type_trans(skb, netdev); + netdev_dbg(netdev, "Rx on port%d, packet len %d, CSUM %d\n", + dp_dev->macid, skb->len, skb->ip_summed); + +#ifdef CONFIG_NET_SWITCHDEV +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + skb->offload_fwd_mark = netdev->offload_fwd_mark; +#else + /* + * TODO: Implement ndo_get_devlink_port() + */ + skb->offload_fwd_mark = 0; +#endif +#endif + + napi_gro_receive(napi, skb); +} +EXPORT_SYMBOL(nss_dp_receive); + +/* + * nss_dp_is_in_open_state() + * Return if a data plane is opened or not + */ +bool nss_dp_is_in_open_state(struct net_device *netdev) +{ + struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev); + + if (test_bit(__NSS_DP_UP, &dp_dev->flags)) + return true; + return false; +} +EXPORT_SYMBOL(nss_dp_is_in_open_state); + +/* + * nss_dp_override_data_plane() + * API to allow overlay drivers to override the data plane + */ +int nss_dp_override_data_plane(struct net_device *netdev, + struct nss_dp_data_plane_ops *dp_ops, + struct nss_dp_data_plane_ctx *dpc) +{ + struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev); + + if (!dp_ops->open || !dp_ops->close || !dp_ops->link_state + || !dp_ops->mac_addr || !dp_ops->change_mtu || !dp_ops->xmit + || !dp_ops->set_features || !dp_ops->pause_on_off || !dp_ops->deinit) { + netdev_dbg(netdev, "All the 
op functions must be present, reject this registeration\n"); + return NSS_DP_FAILURE; + } + + /* + * If this data plane is up, close the netdev to force TX/RX stop, and + * also reset the features + */ + if (test_bit(__NSS_DP_UP, &dp_dev->flags)) { + netdev->netdev_ops->ndo_stop(netdev); + nss_dp_reset_netdev_features(netdev); + } + + /* + * Free up the resources used by the data plane + */ + if (dp_dev->drv_flags & NSS_DP_PRIV_FLAG(INIT_DONE)) { + if (dp_dev->data_plane_ops->deinit(dpc)) { + netdev_dbg(netdev, "Data plane init failed\n"); + return -ENOMEM; + } + + dp_dev->drv_flags &= ~NSS_DP_PRIV_FLAG(INIT_DONE); + } + + /* + * Override the data_plane_ctx, data_plane_ops + */ + dp_dev->dpc = dpc; + dp_dev->data_plane_ops = dp_ops; + + return NSS_DP_SUCCESS; +} +EXPORT_SYMBOL(nss_dp_override_data_plane); + +/* + * nss_dp_start_data_plane() + * Data plane to inform netdev it is ready to start + */ +void nss_dp_start_data_plane(struct net_device *netdev, + struct nss_dp_data_plane_ctx *dpc) +{ + struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev); + + if (test_bit(__NSS_DP_UP, &dp_dev->flags)) { + netdev_dbg(netdev, "This netdev already up, something is wrong\n"); + return; + } + + if (dp_dev->dpc != dpc) { + netdev_dbg(netdev, "Cookie %px does not match, reject\n", dpc); + return; + } + + netdev->netdev_ops->ndo_open(dp_dev->netdev); +} +EXPORT_SYMBOL(nss_dp_start_data_plane); + +/* + * nss_dp_restore_data_plane() + * Called by overlay drivers to detach itself from nss-dp + */ +void nss_dp_restore_data_plane(struct net_device *netdev) +{ + struct nss_dp_dev *dp_dev = (struct nss_dp_dev *)netdev_priv(netdev); + + /* + * If this data plane is up, close the netdev to force TX/RX stop, and + * also reset the features + */ + if (test_bit(__NSS_DP_UP, &dp_dev->flags)) { + netdev->netdev_ops->ndo_stop(netdev); + nss_dp_reset_netdev_features(netdev); + } + + dp_dev->data_plane_ops = nss_dp_hal_get_data_plane_ops(); + dp_dev->dpc = 
&dp_global_data_plane_ctx[dp_dev->macid - NSS_DP_START_IFNUM]; + + /* + * TODO: Re-initialize EDMA dataplane + */ +} +EXPORT_SYMBOL(nss_dp_restore_data_plane); + +/* + * nss_dp_get_netdev_by_nss_if_num() + * return the net device of the corrsponding id if exist + */ +struct net_device *nss_dp_get_netdev_by_nss_if_num(int if_num) +{ + struct nss_dp_dev *dp_dev; + + if ((if_num > NSS_DP_HAL_MAX_PORTS) || (if_num < NSS_DP_START_IFNUM)) { + pr_err("Invalid if_num %d\n", if_num); + return NULL; + } + + dp_dev = dp_global_ctx.nss_dp[if_num - NSS_DP_START_IFNUM]; + if (!dp_dev) + return NULL; + return dp_dev->netdev; +} +EXPORT_SYMBOL(nss_dp_get_netdev_by_nss_if_num); diff --git a/feeds/ipq807x/qca-nss-dp/src/nss_dp_ethtools.c b/feeds/ipq807x/qca-nss-dp/src/nss_dp_ethtools.c new file mode 100644 index 000000000..289bf87ee --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/nss_dp_ethtools.c @@ -0,0 +1,378 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include +#include +#include +#include +#include "nss_dp_dev.h" +#include "fal/fal_port_ctrl.h" + +/* + * nss_dp_get_ethtool_stats() + */ +static void nss_dp_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, uint64_t *data) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + dp_priv->gmac_hal_ops->getethtoolstats(dp_priv->gmac_hal_ctx, data); +} + +/* + * nss_dp_get_strset_count() + */ +static int32_t nss_dp_get_strset_count(struct net_device *netdev, int32_t sset) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + return dp_priv->gmac_hal_ops->getssetcount(dp_priv->gmac_hal_ctx, sset); +} + +/* + * nss_dp_get_strings() + */ +static void nss_dp_get_strings(struct net_device *netdev, uint32_t stringset, + uint8_t *data) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + dp_priv->gmac_hal_ops->getstrings(dp_priv->gmac_hal_ctx, stringset, + data); +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +/* + * nss_dp_get_settings() + */ +static int32_t nss_dp_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + /* + * If there is a PHY attached, get the status from Kernel helper + */ + if (dp_priv->phydev) + return phy_ethtool_gset(dp_priv->phydev, cmd); + + return -EIO; +} + +/* + * nss_dp_set_settings() + */ +static int32_t nss_dp_set_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + if (!dp_priv->phydev) + return -EIO; + + return phy_ethtool_sset(dp_priv->phydev, cmd); +} +#endif + +/* + * nss_dp_get_pauseparam() + */ +static void nss_dp_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev 
*)netdev_priv(netdev); + + pause->rx_pause = dp_priv->pause & FLOW_CTRL_RX ? 1 : 0; + pause->tx_pause = dp_priv->pause & FLOW_CTRL_TX ? 1 : 0; + pause->autoneg = AUTONEG_ENABLE; +} + +/* + * nss_dp_set_pauseparam() + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +static int32_t nss_dp_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + /* set flow control settings */ + dp_priv->pause = 0; + if (pause->rx_pause) + dp_priv->pause |= FLOW_CTRL_RX; + + if (pause->tx_pause) + dp_priv->pause |= FLOW_CTRL_TX; + + if (!dp_priv->phydev) + return 0; + + /* Update flow control advertisment */ + dp_priv->phydev->advertising &= + ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + + if (pause->rx_pause) + dp_priv->phydev->advertising |= + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); + + if (pause->tx_pause) + dp_priv->phydev->advertising |= ADVERTISED_Asym_Pause; + + genphy_config_aneg(dp_priv->phydev); + + return 0; +} +#else +static int32_t nss_dp_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; + + /* set flow control settings */ + dp_priv->pause = 0; + if (pause->rx_pause) + dp_priv->pause |= FLOW_CTRL_RX; + + if (pause->tx_pause) + dp_priv->pause |= FLOW_CTRL_TX; + + if (!dp_priv->phydev) + return 0; + + /* Update flow control advertisment */ + linkmode_copy(advertising, dp_priv->phydev->advertising); + + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising); + + if (pause->rx_pause) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising); + } + + if (pause->tx_pause) + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising); + + 
linkmode_copy(dp_priv->phydev->advertising, advertising); + genphy_config_aneg(dp_priv->phydev); + + return 0; +} +#endif + +/* + * nss_dp_fal_to_ethtool_linkmode_xlate() + * Translate linkmode from FAL type to ethtool type. + */ +static inline void nss_dp_fal_to_ethtool_linkmode_xlate(uint32_t *xlate_to, uint32_t *xlate_from) +{ + uint32_t pos; + + while (*xlate_from) { + pos = ffs(*xlate_from); + switch (1 << (pos - 1)) { + case FAL_PHY_EEE_10BASE_T: + *xlate_to |= SUPPORTED_10baseT_Full; + break; + + case FAL_PHY_EEE_100BASE_T: + *xlate_to |= SUPPORTED_100baseT_Full; + break; + + case FAL_PHY_EEE_1000BASE_T: + *xlate_to |= SUPPORTED_1000baseT_Full; + break; + + case FAL_PHY_EEE_2500BASE_T: + *xlate_to |= SUPPORTED_2500baseX_Full; + break; + + case FAL_PHY_EEE_5000BASE_T: + /* + * Ethtool does not support enumeration for 5G. + */ + break; + + case FAL_PHY_EEE_10000BASE_T: + *xlate_to |= SUPPORTED_10000baseT_Full; + break; + } + + *xlate_from &= (~(1 << (pos - 1))); + } +} + +/* + * nss_dp_get_eee() + * Get EEE settings. + */ +static int32_t nss_dp_get_eee(struct net_device *netdev, struct ethtool_eee *eee) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + fal_port_eee_cfg_t port_eee_cfg; + uint32_t port_id; + sw_error_t ret; + + memset(&port_eee_cfg, 0, sizeof(fal_port_eee_cfg_t)); + port_id = dp_priv->macid; + ret = fal_port_interface_eee_cfg_get(NSS_DP_ACL_DEV_ID, port_id, &port_eee_cfg); + if (ret != SW_OK) { + netdev_dbg(netdev, "Could not fetch EEE settings err = %d\n", ret); + return -EIO; + } + + /* + * Translate the FAL linkmode types to ethtool linkmode types. 
+ */ + nss_dp_fal_to_ethtool_linkmode_xlate(&eee->supported, &port_eee_cfg.capability); + nss_dp_fal_to_ethtool_linkmode_xlate(&eee->advertised, &port_eee_cfg.advertisement); + nss_dp_fal_to_ethtool_linkmode_xlate(&eee->lp_advertised, &port_eee_cfg.link_partner_advertisement); + eee->eee_enabled = port_eee_cfg.enable; + eee->eee_active = port_eee_cfg.eee_status; + eee->tx_lpi_enabled = port_eee_cfg.lpi_tx_enable; + eee->tx_lpi_timer = port_eee_cfg.lpi_sleep_timer; + + return 0; +} + +/* + * nss_dp_set_eee() + * Set EEE settings. + */ +static int32_t nss_dp_set_eee(struct net_device *netdev, struct ethtool_eee *eee) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + fal_port_eee_cfg_t port_eee_cfg, port_eee_cur_cfg; + uint32_t port_id, pos; + sw_error_t ret; + + memset(&port_eee_cfg, 0, sizeof(fal_port_eee_cfg_t)); + memset(&port_eee_cur_cfg, 0, sizeof(fal_port_eee_cfg_t)); + port_id = dp_priv->macid; + + /* + * Get current EEE configuration. + */ + ret = fal_port_interface_eee_cfg_get(NSS_DP_ACL_DEV_ID, port_id, &port_eee_cur_cfg); + if (ret != SW_OK) { + netdev_dbg(netdev, "Could not fetch EEE settings err = %d\n", ret); + return -EIO; + } + + port_eee_cfg.enable = eee->eee_enabled; + + /* + * Translate the ethtool speed types to FAL speed types. 
+ */ + while (eee->advertised) { + pos = ffs(eee->advertised); + switch (1 << (pos - 1)) { + case ADVERTISED_10baseT_Full: + if (port_eee_cur_cfg.capability & FAL_PHY_EEE_10BASE_T) { + port_eee_cfg.advertisement |= FAL_PHY_EEE_10BASE_T; + break; + } + + netdev_dbg(netdev, "Advertised value 10baseT_Full is not supported\n"); + return -EIO; + + case ADVERTISED_100baseT_Full: + if (port_eee_cur_cfg.capability & FAL_PHY_EEE_100BASE_T) { + port_eee_cfg.advertisement |= FAL_PHY_EEE_100BASE_T; + break; + } + + netdev_dbg(netdev, "Advertised value 100baseT_Full is not supported\n"); + return -EIO; + + case ADVERTISED_1000baseT_Full: + if (port_eee_cur_cfg.capability & FAL_PHY_EEE_1000BASE_T) { + port_eee_cfg.advertisement |= FAL_PHY_EEE_1000BASE_T; + break; + } + + netdev_dbg(netdev, "Advertised value 1000baseT_Full is not supported\n"); + return -EIO; + + case ADVERTISED_2500baseX_Full: + if (port_eee_cur_cfg.capability & FAL_PHY_EEE_2500BASE_T) { + port_eee_cfg.advertisement |= FAL_PHY_EEE_2500BASE_T; + break; + } + + netdev_dbg(netdev, "Advertised value 2500baseX_Full is not supported\n"); + return -EIO; + + case ADVERTISED_10000baseT_Full: + if (port_eee_cur_cfg.capability & FAL_PHY_EEE_10000BASE_T) { + port_eee_cfg.advertisement |= FAL_PHY_EEE_10000BASE_T; + break; + } + + netdev_dbg(netdev, "Advertised value 10000baseT_Full is not supported\n"); + return -EIO; + + default: + netdev_dbg(netdev, "Advertised value is not supported\n"); + return -EIO; + } + + eee->advertised &= (~(1 << (pos - 1))); + } + + port_eee_cfg.lpi_tx_enable = eee->tx_lpi_enabled; + port_eee_cfg.lpi_sleep_timer = eee->tx_lpi_timer; + ret = fal_port_interface_eee_cfg_set(NSS_DP_ACL_DEV_ID, port_id, &port_eee_cfg); + if (ret != SW_OK) { + netdev_dbg(netdev, "Could not configure EEE err = %d\n", ret); + return -EIO; + } + + return 0; +} + +/* + * Ethtool operations + */ +struct ethtool_ops nss_dp_ethtool_ops = { + .get_strings = &nss_dp_get_strings, + .get_sset_count = &nss_dp_get_strset_count, + 
.get_ethtool_stats = &nss_dp_get_ethtool_stats, + .get_link = ðtool_op_get_link, +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + .get_settings = &nss_dp_get_settings, + .set_settings = &nss_dp_set_settings, +#else + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +#endif + .get_pauseparam = &nss_dp_get_pauseparam, + .set_pauseparam = &nss_dp_set_pauseparam, + .get_eee = &nss_dp_get_eee, + .set_eee = &nss_dp_set_eee, +}; + +/* + * nss_dp_set_ethtool_ops() + * Set ethtool operations + */ +void nss_dp_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &nss_dp_ethtool_ops; +} diff --git a/feeds/ipq807x/qca-nss-dp/src/nss_dp_main.c b/feeds/ipq807x/qca-nss-dp/src/nss_dp_main.c new file mode 100644 index 000000000..5580b1331 --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/nss_dp_main.c @@ -0,0 +1,830 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if defined(NSS_DP_PPE_SUPPORT) +#include +#endif +#include + +#include "nss_dp_hal.h" + +/* + * Number of TX/RX queue supported is based on the number of host CPU + */ +#define NSS_DP_NETDEV_TX_QUEUE_NUM NSS_DP_HAL_CPU_NUM +#define NSS_DP_NETDEV_RX_QUEUE_NUM NSS_DP_HAL_CPU_NUM + +/* ipq40xx_mdio_data */ +struct ipq40xx_mdio_data { + struct mii_bus *mii_bus; + void __iomem *membase; + int phy_irq[PHY_MAX_ADDR]; +}; + +/* Global data */ +struct nss_dp_global_ctx dp_global_ctx; +struct nss_dp_data_plane_ctx dp_global_data_plane_ctx[NSS_DP_HAL_MAX_PORTS]; + +/* + * nss_dp_do_ioctl() + */ +static int32_t nss_dp_do_ioctl(struct net_device *netdev, struct ifreq *ifr, + int32_t cmd) +{ + int ret = -EINVAL; + struct nss_dp_dev *dp_priv; + + if (!netdev || !ifr) + return ret; + + dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + if (dp_priv->phydev) + return phy_mii_ioctl(dp_priv->phydev, ifr, cmd); + + return ret; +} + +/* + * nss_dp_change_mtu() + */ +static int32_t nss_dp_change_mtu(struct net_device *netdev, int32_t newmtu) +{ + int ret = -EINVAL; + struct nss_dp_dev *dp_priv; + + if (!netdev) + return ret; + + dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + /* Let the underlying data plane decide if the newmtu is applicable */ + if (dp_priv->data_plane_ops->change_mtu(dp_priv->dpc, newmtu)) { + netdev_dbg(netdev, "Data plane change mtu failed\n"); + return ret; + } + + netdev->mtu = newmtu; + + return 0; +} + +/* + * nss_dp_set_mac_address() + */ +static int32_t nss_dp_set_mac_address(struct net_device *netdev, void *macaddr) +{ + struct nss_dp_dev *dp_priv; + struct sockaddr *addr = (struct sockaddr *)macaddr; + int ret = 0; + + if (!netdev) + return -EINVAL; + + dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + netdev_dbg(netdev, "AddrFamily: %d, 
%0x:%0x:%0x:%0x:%0x:%0x\n", + addr->sa_family, addr->sa_data[0], addr->sa_data[1], + addr->sa_data[2], addr->sa_data[3], addr->sa_data[4], + addr->sa_data[5]); + + ret = eth_prepare_mac_addr_change(netdev, macaddr); + if (ret) + return ret; + + if (dp_priv->data_plane_ops->mac_addr(dp_priv->dpc, macaddr)) { + netdev_dbg(netdev, "Data plane set MAC address failed\n"); + return -EAGAIN; + } + + eth_commit_mac_addr_change(netdev, macaddr); + + dp_priv->gmac_hal_ops->setmacaddr(dp_priv->gmac_hal_ctx, + (uint8_t *)addr->sa_data); + + return 0; +} + +/* + * nss_dp_get_stats64() + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +static struct rtnl_link_stats64 *nss_dp_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct nss_dp_dev *dp_priv; + + if (!netdev) + return stats; + + dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + dp_priv->gmac_hal_ops->getndostats(dp_priv->gmac_hal_ctx, stats); + + return stats; +} +#else +static void nss_dp_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct nss_dp_dev *dp_priv; + + if (!netdev) + return; + + dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + dp_priv->gmac_hal_ops->getndostats(dp_priv->gmac_hal_ctx, stats); +} +#endif + +/* + * nss_dp_xmit() + */ +static netdev_tx_t nss_dp_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct nss_dp_dev *dp_priv; + + if (!skb || !netdev) + return NETDEV_TX_OK; + + dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + netdev_dbg(netdev, "Tx packet, len %d\n", skb->len); + + return dp_priv->data_plane_ops->xmit(dp_priv->dpc, skb); +} + +/* + * nss_dp_close() + */ +static int nss_dp_close(struct net_device *netdev) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + if (!dp_priv) + return -EINVAL; + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + /* Notify data plane link is going down */ + if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 0)) { + 
netdev_dbg(netdev, "Data plane set link failed\n"); + return -EAGAIN; + } + + if (dp_priv->phydev) + phy_stop(dp_priv->phydev); + dp_priv->link_state = __NSS_DP_LINK_DOWN; + +#if defined(NSS_DP_PPE_SUPPORT) + /* Notify data plane to unassign VSI */ + if (dp_priv->data_plane_ops->vsi_unassign(dp_priv->dpc, dp_priv->vsi)) { + netdev_dbg(netdev, "Data plane vsi unassign failed\n"); + return -EAGAIN; + } +#endif + + /* + * Notify data plane to close + */ + if (dp_priv->data_plane_ops->close(dp_priv->dpc)) { + netdev_dbg(netdev, "Data plane close failed\n"); + return -EAGAIN; + } + + clear_bit(__NSS_DP_UP, &dp_priv->flags); + + return 0; +} + +/* + * nss_dp_open() + */ +static int nss_dp_open(struct net_device *netdev) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + + if (!dp_priv) + return -EINVAL; + + netif_carrier_off(netdev); + + /* + * Call data plane init if it has not been done yet + */ + if (!(dp_priv->drv_flags & NSS_DP_PRIV_FLAG(INIT_DONE))) { + if (dp_priv->data_plane_ops->init(dp_priv->dpc)) { + netdev_dbg(netdev, "Data plane init failed\n"); + return -ENOMEM; + } + + dp_priv->drv_flags |= NSS_DP_PRIV_FLAG(INIT_DONE); + } + + /* + * Inform the Linux Networking stack about the hardwar capability of + * checksum offloading and other features. 
Each data_plane is + * responsible to maintain the feature set it supports + */ + dp_priv->data_plane_ops->set_features(dp_priv->dpc); + + set_bit(__NSS_DP_UP, &dp_priv->flags); + +#if defined(NSS_DP_PPE_SUPPORT) + if (dp_priv->data_plane_ops->vsi_assign(dp_priv->dpc, dp_priv->vsi)) { + netdev_dbg(netdev, "Data plane vsi assign failed\n"); + return -EAGAIN; + } +#endif + + if (dp_priv->data_plane_ops->mac_addr(dp_priv->dpc, netdev->dev_addr)) { + netdev_dbg(netdev, "Data plane set MAC address failed\n"); + return -EAGAIN; + } + + if (dp_priv->data_plane_ops->change_mtu(dp_priv->dpc, netdev->mtu)) { + netdev_dbg(netdev, "Data plane change mtu failed\n"); + return -EAGAIN; + } + + if (dp_priv->data_plane_ops->open(dp_priv->dpc, 0, 0, 0)) { + netdev_dbg(netdev, "Data plane open failed\n"); + return -EAGAIN; + } + + netif_start_queue(netdev); + + if (!dp_priv->link_poll) { + /* Notify data plane link is up */ + if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 1)) { + netdev_dbg(netdev, "Data plane set link failed\n"); + return -EAGAIN; + } + dp_priv->link_state = __NSS_DP_LINK_UP; + netif_carrier_on(netdev); + } else { + dp_priv->link_state = __NSS_DP_LINK_DOWN; + phy_start(dp_priv->phydev); + phy_start_aneg(dp_priv->phydev); + } + + return 0; +} + +#ifdef CONFIG_RFS_ACCEL +/* + * nss_dp_rx_flow_steer() + * Steer the flow rule to NSS + */ +static int nss_dp_rx_flow_steer(struct net_device *netdev, const struct sk_buff *_skb, + uint16_t rxq, uint32_t flow) +{ + struct nss_dp_dev *dp_priv; + struct netdev_rx_queue *rxqueue; + struct rps_sock_flow_table *sock_flow_table; + struct rps_dev_flow_table *flow_table; + struct rps_dev_flow *rxflow; + struct sk_buff *skb = (struct sk_buff *)_skb; + uint16_t index; + uint32_t hash; + uint32_t rfscpu; + uint32_t rxcpu; + + if (!netdev) + return -EINVAL; + + dp_priv = (struct nss_dp_dev *)netdev_priv(netdev); + if (!dp_priv) + return -EINVAL; + + rxqueue = netdev->_rx; + + if (skb_rx_queue_recorded(skb)) { + index = 
skb_get_rx_queue(skb); + rxqueue += index; + } + + flow_table = rcu_dereference(rxqueue->rps_flow_table); + if (!flow_table) { + netdev_dbg(netdev, "RX queue RPS flow table not found\n"); + return -EINVAL; + } + + hash = skb_get_hash(skb); + rxflow = &flow_table->flows[hash & flow_table->mask]; + rxcpu = (uint32_t)rxflow->cpu; + + sock_flow_table = rcu_dereference(rps_sock_flow_table); + if (!sock_flow_table) { + netdev_dbg(netdev, "Global RPS flow table not found\n"); + return -EINVAL; + } + + rfscpu = sock_flow_table->ents[hash & sock_flow_table->mask]; + rfscpu &= rps_cpu_mask; + + if (rxcpu == rfscpu) + return 0; + + /* + * check rx_flow_steer is defined in data plane ops + */ + if (!dp_priv->data_plane_ops->rx_flow_steer) { + netdev_dbg(netdev, "Data plane ops not defined for flow steer\n"); + return -EINVAL; + } + + /* + * Delete the old flow rule + */ + if (dp_priv->data_plane_ops->rx_flow_steer(dp_priv->dpc, skb, rxcpu, false)) { + netdev_dbg(netdev, "Data plane delete flow rule failed\n"); + return -EAGAIN; + } + + /* + * Add the new flow rule + */ + if (dp_priv->data_plane_ops->rx_flow_steer(dp_priv->dpc, skb, rfscpu, true)) { + netdev_dbg(netdev, "Data plane add flow rule failed\n"); + return -EAGAIN; + } + + return 0; +} +#endif + +/* + * nss_dp_select_queue() + * Select tx queue + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +static u16 nss_dp_select_queue(struct net_device *netdev, struct sk_buff *skb, + void *accel_priv, select_queue_fallback_t fallback) +#else +static u16 nss_dp_select_queue(struct net_device *netdev, struct sk_buff *skb, + struct net_device *sb_dev) +#endif +{ + int cpu = get_cpu(); + put_cpu(); + + /* + * The number of queue is matching the number of CPUs so get_cpu will + * always match a valid queue + */ + return cpu; +} + +/* + * Netdevice operations + */ +static const struct net_device_ops nss_dp_netdev_ops = { + .ndo_open = nss_dp_open, + .ndo_stop = nss_dp_close, + .ndo_start_xmit = nss_dp_xmit, + .ndo_get_stats64 
= nss_dp_get_stats64, + .ndo_set_mac_address = nss_dp_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = nss_dp_change_mtu, + .ndo_do_ioctl = nss_dp_do_ioctl, + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + .ndo_bridge_setlink = switchdev_port_bridge_setlink, + .ndo_bridge_getlink = switchdev_port_bridge_getlink, + .ndo_bridge_dellink = switchdev_port_bridge_dellink, +#endif + .ndo_select_queue = nss_dp_select_queue, + +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = nss_dp_rx_flow_steer, +#endif +}; + +/* + * nss_dp_of_get_pdata() + */ +static int32_t nss_dp_of_get_pdata(struct device_node *np, + struct net_device *netdev, + struct gmac_hal_platform_data *hal_pdata) +{ + uint8_t *maddr; + struct nss_dp_dev *dp_priv; + struct resource memres_devtree = {0}; + + dp_priv = netdev_priv(netdev); + + if (of_property_read_u32(np, "qcom,id", &dp_priv->macid)) { + pr_err("%s: error reading id\n", np->name); + return -EFAULT; + } + + if (dp_priv->macid > NSS_DP_HAL_MAX_PORTS || !dp_priv->macid) { + pr_err("%s: invalid macid %d\n", np->name, dp_priv->macid); + return -EFAULT; + } + + if (of_property_read_u32(np, "qcom,mactype", &hal_pdata->mactype)) { + pr_err("%s: error reading mactype\n", np->name); + return -EFAULT; + } + + if (of_address_to_resource(np, 0, &memres_devtree) != 0) + return -EFAULT; + + netdev->base_addr = memres_devtree.start; + hal_pdata->reg_len = resource_size(&memres_devtree); + hal_pdata->netdev = netdev; + hal_pdata->macid = dp_priv->macid; + + dp_priv->phy_mii_type = of_get_phy_mode(np); + dp_priv->link_poll = of_property_read_bool(np, "qcom,link-poll"); + if (of_property_read_u32(np, "qcom,phy-mdio-addr", + &dp_priv->phy_mdio_addr) && dp_priv->link_poll) { + pr_err("%s: mdio addr required if link polling is enabled\n", + np->name); + return -EFAULT; + } + + of_property_read_u32(np, "qcom,forced-speed", &dp_priv->forced_speed); + of_property_read_u32(np, "qcom,forced-duplex", &dp_priv->forced_duplex); + + maddr = 
(uint8_t *)of_get_mac_address(np); +#if (LINUX_VERSION_CODE > KERNEL_VERSION(5, 4, 0)) + if (IS_ERR((void *)maddr)) { + maddr = NULL; + } +#endif + + if (maddr && is_valid_ether_addr(maddr)) { + ether_addr_copy(netdev->dev_addr, maddr); + } else { + random_ether_addr(netdev->dev_addr); + pr_info("GMAC%d(%px) Invalid MAC@ - using %pM\n", dp_priv->macid, + dp_priv, netdev->dev_addr); + } + + return 0; +} + +/* + * nss_dp_mdio_attach() + */ +static struct mii_bus *nss_dp_mdio_attach(struct platform_device *pdev) +{ + struct device_node *mdio_node; + struct platform_device *mdio_plat; + struct ipq40xx_mdio_data *mdio_data; + + /* + * Find mii_bus using "mdio-bus" handle. + */ + mdio_node = of_parse_phandle(pdev->dev.of_node, "mdio-bus", 0); + if (mdio_node) { + return of_mdio_find_bus(mdio_node); + } + + mdio_node = of_find_compatible_node(NULL, NULL, "qcom,ipq40xx-mdio"); + if (!mdio_node) { + dev_err(&pdev->dev, "cannot find mdio node by phandle\n"); + return NULL; + } + + mdio_plat = of_find_device_by_node(mdio_node); + if (!mdio_plat) { + dev_err(&pdev->dev, "cannot find platform device from mdio node\n"); + of_node_put(mdio_node); + return NULL; + } + + mdio_data = dev_get_drvdata(&mdio_plat->dev); + if (!mdio_data) { + dev_err(&pdev->dev, "cannot get mii bus reference from device data\n"); + of_node_put(mdio_node); + return NULL; + } + + return mdio_data->mii_bus; +} + +#ifdef CONFIG_NET_SWITCHDEV +/* + * nss_dp_is_phy_dev() + * Check if it is dp device + */ +bool nss_dp_is_phy_dev(struct net_device *dev) +{ + return (dev->netdev_ops == &nss_dp_netdev_ops); +} +#endif + +/* + * nss_dp_adjust_link() + */ +void nss_dp_adjust_link(struct net_device *netdev) +{ + struct nss_dp_dev *dp_priv = netdev_priv(netdev); + int current_state = dp_priv->link_state; + + if (!test_bit(__NSS_DP_UP, &dp_priv->flags)) + return; + + if (dp_priv->phydev->link && (current_state == __NSS_DP_LINK_UP)) + return; + + if (!dp_priv->phydev->link && (current_state == __NSS_DP_LINK_DOWN)) + 
return; + + if (current_state == __NSS_DP_LINK_DOWN) { + netdev_info(netdev, "PHY Link up speed: %d\n", + dp_priv->phydev->speed); + if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 1)) { + netdev_dbg(netdev, "Data plane set link up failed\n"); + return; + } + dp_priv->link_state = __NSS_DP_LINK_UP; + netif_carrier_on(netdev); + } else { + netdev_info(netdev, "PHY Link is down\n"); + if (dp_priv->data_plane_ops->link_state(dp_priv->dpc, 0)) { + netdev_dbg(netdev, "Data plane set link down failed\n"); + return; + } + dp_priv->link_state = __NSS_DP_LINK_DOWN; + netif_carrier_off(netdev); + } +} + +/* + * nss_dp_probe() + */ +static int32_t nss_dp_probe(struct platform_device *pdev) +{ + struct net_device *netdev; + struct nss_dp_dev *dp_priv; + struct device_node *np = pdev->dev.of_node; + struct gmac_hal_platform_data gmac_hal_pdata; + int32_t ret = 0; + uint8_t phy_id[MII_BUS_ID_SIZE + 3]; +#if defined(NSS_DP_PPE_SUPPORT) + uint32_t vsi_id; + fal_port_t port_id; +#endif + + /* TODO: See if we need to do some SoC level common init */ + + netdev = alloc_etherdev_mqs(sizeof(struct nss_dp_dev), + NSS_DP_NETDEV_TX_QUEUE_NUM, NSS_DP_NETDEV_RX_QUEUE_NUM); + if (!netdev) { + pr_info("alloc_etherdev() failed\n"); + return -ENOMEM; + } + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)) + /* max_mtu is set to 1500 in ether_setup() */ + netdev->max_mtu = ETH_MAX_MTU; +#endif + + dp_priv = netdev_priv(netdev); + memset((void *)dp_priv, 0, sizeof(struct nss_dp_dev)); + + dp_priv->pdev = pdev; + dp_priv->netdev = netdev; + netdev->watchdog_timeo = 5 * HZ; + netdev->netdev_ops = &nss_dp_netdev_ops; + nss_dp_set_ethtool_ops(netdev); +#ifdef CONFIG_NET_SWITCHDEV + nss_dp_switchdev_setup(netdev); +#endif + + ret = nss_dp_of_get_pdata(np, netdev, &gmac_hal_pdata); + if (ret != 0) { + goto fail; + } + + /* Use data plane ops as per the configured SoC */ + dp_priv->data_plane_ops = nss_dp_hal_get_data_plane_ops(); + if (!dp_priv->data_plane_ops) { + netdev_dbg(netdev, 
"Dataplane ops not found.\n"); + goto fail; + } + + dp_priv->dpc = &dp_global_data_plane_ctx[dp_priv->macid-1]; + dp_priv->dpc->dev = netdev; + dp_priv->ctx = &dp_global_ctx; + + /* TODO:locks init */ + + /* + * HAL's init function will return the pointer to the HAL context + * (private to hal), which dp will store in its data structures. + * The subsequent hal_ops calls expect the DP to pass the HAL + * context pointer as an argument + */ + dp_priv->gmac_hal_ops = nss_dp_hal_get_gmac_ops(gmac_hal_pdata.mactype); + if (!dp_priv->gmac_hal_ops) { + netdev_dbg(netdev, "Unsupported Mac type: %d\n", gmac_hal_pdata.mactype); + goto fail; + } + + dp_priv->gmac_hal_ctx = dp_priv->gmac_hal_ops->init(&gmac_hal_pdata); + if (!(dp_priv->gmac_hal_ctx)) { + netdev_dbg(netdev, "gmac hal init failed\n"); + goto fail; + } + + if (dp_priv->link_poll) { + dp_priv->miibus = nss_dp_mdio_attach(pdev); + if (!dp_priv->miibus) { + netdev_dbg(netdev, "failed to find miibus\n"); + goto fail; + } + snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, + dp_priv->miibus->id, dp_priv->phy_mdio_addr); + + SET_NETDEV_DEV(netdev, &pdev->dev); + + dp_priv->phydev = phy_connect(netdev, phy_id, + &nss_dp_adjust_link, + dp_priv->phy_mii_type); + if (IS_ERR(dp_priv->phydev)) { + netdev_dbg(netdev, "failed to connect to phy device\n"); + goto fail; + } + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + dp_priv->phydev->advertising |= + (ADVERTISED_Pause | ADVERTISED_Asym_Pause); + dp_priv->phydev->supported |= + (SUPPORTED_Pause | SUPPORTED_Asym_Pause); +#else + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, dp_priv->phydev->advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, dp_priv->phydev->advertising); + + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, dp_priv->phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, dp_priv->phydev->supported); +#endif + } + +#if defined(NSS_DP_PPE_SUPPORT) + /* Get port's default VSI */ + port_id = dp_priv->macid; + if 
(ppe_port_vsi_get(0, port_id, &vsi_id)) { + netdev_dbg(netdev, "failed to get port's default VSI\n"); + goto fail; + } + + dp_priv->vsi = vsi_id; +#endif + + /* TODO: Features: CSUM, tx/rx offload... configure */ + + /* Register the network interface */ + ret = register_netdev(netdev); + if (ret) { + netdev_dbg(netdev, "Error registering netdevice %s\n", + netdev->name); + dp_priv->gmac_hal_ops->exit(dp_priv->gmac_hal_ctx); + goto fail; + } + + dp_global_ctx.nss_dp[dp_priv->macid - 1] = dp_priv; + dp_global_ctx.slowproto_acl_bm = 0; + + netdev_dbg(netdev, "Init NSS DP GMAC%d (base = 0x%lx)\n", dp_priv->macid, netdev->base_addr); + + return 0; + +fail: + free_netdev(netdev); + return -EFAULT; +} + +/* + * nss_dp_remove() + */ +static int nss_dp_remove(struct platform_device *pdev) +{ + uint32_t i; + struct nss_dp_dev *dp_priv; + struct nss_gmac_hal_ops *hal_ops; + + for (i = 0; i < NSS_DP_HAL_MAX_PORTS; i++) { + dp_priv = dp_global_ctx.nss_dp[i]; + if (!dp_priv) + continue; + + hal_ops = dp_priv->gmac_hal_ops; + if (dp_priv->phydev) + phy_disconnect(dp_priv->phydev); + unregister_netdev(dp_priv->netdev); + hal_ops->exit(dp_priv->gmac_hal_ctx); + free_netdev(dp_priv->netdev); + dp_global_ctx.nss_dp[i] = NULL; + } + + return 0; +} + +static struct of_device_id nss_dp_dt_ids[] = { + { .compatible = "qcom,nss-dp" }, + {}, +}; +MODULE_DEVICE_TABLE(of, nss_dp_dt_ids); + +static struct platform_driver nss_dp_drv = { + .probe = nss_dp_probe, + .remove = nss_dp_remove, + .driver = { + .name = "nss-dp", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(nss_dp_dt_ids), + }, +}; + +/* + * nss_dp_init() + */ +int __init nss_dp_init(void) +{ + int ret; + + /* + * Bail out on not supported platform + * TODO: Handle this properly with SoC ops + */ + if (!of_machine_is_compatible("qcom,ipq807x") && + !of_machine_is_compatible("qcom,ipq8074") && + !of_machine_is_compatible("qcom,ipq6018") && + !of_machine_is_compatible("qcom,ipq5018")) + return 0; + + /* + * TODO Move this to 
soc_ops + */ + dp_global_ctx.common_init_done = false; + if (!nss_dp_hal_init()) { + pr_err("DP hal init failed.\n"); + return -EFAULT; + } + + ret = platform_driver_register(&nss_dp_drv); + if (ret) + pr_info("NSS DP platform drv register failed\n"); + + dp_global_ctx.common_init_done = true; + pr_info("**********************************************************\n"); + pr_info("* NSS Data Plane driver\n"); + pr_info("**********************************************************\n"); + + return ret; +} + +/* + * nss_dp_exit() + */ +void __exit nss_dp_exit(void) +{ + + /* + * TODO Move this to soc_ops + */ + if (dp_global_ctx.common_init_done) { + nss_dp_hal_cleanup(); + dp_global_ctx.common_init_done = false; + } + + platform_driver_unregister(&nss_dp_drv); +} + +module_init(nss_dp_init); +module_exit(nss_dp_exit); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("NSS Data Plane Network Driver"); diff --git a/feeds/ipq807x/qca-nss-dp/src/nss_dp_switchdev.c b/feeds/ipq807x/qca-nss-dp/src/nss_dp_switchdev.c new file mode 100644 index 000000000..68bc7697f --- /dev/null +++ b/feeds/ipq807x/qca-nss-dp/src/nss_dp_switchdev.c @@ -0,0 +1,367 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include +#include +#include +#include + +#include "nss_dp_dev.h" +#include "fal/fal_stp.h" +#include "fal/fal_ctrlpkt.h" + +#define NSS_DP_SWITCH_ID 0 +#define NSS_DP_SW_ETHTYPE_PID 0 /* PPE ethtype profile ID for slow protocols */ +#define ETH_P_NONE 0 + +/* + * nss_dp_set_slow_proto_filter() + * Enable/Disable filter to allow Ethernet slow-protocol + */ +static void nss_dp_set_slow_proto_filter(struct nss_dp_dev *dp_priv, bool filter_enable) +{ + sw_error_t ret = 0; + fal_ctrlpkt_profile_t profile; + fal_ctrlpkt_action_t action; + + memset(&profile, 0, sizeof(profile)); + + /* + * Action is redirect cpu + */ + action.action = FAL_MAC_RDT_TO_CPU; + action.sg_bypass = A_FALSE; + + /* + * Bypass stp + */ + action.in_stp_bypass = A_TRUE; + action.in_vlan_fltr_bypass = A_FALSE; + action.l2_filter_bypass = A_FALSE; + profile.action = action; + profile.ethtype_profile_bitmap = 0x1; + + /* + * Set port map + */ + profile.port_map = (1 << dp_priv->macid); + if (filter_enable) { + ret = fal_mgmtctrl_ctrlpkt_profile_add(NSS_DP_SWITCH_ID, &profile); + if (ret != SW_OK) { + netdev_dbg(dp_priv->netdev, "failed to add profile for port_map: 0x%x, ret: %d\n", profile.port_map, ret); + return; + } + + /* + * Enable filter to allow ethernet slow-protocol, + * if this is the first port being disabled by STP + */ + if (!dp_priv->ctx->slowproto_acl_bm) { + ret = fal_mgmtctrl_ethtype_profile_set(NSS_DP_SWITCH_ID, NSS_DP_SW_ETHTYPE_PID, ETH_P_SLOW); + if (ret != SW_OK) { + netdev_dbg(dp_priv->netdev, "failed to set ethertype profile: 0x%x, ret: %d\n", ETH_P_SLOW, ret); + 
ret = fal_mgmtctrl_ctrlpkt_profile_del(NSS_DP_SWITCH_ID, &profile); + if (ret != SW_OK) { + netdev_dbg(dp_priv->netdev, "failed to delete profile for port_map: 0x%x, ret: %d\n", profile.port_map, ret); + } + return; + } + } + + /* + * Add port to port bitmap + */ + dp_priv->ctx->slowproto_acl_bm = dp_priv->ctx->slowproto_acl_bm | (1 << dp_priv->macid); + } else { + + ret = fal_mgmtctrl_ctrlpkt_profile_del(NSS_DP_SWITCH_ID, &profile); + if (ret != SW_OK) { + netdev_dbg(dp_priv->netdev, "failed to delete profile for port_map: 0x%x, ret: %d\n", profile.port_map, ret); + return; + } + + /* + * Delete port from port bitmap + */ + dp_priv->ctx->slowproto_acl_bm = dp_priv->ctx->slowproto_acl_bm & (~(1 << dp_priv->macid)); + + /* + * If all ports are in STP-enabled state, then we do not need + * the filter to allow ethernet slow protocol packets + */ + if (!dp_priv->ctx->slowproto_acl_bm) { + ret = fal_mgmtctrl_ethtype_profile_set(NSS_DP_SWITCH_ID, NSS_DP_SW_ETHTYPE_PID, ETH_P_NONE); + if (ret != SW_OK) { + netdev_dbg(dp_priv->netdev, "failed to reset ethertype profile: 0x%x ret: %d\n", ETH_P_NONE, ret); + } + } + } +} + +/* + * nss_dp_stp_state_set() + * Set bridge port STP state to the port of NSS data plane. 
+ */ +static int nss_dp_stp_state_set(struct nss_dp_dev *dp_priv, u8 state) +{ + sw_error_t err; + fal_stp_state_t stp_state; + + switch (state) { + case BR_STATE_DISABLED: + stp_state = FAL_STP_DISABLED; + + /* + * Dynamic bond interfaces which are bridge slaves need to receive + * ethernet slow protocol packets for LACP protocol even in STP + * disabled state + */ + nss_dp_set_slow_proto_filter(dp_priv, true); + break; + case BR_STATE_LISTENING: + stp_state = FAL_STP_LISTENING; + break; + case BR_STATE_BLOCKING: + stp_state = FAL_STP_BLOCKING; + break; + case BR_STATE_LEARNING: + stp_state = FAL_STP_LEARNING; + break; + case BR_STATE_FORWARDING: + stp_state = FAL_STP_FORWARDING; + + /* + * Remove the filter for allowing ethernet slow protocol packets + * for bond interfaces + */ + nss_dp_set_slow_proto_filter(dp_priv, false); + break; + default: + return -EOPNOTSUPP; + } + + err = fal_stp_port_state_set(NSS_DP_SWITCH_ID, 0, dp_priv->macid, + stp_state); + if (err) { + netdev_dbg(dp_priv->netdev, "failed to set ftp state\n"); + + /* + * Restore the slow proto filters + */ + if (state == BR_STATE_DISABLED) + nss_dp_set_slow_proto_filter(dp_priv, false); + else if (state == BR_STATE_FORWARDING) + nss_dp_set_slow_proto_filter(dp_priv, true); + + return -EINVAL; + } + + return 0; +} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +/* + * nss_dp_attr_get() + * Get port information to update switchdev attribute for NSS data plane. 
+ */ +static int nss_dp_attr_get(struct net_device *dev, struct switchdev_attr *attr) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev); + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: + attr->u.ppid.id_len = 1; + attr->u.ppid.id[0] = NSS_DP_SWITCH_ID; + break; + + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + attr->u.brport_flags = dp_priv->brport_flags; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +/* + * nss_dp_attr_set() + * Get switchdev attribute and set to the device of NSS data plane. + */ +static int nss_dp_attr_set(struct net_device *dev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev); + struct net_device *upper_dev; + struct vlan_dev_priv *vlan; + struct list_head *iter; + uint32_t stp_state = attr->u.stp_state; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + dp_priv->brport_flags = attr->u.brport_flags; + netdev_dbg(dev, "set brport_flags %lu\n", attr->u.brport_flags); + return 0; + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + /* + * The stp state is not changed to FAL_STP_DISABLED if + * the net_device (dev) has any vlan configured. Otherwise + * traffic on other vlan(s) will not work. + * + * Note: STP for VLANs is not supported by PPE. 
+ */ + if ((stp_state == BR_STATE_DISABLED) || + (stp_state == BR_STATE_BLOCKING)) { + rcu_read_lock(); + netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) { + if (!is_vlan_dev(upper_dev)) + continue; + + vlan = vlan_dev_priv(upper_dev); + if (vlan->real_dev == dev) { + rcu_read_unlock(); + netdev_dbg(dev, "Do not update stp state to: %u since vlan id: %d is configured on netdevice: %s\n", + stp_state, vlan->vlan_id, vlan->real_dev->name); + return 0; + } + } + + rcu_read_unlock(); + } + + return nss_dp_stp_state_set(dp_priv, stp_state); + default: + return -EOPNOTSUPP; + } +} + +/* + * nss_dp_switchdev_ops + * Switchdev operations of NSS data plane. + */ +static const struct switchdev_ops nss_dp_switchdev_ops = { + .switchdev_port_attr_get = nss_dp_attr_get, + .switchdev_port_attr_set = nss_dp_attr_set, +}; + +/* + * nss_dp_switchdev_setup() + * Set up NSS data plane switchdev operations. + */ +void nss_dp_switchdev_setup(struct net_device *dev) +{ + dev->switchdev_ops = &nss_dp_switchdev_ops; + switchdev_port_fwd_mark_set(dev, NULL, false); +} +#else + +/* + * nss_dp_port_attr_set() + * Sets attributes + */ +static int nss_dp_port_attr_set(struct net_device *dev, + const struct switchdev_attr *attr, + struct switchdev_trans *trans) +{ + struct nss_dp_dev *dp_priv = (struct nss_dp_dev *)netdev_priv(dev); + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + dp_priv->brport_flags = attr->u.brport_flags; + netdev_dbg(dev, "set brport_flags %lu\n", attr->u.brport_flags); + return 0; + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + return nss_dp_stp_state_set(dp_priv, attr->u.stp_state); + default: + return -EOPNOTSUPP; + } + +} + +/* + * nss_dp_switchdev_port_attr_set_event() + * Attribute set event + */ +static int nss_dp_switchdev_port_attr_set_event(struct net_device *netdev, + struct switchdev_notifier_port_attr_info *port_attr_info) +{ + int err; + + err = nss_dp_port_attr_set(netdev, 
port_attr_info->attr, + port_attr_info->trans); + + port_attr_info->handled = true; + return notifier_from_errno(err); +} + +/* + * nss_dp_switchdev_event() + * Switch dev event on netdevice + */ +static int nss_dp_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + + /* + * Handle switchdev event only for physical devices + */ + if (!nss_dp_is_phy_dev(dev)) { + return NOTIFY_DONE; + } + + if (event == SWITCHDEV_PORT_ATTR_SET) + nss_dp_switchdev_port_attr_set_event(dev, ptr); + + return NOTIFY_DONE; +} + +static struct notifier_block nss_dp_switchdev_notifier = { + .notifier_call = nss_dp_switchdev_event, +}; + +static bool switch_init_done; + +/* + * nss_dp_switchdev_setup() + * Setup switch dev + */ +void nss_dp_switchdev_setup(struct net_device *dev) +{ + int err; + + if (switch_init_done) { + return; + } + + err = register_switchdev_blocking_notifier(&nss_dp_switchdev_notifier); + if (err) { + netdev_dbg(dev, "%px:Failed to register switchdev notifier\n", dev); + } + + switch_init_done = true; + +} +#endif diff --git a/feeds/ipq807x/qca-nss-drv/200-napi_threaded.patch b/feeds/ipq807x/qca-nss-drv/200-napi_threaded.patch deleted file mode 100644 index 534898cdc..000000000 --- a/feeds/ipq807x/qca-nss-drv/200-napi_threaded.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/nss_hal/nss_hal.c -+++ b/nss_hal/nss_hal.c -@@ -306,6 +306,8 @@ int nss_hal_probe(struct platform_device - * Initialize the dummy netdevice. 
- */ - init_dummy_netdev(&nss_ctx->napi_ndev); -+ strcpy(nss_ctx->napi_ndev.name, "nss"); -+ nss_ctx->napi_ndev.threaded = 1; - - for (i = 0; i < npd->num_irq; i++) { - err = nss_hal_register_irq(nss_ctx, npd, &nss_ctx->napi_ndev, i); diff --git a/feeds/ipq807x/qca-nss-drv/Makefile b/feeds/ipq807x/qca-nss-drv/Makefile index 4c3f89f35..71d73b564 100644 --- a/feeds/ipq807x/qca-nss-drv/Makefile +++ b/feeds/ipq807x/qca-nss-drv/Makefile @@ -2,17 +2,8 @@ include $(TOPDIR)/rules.mk include $(INCLUDE_DIR)/kernel.mk PKG_NAME:=qca-nss-drv -PKG_SOURCE_PROTO:=git PKG_BRANCH:=master PKG_RELEASE:=2 -PKG_SOURCE_URL:=https://git.codelinaro.org/clo/qsdk/oss/lklm/nss-drv -PKG_MIRROR_HASH:=350af8f6b1f06ea0871314783a5362483963bbe327ba774ba49a2a775847363e -PKG_VERSION:=7563fe37340ec4a28b7821a0ac17608ff30e2a85 - -PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION) -PKG_SOURCE_VERSION:=$(PKG_VERSION) - NSS_CLIENTS_DIR:=$(TOPDIR)/qca/src/qca-nss-clients @@ -114,7 +105,6 @@ define Build/Compile ARCH="$(LINUX_KARCH)" \ M="$(PKG_BUILD_DIR)" \ EXTRA_CFLAGS="$(EXTRA_CFLAGS)" SoC="$(subtarget)" \ - KBUILD_MODPOST_WARN=1 \ modules endef diff --git a/feeds/ipq807x/qca-nss-drv/patches/200-napi_threaded.patch b/feeds/ipq807x/qca-nss-drv/patches/200-napi_threaded.patch index d5cf24424..534898cdc 100644 --- a/feeds/ipq807x/qca-nss-drv/patches/200-napi_threaded.patch +++ b/feeds/ipq807x/qca-nss-drv/patches/200-napi_threaded.patch @@ -1,6 +1,6 @@ --- a/nss_hal/nss_hal.c +++ b/nss_hal/nss_hal.c -@@ -301,6 +301,8 @@ int nss_hal_probe(struct platform_device +@@ -306,6 +306,8 @@ int nss_hal_probe(struct platform_device * Initialize the dummy netdevice. 
*/ init_dummy_netdev(&nss_ctx->napi_ndev); diff --git a/feeds/ipq807x/qca-nss-drv/src/Makefile b/feeds/ipq807x/qca-nss-drv/src/Makefile new file mode 100644 index 000000000..f5c4b9040 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/Makefile @@ -0,0 +1,537 @@ +# ################################################### +# # Makefile for the NSS driver +# ################################################### + +obj-m += qca-nss-drv.o + +# +# List the files that belong to the driver in alphabetical order. +# +qca-nss-drv-objs := \ + nss_cmn.o \ + nss_core.o \ + nss_coredump.o \ + nss_drv_stats.o \ + nss_drv_strings.o \ + nss_dynamic_interface.o \ + nss_dynamic_interface_log.o \ + nss_dynamic_interface_stats.o \ + nss_eth_rx.o \ + nss_eth_rx_stats.o \ + nss_eth_rx_strings.o \ + nss_gmac_stats.o \ + nss_if.o \ + nss_if_log.o \ + nss_init.o \ + nss_ipv4.o \ + nss_ipv4_stats.o \ + nss_ipv4_strings.o \ + nss_ipv4_log.o \ + nss_log.o \ + nss_lso_rx.o \ + nss_lso_rx_stats.o \ + nss_lso_rx_strings.o \ + nss_meminfo.o \ + nss_n2h.o \ + nss_n2h_stats.o \ + nss_n2h_strings.o \ + nss_phys_if.o \ + nss_pm.o \ + nss_profiler.o \ + nss_project.o \ + nss_pppoe.o \ + nss_pppoe_log.o \ + nss_pppoe_stats.o \ + nss_pppoe_strings.o \ + nss_rps.o \ + nss_stats.o \ + nss_strings.o \ + nss_tx_msg_sync.o \ + nss_unaligned.o \ + nss_unaligned_log.o \ + nss_unaligned_stats.o \ + nss_virt_if.o \ + nss_virt_if_stats.o \ + nss_vlan.o \ + nss_vlan_log.o \ + nss_wifi.o \ + nss_wifi_log.o \ + nss_wifi_stats.o \ + nss_wifi_vdev.o \ + nss_wifili.o \ + nss_wifili_log.o \ + nss_wifili_stats.o \ + nss_wifili_strings.o \ + nss_wifi_mac_db.o + +# Base NSS data plane/HAL support +qca-nss-drv-objs += nss_data_plane/nss_data_plane_common.o +qca-nss-drv-objs += nss_hal/nss_hal.o + +ifneq "$(NSS_DRV_BRIDGE_ENABLE)" "n" +ccflags-y += -DNSS_DRV_BRIDGE_ENABLE +qca-nss-drv-objs += \ + nss_bridge.o \ + nss_bridge_log.o +endif + +ifneq "$(NSS_DRV_WIFI_EXT_VDEV_ENABLE)" "n" +ccflags-y += -DNSS_DRV_WIFI_EXT_VDEV_ENABLE 
+qca-nss-drv-objs += \ + nss_wifi_ext_vdev.o \ + nss_wifi_ext_vdev_stats.o \ + nss_wifi_ext_vdev_log.o +endif + +ifneq "$(NSS_DRV_WIFI_MESH_ENABLE)" "n" +ccflags-y += -DNSS_DRV_WIFI_MESH_ENABLE +qca-nss-drv-objs += \ + nss_wifi_mesh.o \ + nss_wifi_mesh_log.o \ + nss_wifi_mesh_stats.o \ + nss_wifi_mesh_strings.o +endif + +ifneq "$(NSS_DRV_IPV4_REASM_ENABLE)" "n" +ccflags-y += -DNSS_DRV_IPV4_REASM_ENABLE +qca-nss-drv-objs += \ + nss_ipv4_reasm.o \ + nss_ipv4_reasm_stats.o \ + nss_ipv4_reasm_strings.o +endif + +ifneq "$(NSS_DRV_L2TP_ENABLE)" "n" +ccflags-y += -DNSS_DRV_L2TP_ENABLE +qca-nss-drv-objs += \ + nss_l2tpv2.o \ + nss_l2tpv2_log.o \ + nss_l2tpv2_stats.o \ + nss_l2tpv2_strings.o +endif + +ifneq "$(NSS_DRV_LAG_ENABLE)" "n" +ccflags-y += -DNSS_DRV_LAG_ENABLE +qca-nss-drv-objs += \ + nss_lag.o \ + nss_lag_log.o +endif + +ifneq "$(NSS_DRV_PVXLAN_ENABLE)" "n" +ccflags-y += -DNSS_DRV_PVXLAN_ENABLE +qca-nss-drv-objs += \ + nss_pvxlan.o \ + nss_pvxlan_log.o \ + nss_pvxlan_stats.o +endif + +ifneq "$(NSS_DRV_IPV6_ENABLE)" "n" +ccflags-y += -DNSS_DRV_IPV6_ENABLE +qca-nss-drv-objs += \ + nss_ipv6.o \ + nss_ipv6_stats.o \ + nss_ipv6_strings.o \ + nss_ipv6_log.o +ifneq "$(NSS_DRV_IPV6_REASM_ENABLE)" "n" +ccflags-y += -DNSS_DRV_IPV6_REASM_ENABLE +qca-nss-drv-objs += \ + nss_ipv6_reasm.o \ + nss_ipv6_reasm_stats.o \ + nss_ipv6_reasm_strings.o +endif +endif + +ifneq "$(NSS_DRV_TSTAMP_ENABLE)" "n" +ccflags-y += -DNSS_DRV_TSTAMP_ENABLE +qca-nss-drv-objs += \ + nss_tstamp.o \ + nss_tstamp_stats.o +endif + +ifneq "$(NSS_DRV_GRE_ENABLE)" "n" +ccflags-y += -DNSS_DRV_GRE_ENABLE +qca-nss-drv-objs += \ + nss_gre.o \ + nss_gre_log.o \ + nss_gre_stats.o \ + nss_gre_strings.o +endif + +ifneq "$(NSS_DRV_GRE_REDIR_ENABLE)" "n" +ccflags-y += -DNSS_DRV_GRE_REDIR_ENABLE +qca-nss-drv-objs += \ + nss_gre_redir.o \ + nss_gre_redir_log.o \ + nss_gre_redir_lag_ds.o \ + nss_gre_redir_lag_ds_log.o \ + nss_gre_redir_lag_ds_stats.o \ + nss_gre_redir_lag_ds_strings.o \ + nss_gre_redir_lag_us.o \ + 
nss_gre_redir_lag_us_log.o \ + nss_gre_redir_lag_us_stats.o \ + nss_gre_redir_lag_us_strings.o \ + nss_gre_redir_stats.o \ + nss_gre_redir_strings.o \ + nss_gre_redir_mark.o \ + nss_gre_redir_mark_log.o \ + nss_gre_redir_mark_stats.o \ + nss_gre_redir_mark_strings.o +endif + +ifneq "$(NSS_DRV_GRE_TUNNEL_ENABLE)" "n" +ccflags-y += -DNSS_DRV_GRE_TUNNEL_ENABLE +qca-nss-drv-objs += \ + nss_gre_tunnel.o \ + nss_gre_tunnel_log.o \ + nss_gre_tunnel_stats.o \ + nss_gre_tunnel_strings.o +endif + +ifneq "$(NSS_DRV_CAPWAP_ENABLE)" "n" +ccflags-y += -DNSS_DRV_CAPWAP_ENABLE +qca-nss-drv-objs += \ + nss_capwap.o \ + nss_capwap_log.o \ + nss_capwap_stats.o \ + nss_capwap_strings.o +endif + +ifneq "$(NSS_DRV_MAPT_ENABLE)" "n" +ccflags-y += -DNSS_DRV_MAPT_ENABLE +qca-nss-drv-objs += \ + nss_map_t.o \ + nss_map_t_log.o \ + nss_map_t_stats.o \ + nss_map_t_strings.o +endif + +ifneq "$(NSS_DRV_PPTP_ENABLE)" "n" +ccflags-y += -DNSS_DRV_PPTP_ENABLE +qca-nss-drv-objs += \ + nss_pptp.o \ + nss_pptp_log.o \ + nss_pptp_stats.o \ + nss_pptp_strings.o +endif + +ifneq "$(NSS_DRV_SHAPER_ENABLE)" "n" +ccflags-y += -DNSS_DRV_SHAPER_ENABLE +qca-nss-drv-objs += \ + nss_shaper.o +endif + +ifneq "$(NSS_DRV_SJACK_ENABLE)" "n" +ccflags-y += -DNSS_DRV_SJACK_ENABLE +qca-nss-drv-objs += \ + nss_sjack.o \ + nss_sjack_log.o \ + nss_sjack_stats.o +endif + +ifneq "$(NSS_DRV_TUN6RD_ENABLE)" "n" +ccflags-y += -DNSS_DRV_TUN6RD_ENABLE +qca-nss-drv-objs += \ + nss_tun6rd.o \ + nss_tun6rd_log.o +endif + +ifneq "$(NSS_DRV_TRUSTSEC_ENABLE)" "n" +ccflags-y += -DNSS_DRV_TRUSTSEC_ENABLE +qca-nss-drv-objs += \ + nss_trustsec_tx.o \ + nss_trustsec_tx_log.o \ + nss_trustsec_tx_stats.o +endif + +ifneq "$(NSS_DRV_TUNIPIP6_ENABLE)" "n" +ccflags-y += -DNSS_DRV_TUNIPIP6_ENABLE +qca-nss-drv-objs += \ + nss_tunipip6.o \ + nss_tunipip6_log.o \ + nss_tunipip6_stats.o +endif + +ifneq "$(NSS_DRV_QRFS_ENABLE)" "n" +ccflags-y += -DNSS_DRV_QRFS_ENABLE +qca-nss-drv-objs += \ + nss_qrfs.o \ + nss_qrfs_log.o \ + nss_qrfs_stats.o +endif + 
+ifneq "$(NSS_DRV_RMNET_ENABLE)" "n" +ccflags-y += -DNSS_DRV_RMNET_ENABLE +qca-nss-drv-objs += \ + nss_rmnet_rx.o \ + nss_rmnet_rx_stats.o +endif + +ifneq "$(NSS_DRV_PORTID_ENABLE)" "n" +ccflags-y += -DNSS_DRV_PORTID_ENABLE +qca-nss-drv-objs += \ + nss_portid.o \ + nss_portid_log.o \ + nss_portid_stats.o +endif + +ifneq "$(NSS_DRV_IGS_ENABLE)" "n" +ccflags-y += -DNSS_DRV_IGS_ENABLE +qca-nss-drv-objs += \ + nss_igs.o \ + nss_igs_stats.o +endif + +ifneq "$(NSS_DRV_OAM_ENABLE)" "n" +ccflags-y += -DNSS_DRV_OAM_ENABLE +qca-nss-drv-objs += \ + nss_oam.o \ + nss_oam_log.o +endif + +ifneq "$(NSS_DRV_CLMAP_ENABLE)" "n" +ccflags-y += -DNSS_DRV_CLMAP_ENABLE +qca-nss-drv-objs += \ + nss_clmap.o \ + nss_clmap_log.o \ + nss_clmap_stats.o \ + nss_clmap_strings.o +endif + + +ifneq "$(NSS_DRV_VXLAN_ENABLE)" "n" +ccflags-y += -DNSS_DRV_VXLAN_ENABLE +qca-nss-drv-objs += \ + nss_vxlan.o \ + nss_vxlan_log.o \ + nss_vxlan_stats.o +endif + +ifneq "$(NSS_DRV_MATCH_ENABLE)" "n" +ccflags-y += -DNSS_DRV_MATCH_ENABLE +qca-nss-drv-objs += \ + nss_match.o \ + nss_match_log.o \ + nss_match_stats.o \ + nss_match_strings.o +endif + +ifneq "$(NSS_DRV_MIRROR_ENABLE)" "n" +ccflags-y += -DNSS_DRV_MIRROR_ENABLE +qca-nss-drv-objs += \ + nss_mirror.o \ + nss_mirror_log.o \ + nss_mirror_stats.o \ + nss_mirror_strings.o +endif + +ifneq "$(NSS_DRV_UDP_ST_ENABLE)" "n" +ccflags-y += -DNSS_DRV_UDP_ST_ENABLE +qca-nss-drv-objs += \ + nss_udp_st.o \ + nss_udp_st_log.o \ + nss_udp_st_stats.o \ + nss_udp_st_strings.o +endif + +ifeq ($(SoC),$(filter $(SoC),ipq806x)) +qca-nss-drv-objs += nss_data_plane/nss_data_plane_gmac.o \ + nss_hal/ipq806x/nss_hal_pvt.o + +ifneq "$(NSS_DRV_C2C_ENABLE)" "n" +ccflags-y += -DNSS_DRV_C2C_ENABLE +qca-nss-drv-objs += \ + nss_c2c_tx.o \ + nss_c2c_tx_log.o \ + nss_c2c_tx_stats.o \ + nss_c2c_tx_strings.o \ + nss_c2c_rx.o \ + nss_c2c_rx_stats.o \ + nss_c2c_rx_strings.o +endif +ifneq "$(NSS_DRV_IPSEC_ENABLE)" "n" +ccflags-y += -DNSS_DRV_IPSEC_ENABLE +qca-nss-drv-objs += \ + nss_ipsec_log.o 
\ + nss_ipsec.o +endif + +ifneq "$(NSS_DRV_CRYPTO_ENABLE)" "n" +ccflags-y += -DNSS_DRV_CRYPTO_ENABLE +qca-nss-drv-objs += \ + nss_crypto.o \ + nss_crypto_log.o +endif + +ifneq "$(NSS_DRV_DTLS_ENABLE)" "n" +ccflags-y += -DNSS_DRV_DTLS_ENABLE +qca-nss-drv-objs += \ + nss_dtls.o \ + nss_dtls_log.o \ + nss_dtls_stats.o +endif +ccflags-y += -I$(obj)/nss_hal/ipq806x -DNSS_HAL_IPQ806X_SUPPORT +endif + +ifeq ($(SoC),$(filter $(SoC),ipq60xx ipq60xx_64 ipq807x ipq807x_64)) +qca-nss-drv-objs += nss_data_plane/nss_data_plane.o \ + nss_edma.o \ + nss_edma_stats.o \ + nss_edma_strings.o \ + nss_ppe.o \ + nss_ppe_log.o \ + nss_ppe_stats.o \ + nss_ppe_strings.o \ + nss_ppe_vp.o \ + nss_ppe_vp_log.o \ + nss_ppe_vp_stats.o + +ccflags-y += -DNSS_DRV_PPE_ENABLE +ccflags-y += -DNSS_DRV_EDMA_ENABLE + +ifneq "$(NSS_DRV_IPSEC_ENABLE)" "n" +ccflags-y += -DNSS_DRV_IPSEC_ENABLE +qca-nss-drv-objs += \ + nss_ipsec_cmn_log.o \ + nss_ipsec_cmn.o \ + nss_ipsec_cmn_stats.o \ + nss_ipsec_cmn_strings.o +endif + +ifneq "$(NSS_DRV_CRYPTO_ENABLE)" "n" +ccflags-y += -DNSS_DRV_CRYPTO_ENABLE +ccflags-y += -DNSS_DRV_DMA_ENABLE + +qca-nss-drv-objs += \ + nss_crypto_cmn.o \ + nss_crypto_cmn_log.o \ + nss_crypto_cmn_stats.o \ + nss_crypto_cmn_strings.o \ + nss_dma.o \ + nss_dma_log.o \ + nss_dma_stats.o \ + nss_dma_strings.o +endif + +ifneq "$(NSS_DRV_DTLS_ENABLE)" "n" +ccflags-y += -DNSS_DRV_DTLS_ENABLE +qca-nss-drv-objs += \ + nss_dtls_cmn.o \ + nss_dtls_cmn_log.o \ + nss_dtls_cmn_stats.o \ + nss_dtls_cmn_strings.o +endif + +ifneq "$(NSS_DRV_QVPN_ENABLE)" "n" +ccflags-y += -DNSS_DRV_QVPN_ENABLE +qca-nss-drv-objs += \ + nss_qvpn.o \ + nss_qvpn_log.o \ + nss_qvpn_stats.o \ + nss_qvpn_strings.o +endif +ifneq "$(NSS_DRV_TLS_ENABLE)" "n" +ccflags-y += -DNSS_DRV_TLS_ENABLE +qca-nss-drv-objs += \ + nss_tls.o \ + nss_tls_log.o \ + nss_tls_stats.o \ + nss_tls_strings.o +endif +endif + +ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64)) +qca-nss-drv-objs += nss_hal/ipq807x/nss_hal_pvt.o \ + 
nss_data_plane/hal/nss_ipq807x.o +ifneq "$(NSS_DRV_C2C_ENABLE)" "n" +ccflags-y += -DNSS_DRV_C2C_ENABLE +qca-nss-drv-objs += \ + nss_c2c_tx.o \ + nss_c2c_tx_log.o \ + nss_c2c_tx_stats.o \ + nss_c2c_tx_strings.o \ + nss_c2c_rx.o \ + nss_c2c_rx_stats.o \ + nss_c2c_rx_strings.o +endif +ccflags-y += -I$(obj)/nss_hal/ipq807x -DNSS_HAL_IPQ807x_SUPPORT -DNSS_MULTI_H2N_DATA_RING_SUPPORT +endif + +ifeq ($(SoC),$(filter $(SoC),ipq60xx ipq60xx_64)) +qca-nss-drv-objs += nss_hal/ipq60xx/nss_hal_pvt.o \ + nss_data_plane/hal/nss_ipq60xx.o +ccflags-y += -I$(obj)/nss_hal/ipq60xx -DNSS_HAL_IPQ60XX_SUPPORT -DNSS_MULTI_H2N_DATA_RING_SUPPORT +endif + +ifeq ($(SoC),$(filter $(SoC),ipq50xx ipq50xx_64)) +qca-nss-drv-objs += nss_data_plane/nss_data_plane.o \ + nss_hal/ipq50xx/nss_hal_pvt.o \ + nss_data_plane/hal/nss_ipq50xx.o + +ifneq "$(NSS_DRV_IPSEC_ENABLE)" "n" +ccflags-y += -DNSS_DRV_IPSEC_ENABLE +qca-nss-drv-objs += \ + nss_ipsec_cmn_log.o \ + nss_ipsec_cmn.o \ + nss_ipsec_cmn_stats.o \ + nss_ipsec_cmn_strings.o +endif + +ifneq "$(NSS_DRV_CRYPTO_ENABLE)" "n" +ccflags-y += -DNSS_DRV_CRYPTO_ENABLE +qca-nss-drv-objs += \ + nss_crypto_cmn.o \ + nss_crypto_cmn_log.o \ + nss_crypto_cmn_stats.o \ + nss_crypto_cmn_strings.o +endif + +ifneq "$(NSS_DRV_DTLS_ENABLE)" "n" +ccflags-y += -DNSS_DRV_DTLS_ENABLE +qca-nss-drv-objs += \ + nss_dtls_cmn.o \ + nss_dtls_cmn_log.o \ + nss_dtls_cmn_stats.o \ + nss_dtls_cmn_strings.o +endif +ccflags-y += -I$(obj)/nss_hal/ipq50xx -DNSS_HAL_IPQ50XX_SUPPORT -DNSS_MULTI_H2N_DATA_RING_SUPPORT +endif + +ccflags-y += -I$(obj)/nss_hal/include -I$(obj)/nss_data_plane/include -I$(obj)/exports -DNSS_DEBUG_LEVEL=0 -DNSS_PKT_STATS_ENABLED=1 +ccflags-y += -I$(obj)/nss_data_plane/hal/include +ccflags-y += -DNSS_PM_DEBUG_LEVEL=0 -DNSS_SKB_REUSE_SUPPORT=1 +ccflags-y += -Wall -Werror + +KERNELVERSION := $(word 1, $(subst ., ,$(KERNELVERSION))).$(word 2, $(subst ., ,$(KERNELVERSION))) + +ifneq ($(findstring 3.4, $(KERNELVERSION)),) +NSS_CCFLAGS = -DNSS_DT_SUPPORT=0 
-DNSS_FW_DBG_SUPPORT=1 -DNSS_PM_SUPPORT=1 +else +NSS_CCFLAGS = -DNSS_DT_SUPPORT=1 -DNSS_FW_DBG_SUPPORT=0 -DNSS_PM_SUPPORT=0 + +ccflags-y += -I$(obj) +endif + +# Fabric scaling is supported in 3.14 and 4.4 only +ifneq ($(findstring 3.14, $(KERNELVERSION)),) +NSS_CCFLAGS += -DNSS_FABRIC_SCALING_SUPPORT=1 +else ifneq ($(findstring 4.4, $(KERNELVERSION)),) +NSS_CCFLAGS += -DNSS_FABRIC_SCALING_SUPPORT=1 +else +NSS_CCFLAGS += -DNSS_FABRIC_SCALING_SUPPORT=0 +endif + +# Disable Frequency scaling +ifeq "$(NSS_FREQ_SCALE_DISABLE)" "y" +ccflags-y += -DNSS_FREQ_SCALE_SUPPORT=0 +else +qca-nss-drv-objs += \ + nss_freq.o \ + nss_freq_log.o \ + nss_freq_stats.o +ccflags-y += -DNSS_FREQ_SCALE_SUPPORT=1 +endif + +ccflags-y += $(NSS_CCFLAGS) + +export NSS_CCFLAGS + +obj ?= . diff --git a/feeds/ipq807x/qca-nss-drv/src/Makefile.fsm b/feeds/ipq807x/qca-nss-drv/src/Makefile.fsm new file mode 100644 index 000000000..93ca00725 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/Makefile.fsm @@ -0,0 +1,123 @@ +# ################################################### +# # Makefile for the NSS driver +# ################################################### +obj-m += qca-nss-drv.o + +# +# List the files that belong to the driver in alphabetical order. 
+# +qca-nss-drv-objs := \ + nss_bridge.o \ + nss_bridge_log.o \ + nss_capwap.o \ + nss_capwap_log.o \ + nss_capwap_stats.o \ + nss_cmn.o \ + nss_core.o \ + nss_coredump.o \ + nss_crypto.o \ + nss_crypto_log.o \ + nss_dtls.o \ + nss_dtls_log.o \ + nss_dtls_stats.o \ + nss_dynamic_interface.o \ + nss_dynamic_interface_log.o \ + nss_edma.o \ + nss_edma_stats.o \ + nss_eth_rx.o \ + nss_eth_rx_stats.o \ + nss_gre.o \ + nss_gre_log.o \ + nss_gre_stats.o \ + nss_gre_redir.o \ + nss_gre_redir_log.o \ + nss_gre_redir_stats.o \ + nss_gre_tunnel.o \ + nss_gre_tunnel_log.o \ + nss_gre_tunnel_stats.o \ + nss_if.o \ + nss_if_log.o \ + nss_init.o \ + nss_ipsec.o \ + nss_ipsec_log.o \ + nss_ipv4.o \ + nss_ipv4_stats.o \ + nss_ipv4_log.o \ + nss_ipv4_reasm.o \ + nss_ipv4_reasm_stats.o \ + nss_ipv6.o \ + nss_ipv6_stats.o \ + nss_ipv6_log.o \ + nss_ipv6_reasm.o \ + nss_ipv6_reasm_stats.o \ + nss_l2tpv2.o \ + nss_l2tpv2_log.o \ + nss_l2tpv2_stats.o \ + nss_lag.o \ + nss_lag_log.o \ + nss_log.o \ + nss_lso_rx.o \ + nss_lso_rx_stats.o \ + nss_map_t.o \ + nss_map_t_log.o \ + nss_map_t_stats.o \ + nss_n2h.o \ + nss_n2h_stats.o \ + nss_oam.o \ + nss_oam_log.o \ + nss_phys_if.o \ + nss_pm.o \ + nss_profiler.o \ + nss_portid.o \ + nss_portid_log.o \ + nss_portid_stats.o \ + nss_ppe.o \ + nss_ppe_log.o \ + nss_ppe_stats.o \ + nss_pppoe.o \ + nss_pppoe_log.o \ + nss_pppoe_stats.o \ + nss_pptp.o \ + nss_pptp_log.o \ + nss_pptp_stats.o \ + nss_rps.o \ + nss_shaper.o \ + nss_sjack.o \ + nss_sjack_log.o \ + nss_sjack_stats.o \ + nss_stats.o \ + nss_tstamp.o \ + nss_tstamp_stats.o \ + nss_tun6rd.o \ + nss_tun6rd_log.o \ + nss_trustsec_tx.o \ + nss_trustsec_tx_log.o \ + nss_trustsec_tx_stats.o \ + nss_tunipip6.o \ + nss_tunipip6_log.o \ + nss_unaligned.o \ + nss_unaligned_log.o \ + nss_unaligned_stats.o \ + nss_virt_if.o \ + nss_virt_if_stats.o \ + nss_vlan.o \ + nss_vlan_log.o \ + nss_wifi.o \ + nss_wifi_log.o \ + nss_wifi_stats.o \ + nss_wifi_if.o \ + nss_wifi_if_stats.o \ + nss_wifi_vdev.o + 
+qca-nss-drv-objs += nss_hal/nss_hal.o +qca-nss-drv-objs += nss_hal/fsm9010/nss_hal_pvt.o +qca-nss-drv-objs += nss_data_plane/nss_data_plane_common.o +qca-nss-drv-objs += nss_data_plane/nss_data_plane_gmac.o + +ccflags-y += -I$(obj)/nss_hal/include +ccflags-y += -I$(obj)/nss_data_plane/include +ccflags-y += -I$(obj)/exports +ccflags-y += -I$(obj)/nss_hal/fsm9010 -DNSS_HAL_FSM9010_SUPPORT +ccflags-y += -DNSS_DEBUG_LEVEL=0 -DNSS_PKT_STATS_ENABLED=1 +ccflags-y += -DNSS_DT_SUPPORT=1 -DNSS_PM_SUPPORT=0 -DNSS_FW_DBG_SUPPORT=0 -DNSS_SKB_REUSE_SUPPORT=0 +ccflags-y += -DNSS_PPP_SUPPORT=0 -DNSS_FREQ_SCALE_SUPPORT=0 -DNSS_FABRIC_SCALING_SUPPORT=0 diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_fsm9010.h b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_fsm9010.h new file mode 100644 index 000000000..7e472907c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_fsm9010.h @@ -0,0 +1,43 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_fsm9010.h + * Architecture dependent parameters. 
+ */ +#ifndef __NSS_FSM9010_H +#define __NSS_FSM9010_H + +/** + * @addtogroup nss_arch_macros_fsm9010 + * @{ + */ + +#define NSS_MAX_NUM_PRI 1 /**< Maximum number of priority queues in NSS. */ +#define NSS_HOST_CORES 4 /**< Number of host cores. */ + +#define NSS_N2H_RING_COUNT 3 /**< Number of N2H rings. */ +#define NSS_H2N_RING_COUNT 4 /**< Number of H2N rings. */ +#define NSS_RING_SIZE 128 /**< Ring size. */ + +#define NSS_IMEM_START 0xE4000000 /**< NSS IMEM start address. */ +#define NSS_IMEM_SIZE 0x10000 /**< NSS IMEM size. */ + +/** + * @} + */ + +#endif /** __NSS_FSM9010_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq40xx.h b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq40xx.h new file mode 100644 index 000000000..d6d335132 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq40xx.h @@ -0,0 +1,43 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipq40xx.h + * Architecture dependent parameters. 
+ */ +#ifndef __NSS_IPQ40XX_H +#define __NSS_IPQ40XX_H + +/** + * @addtogroup nss_arch_macros_ipq40xx + * @{ + */ + +#define NSS_MAX_NUM_PRI 1 /**< Maximum number of priority queues in NSS. */ +#define NSS_HOST_CORES 4 /**< Number of host cores. */ + +#define NSS_N2H_RING_COUNT 0 /**< Number of N2H rings. */ +#define NSS_H2N_RING_COUNT 0 /**< Number of H2N rings. */ +#define NSS_RING_SIZE 128 /**< Ring size. */ + +#define NSS_IMEM_START 0x39000000 /**< NSS IMEM start address. */ +#define NSS_IMEM_SIZE 0x10000 /**< NSS IMEM size per core. */ + +/** + * @} + */ + +#endif /** __NSS_IPQ40XX_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq50xx.h b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq50xx.h new file mode 100644 index 000000000..e83649f81 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq50xx.h @@ -0,0 +1,40 @@ +/* + ************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipq50xx.h + * Architecture dependent parameters. 
+ */ +#ifndef __NSS_IPQ50XX_H +#define __NSS_IPQ50XX_H + +/** + * @addtogroup nss_arch_macros_ipq50xx + * @{ + */ + +#define NSS_MAX_NUM_PRI 4 /**< Maximum number of priority queues in NSS. */ +#define NSS_HOST_CORES 2 /**< Number of host cores. */ + +#define NSS_N2H_RING_COUNT 3 /**< Number of N2H rings. */ +#define NSS_H2N_RING_COUNT 7 /**< Number of H2N rings. */ +#define NSS_RING_SIZE 128 /**< Ring size. */ + +/** + * @} + */ + +#endif /** __NSS_IPQ50XX_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq50xx_64.h b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq50xx_64.h new file mode 100644 index 000000000..b756c5af7 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq50xx_64.h @@ -0,0 +1,40 @@ +/* + ************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipq50xx_64.h + * Architecture dependent parameters. + */ +#ifndef __NSS_IPQ50XX_64_H +#define __NSS_IPQ50XX_64_H + +/** + * @addtogroup nss_arch_macros_ipq50xx_64 + * @{ + */ + +#define NSS_MAX_NUM_PRI 4 /**< Maximum number of priority queues in NSS. 
*/ +#define NSS_HOST_CORES 2 /**< Number of host cores. */ + +#define NSS_N2H_RING_COUNT 3 /**< Number of N2H rings. */ +#define NSS_H2N_RING_COUNT 7 /**< Number of H2N rings. */ +#define NSS_RING_SIZE 128 /**< Ring size. */ + +/** + * @} + */ + +#endif /** __NSS_IPQ50XX_64_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq60xx.h b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq60xx.h new file mode 100644 index 000000000..bc0396a46 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq60xx.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipq60xx.h + * Architecture dependent parameters. + */ +#ifndef __NSS_IPQ60XX_H +#define __NSS_IPQ60XX_H + +/** + * @addtogroup nss_arch_macros_ipq60xx + * @{ + */ + +#define NSS_MAX_NUM_PRI 4 /**< Maximum number of priority queues in NSS. */ +#define NSS_HOST_CORES 4 /**< Number of host cores. */ +#define NSS_PPE_SUPPORTED /**< PPE supported flag. */ + +#define NSS_N2H_RING_COUNT 5 /**< Number of N2H rings. 
*/ +#define NSS_H2N_RING_COUNT 11 /**< Number of H2N rings. */ +#define NSS_RING_SIZE 128 /**< Ring size. */ + +/** + * @} + */ + +#endif /** __NSS_IPQ60XX_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq60xx_64.h b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq60xx_64.h new file mode 100644 index 000000000..a0e5e9ea8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq60xx_64.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipq60xx_64.h + * Architecture dependent parameters. + */ +#ifndef __NSS_IPQ60XX_64_H +#define __NSS_IPQ60XX_64_H + +/** + * @addtogroup nss_arch_macros_ipq60xx_64 + * @{ + */ + +#define NSS_MAX_NUM_PRI 4 /**< Maximum number of priority queues in NSS. */ +#define NSS_HOST_CORES 4 /**< Number of host cores. */ +#define NSS_PPE_SUPPORTED /**< PPE supported flag. */ + +#define NSS_N2H_RING_COUNT 5 /**< Number of N2H rings. */ +#define NSS_H2N_RING_COUNT 11 /**< Number of H2N rings. */ +#define NSS_RING_SIZE 128 /**< Ring size. 
*/ + +/** + * @} + */ + +#endif /** __NSS_IPQ60XX_64_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq806x.h b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq806x.h new file mode 100644 index 000000000..216f950ef --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq806x.h @@ -0,0 +1,43 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipq806x.h + * Architecture dependent parameters. + */ +#ifndef __NSS_IPQ806X_H +#define __NSS_IPQ806X_H + +/** + * @addtogroup nss_arch_macros_ipq806x + * @{ + */ + +#define NSS_MAX_NUM_PRI 4 /**< Maximum number of priority queues in NSS. */ +#define NSS_HOST_CORES 2 /**< Number of host cores. */ + +#define NSS_N2H_RING_COUNT 3 /**< Number of N2H rings. */ +#define NSS_H2N_RING_COUNT 4 /**< Number of H2N rings. */ +#define NSS_RING_SIZE 128 /**< Ring size. */ + +#define NSS_IMEM_START 0x39000000 /**< NSS IMEM start address. */ +#define NSS_IMEM_SIZE 0x10000 /**< NSS IMEM size per core. 
*/ + +/** + * @} + */ + +#endif /** __NSS_IPQ806X_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq807x.h b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq807x.h new file mode 100644 index 000000000..c8fc26dc2 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq807x.h @@ -0,0 +1,44 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipq807x.h + * Architecture dependent parameters. + */ +#ifndef __NSS_IPQ807X_H +#define __NSS_IPQ807X_H + +/** + * @addtogroup nss_arch_macros_ipq807x + * @{ + */ + +#define NSS_MAX_NUM_PRI 4 /**< Maximum number of priority queues in NSS. */ +#define NSS_HOST_CORES 4 /**< Number of host cores. */ +#define NSS_PPE_SUPPORTED /**< PPE supported flag. */ + +#define NSS_N2H_RING_COUNT 5 /**< Number of N2H rings. */ +#define NSS_H2N_RING_COUNT 11 /**< Number of H2N rings. */ +#define NSS_RING_SIZE 128 /**< Ring size. */ + +#define NSS_IMEM_START 0x38000000 /**< NSS IMEM start address. */ +#define NSS_IMEM_SIZE 0x30000 /**< NSS IMEM size per core. 
*/ + +/** + * @} + */ + +#endif /** __NSS_IPQ807X_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq807x_64.h b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq807x_64.h new file mode 100644 index 000000000..fec7aa538 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/arch/nss_ipq807x_64.h @@ -0,0 +1,44 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipq807x_64.h + * Architecture dependent parameters. + */ +#ifndef __NSS_IPQ807x_64_H +#define __NSS_IPQ807x_64_H + +/** + * @addtogroup nss_arch_macros_ipq807x_64 + * @{ + */ + +#define NSS_MAX_NUM_PRI 4 /**< Maximum number of priority queues in NSS. */ +#define NSS_HOST_CORES 4 /**< Number of host cores. */ +#define NSS_PPE_SUPPORTED /**< PPE supported flag. */ + +#define NSS_N2H_RING_COUNT 5 /**< Number of N2H rings. */ +#define NSS_H2N_RING_COUNT 11 /**< Number of H2N rings. */ +#define NSS_RING_SIZE 128 /**< Ring size. */ + +#define NSS_IMEM_START 0x38000000 /**< NSS IMEM start address. */ +#define NSS_IMEM_SIZE 0x30000 /**< NSS IMEM size per core. 
*/ + +/** + * @} + */ + +#endif /** __NSS_IPQ807x_64_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_api_if.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_api_if.h new file mode 100755 index 000000000..ddf6b7c68 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_api_if.h @@ -0,0 +1,319 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_api_if.h + * NSS driver APIs and Declarations. + * + * This file declares all the public interfaces for NSS driver. + */ + +#ifndef __NSS_API_IF_H +#define __NSS_API_IF_H + +#ifdef __KERNEL__ /* only kernel will use. 
*/ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +#include +#include +#include +#include "nss_arch.h" +#include "nss_def.h" +#include "nss_cmn.h" +#include "nss_tun6rd.h" +#include "nss_l2tpv2.h" +#include "nss_pptp.h" +#include "nss_map_t.h" +#include "nss_tunipip6.h" +#include "nss_lag.h" +#include "nss_stats_public.h" +#include "nss_ipv4.h" +#include "nss_ipv6.h" +#include "nss_shaper.h" +#include "nss_if.h" +#include "nss_phy_if.h" +#include "nss_virt_if.h" +#include "nss_pppoe.h" +#include "nss_crypto.h" +#include "nss_crypto_cmn.h" +#include "nss_dma.h" + +#include "nss_profiler.h" +#include "nss_dynamic_interface.h" +#include "nss_ipsec.h" +#include "nss_ipsec_cmn.h" +#include "nss_gre.h" +#include "nss_gre_redir.h" +#include "nss_gre_redir_lag.h" +#include "nss_gre_tunnel.h" +#include "nss_sjack.h" +#include "nss_capwap.h" +#include "nss_wifi.h" +#include "nss_wifi_vdev.h" +#include "nss_n2h.h" +#include "nss_rps.h" +#include "nss_portid.h" +#include "nss_oam.h" +#include "nss_dtls.h" +#include "nss_dtls_cmn.h" +#include "nss_tls.h" +#include "nss_edma.h" +#include "nss_bridge.h" +#include "nss_ppe.h" +#include "nss_trustsec_tx.h" +#include "nss_vlan.h" +#include "nss_igs.h" +#include "nss_mirror.h" +#include "nss_wifili_if.h" +#include "nss_project.h" +#include "nss_qrfs.h" +#include "nss_c2c_tx.h" +#include "nss_qvpn.h" +#include "nss_unaligned.h" +#include "nss_pvxlan.h" +#include "nss_vxlan.h" +#include "nss_pm.h" +#include "nss_freq.h" +#include "nss_tstamp.h" +#include "nss_gre_redir_mark.h" +#include "nss_clmap.h" +#include "nss_rmnet_rx.h" +#include "nss_match.h" +#include "nss_eth_rx.h" +#include "nss_c2c_rx.h" +#include "nss_ipv6_reasm.h" +#include "nss_ipv4_reasm.h" +#include "nss_lso_rx.h" +#include "nss_wifi_mac_db_if.h" +#include "nss_wifi_ext_vdev_if.h" +#include "nss_wifili_if.h" +#include "nss_ppe_vp.h" +#include "nss_wifi_mesh.h" +#include "nss_udp_st.h" +#endif + +#endif /*__KERNEL__ */ + +/** + * @addtogroup nss_driver_subsystem + * @{ + */ + 
+#define NSS_MAX_CORES 2 /**< Maximum number of core interfaces. */ + +#define NSS_MAX_DEVICE_INTERFACES (NSS_MAX_PHYSICAL_INTERFACES + NSS_MAX_VIRTUAL_INTERFACES + NSS_MAX_TUNNEL_INTERFACES + NSS_MAX_DYNAMIC_INTERFACES) + /**< Maximum number of device interfaces. */ + +#define NSS_MAX_NET_INTERFACES (NSS_MAX_DEVICE_INTERFACES + NSS_MAX_SPECIAL_INTERFACES) + /**< Maximum number of network interfaces. */ + +#define NSS_MAX_PHYSICAL_INTERFACES 8 /**< Maximum number of physical interfaces. */ +#define NSS_MAX_VIRTUAL_INTERFACES 16 /**< Maximum number of virtual interfaces. */ +#define NSS_MAX_TUNNEL_INTERFACES 4 /**< Maximum number of tunnel interfaces. */ +#define NSS_MAX_SPECIAL_INTERFACES 69 /**< Maximum number of special interfaces. */ +#define NSS_MAX_WIFI_RADIO_INTERFACES 3 /**< Maximum number of radio interfaces. */ + +/* + * Start of individual interface groups + */ +#define NSS_PHYSICAL_IF_START 0 + /**< Beginning of the physical interfaces. */ + +#define NSS_VIRTUAL_IF_START (NSS_PHYSICAL_IF_START + NSS_MAX_PHYSICAL_INTERFACES) + /**< Beginning of the virtual interfaces. */ + +#define NSS_TUNNEL_IF_START (NSS_VIRTUAL_IF_START + NSS_MAX_VIRTUAL_INTERFACES) + /**< Beginning of the tunnel interfaces. */ + +#define NSS_DYNAMIC_IF_START (NSS_TUNNEL_IF_START + NSS_MAX_TUNNEL_INTERFACES) + /**< Beginning of the dynamic interfaces. */ + +#define NSS_SPECIAL_IF_START (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES) + /**< Beginning of the special interfaces. */ + +/* + * Tunnel interface numbers + */ +#define NSS_IPSEC_ENCAP_IF_NUMBER (NSS_TUNNEL_IF_START + 0) + /**< Tunnel interface number for IPsec encapsulation interfaces. */ +#define NSS_IPSEC_DECAP_IF_NUMBER (NSS_TUNNEL_IF_START + 1) + /**< Tunnel interface number for IPsec decapsulation interfaces. */ +#define NSS_TUN6RD_INTERFACE (NSS_TUNNEL_IF_START + 2) + /**< Tunnel interface number for TUN6RD interfaces. 
*/ +#define NSS_TUNIPIP6_INTERFACE (NSS_TUNNEL_IF_START + 3) + /**< Tunnel interface number for TUNIPIP6 interfaces. */ + +/* + * Special interface numbers + */ +#define NSS_N2H_INTERFACE (NSS_SPECIAL_IF_START + 0) + /**< Special interface number for N2H. */ +#define NSS_ETH_RX_INTERFACE (NSS_SPECIAL_IF_START + 2) + /**< Special interface number for Ethernet Rx. */ +#define NSS_PPPOE_INTERFACE (NSS_SPECIAL_IF_START + 3) + /**< Special interface number for PPPoE. */ +#define NSS_IPV4_RX_INTERFACE (NSS_SPECIAL_IF_START + 5) + /**< Special interface number for IPv4. */ +#define NSS_IPV6_RX_INTERFACE (NSS_SPECIAL_IF_START + 7) + /**< Special interface number for IPv6. */ +#define NSS_PROFILER_INTERFACE (NSS_SPECIAL_IF_START + 8) + /**< Special interface number for profile. */ +#define NSS_CRYPTO_INTERFACE (NSS_SPECIAL_IF_START + 9) + /**< Special interface number for crypto CE5. */ +#define NSS_DTLS_INTERFACE (NSS_SPECIAL_IF_START + 10) + /**< Special interface number for DTLS. */ +#define NSS_CRYPTO_CMN_INTERFACE (NSS_SPECIAL_IF_START + 11) + /**< Special interface number for crypto common. */ +#define NSS_C2C_TX_INTERFACE (NSS_SPECIAL_IF_START + 12) + /**< Virtual interface number for core-to-core transmissions. */ +#define NSS_C2C_RX_INTERFACE (NSS_SPECIAL_IF_START + 13) + /**< Virtual interface number for core-to-core reception. */ +#define NSS_IPSEC_CMN_INTERFACE (NSS_SPECIAL_IF_START + 18) + /**< Virtual interface number for IPSec rule. */ +#define NSS_COREFREQ_INTERFACE (NSS_SPECIAL_IF_START + 19) + /**< Virtual interface number for core frequency. */ +#define NSS_DYNAMIC_INTERFACE (NSS_SPECIAL_IF_START + 20) + /**< Special interface number for dynamic interfaces. */ +#define NSS_GRE_REDIR_INTERFACE (NSS_SPECIAL_IF_START + 21) + /**< Special interface number for GRE redirect base interfaces. */ +#define NSS_LSO_RX_INTERFACE (NSS_SPECIAL_IF_START + 22) + /**< Special interface number for LSO. 
*/ +#define NSS_SJACK_INTERFACE (NSS_SPECIAL_IF_START + 23) + /**< Special interface number for SJACK. */ +#define NSS_IPV4_REASM_INTERFACE (NSS_SPECIAL_IF_START + 24) + /**< Special interface number for IPv4 reassembly interfaces. */ +#define NSS_DEBUG_INTERFACE (NSS_SPECIAL_IF_START + 25) + /**< Special interface number for debug. */ +#define NSS_WIFI_INTERFACE0 (NSS_SPECIAL_IF_START + 26) + /**< Special interface number for Wi-Fi radio 0. */ +#define NSS_WIFI_INTERFACE1 (NSS_SPECIAL_IF_START + 27) + /**< Special interface number for Wi-Fi radio 1. */ +#define NSS_WIFI_INTERFACE2 (NSS_SPECIAL_IF_START + 28) + /**< Special interface number for Wi-Fi radio 2. */ +#define NSS_IPV6_REASM_INTERFACE (NSS_SPECIAL_IF_START + 29) + /**< Special interface number for IPv6 reassembly. */ +#define NSS_LAG0_INTERFACE_NUM (NSS_SPECIAL_IF_START + 30) + /**< Special interface number for LAG0. */ +#define NSS_LAG1_INTERFACE_NUM (NSS_SPECIAL_IF_START + 31) + /**< Special interface number for LAG1. */ +#define NSS_LAG2_INTERFACE_NUM (NSS_SPECIAL_IF_START + 32) + /**< Special interface number for LAG2. */ +#define NSS_LAG3_INTERFACE_NUM (NSS_SPECIAL_IF_START + 33) + /**< Special interface number for LAG3. */ +#define NSS_L2TPV2_INTERFACE (NSS_SPECIAL_IF_START + 34) + /**< Special interface number for L2TPv2 UDP encapsulation. */ +#define NSS_PPTP_INTERFACE (NSS_SPECIAL_IF_START + 36) + /**< Special interface number for PPTP-to-decapsulation. */ +#define NSS_PORTID_INTERFACE (NSS_SPECIAL_IF_START + 37) + /**< Special interface number for port ID. */ +#define NSS_OAM_INTERFACE (NSS_SPECIAL_IF_START + 38) + /**< Special interface number for OAM. */ +#define NSS_MAP_T_INTERFACE (NSS_SPECIAL_IF_START + 39) + /**< Special interface number for MAP-T. */ +#define NSS_PPE_INTERFACE (NSS_SPECIAL_IF_START + 40) + /**< Special interface number for PPE. */ +#define NSS_EDMA_INTERFACE (NSS_SPECIAL_IF_START + 41) + /**< Special interface number for EDMA. 
*/ +#define NSS_GRE_TUNNEL_INTERFACE (NSS_SPECIAL_IF_START + 42) + /**< Special interface number for NSS GRE tunnel. */ +#define NSS_TRUSTSEC_TX_INTERFACE (NSS_SPECIAL_IF_START + 43) + /**< Special interface number for TrustSec Tx. */ +#define NSS_VAP_INTERFACE (NSS_SPECIAL_IF_START + 44) + /**< Special interface number for NSS Wi-Fi VAPs base interfaces. */ +#define NSS_VLAN_INTERFACE (NSS_SPECIAL_IF_START + 45) + /**< Special interface number for VLAN. */ +#define NSS_GRE_INTERFACE (NSS_SPECIAL_IF_START + 46) + /**< Special interface number for GRE. */ +#define NSS_WIFILI_INTERNAL_INTERFACE (NSS_SPECIAL_IF_START + 47) + /**< Special interface number for wifili internal instance. */ +#define NSS_PROJECT_INTERFACE (NSS_SPECIAL_IF_START + 48) + /**< Special interface number for project node. */ +#define NSS_PBUF_MGR_FREE_INTERFACE (NSS_SPECIAL_IF_START + 49) + /**< Special interface number for PBUF_MGR_FREE node. */ +#define NSS_REDIR_RX_INTERFACE (NSS_SPECIAL_IF_START + 50) + /**< Special interface number for 802.3 redirect node. */ +#define NSS_QRFS_INTERFACE (NSS_SPECIAL_IF_START + 51) + /**< Special interface number for QRFS. */ +#define NSS_GRE_REDIR_LAG_INTERFACE (NSS_SPECIAL_IF_START + 52) + /**< Special interface number for GRE redirect link aggregation interface. */ +#define NSS_UNALIGNED_INTERFACE (NSS_SPECIAL_IF_START + 53) + /**< Special interface number for unaligned handler. */ +#define NSS_TSTAMP_TX_INTERFACE (NSS_SPECIAL_IF_START + 54) + /**< Special interface number for timestamp transmit. */ +#define NSS_TSTAMP_RX_INTERFACE (NSS_SPECIAL_IF_START + 55) + /**< Special interface number for timestamp receive. */ +#define NSS_GRE_REDIR_MARK_INTERFACE (NSS_SPECIAL_IF_START + 56) + /**< Special interface number for GRE redirect mark. */ +#define NSS_VXLAN_INTERFACE (NSS_SPECIAL_IF_START + 57) + /**< Special interface number for VxLAN handler. 
*/ +#define NSS_RMNET_RX_INTERFACE (NSS_SPECIAL_IF_START + 58) + /**< Special interface number for remote wireless wide area network receive handler. */ +#define NSS_WIFILI_EXTERNAL_INTERFACE0 (NSS_SPECIAL_IF_START + 59) + /**< Special interface number for first external radio instance. */ +#define NSS_WIFILI_EXTERNAL_INTERFACE1 (NSS_SPECIAL_IF_START + 60) + /**< Special interface number for second external radio instance. */ +#define NSS_TLS_INTERFACE (NSS_SPECIAL_IF_START + 61) + /**< Special interface number for TLS. */ +#define NSS_PPE_VP_INTERFACE (NSS_SPECIAL_IF_START + 62) + /**< Special interface number for the virtual port (62, 63, 64) interface. */ +#define NSS_WIFI_MAC_DB_INTERFACE (NSS_SPECIAL_IF_START + 65) + /**< Special interface number for the Wi-Fi MAC database. */ +#define NSS_DMA_INTERFACE (NSS_SPECIAL_IF_START + 66) + /**< Special interface number for the DMA interface. */ +#define NSS_WIFI_EXT_VDEV_INTERFACE (NSS_SPECIAL_IF_START + 67) + /**< Special interface number for the Wi-Fi extended virtual interface. */ +#define NSS_UDP_ST_INTERFACE (NSS_SPECIAL_IF_START + 68) + /**< Special interface number for the UDP speed test interface. */ + +#ifdef __KERNEL__ /* only kernel will use. */ + +/* + * General utilities + */ + +/** + * General callback function for all interface messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_if_rx_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * nss_get_state + * Gets the NSS state. + * + * @param[in] nss_ctx Pointer to the NSS context. + * + * @return + * NSS state. 
+ */ +extern nss_state_t nss_get_state(void *nss_ctx); + +#endif /*__KERNEL__ */ + +/* + * Once Everything is arranged correctly, will be placed at top + */ + +/** + *@} + */ + +#endif /** __NSS_API_IF_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_bridge.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_bridge.h new file mode 100644 index 000000000..783696912 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_bridge.h @@ -0,0 +1,362 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2018,2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_bridge.h + * NSS Bridge interface definitions. + */ + +#ifndef __NSS_BRIDGE_H +#define __NSS_BRIDGE_H + +/** + * @addtogroup nss_bridge_subsystem + * @{ + */ + +/** + * nss_bridge_msg_types + * Message types for the NSS bridge. + */ +enum nss_bridge_msg_types { + NSS_BRIDGE_MSG_JOIN = NSS_IF_MAX_MSG_TYPES + 1, + NSS_BRIDGE_MSG_LEAVE, + NSS_BRIDGE_MSG_SET_FDB_LEARN, + NSS_BRIDGE_MSG_TYPE_MAX, +}; + +/** + * nss_bridge_error_types + * Error types for the NSS bridge. 
+ */ +enum nss_bridge_error_types { + NSS_BRIDGE_ERROR_UNKNOWN_MSG = NSS_IF_ERROR_TYPE_MAX + 1, + NSS_BRIDGE_ERROR_TYPE_MAX, +}; + +/** + * nss_bridge_fdb_learn_mode + * FDB learning mode for the NSS bridge. + */ +enum nss_bridge_fdb_learn_mode { + NSS_BRIDGE_FDB_LEARN_ENABLE, + NSS_BRIDGE_FDB_LEARN_DISABLE, + NSS_BRIDGE_FDB_LEARN_MODE_MAX, +}; + +/** + * nss_bridge_join_msg + * Information for joining the bridge. + */ +struct nss_bridge_join_msg { + uint32_t if_num; /**< NSS interface to add to a bridge. */ +}; + +/** + * nss_bridge_leave_msg + * Information for leaving the bridge. + */ +struct nss_bridge_leave_msg { + uint32_t if_num; /**< NSS interface to remove from a bridge. */ +}; + +/** + * nss_bridge_set_fdb_learn_msg + * Information for FDB learning status on bridge interface. + */ +struct nss_bridge_set_fdb_learn_msg { + uint32_t mode; /**< FDB learning mode of bridge interface. */ +}; + +/** + * nss_bridge_msg + * Data for sending and receiving bridge interface messages. + */ +struct nss_bridge_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a bridge interface message. + */ + union { + union nss_if_msgs if_msg; + /**< NSS interface base message. */ + struct nss_bridge_join_msg br_join; + /**< Join the bridge. */ + struct nss_bridge_leave_msg br_leave; + /**< Leave the bridge. */ + struct nss_bridge_set_fdb_learn_msg fdb_learn; + /**< FDB learning status of bridge. */ + } msg; /**< Message payload. */ +}; + +/** + * nss_bridge_verify_if_num + * Verifies if the interface is type bridge. + * + * @param[in] if_num Interface number to be verified. + * + * @return + * True if if_num is of type bridge. + */ +bool nss_bridge_verify_if_num(uint32_t if_num); + +/** + * nss_bridge_tx_msg + * Sends bridge messages to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_bridge_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. 
+ * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_bridge_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_bridge_msg *msg); + +/** + * nss_bridge_tx_msg_sync + * Sends bridge messages synchronously to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_bridge_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in,out] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_bridge_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_bridge_msg *msg); + +/** + * nss_bridge_msg_init + * Initializes a bridge message. + * + * @datatypes + * nss_bridge_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num Interface number + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +void nss_bridge_msg_init(struct nss_bridge_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** + * nss_bridge_get_context + * Gets the bridge context used in nss_bridge_tx. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_bridge_get_context(void); + +/** + * Callback function for receiving bridge data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_bridge_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for receiving bridge messages. + * + * @datatypes + * nss_bridge_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. 
+ */ +typedef void (*nss_bridge_msg_callback_t)(void *app_data, struct nss_bridge_msg *msg); + +/** + * nss_bridge_register + * Registers the bridge interface with the NSS for sending and receiving + * messages. + * + * @param[in] if_num NSS interface number. + * @param[in] netdev Pointer to the associated network device. + * @param[in] bridge_data_cb Callback for the bridge data. + * @param[in] bridge_msg_cb Callback for the bridge message. + * @param[in] features Data socket buffer types supported by this interface. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_bridge_register(uint32_t if_num, struct net_device *netdev, nss_bridge_callback_t bridge_data_cb, nss_bridge_msg_callback_t bridge_msg_cb, uint32_t features, void *app_data); + +/** + * nss_bridge_unregister + * Deregisters the bridge interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + */ +void nss_bridge_unregister(uint32_t if_num); + +/** + * nss_bridge_notify_register + * Registers a notifier callback for bridge messages with the NSS. + * + * @datatypes + * nss_bridge_msg_callback_t + * + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_bridge_notify_register(nss_bridge_msg_callback_t cb, void *app_data); + +/** + * nss_bridge_notify_unregister + * Deregisters a bridge message notifier callback from the NSS. + * + * @return + * None. + */ +void nss_bridge_notify_unregister(void); + +/** + * nss_bridge_tx_set_mtu_msg + * Sends a message to the bridge to set the MTU. + * + * @param[in] bridge_if_num Interface number of the bridge. + * @param[in] mtu MTU value to set. + * + * @return + * Status of the Tx operation. 
+ */ +nss_tx_status_t nss_bridge_tx_set_mtu_msg(uint32_t bridge_if_num, uint32_t mtu); + +/** + * nss_bridge_tx_set_mac_addr_msg + * Sends a message to the bridge to set the MAC address. + * + * @param[in] bridge_if_num Interface number of the bridge. + * @param[in] addr Pointer to the MAC address. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_bridge_tx_set_mac_addr_msg(uint32_t bridge_if_num, uint8_t *addr); + +/** + * nss_bridge_tx_join_msg + * Sends the bridge a message to join with a slave interface. + * + * @datatypes + * net_device + * + * @param[in] bridge_if_num Interface number of the bridge. + * @param[in] netdev Pointer to the associated network device (the + * slave interface). + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_bridge_tx_join_msg(uint32_t bridge_if_num, struct net_device *netdev); + +/** + * nss_bridge_tx_leave_msg + * Sends the bridge a message that the slave interface is leaving the bridge. + * + * @datatypes + * net_device + * + * @param[in] bridge_if_num Interface number of the bridge. + * @param[in] netdev Pointer to the associated network device (the + * slave interface). + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_bridge_tx_leave_msg(uint32_t bridge_if_num, struct net_device *netdev); + +/** + * nss_bridge_tx_vsi_assign_msg + * Sends the bridge a message to assign a VSI. + * + * @param[in] if_num Interface number of the bridge. + * @param[in] vsi VSI to assign. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_bridge_tx_vsi_assign_msg(uint32_t if_num, uint32_t vsi); + +/** + * nss_bridge_tx_vsi_unassign_msg + * Sends the bridge a message to unassign a VSI. + * + * @param[in] if_num Interface number of the bridge. + * @param[in] vsi VSI to unassign. + * + * @return + * Status of the Tx operation. 
+ */ +nss_tx_status_t nss_bridge_tx_vsi_unassign_msg(uint32_t if_num, uint32_t vsi); + +/** + * nss_bridge_tx_set_fdb_learn_msg + * Sends a message to notify NSS about FDB learning enable/disable event. + * + * @datatypes + * nss_bridge_fdb_learn_mode + * + * @param[in] bridge_if_num Interface number of the bridge. + * @param[in] fdb_learn FDB learning disable/enable. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_bridge_tx_set_fdb_learn_msg(uint32_t bridge_if_num, enum nss_bridge_fdb_learn_mode fdb_learn); + +/** + * nss_bridge_init + * Initializes the bridge. + * + * @return + * None. + */ +void nss_bridge_init(void); + +/** + * @} + */ + +#endif /* __NSS_BRIDGE_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_c2c_rx.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_c2c_rx.h new file mode 100644 index 000000000..5605abd99 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_c2c_rx.h @@ -0,0 +1,86 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/** + * @file nss_c2c_rx.h + * NSS core-to-core reception interface definitions. + */ + +#ifndef __NSS_C2C_RX_H +#define __NSS_C2C_RX_H + +/** + * @addtogroup nss_c2c_rx_subsystem + * @{ + */ + +/** + * nss_c2c_rx_stats_types + * Core-to-core reception node statistics. + */ +enum nss_c2c_rx_stats_types { + NSS_C2C_RX_STATS_PBUF_SIMPLE = NSS_STATS_NODE_MAX, + /**< Number of received simple pbufs. */ + NSS_C2C_RX_STATS_PBUF_SG, /**< Number of scatter-gather pbufs received. */ + NSS_C2C_RX_STATS_PBUF_RETURNING, /**< Number of returning scatter-gather pbufs. */ + NSS_C2C_RX_STATS_INVAL_DEST, /**< Number of pbuf enqueue failures because of destination is invalid. */ + NSS_C2C_RX_STATS_MAX, /**< Maximum message type. */ +}; + +/** + * nss_c2c_rx_stats_notification + * Core-to-core reception statistics structure. + */ +struct nss_c2c_rx_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint64_t stats[NSS_C2C_RX_STATS_MAX]; /**< Core-to-core reception statistics. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ +/** + * nss_c2c_rx_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_c2c_rx_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_c2c_rx_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. 
+ */ +extern int nss_c2c_rx_stats_unregister_notifier(struct notifier_block *nb); +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_C2C_RX_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_c2c_tx.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_c2c_tx.h new file mode 100644 index 000000000..a9ecc8ce0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_c2c_tx.h @@ -0,0 +1,308 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_c2c_tx.h + * NSS core-to-core transmission interface definitions. + */ + +#ifndef __NSS_C2C_TX_H +#define __NSS_C2C_TX_H + +/** + * @addtogroup nss_c2c_tx_subsystem + * @{ + */ + +/** + * nss_c2c_tx_msg_type + * Supported message types. + */ +enum nss_c2c_tx_msg_type { + NSS_C2C_TX_MSG_TYPE_STATS, /**< Statistics synchronization. */ + NSS_C2C_TX_MSG_TYPE_TX_MAP, /**< Open engine synchronization. */ + NSS_C2C_TX_MSG_TYPE_PERFORMANCE_TEST, /**< Performance test. */ + NSS_C2C_TX_MSG_TYPE_MAX /**< Maximum message type. */ +}; + +/** + * nss_c2c_tx_msg_error + * Message error types. 
+ */ +enum nss_c2c_tx_msg_error { + NSS_C2C_TX_MSG_ERROR_NONE, /**< No error. */ + NSS_C2C_TX_MSG_ERROR_INVAL_OP, /**< Invalid operation. */ + NSS_C2C_TX_MSG_ERROR_INVALID_TEST_ID, /**< Invalid test ID. */ + NSS_C2C_TX_MSG_ERROR_MAX /**< Maximum error type. */ +}; + +/** + * nss_c2c_tx_test_type + * Supported core-to core transmission tests. + */ +enum nss_c2c_tx_test_type { + NSS_C2C_TX_TEST_TYPE_SIMPLE = 1, + /**< Tests the performance of simple pbufs. */ + NSS_C2C_TX_TEST_TYPE_SG_CHAIN, + /**< Tests the performance of scatter-gather chain pbufs. */ + NSS_C2C_TX_TEST_TYPE_SG_REF, + /**< Tests the performance of scatter-gather pbuf that has references. */ + NSS_C2C_TX_TEST_TYPE_SG_REFED, + /**< Tests the performance of referenced pbuf. */ + NSS_C2C_TX_TEST_TYPE_MAX + /**< Maximum message type. */ +}; + +/** + * nss_c2c_tx_stats_types + * Core-to-core transmission node statistics. + */ +enum nss_c2c_tx_stats_types { + NSS_C2C_TX_STATS_PBUF_SIMPLE = NSS_STATS_NODE_MAX, + /**< Number of received simple pbuf. */ + NSS_C2C_TX_STATS_PBUF_SG, /**< Number of scatter-gather pbuf received. */ + NSS_C2C_TX_STATS_PBUF_RETURNING, /**< Number of returning scatter-gather pbuf. */ + NSS_C2C_TX_STATS_MAX, /**< Maximum message type. */ +}; + +/** + * nss_c2c_tx_stats_notification + * Core-to-core transmission statistics structure. + */ +struct nss_c2c_tx_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint64_t stats[NSS_C2C_TX_STATS_MAX]; /**< Core-to-core transmission statistics. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ +/** + * nss_c2c_tx_map + * Core-to-core transmission queue address and interrupt address. + */ +struct nss_c2c_tx_map { + uint32_t tx_map; /**< Peer core core-to-core receiver queue start address. */ + uint32_t c2c_intr_addr; /**< Peer core core-to-core interrupt register address. */ +}; + +/** + * nss_c2c_tx_stats + * The NSS core-to-core transmission node statistics structure. 
+ */ +struct nss_c2c_tx_stats { + struct nss_cmn_node_stats node_stats; + /**< Common node statistics for core-to-core transmissions. */ + uint32_t pbuf_simple; /**< Number of received simple pbuf. */ + uint32_t pbuf_sg; /**< Number of scattered/gathered pbuf received. */ + uint32_t pbuf_returning; /**< Number of returning scattered/gathered pbuf. */ +}; + +/** + * nss_c2c_tx_test + * Start performance test for the given test ID. + */ +struct nss_c2c_tx_test { + uint32_t test_id; /**< ID of the core-to-core communication test. */ +}; + +/** + * nss_c2c_tx_msg + * Message structure to send/receive core-to-core transmission commands. + */ +struct nss_c2c_tx_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a NSS core-to-core transmission rule or statistics message. + */ + union { + struct nss_c2c_tx_map map; /**< Core-to-core transmissions memory map. */ + struct nss_c2c_tx_stats stats; /**< Core-to-core transmissions statistics. */ + struct nss_c2c_tx_test test; /**< Core-to-core performance test. */ + } msg; /**< Message payload. */ +}; + +/** + * nss_c2c_tx_register_handler + * Registers the core-to-core transmissions message handler. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * + * @return + * None. + */ +void nss_c2c_tx_register_handler(struct nss_ctx_instance *nss_ctx); + +/** + * Callback function for receiving core-to-core transmissions messages. + * + * @datatypes + * nss_c2c_tx_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_c2c_tx_msg_callback_t)(void *app_data, struct nss_c2c_tx_msg *msg); + +/** + * nss_c2c_tx_tx_msg + * Transmits a core-to-core transmissions message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_c2c_tx_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] nctm Pointer to the message data. 
+ * + * @return + * Status of the transmit operation. + */ +extern nss_tx_status_t nss_c2c_tx_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_c2c_tx_msg *nctm); + +/** + * nss_c2c_tx_msg_init + * Initializes core-to-core transmissions messages. + * + * @datatypes + * nss_c2c_tx_msg \n + * nss_c2c_tx_msg_callback_t + * + * @param[in] nct Pointer to the NSS interface message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_c2c_tx_msg_init(struct nss_c2c_tx_msg *nct, uint16_t if_num, uint32_t type, uint32_t len, + nss_c2c_tx_msg_callback_t cb, void *app_data); + +/** + * nss_c2c_tx_notify_register + * Registers a notifier callback for core-to-core transmission messages with the NSS. + * + * @datatypes + * nss_c2c_tx_msg_callback_t + * + * @param[in] core NSS core number index to the notifier callback table. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_c2c_tx_notify_register(int core, nss_c2c_tx_msg_callback_t cb, void *app_data); + +/** + * nss_c2c_tx_notify_unregister + * Deregisters a core-to-core transmission message notifier callback from the NSS. + * + * @param[in] core NSS core number index to the notifier callback table. + * + * @return + * None. + * + * @dependencies + * The notifier callback must have been previously registered. + */ +void nss_c2c_tx_notify_unregister(int core); + +/** + * nss_c2c_tx_msg_cfg_map + * Sends core-to-core transmissions map to NSS + * + * @datatypes + * nss_ctx_instance \n + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] tx_map Peer core core-to-core receiver queue start address. 
+ * @param[in] c2c_addr Peer core core-to-core interrupt register address. + * + * @return + * Status of the transmit operation. + */ +extern nss_tx_status_t nss_c2c_tx_msg_cfg_map(struct nss_ctx_instance *nss_ctx, uint32_t tx_map, uint32_t c2c_addr); + +/** + * nss_c2c_tx_register_sysctl + * Registers the core-to-core transmission sysctl entries to the sysctl tree. + * + * @return + * None. + */ +extern void nss_c2c_tx_register_sysctl(void); + +/** + * nss_c2c_tx_unregister_sysctl + * Deregisters the core-to-core transmission sysctl entries from the sysctl tree. + * + * @return + * None. + * + * @dependencies + * The system control must have been previously registered. + */ +extern void nss_c2c_tx_unregister_sysctl(void); + +/** + * nss_c2c_tx_init + * Initializes the core-to-core transmission. + * + * @return + * None. + */ +void nss_c2c_tx_init(void); + +/** + * nss_c2c_tx_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_c2c_tx_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_c2c_tx_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_c2c_tx_stats_unregister_notifier(struct notifier_block *nb); +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_C2C_TX_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_capwap.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_capwap.h new file mode 100644 index 000000000..525fff524 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_capwap.h @@ -0,0 +1,659 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * @file nss_capwap.h + * NSS CAPWAP interface definitions. + */ + +#ifndef __NSS_CAPWAP_H +#define __NSS_CAPWAP_H + +/** + * @addtogroup nss_capwap_subsystem + * @{ + */ + +/** + * Size of the headroom required for CAPWAP packets. + */ +#define NSS_CAPWAP_HEADROOM 256 + +/** + * nss_capwap_stats_encap_types + * CAPWAP encapsulation statistics. + */ +enum nss_capwap_stats_encap_types { + NSS_CAPWAP_STATS_ENCAP_TX_PKTS, + NSS_CAPWAP_STATS_ENCAP_TX_BYTES, + NSS_CAPWAP_STATS_ENCAP_TX_SEGMENTS, + NSS_CAPWAP_STATS_ENCAP_TX_DROP_SG_REF, + NSS_CAPWAP_STATS_ENCAP_TX_DROP_VER_MISMATCH, + NSS_CAPWAP_STATS_ENCAP_TX_DROP_UNALIGN, + NSS_CAPWAP_STATS_ENCAP_TX_DROP_HEADER_ROOM, + NSS_CAPWAP_STATS_ENCAP_TX_DROP_DTLS, + NSS_CAPWAP_STATS_ENCAP_TX_DROP_NWIRELESS, + NSS_CAPWAP_STATS_ENCAP_TX_DROP_QUEUE_FULL, + NSS_CAPWAP_STATS_ENCAP_TX_DROP_MEM_FAIL, + NSS_CAPWAP_STATS_ENCAP_FAST_MEM, + NSS_CAPWAP_STATS_ENCAP_MAX +}; + +/** + * nss_capwap_stats_decap_types + * CAPWAP decapsulation statistics. 
+ */ +enum nss_capwap_stats_decap_types { + NSS_CAPWAP_STATS_DECAP_RX_PKTS, + NSS_CAPWAP_STATS_DECAP_RX_BYTES, + NSS_CAPWAP_STATS_DECAP_RX_DTLS_PKTS, + NSS_CAPWAP_STATS_DECAP_RX_SEGMENTS, + NSS_CAPWAP_STATS_DECAP_RX_DROP, + NSS_CAPWAP_STATS_DECAP_RX_DROP_OVERSIZE, + NSS_CAPWAP_STATS_DECAP_RX_DROP_FRAG_TIMEOUT, + NSS_CAPWAP_STATS_DECAP_RX_DROP_DUP_FRAG, + NSS_CAPWAP_STATS_DECAP_RX_DROP_FRAG_GAP, + NSS_CAPWAP_STATS_DECAP_RX_DROP_QUEUE_FULL, + NSS_CAPWAP_STATS_DECAP_RX_DROP_N2H_QUEUE_FULL, + NSS_CAPWAP_STATS_DECAP_RX_DROP_MEM_FAIL, + NSS_CAPWAP_STATS_DECAP_RX_DROP_CHECKSUM, + NSS_CAPWAP_STATS_DECAP_RX_MALFORMED, + NSS_CAPWAP_STATS_DECAP_FAST_MEM, + NSS_CAPWAP_STATS_DECAP_MAX +}; + +/** + * nss_capwap_msg_type + * CAPWAP message types. + */ +typedef enum nss_capwap_msg_type { + NSS_CAPWAP_MSG_TYPE_NONE, + NSS_CAPWAP_MSG_TYPE_CFG_RULE, + NSS_CAPWAP_MSG_TYPE_UNCFG_RULE, + NSS_CAPWAP_MSG_TYPE_ENABLE_TUNNEL, + NSS_CAPWAP_MSG_TYPE_DISABLE_TUNNEL, + NSS_CAPWAP_MSG_TYPE_UPDATE_PATH_MTU, + NSS_CAPWAP_MSG_TYPE_SYNC_STATS, + NSS_CAPWAP_MSG_TYPE_VERSION, /**< Default is version 1. */ + NSS_CAPWAP_MSG_TYPE_DTLS, + NSS_CAPWAP_MSG_TYPE_FLOW_RULE_ADD, + NSS_CAPWAP_MSG_TYPE_FLOW_RULE_DEL, + NSS_CAPWAP_MSG_TYPE_MAX, +} nss_capwap_msg_type_t; + +/** + * nss_capwap_msg_response + * Error types for CAPWAP responses to messages from the host. 
+ */ +typedef enum nss_capwap_msg_response { + NSS_CAPWAP_ERROR_MSG_INVALID_REASSEMBLY_TIMEOUT, + NSS_CAPWAP_ERROR_MSG_INVALID_PATH_MTU, + NSS_CAPWAP_ERROR_MSG_INVALID_MAX_FRAGMENT, + NSS_CAPWAP_ERROR_MSG_INVALID_BUFFER_SIZE, + NSS_CAPWAP_ERROR_MSG_INVALID_L3_PROTO, + NSS_CAPWAP_ERROR_MSG_INVALID_UDP_PROTO, + NSS_CAPWAP_ERROR_MSG_INVALID_VERSION, + NSS_CAPWAP_ERROR_MSG_TUNNEL_DISABLED, + NSS_CAPWAP_ERROR_MSG_TUNNEL_ENABLED, + NSS_CAPWAP_ERROR_MSG_TUNNEL_NOT_CFG, + NSS_CAPWAP_ERROR_MSG_INVALID_IP_NODE, + NSS_CAPWAP_ERROR_MSG_INVALID_TYPE_FLAG, + NSS_CAPWAP_ERROR_MSG_INVALID_DTLS_CFG, + NSS_CAPWAP_ERROR_MSG_FLOW_TABLE_FULL, + NSS_CAPWAP_ERROR_MSG_FLOW_EXIST, + NSS_CAPWAP_ERROR_MSG_FLOW_NOT_EXIST, + NSS_CAPWAP_ERROR_MSG_MAX, +} nss_capwap_msg_response_t; + +/** + * nss_capwap_stats_msg + * Per-tunnel statistics messages from the NSS firmware. + */ +struct nss_capwap_stats_msg { + struct nss_cmn_node_stats pnode_stats; /**< Common firmware statistics. */ + uint32_t dtls_pkts; /**< Number of DTLS packets flowing through. */ + + /* + * Rx/decap stats + */ + uint32_t rx_dup_frag; /**< Number of duplicate fragments. */ + uint32_t rx_segments; /**< Number of segments or fragments. */ + + /** + * Packets dropped because they are larger than the payload size. + */ + uint32_t rx_oversize_drops; + + uint32_t rx_frag_timeout_drops; + /**< Packets dropped because of a reassembly timeout. */ + uint32_t rx_n2h_drops; + /**< Packets dropped because of error in packet processing. */ + uint32_t rx_n2h_queue_full_drops; + /**< Packets dropped because the NSS-to-host queue is full. */ + uint32_t rx_csum_drops; + /**< Packets dropped because of a checksum mismatch. */ + uint32_t rx_malformed; + /**< Packets dropped because of a malformed packet. */ + uint32_t rx_mem_failure_drops; + /**< Packets dropped because of a memory failure. */ + uint32_t rx_frag_gap_drops; + /**< Packets dropped because of a non-sequential fragment offset. 
*/ + + /* + * Tx/encap stats + */ + uint32_t tx_segments; /**< Number of segments or fragments. */ + uint32_t tx_queue_full_drops; /**< Packets dropped because of a full queue. */ + uint32_t tx_mem_failure_drops; + /**< Packets dropped because of a memory failure. */ + uint32_t tx_dropped_sg_ref; + /**< Packets dropped because of a scatter-gather reference. */ + uint32_t tx_dropped_ver_mis; + /**< Packets dropped because of a version mismatch. */ + uint32_t Reserved; + /**< Reserved. */ + uint32_t tx_dropped_hroom; + /**< Packets dropped because of insufficent headroom. */ + uint32_t tx_dropped_dtls; + /**< Packets dropped because of a DTLS packet. */ + uint32_t tx_dropped_nwireless; + /**< Packets dropped because the nwireless field information is wrong. */ + + uint32_t fast_mem; + /**< Set to 1 when tunnel is operating in fast memory. */ +}; + +/** + * nss_capwap_ip + * IP versions. + */ +struct nss_capwap_ip { + /** + * Union of IPv4 and IPv6 IP addresses. + */ + union { + uint32_t ipv4; /**< IPv4 address. */ + uint32_t ipv6[4]; /**< IPv6 address. */ + } ip; /**< Union of IPv4 and IPv6 IP addresses. */ +}; + +/** + * nss_capwap_encap_rule + * Encapsulation information for a CAPWAP tunnel. + */ +struct nss_capwap_encap_rule { + struct nss_capwap_ip src_ip; /**< Source IP. */ + uint32_t src_port; /**< Source port. */ + struct nss_capwap_ip dest_ip; /**< Destination IP. */ + uint32_t dest_port; /**< Destination port. */ + uint32_t path_mtu; /**< MTU on the path. */ +}; + +/** + * nss_capwap_decap_rule + * Decapsulation information for a CAPWAP tunnel. + */ +struct nss_capwap_decap_rule { + uint32_t reassembly_timeout; /**< Timeout in milliseconds. */ + uint32_t max_fragments; /**< Maximum number of fragments expected. */ + uint32_t max_buffer_size; /**< Maximum size of the payload buffer. */ +}; + +/** + * nss_capwap_rule_msg + * CAPWAP rule message. + * + * The same rule structure applies for both encapsulation and decapsulation + * in a tunnel. 
+ */ +struct nss_capwap_rule_msg { + struct nss_capwap_encap_rule encap; /**< Encapsulation portion of the rule. */ + struct nss_capwap_decap_rule decap; /**< Decapsulation portion of the rule. */ + uint32_t stats_timer; /**< Statistics interval timer in milliseconds. */ + + /** + * Core to choose for receiving packets. + * + * Set to -1 for the NSS firmware to decide. + */ + int8_t rps; + + uint8_t type_flags; /**< VLAN or PPPOE is configured. */ + uint8_t l3_proto; + /**< Prototype is NSS_CAPWAP_TUNNEL_IPV4 or NSS_CAPWAP_TUNNEL_IPV6. */ + uint8_t which_udp; /**< Tunnel uses the UDP or UDPLite protocol. */ + uint32_t mtu_adjust; /**< MTU is reserved for a DTLS process. */ + uint32_t gmac_ifnum; /**< Outgoing physical interface. */ + uint32_t enabled_features; + /**< Tunnel enabled features bit flag. */ + + /* + * Parameters for each features + */ + uint32_t dtls_inner_if_num; /**< Interface number of the associated DTLS node. */ + uint8_t bssid[ETH_ALEN]; /**< BSSID value. */ + uint16_t outer_sgt_value; + /**< Security Group Tag value configured for this tunnel. */ +}; + +/** + * nss_capwap_version_msg + * Message to set the CAPWAP version. + */ +struct nss_capwap_version_msg { + uint32_t version; /**< CAPWAP protocol version. */ +}; + +/** + * nss_capwap_path_mtu_msg + * Message information for the path MTU. + */ +struct nss_capwap_path_mtu_msg { + uint32_t path_mtu; /**< Path MTU value between the controller and access point. */ +}; + +/** + * nss_capwap_dtls_msg + * DTLS message information. + */ +struct nss_capwap_dtls_msg { + uint32_t enable; /**< Enable or disable DTLS. */ + uint32_t dtls_inner_if_num; /**< Interface number of the associated DTLS. */ + uint32_t mtu_adjust; /**< MTU adjustment reported by the DTLS node. */ + uint32_t reserved; /**< Reserved field for future use. */ +}; + +/** + * nss_capwap_flow_rule_msg + * CAPWAP flow rule message structure. + */ +struct nss_capwap_flow_rule_msg { + /* + * 5-tuple info. 
+ */ + uint16_t ip_version; /**< IP version. */ + uint16_t protocol; /**< Layer 4 protocol. */ + uint16_t src_port; /**< Source port. */ + uint16_t dst_port; /**< Destination port. */ + uint32_t src_ip[4]; /**< Source IP address. */ + uint32_t dst_ip[4]; /**< Destination IP address. */ + + /* + * Flow attributes. + */ + uint32_t flow_id; /**< Flow identification. */ +}; + +/** + * nss_capwap_enable_tunnel_msg + * Structure to update sibling interface number. + */ +struct nss_capwap_enable_tunnel_msg { + uint32_t sibling_if_num; /**< Sibling interface Number. */ +}; + +/** + * nss_capwap_msg + * Data for sending and receiving CAPWAP messages. + */ +struct nss_capwap_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a CAPWAP common message. + */ + union { + struct nss_capwap_rule_msg rule; + /**< Rule information. */ + struct nss_capwap_path_mtu_msg mtu; + /**< New MTU information. */ + struct nss_capwap_stats_msg stats; + /**< CAPWAP statistics. */ + struct nss_capwap_version_msg version; + /**< CAPWAP version to use. */ + struct nss_capwap_dtls_msg dtls; + /**< DTLS configuration. */ + struct nss_capwap_flow_rule_msg flow_rule_add; + /**< Flow rule add message. */ + struct nss_capwap_flow_rule_msg flow_rule_del; + /**< Flow rule delete message. */ + struct nss_capwap_enable_tunnel_msg enable_tunnel; + /**< Enable tunnel message. */ + } msg; /**< Message payload. */ +}; + +/** + * nss_capwap_pn_stats + * Pnode statistics (64-bit version). + */ +struct nss_capwap_pn_stats { + uint64_t rx_packets; /**< Number of packets received. */ + uint64_t rx_bytes; /**< Number of bytes received. */ + uint64_t rx_dropped; /**< Number of dropped Rx packets. */ + uint64_t tx_packets; /**< Number of packets transmitted. */ + uint64_t tx_bytes; /**< Number of bytes transmitted. */ +}; + +/** + * nss_capwap_tunnel_stats + * Per-tunnel statistics seen by the HLOS. 
+ */ +struct nss_capwap_tunnel_stats { + struct nss_capwap_pn_stats pnode_stats; /**< Common firmware statistics. */ + uint64_t dtls_pkts; /**< Number of DTLS packets flowing through. */ + + /* + * Rx/decap stats + */ + uint64_t rx_dup_frag; /**< Number of duplicate fragments. */ + uint64_t rx_segments; /**< Number of segments or fragments. */ + + /** + * Packets dropped because they are larger than the payload size. + */ + uint64_t rx_oversize_drops; + + uint64_t rx_frag_timeout_drops; + /**< Packets dropped because of a reassembly timeout. */ + uint64_t rx_n2h_drops; + /**< Packets dropped because of error in processing the packet. */ + uint64_t rx_n2h_queue_full_drops; + /**< Packets dropped because the NSS-to-host queue is full. */ + uint64_t rx_csum_drops; + /**< Packets dropped because of a checksum mismatch. */ + uint64_t rx_malformed; + /**< Packets dropped because of a malformed packet. */ + uint64_t rx_mem_failure_drops; + /**< Packets dropped because of a memory failure. */ + uint64_t rx_frag_gap_drops; + /**< Packets dropped because of a non-sequential fragment offset. */ + + /* + * Tx/encap stats + */ + uint64_t tx_dropped_inner; /**id + 1) << NSS_CORE_ID_SHIFT)) + +/** + * Macro to obtain a core local interface number. + */ +#define NSS_INTERFACE_NUM_GET(interface) ((interface) & 0xffffff) + +/** + * Macro to obtain an interface core number. + */ +#define NSS_INTERFACE_NUM_GET_COREID(interface) ((interface >> NSS_CORE_ID_SHIFT) & 0xff) + +/* + * Common enumerations. + */ + +/** + * nss_tx_status_t + * Tx command failure results. + * + * Types starting with NSS_TX_FAILURE_SYNC_ are only used by synchronous messages. 
+ */ +typedef enum { + NSS_TX_SUCCESS = 0, + NSS_TX_FAILURE, + NSS_TX_FAILURE_QUEUE, + NSS_TX_FAILURE_NOT_READY, + NSS_TX_FAILURE_TOO_LARGE, + NSS_TX_FAILURE_TOO_SHORT, + NSS_TX_FAILURE_NOT_SUPPORTED, + NSS_TX_FAILURE_BAD_PARAM, + NSS_TX_FAILURE_NOT_ENABLED, + NSS_TX_FAILURE_SYNC_BAD_PARAM, + NSS_TX_FAILURE_SYNC_TIMEOUT, + NSS_TX_FAILURE_SYNC_FW_ERR, + NSS_TX_FAILURE_MAX, +} nss_tx_status_t; + +/** + * nss_state_t + * Initialization states. + */ +typedef enum { + NSS_STATE_UNINITIALIZED = 0, + NSS_STATE_INITIALIZED +} nss_state_t; + +/** + * nss_core_id_t + * NSS core IDs. + */ +typedef enum { + NSS_CORE_0 = 0, + NSS_CORE_1, + NSS_CORE_MAX +} nss_core_id_t; + +/** + * nss_cb_register_status_t + * Callback registration states. + */ +typedef enum { + NSS_CB_REGISTER_SUCCESS = 0, + NSS_CB_REGISTER_FAILED, +} nss_cb_register_status_t; + +/** + * nss_cb_unregister_status_t + * Callback deregistration states. + */ +typedef enum { + NSS_CB_UNREGISTER_SUCCESS = 0, + NSS_CB_UNREGISTER_FAILED, +} nss_cb_unregister_status_t; + +/** + * nss_cmn_response + * Responses for a common message. + */ +enum nss_cmn_response { + NSS_CMN_RESPONSE_ACK, + NSS_CMN_RESPONSE_EVERSION, + NSS_CMN_RESPONSE_EINTERFACE, + NSS_CMN_RESPONSE_ELENGTH, + NSS_CMN_RESPONSE_EMSG, + NSS_CMN_RESPONSE_NOTIFY, + NSS_CMN_RESPONSE_LAST +}; + +/** + * Array of log messages for common NSS responses. + */ +extern int8_t *nss_cmn_response_str[NSS_CMN_RESPONSE_LAST]; + +/** + * nss_cmn_msg + * Common message information. + */ +struct nss_cmn_msg { + uint16_t version; /**< Version ID for the main message format. */ + uint16_t len; /**< Length of the message, excluding the header. */ + uint32_t interface; /**< Primary key for all messages. */ + enum nss_cmn_response response; + /**< Primary response. All messages must specify one of these responses. */ + + uint32_t type; /**< Decentralized request number used to match response numbers. */ + uint32_t error; /**< Decentralized specific error message (response == EMSG). 
*/ + + /** + * Padding used to start the callback from a 64-bit boundary. This field can be reused. + */ + uint32_t reserved; + + nss_ptr_t cb; /**< Contains the callback pointer. */ +#ifndef __LP64__ + uint32_t padding1; /**< Padding used to fit 64 bits. Do not reuse. */ +#endif + nss_ptr_t app_data; /**< Contains the application data. */ +#ifndef __LP64__ + uint32_t padding2; /**< Padding used to fit 64 bits. Do not reuse. */ +#endif +}; + +/** + * nss_cmn_node_stats + * Common per-node statistics. + */ +struct nss_cmn_node_stats { + uint32_t rx_packets; /**< Number of packets received. */ + uint32_t rx_bytes; /**< Number of bytes received. */ + uint32_t tx_packets; /**< Number of packets transmitted. */ + uint32_t tx_bytes; /**< Number of bytes transmitted. */ + uint32_t rx_dropped[NSS_MAX_NUM_PRI]; /**< Packets dropped on receive due to queue full. */ +}; + +/** + * nss_cmn_get_msg_len + * Gets the message length of a host-to-NSS message. + * + * @datatypes + * nss_cmn_get_msg_len + * + * @param[in] ncm Pointer to the common message. + * + * @return + * Length of the message specified in the argument to this function. + */ +static inline uint32_t nss_cmn_get_msg_len(struct nss_cmn_msg *ncm) +{ + return ncm->len + sizeof(struct nss_cmn_msg); +} + +#ifdef __KERNEL__ /* only for kernel to use. */ + +/** + * nss_cmn_msg_init + * Initializes the common area of an asynchronous host-to-NSS message. + * + * @datatypes + * nss_cmn_msg + * + * @param[in,out] ncm Pointer to the common message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the callback function. + * @param[in] app_data Pointer to the application context for this message. + * + * @return + * None. 
+ */ +extern void nss_cmn_msg_init(struct nss_cmn_msg *ncm, uint32_t if_num, uint32_t type, uint32_t len, + void *cb, void *app_data); + +/** + * nss_cmn_msg_sync_init + * Initializes the common message of a synchronous host-to-NSS message. + * + * @datatypes + * nss_cmn_msg + * + * @param[in,out] ncm Pointer to the common message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * + * @return + * None. + */ +extern void nss_cmn_msg_sync_init(struct nss_cmn_msg *ncm, uint32_t if_num, uint32_t type, uint32_t len); + +/** + * nss_cmn_get_interface_number + * Gets the interface number. + * + * @datatypes + * nss_ctx_instance \n + * net_device + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] dev Pointer to the OS network device pointer. + * + * @return + * Interface number. + */ +extern int32_t nss_cmn_get_interface_number(struct nss_ctx_instance *nss_ctx, struct net_device *dev); + +/** + * nss_cmn_get_interface_number_by_dev + * Gets the interface number of a device. + * + * @datatypes + * net_device + * + * @param[in] dev Pointer to the OS network device pointer. + * + * @return + * Interface number, or -1 on failure. + */ +extern int32_t nss_cmn_get_interface_number_by_dev(struct net_device *dev); + +/** + * nss_cmn_get_interface_number_by_dev_and_type + * Gets the interface number by a device and its type. + * + * @datatypes + * net_device + * + * @param[in] dev Pointer to the OS network device pointer. + * @param[in] type Type of this interface. + * + * @return + * Interface number, or -1 on failure. + */ +extern int32_t nss_cmn_get_interface_number_by_dev_and_type(struct net_device *dev, uint32_t type); + +/** + * nss_cmn_interface_is_redirect + * Determines if the interface number is a redirect interface. + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] interface_num NSS interface number. 
+ * + * @return + * TRUE if the number is a redirect interface. Otherwise FALSE. + */ +extern bool nss_cmn_interface_is_redirect(struct nss_ctx_instance *nss_ctx, int32_t interface_num); + +/** + * nss_cmn_append_core_id + * Append core ID on NSS interface number. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * + * @return + * Interface number with core ID. + */ +extern int nss_cmn_append_core_id(struct nss_ctx_instance *nss_ctx, int if_num); + +/** + * nss_cmn_get_interface_dev + * Gets an interface device pointer. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * + * @return + * Interface device pointer. + */ +extern struct net_device *nss_cmn_get_interface_dev(struct nss_ctx_instance *nss_ctx, uint32_t if_num); + +/** + * nss_cmn_get_state + * Obtains the NSS state. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * + * @return + * NSS state that indicates whether the NSS core is initialized. For possible values, see nss_state_t. + */ +extern nss_state_t nss_cmn_get_state(struct nss_ctx_instance *nss_ctx); + +/** + * Callback function for queue decongestion messages. + * + * @param[in] app_data Pointer to the application context for this message. + */ +typedef void (*nss_cmn_queue_decongestion_callback_t)(void *app_data); + +/** + * nss_cmn_register_queue_decongestion + * Registers a queue for a decongestion event. + * + * The callback function is called with the spinlock held. The function should avoid deadlocks + * caused by attempting to acquire multiple spinlocks. + + * @datatypes + * nss_ctx_instance \n + * nss_cmn_queue_decongestion_callback_t + * + * @param[in,out] nss_ctx Pointer to the NSS context. + * @param[in] event_callback Callback for the message. 
+ * @param[in] app_data Pointer to the application context to be returned in the + * callback. + * + * @return + * #NSS_CB_REGISTER_SUCCESS if registration is successful. + * @par + * Otherwise, #NSS_CB_REGISTER_FAILED. + */ +extern nss_cb_register_status_t nss_cmn_register_queue_decongestion(struct nss_ctx_instance *nss_ctx, nss_cmn_queue_decongestion_callback_t event_callback, void *app_data); + +/** + * nss_cmn_unregister_queue_decongestion + * Deregisters a queue from receiving a decongestion event. + * + * @datatypes + * nss_ctx_instance \n + * nss_cmn_queue_decongestion_callback_t + * + * @param[in,out] nss_ctx Pointer to the NSS context. + * @param[in] event_callback Callback for the message. + * + * @return + * #NSS_CB_REGISTER_SUCCESS if registration is successful. + * @par + * Otherwise, #NSS_CB_REGISTER_FAILED. + * + * @dependencies + * The callback function must have been previously registered. + */ +extern nss_cb_unregister_status_t nss_cmn_unregister_queue_decongestion(struct nss_ctx_instance *nss_ctx, nss_cmn_queue_decongestion_callback_t event_callback); + +/** + * Callback function for packets with service code. + * + * @param[in] app_data Pointer to the application context for this message. + * @param[in] nbuf Pointer to the socket buffer. + */ +typedef void (*nss_cmn_service_code_callback_t)(void *app_data, struct sk_buff *nbuf); + +/** + * nss_cmn_register_service_code + * Registers a callback for a service code. + * + * @datatypes + * nss_ctx_instance \n + * nss_cmn_service_code_callback_t + * + * @param[in,out] nss_ctx Pointer to the NSS context. + * @param[in] cb Callback for the message. + * @param[in] service_code Service code found attached to the packet. + * @param[in] app_data Pointer to the application context to be returned in the + * callback. + * + * @return + * #NSS_CB_REGISTER_SUCCESS if registration is successful. + * @par + * Otherwise, #NSS_CB_REGISTER_FAILED. 
+ */ +extern nss_cb_register_status_t nss_cmn_register_service_code(struct nss_ctx_instance *nss_ctx, nss_cmn_service_code_callback_t cb, uint8_t service_code, void *app_data); + +/** + * nss_cmn_unregister_service_code + * Deregisters a callback for the given service code. + * + * @datatypes + * nss_ctx_instance \n + * nss_cmn_service_code_callback_t + * + * @param[in,out] nss_ctx Pointer to the NSS context. + * @param[in] cb Callback for the message. + * @param[in] service_code Service code found attached to the packet. + * + * @return + * #NSS_CB_REGISTER_SUCCESS if registration is successful. + * @par + * Otherwise, #NSS_CB_REGISTER_FAILED. + * + * @dependencies + * The callback function must have been previously registered. + */ +extern nss_cb_unregister_status_t nss_cmn_unregister_service_code(struct nss_ctx_instance *nss_ctx, nss_cmn_service_code_callback_t cb, uint8_t service_code); + +/** + * nss_cmn_get_nss_enabled + * Checks whether the NSS mode is supported on the platform. + * + * @return + * TRUE if NSS is supported. \n + * Otherwise, FALSE. + */ +extern bool nss_cmn_get_nss_enabled(void); + +/** + * nss_cmn_rx_dropped_sum + * Sums dropped packet count of all NSS pnode queues. + * + * @datatypes + * nss_cmn_node_stats \n + * + * @param[in] node_stats Pointer to node statistics. + * + * @return + * Total dropped packets count. + */ +extern uint32_t nss_cmn_rx_dropped_sum(struct nss_cmn_node_stats *node_stats); + +#endif /* __KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_CMN_MSG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_crypto.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_crypto.h new file mode 100644 index 000000000..5ef514d6e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_crypto.h @@ -0,0 +1,392 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_crypto.h + * NSS Crypto interface definitions. + */ + +#ifndef __NSS_CRYPTO_H +#define __NSS_CRYPTO_H + +/** + * @addtogroup nss_crypto_subsystem + * @{ + */ + +#define NSS_CRYPTO_MAX_IDXS 64 /**< Maximum number of supported sessions. */ +#define NSS_CRYPTO_MAX_ENGINES 4 /**< Maximum number of engines available. */ +#define NSS_CRYPTO_BAM_PP 2 /**< Bus Access Manager pipe pairs. */ + +/** + * nss_crypto_hash + * Hash sizes supported by the hardware. + */ +enum nss_crypto_hash { + NSS_CRYPTO_HASH_SHA96 = 12, + NSS_CRYPTO_HASH_SHA128 = 16, + NSS_CRYPTO_HASH_SHA160 = 20, + NSS_CRYPTO_HASH_SHA256 = 32 +}; + +/** + * nss_crypto_cipher + * Cipher algorithms. + */ +enum nss_crypto_cipher { + NSS_CRYPTO_CIPHER_NONE = 0, + NSS_CRYPTO_CIPHER_AES_CBC, /**< AES, and CBC for 128-bit and 256-bit key sizes. */ + NSS_CRYPTO_CIPHER_DES, /**< DES, and CBC for 64-bit key size. */ + NSS_CRYPTO_CIPHER_NULL, /**< NULL and CBC. */ + NSS_CRYPTO_CIPHER_AES_CTR, /**< AES, and CTR for 128-bit and 256-bit key sizes. */ + NSS_CRYPTO_CIPHER_MAX +}; + +/** + * nss_crypto_auth + * Authentication algorithms. 
+ */ +enum nss_crypto_auth { + NSS_CRYPTO_AUTH_NONE = 0, + NSS_CRYPTO_AUTH_SHA1_HMAC, + NSS_CRYPTO_AUTH_SHA256_HMAC, + NSS_CRYPTO_AUTH_NULL, + NSS_CRYPTO_AUTH_MAX +}; + +/** + * nss_crypto_msg_type + * Synchronization types. + */ +enum nss_crypto_msg_type { + NSS_CRYPTO_MSG_TYPE_NONE = 0, + NSS_CRYPTO_MSG_TYPE_OPEN_ENG = 1, + NSS_CRYPTO_MSG_TYPE_CLOSE_ENG = 2, + NSS_CRYPTO_MSG_TYPE_UPDATE_SESSION = 3, + NSS_CRYPTO_MSG_TYPE_STATS = 4, + NSS_CRYPTO_MSG_TYPE_MAX +}; + +/** + * nss_crypto_msg_error + * Response types. + */ +enum nss_crypto_msg_error { + NSS_CRYPTO_MSG_ERROR_NONE = 0, + NSS_CRYPTO_MSG_ERROR_INVAL_ENG = 1, + NSS_CRYPTO_MSG_ERROR_UNSUPP_OP = 2, + NSS_CRYPTO_MSG_ERROR_INVAL_OP = 3, + NSS_CRYPTO_MSG_ERROR_INVAL_IDX_RANGE = 4, + NSS_CRYPTO_MSG_ERROR_IDX_ALLOC_FAIL = 5, + NSS_CRYPTO_MSG_ERROR_MAX +}; + +/** + * nss_crypto_session_state + * Session states. + */ +enum nss_crypto_session_state { + NSS_CRYPTO_SESSION_STATE_NONE = 0, + NSS_CRYPTO_SESSION_STATE_ACTIVE = 1, + NSS_CRYPTO_SESSION_STATE_FREE = 2 +}; + +/** + * nss_crypto_buf_origin + * Origins of the crypto session. + */ +enum nss_crypto_buf_origin { + NSS_CRYPTO_BUF_ORIGIN_HOST = 0x001, + NSS_CRYPTO_BUF_ORIGIN_NSS = 0x0002, +}; + +/** + * nss_crypto_idx + * Crypto session index information. + */ +struct nss_crypto_idx { + uint16_t pp_num; /**< Pipe pair index. */ + uint16_t cmd_len; /**< Command block length to program. */ + uint32_t cblk_paddr; /**< Physical address of the command block. */ +}; + +/** + * nss_crypto_config_eng + * Engine configuration information for opening the engine from the host. + * + * This structure is called to initialize the crypto NSS engine-specific data + * structures. Ideally, the host can send a single probe for all engines, but + * the current implementation relies on probes per engine. + */ +struct nss_crypto_config_eng { + uint32_t eng_id; /**< Engine number to open. */ + uint32_t bam_pbase; /**< BAM base address (physical). 
*/ + uint32_t crypto_pbase; /**< Crypto base address (physical). */ + uint32_t desc_paddr[NSS_CRYPTO_BAM_PP]; + /**< Pipe description address (physical). */ + struct nss_crypto_idx idx[NSS_CRYPTO_MAX_IDXS]; + /**< Allocated session indices. */ +}; + +/** + * nss_crypto_config_session + * Session-related state configuration. + */ +struct nss_crypto_config_session { + uint32_t idx; /**< Session index on which the state is reset. */ + uint32_t state; /**< Index state of the session. */ + uint32_t iv_len; /**< Length of the initialization vector. */ +}; + +/** + * nss_crypto_stats + * Crypto statistics. + */ +struct nss_crypto_stats { + uint32_t queued; /**< Number of frames waiting to be processed. */ + uint32_t completed; /**< Number of frames processed. */ + uint32_t dropped; /**< Number of frames dropped or not processed. */ +}; + +/** + * nss_crypto_sync_stats + * Statistics synchronized to the host. + */ +struct nss_crypto_sync_stats { + struct nss_crypto_stats eng_stats[NSS_CRYPTO_MAX_ENGINES]; + /**< Tx or Rx statistics captured per crypto engine. */ + struct nss_crypto_stats idx_stats[NSS_CRYPTO_MAX_IDXS]; + /**< Tx or Rx statistics captured per session. */ + struct nss_crypto_stats total; + /**< Total statistics captured in and out of the engine. */ +}; + +/** + * nss_crypto_msg + * Data for sending and receiving crypto messages. + */ +struct nss_crypto_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a crypto message. + */ + union { + struct nss_crypto_config_eng eng; + /**< Opens an engine. */ + struct nss_crypto_config_session session; + /**< Resets the statistics. */ + struct nss_crypto_sync_stats stats; + /**< Synchronized statistics for crypto. */ + } msg; /**< Message payload. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ + +/** + * Message notification callback. + * + * @datatypes + * nss_crypto_msg + * + * @param[in] app_data Pointer to the application context of the message. 
+ * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_crypto_msg_callback_t)(void *app_data, struct nss_crypto_msg *msg); + +/** + * Data callback. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_crypto_buf_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Power management event callback. + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] turbo Turbo mode event. + * @param[in] auto_scale Specifies the auto scaling of the NSS clock frequency. + * + * @return + * TRUE if crypto is scaled to turbo. + */ +typedef bool (*nss_crypto_pm_event_callback_t)(void *app_data, bool turbo, bool auto_scale); + +/** + * nss_crypto_tx_msg + * Sends a crypto message. + * + * @datatypes + * nss_ctx_instance \n + * nss_crypto_msg + * + * @param[in] nss_ctx Pointer to the NSS context of the HLOS driver. + * @param[in] msg Pointer to the message data. + * + * @return + * None. + */ +extern nss_tx_status_t nss_crypto_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_crypto_msg *msg); + +/** + * nss_crypto_tx_buf + * Sends a crypto data packet. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context of the HLOS driver + * @param[in] if_num NSS interface number. + * @param[in] skb Pointer to the data socket buffer. + * + * @return + * None. + */ +extern nss_tx_status_t nss_crypto_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, struct sk_buff *skb); + +/** + * nss_crypto_notify_register + * Registers an event callback handler with the HLOS driver. + * + * @datatypes + * nss_crypto_msg_callback_t + * + * @param[in] cb Callback function for the message. 
+ * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern struct nss_ctx_instance *nss_crypto_notify_register(nss_crypto_msg_callback_t cb, void *app_data); + +/** + * nss_crypto_data_register + * Registers a data callback handler with the HLOS driver. + * + * @datatypes + * nss_crypto_buf_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] cb Callback function for the data. + * @param[in] netdev Pointer to the network device. + * @param[in] features Data socket buffer types supported by this interface. + * + * @return + * None. + */ +extern struct nss_ctx_instance *nss_crypto_data_register(uint32_t if_num, nss_crypto_buf_callback_t cb, + struct net_device *netdev, uint32_t features); + +/** + * nss_crypto_pm_notify_register + * Registers a power management event callback handler with the HLOS driver. + * + * @datatypes + * nss_crypto_pm_event_callback_t + * + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_crypto_pm_notify_register(nss_crypto_pm_event_callback_t cb, void *app_data); + +/** + * nss_crypto_notify_unregister + * Deregisters an event callback handler notifier from the HLOS driver. + * + * @datatypes + * nss_ctx_instance + * + * @param[in,out] ctx Pointer to the context of the HLOS driver. + * + * @return + * None. + * + * @dependencies + * The event callback handler must have been previously registered. + */ +extern void nss_crypto_notify_unregister(struct nss_ctx_instance *ctx); + +/** + * nss_crypto_data_unregister + * Deregisters a data callback handler from the HLOS driver. + * + * @datatypes + * nss_ctx_instance + * + * @param[in,out] ctx Pointer to the context of the HLOS driver. + * @param[in] if_num NSS interface number. + * + * @return + * None. 
+ * + * @dependencies + * The callback handler must have been previously registered. + */ +extern void nss_crypto_data_unregister(struct nss_ctx_instance *ctx, uint32_t if_num); + +/** + * nss_crypto_pm_notify_unregister + * Deregisters a power management event callback handler from the HLOS driver. + * + * @return + * None. + * + * @dependencies + * The callback handler must have been previously registered. + */ +extern void nss_crypto_pm_notify_unregister(void); + +/** + * nss_crypto_msg_init + * Initializes a crypto-specific message. + * + * @datatypes + * nss_crypto_msg \n + * nss_crypto_msg_callback_t + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_crypto_msg_init(struct nss_crypto_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, + nss_crypto_msg_callback_t cb, void *app_data); + +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_CRYPTO_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_crypto_cmn.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_crypto_cmn.h new file mode 100644 index 000000000..61b97f1a8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_crypto_cmn.h @@ -0,0 +1,460 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_crypto_cmn.h + * NSS Crypto common interface definitions. + */ +#ifndef __NSS_CRYPTO_CMN_H +#define __NSS_CRYPTO_CMN_H + +/** + * @addtogroup nss_crypto_subsystem + * @{ + */ + +/* + * Context message related array sizes + */ +#define NSS_CRYPTO_CMN_CTX_SPARE 4 /**< Context spare words size. */ +#define NSS_CRYPTO_CMN_VER_WORDS 4 /**< Firmware version words size.*/ +#define NSS_CRYPTO_CIPHER_KEYLEN_MAX 32 /**< Maximum cipher keysize. */ +#define NSS_CRYPTO_AUTH_KEYLEN_MAX 128 /**< Maximum authorization keysize. */ +#define NSS_CRYPTO_NONCE_SIZE_MAX 4 /**< Maximum authorization keysize. */ + +/** + * nss_crypto_cmn_algo + * List of crypto algorithms supported. + */ +enum nss_crypto_cmn_algo { + NSS_CRYPTO_CMN_ALGO_NULL, /**< NULL transform. */ + NSS_CRYPTO_CMN_ALGO_3DES_CBC, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_AES128_CBC, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_AES192_CBC, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_AES256_CBC, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_AES128_CTR, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_AES192_CTR, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_AES256_CTR, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_AES128_ECB, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_AES192_ECB, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_AES256_ECB, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_AES128_GCM, /**< Asynchronous block cipher. 
*/ + NSS_CRYPTO_CMN_ALGO_AES192_GCM, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_AES256_GCM, /**< Asynchronous block cipher. */ + NSS_CRYPTO_CMN_ALGO_MD5_HASH, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_SHA160_HASH, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_SHA224_HASH, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_SHA256_HASH, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_SHA384_HASH, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_SHA512_HASH, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_MD5_HMAC, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_SHA160_HMAC, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_SHA224_HMAC, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_SHA256_HMAC, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_SHA384_HMAC, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_SHA512_HMAC, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_AES128_GMAC, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_AES192_GMAC, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_AES256_GMAC, /**< Asynchronous digest. */ + NSS_CRYPTO_CMN_ALGO_AES128_GCM_GMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES128_CBC_MD5_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES128_CBC_SHA160_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES128_CBC_SHA256_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES128_CBC_SHA384_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES128_CBC_SHA512_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES192_GCM_GMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES192_CBC_MD5_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES192_CBC_SHA160_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES192_CBC_SHA256_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES192_CBC_SHA384_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES192_CBC_SHA512_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES256_GCM_GMAC, /**< AEAD transform. 
*/ + NSS_CRYPTO_CMN_ALGO_AES256_CBC_MD5_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES256_CBC_SHA160_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES256_CBC_SHA256_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES256_CBC_SHA384_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES256_CBC_SHA512_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES128_CTR_MD5_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES128_CTR_SHA160_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES128_CTR_SHA256_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES128_CTR_SHA384_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES128_CTR_SHA512_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES192_CTR_MD5_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES192_CTR_SHA160_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES192_CTR_SHA256_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES192_CTR_SHA384_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES192_CTR_SHA512_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES256_CTR_MD5_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES256_CTR_SHA160_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES256_CTR_SHA256_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES256_CTR_SHA384_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_AES256_CTR_SHA512_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_3DES_CBC_MD5_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_3DES_CBC_SHA160_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_3DES_CBC_SHA256_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_3DES_CBC_SHA384_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_3DES_CBC_SHA512_HMAC, /**< AEAD transform. */ + NSS_CRYPTO_CMN_ALGO_MAX +}; + +/** + * nss_crypto_cmn_resp_error + * Response errors from crypto hardware + */ +enum nss_crypto_cmn_resp_error { + NSS_CRYPTO_CMN_RESP_ERROR_NONE = 0, /**< No error. 
*/ + NSS_CRYPTO_CMN_RESP_ERROR_HDR_VERSION, /**< Header version mismatch. */ + NSS_CRYPTO_CMN_RESP_ERROR_CTX_RANGE, /**< Crypto index out-of-range. */ + NSS_CRYPTO_CMN_RESP_ERROR_CTX_NOUSE, /**< Crypto index is freed. */ + NSS_CRYPTO_CMN_RESP_ERROR_DATA_EMPTY, /**< Crypto data is empty. */ + NSS_CRYPTO_CMN_RESP_ERROR_DATA_LEN, /**< Crypto data length. */ + NSS_CRYPTO_CMN_RESP_ERROR_DATA_TIMEOUT, /**< Data timeout from hardware. */ + NSS_CRYPTO_CMN_RESP_ERROR_CIPHER_ALGO, /**< Cipher algorithm is not supported. */ + NSS_CRYPTO_CMN_RESP_ERROR_CIPHER_MODE, /**< Cipher mode is not supported. */ + NSS_CRYPTO_CMN_RESP_ERROR_CIPHER_BLK_LEN, /**< Cipher block length is not aligned. */ + NSS_CRYPTO_CMN_RESP_ERROR_HASH_CHECK, /**< Hash check failed. */ + NSS_CRYPTO_CMN_RESP_ERROR_HASH_NOSPACE, /**< No space to write hash. */ + NSS_CRYPTO_CMN_RESP_ERROR_HW_STATUS, /**< More errors in hardware status. */ + NSS_CRYPTO_CMN_RESP_ERROR_MAX +}; + +/** + * nss_crypto_cmn_msg_type + * Message types supported. + */ +enum nss_crypto_cmn_msg_type { + NSS_CRYPTO_CMN_MSG_TYPE_NONE = 0, /**< Invalid message. */ + NSS_CRYPTO_CMN_MSG_TYPE_SETUP_NODE, /**< Initialize node. */ + NSS_CRYPTO_CMN_MSG_TYPE_SETUP_ENG, /**< Initialize engine. */ + NSS_CRYPTO_CMN_MSG_TYPE_SETUP_DMA, /**< Initialize DMA pair. */ + NSS_CRYPTO_CMN_MSG_TYPE_SETUP_CTX, /**< Update context information. */ + NSS_CRYPTO_CMN_MSG_TYPE_CLEAR_CTX, /**< Clear context information. */ + NSS_CRYPTO_CMN_MSG_TYPE_VERIFY_CTX, /**< Verify if context is active. */ + NSS_CRYPTO_CMN_MSG_TYPE_SYNC_NODE_STATS, /**< Synchronous node statistics. */ + NSS_CRYPTO_CMN_MSG_TYPE_SYNC_ENG_STATS, /**< Synchronous engine statistics. */ + NSS_CRYPTO_CMN_MSG_TYPE_SYNC_CTX_STATS, /**< Synchronous context statistics. */ + NSS_CRYPTO_CMN_MSG_TYPE_MAX +}; + +/** + * nss_crypto_cmn_msg_error + * Message error types supported. 
+ */ +enum nss_crypto_cmn_msg_error { + NSS_CRYPTO_CMN_MSG_ERROR_NONE = 0, + NSS_CRYPTO_CMN_MSG_ERROR_HDR_VERSION_NOSUPP, /**< Common header version not supported. */ + NSS_CRYPTO_CMN_MSG_ERROR_NODE_CTX_RANGE, /**< Context index out-of-range for node. */ + NSS_CRYPTO_CMN_MSG_ERROR_DMA_MASK, /**< DMA mask is out-of-range. */ + NSS_CRYPTO_CMN_MSG_ERROR_DMA_POW2, /**< DMA count is not a power-of-two. */ + NSS_CRYPTO_CMN_MSG_ERROR_DMA_MAX_TOKEN, /**< DMA count exceeds token count. */ + NSS_CRYPTO_CMN_MSG_ERROR_DMA_TOKEN_ALLOC, /**< Failed to allocate token. */ + NSS_CRYPTO_CMN_MSG_ERROR_CTX_RANGE, /**< Context index out-of-range. */ + NSS_CRYPTO_CMN_MSG_ERROR_CTX_INUSE, /**< Context has references. */ + NSS_CRYPTO_CMN_MSG_ERROR_CTX_WORDS, /**< Context size is bad. */ + NSS_CRYPTO_CMN_MSG_ERROR_CTX_ALGO, /**< Context algorithm is bad. */ + NSS_CRYPTO_CMN_MSG_ERROR_CTX_ALLOC, /**< Context alloc failed. */ + NSS_CRYPTO_CMN_MSG_ERROR_CTX_NOUSE, /**< Context has no references. */ + NSS_CRYPTO_CMN_MSG_ERROR_CTX_FLAGS, /**< Invalid context flags. */ + NSS_CRYPTO_CMN_MSG_ERROR_MAX +}; + +/** + * nss_crypto_cmn_ctx_flags + * Context message specific flags. + */ +enum nss_crypto_cmn_ctx_flags { + NSS_CRYPTO_CMN_CTX_FLAGS_NONE = 0, /**< Invalid flags. */ + NSS_CRYPTO_CMN_CTX_FLAGS_SEC_OFFSET = 0x01, /**< Secure offset is valid. */ + NSS_CRYPTO_CMN_CTX_FLAGS_SPARE0 = 0x02, /**< Spare word-0 valid. */ + NSS_CRYPTO_CMN_CTX_FLAGS_SPARE1 = 0x04, /**< Spare word-1 valid. */ + NSS_CRYPTO_CMN_CTX_FLAGS_SPARE2 = 0x08, /**< Spare word-2 valid. */ + NSS_CRYPTO_CMN_CTX_FLAGS_SPARE3 = 0x10, /**< Spare word-3 valid. */ + NSS_CRYPTO_CMN_CTX_FLAGS_MAX +}; + +/** + * nss_crypto_cmn_node + * Node message for setting up the crypto node. + * + * Note: Upon boot this is the first message sent by Host to NSS crypto. + * - It notifies the maximum number of crypto context. + * - It notifies the maximum number of DMA rings. + * - It returns the maximum size of crypto context record. 
+ */ +struct nss_crypto_cmn_node { + uint32_t max_dma_rings; /**< Maximum DMA rings supported. */ + uint32_t max_ctx; /**< Maximum contexts. */ + uint32_t max_ctx_size; /**< Maximum context size. */ +}; + +/** + * nss_crypto_cmn_engine + * Engine message for setting up the instance of crypto engine. + * + * Note: This is sent after 'node' message for each engine to + * - Get valid DMA pairs supported by firmware. + * - Get maximum request/token count available in firmware. + */ +struct nss_crypto_cmn_engine { + uint32_t fw_ver[NSS_CRYPTO_CMN_VER_WORDS]; /**< Firmware version. */ + uint32_t dma_mask; /**< Max DMA rings. */ + uint32_t req_count; /**< Token count. */ +}; + +/** + * nss_crypto_cmn_dma + * DMA message for setting up each DMA pair per engine. + */ +struct nss_crypto_cmn_dma { + uint16_t pair_id; /**< DMA pair ID. */ +}; + +/** + * nss_crypto_cmn_ctx + * Context message for setting up a crypto context in firmware. + */ +struct nss_crypto_cmn_ctx { + uint32_t spare[NSS_CRYPTO_CMN_CTX_SPARE]; /**< Context spare words. */ + uint16_t index; /**< Crypto index. */ + uint16_t sec_offset; /**< Secure offset for copying keys. */ + + uint8_t cipher_key[NSS_CRYPTO_CIPHER_KEYLEN_MAX]; /**< Array containing cipher keys. */ + uint8_t auth_key[NSS_CRYPTO_AUTH_KEYLEN_MAX]; /**< Array containing authorization keys. */ + uint8_t nonce[NSS_CRYPTO_NONCE_SIZE_MAX]; /**< Nonce value. */ + + uint16_t auth_keylen; /**< Authorization key length. */ + uint8_t res[2]; /**< Reserved. */ + + enum nss_crypto_cmn_algo algo; /**< Crypto algorithm. */ + enum nss_crypto_cmn_ctx_flags flags; /**< Context specific flags. */ +}; + +/** + * nss_crypto_cmn_stats + * Statistics message applicable for node/engine/context. + */ +struct nss_crypto_cmn_stats { + struct nss_cmn_node_stats nstats; /**< Common node statistics. */ + uint32_t fail_version; /**< Version mismatch failures. */ + uint32_t fail_ctx; /**< Context related failures. */ + uint32_t fail_dma; /**< DMA descriptor full. 
*/ +}; + +/** + * nss_crypto_cmn_msg + * Crypto common configuration message. + */ +struct nss_crypto_cmn_msg { + struct nss_cmn_msg cm; /**< Common header. */ + uint32_t seq_num; /**< Sequence number for messages. */ + uint32_t uid; /**< Unique ID to identify engine and context. */ + + union { + struct nss_crypto_cmn_node node; /**< Node message. */ + struct nss_crypto_cmn_engine eng; /**< Engine message. */ + struct nss_crypto_cmn_dma dma; /**< DMA message. */ + struct nss_crypto_cmn_ctx ctx; /**< Context message. */ + struct nss_crypto_cmn_stats stats; /**< Statistics message. */ + } msg; +}; + +#ifdef __KERNEL__ /* only kernel will use */ + +/** + * Callback function for receiving crypto transformation upon completion. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Networking device registered for callback. + * @param[in] skb Packet buffer. + * @param[in] napi NAPI pointer for Linux NAPI handling. + * + * @return + * None. + */ +typedef void (*nss_crypto_cmn_buf_callback_t)(struct net_device *netdev, struct sk_buff *skb, + struct napi_struct *napi); + +/** + * Callback function for receiving crypto_cmn messages. + * + * @datatypes + * nss_crypto_cmn_msg + * + * @param[in] app_data Context of the callback user. + * @param[in] msg Crypto common message. + * + * @return + * None. + */ +typedef void (*nss_crypto_cmn_msg_callback_t)(void *app_data, struct nss_crypto_cmn_msg *msg); + +/** + * nss_crypto_cmn_tx_buf + * Send crypto payload to firmware for transformation. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx NSS context per NSS core. + * @param[in] if_num Crypto interface to send the buffer. + * @param[in] skb Crypto payload. + * + * @return + * Status of the TX operation. 
+ */ +extern nss_tx_status_t nss_crypto_cmn_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, struct sk_buff *skb); + +/** + * nss_crypto_cmn_tx_msg + * Send crypto message to firmware for configuration. + * + * @datatypes + * nss_ctx_instance \n + * nss_crypto_cmn_msg + * + * @param[in] nss_ctx] NSS context per NSS core. + * @param[in] msg Control message. + * + * @return + * Status of the TX operation. + */ +extern nss_tx_status_t nss_crypto_cmn_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_crypto_cmn_msg *msg); + +/** + * nss_crypto_cmn_tx_msg + * Send crypto message to firmware for configuration synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_crypto_cmn_msg + * + * @param[in] nss_ctx NSS context per NSS core. + * @param[in,out] msg Crypto message, response data is copied. + * + * @return + * Status of the TX operation. + * + * @note + * Response data for the message is copied into the 'msg'. + * The caller should read the content of the 'msg' to find out errors. + * The caller needs to invoke this from a non-atomic context. + */ +extern nss_tx_status_t nss_crypto_cmn_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_crypto_cmn_msg *msg); + +/** + * nss_crypto_cmn_notify_register + * Register a event callback handler with NSS driver + * + * @datatypes + * nss_crypto_cmn_msg_callback_t + * + * @param[in] cb Event callback function. + * @param[in] app_data Context of the callback user. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_crypto_cmn_notify_register(nss_crypto_cmn_msg_callback_t cb, void *app_data); + +/** + * nss_crypto_cmn_notify_unregister + * De-register the event callback handler with NSS driver. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] ctx Pointer to the NSS context per NSS core. + * + * @return + * None. 
+ */ +extern void nss_crypto_cmn_notify_unregister(struct nss_ctx_instance *ctx); + +/** + * nss_crypto_cmn_data_register + * Crypto data register. + * + * @datatypes + * nss_crypto_cmn_buf_callback_t \n + * net_device + * + * @param[in] if_num Interface number. + * @param[in] cb Callback function. + * @param[in] netdev Net device. + * @param[in] features Features supported. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_crypto_cmn_data_register(uint32_t if_num, + nss_crypto_cmn_buf_callback_t cb, + struct net_device *netdev, + uint32_t features); + +/** + * nss_crypto_cmn_data_unregister + * Crypto data de-register. + * + * @param[in] ctx NSS context per NSS core. + * @param[in] if_num Interface number. + * + * @return + * None. + */ +extern void nss_crypto_cmn_data_unregister(struct nss_ctx_instance *ctx, uint32_t if_num); + +/** + * nss_crypto_cmn_get_context + * Get the per NSS core context enabled for crypto. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_crypto_cmn_get_context(void); + +/** + * nss_crypto_cmn_msg_init + * Crypto common message initialization. + * + * @datatypes + * nss_crypto_cmn_msg \n + * nss_crypto_cmn_msg_callback_t + * + * @param[in] ncm Crypto common message. + * @param[in] if_num Interface number. + * @param[in] type Message type. + * @param[in] len Common message length. + * @param[in] cb Callback function. + * @param[in] app_data Appllication data. + * + * @return + * None. 
+ */ +extern void nss_crypto_cmn_msg_init(struct nss_crypto_cmn_msg *ncm, uint16_t if_num, + uint32_t type, uint32_t len, nss_crypto_cmn_msg_callback_t cb, + void *app_data); + +#endif /*__KERNEL__ */ + +/** + * @} + */ +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_def.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_def.h new file mode 100644 index 000000000..9bfab7909 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_def.h @@ -0,0 +1,57 @@ +/* + ************************************************************************** + * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_def.h + * NSS definitions + */ + +#ifndef __NSS_DEF_H +#define __NSS_DEF_H + +/** + * @addtogroup nss_common_subsystem + * @{ + */ + +#define NSS_ETH_NORMAL_FRAME_MTU 1500 /**< MTU of a normal frame.*/ +#define NSS_ETH_MINI_JUMBO_FRAME_MTU 1978 /**< MTU of a mini-jumbo frame. */ +#define NSS_ETH_FULL_JUMBO_FRAME_MTU 9600 /**< MTU of a full jumbo frame. */ + +/** + * Number of ingress or egress VLANS supported in a connection entry. 
+ */ +#define MAX_VLAN_DEPTH 2 + +/** + * Number of egress interfaces supported in a multicast connection entry. + */ +#define NSS_MC_IF_MAX 16 + +/** + * Real pointer size of the system. + */ +#ifdef __LP64__ +typedef uint64_t nss_ptr_t; +#else +typedef uint32_t nss_ptr_t; +#endif + +/** + * @} + */ + +#endif /** __NSS_DEF_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_dma.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_dma.h new file mode 100755 index 000000000..d0aaa01de --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_dma.h @@ -0,0 +1,333 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ +/** + * @file nss_dma.h + * NSS DMA for linearization and split interface definitions. + */ + +#ifndef __NSS_DMA_H +#define __NSS_DMA_H + +/** + * @addtogroup nss_dma_subsystem + * @{ + */ + +/** + * nss_dma_msg_type + * Supported message types. + */ +enum nss_dma_msg_type { + NSS_DMA_MSG_TYPE_NONE, /**< Invalid message type. */ + NSS_DMA_MSG_TYPE_CONFIGURE, /**< Configure DMA. 
*/ + NSS_DMA_MSG_TYPE_SYNC_STATS, /**< Statistics synchronization. */ + NSS_DMA_MSG_TYPE_TEST_PERF, /**< Performance test. */ + NSS_DMA_MSG_TYPE_MAX /**< Maximum message type. */ +}; + +/** + * nss_dma_msg_error + * Message error types. + */ +enum nss_dma_msg_error { + NSS_DMA_MSG_ERROR_NONE, /**< No error. */ + NSS_DMA_MSG_ERROR_HW_INIT, /**< Invalid operation. */ + NSS_DMA_MSG_ERROR_UNHANDLED, /**< Invalid test ID. */ + NSS_DMA_MSG_ERROR_TEST, /**< Performance test failed. */ + NSS_DMA_MSG_ERROR_MAX /**< Maximum error type. */ +}; + +/** + * nss_dma_stats_types + * DMA node statistics. + */ +enum nss_dma_stats_types { + NSS_DMA_STATS_NO_REQ = NSS_STATS_NODE_MAX, + /**< Request descriptor not available. */ + NSS_DMA_STATS_NO_DESC, /**< DMA descriptors not available. */ + NSS_DMA_STATS_NEXTHOP, /**< Failed to retrive next hop. */ + NSS_DMA_STATS_FAIL_NEXTHOP_QUEUE, + /**< Failed to queue next hop. */ + NSS_DMA_STATS_FAIL_LINEAR_SZ, /**< Failed to get memory for linearization. */ + NSS_DMA_STATS_FAIL_LINEAR_ALLOC,/**< Failed to allocate buffer for linearization. */ + NSS_DMA_STATS_FAIL_LINEAR_NO_SG,/**< Skip linearization due to non-SG packet. */ + NSS_DMA_STATS_FAIL_SPLIT_SZ, /**< Failed to spliting buffer into multiple buffers. */ + NSS_DMA_STATS_FAIL_SPLIT_ALLOC, /**< Failed to allocate buffer for split. */ + NSS_DMA_STATS_FAIL_SYNC_ALLOC, /**< Failed to allocate buffer for sending statistics. */ + NSS_DMA_STATS_FAIL_CTX_ACTIVE, /**< Failed to queue as the node is not active. */ + NSS_DMA_STATS_FAIL_HW_E0, /**< Failed to process in hardware, error code E0. */ + NSS_DMA_STATS_FAIL_HW_E1, /**< Failed to process in hardware, error code E1. */ + NSS_DMA_STATS_FAIL_HW_E2, /**< Failed to process in hardware, error code E2. */ + NSS_DMA_STATS_FAIL_HW_E3, /**< Failed to process in hardware, error code E3. */ + NSS_DMA_STATS_FAIL_HW_E4, /**< Failed to process in hardware, error code E4. */ + NSS_DMA_STATS_FAIL_HW_E5, /**< Failed to process in hardware, error code E5. 
*/ + NSS_DMA_STATS_FAIL_HW_E6, /**< Failed to process in hardware, error code E6. */ + NSS_DMA_STATS_FAIL_HW_E7, /**< Failed to process in hardware, error code E7. */ + NSS_DMA_STATS_FAIL_HW_E8, /**< Failed to process in hardware, error code E8. */ + NSS_DMA_STATS_FAIL_HW_E9, /**< Failed to process in hardware, error code E9. */ + NSS_DMA_STATS_FAIL_HW_E10, /**< Failed to process in hardware, error code E10. */ + NSS_DMA_STATS_FAIL_HW_E11, /**< Failed to process in hardware, error code E11. */ + NSS_DMA_STATS_FAIL_HW_E12, /**< Failed to process in hardware, error code E12. */ + NSS_DMA_STATS_FAIL_HW_E13, /**< Failed to process in hardware, error code E13. */ + NSS_DMA_STATS_FAIL_HW_E14, /**< Failed to process in hardware, error code E14. */ + NSS_DMA_STATS_FAIL_HW_E15, /**< Failed to process in hardware, error code E15. */ + NSS_DMA_STATS_MAX, /**< Maximum message type. */ +}; + +/** + * nss_dma_test_type + * DMA Test types. + */ +enum nss_dma_test_type { + NSS_DMA_TEST_TYPE_DEFAULT = 0, /**< Test default segment size. */ + NSS_DMA_TEST_TYPE_SWEEP, /**< Test sweep segment size. */ + NSS_DMA_TEST_TYPE_LARGE, /**< Test large segment size. */ + NSS_DMA_TEST_TYPE_VERIFY, /**< Verify contents at receive processing. */ + NSS_DMA_TEST_TYPE_MAX /**< Maximum test type. */ +}; + +/** + * nss_dma_stats_notification + * DMA transmission statistics structure. + */ +struct nss_dma_stats_notification { + uint64_t stats_ctx[NSS_DMA_STATS_MAX]; /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ +/* + * Maximum number of HW specific statistics + */ +#define NSS_DMA_HW_ERROR_MAX 16 + +/* + * Test configuration flags + */ +#define NSS_DMA_TEST_FLAGS_LINEARIZE 0x01 /**< Linearize test. */ +#define NSS_DMA_TEST_FLAGS_SPLIT 0x02 /**< Split test. */ + +/** + * nss_dma_test_cfg + * Test configuration. 
+ */ +struct nss_dma_test_cfg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics for DMA interface. */ + uint32_t flags; /**< Test configuration flags. */ + uint32_t time_delta; /**< Difference between start and end. */ + uint16_t packet_count; /**< Number of packets to send. */ + uint16_t type; /**< Type of test to run. */ +}; + +/** + * nss_dma_stats + * DMA statistics. + */ +struct nss_dma_stats { + struct nss_cmn_node_stats node_stats; /**< Common node statistics for DMA interface. */ + uint32_t no_req; /**< Request descriptor not available. */ + uint32_t no_desc; /**< DMA descriptors not available. */ + uint32_t fail_nexthop; /**< Failed to retrive next hop. */ + uint32_t fail_nexthop_queue; /**< Failed to queue next hop. */ + uint32_t fail_linear_sz; /**< Failed to get memory for linearization. */ + uint32_t fail_linear_alloc; /**< Failed to allocate buffer for linearization. */ + uint32_t fail_linear_no_sg; /**< Skip linearization due to non-SG packet. */ + uint32_t fail_split_sz; /**< Failed to spliting buffer into multiple buffers. */ + uint32_t fail_split_alloc; /**< Failed to allocate buffer for split. */ + uint32_t fail_sync_alloc; /**< Failed to allocate buffer for sending statistics. */ + uint32_t fail_ctx_active; /**< Failed to queue as the node is not active. */ + uint32_t fail_hw[NSS_DMA_HW_ERROR_MAX]; /**< Hardware failures. */ +}; + +/** + * nss_dma_msg + * Message structure for configuring the DMA interface. + */ +struct nss_dma_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a NSS core-to-core transmission rule or statistics message. + */ + union { + struct nss_dma_test_cfg test_cfg; + /**< DMA test configuration. */ + struct nss_dma_stats stats; /**< DMA interface statistics. */ + } msg; /**< Message payload. */ +}; + +/** + * nss_dma_register_handler + * Registers the DMA message handler. + * + * @return + * None. 
+ */ +void nss_dma_register_handler(void); + +/** + * Callback function for receiving DMA messages. + * + * @datatypes + * nss_c2c_tx_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_dma_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * nss_dma_tx_msg + * Transmits a DMA message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_dma_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] ndm Pointer to the message data. + * + * @return + * Status of the transmit operation. + */ +extern nss_tx_status_t nss_dma_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_dma_msg *ndm); + +/** + * nss_dma_msg_init + * Initializes DMA messages. + * + * @datatypes + * nss_dma_msg \n + * nss_dma_msg_callback_t + * + * @param[in] ndm Pointer to the NSS interface message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_dma_msg_init(struct nss_dma_msg *ndm, uint16_t if_num, uint32_t type, uint32_t len, + nss_dma_msg_callback_t cb, void *app_data); + +/** + * nss_dma_notify_register + * Registers a notifier callback for DMA messages with the NSS. + * + * @datatypes + * nss_dma_msg_callback_t + * + * @param[in] core NSS core number index to the notifier callback table. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_dma_notify_register(int core, nss_dma_msg_callback_t cb, void *app_data); + +/** + * nss_dma_notify_unregister + * Deregisters a DMA message notifier callback from the NSS. 
+ * + * @param[in] core NSS core number index to the notifier callback table. + * + * @return + * None. + * + * @dependencies + * The notifier callback must have been previously registered. + */ +void nss_dma_notify_unregister(int core); + +/** + * nss_dma_register_sysctl + * Registers the DMA interface to Linux system control tree. + * + * @return + * None. + */ +extern void nss_dma_register_sysctl(void); + +/** + * nss_dma_unregister_sysctl + * Deregisters the DMA interface from Linux system control tree. + * + * @return + * None. + * + * @dependencies + * The system control must have been previously registered. + */ +extern void nss_dma_unregister_sysctl(void); + +/** + * nss_dma_init + * Initializes the DMA interface. + * + * @return + * None. + */ +void nss_dma_init(void); + +/** + * nss_dma_get_context + * Get the per NSS core context enabled for DMA. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_dma_get_context(void); + +/** + * nss_dma_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_dma_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_dma_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. 
+ */ +extern int nss_dma_stats_register_notifier(struct notifier_block *nb); +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_C2C_TX_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_dtls.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_dtls.h new file mode 100644 index 000000000..d237bfda2 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_dtls.h @@ -0,0 +1,335 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_dtls.h + * NSS DTLS interface definitions. + */ + +#ifndef _NSS_DTLS_H_ +#define _NSS_DTLS_H_ + +/** + * @addtogroup nss_dtls_subsystem + * @{ + */ + +#define NSS_MAX_DTLS_SESSIONS 8 /**< Maximum number of supported DTLS sessions. */ + +/** + * nss_dtls_metadata_types + * Message types for DTLS requests and responses. 
+ */ +enum nss_dtls_metadata_types { + NSS_DTLS_MSG_SESSION_CONFIGURE, + NSS_DTLS_MSG_SESSION_DESTROY, + NSS_DTLS_MSG_SESSION_STATS, + NSS_DTLS_MSG_REKEY_ENCAP_CIPHER_UPDATE, + NSS_DTLS_MSG_REKEY_ENCAP_CIPHER_SWITCH, + NSS_DTLS_MSG_REKEY_DECAP_CIPHER_UPDATE, + NSS_DTLS_MSG_REKEY_DECAP_CIPHER_SWITCH, + NSS_DTLS_MSG_MAX +}; + +/** + * nss_dtls_error_response_types + * Error types for DTLS responses. + */ +enum nss_dtls_error_response_types { + NSS_DTLS_ERR_UNKNOWN_MSG = 1, + NSS_DTLS_ERR_INVALID_APP_IF = 2, + NSS_DTLS_ERR_INVALID_CPARAM = 3, + NSS_DTLS_ERR_INVALID_VER = 4, + NSS_DTLS_ERR_NOMEM = 5, + NSS_DTLS_ERR_MAX, +}; + +/** + * nss_dtls_session_stats + * DTLS session statistics. + */ +struct nss_dtls_session_stats { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t tx_auth_done; /**< Tx authentication is done. */ + uint32_t rx_auth_done; /**< Rx authentication is successful. */ + uint32_t tx_cipher_done; /**< Tx cipher is complete. */ + uint32_t rx_cipher_done; /**< Rx cipher is complete. */ + uint32_t tx_cbuf_alloc_fail; /**< Tx crypto buffer allocation failure. */ + uint32_t rx_cbuf_alloc_fail; /**< Rx crypto buffer allocation failure. */ + uint32_t tx_cenqueue_fail; /**< Tx enqueue-to-crypto failure. */ + uint32_t rx_cenqueue_fail; /**< Rx enqueue-to-crypto failure. */ + uint32_t tx_dropped_hroom; + /**< Tx packets dropped because of insufficent headroom. */ + uint32_t tx_dropped_troom; + /**< Tx packets dropped because of insufficent tailroom. */ + uint32_t tx_forward_enqueue_fail; + /**< Tx enqueue failed to forward a node after encapsulation. */ + uint32_t rx_forward_enqueue_fail; + /**< Rx enqueue failed to receive a node after decapsulation. */ + uint32_t rx_invalid_version; /**< Rx invalid DTLS version. */ + uint32_t rx_invalid_epoch; /**< Rx invalid DTLS epoch. */ + uint32_t rx_malformed; /**< Rx malformed DTLS record. */ + uint32_t rx_cipher_fail; /**< Rx cipher failure. 
*/ + uint32_t rx_auth_fail; /**< Rx authentication failure. */ + uint32_t rx_capwap_classify_fail; /**< Rx CAPWAP classification failure. */ + uint32_t rx_single_rec_dgram; /**< Rx single record datagrams processed. */ + uint32_t rx_multi_rec_dgram; /**< Rx multi-record datagrams processed. */ + uint32_t rx_replay_fail; /**< Rx anti-replay failures. */ + uint32_t rx_replay_duplicate; + /**< Rx anti-replay failed because of a duplicate record. */ + uint32_t rx_replay_out_of_window; + /**< Rx anti-replay failed because of an out-of-window record. */ + uint32_t outflow_queue_full; + /**< Tx packets dropped because the encapsulation queue is full. */ + uint32_t decap_queue_full; + /**< Rx packets dropped because the decapsulation queue is full. */ + uint32_t pbuf_alloc_fail; + /**< Packets dropped because of a buffer allocation failure. */ + uint32_t pbuf_copy_fail; + /**< Packets dropped because of a buffer copy failure. */ + uint16_t epoch; /**< Current epoch. */ + uint16_t tx_seq_high; /**< Upper 16 bits of the current sequence number. */ + uint32_t tx_seq_low; /**< Lower 32 bits of the current sequence number. */ +}; + +/** + * nss_dtls_session_cipher_update + * Information for a cipher update message in a DTLS session. + */ +struct nss_dtls_session_cipher_update { + uint32_t crypto_idx; /**< Crypto index for encapsulation. */ + uint32_t hash_len; /**< Authentication hash length for encapsulation. */ + uint32_t iv_len; /**< Crypto IV length for encapsulation. */ + uint32_t cipher_algo; /**< Encapsulation cipher. */ + uint32_t auth_algo; /**< Encapsulation authentication algorithm. */ + uint16_t epoch; /**< Epoch indicator. */ + uint16_t reserved; /**< Reserved for message alignment.*/ +}; + +/** + * nss_dtls_session_configure + * Configuration message for a DTLS session. + */ +struct nss_dtls_session_configure { + uint32_t ver; /**< DTLS version. */ + uint32_t flags; /**< DTLS flags. */ + uint32_t crypto_idx_encap; /**< Crypto index for encapsulation. 
*/ + uint32_t crypto_idx_decap; /**< Crypto index for decapsulation. */ + uint32_t iv_len_encap; /**< Crypto IV length for encapsulation. */ + uint32_t iv_len_decap; /**< Crypto IV length for decapsulation. */ + uint32_t hash_len_encap; + /**< Authentication hash length for encapsulation. */ + uint32_t hash_len_decap; + /**< Authentication hash length for decapsulation. */ + uint32_t cipher_algo_encap; /**< Cipher algorithm for encapsulation. */ + uint32_t auth_algo_encap; /**< Authentication algorithm encapsulation. */ + uint32_t cipher_algo_decap; /**< Cipher algorithm for decapsulation. */ + uint32_t auth_algo_decap; /**< Authentication algorithm decapsulation. */ + uint32_t nss_app_if; + /**< Interface of the node that receives decapsulated packets. */ + uint16_t sport; /**< Source UDP/UDPLite port. */ + uint16_t dport; /**< Destination UDP/UDPLite port. */ + uint32_t sip[4]; /**< Source IPv4/IPv6 address. */ + uint32_t dip[4]; /**< Destination IPv4/IPv6 address. */ + uint16_t window_size; /**< Anti-replay window size. */ + uint16_t epoch; /**< Epoch indicator. */ + uint8_t oip_ttl; /**< Maximum outer IP time-to-live value. */ + uint8_t reserved1; /**< Reserved for message alignment. */ + uint16_t reserved2; /**< Reserved for message alignment. */ +}; + +/** + * nss_dtls_msg + * Data for sending and receiving DTLS messages. + */ +struct nss_dtls_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a DTLS message. + */ + union { + struct nss_dtls_session_configure cfg; + /**< Session configuration. */ + struct nss_dtls_session_cipher_update cipher_update; + /**< Cipher update information. */ + struct nss_dtls_session_stats stats; + /**< Session statistics. */ + } msg; /**< Message payload for DTLS session messages exchanged with NSS core. */ +}; + +/** + * nss_dtls_tx_buf + * Sends a DTLS data packet to the NSS. + * + * @datatypes + * sk_buff \n + * nss_ctx_instance + * + * @param[in] os_buf Pointer to the OS data buffer. 
+ * @param[in] if_num NSS interface number. + * @param[in] nss_ctx Pointer to the NSS core context. + * + * @return + * Status of Tx buffer forwarded to NSS for DTLS operation. + */ +nss_tx_status_t nss_dtls_tx_buf(struct sk_buff *os_buf, uint32_t if_num, + struct nss_ctx_instance *nss_ctx); + +/** + * nss_dtls_tx_msg + * Sends DTLS messages. + * + * @param[in] nss_ctx Pointer to the NSS core context. + * @param[in,out] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_dtls_tx_msg(struct nss_ctx_instance *nss_ctx, + struct nss_dtls_msg *msg); + +/** + * nss_dtls_tx_msg_sync + * Sends DTLS messages synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_dtls_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in,out] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_dtls_tx_msg_sync(struct nss_ctx_instance *nss_ctx, + struct nss_dtls_msg *msg); + +/** + * Callback function for receiving DTLS messages. + * + * @datatypes + * nss_dtls_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_dtls_msg_callback_t)(void *app_data, + struct nss_dtls_msg *msg); + +/** + * Callback function for receiving DTLS session data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_dtls_data_callback_t)(struct net_device *netdev, + struct sk_buff *skb, + struct napi_struct *napi); + +/** + * nss_dtls_register_if + * Registers a DTLS session interface with the NSS for sending and receiving + * messages. 
+ * + * @datatypes + * nss_dtls_data_callback_t \n + * nss_dtls_msg_callback_t + * + * @param[in] if_num NSS interface number. + * @param[in] cb Callback function for the message. + * @param[in] msg_callback Callback for DTLS tunnel message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * @param[in] app_ctx Pointer to the application context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_dtls_register_if(uint32_t if_num, + nss_dtls_data_callback_t cb, + nss_dtls_msg_callback_t msg_callback, + struct net_device *netdev, + uint32_t features, + void *app_ctx); + +/** + * nss_dtls_unregister_if + * Deregisters a DTLS session interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The DTLS session interface must have been previously registered. + */ +extern void nss_dtls_unregister_if(uint32_t if_num); + +/** + * nss_dtls_msg_init + * Initializes a DTLS message. + * + * @datatypes + * nss_dtls_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context. + * + * @return + * None. + */ +extern void nss_dtls_msg_init(struct nss_dtls_msg *ncm, uint16_t if_num, + uint32_t type, uint32_t len, void *cb, + void *app_data); + +/** + * nss_dtls_get_context + * Gets the NSS core context for the DTLS session. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_dtls_get_context(void); + +/** + * nss_dtls_get_ifnum_with_coreid + * Gets the DTLS interface number with a core ID. + * + * @param[in] if_num NSS interface number. + * + * @return + * Interface number with the core ID. 
+ */ +extern int32_t nss_dtls_get_ifnum_with_coreid(int32_t if_num); + +/** + * @} + */ + +#endif /* _NSS_DTLS_H_. */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_dtls_cmn.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_dtls_cmn.h new file mode 100644 index 000000000..78b166365 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_dtls_cmn.h @@ -0,0 +1,512 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_dtls_cmn.h + * NSS DTLS common interface definitions, supports inner/outer interface split. + */ + +#ifndef _NSS_DTLS_CMN_H_ +#define _NSS_DTLS_CMN_H_ + +/** + * @addtogroup nss_dtls_subsystem + * @{ + */ + +#define NSS_DTLS_CMN_CTX_HDR_IPV6 0x0001 /**< DTLS with IPv6. */ +#define NSS_DTLS_CMN_CTX_HDR_UDPLITE 0x0002 /**< DTLS with UDPLite. */ +#define NSS_DTLS_CMN_CTX_HDR_CAPWAP 0x0004 /**< DTLS with CAPWAP. */ +#define NSS_DTLS_CMN_CTX_CIPHER_MODE_GCM 0x0008 /**< DTLS with GCM cipher mode. */ +#define NSS_DTLS_CMN_CTX_ENCAP_UDPLITE_CSUM 0x10000 /**< Checksum only UDPLite header. 
*/ +#define NSS_DTLS_CMN_CTX_ENCAP_METADATA 0x20000 /**< Valid metadata in encapsulation direction. */ +#define NSS_DTLS_CMN_CTX_DECAP_ACCEPT_ALL 0x40000 /**< Exception all error packets to host. */ + +#define NSS_DTLS_CMN_CLE_MAX 32 /**< Max classification error. */ + +/** + * nss_dtls_cmn_metadata_types + * Message types for DTLS requests and responses. + */ +enum nss_dtls_cmn_msg_type { + NSS_DTLS_CMN_MSG_TYPE_CONFIGURE_NODE, /**< Configure DTLS firmware node. */ + NSS_DTLS_CMN_MSG_TYPE_CONFIGURE_HDR, /**< Configure the base context parameter. */ + NSS_DTLS_CMN_MSG_TYPE_CONFIGURE_DTLS, /**< Configure DTLS parameters. */ + NSS_DTLS_CMN_MSG_TYPE_SWITCH_DTLS, /**< Switch to new DTLS transform. */ + NSS_DTLS_CMN_MSG_TYPE_DECONFIGURE, /**< Deconfigure context. */ + NSS_DTLS_CMN_MSG_TYPE_SYNC_STATS, /**< Synchronize statistics. */ + NSS_DTLS_CMN_MSG_TYPE_NODE_STATS, /**< Node statistics. */ + NSS_DTLS_CMN_MSG_MAX +}; + +/** + * nss_dtls_cmn_error_response_types + * Error types for DTLS responses. + */ +enum nss_dtls_cmn_error { + NSS_DTLS_CMN_ERROR_NONE = 0, + NSS_DTLS_CMN_ERROR_UNKNOWN_MSG, + NSS_DTLS_CMN_ERROR_INVALID_DESTIF, + NSS_DTLS_CMN_ERROR_INVALID_SRCIF, + NSS_DTLS_CMN_ERROR_INVALID_CRYPTO, + NSS_DTLS_CMN_ERROR_INVALID_VER, + NSS_DTLS_CMN_ERROR_INVALID_CTX_TYPE, + NSS_DTLS_CMN_ERROR_INVALID_CTX_WORDS, + NSS_DTLS_CMN_ERROR_FAIL_ALLOC_HWCTX, + NSS_DTLS_CMN_ERROR_FAIL_COPY_CTX, + NSS_DTLS_CMN_ERROR_FAIL_SWITCH_HWCTX, + NSS_DTLS_CMN_ERROR_ALREADY_CONFIGURED, + NSS_DTLS_CMN_ERROR_FAIL_NOMEM, + NSS_DTLS_CMN_ERROR_FAIL_COPY_NONCE, + NSS_DTLS_CMN_ERROR_MAX, +}; + +/** + * nss_dtls_cmn_ctx_stats_types + * DTLS common context statistics types. + */ +enum nss_dtls_cmn_ctx_stats_types { + NSS_DTLS_CMN_STATS_RX_SINGLE_REC = NSS_STATS_NODE_MAX, + /**< Received single DTLS record datagrams. */ + NSS_DTLS_CMN_STATS_RX_MULTI_REC, /**< Received multiple DTLS record datagrams. */ + NSS_DTLS_CMN_STATS_FAIL_CRYPTO_RESOURCE,/**< Failure in crypto resource allocation. 
*/ + NSS_DTLS_CMN_STATS_FAIL_CRYPTO_ENQUEUE, /**< Failure due to full queue in crypto or hardware. */ + NSS_DTLS_CMN_STATS_FAIL_HEADROOM, /**< Failure in headroom check. */ + NSS_DTLS_CMN_STATS_FAIL_TAILROOM, /**< Failure in tailroom check. */ + NSS_DTLS_CMN_STATS_FAIL_VER, /**< Failure in DTLS version check. */ + NSS_DTLS_CMN_STATS_FAIL_EPOCH, /**< Failure in DTLS epoch check. */ + NSS_DTLS_CMN_STATS_FAIL_DTLS_RECORD, /**< Failure in reading DTLS record. */ + NSS_DTLS_CMN_STATS_FAIL_CAPWAP, /**< Failure in CAPWAP classification. */ + NSS_DTLS_CMN_STATS_FAIL_REPLAY, /**< Failure in anti-replay check. */ + NSS_DTLS_CMN_STATS_FAIL_REPLAY_DUP, /**< Failure in anti-replay; duplicate records. */ + NSS_DTLS_CMN_STATS_FAIL_REPLAY_WIN, /**< Failure in anti-replay; packet outside the window. */ + NSS_DTLS_CMN_STATS_FAIL_QUEUE, /**< Failure due to full queue in DTLS. */ + NSS_DTLS_CMN_STATS_FAIL_QUEUE_NEXTHOP, /**< Failure due to full queue in next hop. */ + NSS_DTLS_CMN_STATS_FAIL_PBUF_ALLOC, /**< Failure in pbuf allocation. */ + NSS_DTLS_CMN_STATS_FAIL_PBUF_LINEAR, /**< Failure in pbuf linearization. */ + NSS_DTLS_CMN_STATS_FAIL_PBUF_STATS, /**< Failure in pbuf allocation for statistics. */ + NSS_DTLS_CMN_STATS_FAIL_PBUF_ALIGN, /**< Failure in pbuf alignment. */ + NSS_DTLS_CMN_STATS_FAIL_CTX_ACTIVE, /**< Failure in enqueue due to inactive context. */ + NSS_DTLS_CMN_STATS_FAIL_HWCTX_ACTIVE, /**< Failure in enqueue due to inactive hardware context. */ + NSS_DTLS_CMN_STATS_FAIL_CIPHER, /**< Failure in decrypting the data. */ + NSS_DTLS_CMN_STATS_FAIL_AUTH, /**< Failure in authenticating the data. */ + NSS_DTLS_CMN_STATS_FAIL_SEQ_OVF, /**< Failure due to sequence number overflow. */ + NSS_DTLS_CMN_STATS_FAIL_BLK_LEN, /**< Failure in decapsulation due to bad cipher length. */ + NSS_DTLS_CMN_STATS_FAIL_HASH_LEN, /**< Failure in decapsulation due to bad hash length. */ + NSS_DTLS_CMN_STATS_LEN_ERROR, /**< Length error. 
*/ + NSS_DTLS_CMN_STATS_TOKEN_ERROR, /**< Token error, unknown token command or instruction. */ + NSS_DTLS_CMN_STATS_BYPASS_ERROR, /**< Token contains too much bypass data. */ + NSS_DTLS_CMN_STATS_CONFIG_ERROR, /**< Invalid command, algorithm, or mode combination. */ + NSS_DTLS_CMN_STATS_ALGO_ERROR, /**< Unsupported algorithm. */ + NSS_DTLS_CMN_STATS_HASH_OVF_ERROR, /**< Hash input overflow. */ + NSS_DTLS_CMN_STATS_TTL_ERROR, /**< TTL or HOP-Limit underflow. */ + NSS_DTLS_CMN_STATS_CSUM_ERROR, /**< Checksum error. */ + NSS_DTLS_CMN_STATS_TIMEOUT_ERROR, /**< Data timed out. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_0, /**< Classification failure 0. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_1, /**< Classification failure 1. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_2, /**< Classification failure 2. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_3, /**< Classification failure 3. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_4, /**< Classification failure 4. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_5, /**< Classification failure 5. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_6, /**< Classification failure 6. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_7, /**< Classification failure 7. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_8, /**< Classification failure 8. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_9, /**< Classification failure 9. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_10, /**< Classification failure 10. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_11, /**< Classification failure 11. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_12, /**< Classification failure 12. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_13, /**< Classification failure 13. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_14, /**< Classification failure 14. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_15, /**< Classification failure 15. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_16, /**< Classification failure 16. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_17, /**< Classification failure 17. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_18, /**< Classification failure 18. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_19, /**< Classification failure 19. 
*/ + NSS_DTLS_CMN_STATS_CLE_ERROR_20, /**< Classification failure 20. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_21, /**< Classification failure 21. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_22, /**< Classification failure 22. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_23, /**< Classification failure 23. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_24, /**< Classification failure 24. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_25, /**< Classification failure 25. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_26, /**< Classification failure 26. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_27, /**< Classification failure 27. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_28, /**< Classification failure 28. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_29, /**< Classification failure 29. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_30, /**< Classification failure 30. */ + NSS_DTLS_CMN_STATS_CLE_ERROR_31, /**< Classification failure 31. */ + NSS_DTLS_CMN_STATS_SEQ_LOW, /**< Lower 32 bits of current transmit sequence number. */ + NSS_DTLS_CMN_STATS_SEQ_HIGH, /**< Upper 16 bits of current transmit sequence number. */ + NSS_DTLS_CMN_STATS_EPOCH, /**< Current epoch value. */ + NSS_DTLS_CMN_CTX_STATS_MAX, /**< Maximum message type. */ +}; + +/** + * nss_dtls_cmn_node_stats + * DTLS node statistics. + */ +struct nss_dtls_cmn_node_stats { + uint32_t fail_ctx_alloc; /**< Failure in allocating a context. */ + uint32_t fail_ctx_free; /**< Failure in freeing up the context. */ + uint32_t fail_pbuf_stats; /**< Failure in pbuf allocation for statistics. */ +}; + +/** + * nss_dtls_cmn_hw_stats + * DTLS hardware statistics. + */ +struct nss_dtls_cmn_hw_stats { + uint32_t len_error; /**< Length error. */ + uint32_t token_error; /**< Token error, unknown token command/instruction. */ + uint32_t bypass_error; /**< Token contains too much bypass data. */ + uint32_t config_error; /**< Invalid command/algorithm/mode/combination. */ + uint32_t algo_error; /**< Unsupported algorithm. */ + uint32_t hash_ovf_error; /**< Hash input overflow. */ + uint32_t ttl_error; /**< TTL or HOP-Limit underflow. 
*/ + uint32_t csum_error; /**< Checksum error. */ + uint32_t timeout_error; /**< Data timed-out. */ +}; + +/** + * nss_dtls_cmn_ctx_stats + * DTLS session statistics. + */ +struct nss_dtls_cmn_ctx_stats { + struct nss_cmn_node_stats pkt; /**< Common node statistics. */ + uint32_t rx_single_rec; /**< Received single DTLS record datagrams. */ + uint32_t rx_multi_rec; /**< Received multiple DTLS record datagrams. */ + uint32_t fail_crypto_resource; /**< Failure in allocation of crypto resource. */ + uint32_t fail_crypto_enqueue; /**< Failure due to queue full in crypto or hardware. */ + uint32_t fail_headroom; /**< Failure in headroom check. */ + uint32_t fail_tailroom; /**< Failure in tailroom check. */ + uint32_t fail_ver; /**< Failure in DTLS version check. */ + uint32_t fail_epoch; /**< Failure in DTLS epoch check. */ + uint32_t fail_dtls_record; /**< Failure in reading DTLS record. */ + uint32_t fail_capwap; /**< Failure in CAPWAP classification. */ + uint32_t fail_replay; /**< Failure in anti-replay check. */ + uint32_t fail_replay_dup; /**< Failure in anti-replay; duplicate records. */ + uint32_t fail_replay_win; /**< Failure in anti-replay; packet outside the window. */ + uint32_t fail_queue; /**< Failure due to queue full in DTLS. */ + uint32_t fail_queue_nexthop; /**< Failure due to queue full in next_hop. */ + uint32_t fail_pbuf_alloc; /**< Failure in pbuf allocation. */ + uint32_t fail_pbuf_linear; /**< Failure in pbuf linearization. */ + uint32_t fail_pbuf_stats; /**< Failure in pbuf allocation for statistics. */ + uint32_t fail_pbuf_align; /**< Failure in pbuf alignment. */ + uint32_t fail_ctx_active; /**< Failure in enqueue due to inactive context. */ + uint32_t fail_hwctx_active; /**< Failure in enqueue due to inactive hardware context. */ + uint32_t fail_cipher; /**< Failure in decrypting the data. */ + uint32_t fail_auth; /**< Failure in authenticating the data. */ + uint32_t fail_seq_ovf; /**< Failure due to sequence number overflow. 
*/ + uint32_t fail_blk_len; /**< Failure in decapsulation due to bad cipher block length. */ + uint32_t fail_hash_len; /**< Failure in decapsulation due to bad hash block length. */ + + struct nss_dtls_cmn_hw_stats fail_hw; /**< Hardware failure statistics. */ + + uint32_t fail_cle[NSS_DTLS_CMN_CLE_MAX];/**< Classification errors. */ + + uint32_t seq_low; /**< Lower 32 bits of current Tx sequence number. */ + uint32_t seq_high; /**< Upper 16 bits of current Tx sequence number. */ + + uint16_t epoch; /**< Current epoch value. */ + uint8_t res1[2]; /**< Reserved for future use. */ + + uint8_t res2[16]; /**< Reserved for future use. */ +}; + +/** + * nss_dtls_cmn_ctx_config_hdr + * Parameters for outer header transform. + */ +struct nss_dtls_cmn_ctx_config_hdr { + uint32_t flags; /**< Context flags. */ + uint32_t dest_ifnum; /**< Destination interface for packets. */ + uint32_t src_ifnum; /**< Source interface of packets. */ + uint32_t sip[4]; /**< Source IPv4/v6 address. */ + uint32_t dip[4]; /**< Destination IPv4/v6 address. */ + + uint16_t sport; /**< Source UDP/UDPLite port. */ + uint16_t dport; /**< Destination UDP/UDPLite port. */ + + uint8_t hop_limit_ttl; /**< IP header TTL field. */ + uint8_t dscp; /**< DSCP value. */ + uint8_t dscp_copy; /**< Copy DSCP value. */ + uint8_t df; /**< Do not fragment DTLS over IPv4. */ +}; + +/** + * nss_dtls_cmn_ctx_config_dtls + * Parameters for DTLS transform. + */ +struct nss_dtls_cmn_ctx_config_dtls { + uint32_t ver; /**< Version (enum dtls_cmn_ver). */ + uint32_t crypto_idx; /**< Crypto index for cipher context. */ + + uint16_t window_size; /**< Anti-replay window size. */ + uint16_t epoch; /**< Initial epoch value. */ + + uint8_t iv_len; /**< Crypto IV length for encapsulation. */ + uint8_t hash_len; /**< Auth hash length for encapsulation. */ + uint8_t blk_len; /**< Cipher block length. */ + uint8_t res1; /**< Reserved for alignment. 
*/ +}; + +/** + * nss_dtls_cmn_stats_notification + * DTLS common transmission statistics structure. + */ +struct nss_dtls_cmn_stats_notification { + uint64_t stats_ctx[NSS_DTLS_CMN_CTX_STATS_MAX]; /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +/** + * nss_dtls_cmn_msg + * Data for sending and receiving DTLS messages. + */ +struct nss_dtls_cmn_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a DTLS message. + */ + union { + struct nss_dtls_cmn_ctx_config_hdr hdr_cfg; /**< Session configuration. */ + struct nss_dtls_cmn_ctx_config_dtls dtls_cfg; /**< Cipher update information. */ + struct nss_dtls_cmn_ctx_stats stats; /**< Session statistics. */ + struct nss_dtls_cmn_node_stats node_stats; /**< Node statistics. */ + } msg; /**< Message payload for DTLS session messages exchanged with NSS core. */ +}; + +#ifdef __KERNEL__ /* only for kernel use. */ +/** + * Callback function for receiving DTLS messages. + * + * @datatypes + * nss_dtls_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_dtls_cmn_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * Callback function for receiving DTLS session data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_dtls_cmn_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_dtls_cmn_tx_buf + * Sends a DTLS data packet to the NSS. + * + * @datatypes + * sk_buff \n + * nss_ctx_instance + * + * @param[in] os_buf Pointer to the OS data buffer. + * @param[in] if_num NSS interface number. 
+ * @param[in] nss_ctx Pointer to the NSS core context. + * + * @return + * Status of Tx buffer forwarded to NSS for DTLS operation. + */ +nss_tx_status_t nss_dtls_cmn_tx_buf(struct sk_buff *os_buf, uint32_t if_num, struct nss_ctx_instance *nss_ctx); + +/** + * nss_dtls_cmn_tx_msg + * Sends DTLS messages. + * + * @param[in] nss_ctx Pointer to the NSS core context. + * @param[in,out] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_dtls_cmn_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_dtls_cmn_msg *msg); + +/** + * nss_dtls_cmn_tx_msg_sync + * Sends DTLS messages synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_dtls_cmn_msg_type \n + * nss_dtls_cmn_msg \n + * nss_dtls_cmn_error + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] ndcm Pointer to the message data. + * @param[in,out] resp Response for the configuration. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_dtls_cmn_tx_msg_sync(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + enum nss_dtls_cmn_msg_type type, uint16_t len, + struct nss_dtls_cmn_msg *ndcm, enum nss_dtls_cmn_error *resp); + +/** + * nss_dtls_cmn_unregister_if + * Deregisters a DTLS session interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The DTLS session interface must have been previously registered. + */ +extern void nss_dtls_cmn_unregister_if(uint32_t if_num); + +/** + * nss_dtls_cmn_register_if + * Registers a DTLS session interface with the NSS for sending and receiving + * messages. + * + * @datatypes + * nss_dtls_cmn_data_callback_t \n + * nss_dtls_cmn_msg_callback_t + * + * @param[in] if_num NSS interface number. + * @param[in] data_cb Callback function for the message. 
+ * @param[in] msg_cb Callback for DTLS tunnel message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * @param[in] type Type of message. + * @param[in] app_ctx Pointer to the application context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_dtls_cmn_register_if(uint32_t if_num, + nss_dtls_cmn_data_callback_t data_cb, + nss_dtls_cmn_msg_callback_t msg_cb, + struct net_device *netdev, + uint32_t features, + uint32_t type, + void *app_ctx); + +/** + * nss_dtls_cmn_notify_unregister + * Deregisters an event callback. + * + * @param[in] ifnum NSS interface number. + * + * @return + * None. + */ +extern void nss_dtls_cmn_notify_unregister(uint32_t ifnum); + +/** + * nss_dtls_cmn_notify_register + * Registers an event callback to handle notification from DTLS firmware package. + * + * @param[in] ifnum NSS interface number. + * @param[in] ev_cb Callback for DTLS tunnel message. + * @param[in] app_data Pointer to the application context. + * + * @return + * Pointer to NSS core context. + */ +extern struct nss_ctx_instance *nss_dtls_cmn_notify_register(uint32_t ifnum, nss_dtls_cmn_msg_callback_t ev_cb, + void *app_data); + +/** + * nss_dtls_cmn_msg_init + * Initializes a DTLS message. + * + * @datatypes + * nss_dtls_cmn_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context. + * + * @return + * None. + */ +extern void nss_dtls_cmn_msg_init(struct nss_dtls_cmn_msg *ncm, uint32_t if_num, uint32_t type, uint32_t len, void *cb, + void *app_data); + +/** + * nss_dtls_cmn_get_context + * Gets the NSS core context for the DTLS session. + * + * @return + * Pointer to the NSS core context. 
+ */ +extern struct nss_ctx_instance *nss_dtls_cmn_get_context(void); + +/** + * nss_dtls_cmn_get_ifnum + * Gets the DTLS interface number with a core ID. + * + * @param[in] if_num NSS interface number. + * + * @return + * Interface number with the core ID. + */ +extern int32_t nss_dtls_cmn_get_ifnum(int32_t if_num); + +/** + * nss_dtls_cmn_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_dtls_cmn_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_dtls_cmn_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_dtls_cmn_stats_register_notifier(struct notifier_block *nb); + +/** + * @} + */ + +#endif /* __KERNEL__ */ +#endif /* _NSS_DTLS_CMN_H_. */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_dynamic_interface.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_dynamic_interface.h new file mode 100644 index 000000000..7595af151 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_dynamic_interface.h @@ -0,0 +1,343 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * @file nss_dynamic_interface.h + * NSS Dynamic interface definitions. + */ + +#ifndef __NSS_DYNAMIC_INTERFACE_H +#define __NSS_DYNAMIC_INTERFACE_H + +/** + * @addtogroup nss_dynamic_interface_subsystem + * @{ + */ + +#define NSS_MAX_DYNAMIC_INTERFACES 128 /**< Maximum number of dynamic interfaces. */ + +/** + * nss_dynamic_interface_type + * Dynamic interface types. + * + * @note + * Every time a new dynamic interface type is added to an enumeration in the following list, + * a corresponding type name string should be added in the dynamic interface type string array. + */ +enum nss_dynamic_interface_type { + NSS_DYNAMIC_INTERFACE_TYPE_NONE, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR, + NSS_DYNAMIC_INTERFACE_TYPE_RESERVED_5, + NSS_DYNAMIC_INTERFACE_TYPE_TUNIPIP6_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_TUNIPIP6_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_RESERVED, + NSS_DYNAMIC_INTERFACE_TYPE_VAP, + NSS_DYNAMIC_INTERFACE_TYPE_RESERVED_0, + NSS_DYNAMIC_INTERFACE_TYPE_PPPOE, + NSS_DYNAMIC_INTERFACE_TYPE_VIRTIF_DEPRECATED, + NSS_DYNAMIC_INTERFACE_TYPE_L2TPV2, + NSS_DYNAMIC_INTERFACE_TYPE_RESERVED_4, + NSS_DYNAMIC_INTERFACE_TYPE_PORTID, + NSS_DYNAMIC_INTERFACE_TYPE_DTLS, + NSS_DYNAMIC_INTERFACE_TYPE_QVPN_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_QVPN_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_BRIDGE, + NSS_DYNAMIC_INTERFACE_TYPE_VLAN, + NSS_DYNAMIC_INTERFACE_TYPE_RESERVED_3, + NSS_DYNAMIC_INTERFACE_TYPE_WIFILI_INTERNAL, + NSS_DYNAMIC_INTERFACE_TYPE_MAP_T_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_MAP_T_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INNER, + 
NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_HOST_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_OFFL_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_SJACK_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INLINE_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INLINE_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_N2H, + NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_H2N, + NSS_DYNAMIC_INTERFACE_TYPE_TUN6RD_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_TUN6RD_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INNER_EXCEPTION, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_US, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_DS, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_GRE_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_PPTP_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_PPTP_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_PPTP_HOST_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_MDATA_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_MDATA_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_REDIRECT, + NSS_DYNAMIC_INTERFACE_TYPE_PVXLAN_HOST_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_PVXLAN_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_IGS, + NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US, + NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS, + NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_MATCH, + NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_N2H, + NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_H2N, + NSS_DYNAMIC_INTERFACE_TYPE_WIFILI_EXTERNAL0, + NSS_DYNAMIC_INTERFACE_TYPE_WIFILI_EXTERNAL1, + NSS_DYNAMIC_INTERFACE_TYPE_TLS_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_TLS_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_MIRROR, + 
NSS_DYNAMIC_INTERFACE_TYPE_WIFI_EXT_VDEV_WDS, + NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_HOST_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_WIFI_EXT_VDEV_VLAN, + NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_INNER, + NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_OUTER, + NSS_DYNAMIC_INTERFACE_TYPE_MAX +}; + +typedef enum nss_dynamic_interface_type nss_dynamic_interface_assigned; + +/** + * nss_dynamic_interface_message_types + * Message types for dynamic interface requests. + */ +enum nss_dynamic_interface_message_types { + NSS_DYNAMIC_INTERFACE_ALLOC_NODE, + NSS_DYNAMIC_INTERFACE_DEALLOC_NODE, + NSS_DYNAMIC_INTERFACE_MAX, +}; + +/** + * nss_dynamic_interface_error_types + * Error types for dynamic interface requests. + */ +enum nss_dynamic_interface_error_types { + NSS_DYNAMIC_INTERFACE_ERR_EUNKNOWN = 1, + NSS_DYNAMIC_INTERFACE_ERR_EUNAVAIL, + NSS_DYNAMIC_INTERFACE_ERR_INVALID_TYPE, + NSS_DYNAMIC_INTERFACE_ERR_INVALID_INTERFACE_NUM, + NSS_DYNAMIC_INTERFACE_ERR_ALLOC_FUNC_UNAVAILABLE, + NSS_DYNAMIC_INTERFACE_ERR_DEALLOC_FUNC_UNAVAILABLE, + NSS_DYNAMIC_INTERFACE_ERR_EALLOC, + NSS_DYNAMIC_INTERFACE_ERR_IFNUM_TYPE_MISMATCH, + NSS_DYNAMIC_INTERFACE_ERR_MAX, +}; + +/** + * nss_dynamic_interface_stats_notification + * Dynamic interface statistics structure. + */ +struct nss_dynamic_interface_notification { + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Dynamic interface number. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ +/** + * nss_dynamic_interface_alloc_node_msg + * Message information for a dynamic interface allocation node. + */ +struct nss_dynamic_interface_alloc_node_msg { + enum nss_dynamic_interface_type type; /**< Type of dynamic interface. */ + + /* + * Response. + */ + int if_num; /**< Dynamic interface number. */ +}; + +/** + * nss_dynamic_interface_dealloc_node_msg + * Message information for dynamic interface deallocation node. 
+ */ +struct nss_dynamic_interface_dealloc_node_msg { + enum nss_dynamic_interface_type type; + /**< Type of dynamic interface. */ + int if_num; /**< Dynamic interface number. */ +}; + +/** + * nss_dynamic_interface_msg + * Data for sending and receiving dynamic interface messages. + */ +struct nss_dynamic_interface_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a dynamic interface message. + */ + union { + struct nss_dynamic_interface_alloc_node_msg alloc_node; + /**< Allocates a dynamic node. */ + struct nss_dynamic_interface_dealloc_node_msg dealloc_node; + /**< Deallocates a dynamic node. */ + } msg; /**< Message payload. */ +}; + +/** + * nss_dynamic_interface_alloc_node + * Allocates a node for a dynamic interface. + * + * @datatypes + * nss_dynamic_interface_type + * + * @param[in] type Type of dynamic interface. + * + * @return + * Number for the dynamic interface created. + * @par + * Otherwise, -1 for a failure. + */ +extern int nss_dynamic_interface_alloc_node(enum nss_dynamic_interface_type type); + +/** + * nss_dynamic_interface_dealloc_node + * Deallocates a node created for a dynamic interface on the NSS. + * + * @datatypes + * nss_dynamic_interface_type + * + * @param[in] if_num Dynamic interface number. + * @param[in] type Type of dynamic interface. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_dynamic_interface_dealloc_node(int if_num, enum nss_dynamic_interface_type type); + +/** + * nss_is_dynamic_interface + * Specifies whether the interface number belongs to the dynamic interface. + * + * @param[in] if_num Dynamic interface number. + * + * @return + * TRUE or FALSE + */ +extern bool nss_is_dynamic_interface(int if_num); + +/** + * nss_dynamic_interface_get_nss_ctx_by_type + * Returns NSS context corresponding to the dynamic interface type. + * + * @datatypes + * nss_dynamic_interface_type + * + * @param[in] type Type of dynamic interface. 
+ * + * @return + * Pointer to the NSS context. + */ +extern struct nss_ctx_instance *nss_dynamic_interface_get_nss_ctx_by_type(enum nss_dynamic_interface_type type); + +/** + * nss_dynamic_interface_get_type + * Returns the type of dynamic interface. + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num Interface number of dynamic interface. + * + * @return + * Type of dynamic interface per the dynamic interface number. + */ +extern enum nss_dynamic_interface_type nss_dynamic_interface_get_type(struct nss_ctx_instance *nss_ctx, int if_num); + +/** + * nss_dynamic_interface_tx + * Transmits an asynchronous message to the firmware. + * + * @datatypes + * nss_ctx_instance \n + * nss_dynamic_interface_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the transmit operation. + */ +extern nss_tx_status_t nss_dynamic_interface_tx(struct nss_ctx_instance *nss_ctx, struct nss_dynamic_interface_msg *msg); + +/** + * Callback function for dynamic interface messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_dynamic_interface_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * nss_dynamic_interface_msg_init + * Initializes a dynamic interface message. + * + * @datatypes + * nss_dynamic_interface_msg + * + * @param[in] ndm Pointer to the dynamic interface message. + * @param[in] if_num Dynamic interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context that is passed to the callback function. + * + * @return + * None. 
+ */ +void nss_dynamic_interface_msg_init(struct nss_dynamic_interface_msg *ndm, uint16_t if_num, uint32_t type, uint32_t len, + void *cb, void *app_data); + +/** + * nss_dynamic_interface_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_dynamic_interface_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_dynamic_interface_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_dynamic_interface_stats_unregister_notifier(struct notifier_block *nb); +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_DYNAMIC_INTERFACE_H*/ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_edma.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_edma.h new file mode 100644 index 000000000..d5cecf79a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_edma.h @@ -0,0 +1,375 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_edma.h + * NSS EDMA interface definitions. + */ + +#ifndef __NSS_EDMA_H +#define __NSS_EDMA_H + +/** + * @addtogroup nss_edma_subsystem + * @{ + */ + +/* + * NSS EDMA port and ring defines + */ +#define NSS_EDMA_NUM_PORTS_MAX 256 + /**< Maximum number of EDMA ports. */ +#define NSS_EDMA_NUM_RX_RING_MAX 16 + /**< Maximum number of physical EDMA Rx rings. */ +#define NSS_EDMA_NUM_RXFILL_RING_MAX 8 + /**< Maximum number of physical EDMA Rx fill rings. */ +#define NSS_EDMA_NUM_TX_RING_MAX 24 + /**< Maximum number of physical EDMA Tx rings. */ +#define NSS_EDMA_NUM_TXCMPL_RING_MAX 8 + /**< Maximum number of physical EDMA Tx complete rings. */ +#define NSS_EDMA_STATS_MSG_MAX_PORTS 16 + /**< Maximum ports processed per statistics message. */ + +/** + * nss_edma_metadata_types + * Message types for EDMA requests and responses. + */ +enum nss_edma_metadata_types { + NSS_METADATA_TYPE_EDMA_PORT_STATS_SYNC, + NSS_METADATA_TYPE_EDMA_RING_STATS_SYNC, + NSS_METADATA_TYPE_EDMA_ERR_STATS_SYNC, + NSS_METADATA_TYPE_EDMA_MAX +}; + +/** + * nss_edma_port_t + * EDMA port types. + */ +enum nss_edma_port_t { + NSS_EDMA_PORT_PHYSICAL, + NSS_EDMA_PORT_VIRTUAL, + NSS_EDMA_PORT_TYPE_MAX +}; + +/** + * nss_edma_stats_tx_t + * Types of EDMA Tx ring statistics. + */ +enum nss_edma_stats_tx_t { + NSS_EDMA_STATS_TX_ERR, + NSS_EDMA_STATS_TX_DROPPED, + NSS_EDMA_STATS_TX_DESC, + NSS_EDMA_STATS_TX_MAX +}; + +/** + * nss_edma_stats_rx_t + * Types of EDMA Rx ring statistics. 
+ */ +enum nss_edma_stats_rx_t { + NSS_EDMA_STATS_RX_CSUM_ERR, + NSS_EDMA_STATS_RX_DESC, + NSS_EDMA_STATS_RX_QOS_ERR, + NSS_EDMA_STATS_RX_SRC_PORT_INVALID, + NSS_EDMA_STATS_RX_SRC_IF_INVALID, + NSS_EDMA_STATS_RX_MAX +}; + +/** + * nss_edma_stats_txcmpl_t + * Types of EDMA Tx complete statistics. + */ +enum nss_edma_stats_txcmpl_t { + NSS_EDMA_STATS_TXCMPL_DESC, + NSS_EDMA_STATS_TXCMPL_MAX +}; + +/** + * nss_edma_stats_rxfill_t + * Types of EDMA Rx fill statistics. + */ +enum nss_edma_stats_rxfill_t { + NSS_EDMA_STATS_RXFILL_DESC, + NSS_EDMA_STATS_RXFILL_MAX +}; + +/** + * nss_edma_port_ring_map_t + * Port to EDMA ring map. + */ +enum nss_edma_port_ring_map_t { + NSS_EDMA_PORT_RX_RING, + NSS_EDMA_PORT_TX_RING, + NSS_EDMA_PORT_RING_MAP_MAX +}; + +/** + * nss_edma_err_t + * Types of EDMA error statistics. + */ +enum nss_edma_err_t { + NSS_EDMA_AXI_RD_ERR, + NSS_EDMA_AXI_WR_ERR, + NSS_EDMA_RX_DESC_FIFO_FULL_ERR, + NSS_EDMA_RX_BUF_SIZE_ERR, + NSS_EDMA_TX_SRAM_FULL_ERR, + NSS_EDMA_TX_CMPL_BUF_FULL_ERR, + NSS_EDMA_PKT_LEN_LA64K_ERR, + NSS_EDMA_PKT_LEN_LE33_ERR, + NSS_EDMA_DATA_LEN_ERR, + NSS_EDMA_ALLOC_FAIL_CNT, + NSS_EDMA_QOS_INVAL_DST_DROPS, + NSS_EDMA_ERR_STATS_MAX +}; + +/** + * nss_edma_rx_ring_stats + * EDMA Rx ring statistics. + */ +struct nss_edma_rx_ring_stats { + uint32_t rx_csum_err; /**< Number of Rx checksum errors. */ + uint32_t desc_cnt; /**< Number of descriptors processed. */ + uint32_t qos_err; /**< Number of QoS errors. */ + uint32_t rx_src_port_invalid; /**< Number of source port invalid errors. */ + uint32_t rx_src_if_invalid; /**< Number of source interface invalid errors. */ +}; + +/** + * nss_edma_tx_ring_stats + * EDMA Tx ring statistics. + */ +struct nss_edma_tx_ring_stats { + uint32_t tx_err; /**< Number of Tx errors. */ + uint32_t tx_dropped; /**< Number of Tx dropped packets. */ + uint32_t desc_cnt; /**< Number of descriptors processed. */ +}; + +/** + * nss_edma_rxfill_ring_stats + * EDMA Rx fill ring statistics. 
+ */ +struct nss_edma_rxfill_ring_stats { + uint32_t desc_cnt; /**< Number of descriptors processed. */ +}; + +/** + * nss_edma_txcmpl_ring_stats + * EDMA Tx complete ring statistics. + */ +struct nss_edma_txcmpl_ring_stats { + uint32_t desc_cnt; /**< Number of descriptors processed. */ +}; + +/** + * nss_edma_port_stats + * Statistics for each EDMA port. + */ +struct nss_edma_port_stats { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + enum nss_edma_port_t port_type; /**< Type of port. */ + uint16_t edma_rx_ring; /**< Rx ring statistics. */ + uint16_t edma_tx_ring; /**< Tx ring statistics. */ +}; + +/** + * nss_edma_port_stats_sync + * Statistics for a group of EDMA ports. + */ +struct nss_edma_port_stats_sync { + uint16_t start_port; /**< Starting index of the subset. */ + uint16_t end_port; /**< Ending index of the subset. */ + struct nss_edma_port_stats port_stats[]; + /**< Subset of EDMA port statistics. */ +}; + +/** + * nss_edma_ring_stats_sync + * EDMA ring statistics. + */ +struct nss_edma_ring_stats_sync { + struct nss_edma_tx_ring_stats tx_ring[NSS_EDMA_NUM_TX_RING_MAX]; + /**< EDMA Tx ring statistics. */ + struct nss_edma_rx_ring_stats rx_ring[NSS_EDMA_NUM_RX_RING_MAX]; + /**< EDMA Rx ring statistics. */ + struct nss_edma_txcmpl_ring_stats txcmpl_ring[NSS_EDMA_NUM_TXCMPL_RING_MAX]; + /**< EDMA Tx complete ring statistics. */ + struct nss_edma_rxfill_ring_stats rxfill_ring[NSS_EDMA_NUM_RXFILL_RING_MAX]; + /**< EDMA Rx fill ring statistics. */ +}; + +/** + * nss_edma_misc_err_stats + * EDMA error statistics. + */ +struct nss_edma_misc_err_stats { + uint32_t axi_rd_err; /**< EDMA AXI read error. */ + uint32_t axi_wr_err; /**< EDMA AXI write error. */ + uint32_t rx_desc_fifo_full_err; /**< EDMA receive descriptor FIFO full error. */ + uint32_t rx_buf_size_err; /**< EDMA receive buffer size error. */ + uint32_t tx_sram_full_err; /**< EDMA transmit SRAM full error. 
*/ + uint32_t tx_cmpl_buf_full_err; /**< EDMA transmit completion buffer full error. */ + uint32_t pkt_len_la64k_err; /**< EDMA packet length greater than 64k error. */ + uint32_t pkt_len_le33_err; /**< EDMA packet length smaller than 33b error. */ + uint32_t data_len_err; /**< EDMA data length error. */ + uint32_t alloc_fail_cnt; /**< EDMA number of times the allocation of pbuf for statistics failed. */ + uint32_t qos_inval_dst_drops; /**< EDMA number of QoS packet dropped due to invalid destination. */ +}; + +/** + * nss_edma_err_stats_sync + * Message for error statistics. + */ +struct nss_edma_err_stats_sync { + struct nss_edma_misc_err_stats msg_err_stats; /**< Message for error statistics. */ +}; + +/** + * nss_edma_msg + * Data for sending and receiving EDMA messages (to synchronize with + * the firmware EDMA). + */ +struct nss_edma_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of an EDMA message. + */ + union { + struct nss_edma_port_stats_sync port_stats; + /**< EDMA port statistics message payload. */ + struct nss_edma_ring_stats_sync ring_stats; + /**< EDMA ring statistics message payload. */ + struct nss_edma_err_stats_sync err_stats; + /**< EDMA error statistics message payload. */ + } msg; /**< EDMA message payload. */ +}; + +/** + * nss_edma_port_info + * NSS EDMA port statistics. + */ +struct nss_edma_port_info { + uint64_t port_stats[NSS_STATS_NODE_MAX]; /**< EDMA port statistics. */ + uint64_t port_type; /**< EDMA port type. */ + uint64_t port_ring_map[NSS_EDMA_PORT_RING_MAP_MAX]; /**< EDMA ring statistics. */ +}; + +/** + * nss_edma_stats + * NSS EDMA node statistics. + */ +struct nss_edma_stats { + struct nss_edma_port_info port[NSS_EDMA_NUM_PORTS_MAX]; + /**< EDMA port statistics. */ + uint64_t tx_stats[NSS_EDMA_NUM_TX_RING_MAX][NSS_EDMA_STATS_TX_MAX]; + /**< Physical EDMA Tx ring statistics. */ + uint64_t rx_stats[NSS_EDMA_NUM_RX_RING_MAX][NSS_EDMA_STATS_RX_MAX]; + /**< Physical EDMA Rx ring statistics. 
*/ + uint64_t txcmpl_stats[NSS_EDMA_NUM_TXCMPL_RING_MAX][NSS_EDMA_STATS_TXCMPL_MAX]; + /**< Physical EDMA Tx complete statistics. */ + uint64_t rxfill_stats[NSS_EDMA_NUM_RXFILL_RING_MAX][NSS_EDMA_STATS_RXFILL_MAX]; + /**< Physical EDMA Rx fill statistics. */ + uint64_t misc_err[NSS_EDMA_ERR_STATS_MAX]; + /**< EDMA error complete statistics. */ +}; + +#ifdef __KERNEL__ + +/** + * Callback function for receiving EDMA messages. + * + * @datatypes + * nss_edma_msg + * + * @param[in] app_data Pointer to the application context for this message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_edma_msg_callback_t)(void *app_data, struct nss_edma_msg *msg); + +/** + * nss_edma_notify_register + * Registers a callback notifier with the NSS for sending and receiving messages. + * + * @datatypes + * nss_edma_msg_callback_t + * + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context for this message. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_edma_notify_register(nss_edma_msg_callback_t cb, void *app_data); + +/** + * nss_edma_notify_unregister + * Deregisters a callback notifier from the NSS. + * + * @return + * None. + * + * @dependencies + * The callback notifier must have been previously registered. + */ +extern void nss_edma_notify_unregister(void); + +/** + * nss_edma_get_stats + * Sends EDMA statistics to NSS clients. + * + * @param[in] stats EDMA statistics to be sent to Netlink. + * @param[in] port_id EDMA port ID. + * + * @return + * None. + */ +void nss_edma_get_stats(uint64_t *stats, int port_id); + +/** + * nss_edma_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. 
+ */ +extern int nss_edma_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_edma_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_edma_stats_unregister_notifier(struct notifier_block *nb); + +#endif /* __KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_EDMA_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_eth_rx.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_eth_rx.h new file mode 100644 index 000000000..90f5a5381 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_eth_rx.h @@ -0,0 +1,100 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_eth_rx.h + * NSS Ethernet interface definitions. + */ + +#ifndef __NSS_ETH_RX_H +#define __NSS_ETH_RX_H + +/** + * @addtogroup nss_eth_rx_subsystem + * @{ + */ + +/** + * nss_eth_rx_stats + * Ethernet node statistics. 
+ */ +enum nss_eth_rx_stats { + NSS_ETH_RX_STATS_TOTAL_TICKS, /**< Total clock ticks spent inside the Ethernet package. */ + NSS_ETH_RX_STATS_WORST_CASE_TICKS, /**< Worst case iteration of the Ethernet in ticks. */ + NSS_ETH_RX_STATS_ITERATIONS, /**< Number of iterations around Ethernet. */ + NSS_ETH_RX_STATS_MAX, /**< Maximum message type. */ +}; + +/** + * nss_eth_rx_exception_events + * Exception events from bridge or route handler. + */ +enum nss_eth_rx_exception_events { + NSS_ETH_RX_EXCEPTION_EVENT_UNKNOWN_L3_PROTOCOL, + NSS_ETH_RX_EXCEPTION_EVENT_ETH_HDR_MISSING, + NSS_ETH_RX_EXCEPTION_EVENT_VLAN_MISSING, + NSS_ETH_RX_EXCEPTION_EVENT_TRUSTSEC_HDR_MISSING, + NSS_ETH_RX_EXCEPTION_EVENT_MAX, +}; + +/** + * nss_eth_rx_stats_notification + * Data for sending Ethernet statistics. + */ +struct nss_eth_rx_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint64_t cmn_node_stats[NSS_STATS_NODE_MAX]; /**< Node statistics. */ + uint64_t special_stats[NSS_ETH_RX_STATS_MAX]; /**< Special statistics. */ + uint64_t exception_stats[NSS_ETH_RX_EXCEPTION_EVENT_MAX]; /**< Exception statistics. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ +/** + * nss_eth_rx_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_eth_rx_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_eth_rx_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. 
+ */ +extern int nss_eth_rx_stats_unregister_notifier(struct notifier_block *nb); +#endif /*__KERNEL__ */ + +/** + *@} + */ + +#endif /* __NSS_ETH_RX_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_freq.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_freq.h new file mode 100644 index 000000000..6ce11b1b8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_freq.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * @file nss_freq.h + * NSS frequency definitions. + */ + +#ifndef __NSS_FREQ_H +#define __NSS_FREQ_H + +/** + * @addtogroup nss_freq_subsystem + * @{ + */ + +/** + * nss_freq_change + * Changes the frequency of the NSS cores. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] eng Frequency value in Hz. + * @param[in] stats_enable Enable NSS to send scaling statistics. + * @param[in] start_or_end Start or end of the frequency change. + * + * @return + * Status of the Tx operation. 
+ */ +nss_tx_status_t nss_freq_change(struct nss_ctx_instance *nss_ctx, uint32_t eng, uint32_t stats_enable, uint32_t start_or_end); + +/** + * nss_freq_get_cpu_usage + * Returns the CPU usage value in percentage at any instance for a required core. Range of usage is 0-100. + * + * @param[in] core_id NSS Core ID. + * + * @return + * CPU usage value in percentage averaged over 1 second. -1 in case of error. + * @note + * This API does not support gathering CPU usage data for core 1. + */ +extern int8_t nss_freq_get_cpu_usage(uint32_t core_id); + +/** + * @} + */ + +#endif /* __NSS_FREQ_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre.h new file mode 100644 index 000000000..986166835 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre.h @@ -0,0 +1,494 @@ +/* + **************************************************************************** + * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + **************************************************************************** + */ + +/** + * @file nss_gre.h + * NSS GRE interface definitions. 
+ */ +#ifndef _NSS_GRE_H_ +#define _NSS_GRE_H_ + +#include +#include + +/** + * @addtogroup nss_gre_subsystem + * @{ + */ + +/** + * Maximum number of session debug statistics + */ +#define NSS_GRE_MAX_DEBUG_SESSION_STATS 16 + +/** + * GRE flags + */ +#define NSS_GRE_CONFIG_IKEY_VALID 0x00000001 /**< Incoming key of GRE header. */ +#define NSS_GRE_CONFIG_OKEY_VALID 0x00000002 /**< Key for outgoing GRE header. */ +#define NSS_GRE_CONFIG_ISEQ_VALID 0x00000004 /**< Enable sequence checking for incoming GRE traffic. */ +#define NSS_GRE_CONFIG_OSEQ_VALID 0x00000008 /**< Add sequence number for out going GRE packets. */ +#define NSS_GRE_CONFIG_ICSUM_VALID 0x00000010 /**< Validate incoming GRE header checksum. */ +#define NSS_GRE_CONFIG_OCSUM_VALID 0x00000020 /**< Add checksum header to GRE header. */ +#define NSS_GRE_CONFIG_TOS_INHERIT 0x00000040 /**< Inherit inner IP TOS to tunnel header, if not set configure provided TOS. */ +#define NSS_GRE_CONFIG_TTL_INHERIT 0x00000080 /**< Inherit inner IP TTL to tunnel header, if not set configure provided TTL. */ +#define NSS_GRE_CONFIG_SET_DF 0x00000100 /**< Enable DF bit on tunnel IP header. */ +#define NSS_GRE_CONFIG_SET_MAC 0x00000200 /**< Add MAC header to GRE+IP tunnel header. */ +#define NSS_GRE_CONFIG_SET_PADDING 0x00000400 /**< Add PADDING to align tunnel IP/GRE header. */ +#define NSS_GRE_CONFIG_NEXT_NODE_AVAILABLE 0x00000800 /**< Use provided next node instead of existing next node. */ +#define NSS_GRE_CONFIG_COPY_METADATA 0x00001000 /**< Enable metadata copy in NSS during alignment. */ +#define NSS_GRE_CONFIG_USE_UNALIGNED 0x00002000 /**< Use unaligned infrastructure in NSS. */ +#define NSS_GRE_CONFIG_DSCP_VALID 0x00004000 /**< Add DSCP per packet. */ + +/** + * nss_gre_error_types. + * Error types for GRE configuration messages. + */ +enum nss_gre_error_types { + NSS_GRE_ERR_UNKNOWN_MSG = 1, /**< Unknown message. */ + NSS_GRE_ERR_IF_INVALID = 2, /**< Invalid interface. 
*/ + NSS_GRE_ERR_MODE_INVALID = 3, /**< Invalid mode type. */ + NSS_GRE_ERR_IP_INVALID = 4, /**< Invalid IP type. */ + NSS_GRE_ERR_GRE_SESSION_PARAMS_INVALID = 5, /**< Invalid GRE session parameters provided. */ + NSS_GRE_ERR_DSCP_CFG_INVALID = 6, /**< Both TOS and DSCP flags are enabled. */ + NSS_GRE_ERR_MAX, /**< Maximum GRE error. */ +}; + +/** + * nss_gre_info + * GRE private information. + */ +struct nss_gre_info { + /** + * Union of IPv4/IPv6 tunnel. + */ + union { + struct ip_tunnel t4; /**< IPv4 tunnel. */ + struct ip6_tnl t6; /**< IPv6 tunnel. */ + } t; /**< IPv4 and IPv6 tunnel. */ + int nss_if_number_inner; /**< NSS interface number for GRE inner. */ + struct net_device *next_dev_inner; /**< Next network device for inner flow. */ + struct net_device *next_dev_outer; /**< Next network device for outer flow. */ + uint8_t gre_hlen; /**< GRE header length. */ + uint8_t pad_len; /**< Pad length. */ +}; + +/** + * nss_gre_msg_types + * Message types for GRE requests and responses. + */ +enum nss_gre_msg_types { + NSS_GRE_MSG_ENCAP_CONFIGURE = NSS_IF_MAX_MSG_TYPES + 1, + NSS_GRE_MSG_DECAP_CONFIGURE, + NSS_GRE_MSG_ENCAP_DECONFIGURE, + NSS_GRE_MSG_DECAP_DECONFIGURE, + NSS_GRE_MSG_SESSION_STATS, + NSS_GRE_MSG_BASE_STATS, + NSS_GRE_MSG_MAX +}; + +/** + * GRE Mode Types + */ +enum nss_gre_mode { + NSS_GRE_MODE_TUN, /**< GRE Tunnel interface. */ + NSS_GRE_MODE_TAP, /**< GRE Tap interface. */ + NSS_GRE_MODE_MAX /**< Maxmum GRE mode. */ +}; + +/** + * GRE IP Types + */ +enum nss_gre_ip_types { + NSS_GRE_IP_IPV4, /**< Outer Tunnel is IPV4. */ + NSS_GRE_IP_IPV6, /**< Outer Tunnel is IPV6. */ + NSS_GRE_IP_MAX, /**< Maximum IP Types. */ +}; + +/** + * nss_gre_base_types + * GRE base debug statistics. + */ +enum nss_gre_base_types { + NSS_GRE_BASE_RX_PACKETS, /**< Receive packet count. */ + NSS_GRE_BASE_RX_DROPPED, /**< Number of packet dropped at receive. */ + NSS_GRE_BASE_EXP_ETH_HDR_MISSING, /**< Ethernet header missing. 
*/ + NSS_GRE_BASE_EXP_ETH_TYPE_NON_IP, /**< Packet is not IPV4 or IPV6. */ + NSS_GRE_BASE_EXP_IP_UNKNOWN_PROTOCOL, /**< Packet protocol is unknown. */ + NSS_GRE_BASE_EXP_IP_HEADER_INCOMPLETE, /**< Bad IP header. */ + NSS_GRE_BASE_EXP_IP_BAD_TOTAL_LENGTH, /**< IP total length is invalid. */ + NSS_GRE_BASE_EXP_IP_BAD_CHECKSUM, /**< IP checksum is bad. */ + NSS_GRE_BASE_EXP_IP_DATAGRAM_INCOMPLETE,/**< Bad packet. */ + NSS_GRE_BASE_EXP_IP_FRAGMENT, /**< IP packet is a fragment. */ + NSS_GRE_BASE_EXP_IP_OPTIONS_INCOMPLETE, /**< IP option is invalid. */ + NSS_GRE_BASE_EXP_IP_WITH_OPTIONS, /**< IP packet with options. */ + NSS_GRE_BASE_EXP_IPV6_UNKNOWN_PROTOCOL, /**< Protocol is unknown. */ + NSS_GRE_BASE_EXP_IPV6_HEADER_INCOMPLETE,/**< Incomplete ipv6 header. */ + NSS_GRE_BASE_EXP_GRE_UNKNOWN_SESSION, /**< Unknown GRE session. */ + NSS_GRE_BASE_EXP_GRE_NODE_INACTIVE, /**< GRE node is inactive. */ + NSS_GRE_BASE_DEBUG_MAX, /**< GRE base debug maximum. */ +}; + +/** + * nss_gre_session_types + * GRE session packet drop and exception events. + */ +enum nss_gre_session_types { + NSS_GRE_SESSION_PBUF_ALLOC_FAIL, /**< Pbuf allocation failure. */ + NSS_GRE_SESSION_DECAP_FORWARD_ENQUEUE_FAIL, /**< Receive forward enqueue failure. */ + NSS_GRE_SESSION_ENCAP_FORWARD_ENQUEUE_FAIL, /**< Transmit forward enqueue failure. */ + NSS_GRE_SESSION_DECAP_TX_FORWARDED, /**< Number of packets forwarded after decapsulation. */ + NSS_GRE_SESSION_ENCAP_RX_RECEIVED, /**< Number of packets received for encapsulation. */ + NSS_GRE_SESSION_ENCAP_RX_DROPPED, /**< Packets dropped while enqueuing for encapsulation. */ + NSS_GRE_SESSION_ENCAP_RX_LINEAR_FAIL, /**< Packets dropped during encapsulation linearization. */ + NSS_GRE_SESSION_EXP_RX_KEY_ERROR, /**< Receive key error. */ + NSS_GRE_SESSION_EXP_RX_SEQ_ERROR, /**< Receive Sequence number error. */ + NSS_GRE_SESSION_EXP_RX_CS_ERROR, /**< Receive checksum error */ + NSS_GRE_SESSION_EXP_RX_FLAG_MISMATCH, /**< Receive flag mismatch. 
*/ + NSS_GRE_SESSION_EXP_RX_MALFORMED, /**< Receive packet is malformed. */ + NSS_GRE_SESSION_EXP_RX_INVALID_PROTOCOL, /**< Receive packet protocol is invalid. */ + NSS_GRE_SESSION_EXP_RX_NO_HEADROOM, /**< Packet does not have enough headroom. */ + NSS_GRE_SESSION_DEBUG_MAX, /**< Session debug maximum. */ +}; + +/** + * GRE create message structure. + */ +struct nss_gre_config_msg { + uint32_t src_ip[4]; /**< Source IPv4 or IPv6 Adddress. */ + uint32_t dest_ip[4]; /**< Destination IPv4 or IPv6 Adddress. */ + uint32_t flags; /**< GRE Flags. */ + uint32_t ikey; /**< GRE rx KEY.*/ + uint32_t okey; /**< GRE tx KEY. */ + uint32_t mode; /**< GRE TUN or TAP. */ + uint32_t ip_type; /**< IPv4 or IPv6 type. */ + uint32_t next_node_if_num; /**< To whom to forward packets. */ + uint32_t sibling_if_num; /**< Sibling interface number. */ + uint16_t src_mac[3]; /**< Source MAC address. */ + uint16_t dest_mac[3]; /**< Destination MAC address. */ + uint8_t ttl; /**< TTL or HOPLIMIT. */ + uint8_t tos; /**< Type of service. */ + uint16_t metadata_size; /**< Metadata copy size. */ +}; + +/** + * GRE link up message structure + */ +struct nss_gre_linkup_msg { + int if_number; /**< Interface number. */ +}; + +/** + * GRE link down message structure + */ +struct nss_gre_linkdown_msg { + int if_number; /**< Interface number. */ +}; + +/** + * GRE deconfig message structure + */ +struct nss_gre_deconfig_msg { + int if_number; /**< Interface number */ +}; + +/** + * GRE session statistics message + */ +struct nss_gre_session_stats_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t stats[NSS_GRE_SESSION_DEBUG_MAX]; /**< Session debug statistics. */ +}; + +/** + * GRE base statistics message + */ +struct nss_gre_base_stats_msg { + uint32_t stats[NSS_GRE_BASE_DEBUG_MAX]; /**< Base debug statistics. */ +}; + +/** + * nss_gre_base_stats_notification + * GRE transmission statistics structure. 
+ */ +struct nss_gre_base_stats_notification { + uint64_t stats_base_ctx[NSS_GRE_BASE_DEBUG_MAX]; /**< Base debug transmission statistics. */ + uint32_t core_id; /**< Core ID. */ +}; + +/** + * nss_gre_session_stats_notification + * GRE transmission statistics structure. + */ +struct nss_gre_session_stats_notification { + uint64_t stats_session_ctx[NSS_GRE_SESSION_DEBUG_MAX]; /**< Session debug transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +/** + * nss_gre_msg + * Message structure to send/receive GRE messages + */ +struct nss_gre_msg { + struct nss_cmn_msg cm; /**< Common message header */ + + /** + * Payload of a GRE message. + */ + union { + struct nss_gre_config_msg cmsg; /**< GRE session config message. */ + struct nss_gre_deconfig_msg dmsg; /**< GRE session deconfig message. */ + struct nss_gre_linkup_msg linkup; /**< GRE link up message. */ + struct nss_gre_linkdown_msg linkdown; /**< GRE link down message. */ + struct nss_gre_session_stats_msg sstats; /**< GRE session statistics message. */ + struct nss_gre_base_stats_msg bstats; /**< Base statistics message. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function to receive GRE messages + * + * @datatypes + * nss_gre_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_gre_msg_callback_t)(void *app_data, struct nss_gre_msg *msg); + +/** + * nss_gre_tx_msg + * Sends GRE messages to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_gre_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_msg *msg); + +/** + * nss_gre_tx_msg_sync + * Sends GRE messages to the NSS synchronously. 
+ * + * @datatypes + * nss_ctx_instance \n + * nss_gre_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_msg *msg); + +/** + * nss_gre_tx_buf + * Sends packet to the NSS + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num Nss interface number. + * @param[in] skb Pointer to sk_buff. + * + * @return Tx status + */ +extern nss_tx_status_t nss_gre_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, struct sk_buff *skb); + +/** + * nss_gre_get_context. + * Gets the GRE context used in nss_gre_tx. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_gre_get_context(void); + +/** + * + * nss_gre_ifnum_with_core_id + * Append core ID on GRE interface. + * + * @param[in] if_num NSS interface number. + * + * @return + * GRE interface number with core ID. + */ +extern int nss_gre_ifnum_with_core_id(int if_num); + +/** + * Callback function for receiving GRE session data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_gre_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_gre_register_if + * Registers the GRE interface with the NSS for sending and + * receiving messages. + * + * @datatypes + * nss_gre_data_callback_t \n + * nss_gre_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] type NSS interface type. + * @param[in] gre_callback Callback for the data. + * @param[in] msg_callback Callback for the message. 
+ * @param[in] netdev Pointer to the associated network device. + * @param[in] features Socket buffer types supported by this interface. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_gre_register_if(uint32_t if_num, uint32_t type, nss_gre_data_callback_t gre_callback, + nss_gre_msg_callback_t msg_callback, struct net_device *netdev, uint32_t features); + +/** + * nss_gre_unregister_if + * Deregisters the GRE interface from the NSS. + * + * @param[in] if_num NSS interface number. +. * + * @return + * None. + * + * @dependencies + * The tunnel interface must have been previously registered. + */ +extern void nss_gre_unregister_if(uint32_t if_num); + +/** + * nss_gre_msg_init + * Initializes a GRE message. + * + * @datatypes + * nss_gre_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num Interface number + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_gre_msg_init(struct nss_gre_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** + * nss_gre_register_handler + * Registers the GRE interface with the NSS debug statistics handler. + * + * @return + * None. + */ +extern void nss_gre_register_handler(void); + +/** + * Callback function for updating stats. + * + * @datatypes + * net_device \n + * sk_buff \n + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * + * @return + * None. + */ +typedef void (*nss_gre_pkt_callback_t)(struct net_device *netdev, struct sk_buff *skb); + +/** + * nss_gre_register_pkt_callback + * Register for rx packet call back. + * + * @datatypes + * nss_gre_pkt_callback_t + * + * @param[in] cb Call back function which needs to be registered. + * + * @return + * None. 
+ */ +extern void nss_gre_register_pkt_callback(nss_gre_pkt_callback_t cb); + +/** + * nss_gre_unregister_pkt_callback + * Unregister for rx packet call back. + * + * @datatypes + * nss_gre_pkt_callback_t + * + * @return + * None. + */ +extern void nss_gre_unregister_pkt_callback(void); + +/** + * nss_gre_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_gre_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_gre_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_gre_stats_register_notifier(struct notifier_block *nb); + +/** + * @} + */ + +#endif /* _NSS_GRE_H_ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir.h new file mode 100644 index 000000000..5d3150bf3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir.h @@ -0,0 +1,712 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2015, 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_gre_redir.h + * NSS GRE Redirect interface definitions. + */ + +#ifndef __NSS_GRE_REDIR_H +#define __NSS_GRE_REDIR_H + +/** + * @addtogroup nss_gre_redirect_subsystem + * @{ + */ + +#define NSS_GRE_REDIR_MAX_INTERFACES 24 /**< Maximum number of redirect interfaces. */ +#define NSS_GRE_REDIR_IP_DF_OVERRIDE_FLAG 0x80 /**< Override Do not Fragment bit in IPv4 flags. */ +#define NSS_GRE_REDIR_PER_PACKET_METADATA_OFFSET 4 /**< Offset of per packet metadata from start of packet. */ +#define NSS_GRE_REDIR_MAX_RADIO 5 /**< Maximum number of radios. */ +#define NSS_GRE_REDIR_HEADER_VERSION 0 /**< Version for GRE header. */ + +/** + * nss_gre_redir_ip_hdr_type + * IP header types. + */ +enum nss_gre_redir_ip_hdr_type { + NSS_GRE_REDIR_IP_HDR_TYPE_IPV4 = 1, + NSS_GRE_REDIR_IP_HDR_TYPE_IPV6 = 2, +}; + +/** + * nss_gre_redir_message_types + * Message types for GRE redirect requests and responses. + */ +enum nss_gre_redir_message_types { + NSS_GRE_REDIR_TX_TUNNEL_INNER_CONFIGURE_MSG, /**< Configure message for inner node. */ + NSS_GRE_REDIR_TX_TUNNEL_OUTER_CONFIGURE_MSG, /**< Configure message for outer node. */ + NSS_GRE_REDIR_TX_INTERFACE_MAP_MSG, /**< Interface map message. */ + NSS_GRE_REDIR_TX_INTERFACE_UNMAP_MSG, /**< Interface unmap message. */ + NSS_GRE_REDIR_TX_SJACK_MAP_MSG, /**< SJACK map message. */ + NSS_GRE_REDIR_TX_SJACK_UNMAP_MSG, /**< SJACK unmap message. */ + NSS_GRE_REDIR_RX_STATS_SYNC_MSG, /**< Statistics synchronization message. */ + NSS_GRE_REDIR_EXCEPTION_DS_REG_CB_MSG, /**< Configure message to register callback. 
*/ + NSS_GRE_REDIR_MAX_MSG_TYPES, /**< Maximum message type. */ +}; + +/** + * nss_gre_redir_error_types + * Error types for GRE redirect configuration messages. + */ +enum nss_gre_redir_error_types { + NSS_GRE_REDIR_ERROR_NONE, /**< Configuration successful. */ + NSS_GRE_REDIR_ERROR_UNKNOWN_MSG_TYPE, /**< Unknown configuration message type error. */ + NSS_GRE_REDIR_ERROR_INVALID_IP_HDR_TYPE, /**< Invalid IP header type error. */ + NSS_GRE_REDIR_ERROR_MAP_TABLE_FULL, /**< Map table full error. */ + NSS_GRE_REDIR_ERROR_MAP_INVALID_PARAM, /**< Invalid parameter with map message error. */ + NSS_GRE_REDIR_ERROR_UNMAP_INVALID_PARAM, /**< Invalid parameter with unmap message error. */ + NSS_GRE_REDIR_ERROR_ENCAP_MAP_EXIST, /**< Encapsulation map entry already exist. */ + NSS_GRE_REDIR_ERROR_DECAP_MAP_EXIST, /**< Decapsulation map entry already exist. */ + NSS_GRE_REDIR_ERROR_ENCAP_MAP_ALLOC_FAIL, /**< Encapsulation map entry allocation failure error. */ + NSS_GRE_REDIR_ERROR_DECAP_MAP_ALLOC_FAIL, /**< Decapsulation map entry allocation failure error. */ + NSS_GRE_REDIR_ERROR_ENCAP_ENTRY_UNMAPPED, /**< Encapsulation map entry already unmapped. */ + NSS_GRE_REDIR_ERROR_DECAP_ENTRY_UNMAPPED, /**< Decapsulation map entry already unmapped. */ + NSS_GRE_REDIR_ERROR_INVALID_ETH_IF, /**< Invalid Ethernet NSS interface. */ + NSS_GRE_REDIR_ERROR_INVALID_VAP_NEXTHOP_IF, /**< Invalid nexthop NSS interface. */ + NSS_GRE_REDIR_ERROR_INVALID_PEER_INTERFACE, /**< Invalid peer interface during tunnel configuration. */ +}; + +/** + * nss_gre_redir_tunnel_types + * GRE tunnel types. + */ +enum nss_gre_redir_tunnel_types { + NSS_GRE_REDIR_TUNNEL_TYPE_UNKNOWN, /**< Reserved. */ + NSS_GRE_REDIR_TUNNEL_TYPE_TUN, /**< Tunnel mode. */ + NSS_GRE_REDIR_TUNNEL_TYPE_DTUN, /**< D-tunnel mode. */ + NSS_GRE_REDIR_TUNNEL_TYPE_SPLIT, /**< Split mode. */ + NSS_GRE_REDIR_TUNNEL_TYPE_MAX, /**< Maximum tunnel type. */ +}; + +/** + * nss_gre_redir_stats_types + * GRE redirect statistics types. 
+ */ +enum nss_gre_redir_stats_types { + NSS_GRE_REDIR_STATS_TX_DROPS = NSS_STATS_NODE_MAX, + /**< Dropped transmit packets. */ + NSS_GRE_REDIR_STATS_SJACK_RX_PKTS, /**< SJACK receive packet counter. */ + NSS_GRE_REDIR_STATS_SJACK_TX_PKTS, /**< SJACK transmit packet counter. */ + NSS_GRE_REDIR_STATS_OFFLOAD_RX_PKTS_0, /**< Offload receive packet counter 0. */ + NSS_GRE_REDIR_STATS_OFFLOAD_RX_PKTS_1, /**< Offload receive packet counter 1. */ + NSS_GRE_REDIR_STATS_OFFLOAD_RX_PKTS_2, /**< Offload receive packet counter 2. */ + NSS_GRE_REDIR_STATS_OFFLOAD_RX_PKTS_3, /**< Offload receive packet counter 3. */ + NSS_GRE_REDIR_STATS_OFFLOAD_RX_PKTS_4, /**< Offload receive packet counter 4. */ + NSS_GRE_REDIR_STATS_OFFLOAD_TX_PKTS_0, /**< Offload transmit packet counter 0. */ + NSS_GRE_REDIR_STATS_OFFLOAD_TX_PKTS_1, /**< Offload transmit packet counter 1. */ + NSS_GRE_REDIR_STATS_OFFLOAD_TX_PKTS_2, /**< Offload transmit packet counter 2. */ + NSS_GRE_REDIR_STATS_OFFLOAD_TX_PKTS_3, /**< Offload transmit packet counter 3. */ + NSS_GRE_REDIR_STATS_OFFLOAD_TX_PKTS_4, /**< Offload transmit packet counter 4. */ + NSS_GRE_REDIR_STATS_EXCEPTION_US_RX_PKTS, + /**< Upstream exception receive packet counter. */ + NSS_GRE_REDIR_STATS_EXCEPTION_US_TX_PKTS, + /**< Upstream exception transmit packet counter. */ + NSS_GRE_REDIR_STATS_EXCEPTION_DS_RX_PKTS, + /**< Downstream exception receive packet counter. */ + NSS_GRE_REDIR_STATS_EXCEPTION_DS_TX_PKTS, + /**< Downstream exception transmit packet counter. */ + NSS_GRE_REDIR_STATS_ENCAP_SG_ALLOC_DROP, + /**< Encapsulation drop counters due to scatter gather buffer allocation failure. */ + NSS_GRE_REDIR_STATS_DECAP_FAIL_DROP, + /**< Decapsulation drop counters due to invalid IP header. */ + NSS_GRE_REDIR_STATS_DECAP_SPLIT_DROP, + /**< Decapsulation drop counters due to split flow processing. */ + NSS_GRE_REDIR_STATS_SPLIT_SG_ALLOC_FAIL, + /**< Split processing fail counter due to scatter gather buffer allocation failure. 
*/ + NSS_GRE_REDIR_STATS_SPLIT_LINEAR_COPY_FAIL, + /**< Split processing fail counter due to linear copy fail. */ + NSS_GRE_REDIR_STATS_SPLIT_NOT_ENOUGH_TAILROOM, + /**< Split processing fail counter due to insufficient tailroom. */ + NSS_GRE_REDIR_STATS_EXCEPTION_DS_INVALID_DST_DROP, + /**< Downstream exception handling fail counter due to invalid destination. */ + NSS_GRE_REDIR_STATS_DECAP_EAPOL_FRAMES, + /**< Decapsulation EAPoL frame counters. */ + NSS_GRE_REDIR_STATS_EXCEPTION_DS_INV_APPID, + /**< Invalid application ID for the transmit completion packets on exception downstream node. */ + NSS_GRE_REDIR_STATS_HEADROOM_UNAVAILABLE, + /**< Packet headroom unavailable to write metadata. */ + NSS_GRE_REDIR_STATS_TX_COMPLETION_SUCCESS, + /**< Host enqueue success count for the transmit completion packets. */ + NSS_GRE_REDIR_STATS_TX_COMPLETION_DROP, + /**< Host enqueue drop count for the transmit completion packets. */ + NSS_GRE_REDIR_STATS_MAX /**< Maximum statistics type. */ +}; + +/** + * nss_gre_redir_inner_configure_msg + * Message information for configuring GRE inner node. + */ +struct nss_gre_redir_inner_configure_msg { + uint32_t ip_hdr_type; /**< IP header type (IPv4 or IPv6). */ + + /** + * IPv4 or IPv6 source address (lower 4 bytes are applicable for IPv4). + */ + uint32_t ip_src_addr[4]; + + /** + * IPv4 or IPv6 destination address (lower 4 bytes are applicable for IPv4). + */ + uint32_t ip_dest_addr[4]; + + /** + * The host outer-interface which handles post-encapsulation exception packets + * originating from this inner interface. + */ + uint32_t except_outerif; + + uint8_t ip_df_policy; /**< Default Do Not Fragment policy for the IP header. */ + uint8_t ip_ttl; /**< Time-to-live value for the IP header. */ + uint8_t gre_version; /**< Header version. */ +}; + +/** + * nss_gre_redir_outer_configure_msg + * Message information for configuring GRE outer node. 
+ */ +struct nss_gre_redir_outer_configure_msg { + uint32_t ip_hdr_type; /**< IP header type (IPv4 or IPv6). */ + + /** + * The host inner-interface which handles post-decapsulation exception packets + * originating from this outer interface, for flows destined to a VAP handled + * by host. + */ + uint32_t except_hostif; + + /** + * The host inner-interface which handles post-decapsulation exception packets + * originating from this outer interface, for flows destined to a VAP handled + * by NSS. + */ + uint32_t except_offlif; + + /** + * The host inner-interface which handles post-decapsulation exception packets + * originating from this outer interface, for flows destined to SJACK. + */ + uint32_t except_sjackif; + + /** + * CPU core to which these packets should be steered. + * - 0 -- Use core 0 + * - 1 -- Use core 1 + * - 2 -- Use core 2 + * - 3 -- Use core 3 + */ + uint8_t rps_hint; + + /** + * Flag to indicate validity of RPS hint. + */ + uint8_t rps_hint_valid; + +}; + +/** + * nss_gre_redir_exception_ds_reg_cb_msg + * Message information to register callback on VAP for GRE exception downstream. + */ +struct nss_gre_redir_exception_ds_reg_cb_msg { + uint32_t dst_vap_nssif; /**< NSS VAP interface on which the callback is registered. */ +}; + +/** + * nss_gre_redir_interface_map_msg + * Message information for adding a VAP interface-to-tunnel ID mapping. + */ +struct nss_gre_redir_interface_map_msg { + uint32_t vap_nssif; /**< NSS VAP interface. */ + uint32_t nexthop_nssif; /**< Next hop NSS interface number. */ + uint16_t radio_id; /**< Radio ID to derive tunnel ID. */ + uint16_t vap_id; /**< VAP ID to derive tunnel ID. */ + uint16_t lag_en; /**< Flag for LAG mode. */ + uint16_t tunnel_type; /**< Type of tunnel. */ + + /** + * IPsec security association pattern. Pattern + * 0x5A is supported only. + */ + uint8_t ipsec_pattern; +}; + +/** + * nss_gre_redir_interface_unmap_msg + * Message information for deleting a VAP interface-to-tunnel ID mapping. 
+ */ +struct nss_gre_redir_interface_unmap_msg { + uint32_t vap_nssif; /**< NSS VAP interface. */ + uint16_t radio_id; /**< Radio ID to derive tunnel ID. */ + uint16_t vap_id; /**< VAP ID to derive tunnel ID. */ +}; + +/** + * nss_gre_redir_sjack_map_msg + * Message information for adding an Ethernet interface-to-tunnel ID mapping. + */ +struct nss_gre_redir_sjack_map_msg { + uint32_t eth_nssif; /**< NSS Ethernet interface number. */ + uint32_t eth_id; /**< Ethernet interface ID. */ + + /** + * IPsec security association pattern. Pattern + * 0x5A is supported only. + */ + uint8_t ipsec_pattern; +}; + +/** + * nss_gre_redir_sjack_unmap_msg + * Message information for deleting an Ethernet interface-to-tunnel ID mapping. + */ +struct nss_gre_redir_sjack_unmap_msg { + uint32_t eth_nssif; /**< NSS Ethernet interface number. */ + uint32_t eth_id; /**< Ethernet interface ID. */ +}; + +/** + * nss_gre_redir_stats_sync_msg + * Message information for synchronized GRE redirect statistics. + */ +struct nss_gre_redir_stats_sync_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t sjack_rx_packets; /**< SJACK packet counter. */ + uint32_t offl_rx_pkts[NSS_GRE_REDIR_MAX_RADIO]; /**< Offload packet counter. */ + uint32_t encap_sg_alloc_drop; /**< Encapsulation drop counters due to scatter gather buffer allocation failure. */ + uint32_t decap_fail_drop; /**< Decapsulation drop counters due to invalid IP header. */ + uint32_t decap_split_drop; /**< Decapsulation drop counters due to split flow processing. */ + uint32_t split_sg_alloc_fail; /**< Split processing fail counter due to scatter gather buffer allocation failure. */ + uint32_t split_linear_copy_fail; /**< Split processing fail counter due to linear copy fail. */ + uint32_t split_not_enough_tailroom; /**< Split processing fail counter due to insufficient tailroom. */ + uint32_t exception_ds_invalid_dst_drop; /**< Downstream exception handling fail counter due to invalid destination. 
*/ + uint32_t decap_eapol_frames; /**< Decapsulation EAPoL frame counters. */ + uint32_t exception_ds_inv_appid; /**< Invalid application ID for the Tx completion packets on exception downstream node. */ + uint32_t headroom_unavail; /**< Packet headroom unavailable to write metadata. */ + uint32_t tx_completion_success; /**< Host enqueue success count for the Tx completion packets. */ + uint32_t tx_completion_drop; /**< Host enqueue drop count for the Tx completion packets. */ +}; + +/** + * nss_gre_redir_tun_stats + * GRE redirect statistics to accumulate all the stats values. + */ +struct nss_gre_redir_tun_stats { + uint64_t rx_packets; /**< Number of packets received. */ + uint64_t rx_bytes; /**< Number of bytes received. */ + uint64_t tx_packets; /**< Number of packets transmitted. */ + uint64_t tx_bytes; /**< Number of bytes transmitted. */ + uint64_t rx_dropped[NSS_MAX_NUM_PRI]; + /**< Packets dropped on receive due to queue full. */ + uint64_t tx_dropped; /**< Dropped transmit packets. */ + uint64_t sjack_rx_packets; /**< SJACK receive packet counter. */ + uint64_t sjack_tx_packets; /**< SJACK transmit packet counter. */ + uint64_t offl_rx_pkts[NSS_GRE_REDIR_MAX_RADIO]; /**< Offload receive packet counter per radio. */ + uint64_t offl_tx_pkts[NSS_GRE_REDIR_MAX_RADIO]; /**< Offload transmit packet counter per radio. */ + uint64_t exception_us_rx; /**< Upstream exception receive packet counter. */ + uint64_t exception_us_tx; /**< Upstream exception transmit packet counter. */ + uint64_t exception_ds_rx; /**< Downstream exception receive packet counter. */ + uint64_t exception_ds_tx; /**< Downstream exception transmit packet counter. */ + uint64_t encap_sg_alloc_drop; + /**< Encapsulation drop counters due to scatter gather buffer allocation failure. */ + uint64_t decap_fail_drop; /**< Decapsulation drop counters due to invalid IP header. */ + uint64_t decap_split_drop; /**< Decapsulation drop counters due to split flow processing. 
*/ + uint64_t split_sg_alloc_fail; + /**< Split processing fail counter due to scatter gather buffer allocation failure. */ + uint64_t split_linear_copy_fail; + /**< Split processing fail counter due to linear copy fail. */ + uint64_t split_not_enough_tailroom; + /**< Split processing fail counter due to insufficient tailroom. */ + uint64_t exception_ds_invalid_dst_drop; + /**< Downstream exception handling fail counter due to invalid destination. */ + uint64_t decap_eapol_frames; /**< Decapsulation EAPoL frame counters. */ + uint64_t exception_ds_inv_appid; + /**< Invalid application ID for the transmit completion packets on exception downstream node. */ + uint64_t headroom_unavail; /**< Packet headroom unavailable to write metadata. */ + uint64_t tx_completion_success; /**< Host enqueue success count for the transmit completion packets. */ + uint64_t tx_completion_drop; /**< Host enqueue drop count for the transmit completion packets. */ +}; + +/** + * nss_gre_redir_tunnel_stats + * GRE redirect statistics as seen by the HLOS. + */ +struct nss_gre_redir_tunnel_stats { + struct net_device *dev; /**< Net device. */ + struct nss_gre_redir_tun_stats tstats; /**< Structure to accumulate all the statistics. */ + uint32_t ref_count; /**< Reference count for statistics. */ +}; + +/** + * nss_gre_redir_stats_notification + * GRE redirect transmission statistics structure. + */ +struct nss_gre_redir_stats_notification { + struct nss_gre_redir_tunnel_stats stats_ctx; /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +/** + * nss_gre_redir_msg + * Data for sending and receiving GRE tunnel redirect messages. + */ +struct nss_gre_redir_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a GRE tunnel redirect message. + */ + union { + struct nss_gre_redir_inner_configure_msg inner_configure; + /**< Configure a GRE inner node. 
*/ + struct nss_gre_redir_outer_configure_msg outer_configure; + /**< Configure a GRE outer node. */ + struct nss_gre_redir_interface_map_msg interface_map; + /**< Add a VAP interface-to-tunnel ID mapping. */ + struct nss_gre_redir_interface_unmap_msg interface_unmap; + /**< Delete a VAP interafce-to-tunnel ID mapping. */ + struct nss_gre_redir_sjack_map_msg sjack_map; + /**< Add an Ethernet interface-to-tunnel ID mapping for SJACK. */ + struct nss_gre_redir_sjack_unmap_msg sjack_unmap; + /**< Delete an Ethernet interface-to-tunnel ID mapping for SJACK. */ + struct nss_gre_redir_stats_sync_msg stats_sync; + /**< Synchronized tunnel statistics. */ + struct nss_gre_redir_exception_ds_reg_cb_msg exception_ds_configure; + /**< Registering callback on VAP for the GRE downstream flows. */ + } msg; /**< Message payload for GRE redirect messages exchanged with NSS core. */ + +}; + +/** + * nss_gre_redir_encap_per_pkt_metadata + * Metadata information for an HLOS-to-NSS packet. + */ +struct nss_gre_redir_encap_per_pkt_metadata { + uint16_t gre_tunnel_id; /**< ID of the tunnel. */ + uint8_t gre_flags; /**< Flags field from GRE header. */ + uint8_t gre_prio; /**< Priority field from GRE header. */ + uint8_t gre_seq; /**< Sequence number. */ + uint8_t ip_dscp; /**< DSCP values. */ + + /** + * Override the default DF policy for the packet by setting bit 8. + * The lower 7 bits provide the DF value to be used for this packet. + */ + uint8_t ip_df_override; + + /** + * IPsec security association pattern. Pattern + * 0x5A is supported only. + */ + uint8_t ipsec_pattern; +}; + +/** + * nss_gre_redir_decap_per_pkt_metadata + * Metadata information for an NSS-to-HLOS packet. + */ +struct nss_gre_redir_decap_per_pkt_metadata { + uint32_t src_if_num; /**< Number of the source Ethernet interface. */ + uint16_t gre_tunnel_id; /**< ID of the tunnel. */ + uint8_t gre_flags; /**< Flags from GRE header. */ + uint8_t gre_prio; /**< Priority from GRE header. 
*/
+	uint8_t gre_seq;		/**< Sequence number. */
+};
+
+/**
+ * nss_gre_redir_exception_us_metadata
+ *	Metadata information for upstream exception packet.
+ *
+ * Note: Additional fields need to be added by customer as required.
+ */
+struct nss_gre_redir_exception_us_metadata {
+	uint8_t tid;			/**< TID value. */
+};
+
+/**
+ * nss_gre_redir_exception_ds_metadata
+ *	Metadata information for downstream exception packet.
+ *
+ * Note: Additional fields need to be added by customer as required.
+ */
+struct nss_gre_redir_exception_ds_metadata {
+	uint32_t dst_vap_nssif;		/**< Destination VAP interface number. */
+	uint8_t tid;			/**< TID value. */
+	uint8_t app_id;			/**< Application ID. */
+	uint16_t hw_hash_idx;		/**< Hardware AST hash index value. */
+	uint32_t tx_status;		/**< Tx status. */
+};
+
+/**
+ * Callback function for receiving GRE tunnel data.
+ *
+ * @datatypes
+ * net_device \n
+ * sk_buff \n
+ * napi_struct
+ *
+ * @param[in] netdev  Pointer to the associated network device.
+ * @param[in] skb     Pointer to the data socket buffer.
+ * @param[in] napi    Pointer to the NAPI structure.
+ */
+typedef void (*nss_gre_redir_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi);
+
+/**
+ * Callback function for receiving GRE tunnel messages.
+ *
+ * @datatypes
+ * nss_cmn_msg
+ *
+ * @param[in] app_data  Pointer to the application context of the message.
+ * @param[in] msg       Pointer to the message data.
+ */
+typedef void (*nss_gre_redir_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg);
+
+/**
+ * nss_gre_redir_unregister_if
+ *	Deregisters a GRE tunnel interface from the NSS.
+ *
+ * @param[in] if_num  NSS interface number.
+ *
+ *
+ *
+ *
+ * @dependencies
+ * The tunnel interface must have been previously registered.
+ *
+ * @return
+ * True if successful, else false.
+ */
+extern bool nss_gre_redir_unregister_if(uint32_t if_num);
+
+/**
+ * nss_gre_redir_tx_msg
+ *	Sends GRE redirect tunnel messages.
+ * + * @datatypes + * nss_ctx_instance \n + * nss_gre_redir_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_msg *msg); + +/** + * nss_gre_redir_tx_buf + * Sends GRE redirect tunnel packets. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] os_buf Pointer to the OS buffer (e.g., skbuff). + * @param[in] if_num Tunnel interface number. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, + uint32_t if_num); + +/** + * nss_gre_redir_tx_buf_noreuse + * Sends GRE redirect tunnel packets. + * + * Note: The buffers will be not be reused or + * kept in the accelerator. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] os_buf Pointer to the OS buffer (e.g., skbuff). + * @param[in] if_num Tunnel interface number. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_tx_buf_noreuse(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, + uint32_t if_num); + +/** + * nss_gre_redir_stats_get + * Gets GRE redirect tunnel statistics. + * + * @datatypes + * nss_gre_redir_tunnel_stats + * + * @param[in] index Index in the tunnel statistics array. + * @param[out] stats Pointer to the tunnel statistics. + * + * @return + * TRUE or FALSE. + */ +extern bool nss_gre_redir_stats_get(int index, struct nss_gre_redir_tunnel_stats *stats); + +/** + * nss_gre_redir_alloc_and_register_node + * Allocates and registers GRE redirect dynamic node with NSS. 
+ * + * @datatypes + * net_device \n + * nss_gre_redir_data_callback_t \n + * nss_gre_redir_msg_callback_t \n + * + * @param[in] dev Pointer to the associated network device. + * @param[in] data_cb Callback for the data. + * @param[in] msg_cb Callback for the message. + * @param[in] type Type of dynamic node. + * @param[in] app_ctx Application context for notify callback. + * + * @return + * NSS interface number allocated. + */ +extern int nss_gre_redir_alloc_and_register_node(struct net_device *dev, + nss_gre_redir_data_callback_t data_cb, + nss_gre_redir_msg_callback_t msg_cb, + uint32_t type, void *app_ctx); + +/** + * nss_gre_redir_configure_inner_node + * Configures inner GRE redirect node. + * + * @datatypes + * nss_gre_redir_inner_configure_msg + * + * @param[in] ifnum NSS interface number. + * @param[in] ngrcm Inner node configuration message. + * + * @return + * Status of Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_configure_inner_node(int ifnum, + struct nss_gre_redir_inner_configure_msg *ngrcm); + +/** + * nss_gre_redir_configure_outer_node + * Configures outer GRE redirect node. + * + * @datatypes + * nss_gre_redir_outer_configure_msg + * + * @param[in] ifnum NSS interface number. + * @param[in] ngrcm Outer node configuration message. + * + * @return + * Status of Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_configure_outer_node(int ifnum, + struct nss_gre_redir_outer_configure_msg *ngrcm); + +/** + * nss_gre_redir_exception_ds_reg_cb + * Configure a callback on VAP for downstream GRE exception flows. + * + * @datatypes + * nss_gre_redir_exception_ds_reg_cb_msg + * + * @param[in] ifnum NSS interface number. + * @param[in] ngrcm Downstream exception callback registration message. + * + * @return + * Status of Tx operation. 
+ */ +extern nss_tx_status_t nss_gre_redir_exception_ds_reg_cb(int ifnum, + struct nss_gre_redir_exception_ds_reg_cb_msg *ngrcm); + +/** + * nss_gre_redir_tx_msg_sync + * Sends messages to NSS firmware synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_gre_redir_msg + * + * @param[in] nss_ctx NSS core context. + * @param[in] ngrm Pointer to GRE redirect message data. + * + * @return + * Status of Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_msg *ngrm); + +/** + * nss_gre_redir_get_context + * Gets the GRE redirect context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_gre_redir_get_context(void); + +/** + * nss_gre_redir_get_dentry + * Returns directory entry created in debugfs for statistics. + * + * @return + * Pointer to created directory entry for GRE redirect. + */ +extern struct dentry *nss_gre_redir_get_dentry(void); + +/** + * nss_gre_redir_get_device + * Gets the original device from probe. + * + * @return + * Pointer to the device. + */ +extern struct device *nss_gre_redir_get_device(void); + +/** + * nss_gre_redir_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_gre_redir_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_gre_redir_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. 
+ */ +extern int nss_gre_redir_stats_register_notifier(struct notifier_block *nb); + +/** + * @} + */ + +#endif /* __NSS_GRE_REDIR_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir_lag.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir_lag.h new file mode 100644 index 000000000..b10264dcf --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir_lag.h @@ -0,0 +1,732 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_gre_redir_lag.h + * NSS GRE redirect LAG interface definitions. + */ + +#ifndef __NSS_GRE_REDIR_LAG_H +#define __NSS_GRE_REDIR_LAG_H + +/** + * @addtogroup nss_gre_redirect_subsystem + * @{ + */ + +#define NSS_GRE_REDIR_LAG_MAX_NODE 12 /**< Maximum number of LAG nodes. */ +#define NSS_GRE_REDIR_LAG_MAX_SLAVE 8 /**< Maximum number of GRE redirect nodes per LAG node. */ +#define NSS_GRE_REDIR_LAG_MIN_SLAVE 2 /**< Minimum required GRE redirect nodes per LAG node. */ +#define NSS_GRE_REDIR_LAG_US_STATS_SYNC_RETRY 3 /**< Number of retries for sending query hash messages. 
*/ +#define NSS_GRE_REDIR_LAG_US_MAX_HASH_PER_MSG 80 /**< Maximum hash entries per message. */ + +/* + * nss_gre_redir_lag_err_types + * GRE redirect LAG error types. + */ +enum nss_gre_redir_lag_err_types { + NSS_GRE_REDIR_LAG_SUCCESS, + NSS_GRE_REDIR_LAG_ERR_INCORRECT_IFNUM, + NSS_GRE_REDIR_LAG_ERR_CORE_UNREGISTER_FAILED, + NSS_GRE_REDIR_LAG_ERR_STATS_INDEX_NOT_FOUND, + NSS_GRE_REDIR_LAG_ERR_DEALLOC_FAILED, + NSS_GRE_REDIR_LAG_ERR_MAX, +}; + +/** + * nss_gre_redir_lag_us_message_types + * GRE redirect LAG upstream message types. + */ +enum nss_gre_redir_lag_us_message_types { + NSS_GRE_REDIR_LAG_US_CONFIG_MSG, + NSS_GRE_REDIR_LAG_US_ADD_HASH_NODE_MSG, + NSS_GRE_REDIR_LAG_US_DEL_HASH_NODE_MSG, + NSS_GRE_REDIR_LAG_US_QUERY_HASH_NODE_MSG, + NSS_GRE_REDIR_LAG_US_CMN_STATS_SYNC_MSG, + NSS_GRE_REDIR_LAG_US_DB_HASH_NODE_MSG, + NSS_GRE_REDIR_LAG_US_MAX_MSG_TYPES, +}; + +/** + * nss_gre_redir_lag_ds_message_types + * GRE redirect LAG downstream message types. + */ +enum nss_gre_redir_lag_ds_message_types { + NSS_GRE_REDIR_LAG_DS_ADD_STA_MSG, + NSS_GRE_REDIR_LAG_DS_DEL_STA_MSG, + NSS_GRE_REDIR_LAG_DS_UPDATE_STA_MSG, + NSS_GRE_REDIR_LAG_DS_STATS_SYNC_MSG, + NSS_GRE_REDIR_LAG_DS_MAX_MSG_TYPES, +}; + +/** + * nss_gre_redir_lag_ds_stats_types + * GRE redirect LAG downstream statistics. + */ +enum nss_gre_redir_lag_ds_stats_types { + NSS_GRE_REDIR_LAG_DS_STATS_DST_INVALID = NSS_STATS_NODE_MAX, + /**< Packets that do not have a valid destination. */ + NSS_GRE_REDIR_LAG_DS_STATS_EXCEPTION_PKT, /**< Packets that are exceptioned to host. */ + NSS_GRE_REDIR_LAG_DS_STATS_MAX, /**< Maximum statistics type. */ +}; + +/** + * nss_gre_redir_lag_us_stats_types + * GRE redirect LAG upstream statistics. + */ +enum nss_gre_redir_lag_us_stats_types { + NSS_GRE_REDIR_LAG_US_STATS_AMSDU_PKTS = NSS_STATS_NODE_MAX, + /**< Number of AMSDU packets seen. */ + NSS_GRE_REDIR_LAG_US_STATS_AMSDU_PKTS_ENQUEUED, /**< Number of AMSDU packets enqueued. 
*/ + NSS_GRE_REDIR_LAG_US_STATS_AMSDU_PKTS_EXCEPTIONED, + /**< Number of AMSDU packets exceptioned. */ + NSS_GRE_REDIR_LAG_US_STATS_EXCEPTIONED, /**< Number of exceptioned packets. */ + NSS_GRE_REDIR_LAG_US_STATS_FREED, /**< Freed packets when equeue to NSS to host fails. */ + NSS_GRE_REDIR_LAG_US_STATS_ADD_ATTEMPT, /**< Add hash attempts. */ + NSS_GRE_REDIR_LAG_US_STATS_ADD_SUCCESS, /**< Add hash success. */ + NSS_GRE_REDIR_LAG_US_STATS_ADD_FAIL_TABLE_FULL, /**< Add hash failed due to full table. */ + NSS_GRE_REDIR_LAG_US_STATS_ADD_FAIL_EXISTS, /**< Add hash failed as entry already exists. */ + NSS_GRE_REDIR_LAG_US_STATS_DEL_ATTEMPT, /**< Delete hash attempts. */ + NSS_GRE_REDIR_LAG_US_STATS_DEL_SUCCESS, /**< Delete hash success. */ + NSS_GRE_REDIR_LAG_US_STATS_DEL_FAIL_NOT_FOUND, /**< Delete hash failed as entry not found in hash table. */ + NSS_GRE_REDIR_LAG_US_STATS_MAX, /**< Maximum statistics type. */ +}; + +/** + * nss_gre_redir_lag_us_hash_mode + * GRE redirect LAG upstream hash modes. + */ +enum nss_gre_redir_lag_us_hash_mode { + NSS_GRE_REDIR_LAG_US_HASH_MODE_SRC_AND_DEST, + NSS_GRE_REDIR_LAG_US_HASH_MODE_SRC, + NSS_GRE_REDIR_LAG_US_HASH_MODE_DEST, + NSS_GRE_REDIR_LAG_US_HASH_MODE_MAX, +}; + +/** + * nss_gre_redir_lag_us_config_msg + * Upstream configure message. + */ +struct nss_gre_redir_lag_us_config_msg { + uint32_t hash_mode; /**< Hash operating mode. */ + uint32_t num_slaves; /**< Number of slaves. */ + uint32_t if_num[NSS_GRE_REDIR_LAG_MAX_SLAVE]; /**< NSS interface numbers of GRE redirect tunnels. */ +}; + +/** + * nss_gre_redir_lag_us_add_hash_node_msg + * Message to add hash entry. + */ +struct nss_gre_redir_lag_us_add_hash_node_msg { + uint32_t if_num; /**< NSS interface number of GRE redirect. */ + uint16_t src_mac[ETH_ALEN / 2]; /**< Source MAC address. */ + uint16_t dest_mac[ETH_ALEN / 2]; /**< Destination MAC address. */ +}; + +/** + * nss_gre_redir_lag_us_del_hash_node_msg + * Message to delete hash entry. 
+ */ +struct nss_gre_redir_lag_us_del_hash_node_msg { + uint16_t src_mac[ETH_ALEN / 2]; /**< Source MAC address. */ + uint16_t dest_mac[ETH_ALEN / 2]; /**< Destination MAC address. */ +}; + +/** + * nss_gre_redir_lag_us_query_hash_node_msg + * Message to query if a hash entry is present. + */ +struct nss_gre_redir_lag_us_query_hash_node_msg { + uint16_t src_mac[ETH_ALEN / 2]; /**< Source MAC address. */ + uint16_t dest_mac[ETH_ALEN / 2]; /**< Destination MAC address. */ + uint32_t ifnum; /**< NSS interface number of GRE redirect. */ +}; + +/** + * nss_gre_redir_lag_us_cmn_sync_stats + * GRE redirect LAG upstream statistics. + */ +struct nss_gre_redir_lag_us_cmn_sync_stats { + uint32_t amsdu_pkts; /**< Number of AMSDU packets seen. */ + uint32_t amsdu_pkts_enqueued; /**< Number of AMSDU packets enqueued. */ + uint32_t amsdu_pkts_exceptioned; /**< Number of AMSDU packets exceptioned. */ + uint32_t exceptioned; /**< Number of exceptioned packets. */ + uint32_t freed; /**< Number of freed packets. */ +}; + +/** + * nss_gre_redir_lag_us_cmn_db_sync_stats + * Upstream database statistics. + */ +struct nss_gre_redir_lag_us_cmn_db_sync_stats { + uint32_t add_attempt; /**< Add hash attempts. */ + uint32_t add_success; /**< Add hash success. */ + uint32_t add_fail_table_full; /**< Add hash failed due to full table. */ + uint32_t add_fail_exists; /**< Add hash failed as entry already exists. */ + uint32_t del_attempt; /**< Delete hash attempts. */ + uint32_t del_success; /**< Delete hash success. */ + uint32_t del_fail_not_found; /**< Delete hash failed as entry not found in hash table. */ +}; + +/** + * nss_gre_redir_lag_us_tunnel_hash_node_stats + * Hash statistics for GRE redirect LAG. + */ +struct nss_gre_redir_lag_us_tunnel_hash_node_stats { + uint64_t hits; /**< Number of hits on this hash entry. */ + uint32_t if_num; /**< GRE redirect interface number. */ + uint16_t src_mac[ETH_ALEN / 2]; /**< Source MAC address. 
*/ + uint16_t dest_mac[ETH_ALEN / 2]; /**< Destination MAC address. */ +}; + +/** + * nss_gre_redir_lag_us_hash_stats_query_msg + * Hash statistics synchronization message. + */ +struct nss_gre_redir_lag_us_hash_stats_query_msg { + /* + * Request. + * Hash stats request has starting index of hash entry. + * Request is initiated by driver periodically. + */ + uint16_t db_entry_idx; /**< Starting index of request. */ + + /* + * Response. + * Response contains count of hash entries. It also has next field + * which used as the request index in subsequent request by caller. + */ + uint16_t db_entry_next; /**< Next index to be requested. */ + uint16_t count; /**< Number of hash entries in the message. */ + uint16_t reserved; /**< Reserved. */ + struct nss_gre_redir_lag_us_tunnel_hash_node_stats hstats[NSS_GRE_REDIR_LAG_US_MAX_HASH_PER_MSG]; + /**< Array of hash table entries. */ +}; + +/** + * nss_gre_redir_lag_us_cmn_sync_stats_msg + * Upstream statistics synchronization message. + */ +struct nss_gre_redir_lag_us_cmn_sync_stats_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + struct nss_gre_redir_lag_us_cmn_sync_stats us_stats; /**< Upstream statistics. */ + struct nss_gre_redir_lag_us_cmn_db_sync_stats db_stats; /**< Common hash statistics. */ +}; + +/** + *nss_gre_redir_lag_us_msg + * GRE redirect LAG upstream messages. + */ +struct nss_gre_redir_lag_us_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /* + * Payload of a GRE redirect LAG message. + */ + union { + struct nss_gre_redir_lag_us_config_msg config_us; /**< Upstream configuration message. */ + struct nss_gre_redir_lag_us_add_hash_node_msg add_hash; /**< Add hash entry. */ + struct nss_gre_redir_lag_us_del_hash_node_msg del_hash; /**< Delete hash entry. */ + struct nss_gre_redir_lag_us_query_hash_node_msg query_hash; /**< Hash entry query message. */ + struct nss_gre_redir_lag_us_cmn_sync_stats_msg us_sync_stats; /**< Upstream statistics. 
*/ + struct nss_gre_redir_lag_us_hash_stats_query_msg hash_stats; /**< Hash statistics message. */ + } msg; /**< GRE redirect LAG upstream message payload. */ +}; + +/** + * nss_gre_redir_lag_us_cmn_stats + * GRE redirect LAG upstream statistics. + */ +struct nss_gre_redir_lag_us_cmn_stats { + uint64_t amsdu_pkts; /**< Number of AMSDU packets seen. */ + uint64_t amsdu_pkts_enqueued; /**< Number of AMSDU packets enqueued. */ + uint64_t amsdu_pkts_exceptioned; /**< Number of AMSDU packets exceptioned. */ + uint64_t exceptioned; /**< Number of exceptioned packets. */ + uint64_t freed; /**< Freed packets when equeue to NSS to host fails. */ +}; + +/** + * nss_gre_redir_lag_us_cmn_db_stats + * Upstream database statistics. + */ +struct nss_gre_redir_lag_us_cmn_db_stats { + uint64_t add_attempt; /**< Add hash attempts. */ + uint64_t add_success; /**< Add hash success. */ + uint64_t add_fail_table_full; /**< Add hash failed due to full table. */ + uint64_t add_fail_exists; /**< Add hash failed as entry already exists. */ + uint64_t del_attempt; /**< Delete hash attempts. */ + uint64_t del_success; /**< Delete hash success. */ + uint64_t del_fail_not_found; /**< Delete hash failed as entry not found in hash table. */ +}; + +/** + * nss_gre_redir_lag_us_tunnel_stats + * Upstream tunnel node statistics. + */ +struct nss_gre_redir_lag_us_tunnel_stats { + uint64_t rx_packets; /**< Received packets. */ + uint64_t rx_bytes; /**< Received bytes. */ + uint64_t tx_packets; /**< Transmit packets. */ + uint64_t tx_bytes; /**< Transmit bytes. */ + uint64_t rx_dropped[NSS_MAX_NUM_PRI]; /**< Packets dropped on receive due to queue full. */ + struct nss_gre_redir_lag_us_cmn_stats us_stats; /**< Common node statistics. */ + struct nss_gre_redir_lag_us_cmn_db_stats db_stats; /**< Common hash statistics. */ +}; + +/** + * nss_gre_redir_lag_us_stats_notification + * GRE redirect LAG upstream transmission statistics structure. 
+ */ +struct nss_gre_redir_lag_us_stats_notification { + struct nss_gre_redir_lag_us_tunnel_stats stats_ctx; /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +/** + * nss_gre_redir_lag_ds_add_sta_msg + * Message to add station in LAG deployment. + */ +struct nss_gre_redir_lag_ds_add_sta_msg { + uint16_t mac[ETH_ALEN / 2]; /**< Station MAC address. */ + uint8_t reorder_type; /**< Reorder type for downstream. */ +}; + +/** + * nss_gre_redir_lag_ds_delete_sta_msg + * Message to delete station in LAG deployment. + */ +struct nss_gre_redir_lag_ds_delete_sta_msg { + uint16_t mac[ETH_ALEN / 2]; /**< Station MAC address. */ +}; + +/** + * nss_gre_redir_lag_ds_update_sta_msg + * Message to update station. + */ +struct nss_gre_redir_lag_ds_update_sta_msg { + uint16_t mac[ETH_ALEN / 2]; /**< Station MAC address. */ + uint8_t reorder_type; /**< Reorder type for downstream. */ +}; + +/** + * nss_gre_redir_lag_ds_stats + * GRE redirect link aggregation downstream statistics structure. + */ +struct nss_gre_redir_lag_ds_stats { + uint32_t dst_invalid; /**< Invalid destination packets. */ + uint32_t exception_cnt; /**< Exception count. */ +}; + +/** + * nss_gre_redir_lag_ds_sync_stats_msg + * Downstream statistics synchronization message. + */ +struct nss_gre_redir_lag_ds_sync_stats_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + struct nss_gre_redir_lag_ds_stats ds_stats; /**< GRE redirect LAG downstream statistics. */ +}; + +/** + *nss_gre_redir_lag_ds_msg + * GRE redirect LAG downstream messages. + */ +struct nss_gre_redir_lag_ds_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a GRE redirect LAG downstream message. + */ + union { + struct nss_gre_redir_lag_ds_add_sta_msg add_sta; /**< Add station entry. */ + struct nss_gre_redir_lag_ds_delete_sta_msg del_sta; /**< Delete station entry. 
*/ + struct nss_gre_redir_lag_ds_update_sta_msg update_sta; /**< Station entry update message. */ + struct nss_gre_redir_lag_ds_sync_stats_msg ds_sync_stats; /**< Downstream statistics. */ + } msg; /**< GRE redirect LAG downstream message payload. */ +}; + +/** + * nss_gre_redir_lag_ds_tun_stats + * Downstream statistics. + */ +struct nss_gre_redir_lag_ds_tun_stats { + uint64_t rx_packets; /**< Received packets. */ + uint64_t rx_bytes; /**< Received bytes. */ + uint64_t tx_packets; /**< Transmit packets. */ + uint64_t tx_bytes; /**< Transmit bytes. */ + uint64_t rx_dropped[NSS_MAX_NUM_PRI]; + /**< Packets dropped on receive due to queue full. */ + uint64_t dst_invalid; /**< Packets that do not have a valid destination. */ + uint64_t exception_cnt; /**< Packets that are exceptioned to host. */ + uint32_t ifnum; /**< NSS interface number. */ + bool valid; /**< Valid flag. */ +}; + +/** + * nss_gre_redir_lag_ds_stats_notification + * GRE redirect LAG downstream transmission statistics structure. + */ +struct nss_gre_redir_lag_ds_stats_notification { + struct nss_gre_redir_lag_ds_tun_stats stats_ctx; /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +/** + * Callback function for receiving GRE redirect LAG upstream data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_gre_redir_lag_us_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for receiving GRE redirect LAG downstream data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. 
+ * @param[in] napi    Pointer to the NAPI structure.
+ */
+typedef void (*nss_gre_redir_lag_ds_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi);
+
+ /**
+ * Callback function for receiving GRE redirect LAG upstream messages.
+ *
+ * @datatypes
+ * nss_cmn_msg
+ *
+ * @param[in] app_data  Pointer to the application context of the message.
+ * @param[in] msg       Pointer to the message data.
+ */
+typedef void (*nss_gre_redir_lag_us_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg);
+
+ /**
+ * Callback function for receiving GRE redirect LAG downstream messages.
+ *
+ * @datatypes
+ * nss_cmn_msg
+ *
+ * @param[in] app_data  Pointer to the application context of the message.
+ * @param[in] msg       Pointer to the message data.
+ */
+typedef void (*nss_gre_redir_lag_ds_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg);
+
+/**
+ * nss_gre_redir_lag_us_alloc_and_register_node
+ *	Allocates and registers GRE redirect upstream LAG node.
+ *
+ * @datatypes
+ * net_device \n
+ * nss_gre_redir_lag_us_data_callback_t \n
+ * nss_gre_redir_lag_us_msg_callback_t
+ *
+ * @param[in] dev           Net device pointer.
+ * @param[in] cb_func_data  Data callback function.
+ * @param[in] cb_func_msg   Message callback function.
+ * @param[in] app_ctx       Application context for notify callback.
+ *
+ * @return
+ * Interface number if allocation and registration is successful, else -1.
+ */
+extern int nss_gre_redir_lag_us_alloc_and_register_node(struct net_device *dev,
+		nss_gre_redir_lag_us_data_callback_t cb_func_data,
+		nss_gre_redir_lag_us_msg_callback_t cb_func_msg, void *app_ctx);
+
+/**
+ * nss_gre_redir_lag_ds_alloc_and_register_node
+ *	Allocates and registers GRE redirect downstream LAG node.
+ *
+ * @datatypes
+ * net_device \n
+ * nss_gre_redir_lag_ds_data_callback_t \n
+ * nss_gre_redir_lag_ds_msg_callback_t
+ *
+ * @param[in] dev           Net device pointer.
+ * @param[in] cb_func_data  Data callback function.
+ * @param[in] cb_func_msg   Message callback function.
+ * @param[in] app_data      Application context for notify callback.
+ *
+ * @return
+ * Interface number if allocation and registration is successful, else -1.
+ */
+extern int nss_gre_redir_lag_ds_alloc_and_register_node(struct net_device *dev,
+		nss_gre_redir_lag_ds_data_callback_t cb_func_data,
+		nss_gre_redir_lag_ds_msg_callback_t cb_func_msg, void *app_data);
+
+/**
+ * nss_gre_redir_lag_us_configure_node
+ *	Configures LAG upstream node.
+ *
+ * @datatypes
+ * nss_gre_redir_lag_us_config_msg
+ *
+ * @param[in] ifnum  NSS interface number.
+ * @param[in] ngluc  Pointer to LAG upstream node configuration message.
+ *
+ * @return
+ * True if successful, else false.
+ */
+extern bool nss_gre_redir_lag_us_configure_node(uint32_t ifnum,
+		struct nss_gre_redir_lag_us_config_msg *ngluc);
+
+/**
+ * nss_gre_redir_lag_us_unregister_and_dealloc
+ *	Deregister and deallocate GRE redirect upstream node.
+ *
+ * @param[in] if_num  NSS interface number.
+ *
+ * @return
+ * Error code.
+ *
+ * @dependencies
+ * The GRE redirect LAG interface must have been previously registered.
+ */
+extern enum nss_gre_redir_lag_err_types nss_gre_redir_lag_us_unregister_and_dealloc(uint32_t if_num);
+
+/**
+ * nss_gre_redir_lag_ds_unregister_and_dealloc
+ *	Deregisters and deallocates GRE redirect LAG downstream interface from the NSS.
+ *
+ * @param[in] if_num  NSS interface number.
+ *
+ * @return
+ * Error code.
+ *
+ * @dependencies
+ * The GRE redirect LAG interface must have been previously registered.
+ */
+extern enum nss_gre_redir_lag_err_types nss_gre_redir_lag_ds_unregister_and_dealloc(uint32_t if_num);
+
+/**
+ * nss_gre_redir_lag_us_tx_msg
+ *	Sends GRE redirect upstream LAG messages asynchronously.
+ *
+ * @datatypes
+ * nss_ctx_instance \n
+ * nss_gre_redir_lag_us_msg
+ *
+ * @param[in] nss_ctx  Pointer to the NSS context.
+ * @param[in] msg      Pointer to the message data.
+ *
+ * @return
+ * Status of the Tx operation.
+ */ +extern nss_tx_status_t nss_gre_redir_lag_us_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_lag_us_msg *msg); + +/** + * nss_gre_redir_lag_ds_tx_msg + * Sends GRE redirect downstream LAG messages asynchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_gre_redir_lag_ds_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_lag_ds_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_lag_ds_msg *msg); + +/** + * nss_gre_redir_lag_us_tx_buf + * Sends packets to GRE Redirect LAG upstream node. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] os_buf Pointer to the OS buffer (e.g., skbuff). + * @param[in] if_num Tunnel interface number. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_lag_us_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, + uint32_t if_num); + +/** + * nss_gre_redir_lag_ds_tx_buf + * Sends packets to GRE Redirect LAG downstream node. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] os_buf Pointer to the OS buffer (e.g., skbuff). + * @param[in] if_num Tunnel interface number. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_lag_ds_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, + uint32_t if_num); + +/** + * nss_gre_redir_lag_us_tx_msg_sync + * Sends upstream LAG messages to NSS firmware synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_gre_redir_lag_us_msg + * + * @param[in] nss_ctx NSS core context. + * @param[in] ngrm Pointer to GRE redirect upstream LAG message data. + * + * @return + * Status of Tx operation. 
+ */ +extern nss_tx_status_t nss_gre_redir_lag_us_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_lag_us_msg *ngrm); + +/** + * nss_gre_redir_lag_ds_tx_msg_sync + * Sends downstream LAG messages to NSS firmware synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_gre_redir_lag_ds_msg + * + * @param[in] nss_ctx NSS core context. + * @param[in] ngrm Pointer to GRE redirect downstream LAG message data. + * + * @return + * Status of Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_lag_ds_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_lag_ds_msg *ngrm); + +/** + * nss_gre_redir_lag_us_stats_get + * Fetches common node statistics for upstream GRE Redir LAG. + * + * @datatypes + * nss_gre_redir_lag_us_tunnel_stats + * + * @param[out] cmn_stats Pointer to common node statistics structure. + * @param[in] index Index to fetch statistics from. + * + * @return + * True if successful, else false. + */ +extern bool nss_gre_redir_lag_us_stats_get(struct nss_gre_redir_lag_us_tunnel_stats *cmn_stats, uint32_t index); + +/** + * nss_gre_redir_lag_ds_stats_get + * Fetches common node statistics for downstream GRE Redir LAG. + * + * @datatypes + * nss_gre_redir_lag_ds_tun_stats + * + * @param[out] cmn_stats Pointer to common node statistics structure. + * @param[in] index Index to fetch statistics from. + * + * @return + * True if successful, else false. + */ +extern bool nss_gre_redir_lag_ds_stats_get(struct nss_gre_redir_lag_ds_tun_stats *cmn_stats, uint32_t index); + +/** + * nss_gre_redir_lag_us_get_context + * Gets the GRE redirect LAG upstream context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_gre_redir_lag_us_get_context(void); + +/** + * nss_gre_redir_lag_ds_get_context + * Gets the GRE redirect LAG downstream context. + * + * @return + * Pointer to the NSS core context. 
+ */ +extern struct nss_ctx_instance *nss_gre_redir_lag_ds_get_context(void); + +/** + * nss_gre_redir_lag_ds_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_gre_redir_lag_ds_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_gre_redir_lag_ds_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_gre_redir_lag_ds_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_gre_redir_lag_us_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_gre_redir_lag_us_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_gre_redir_lag_us_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_gre_redir_lag_us_stats_register_notifier(struct notifier_block *nb); + +/** + * @} + */ + +#endif /* __NSS_GRE_REDIR_LAG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir_mark.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir_mark.h new file mode 100644 index 000000000..39e024bfb --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_redir_mark.h @@ -0,0 +1,338 @@ +/* + ************************************************************************** + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_gre_redir_mark.h + * NSS GRE Redirect mark interface definitions. + */ + +#ifndef __NSS_GRE_REDIR_MARK_H +#define __NSS_GRE_REDIR_MARK_H + +#define NSS_GRE_REDIR_MARK_HLOS_MAGIC 0xaade /**< Metadata magic set by HLOS. */ +#define NSS_GRE_REDIR_MARK_NSS_MAGIC 0xaadf /**< Metadata magic set by NSS. */ + +extern struct nss_gre_redir_mark_stats gre_mark_stats; + +/** + * nss_gre_redir_mark messages + * Message types for GRE redirect mark requests and responses. + */ +enum nss_gre_redir_mark_msg_types { + NSS_GRE_REDIR_MARK_REG_CB_MSG, /**< Register callback configuration message. */ + NSS_GRE_REDIR_MARK_STATS_SYNC_MSG, /**< Statistics synchronization message. */ + NSS_GRE_REDIR_MARK_MSG_MAX, /**< Maximum message type. */ +}; + +/** + * nss_gre_redir_mark errors + * Error codes for GRE redirect mark configuration message. + */ +enum nss_gre_redir_mark_error_types { + NSS_GRE_REDIR_MARK_ERROR_NONE, /**< Configuration successful. */ + NSS_GRE_REDIR_MARK_ERROR_INV_IF_NUM, /**< Invalid interface number for callback registration. */ + NSS_GRE_REDIR_MARK_ERROR_INV_ETH_TYPE, /**< Invalid Ethernet type for the destination interface. 
*/ + NSS_GRE_REDIR_MARK_ERROR_TYPE_MAX +}; + +/** + * nss_gre_redir_mark_stats_types + * GRE redirect mark statistics types. + */ +enum nss_gre_redir_mark_stats_types { + NSS_GRE_REDIR_MARK_STATS_HLOS_MAGIC_FAILED = NSS_STATS_NODE_MAX, + /**< HLOS magic fail count. */ + NSS_GRE_REDIR_MARK_STATS_INV_DST_IF_DROPS, /**< Invalid transmit interface drop count. */ + NSS_GRE_REDIR_MARK_STATS_DST_IF_ENQUEUE, /**< Next egress interface enqueue success count. */ + NSS_GRE_REDIR_MARK_STATS_DST_IF_ENQUEUE_DROPS, /**< Next egress interface enqueue drop count. */ + NSS_GRE_REDIR_MARK_STATS_INV_APPID, /**< Invalid application ID for the transmit completion packets. */ + NSS_GRE_REDIR_MARK_STATS_HEADROOM_UNAVAILABLE, /**< Packet headroom unavailable to write metadata. */ + NSS_GRE_REDIR_MARK_STATS_TX_COMPLETION_SUCCESS, /**< Transmit completion host enqueue success count. */ + NSS_GRE_REDIR_MARK_STATS_TX_COMPLETION_DROPS, /**< Transmit completion host enqueue drop count. */ + NSS_GRE_REDIR_MARK_STATS_MAX /**< Maximum statistics type. */ +}; + +/** + * nss_gre_redir_mark_metadata + * HLOS to NSS per packet downstream metadata. + */ +struct nss_gre_redir_mark_metadata { + uint32_t dst_ifnum; /**< Destination Tx interface number. */ + uint8_t wifi_tid; /**< TID value. */ + uint8_t app_id; /**< Application ID. */ + uint16_t hw_hash_idx; /**< Hardware AST hash index value. */ + uint32_t tx_status; /**< Transmit status. */ + uint16_t offset; /**< Buffer offset from the metadata. */ + uint16_t magic; /**< Metadata magic. */ +}; + +/** + * nss_gre_redir_mark_stats + * GRE redirect mark statistics. + */ +struct nss_gre_redir_mark_stats_sync_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t hlos_magic_fail; /**< HLOS magic fail count. */ + uint32_t invalid_dst_drop; /**< Invalid transmit interface drop count. */ + uint32_t dst_enqueue_success; /**< Next egress interface enqueue success count. 
*/ + uint32_t dst_enqueue_drop; /**< Next egress interface enqueue drop count. */ + uint32_t inv_appid; /**< Invalid application ID for the transmit completion packets. */ + uint32_t headroom_unavail; /**< Packet headroom unavailable to write metadata. */ + uint32_t tx_completion_success; /**< Transmit completion host enqueue success count. */ + uint32_t tx_completion_drop; /**< Transmit completion host enqueue drop count. */ +}; + +/** + * nss_gre_redir_mark_register_cb_msg + * Transmit completion function register configuration message. + */ +struct nss_gre_redir_mark_register_cb_msg { + uint32_t nss_if_num; /**< NSS transmit interface number on which callback needs to be registered. */ +}; + +/** + * nss_gre_redir_mark_stats_notification + * GRE redirect mark transmission statistics structure. + */ +struct nss_gre_redir_mark_stats_notification { + uint64_t stats_ctx[NSS_GRE_REDIR_MARK_STATS_MAX]; /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +/** + * nss_gre_redir_mark_msg + * Structure that describes the interface message. + */ +struct nss_gre_redir_mark_msg { + struct nss_cmn_msg cm; /**< Common message. */ + + /** + * Payload of a GRE redirect mark message. + */ + union { + struct nss_gre_redir_mark_register_cb_msg reg_cb_msg; + /**< Configuration message to register for callback on completion. */ + struct nss_gre_redir_mark_stats_sync_msg stats_sync; + /**< Mark node statistics synchronization. */ + } msg; /**< Message payload for GRE redirect mark messages exchanged with NSS core. */ +}; + +/** + * Callback function for receiving GRE redirect mark data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. 
+ */ +typedef void (*nss_gre_redir_mark_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for receiving GRE redirect mark messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_gre_redir_mark_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * nss_gre_redir_mark_unregister_if + * Deregisters a GRE redirect mark interface from the NSS. + * + * @param[in] if_num GRE redirect mark interface number. + * + * @return + * None. + * + * @dependencies + * The GRE redirect mark interface must have been previously registered. + * + * @return + * True if successful, else false. + */ +extern bool nss_gre_redir_mark_unregister_if(uint32_t if_num); + +/** + * nss_gre_redir_mark_tx_buf + * Sends data buffers to NSS firmware asynchronously. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] os_buf Pointer to the OS buffer (e.g. skbuff). + * @param[in] if_num GRE redirect mark interface number. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_mark_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, + uint32_t if_num); + +/** + * nss_gre_redir_mark_reg_cb + * Configure a callback on VAP for downstream application flows. + * + * @datatypes + * nss_gre_redir_mark_register_cb_msg + * + * @param[in] ifnum NSS interface number. + * @param[in] ngrcm Downstream application callback registration message. + * + * @return + * Status of Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_mark_reg_cb(int ifnum, + struct nss_gre_redir_mark_register_cb_msg *ngrcm); + +/** + * nss_gre_redir_mark_tx_msg + * Sends GRE redirect mark messages. 
+ * + * @datatypes + * nss_ctx_instance \n + * nss_gre_redir_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_mark_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_mark_msg *msg); + +/** + * nss_gre_redir_mark_tx_msg_sync + * Sends messages to NSS firmware synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_gre_redir_mark_msg + * + * @param[in] nss_ctx NSS core context. + * @param[in] ngrm Pointer to GRE redirect mark message data. + * + * @return + * Status of Tx operation. + */ +extern nss_tx_status_t nss_gre_redir_mark_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_mark_msg *ngrm); + +/** + * nss_gre_redir_mark_stats_get + * Gets GRE redirect mark statistics. + * + * @datatypes + * nss_gre_redir_mark_stats + * + * @param[out] stats Pointer to the memory address, which must be large enough to + * hold all the statistics. + * + * @return + * TRUE or FALSE. + */ +extern bool nss_gre_redir_mark_stats_get(struct nss_gre_redir_mark_stats *stats); + +/** + * nss_gre_redir_alloc_and_register_node + * Registers GRE redirect mark static node with NSS. + * + * @datatypes + * net_device \n + * nss_gre_redir_mark_data_callback_t \n + * nss_gre_redir_mark_msg_callback_t + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] if_num NSS interface number. + * @param[in] cb_func_data Callback for the data. + * @param[in] cb_func_msg Callback for the message. + * @param[in] features Data socket buffer types supported by this interface. + * + * @return + * NSS interface number allocated. 
+ */ +extern struct nss_ctx_instance *nss_gre_redir_mark_register_if(struct net_device *netdev, uint32_t if_num, + nss_gre_redir_mark_data_callback_t cb_func_data, nss_gre_redir_mark_msg_callback_t cb_func_msg, + uint32_t features); + +/** + * nss_gre_redir_mark_get_context + * Gets the GRE redirect mark context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_gre_redir_mark_get_context(void); + +/** + * nss_gre_redir_mark_get_dentry + * Returns directory entry created in debug filesystem for statistics. + * + * @return + * Pointer to created directory entry for GRE redirect mark. + */ +extern struct dentry *nss_gre_redir_mark_get_dentry(void); + +/* + * nss_gre_redir_mark_get_device + * Gets the original device from probe. + * + * @return + * Pointer to the device. + */ +extern struct device *nss_gre_redir_mark_get_device(void); + +/** + * nss_gre_redir_mark_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_gre_redir_mark_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_gre_redir_mark_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_gre_redir_mark_stats_register_notifier(struct notifier_block *nb); + +/** + * @} + */ + +#endif /* __NSS_GRE_REDIR_MARK_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_tunnel.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_tunnel.h new file mode 100644 index 000000000..53f76ed1e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_gre_tunnel.h @@ -0,0 +1,428 @@ +/* + **************************************************************************** + * Copyright (c) 2016-2018, 2020-2021, The Linux Foundation. 
All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + **************************************************************************** + */ + +/** + * @file nss_gre_tunnel.h + * NSS GRE Tunnel interface definitions. + */ + +#ifndef __NSS_GRE_TUNNEL_H +#define __NSS_GRE_TUNNEL_H + +/** + * @addtogroup nss_gre_tunnel_subsystem + * @{ + */ + +/** + * Maximum number of supported GRE tunnel sessions. + */ +#define NSS_MAX_GRE_TUNNEL_SESSIONS 16 + +/** + * nss_gre_tunnel_message_types + * Message types for a GRE tunnel rule. + */ +enum nss_gre_tunnel_message_types { + NSS_GRE_TUNNEL_MSG_CONFIGURE, + NSS_GRE_TUNNEL_MSG_SESSION_DESTROY, + NSS_GRE_TUNNEL_MSG_STATS, + NSS_GRE_TUNNEL_MSG_CONFIGURE_DI_TO_WLAN_ID, + NSS_GRE_TUNNEL_MSG_INQUIRY, + NSS_GRE_TUNNEL_MSG_MAX, +}; + +/** + * nss_gre_tunnel_encrypt_types + * Encryption types for a GRE tunnel. + */ +enum nss_gre_tunnel_encrypt_types { + NSS_GRE_TUNNEL_ENCRYPT_NONE, + NSS_GRE_TUNNEL_ENCRYPT_AES128_CBC, + NSS_GRE_TUNNEL_ENCRYPT_AES256_CBC, + NSS_GRE_TUNNEL_ENCRYPT_MAX, +}; + +/** + * nss_gre_tunnel_mode_types + * Mode types for a GRE tunnel. + */ +enum nss_gre_tunnel_mode_types { + NSS_GRE_TUNNEL_MODE_GRE, + NSS_GRE_TUNNEL_MODE_GRE_UDP, + NSS_GRE_TUNNEL_MODE_MAX, +}; + +/** + * nss_gre_tunnel_ip_types + * IP types for a GRE tunnel. 
+ */ +enum nss_gre_tunnel_ip_types { + NSS_GRE_TUNNEL_IP_IPV4, + NSS_GRE_TUNNEL_IP_IPV6, + NSS_GRE_TUNNEL_IP_MAX, +}; + +/** + * nss_gre_tunnel_error_types + * Error types for a GRE tunnel. + */ +enum nss_gre_tunnel_error_types { + NSS_GRE_TUNNEL_ERR_UNKNOWN_MSG = 1, + NSS_GRE_TUNNEL_ERR_IF_INVALID = 2, + NSS_GRE_TUNNEL_ERR_CPARAM_INVALID = 3, + NSS_GRE_TUNNEL_ERR_MODE_INVALID = 4, + NSS_GRE_TUNNEL_ERR_ENCRYPT_INVALID = 5, + NSS_GRE_TUNNEL_ERR_IP_INVALID = 6, + NSS_GRE_TUNNEL_ERR_ENCRYPT_IDX_INVALID = 7, + NSS_GRE_TUNNEL_ERR_NOMEM = 8, + NSS_GRE_TUNNEL_ERR_PROTO_TEB_INVALID = 9, + NSS_GRE_TUNNEL_ERR_SIBLING_IF = 10, + NSS_GRE_TUNNEL_ERR_CRYPTO_NODE_ID = 11, + NSS_GRE_TUNNEL_ERR_RPS = 12, + NSS_GRE_TUNNEL_ERR_DI_INVALID = 13, + NSS_GRE_TUNNEL_ERR_MAX, +}; + +/** + * nss_gre_tunnel_stats_type + * GRE tunnel session debug statistic counters. + */ +enum nss_gre_tunnel_stats_type { + NSS_GRE_TUNNEL_STATS_SESSION_RX_PKTS, /**< Number of packets received. */ + NSS_GRE_TUNNEL_STATS_SESSION_TX_PKTS, /**< Number of packets transmitted. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_QUEUE_0_DROPPED, /**< Dropped receive packets 0. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_QUEUE_1_DROPPED, /**< Dropped receive packets 1. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_QUEUE_2_DROPPED, /**< Dropped receive packets 2. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_QUEUE_3_DROPPED, /**< Dropped receive packets 3. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_MALFORMED, /**< Malformed packet was received. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_INVALID_PROT, /**< Invalid protocol was received. */ + NSS_GRE_TUNNEL_STATS_SESSION_DECAP_QUEUE_FULL, /**< Decapsulation queue is full. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_SINGLE_REC_DGRAM, /**< Single fragment was received. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_INVALID_REC_DGRAM, /**< Invalid fragment was received. */ + NSS_GRE_TUNNEL_STATS_SESSION_BUFFER_ALLOC_FAIL, /**< Buffer memory allocation failed. */ + NSS_GRE_TUNNEL_STATS_SESSION_BUFFER_COPY_FAIL, /**< Buffer memory copy failed. 
*/ + NSS_GRE_TUNNEL_STATS_SESSION_OUTFLOW_QUEUE_FULL, /**< Outflow queue is full. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_DROPPED_HROOM, /**< Packets dropped because of insufficient headroom. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_CBUFFER_ALLOC_FAIL, /**< Receive crypto buffer allocation failed. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_CENQUEUE_FAIL, /**< Receive enqueue-to-crypto failed. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_DECRYPT_DONE, /**< Receive decryption is complete. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_FORWARD_ENQUEUE_FAIL, /**< Receive forward enqueue failed. */ + NSS_GRE_TUNNEL_STATS_SESSION_TX_CBUFFER_ALLOC_FAIL, /**< Transmit crypto buffer allocation failed. */ + NSS_GRE_TUNNEL_STATS_SESSION_TX_CENQUEUE_FAIL, /**< Transmit enqueue-to-crypto failed. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_DROPPED_TROOM, /**< Packets dropped because of insufficient tailroom. */ + NSS_GRE_TUNNEL_STATS_SESSION_TX_FORWARD_ENQUEUE_FAIL, /**< Transmit forward enqueue failed. */ + NSS_GRE_TUNNEL_STATS_SESSION_TX_CIPHER_DONE, /**< Transmit cipher is complete. */ + NSS_GRE_TUNNEL_STATS_SESSION_CRYPTO_NOSUPP, /**< Error count for non-supported crypto packets. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_DROPPED_MH_VERSION, /**< Receive drop: bad meta header. */ + NSS_GRE_TUNNEL_STATS_SESSION_RX_UNALIGNED_PKT, /**< Counter for unaligned packets. */ + NSS_GRE_TUNNEL_STATS_SESSION_MAX, /**< Maximum statistics type. */ +}; + +/** + * nss_gre_tunnel_di_to_wlan_id + * Dynamic interface to WLAN ID message structure. + */ +struct nss_gre_tunnel_di_to_wlan_id { + uint32_t dynamic_interface_num; /**< Dynamic interface number. */ + uint16_t wlan_id; /**< WLAN ID number. */ + uint16_t fwd_policy; /**< Forward policy bits. */ +}; + +/** + * nss_gre_tunnel_configure + * Message information for configuring a GRE tunnel. + */ +struct nss_gre_tunnel_configure { + uint32_t mh_version; /**< Meta header version. */ + uint8_t gre_mode; /**< GRE or GRE plus UDP. */ + uint8_t ip_type; /**< IPv4 or IPv6. 
*/ + uint16_t encrypt_type; /**< Encryption type. */ + uint32_t src_ip[4]; /**< Source IPv4 or IPv6 address. */ + uint32_t dest_ip[4]; /**< Destination IPv4 or IPv6 address. */ + uint16_t src_port; /**< GRE plus UDP only for the source. */ + uint16_t dest_port; /**< GRE plus UDP only for the destination. */ + uint32_t crypto_node_id; /**< Crypto node identifier. */ + uint32_t crypto_idx_encrypt; /**< Crypto index for encryption. */ + uint32_t crypto_idx_decrypt; /**< Crypto index for decryption. */ + uint32_t word0; /**< Word0 header. */ + uint8_t iv_val[16]; /**< Initialization vector value. */ + uint32_t sibling_if; /**< Sibling interface number. */ + uint8_t ttl; /**< Time-to-live value of the IP header. */ + int8_t rps; /**< Steer packets to host core. */ + uint16_t reserved; /**< Reserved space. */ + uint32_t word1; /**< Word1 header. */ + uint32_t word2; /**< Word2 header. */ + uint32_t word3; /**< Word3 header. */ +}; + +/** + * nss_gre_tunnel_stats + * Message statistics for a GRE tunnel. + */ +struct nss_gre_tunnel_stats { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t rx_malformed; /**< Malformed packet was received. */ + uint32_t rx_invalid_prot; /**< Invalid protocol was received. */ + uint32_t decap_queue_full; /**< Decapsulation queue is full. */ + uint32_t rx_single_rec_dgram; /**< Single fragment was received. */ + uint32_t rx_invalid_rec_dgram; /**< Invalid fragment was received. */ + uint32_t buffer_alloc_fail; /**< Buffer memory allocation failed. */ + uint32_t buffer_copy_fail; /**< Buffer memory copy failed. */ + uint32_t outflow_queue_full; /**< Outflow queue is full. */ + uint32_t rx_dropped_hroom; + /**< Packets dropped because of insufficient headroom. */ + uint32_t rx_cbuf_alloc_fail; + /**< Receive crypto buffer allocation failed. */ + uint32_t rx_cenqueue_fail; /**< Receive enqueue-to-crypto failed. */ + uint32_t rx_decrypt_done; /**< Receive decryption is complete. 
*/ + uint32_t rx_forward_enqueue_fail; /**< Receive forward enqueue failed. */ + uint32_t tx_cbuf_alloc_fail; + /**< Transmit crypto buffer allocation failed. */ + uint32_t tx_cenqueue_fail; /**< Transmit enqueue-to-crypto failed. */ + uint32_t rx_dropped_troom; + /**< Packets dropped because of insufficient tailroom. */ + uint32_t tx_forward_enqueue_fail; /**< Transmit forward enqueue failed. */ + uint32_t tx_cipher_done; /**< Transmit cipher is complete. */ + uint32_t crypto_nosupp; + /**< Error count for non-supported crypto packets. */ + uint32_t rx_dropped_mh_ver; /**< Receive drop: bad meta header. */ + uint32_t rx_unaligned_pkt; /**< Counter for unaligned packets. */ +#if defined(NSS_HAL_IPQ807x_SUPPORT) + uint32_t crypto_resp_error[NSS_CRYPTO_CMN_RESP_ERROR_MAX]; + /**< Crypto response errors. */ +#endif +}; + +/** + * nss_gre_tunnel_stats_notification + * GRE tunnel transmission statistics structure. + */ +struct nss_gre_tunnel_stats_notification { + uint64_t stats_ctx[NSS_GRE_TUNNEL_STATS_SESSION_MAX + NSS_CRYPTO_CMN_RESP_ERROR_MAX]; + /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +/** + * nss_gre_tunnel_msg + * Data for sending and receiving GRE tunnel messages. + */ +struct nss_gre_tunnel_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a GRE tunnel message. + */ + union { + struct nss_gre_tunnel_configure configure; /**< Tunnel configuration data. */ + struct nss_gre_tunnel_stats stats; /**< Tunnel statistics. */ + struct nss_gre_tunnel_di_to_wlan_id dtwi; /**< Tunnel dynamic interface number to WLAN ID mapping. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving GRE tunnel messages. + * + * @datatypes + * nss_gre_tunnel_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. 
+ */ +typedef void (*nss_gre_tunnel_msg_callback_t)(void *app_data, struct nss_gre_tunnel_msg *msg); + +/** + * Callback function for receiving GRE tunnel session data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_gre_tunnel_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_gre_tunnel_tx_buf + * Sends a GRE tunnel packet. + * + * @datatypes + * sk_buff \n + * nss_ctx_instance + * + * @param[in] skb Pointer to the data socket buffer. + * @param[in] if_num Tunnel interface number. + * @param[in] nss_ctx Pointer to the NSS context. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_tunnel_tx_buf(struct sk_buff *skb, uint32_t if_num, struct nss_ctx_instance *nss_ctx); + +/** + * nss_gre_tunnel_tx_msg + * Sends a GRE tunnel message. + * + * @datatypes + * nss_ctx_instance \n + * nss_gre_tunnel_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_tunnel_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_tunnel_msg *msg); + +/** + * nss_gre_tunnel_tx_msg_sync + * Sends a GRE tunnel message synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_gre_tunnel_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_gre_tunnel_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_tunnel_msg *msg); + +/** + * nss_gre_tunnel_msg_init + * Initializes a GRE tunnel message. + * + * @datatypes + * nss_gre_tunnel_msg + * + * @param[in] ngtm Pointer to the tunnel message. 
+ * @param[in] if_num Tunnel interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_gre_tunnel_msg_init(struct nss_gre_tunnel_msg *ngtm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** + * nss_gre_tunnel_get_ctx + * Returns the NSS context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_gre_tunnel_get_ctx(void); + +/** + * nss_gre_tunnel_register_if + * Registers a network device with the NSS for sending and receiving tunnel + * messages. + * + * @datatypes + * nss_gre_tunnel_data_callback_t \n + * nss_gre_tunnel_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] cb Callback function for the message. + * @param[in] ev_cb Callback for the GRE tunnel message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * @param[in] app_ctx Pointer to the application context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_gre_tunnel_register_if(uint32_t if_num, + nss_gre_tunnel_data_callback_t cb, + nss_gre_tunnel_msg_callback_t ev_cb, + struct net_device *netdev, + uint32_t features, + void *app_ctx); + +/** + * nss_gre_tunnel_unregister_if + * Deregisters a network device from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The network device must have been previously registered. + */ +extern void nss_gre_tunnel_unregister_if(uint32_t if_num); + +/** + * nss_gre_tunnel_inquiry() + * Inquiry if a GRE tunnel has been established in NSS FW. + * + * @param[in] inquiry_info Query parameters similar to creation parameters. 
+ * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Status of the Tx operation + */ +extern nss_tx_status_t nss_gre_tunnel_inquiry( + struct nss_gre_tunnel_configure *inquiry_info, + nss_gre_tunnel_msg_callback_t cb, void *app_data); + +/** + * nss_gre_tunnel_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_gre_tunnel_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_gre_tunnel_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_gre_tunnel_stats_register_notifier(struct notifier_block *nb); + +/** + * @} + */ + +#endif /* __NSS_GRE_TUNNEL_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_if.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_if.h new file mode 100644 index 000000000..abda94189 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_if.h @@ -0,0 +1,454 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_if.h + * NSS interface definitions. + */ + +#ifndef __NSS_IF_H +#define __NSS_IF_H + +#define NSS_IF_TX_TIMEOUT 3000 /* 3 Seconds */ + +/** + * @addtogroup nss_driver_subsystem + * @{ + */ + +/** + * nss_if_message_types + * Message types for the NSS interface. + */ +enum nss_if_message_types { + NSS_IF_OPEN, + NSS_IF_CLOSE, + NSS_IF_LINK_STATE_NOTIFY, + NSS_IF_MTU_CHANGE, + NSS_IF_MAC_ADDR_SET, + NSS_IF_STATS, + NSS_IF_ISHAPER_ASSIGN, + NSS_IF_BSHAPER_ASSIGN, + NSS_IF_ISHAPER_UNASSIGN, + NSS_IF_BSHAPER_UNASSIGN, + NSS_IF_ISHAPER_CONFIG, + NSS_IF_BSHAPER_CONFIG, + NSS_IF_PAUSE_ON_OFF, + NSS_IF_VSI_ASSIGN, + NSS_IF_VSI_UNASSIGN, + NSS_IF_SET_NEXTHOP, + NSS_IF_SET_IGS_NODE, + NSS_IF_CLEAR_IGS_NODE, + NSS_IF_RESET_NEXTHOP, + NSS_IF_PPE_PORT_CREATE, + NSS_IF_PPE_PORT_DESTROY, + NSS_IF_MAX_MSG_TYPES = 9999, +}; + +/** + * nss_if_error_types + * Error types for the NSS interface. + */ +enum nss_if_error_types { + NSS_IF_ERROR_NO_ISHAPERS, + NSS_IF_ERROR_NO_BSHAPERS, + NSS_IF_ERROR_NO_ISHAPER, + NSS_IF_ERROR_NO_BSHAPER, + NSS_IF_ERROR_ISHAPER_OLD, + NSS_IF_ERROR_BSHAPER_OLD, + NSS_IF_ERROR_ISHAPER_CONFIG_FAILED, + NSS_IF_ERROR_BSHAPER_CONFIG_FAILED, + NSS_IF_ERROR_TYPE_UNKNOWN, + NSS_IF_ERROR_TYPE_EOPEN, + NSS_IF_ERROR_TYPE_INVALID_MTU, + NSS_IF_ERROR_TYPE_INVALID_MAC_ADDR, + NSS_IF_ERROR_TYPE_VSI_NOT_MATCH, + NSS_IF_ERROR_TYPE_VSI_REASSIGN, + NSS_IF_ERROR_TYPE_VSI_INVALID, + NSS_IF_ERROR_TYPE_MAX = 9999, +}; + +/** + * nss_if_data_align + * Data alignment modes for the NSS interface. 
+ */ +enum nss_if_data_align { + NSS_IF_DATA_ALIGN_2BYTE = 0, + NSS_IF_DATA_ALIGN_4BYTE = 2, +}; + +/** + * nss_if_open + * Message information for opening the NSS interface. + */ +struct nss_if_open { + uint32_t tx_desc_ring; /**< Tx descriptor ring address. */ + uint32_t rx_desc_ring; /**< Rx descriptor ring address. */ + uint32_t rx_forward_if; /**< Forward received packets to this interface. */ + uint32_t alignment_mode;/**< Header alignment mode. */ +}; + +/** + * nss_if_close + * Message information for closing the NSS interface. + */ +struct nss_if_close { + uint32_t reserved; /**< Placeholder for the structure. */ +}; + +/** + * nss_if_link_state_notify + * Link state notification sent to the NSS interface. + */ +struct nss_if_link_state_notify { + uint32_t state; + /**< Link state UP is bit 0 set. Other bits are as defined by Linux to indicate speed and duplex. */ +}; + +/** + * nss_if_mtu_change + * MTU change for the NSS interface. + */ +struct nss_if_mtu_change { + uint16_t min_buf_size; /**< Changed value for the minimum buffer size. */ +}; + +/** + * nss_if_pause_on_off + * Enables or disables a pause frame for the NSS interface. + */ +struct nss_if_pause_on_off { + uint32_t pause_on; /**< Turn the pause frame ON or OFF. */ +}; + +/** + * nss_if_mac_address_set + * MAC address setting. + */ +struct nss_if_mac_address_set { + uint8_t mac_addr[ETH_ALEN]; /**< MAC address. */ +}; + +/** + * nss_if_shaper_assign + * Shaper assignment message. + */ +struct nss_if_shaper_assign { + uint32_t shaper_id; /**< ID of the request. */ + uint32_t new_shaper_id; /**< ID of the response. */ +}; + +/** + * nss_if_shaper_unassign + * Shaper unassign message. + */ +struct nss_if_shaper_unassign { + uint32_t shaper_id; /**< ID of the request. */ +}; + +/** + * nss_if_shaper_configure + * Shaper configuration message. + */ +struct nss_if_shaper_configure { + struct nss_shaper_configure config; /**< Specific shaper message for a particular interface. 
*/ +}; + +/** + * nss_if_vsi_assign + * VSI assignment message. + */ +struct nss_if_vsi_assign { + uint32_t vsi; /**< Virtual interface number. */ +}; + +/** + * nss_if_vsi_unassign + * VSI unassign message. + */ +struct nss_if_vsi_unassign { + uint32_t vsi; /**< Virtual interface number. */ +}; + +/** + * nss_if_set_nexthop + * Message to set nexthop for an interface. + */ +struct nss_if_set_nexthop { + uint32_t nexthop; /**< Nexthop interface number. */ +}; + +/** + * nss_if_igs_config + * Ingress shaper set/clear configure message structure. + */ +struct nss_if_igs_config { + int32_t igs_num; /**< Ingress shaper interface number. */ +}; + +/** + * nss_if_ppe_port_create + * Message to create PPE port. + */ +struct nss_if_ppe_port_create { + int32_t ppe_port_num; /**< PPE port number returned by NSS. */ +}; + +/** + * nss_if_msgs + * Information for physical NSS interface command messages. + */ +union nss_if_msgs { + struct nss_if_link_state_notify link_state_notify; + /**< Link status notification. */ + struct nss_if_open open; + /**< Open the NSS interface. */ + struct nss_if_close close; + /**< Close the NSS interface. */ + struct nss_if_mtu_change mtu_change; + /**< MTU change notification. */ + struct nss_if_mac_address_set mac_address_set; + /**< MAC address setting. */ + struct nss_cmn_node_stats stats; + /**< Synchronize the statistics. */ + struct nss_if_shaper_assign shaper_assign; + /**< Assign the shaper. */ + struct nss_if_shaper_unassign shaper_unassign; + /**< Unassign the shaper. */ + struct nss_if_shaper_configure shaper_configure; + /**< Configure the shaper. */ + struct nss_if_pause_on_off pause_on_off; + /**< ON or OFF notification for a Pause frame. */ + struct nss_if_vsi_assign vsi_assign; + /**< Assign the VSI. */ + struct nss_if_vsi_unassign vsi_unassign; + /**< Remove the VSI assignment. */ + struct nss_if_set_nexthop set_nexthop; + /**< Set nexthop of interface. 
*/ + struct nss_if_igs_config config_igs; + /**< Configure an ingress shaper interface. */ + struct nss_if_ppe_port_create ppe_port_create; + /**< Create a PPE port. */ +}; + +/** + * nss_if_msg + * Data for sending and receiving base class messages for all interface types. + */ +struct nss_if_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + union nss_if_msgs msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving NSS interface messages. + * + * @datatypes + * nss_if_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_if_msg_callback_t)(void *app_data, struct nss_if_msg *msg); + +/** + * Callback function for receiving NSS interface data. + * + * TODO: Adjust to pass app_data as unknown to the + * list layer and netdev/sk as known. + * + * @datatypes + * net_device \n + * sk_buff + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + */ +typedef void (*nss_if_rx_callback_t)(struct net_device *netdev, struct sk_buff *skb); + +/** + * nss_if_register + * Registers the NSS interface for sending and receiving GMAC packets and messages. + * + * @datatypes + * nss_if_rx_callback_t \n + * nss_if_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] rx_callback Receive callback for the packet. + * @param[in] msg_callback Receive callback for message. + * @param[in] if_ctx Pointer to the interface context provided in the + callback. This context must be the OS network + device context pointer (net_device in Linux). + * + * @return + * Pointer to the NSS core context. 
+ */ +extern struct nss_ctx_instance *nss_if_register(uint32_t if_num, + nss_if_rx_callback_t rx_callback, + nss_if_msg_callback_t msg_callback, + struct net_device *if_ctx); + +/** + * nss_if_tx_buf + * Sends GMAC packets to a specific physical or virtual network interface. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] os_buf Pointer to the OS buffer (e.g., skbuff). + * @param[in] if_num Network physical or virtual interface number. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_if_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num); + +/** + * nss_if_tx_msg + * Sends a message to the NSS interface. + * + * @datatypes + * nss_ctx_instance \n + * nss_if_msg + * + * @param[in,out] nss_ctx Pointer to the NSS context. + * @param[in] nim Pointer to the NSS interface message. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_if_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_if_msg *nim); + +/** + * nss_if_msg_sync + * Sends a message to the NSS interface and wait for the response. + * + * @datatypes + * nss_ctx_instance \n + * nss_if_msg + * + * @param[in,out] nss_ctx Pointer to the NSS context. + * @param[in] nim Pointer to the NSS interface message. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_if_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_if_msg *nim); + +/** + * nss_if_set_nexthop + * Configure the next hop for an interface. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * @param[in] nexthop NSS interface number for next hop node. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_if_set_nexthop(struct nss_ctx_instance *nss_ctx, uint32_t if_num, uint32_t nexthop); + +/** + * nss_if_reset_nexthop + * De-configure the next hop for an interface. 
+ * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_if_reset_nexthop(struct nss_ctx_instance *nss_ctx, uint32_t if_num); + +/** + * nss_if_change_mtu + * Change the MTU of the interface. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * @param[in] mtu New MTU. + * + * @return + * Status of the transmit operation. + */ +nss_tx_status_t nss_if_change_mtu(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num, uint16_t mtu); + +/** + * nss_if_change_mac_addr + * Change the MAC address of the interface. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * @param[in] mac_addr New MAC address. + * + * @return + * Status of the transmit operation. + */ +nss_tx_status_t nss_if_change_mac_addr(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num, uint8_t *mac_addr); + +/** + * nss_if_vsi_unassign + * Detach the VSI ID from the given interface. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * @param[in] vsi VSI ID. + * + * @return + * Status of the transmit operation. + */ +nss_tx_status_t nss_if_vsi_unassign(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num, uint32_t vsi); + +/** + * nss_if_vsi_assign + * Attach the VSI ID to the given interface. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * @param[in] vsi VSI ID. + * + * @return + * Status of the transmit operation. 
+ */ +nss_tx_status_t nss_if_vsi_assign(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num, uint32_t vsi); + +/** + * @} + */ + +#endif /* __NSS_IF_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_igs.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_igs.h new file mode 100644 index 000000000..651fea4e9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_igs.h @@ -0,0 +1,213 @@ +/* + ************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_igs.h + * NSS ingress shaper interface definitions. + */ + +#ifndef _NSS_IGS_H_ +#define _NSS_IGS_H_ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +#ifdef CONFIG_NET_CLS_ACT +#include +#endif +#endif + +/** + * @addtogroup nss_ingress_shaper_subsystem + * @{ + */ + +/** + * Maximum number of supported ingress shaping interfaces. + */ +#define NSS_MAX_IGS_DYNAMIC_INTERFACES 8 + +/** + * nss_igs_msg_types + * Message types for ingress shaper requests and responses. 
 + */ +enum nss_igs_msg_types { + NSS_IGS_MSG_SYNC_STATS = NSS_IF_MAX_MSG_TYPES + 1, + NSS_IGS_MSG_MAX +}; + +/** + * nss_igs_node_stats + * Ingress shaping node debug statistics structure. + */ +struct nss_igs_node_stats { + uint32_t tx_dropped; /**< Dropped post shaping. */ + uint32_t shaper_drop; /**< Dropped during shaper enqueue. */ + uint32_t ipv4_parse_fail; /**< IPv4 parse fail. */ + uint32_t ipv4_unknown_gre_type; /**< IPv4 unknown GRE type. */ + uint32_t ipv4_unknown_l4; /**< IPv4 unknown L4 type. */ + uint32_t ipv4_no_cme; /**< IPv4 connection match entry not found. */ + uint32_t ipv4_frag_initial; /**< IPv4 initial fragment. */ + uint32_t ipv4_frag_non_initial; /**< IPv4 subsequent fragment. */ + uint32_t ipv4_malformed_udp; /**< Incomplete IPv4 UDP packet. */ + uint32_t ipv4_malformed_tcp; /**< Incomplete IPv4 TCP packet. */ + uint32_t ipv4_malformed_udpl; /**< Incomplete IPv4 UDP-Lite packet. */ + uint32_t ipv4_malformed_gre; /**< Incomplete IPv4 GRE packet. */ + uint32_t ipv6_parse_fail; /**< IPv6 parse fail. */ + uint32_t ipv6_unknown_l4; /**< IPv6 unknown L4 type. */ + uint32_t ipv6_no_cme; /**< IPv6 connection match entry not found. */ + uint32_t ipv6_frag_initial; /**< IPv6 initial fragment. */ + uint32_t ipv6_frag_non_initial; /**< IPv6 subsequent fragment. */ + uint32_t ipv6_malformed_udp; /**< Incomplete IPv6 UDP packet. */ + uint32_t ipv6_malformed_tcp; /**< Incomplete IPv6 TCP packet. */ + uint32_t ipv6_malformed_udpl; /**< Incomplete IPv6 UDP-Lite packet. */ + uint32_t ipv6_malformed_frag; /**< Incomplete IPv6 fragment. */ + uint32_t event_no_si; /**< No shaper configured. */ + uint32_t eth_parse_fail; /**< Ethernet header parse failed. */ + uint32_t eth_unknown_type; /**< Non-IP/PPPoE ether type. */ + uint32_t pppoe_non_ip; /**< Non-IP PPPoE packet. */ + uint32_t pppoe_malformed; /**< Incomplete PPPoE packet. */ +}; + +/** + * nss_igs_stats_sync_msg + * Message information for ingress shaping synchronization statistics. 
 + */ +struct nss_igs_stats_sync_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + struct nss_igs_node_stats igs_stats; /**< Debug statistics for ingress shaping. */ +}; + +/** + * nss_igs_msg + * Data for sending and receiving ingress shaper messages. + */ +struct nss_igs_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of an ingress shaper message. + */ + union { + union nss_if_msgs if_msg; + /**< NSS interface base message. */ + struct nss_igs_stats_sync_msg stats; + /**< Statistics message to host. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving ingress shaper messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_igs_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * nss_igs_get_context + * Gets the ingress shaper context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_igs_get_context(void); + +/** + * nss_igs_register_if + * Registers an ingress shaper interface with the NSS for sending and receiving messages. + * + * @datatypes + * nss_igs_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] type NSS interface type. + * @param[in] msg_callback Callback for the ingress shaper message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_igs_register_if(uint32_t if_num, uint32_t type, + nss_igs_msg_callback_t msg_callback, struct net_device *netdev, uint32_t features); + +/** + * nss_igs_unregister_if + * Deregisters an ingress shaper interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. 
+ */ +extern void nss_igs_unregister_if(uint32_t if_num); + +/** + * nss_igs_verify_if_num + * Verify whether interface is an ingress shaper interface or not. + * + * @param[in] if_num NSS interface number. + * + * @return + * True if interface is an ingress shaper interface. + */ +extern bool nss_igs_verify_if_num(uint32_t if_num); + + +#ifdef CONFIG_NET_CLS_ACT +/* + * nss_igs_module_save() + * Save the ingress shaping module reference. + * + * @datatypes + * tc_action_ops \n + * module + * + * @param[in] act Operation structure for ingress shaping action. + * @param[in] module Module structure of ingress shaping module. + * + * @return + * None. + */ +extern void nss_igs_module_save(struct tc_action_ops *act, struct module *module); +#endif + +/* + * nss_igs_module_get() + * Get the ingress shaping module reference. + * + * @return + * False if not able to take the ingress shaping module reference, otherwise true. + * + */ +extern bool nss_igs_module_get(void); + +/* + * nss_igs_module_put() + * Release the ingress shaping module reference. + * + * @return + * None. + */ +extern void nss_igs_module_put(void); + +/** + * @} + */ +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsec.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsec.h new file mode 100644 index 000000000..3a9de9339 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsec.h @@ -0,0 +1,550 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipsec.h + * NSS IPSec interface definitions. + */ + +#ifndef __NSS_IPSEC_H +#define __NSS_IPSEC_H + +/* + * For some reason Linux doesn't define this in if_arp.h, + * refer http://www.iana.org/assignments/arp-parameters/arp-parameters.xhtml + * for the full list + */ + +/** + * @addtogroup nss_ipsec_subsystem + * @{ + */ + +#define NSS_IPSEC_ARPHRD_IPSEC 31 + /**< ARP (iana.org) hardware type for an IPsec tunnel. */ +#define NSS_IPSEC_MAX_RULES 256 + /**< Maximum number of rules supported. */ +#define NSS_IPSEC_MAX_SA NSS_CRYPTO_MAX_IDXS + /**< Maximum number of SAs supported. */ + +#if (~(NSS_IPSEC_MAX_RULES - 1) & (NSS_IPSEC_MAX_RULES >> 1)) +#error "NSS Max SA should be a power of 2" +#endif + +/** + * Size of an IPsec message. + */ +#define NSS_IPSEC_MSG_LEN (sizeof(struct nss_ipsec_msg) - sizeof(struct nss_cmn_msg)) + +/** + * nss_ipsec_msg_type + * Rules for the IPsec interface. + */ +enum nss_ipsec_msg_type { + NSS_IPSEC_MSG_TYPE_NONE = 0, + NSS_IPSEC_MSG_TYPE_ADD_RULE = 1, + NSS_IPSEC_MSG_TYPE_DEL_RULE = 2, + NSS_IPSEC_MSG_TYPE_FLUSH_TUN = 3, + NSS_IPSEC_MSG_TYPE_SYNC_SA_STATS = 4, + NSS_IPSEC_MSG_TYPE_SYNC_FLOW_STATS = 5, + NSS_IPSEC_MSG_TYPE_SYNC_NODE_STATS = 6, + NSS_IPSEC_MSG_TYPE_CONFIGURE_NODE = 7, + NSS_IPSEC_MSG_TYPE_MAX +}; + +/** + * nss_ipsec_status + * Status types for the IPsec interface. 
+ */ +typedef enum nss_ipsec_status { + NSS_IPSEC_STATUS_OK = 0, + NSS_IPSEC_STATUS_ENOMEM = 1, + NSS_IPSEC_STATUS_ENOENT = 2, + NSS_IPSEC_STATUS_MAX +} nss_ipsec_status_t; + +/** + * nss_ipsec_error_type + * Error types for the IPsec interface. + */ +enum nss_ipsec_error_type { + NSS_IPSEC_ERROR_TYPE_NONE = 0, + NSS_IPSEC_ERROR_TYPE_HASH_DUPLICATE = 1, + NSS_IPSEC_ERROR_TYPE_HASH_COLLISION = 2, + NSS_IPSEC_ERROR_TYPE_UNHANDLED_MSG = 3, + NSS_IPSEC_ERROR_TYPE_INVALID_RULE = 4, + NSS_IPSEC_ERROR_TYPE_MAX_SA = 5, + NSS_IPSEC_ERROR_TYPE_MAX_FLOW = 6, + NSS_IPSEC_ERROR_TYPE_INVALID_CINDEX = 7, + NSS_IPSEC_ERROR_TYPE_INVALID_IPVER = 8, + NSS_IPSEC_ERROR_TYPE_MAX +}; + +/** + * nss_ipsec_type + * Operation types for the IPsec interface. + */ +enum nss_ipsec_type { + NSS_IPSEC_TYPE_NONE = 0, + NSS_IPSEC_TYPE_ENCAP = 1, + NSS_IPSEC_TYPE_DECAP = 2, + NSS_IPSEC_TYPE_MAX +}; + +/** + * nss_ipsec_tuple + * Common IPsec rule selector tuple for encapsulation and decapsulation. + * + * This selector is used for preparing a lookup tuple for incoming packets. + * The tuple is used to derive the index into the rule table. + * + * Choosing the selector fields depends on the IPsec encapsulation or decapsulation + * package. The host has no understanding of the index derived from the selector fields, + * and thus it provides information for all entries in the structure. + * + * The encapsulation and decapsulation packages return the index in their respective + * tables to the host. The host stores the rule for future reference purposes. + */ +struct nss_ipsec_tuple { + uint32_t dst_addr[4]; /**< Destination IP address. */ + uint32_t src_addr[4]; /**< Source IP address. */ + + uint32_t esp_spi; /**< SPI index. */ + + uint16_t dst_port; /**< Destination port (UDP or TCP). */ + uint16_t src_port; /**< Source port (UDP or TCP). */ + + uint8_t proto_next_hdr; /**< IP header type. */ + uint8_t ip_ver; /**< IP version. */ + uint8_t res[2]; /**< Reserved for 4-byte alignment. 
 */ +}; + +/** + * nss_ipsec_rule_oip + * Common information about the IPsec rule outer IP header. + */ +struct nss_ipsec_rule_oip { + uint32_t dst_addr[4]; /**< IPv4 destination address to apply. */ + uint32_t src_addr[4]; /**< IPv4 source address to apply. */ + + uint32_t esp_spi; /**< ESP SPI index to apply. */ + + uint16_t dst_port; /**< Destination port (UDP or TCP). */ + uint16_t src_port; /**< Source port (UDP or TCP). */ + + uint8_t ttl_hop_limit; /**< IPv4 time-to-live value to apply. */ + uint8_t ip_ver; /**< IP version. */ + uint8_t proto_next_hdr; /**< IP header type. */ + uint8_t res; /**< Reserved for 4-byte alignment. */ +}; + +/** + * nss_ipsec_rule_data + * IPsec rule data used for per-packet transformation. + */ +struct nss_ipsec_rule_data { + + uint16_t crypto_index; /**< Crypto index for the security association. */ + uint16_t window_size; /**< ESP sequence number window. */ + + uint8_t cipher_blk_len; /**< Size of the cipher block. */ + uint8_t iv_len; /**< Size of the initialization vector. */ + uint8_t nat_t_req; /**< NAT-T required. */ + uint8_t esp_icv_len; /**< Size of the ICV to be produced as a result of authentication. */ + + uint8_t esp_seq_skip; /**< Skip an ESP sequence number. */ + uint8_t esp_tail_skip; /**< Skip an ESP trailer. */ + uint8_t use_pattern; /**< Use random pattern in a hash calculation. */ + uint8_t enable_esn; /**< Enable extended sequence number. */ + + uint8_t dscp; /**< Default DSCP value of the SA. */ + uint8_t df; /**< Default don't fragment value of the SA. */ + uint8_t copy_dscp; /**< The flag tells whether to copy DSCP from inner header. */ + uint8_t copy_df; /**< The flag tells whether to copy DF from inner header. */ + + uint32_t res2[4]; /**< Reserved 16 bytes for future use. */ +}; + +/** + * nss_ipsec_rule + * Push message for IPsec rules. + * + * This message is sent from the host to the NSS for performing an operation + * on NSS rule tables. 
+ */ +struct nss_ipsec_rule { + struct nss_ipsec_rule_oip oip; /**< Per rule outer IP information. */ + struct nss_ipsec_rule_data data;/**< Per rule data. */ + + uint32_t index; /**< Index provided by the NSS. */ + uint32_t sa_idx; /**< Rule index for the security association table. */ +}; + +/** + * nss_ipsec_configure_node + * Push message for setting IPsec inline mode and initializing DMA rings. + */ +struct nss_ipsec_configure_node { + bool dma_redirect; /**< Program redirect DMA ring. */ + bool dma_lookaside; /**< Program lookaside DMA ring. */ +}; + +/** + * nss_ipsec_sa_stats + * Packet statistics per security association. + */ +struct nss_ipsec_sa_stats { + uint32_t count; /**< Packets processed. */ + uint32_t bytes; /**< Bytes processed. */ + uint32_t no_headroom; /**< Insufficient headroom. */ + uint32_t no_tailroom; /**< Insufficient tailroom. */ + uint32_t no_resource; /**< No crypto buffer. */ + uint32_t fail_queue; /**< Failed to enqueue. */ + uint32_t fail_hash; /**< Hash mismatch. */ + uint32_t fail_replay; /**< Replay check failure. */ + uint64_t seq_num; /**< Current sequence number. */ + uint64_t window_max; /**< Maximum size of the window. */ + uint32_t window_size; /**< Current window size. */ + uint32_t fail_hash_cont; /**< Consecutive hash fail count. */ + uint8_t esn_enabled; /**< Indicates whether ESN is enabled. */ + uint8_t res[3]; /**< Reserved for future use. */ +} /** @cond */ __attribute__((packed))/** @endcond */; + +/** + * nss_ipsec_flow_stats + * Per-flow statistics. + */ +struct nss_ipsec_flow_stats { + uint32_t processed; /**< Packets processed for this flow. */ + + uint8_t use_pattern; /**< Use random pattern. */ + uint8_t res[3]; /**< Reserved for 4-byte alignment padding. */ +}; + +/** + * nss_ipsec_node_stats + * Per-node statistics. + */ +struct nss_ipsec_node_stats { + uint32_t enqueued; /**< Packets enqueued to the node. */ + uint32_t completed; /**< Packets processed by the node. 
*/ + uint32_t linearized; /**< Packet is linear. */ + uint32_t exceptioned; /**< Packets exception from the NSS. */ + uint32_t fail_enqueue; /**< Packets failed to enqueue. */ + uint32_t redir_rx; /**< Packets received in redirect ring. */ + uint32_t fail_redir; /**< Packets dropped in redirect ring. */ +}; + +/** + * nss_ipsec_stats + * Common statistics structure. + */ +union nss_ipsec_stats { + struct nss_ipsec_sa_stats sa; /**< Security association statistics. */ + struct nss_ipsec_flow_stats flow; /**< Flow statistics. */ + struct nss_ipsec_node_stats node; /**< Node statistics. */ +}; + +/** + * nss_ipsec_msg + * Data for sending and receiving IPsec messages. + */ +struct nss_ipsec_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + uint32_t tunnel_id; /**< ID of the tunnel associated with the message. */ + struct nss_ipsec_tuple tuple; + /**< Tuple to look up the SA table for encapsulation or decapsulation. */ + enum nss_ipsec_type type; /**< Encapsulation or decapsulation operation. */ + + /** + * Payload of an IPsec message. + */ + union { + struct nss_ipsec_rule rule; + /**< IPsec rule message. */ + struct nss_ipsec_configure_node node; + /**< IPsec node message. */ + union nss_ipsec_stats stats; + /**< Retrieve statistics for the tunnel. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving message notifications. + * + * @datatypes + * nss_ipsec_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_ipsec_msg_callback_t)(void *app_data, struct nss_ipsec_msg *msg); + +/** + * Callback function for receiving data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the message data. + * @param[in] napi Pointer to the NAPI structure. 
+ */ +typedef void (*nss_ipsec_buf_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_ipsec_tx_msg + * Sends an IPsec message to the NSS HLOS driver. + * + * @datatypes + * nss_ctx_instance \n + * nss_ipsec_msg + * + * @param[in] nss_ctx Pointer to the NSS HLOS driver context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_ipsec_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_ipsec_msg *msg); + +/** + * nss_ipsec_tx_msg_sync + * Sends IPsec messages synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_ipsec_msg_type \n + * nss_ipsec_msg \n + * nss_ipsec_error_type + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num Configuration interface number. + * @param[in] type Type of the message. + * @param[in] len Size of the payload. + * @param[in] nim Pointer to the message data. + * @param[in,out] resp Response for the configuration. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_ipsec_tx_msg_sync(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + enum nss_ipsec_msg_type type, uint16_t len, + struct nss_ipsec_msg *nim, enum nss_ipsec_error_type *resp); + +/** + * nss_ipsec_tx_buf + * Sends a plain text packet to NSS for IPsec encapsulation or decapsulation. + * + * @datatypes + * sk_buff + * + * @param[in] skb Pointer to the message data. + * @param[in] if_num Pointer to the NSS interface number. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_ipsec_tx_buf(struct sk_buff *skb, uint32_t if_num); + +/** + * nss_ipsec_notify_register + * Registers an event callback handler with the HLOS driver. + * + * When registered, the message callback is called when the NSS + * sends a response to the message sent by the host. + * + * @datatypes + * nss_ipsec_msg_callback_t + * + * @param[in] if_num NSS interface number. 
+ * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the context of the message. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_ipsec_notify_register(uint32_t if_num, nss_ipsec_msg_callback_t cb, void *app_data); + +/** + * nss_ipsec_data_register + * Registers a data callback handler with the HLOS driver. + * + * The HLOS driver calls the registered data callback to return + * the packet to the OS. + * + * @datatypes + * nss_ipsec_buf_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] cb Callback function for the data. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_ipsec_data_register(uint32_t if_num, nss_ipsec_buf_callback_t cb, struct net_device *netdev, uint32_t features); + +/** + * nss_ipsec_notify_unregister + * Deregisters the message notifier from the HLOS driver. + * + * @datatypes + * nss_ctx_instance + * + * @param[in,out] ctx Pointer to the context of the HLOS driver. + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The message notifier must have been previously registered. + */ +extern void nss_ipsec_notify_unregister(struct nss_ctx_instance *ctx, uint32_t if_num); + +/** + * nss_ipsec_data_unregister + * Deregisters the data notifier from the HLOS driver. + * + * @datatypes + * nss_ctx_instance + * + * @param[in,out] ctx Pointer to the context of the HLOS driver. + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The data notifier must have been previously registered. + */ +extern void nss_ipsec_data_unregister(struct nss_ctx_instance *ctx, uint32_t if_num); + +/** + * nss_ipsec_get_context + * Gets the NSS context for the IPsec handle. 
+ * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_ipsec_get_context(void); + +/** + * nss_ipsec_get_ifnum + * Gets the IPsec interface number with a core ID. + * + * @param[in] if_num NSS interface number. + * + * @return + * Interface number with the core ID. + */ +extern int32_t nss_ipsec_get_ifnum(int32_t if_num); + +/** + * nss_ipsec_msg_init + * Initializes an IPsec message. + * + * @datatypes + * nss_ipsec_msg \n + * nss_ipsec_msg_callback_t + * + * @param[in,out] nim Pointer to the NSS interface message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_ipsec_msg_init(struct nss_ipsec_msg *nim, uint16_t if_num, uint32_t type, uint32_t len, + nss_ipsec_msg_callback_t cb, void *app_data); + +/** + * nss_ipsec_get_encap_interface + * Gets the NSS interface number to be used for IPsec encapsulation message. + * + * @return + * Encapsulation interface number. + */ +extern int32_t nss_ipsec_get_encap_interface(void); + +/** + * nss_ipsec_get_decap_interface + * Gets the NSS interface number to be used for an IPsec decapsulation message. + * + * @return + * Decapsulation interface number. + */ +extern int32_t nss_ipsec_get_decap_interface(void); + +/** + * nss_ipsec_get_data_interface + * Gets the NSS interface number to be used for an IPsec data transfer. + * + * @return + * NSS interface number. + */ +extern int32_t nss_ipsec_get_data_interface(void); + +/** + * nss_ipsec_ppe_port_config + * Configure Packet Processing Engine IPsec port. + * + * @datatypes + * nss_ctx_instance \n + * net_device + * + * @param[in] ctx Pointer to the context of the HLOS driver. + * @param[in] netdev Pointer to the associated network device. + * @param[in] if_num Data interface number. 
+ * @param[in] vsi_num Virtual switch instance number. + * + * @return + * True if successful, else false. + */ +extern bool nss_ipsec_ppe_port_config(struct nss_ctx_instance *ctx, struct net_device *netdev, + uint32_t if_num, uint32_t vsi_num); + +/** + * nss_ipsec_ppe_mtu_update() + * Configure Packet Processing Engine MTU for IPsec in-line. + * + * @datatypes + * nss_ctx_instance \n + * + * @param[in] ctx Pointer to the context of the HLOS driver. + * @param[in] if_num Data interface number. + * @param[in] mtu Maximum transmission unit of Interface number. + * @param[in] mru Maximum Receive unit of Interface number. + * + * @return + * True if successful, else false. + */ +bool nss_ipsec_ppe_mtu_update(struct nss_ctx_instance *ctx, uint32_t if_num, uint16_t mtu, uint16_t mru); + +/** + * @} + */ + +#endif /* __NSS_IPSEC_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsec_cmn.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsec_cmn.h new file mode 100644 index 000000000..f9f4c78c5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsec_cmn.h @@ -0,0 +1,691 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
 + ************************************************************************** + */ + +/** + * @file nss_ipsec_cmn.h + * NSS IPsec interface definitions. + */ + +#ifndef __NSS_IPSEC_CMN_H_ +#define __NSS_IPSEC_CMN_H_ + +/** + * @addtogroup nss_ipsec_subsystem + * @{ + */ + +#define NSS_IPSEC_CMN_ARPHRD_IPSEC 31 /**< ARP (iana.org) hardware type for an IPsec tunnel. */ + +/** + * Flags for SA configuration. + */ +#define NSS_IPSEC_CMN_FLAG_IPV6 (0x1 << 0) /**< IPv6 header. */ +#define NSS_IPSEC_CMN_FLAG_IPV4_NATT (0x1 << 1) /**< IPv4 NAT traversal. */ +#define NSS_IPSEC_CMN_FLAG_IPV4_UDP (0x1 << 2) /**< IPv4 UDP traversal. */ +#define NSS_IPSEC_CMN_FLAG_ESP_ESN (0x1 << 3) /**< Enable ESP extended sequence number. */ +#define NSS_IPSEC_CMN_FLAG_ESP_SKIP (0x1 << 4) /**< Skip ESP sequence number and ICV. */ +#define NSS_IPSEC_CMN_FLAG_ESP_REPLAY (0x1 << 5) /**< Check ESP replay counter. */ +#define NSS_IPSEC_CMN_FLAG_CIPHER_NULL (0x1 << 6) /**< NULL cipher mode. */ +#define NSS_IPSEC_CMN_FLAG_CIPHER_GCM (0x1 << 7) /**< Galois counter mode. */ +#define NSS_IPSEC_CMN_FLAG_COPY_DSCP (0x1 << 8) /**< Copy DSCP from inner to outer header. */ +#define NSS_IPSEC_CMN_FLAG_COPY_DF (0x1 << 9) /**< Copy DF from inner node to outer node. */ +#define NSS_IPSEC_CMN_FLAG_MODE_TRANS (0x1 << 10) /**< Encapsulate or decapsulate in transport mode (default is tunnel mode). */ + +#define NSS_IPSEC_CMN_FLAG_HDR_MASK \ + (NSS_IPSEC_CMN_FLAG_IPV6 | NSS_IPSEC_CMN_FLAG_IPV4_NATT | NSS_IPSEC_CMN_FLAG_IPV4_UDP) + /**< Flag header mask. */ + +#define NSS_IPSEC_CMN_FEATURE_INLINE_ACCEL 0x1 /**< Interface enabled for inline exception. */ + +#define NSS_IPSEC_CMN_MDATA_VERSION 0x01 /**< Metadata version. */ +#define NSS_IPSEC_CMN_MDATA_MAGIC 0x8893 /**< Metadata magic. */ +#define NSS_IPSEC_CMN_MDATA_ORIGIN_HOST 0x01 /**< Metadata originates at the host. */ +#define NSS_IPSEC_CMN_MDATA_ALIGN_SZ sizeof(uint32_t) /**< Metadata alignment size. */ +/** + * nss_ipsec_cmn_msg_type + * IPsec message types. 
+ */ +enum nss_ipsec_cmn_msg_type { + NSS_IPSEC_CMN_MSG_TYPE_NONE = 0, /**< Nothing to do. */ + NSS_IPSEC_CMN_MSG_TYPE_NODE_CONFIG = 1, /**< Configure IPsec node. */ + NSS_IPSEC_CMN_MSG_TYPE_CTX_CONFIG = 2, /**< Configure IPsec dynamic node. */ + NSS_IPSEC_CMN_MSG_TYPE_CTX_SYNC = 3, /**< Synchronize context statistics to host. */ + NSS_IPSEC_CMN_MSG_TYPE_SA_CREATE = 4, /**< Create SA. */ + NSS_IPSEC_CMN_MSG_TYPE_SA_DESTROY = 5, /**< Destroy SA. */ + NSS_IPSEC_CMN_MSG_TYPE_SA_SYNC = 6, /**< Synchronize SA statistics to host. */ + NSS_IPSEC_CMN_MSG_TYPE_FLOW_CREATE = 7, /**< Create flow. */ + NSS_IPSEC_CMN_MSG_TYPE_FLOW_DESTROY = 8, /**< Delete flow. */ + NSS_IPSEC_CMN_MSG_TYPE_MAX +}; + +/** + * nss_ipsec_cmn_msg_error + * IPsec message error types. + */ +enum nss_ipsec_cmn_msg_error { + NSS_IPSEC_CMN_MSG_ERROR_NONE = 0, /**< No error. */ + NSS_IPSEC_CMN_MSG_ERROR_CTX_INVAL = 1, /**< Invalid context. */ + NSS_IPSEC_CMN_MSG_ERROR_SA_ALLOC = 2, /**< Failed to allocate SA. */ + NSS_IPSEC_CMN_MSG_ERROR_SA_INVAL = 3, /**< Invalid SA. */ + NSS_IPSEC_CMN_MSG_ERROR_SA_DUP = 4, /**< SA exists. */ + NSS_IPSEC_CMN_MSG_ERROR_SA_INUSE = 5, /**< SA is in use. */ + NSS_IPSEC_CMN_MSG_ERROR_FLOW_ALLOC = 6, /**< Failed to allocate flow. */ + NSS_IPSEC_CMN_MSG_ERROR_FLOW_INVAL = 7, /**< Flow not found. */ + NSS_IPSEC_CMN_MSG_ERROR_FLOW_DUP = 8, /**< Duplicate flow. */ + NSS_IPSEC_CMN_MSG_ERROR_FLOW_SA = 9, /**< Failed to find SA for the flow. */ + NSS_IPSEC_CMN_MSG_ERROR_NODE_REG_DYNIF = 10, + /**< Error registering dynamic interface. */ + NSS_IPSEC_CMN_MSG_ERROR_UNHANDLED_MSG= 11, /**< Unhandled message type. */ + NSS_IPSEC_CMN_MSG_ERROR_MAX /**< Maximum error message. */ +}; + +/** + * nss_ipsec_cmn_ctx_type + * IPsec context type. + */ +enum nss_ipsec_cmn_ctx_type { + NSS_IPSEC_CMN_CTX_TYPE_NONE = 0, /**< Invalid direction. */ + NSS_IPSEC_CMN_CTX_TYPE_INNER, /**< Encapsulation. */ + NSS_IPSEC_CMN_CTX_TYPE_MDATA_INNER, /**< Metadata for encapsulation. 
*/ + NSS_IPSEC_CMN_CTX_TYPE_OUTER, /**< Decapsulation. */ + NSS_IPSEC_CMN_CTX_TYPE_MDATA_OUTER, /**< Metadata for decapsulation. */ + NSS_IPSEC_CMN_CTX_TYPE_REDIR, /**< Redirect. */ + NSS_IPSEC_CMN_CTX_TYPE_MAX +}; + +/** + * nss_ipsec_cmn_stats_types + * IPsec common statistics types. + */ +enum nss_ipsec_cmn_stats_types { + NSS_IPSEC_CMN_STATS_FAIL_HEADROOM = NSS_STATS_NODE_MAX, + /**< Failure in headroom check. */ + NSS_IPSEC_CMN_STATS_FAIL_TAILROOM, /**< Failure in tailroom check. */ + NSS_IPSEC_CMN_STATS_FAIL_REPLAY, /**< Failure in anti-replay check. */ + NSS_IPSEC_CMN_STATS_FAIL_REPLAY_DUP, /**< Failure in anti-replay; duplicate records. */ + NSS_IPSEC_CMN_STATS_FAIL_REPLAY_WIN, /**< Failure in anti-replay; packet outside the window. */ + NSS_IPSEC_CMN_STATS_FAIL_PBUF_CRYPTO, /**< Failure in crypto pbuf allocation. */ + NSS_IPSEC_CMN_STATS_FAIL_QUEUE, /**< Failure due to queue full in IPsec. */ + NSS_IPSEC_CMN_STATS_FAIL_QUEUE_CRYPTO, /**< Failure due to queue full in crypto. */ + NSS_IPSEC_CMN_STATS_FAIL_QUEUE_NEXTHOP, /**< Failure due to queue full in next hop. */ + NSS_IPSEC_CMN_STATS_FAIL_PBUF_ALLOC, /**< Failure in pbuf allocation. */ + NSS_IPSEC_CMN_STATS_FAIL_PBUF_LINEAR, /**< Failure in pbuf linearization. */ + NSS_IPSEC_CMN_STATS_FAIL_PBUF_STATS, /**< Failure in pbuf allocation for statistics. */ + NSS_IPSEC_CMN_STATS_FAIL_PBUF_ALIGN, /**< Failure in pbuf access due to non-word alignment. */ + NSS_IPSEC_CMN_STATS_FAIL_CIPHER, /**< Failure in decrypting the data. */ + NSS_IPSEC_CMN_STATS_FAIL_AUTH, /**< Failure in authenticating the data. */ + NSS_IPSEC_CMN_STATS_FAIL_SEQ_OVF, /**< Failure due to sequence number rollover. */ + NSS_IPSEC_CMN_STATS_FAIL_BLK_LEN, /**< Failure in decapsulation due to bad cipher block length. */ + NSS_IPSEC_CMN_STATS_FAIL_HASH_LEN, /**< Failure in decapsulation due to bad hash block length. */ + NSS_IPSEC_CMN_STATS_FAIL_TRANSFORM, /**< Failure in transformation; general error. 
*/ + NSS_IPSEC_CMN_STATS_FAIL_CRYPTO, /**< Failure in crypto transformation. */ + NSS_IPSEC_CMN_STATS_FAIL_CLE, /**< Failure in classification; general failure. */ + NSS_IPSEC_CMN_STATS_IS_STOPPED, /**< Indicates if SA is stopped; for example: sequence overflow. */ + NSS_IPSEC_CMN_STATS_MAX, /**< Maximum statistics type. */ +}; + +/** + * nss_ipsec_cmn_flow_tuple + * IPsec tuple for creating flow entries. + * + * Note: This is a common selector which is used for preparing + * a lookup tuple for incoming packets. The tuple is used + * for computing the hash index in the flow table. There are multiple + * fields in the tuple and the recipient node decides which fields + * it must use from the tuple to calculate the hash index. The host + * has no view of the hash index and hence must compute its own index + * based on the tuple. + */ +struct nss_ipsec_cmn_flow_tuple { + uint32_t dest_ip[4]; /**< Destination IP. */ + uint32_t src_ip[4]; /**< Source IP. */ + uint32_t spi_index; /**< ESP SPI index. */ + + uint16_t dst_port; /**< Destination L4 port. */ + uint16_t src_port; /**< Source L4 port. */ + + uint8_t user_pattern; /**< User defined field. */ + uint8_t protocol; /**< IP protocol types. */ + uint8_t ip_ver; /**< IP version. */ +}; + +/** + *nss_ipsec_cmn_sa_tuple + * IPsec outer header configuration. + */ +struct nss_ipsec_cmn_sa_tuple { + uint32_t dest_ip[4]; /**< Destination IP. */ + uint32_t src_ip[4]; /**< Source IP. */ + uint32_t spi_index; /**< ESP SPI index. */ + + uint16_t dest_port; /* Destination L4 port. */ + uint16_t src_port; /* Source L4 port. */ + + uint16_t crypto_index; /**< Crypto index for the SA. */ + uint8_t protocol; /**< Outer protocol. */ + uint8_t ip_ver; /**< IP version. */ + + uint8_t hop_limit; /**< Time-to-Live or next hop limit. */ + uint8_t res[3]; /**< Reserved. */ +}; + +/** + *nss_ipsec_cmn_sa_data + * IPsec SA data used for transformation. + */ +struct nss_ipsec_cmn_sa_data { + uint32_t seq_start; /**< Starting sequence number. 
*/ + uint32_t flags; /**< Configuration flags. */ + + uint16_t window_size; /**< ESP sequence number window. */ + uint8_t dscp; /**< Default DSCP value of the SA. */ + uint8_t df; /**< Default do not fragment value of the SA. */ + + uint8_t blk_len; /**< Cipher block length. */ + uint8_t iv_len; /**< IV length. */ + uint8_t icv_len; /**< ESP trailers ICV length to apply. */ + uint8_t res1; /**< Reserved. */ + + uint32_t res2[4]; /**< Reserved for future use. */ +}; + +/** + * nss_ipsec_cmn_flow + * IPsec flow configuration message. + */ +struct nss_ipsec_cmn_flow { + struct nss_ipsec_cmn_flow_tuple flow_tuple; /**< Flow tuple. */ + struct nss_ipsec_cmn_sa_tuple sa_tuple; /**< SA tuple. */ +}; + +/** + * nss_ipsec_cmn_sa + * IPsec SA configuration message. + */ +struct nss_ipsec_cmn_sa { + struct nss_ipsec_cmn_sa_tuple sa_tuple; /**< SA tuple. */ + struct nss_ipsec_cmn_sa_data sa_data; /**< SA data. */ +}; + +/** + * nss_ipsec_cmn_ctx + * IPsec context configuration. + */ +struct nss_ipsec_cmn_ctx { + enum nss_ipsec_cmn_ctx_type type; /**< Node type. */ + uint32_t except_ifnum; /**< Exception interface for egress. */ + uint32_t sibling_ifnum; /**< Sibling interface. */ +}; + +/** + * nss_ipsec_cmn_node + * IPsec node configuration. + */ +struct nss_ipsec_cmn_node { + bool dma_redirect; /**< Enable redirect DMA ring. */ + bool dma_lookaside; /**< Enable lookaside DMA ring. */ + uint16_t max_sa; /**< Maximum number of SA(s) supported. */ +}; + +/** + * nss_ipsec_cmn_sa_replay + * IPsec replay statistics + */ +struct nss_ipsec_cmn_sa_replay { + uint64_t seq_start; /**< Start of replay window. */ + uint64_t seq_cur; /**< Current sequence number. */ + uint16_t window_size; /**< Window size. */ + uint8_t res[6]; /**< Reserved for future use. */ +}; + +/** + * nss_ipsec_cmn_sa_stats + * IPsec SA statistics. + */ +struct nss_ipsec_cmn_sa_stats { + struct nss_cmn_node_stats cmn_stats; /**< Packet statistics. */ + uint32_t fail_headroom; /**< Failed headroom check. 
*/ + uint32_t fail_tailroom; /**< Failed tailroom check. */ + uint32_t fail_replay; /**< Failure in anti-replay check. */ + uint32_t fail_replay_dup; /**< Failure in anti-replay; duplicate records. */ + uint32_t fail_replay_win; /**< Failure in anti-replay; packet outside the window. */ + uint32_t fail_pbuf_crypto; /**< Failed to allocate crypto pbuf. */ + uint32_t fail_queue; /**< Failure due to queue full in IPsec. */ + uint32_t fail_queue_crypto; /**< Failure due to queue full in crypto. */ + uint32_t fail_queue_nexthop; /**< Failure due to queue full in next hop. */ + uint32_t fail_pbuf_alloc; /**< Failure in pbuf allocation. */ + uint32_t fail_pbuf_linear; /**< Failure in pbuf linearization. */ + uint32_t fail_pbuf_stats; /**< Failure in pbuf allocation for statistics. */ + uint32_t fail_pbuf_align; /**< Failure in pbuf access due to non-word alignment. */ + uint32_t fail_cipher; /**< Failure in decrypting the data. */ + uint32_t fail_auth; /**< Failure in authenticating the data. */ + uint32_t fail_seq_ovf; /**< Failure due to sequence number rollover. */ + uint32_t fail_blk_len; /**< Failure in decapsulation due to bad cipher block length. */ + uint32_t fail_hash_len; /**< Failure in decapsulation due to bad hash block length. */ + uint32_t fail_transform; /**< Failure in transformation; general error. */ + uint32_t fail_crypto; /**< Failure in crypto transformation. */ + uint32_t fail_cle; /**< Failure in classification; general failure. */ + uint32_t is_stopped; /**< Indicates if SA is stopped; for example, seq overflow. */ +}; + +/** + * nss_ipsec_cmn_sa_sync + * IPsec SA sync message. + */ +struct nss_ipsec_cmn_sa_sync { + struct nss_ipsec_cmn_sa_replay replay; /**< Replay statistics. */ + struct nss_ipsec_cmn_sa_tuple sa_tuple; /**< SA tuple. */ + struct nss_ipsec_cmn_sa_stats stats; /**< Packet and failure statistics. */ +}; + +/** + * nss_ipsec_cmn_ctx_stats + * IPsec context statistics. 
+ */ +struct nss_ipsec_cmn_ctx_stats { + struct nss_cmn_node_stats cmn_stats; + /**< Packet statistics. */ + uint32_t exceptioned; /**< Exceptioned to host. */ + uint32_t linearized; /**< Linearized packets. */ + uint32_t redirected; /**< Redirected from inline. */ + uint32_t dropped; /**< Total dropped packets. */ + uint32_t fail_sa; /**< Failed to find SA. */ + uint32_t fail_flow; /**< Failed to find flow. */ + uint32_t fail_stats; /**< Failed to send statistics. */ + uint32_t fail_exception; /**< Failed to exception. */ + uint32_t fail_transform; /**< Failed to produce output. */ + uint32_t fail_linearized; /**< Failed to linearize. */ + uint32_t fail_mdata_ver; /**< Invalid metadata version. */ + uint32_t fail_ctx_active; /**< Failed to queue as context is not active. */ + uint32_t fail_pbuf_crypto; /**< Failed to allocate pbuf for crypto operation. */ + uint32_t fail_queue_crypto; /**< Failed to queue pbuf to crypto pnode. */ +}; + +/** + * nss_ipsec_cmn_ctx_sync + * IPsec context synchronous message. + */ +struct nss_ipsec_cmn_ctx_sync { + enum nss_ipsec_cmn_ctx_type type; /**< IPsec context type. */ + struct nss_ipsec_cmn_ctx_stats stats; /**< Context statistics. */ +}; + +/** + * nss_ipsec_cmn_mdata_cmn + * IPsec common metadata information. + */ +struct nss_ipsec_cmn_mdata_cmn { + uint8_t version; /**< Metadata version. */ + uint8_t origin; /**< Metadata origin (host or NSS). */ + uint16_t len; /**< Metadata length including extra bytes. */ + uint8_t res[2]; /**< Reserved for future. */ + uint16_t magic; /**< Metadata magic. */ +}; + +/** + * nss_ipsec_cmn_mdata_encap + * IPsec encapsulation metadata information. + */ +struct nss_ipsec_cmn_mdata_encap { + struct nss_ipsec_cmn_sa_tuple sa; /**< SA tuple. */ + uint32_t seq_num; /**< Sequence number for encapsulation (zero disables it). */ + uint16_t data_len; /**< Length of data to encapsulate. */ + uint16_t flags; /**< Encapsulation metadata flags. 
*/ +}; + +/** + * nss_ipsec_cmn_mdata_decap + * IPsec decapsulation metadata information. + */ +struct nss_ipsec_cmn_mdata_decap { + struct nss_ipsec_cmn_sa_tuple sa; /**< SA tuple. */ +}; + +/** + * nss_ipsec_cmn_mdata + * IPsec metadata for host originated packets. + */ +struct nss_ipsec_cmn_mdata { + struct nss_ipsec_cmn_mdata_cmn cm; /**< Common metadata. */ + + union { + struct nss_ipsec_cmn_mdata_encap encap; /**< Encapsulation metadata. */ + struct nss_ipsec_cmn_mdata_decap decap; /**< Decapsulation metadata. */ + } data; /**< Metadata payload. */ +}; + +/** + * nss_ipsec_cmn_stats_notification + * IPsec common transmission statistics structure. + */ +struct nss_ipsec_cmn_stats_notification { + uint64_t stats_ctx[NSS_IPSEC_CMN_STATS_MAX]; /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +/** + * nss_ipsec_cmn_msg + * Message structure for NSS IPsec messages. + */ +struct nss_ipsec_cmn_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of IPsec interface message. + */ + union { + struct nss_ipsec_cmn_node node; /**< Node configuration message. */ + struct nss_ipsec_cmn_ctx ctx; /**< Context configuration message. */ + struct nss_ipsec_cmn_sa sa; /**< SA configuration message. */ + struct nss_ipsec_cmn_flow flow; /**< Flow configuration message. */ + struct nss_ipsec_cmn_sa_sync sa_sync; /**< SA statistics message. */ + struct nss_ipsec_cmn_ctx_sync ctx_sync; /**< Context statistics message. */ + } msg; /**< Message payload. */ +}; + +/** + * nss_ipsec_cmn_mdata_init + * Initialize the metadata common fields. + * + * @datatypes + * nss_ipsec_cmn_mdata + * + * @param[in] mdata Metadata pointer. + * @param[in] len Metadata length including extra bytes. + * + * @return + * Pointer to metadata payload. 
+ */ +static inline void *nss_ipsec_cmn_mdata_init(struct nss_ipsec_cmn_mdata *mdata, uint16_t len) +{ + mdata->cm.len = len; + mdata->cm.magic = NSS_IPSEC_CMN_MDATA_MAGIC; + mdata->cm.version = NSS_IPSEC_CMN_MDATA_VERSION; + mdata->cm.origin = NSS_IPSEC_CMN_MDATA_ORIGIN_HOST; + + return &mdata->data; +} + +/** + * Callback function for receiving message notifications. + * + * @datatypes + * nss_ipsec_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_ipsec_cmn_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * Callback function for receiving data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the message data. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_ipsec_cmn_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_ipsec_cmn_get_context + * Gets the NSS context for the IPsec handle. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_ipsec_cmn_get_context(void); + +/** + * nss_ipsec_cmn_get_ifnum_with_coreid + * Gets the IPsec interface number with a core ID. + * + * @param[in] ifnum NSS interface number. + * + * @return + * Interface number with the core ID. + */ +extern uint32_t nss_ipsec_cmn_get_ifnum_with_coreid(int32_t ifnum); + +/** + * nss_ipsec_cmn_unregister_if + * Deregisters an IPSEC tunnel interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The tunnel interface must have been previously registered. + * + * @return + * True if successful, else false. 
+ */ +extern bool nss_ipsec_cmn_unregister_if(uint32_t if_num); + +/** + * nss_ipsec_cmn_register_if + * Registers the IPsec interface with the NSS for sending and + * receiving messages. + * + * @datatypes + * nss_ipsec_cmn_data_callback_t \n + * nss_ipsec_cmn_msg_callback_t \n + * nss_dynamic_interface_type \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] netdev Pointer to the associated network device. + * @param[in] cb_data Callback for the data. + * @param[in] cb_msg Callback for the message. + * @param[in] features Socket buffer types supported by this interface. + * @param[in] type Dynamic interface type. + * @param[in] app_data Application context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_ipsec_cmn_register_if(uint32_t if_num, struct net_device *netdev, + nss_ipsec_cmn_data_callback_t cb_data, + nss_ipsec_cmn_msg_callback_t cb_msg, + uint32_t features, enum nss_dynamic_interface_type type, void *app_data); + +/** + * nss_ipsec_cmn_notify_unregister + * Deregisters the message notifier from the HLOS driver. + * + * @datatypes + * nss_ctx_instance + * + * @param[in,out] ctx Pointer to the context of the HLOS driver. + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The message notifier must have been previously registered. + */ +extern void nss_ipsec_cmn_notify_unregister(struct nss_ctx_instance *ctx, uint32_t if_num); + +/** + * nss_ipsec_cmn_notify_register + * Registers an event callback to handle notifications from the IPsec firmware package. + * + * @datatypes + * nss_ipsec_cmn_msg_callback_t \n + * + * @param[in] ifnum NSS interface number. + * @param[in] cb Callback for IPsec message. + * @param[in] app_data Pointer to the application context. + * + * @return + * Pointer to the NSS core context. 
+ */ +extern struct nss_ctx_instance *nss_ipsec_cmn_notify_register(uint32_t ifnum, nss_ipsec_cmn_msg_callback_t cb, void *app_data); + +/** + * nss_ipsec_cmn_msg_init + * Initializes an IPsec message. + * + * @datatypes + * nss_ipsec_cmn_msg \n + * nss_ipsec_cmn_msg_type \n + * nss_ipsec_cmn_msg_callback_t + * + * @param[in,out] nim Pointer to the NSS interface message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_ipsec_cmn_msg_init(struct nss_ipsec_cmn_msg *nim, uint16_t if_num, enum nss_ipsec_cmn_msg_type type, + uint16_t len, nss_ipsec_cmn_msg_callback_t cb, void *app_data); + +/** + * nss_ipsec_cmn_tx_msg + * Sends an asynchronous IPsec message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_ipsec_cmn_msg + * + * @param[in] nss_ctx Pointer to the NSS HLOS driver context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_ipsec_cmn_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_ipsec_cmn_msg *msg); + +/** + * nss_ipsec_cmn_tx_msg_sync + * Sends a synchronous IPsec message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_ipsec_cmn_msg_type \n + * nss_ipsec_cmn_msg + * + * @param[in] nss_ctx Pointer to the NSS HLOS driver context. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] nicm Pointer to the NSS IPsec message. + * + * @return + * Status of the Tx operation. 
+ */ +extern nss_tx_status_t nss_ipsec_cmn_tx_msg_sync(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + enum nss_ipsec_cmn_msg_type type, uint16_t len, + struct nss_ipsec_cmn_msg *nicm); + +/** + * nss_ipsec_cmn_tx_buf + * Sends a buffer to NSS for IPsec encapsulation or de-capsulation. + * + * @datatypes + * sk_buff \n + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS HLOS driver context. + * @param[in] skb Pointer to the message data. + * @param[in] if_num Pointer to the NSS interface number. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_ipsec_cmn_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *skb, uint32_t if_num); + +/** + * nss_ipsec_cmn_ppe_port_config + * Configure Packet Processing Engine IPsec port. + * + * @datatypes + * nss_ctx_instance \n + * net_device + * + * @param[in] ctx Pointer to the context of the HLOS driver. + * @param[in] netdev Pointer to the associated network device. + * @param[in] if_num Data interface number. + * @param[in] vsi_num Virtual switch instance number. + * + * @return + * True if successful, else false. + */ +extern bool nss_ipsec_cmn_ppe_port_config(struct nss_ctx_instance *ctx, struct net_device *netdev, + uint32_t if_num, uint32_t vsi_num); + +/** + * nss_ipsec_cmn_ppe_mtu_update() + * Configure Packet Processing Engine MTU for IPsec inline. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] ctx Pointer to the context of the HLOS driver. + * @param[in] if_num Data interface number. + * @param[in] mtu Maximum transmission unit of interface number. + * @param[in] mru Maximum receive unit of interface number. + * + * @return + * True if successful, else false. + */ +bool nss_ipsec_cmn_ppe_mtu_update(struct nss_ctx_instance *ctx, uint32_t if_num, uint16_t mtu, uint16_t mru); + +/** + * nss_ipsec_cmn_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. 
+ * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_ipsec_cmn_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_ipsec_cmn_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_ipsec_cmn_stats_register_notifier(struct notifier_block *nb); + +/** + * @} + */ + +#endif /* !__NSS_IPSEC_CMN_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsecmgr.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsecmgr.h new file mode 100644 index 000000000..3fe3460f9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipsecmgr.h @@ -0,0 +1,443 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipsecmgr.h + * NSS IPSec Manager interface definitions. + */ + +#ifndef __NSS_IPSECMGR_H +#define __NSS_IPSECMGR_H + +/** + * @addtogroup nss_ipsec_subsystem + * @{ + */ + +#define NSS_IPSECMGR_DEBUG_LVL_ERROR 1 /**< Turn on debug for an error. 
*/ +#define NSS_IPSECMGR_DEBUG_LVL_WARN 2 /**< Turn on debug for a warning. */ +#define NSS_IPSECMGR_DEBUG_LVL_INFO 3 /**< Turn on debug for information. */ +#define NSS_IPSECMGR_DEBUG_LVL_TRACE 4 /**< Turn on debug for trace. */ + +#define NSS_IPSECMGR_TUN_NAME "ipsectun%d" + /**< IPsec tunnel name. */ +#define NSS_IPSECMGR_MAX_TUNNELS (NSS_CRYPTO_MAX_IDXS/2) + /**< Maximum number of IPsec tunnels. */ + +/** + * Length of the header added after encapsulation. + * + * This estimate must be accurate but large enough to accommodate most use cases. + */ +#define NSS_IPSECMGR_TUN_MAX_HDR_LEN 96 + +/* + * Space required in the head and tail of the buffer + */ +#define NSS_IPSECMGR_TUN_HEADROOM 128 /**< Size of the buffer headroom. */ +#define NSS_IPSECMGR_TUN_TAILROOM 192 /**< Size of the buffer tailroom. */ + +#define NSS_IPSECMGR_TUN_MTU(x) (x - NSS_IPSECMGR_TUN_MAX_HDR_LEN) + /**< MTU of the IPsec tunnel. */ + +#define NSS_IPSECMGR_NATT_PORT_DATA 4500 /**< Number of the NATT port. */ + +#define NSS_IPSECMGR_MIN_REPLAY_WIN 32 /**< Minimum size of the replay window. */ +#define NSS_IPSECMGR_MAX_REPLAY_WIN 1024 /**< Maximum size of the replay window. */ +#define NSS_IPSECMGR_MAX_ICV_LEN 32 /**< Maximum size of the ICV. */ +#define NSS_IPSECMGR_MAX_DSCP 63 /**< Maximum DSCP value. */ + +/** + * nss_ipsecmgr_flow_type + * Flow types for the IPsec manager. + */ +enum nss_ipsecmgr_flow_type { + NSS_IPSECMGR_FLOW_TYPE_NONE = 0, + NSS_IPSECMGR_FLOW_TYPE_V4_TUPLE = 1, + NSS_IPSECMGR_FLOW_TYPE_V6_TUPLE = 2, + NSS_IPSECMGR_FLOW_TYPE_V4_SUBNET = 3, + NSS_IPSECMGR_FLOW_TYPE_V6_SUBNET = 4, + NSS_IPSECMGR_FLOW_TYPE_MAX +}; + +/** + * nss_ipsecmgr_sa_type + * Security association types for the IPsec manager. + */ +enum nss_ipsecmgr_sa_type { + NSS_IPSECMGR_SA_TYPE_NONE = 0, + NSS_IPSECMGR_SA_TYPE_V4 = 1, + NSS_IPSECMGR_SA_TYPE_V6 = 2, + NSS_IPSECMGR_SA_TYPE_MAX +}; + +/** + * nss_ipsecmgr_event_type + * Event types for the IPsec manager. 
+ */ +enum nss_ipsecmgr_event_type { + NSS_IPSECMGR_EVENT_NONE = 0, + NSS_IPSECMGR_EVENT_SA_STATS, + NSS_IPSECMGR_EVENT_MAX +}; + +/** + * nss_ipsecmgr_sa_v4 + * IPv4 security associations for the IPsec manager. + */ +struct nss_ipsecmgr_sa_v4 { + uint32_t src_ip; /**< IPv4 source IP. */ + uint32_t dst_ip; /**< IPv4 destination IP. */ + uint32_t ttl; /**< IPv4 time-to-live. */ + uint32_t spi_index; /**< ESP SPI index. */ +}; + +/** + * nss_ipsecmgr_sa_v6 + * IPv6 security associations for the IPsec manager. + */ +struct nss_ipsecmgr_sa_v6 { + uint32_t src_ip[4]; /**< IPv6 source IP. */ + uint32_t dst_ip[4]; /**< IPv6 destination IP. */ + uint32_t hop_limit; /**< IPv6 hop limit. */ + uint32_t spi_index; /**< SPI index of the encapsulating security payload (ESP). */ +}; + +/** + * nss_ipsecmgr_sa_data + * Security association data for the IPsec manager. + * + * For DSCP marking, use the following settings: + * - Copy inner header to outer header: + * - dscp_copy = 1 + * - dscp = 0 + * - Fixed mark on outer header: + * - dscp_copy = 0 + * - dscp = <0 to 63> + */ +struct nss_ipsecmgr_sa_data { + uint32_t crypto_index; /**< Crypto session index returned by the driver. */ + + /** + * Security association data for the IPsec manager. + */ + struct { + uint16_t replay_win; + /**< Sequence number window size for anti-replay. */ + uint8_t icv_len; + /**< Hash length. */ + uint8_t dscp; + /**< Default DSCP value of the security association. */ + + bool dscp_copy; + /**< Copy DSCP from the inner header to the outer header. */ + bool nat_t_req; + /**< NAT-T is required. */ + bool seq_skip; + /**< Skip the ESP sequence for encapsulation. */ + bool trailer_skip; + /**< Skip the ESP trailer for encapsulation. */ + bool df_copy; + /**< Copy DF from the inner header to the outer header. */ + uint8_t df; + /**< DF value for the outer header, if nocopy is selected. */ + } esp; /**< Payload of security association data. */ + + bool enable_esn; /**< Enable the extended sequence number. 
*/ + bool use_pattern; /**< Use a random pattern in a hash calculation. */ + uint32_t fail_hash_thresh; /**< Threshold for consecutive hash failure. */ +}; + +/** + * nss_ipsecmgr_encap_v4_tuple + * IPv4 encapsulation flow tuple for the IPsec manager. + */ +struct nss_ipsecmgr_encap_v4_tuple { + uint32_t src_ip; /**< Source IP. */ + uint32_t dst_ip; /**< Destination IP. */ + uint32_t protocol; /**< Protocol. */ +}; + +/** + * nss_ipsecmgr_encap_v6_tuple + * IPv6 encapsulation flow tuple for the IPsec manager. + */ +struct nss_ipsecmgr_encap_v6_tuple { + uint32_t src_ip[4]; /**< Source IP. */ + uint32_t dst_ip[4]; /**< Destination IP. */ + uint32_t next_hdr; /**< Transport layer protocol. */ +}; + +/** + * nss_ipsecmgr_encap_v4_subnet + * IPv4 encapsulation flow subnet for the IPsec manager. + */ +struct nss_ipsecmgr_encap_v4_subnet { + uint32_t dst_subnet; /**< Destination subnet. */ + uint32_t dst_mask; /**< Destination subnet mask. */ + uint32_t protocol; /**< IPv4 or IPv6 protocol. */ +}; + +/** + * nss_ipsecmgr_encap_v6_subnet + * IPv6 encapsulation flow subnet for the IPsec manager. + * + * Store least significant word in dst_subnet[0] and the most significant word + * in dst_subnet[3]. + */ +struct nss_ipsecmgr_encap_v6_subnet { + uint32_t dst_subnet[4]; /**< Destination subnet. */ + uint32_t dst_mask[4]; /**< Destination subnet mask. */ + uint32_t next_hdr; /**< Transport layer protocol. */ +}; + +/** + * nss_ipsecmgr_sa + * Security association information for the IPsec manager. + */ +struct nss_ipsecmgr_sa { + enum nss_ipsecmgr_sa_type type; /**< Security association type. */ + + /** + * IPsec manager security association data. + */ + union { + struct nss_ipsecmgr_sa_v4 v4; /**< IPv4 security association. */ + struct nss_ipsecmgr_sa_v6 v6; /**< IPv6 security association. */ + } data; /**< IPsec manager security association data. */ +}; + +/** + * nss_ipsecmgr_sa_stats + * Security association statistics exported by the IPsec manager. 
+ */ +struct nss_ipsecmgr_sa_stats { + struct nss_ipsecmgr_sa sa; /**< Security association information. */ + uint32_t crypto_index; /**< Crypto session index. */ + + /** + * Security association statistics used by the IPsec manager. + */ + struct { + uint32_t bytes; /**< Number of bytes processed. */ + uint32_t count; /**< Number of packets processed. */ + } pkts; /**< Processing statistics. */ + + uint64_t seq_num; /**< Current sequence number. */ + uint64_t window_max; /**< Maximum size of the window. */ + uint32_t window_size; /**< Current size of the window. */ + + bool fail_hash_alarm; + /**< Alarm for consecutive hash fail. */ + bool esn_enabled; + /**< Specifies whether ESN is enabled. */ +}; + +/** + * nss_ipsecmgr_event + * Event information for the IPsec manager. + */ +struct nss_ipsecmgr_event { + enum nss_ipsecmgr_event_type type; /**< Event type. */ + + /** + * Event information statistics for the IPsec manager. + */ + union { + struct nss_ipsecmgr_sa_stats stats; + /**< Security association statistics. */ + } data; /**< Event information. */ +}; + +/** + * nss_ipsecmgr_encap_flow + * Encapsulation flow information for the IPsec manager. + */ +struct nss_ipsecmgr_encap_flow { + enum nss_ipsecmgr_flow_type type; /**< Flow type. */ + + /** + * Payload of encapsulation flow data for the IPsec manager. + */ + union { + struct nss_ipsecmgr_encap_v4_tuple v4_tuple; + /**< IPv4 tuple. */ + struct nss_ipsecmgr_encap_v4_subnet v4_subnet; + /**< IPv4 subnet. */ + struct nss_ipsecmgr_encap_v6_tuple v6_tuple; + /**< IPv6 tuple. */ + struct nss_ipsecmgr_encap_v6_subnet v6_subnet; + /**< IPv6 subnet. */ + } data; /**< Encapsulation flow information. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ + +/** + * Callback function for receiving IPsec data. + * + * @datatypes + * sk_buff + * + * @param[in] ctx Pointer to the context of the data. + * @param[in] skb Pointer to the data socket buffer. 
+ */ +typedef void (*nss_ipsecmgr_data_cb_t) (void *ctx, struct sk_buff *skb); + +/** + * Callback function for receiving IPsec events. + * + * @datatypes + * nss_ipsecmgr_event + * + * @param[in] ctx Pointer to the context of the event. + * @param[in] ev Pointer to the event. + */ +typedef void (*nss_ipsecmgr_event_cb_t) (void *ctx, struct nss_ipsecmgr_event *ev); + +/** + * nss_ipsecmgr_callback + * Callback information. + */ +struct nss_ipsecmgr_callback { + void *ctx; /**< Context of the caller. */ + nss_ipsecmgr_data_cb_t data_fn; /**< Data callback function. */ + nss_ipsecmgr_event_cb_t event_fn; /**< Event callback function. */ +}; + +/** + * nss_ipsecmgr_tunnel_add + * Adds a new IPsec tunnel. + * + * @datatypes + * nss_ipsecmgr_callback + * + * @param[in] cb Pointer to the message callback. + * + * @return + * Linux NETDEVICE or NULL. + */ +struct net_device *nss_ipsecmgr_tunnel_add(struct nss_ipsecmgr_callback *cb); + +/** + * nss_ipsecmgr_tunnel_del + * Deletes an existing IPsec tunnel. + * + * @datatypes + * net_device + * + * @param[in] tun Pointer to the network device associated with the tunnel. + * + * @return + * Success or failure. + */ +bool nss_ipsecmgr_tunnel_del(struct net_device *tun); + +/** + * nss_ipsecmgr_tunnel_update_callback + * Updates the binding of netdevice and callback. + * + * @datatypes + * net_device + * + * @param[in] tun Pointer to IPsec tunnel. + * @param[in] cur Pointer to Linux netdevice. + * + * @return + * None. + */ +void nss_ipsecmgr_tunnel_update_callback(struct net_device *tun, struct net_device *cur); + +/** + * nss_ipsecmgr_encap_add + * Adds an encapsulation flow rule to the IPsec offload database. + * + * @datatypes + * net_device \n + * nss_ipsecmgr_encap_flow \n + * nss_ipsecmgr_sa \n + * nss_ipsecmgr_sa_data + * + * @param[in] tun Pointer to the network device associated with the tunnel. + * @param[in] flow Pointer to the flow or subnet to add. 
+ * @param[in] sa Pointer to the security association for the flow.
+ * @param[in] data Pointer to additional security association data.
+ *
+ * @return
+ * Success or failure.
+ */
+bool nss_ipsecmgr_encap_add(struct net_device *tun, struct nss_ipsecmgr_encap_flow *flow, struct nss_ipsecmgr_sa *sa,
+ struct nss_ipsecmgr_sa_data *data);
+
+/**
+ * nss_ipsecmgr_encap_del
+ * Deletes an encapsulation flow rule from the IPsec offload database.
+ *
+ * @datatypes
+ * net_device \n
+ * nss_ipsecmgr_encap_flow \n
+ * nss_ipsecmgr_sa
+ *
+ * @param[in] tun Pointer to the network device associated with the tunnel.
+ * @param[in] flow Pointer to the flow or subnet to delete.
+ * @param[in] sa Pointer to the security association for the flow.
+ *
+ * @return
+ * Success or failure.
+ */
+bool nss_ipsecmgr_encap_del(struct net_device *tun, struct nss_ipsecmgr_encap_flow *flow, struct nss_ipsecmgr_sa *sa);
+
+/**
+ * nss_ipsecmgr_decap_add
+ * Adds a decapsulation security association to the offload database.
+ *
+ * @datatypes
+ * net_device \n
+ * nss_ipsecmgr_sa \n
+ * nss_ipsecmgr_sa_data
+ *
+ * @param[in] tun Pointer to the network device associated with the tunnel.
+ * @param[in] sa Pointer to the security association for the decapsulation.
+ * @param[in] data Pointer to additional security association data.
+ *
+ * @return
+ * Success or failure.
+ */
+bool nss_ipsecmgr_decap_add(struct net_device *tun, struct nss_ipsecmgr_sa *sa, struct nss_ipsecmgr_sa_data *data);
+
+/**
+ * nss_ipsecmgr_sa_flush
+ * Flushes the security association and all associated flows and subnets.
+ *
+ * @datatypes
+ * net_device \n
+ * nss_ipsecmgr_sa
+ *
+ * @param[in] tun Pointer to the network device associated with the tunnel.
+ * @param[in] sa Pointer to the security association to flush.
+ *
+ * @return
+ * Success or failure.
+ */ +bool nss_ipsecmgr_sa_flush(struct net_device *tun, struct nss_ipsecmgr_sa *sa); + +#endif /* __KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_IPSECMGR_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv4.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv4.h new file mode 100644 index 000000000..ee3a552eb --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv4.h @@ -0,0 +1,1310 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipv4.h + * NSS IPv4 interface definitions. + */ + +#ifndef __NSS_IPV4_H +#define __NSS_IPV4_H + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +#include "nss_stats_public.h" +#endif + +/** + * @addtogroup nss_ipv4_subsystem + * @{ + */ + +/* + * IPv4 connection flags (to be used with nss_ipv4_create::flags). + */ +#define NSS_IPV4_CREATE_FLAG_NO_SEQ_CHECK 0x01 + /**< Rule for not checking sequence numbers. */ +#define NSS_IPV4_CREATE_FLAG_BRIDGE_FLOW 0x02 + /**< Rule that indicates pure bridge flow (no routing is involved). 
*/ +#define NSS_IPV4_CREATE_FLAG_ROUTED 0x04 /**< Rule for a routed connection. */ + +#define NSS_IPV4_CREATE_FLAG_DSCP_MARKING 0x08 /**< Rule for DSCP marking. */ +#define NSS_IPV4_CREATE_FLAG_VLAN_MARKING 0x10 /**< Rule for VLAN marking. */ +#define NSS_IPV4_CREATE_FLAG_QOS_VALID 0x20 /**< Rule for QoS is valid. */ + +/** + * nss_ipv4_create + * Information for an IPv4 flow or connection create rule. + * + * All fields must be passed in host-endian order. + */ +struct nss_ipv4_create { + int32_t src_interface_num; + /**< Source interface number (virtual or physical). */ + int32_t dest_interface_num; + /**< Destination interface number (virtual or physical). */ + int32_t protocol; /**< L4 protocol (e.g., TCP or UDP). */ + uint32_t flags; /**< Flags (if any) associated with this rule. */ + uint32_t from_mtu; /**< MTU of the incoming interface. */ + uint32_t to_mtu; /**< MTU of the outgoing interface. */ + uint32_t src_ip; /**< Source IP address. */ + int32_t src_port; /**< Source L4 port (e.g., TCP or UDP port). */ + uint32_t src_ip_xlate; /**< Translated source IP address (used with SNAT). */ + int32_t src_port_xlate; /**< Translated source L4 port (used with SNAT). */ + uint32_t dest_ip; /**< Destination IP address. */ + int32_t dest_port; /**< Destination L4 port (e.g., TCP or UDP port). */ + uint32_t dest_ip_xlate; + /**< Translated destination IP address (used with DNAT). */ + int32_t dest_port_xlate; + /**< Translated destination L4 port (used with DNAT). */ + uint8_t src_mac[ETH_ALEN]; + /**< Source MAC address. */ + uint8_t dest_mac[ETH_ALEN]; + /**< Destination MAC address. */ + uint8_t src_mac_xlate[ETH_ALEN]; + /**< Translated source MAC address (post-routing). */ + uint8_t dest_mac_xlate[ETH_ALEN]; + /**< Translated destination MAC address (post-routing). */ + uint8_t flow_window_scale; /**< Window scaling factor (TCP). */ + uint32_t flow_max_window; /**< Maximum window size (TCP). */ + uint32_t flow_end; /**< TCP window end. 
*/ + uint32_t flow_max_end; /**< TCP window maximum end. */ + uint32_t flow_pppoe_if_exist; + /**< Flow direction: PPPoE interface exist flag. */ + int32_t flow_pppoe_if_num; + /**< Flow direction: PPPoE interface number. */ + uint16_t ingress_vlan_tag; /**< Ingress VLAN tag expected for this flow. */ + uint8_t return_window_scale; + /**< Window scaling factor of the return direction (TCP). */ + uint32_t return_max_window; + /**< Maximum window size of the return direction. */ + uint32_t return_end; + /**< Flow end for the return direction. */ + uint32_t return_max_end; + /**< Flow maximum end for the return direction. */ + uint32_t return_pppoe_if_exist; + /**< Return direction: PPPoE interface existence flag. */ + int32_t return_pppoe_if_num; + /**< Return direction: PPPoE interface number. */ + uint16_t egress_vlan_tag; /**< Egress VLAN tag expected for this flow. */ + uint8_t spo_needed; /**< Indicates whether SPO is required. */ + uint32_t param_a0; /**< Custom parameter 0. */ + uint32_t param_a1; /**< Custom parameter 1. */ + uint32_t param_a2; /**< Custom parameter 2. */ + uint32_t param_a3; /**< Custom parameter 3. */ + uint32_t param_a4; /**< Custom parameter 4. */ + uint32_t qos_tag; /**< Deprecated, will be removed soon. */ + uint32_t flow_qos_tag; /**< QoS tag value for the flow direction. */ + uint32_t return_qos_tag; /**< QoS tag value for the return direction. */ + uint8_t dscp_itag; /**< DSCP marking tag. */ + uint8_t dscp_imask; /**< DSCP marking input mask. */ + uint8_t dscp_omask; /**< DSCP marking output mask. */ + uint8_t dscp_oval; /**< DSCP marking output value. */ + uint16_t vlan_itag; /**< VLAN marking tag. */ + uint16_t vlan_imask; /**< VLAN marking input mask. */ + uint16_t vlan_omask; /**< VLAN marking output mask. */ + uint16_t vlan_oval; /**< VLAN marking output value. */ + uint32_t in_vlan_tag[MAX_VLAN_DEPTH]; + /**< Ingress VLAN tag expected for this flow. 
*/ + uint32_t out_vlan_tag[MAX_VLAN_DEPTH]; + /**< Egress VLAN tag expected for this flow. */ + uint8_t flow_dscp; /**< IP DSCP value for the flow direction. */ + uint8_t return_dscp; /**< IP DSCP value for the return direction. */ +}; + +/** + * nss_ipv4_destroy + * Information for an IPv4 flow or connection destroy rule. + */ +struct nss_ipv4_destroy { + int32_t protocol; /**< L4 protocol ID. */ + uint32_t src_ip; /**< Source IP address. */ + int32_t src_port; /**< Source L4 port (e.g., TCP or UDP port). */ + uint32_t dest_ip; /**< Destination IP address. */ + int32_t dest_port; /**< Destination L4 port (e.g., TCP or UDP port). */ +}; + +/** + * nss_ipv4_message_types + * IPv4 bridge and routing rule message types. + * + * NSS_IPV4_RX_DEPRECATED0 is a deprecated type. It is kept for backward compatibility. + */ +enum nss_ipv4_message_types { + NSS_IPV4_TX_CREATE_RULE_MSG, + NSS_IPV4_TX_DESTROY_RULE_MSG, + NSS_IPV4_RX_DEPRECATED0, + NSS_IPV4_RX_CONN_STATS_SYNC_MSG, + NSS_IPV4_RX_NODE_STATS_SYNC_MSG, + NSS_IPV4_TX_CONN_CFG_RULE_MSG, + NSS_IPV4_TX_CREATE_MC_RULE_MSG, + NSS_IPV4_TX_CONN_STATS_SYNC_MANY_MSG, + NSS_IPV4_TX_ACCEL_MODE_CFG_MSG, + NSS_IPV4_TX_CONN_CFG_INQUIRY_MSG, + NSS_IPV4_TX_CONN_TABLE_SIZE_MSG, + NSS_IPV4_TX_DSCP2PRI_CFG_MSG, + NSS_IPV4_TX_RPS_HASH_BITMAP_CFG_MSG, + NSS_IPV4_MAX_MSG_TYPES, +}; + +/** + * nss_ipv4_dscp_map_actions + * Action types mapped to DSCP values. + */ +enum nss_ipv4_dscp_map_actions { + NSS_IPV4_DSCP_MAP_ACTION_ACCEL, + NSS_IPV4_DSCP_MAP_ACTION_DONT_ACCEL, + NSS_IPV4_DSCP_MAP_ACTION_MAX, +}; + +/** + * nss_ipv4_stats_types + * IPv4 node statistics. + */ +enum nss_ipv4_stats_types { + NSS_IPV4_STATS_ACCELERATED_RX_PKTS = 0, + /**< Accelerated IPv4 Rx packets. */ + NSS_IPV4_STATS_ACCELERATED_RX_BYTES, + /**< Accelerated IPv4 Rx bytes. */ + NSS_IPV4_STATS_ACCELERATED_TX_PKTS, + /**< Accelerated IPv4 Tx packets. */ + NSS_IPV4_STATS_ACCELERATED_TX_BYTES, + /**< Accelerated IPv4 Tx bytes. 
*/ + NSS_IPV4_STATS_CONNECTION_CREATE_REQUESTS, + /**< Number of IPv4 connection create requests. */ + NSS_IPV4_STATS_CONNECTION_CREATE_COLLISIONS, + /**< Number of IPv4 connection create requests that collided with existing entries. */ + NSS_IPV4_STATS_CONNECTION_CREATE_INVALID_INTERFACE, + /**< Number of IPv4 connection create requests that had invalid interface. */ + NSS_IPV4_STATS_CONNECTION_DESTROY_REQUESTS, + /**< Number of IPv4 connection destroy requests. */ + NSS_IPV4_STATS_CONNECTION_DESTROY_MISSES, + /**< Number of IPv4 connection destroy requests that missed the cache. */ + NSS_IPV4_STATS_CONNECTION_HASH_HITS, + /**< Number of IPv4 connection hash hits. */ + NSS_IPV4_STATS_CONNECTION_HASH_REORDERS, + /**< Number of IPv4 connection hash reorders. */ + NSS_IPV4_STATS_CONNECTION_FLUSHES, + /**< Number of IPv4 connection flushes. */ + NSS_IPV4_STATS_CONNECTION_EVICTIONS, + /**< Number of IPv4 connection evictions. */ + NSS_IPV4_STATS_FRAGMENTATIONS, + /**< Number of successful IPv4 fragmentations performed. */ + NSS_IPV4_STATS_DROPPED_BY_RULE, + /**< Number of IPv4 packets dropped because of a drop rule.*/ + NSS_IPV4_STATS_MC_CONNECTION_CREATE_REQUESTS, + /**< Number of successful IPv4 multicast create requests. */ + NSS_IPV4_STATS_MC_CONNECTION_UPDATE_REQUESTS, + /**< Number of successful IPv4 multicast update requests. */ + NSS_IPV4_STATS_MC_CONNECTION_CREATE_INVALID_INTERFACE, + /**< Number of IPv4 multicast connection create requests that had invalid interface. */ + NSS_IPV4_STATS_MC_CONNECTION_DESTROY_REQUESTS, + /**< Number of IPv4 multicast connection destroy requests. */ + NSS_IPV4_STATS_MC_CONNECTION_DESTROY_MISSES, + /**< Number of IPv4 multicast connection destroy requests that missed the cache. */ + NSS_IPV4_STATS_MC_CONNECTION_FLUSHES, + /**< Number of IPv4 multicast connection flushes. */ + NSS_IPV4_STATS_CONNECTION_CREATE_INVALID_MIRROR_IFNUM, + /**< Number of IPv4 mirror connection requests with an invalid interface number. 
*/ + NSS_IPV4_STATS_CONNECTION_CREATE_INVALID_MIRROR_IFTYPE, + /**< Number of IPv4 mirror connection requests with an invalid interface type. */ + NSS_IPV4_STATS_MIRROR_FAILURES, + /**< Number of IPv4 mirror failures. */ + NSS_IPV4_STATS_MAX, + /**< Maximum message type. */ +}; + +/* + * NSS IPv4 rule creation & rule update flags. + */ +#define NSS_IPV4_RULE_CREATE_FLAG_NO_SEQ_CHECK 0x01 + /**< Do not perform TCP sequence number checks. */ +#define NSS_IPV4_RULE_CREATE_FLAG_BRIDGE_FLOW 0x02 + /**< Pure bridge forwarding flow. */ +#define NSS_IPV4_RULE_CREATE_FLAG_ROUTED 0x04 + /**< Rule for a routed connection. */ +#define NSS_IPV4_RULE_CREATE_FLAG_DSCP_MARKING 0x08 + /**< Rule for configuring DSCP marking. */ +#define NSS_IPV4_RULE_CREATE_FLAG_VLAN_MARKING 0x10 + /**< Rule for configuring VLAN marking. */ +#define NSS_IPV4_RULE_UPDATE_FLAG_CHANGE_MTU 0x20 + /**< Update MTU of the connection interfaces. */ +#define NSS_IPV4_RULE_CREATE_FLAG_ICMP_NO_CME_FLUSH 0x40 + /**< Rule for not flushing connection match entry on an ICMP packet. */ + +/** + * L2 payload is not IPv4, but it consists of an encapsulating protocol that + * carries an IPv4 payload within it. + */ +#define NSS_IPV4_RULE_CREATE_FLAG_L2_ENCAP 0x80 +#define NSS_IPV4_RULE_CREATE_FLAG_DROP 0x100 + /**< Rule to drop packets. */ +#define NSS_IPV4_RULE_CREATE_FLAG_EXCEPTION 0x200 + /**< Rule to except packets. */ +#define NSS_IPV4_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK 0x400 + /**< Check the source interface for the rule. */ +#define NSS_IPV4_RULE_CREATE_FLAG_NO_SRC_IDENT 0x800 + /**< Zero out the source identifier for the rule. */ +#define NSS_IPV4_RULE_CREATE_FLAG_NO_MAC 0x1000 + /**< Flag to bypass writing MAC addresses. */ +#define NSS_IPV4_RULE_CREATE_FLAG_EMESH_SP 0x2000 + /**< Mark rule as E-MESH Service Prioritization valid. */ + +/* + * Validity flags for rule creation. + */ +#define NSS_IPV4_RULE_CREATE_CONN_VALID 0x01 /**< Connection is valid. 
*/ +#define NSS_IPV4_RULE_CREATE_TCP_VALID 0x02 /**< TCP protocol fields are valid. */ +#define NSS_IPV4_RULE_CREATE_PPPOE_VALID 0x04 /**< PPPoE fields are valid. */ +#define NSS_IPV4_RULE_CREATE_QOS_VALID 0x08 /**< QoS fields are valid. */ +#define NSS_IPV4_RULE_CREATE_VLAN_VALID 0x10 /**< VLAN fields are valid. */ +#define NSS_IPV4_RULE_CREATE_DSCP_MARKING_VALID 0x20 + /**< DSCP marking fields are valid. */ +#define NSS_IPV4_RULE_CREATE_VLAN_MARKING_VALID 0x40 + /**< VLAN marking fields are valid. */ +#define NSS_IPV4_RULE_CREATE_SRC_MAC_VALID 0x80 + /**< Source MAC address fields are valid. */ +#define NSS_IPV4_RULE_CREATE_NEXTHOP_VALID 0x100 + /**< Next hop interface number fields are valid. */ +#define NSS_IPV4_RULE_CREATE_RPS_VALID 0x200 + /**< RPS for core selection is valid. */ +#define NSS_IPV4_RULE_CREATE_DEST_MAC_VALID 0x400 + /**< Destination MAC address fields are valid. */ +#define NSS_IPV4_RULE_CREATE_IGS_VALID 0x800 + /**< Ingress shaping fields are valid. */ +#define NSS_IPV4_RULE_CREATE_IDENTIFIER_VALID 0x1000 + /**< Identifier is valid. */ +#define NSS_IPV4_RULE_CREATE_MIRROR_VALID 0x2000 /**< Mirror fields are valid. */ + +/* + * Multicast command rule flags + */ +#define NSS_IPV4_MC_RULE_CREATE_FLAG_MC_UPDATE 0x01 /**< Multicast rule update. */ +#define NSS_IPV4_MC_RULE_CREATE_FLAG_MC_EMESH_SP 0x02 + /**< Mark multicast rule as E-MESH Service Prioritization valid. */ + +/* + * Multicast command validity flags + */ +#define NSS_IPV4_MC_RULE_CREATE_FLAG_QOS_VALID 0x01 + /**< QoS fields are valid. */ +#define NSS_IPV4_MC_RULE_CREATE_FLAG_DSCP_MARKING_VALID 0x02 + /**< DSCP fields are valid. */ +#define NSS_IPV4_MC_RULE_CREATE_FLAG_INGRESS_VLAN_VALID 0x04 + /**< Ingress VLAN fields are valid. */ +#define NSS_IPV4_MC_RULE_CREATE_FLAG_INGRESS_PPPOE 0x08 + /**< Ingress PPPoE fields are valid. */ +#define NSS_IPV4_MC_RULE_CREATE_FLAG_IGS_VALID 0x10 + /**< Ingress shaping fields are valid. 
*/ + +/* + * Per-interface rule flags for a multicast connection (to be used with the rule_flags + * field of nss_ipv4_mc_if_rule structure). + */ +#define NSS_IPV4_MC_RULE_CREATE_IF_FLAG_BRIDGE_FLOW 0x01 + /**< Multicast connection rule is created for a bridge flow. */ +#define NSS_IPV4_MC_RULE_CREATE_IF_FLAG_ROUTED_FLOW 0x02 + /**< Multicast connection rule is created for a routed flow. */ +#define NSS_IPV4_MC_RULE_CREATE_IF_FLAG_JOIN 0x04 + /**< Interface has joined the flow. */ +#define NSS_IPV4_MC_RULE_CREATE_IF_FLAG_LEAVE 0x08 + /**< Interface has left the flow. */ + +/* + * Per-interface valid flags for a multicast connection (to be used with the valid_flags + * field of nss_ipv4_mc_if_rule structure). + */ +#define NSS_IPV4_MC_RULE_CREATE_IF_FLAG_VLAN_VALID 0x01 + /**< VLAN fields are valid. */ +#define NSS_IPV4_MC_RULE_CREATE_IF_FLAG_PPPOE_VALID 0x02 + /**< PPPoE fields are valid. */ +#define NSS_IPV4_MC_RULE_CREATE_IF_FLAG_NAT_VALID 0x4 + /**< Interface is configured with the source NAT. */ + +/* + * Source MAC address valid flags (to be used with mac_valid_flags field of nss_ipv4_src_mac_rule structure) + */ +#define NSS_IPV4_SRC_MAC_FLOW_VALID 0x01 + /**< MAC address for the flow interface is valid. */ +#define NSS_IPV4_SRC_MAC_RETURN_VALID 0x02 + /**< MAC address for the return interface is valid. */ + +/* + * Identifier valid flags (to be used with identifier_valid_flags field of nss_ipv4_identifier_rule structure) + */ +#define NSS_IPV4_FLOW_IDENTIFIER_VALID 0x01 + /**< Identifier for flow direction is valid. */ +#define NSS_IPV4_RETURN_IDENTIFIER_VALID 0x02 + /**< Identifier for return direction is valid. */ + +/* + * Mirror valid flags (to be used with the valid field of nss_ipv4_mirror_rule structure) + */ +#define NSS_IPV4_MIRROR_FLOW_VALID 0x01 + /**< Mirror interface number for the flow direction is valid. */ +#define NSS_IPV4_MIRROR_RETURN_VALID 0x02 + /**< Mirror interface number for the return direction is valid. 
*/ + + +/** + * nss_ipv4_5tuple + * Common 5-tuple information. + */ +struct nss_ipv4_5tuple { + uint32_t flow_ip; /**< Flow IP address. */ + uint32_t flow_ident; /**< Flow identifier (e.g., TCP or UDP port). */ + uint32_t return_ip; /**< Return IP address. */ + uint32_t return_ident; /**< Return identier (e.g., TCP or UDP port). */ + uint8_t protocol; /**< Protocol number. */ + uint8_t reserved[3]; /**< Padded for alignment. */ +}; + +/** + * nss_ipv4_connection_rule + * Information for creating a connection. + */ +struct nss_ipv4_connection_rule { + uint16_t flow_mac[3]; /**< Flow MAC address. */ + uint16_t return_mac[3]; /**< Return MAC address. */ + int32_t flow_interface_num; /**< Flow interface number. */ + int32_t return_interface_num; /**< Return interface number. */ + uint32_t flow_mtu; /**< MTU for the flow interface. */ + uint32_t return_mtu; /**< MTU for the return interface. */ + uint32_t flow_ip_xlate; /**< Translated flow IP address. */ + uint32_t return_ip_xlate; /**< Translated return IP address. */ + uint32_t flow_ident_xlate; /**< Translated flow identifier (e.g., port). */ + uint32_t return_ident_xlate; /**< Translated return identifier (e.g., port). */ +}; + +/** + * nss_ipv4_pppoe_rule + * Information for PPPoE connection rules. + */ +struct nss_ipv4_pppoe_rule { + uint32_t flow_if_exist; + /**< PPPoE interface existence flag for the flow direction. */ + int32_t flow_if_num; + /**< PPPoE interface number for the flow direction. */ + uint32_t return_if_exist; + /**< PPPoE interface existence flag for the return direction. */ + int32_t return_if_num; + /**< PPPoE interface number for the return direction. */ +}; + +/** + * nss_ipv4_dscp_rule + * Information for DSCP connection rules. + */ +struct nss_ipv4_dscp_rule { + uint8_t flow_dscp; /**< Egress DSCP value for the flow direction. */ + uint8_t return_dscp; /**< Egress DSCP value for the return direction. */ + uint8_t reserved[2]; /**< Padded for alignment. 
 */
+};
+
+/**
+ * nss_ipv4_vlan_rule
+ * Information for VLAN connection rules.
+ */
+struct nss_ipv4_vlan_rule {
+ uint32_t ingress_vlan_tag; /**< VLAN tag for the ingress packets. */
+ uint32_t egress_vlan_tag; /**< VLAN tag for egress packets. */
+};
+
+/**
+ * nss_ipv4_nexthop
+ * Information for next hop interface numbers.
+ *
+ * A next hop is the next interface that will receive the packet (as opposed to
+ * the final interface when the packet leaves the device).
+ */
+struct nss_ipv4_nexthop {
+ /**
+ * Next hop interface number of the flow direction (from which the connection
+ * originated).
+ */
+ int32_t flow_nexthop;
+ /**
+ * Next hop interface number of the return direction (to which the connection
+ * is destined).
+ */
+ int32_t return_nexthop;
+};
+
+/**
+ * nss_ipv4_protocol_tcp_rule
+ * Information for TCP connection rules.
+ */
+struct nss_ipv4_protocol_tcp_rule {
+ uint32_t flow_max_window;
+ /**< Largest seen window for the flow direction. */
+ uint32_t return_max_window;
+ /**< Largest seen window for the return direction. */
+
+ /**
+ * Largest seen sequence + segment length for the flow direction.
+ */
+ uint32_t flow_end;
+
+ /**
+ * Largest seen sequence + segment length for the return direction.
+ */
+ uint32_t return_end;
+
+ uint32_t flow_max_end;
+ /**< Largest seen ack + max(1, win) for the flow direction. */
+ uint32_t return_max_end;
+ /**< Largest seen ack + max(1, win) for the return direction. */
+ uint8_t flow_window_scale;
+ /**< Window scaling factor for the flow direction. */
+ uint8_t return_window_scale;
+ /**< Window scaling factor for the return direction. */
+ uint16_t reserved; /**< Alignment padding. */
+};
+
+/**
+ * nss_ipv4_igs_rule
+ * Information for ingress shaping connection rules.
+ */
+struct nss_ipv4_igs_rule {
+ uint16_t igs_flow_qos_tag;
+ /**< Ingress shaping QoS tag associated with this rule for the flow direction.
*/ + uint16_t igs_return_qos_tag; + /**< Ingress shaping QoS tag associated with this rule for the return direction. */ +}; + +/** + * nss_ipv4_qos_rule + * Information for QoS connection rules. + */ +struct nss_ipv4_qos_rule { + uint32_t flow_qos_tag; + /**< QoS tag associated with this rule for the flow direction. */ + uint32_t return_qos_tag; + /**< QoS tag associated with this rule for the return direction. */ +}; + +/** + * nss_ipv4_src_mac_rule + * Information for source MAC address rules. + */ +struct nss_ipv4_src_mac_rule { + uint32_t mac_valid_flags; /**< MAC address validity flags. */ + uint16_t flow_src_mac[3]; /**< Source MAC address for the flow direction. */ + uint16_t return_src_mac[3]; /**< Source MAC address for the return direction. */ +}; + +/** + * nss_ipv4_rps_rule + * RPS rule structure. + */ +struct nss_ipv4_rps_rule { + uint8_t flow_rps; + /**< RPS for core selection for flow direction. */ + uint8_t return_rps; + /**< RPS for core selection for return direction. */ + uint8_t reserved[2]; + /**< Padded for alignment. */ +}; + +/** + * nss_ipv4_identifier_rule + * Identifier rule structure. + */ +struct nss_ipv4_identifier_rule { + uint32_t identifier_valid_flags; + /**< Identifier validity flags. */ + uint32_t flow_identifier; + /**< Identifier for flow direction. */ + uint32_t return_identifier; + /**< Identifier for return direction. */ +}; + +/** + * nss_ipv4_mirror_rule + * Mirror rule structure. + */ +struct nss_ipv4_mirror_rule { + uint32_t valid; /**< Mirror validity flags. */ + nss_if_num_t flow_ifnum; /**< Flow mirror interface number. */ + nss_if_num_t return_ifnum; /**< Return mirror interface number. */ +}; + +/** + * nss_ipv4_error_response_types + * Error types for IPv4 messages. 
+ */ +enum nss_ipv4_error_response_types { + NSS_IPV4_UNKNOWN_MSG_TYPE = 1, + NSS_IPV4_CR_INVALID_PNODE_ERROR, + NSS_IPV4_CR_MISSING_CONNECTION_RULE_ERROR, + NSS_IPV4_CR_BUFFER_ALLOC_FAIL_ERROR, + NSS_IPV4_DR_NO_CONNECTION_ENTRY_ERROR, + NSS_IPV4_CR_CONN_CFG_ALREADY_CONFIGURED_ERROR, + NSS_IPV4_CR_CONN_CFG_NOT_MULTIPLE_OF_QUANTA_ERROR, + NSS_IPV4_CR_CONN_CFG_EXCEEDS_LIMIT_ERROR, + NSS_IPV4_CR_CONN_CFG_MEM_ALLOC_FAIL_ERROR, + NSS_IPV4_CR_MULTICAST_INVALID_PROTOCOL, + NSS_IPV4_CR_MULTICAST_UPDATE_INVALID_FLAGS, + NSS_IPV4_CR_MULTICAST_UPDATE_INVALID_IF, + NSS_IPV4_CR_ACCEL_MODE_CONFIG_INVALID, + NSS_IPV4_CR_INVALID_MSG_ERROR, + NSS_IPV4_CR_DSCP2PRI_PRI_INVALID, + NSS_IPV4_CR_DSCP2PRI_CONFIG_INVALID, + NSS_IPV4_CR_INVALID_RPS, + NSS_IPV4_CR_HASH_BITMAP_INVALID, + NSS_IPV4_DR_HW_DECEL_FAIL_ERROR, + NSS_IPV4_CR_RETURN_EXIST_ERROR, + NSS_IPV4_CR_INVALID_IDENTIFIER, + NSS_IPV4_CR_EMESH_SP_CONFIG_INVALID, + NSS_IPV4_LAST +}; + +/** + * nss_ipv4_rule_create_msg + * IPv4 rule for creating sub-messages. + */ +struct nss_ipv4_rule_create_msg { + /* + * Request + */ + uint16_t valid_flags; + /**< Bit flags associated with the validity of parameters. */ + uint16_t rule_flags; + /**< Bit flags associated with the rule. */ + struct nss_ipv4_5tuple tuple; + /**< Holds values of the 5 tuple. */ + struct nss_ipv4_connection_rule conn_rule; + /**< Basic connection-specific data. */ + struct nss_ipv4_protocol_tcp_rule tcp_rule; + /**< TCP-related accleration parameters. */ + struct nss_ipv4_pppoe_rule pppoe_rule; + /**< PPPoE-related accleration parameters. */ + struct nss_ipv4_qos_rule qos_rule; + /**< QoS-related accleration parameters. */ + struct nss_ipv4_dscp_rule dscp_rule; + /**< DSCP-related accleration parameters. */ + struct nss_ipv4_vlan_rule vlan_primary_rule; + /**< Primary VLAN-related accleration parameters. */ + struct nss_ipv4_vlan_rule vlan_secondary_rule; + /**< Secondary VLAN-related accleration parameters. 
*/ + struct nss_ipv4_src_mac_rule src_mac_rule; + /**< Source MAC address-related acceleration parameters. */ + struct nss_ipv4_nexthop nexthop_rule; + /**< Parameters related to the next hop. */ + struct nss_ipv4_rps_rule rps_rule; + /**< RPS parameter. */ + struct nss_ipv4_igs_rule igs_rule; + /**< Ingress shaping related accleration parameters. */ + struct nss_ipv4_identifier_rule identifier; + /**< Rule for adding identifier. */ + struct nss_ipv4_mirror_rule mirror_rule; + /**< Mirror rule parameter. */ +}; + +/** + * nss_ipv4_inquiry_msg + * IPv4 connection inquiry naming structure. + */ +struct nss_ipv4_inquiry_msg { + /** + * Request by its 5-tuple and get response for other items. + */ + struct nss_ipv4_rule_create_msg rr; +}; + +/** + * nss_ipv4_mc_if_rule + * IPv4 multicast rule for creating per-interface information. + */ +struct nss_ipv4_mc_if_rule { + uint16_t rule_flags; /**< Bit flags associated with the rule. */ + uint16_t valid_flags; + /**< Bit flags associated with the validity of parameters. */ + uint32_t xlate_src_ip; /**< Translated flow IP address. */ + uint32_t xlate_src_ident; /**< Translated flow identifier (e.g., port). */ + uint32_t egress_vlan_tag[MAX_VLAN_DEPTH]; + /**< VLAN tag stack for the egress packets. */ + int32_t pppoe_if_num; /**< PPPoE interface number. */ + uint32_t if_num; /**< Interface number. */ + uint32_t if_mtu; /**< Interface MTU. */ + uint16_t if_mac[3]; /**< Interface MAC address. */ + uint8_t reserved[2]; /**< Reserved 2 bytes for alignment. */ +}; + +/** + * nss_ipv4_mc_rule_create_msg + * IPv4 multicast rule for creating sub-messages. + */ +struct nss_ipv4_mc_rule_create_msg { + struct nss_ipv4_5tuple tuple; /**< Holds values of the 5 tuple. */ + + uint32_t rule_flags; /**< Multicast command rule flags. */ + uint32_t valid_flags; /**< Multicast command validity flags. */ + uint32_t src_interface_num; + /**< Source interface number (virtual or physical). 
*/ + uint32_t ingress_vlan_tag[MAX_VLAN_DEPTH]; + /**< VLAN tag stack for the ingress packets. */ + uint32_t qos_tag; /**< QoS tag for the rule. */ + uint16_t dest_mac[3]; /**< Destination multicast MAC address. */ + uint16_t if_count; /**< Number of destination interfaces. */ + uint8_t egress_dscp; /**< Egress DSCP value for the flow. */ + uint8_t reserved[1]; /**< Reserved 1 byte for alignment. */ + uint16_t igs_qos_tag; /**< Ingress shaping QoS tag for the rule. */ + + struct nss_ipv4_mc_if_rule if_rule[NSS_MC_IF_MAX]; + /**< Per-interface information. */ +}; + +/** + * nss_ipv4_rule_destroy_msg + * IPv4 rule for destroying sub-messages. + */ +struct nss_ipv4_rule_destroy_msg { + struct nss_ipv4_5tuple tuple; /**< Holds values of the 5 tuple. */ +}; + +/** + * nss_ipv4_rule_conn_get_table_size_msg + * IPv4 rule for fetching connection tables size. + */ +struct nss_ipv4_rule_conn_get_table_size_msg { + uint32_t num_conn; /**< Number of supported IPv4 connections. */ + uint32_t ce_table_size; /**< Size of the connection entry table in NSS firmware. */ + uint32_t cme_table_size; /**< Size of the connection match entry table in NSS firmware. */ +}; + +/** + * nss_ipv4_rule_conn_cfg_msg + * IPv4 rule for connection configuration sub-messages. + */ +struct nss_ipv4_rule_conn_cfg_msg { + uint32_t num_conn; /**< Number of supported IPv4 connections. */ + uint32_t ce_mem; /**< Memory allocated by host for connection entries table. */ + uint32_t cme_mem; /**< Memory allocated by host for connection match entries table. */ +}; + +/* + * IPv4 rule synchronization reasons. + */ +#define NSS_IPV4_RULE_SYNC_REASON_STATS 0 + /**< Rule for synchronizing statistics. */ +#define NSS_IPV4_RULE_SYNC_REASON_FLUSH 1 + /**< Rule for flushing a cache entry. */ +#define NSS_IPV4_RULE_SYNC_REASON_EVICT 2 + /**< Rule for evicting a cache entry. */ +#define NSS_IPV4_RULE_SYNC_REASON_DESTROY 3 + /**< Rule for destroying a cache entry (requested by the host OS). 
*/ + +/** + * nss_ipv4_conn_sync + * IPv4 connection synchronization message. + */ +struct nss_ipv4_conn_sync { + uint32_t reserved; /**< Reserved field for backward compatibility. */ + uint8_t protocol; /**< Protocol number. */ + uint32_t flow_ip; /**< Flow IP address. */ + uint32_t flow_ip_xlate; /**< Translated flow IP address. */ + uint32_t flow_ident; /**< Flow identifier (e.g., port). */ + uint32_t flow_ident_xlate; /**< Translated flow identifier (e.g., port). */ + uint32_t flow_max_window; /**< Largest seen window for the flow direction. */ + + /** + * Largest seen sequence + segment length for the flow direction. + */ + uint32_t flow_end; + + uint32_t flow_max_end; + /**< Largest seen ack + max(1, win) for the flow direction. */ + uint32_t flow_rx_packet_count; + /**< Rx packet count for the flow interface. */ + uint32_t flow_rx_byte_count; + /**< Rx byte count for the flow interface. */ + uint32_t flow_tx_packet_count; + /**< Tx packet count for the flow interface. */ + uint32_t flow_tx_byte_count; + /**< Tx byte count for the flow interface. */ + uint32_t return_ip; /**< Return IP address. */ + uint32_t return_ip_xlate; /**< Translated return IP address. */ + uint32_t return_ident; /**< Return identier (e.g., port). */ + uint32_t return_ident_xlate; /**< Translated return identifier (e.g., port). */ + uint32_t return_max_window; + /**< Largest seen window for the return direction. */ + + /** + * Largest seen sequence + segment length for the return direction. + */ + uint32_t return_end; + + uint32_t return_max_end; + /**< Largest seen ack + max(1, win) for the return direction. */ + uint32_t return_rx_packet_count; + /**< Rx packet count for the return interface. */ + uint32_t return_rx_byte_count; + /**< Rx byte count for the return interface. */ + uint32_t return_tx_packet_count; + /**< Tx packet count for the return interface. */ + uint32_t return_tx_byte_count; + /**< Tx byte count for the return interface. 
*/ + uint32_t inc_ticks; /**< Number of ticks since the last synchronization. */ + uint32_t reason; /**< Reason for the synchronization. */ + + uint8_t flags; /**< Bit flags associated with the rule. */ + uint32_t qos_tag; /**< QoS tag. */ + uint32_t cause; /**< Flush cause associated with the rule. */ +}; + +/** + * nss_ipv4_conn_sync_many_msg + * Information for a multiple IPv4 connection statistics synchronization message. + */ +struct nss_ipv4_conn_sync_many_msg { + /* + * Request + */ + uint16_t index; /**< Request connection statistics from the index. */ + uint16_t size; /**< Buffer size of this message. */ + + /* + * Response + */ + uint16_t next; /**< Firmware response for the next connection to be requested. */ + uint16_t count; /**< Number of synchronized connections included in this message. */ + struct nss_ipv4_conn_sync conn_sync[]; /**< Array for the statistics. */ +}; + +/** + * nss_ipv4_accel_mode_cfg_msg + * IPv4 acceleration mode configuration. + */ +struct nss_ipv4_accel_mode_cfg_msg { + uint32_t mode; /**< Type of acceleration mode. */ +}; + +/** + * nss_ipv4_dscp2pri_cfg_msg + * IPv4 dscp2pri configuration msg. + */ +struct nss_ipv4_dscp2pri_cfg_msg { + uint8_t dscp; /**< Value of DSCP. */ + uint8_t priority; /**< Corresponding priority. */ +}; + +/** + * nss_ipv4_rps_hash_bitmap_cfg_msg + * RPS hash mask configuration. + * + * The bitmap represents the host cores to which NSS firmware can steer + * packets based on packet hash. The least significant bit represents core0. + */ +struct nss_ipv4_rps_hash_bitmap_cfg_msg { + uint32_t hash_bitmap; /**< Hash mask. */ +}; + +/** + * nss_ipv4_exception_events + * Exception events from the bridge or route handler. 
+ */ +enum nss_ipv4_exception_events { + NSS_IPV4_EXCEPTION_EVENT_ICMP_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_ICMP_UNHANDLED_TYPE, + NSS_IPV4_EXCEPTION_EVENT_ICMP_IPV4_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_ICMP_IPV4_UDP_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_ICMP_IPV4_TCP_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_ICMP_IPV4_UNKNOWN_PROTOCOL, + NSS_IPV4_EXCEPTION_EVENT_ICMP_NO_ICME, + NSS_IPV4_EXCEPTION_EVENT_ICMP_FLUSH_TO_HOST, + NSS_IPV4_EXCEPTION_EVENT_TCP_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_TCP_NO_ICME, + NSS_IPV4_EXCEPTION_EVENT_TCP_IP_OPTION, + NSS_IPV4_EXCEPTION_EVENT_TCP_IP_FRAGMENT, + NSS_IPV4_EXCEPTION_EVENT_TCP_SMALL_TTL, + NSS_IPV4_EXCEPTION_EVENT_TCP_NEEDS_FRAGMENTATION, + NSS_IPV4_EXCEPTION_EVENT_TCP_FLAGS, + NSS_IPV4_EXCEPTION_EVENT_TCP_SEQ_EXCEEDS_RIGHT_EDGE, + NSS_IPV4_EXCEPTION_EVENT_TCP_SMALL_DATA_OFFS, + NSS_IPV4_EXCEPTION_EVENT_TCP_BAD_SACK, + NSS_IPV4_EXCEPTION_EVENT_TCP_BIG_DATA_OFFS, + NSS_IPV4_EXCEPTION_EVENT_TCP_SEQ_BEFORE_LEFT_EDGE, + NSS_IPV4_EXCEPTION_EVENT_TCP_ACK_EXCEEDS_RIGHT_EDGE, + NSS_IPV4_EXCEPTION_EVENT_TCP_ACK_BEFORE_LEFT_EDGE, + NSS_IPV4_EXCEPTION_EVENT_UDP_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_UDP_NO_ICME, + NSS_IPV4_EXCEPTION_EVENT_UDP_IP_OPTION, + NSS_IPV4_EXCEPTION_EVENT_UDP_IP_FRAGMENT, + NSS_IPV4_EXCEPTION_EVENT_UDP_SMALL_TTL, + NSS_IPV4_EXCEPTION_EVENT_UDP_NEEDS_FRAGMENTATION, + NSS_IPV4_EXCEPTION_EVENT_WRONG_TARGET_MAC, + NSS_IPV4_EXCEPTION_EVENT_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_BAD_TOTAL_LENGTH, + NSS_IPV4_EXCEPTION_EVENT_BAD_CHECKSUM, + NSS_IPV4_EXCEPTION_EVENT_NON_INITIAL_FRAGMENT, + NSS_IPV4_EXCEPTION_EVENT_DATAGRAM_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_OPTIONS_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_UNKNOWN_PROTOCOL, + NSS_IPV4_EXCEPTION_EVENT_ESP_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_ESP_NO_ICME, + NSS_IPV4_EXCEPTION_EVENT_ESP_IP_OPTION, + NSS_IPV4_EXCEPTION_EVENT_ESP_IP_FRAGMENT, + NSS_IPV4_EXCEPTION_EVENT_ESP_SMALL_TTL, + 
NSS_IPV4_EXCEPTION_EVENT_ESP_NEEDS_FRAGMENTATION, + NSS_IPV4_EXCEPTION_EVENT_IVID_MISMATCH, + NSS_IPV4_EXCEPTION_EVENT_IVID_MISSING, + NSS_IPV4_EXCEPTION_EVENT_6RD_NO_ICME, + NSS_IPV4_EXCEPTION_EVENT_6RD_IP_OPTION, + NSS_IPV4_EXCEPTION_EVENT_6RD_IP_FRAGMENT, + NSS_IPV4_EXCEPTION_EVENT_6RD_NEEDS_FRAGMENTATION, + NSS_IPV4_EXCEPTION_EVENT_DSCP_MARKING_MISMATCH, + NSS_IPV4_EXCEPTION_EVENT_VLAN_MARKING_MISMATCH, + NSS_IPV4_EXCEPTION_EVENT_INTERFACE_MISMATCH, + NSS_IPV4_EXCEPTION_EVENT_GRE_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_GRE_NO_ICME, + NSS_IPV4_EXCEPTION_EVENT_GRE_IP_OPTION, + NSS_IPV4_EXCEPTION_EVENT_GRE_IP_FRAGMENT, + NSS_IPV4_EXCEPTION_EVENT_GRE_SMALL_TTL, + NSS_IPV4_EXCEPTION_EVENT_GRE_NEEDS_FRAGMENTATION, + NSS_IPV4_EXCEPTION_EVENT_PPTP_GRE_SESSION_MATCH_FAIL, + NSS_IPV4_EXCEPTION_EVENT_PPTP_GRE_INVALID_PROTO, + NSS_IPV4_EXCEPTION_EVENT_PPTP_GRE_NO_CME, + NSS_IPV4_EXCEPTION_EVENT_PPTP_GRE_IP_OPTION, + NSS_IPV4_EXCEPTION_EVENT_PPTP_GRE_IP_FRAGMENT, + NSS_IPV4_EXCEPTION_EVENT_PPTP_GRE_SMALL_TTL, + NSS_IPV4_EXCEPTION_EVENT_PPTP_GRE_NEEDS_FRAGMENTATION, + NSS_IPV4_EXCEPTION_EVENT_DESTROY, + NSS_IPV4_EXCEPTION_EVENT_FRAG_DF_SET, + NSS_IPV4_EXCEPTION_EVENT_FRAG_FAIL, + NSS_IPV4_EXCEPTION_EVENT_ICMP_IPV4_UDPLITE_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_UDPLITE_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_UDPLITE_NO_ICME, + NSS_IPV4_EXCEPTION_EVENT_UDPLITE_IP_OPTION, + NSS_IPV4_EXCEPTION_EVENT_UDPLITE_IP_FRAGMENT, + NSS_IPV4_EXCEPTION_EVENT_UDPLITE_SMALL_TTL, + NSS_IPV4_EXCEPTION_EVENT_UDPLITE_NEEDS_FRAGMENTATION, + NSS_IPV4_EXCEPTION_EVENT_MC_UDP_NO_ICME, + NSS_IPV4_EXCEPTION_EVENT_MC_MEM_ALLOC_FAILURE, + NSS_IPV4_EXCEPTION_EVENT_MC_UPDATE_FAILURE, + NSS_IPV4_EXCEPTION_EVENT_MC_PBUF_ALLOC_FAILURE, + NSS_IPV4_EXCEPTION_EVENT_PPPOE_BRIDGE_NO_ICME, + NSS_IPV4_EXCEPTION_EVENT_PPPOE_NO_SESSION, + NSS_IPV4_EXCEPTION_EVENT_ICMP_IPV4_GRE_HEADER_INCOMPLETE, + NSS_IPV4_EXCEPTION_EVENT_ICMP_IPV4_ESP_HEADER_INCOMPLETE, + 
NSS_IPV4_EXCEPTION_EVENT_EMESH_PRIO_MISMATCH, + NSS_IPV4_EXCEPTION_EVENT_MAX +}; + +/** + * nss_ipv4_node_sync + * IPv4 node synchronization statistics. + */ +struct nss_ipv4_node_sync { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t ipv4_connection_create_requests; + /**< Number of connection create requests. */ + + /** + * Number of connection create requests that collided with existing entries. + */ + uint32_t ipv4_connection_create_collisions; + + /** + * Number of connection create requests that had invalid interfaces. + */ + uint32_t ipv4_connection_create_invalid_interface; + + uint32_t ipv4_connection_destroy_requests; + /**< Number of connection destroy requests. */ + uint32_t ipv4_connection_destroy_misses; + /**< Number of connection destroy requests that missed the cache. */ + uint32_t ipv4_connection_hash_hits; /**< Number of connection hash hits. */ + uint32_t ipv4_connection_hash_reorders; /**< Number of connection hash reorders. */ + uint32_t ipv4_connection_flushes; /**< Number of connection flushes. */ + uint32_t ipv4_connection_evictions; /**< Number of connection evictions. */ + uint32_t ipv4_fragmentations; + /**< Number of successful IPv4 fragmentations performed. */ + uint32_t ipv4_dropped_by_rule; + /**< Number of IPv4 packets dropped because of a drop rule. */ + uint32_t ipv4_mc_connection_create_requests; + /**< Number of multicast connection create requests. */ + uint32_t ipv4_mc_connection_update_requests; + /**< Number of multicast connection update requests. */ + + /** + * Number of multicast connection create requests that had invalid interfaces. + */ + uint32_t ipv4_mc_connection_create_invalid_interface; + + uint32_t ipv4_mc_connection_destroy_requests; + /**< Number of multicast connection destroy requests. */ + + /** + * Number of multicast connection destroy requests that missed the cache. 
+ */ + uint32_t ipv4_mc_connection_destroy_misses; + + uint32_t ipv4_mc_connection_flushes; + /**< Number of multicast connection flushes. */ + + uint32_t ipv4_connection_create_invalid_mirror_ifnum; + /**< Number of create request failed with an invalid mirror interface number. */ + + uint32_t ipv4_connection_create_invalid_mirror_iftype; + /**< Number of create request failed with an invalid mirror interface type. */ + + uint32_t ipv4_mirror_failures; + /**< Mirror packet failed. */ + + uint32_t exception_events[NSS_IPV4_EXCEPTION_EVENT_MAX]; + /**< Number of exception events. */ +}; + +/** + * nss_ipv4_msg + * Data for sending and receiving IPv4 bridge or routing messages. + */ +struct nss_ipv4_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of an IPv4 bridge or routing message. + */ + union { + struct nss_ipv4_rule_create_msg rule_create; + /**< Create a rule. */ + struct nss_ipv4_rule_destroy_msg rule_destroy; + /**< Destroy a rule. */ + struct nss_ipv4_conn_sync conn_stats; + /**< Synchronize connection statistics. */ + struct nss_ipv4_node_sync node_stats; + /**< Synchronize node statistics. */ + struct nss_ipv4_rule_conn_get_table_size_msg size; + /**< Get the size for connection tables. */ + struct nss_ipv4_rule_conn_cfg_msg rule_conn_cfg; + /**< Configure a rule connection. */ + struct nss_ipv4_mc_rule_create_msg mc_rule_create; + /**< Create a multicast rule. */ + struct nss_ipv4_conn_sync_many_msg conn_stats_many; + /**< Synchronize multiple connection statistics. */ + struct nss_ipv4_accel_mode_cfg_msg accel_mode_cfg; + /**< Acceleration mode. */ + struct nss_ipv4_inquiry_msg inquiry; + /**< Inquiry if a connection has created. */ + struct nss_ipv4_dscp2pri_cfg_msg dscp2pri_cfg; + /**< Configure dscp2pri mapping. */ + struct nss_ipv4_rps_hash_bitmap_cfg_msg rps_hash_bitmap; + /**< Configure rps_hash_bitmap. */ + } msg; /**< Message payload. 
*/ +}; + +/** + * nss_ipv4_stats_notification + * Data for sending IPv4 statistics. + */ +struct nss_ipv4_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint64_t cmn_node_stats[NSS_STATS_NODE_MAX]; /**< Node statistics. */ + uint64_t special_stats[NSS_IPV4_STATS_MAX]; /**< IPv4 special statistics. */ + uint64_t exception_stats[NSS_IPV4_EXCEPTION_EVENT_MAX]; /**< IPv4 exception statistics. */ +}; + +/** + * Configured IPv4 connection number to use for calculating the total number of + * connections. + */ +extern int nss_ipv4_conn_cfg; + +#ifdef __KERNEL__ /* only kernel will use. */ + +/** + * nss_ipv4_max_conn_count + * Returns the maximum number of IPv4 connections that the NSS acceleration + * engine supports. + * + * @return + * Number of connections that can be accelerated. + */ +int nss_ipv4_max_conn_count(void); + +/** + * Callback function for receiving IPv4 messages. + * + * @datatypes + * nss_ipv4_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_ipv4_msg_callback_t)(void *app_data, struct nss_ipv4_msg *msg); + +/** + * nss_ipv4_tx + * Transmits an IPv4 message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_ipv4_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_ipv4_tx(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_msg *msg); + +/** + * nss_ipv4_tx_sync + * Transmits a synchronous IPv4 message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_ipv4_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. 
+ */ +extern nss_tx_status_t nss_ipv4_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_msg *msg); + +/** + * nss_ipv4_tx_with_size + * Transmits an IPv4 message with a specified size to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_ipv4_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * @param[in] size Actual size of this message. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_ipv4_tx_with_size(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_msg *msg, uint32_t size); + +/** + * nss_ipv4_notify_register + * Registers a notifier callback to forward the IPv4 messages received from the NSS + * firmware to the registered subsystem. + * + * @datatypes + * nss_ipv4_msg_callback_t + * + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_ipv4_notify_register(nss_ipv4_msg_callback_t cb, void *app_data); + +/** + * nss_ipv4_notify_unregister + * Degisters an IPv4 message notifier callback from the NSS. + * + * @return + * None. + * + * @dependencies + * The notifier callback must have been previously registered. + */ +extern void nss_ipv4_notify_unregister(void); + +/** + * nss_ipv4_conn_sync_many_notify_register + * Registers a notifier callback with the NSS for connection synchronization + * message responses. + * + * @datatypes + * nss_ipv4_msg_callback_t + * + * @param[in] cb Callback function for the message. + * + * @return + * None. + */ +extern void nss_ipv4_conn_sync_many_notify_register(nss_ipv4_msg_callback_t cb); + +/** + * nss_ipv4_conn_sync_many_notify_unregister + * Degisters a connection synchronization notifier callback from the NSS. + * + * @return + * None. + * + * @dependencies + * The notifier callback must have been previously registered. 
+ */ +extern void nss_ipv4_conn_sync_many_notify_unregister(void); + +/** + * nss_ipv4_get_mgr + * Gets the NSS context that is managing IPv4 processes. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_ipv4_get_mgr(void); + +/** + * nss_ipv4_register_handler + * Registers the IPv4 message handler. + * + * @return + * None. + */ +void nss_ipv4_register_handler(void); + +/** + * nss_ipv4_register_sysctl + * Registers the IPv4 system control table. + * + * @return + * None. + */ +void nss_ipv4_register_sysctl(void); + +/** + * nss_ipv4_unregister_sysctl + * Deregisters the IPv4 system control table. + * + * @return + * None. + * + * @dependencies + * The system control table must have been previously registered. + */ +void nss_ipv4_unregister_sysctl(void); + +/** + * nss_ipv4_msg_init + * Initializes IPv4 messages. + * + * @datatypes + * nss_ipv4_msg \n + * nss_ipv4_msg_callback_t + * + * @param[in,out] nim Pointer to the NSS interface message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_ipv4_msg_init(struct nss_ipv4_msg *nim, uint16_t if_num, uint32_t type, uint32_t len, + nss_ipv4_msg_callback_t cb, void *app_data); + +/** + * nss_ipv4_update_conn_count + * Sets the maximum number of IPv4 connections. + * + * @param[in] ipv4_max_conn Maximum number. + * + * @return + * 0 -- Success + */ +int nss_ipv4_update_conn_count(int ipv4_max_conn); + +/** + * nss_ipv4_free_conn_tables + * Frees memory allocated for connection tables. + * + * @return + * None. + */ +extern void nss_ipv4_free_conn_tables(void); + +/** + * nss_ipv4_dscp_action_get + * Gets the action value of the DSCP. + * + * @param[in] dscp Value of the DSCP field. + * + * @return + * Action value of the DSCP field. 
+ */ +enum nss_ipv4_dscp_map_actions nss_ipv4_dscp_action_get(uint8_t dscp); + +/* + * Logger APIs + */ + +/** + * nss_ipv4_log_tx_msg + * Logs an IPv4 message that is sent to the NSS firmware. + * + * @datatypes + * nss_ipv4_msg + * + * @param[in] nim Pointer to the NSS interface message. + * + * @return + * None. + */ +void nss_ipv4_log_tx_msg(struct nss_ipv4_msg *nim); + +/** + * nss_ipv4_log_rx_msg + * Logs an IPv4 message that is received from the NSS firmware. + * + * @datatypes + * nss_ipv4_msg + * + * @param[in] nim Pointer to the NSS interface message. + * + * @return + * None. + */ +void nss_ipv4_log_rx_msg(struct nss_ipv4_msg *nim); + +/** + * nss_ipv4_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_ipv4_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_ipv4_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_ipv4_stats_unregister_notifier(struct notifier_block *nb); + +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_IPV4_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv4_reasm.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv4_reasm.h new file mode 100644 index 000000000..f7785cfe1 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv4_reasm.h @@ -0,0 +1,89 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipv4_reasm.h + * NSS IPv4 reassembly interface definitions. + */ + +#ifndef __NSS_IPV4_REASM_H +#define __NSS_IPV4_REASM_H + +/** + * @addtogroup nss_ipv4_reasm_subsystem + * @{ + */ + +/* + * nss_ipv4_reasm_stats_types + * IPv4 reassembly node statistics. + */ +enum nss_ipv4_reasm_stats_types { + NSS_IPV4_REASM_STATS_EVICTIONS, /**< Number of evicted fragment queues due to set memory threshold. */ + NSS_IPV4_REASM_STATS_ALLOC_FAILS, /**< Number of fragment queue allocation failures. */ + NSS_IPV4_REASM_STATS_TIMEOUTS, /**< Number of expired fragment queues. */ + NSS_IPV4_REASM_STATS_MAX, /**< Maximum message type. */ +}; + +/** + * nss_ipv4_reasm_stats_notification + * Data for sending IPv4 reassembly statistics. + */ +struct nss_ipv4_reasm_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint64_t cmn_node_stats[NSS_STATS_NODE_MAX]; /**< Common node statistics. */ + uint64_t ipv4_reasm_stats[NSS_IPV4_REASM_STATS_MAX]; /**< IPv4 reassembly statistics. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ + +/** + * nss_ipv4_reasm_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. 
+ */ +extern int nss_ipv4_reasm_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_ipv4_reasm_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_ipv4_reasm_stats_unregister_notifier(struct notifier_block *nb); + +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_IPV4_REASM_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv6.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv6.h new file mode 100644 index 000000000..930e74cba --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv6.h @@ -0,0 +1,1304 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ipv6.h + * NSS IPv6 interface definitions. + */ + +#ifndef __NSS_IPV6_H +#define __NSS_IPV6_H + +/** + * @addtogroup nss_ipv6_subsystem + * @{ + */ + +/** + * Converts the format of an IPv6 address from Linux to NSS. 
@hideinitializer + */ +#define IN6_ADDR_TO_IPV6_ADDR(ipv6, in6) \ + { \ + ((uint32_t *)ipv6)[0] = in6.in6_u.u6_addr32[0]; \ + ((uint32_t *)ipv6)[1] = in6.in6_u.u6_addr32[1]; \ + ((uint32_t *)ipv6)[2] = in6.in6_u.u6_addr32[2]; \ + ((uint32_t *)ipv6)[3] = in6.in6_u.u6_addr32[3]; \ + } + +/** + * Converts the format of an IPv6 address from NSS to Linux. @hideinitializer + */ +#define IPV6_ADDR_TO_IN6_ADDR(in6, ipv6) \ + { \ + in6.in6_u.u6_addr32[0] = ((uint32_t *)ipv6)[0]; \ + in6.in6_u.u6_addr32[1] = ((uint32_t *)ipv6)[1]; \ + in6.in6_u.u6_addr32[2] = ((uint32_t *)ipv6)[2]; \ + in6.in6_u.u6_addr32[3] = ((uint32_t *)ipv6)[3]; \ + } + +/** + * Format of an IPv6 address (16 * 8 bits). + */ +#define IPV6_ADDR_OCTAL_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x" + +/** + * Prints an IPv6 address (16 * 8 bits). + */ +#define IPV6_ADDR_TO_OCTAL(ipv6) ((uint16_t *)ipv6)[0], ((uint16_t *)ipv6)[1], ((uint16_t *)ipv6)[2], ((uint16_t *)ipv6)[3], ((uint16_t *)ipv6)[4], ((uint16_t *)ipv6)[5], ((uint16_t *)ipv6)[6], ((uint16_t *)ipv6)[7] + +/* + * IPv6 connection flags (to be used with nss_ipv6_create::flags. + */ +#define NSS_IPV6_CREATE_FLAG_NO_SEQ_CHECK 0x1 + /**< Indicates that sequence numbers are not to be checked. */ +#define NSS_IPV6_CREATE_FLAG_BRIDGE_FLOW 0x02 + /**< Indicates that this is a pure bridge flow (no routing is involved). */ +#define NSS_IPV6_CREATE_FLAG_ROUTED 0x04 /**< Rule is for a routed connection. */ +#define NSS_IPV6_CREATE_FLAG_DSCP_MARKING 0x08 /**< Rule for DSCP marking. */ +#define NSS_IPV6_CREATE_FLAG_VLAN_MARKING 0x10 /**< Rule for VLAN marking. */ +#define NSS_IPV6_CREATE_FLAG_QOS_VALID 0x20 /**< Rule for Valid QoS. */ + +/** + * nss_ipv6_create + * Information for an IPv6 flow or connection create rule. + * + * All fields must be passed in host-endian order. + */ +struct nss_ipv6_create { + int32_t src_interface_num; + /**< Source interface number (virtual or physical). 
*/ + int32_t dest_interface_num; + /**< Destination interface number (virtual or physical). */ + int32_t protocol; /**< L4 protocol (e.g., TCP or UDP). */ + uint32_t flags; /**< Flags (if any) associated with this rule. */ + uint32_t from_mtu; /**< MTU of the incoming interface. */ + uint32_t to_mtu; /**< MTU of the outgoing interface. */ + uint32_t src_ip[4]; /**< Source IP address. */ + int32_t src_port; /**< Source L4 port (e.g., TCP or UDP port). */ + uint32_t dest_ip[4]; /**< Destination IP address. */ + int32_t dest_port; /**< Destination L4 port (e.g., TCP or UDP port). */ + uint8_t src_mac[ETH_ALEN]; /**< Source MAC address. */ + uint8_t dest_mac[ETH_ALEN]; /**< Destination MAC address. */ + uint8_t flow_window_scale; /**< Window scaling factor (TCP). */ + uint32_t flow_max_window; /**< Maximum window size (TCP). */ + uint32_t flow_end; /**< TCP window end. */ + uint32_t flow_max_end; /**< TCP window maximum end. */ + uint32_t flow_pppoe_if_exist; + /**< Flow direction: PPPoE interface existence flag. */ + int32_t flow_pppoe_if_num; + /**< Flow direction: PPPoE interface number. */ + uint16_t ingress_vlan_tag; + /**< Ingress VLAN tag expected for this flow. */ + uint8_t return_window_scale; + /**< Window scaling factor (TCP) for the return direction. */ + uint32_t return_max_window; + /**< Maximum window size (TCP) for the return direction. */ + uint32_t return_end; + /**< End for the return direction. */ + uint32_t return_max_end; + /**< Maximum end for the return direction. */ + uint32_t return_pppoe_if_exist; + /**< Return direction: PPPoE interface exist flag. */ + int32_t return_pppoe_if_num; + /**< Return direction: PPPoE interface number. */ + uint16_t egress_vlan_tag; /**< Egress VLAN tag expected for this flow. */ + uint32_t qos_tag; /**< Deprecated; will be removed soon. */ + uint32_t flow_qos_tag; /**< QoS tag value for flow direction. */ + uint32_t return_qos_tag; /**< QoS tag value for the return direction. 
*/ + uint8_t dscp_itag; /**< DSCP marking tag. */ + uint8_t dscp_imask; /**< DSCP marking input mask. */ + uint8_t dscp_omask; /**< DSCP marking output mask. */ + uint8_t dscp_oval; /**< DSCP marking output value. */ + uint16_t vlan_itag; /**< VLAN marking tag. */ + uint16_t vlan_imask; /**< VLAN marking input mask. */ + uint16_t vlan_omask; /**< VLAN marking output mask. */ + uint16_t vlan_oval; /**< VLAN marking output value. */ + uint32_t in_vlan_tag[MAX_VLAN_DEPTH]; + /**< Ingress VLAN tag expected for this flow. */ + uint32_t out_vlan_tag[MAX_VLAN_DEPTH]; + /**< Egress VLAN tag expected for this flow. */ + uint8_t flow_dscp; /**< IP DSCP value for flow direction. */ + uint8_t return_dscp; /**< IP DSCP value for the return direction. */ +}; + +/** + * nss_ipv6_destroy + * Information for an IPv6 flow or connection destroy rule. + */ +struct nss_ipv6_destroy { + int32_t protocol; /**< L4 protocol (e.g., TCP or UDP). */ + uint32_t src_ip[4]; /**< Source IP address. */ + int32_t src_port; /**< Source L4 port (e.g., TCP or UDP port). */ + uint32_t dest_ip[4]; /**< Destination IP address. */ + int32_t dest_port; /**< Destination L4 port (e.g., TCP or UDP port). */ +}; + +/** + * nss_ipv6_stats_types + * IPv6 node statistics. + */ +enum nss_ipv6_stats_types { + NSS_IPV6_STATS_ACCELERATED_RX_PKTS, + /**< Accelerated IPv6 Rx packets. */ + NSS_IPV6_STATS_ACCELERATED_RX_BYTES, + /**< Accelerated IPv6 Rx bytes. */ + NSS_IPV6_STATS_ACCELERATED_TX_PKTS, + /**< Accelerated IPv6 Tx packets. */ + NSS_IPV6_STATS_ACCELERATED_TX_BYTES, + /**< Accelerated IPv6 Tx bytes. */ + NSS_IPV6_STATS_CONNECTION_CREATE_REQUESTS, + /**< Number of IPv6 connection create requests. */ + NSS_IPV6_STATS_CONNECTION_CREATE_COLLISIONS, + /**< Number of IPv6 connection create requests that collided with existing entries. */ + NSS_IPV6_STATS_CONNECTION_CREATE_INVALID_INTERFACE, + /**< Number of IPv6 connection create requests that had invalid interface. 
*/ + NSS_IPV6_STATS_CONNECTION_DESTROY_REQUESTS, + /**< Number of IPv6 connection destroy requests. */ + NSS_IPV6_STATS_CONNECTION_DESTROY_MISSES, + /**< Number of IPv6 connection destroy requests that missed the cache. */ + NSS_IPV6_STATS_CONNECTION_HASH_HITS, + /**< Number of IPv6 connection hash hits. */ + NSS_IPV6_STATS_CONNECTION_HASH_REORDERS, + /**< Number of IPv6 connection hash reorders. */ + NSS_IPV6_STATS_CONNECTION_FLUSHES, + /**< Number of IPv6 connection flushes. */ + NSS_IPV6_STATS_CONNECTION_EVICTIONS, + /**< Number of IPv6 connection evictions. */ + NSS_IPV6_STATS_FRAGMENTATIONS, + /**< Number of successful IPv6 fragmentations performed. */ + NSS_IPV6_STATS_FRAG_FAILS, + /**< Number of IPv6 fragmentation fails. */ + NSS_IPV6_STATS_DROPPED_BY_RULE, + /**< Number of IPv6 packets dropped by a drop rule. */ + NSS_IPV6_STATS_MC_CONNECTION_CREATE_REQUESTS, + /**< Number of successful IPv6 multicast create requests. */ + NSS_IPV6_STATS_MC_CONNECTION_UPDATE_REQUESTS, + /**< Number of successful IPv6 multicast update requests. */ + NSS_IPV6_STATS_MC_CONNECTION_CREATE_INVALID_INTERFACE, + /**< Number of IPv6 multicast connection create requests that had invalid interface. */ + NSS_IPV6_STATS_MC_CONNECTION_DESTROY_REQUESTS, + /**< Number of IPv6 multicast connection destroy requests. */ + NSS_IPV6_STATS_MC_CONNECTION_DESTROY_MISSES, + /**< Number of IPv6 multicast connection destroy requests that missed the cache. */ + NSS_IPV6_STATS_MC_CONNECTION_FLUSHES, + /**< Number of IPv6 multicast connection flushes. */ + NSS_IPV6_STATS_CONNECTION_CREATE_INVALID_MIRROR_IFNUM, + /**< Number of IPv6 mirror connection requests with an invalid interface number. */ + NSS_IPV6_STATS_CONNECTION_CREATE_INVALID_MIRROR_IFTYPE, + /**< Number of IPv6 mirror connection requests with an invalid interface type. */ + + NSS_IPV6_STATS_MIRROR_FAILURES, + /**< Number of IPv6 mirror failures. */ + + NSS_IPV6_STATS_MAX, + /**< Maximum message type. 
*/ +}; + +/** + * nss_ipv6_message_types + * IPv6 bridge and routing rule message types. + * + * NSS_IPV6_RX_DEPRECATED0 is a deprecated type. It is kept for backward compatibility. + */ +enum nss_ipv6_message_types { + NSS_IPV6_TX_CREATE_RULE_MSG, + NSS_IPV6_TX_DESTROY_RULE_MSG, + NSS_IPV6_RX_DEPRECATED0, + NSS_IPV6_RX_CONN_STATS_SYNC_MSG, + NSS_IPV6_RX_NODE_STATS_SYNC_MSG, + NSS_IPV6_TX_CONN_CFG_RULE_MSG, + NSS_IPV6_TX_CREATE_MC_RULE_MSG, + NSS_IPV6_TX_CONN_STATS_SYNC_MANY_MSG, + NSS_IPV6_TX_ACCEL_MODE_CFG_MSG, + NSS_IPV6_TX_CONN_CFG_INQUIRY_MSG, + NSS_IPV6_TX_CONN_TABLE_SIZE_MSG, + NSS_IPV6_TX_DSCP2PRI_CFG_MSG, + NSS_IPV6_TX_RPS_HASH_BITMAP_CFG_MSG, + NSS_IPV6_MAX_MSG_TYPES, +}; + +/** + * nss_ipv6_dscp_map_actions + * Action types mapped to DSCP values. + */ +enum nss_ipv6_dscp_map_actions { + NSS_IPV6_DSCP_MAP_ACTION_ACCEL, + NSS_IPV6_DSCP_MAP_ACTION_DONT_ACCEL, + NSS_IPV6_DSCP_MAP_ACTION_MAX, +}; + +/* + * NSS IPv6 rule creation flags. + */ +#define NSS_IPV6_RULE_CREATE_FLAG_NO_SEQ_CHECK 0x01 + /**< Do not perform TCP sequence number checks. */ +#define NSS_IPV6_RULE_CREATE_FLAG_BRIDGE_FLOW 0x02 + /**< This is a pure bridge forwarding flow. */ +#define NSS_IPV6_RULE_CREATE_FLAG_ROUTED 0x04 + /**< Rule is for a routed connection. */ +#define NSS_IPV6_RULE_CREATE_FLAG_DSCP_MARKING 0x08 + /**< Rule has for a DSCP marking configured. */ +#define NSS_IPV6_RULE_CREATE_FLAG_VLAN_MARKING 0x10 + /**< Rule has for a VLAN marking configured. */ +#define NSS_IPV6_RULE_CREATE_FLAG_ICMP_NO_CME_FLUSH 0x20 + /**< Rule for not flushing connection match entry on ICMP packet. */ +#define NSS_IPV6_RULE_UPDATE_FLAG_CHANGE_MTU 0x40 + /**< Rule updation for MTU change. */ + +/** The L2 payload is not IPv6 but consists of an encapsulating protocol that carries an IPv6 payload within it. + */ +#define NSS_IPV6_RULE_CREATE_FLAG_L2_ENCAP 0x80 + +#define NSS_IPV6_RULE_CREATE_FLAG_DROP 0x100 + /**< Drop packets. 
*/ +#define NSS_IPV6_RULE_CREATE_FLAG_EXCEPTION 0x200 + /**< Rule to except packets. */ +#define NSS_IPV6_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK 0x400 + /**< Check the source interface for the rule. */ +#define NSS_IPV6_RULE_CREATE_FLAG_NO_SRC_IDENT 0x800 + /**< Flag to indicate NSS to ignore src_ident and use value 0 for it during rule addition. */ +#define NSS_IPV6_RULE_CREATE_FLAG_NO_MAC 0x1000 + /**< Flag to bypass writing MAC addresses. */ +#define NSS_IPV6_RULE_CREATE_FLAG_EMESH_SP 0x2000 + /**< Mark rule as E-MESH Service Prioritization valid. */ + +/* + * IPv6 rule creation validity flags. + */ +#define NSS_IPV6_RULE_CREATE_CONN_VALID 0x01 /**< Connection is valid. */ +#define NSS_IPV6_RULE_CREATE_TCP_VALID 0x02 /**< TCP protocol fields are valid. */ +#define NSS_IPV6_RULE_CREATE_PPPOE_VALID 0x04 /**< PPPoE fields are valid. */ +#define NSS_IPV6_RULE_CREATE_QOS_VALID 0x08 /**< QoS fields are valid. */ +#define NSS_IPV6_RULE_CREATE_VLAN_VALID 0x10 /**< VLAN fields are valid. */ +#define NSS_IPV6_RULE_CREATE_DSCP_MARKING_VALID 0x20 + /**< DSCP marking fields are valid. */ +#define NSS_IPV6_RULE_CREATE_VLAN_MARKING_VALID 0x40 + /**< VLAN marking fields are valid. */ +#define NSS_IPV6_RULE_CREATE_SRC_MAC_VALID 0x80 + /**< Source MAC address fields are valid. */ +#define NSS_IPV6_RULE_CREATE_NEXTHOP_VALID 0x100 + /**< Next hop interface number fields are valid. */ +#define NSS_IPV6_RULE_CREATE_RPS_VALID 0x200 /**< RPS for core selection is valid. */ +#define NSS_IPV6_RULE_CREATE_DEST_MAC_VALID 0x400 + /**< Destination MAC address fields are valid. */ +#define NSS_IPV6_RULE_CREATE_IGS_VALID 0x800 /**< Ingress shaping fields are valid. */ +#define NSS_IPV6_RULE_CREATE_IDENTIFIER_VALID 0x1000 /**< Identifier is valid. */ +#define NSS_IPV6_RULE_CREATE_MIRROR_VALID 0x2000 /**< Mirror fields are valid. */ + +/* + * Multicast command rule flags + */ +#define NSS_IPV6_MC_RULE_CREATE_FLAG_MC_UPDATE 0x01 /**< Multicast rule update. 
*/ +#define NSS_IPV6_MC_RULE_CREATE_FLAG_MC_EMESH_SP 0x02 + /**< Mark multicast rule as E-MESH Service Prioritization valid. */ + +/* + * Multicast command validity flags + */ +#define NSS_IPV6_MC_RULE_CREATE_FLAG_QOS_VALID 0x01 + /**< QoS fields are valid. */ +#define NSS_IPV6_MC_RULE_CREATE_FLAG_DSCP_MARKING_VALID 0x02 + /**< DSCP fields are valid. */ +#define NSS_IPV6_MC_RULE_CREATE_FLAG_INGRESS_VLAN_VALID 0x04 + /**< Ingress VLAN fields are valid. */ +#define NSS_IPV6_MC_RULE_CREATE_FLAG_INGRESS_PPPOE 0x08 + /**< Ingress PPPoE fields are valid. */ +#define NSS_IPV6_MC_RULE_CREATE_FLAG_IGS_VALID 0x10 + /**< Ingress shaping fields are valid. */ + +/* + * Per-interface rule flags for a multicast connection (to be used with the rule_flags + * field of nss_ipv6_mc_if_rule structure). + */ +#define NSS_IPV6_MC_RULE_CREATE_IF_FLAG_BRIDGE_FLOW 0x01 + /**< Bridge flow. */ +#define NSS_IPV6_MC_RULE_CREATE_IF_FLAG_ROUTED_FLOW 0x02 + /**< Routed flow. */ +#define NSS_IPV6_MC_RULE_CREATE_IF_FLAG_JOIN 0x04 + /**< Interface has joined the flow. */ +#define NSS_IPV6_MC_RULE_CREATE_IF_FLAG_LEAVE 0x08 + /**< Interface has left the flow. */ + +/* + * Per-interface valid flags for a multicast connection (to be used with the valid_flags + * field of nss_ipv6_mc_if_rule structure). + */ +#define NSS_IPV6_MC_RULE_CREATE_IF_FLAG_VLAN_VALID 0x01 + /**< VLAN fields are valid. */ +#define NSS_IPV6_MC_RULE_CREATE_IF_FLAG_PPPOE_VALID 0x02 + /**< PPPoE fields are valid. */ + +/* + * Source MAC address valid flags (to be used with mac_valid_flags field of nss_ipv6_src_mac_rule structure) + */ +#define NSS_IPV6_SRC_MAC_FLOW_VALID 0x01 + /**< MAC address for the flow interface is valid. */ +#define NSS_IPV6_SRC_MAC_RETURN_VALID 0x02 + /**< MAC address for the return interface is valid. 
*/ + +/* + * Identifier valid flags (to be used with identifier_valid_flags field of nss_ipv6_identifier_rule structure) + */ +#define NSS_IPV6_FLOW_IDENTIFIER_VALID 0x01 + /**< Identifier for flow direction is valid. */ +#define NSS_IPV6_RETURN_IDENTIFIER_VALID 0x02 + /**< Identifier for return direction is valid. */ + +/* + * Mirror valid flags (to be used with the valid field of nss_ipv6_mirror_rule structure) + */ +#define NSS_IPV6_MIRROR_FLOW_VALID 0x01 + /**< Mirror interface number for the flow direction is valid. */ +#define NSS_IPV6_MIRROR_RETURN_VALID 0x02 + /**< Mirror interface number for the return direction is valid. */ + +/** + * nss_ipv6_exception_events + * Exception events from an IPv6 bridge or route handler. + */ +enum nss_ipv6_exception_events { + NSS_IPV6_EXCEPTION_EVENT_ICMP_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_ICMP_UNHANDLED_TYPE, + NSS_IPV6_EXCEPTION_EVENT_ICMP_IPV6_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_ICMP_IPV6_UDP_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_ICMP_IPV6_TCP_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_ICMP_IPV6_UNKNOWN_PROTOCOL, + NSS_IPV6_EXCEPTION_EVENT_ICMP_NO_ICME, + NSS_IPV6_EXCEPTION_EVENT_ICMP_FLUSH_TO_HOST, + NSS_IPV6_EXCEPTION_EVENT_TCP_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_TCP_NO_ICME, + NSS_IPV6_EXCEPTION_EVENT_TCP_SMALL_HOP_LIMIT, + NSS_IPV6_EXCEPTION_EVENT_TCP_NEEDS_FRAGMENTATION, + NSS_IPV6_EXCEPTION_EVENT_TCP_FLAGS, + NSS_IPV6_EXCEPTION_EVENT_TCP_SEQ_EXCEEDS_RIGHT_EDGE, + NSS_IPV6_EXCEPTION_EVENT_TCP_SMALL_DATA_OFFS, + NSS_IPV6_EXCEPTION_EVENT_TCP_BAD_SACK, + NSS_IPV6_EXCEPTION_EVENT_TCP_BIG_DATA_OFFS, + NSS_IPV6_EXCEPTION_EVENT_TCP_SEQ_BEFORE_LEFT_EDGE, + NSS_IPV6_EXCEPTION_EVENT_TCP_ACK_EXCEEDS_RIGHT_EDGE, + NSS_IPV6_EXCEPTION_EVENT_TCP_ACK_BEFORE_LEFT_EDGE, + NSS_IPV6_EXCEPTION_EVENT_UDP_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_UDP_NO_ICME, + NSS_IPV6_EXCEPTION_EVENT_UDP_SMALL_HOP_LIMIT, + NSS_IPV6_EXCEPTION_EVENT_UDP_NEEDS_FRAGMENTATION, + 
NSS_IPV6_EXCEPTION_EVENT_WRONG_TARGET_MAC, + NSS_IPV6_EXCEPTION_EVENT_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_UNKNOWN_PROTOCOL, + NSS_IPV6_EXCEPTION_EVENT_IVID_MISMATCH, + NSS_IPV6_EXCEPTION_EVENT_IVID_MISSING, + NSS_IPV6_EXCEPTION_EVENT_DSCP_MARKING_MISMATCH, + NSS_IPV6_EXCEPTION_EVENT_VLAN_MARKING_MISMATCH, + NSS_IPV6_EXCEPTION_EVENT_INTERFACE_MISMATCH, + NSS_IPV6_EXCEPTION_EVENT_GRE_NO_ICME, + NSS_IPV6_EXCEPTION_EVENT_GRE_NEEDS_FRAGMENTATION, + NSS_IPV6_EXCEPTION_EVENT_GRE_SMALL_HOP_LIMIT, + NSS_IPV6_EXCEPTION_EVENT_DESTROY, + NSS_IPV6_EXCEPTION_EVENT_ICMP_IPV6_UDPLITE_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_UDPLITE_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_UDPLITE_NO_ICME, + NSS_IPV6_EXCEPTION_EVENT_UDPLITE_SMALL_HOP_LIMIT, + NSS_IPV6_EXCEPTION_EVENT_UDPLITE_NEEDS_FRAGMENTATION, + NSS_IPV6_EXCEPTION_EVENT_MC_UDP_NO_ICME, + NSS_IPV6_EXCEPTION_EVENT_MC_MEM_ALLOC_FAILURE, + NSS_IPV6_EXCEPTION_EVENT_MC_UPDATE_FAILURE, + NSS_IPV6_EXCEPTION_EVENT_MC_PBUF_ALLOC_FAILURE, + NSS_IPV6_EXCEPTION_EVENT_ESP_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_ESP_NO_ICME, + NSS_IPV6_EXCEPTION_EVENT_ESP_IP_FRAGMENT, + NSS_IPV6_EXCEPTION_EVENT_ESP_SMALL_HOP_LIMIT, + NSS_IPV6_EXCEPTION_EVENT_ESP_NEEDS_FRAGMENTATION, + NSS_IPV6_EXCEPTION_EVENT_TUNIPIP6_NO_ICME, + NSS_IPV6_EXCEPTION_EVENT_TUNIPIP6_SMALL_HOP_LIMIT, + NSS_IPV6_EXCEPTION_EVENT_TUNIPIP6_NEEDS_FRAGMENTATION, + NSS_IPV6_EXCEPTION_EVENT_PPPOE_BRIDGE_NO_ICME, + NSS_IPV6_EXCEPTION_EVENT_DONT_FRAG_SET, + NSS_IPV6_EXCEPTION_EVENT_REASSEMBLY_NOT_SUPPORTED, + NSS_IPV6_EXCEPTION_EVENT_PPPOE_NO_SESSION, + NSS_IPV6_EXCEPTION_EVENT_ICMP_IPV6_GRE_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_ICMP_IPV6_ESP_HEADER_INCOMPLETE, + NSS_IPV6_EXCEPTION_EVENT_EMESH_PRIO_MISMATCH, + NSS_IPV6_EXCEPTION_EVENT_MAX +}; + +/** + * nss_ipv6_5tuple + * Common 5-tuple information. + */ +struct nss_ipv6_5tuple { + uint32_t flow_ip[4]; /**< Flow IP address. */ + uint32_t flow_ident; /**< Flow identifier (e.g., TCP or UDP port). 
*/ + uint32_t return_ip[4]; /**< Return IP address. */ + uint32_t return_ident; /**< Return identier (e.g., TCP or UDP port). */ + uint8_t protocol; /**< Protocol number. */ + uint8_t reserved[3]; /**< Padded for alignment. */ +}; + +/** + * nss_ipv6_connection_rule + * Information for creating a connection. + */ +struct nss_ipv6_connection_rule { + uint16_t flow_mac[3]; /**< Flow MAC address. */ + uint16_t return_mac[3]; /**< Return MAC address. */ + int32_t flow_interface_num; /**< Flow interface number. */ + int32_t return_interface_num; /**< Return interface number. */ + uint32_t flow_mtu; /**< MTU for the flow interface. */ + uint32_t return_mtu; /**< MTU for the return interface. */ +}; + +/** + * nss_ipv6_pppoe_rule + * Information for PPPoE connection rules. + */ +struct nss_ipv6_pppoe_rule { + uint32_t flow_if_exist; + /**< PPPoE interface existence flag for the flow direction. */ + int32_t flow_if_num; + /**< PPPoE interface number for the flow direction. */ + uint32_t return_if_exist; + /**< PPPoE interface existence flag for the return direction. */ + int32_t return_if_num; + /**< PPPoE interface number for the return direction. */ +}; + +/** + * nss_ipv6_dscp_rule + * Information for DSCP connection rules. + */ +struct nss_ipv6_dscp_rule { + uint8_t flow_dscp; /**< Egress DSCP value for the flow direction. */ + uint8_t return_dscp; /**< Egress DSCP value for the return direction. */ + uint8_t reserved[2]; /**< Padded for alignment. */ +}; + +/** + * nss_ipv6_vlan_rule + * Information for VLAN connection rules. + */ +struct nss_ipv6_vlan_rule { + uint32_t ingress_vlan_tag; /**< VLAN tag for the ingress packets. */ + uint32_t egress_vlan_tag; /**< VLAN tag for egress packets. */ +}; + +/** + * nss_ipv6_nexthop + * Information for the next hop interface numbers. + * + * A next hop is the next interface that will receive the packet as opposed to + * the final interface when the packet leaves the device. 
+ */ +struct nss_ipv6_nexthop { + /** + * Next hop interface number of the flow direction (from which the connection + * originated). + */ + int32_t flow_nexthop; + /** + * Next hop interface number of the return direction (to which the connection + * is destined). + */ + int32_t return_nexthop; +}; + +/** + * nss_ipv6_protocol_tcp_rule + * Information for TCP connection rules. + */ +struct nss_ipv6_protocol_tcp_rule { + uint32_t flow_max_window; + /**< Largest seen window for the flow direction. */ + uint32_t flow_end; + /**< Largest seen sequence + segment length for the flow direction. */ + uint32_t flow_max_end; + /**< Largest seen ack + max(1, win) for the flow direction. */ + uint32_t return_max_window; + /**< Largest seen window for the return direction. */ + uint32_t return_end; + /**< Largest seen sequence + segment length for the return direction. */ + uint32_t return_max_end; + /**< Largest seen ack + max(1, win) for the return direction. */ + uint8_t flow_window_scale; + /**< Window scaling factor for the flow direction. */ + uint8_t return_window_scale; + /**< Window scaling factor for the return direction. */ + uint16_t reserved; + /**< Alignment padding. */ +}; + +/** + * nss_ipv6_igs_rule + * Information for ingress shaping connection rules. + */ +struct nss_ipv6_igs_rule { + uint16_t igs_flow_qos_tag; + /**< Ingress shaping QoS tag associated with this rule for the flow direction. */ + uint16_t igs_return_qos_tag; + /**< Ingress shaping QoS tag associated with this rule for the return direction. */ +}; + +/** + * nss_ipv6_qos_rule + * Information for QoS connection rules. + */ +struct nss_ipv6_qos_rule { + uint32_t flow_qos_tag; + /**< QoS tag associated with this rule for the flow direction. */ + uint32_t return_qos_tag; + /**< QoS tag associated with this rule for the return direction. */ +}; + +/** + * nss_ipv6_src_mac_rule + * Information for source MAC address rules. 
+ */ +struct nss_ipv6_src_mac_rule { + uint32_t mac_valid_flags; /**< MAC address validity flags. */ + uint16_t flow_src_mac[3]; /**< Source MAC address for the flow direction. */ + uint16_t return_src_mac[3]; /**< Source MAC address for the return direction. */ +}; + +/** + * nss_ipv6_rps_rule + * RPS rule structure. + */ +struct nss_ipv6_rps_rule { + uint8_t flow_rps; + /**< RPS for core selection for flow direction. */ + uint8_t return_rps; + /**< RPS for core selection for return direction. */ + uint8_t reserved[2]; + /**< Padded for alignment. */ +}; + +/** + * nss_ipv6_identifier_rule + * Identifier rule structure. + */ +struct nss_ipv6_identifier_rule { + uint32_t identifier_valid_flags; + /**< Identifier validity flags. */ + uint32_t flow_identifier; + /**< Identifier for flow direction. */ + uint32_t return_identifier; + /**< Identifier for return direction. */ +}; + +/** + * nss_ipv6_mirror_rule + * Mirror rule structure. + */ +struct nss_ipv6_mirror_rule { + uint32_t valid; /**< Mirror validity flags. */ + nss_if_num_t flow_ifnum; /**< Flow mirror interface number. */ + nss_if_num_t return_ifnum; /**< Return mirror interface number. */ +}; + +/** + * nss_ipv6_error_response_types + * Error types for IPv6 messages. + */ +enum nss_ipv6_error_response_types { + NSS_IPV6_UNKNOWN_MSG_TYPE = 1, /**< Unknown error. */ + NSS_IPV6_CR_INVALID_PNODE_ERROR, /**< Invalid interface number. */ + NSS_IPV6_CR_MISSING_CONNECTION_RULE_ERROR, /**< Missing connection rule. */ + NSS_IPV6_CR_BUFFER_ALLOC_FAIL_ERROR, /**< Buffer allocation failed. */ + NSS_IPV6_DR_NO_CONNECTION_ENTRY_ERROR, + /**< No connection was found to delete. */ + NSS_IPV6_CR_CONN_CFG_ALREADY_CONFIGURED_ERROR, + /**< Connection configuration was already done once. */ + + NSS_IPV6_CR_CONN_CFG_NOT_MULTIPLE_OF_QUANTA_ERROR, + /**< Input for connection configuration is not a multiple of quanta. */ + + /** + * Input for connection configuration exceeds the maximum number of supported + * connections. 
+ */ + NSS_IPV6_CR_CONN_CFG_EXCEEDS_LIMIT_ERROR, + + /** + * Memory allocation for connection configuration failed at the NSS firmware. + */ + NSS_IPV6_CR_CONN_CFG_MEM_ALLOC_FAIL_ERROR, + + NSS_IPV6_CR_MULTICAST_INVALID_PROTOCOL, + /**< Invalid L4 protocol for creating a multicast rule. */ + NSS_IPV6_CR_MULTICAST_UPDATE_INVALID_FLAGS, + /**< Invalid multicast flags for updating multicast. */ + NSS_IPV6_CR_MULTICAST_UPDATE_INVALID_IF, + /**< Invalid interface for updating multicast. */ + NSS_IPV6_CR_ACCEL_MODE_CONFIG_INVALID, + /**< Invalid config value for acceleration mode. */ + NSS_IPV6_CR_INVALID_MSG_ERROR, + /**< Invalid message size error. */ + NSS_IPV6_CR_DSCP2PRI_PRI_INVALID, + /**< Priority value out of range error. */ + NSS_IPV6_CR_DSCP2PRI_CONFIG_INVALID, + /**< Invalid DSCP value. */ + NSS_IPV6_CR_INVALID_RPS, + /**< Invalid RPS Value. */ + NSS_IPV6_HASH_BITMAP_INVALID, + /**< Invalid hash bitmap. */ + NSS_IPV6_DR_HW_DECEL_FAIL_ERROR, + /**< Hardware deceleration fail error. */ + NSS_IPV6_CR_RETURN_EXIST_ERROR, + /**< Rule creation failed because a 5-tuple return already exists. */ + NSS_IPV6_CR_INVALID_IDENTIFIER, + /**< Invalid identifier value. */ + NSS_IPV6_CR_EMESH_SP_CONFIG_INVALID, + /**< Rule creation failed because Qos tag was not set for a Emesh SP rule. */ + NSS_IPV6_LAST + /**< Maximum number of error responses. */ +}; + +/** + * nss_ipv6_rule_create_msg + * IPv6 rule for creating sub-messages. + */ +struct nss_ipv6_rule_create_msg { + /* + * Request + */ + uint16_t valid_flags; + /**< Bit flags associated with the validity of parameters. */ + uint16_t rule_flags; + /**< Bit flags associated with the rule. */ + struct nss_ipv6_5tuple tuple; + /**< Holds values of the 5 tuple. */ + struct nss_ipv6_connection_rule conn_rule; + /**< Basic connection-specific data. */ + struct nss_ipv6_protocol_tcp_rule tcp_rule; + /**< Protocol-related accleration parameters. 
*/ + struct nss_ipv6_pppoe_rule pppoe_rule; + /**< PPPoE-related accleration parameters. */ + struct nss_ipv6_qos_rule qos_rule; + /**< QoS-related accleration parameters. */ + struct nss_ipv6_dscp_rule dscp_rule; + /**< DSCP-related accleration parameters. */ + struct nss_ipv6_vlan_rule vlan_primary_rule; + /**< VLAN-related accleration parameters. */ + struct nss_ipv6_vlan_rule vlan_secondary_rule; + /**< VLAN-related accleration parameters. */ + struct nss_ipv6_src_mac_rule src_mac_rule; + /**< Source MAC address-related acceleration parameters. */ + struct nss_ipv6_nexthop nexthop_rule; + /**< Parameters related to the next hop. */ + struct nss_ipv6_rps_rule rps_rule; + /**< RPS parameter. */ + struct nss_ipv6_igs_rule igs_rule; + /**< Ingress shaping related accleration parameters. */ + struct nss_ipv6_identifier_rule identifier; + /**< Rule for adding identifier. */ + struct nss_ipv6_mirror_rule mirror_rule; + /**< Mirror rule parameter. */ +}; + +/** + * nss_ipv6_inquiry_msg + * IPv6 connection inquiry sub-messages. + */ +struct nss_ipv6_inquiry_msg { + /** + * Request by 5-tuple and response in other items. + */ + struct nss_ipv6_rule_create_msg rr; +}; + +/** + * nss_ipv6_mc_if_rule + * IPv6 multicast rule for creating a per-interface payload. + */ +struct nss_ipv6_mc_if_rule { + uint16_t rule_flags; + /**< Bit flags associated with the rule for this interface. */ + + /** + * Bit flags associated with the validity of parameters for this interface. + */ + uint16_t valid_flags; + + uint32_t egress_vlan_tag[MAX_VLAN_DEPTH]; + /**< VLAN tag stack for the egress packets. */ + int32_t pppoe_if_num; /**< PPPoE interface number. */ + uint32_t if_num; /**< Interface number. */ + uint32_t if_mtu; /**< MTU of the interface. */ + uint16_t if_mac[3]; /**< Interface MAC address. */ + uint8_t reserved[2]; /**< Reserved 2 bytes for alignment. */ +}; + +/** + * nss_ipv6_mc_rule_create_msg + * IPv6 multicast rule for creating sub-messages. 
+ */ +struct nss_ipv6_mc_rule_create_msg { + struct nss_ipv6_5tuple tuple; /**< Holds values of the 5 tuple. */ + + uint32_t rule_flags; /**< Multicast command rule flags. */ + uint32_t valid_flags; /**< Multicast command validity flags. */ + uint32_t src_interface_num; + /**< Source interface number (virtual or physical). */ + uint32_t ingress_vlan_tag[MAX_VLAN_DEPTH]; + /**< VLAN tag stack for the ingress packets. */ + uint32_t qos_tag; /**< QoS tag for the flow. */ + uint16_t dest_mac[3]; /**< Destination multicast MAC address. */ + uint16_t if_count; /**< Number of destination interfaces. */ + uint8_t egress_dscp; /**< Egress DSCP value for the flow. */ + uint8_t reserved[1]; /**< Reserved 1 byte for alignment. */ + uint16_t igs_qos_tag; /**< Ingress shaping QoS tag for the flow. */ + + struct nss_ipv6_mc_if_rule if_rule[NSS_MC_IF_MAX]; + /**< Per-interface information. */ +}; + +/** + * nss_ipv6_rule_destroy_msg + * IPv6 rule for deleting sub-messages. + */ +struct nss_ipv6_rule_destroy_msg { + struct nss_ipv6_5tuple tuple; /**< Holds values of the 5 tuple. */ +}; + +/** + * nss_ipv6_rule_conn_get_table_size_msg + * IPv6 rule for fetching connection tables size. + */ +struct nss_ipv6_rule_conn_get_table_size_msg { + uint32_t num_conn; + /**< Number of supported IPv6 connections. */ + uint32_t ce_table_size; + /**< Size of the connection entry table in NSS firmware. */ + uint32_t cme_table_size; + /**< Size of the connection match entry table in NSS firmware. */ +}; + +/** + * nss_ipv6_rule_conn_cfg_msg + * IPv6 rule for connection configuration sub-messages. + */ +struct nss_ipv6_rule_conn_cfg_msg { + uint32_t num_conn; /**< Number of supported IPv6 connections. */ + uint32_t ce_mem; /**< Memory allocated by host for connection entries table. */ + uint32_t cme_mem; /**< Memory allocated by host for connection match entries table. */ +}; + +/* + * IPv6 rule synchronization reasons. 
+ */ +#define NSS_IPV6_RULE_SYNC_REASON_STATS 0 + /**< Rule for synchronizing statistics. */ +#define NSS_IPV6_RULE_SYNC_REASON_FLUSH 1 + /**< Rule for flushing a cache entry. */ +#define NSS_IPV6_RULE_SYNC_REASON_EVICT 2 + /**< Rule for evicting a cache entry. */ +#define NSS_IPV6_RULE_SYNC_REASON_DESTROY 3 + /**< Rule for destroying a cache entry (requested by the host OS). */ + +/** + * nss_ipv6_conn_sync + * IPv6 connection synchronization message. + */ +struct nss_ipv6_conn_sync { + uint32_t reserved; /**< Reserved field for backward compatibility. */ + uint8_t protocol; /**< Protocol number. */ + uint32_t flow_ip[4]; /**< Flow IP address. */ + uint32_t flow_ident; /**< Flow identifier (e.g., port). */ + uint32_t flow_max_window; /**< Largest seen window for the flow direction. */ + + /** + * Largest seen sequence + segment length for the flow direction. + */ + uint32_t flow_end; + + uint32_t flow_max_end; + /**< Largest seen ack + max(1, win) for the flow direction. */ + uint32_t flow_rx_packet_count; /**< Rx packet count for the flow interface. */ + uint32_t flow_rx_byte_count; /**< Rx byte count for the flow interface. */ + uint32_t flow_tx_packet_count; /**< Tx packet count for the flow interface. */ + uint32_t flow_tx_byte_count; /**< Tx byte count for the flow interface. */ + uint32_t return_ip[4]; /**< Return IP address. */ + uint32_t return_ident; /**< Return identier (e.g., port). */ + uint32_t return_max_window; + /**< Largest seen window for the return direction. */ + + /** + * Largest seen sequence + segment length for the return direction. + */ + uint32_t return_end; + + uint32_t return_max_end; + /**< Largest seen ack + max(1, win) for the return direction. */ + uint32_t return_rx_packet_count; + /**< Rx packet count for the return interface. */ + uint32_t return_rx_byte_count; + /**< Rx byte count for the return interface. */ + uint32_t return_tx_packet_count; + /**< Tx packet count for the return interface. 
*/ + uint32_t return_tx_byte_count; + /**< Tx byte count for the return interface. */ + uint32_t inc_ticks; /**< Number of ticks since the last synchronization. */ + uint32_t reason; /**< Reason for the synchronization. */ + uint8_t flags; /**< Bit flags associated with the rule. */ + uint32_t qos_tag; /**< QoS tag. */ + uint32_t cause; /**< Flush cause associated with the rule. */ +}; + +/** + * nss_ipv6_conn_sync_many_msg + * Information for a multiple IPv6 connection statistics synchronization message. + */ +struct nss_ipv6_conn_sync_many_msg { + /* Request. */ + uint16_t index; /**< Request connection statistics from the index. */ + uint16_t size; /**< Buffer size of this message. */ + + /* Response. */ + uint16_t next; /**< Firmware response for the next connection to be requested. */ + uint16_t count; /**< Number of synchronized connections included in this message. */ + + struct nss_ipv6_conn_sync conn_sync[]; /**< Array for the statistics. */ +}; + +/** + * nss_ipv6_accel_mode_cfg_msg + * IPv6 acceleration mode configuration. + */ +struct nss_ipv6_accel_mode_cfg_msg { + uint32_t mode; /**< Type of acceleration mode. */ +}; + +/** + * nss_ipv6_dscp2pri_cfg_msg + * IPv6 dscp2pri configuration msg. + */ +struct nss_ipv6_dscp2pri_cfg_msg { + uint8_t dscp; /**< Value of DSCP. */ + uint8_t priority; /**< Corresponding priority. */ +}; + +/** + * nss_ipv6_rps_hash_bitmap_cfg_msg + * RPS hash mask configuration. + * + * The bitmap represents the host cores to which NSS firmware can steer + * packets based on packet hash. The least significant bit represents core0. + */ +struct nss_ipv6_rps_hash_bitmap_cfg_msg { + uint32_t hash_bitmap; /**< Hash mask. */ +}; + +/** + * nss_ipv6_node_sync + * IPv6 node synchronization statistics. + */ +struct nss_ipv6_node_sync { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t ipv6_connection_create_requests; + /**< Number of connection create requests. 
*/ + + /** + * Number of connection create requests that collided with the existing entries. + */ + uint32_t ipv6_connection_create_collisions; + + /** + * Number of connection create requests that had invalid interfaces. + */ + uint32_t ipv6_connection_create_invalid_interface; + + uint32_t ipv6_connection_destroy_requests; + /**< Number of connection destroy requests. */ + uint32_t ipv6_connection_destroy_misses; + /**< Number of connection destroy requests that missed the cache. */ + uint32_t ipv6_connection_hash_hits; /**< Number of connection hash hits. */ + uint32_t ipv6_connection_hash_reorders; /**< Number of connection hash reorders. */ + uint32_t ipv6_connection_flushes; /**< Number of connection flushes. */ + uint32_t ipv6_connection_evictions; /**< Number of connection evictions. */ + uint32_t ipv6_fragmentations; /**< Number of successful fragmentations. */ + uint32_t ipv6_frag_fails; /**< Number of fragmentation fails. */ + uint32_t ipv6_dropped_by_rule; /**< Number of packets dropped by a drop rule.*/ + uint32_t ipv6_mc_connection_create_requests; + /**< Number of multicast connection create requests. */ + uint32_t ipv6_mc_connection_update_requests; + /**< Number of multicast connection update requests. */ + + /** + * Number of multicast connection create requests that had invalid interfaces. + */ + uint32_t ipv6_mc_connection_create_invalid_interface; + + uint32_t ipv6_mc_connection_destroy_requests; + /**< Number of multicast connection destroy requests. */ + + /** + * Number of multicast connection destroy requests that missed the cache. + */ + uint32_t ipv6_mc_connection_destroy_misses; + + uint32_t ipv6_mc_connection_flushes; + /**< Number of multicast connection flushes. */ + + uint32_t ipv6_connection_create_invalid_mirror_ifnum; + /**< Number of create request failed with an invalid mirror interface number. 
*/ + + uint32_t ipv6_connection_create_invalid_mirror_iftype; + /**< Number of create request failed with an invalid mirror interface type. */ + + uint32_t ipv6_mirror_failures; + /**< Mirror packet failed. */ + + uint32_t exception_events[NSS_IPV6_EXCEPTION_EVENT_MAX]; + /**< Number of exception events. */ +}; + +/** + * nss_ipv6_msg + * Data for sending and receiving IPv6 bridge or routing messages. + */ +struct nss_ipv6_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of an IPv6 bridge or routing message. + */ + union { + struct nss_ipv6_rule_create_msg rule_create; + /**< Create a rule. */ + struct nss_ipv6_rule_destroy_msg rule_destroy; + /**< Destroy a rule. */ + struct nss_ipv6_conn_sync conn_stats; + /**< Synchronize statistics. */ + struct nss_ipv6_node_sync node_stats; + /**< Synchronize node statistics. */ + struct nss_ipv6_rule_conn_get_table_size_msg size; + /**< Get the size for connection tables. */ + struct nss_ipv6_rule_conn_cfg_msg rule_conn_cfg; + /**< Configure a rule connection. */ + struct nss_ipv6_mc_rule_create_msg mc_rule_create; + /**< Create a multicast rule. */ + struct nss_ipv6_conn_sync_many_msg conn_stats_many; + /**< Synchronize multiple connection statistics. */ + struct nss_ipv6_accel_mode_cfg_msg accel_mode_cfg; + /**< Configure acceleration mode. */ + struct nss_ipv6_inquiry_msg inquiry; + /**< Inquiry if a connection has been created. */ + struct nss_ipv6_dscp2pri_cfg_msg dscp2pri_cfg; + /**< Configure DSCP-to-priority mapping. */ + struct nss_ipv6_rps_hash_bitmap_cfg_msg rps_hash_bitmap; + /**< Configure rps_hash_bitmap. */ + } msg; /**< Message payload. */ +}; + +/** + * nss_ipv6_stats_notification + * Data for sending IPv6 statistics. + */ +struct nss_ipv6_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint64_t cmn_node_stats[NSS_STATS_NODE_MAX]; /**< Common node statistics. */ + uint64_t special_stats[NSS_IPV6_STATS_MAX]; /**< IPv6 special statistics. 
*/ + uint64_t exception_stats[NSS_IPV6_EXCEPTION_EVENT_MAX]; /**< IPv6 exception statistics. */ +}; + +/** + * Configured IPv6 connection number to use for calculating the total number of + * connections. + */ +extern int nss_ipv6_conn_cfg; + +#ifdef __KERNEL__ + +/** + * nss_ipv6_max_conn_count + * Returns the maximum number of IPv6 connections that the NSS acceleration + * engine supports. + * + * @return + * Number of connections that can be accelerated. + */ +extern int nss_ipv6_max_conn_count(void); + +/** + * Callback function for receiving IPv6 messages. + * + * @datatypes + * nss_ipv6_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_ipv6_msg_callback_t)(void *app_data, struct nss_ipv6_msg *msg); + +/** + * nss_ipv6_tx + * Transmits an IPv6 message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_ipv6_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_ipv6_tx(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_msg *msg); + +/** + * nss_ipv6_tx_sync + * Transmits a synchronous IPv6 message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_ipv6_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_ipv6_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_msg *msg); + +/** + * nss_ipv6_tx_with_size + * Transmits an IPv6 message with a specified size to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_ipv6_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * @param[in] size Actual size of this message. + * + * @return + * Status of the Tx operation. 
+ */ +extern nss_tx_status_t nss_ipv6_tx_with_size(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_msg *msg, uint32_t size); + +/** + * nss_ipv6_notify_register + * Registers a notifier callback to forward the IPv6 messages received from the NSS + * firmware to the registered subsystem. + * + * @datatypes + * nss_ipv6_msg_callback_t + * + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_ipv6_notify_register(nss_ipv6_msg_callback_t cb, void *app_data); + +/** + * nss_ipv6_notify_unregister + * Deregisters a notifier callback from the NSS. + * + * @return + * None. + * + * @dependencies + * The notifier callback must have been previously registered. + */ +extern void nss_ipv6_notify_unregister(void); + +/** + * nss_ipv6_conn_sync_many_notify_register + * Registers a notifier callback with the NSS for connection synchronization + * message responses. + * + * @datatypes + * nss_ipv6_msg_callback_t + * + * @param[in] cb Callback function for the message. + * + * @return + * None. + */ +extern void nss_ipv6_conn_sync_many_notify_register(nss_ipv6_msg_callback_t cb); + +/** + * nss_ipv6_conn_sync_many_notify_unregister + * Degisters a notifier callback from the NSS. + * + * @return + * None. + * + * @dependencies + * The notifier callback must have been previously registered. + */ +extern void nss_ipv6_conn_sync_many_notify_unregister(void); + +/** + * nss_ipv6_get_mgr + * Gets the NSS context that is managing the IPv6 processes. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_ipv6_get_mgr(void); + +/** + * nss_ipv6_msg_init + * Initializes IPv6-specific messages. + * + * @datatypes + * nss_ipv6_msg \n + * nss_ipv6_msg_callback_t + * + * @param[in,out] nim Pointer to the NSS interface message. + * @param[in] if_num NSS interface number. 
+ * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_ipv6_msg_init(struct nss_ipv6_msg *nim, uint16_t if_num, uint32_t type, uint32_t len, + nss_ipv6_msg_callback_t cb, void *app_data); + +/** + * nss_ipv6_register_handler + * Registers the IPv6 message handler. + * + * @return + * None. + */ +void nss_ipv6_register_handler(void); + +/** + * nss_ipv6_register_sysctl + * Registers the IPv6 system control table. + * + * @return + * None. + */ +void nss_ipv6_register_sysctl(void); + +/** + * nss_ipv6_unregister_sysctl + * Deregisters the IPv6 system control table. + * + * @return + * None. + * + * @dependencies + * The system control table must have been previously registered. + */ +void nss_ipv6_unregister_sysctl(void); + +/** + * nss_ipv6_update_conn_count + * Sets the maximum number of IPv6 connections. + * + * @param[in] ipv6_num_conn Maximum number. + * + * @return + * 0 -- Success + */ +int nss_ipv6_update_conn_count(int ipv6_num_conn); + +/** + * nss_ipv6_free_conn_tables + * Frees memory allocated for connection tables. + * + * @return + * None. + */ +void nss_ipv6_free_conn_tables(void); + +/** + * nss_ipv6_dscp_action_get + * Gets the action value of the DSCP. + * + * @param[in] dscp Value of the DSCP field. + * + * @return + * Action value of the DSCP field. + */ +enum nss_ipv6_dscp_map_actions nss_ipv6_dscp_action_get(uint8_t dscp); + +/* + * Logger APIs + */ + +/** + * nss_ipv6_log_tx_msg + * Logs an IPv6 message that is sent to the NSS firmware. + * + * @datatypes + * nss_ipv6_msg + * + * @param[in] nim Pointer to the NSS interface message. + * + * @return + * None. + */ +void nss_ipv6_log_tx_msg(struct nss_ipv6_msg *nim); + +/** + * nss_ipv6_log_rx_msg + * Logs an IPv6 message that is received from the NSS firmware. 
+ * + * @datatypes + * nss_ipv6_msg + * + * @param[in] nim Pointer to the NSS interface message. + * + * @return + * None. + */ +void nss_ipv6_log_rx_msg(struct nss_ipv6_msg *nim); + +/** + * nss_ipv6_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_ipv6_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_ipv6_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_ipv6_stats_unregister_notifier(struct notifier_block *nb); +#endif + +/** + * @} + */ + +#endif /* __NSS_IPV6_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv6_reasm.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv6_reasm.h new file mode 100644 index 000000000..806bf0041 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ipv6_reasm.h @@ -0,0 +1,92 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/** + * @file nss_ipv6_reasm.h + * NSS IPv6 reassembly interface definitions. + */ + +#ifndef __NSS_IPV6_REASM_H +#define __NSS_IPV6_REASM_H + +/** + * @addtogroup nss_ipv6_reasm_subsystem + * @{ + */ + +/** + * nss_ipv6_reasm_stats + * IPv6 reassembly node statistics. + */ +enum nss_ipv6_reasm_stats { + NSS_IPV6_REASM_STATS_ALLOC_FAILS, + /**< Number of fragment queue allocation failures. */ + NSS_IPV6_REASM_STATS_TIMEOUTS, + /**< Number of expired fragment queues. */ + NSS_IPV6_REASM_STATS_DISCARDS, + /**< Number of fragment queues discarded due to malformed fragments. */ + NSS_IPV6_REASM_STATS_MAX, + /**< Maximum message type. */ +}; + +/** + * nss_ipv6_reasm_stats_notification + * Data for sending IPv6 reassembly statistics. + */ +struct nss_ipv6_reasm_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint64_t cmn_node_stats[NSS_STATS_NODE_MAX]; /**< Common node statistics. */ + uint64_t ipv6_reasm_stats[NSS_IPV6_REASM_STATS_MAX]; /**< IPv6 reassembly statistics. */ +}; + +#ifdef __KERNEL__ + +/** + * nss_ipv6_reasm_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_ipv6_reasm_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_ipv6_reasm_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. 
+ */ +extern int nss_ipv6_reasm_stats_unregister_notifier(struct notifier_block *nb); +#endif + +/** + * @} + */ + +#endif /* __NSS_IPV6_REASM_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_l2tpv2.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_l2tpv2.h new file mode 100644 index 000000000..e36c8504f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_l2tpv2.h @@ -0,0 +1,327 @@ +/* + ************************************************************************** + * Copyright (c) 2015, 2017-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_l2tpv2.h + * NSS L2TPv2 interface definitions. + */ + +#ifndef _NSS_L2TP_V2_H_ +#define _NSS_L2TP_V2_H_ + +/** + * @addtogroup nss_l2tpv2_subsystem + * @{ + */ + +/** + * Maximum number of supported L2TPv2 sessions. + */ +#define NSS_MAX_L2TPV2_DYNAMIC_INTERFACES 4 + +/** + * nss_l2tpv2_metadata_types + * Message types for L2TPv2 requests and responses. 
+ */ +enum nss_l2tpv2_metadata_types { + NSS_L2TPV2_MSG_SESSION_CREATE, + NSS_L2TPV2_MSG_SESSION_DESTROY, + NSS_L2TPV2_MSG_SYNC_STATS, + NSS_L2TPV2_MSG_BIND_IPSEC_IF, + NSS_L2TPV2_MSG_MAX +}; + +/** + * nss_l2tpv2_stats_session + * L2TPv2 debug statistics. + */ +enum nss_l2tpv2_stats_session { + NSS_L2TPV2_STATS_SESSION_RX_PPP_LCP_PKTS, /**< Number of PPP LCP packets received. */ + NSS_L2TPV2_STATS_SESSION_RX_EXP_DATA_PKTS, /**< Number of Rx exceptioned packets. */ + NSS_L2TPV2_STATS_SESSION_ENCAP_PBUF_ALLOC_FAIL_PKTS, /**< Number of times packet buffer allocation failed during encapsulation. */ + NSS_L2TPV2_STATS_SESSION_DECAP_PBUF_ALLOC_FAIL_PKTS, /*< Number of times packet buffer allocation failed during decapsulation. */ + NSS_L2TPV2_STATS_SESSION_DECAP_L2TPOIPSEC_SRC_ERR, + /**< Number of packets dropped due to source error in L2TP over IPsec flow in decapsulation. */ + NSS_L2TPV2_STATS_SESSION_MAX /**< Maximum message type. */ +}; + +/** + * nss_l2tpv2_stats_notification + * L2TPv2 statistics structure. + */ +struct nss_l2tpv2_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ + uint64_t stats[NSS_L2TPV2_STATS_SESSION_MAX]; /**< L2TPv2 statistics. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ +/** + * nss_l2tpv2_session_create_msg + * Payload for creating an L2TPv2 session. + */ +struct nss_l2tpv2_session_create_msg { + uint16_t local_tunnel_id; /**< Local identifier for the control connection. */ + uint16_t local_session_id; /**< Local identifier of session inside a tunnel. */ + uint16_t peer_tunnel_id; /**< Remote identifier for the control connection. */ + uint16_t peer_session_id; /**< Remote identifier of session inside a tunnel. */ + + uint32_t sip; /**< Local tunnel endpoint IP address. */ + uint32_t dip; /**< Remote tunnel endpoint IP address. */ + uint32_t reorder_timeout; /**< Reorder timeout for out of order packets */ + + uint16_t sport; /**< Local source port. 
*/ + uint16_t dport; /**< Remote source port. */ + + uint8_t recv_seq; /**< Sequence number received. */ + uint8_t oip_ttl; /**< Maximum time-to-live value for outer IP packet. */ + uint8_t udp_csum; /**< UDP checksum. */ + uint8_t reserved; /**< Alignment padding. */ +}; + +/** + * nss_l2tpv2_session_destroy_msg + * Payload for deletion an L2TPv2 session. + */ +struct nss_l2tpv2_session_destroy_msg { + uint16_t local_tunnel_id; /**< ID of the local tunnel. */ + uint16_t local_session_id; /**< ID of the local session. */ +}; + +/** + * nss_l2tpv2_bind_ipsec_if_msg + * Message for binding the IPsec interface with L2TP. + * + * Message for configuring the L2TP session with an + * IPsec inner interface number. This is used when + * L2TP tunnel is enabled with IPsec. + */ +struct nss_l2tpv2_bind_ipsec_if_msg { + uint32_t ipsec_ifnum; /**< Inner IPSec interface number. */ +}; + +/** + * nss_l2tpv2_sync_session_stats_msg + * Message information for L2TPv2 synchronization statistics. + */ +struct nss_l2tpv2_sync_session_stats_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t rx_errors; /**< Not used. Reserved for backward compatibility. */ + uint32_t rx_seq_discards; + /**< Rx packets discarded because of a sequence number check. */ + uint32_t rx_oos_packets; /**< Number of out of sequence packets received. */ + uint32_t tx_errors; /**< Not used. Reserved for backward compatibility. */ + uint32_t tx_dropped; /**< Tx packets dropped because of encapsulation failure or next node's queue is full. */ + + /** + * Debug statistics for L2tp v2. + */ + struct { + uint32_t rx_ppp_lcp_pkts; + /**< Number of PPP LCP packets received. */ + uint32_t rx_exception_data_pkts; + /**< Data packet exceptions sent to the host. */ + uint32_t encap_pbuf_alloc_fail; + /**< Buffer allocation failure during encapsulation. */ + uint32_t decap_pbuf_alloc_fail; + /**< Buffer allocation failure during decapsulation. 
*/ + uint32_t decap_l2tpoipsec_src_error; + /**< Packets dropped due to the wrong source for the L2TPoIPsec flow. */ + } debug_stats; /**< Debug statistics object for l2tp v2. */ +}; + +/** + * nss_l2tpv2_msg + * Data for sending and receiving L2TPv2 messages. + */ +struct nss_l2tpv2_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of an L2TPv2 message. + */ + union { + struct nss_l2tpv2_session_create_msg session_create_msg; + /**< Session create message. */ + struct nss_l2tpv2_session_destroy_msg session_destroy_msg; + /**< Session delete message. */ + struct nss_l2tpv2_sync_session_stats_msg stats; + /**< Session statistics. */ + struct nss_l2tpv2_bind_ipsec_if_msg bind_ipsec_if_msg; + /**< Bind IPsec interface message. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving L2TPv2 messages. + * + * @datatypes + * nss_l2tpv2_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_l2tpv2_msg_callback_t)(void *app_data, struct nss_l2tpv2_msg *msg); + +/** + * nss_l2tpv2_tx + * Sends L2TPv2 messages to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_l2tpv2_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_l2tpv2_tx(struct nss_ctx_instance *nss_ctx, struct nss_l2tpv2_msg *msg); + +/** + * nss_l2tpv2_get_context. + * Gets the L2TPv2 context used in L2TPv2 messages sent to the NSS. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_l2tpv2_get_context(void); + +/** + * Callback function for receiving L2TPv2 tunnel data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. 
+ * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_l2tpv2_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_register_l2tpv2_if + * Registers the L2TPv2 tunnel interface with the NSS for sending and + * receiving messages. + * + * @datatypes + * nss_l2tpv2_callback_t \n + * nss_l2tpv2_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] l2tpv2_callback Callback for the L2TP tunnel data. + * @param[in] msg_callback Callback for the L2TP tunnel message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features SKB types supported by this interface. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_register_l2tpv2_if(uint32_t if_num, nss_l2tpv2_callback_t l2tpv2_callback, + nss_l2tpv2_msg_callback_t msg_callback, struct net_device *netdev, uint32_t features); + +/** + * nss_unregister_l2tpv2_if + * Deregisters the L2TPv2 tunnel interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The tunnel interface must have been previously registered. + */ +extern void nss_unregister_l2tpv2_if(uint32_t if_num); + +/** + * nss_l2tpv2_msg_init + * Initializes an L2TPv2 message. + * + * @datatypes + * nss_l2tpv2_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num Interface number + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_l2tpv2_msg_init(struct nss_l2tpv2_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** + * nss_l2tpv2_register_handler + * Registers the L2TPv2 interface with the NSS debug statistics handler. + * + * @return + * None. 
+ */ +extern void nss_l2tpv2_register_handler(void); + +/** + * nss_l2tpv2_session_debug_stats_get + * Gets L2TPv2 NSS session debug statistics. + * + * @param[out] stats_mem Pointer to the memory address, which must be large + * enough to hold all the statistics. + * + * @return + * None. + */ +extern void nss_l2tpv2_session_debug_stats_get(void *stats_mem); + +/** + * nss_l2tpv2_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_l2tpv2_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_l2tpv2_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_l2tpv2_stats_unregister_notifier(struct notifier_block *nb); +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* _NSS_L2TP_V2_H_ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_lag.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_lag.h new file mode 100644 index 000000000..4e7e7ea25 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_lag.h @@ -0,0 +1,211 @@ +/* + ************************************************************************** + * Copyright (c) 2014, 2015, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_lag.h + * NSS LAG APIs + */ + +/** + * @addtogroup nss_lag_subsystem + * @{ + */ + +/** + * nss_lag_metadata_types + * Types of LAG metadata. + */ +enum nss_lag_metadata_types { + NSS_TX_METADATA_LAG_STATE_CHANGE = 0, + NSS_TX_METADATA_LAG_MAX, +}; + +/** + * nss_lag_state_change_ev + * LAG change events. + */ +enum nss_lag_state_change_ev { + NSS_LAG_RELEASE = 0, + NSS_LAG_ENSLAVE = 1, +}; + +/** + * nss_lag_error_types + * LAG return values. + */ +enum nss_lag_error_types { + NSS_LAG_ERROR_EINTERFACE = 1, + NSS_LAG_ERROR_EMSG = 2, +}; + +/** + * nss_lag_state_change + * Link aggregation (LAG) state changes. + */ +struct nss_lag_state_change { + uint32_t lagid; /**< ID of the link aggregation group. */ + uint32_t interface; + /**< Physical interface on which the state change occurred. */ + enum nss_lag_state_change_ev event; /**< Type of state change event. */ +}; + +/** + * nss_lag_msg + * Data for sending and receiving LAG messages. + */ +struct nss_lag_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a LAG message. + */ + union { + struct nss_lag_state_change state; + /**< State change message. */ + } msg; /**< Message payload for LAG configuration messages exchanged with NSS core. */ +}; + +/** + * nss_lag_tx + * Sends a LAG message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_lag_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. 
+ */ +extern nss_tx_status_t nss_lag_tx(struct nss_ctx_instance *nss_ctx, struct nss_lag_msg *msg); + +/** + * Callback function for receiving LAG data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] dev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_lag_callback_t)(struct net_device *dev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for receiving a LAG message. + * + * @datatypes + * nss_lag_msg + * + * @param[in] ctx Pointer to the application context for this message. + * @param[in] nm Pointer to the message data. + */ +typedef void (*nss_lag_msg_callback_t)(void *ctx, struct nss_lag_msg *nm); + +/** + * Callback function for receiving a LAG event. + * + * @datatypes + * nss_lag_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_lag_event_callback_t)(void *app_data, struct nss_lag_msg *msg); + +/** + * nss_register_lag_if + * Registers the LAG interface with the NSS for sending and receiving data + * and messages. + * + * @datatypes + * nss_lag_callback_t \n + * nss_lag_event_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] lag_cb Callback to receive LAG data. + * @param[in] lag_ev_cb Callback to receive LAG events. + * @param[in] netdev Pointer to the associated network device. + * + * @return + * Pointer to the NSS core context. + */ +extern void *nss_register_lag_if(uint32_t if_num, + nss_lag_callback_t lag_cb, + nss_lag_event_callback_t lag_ev_cb, + struct net_device *netdev); + +/** + * nss_unregister_lag_if + * Deregisters the LAG interface from the NSS. + * + * @param[in] if_num LAG interface number + * + * @return + * None. + * + * @dependencies + * The LAG interface must have been previously registered. 
+ */ +extern void nss_unregister_lag_if(uint32_t if_num); + +/** + * nss_lag_msg_init + * Initializes a LAG message. + * + * @datatypes + * nss_lag_msg \n + * nss_lag_msg_callback_t + * + * @param[in,out] nlm Pointer to the message. + * @param[in] lag_num LAG interface number. + * @param[in] type Type of buffer. + * @param[in] len Length of the buffer. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_lag_msg_init(struct nss_lag_msg *nlm, uint16_t lag_num, uint32_t type, uint32_t len, + nss_lag_msg_callback_t cb, void *app_data); + +/** + * nss_lag_tx_slave_state + * Sends LAG slave state. + * + * @datatypes + * nss_lag_state_change_ev + * + * @param[in] lagid LAG Group ID. + * @param[in] slave_ifnum Slave interface number. + * @param[in] slave_state Slave state. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_lag_tx_slave_state(uint16_t lagid, + int32_t slave_ifnum, + enum nss_lag_state_change_ev slave_state); + +/** + * @} + */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_lso_rx.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_lso_rx.h new file mode 100644 index 000000000..e8f4babae --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_lso_rx.h @@ -0,0 +1,88 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_lso_rx.h + * NSS driver LSO (Large Send Offload) Rx header file. + */ + +#ifndef __NSS_LSO_RX_H +#define __NSS_LSO_RX_H + +/** + * @addtogroup nss_lso_rx_subsystem + * @{ + */ + +/** + * nss_lso_rx_stats_types + * LSO Rx driver statistics. + */ +enum nss_lso_rx_stats_types { + NSS_LSO_RX_STATS_TX_DROPPED, /**< Number of packets dropped because transmit queue is full. */ + NSS_LSO_RX_STATS_DROPPED, /**< Number of packets dropped because of node internal errors. */ + NSS_LSO_RX_STATS_PBUF_ALLOC_FAIL, /**< Number of pbuf allocation failures. */ + NSS_LSO_RX_STATS_PBUF_REFERENCE_FAIL, /**< Number of pbuf reference failures. */ + NSS_LSO_RX_STATS_MAX, /**< Maximum message type. */ +}; + +/** + * nss_lso_rx_stats_notification + * Data for sending LSO Rx statistics. + */ +struct nss_lso_rx_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint64_t cmn_node_stats[NSS_STATS_NODE_MAX]; /**< Common node statistics. */ + uint64_t node_stats[NSS_LSO_RX_STATS_MAX]; /**< LSO Rx special statistics. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ +/** + * nss_lso_rx_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_lso_rx_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_lso_rx_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. 
+ */ +extern int nss_lso_rx_stats_unregister_notifier(struct notifier_block *nb); +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_LSO_RX_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_map_t.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_map_t.h new file mode 100644 index 000000000..ebfd62f4f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_map_t.h @@ -0,0 +1,382 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_map_t.h + * NSS MAP-T interface definitions. + */ + +#ifndef _NSS_MAP_T_H_ +#define _NSS_MAP_T_H_ + +#ifndef DOXYGEN_SHOULD_SKIP_THIS +#include "nss_dynamic_interface.h" +#endif + +/** + * @addtogroup nss_map_t_subsystem + * @{ + */ + +/** + * Maximum number of supported MAP-T instances. + */ +#define NSS_MAX_MAP_T_DYNAMIC_INTERFACES 4 + +#define NSS_MAPT_MDATA_FLAG_DF_BIT (1 << 0) + +/* + * mapt meta data + */ +struct nss_map_t_mdata { + uint16_t flags; + uint16_t res[6]; +}; + +/** + * nss_map_t_msg_types + * Message types for MAP-T requests and responses. 
+ */ +enum nss_map_t_msg_types { + NSS_MAP_T_MSG_INSTANCE_RULE_CONFIGURE, + NSS_MAP_T_MSG_INSTANCE_RULE_DECONFIGURE, + NSS_MAP_T_MSG_SYNC_STATS, + NSS_MAP_T_MSG_MAX +}; + +/** + * nss_map_t_stats_instance + * MAP-T debug error types. + */ +enum nss_map_t_stats_instance { + NSS_MAP_T_STATS_V4_TO_V6_PBUF_EXCEPTION, + NSS_MAP_T_STATS_V4_TO_V6_PBUF_NO_MATCHING_RULE, + NSS_MAP_T_STATS_V4_TO_V6_PBUF_NOT_TCP_OR_UDP, + NSS_MAP_T_STATS_V4_TO_V6_RULE_ERR_LOCAL_PSID, + NSS_MAP_T_STATS_V4_TO_V6_RULE_ERR_LOCAL_IPV6, + NSS_MAP_T_STATS_V4_TO_V6_RULE_ERR_REMOTE_PSID, + NSS_MAP_T_STATS_V4_TO_V6_RULE_ERR_REMOTE_EA_BITS, + NSS_MAP_T_STATS_V4_TO_V6_RULE_ERR_REMOTE_IPV6, + NSS_MAP_T_STATS_V6_TO_V4_PBUF_EXCEPTION, + NSS_MAP_T_STATS_V6_TO_V4_PBUF_NO_MATCHING_RULE, + NSS_MAP_T_STATS_V6_TO_V4_PBUF_NOT_TCP_OR_UDP, + NSS_MAP_T_STATS_V6_TO_V4_RULE_ERR_LOCAL_IPV4, + NSS_MAP_T_STATS_V6_TO_V4_RULE_ERR_REMOTE_IPV4, + NSS_MAP_T_STATS_MAX +}; + +/** + * nss_map_t_stats_notification + * MAP-T statistics structure. + */ +struct nss_map_t_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ + enum nss_dynamic_interface_type if_type; /**< Dynamic interface type. */ + uint64_t stats[NSS_MAP_T_STATS_MAX]; /**< MAP-T statistics. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ +/** + * nss_map_t_instance_rule_config_msg + * Message information for configuring a MAP-T instance. + */ +struct nss_map_t_instance_rule_config_msg { + uint32_t rule_num; /**< Rule sequence number */ + uint32_t total_rules; /**< Total number of NAT64 rules configured. */ + uint32_t local_ipv6_prefix_len; /**< Local IPv6 prefix length. */ + uint32_t local_ipv4_prefix; /**< Local IPv4 prefix. */ + uint32_t local_ipv4_prefix_len; /**< Local IPv4 prefix length. */ + uint32_t local_ea_len; /**< Local EA bits length. */ + uint32_t local_psid_offset; /**< Local PSID offset. */ + + uint32_t reserve_a; /**< Reserved for backward compatibility. 
*/ + + uint32_t remote_ipv6_prefix_len; /**< Remote IPv6 prefix length. */ + uint32_t remote_ipv4_prefix; /**< Remote IPv4 prefix. */ + uint32_t remote_ipv4_prefix_len; /**< Remote IPv4 prefix length. */ + uint32_t remote_ea_len; /**< Remote EA bits length. */ + uint32_t remote_psid_offset; /**< Remote PSID offset. */ + + uint32_t local_map_style; /**< Local MAP style. */ + uint32_t remote_map_style; /**< Remote MAP style. */ + + uint32_t sibling_if; /**< Sibling interface number. */ + + uint8_t local_ipv6_prefix[16]; /**< Local IPv6 prefix. */ + uint8_t reserve_b[16]; /**< Reserved for backward compatibility. */ + uint8_t remote_ipv6_prefix[16]; /**< Remote IPv6 prefix. */ + + uint8_t valid_rule; /**< MAP-T rule validity. */ + uint8_t flags; /**< MAP-T flags. */ + uint8_t reserved[2]; /**< Reserved for byte alignment. */ +}; + +/** + * nss_map_t_instance_rule_deconfig_msg + * Message information for deleting a MAP-T instance. + */ +struct nss_map_t_instance_rule_deconfig_msg { + int32_t if_number; /**< Interface number. */ +}; + +/** + * nss_map_t_sync_stats_msg + * Message information for MAP-T synchronization statistics. + */ +struct nss_map_t_sync_stats_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t tx_dropped; /**< Dropped Tx packets. */ + + /** + * Debug statistics for MAP-T. + */ + union { + + /** + * IPv4 to IPv6 path debug statistics. + */ + struct { + uint32_t exception_pkts; + /**< Number of packets exceptioned to host in IPv4 to IPv6 fast path. */ + uint32_t no_matching_rule; + /**< No matching of any rule. */ + uint32_t not_tcp_or_udp; + /**< Number of packets which are neither UDP nor TCP. */ + uint32_t rule_err_local_psid; + /**< Calculate the local PSID error. */ + uint32_t rule_err_local_ipv6; + /**< Calculate local IPv6 error. */ + uint32_t rule_err_remote_psid; + /**< Calculate remote PSID error. */ + uint32_t rule_err_remote_ea_bits; + /**< Calculate remote EA bits error. 
*/ + uint32_t rule_err_remote_ipv6; + /**< Calculate remote IPv6 error. */ + } v4_to_v6; /**< IPv4 to IPv6 debug statistics object. */ + + /** + * IPv6 to IPv4 path debug statistics. + */ + struct { + uint32_t exception_pkts; + /**< Number of packets exception to host in IPv6 to IPv4 fast path. */ + uint32_t no_matching_rule; + /**< No matching of any rule. */ + uint32_t not_tcp_or_udp; + /**< Number of packets which are neither UDP nor TCP. */ + uint32_t rule_err_local_ipv4; + /**< Calculate local IPv4 error. */ + uint32_t rule_err_remote_ipv4; + /**< Calculate remote IPv4 error. */ + } v6_to_v4; /**< IPv6 to IPv4 debug statistics object */ + + } debug_stats; /**< Payload of debug statistics. */ +}; + +/** + * nss_map_t_msg + * Data for sending and receiving MAP-T messages. + */ +struct nss_map_t_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a MAP-T message. + */ + union { + struct nss_map_t_instance_rule_config_msg create_msg; + /**< Create message. */ + struct nss_map_t_instance_rule_deconfig_msg destroy_msg; + /**< Destroy message. */ + struct nss_map_t_sync_stats_msg stats; + /**< Statistics message to host. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving MAP-T messages. + * + * @datatypes + * nss_map_t_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_map_t_msg_callback_t)(void *app_data, struct nss_map_t_msg *msg); + +/** + * nss_map_t_tx + * Sends a MAP-T message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_map_t_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_map_t_tx(struct nss_ctx_instance *nss_ctx, struct nss_map_t_msg *msg); + +/** + * nss_map_t_tx_sync + * Sends a MAP-T message synchronously to the NSS. 
+ * + * @datatypes + * nss_ctx_instance \n + * nss_map_t_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_map_t_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_map_t_msg *msg); + +/** + * nss_map_t_get_context + * Gets the MAP-T context used in nss_map_t_tx. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_map_t_get_context(void); + +/** + * Callback function for receiving MAP-T tunnel data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_map_t_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_map_t_register_if + * Registers a MAP-T interface with the NSS for sending and receiving tunnel messages. + * + * @datatypes + * nss_map_t_callback_t \n + * nss_map_t_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] type NSS interface type. + * @param[in] map_t_callback Callback for the MAP-T data. + * @param[in] msg_callback Callback for the MAP-T message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_map_t_register_if(uint32_t if_num, uint32_t type, nss_map_t_callback_t map_t_callback, + nss_map_t_msg_callback_t msg_callback, struct net_device *netdev, uint32_t features); + +/** + * nss_map_t_unregister_if + * Deregisters a MAP-T tunnel interface from the NSS. + * + * @param[in] if_num NSS interface number + * + * @return + * None. 
+ */ +extern void nss_map_t_unregister_if(uint32_t if_num); + +/** + * nss_map_t_msg_init + * Initializes a MAP-T message. + * + * @datatypes + * nss_map_t_msg_init + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num Interface number + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_map_t_msg_init(struct nss_map_t_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** + * nss_map_t_register_handler + * Registers the MAP-T debug statistics handler with the NSS. + * + * @return + * None. + */ +extern void nss_map_t_register_handler(void); + +/** + * nss_map_t_instance_debug_stats_get + * Gets debug statistics for a MAP-T instance. + * + * @param[out] stats_mem Pointer to the memory address, which must be large enough to + hold all the statistics. + * + * @return + * None. + */ +extern void nss_map_t_instance_debug_stats_get(void *stats_mem); + +/** + * nss_map_t_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_map_t_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_map_t_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. 
+ */ +extern int nss_map_t_stats_unregister_notifier(struct notifier_block *nb); +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* _NSS_MAP_T_H_ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_match.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_match.h new file mode 100644 index 000000000..ae1e30855 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_match.h @@ -0,0 +1,296 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_match.h + * NSS match interface definitions. + */ + +#ifndef _NSS_MATCH_H_ +#define _NSS_MATCH_H_ + + +/** + * @addtogroup nss_match_subsystem + * @{ + */ + +/** + * Maximum number of supported match instances. + */ +#define NSS_MATCH_INSTANCE_MAX 4 + +/** + * Maximum number of rules supported per instance. + */ +#define NSS_MATCH_INSTANCE_RULE_MAX 32 + +/** + * Maximum number of masksets. + */ +#define NSS_MATCH_MASK_MAX 2 + +/** + * Maximum number of words per maskset. + */ +#define NSS_MATCH_MASK_WORDS_MAX 4 + +/** + * nss_match_error_type + * Match message error types. 
+ */ +typedef enum nss_match_error_type { + NSS_MATCH_SUCCESS, /**< No error. */ + NSS_MATCH_ERROR_UNKNOWN_MSG, /**< Message unknown. */ + NSS_MATCH_ERROR_DSCP_OUTOFBOUND, /**< DSCP out of bound. */ + NSS_MATCH_ERROR_OUTER_8021P_OUTOFBOUND, /**< Outer 802.1p out of bound. */ + NSS_MATCH_ERROR_INNER_8021P_OUTOFBOUND, /**< Inner 802.1p out of bound. */ + NSS_MATCH_ERROR_RULE_ID_OUTOFBOUND, /**< Rule ID is out of bound. */ + NSS_MATCH_ERROR_ACTION_TYPE_INVALID, /**< Invalid action type. */ + NSS_MATCH_ERROR_RULE_EXISTS, /**< Rule ID already in use. */ + NSS_MATCH_ERROR_RULE_DOES_NOT_EXIST, /**< Rule does not exist. */ + NSS_MATCH_ERROR_INSTANCE_CONFIGURED, /**< Error in instance configuration. */ + NSS_MATCH_ERROR_PROFILE_CONFIG_INVALID, /**< Invalid profile configuration message. */ + NSS_MATCH_ERROR_DB_INIT_FAILED, /**< Database initialization failed. */ + NSS_MATCH_ERROR_TABLE_ID_OUTOFBOUND, /**< Table ID is out of bound. */ + NSS_MATCH_ERROR_RULE_ADD, /**< Error in adding rule. */ + NSS_MATCH_ERROR_RULE_DELETE, /**< Error in deleting rule. */ + NSS_MATCH_ERROR_TABLE_ADD, /**< Error in adding table. */ + NSS_MATCH_ERROR_TABLE_DELETE, /**< Error in deleting table. */ + NSS_MATCH_ERROR_MASK_ID_OUTOFBOUND, /**< Mask ID is out of bound. */ + NSS_MATCH_ERROR_IFNUM_INVALID, /**< Next node interface number is invalid. */ + NSS_MATCH_ERROR_MAX, /**< Maximum error. */ +} nss_match_status_t; + +/** + * nss_match_action_type + * Bit positions for possible actions that can be taken. + */ +enum nss_match_action_type { + NSS_MATCH_ACTION_NONE, /**< No action. */ + NSS_MATCH_ACTION_SETPRIO = 1, /**< Set given priority to the packet. */ + NSS_MATCH_ACTION_FORWARD = 2, /**< Forward the packet to a given node. */ + NSS_MATCH_ACTION_DROP = 4, /**< Drop the packet. */ + NSS_MATCH_ACTION_MAX, /**< Maximum action type. */ +}; + +/** + * nss_match_profile_type + * Different profile types. 
+ */ +enum nss_match_profile_type { + NSS_MATCH_PROFILE_TYPE_NONE, + NSS_MATCH_PROFILE_TYPE_VOW, /**< Matches on interface number/DSCP/802.1P. */ + NSS_MATCH_PROFILE_TYPE_L2, /**< Matches on interface number/destination MAC/source MAC/Ether type. */ + NSS_MATCH_PROFILE_TYPE_MAX, /**< Maximum profile type. */ +}; + +/** + * nss_match_msg_types. + * Message types for match requests and responses. + */ +enum nss_match_msg_types { + NSS_MATCH_INSTANCE_NONE, /**< Invalid message. */ + NSS_MATCH_TABLE_CONFIGURE_MSG, /**< Instance configure. */ + NSS_MATCH_ADD_VOW_RULE_MSG, /**< Insert VoW rule. */ + NSS_MATCH_ADD_L2_RULE_MSG, /**< Insert l2 rule. */ + NSS_MATCH_DELETE_VOW_RULE_MSG, /**< Delete VoW rule. */ + NSS_MATCH_DELETE_L2_RULE_MSG, /**< Delete l2 rule. */ + NSS_MATCH_STATS_SYNC, /**< Instance statistics. */ + NSS_MATCH_MSG_MAX, /**< Maximum instance messages. */ +}; + +/** + * nss_match_stats_sync + * Synchronization message structure. + */ +struct nss_match_stats_sync { + struct nss_cmn_node_stats p_stats; /**< Pnode statistics. */ + uint32_t hit_count[NSS_MATCH_INSTANCE_RULE_MAX]; /**< Exception events. */ +}; + +/** + * nss_match_profile_configure_msg + * Message for configuring the profile for a match instance. + */ +struct nss_match_profile_configure_msg { + uint32_t profile_type; /**< Profile type. */ + uint32_t valid_mask_flag; /**< Valid maskset flag. */ + uint32_t maskset[NSS_MATCH_MASK_MAX][NSS_MATCH_MASK_WORDS_MAX]; /**< Maskset. */ +}; + +/* + * nss_match_rule_action + * Message information for action. + */ +struct nss_match_rule_action { + uint32_t action_flag; /**< Action bit map. */ + uint32_t forward_ifnum; /**< Next node interface number. */ + uint16_t setprio; /**< Priority number to be set in packet. */ + uint16_t reserved; /**< Reserved 2 bytes. */ +}; + +/** + * nss_match_rule_vow_msg + * Rule message for VoW profile. + */ +struct nss_match_rule_vow_msg { + uint16_t rule_id; /**< Rule ID for the rule. 
*/ + uint16_t mask_id; /**< Mask number used for the rule. */ + struct nss_match_rule_action action; /**< Action related with the rule. */ + uint32_t if_num; /**< Interface number. */ + uint8_t dscp; /**< DSCP. */ + uint8_t outer_8021p; /**< Outer 802.1p. */ + uint8_t inner_8021p; /**< Inner 802.1p. */ + uint8_t reserved; /**< Reserved byte. */ +}; + +/** + * nss_match_rule_l2_msg + * Rule message for L2 profile. + */ +struct nss_match_rule_l2_msg { + uint16_t rule_id; /**< Rule ID for the rule. */ + uint16_t mask_id; /**< Mask number used for the rule. */ + struct nss_match_rule_action action; /**< Action related with the rule. */ + uint32_t if_num; /**< Interface number. */ + uint16_t dmac[3]; /**< Destination MAC address. */ + uint16_t smac[3]; /**< Source MAC address. */ + uint16_t ethertype; /**< Ethernet type. */ +}; + +/** + * nss_match_msg + * Data for sending and receiving match messages. + */ +struct nss_match_msg { + struct nss_cmn_msg cm; /**< Message header. */ + + /** + * Payload of a match message. + */ + union { + struct nss_match_profile_configure_msg configure_msg; /**< Configure message. */ + struct nss_match_rule_vow_msg vow_rule; /**< Insertion or deletion message for VoW profile. */ + struct nss_match_rule_l2_msg l2_rule; /**< Insertion or deletion message for l2 profile. */ + struct nss_match_stats_sync stats; /**< Instance synchronization statistics. */ + } msg; /**< Message payload. */ +}; + +/** + * nss_match_msg_tx_sync + * Sends proxy match messages to the NSS. + * + * Do not call this function from a softirq or interrupt because it + * might sleep if the NSS firmware is busy serving another host thread. + * + * @datatypes + * nss_ctx_instance \n + * nss_match_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. 
+ */ +nss_tx_status_t nss_match_msg_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_match_msg *msg); + +/** + * Callback function for receiving match messages. + * + * @datatypes + * nss_match_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_match_msg_sync_callback_t)(void *app_data, struct nss_match_msg *msg); + +/** + * nss_match_get_context + * Returns NSS context of match. + * + * @datatypes + * nss_ctx_instance + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_match_get_context(void); + +/** + * nss_match_register_instance + * Registers match dynamic node to NSS. + * + * @datatypes + * nss_match_msg_sync_callback_t + * + * @param[in] if_num Interface number of match instance. + * @param[in] notify_cb Notify callback function for the message. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_match_register_instance(int if_num, nss_match_msg_sync_callback_t notify_cb); + +/** + * nss_match_unregister_instance + * Unregisters match dynamic node to NSS. + * + * @param[in] if_num Interface number of match instance. + * + * @return + * True if successful, else false. + */ +extern bool nss_match_unregister_instance(int if_num); + +/** + * nss_match_msg_init + * Initializes a match message. + * + * @datatypes + * nss_match_msg \n + * nss_match_msg_sync_callback_t + * + * @param[in,out] nmm Pointer to the message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. 
+ */ +extern void nss_match_msg_init(struct nss_match_msg *nmm, uint16_t if_num, uint32_t type, uint32_t len, + nss_match_msg_sync_callback_t cb, void *app_data); + +/** + * nss_match_init + * Initializes match. + * + * @return + * None. + */ +extern void nss_match_init(void); +/** + * @} + */ + +#endif /* _NSS_MATCH_H_ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_mirror.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_mirror.h new file mode 100644 index 000000000..39853e5ce --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_mirror.h @@ -0,0 +1,317 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_mirror.h + * NSS mirror interface definitions. + */ + +#ifndef _NSS_MIRROR_H_ +#define _NSS_MIRROR_H_ + +/** + * @addtogroup nss_mirror_subsystem + * @{ + */ + +/** + * Maximum number of supported mirror interfaces. + */ +#define NSS_MAX_MIRROR_DYNAMIC_INTERFACES 8 + +/** + * nss_mirror_msg_types + * Message types for mirror interface requests and responses. 
+ */ +enum nss_mirror_msg_types { + NSS_MIRROR_MSG_CONFIGURE, /**< Configure message type. */ + NSS_MIRROR_MSG_ENABLE, /**< Enable message type. */ + NSS_MIRROR_MSG_DISABLE, /**< Disable message type. */ + NSS_MIRROR_MSG_SET_NEXTHOP, /**< Set nexthop message type. */ + NSS_MIRROR_MSG_RESET_NEXTHOP, /**< Reset nexthop message type. */ + NSS_MIRROR_MSG_SYNC_STATS, /**< Statistics synchronization message type. */ + NSS_MIRROR_MSG_MAX /**< Maximum message type. */ +}; + +/** + * nss_mirror_pkt_clone_point + * Clone point to use for mirroring the packet. + */ +enum nss_mirror_pkt_clone_point { + NSS_MIRROR_PKT_CLONE_POINT_DEFAULT = 1, /**< Clone the packet from the start. */ + NSS_MIRROR_PKT_CLONE_POINT_BEFORE_PACKET_START, /**< Clone n-bytes before packet start. */ + NSS_MIRROR_PKT_CLONE_POINT_AFTER_PACKET_START, /**< Clone n-bytes after packet start. */ + NSS_MIRROR_PKT_CLONE_POINT_MAX +}; + +/** + * nss_mirror_error_type + * Error types for mirror responses to messages from the host. + */ +enum nss_mirror_error_type { + NSS_MIRROR_ERROR_TYPE_NONE, /**< No error. */ + NSS_MIRROR_ERROR_TYPE_NO_MEMORY, /**< No memory to copy. */ + NSS_MIRROR_ERROR_TYPE_TX_FAILURE, /**< Transmit failure. */ + NSS_MIRROR_ERROR_TYPE_BAD_PARAM, /**< Bad parameter. */ + NSS_MIRROR_ERROR_TYPE_BAD_CLONE_POINT, /**< Bad packet clone point. */ + NSS_MIRROR_ERROR_TYPE_INSTANCE_CONFIGURED, /**< Instance already active. */ + NSS_MIRROR_ERROR_TYPE_INSTANCE_DISABLED, /**< Instance already disabled. */ + NSS_MIRROR_ERROR_TYPE_BAD_NEXTHOP, /**< Incorrect nexthop interface. */ + NSS_MIRROR_ERROR_TYPE_NEXTHOP_CONFIGURED, /**< Nexthop already interface. */ + NSS_MIRROR_ERROR_TYPE_NEXTHOP_RESET, /**< Nexthop already reset. */ + NSS_MIRROR_ERROR_TYPE_UNKNOWN_MSG, /**< Unknown message. */ + NSS_MIRROR_ERROR_TYPE_MAX, /**< Maximum message type. */ +}; + +/** + * nss_mirror_stats + * Mirror interface debug statistics. 
+ */ +enum nss_mirror_stats { + NSS_MIRROR_STATS_PKTS, /**< Number of packets exceptioned to host. */ + NSS_MIRROR_STATS_BYTES, /**< Number of bytes exceptioned to host. */ + NSS_MIRROR_STATS_TX_SEND_FAIL, /**< Transmit send failures. */ + NSS_MIRROR_STATS_DEST_LOOKUP_FAIL, /**< Destination lookup failures. */ + NSS_MIRROR_STATS_MEM_ALLOC_FAIL, /**< Memory allocation failures. */ + NSS_MIRROR_STATS_COPY_FAIL, /**< Copy failures. */ + NSS_MIRROR_STATS_MAX /**< Maximum statistics count. */ +}; + +/** + * nss_mirror_configure_msg + * Mirror interface configuration information. + */ +struct nss_mirror_configure_msg { + uint32_t pkt_clone_point; /**< Point in the packet to copy from. */ + uint16_t pkt_clone_size; /**< Number of bytes to copy. */ + uint16_t pkt_clone_offset; /**< Copy offset. */ +}; + +/** + * nss_mirror_set_nexthop_msg + * Mirror interface set nexthop information. + */ +struct nss_mirror_set_nexthop_msg { + uint32_t if_num; /**< Nexthop interface number. */ +}; + +/** + * nss_mirror_node_stats + * Mirror interface debug statistics structure. + */ +struct nss_mirror_node_stats { + uint32_t mirror_pkts; /**< Number of packets exceptioned to host. */ + uint32_t mirror_bytes; /**< Number of bytes exceptioned to host. */ + uint32_t tx_send_fail; /**< Transmit send failures. */ + uint32_t dest_lookup_fail; /**< Destination lookup failures. */ + uint32_t mem_alloc_fail; /**< Memory allocation failures. */ + uint32_t copy_fail; /**< Copy failures. */ + uint32_t bad_param; /**< Bad parameter. */ +}; + +/** + * nss_mirror_stats_sync_msg + * Message information for mirror interface synchronization statistics. + */ +struct nss_mirror_stats_sync_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + struct nss_mirror_node_stats mirror_stats; /**< Debug statistics for mirror. */ +}; + +/** + * nss_mirror_stats_notification + * Mirror transmission statistics structure. 
+ */ +struct nss_mirror_stats_notification { + uint64_t stats_ctx[NSS_MIRROR_STATS_MAX]; /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +/** + * nss_mirror_msg + * Data for sending and receiving mirror interface messages. + */ +struct nss_mirror_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a mirror interface message. + */ + union { + struct nss_mirror_configure_msg config; + /**< Mirror interface configure message. */ + struct nss_mirror_set_nexthop_msg nexthop; + /**< Mirror interface set nexthop message. */ + struct nss_mirror_stats_sync_msg stats; + /**< Statistics message to host. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving mirror instance data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_mirror_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for receiving mirror interface messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_mirror_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * nss_mirror_get_context + * Gets the mirror interface context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_mirror_get_context(void); + +/** + * nss_mirror_tx_msg + * Sends mirror interface messages to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_mirror_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. 
+ */ +extern nss_tx_status_t nss_mirror_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_mirror_msg *msg); + +/** + * nss_mirror_tx_msg_sync + * Sends a mirror interface message to the NSS synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_mirror_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_mirror_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_mirror_msg *msg); + +/** + * nss_mirror_unregister_if + * Deregisters a mirror interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + */ +extern void nss_mirror_unregister_if(uint32_t if_num); + +/** + * nss_mirror_register_if + * Registers a mirror interface with the NSS for sending and receiving messages. + * + * @datatypes + * nss_mirror_data_callback_t \n + * nss_mirror_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] data_callback Callback for the mirror interface data. + * @param[in] event_callback Callback for the mirror interface message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_mirror_register_if(uint32_t if_num, + nss_mirror_data_callback_t data_callback, + nss_mirror_msg_callback_t event_callback, + struct net_device *netdev, uint32_t features); + +/** + * nss_mirror_verify_if_num + * Verify whether the interface is an mirror interface or not. + * + * @param[in] if_num NSS interface number. + * + * @return + * True if the interface is an mirror interface. + */ +extern bool nss_mirror_verify_if_num(uint32_t if_num); + +/** + * nss_mirror_register_handler + * Registers the mirror interface debug statistics handler with the NSS. + * + * @return + * None. 
+ */ +extern void nss_mirror_register_handler(void); + +/** + * nss_mirror_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_mirror_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_mirror_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_mirror_stats_register_notifier(struct notifier_block *nb); + +/** + * @} + */ +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_n2h.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_n2h.h new file mode 100644 index 000000000..1613f41c0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_n2h.h @@ -0,0 +1,572 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_n2h.h + * NSS to HLOS interface definitions. 
+ */ + +#ifndef __NSS_N2H_H +#define __NSS_N2H_H + +/** + * @addtogroup nss_n2h_subsystem + * @{ + */ + +#define MAX_PAGES_PER_MSG 32 /**< Maximum number of pages per message. */ +#define NSS_N2H_RPS_PRI_DEFAULT -1 /**< Default RPS priority mapping. */ + +/** + * nss_n2h_payload_info + * Payload configuration based on the watermark. + */ +struct nss_n2h_payload_info { + uint32_t pool_size; /**< Empty buffer pool size. */ + + /** + * Low watermark. + * Set this field to 0 for the system to automatically determine the watermark. + */ + uint32_t low_water; + + /** + * High watermark. + * Set this field to 0 for the system to automatically determine the watermark. + */ + uint32_t high_water; +}; + +#ifdef __KERNEL__ /* only kernel will use. */ + +/** + * nss_n2h_cfg_pvt + * N2H private data configuration. + */ +struct nss_n2h_cfg_pvt { + struct semaphore sem; /**< Semaphore for SMP synchronization. */ + struct completion complete; /**< Waits for the NSS to process the message. */ + struct nss_n2h_payload_info empty_buf_pool_info; /**< Empty buffer pool information. */ + struct nss_n2h_payload_info empty_paged_buf_pool_info; /**< Paged buffer pool information. */ + int wifi_pool; /**< Size of the empty Wi-Fi buffer pool. */ + int response; /**< Response from the firmware. */ +}; + +#endif /*__KERNEL__ */ + +/** + * nss_n2h_stats_types + * N2H node statistics. + */ +enum nss_n2h_stats_types { + NSS_N2H_STATS_QUEUE_DROPPED = NSS_STATS_NODE_MAX, + /**< Number of packets dropped because the exception queue is too full. */ + NSS_N2H_STATS_TOTAL_TICKS, /**< Total clock ticks spend inside the N2H. */ + NSS_N2H_STATS_WORST_CASE_TICKS, /**< Worst case iteration of the exception path in ticks. */ + NSS_N2H_STATS_ITERATIONS, /**< Number of iterations around the N2H. */ + NSS_N2H_STATS_PBUF_OCM_TOTAL_COUNT, /**< Number of pbuf OCM total count. */ + NSS_N2H_STATS_PBUF_OCM_FREE_COUNT, /**< Number of pbuf OCM free count. 
*/ + NSS_N2H_STATS_PBUF_OCM_ALLOC_FAILS_WITH_PAYLOAD, + /**< Number of pbuf OCM allocations that have failed with payload. */ + NSS_N2H_STATS_PBUF_OCM_ALLOC_FAILS_NO_PAYLOAD, + /**< Number of pbuf OCM allocations that have failed without payload. */ + NSS_N2H_STATS_PBUF_DEFAULT_TOTAL_COUNT, /**< Number of pbuf default total count. */ + NSS_N2H_STATS_PBUF_DEFAULT_FREE_COUNT, /**< Number of pbuf default free count. */ + NSS_N2H_STATS_PBUF_DEFAULT_ALLOC_FAILS_WITH_PAYLOAD, + /**< Number of pbuf default allocations that have failed with payload. */ + NSS_N2H_STATS_PBUF_DEFAULT_ALLOC_FAILS_NO_PAYLOAD, + /**< Number of pbuf default allocations that have failed without payload. */ + + NSS_N2H_STATS_PAYLOAD_ALLOC_FAILS, /**< Number of pbuf allocations that have failed because there were no free payloads. */ + NSS_N2H_STATS_PAYLOAD_FREE_COUNT, /**< Number of free payloads that exist. */ + + NSS_N2H_STATS_H2N_CONTROL_PACKETS, /**< Control packets received from HLOS. */ + NSS_N2H_STATS_H2N_CONTROL_BYTES, /**< Control bytes received from HLOS. */ + NSS_N2H_STATS_N2H_CONTROL_PACKETS, /**< Control packets sent to HLOS. */ + NSS_N2H_STATS_N2H_CONTROL_BYTES, /**< Control bytes sent to HLOS. */ + + NSS_N2H_STATS_H2N_DATA_PACKETS, /**< Data packets received from HLOS. */ + NSS_N2H_STATS_H2N_DATA_BYTES, /**< Data bytes received from HLOS. */ + NSS_N2H_STATS_N2H_DATA_PACKETS, /**< Data packets sent to HLOS. */ + NSS_N2H_STATS_N2H_DATA_BYTES, /**< Data bytes sent to HLOS. */ + NSS_N2H_STATS_N2H_TOT_PAYLOADS, /**< Number of payloads in NSS. */ + NSS_N2H_STATS_N2H_INTERFACE_INVALID, /**< Number of bad interface access. */ + NSS_N2H_STATS_ENQUEUE_RETRIES, /**< Number of enqueue retries by N2H. */ + + NSS_N2H_STATS_MAX, /**< Maximum message type. */ +}; + +/** + * nss_n2h_metadata_types + * Message types for N2H requests and responses. 
+ */ +enum nss_n2h_metadata_types { + NSS_RX_METADATA_TYPE_N2H_STATS_SYNC = 0, + NSS_TX_METADATA_TYPE_N2H_RPS_CFG, + NSS_TX_METADATA_TYPE_N2H_EMPTY_POOL_BUF_CFG, + NSS_TX_METADATA_TYPE_N2H_FLUSH_PAYLOADS, + NSS_TX_METADATA_TYPE_N2H_MITIGATION_CFG, + NSS_METADATA_TYPE_N2H_ADD_BUF_POOL, + NSS_TX_METADATA_TYPE_SET_WATER_MARK, + NSS_TX_METADATA_TYPE_GET_WATER_MARK, + NSS_TX_METADATA_TYPE_N2H_WIFI_POOL_BUF_CFG, + NSS_TX_DDR_INFO_VIA_N2H_CFG, + NSS_TX_METADATA_TYPE_N2H_SET_PNODE_QUEUE_CFG, + NSS_TX_METADATA_TYPE_N2H_EMPTY_PAGED_POOL_BUF_CFG, + NSS_TX_METADATA_TYPE_SET_PAGED_WATER_MARK, + NSS_TX_METADATA_TYPE_GET_PAGED_WATER_MARK, + NSS_TX_METADATA_TYPE_N2H_RPS_PRI_MAP_CFG, + NSS_TX_METADATA_TYPE_N2H_QUEUE_LIMIT_CFG, + NSS_TX_METADATA_TYPE_N2H_PAGED_BUFFER_POOL_INIT, + NSS_TX_METADATA_TYPE_N2H_HOST_BACK_PRESSURE_CFG, + NSS_METADATA_TYPE_N2H_MAX, +}; + +/* + * nss_n2h_error_types + * N2H error types. + */ +enum nss_n2h_error_types { + N2H_EUNKNOWN = 1, + N2H_ALREADY_CFG, + N2H_LOW_WATER_MIN_INVALID, + N2H_HIGH_WATER_LESS_THAN_LOW, + N2H_HIGH_WATER_LIMIT_INVALID, + N2H_LOW_WATER_LIMIT_INVALID, + N2H_WATER_MARK_INVALID, + N2H_EMPTY_BUFFER_TOO_HIGH, + N2H_EMPTY_BUFFER_TOO_LOW, + N2H_MMU_ENTRY_IS_INVALID, + N2H_PN_QUEUE_SET_FAILED, + N2H_PAGES_PER_MSG_EXCEEDED, + N2H_RPS_PRI_MAP_TOO_HIGH, +}; + +/** + * nss_n2h_stats_notification + * N2H statistics structure. + */ +struct nss_n2h_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint64_t n2h_stats[NSS_N2H_STATS_MAX]; /**< N2H statistics. */ + uint64_t drv_stats[NSS_STATS_DRV_MAX]; /**< Driver statistics. */ +}; + +/** + * nss_n2h_rps + * N2H RPS configuration. + */ +struct nss_n2h_rps { + uint32_t enable; /**< Enable RPS. */ +}; + +/** + * nss_n2h_rps_pri_map + * N2H priority configuration. + * + * This is used to direct packets with a given priority to a specific host CPU. + * A value of -1 in pri_map[pri] is treated as invalid and will not override + * RPS for that priority. 
+ */ +struct nss_n2h_rps_pri_map { + int32_t pri_map[NSS_MAX_NUM_PRI]; /**< Priority to RPS map. */ +}; + +/** + * nss_n2h_mitigation + * N2H mitigation configuration. + */ +struct nss_n2h_mitigation { + uint32_t enable; /**< Enable NSS mitigation. */ +}; + +/** + * nss_n2h_buf_pool + * N2H buffer pool configuration. + */ +struct nss_n2h_buf_pool { + uint32_t nss_buf_page_size; /**< Size of the buffer page. */ + uint32_t nss_buf_num_pages; /**< Number of buffer pages. */ + + uint32_t nss_buf_pool_addr[MAX_PAGES_PER_MSG]; + /**< Buffer addresses. */ + nss_ptr_t nss_buf_pool_vaddr[MAX_PAGES_PER_MSG]; + /**< Virtual addresses of the buffers. */ +#ifndef __LP64__ + uint32_t padding[MAX_PAGES_PER_MSG]; + /**< Pad to fit 64 bits, do not reuse. */ +#endif +}; + +/** + * nss_n2h_pnode_queue_config + * Queue configuration command for pnodes in NSS. + */ +struct nss_n2h_pnode_queue_config { + uint8_t mq_en; /**< Enable multiple queues. */ + uint8_t reserved[3]; /**< Reserved for alignment. */ + uint16_t qlimits[NSS_MAX_NUM_PRI]; + /**< Limits of each queue. */ +#if (NSS_MAX_NUM_PRI & 1) + uint16_t reserved2; +#endif +}; + +/** + * nss_n2h_empty_pool_buf + * Old way of setting the number of empty pool buffers (payloads). + * + * The NSS firmware sets the low watermark to n -- ring_size, and the high + * watermark to n + ring_size. + */ +struct nss_n2h_empty_pool_buf { + uint32_t pool_size; /**< Size of the empty buffer pool. */ +}; + +/** + * nss_n2h_water_mark + * New way of setting the low and high watermarks in the NSS firmware. + */ +struct nss_n2h_water_mark { + /** + * Low watermark. + * Lower threshold for the number of payloads that can be held by NSS firmware. + * Setting this value to 0 gets the system to automatically determine the watermark. + */ + uint32_t low_water; + + /** + * High watermark. + * Upper threshold for the number of paylods that be held by the NSS firmware. + * Setting this value to 0 gets the system to automatically determine the watermark. 
+ */ + uint32_t high_water; +}; + +/** + * nss_n2h_flush_payloads + * Flush payload configuration. + */ +struct nss_n2h_flush_payloads { + uint32_t reserved; /**< Reserved for future use. */ +}; + +/** + * nss_n2h_wifi_payloads + * Payloads required for Wi-Fi offloading. + */ +struct nss_n2h_wifi_payloads { + uint32_t payloads; /**< Number of payloads for Wi-Fi use. */ +}; + +/** + * nss_n2h_pbuf_mgr_stats + * Payload buffer manager statistics. + */ +struct nss_n2h_pbuf_mgr_stats { + uint32_t pbuf_total_count; /**< Total number of buffers, free or in use. */ + uint32_t pbuf_free_count; /**< Number of currently free buffers. */ + uint32_t pbuf_alloc_fails_with_payload; + /**< Number of buffer allocation failures. */ + uint32_t pbuf_alloc_fails_no_payload; + /**< Number of buffer allocation failures without payload. */ +}; + +/** + * nss_n2h_paged_buffer_pool_init + * Paged buffer configuration initialization. + */ +struct nss_n2h_paged_buffer_pool_init { + uint32_t reserved; /**< Reserved for future use. */ +}; + +/** + * nss_n2h_stats_sync + * N2H synchronization statistics. + */ +struct nss_n2h_stats_sync { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t queue_dropped; + /**< Number of packets dropped because the N2H queue is too full. */ + uint32_t total_ticks; /**< Total clock ticks spent inside the N2H handler. */ + uint32_t worst_case_ticks; /**< Worst case iteration of the N2H handler in ticks. */ + uint32_t iterations; /**< Number of iterations around the N2H handler. */ + + struct nss_n2h_pbuf_mgr_stats pbuf_ocm_stats; + /**< Statistics for on-chip memory payload buffers. */ + struct nss_n2h_pbuf_mgr_stats pbuf_default_stats; + /**< Statistics for DDR memory payload buffers. */ + + uint32_t payload_alloc_fails; /**< Number of payload allocation failures. */ + uint32_t payload_free_count; /**< Number of free payloads. */ + + uint32_t h2n_ctrl_pkts; /**< Control packets received from the HLOS. 
*/ + uint32_t h2n_ctrl_bytes; /**< Control bytes received from the HLOS. */ + uint32_t n2h_ctrl_pkts; /**< Control packets sent to the HLOS. */ + uint32_t n2h_ctrl_bytes; /**< Control bytes sent to the HLOS. */ + + uint32_t h2n_data_pkts; /**< Data packets received from the HLOS. */ + uint32_t h2n_data_bytes; /**< Data bytes received from the HLOS. */ + uint32_t n2h_data_pkts; /**< Data packets sent to the HLOS. */ + uint32_t n2h_data_bytes; /**< Data bytes sent to the HLOS. */ + uint32_t tot_payloads; /**< Total number of payloads in the NSS firmware. */ + + /** + * Number of data packets with invalid interface received from the host. + */ + uint32_t data_interface_invalid; + uint32_t enqueue_retries; /**< Number of times N2H retried enqueue to next node. */ +}; + +/** + * nss_mmu_ddr_info + * System DDR memory information required by the firmware MMU to set range guards. + */ +struct nss_mmu_ddr_info { + uint32_t ddr_size; /**< Total size of the DDR. */ + uint32_t start_address; /**< System start address. */ + uint32_t num_active_cores; + /**< Number of active cores. */ + uint32_t nss_ddr_size; /**< Total memory for NSS SoC. */ +}; + +/** + * nss_n2h_queue_limit_config + * Queue length limit for N2H node. + */ +struct nss_n2h_queue_limit_config { + uint32_t qlimit; /**< Queue length size. */ +}; + +/** + * nss_n2h_host_back_pressure + * Host back pressure configuration. + */ +struct nss_n2h_host_back_pressure { + uint32_t enable; /**< Enable host back pressure. */ +}; + +/** + * nss_n2h_msg + * Data for sending and receiving N2H messages. + */ +struct nss_n2h_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of an N2H message. + */ + union { + struct nss_n2h_stats_sync stats_sync; + /**< N2H statistics synchronization. */ + struct nss_n2h_rps rps_cfg; + /**< RPS configuration. */ + struct nss_n2h_rps_pri_map rps_pri_map; + /**< RPS priority map. 
*/ + struct nss_n2h_empty_pool_buf empty_pool_buf_cfg; + /**< Empty pool buffer configuration. */ + struct nss_n2h_empty_pool_buf empty_paged_pool_buf_cfg; + /**< Empty paged pool buffer configuration. */ + struct nss_n2h_flush_payloads flush_payloads; + /**< Flush payloads present in the NSS. */ + struct nss_n2h_mitigation mitigation_cfg; + /**< Mitigation configuration. */ + struct nss_n2h_buf_pool buf_pool; + /**< Pool buffer coniguration. */ + struct nss_n2h_water_mark wm; + /**< Sets low and high watermarks. */ + struct nss_n2h_water_mark wm_paged; + /**< Sets low and high watermarks for paged pool. */ + struct nss_n2h_payload_info payload_info; + /**< Gets the payload information. */ + struct nss_n2h_payload_info paged_payload_info; + /**< Gets the paged payload information. */ + struct nss_n2h_wifi_payloads wp; + /**< Sets the number of Wi-Fi payloads. */ + struct nss_mmu_ddr_info mmu; + /**< Gets the DDR size and start address to configure the MMU. */ + struct nss_n2h_pnode_queue_config pn_q_cfg; + /**< Pnode queueing configuration. */ + struct nss_n2h_queue_limit_config ql_cfg; + /**< Queue limit configuration. */ + struct nss_n2h_paged_buffer_pool_init paged_buffer_pool_init; + /**< Paged buffer pool initialization. */ + struct nss_n2h_host_back_pressure host_bp_cfg; + /**< Host back pressure configuration. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving N2H messages. + * + * @datatypes + * nss_n2h_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the N2H message. + */ +typedef void (*nss_n2h_msg_callback_t)(void *app_data, struct nss_n2h_msg *msg); + +/** + * nss_n2h_tx_msg + * Sends messages to the N2H package. + * + * @datatypes + * nss_ctx_instance \n + * nss_n2h_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] nnm Pointer to the N2H message. + * + * @return + * Status of the Tx operation. 
+ */ +extern nss_tx_status_t nss_n2h_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_n2h_msg *nnm); + +/** + * nss_n2h_single_core_register_sysctl + * Registers the N2H sysctl entry to the sysctl tree for a single-core system. + * + * @return + * None. + */ +extern void nss_n2h_single_core_register_sysctl(void); + +/** + * nss_n2h_multi_core_register_sysctl + * Registers the N2H sysctl entry to the sysctl tree for a multi-core system. + * + * @return + * None. + */ +extern void nss_n2h_multi_core_register_sysctl(void); + +/** + * nss_n2h_unregister_sysctl + * Deregisters the N2H sysctl entry from the sysctl tree. + * + * @return + * None. + * + * @dependencies + * The system control must have been previously registered. + */ +extern void nss_n2h_unregister_sysctl(void); + +/** + * nss_n2h_flush_payloads + * Sends flush payloads message to NSS + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_n2h_flush_payloads(struct nss_ctx_instance *nss_ctx); + +/** + * nss_n2h_msg_init + * Initializes messages from the host to the NSS. + * + * @datatypes + * nss_n2h_msg \n + * nss_n2h_msg_callback_t + * + * @param[in,out] nim Pointer to the NSS interface message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_n2h_msg_init(struct nss_n2h_msg *nim, uint16_t if_num, uint32_t type, uint32_t len, + nss_n2h_msg_callback_t cb, void *app_data); + +/** + * nss_n2h_update_queue_config_sync + * Synchrounous method to update pnode queue configuration to NSS. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] mq_en Enable multiple pnode queues. 
+ * @param[in] qlimits Maximum number of packets in each queues. + * + * @return + * Status of the configuration update operation. + */ +extern nss_tx_status_t nss_n2h_update_queue_config_sync(struct nss_ctx_instance *nss_ctx, bool mq_en, uint16_t *qlimits); + +/** + * nss_n2h_update_queue_config_async + * Asynchrounous method to update pnode queue configuration to NSS. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] mq_en Enable multiple pnode queues. + * @param[in] qlimits Maximum number of packets in each queues. + * + * @return + * Status of the configuration update operation. + */ +extern nss_tx_status_t nss_n2h_update_queue_config_async(struct nss_ctx_instance *nss_ctx, bool mq_en, uint16_t *qlimits); + +#ifdef __KERNEL__ /* only kernel will use. */ + +/** + * nss_n2h_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_n2h_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_n2h_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_n2h_stats_unregister_notifier(struct notifier_block *nb); + +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_N2H_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_oam.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_oam.h new file mode 100644 index 000000000..af25e3703 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_oam.h @@ -0,0 +1,145 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_oam.h + * NSS OAM - Operations, Administration and Maintenance Service + */ + +#ifndef __NSS_OAM_H +#define __NSS_OAM_H + +/** + * @addtogroup nss_oam_subsystem + * @{ + */ + +#define NSS_OAM_FW_VERSION_LEN 132 /**< Size of the OAM firmware version. */ + +/** + * nss_oam_msg_types + * OAM command types. + * + * The OAM proxy sends these command messages to the NSS OAM server via the OAM adapter. + */ +enum nss_oam_msg_types { + NSS_OAM_MSG_TYPE_NONE, + NSS_OAM_MSG_TYPE_GET_FW_VER, + NSS_OAM_MSG_TYPE_MAX, +}; + +/** + * nss_oam_error + * OAM error responses. + */ +enum nss_oam_error { + NSS_OAM_ERROR_NONE, + NSS_OAM_ERROR_INVAL_MSG_TYPE, + NSS_OAM_ERROR_INVAL_MSG_LEN, + NSS_OAM_ERROR_MAX, +}; + +/** + * nss_oam_fw_ver + * OAM firmware version. + */ +struct nss_oam_fw_ver { + uint8_t string[NSS_OAM_FW_VERSION_LEN]; /**< OAM firmware version. */ +}; + +/** + * nss_oam_msg + * Data for sending and receiving OAM messages. + */ +struct nss_oam_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of an OAM message. + */ + union { + struct nss_oam_fw_ver fw_ver; + /**< Firmware version. */ + } msg; /**< Message payload. 
*/ +}; + +/** + * Callback function for receiving OAM messages. + * + * @datatypes + * nss_oam_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_oam_msg_callback_t)(void *app_data, struct nss_oam_msg *msg); + +/** + * nss_oam_tx_msg + * Transmits an OAM message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_oam_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation + */ +extern nss_tx_status_t nss_oam_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_oam_msg *msg); + +/** + * nss_oam_notify_register + * Registers a notifier callback with the NSS for sending and receiving OAM messages. + * + * @datatypes + * nss_oam_msg_callback_t + * + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_oam_notify_register(nss_oam_msg_callback_t cb, void *app_data); + +/** + * nss_oam_notify_unregister + * Deregisters an OAM message notifier callback from the NSS. + * + * @return + * None. + */ +extern void nss_oam_notify_unregister(void); + +/** + * nss_register_oam_if + * Registers the OAM interface handler with the NSS. + * + * @param[in] if_number Interface number of the OAM interface. 
+ * + * @return + * Boolean status of handler registration + */ +extern bool nss_register_oam_if(uint16_t if_number); + +/** + * @} + */ + +#endif /* __NSS_OAM_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_phy_if.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_phy_if.h new file mode 100644 index 000000000..cc52d86f0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_phy_if.h @@ -0,0 +1,67 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_phy_if.h.h + * NSS physical interface definitions. + */ + +#ifndef __NSS_PHY_IF_H +#define __NSS_PHY_IF_H + +/** + * @addtogroup nss_driver_subsystem + * @{ + */ + +/** + * nss_phys_if_reset_nexthop + * De-configure nexthop for an interface. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num Network physical interface number. + * + * @return + * Status of the Tx operation. 
+ */ +nss_tx_status_t nss_phys_if_reset_nexthop(struct nss_ctx_instance *nss_ctx, uint32_t if_num); + +/** + * nss_phys_if_set_nexthop + * Configure nexthop for an interface. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num Network physical interface number. + * @param[in] nexthop Network physical or virtual interface number. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_phys_if_set_nexthop(struct nss_ctx_instance *nss_ctx, uint32_t if_num, uint32_t nexthop); + +/** + * @} + */ + +#endif /* __NSS_PHY_IF_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_pm.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_pm.h new file mode 100644 index 000000000..8e0cce88d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_pm.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * @file nss_pm.h + * NSS power management definitions. + */ + +#ifndef __NSS_PM_H +#define __NSS_PM_H + +/** + * @addtogroup nss_pm_subsystem + * @{ + */ + +/** + * nss_pm_client + * Power management (PM) clients. + * + * These clients can query for bus or clock performance levels. 
+ */ +typedef enum nss_pm_client { + NSS_PM_CLIENT_GMAC, + NSS_PM_CLIENT_CRYPTO, + NSS_PM_CLIENT_NETAP, + NSS_PM_MAX_CLIENTS, +} nss_pm_client_t; + +/** + * nss_pm_perf_level + * Performance levels. + * + * This enumeration is passed as a parameter to NSS PM performance-level + * requests. + */ +typedef enum nss_pm_perf_level { + NSS_PM_PERF_LEVEL_SUSPEND = 0, + NSS_PM_PERF_LEVEL_IDLE, + NSS_PM_PERF_LEVEL_NOMINAL, + NSS_PM_PERF_LEVEL_TURBO, + NSS_PM_PERF_MAX_LEVELS, +} nss_pm_perf_level_t; + +/** + * nss_pm_interface_status_t + * Status of the PM client interface. + */ +typedef enum { + NSS_PM_API_SUCCESS = 0, + NSS_PM_API_FAILED, +} nss_pm_interface_status_t; + +/** + * nss_pm_client_register + * Registers a power management driver client. + * + * @datatypes + * nss_pm_client_t + * + * @param[in] client_id ID of the client driver. + * + * @return + * None. + */ +extern void *nss_pm_client_register(nss_pm_client_t client_id); + +/** + * nss_pm_client_unregister + * Deregisters a power management driver client. + * + * @datatypes + * nss_pm_client_t + * + * @param[in] client_id ID of the client driver. + * + * @return + * None. + */ +int nss_pm_client_unregister(nss_pm_client_t client_id); + +/** + * nss_pm_set_perf_level + * Updates the bus bandwidth level for a client. + * + * @datatypes + * nss_pm_perf_level_t + * + * @param[in,out] handle Handle of the client. + * @param[in,out] lvl Performance level. + * + * @return + * None. + */ +extern nss_pm_interface_status_t nss_pm_set_perf_level(void *handle, nss_pm_perf_level_t lvl); + +/** + * @} + */ + +#endif /* __NSS_PM_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_portid.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_portid.h new file mode 100644 index 000000000..61e8ac6d8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_portid.h @@ -0,0 +1,284 @@ +/* + ************************************************************************** + * Copyright (c) 2015, 2017 The Linux Foundation. 
All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_portid.h + * NSS Port ID interface definitions. + */ + +#ifndef __NSS_PORTID_H +#define __NSS_PORTID_H + +/** + * @addtogroup nss_portid_subsystem + * @{ + */ + +/** + * Maximum number of ports on the S17c switch chip. + * + * If a new switch has more ports than S17c, this value must be updated. + */ +#define NSS_PORTID_MAX_SWITCH_PORT 7 + +/** + * nss_portid_msg_types + * Message types for port ID requests and responses. + */ +enum nss_portid_msg_types { + NSS_PORTID_CONFIGURE_MSG, + NSS_PORTID_UNCONFIGURE_MSG, + NSS_PORTID_STATS_SYNC_MSG, + NSS_PORTID_MAX_MSG_TYPE +}; + +/** + * nss_portid_configure_msg + * Message information for configuring a port ID. + */ +struct nss_portid_configure_msg { + uint32_t port_if_num; + /**< Interface number corresponding to the port ID of the device. */ + uint8_t port_id; /**< ID of the mapped switch port. */ + uint8_t gmac_id; /**< ID of the mapped GMAC interface. */ + uint8_t reserved[2]; /**< Reserved for word alignment. */ +}; + +/** + * nss_portid_unconfigure_msg + * Message information for deleting a port ID. 
+ */ +struct nss_portid_unconfigure_msg { + uint32_t port_if_num; + /**< Interface number corresponding to the port ID of the device. */ + uint8_t port_id; /**< ID of the mapped switch port. */ + uint8_t reserved[3]; /**< Reserved for word alignment. */ +}; + +/** + * nss_portid_stats_sync_msg + * Message information for port ID synchronization statistics. + */ +struct nss_portid_stats_sync_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t rx_invalid_header; /**< Rx with an invalid header. */ + uint8_t port_id; /**< ID of the mapped switch port. */ + uint8_t reserved[3]; /**< Reserved for word alignment. */ +}; + +/** + * nss_portid_msg + * Data for sending and receiving port ID messages. + */ +struct nss_portid_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a port ID message. + */ + union { + struct nss_portid_configure_msg configure; + /**< Port ID configuration information. */ + struct nss_portid_unconfigure_msg unconfigure; + /**< Port ID de-configuration information. */ + struct nss_portid_stats_sync_msg stats_sync; + /**< Synchronization statistics for the port ID. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving port ID messages. + * + * @datatypes + * nss_portid_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] npm Pointer to the NSS Profiler message. + */ +typedef void (*nss_portid_msg_callback_t)(void *app_data, struct nss_portid_msg *npm); + +/** + * Callback function for receiving port ID interface data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. 
+ */ +typedef void (*nss_portid_buf_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_portid_get_stats + * Gets statistics from a port interface. + * + * @datatypes + * rtnl_link_stats64 + * + * @param[in] if_num NSS interface number. + * @param[out] stats Container for the statistic counters. + * + * @return + * TRUE or FALSE. + */ +bool nss_portid_get_stats(uint32_t if_num, struct rtnl_link_stats64 *stats); + +/** + * nss_portid_msg_init + * Initializes a port ID message. + * + * @datatypes + * nss_portid_msg \n + * nss_portid_msg_callback_t + * + * @param[in,out] npm Pointer to the NSS port ID message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_portid_msg_init(struct nss_portid_msg *npm, uint16_t if_num, uint32_t type, uint32_t len, + nss_portid_msg_callback_t cb, void *app_data); + +/** + * nss_portid_if_tx_data + * Transmits a data packet to the NSS port ID interface. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] os_buf Pointer to the OS data buffer. + * @param[in] if_num NSS interface number. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_portid_if_tx_data(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num); + +/** + * nss_portid_tx_msg + * Sends a port ID message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_portid_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. 
+ */ +extern nss_tx_status_t nss_portid_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_portid_msg *msg); + +/** + * nss_portid_tx_msg_sync + * Sends a port ID message to the NSS and waits for a response. + * + * @datatypes + * nss_ctx_instance \n + * nss_portid_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in,out] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_portid_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_portid_msg *msg); + +/** + * nss_portid_register_port_if + * Registers the port interface with the NSS. + * + * @datatypes + * net_device \n + * nss_portid_buf_callback_t + * + * @param[in] if_num NSS interface number. + * @param[in] port_id Physical port ID of this interface. + * @param[in] ndev Pointer to the associated network device. + * @param[in] buf_cb Callback for the data. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_portid_register_port_if(uint32_t if_num, uint32_t port_id, struct net_device *ndev, nss_portid_buf_callback_t buf_cb); + +/** + * nss_portid_unregister_port_if + * Deregisters the port interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * TRUE or FALSE. + * + * @dependencies + * The interface must have been previously registered. + */ +extern bool nss_portid_unregister_port_if(uint32_t if_num); + +/** + * nss_portid_tx_configure_port_if_msg + * Sends a port interface configuration message to the NSS. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] port_if_num Interface number of the port node. + * @param[in] port_id ID of the mapped switch port. + * @param[in] gmac_id ID of the mapped GMAC interface. + * + * @return + * Status of the Tx operation. 
+ */ +extern nss_tx_status_t nss_portid_tx_configure_port_if_msg(struct nss_ctx_instance *nss_ctx, uint32_t port_if_num, uint8_t port_id, uint8_t gmac_id); + +/** + * nss_portid_tx_unconfigure_port_if_msg + * Sends a port interface de-configuration message to the NSS. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] port_if_num Interface number of the port node. + * @param[in] port_id ID of the mapped switch port. + * + * @return + * Status of the Tx operation. + * + * @dependencies + * The interface must have been previously configured. + */ +extern nss_tx_status_t nss_portid_tx_unconfigure_port_if_msg(struct nss_ctx_instance *nss_ctx, uint32_t port_if_num, uint8_t port_id); + +/** + * @} + */ + +#endif /* __NSS_PORTID_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_ppe.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ppe.h new file mode 100644 index 000000000..648bb6638 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ppe.h @@ -0,0 +1,91 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2018, 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/** + * @file nss_ppe.h + * NSS PPE interface definitions. + */ + +#ifndef _NSS_PPE_H_ +#define _NSS_PPE_H_ + +typedef int32_t nss_ppe_port_t; + +/** + * @addtogroup nss_ppe_subsystem + * @{ + */ + +/* + * NSS PORT defines + */ +#define NSS_PPE_NUM_PHY_PORTS_MAX 8 + /**< Maximum number of PPE physical ports. */ +#define NSS_PPE_PORT_IPSEC 7 + /**< Port number of PPE inline IPsec port. */ + +/** + * nss_ppe_sc_type + * PPE service code types. + */ +enum nss_ppe_sc_type { + NSS_PPE_SC_NONE, /**< Normal PPE processing. */ + NSS_PPE_SC_BYPASS_ALL, /**< Bypasses all stages in PPE. */ + NSS_PPE_SC_ADV_QOS_BRIDGED, /**< Advance QoS redirection for bridged flow. */ + NSS_PPE_SC_BR_QOS, /**< Bridge QoS redirection. */ + NSS_PPE_SC_BNC_0, /**< QoS bounce. */ + NSS_PPE_SC_BNC_CMPL_0, /**< QoS bounce complete. */ + NSS_PPE_SC_ADV_QOS_ROUTED, /**< Advance QoS redirection for routed flow. */ + NSS_PPE_SC_IPSEC_PPE2EIP, /**< Inline IPsec redirection from PPE-to-EIP. */ + NSS_PPE_SC_IPSEC_EIP2PPE, /**< Inline IPsec redirection from EIP-to-PPE. */ + NSS_PPE_SC_PTP, /**< Service code for PTP packets. */ + NSS_PPE_SC_VLAN_FILTER_BYPASS, /**< VLAN filter bypass for bridge flows between 2 different VSIs. */ + NSS_PPE_SC_L3_EXCEPT, /**< Indicate exception post tunnel/TAP operation. */ + NSS_PPE_SC_SPF_BYPASS, /**< Source port filtering bypass in PPE. */ + NSS_PPE_SC_MAX, /**< Maximum service code. */ +}; + +/** + * nss_ppe_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_ppe_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_ppe_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. 
+ */ +extern int nss_ppe_stats_register_notifier(struct notifier_block *nb); + +/** @} */ /* end_addtogroup nss_ppe_subsystem */ + +#endif /* _NSS_PPE_H_ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_ppe_vp.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ppe_vp.h new file mode 100644 index 000000000..64e1f5ecf --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_ppe_vp.h @@ -0,0 +1,79 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_ppe_vp.h + * NSS PPE Virtual Port definitions. + */ + +#ifndef _NSS_PPE_VP_H_ +#define _NSS_PPE_VP_H_ + +/** + * nss_if_ppe_vp_destroy + * Destroy the PPE VP for a given NSS interface number. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_ppe_vp_destroy(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num); + +/** + * nss_ppe_vp_create + * Create the PPE VP for a given NSS interface number. 
+ * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_ppe_vp_create(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num); + +/** + * nss_ppe_vp_get_ppe_port_by_nssif + * Returns the PPE VP number for a given NSS interface number. + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] nss_if NSS interface number. + * + * @return + * Returns the PPE VP number corresponding to the NSS interface number. + */ +nss_ppe_port_t nss_ppe_vp_get_ppe_port_by_nssif(struct nss_ctx_instance *nss_ctx, nss_if_num_t nss_if); + +/** + * nss_ppe_vp_get_context + * Return the NSS context of PPE VP. + * + * @datatypes + * nss_ctx_instance + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_ppe_vp_get_context(void); + +#endif /* _NSS_PPE_VP_H_ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_pppoe.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_pppoe.h new file mode 100644 index 000000000..f082d3067 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_pppoe.h @@ -0,0 +1,384 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_pppoe.h + * NSS PPPoE interface definitions. + */ + +#ifndef __NSS_PPPOE_H +#define __NSS_PPPOE_H + +/** + * @addtogroup nss_pppoe_subsystem + * @{ + */ + +/** + * Maximum number of supported PPPoE sessions. + */ +#define NSS_MAX_PPPOE_DYNAMIC_INTERFACES 8 + +/** + * nss_pppoe_metadata_types + * Message types for PPPoE requests and responses. + */ +enum nss_pppoe_metadata_types { + NSS_PPPOE_MSG_SESSION_CREATE, + NSS_PPPOE_MSG_SESSION_DESTROY, + NSS_PPPOE_MSG_SYNC_STATS, + NSS_PPPOE_MSG_BR_ACCEL_CFG, + NSS_PPPOE_MSG_MAX +}; + +/** + * nss_pppoe_session_exception_events + * Session exception events from the PPPoE handler. + */ +enum nss_pppoe_session_exception_events { + NSS_PPPOE_SESSION_EXCEPTION_EVENT_WRONG_VERSION_OR_TYPE, + NSS_PPPOE_SESSION_EXCEPTION_EVENT_WRONG_CODE, + NSS_PPPOE_SESSION_EXCEPTION_EVENT_UNSUPPORTED_PPP_PROTOCOL, + NSS_PPPOE_SESSION_EXCEPTION_EVENT_MAX +}; + +/** + * pppoe_base_exception_events + * Base node exception events from the PPPoE handler. + */ +enum nss_pppoe_base_exception_events { + NSS_PPPOE_BASE_EXCEPTION_EVENT_SHORT_PPPOE_HDR_LENGTH, + NSS_PPPOE_BASE_EXCEPTION_EVENT_SHORT_PACKET_LENGTH, + NSS_PPPOE_BASE_EXCEPTION_EVENT_WRONG_VERSION_OR_TYPE, + NSS_PPPOE_BASE_EXCEPTION_EVENT_WRONG_CODE, + NSS_PPPOE_BASE_EXCEPTION_EVENT_UNSUPPORTED_PPP_PROTOCOL, + NSS_PPPOE_BASE_EXCEPTION_EVENT_DISABLED_BRIDGE_PACKET, + NSS_PPPOE_BASE_EXCEPTION_EVENT_MAX +}; + +/** + * nss_pppoe_br_accel_mode + * PPPoE bridge acceleration modes. 
+ */ +enum nss_pppoe_br_accel_modes { + NSS_PPPOE_BR_ACCEL_MODE_DIS, + NSS_PPPOE_BR_ACCEL_MODE_EN_5T, + NSS_PPPOE_BR_ACCEL_MODE_EN_3T, + NSS_PPPOE_BR_ACCEL_MODE_MAX +}; + +/** + * nss_pppoe_base_stats + * PPPoE base node synchronization statistics. + */ +struct nss_pppoe_base_stats { + struct nss_cmn_node_stats node; /**< Common node statistics. */ + uint32_t exception[NSS_PPPOE_BASE_EXCEPTION_EVENT_MAX]; + /**< PPPoE base node exception events. */ +}; + +/** + * nss_pppoe_session_stats + * PPPoE synchronization statistics per session. + */ +struct nss_pppoe_session_stats { + struct nss_cmn_node_stats node; /**< Common node statistics. */ + uint32_t exception[NSS_PPPOE_SESSION_EXCEPTION_EVENT_MAX]; + /**< PPPoE session exception events. */ +}; + +/** + * nss_pppoe_stats_session + * PPPoE session statistics. + */ +enum nss_pppoe_stats_session { + NSS_PPPOE_STATS_SESSION_RX_PACKETS, + NSS_PPPOE_STATS_SESSION_RX_BYTES, + NSS_PPPOE_STATS_SESSION_TX_PACKETS, + NSS_PPPOE_STATS_SESSION_TX_BYTES, + NSS_PPPOE_STATS_SESSION_WRONG_VERSION_OR_TYPE, + NSS_PPPOE_STATS_SESSION_WRONG_CODE, + NSS_PPPOE_STATS_SESSION_UNSUPPORTED_PPP_PROTOCOL, + NSS_PPPOE_STATS_SESSION_MAX +}; + +/** + * nss_pppoe_stats_base + * PPPoE base node statistics. + */ +enum nss_pppoe_stats_base { + NSS_PPPOE_STATS_BASE_RX_PACKETS, + NSS_PPPOE_STATS_BASE_RX_BYTES, + NSS_PPPOE_STATS_BASE_TX_PACKETS, + NSS_PPPOE_STATS_BASE_TX_BYTES, + NSS_PPPOE_STATS_BASE_RX_QUEUE_0_DROPPED, + NSS_PPPOE_STATS_BASE_RX_QUEUE_1_DROPPED, + NSS_PPPOE_STATS_BASE_RX_QUEUE_2_DROPPED, + NSS_PPPOE_STATS_BASE_RX_QUEUE_3_DROPPED, + NSS_PPPOE_STATS_BASE_SHORT_PPPOE_HDR_LENGTH, + NSS_PPPOE_STATS_BASE_SHORT_PACKET_LENGTH, + NSS_PPPOE_STATS_BASE_WRONG_VERSION_OR_TYPE, + NSS_PPPOE_STATS_BASE_WRONG_CODE, + NSS_PPPOE_STATS_BASE_UNSUPPORTED_PPP_PROTOCOL, + NSS_PPPOE_STATS_BASE_DISABLED_BRIDGE_PACKET, + NSS_PPPOE_STATS_BASE_MAX +}; + +/** + * nss_pppoe_stats_notification + * PPPoE statistics structure. 
+ */ +struct nss_pppoe_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ + uint64_t session_stats[NSS_PPPOE_STATS_SESSION_MAX]; /**< PPPoE statistics. */ + uint64_t base_stats[NSS_PPPOE_STATS_BASE_MAX]; /**< PPPoE base node statistics. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ + +/** + * nss_pppoe_sync_stats_msg + * PPPoE synchronization statistics. + */ +struct nss_pppoe_sync_stats_msg { + struct nss_pppoe_session_stats session_stats; /**< Session statistics. */ + struct nss_pppoe_base_stats base_stats; /**< Base node statistics. */ +}; + +/** + * nss_pppoe_destroy_msg + * PPPoE session destroy message. + */ +struct nss_pppoe_destroy_msg { + uint16_t session_id; /**< PPPoE session identification number. */ + uint8_t server_mac[ETH_ALEN]; /**< PPPoE server MAC address. */ + uint8_t local_mac[ETH_ALEN]; /**< Local physical interface MAC address. */ +}; + +/** + * nss_pppoe_create_msg + * PPPoE session create message. + */ +struct nss_pppoe_create_msg { + int32_t base_if_num; /**< Base NSS interface number which PPPoE session created on. */ + uint32_t mtu; /**< PPPoE interface MTU value. */ + uint8_t server_mac[ETH_ALEN]; /**< PPPoE server MAC address. */ + uint8_t local_mac[ETH_ALEN]; /**< Local physical interface MAC address. */ + uint16_t session_id; /**< PPPoE session identification number. */ +}; + +/** + * nss_pppoe_br_accel_cfg_msg + * PPPoE bridge acceleration configuration message. + */ +struct nss_pppoe_br_accel_cfg_msg { + uint32_t br_accel_cfg; /**< PPPoE bridge acceleration configuration. */ +}; + +/** + * nss_pppoe_msg + * Data for sending and receiving PPPoE messages. + */ +struct nss_pppoe_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a PPPoE message. + */ + union { + struct nss_pppoe_create_msg create; + /**< Session create message. */ + struct nss_pppoe_destroy_msg destroy; + /**< Session destroy message. 
*/ + struct nss_pppoe_sync_stats_msg sync_stats; + /**< Session statistics message. */ + struct nss_pppoe_br_accel_cfg_msg br_accel; + /**< PPPoE bridge acceleration configuration message. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving PPPoE messages. + * + * @datatypes + * nss_pppoe_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_pppoe_msg_callback_t)(void *app_data, struct nss_pppoe_msg *msg); + +/** + * nss_pppoe_tx_msg_sync + * Sends a PPPoE message synchronously to NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_pppoe_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_pppoe_tx_msg_sync(struct nss_ctx_instance *nss_ctx, + struct nss_pppoe_msg *msg); + +/** + * nss_register_pppoe_session_if + * Registers the PPPoE session interface with the NSS for sending and + * receiving messages. + * + * @datatypes + * nss_pppoe_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] notification_callback Callback for the message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Socket buffer types supported by this interface. + * @param[in] app_ctx Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_register_pppoe_session_if(uint32_t if_num, + nss_pppoe_msg_callback_t notification_callback, + struct net_device *netdev, uint32_t features, void *app_ctx); + +/** + * nss_unregister_pppoe_session_if + * Deregisters the PPPoE session interface from the NSS. + * + * @param[in] if_num NSS interface number. +. * + * @return + * None. + * + * @dependencies + * The tunnel interface must have been previously registered. 
+ */ +extern void nss_unregister_pppoe_session_if(uint32_t if_num); + +/** + * nss_pppoe_get_context + * Gets the PPPoE context used in PPPoE transmit message. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_pppoe_get_context(void); + +/** + * nss_pppoe_debug_stats_get + * Gets NSS session debug statistics. + * + * @param[out] stats_mem Pointer to the memory address, which must be large + * enough to hold all the statistics. + * + * @return + * None. + */ +extern void nss_pppoe_debug_stats_get(void *stats_mem); + +/** + * nss_pppoe_get_bridge_accel_mode + * Gets the PPPoE bridge acceleration mode. + * + * @return + * Current PPPoE bridge acceleration mode. + */ +extern enum nss_pppoe_br_accel_modes nss_pppoe_get_br_accel_mode(void); + +/** + * nss_pppoe_register_sysctl + * Registers the PPPoE system control table. + * + * @return + * None. + */ +void nss_pppoe_register_sysctl(void); + +/** + * nss_pppoe_unregister_sysctl + * Deregisters the PPPoE system control table. + * + * @return + * None. + * + * @dependencies + * The system control table must have been previously registered. + */ +void nss_pppoe_unregister_sysctl(void); + +/** + * nss_pppoe_msg_init + * Initializes a PPPoE message. + * + * @datatypes + * nss_pppoe_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num Interface number + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_pppoe_msg_init(struct nss_pppoe_msg *ncm, + uint16_t if_num, uint32_t type, uint32_t len, + void *cb, void *app_data); + +/** + * nss_pppoe_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. 
+ */ +extern int nss_pppoe_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_pppoe_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_pppoe_stats_unregister_notifier(struct notifier_block *nb); + +#endif /*__KERNEL__ */ + +/** + * @} + */ + +#endif /* __NSS_PPPOE_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_pptp.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_pptp.h new file mode 100644 index 000000000..6478684b9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_pptp.h @@ -0,0 +1,345 @@ +/* + ************************************************************************** + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_pptp.h + * NSS PPTP interface definitions. + */ + +#ifndef _NSS_PPTP_H_ +#define _NSS_PPTP_H_ + +#include "nss_dynamic_interface.h" + +/** + * @addtogroup nss_pptp_subsystem + * @{ + */ + +/** + * Maximum number of supported PPTP sessions is 4. + * Number of dynamic intefaces per session is 3. 
+ * Total 4 * 3 = 12 + */ +#define NSS_MAX_PPTP_DYNAMIC_INTERFACES 12 + +/** + * nss_pptp_metadata_types + * Message types for PPTP requests and responses. + */ +enum nss_pptp_metadata_types { + NSS_PPTP_MSG_SESSION_CONFIGURE, + NSS_PPTP_MSG_SESSION_DECONFIGURE, + NSS_PPTP_MSG_SYNC_STATS, + NSS_PPTP_MSG_MAX +}; + +/** + * nss_pptp_exception_events + * Exception events for PPTP encapsulation and decapsulation packets. + */ +enum nss_pptp_exception_events { + PPTP_EXCEPTION_EVENT_ENCAP_HEADROOM_ERR, + PPTP_EXCEPTION_EVENT_ENCAP_SMALL_SIZE, + PPTP_EXCEPTION_EVENT_ENCAP_PNODE_ENQUEUE_FAIL, + PPTP_EXCEPTION_EVENT_DECAP_NO_SEQ_NOR_ACK, + PPTP_EXCEPTION_EVENT_DECAP_INVAL_GRE_FLAGS, + PPTP_EXCEPTION_EVENT_DECAP_INVAL_GRE_PROTO, + PPTP_EXCEPTION_EVENT_DECAP_WRONG_SEQ, + PPTP_EXCEPTION_EVENT_DECAP_INVAL_PPP_HDR, + PPTP_EXCEPTION_EVENT_DECAP_PPP_LCP, + PPTP_EXCEPTION_EVENT_DECAP_UNSUPPORTED_PPP_PROTO, + PPTP_EXCEPTION_EVENT_DECAP_PNODE_ENQUEUE_FAIL, + PPTP_EXCEPTION_EVENT_MAX, +}; + +/** + * nss_pptp_stats_session + * PPTP debug statistics. 
+ */ +enum nss_pptp_stats_session { + NSS_PPTP_STATS_ENCAP_RX_PACKETS, + NSS_PPTP_STATS_ENCAP_RX_BYTES, + NSS_PPTP_STATS_ENCAP_TX_PACKETS, + NSS_PPTP_STATS_ENCAP_TX_BYTES, + NSS_PPTP_STATS_ENCAP_RX_QUEUE_0_DROP, + NSS_PPTP_STATS_ENCAP_RX_QUEUE_1_DROP, + NSS_PPTP_STATS_ENCAP_RX_QUEUE_2_DROP, + NSS_PPTP_STATS_ENCAP_RX_QUEUE_3_DROP, + NSS_PPTP_STATS_DECAP_RX_PACKETS, + NSS_PPTP_STATS_DECAP_RX_BYTES, + NSS_PPTP_STATS_DECAP_TX_PACKETS, + NSS_PPTP_STATS_DECAP_TX_BYTES, + NSS_PPTP_STATS_DECAP_RX_QUEUE_0_DROP, + NSS_PPTP_STATS_DECAP_RX_QUEUE_1_DROP, + NSS_PPTP_STATS_DECAP_RX_QUEUE_2_DROP, + NSS_PPTP_STATS_DECAP_RX_QUEUE_3_DROP, + NSS_PPTP_STATS_SESSION_ENCAP_HEADROOM_ERR, + NSS_PPTP_STATS_SESSION_ENCAP_SMALL_SIZE, + NSS_PPTP_STATS_SESSION_ENCAP_PNODE_ENQUEUE_FAIL, + NSS_PPTP_STATS_SESSION_DECAP_NO_SEQ_NOR_ACK, + NSS_PPTP_STATS_SESSION_DECAP_INVAL_GRE_FLAGS, + NSS_PPTP_STATS_SESSION_DECAP_INVAL_GRE_PROTO, + NSS_PPTP_STATS_SESSION_DECAP_WRONG_SEQ, + NSS_PPTP_STATS_SESSION_DECAP_INVAL_PPP_HDR, + NSS_PPTP_STATS_SESSION_DECAP_PPP_LCP, + NSS_PPTP_STATS_SESSION_DECAP_UNSUPPORTED_PPP_PROTO, + NSS_PPTP_STATS_SESSION_DECAP_PNODE_ENQUEUE_FAIL, + NSS_PPTP_STATS_SESSION_MAX +}; + +/** + * nss_pptp_stats_notification + * PPTP statistics structure. + */ +struct nss_pptp_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ + enum nss_dynamic_interface_type if_type; /**< Dynamic interface type. */ + uint64_t stats[NSS_PPTP_STATS_SESSION_MAX]; /**< PPTP statistics. */ +}; + +/** + * nss_pptp_session_configure_msg + * Message information for configuring a PPTP session. + */ +struct nss_pptp_session_configure_msg { + uint16_t src_call_id; /**< Local call ID for caller or callee. */ + uint16_t dst_call_id; /**< Peer call ID for caller or callee. */ + uint32_t sip; /**< Local tunnel endpoint. */ + uint32_t dip; /**< Remote tunnel endpoint. */ + uint32_t sibling_ifnum_pri; /**< Primary sibling interface. 
*/ + uint32_t sibling_ifnum_aux; /**< Auxiliary sibling interface. */ +}; + +/** + * nss_pptp_session_deconfigure_msg + * Message information for deleting a PPTP session. + */ +struct nss_pptp_session_deconfigure_msg { + uint16_t src_call_id; /**< Local call ID */ +}; + +/** + * nss_pptp_sync_session_stats_msg + * Message information for PPTP synchronization statistics. + */ +struct nss_pptp_sync_session_stats_msg { + struct nss_cmn_node_stats node_stats; + /**< Common node statistics for the encapsulation direction. */ + uint32_t exception_events[PPTP_EXCEPTION_EVENT_MAX]; + /**< Statistics of events which casued packets to exception to host. */ +}; + +/** + * nss_pptp_msg + * Data for sending and receiving PPTP messages. + */ +struct nss_pptp_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a PPTP message. + */ + union { + struct nss_pptp_session_configure_msg session_configure_msg; + /**< Session configuration message. */ + struct nss_pptp_session_deconfigure_msg session_deconfigure_msg; + /**< Session de-configuration message. */ + struct nss_pptp_sync_session_stats_msg stats; + /**< Session statistics message. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving PPTP messages. + * + * @datatypes + * nss_pptp_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_pptp_msg_callback_t)(void *app_data, struct nss_pptp_msg *msg); + +/** + * nss_pptp_tx_msg_sync + * Sends a PPTP message synchronously to NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_pptp_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_pptp_tx_msg_sync(struct nss_ctx_instance *nss_ctx, + struct nss_pptp_msg *msg); + +/** + * nss_pptp_tx_buf + * Sends a data packet to the firmware. 
+ * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * @param[in] skb Pointer to the data socket buffer. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_pptp_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, struct sk_buff *skb); + +/** + * nss_pptp_get_context + * Gets the PPTP context used in nss_pptp_tx. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_pptp_get_context(void); + +/** + * Callback function for receiving PPTP tunnel data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_pptp_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_register_pptp_if + * Registers the PPTP tunnel interface with the NSS for sending and + * receiving messages. + * + * @datatypes + * nss_pptp_callback_t \n + * nss_pptp_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] type Dynamic interface type. + * @param[in] pptp_data_callback Callback for the data. + * @param[in] notification_callback Callback for the message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Socket buffer types supported by this interface. + * @param[in] app_ctx Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. 
+ */ +extern struct nss_ctx_instance *nss_register_pptp_if(uint32_t if_num, uint32_t type, nss_pptp_callback_t pptp_data_callback, + nss_pptp_msg_callback_t notification_callback, struct net_device *netdev, uint32_t features, void *app_ctx); + +/** + * nss_unregister_pptp_if + * Deregisters the PPTP tunnel interface from the NSS. + * + * @param[in] if_num NSS interface number. +. * + * @return + * None. + * + * @dependencies + * The tunnel interface must have been previously registered. + */ +extern void nss_unregister_pptp_if(uint32_t if_num); + +/** + * nss_pptp_msg_init + * Initializes a PPTP message. + * + * @datatypes + * nss_pptp_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num Interface number + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_pptp_msg_init(struct nss_pptp_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** + * nss_pptp_register_handler + * Registers the PPTP interface with the NSS debug statistics handler. + * + * @return + * None. + */ +extern void nss_pptp_register_handler(void); + +/** + * nss_pptp_session_debug_stats_get + * Gets NSS session debug statistics. + * + * @param[out] stats_mem Pointer to the memory address, which must be large + * enough to hold all the statistics. + * + * @return + * None. + */ +extern void nss_pptp_session_debug_stats_get(void *stats_mem); + +/** + * nss_pptp_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_pptp_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_pptp_stats_unregister_notifier + * Deregisters a statistics notifier. 
+ * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_pptp_stats_unregister_notifier(struct notifier_block *nb); + +/** + * @} + */ + +#endif /* _NSS_PPTP_H_ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_profiler.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_profiler.h new file mode 100644 index 000000000..89ddf6586 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_profiler.h @@ -0,0 +1,406 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2015, 2017, 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_profiler.h + * NSS Profiler APIs + */ + +#ifndef __NSS_PROFILER_H +#define __NSS_PROFILER_H + +/** + * @addtogroup nss_profiler_subsystem + * @{ + */ + +/** + * Length of the counter name. + * + * This value allows all counter values to fit in a single 1400-byte UDP packet. + */ +#define PROFILE_COUNTER_NAME_LENGTH 20 + +#define PROFILE_MAX_APP_COUNTERS 24 /**< Maximum number of application counters. */ + +/** + * nss_profile_counter + * Counter statistics. 
+ */ +struct nss_profile_counter { + char name[PROFILE_COUNTER_NAME_LENGTH]; /**< Counter name. */ + uint32_t value; /**< Current value. */ +}; + +/** + * nss_profiler_message_types + * Message types for the Profiler. + * + * Do not alter this enumeration. However, adding more types is allowed. + */ +enum nss_profiler_message_types { + NSS_PROFILER_CHANGE_SAMPLING_RATE_MSG, /**< Host-to-NSS: ask to do a rate change. */ + NSS_PROFILER_START_MSG, /**< Host-to-NSS: start the NSS Profiler. */ + NSS_PROFILER_STOP_MSG, /**< Host-to-NSS: stop the NSS Profiler. */ + NSS_PROFILER_FLOWCTRL_MSG, /**< Host-to-NSS: do flow control on sampling. */ + NSS_PROFILER_DEBUG_RD_MSG, /**< Host-to-NSS: debug the output. */ + NSS_PROFILER_DEBUG_WR_MSG, /**< Host-to-NSS: debug the input. */ + NSS_PROFILER_DEBUG_REPLY_MSG, /**< NSS-to-host: debug response. */ + NSS_PROFILER_REPLY_MSG, /**< Check the response. */ + NSS_PROFILER_FIXED_INFO_MSG, /**< NSS-to-host: constant data. */ + NSS_PROFILER_COUNTERS_MSG, /**< NSS-to-host: counter information. */ + NSS_PROFILER_SAMPLES_MSG, /**< NSS-to-host: main sample data. */ + NSS_PROFILER_START_CAL, /**< Not for the host to use. */ + NSS_PROFILER_GET_SYS_STAT_EVENT, /**< Get the system status event. */ + NSS_PROFILER_SET_SYS_STAT_EVENT, /**< Set the system status event. */ + NSS_PROFILER_MAX_MSG_TYPES, /**< Maximum number of message types. */ +}; + +/** + * nss_profile_errors + * Profiler error types returned from the NSS. + */ +enum nss_profile_errors { + PROFILE_ERROR_NO_PROF_INIT = 1, + PROFILE_ERROR_EMEM, + PROFILE_ERROR_BAD_PKT, + PROFILE_ERROR_UNKNOWN_CMD, + PROFILE_ERROR_NO_DMA, + PROFILE_ERROR_MAX +}; + +/** + * nss_profiler_cmd_param + * Parameter information for the Profiler. + * + * Use this structure for per-session commands: START, STOP, FLOWCTRL, RATE. + */ +struct nss_profiler_cmd_param { + uint32_t hd_magic; /**< Common overlay in all headers. */ + uint32_t num_counters; + /**< Number of registered performance (application) counters. 
*/ + uint32_t ocm_size; /**< Size of the on-chip-memory. */ + uint32_t sram_start; /**< DDR starting address. */ + uint32_t rate; /**< Sampling rate. */ + uint32_t cpu_id; /**< ID of the chip register. */ + uint32_t cpu_freq; /**< Chip clock frequency. */ + uint32_t ddr_freq; /**< DDR memory speed. */ + + struct nss_profile_counter counters[PROFILE_MAX_APP_COUNTERS]; + /**< Application profiling counters. */ +}; + +/** + * nss_profiler_data_msg + * Message information for the Profiler. + */ +struct nss_profiler_data_msg { + uint32_t hd_magic; /**< Magic header for verification. */ + uint32_t msg_data[1]; /**< Variable length private data. */ +}; + +/** + * nss_profiler_debug_msg + * Message information for Profiler debugging. + */ +struct nss_profiler_debug_msg { + uint32_t hd_magic; /**< Magic header for verification. */ + uint32_t debug_data[256]; /**< Fixed length debug data. */ +}; + +/** + * nss_profiler_msg + * Data for sending and receiving Profiler messages. + */ +struct nss_profiler_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a Profiler message. + */ + union npm_body { + struct nss_profiler_cmd_param pcmdp; /**< Command parameters. */ + struct nss_profiler_debug_msg pdm; /**< Debug packet. */ + struct nss_profiler_data_msg msg; /**< Sampling data. */ + } payload; /**< Message payload. The data length is set in common message header. */ +}; + +/** + * nss_profile_sdma_producer + * DMA descriptor of producer. + */ +struct nss_profile_sdma_producer { + uint32_t intr_num; /**< Interrupt number. */ + uint32_t pkg_id; /**< Package ID that registered this entry. */ + uint32_t buf_size; /**< DMA buffer size. */ + uint32_t num_bufs; /**< Number of ring buffers. */ + uint32_t desc_ring; /**< Ring address (physical 32-bit). */ + uint32_t pad3w[3]; /**< Pad 32-byte alignment. */ +}; + +/** + * nss_u64_32_data + * 64-bit union for both 32/64 bits data aligned at 64-bit boundary. 
+ */ +union nss_u64_32_data { + uint64_t d64; /**< 64-bit space holder: may not be used. */ + uint32_t d32; /**< 32-bit direct data. */ + void *kp; /**< Kernel data pointer either 32 or 64 bits. */ +}; + +/** + * nss_u64_32_func + * 64-bit union for both 32/64 bits function aligned at 64-bit boundary. + */ +union nss_u64_32_func { + uint64_t f64; /**< 64-bit space holder: do not use. */ + void (*fp)(void*); /**< Function pointer: either 32 or 64 bits. */ +}; + +/** + * nss_profile_sdma_consumer + * DMA descriptor of consumer. + */ +struct nss_profile_sdma_consumer { + union nss_u64_32_data arg; /**< Dispatch function argument. */ + union nss_u64_32_func dispatch; /**< Dispatch function pointer. */ + union nss_u64_32_data ring; /**< DMA descriptor ring kernel address. */ + int64_t unused_lw; /**< Extra room in a Ubi32 cache line. */ +}; + +#define ARM_CACHE_LINE_SIZE 128 /**< ARM CPU cache line size in bytes. */ +#define NSS_CACHE_LINE_WORDS 8 /**< Ubi32 CPU cache line size in words. */ + +/** + * Number of DMA per control block. + */ +#define NSS_PROFILE_MAX_DMA_DESCRIPTORS (ARM_CACHE_LINE_SIZE / sizeof(struct nss_profile_sdma_producer) - 1) + +/** + * nss_profile_sdma_ctrl + * Soft DMA control block. + */ +struct nss_profile_sdma_ctrl { + int32_t num_rings; /**< Number of descriptor rings allocated, maximum is 3. */ + int32_t cur_ring; /**< Which ring is in use: Default 0. */ + int32_t pidx[NSS_PROFILE_MAX_DMA_DESCRIPTORS]; /**< Producer index. */ + + /** + * Pad for the first Ubi32 cache line in the first ARM cache line: Unused. + */ + int32_t pad_for_1st_cl_in_1st_arm_cl[NSS_CACHE_LINE_WORDS - 2 - NSS_PROFILE_MAX_DMA_DESCRIPTORS]; + struct nss_profile_sdma_producer producer[NSS_PROFILE_MAX_DMA_DESCRIPTORS]; /**< DMA producer structure. */ + + int32_t cidx[NSS_PROFILE_MAX_DMA_DESCRIPTORS]; /**< Consumer index. */ + + /** + * Pad for the first Ubi32 cache line in the second ARM cache line: Unused. 
+ */ + int32_t pad_for_1st_cl_in_2nd_arm_cl[NSS_CACHE_LINE_WORDS - NSS_PROFILE_MAX_DMA_DESCRIPTORS]; + struct nss_profile_sdma_consumer consumer[NSS_PROFILE_MAX_DMA_DESCRIPTORS]; /**< DMA consumer structure. */ +}; + +/** + * Callback function for receiving Profiler messages. + * + * @note: Memory (buffer) pointed by npm is owned by caller, that is, NSS driver. + * + * @datatypes + * nss_profiler_msg + * + * @param[in] ctx Pointer to the context of the NSS process (core). + * @param[in] npm Pointer to the NSS Profiler message. + */ +typedef void (*nss_profiler_callback_t)(void *ctx, struct nss_profiler_msg *npm); + +/** + * nss_profiler_notify_register + * Registers the Profiler interface with the NSS driver for sending and receiving messages. + * + * This function must be called once for each core. + * + * @datatypes + * nss_core_id_t \n + * nss_profiler_callback_t + * + * @param[in] profiler_callback Callback for the data. + * @param[in] core_id NSS core ID. + * @param[in] ctx Pointer to the context of the NSS core. The context is + provided to caller in the registered callback function. + * + * @return + * Pointer to the NSS core context. + * + * @dependencies + * The caller must provide the valid core ID that is being profiled. + */ +extern void *nss_profiler_notify_register(nss_core_id_t core_id, nss_profiler_callback_t profiler_callback, void *ctx); + +/** + * nss_profiler_notify_unregister + * Deregisters the Profiler interface from the NSS driver. + * + * @datatypes + * nss_core_id_t + * + * @param[in] core_id NSS core ID. + * + * @return + * None. + * + * @dependencies + * The interface must have been previously registered. + */ +extern void nss_profiler_notify_unregister(nss_core_id_t core_id); + +/** + * nss_profiler_if_tx_buf + * Sends a Profiler command to the NSS firmware. + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] buf Buffer to send to NSS firmware. + * @param[in] len Length of the buffer. 
+ * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Status of the Tx operation. + * + * @dependencies + * A valid context must be provided (for the right core). + * This context was returned during registration. + */ +extern nss_tx_status_t nss_profiler_if_tx_buf(void *nss_ctx, + void *buf, uint32_t len, void *cb, void *app_data); + +/** + * nss_profiler_alloc_dma + * Allocate profiler DMA for transmitting samples. + * + * @datatypes + * nss_ctx_instance \n + * nss_profile_sdma_producer + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] dma_p Pointer to return DMA control. + * + * @return + * Buffer adddress. + */ +extern void *nss_profiler_alloc_dma(struct nss_ctx_instance *nss_ctx, struct nss_profile_sdma_producer **dma_p); + +/** + * nss_profiler_release_dma() + * Free profiler DMA. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * + * @return + * None. + */ +extern void nss_profiler_release_dma(struct nss_ctx_instance *nss_ctx); + +/* + * nss_profile_dma_register_cb + * Register a handler for profile DMA. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] id DMA ID; typical value is 0. + * @param[in] cb Callback function pointer. + * @param[in] arg Callback function argument pointer. + * + * @return + * True on success; or false on failure. + */ +extern bool nss_profile_dma_register_cb(struct nss_ctx_instance *nss_ctx, int id, + void (*cb)(void*), void *arg); + +/** + * nss_profile_dma_deregister_cb() + * Deregister callback for profile DMA. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] id DMA ID; typical value is 0. + * + * @return + * True on success; or false on failure. 
+ */ +extern bool nss_profile_dma_deregister_cb(struct nss_ctx_instance *nss_ctx, int id); + +/** + * nss_profile_dma_get_ctrl() + * API to get profile DMA control. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * + * @return + * DMA controller. + */ +extern struct nss_profile_sdma_ctrl *nss_profile_dma_get_ctrl(struct nss_ctx_instance *nss_ctx); + +/** + * profile_register_performance_counter + * Registers a Linux counter with the profiler for any variables. + * + * @param[in] counter Pointer to the variable address. + * @param[in] name Pointer to the variable name: if name is longer than + 23 characters, then only the first 23 bytes are used. + * + * @return + * 0 if counter array is full -- too many registered counters. + * 1 on success + */ +extern int profile_register_performance_counter(volatile unsigned int *counter, char *name); + +/** + * nss_profiler_msg_init + * Initializes a Profiler-specific message. + * + * @datatypes + * nss_profiler_msg \n + * nss_profiler_callback_t + * + * @param[in,out] npm Pointer to the NSS Profiler message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the message. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_profiler_msg_init(struct nss_profiler_msg *npm, uint16_t if_num, + uint32_t type, uint32_t len, + nss_profiler_callback_t cb, void *app_data); + +/** + * @} + */ + +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_project.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_project.h new file mode 100644 index 000000000..4b1b7119b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_project.h @@ -0,0 +1,176 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * @file nss_project.h + * NSS project interface definitions. + */ + +#ifndef __NSS_PROJECT_H +#define __NSS_PROJECT_H + +/** + * @addtogroup nss_project_subsystem + * @{ + */ + + /** + * Maximum number of IRQs for which a message will have statistics. + * + * Must be defined on firmware and host such that NSS_PROJECT_IRQS_PER_MESSAGE * + * sizeof(struct nss_project_irq_stats) + 8 + sizeof(struct nss_cmn_msg) is smaller + * than the maximum payload size of an sk_buff (1792), 8 being the number of + * bytes needed to store the thread number and number of statistics written. + */ +#define NSS_PROJECT_IRQS_PER_MESSAGE 32 + +/** + * nss_project_message_types + * Project message types. + */ +enum nss_project_message_types { + NSS_PROJECT_MSG_WT_STATS_ENABLE, + /**< Message to enable or disable worker thread statistics. */ + NSS_PROJECT_MSG_WT_STATS_NOTIFY, + /**< NSS to HLOS message containing worker thread statistics. */ + NSS_PROJECT_MSG_MAX, +}; + +/** + * nss_project_error_types + * Project error types. + */ +enum nss_project_error_types { + NSS_PROJECT_ERROR_UNKNOWN_MSG, + /**< Unrecognized message type. 
*/ + NSS_PROJECT_ERROR_WT_STATS_UNSUPPORTED, + /**< The firmware does not support worker thread statistics. */ + NSS_PROJECT_ERROR_WT_STATS_REDUNDANT_ENABLE, + /**< The firmware received a redundant request to enable worker thread statistics. */ + NSS_PROJECT_ERROR_MAX, +}; + +/** + * nss_project_msg_wt_stats_enable + * Enables or disables worker thread statistics collection. + */ +struct nss_project_msg_wt_stats_enable { + + /* + * NSS to HLOS + */ + uint32_t worker_thread_count; + /**< Number of worker threads supported by this core. */ + uint32_t irq_count; + /**< Number of IRQs supported by this core. */ + + /* + * HLOS to NSS + */ + bool enable; /**< True to enable, false to disable. */ +}; + +/** + * nss_project_irq_stats + * Statistics for an individual IRQ on a worker thread. + */ +struct nss_project_irq_stats { + uint64_t count; /**< Number of times callback has been executed */ + uint32_t callback; /**< Address of the callback function */ + uint32_t irq; /**< IRQ number to which callback function is bound */ + uint32_t ticks_min; /**< Fewest ticks taken in callback function */ + uint32_t ticks_avg; /**< Exponential moving average of ticks */ + uint32_t ticks_max; /**< Maximum ticks */ + uint32_t insn_min; /**< Fewest instructions executed in callback function */ + uint32_t insn_avg; /**< Exponential moving average of instruction count */ + uint32_t insn_max; /**< Maximum instructions */ +}; + +/** + * nss_project_msg_wt_stats_notify + * Message containing statistics for active worker_thread IRQs. + */ +struct nss_project_msg_wt_stats_notify { + uint32_t threadno; /**< The thread whose stats are contained. */ + uint32_t stats_written; /**< The number of statistics written to the array. */ + struct nss_project_irq_stats stats[NSS_PROJECT_IRQS_PER_MESSAGE]; + /**< The per-IRQ statistics for the worker thread */ +}; + +/** + * nss_project_msg + * General message structure for project messages. 
+ */ +struct nss_project_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a message to or from the project code. + */ + union { + struct nss_project_msg_wt_stats_enable wt_stats_enable; + /**< Enable or disable worker thread statistics. */ + struct nss_project_msg_wt_stats_notify wt_stats_notify; + /**< One-way worker thread statistics message. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving project messages. + * + * @datatypes + * nss_project_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the project message. + */ +typedef void (*nss_project_msg_callback_t)(void *app_data, struct nss_project_msg *msg); + +/** + * nss_project_register_sysctl + * Registers the project sysctl table to the sysctl tree. + * + * @return + * None. + */ +void nss_project_register_sysctl(void); + +/** + * nss_project_unregister_sysctl + * De-registers the project sysctl table from the sysctl tree. + * + * @return + * None. + * + * @dependencies + * The system control must have been previously registered. + */ +void nss_project_unregister_sysctl(void); + +/** + * nss_project_register_handler + * Registers the project message handler. + * + * @return + * None. + */ +void nss_project_register_handler(struct nss_ctx_instance *nss_ctx); + +/** + * @} + */ + +#endif /* __NSS_PROJECT_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_pvxlan.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_pvxlan.h new file mode 100644 index 000000000..2de0da2b5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_pvxlan.h @@ -0,0 +1,371 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * @file nss_pvxlan.h + * NSS proxy VxLAN interface definitions. + */ + +#ifndef __NSS_PVXLAN_H +#define __NSS_PVXLAN_H + +/** + * @addtogroup nss_pvxlan_subsystem + * @{ + */ + +/** + * Size of the headroom required for proxy VxLAN packets. + */ +#define NSS_PVXLAN_HEADROOM 256 + +/** + * Maximum number of supported proxy VxLAN tunnel sessions. + */ +#define NSS_PVXLAN_MAX_INTERFACES 32 + +/* + * Proxy VxLAN Rule configure message flags + */ +#define NSS_PVXLAN_TUNNEL_IPV4 0x0001 /**< IPv4 tunnel. */ +#define NSS_PVXLAN_TUNNEL_IPV6 0x0002 /**< IPv6 tunnel. */ +#define NSS_PVXLAN_TUNNEL_UDP 0x0010 /**< UDP tunnel. */ +#define NSS_PVXLAN_TUNNEL_UDPLite 0x0020 /**< UDPLite tunnel. */ +#define NSS_PVXLAN_TUNNEL_ENCAP_UDPLITE_HDR_CSUM 0x0100 + /**< Generate only UDPLite header checksum. Otherwise whole UDPLite payload. */ + +/** + * nss_pvxlan_msg_type + * Proxy VxLAN message types. + */ +typedef enum nss_pvxlan_msg_type { + NSS_PVXLAN_MSG_TYPE_SYNC_STATS, /**< Statistics synchronization message. */ + NSS_PVXLAN_MSG_TYPE_TUNNEL_CREATE_RULE, /**< Creating tunnel rule. */ + NSS_PVXLAN_MSG_TYPE_TUNNEL_DESTROY_RULE, + /**< Destroying tunnel rule. 
*/ + NSS_PVXLAN_MSG_TYPE_TUNNEL_ENABLE, /**< Enable the tunnel. */ + NSS_PVXLAN_MSG_TYPE_TUNNEL_DISABLE, /**< Disable the tunnel. */ + NSS_PVXLAN_MSG_TYPE_MAC_ADD, /**< Add MAC rule to the database. */ + NSS_PVXLAN_MSG_TYPE_MAC_DEL, /**< Remove MAC rule from the database. */ + NSS_PVXLAN_MSG_TYPE_MAX, /**< Maximum message type. */ +} nss_pvxlan_msg_type_t; + +/** + * nss_pvxlan_error_response_types + * Error types for proxy VxLAN responses to messages from the host. + */ +typedef enum nss_pvxlan_error_response_types { + NSS_PVXLAN_ERROR_UNKNOWN_TYPE = 1, /**< Unknown type error. */ + NSS_PVXLAN_ERROR_INVALID_L3_PROTO, /**< L3 Protocol is invalid error. */ + NSS_PVXLAN_ERROR_INVALID_UDP_PROTO, /**< UDP Protocol is invalid error. */ + NSS_PVXLAN_ERROR_TUNNEL_DISABLED, /**< Tunnel is already disabled error. */ + NSS_PVXLAN_ERROR_TUNNEL_ENABLED, /**< Tunnel is already enabled error. */ + NSS_PVXLAN_ERROR_TUNNEL_ENTRY_EXIST, + /**< Tunnel is already exist error. */ + NSS_PVXLAN_ERROR_TUNNEL_TABLE_FULL, + /**< Tunnel table is full error. */ + NSS_PVXLAN_ERROR_INVALID_TUNNEL_ID, /**< Tunnel ID is invalid error. */ + NSS_PVXLAN_ERROR_MAC_TABLE_FULL, /**< MAC table is full error. */ + NSS_PVXLAN_ERROR_MAC_EXIST, /**< MAC does already exist in the table error. */ + NSS_PVXLAN_ERROR_MAC_NOT_EXIST, /**< MAC does not exist in the table error. */ + NSS_PVXLAN_ERROR_MAC_ENTRY_UNHASHED, + /**< MAC entry is not hashed in table. */ + NSS_PVXLAN_ERROR_MAC_ENTRY_INSERT_FAILED, + /**< Insertion to MAC table is failed. */ + NSS_PVXLAN_ERROR_UDP_ENCAP_TUNNEL_ID_IN_USE, + /**< Given tunnel ID is currently used. */ + PVXLAN_ERROR_MSG_TUNNEL_ADD_FAILED, /**< Tunnel add information failed. */ + PVXLAN_ERROR_MSG_MAC_ENTRY_ALLOC_FAILED, + /**< MAC entry allocation failed. */ + PVXLAN_ERROR_MSG_MAC_ENTRY_DELETE_FAILED, + /**< MAC entry deletion failed. */ + NSS_PVXLAN_ERROR_MAX, /**< Maximum error type. 
*/ +} nss_pvxlan_error_response_t; + +/** + * nss_pvxlan_stats_msg + * Per-tunnel statistics messages from the NSS firmware. + */ +struct nss_pvxlan_stats_msg { + struct nss_cmn_node_stats node_stats; /**< Common firmware statistics. */ + uint32_t mac_db_lookup_failed; /**< MAC Database look up failed. */ + uint32_t udp_encap_lookup_failed; /**< MAC Database look up failed. */ + uint32_t dropped_malformed; /**< Packet is malformed. */ + uint32_t dropped_next_node_queue_full; /**< Next node dropped the packet. */ + uint32_t dropped_hroom; /**< Transmit dropped due to insufficent headroom. */ + uint32_t dropped_ver_mis; /**< Transmit dropped due to version mismatch. */ + uint32_t dropped_zero_sized_packet; /**< Transmit dropped due to zero sized packet. */ + uint32_t dropped_pbuf_alloc_failed; /**< Receive side pbuf allocation failed. */ + uint32_t dropped_linear_failed; /**< Receive side linearization failed. */ +}; + +/** + * nss_pvxlan_ip + * IP versions. + */ +struct nss_pvxlan_ip { + /** + * Union of IPv4 and IPv6 IP addresses. + */ + union { + uint32_t ipv4; /**< IPv4 address. */ + uint32_t ipv6[4]; /**< IPv6 address. */ + } ip; /**< Union of IPv4 and IPv6 IP addresses. */ +}; + +/** + * nss_pvxlan_encap_rule + * Encapsulation information for a proxy VxLAN tunnel. + */ +struct nss_pvxlan_encap_rule { + struct nss_pvxlan_ip src; /**< Source IP. */ + uint32_t src_port; /**< Source port. */ + struct nss_pvxlan_ip dest; /**< Destination IP. */ + uint32_t dest_port; /**< Destination port. */ +}; + +/** + * nss_pvxlan_rule_msg + * Proxy VxLAN rule message. + * + * The same rule structure applies for both encapsulation and decapsulation + * in a tunnel. + */ +struct nss_pvxlan_rule_msg { + struct nss_pvxlan_encap_rule encap; /**< Encapsulation portion of the rule. */ + uint32_t tunnel_id; /**< Tunnel ID. */ + uint16_t flags; /**< Tunnel type flags. */ + int8_t rps; + /**< Receive packet steering number. Set -1 to let NSS firmware decide. 
*/ +}; + +/** + * nss_pvxlan_tunnel_state_msg + * To enable/disable the tunnel. + */ +struct nss_pvxlan_tunnel_state_msg { + uint32_t sibling_if_num; /**< Sibling interface number. */ +}; + +/** + * nss_pvxlan_mac_msg + * Proxy VxLAN MAC message structure. + */ +struct nss_pvxlan_mac_msg { + uint16_t mac_addr[3]; /**< MAC address. */ + uint16_t flags; /**< Flags. */ + uint32_t vnet_id; /**< Virtual net ID. */ + uint32_t tunnel_id; /**< Tunnel ID. */ + uint16_t policy_id; /**< Policy ID. */ +}; + +/** + * nss_pvxlan_msg + * Data for sending and receiving proxy VxLAN messages. + */ +struct nss_pvxlan_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a proxy VxLAN common message. + */ + union { + struct nss_pvxlan_stats_msg stats; + /**< Proxy VxLAN statistics. */ + struct nss_pvxlan_rule_msg rule_cfg; + /**< Rule information. */ + struct nss_pvxlan_rule_msg rule_uncfg; + /**< Rule information. */ + struct nss_pvxlan_tunnel_state_msg enable; + /**< Enable the tunnel. */ + struct nss_pvxlan_mac_msg mac_add; + /**< MAC rule add message. */ + struct nss_pvxlan_mac_msg mac_del; + /**< MAC rule delete message. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving proxy VxLAN tunnel data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_pvxlan_buf_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for receiving proxy VxLAN tunnel messages. + * + * @datatypes + * nss_pvxlan_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. 
+ */ +typedef void (*nss_pvxlan_msg_callback_t)(void *app_data, struct nss_pvxlan_msg *msg); + +/** + * nss_pvxlan_tx_msg + * Sends proxy VxLAN tunnel messages to the NSS. + * + * Do not call this function from a softirq or interrupt because it + * might sleep if the NSS firmware is busy serving another host thread. + * + * @datatypes + * nss_ctx_instance \n + * nss_pvxlan_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in,out] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_pvxlan_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_pvxlan_msg *msg); + +/** + * nss_pvxlan_tx_msg_sync + * Sends proxy VxLAN tunnel messages to the NSS. + * + * Do not call this function from a softirq or interrupt because it + * might sleep if the NSS firmware is busy serving another host thread. + * + * @datatypes + * nss_ctx_instance \n + * nss_pvxlan_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in,out] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_pvxlan_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_pvxlan_msg *msg); + +/** + * nss_pvxlan_tx_buf + * Sends a proxy VXLAN tunnel data buffer to the NSS interface. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] buf Pointer to the data buffer. + * @param[in] if_num NSS interface number. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_pvxlan_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *buf, uint32_t if_num); + +/** + * nss_pvxlan_unregister + * Deregisters the proxy VxLAN tunnel interface from the NSS interface. + * + * @param[in] if_num NSS interface number. + * + * @return + * TRUE or FALSE + * + * @dependencies + * The tunnel interface must have been previously registered. 
+ */ +extern bool nss_pvxlan_unregister(uint32_t if_num); + +/** + * nss_pvxlan_register + * Registers the proxy VxLAN tunnel interface with the NSS for sending and + * receiving tunnel messages. + * + * @datatypes + * nss_pvxlan_msg_callback_t \n + * nss_pvxlan_buf_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] data_cb Data callback for the proxy VXLAN tunnel data. + * @param[in] notify_cb Notify callback for the proxy VXLAN tunnel data. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_pvxlan_register(uint32_t if_num, nss_pvxlan_buf_callback_t data_cb, + nss_pvxlan_msg_callback_t notify_cb, struct net_device *netdev, uint32_t features); + +/** + * nss_pvxlan_get_ctx + * Gets the NSS context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_pvxlan_get_ctx(void); + +/** + * nss_pvxlan_ifnum_with_core_id + * Gets the proxy VxLAN interface number with the core ID. + * + * @param[in] if_num NSS interface number. + * + * @return + * Interface number with the core ID. + */ +extern int nss_pvxlan_ifnum_with_core_id(int if_num); + +/** + * nss_pvxlan_init + * Initializes the proxy VXLAN interface. + * + * @return + * None. + */ +extern void nss_pvxlan_init(void); + +/** + * nss_pvxlan_msg_init + * Initializes a proxy VxLAN message. + * + * @datatypes + * nss_pvxlan_msg \n + * nss_pvxlan_msg_callback_t + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. 
+ */ +extern void nss_pvxlan_msg_init(struct nss_pvxlan_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, + nss_pvxlan_msg_callback_t cb, void *app_data); + +/** + * @} + */ + +#endif /* __NSS_PVXLAN_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_qrfs.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_qrfs.h new file mode 100644 index 000000000..486baf9d4 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_qrfs.h @@ -0,0 +1,193 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * @file nss_qrfs.h + * NSS QRFS interface definitions. + */ + +#ifndef __NSS_QRFS_H +#define __NSS_QRFS_H + +/** + * @addtogroup nss_qrfs_subsystem + * @{ + */ + +/** + * nss_qrfs_msg_types + * Message types for the NSS QRFS. + */ +enum nss_qrfs_msg_types { + NSS_QRFS_MSG_FLOW_ADD, + NSS_QRFS_MSG_FLOW_DELETE, + NSS_QRFS_MSG_MAC_ADD, + NSS_QRFS_MSG_MAC_DELETE, + NSS_QRFS_MSG_STATS_SYNC, + NSS_QRFS_MSG_MAX, +}; + +/** + * nss_qrfs_error_types + * Error types for the NSS QRFS. 
+ */ +enum nss_qrfs_error_types { + NSS_QRFS_ERROR_INVALID_MSG_TYPE, + NSS_QRFS_ERROR_INVALID_MSG_SIZE, + NSS_QRFS_ERROR_INVALID_IP_VERSION, + NSS_QRFS_ERROR_V4_FLOW_TABLE_FULL, + NSS_QRFS_ERROR_V6_FLOW_TABLE_FULL, + NSS_QRFS_ERROR_MAC_TABLE_FULL, + NSS_QRFS_ERROR_MAX, +}; + +/** + * nss_qrfs_flow_rule_msg + * Information for the NSS QRFS flow rule message. + */ +struct nss_qrfs_flow_rule_msg { + uint16_t src_port; /**< Source port. */ + uint16_t dst_port; /**< Destination port. */ + uint32_t ip_version; /**< IPv4:4 IPv6:6. */ + uint32_t src_addr[4]; /**< Source IP address. */ + uint32_t dst_addr[4]; /**< Destination IP address. */ + uint16_t protocol; /**< IP protocol. */ + uint16_t cpu; /**< CPU core ID. */ + uint32_t if_num; /**< Physical interface number. */ +}; + +/** + * nss_qrfs_mac_rule_msg + * Information for the NSS QRFS MAC rule message. + */ +struct nss_qrfs_mac_rule_msg { + uint8_t mac[ETH_ALEN]; /**< Ethernet address. */ + uint16_t cpu; /**< CPU core ID. */ + uint32_t if_num; /**< Physical interface number. */ +}; + +/** + * nss_qrfs_stats_sync_msg + * Information for the NSS QRFS statistics message. + */ +struct nss_qrfs_stats_sync_msg { + struct nss_cmn_node_stats node_stats; /**< Common pnode statistics. */ + uint32_t invalid_offset; /**< Packets with invalid offset. */ + uint32_t unknown_protocol; /**< Protocol other than TCP, UDP. */ + uint32_t ipv4_flow_rule_hits; /**< Number of IPv4 flow rule hits. */ + uint32_t ipv6_flow_rule_hits; /**< Number of IPv6 flow rule hits. */ +}; + +/** + * nss_qrfs_msg + * Data for sending and receiving NSS QRFS rule or statistics messages. + */ +struct nss_qrfs_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a NSS QRFS rule or statistics message. + */ + union { + struct nss_qrfs_flow_rule_msg flow_add; /**< Add flow rule. */ + struct nss_qrfs_flow_rule_msg flow_delete; /**< Delete flow rule. */ + struct nss_qrfs_mac_rule_msg mac_add; /**< Add MAC rule. 
*/ + struct nss_qrfs_mac_rule_msg mac_delete; /**< Delete MAC rule. */ + struct nss_qrfs_stats_sync_msg stats_sync; /**< Synchronize statistics. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving QRFS messages. + * + * @datatypes + * nss_qrfs_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_qrfs_msg_callback_t)(void *app_data, struct nss_qrfs_msg *msg); + +/** + * nss_qrfs_register_handler + * Registers the QRFS interface with the NSS for sending and receiving + * messages. + * + * @datatypes + * nss_ctx_instance + * + * @return + * None. + */ +void nss_qrfs_register_handler(struct nss_ctx_instance *nss_ctx); + +/** + * nss_qrfs_notify_register + * Registers a notifier callback for QRFS messages with the NSS. + * + * @datatypes + * nss_qrfs_msg_callback_t + * + * @param[in] core NSS core number index to the notifier callback table. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_qrfs_notify_register(int core, nss_qrfs_msg_callback_t cb, void *app_data); + +/** + * nss_qrfs_notify_unregister + * Deregisters a QRFS message notifier callback from the NSS. + * + * @param[in] core NSS core number index to the notifier callback table. + * + * @return + * None. + */ +void nss_qrfs_notify_unregister(int core); + +/** + * nss_qrfs_set_flow_rule + * Sends a QRFS message to the NSS core to set the flow rule. + * + * @datatypes + * sk_buff + * + * @param[in] skb Pointer to the SKB buffer. + * @param[in] cpu CPU number to set in the flow table. + * @param[in] action Action to perform on the flow table. + * + * @return + * Status of the Tx operation. 
+ */ +nss_tx_status_t nss_qrfs_set_flow_rule(struct sk_buff *skb, uint32_t cpu, uint32_t action); + +/** + * nss_qrfs_init + * Initializes the QRFS. + * + * @return + * None. + */ +void nss_qrfs_init(void); + +/** + * @} + */ + +#endif /* __NSS_QRFS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_qvpn.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_qvpn.h new file mode 100644 index 000000000..931da43dc --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_qvpn.h @@ -0,0 +1,522 @@ +/* + ************************************************************************** + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_qvpn.h + * NSS QVPN interface definitions. + */ + +#ifndef _NSS_QVPN_H_ +#define _NSS_QVPN_H_ + +/** + * @addtogroup nss_qvpn_subsystem + * @{ + */ + +#define NSS_QVPN_CMDS_MAX 10 /**< Maximum number of QVPN commands supported. */ +#define NSS_QVPN_VPN_HDR_HEAD_SIZE_MAX 64 /**< Maximum size of QVPN header. */ +#define NSS_QVPN_VPN_HDR_TAIL_SIZE_MAX 32 /**< Maximum size of QVPN tail. */ +#define NSS_QVPN_IV_SIZE_MAX 16 /**< Maximum size of IV supported. 
*/ +#define NSS_QVPN_SESS_ID_SIZE_MAX 8 /**< Maximum size of session ID. */ + +/* + * QVPN L3/L4 header flags. + */ +#define NSS_QVPN_HDR_FLAG_IPV6 0x0001 /**< Outer L3 header is IPv6. */ +#define NSS_QVPN_HDR_FLAG_L4_UDP 0x0002 /**< L4 is UDP. */ + +/** + * nss_qvpn_msg_type + * Message types for QVPN NSS firmware. + */ +enum nss_qvpn_msg_type { + NSS_QVPN_MSG_TYPE_TUNNEL_CONFIGURE, /**< Configure QVPN tunnel instance. */ + NSS_QVPN_MSG_TYPE_TUNNEL_DECONFIGURE, /**< Deconfigure QVPN tunnel instance. */ + NSS_QVPN_MSG_TYPE_CRYPTO_KEY_ADD, /**< Add a new crypto key. */ + NSS_QVPN_MSG_TYPE_CRYPTO_KEY_DEL, /**< Delete crypto key. */ + NSS_QVPN_MSG_TYPE_CRYPTO_KEY_ACTIVATE, /**< Activate crypto key. */ + NSS_QVPN_MSG_TYPE_CRYPTO_KEY_DEACTIVATE,/**< Deactivate crypto key. */ + NSS_QVPN_MSG_TYPE_SYNC_STATS, /**< Statistics synchronization. */ + NSS_QVPN_MSG_TYPE_MAX /**< Maximum QVPN message type. */ +}; + +/** + * nss_qvpn_cmds_type + * Processing commands for QVPN. + */ +enum nss_qvpn_cmds_type { + NSS_QVPN_CMDS_TYPE_NONE, /**< Add VPN header to packet. */ + NSS_QVPN_CMDS_TYPE_ADD_VPN_HDR, /**< Add VPN header to packet. */ + NSS_QVPN_CMDS_TYPE_REMOVE_VPN_HDR, /**< Remove VPN header from packet. */ + NSS_QVPN_CMDS_TYPE_ADD_L3_L4_HDR, /**< Add L3/L4 header to packet. */ + NSS_QVPN_CMDS_TYPE_REMOVE_L3_L4_HDR, /**< Remove L3/L4 header from packet. */ + NSS_QVPN_CMDS_TYPE_ENCRYPT, /**< Send packet for encryption. */ + NSS_QVPN_CMDS_TYPE_DECRYPT, /**< Send packet for decryption. */ + NSS_QVPN_CMDS_TYPE_ANTI_REPLAY, /**< Sequence number processing. */ + NSS_QVPN_CMDS_TYPE_MAX /**< Maximum command supported. */ +}; + +/** + * nss_qvpn_profile + * QVPN profiles supported. + */ +enum nss_qvpn_profile { + NSS_QVPN_PROFILE_NONE, /**< No profile supported. */ + NSS_QVPN_PROFILE_CRYPTO_ENCAP, /**< Encapsulation profile with crypto enabled. */ + NSS_QVPN_PROFILE_CRYPTO_DECAP, /**< Decapsulation profile with crypto enabled. 
*/ + NSS_QVPN_PROFILE_ENCAP, /**< Encapsulation Profile with crypto disabled. */ + NSS_QVPN_PROFILE_DECAP, /**< Decapsulation Profile with crypto disabled. */ + NSS_QVPN_PROFILE_MAX, /**< Maximum profile. */ +}; + +/** + * nss_qvpn_pkt_drop_event + * Packets drop statistics from QVPN node. + */ +enum nss_qvpn_pkt_drop_event { + NSS_QVPN_PKT_DROP_EVENT_NO_TAILROOM, /**< Packet tail room not enough to copy HMAC to tail. */ + NSS_QVPN_PKT_DROP_EVENT_NO_HEADROOM, /**< Packet head room not enough to add QVPN headers. */ + NSS_QVPN_PKT_DROP_EVENT_CBUF_ALLOC_FAIL, /**< Received packet dropped as crypto buffer allocation failed. */ + NSS_QVPN_PKT_DROP_EVENT_PBUF_ALLOC_FAIL, /**< Received packet dropped as associated pbuf allocation failed. */ + NSS_QVPN_PKT_DROP_EVENT_SYNC_ALLOC_FAIL, /**< Pbuf dropped while doing statistics synchronization. */ + NSS_QVPN_PKT_DROP_EVENT_PBUF_UNALIGN, /**< Received packet dropped as unaligned buffer. */ + NSS_QVPN_PKT_DROP_EVENT_CRYPTO_ENQ_FAIL, /**< Received packet dropped as crypto enqueue failed. */ + NSS_QVPN_PKT_DROP_EVENT_LINEAR_COPY_FAIL, /**< Received packet dropped as scatter-gather linear copy failed. */ + NSS_QVPN_PKT_DROP_EVENT_FWD_ENQ_FAIL, /**< Received packet dropped as enqueue to next node failed. */ + NSS_QVPN_PKT_DROP_EVENT_POST_CRYPTO_Q_FULL, /**< Post crypto queue is full dropping pbuf. */ + NSS_QVPN_PKT_DROP_EVENT_NODE_INACTIVE, /**< Node is inactive dropping crypto processed packet. */ + NSS_QVPN_PKT_DROP_EVENT_NON_CRYPTO_PB, /**< Non crypto processed packet enqueued to post crypto queue. */ + NSS_QVPN_PKT_DROP_EVENT_PAD_INVALID, /**< Packet received with invalid padding. */ + NSS_QVPN_PKT_DROP_EVENT_BLK_UNALIGNED, /**< Received pbuf length is not cipher block aligned. */ + NSS_QVPN_PKT_DROP_EVENT_MAX /**< End of packet drop event list. */ +}; + +/** + * nss_qvpn_exception_event + * Exception events from QVPN node. 
+ */ +enum nss_qvpn_exception_event { + NSS_QVPN_EXCEPTION_EVENT_RX_CONTROL_PKT, /**< QVPN control packet received. */ + NSS_QVPN_EXCEPTION_EVENT_RX_TAIL_NOSUPP, /**< Protocol with tail not supported. */ + QVPN_TUN_EXCEPTION_EVENT_RX__HR_INSUFF, /**< Insufficient headroom. */ + NSS_QVPN_EXCEPTION_EVENT_RX_SESS_ID_INVALID, /**< Invalid session ID. */ + NSS_QVPN_EXCEPTION_EVENT_RX_DATA_PKT, /**< Data packets exceptioned to host. */ + NSS_QVPN_EXCEPTION_EVENT_RX_MALFORMED, /**< Malformed packet received. */ + NSS_QVPN_EXCEPTION_EVENT_MAX /**< End of exception event list. */ +}; + +/** + * nss_qvpn_error_type + * Error types for the QVPN interface. + */ +enum nss_qvpn_error_type { + NSS_QVPN_ERROR_TYPE_NONE, /**< No error. */ + NSS_QVPN_ERROR_TYPE_UNKNOWN_MSG, /**< Unknown message. */ + NSS_QVPN_ERROR_TUN_ALREADY_CONFIGURED, /**< Tunnel already configured. */ + NSS_QVPN_ERROR_TYPE_IF_INVALID, /**< Invalid interface. */ + NSS_QVPN_ERROR_TYPE_SIBLING_IF, /**< Invalid sibling interface number. */ + NSS_QVPN_ERROR_TYPE_IV_SIZE_INVALID, /**< Invalid IV size. */ + NSS_QVPN_ERROR_TYPE_HMAC_SIZE_INVALID, /**< Invalid HMAC size. */ + NSS_QVPN_ERROR_TYPE_CRYPTO_BLK_SIZE_INVALID, /**< Invalid crypto block size. */ + NSS_QVPN_ERROR_TYPE_SESSION_IDX_SIZE_INVALID, /**< Invalid session index size. */ + NSS_QVPN_ERROR_TYPE_CMD_NOT_SUPPORTED, /**< Command not supported. */ + NSS_QVPN_ERROR_TYPE_L4_PROTO_INVALID, /**< L4 protocol encapsulation is not supported. */ + NSS_QVPN_ERROR_TYPE_SIBLING_IF_TYPE, /**< Invalid sibling interface type. */ + NSS_QVPN_ERROR_TYPE_CMDS_COUNT_INVALID, /**< Total number of commands is invalid. */ + NSS_QVPN_ERROR_TYPE_ENTRY_NOT_FOUND, /**< Entry not found. */ + NSS_QVPN_ERROR_TYPE_ENTRY_NOT_ACTIVE, /**< Entry not active. */ + NSS_QVPN_ERROR_TYPE_ENTRY_ALREADY_ACTIVE, /**< Entry already active. */ + NSS_QVPN_ERROR_TYPE_CRYPTO_IDX_MISMATCH, /**< Invalid crypto index. */ + NSS_QVPN_ERROR_TYPE_KI_ALLOC_FAIL, /**< Key information allocation failure. 
*/ + NSS_QVPN_ERROR_TYPE_PROFILE_INVALID, /**< Invalid command profile. */ + NSS_QVPN_ERROR_TYPE_RX_TAIL_NOSUPP, /**< VPN with tail not supported. */ + NSS_QVPN_ERROR_TYPE_MAX /**< End of error list. */ +}; + +/** + * nss_qvpn_iv_type + * IV type for generating and copying in packet. + */ +enum nss_qvpn_iv_type { + NSS_QVPN_IV_TYPE_NONE, /**< No IV. */ + NSS_QVPN_IV_TYPE_STATIC, /**< Use static IV configured. */ + NSS_QVPN_IV_TYPE_DYNAMIC_RAND, /**< Generate IV randomly. */ + NSS_QVPN_IV_TYPE_MAX /**< End of IV type list. */ +}; + +/** + * nss_qvpn_pad_type + * Pad type for generating and copying in packet. + */ +enum nss_qvpn_pad_type { + NSS_QVPN_PAD_TYPE_NONE, /**< No padding. */ + NSS_QVPN_PAD_TYPE_PKCS7, /**< Generate pad buffer using PKCS7. */ + NSS_QVPN_PAD_TYPE_INC, /**< Generate pad buffer monotonically increasing sequence. */ + NSS_QVPN_PAD_TYPE_MAX /**< End of pad type. */ +}; + +/** + * nss_qvpn_anti_replay_alg + * Anti-replay algorithms supported. + */ +enum nss_qvpn_anti_replay_alg { + NSS_QVPN_ANTI_REPLAY_ALG_NONE, /**< No anti-replay. */ + NSS_QVPN_ANTI_REPLAY_ALG_REPLAY_WINDOW, /**< Generate pad buffer monotonically increasing sequence. */ + NSS_QVPN_ANTI_REPLAY_ALG_MAX /**< End of anti-replay algorithm. */ +}; + +/** + * nss_qvpn_crypto_mode + * Crypto modes supported. + */ +enum nss_qvpn_crypto_mode { + NSS_QVPN_CRYPTO_MODE_NONE, /**< NULL cipher and NULL authentication. */ + NSS_QVPN_CRYPTO_MODE_ENC, /**< Encryption only. */ + NSS_QVPN_CRYPTO_MODE_DEC, /**< Decryption only. */ + NSS_QVPN_CRYPTO_MODE_AUTH, /**< Authentication only. */ + NSS_QVPN_CRYPTO_MODE_ENC_AUTH, /**< Encryption and then authentication. */ + NSS_QVPN_CRYPTO_MODE_AUTH_DEC, /**< Authentication and then decryption. */ + NSS_QVPN_CRYPTO_MODE_AUTH_ENC, /**< Authentication and then encryption. */ + NSS_QVPN_CRYPTO_MODE_DEC_AUTH, /**< Decryption and then authentication. */ + NSS_QVPN_CRYPTO_MODE_MAX /**< End of crypto mode. 
*/ +}; + +/** + * nss_qvpn_hdr_configure_msg + * QVPN header configuration. + */ +struct nss_qvpn_hdr_configure_msg { + uint32_t src_ip[4]; /**< Source IP address. */ + uint32_t dst_ip[4]; /**< Destination IP address. */ + uint16_t src_port; /**< Source port. */ + uint16_t dst_port; /**< Destination port. */ + uint16_t hdr_flags; /**< Header flags. */ + uint16_t seqnum_size; /**< Size of sequence number. */ + uint16_t seqnum_offset; /**< Sequence number offset. */ + uint16_t anti_replay_alg; /**< Anti-replay algorithm. */ + uint16_t session_id_size; /**< Session ID size. */ + uint16_t session_id_offset; /**< Session ID offset. */ + uint16_t vpn_hdr_head_size; /**< VPN header size. */ + uint16_t vpn_hdr_head_offset; /**< VPN header offset. */ + uint16_t vpn_hdr_tail_size; /**< Size of tail. */ + uint16_t res; /**< Reserved for alignment. */ + uint8_t vpn_hdr_head[NSS_QVPN_VPN_HDR_HEAD_SIZE_MAX]; /**< Content of VPN header. */ + uint8_t vpn_hdr_tail[NSS_QVPN_VPN_HDR_TAIL_SIZE_MAX]; /**< VPN header tail content. */ + uint8_t hop_limit; /**< TTL or hop limit. */ + uint8_t res1[3]; /**< Reserved for alignment. */ +}; + +/** + * nss_qvpn_crypto_configure_msg + * QVPN crypto configuration message. + */ +struct nss_qvpn_crypto_configure_msg { + uint16_t hmac_len; /**< Length of HMAC to copy. */ + uint16_t hmac_offset; /**< Offset to copy HMAC. */ + uint16_t auth_offset; /**< Data offset to start authentication. */ + uint16_t cipher_op_offset; /**< Start of cipher data. */ + uint16_t cipher_blk_size; /**< Cipher block size. */ + uint16_t pad_type; /**< Pad algorithm. */ + uint16_t crypto_mode; /**< Crypto mode. */ + uint16_t iv_len; /**< Length of IV. */ + uint16_t iv_offset; /**< IV offset. */ + uint16_t iv_type; /**< IV generation algorithm. */ + uint8_t iv_val[NSS_QVPN_IV_SIZE_MAX]; /**< IV to be used. */ +}; + +/** + * nss_qvpn_crypto_key_add_msg + * QVPN key add message. + */ +struct nss_qvpn_crypto_key_add_msg { + uint32_t crypto_idx; /**< Crypto session ID. 
*/ + uint8_t session_id[NSS_QVPN_SESS_ID_SIZE_MAX]; /**< Session ID. */ +}; + +/** + * nss_qvpn_crypto_key_del_msg + * Delete/Deactivate key message. + */ +struct nss_qvpn_crypto_key_del_msg { + uint32_t crypto_idx; /**< Crypto index to delete/deactivate. */ +}; + +/** + * nss_qvpn_tunnel_config_msg + * QVPN tunnel configure message. + */ +struct nss_qvpn_tunnel_config_msg { + uint32_t sibling_if; /**< Sibling interface number. */ + uint16_t total_cmds; /**< Total number of commands. */ + uint16_t cmd_profile; /**< Command processing profile. */ + uint16_t cmd[NSS_QVPN_CMDS_MAX]; /**< Commands to execute. */ + struct nss_qvpn_crypto_key_add_msg crypto_key; /**< Initial crypto key. */ + struct nss_qvpn_hdr_configure_msg hdr_cfg; /**< VPN header configuration. */ + struct nss_qvpn_crypto_configure_msg crypto_cfg; /**< Crypto configuration. */ +}; + +/** + * nss_qvpn_crypto_key_activate_msg + * Activate key message. + */ +struct nss_qvpn_crypto_key_activate_msg { + uint32_t crypto_idx; /**< Crypto session ID. */ + uint8_t vpn_hdr_head[NSS_QVPN_VPN_HDR_HEAD_SIZE_MAX]; /**< Content of VPN header. */ +}; + +/** + * nss_qvpn_stats_sync_msg + * Message information for QVPN synchronization statistics. + */ +struct nss_qvpn_stats_sync_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t crypto_resp_error[NSS_CRYPTO_CMN_RESP_ERROR_MAX]; /**< Crypto response errors. */ + uint32_t pkt_drop_event[NSS_QVPN_PKT_DROP_EVENT_MAX]; /**< Packet drop events. */ + uint32_t exception_event[NSS_QVPN_EXCEPTION_EVENT_MAX]; /**< QVPN exception events. */ +}; + +/** + * nss_qvpn_stats_notification + * QVPN transmission statistics structure. + */ +struct nss_qvpn_stats_notification { + uint64_t stats_ctx[NSS_STATS_NODE_MAX]; /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +/** + * nss_qvpn_msg + * QVPN message structure for configuration and statistics. 
+ */ +struct nss_qvpn_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /**< QVPN configuration messages. */ + union { + struct nss_qvpn_tunnel_config_msg tunnel_config; /**< QVPN tunnel configure message. */ + struct nss_qvpn_crypto_key_add_msg key_add; /**< Crypto key add message. */ + struct nss_qvpn_crypto_key_del_msg key_del; /**< Crypto key delete message. */ + struct nss_qvpn_crypto_key_activate_msg key_activate; /**< Crypto key active message. */ + struct nss_qvpn_stats_sync_msg stats; /**< QVPN statistics synchronization message. */ + } msg; /**< QVPN configuration message. */ +}; + +/** + * nss_qvpn_tx_msg + * Sends an QVPN message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_qvpn_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_qvpn_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_qvpn_msg *msg); + +/** + * nss_qvpn_tx_msg_sync + * Sends an QVPN message to the NSS synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_qvpn_msg \n + * nss_qvpn_msg_type \n + * nss_qvpn_error_type + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] nvm Pointer to the message data. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in,out] resp Response for the configuration. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_qvpn_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_qvpn_msg *nvm, + uint32_t if_num, enum nss_qvpn_msg_type type, uint16_t len, + enum nss_qvpn_error_type *resp); + +/** + * nss_qvpn_tx_buf + * Sends data packet for QVPN encapsulation/decapsulation. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] if_num NSS interface number. + * @param[in] skb Pointer to sk_buff. 
+ * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_qvpn_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, struct sk_buff *skb); + +/** + * nss_qvpn_msg_init + * Initializes an QVPN message. + * + * @datatypes + * nss_qvpn_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +void nss_qvpn_msg_init(struct nss_qvpn_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** + * nss_qvpn_get_context + * Gets the QVPN context used in nss_qvpn_tx. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_qvpn_get_context(void); + +/** + * Callback when QVPN data is received. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_qvpn_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback to receive QVPN messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_qvpn_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * nss_qvpn_unregister_if + * Deregisters the QVPN interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + */ +void nss_qvpn_unregister_if(uint32_t if_num); + +/** + * nss_qvpn_register_if + * Register to send/receive QVPN messages to NSS. 
+ * + * @datatypes + * nss_qvpn_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] qvpn_data_callback Callback for the data. + * @param[in] qvpn_event_callback Callback for receiving events. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * @param[in] app_ctx Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_qvpn_register_if(uint32_t if_num, nss_qvpn_callback_t qvpn_data_callback, + nss_qvpn_msg_callback_t qvpn_event_callback, struct net_device *netdev, + uint32_t features, void *app_ctx); + +/** + * nss_qvpn_ifnum_with_core_id + * Gets the QVPN interface number with the core ID. + * + * @param[in] if_num NSS interface number. + * + * @return + * Interface number with the core ID. + */ +int nss_qvpn_ifnum_with_core_id(int if_num); + +/** + * nss_qvpn_register_handler + * Registers the QVPN handler with the NSS. + * + * @return + * None. + */ +void nss_qvpn_register_handler(void); + +/** + * nss_qvpn_ifmap_get + * Returns active QVPN interfaces. + * + * @return + * Pointer to interface map. + */ +unsigned long *nss_qvpn_ifmap_get(void); + +/** + * nss_qvpn_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_qvpn_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_qvpn_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. 
+ */ +extern int nss_qvpn_stats_register_notifier(struct notifier_block *nb); + +/** + * @} + */ + +#endif /* _NSS_QVPN_H_ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_rmnet_rx.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_rmnet_rx.h new file mode 100644 index 000000000..2b7e7156e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_rmnet_rx.h @@ -0,0 +1,392 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * @file nss_rmnet_rx.h + * NSS RMNET interface message Structure and APIs + */ + +#ifndef __NSS_RMNET_RX_H +#define __NSS_RMNET_RX_H + +#include "nss_if.h" + +/** + * @addtogroup nss_rmnet_subsystem + * @{ + */ + +/** + * Maximum number of DMA channel. + */ +#define NSS_RMNET_RX_CHANNEL_MAX 12 + +/** + * nss_rmnet_rx_dp_type + * Interface datapath types. NSS-to-host path will be seen by ECM for rules. + */ +enum nss_rmnet_rx_dp_type { + NSS_RMNET_RX_DP_N2H, /**< Interface on NSS-to-host path has zero value. */ + NSS_RMNET_RX_DP_H2N, /**< Interface on host-to-NSS path has non-zero value. 
*/ +}; + +/** + * nss_rmnet_rx_msg_types + * Message types for interface requests and responses. + */ +enum nss_rmnet_rx_msg_types { + NSS_RMNET_RX_TX_CONFIG_MSG = NSS_IF_MAX_MSG_TYPES + 1, + /**< Configuration message. */ + NSS_RMNET_RX_STATS_SYNC_MSG, /**< Statistic synchronization message. */ + NSS_RMNET_RX_MAX_MSG_TYPES, /**< Maximum message type. */ +}; + +/** + * nss_rmnet_rx_error_types + * Error types for the RMNET interface. + */ +enum nss_rmnet_rx_error_types { + NSS_RMNET_RX_SUCCESS, /**< No error. */ + NSS_RMNET_RX_ERROR_TYPE_MSG_UNKNOWN, /**< Unknown message type. */ + NSS_RMNET_RX_ERROR_TYPE_ALREADY_CONFIGURED, /**< Tunnel is already configured. */ + NSS_RMNET_RX_ERROR_TYPE_SIBLING_NOTFOUND, /**< Sibling interface is not found. */ + NSS_RMNET_RX_ERROR_TYPE_NEXTHOP_NOTFOUND, /**< Next hop is not found. */ + NSS_RMNET_RX_ERROR_TYPE_SIBLING_MISMATCH, /**< Sibling interface type mismatches. */ + NSS_RMNET_RX_ERROR_TYPE_DMA_CHANNEL_FAIL, /**< DMA Channel allocation failed. */ + NSS_RMNET_RX_ERROR_TYPE_RMNET_INVALID, /**< Interface type is invalid. */ + NSS_RMNET_RX_ERROR_TYPE_SHAPER_INVALID, /**< Shaper is invalid. */ + NSS_RMNET_RX_REG_FAILURE, /**< Registration failed. */ + NSS_RMNET_RX_ALLOC_FAILURE, /**< Memory allocation failed. */ + NSS_RMNET_RX_ERROR_MAX, /**< Maximum error type. */ +}; + +/** + * nss_rmnet_rx_pvt + * Private data information for the interface. + */ +struct nss_rmnet_rx_pvt { + struct semaphore sem; + /**< Semaphore to ensure that only one instance of a message is sent to the NSS. */ + struct completion complete; + /**< Waits for message completion or time out. */ + int response; /**< Message process response from the NSS firmware. */ + int sem_init_done; /**< Semaphore initialization is done. */ +}; + +/** + * nss_rmnet_rx_config_msg + * Message information for configuring the interface. + */ +struct nss_rmnet_rx_config_msg { + uint32_t flags; /**< Interface flags. */ + uint32_t sibling; /**< Sibling interface number. 
*/ + uint32_t nexthop; /**< Next hop interface number. */ + uint32_t no_channel; /**< Number of channels. */ +}; + +/** + * nss_rmnet_rx_stats + * Interface statistics received from the NSS. + */ +struct nss_rmnet_rx_stats { + struct nss_cmn_node_stats node_stats; + /**< Common statistics. */ + uint32_t enqueue_failed; /**< Enqueue to next node is failed. */ + uint32_t no_avail_channel; /**< No available DMA channel. */ + uint32_t num_linear_pbuf; /**< Number of linear pbufs. */ + uint32_t no_pbuf_to_linear; /**< No pbuf to linearize. */ + uint32_t no_enough_room; /**< Not enough headroom to linearize the pbuf. */ + uint32_t using_channel[NSS_RMNET_RX_CHANNEL_MAX]; + /**< How many times a channel is used. */ + uint32_t dma_failed; /**< DMA copy call failed. */ +}; + + +/** + * nss_rmnet_rx_msg + * Data for sending and receiving interface messages. + */ +struct nss_rmnet_rx_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of an RMNET interface message. + */ + union { + struct nss_rmnet_rx_config_msg if_config; + /**< Rule for creating an RMNET interface. */ + struct nss_rmnet_rx_stats stats; + /**< RMNET interface statistics. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback to transmit interface data received from NSS + * to the transmit path of the RMNET interface. + * + * @datatypes + * net_device \n + * sk_buff + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + */ +typedef void (*nss_rmnet_rx_xmit_callback_t)(struct net_device *netdev, struct sk_buff *skb); + +/** + * Callback function for interface data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. 
+ */ +typedef void (*nss_rmnet_rx_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for interface messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_rmnet_rx_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * nss_rmnet_rx_handle + * Context information for NSS communication. + */ +struct nss_rmnet_rx_handle { + struct nss_ctx_instance *nss_ctx; /**< NSS context. */ + int32_t if_num_n2h; /**< Redirect interface number on NSS-to-host path. */ + int32_t if_num_h2n; /**< Redirect interface number on host-to-NSS path. */ + struct net_device *ndev; /**< Associated network device. */ + struct nss_rmnet_rx_pvt *pvt; /**< Private data structure. */ + uint64_t *stats_n2h; /**< RMNET interface statistics from NSS-to-host. */ + uint64_t *stats_h2n; /**< RMNET interface statistics from host-to-NSS. */ + atomic_t refcnt; /**< Reference count. */ + nss_rmnet_rx_msg_callback_t cb; /**< Message callback. */ + void *app_data; /**< Application data to be passed to the callback. */ +}; + +/** + * nss_rmnet_rx_destroy_sync + * Destroys the RMNET interface synchronously. + * + * @datatypes + * nss_rmnet_rx_handle + * + * @param[in,out] handle Pointer to the RMNET interface handle (provided during + * dynamic interface allocation). + * + * @return + * Status of the Tx operation. + * + * @dependencies + * The interface must have been previously created. + */ +extern nss_tx_status_t nss_rmnet_rx_destroy_sync(struct nss_rmnet_rx_handle *handle); + +/** + * nss_rmnet_rx_create_sync_nexthop + * Creates an RMNET interface synchronously with specified nexthops. + * + * @datatypes + * net_device + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] nexthop_n2h Nexthop interface number of NSS-to-host dynamic interface. 
+ * @param[in] nexthop_h2n Nexthop interface number of host-to-NSS dynamic interface. + * + * @return + * Pointer to the NSS RMNET interface handle. + */ +extern struct nss_rmnet_rx_handle *nss_rmnet_rx_create_sync_nexthop(struct net_device *netdev, uint32_t nexthop_n2h, uint32_t nexthop_h2n); + +/** + * nss_rmnet_rx_create + * Creates an RMNET interface synchronously with generic nexthops. + * + * @datatypes + * net_device + * + * @param[in] netdev Pointer to the associated network device. + * + * @return + * Pointer to the NSS RMNET interface handle. + */ +extern struct nss_rmnet_rx_handle *nss_rmnet_rx_create(struct net_device *netdev); + +/** + * nss_rmnet_rx_tx_buf + * Forwards RMNET interface packets to the NSS. + * + * @datatypes + * nss_rmnet_rx_handle \n + * sk_buff + * + * @param[in,out] handle Pointer to the RMNET interface handle (provided during + * registration). + * @param[in] skb Pointer to the data socket buffer. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_rmnet_rx_tx_buf(struct nss_rmnet_rx_handle *handle, + struct sk_buff *skb); + +/** + * nss_rmnet_rx_tx_msg + * Sends a message to the RMNET interface. + * + * @datatypes + * nss_ctx_instance \n + * nss_rmnet_rx_msg + * + * @param[in] nss_ctx Pointer to the NSS context (provided during registration). + * @param[in] nvim Pointer to the RMNET interface message. + * + * @return + * Command Tx status. + */ +extern nss_tx_status_t nss_rmnet_rx_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_rmnet_rx_msg *nvim); + +/** + * nss_rmnet_rx_xmit_callback_unregister + * Deregisters the transmit callback from the RMNET interface. + * + * @datatypes + * nss_rmnet_rx_handle + * + * @param[in,out] handle Pointer to the RMNET interface handle. + * + * @return + * None. + */ +extern void nss_rmnet_rx_xmit_callback_unregister(struct nss_rmnet_rx_handle *handle); + +/** + * nss_rmnet_rx_xmit_callback_register + * Registers a transmit callback to an RMNET interface. 
+ * + * @datatypes + * nss_rmnet_rx_handle \n + * nss_rmnet_rx_xmit_callback_t + * + * @param[in,out] handle Pointer to the RMNET interface handle (provided during + * dynamic interface allocation). + * @param[in] cb Callback handler for RMNET data packets. + * + * @return + * None. + */ +extern void nss_rmnet_rx_xmit_callback_register(struct nss_rmnet_rx_handle *handle, + nss_rmnet_rx_xmit_callback_t cb); + +/** + * nss_rmnet_rx_unregister + * Deregisters an RMNET interface from the NSS driver. + * + * @datatypes + * nss_rmnet_rx_handle + * + * @param[in,out] handle Pointer to the RMNET interface handle. + * + * @return + * None. + */ +extern void nss_rmnet_rx_unregister(struct nss_rmnet_rx_handle *handle); + +/** + * nss_rmnet_rx_register + * Registers an RMNET Interface with NSS driver. + * + * @datatypes + * nss_rmnet_rx_handle \n + * nss_rmnet_rx_data_callback_t \n + * net_device + * + * @param[in,out] handle Pointer to the RMNET interface handle (provided during + * dynamic interface allocation). + * @param[in] data_callback Callback handler for RMNET data packets. + * @param[in] netdev Pointer to the associated network device. + * + * @return + * Status of the Tx operation. + */ +extern void nss_rmnet_rx_register(struct nss_rmnet_rx_handle *handle, + nss_rmnet_rx_data_callback_t data_callback, + struct net_device *netdev); + +/** + * nss_rmnet_rx_get_ifnum_with_coreid + * Returns the interface number with the core ID. + * + * @param[in] if_num NSS interface number + * + * @return + * Interface number with the core ID. + */ +extern int32_t nss_rmnet_rx_get_ifnum_with_core_id(int32_t if_num); + +/** + * nss_rmnet_rx_get_ifnum + * Returns the interface number with appending core ID. + * + * @param[in] dev Net device + * + * @return + * Interface number with the core ID. + */ +extern int32_t nss_rmnet_rx_get_ifnum(struct net_device *dev); + +/** + * nss_rmnet_rx_get_interface_num + * Returns the RMNET interface number associated with the handle. 
+ * + * @datatypes + * nss_rmnet_rx_handle + * + * @param[in] handle Pointer to the RMNET interface handle (provided during + dynamic interface allocation). + * + * @return + * RMNET interface number. + */ +extern int32_t nss_rmnet_rx_get_interface_num(struct nss_rmnet_rx_handle *handle); + +/** + * nss_rmnet_rx_get_context + * Gets the RMNET interface context. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_rmnet_rx_get_context(void); + +/** + * @} + */ + +#endif /* __NSS_RMNET_RX_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_rps.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_rps.h new file mode 100644 index 000000000..213604ea3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_rps.h @@ -0,0 +1,55 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_rps.h + * RPS related definitions. + */ + +#ifndef __NSS_RPS_H +#define __NSS_RPS_H + +/** + * @addtogroup nss_rps_subsystem + * @{ + */ + +/** + * nss_rps_register_sysctl + * Registers the RPS sysctl entries to the sysctl tree. 
+ * + * @return + * None. + */ +extern void nss_rps_register_sysctl(void); + +/** + * nss_rps_unregister_sysctl + * Deregisters the RPS sysctl entries from the sysctl tree. + * + * @return + * None. + * + * @dependencies + * The system control must have been previously registered. + */ +extern void nss_rps_unregister_sysctl(void); + +/** + * @} + */ + +#endif /* __NSS_RPS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_shaper.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_shaper.h new file mode 100644 index 000000000..fbb0415ae --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_shaper.h @@ -0,0 +1,901 @@ +/* + ************************************************************************** + * Copyright (c) 2014, 2017-2018 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_shaper.h + * NSS Shaper definitions + */ + +#ifndef __NSS_SHAPER_H +#define __NSS_SHAPER_H + +/** + * @addtogroup nss_shaper_subsystem + * @{ + */ + +/** + * nss_shaper_node_types + * Types of shaper nodes that are exported to the HLOS. 
+ */ +enum nss_shaper_node_types { + NSS_SHAPER_NODE_TYPE_CODEL = 1, + NSS_SHAPER_NODE_TYPE_PRIO = 3, + NSS_SHAPER_NODE_TYPE_FIFO = 4, + NSS_SHAPER_NODE_TYPE_TBL = 5, + NSS_SHAPER_NODE_TYPE_BF = 6, + NSS_SHAPER_NODE_TYPE_BF_GROUP = 7, + NSS_SHAPER_NODE_TYPE_WRR = 9, + NSS_SHAPER_NODE_TYPE_WRR_GROUP = 10, + NSS_SHAPER_NODE_TYPE_HTB = 11, + NSS_SHAPER_NODE_TYPE_HTB_GROUP = 12, + NSS_SHAPER_NODE_TYPE_WRED = 13, + NSS_SHAPER_NODE_TYPE_PPE_SN = 14, + NSS_SHAPER_NODE_TYPE_MAX, +}; + +typedef enum nss_shaper_node_types nss_shaper_node_type_t; + /**< Types of shaper nodes that are exported to the HLOS. */ + +/** + * nss_shaper_config_types + * Types of shaper configuration messages. + */ +enum nss_shaper_config_types { + NSS_SHAPER_CONFIG_TYPE_ALLOC_SHAPER_NODE, + NSS_SHAPER_CONFIG_TYPE_FREE_SHAPER_NODE, + NSS_SHAPER_CONFIG_TYPE_SET_DEFAULT, + NSS_SHAPER_CONFIG_TYPE_SET_ROOT, + NSS_SHAPER_CONFIG_TYPE_SHAPER_NODE_BASIC_STATS_GET, + NSS_SHAPER_CONFIG_TYPE_SHAPER_NODE_ATTACH, + NSS_SHAPER_CONFIG_TYPE_SHAPER_NODE_DETACH, + NSS_SHAPER_CONFIG_TYPE_SHAPER_NODE_CHANGE_PARAM, + NSS_SHAPER_CONFIG_TYPE_HYBRID_MODE_ENABLE, + NSS_SHAPER_CONFIG_TYPE_HYBRID_MODE_DISABLE, + NSS_SHAPER_CONFIG_TYPE_SHAPER_NODE_MEM_REQ, +}; + +typedef enum nss_shaper_config_types nss_shaper_config_type_t; + /**< Types of shaper configuration messages. */ + +/** + * nss_shaper_response_types + * Types of shaper configuration responses. 
+ */ +enum nss_shaper_response_types { + NSS_SHAPER_RESPONSE_TYPE_SUCCESS, + NSS_SHAPER_RESPONSE_TYPE_NO_SHAPER_NODE, + NSS_SHAPER_RESPONSE_TYPE_NO_SHAPER_NODES, + NSS_SHAPER_RESPONSE_TYPE_OLD, + NSS_SHAPER_RESPONSE_TYPE_UNRECOGNISED, + NSS_SHAPER_RESPONSE_TYPE_BAD_DEFAULT_CHOICE, + NSS_SHAPER_RESPONSE_TYPE_DUPLICATE_QOS_TAG, + NSS_SHAPER_RESPONSE_TYPE_TBL_CIR_RATE_AND_BURST_REQUIRED, + NSS_SHAPER_RESPONSE_TYPE_TBL_CIR_BURST_LESS_THAN_MTU, + NSS_SHAPER_RESPONSE_TYPE_CODEL_ALL_PARAMS_REQUIRED, + NSS_SHAPER_RESPONSE_TYPE_BF_GROUP_RATE_AND_BURST_REQUIRED, + NSS_SHAPER_RESPONSE_TYPE_BF_GROUP_BURST_LESS_THAN_MTU, + NSS_SHAPER_RESPONSE_TYPE_CHILD_NOT_BF_GROUP, + NSS_SHAPER_RESPONSE_TYPE_WRR_GROUP_INVALID_QUANTUM, + NSS_SHAPER_RESPONSE_TYPE_CHILD_NOT_WRR_GROUP, + NSS_SHAPER_RESPONSE_TYPE_WRR_INVALID_OPERATION_MODE, + NSS_SHAPER_RESPONSE_TYPE_HTB_GROUP_BURST_LESS_THAN_MTU, + NSS_SHAPER_RESPONSE_TYPE_HTB_GROUP_PRIORITY_OUT_OF_RANGE, + NSS_SHAPER_RESPONSE_TYPE_CHILDREN_BELONG_TO_MIXED_TYPES, + NSS_SHAPER_RESPONSE_TYPE_CHILD_ALREADY_PRESENT, + NSS_SHAPER_RESPONSE_TYPE_CHILD_MISMATCH, + NSS_SHAPER_RESPONSE_TYPE_CHILD_UNSUPPORTED, + NSS_SHAPER_RESPONSE_TYPE_CHILD_NOT_FOUND, + NSS_SHAPER_RESPONSE_TYPE_ATTACH_FAIL, + NSS_SHAPER_RESPONSE_TYPE_WRED_WEIGHT_MODE_INVALID, + NSS_SHAPER_RESPONSE_TYPE_PPE_SN_UCAST_BASE_OFFSET_INVALID, + NSS_SHAPER_RESPONSE_TYPE_PPE_SN_MCAST_BASE_OFFSET_INVALID, + NSS_SHAPER_RESPONSE_TYPE_PPE_SN_UCAST_QUEUE_ALLOC_FAILED, + NSS_SHAPER_RESPONSE_TYPE_PPE_SN_MCAST_QUEUE_ALLOC_FAILED, + NSS_SHAPER_RESPONSE_TYPE_PPE_SN_INVALID_LIMIT, + NSS_SHAPER_RESPONSE_TYPE_PPE_SN_UCAST_QUEUE_CHANGED, + NSS_SHAPER_RESPONSE_TYPE_PPE_SN_MCAST_QUEUE_CHANGED, + NSS_SHAPER_RESPONSE_TYPE_CODEL_FQ_MEM_INSUFFICIENT, + NSS_SHAPER_RESPONSE_TYPE_CODEL_FQ_COUNT_CHANGE_NOT_ALLOWED, + NSS_SHAPER_RESPONSE_TYPE_CODEL_FQ_COUNT_INVALID, + NSS_SHAPER_RESPONSE_TYPE_CODEL_MODE_CHANGE_NOT_ALLOWED, +}; + +typedef enum nss_shaper_response_types nss_shaper_response_type_t; + /**< Types of shaper 
configuration responses. */ + +/** + * nss_shaper_config_alloc_shaper_node + * Message information for allocating a shaper node for a NSS interface. + */ +struct nss_shaper_config_alloc_shaper_node { + nss_shaper_node_type_t node_type; /**< Type of shaper node. */ + uint32_t qos_tag; /**< QoS tag of the node. */ +}; + +/** + * nss_shaper_config_free_shaper_node + * Message information for freeing a shaper node from a NSS interface. + */ +struct nss_shaper_config_free_shaper_node { + uint32_t qos_tag; /**< QoS tag of the node. */ +}; + +/** + * nss_shaper_config_set_root_node + * Message information for setting a shaper node as the root. + */ +struct nss_shaper_config_set_root_node { + uint32_t qos_tag; /**< QoS tag of the node. */ +}; + +/** + * nss_shaper_config_set_default_node + * Message information for setting a shaper node as the default node for enqueueing. + */ +struct nss_shaper_config_set_default_node { + uint32_t qos_tag; /**< QoS tag of the node. */ +}; + +/** + * nss_shaper_config_set_hybrid_mode + * Message information for setting a shaper to operate in hybrid mode. + */ +struct nss_shaper_config_set_hybrid_mode { + uint32_t offset; /**< Queue offset for packets sent to the hardware. */ +}; + +/** + * nss_shaper_config_prio_attach + * Message information for attaching a shaper node to a PRIO shaper node. + */ +struct nss_shaper_config_prio_attach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ + uint32_t priority; /**< Priority of the child shaper node. */ +}; + +/** + * nss_shaper_config_prio_detach + * Message information for detaching a shaper node from a PRIO shaper node. + */ +struct nss_shaper_config_prio_detach { + uint32_t priority; /**< Priority of the child shaper node. */ +}; + +/** + * nss_shaper_config_codel_alg_param + * Message information for configuring a CoDel algorithm. + */ +struct nss_shaper_config_codel_alg_param { + uint16_t interval; /**< Buffer time to smooth a state transition. 
*/ + uint16_t target; /**< Acceptable delay associated with a queue. */ + uint16_t mtu; /**< MTU for the associated interface. */ + uint16_t reserved; /**< Alignment padding. */ +}; + +/** + * nss_shaper_config_codel_param + * Message information for configuring a CoDel shaper node. + */ +struct nss_shaper_config_codel_param { + int32_t qlen_max; /**< Maximum number of packets that can be enqueued. */ + struct nss_shaper_config_codel_alg_param cap; + /**< Configuration for the CoDel algorithm. */ + uint32_t flows; /**< Number of flow hash buckets. */ + uint32_t flows_mem; /**< Host allocated memory for flow queues. */ + uint32_t flows_mem_sz; /**< Memory size allocated for flow queues. */ + uint32_t quantum; /**< Quantum (in bytes) to round-robin the flow buckets. */ + uint32_t ecn; /**< 0 - ECN disabled, 1 - ECN enabled. */ +}; + +/** + * nss_shaper_config_codel_mem_req + * Message to get CoDel memory requirement per flow queue (needed for fq_codel). + */ +struct nss_shaper_config_codel_mem_req { + uint32_t mem_req; /**< Memory needed per flow queue (in bytes). */ +}; + +/** + * nss_shaper_config_rate_param + * Message information for configuring the rate limiter algorithm. + */ +struct nss_shaper_config_rate_param { + uint32_t rate; + /**< Allowed traffic rate measured in bytes per second. */ + uint32_t burst; + /**< Maximum bytes that can be sent in a burst. */ + uint32_t max_size; + /**< Maximum size of the supported packets (in bytes). */ + + /** + * Specifies whether the rate limiter will be bypassed (short circuited). + */ + bool short_circuit; +}; + +/** + * nss_shaper_configure_tbl_attach + * Message information for attaching a shaper node to a TBL shaper node. + */ +struct nss_shaper_config_tbl_attach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ +}; + +/** + * nss_shaper_configure_tbl_param + * Message information for detaching a shaper node from a TBL shaper node. 
+ */ +struct nss_shaper_config_tbl_param { + struct nss_shaper_config_rate_param lap_cir; + /**< Configuration parameters for the committed information rate. */ + struct nss_shaper_config_rate_param lap_pir; + /**< Configuration parameters for the peak information rate. */ +}; + +/** + * nss_shaper_config_bf_attach + * Message information for attaching a shaper node to a BF shaper node. + */ +struct nss_shaper_config_bf_attach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ +}; + +/** + * nss_shaper_config_bf_detach + * Message information for detaching a shaper node from a BF shaper node. + */ +struct nss_shaper_config_bf_detach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ +}; + +/** + * nss_shaper_config_bf_group_attach + * Message information for attaching a shaper node to a BF group shaper node. + */ +struct nss_shaper_config_bf_group_attach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ +}; + +/** + * nss_shaper_config_bf_group_param + * Configuration parameters for a BF group shaper node. + */ +struct nss_shaper_config_bf_group_param { + uint32_t quantum; + /**< Smallest increment value for the DRRs. */ + struct nss_shaper_config_rate_param lap; + /**< Configuration of the rate control algorithm. */ +}; + +/** + * nss_shaper_config_fifo_limit_set + * Drop modes for the FIFO shaper in the NSS interface. + */ +enum nss_shaper_config_fifo_drop_modes { + NSS_SHAPER_FIFO_DROP_MODE_HEAD = 0, + NSS_SHAPER_FIFO_DROP_MODE_TAIL, + NSS_SHAPER_FIFO_DROP_MODES, +}; + +typedef enum nss_shaper_config_fifo_drop_modes nss_shaper_config_fifo_drop_mode_t; + /**< Drop modes for the FIFO shaper in the NSS interface. */ + +/** + * nss_shaper_config_fifo_param + * Message information for configuring a FIFO shaper node. + */ +struct nss_shaper_config_fifo_param { + uint32_t limit; /**< Queue limit in packets. */ + nss_shaper_config_fifo_drop_mode_t drop_mode; + /**< FIFO drop mode when a queue is full. 
*/ +}; + +/** + * nss_shaper_config_wred_weight_modes + * Supported weight modes. + */ +enum nss_shaper_config_wred_weight_modes { + NSS_SHAPER_WRED_WEIGHT_MODE_DSCP = 0, + NSS_SHAPER_WRED_WEIGHT_MODES, +}; + +typedef enum nss_shaper_config_wred_weight_modes nss_shaper_config_wred_weight_mode_t; + /**< Supported weight modes. */ + +/** + * nss_shaper_red_alg_param + * Message information for configuring the RED algorithm. + */ +struct nss_shaper_red_alg_param { + uint32_t min; /**< Minimum size of the queue. */ + uint32_t max; /**< Maximum size of the queue. */ + + /** + * Probability of dropped packets when the average queue size (qlen_avg) = max. + */ + uint32_t probability; + + /** + * Exponential weight factor to calculate the average queue size. + */ + uint32_t exp_weight_factor; +}; + +/** + * nss_shaper_config_wred_param + * Message information for configuring the WRED algorithm. + */ +struct nss_shaper_config_wred_param { + uint32_t limit; /**< Queue limit in bytes. */ + nss_shaper_config_wred_weight_mode_t weight_mode; + /**< WRED weight mode. */ + uint32_t traffic_classes; /**< Number of traffic classes (drop probability). */ + uint32_t def_traffic_class; /**< Default traffic class used when there is no match. */ + uint32_t traffic_id; /**< Traffic class to configure. */ + uint32_t weight_mode_value; /**< Value to match the selected header field against. */ + struct nss_shaper_red_alg_param rap; + /**< Configuration parameters for the RED algorithm. */ + uint8_t ecn; /**< Mark an ECN bit or drop packet. */ +}; + +/** + * nss_shaper_config_wrr_attach + * Message information for attaching a shaper node to a WRR shaper node. + */ +struct nss_shaper_config_wrr_attach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ +}; + +/** + * nss_shaper_config_wrr_detach + * Message information for detaching a child node from a WRR shaper node. + */ +struct nss_shaper_config_wrr_detach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. 
*/ +}; + +/** + * nss_shaper_config_wrr_group_attach + * Message information for attaching a shaper node to a WRR group. + */ +struct nss_shaper_config_wrr_group_attach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ +}; + +/** + * nss_shaper_wrr_operation_modes + * Modes of WRR operation. + */ +enum nss_shaper_wrr_operation_modes { + NSS_SHAPER_WRR_MODE_ROUND_ROBIN = 0, + NSS_SHAPER_WRR_MODE_FAIR_QUEUEING = 1, + NSS_SHAPER_WRR_MODE_TYPE_MAX, +}; + +/** + * nss_shaper_config_wrr_param + * Message information for configuring the operation mode of a WRR shaper node. + */ +struct nss_shaper_config_wrr_param { + uint32_t operation_mode; /**< Mode in which to operate. */ +}; + +/** + * nss_shaper_config_wrr_group_param + * Message information for configuring a quantum value of a WRR group shaper node. + */ +struct nss_shaper_config_wrr_group_param { + uint32_t quantum; /**< Smallest increment value for the DRRs. */ +}; + +/** + * nss_shaper_config_htb_attach + * Message information for attaching a shaper node to an HTB shaper node. + */ +struct nss_shaper_config_htb_attach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ +}; + +/** + * nss_shaper_config_htb_group_attach + * Message information for attaching a shaper node to an HTB group. + */ +struct nss_shaper_config_htb_group_attach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ +}; + +/** + * nss_shaper_config_htb_group_detach + * Message information for detaching a shaper node from an HTB group. + */ +struct nss_shaper_config_htb_group_detach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ +}; + +/** + * nss_shaper_config_htb_group_param + * Message information for configuring an HTB group shaper node. + */ +struct nss_shaper_config_htb_group_param { + uint32_t quantum; /**< Smallest increment value for the DRRs. */ + uint32_t priority; /**< Value of the priority for this group. 
*/ + uint32_t overhead; /**< Overhead in bytes to be added per packet. */ + struct nss_shaper_config_rate_param rate_police; + /**< Configuration parameters for the policing rate. */ + struct nss_shaper_config_rate_param rate_ceil; + /**< Configuration parameters for the ceiling rate. */ +}; + +/** + * nss_shaper_config_ppe_sn_attach + * Message information for attaching a shaper node to a PPE shaper node. + */ +struct nss_shaper_config_ppe_sn_attach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ +}; + +/** + * nss_shaper_config_ppe_sn_detach + * Message information for detaching a shaper node from a PPE shaper node. + */ +struct nss_shaper_config_ppe_sn_detach { + uint32_t child_qos_tag; /**< QoS tag of the child shaper node. */ +}; + +/** + * nss_shaper_config_ppe_sn_type + * Types of PPE shaper nodes. + */ +enum nss_shaper_config_ppe_sn_type { + /* + * Scheduler types. + */ + NSS_SHAPER_CONFIG_PPE_SN_TYPE_HTB, + NSS_SHAPER_CONFIG_PPE_SN_TYPE_HTB_GROUP, + NSS_SHAPER_CONFIG_PPE_SN_TYPE_TBL, + NSS_SHAPER_CONFIG_PPE_SN_TYPE_WRR, + NSS_SHAPER_CONFIG_PPE_SN_TYPE_WRR_GROUP, + NSS_SHAPER_CONFIG_PPE_SN_TYPE_PRIO, + NSS_SHAPER_CONFIG_PPE_SN_SCH_MAX = 0xFF, + + /* + * Queue types. + */ + NSS_SHAPER_CONFIG_PPE_SN_TYPE_FIFO, + NSS_SHAPER_CONFIG_PPE_SN_TYPE_RED, + NSS_SHAPER_CONFIG_PPE_SN_TYPE_MAX, +}; + +/** + * nss_shaper_config_ppe_sn_param + * Message information for configuring a PPE shaper node. + */ +struct nss_shaper_config_ppe_sn_param { + enum nss_shaper_config_ppe_sn_type type; + /**< Type of PPE shaper node. */ + uint16_t ucast_base; /**< Resource ID of the base hardware for unicast queue. */ + uint16_t ucast_offset; /**< Offset from the base resource ID for unicast queue. */ + uint16_t mcast_base; /**< Resource ID of the base hardware for multicast queue. */ + uint16_t mcast_offset; /**< Offset from the base resource ID for multicast queue. */ + uint8_t port; /**< PPE port on which this shaper node is configured. 
*/ + uint8_t reserved; /**< Reserved for padding. */ + uint16_t limit; /**< Limit of the queue. */ +}; + +/* + * nss_shaper_node_config + * Configuration messages for all types of shaper nodes. + */ +struct nss_shaper_node_config { + uint32_t qos_tag; /**< ID of the shaper node to be configured. */ + + /** + * Configuration messages for all types of shaper nodes. + */ + union { + struct nss_shaper_config_prio_attach prio_attach; + /**< Attach a shaper node to a PRIO shaper node. */ + struct nss_shaper_config_prio_detach prio_detach; + /**< Detach a shaper node from a PRIO shaper node. */ + + struct nss_shaper_config_codel_param codel_param; + /**< Configure a CoDel shaper node. */ + + struct nss_shaper_config_codel_mem_req codel_mem_req; + /**< Get CoDel memory requirement. */ + + struct nss_shaper_config_tbl_attach tbl_attach; + /**< Attach a shaper node to a TBL shaper node. */ + struct nss_shaper_config_tbl_param tbl_param; + /**< Configuration parameters for a TBL shaper node. */ + + struct nss_shaper_config_bf_attach bf_attach; + /**< Attach a shaper node to a BF shaper node. */ + struct nss_shaper_config_bf_detach bf_detach; + /**< Detach a child shaper node from BF shaper node. */ + struct nss_shaper_config_bf_group_attach bf_group_attach; + /**< Attach a shaper node to a BF group shaper node. */ + struct nss_shaper_config_bf_group_param bf_group_param; + /**< Configure parameters for a BF group shaper node. */ + + struct nss_shaper_config_fifo_param fifo_param; + /**< Configure a FIFO shaper node. */ + + struct nss_shaper_config_wrr_attach wrr_attach; + /**< Attach a shaper node to a WRR shaper node. */ + struct nss_shaper_config_wrr_detach wrr_detach; + /**< Detach a shaper node from a WRR shaper node. */ + struct nss_shaper_config_wrr_param wrr_param; + /**< Configuration parameters for a WRR shaper node . */ + struct nss_shaper_config_wrr_group_attach wrr_group_attach; + /**< Attach a shaper node to a WRR group shaper node. 
*/ + struct nss_shaper_config_wrr_group_param wrr_group_param; + /**< Configure a WRR group shaper node with a quantum value. */ + struct nss_shaper_config_htb_attach htb_attach; + /**< Attach a shaper node to an HTB shaper node. */ + struct nss_shaper_config_htb_group_attach htb_group_attach; + /**< Attach a shaper node to an HTB group shaper node. */ + struct nss_shaper_config_htb_group_detach htb_group_detach; + /**< Detach a shaper node from an HTB group shaper node. */ + struct nss_shaper_config_htb_group_param htb_group_param; + /**< Configuration parameters for an HTB group shaper node. */ + struct nss_shaper_config_wred_param wred_param; + /**< Configuration parameters for a WRED shaper node. */ + struct nss_shaper_config_ppe_sn_attach ppe_sn_attach; + /**< Attach a shaper node to a PPE shaper node. */ + struct nss_shaper_config_ppe_sn_detach ppe_sn_detach; + /**< Detach a shaper node from a PPE shaper node. */ + struct nss_shaper_config_ppe_sn_param ppe_sn_param; + /**< Configuration parameters for a PPE shaper node. */ + } snc; /**< Types of shaper node configuration messages. */ +}; + +/** + * nss_shaper_node_codel_fq_stats_delta + * CoDel flow queue mode statistics sent as deltas. + */ +struct nss_shaper_node_codel_fq_stats_delta { + uint32_t new_flow_cnt; /**< Total number of new flows seen. */ + uint32_t ecn_mark_cnt; /**< Number of packets marked with ECN. */ +}; + +/** + * nss_shaper_node_codel_fq_stats + * CoDel flow queue mode statistics. + */ +struct nss_shaper_node_codel_fq_stats { + struct nss_shaper_node_codel_fq_stats_delta delta; + /**< CoDel flow queue statistics sent as deltas. */ + uint32_t new_flows_len; /**< Current number of new flows. */ + uint32_t old_flows_len; /**< Current number of old flows. */ + uint32_t maxpacket; /**< Largest packet seen so far. */ +}; + +/** + * nss_shaper_node_codel_sq_stats + * CoDel single queue mode statistics. 
+ */ +struct nss_shaper_node_codel_sq_stats { + /** + * Maximum amount of time (in milliseconds) that a packet was in this shaper + * node before being dequeued. + */ + uint32_t packet_latency_peak_msec_dequeued; + + /** + * Maximum amount of time (in milliseconds) that a packet was in this shaper + * node before being dropped. + */ + uint32_t packet_latency_peak_msec_dropped; +}; + +/** + * nss_shaper_node_codel_stats + * CoDel shaper node statistics. + */ +struct nss_shaper_node_codel_stats { + struct nss_shaper_node_codel_sq_stats sq; /**< Single queue mode statistics. */ + struct nss_shaper_node_codel_fq_stats fq; /**< Flow queue mode statistics. */ +}; + +/** + * nss_shaper_node_stats_delta + * Statistics that are sent as deltas. + */ +struct nss_shaper_node_stats_delta { + uint32_t enqueued_bytes; /**< Bytes enqueued successfully. */ + uint32_t enqueued_packets; /**< Packets enqueued successfully. */ + + /** + * Bytes dropped during an enqueue operation because of node limits. + */ + uint32_t enqueued_bytes_dropped; + + /** + * Packets dropped during an enqueue operation because of node limits. + */ + uint32_t enqueued_packets_dropped; + + uint32_t dequeued_bytes; + /**< Bytes dequeued successfully from a shaper node. */ + uint32_t dequeued_packets; + /**< Packets dequeued successfully from a shaper node. */ + + /** + * Bytes dropped by this node during dequeuing (some nodes drop packets during + * dequeuing rather than enqueuing). + */ + uint32_t dequeued_bytes_dropped; + + /** + * Packets dropped by this node during dequeuing (some nodes drop packets during + * dequeuing rather than enqueuing). + */ + uint32_t dequeued_packets_dropped; + + /** + * Number of times any queue limit was overrun, leading to packet drops. + */ + uint32_t queue_overrun; + + uint32_t unused[4]; /**< Reserved for future statistics expansion. */ +}; + +/** + * nss_shaper_node_stats + * Common shaper node statistics. 
+ */ +struct nss_shaper_node_stats { + uint32_t qlen_bytes; /**< Total size of packets waiting in the queue. */ + uint32_t qlen_packets; /**< Number of packets waiting in the queue. */ + uint32_t unused[4]; /**< Reserved for future statistics expansion. */ + struct nss_shaper_node_stats_delta delta; + /**< Statistics that are sent as deltas. */ +}; + +/** + * nss_shaper_node_stats_response + * Statistics response for shaper nodes. + */ +struct nss_shaper_node_stats_response { + struct nss_shaper_node_stats sn_stats; /**< Common shaper node statistics. */ + + /** + * All shaper nodes that need to maintain unique statistics need + * to add their statistics structure here. + */ + union { + struct nss_shaper_node_codel_stats codel; + /**< CoDel specific statistics. */ + } per_sn_stats; /**< Shaper specific statistics. */ +}; + +/** + * nss_shaper_node_stats_get + * Statistics of a shaper node. + */ +struct nss_shaper_node_stats_get { + + /* + * Request + */ + uint32_t qos_tag; /**< QoS tag of the shaper node. */ + + /* + * Response + */ + struct nss_shaper_node_stats_response response; + /**< Shaper node statistics response. */ +}; + +/** + * nss_shaper_configure + * Configuration message for a shaper node. + */ +struct nss_shaper_configure { + nss_shaper_config_type_t request_type; /**< Message is a request. */ + nss_shaper_response_type_t response_type; /**< Message is a response. */ + + /** + * Types of configuration messages for a shaper node. + */ + union { + struct nss_shaper_config_alloc_shaper_node alloc_shaper_node; + /**< Allocate a shaper node in the NSS interface. */ + struct nss_shaper_config_free_shaper_node free_shaper_node; + /**< Free a shaper node from the NSS interface. */ + struct nss_shaper_config_set_default_node set_default_node; + /**< Set a shaper node as the default node for a queue. */ + struct nss_shaper_config_set_root_node set_root_node; + /**< Set a shaper node as the root shaper node. 
*/ + struct nss_shaper_config_set_hybrid_mode set_hybrid_mode; + /**< Set a shaper to operate in Hybrid mode. */ + struct nss_shaper_node_config shaper_node_config; + /**< Configuration message for any type of shaper node. */ + struct nss_shaper_node_stats_get shaper_node_stats_get; + /**< Statistics for a shaper node. */ + } msg; /**< Types of configuration messages. */ +}; + +/** + * Registrant callback to receive shaper bounced packets + * + * @datatypes + * sk_buff + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] skb Pointer to the data socket buffer. + */ +typedef void (*nss_shaper_bounced_callback_t)(void *app_data, struct sk_buff *skb); + +/** + * nss_shaper_register_shaping + * Registers a shaper node with the NSS interface for basic shaping operations. + * + * @return + * Pointer to the NSS core context. + */ +extern void *nss_shaper_register_shaping(void); + +/** + * nss_shaper_unregister_shaping + * Deregisters a shaper node from the NSS interface. + * + * @param[in] ctx Pointer to the NSS context. + * + * @dependencies + * The shaper node must have been previously registered. + */ +extern void nss_shaper_unregister_shaping(void *ctx); + +/** + * nss_shaper_register_shaper_bounce_interface + * Registers a shaper bounce interface with the NSS interface for receiving + * shaper-bounced packets. + * + * @datatypes + * nss_shaper_bounced_callback_t \n + * module + * + * @param[in] if_num NSS interface number. + * @param[in] cb Callback function for the message. This callback is + * invoked when the NSS returns a sk_buff after shaping. + * @param[in] app_data Pointer to the application context of the message. + * This context is passed to the callback together with the + * sk_buff to provide context to the registrant (state). + * @param[in] owner Pointer to the kernel module. The module is held until it deregisters. + * + * @return + * Pointer to the NSS core context. 
+ */ +extern void *nss_shaper_register_shaper_bounce_interface(uint32_t if_num, nss_shaper_bounced_callback_t cb, void *app_data, struct module *owner); + +/** + * nss_shaper_unregister_shaper_bounce_interface + * Deregisters a shaper bounce interface from the NSS interface. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The shaper bounce interface must have been previously registered. + */ +extern void nss_shaper_unregister_shaper_bounce_interface(uint32_t if_num); + +/** + * nss_shaper_register_shaper_bounce_bridge + * Registers a shaper bounce bridge with the NSS interface for receiving + * shaper-bounced packets. + * + * @datatypes + * nss_shaper_bounced_callback_t \n + * module + * + * @param[in] if_num NSS interface number. + * @param[in] cb Callback function for the message. This callback is + * invoked when the NSS returns a sk_buff after shaping. + * @param[in] app_data Pointer to the application context of the message. + * This context is passed to the callback together with the + * sk_buff to provide context to the registrant (state). + * @param[in] owner Pointer to the kernel module. + * + * @return + * Pointer to the NSS core context. + */ +extern void *nss_shaper_register_shaper_bounce_bridge(uint32_t if_num, nss_shaper_bounced_callback_t cb, void *app_data, struct module *owner); + +/** + * nss_shaper_unregister_shaper_bounce_bridge + * Deregisters a shaper bounce bridge from the NSS interface. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The shaper bounce bridge must have been previously registered. + */ +extern void nss_shaper_unregister_shaper_bounce_bridge(uint32_t if_num); + +/** + * nss_shaper_bounce_interface_packet + * Issues a packet for shaping via a bounce operation. + * + * @datatypes + * sk_buff + * + * @param[in] ctx Pointer to the NSS context provided during registration. + * @param[in] if_num NSS interface number. 
+ * @param[in] skb Pointer to the data socket buffer. + * + * @return + * Success or failure. + */ +extern nss_tx_status_t nss_shaper_bounce_interface_packet(void *ctx, uint32_t if_num, struct sk_buff *skb); + +/** + * nss_shaper_bounce_bridge_packet + * Issues a packet for shaping via a bounce bridge. + * + * @datatypes + * sk_buff + * + * @param[in] ctx Pointer to the NSS context provided during registration. + * @param[in] if_num NSS interface number. + * @param[in] skb Pointer to the data socket buffer. + * + * @return + * Success or failure. + */ +extern nss_tx_status_t nss_shaper_bounce_bridge_packet(void *ctx, uint32_t if_num, struct sk_buff *skb); + +/** + * nss_shaper_config_send + * Sends a shaping configuration message. + * + * @datatypes + * nss_shaper_configure + * + * @param[in] ctx Pointer to the NSS context. + * @param[in] config Pointer to the configuration message. + * + * @return + * Indication if the configuration message was issued. + * @par + * This indication does not mean the configuration message was successfully + * processed. Success or failure is provided in the response issued to the + * given callback function as specified in nss_shaper_configure. + */ +nss_tx_status_t nss_shaper_config_send(void *ctx, struct nss_shaper_configure *config); + +/** + * nss_shaper_get_device + * Gets the original device from probe. + * + * @return + * Pointer to the device. + */ +extern struct device *nss_shaper_get_dev(void); + +/** + * @} + */ + +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_sjack.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_sjack.h new file mode 100644 index 000000000..32ef356b0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_sjack.h @@ -0,0 +1,154 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_sjack.h + * NSS SJACK interface definitions. + */ + +#ifndef __NSS_SJACK_H +#define __NSS_SJACK_H + +/** + * @addtogroup nss_sjack_subsystem + * @{ + */ + +/** + * nss_sjack_msg_types + * Message types for SJACK requests and responses. + */ +enum nss_sjack_msg_types { + NSS_SJACK_CONFIGURE_MSG, + NSS_SJACK_UNCONFIGURE_MSG, + NSS_SJACK_STATS_SYNC_MSG, + NSS_SJACK_MAX_MSG_TYPE +}; + +/** + * nss_sjack_configure_msg + * Message information for configuring the SJACK interface. + */ +struct nss_sjack_configure_msg { + uint32_t ingress_if_num; + /**< Ingress interface number corresponding to the SJACK device. */ + uint32_t egress_if_num; + /**< Egress interface number corresponding to the SJACK device. */ + uint16_t tunnel_id; /**< SJACK tunnel ID. */ + uint8_t ip_dscp; /**< Differentiated services code point value. */ + uint8_t gre_prio; /**< GRE priority information. */ + uint8_t gre_flags; /**< GRE flags. */ + uint8_t use_ipsec_sa_pattern; /**< IPsec security association pattern flag. */ +}; + +/** + * nss_sjack_unconfigure_msg + * Message information for de-configuring the SJACK interface. 
+ */ +struct nss_sjack_unconfigure_msg { + uint32_t ingress_if_num; + /**< Ingress interface number corresponding to the SJACK device. */ +}; + +/** + * nss_sjack_stats_sync_msg + * Message information for SJACK synchronization statistics. + */ +struct nss_sjack_stats_sync_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ +}; + +/** + * nss_sjack_msg + * Data for sending and receiving SJACK messages. + */ +struct nss_sjack_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of an SJACK message. + */ + union { + struct nss_sjack_configure_msg configure; + /**< Configure SJACK. */ + struct nss_sjack_unconfigure_msg unconfigure; + /**< De-configure SJACK. */ + struct nss_sjack_stats_sync_msg stats_sync; + /**< Synchronized statistics for SJACK. */ + } msg; /**< Message payload for SJACK interface messages exchanged with NSS core. */ +}; + +/** + * Callback function for receiving SJACK messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_sjack_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * nss_sjack_register_if + * Registers with the NSS for sending and receiving SJACK messages. + * + * @datatypes + * net_device \n + * nss_sjack_msg_callback_t + * + * @param[in] dev Pointer to the associated network device. + * @param[in] if_num NSS interface number. + * @param[in] event_callback Callback for the message. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_sjack_register_if(uint32_t if_num, struct net_device *dev, nss_sjack_msg_callback_t event_callback); + +/** + * nss_sjack_unregister_if + * Deregisters the SJACK interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The interface must have been previously registered. 
+ */ +extern void nss_sjack_unregister_if(uint32_t if_num); + +/** + * nss_sjack_tx_msg + * Send SJACK messages to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_sjack_msg + * + * @param[in,out] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_sjack_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_sjack_msg *msg); + +/** @} */ /* end_addtogroup nss_sjack_subsystem */ + +#endif /* __NSS_SJACK_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_stats_public.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_stats_public.h new file mode 100644 index 000000000..f282ffdf3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_stats_public.h @@ -0,0 +1,131 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/** + * @file nss_stats_public.h + * NSS statistics Structure and APIs + */ + +#ifndef __NSS_STATS_PUBLIC_H +#define __NSS_STATS_PUBLIC_H + +/** + * @addtogroup nss_stats_public_subsystem + * @{ + */ + +/** + * Maximum string length. + * + * This should be equal to maximum string size of any statistics + * inclusive of statistics value. + */ +#define NSS_STATS_MAX_STR_LENGTH 96 + +/** + * nss_stats_node + * Node statistics. + */ +enum nss_stats_node { + NSS_STATS_NODE_RX_PKTS, /**< Accelerated node Rx packets. */ + NSS_STATS_NODE_RX_BYTES, /**< Accelerated node Rx bytes. */ + NSS_STATS_NODE_TX_PKTS, /**< Accelerated node Tx packets. */ + NSS_STATS_NODE_TX_BYTES, /**< Accelerated node Tx bytes. */ + NSS_STATS_NODE_RX_QUEUE_0_DROPPED, /**< Accelerated node Rx Queue 0 dropped. */ + NSS_STATS_NODE_RX_QUEUE_1_DROPPED, /**< Accelerated node Rx Queue 1 dropped. */ + NSS_STATS_NODE_RX_QUEUE_2_DROPPED, /**< Accelerated node Rx Queue 2 dropped. */ + NSS_STATS_NODE_RX_QUEUE_3_DROPPED, /**< Accelerated node Rx Queue 3 dropped. */ + NSS_STATS_NODE_MAX, /**< Maximum message type. */ +}; + +/* + * WARNING: There is a 1:1 mapping between values of enum nss_stats_drv and corresponding + * statistics string array in nss_drv_strings.c. + */ +/** + * nss_stats_drv + * HLOS driver statistics. + */ +enum nss_stats_drv { + NSS_STATS_DRV_NBUF_ALLOC_FAILS = 0, /**< Networking buffer allocation errors. */ + NSS_STATS_DRV_PAGED_BUF_ALLOC_FAILS, /**< Paged buffer allocation errors. */ + NSS_STATS_DRV_TX_QUEUE_FULL_0, /**< Tx queue full for Core 0. */ + NSS_STATS_DRV_TX_QUEUE_FULL_1, /**< Tx queue full for Core 1. */ + NSS_STATS_DRV_TX_EMPTY, /**< Host-to-network empty buffers. */ + NSS_STATS_DRV_PAGED_TX_EMPTY, /**< Host-to-network paged empty buffers. */ + NSS_STATS_DRV_TX_PACKET, /**< Host-to-network data packets. */ + NSS_STATS_DRV_TX_CMD_REQ, /**< Host-to-network control packets. 
*/ + NSS_STATS_DRV_TX_CRYPTO_REQ, /**< Host-to-network crypto requests. */ + NSS_STATS_DRV_TX_BUFFER_REUSE, /**< Host-to-network reuse buffer count. */ + NSS_STATS_DRV_RX_EMPTY, /**< Network-to-host empty buffers. */ + NSS_STATS_DRV_RX_PACKET, /**< Network-to-host data packets. */ + NSS_STATS_DRV_RX_CMD_RESP, /**< Network-to-host command responses. */ + NSS_STATS_DRV_RX_STATUS, /**< Network-to-host status packets. */ + NSS_STATS_DRV_RX_CRYPTO_RESP, /**< Network-to-host crypto responses. */ + NSS_STATS_DRV_RX_VIRTUAL, /**< Network-to-host virtual packets. */ + NSS_STATS_DRV_TX_SIMPLE, /**< Host-to-network simple SKB packets. */ + NSS_STATS_DRV_TX_NR_FRAGS, /**< Host-to-network number of fragmented SKB packets. */ + NSS_STATS_DRV_TX_FRAGLIST, /**< Host-to-network fragmentation list of SKB packets. */ + NSS_STATS_DRV_RX_SIMPLE, /**< Network-to-host simple SKB packets. */ + NSS_STATS_DRV_RX_NR_FRAGS, /**< Network-to-host number of fragmented SKB packets. */ + NSS_STATS_DRV_RX_SKB_FRAGLIST, /**< Network-to-host fragmentation list of SKB packets. */ + NSS_STATS_DRV_RX_BAD_DESCRIPTOR, /**< Network-to-host bad descriptor reads. */ + NSS_STATS_DRV_NSS_SKB_COUNT, /**< NSS SKB pool count. */ + NSS_STATS_DRV_CHAIN_SEG_PROCESSED, /**< Network-to-host SKB chain processed count. */ + NSS_STATS_DRV_FRAG_SEG_PROCESSED, /**< Network-to-host fragments processed count. */ + NSS_STATS_DRV_TX_CMD_QUEUE_FULL, /**< Tx host-to-network control packets fail due to queue full. */ +#ifdef NSS_MULTI_H2N_DATA_RING_SUPPORT + NSS_STATS_DRV_TX_PACKET_QUEUE_0, /**< Host-to-network data packets on queue0. */ + NSS_STATS_DRV_TX_PACKET_QUEUE_1, /**< Host-to-network data packets on queue1. */ + NSS_STATS_DRV_TX_PACKET_QUEUE_2, /**< Host-to-network data packets on queue2. */ + NSS_STATS_DRV_TX_PACKET_QUEUE_3, /**< Host-to-network data packets on queue3. */ + NSS_STATS_DRV_TX_PACKET_QUEUE_4, /**< Host-to-network data packets on queue4. 
*/ + NSS_STATS_DRV_TX_PACKET_QUEUE_5, /**< Host-to-network data packets on queue5. */ + NSS_STATS_DRV_TX_PACKET_QUEUE_6, /**< Host-to-network data packets on queue6. */ + NSS_STATS_DRV_TX_PACKET_QUEUE_7, /**< Host-to-network data packets on queue7. */ +#endif + NSS_STATS_DRV_MAX, /**< Maximum message type. */ +}; + +/** + * nss_stats_types + * List of statistics categories. + */ +enum nss_stats_types { + NSS_STATS_TYPE_COMMON, /**< Common pnode statistics. */ + NSS_STATS_TYPE_DROP, /**< Packet drop statistics. */ + NSS_STATS_TYPE_ERROR, /**< Hardware or software errors different from drop or exception statistics. */ + NSS_STATS_TYPE_EXCEPTION, /**< Packet exception (to host) statistics. */ + NSS_STATS_TYPE_SPECIAL, /**< Statistics that do not fall into the above types. */ + NSS_STATS_TYPE_MAX /**< Maximum message type. */ +}; + +/** + * nss_stats_notifier_action + * Statistics notification types. + */ +enum nss_stats_notifier_action { + NSS_STATS_EVENT_NOTIFY, + NSS_STATS_EVENT_MAX +}; + +/** + * @} + */ + +#endif /* __NSS_STATS_PUBLIC_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_tls.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_tls.h new file mode 100644 index 000000000..b23268e58 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_tls.h @@ -0,0 +1,469 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE + ************************************************************************** + */ + +/** + * @file nss_tls.h + * NSS TLS common interface definitions, supports inner/outer interface split. + */ + +#ifndef _NSS_TLS_H_ +#define _NSS_TLS_H_ + +/** + * @addtogroup nss_tls_subsystem + * @{ + */ +#define NSS_TLS_VER_TLS_1_1 0x0301 /**< TLS version 1.1, major and minor version. */ +#define NSS_TLS_VER_TLS_1_2 0x0302 /**< TLS version 1.2, major and minor version. */ +#define NSS_TLS_CLE_MAX 32 /**< Maximum classification error. */ + +/** + * tls_msg_types + * Message types for TLS requests and responses. + */ +enum nss_tls_msg_type { + NSS_TLS_MSG_TYPE_NODE_CONFIG, /**< Configure TLS firmware node. */ + NSS_TLS_MSG_TYPE_NODE_SYNC, /**< Node statistics. */ + NSS_TLS_MSG_TYPE_CTX_CONFIG, /**< Send exception interface number. */ + NSS_TLS_MSG_TYPE_CTX_DECONFIG, /**< Context deconfigure message. */ + NSS_TLS_MSG_TYPE_CTX_SYNC, /**< Synchronize statistics. */ + NSS_TLS_MSG_TYPE_CIPHER_UPDATE, /**< Context session update. */ + NSS_TLS_MSG_MAX, /**< Maximum message. */ +}; + +/** + * nss_tls_error + * TLS error. + */ +enum nss_tls_error { + NSS_TLS_ERROR_NONE = 0, /**< No error. */ + NSS_TLS_ERROR_UNKNOWN_MSG, /**< Unknown message. */ + NSS_TLS_ERROR_ALREADY_CONFIGURE, /**< Node already configured. */ + NSS_TLS_ERROR_FAIL_REG_INNER_CTX, /**< Register inner context error. */ + NSS_TLS_ERROR_FAIL_REG_OUTER_CTX, /**< Register outer context error. */ + NSS_TLS_ERROR_FAIL_REQ_POOL_ALLOC, /**< Request pool allocation failed. */ + NSS_TLS_ERROR_INVALID_BLK_LEN, /**< Invalid block length. */ + NSS_TLS_ERROR_INVALID_HASH_LEN, /**< Invalid hash length. 
*/ + NSS_TLS_ERROR_INVALID_VER, /**< Invalid TLS version. */ + NSS_TLS_ERROR_INVALID_CTX_WORDS, /**< Context words size mismatch with TLS. */ + NSS_TLS_ERROR_FAIL_ALLOC_HWCTX, /**< Failed to allocate hardware context. */ + NSS_TLS_ERROR_FAIL_COPY_CTX, /**< Failed to copy context. */ + NSS_TLS_ERROR_FAIL_NOMEM, /**< Failed memory allocation. */ + NSS_TLS_ERROR_FAIL_INVAL_ALGO, /**< Invalid algorithm. */ + NSS_TLS_ERROR_MAX, /**< Maximum TLS error. */ +}; + +/** + * nss_tls_stats_types + * TLS statistics types. + */ +enum nss_tls_stats_types { + NSS_TLS_STATS_SINGLE_REC = NSS_STATS_NODE_MAX, + /**< Number of transmit single record datagrams. */ + NSS_TLS_STATS_MULTI_REC, /**< Number of multiple transmit record datagrams. */ + NSS_TLS_STATS_TX_INVAL_REQS, /**< Number of transmit invalidations successfully requested. */ + NSS_TLS_STATS_RX_CCS_REC, /**< Number of change cipher specification records received. */ + NSS_TLS_STATS_FAIL_CCS, /**< Failure to switch to new crypto. */ + NSS_TLS_STATS_ETH_NODE_DEACTIVE, /**< Ethernet node deactivated because no crypto was available. */ + NSS_TLS_STATS_CRYPTO_ALLOC_SUCCESS, /**< Number of successful crypto allocations. */ + NSS_TLS_STATS_CRYPTO_FREE_REQ, /**< Number of crypto-free requests. */ + NSS_TLS_STATS_CRYPTO_FREE_SUCCESS, /**< Number of crypto-free successes. */ + NSS_TLS_STATS_FAIL_CRYPTO_ALLOC, /**< Number of failed crypto allocations. */ + NSS_TLS_STATS_FAIL_CRYPTO_LOOKUP, /**< Failure to find an active crypto session. */ + NSS_TLS_STATS_FAIL_REQ_ALLOC, /**< Failure to allocate request memory pool. */ + NSS_TLS_STATS_FAIL_PBUF_STATS, /**< Failure in pbuf allocation for statistics. */ + NSS_TLS_STATS_FAIL_CTX_ACTIVE, /**< Failure in enqueue due to inactive context. */ + NSS_TLS_STATS_HW_LEN_ERROR, /**< Length error. */ + NSS_TLS_STATS_HW_TOKEN_ERROR, /**< Token error; unknown token command or instruction. */ + NSS_TLS_STATS_HW_BYPASS_ERROR, /**< Token contains too much bypass data. 
*/ + NSS_TLS_STATS_HW_CRYPTO_ERROR, /**< Cryptographic block size error. */ + NSS_TLS_STATS_HW_HASH_ERROR, /**< Hash block size error. */ + NSS_TLS_STATS_HW_CONFIG_ERROR, /**< Invalid command, algorithm, or mode combination. */ + NSS_TLS_STATS_HW_ALGO_ERROR, /**< Unsupported algorithm. */ + NSS_TLS_STATS_HW_HASH_OVF_ERROR, /**< Hash input overflow. */ + NSS_TLS_STATS_HW_AUTH_ERROR, /**< Hash input overflow. */ + NSS_TLS_STATS_HW_PAD_VERIFY_ERROR, /**< Pad verification error. */ + NSS_TLS_STATS_HW_TIMEOUT_ERROR, /**< Data timed out. */ + NSS_TLS_STATS_NO_DESC_IN, /**< Ingress DMA descriptor not available. */ + NSS_TLS_STATS_NO_DESC_OUT, /**< Egress DMA descriptor not available. */ + NSS_TLS_STATS_NO_REQS, /**< Not enough requests available for records. */ + NSS_TLS_STATS_MAX, /**< Maximum statistics type. */ +}; + +/** + * nss_tls_hw_stats + * TLS HW statistics. + */ +struct nss_tls_hw_stats { + /* + * Don't change the order below + */ + uint32_t hw_len_error; /**< Length error. */ + uint32_t hw_token_error; /**< Token error, unknown token command/instruction. */ + uint32_t hw_bypass_error; /**< Token contains too much bypass data. */ + uint32_t hw_crypto_error; /**< Cryptographic block size error. */ + uint32_t hw_hash_error; /**< Hash block size error. */ + uint32_t hw_config_error; /**< Invalid command/algorithm/mode/combination. */ + uint32_t hw_algo_error; /**< Unsupported algorithm. */ + uint32_t hw_hash_ovf_error; /**< Hash input overflow. */ + uint32_t hw_auth_error; /**< Hash input overflow. */ + uint32_t hw_pad_verify_error; /**< Pad verification error. */ + uint32_t hw_timeout_error; /**< Data timed out. */ +}; + +/** + * nss_tls_ctx_perf_stats + * TLS performance statistics. + */ +struct nss_tls_ctx_perf_stats { + uint32_t no_desc_in; /**< Ingress DMA descriptor not available. */ + uint32_t no_desc_out; /**< Egress DMA descriptor not available. */ + uint32_t no_reqs; /**< Not enough requests available for records. 
*/ +}; + +/** + * nss_tls_ctx_stats + * TLS session statistics. + */ +struct nss_tls_ctx_stats { + struct nss_cmn_node_stats pkt; /**< Common node statistics. */ + uint32_t single_rec; /**< Number of Tx single record datagrams. */ + uint32_t multi_rec; /**< Number of multiple Tx record datagrams. */ + uint32_t tx_inval_reqs; /**< Number of Tx invalidation successfully requested. */ + uint32_t rx_ccs_rec; /**< Number of change cipher spec records received. */ + uint32_t fail_ccs; /**< Failed to switch to new crypto. */ + uint32_t eth_node_deactive; /**< Ethernet node deactivated as no crypto available. */ + uint32_t crypto_alloc_success; /**< Number of crypto allocation succeeded. */ + uint32_t crypto_free_req; /**< Number of crypto free request. */ + uint32_t crypto_free_success; /**< Number of crypto free succeeded. */ + uint32_t fail_crypto_alloc; /**< Number of crypto allocation failed. */ + uint32_t fail_crypto_lookup; /**< Failed to find active crypto session. */ + uint32_t fail_req_alloc; /**< Failure to allocate request memory pool. */ + uint32_t fail_pbuf_stats; /**< Failure in pbuf allocation for statistics. */ + uint32_t fail_ctx_active; /**< Failure in enqueue due to inactive context. */ + + struct nss_tls_hw_stats fail_hw; /**< Hardware failure. */ + struct nss_tls_ctx_perf_stats perf; /**< Performance related statistics. */ +}; + +/** + * nss_tls_node_stats + * TLS node statistics. + */ +struct nss_tls_node_stats { + uint32_t fail_ctx_alloc; /**< Failure in allocating a context. */ + uint32_t fail_ctx_free; /**< Failure in freeing up the context. */ + uint32_t fail_pbuf_stats; /**< Failure in pbuf allocation for statistics. */ +}; + +/** + * nss_tls_ctx_config + * TLS context configuration. + */ +struct nss_tls_ctx_config { + uint32_t except_ifnum; /**< Exception interface number. */ + uint32_t headroom; /**< Headroom required for encapsulation. */ + uint32_t tailroom; /**< Tailroom required for encapsulation. 
*/ +}; + +/** + * nss_tls_cipher_update + * TLS cipher update message. + * + */ +struct nss_tls_cipher_update { + uint32_t crypto_idx; /**< Crypto index for cipher context. */ + uint16_t ver; /**< Version (TLS minor and major versions). */ + uint8_t skip; /**< Skip hardware processing. */ + uint8_t reserved; /**< Reserved for future use. */ +}; + +/** + * nss_tls_stats_notification + * TLS transmission statistics structure. + */ +struct nss_tls_stats_notification { + uint64_t stats_ctx[NSS_TLS_STATS_MAX]; /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +/** + * nss_tls_msg + * Data for sending and receiving TLS messages. + */ +struct nss_tls_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a TLS message. + */ + union { + struct nss_tls_cipher_update cipher_update; /**< Crypto configuration. */ + struct nss_tls_ctx_config ctx_cfg; /**< Context configuration. */ + struct nss_tls_ctx_stats stats; /**< Context statistics. */ + struct nss_tls_node_stats node_stats; /**< Node statistics. */ + } msg; /**< Message payload for TLS session messages exchanged with NSS core. */ +}; + +/** + * Callback function for receiving TLS messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_tls_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * Callback function for receiving TLS session data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_tls_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_tls_tx_buf + * Sends a TLS data packet to the NSS. 
+ * + * @datatypes + * sk_buff \n + * nss_ctx_instance + * + * @param[in] os_buf Pointer to the OS data buffer. + * @param[in] if_num NSS interface number. + * @param[in] nss_ctx Pointer to the NSS core context. + * + * @return + * Status of Tx buffer forwarded to NSS for TLS operation. + */ +nss_tx_status_t nss_tls_tx_buf(struct sk_buff *os_buf, uint32_t if_num, struct nss_ctx_instance *nss_ctx); + +/** + * nss_tls_tx_msg + * Sends an asynchronous IPsec message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_tls_msg + * + * @param[in] nss_ctx Pointer to the NSS HLOS driver context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_tls_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_tls_msg *msg); + +/** + * nss_tls_tx_msg_sync + * Sends a synchronous IPsec message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_tls_msg_type \n + * nss_tls_msg + * + * @param[in] nss_ctx Pointer to the NSS HLOS driver context. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] ntcm Pointer to the NSS IPsec message. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_tls_tx_msg_sync(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + enum nss_tls_msg_type type, uint16_t len, + struct nss_tls_msg *ntcm); + +/** + * nss_tls_unregister_if + * Deregisters a TLS session interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The TLS session interface must have been previously registered. + */ +extern void nss_tls_unregister_if(uint32_t if_num); + +/** + * nss_tls_register_if + * Registers a TLS session interface with the NSS for sending and receiving + * messages. 
+ * + * @datatypes + * nss_tls_data_callback_t \n + * nss_tls_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] data_cb Callback function for the message. + * @param[in] msg_cb Callback for TLS tunnel message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * @param[in] type Type of message. + * @param[in] app_ctx Pointer to the application context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_tls_register_if(uint32_t if_num, + nss_tls_data_callback_t data_cb, + nss_tls_msg_callback_t msg_cb, + struct net_device *netdev, + uint32_t features, + uint32_t type, + void *app_ctx); + +/** + * nss_tls_notify_unregister + * Deregisters an event callback. + * + * @param[in] ifnum NSS interface number. + * + * @return + * None. + */ +extern void nss_tls_notify_unregister(uint32_t ifnum); + +/** + * nss_tls_notify_register + * Registers an event callback to handle notification from TLS firmware package. + * + * @datatypes + * nss_tls_msg_callback_t + * + * @param[in] ifnum NSS interface number. + * @param[in] ev_cb Callback for TLS tunnel message. + * @param[in] app_data Pointer to the application context. + * + * @return + * Pointer to NSS core context. + */ +extern struct nss_ctx_instance *nss_tls_notify_register(uint32_t ifnum, nss_tls_msg_callback_t ev_cb, void *app_data); + +/** + * nss_tls_msg_init + * Initializes a TLS message sent asynchronously. + * + * @datatypes + * nss_tls_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context. + * + * @return + * None. 
+ */ +extern void nss_tls_msg_init(struct nss_tls_msg *ncm, uint32_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** + * nss_tls_msg_sync_init + * Initializes a TLS message. + * + * @datatypes + * nss_tls_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * + * @return + * None. + */ +extern void nss_tls_msg_sync_init(struct nss_tls_msg *ncm, uint32_t if_num, uint32_t type, uint32_t len); + +/** + * nss_tls_get_context + * Gets the NSS core context for the TLS session. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_tls_get_context(void); + +/** + * nss_tls_get_device + * Gets the original device from probe. + * + * @return + * Pointer to the device. + */ +extern struct device *nss_tls_get_dev(struct nss_ctx_instance *nss_ctx); + +/** + * nss_tls_ifmap_get + * Returns active TLS interfaces. + * + * @return + * Pointer to the interface map. + */ +unsigned long *nss_tls_ifmap_get(void); + +/** + * nss_tls_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_tls_stats_unregister_notifier(struct notifier_block *nb); + +/** + * nss_tls_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_tls_stats_register_notifier(struct notifier_block *nb); + +/** + * @} + */ + +#endif /* _NSS_TLS_H_. 
*/ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_trustsec_tx.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_trustsec_tx.h new file mode 100644 index 000000000..b71fac54d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_trustsec_tx.h @@ -0,0 +1,234 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_trustsec_tx.h + * NSS TrustSec interface definitions. + */ + +#ifndef __NSS_TRUSTSEC_TX_H +#define __NSS_TRUSTSEC_TX_H + +/** + * @addtogroup nss_trustsec_tx_subsystem + * @{ + */ + +/** + * nss_trustsec_tx_msg_types + * Message types for TrustSec Tx requests and responses. + */ +enum nss_trustsec_tx_msg_types { + NSS_TRUSTSEC_TX_MSG_CONFIGURE, /** Configure the TrustSec node. */ + NSS_TRUSTSEC_TX_MSG_UNCONFIGURE, /** Unconfigure the TrustSec node. */ + NSS_TRUSTSEC_TX_MSG_STATS_SYNC, /** Statistics synchronization. */ + NSS_TRUSTSEC_TX_MSG_UPDATE_NEXTHOP, /** Update next hop. */ + NSS_TRUSTSEC_TX_MSG_MAX /** Maximum message type. */ +}; + +/** + * nss_trustsec_tx_error_types + * Error types for the TrustSec Tx interface. 
+ */ +enum nss_trustsec_tx_error_types { + NSS_TRUSTSEC_TX_ERR_NONE, /** No error */ + NSS_TRUSTSEC_TX_ERR_INVAL_SRC_IF, /** Source interface is invalid. */ + NSS_TRUSTSEC_TX_ERR_RECONFIGURE_SRC_IF, /** Source interface is already configured. */ + NSS_TRUSTSEC_TX_ERR_DEST_IF_NOT_FOUND, /** Destination interface is not found. */ + NSS_TRUSTSEC_TX_ERR_NOT_CONFIGURED, /** Source interface is not configured. */ + NSS_TRUSTSEC_TX_ERR_SGT_MISMATCH, /** SGT mismatches. */ + NSS_TRUSTSEC_TX_ERR_UNKNOWN, /** Error is unknown. */ + NSS_TRUSTSEC_TX_ERR_MAX, /** Maximum error message. */ +}; + +/** + * nss_trustsec_tx_configure_msg + * Message information for configuring a TrustSec Tx interface. + */ +struct nss_trustsec_tx_configure_msg { + uint32_t src; /**< Interface number of the source tunnel. */ + uint32_t dest; /**< Outgoing interface number. */ + uint16_t sgt; /**< Security Group Tag value to embed in the TrustSec header. */ + uint8_t reserved[2]; /**< Reserved for word alignment. */ +}; + +/** + * nss_trustsec_tx_unconfigure_msg + * Message information for de-configuring a TrustSec Tx interface. + */ +struct nss_trustsec_tx_unconfigure_msg { + uint32_t src; /**< Interface number of the source tunnel. */ + uint16_t sgt; /**< Security Group Tag value configured for this interface. */ + uint8_t reserved[2]; /**< Reserved for word alignment. */ +}; + +/** + * nss_trustsec_tx_stats_sync_msg + * Statistics synchronization message for the TrustSec Tx interface. + */ +struct nss_trustsec_tx_stats_sync_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t invalid_src; /**< Received packets with an invalid source interface. */ + uint32_t unconfigured_src; /**< Received packets with a de-configured source interface. */ + uint32_t headroom_not_enough; /**< Not enough headroom to insert a TrustSec header. */ +}; + +/** + * nss_trustsec_tx_update_nexthop_msg + * Message information for updating the next hop for a TrustSec Tx interface. 
+ */ +struct nss_trustsec_tx_update_nexthop_msg { + uint32_t src; /**< Interface number of the source tunnel. */ + uint32_t dest; /**< Outgoing interface number. */ + uint16_t sgt; /**< Security Group Tag value to embed in the TrustSec header. */ + uint8_t reserved[2]; /**< Reserved for word alignment. */ +}; + +/** + * nss_trustsec_tx_msg + * Data for sending and receiving TrustSec Tx messages. + */ +struct nss_trustsec_tx_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a TrustSec Tx message. + */ + union { + struct nss_trustsec_tx_configure_msg configure; + /**< Configure TrustSec Tx. */ + struct nss_trustsec_tx_unconfigure_msg unconfigure; + /**< De-configure TrustSec Tx. */ + struct nss_trustsec_tx_stats_sync_msg stats_sync; + /**< Synchronize TrustSec Tx statistics. */ + struct nss_trustsec_tx_update_nexthop_msg upd_nexthop; + /**< Update next hop of TrustSec Tx. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving TrustSec Tx messages. + * + * @datatypes + * nss_trustsec_tx_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_trustsec_tx_msg_callback_t)(void *app_data, struct nss_trustsec_tx_msg *npm); + +/** + * nss_trustsec_tx_msg_init + * Initializes a TrustSec Tx message. + * + * @datatypes + * nss_trustsec_tx_msg + * + * @param[in,out] npm Pointer to the NSS Profiler message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the message. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * TRUE or FALSE. 
+ */ +extern void nss_trustsec_tx_msg_init(struct nss_trustsec_tx_msg *npm, uint16_t if_num, uint32_t type, uint32_t len, + nss_trustsec_tx_msg_callback_t cb, void *app_data); + +/** + * nss_trustsec_tx_msg + * Sends a TrustSec Tx message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_trustsec_tx_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_trustsec_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_trustsec_tx_msg *msg); + +/** + * nss_trustsec_tx_msg_sync + * Sends a TrustSec Tx message to the NSS and waits for a response. + * + * @datatypes + * nss_ctx_instance \n + * nss_trustsec_tx_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_trustsec_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_trustsec_tx_msg *msg); + +/** + * nss_trustsec_tx_update_nexthop + * Updates the next hop of the TrustSec. + * + * @param[in] src Source interface number. + * @param[in] dest Destination interface number. + * @param[in] sgt Security Group Tag value. + * + * @return + * Pointer to the NSS core context. + */ +extern nss_tx_status_t nss_trustsec_tx_update_nexthop(uint32_t src, uint32_t dest, uint16_t sgt); + +/** + * nss_trustsec_tx_get_ctx + * Gets the NSS context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_trustsec_tx_get_ctx(void); + +/** + * nss_trustsec_tx_configure_sgt + * Configures the Security Group Tag value for a source interface. + * + * @param[in] src Source interface number. + * @param[in] dest Destination interface number. + * @param[in] sgt Security Group Tag value. + * + * @return + * Pointer to the NSS core context. 
+ */ +extern nss_tx_status_t nss_trustsec_tx_configure_sgt(uint32_t src, uint32_t dest, uint16_t sgt); + +/** + * nss_trustsec_tx_unconfigure_sgt + * De-configures the Security Group Tag value for a source interface. + * + * @param[in] src Source interface number. + * @param[in] sgt Security Group Tag value. + * + * @return + * Pointer to the NSS core context. + */ +extern nss_tx_status_t nss_trustsec_tx_unconfigure_sgt(uint32_t src, uint16_t sgt); + +/** @} */ /* end_addtogroup nss_trustsec_tx_subsystem */ + +#endif /* __NSS_TRUSTSEC_TX_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_tstamp.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_tstamp.h new file mode 100644 index 000000000..3c23e4eed --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_tstamp.h @@ -0,0 +1,125 @@ +/* + ************************************************************************** + * Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_tstamp.h + * NSS to HLOS Tstamp interface definitions. + */ + +#ifndef __NSS_TSTAMP_H +#define __NSS_TSTAMP_H + +/** + * nss_tstamp_msg_type + * Time stamp message types. 
+ */ +enum nss_tstamp_msg_type { + NSS_TSTAMP_MSG_TYPE_SYNC_STATS, /**< Statistic synchronization message. */ + NSS_TSTAMP_MSG_TYPE_MAX, /**< Maximum message type. */ +}; + +/** + * nss_tstamp_h2n_pre_hdr + * Metadata added by the time stamp HLOS driver. + * + * It is used while sending the packet to the NSS time stamp module. + */ +struct nss_tstamp_h2n_pre_hdr { + uint32_t ts_ifnum; /**< Time stamp interface number. */ + uint32_t ts_tx_hdr_sz; /**< Total header size. */ +}; + +/* + * nss_tstamp_n2h_pre_hdr + * Metadata added by the NSS time stamp module. + * + * It is added before sending the packet to host. + */ +struct nss_tstamp_n2h_pre_hdr { + uint32_t ts_ifnum; /**< Time stamp interface number. */ + uint32_t ts_data_lo; /**< Time stamp lower order bits. */ + uint32_t ts_data_hi; /**< Time stamp higher order bits. */ + + uint32_t ts_tx; /**< Time stamp direction. */ + uint32_t ts_hdr_sz; /**< Size of the header including the skb data alignment padding. */ + uint32_t reserved; /**< Reserved for cache alignment. */ +}; + +/** + * nss_tstamp_stats_msg + * Statistics messages from the NSS firmware. + */ +struct nss_tstamp_stats_msg { + struct nss_cmn_node_stats node_stats; + /**< Common node statistics for time stamp. */ + uint32_t boomeranged; /**< Boomeranged packets. */ + uint32_t dropped_fail_enqueue; /**< Enqueue failed. */ + uint32_t dropped_fail_alloc; /**< Allocation for copy failed. */ + uint32_t dropped_fail_copy; /**< Copy failed. */ + uint32_t dropped_no_interface; /**< Next interface not found. */ + uint32_t dropped_no_headroom; /**< Packet does not have enough headroom. */ +}; + +/** + * nss_tstamp_msg + * Data for sending and receiving time stamp messages. + */ +struct nss_tstamp_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a time stamp common message. + */ + union { + struct nss_tstamp_stats_msg stats; + /**< Time stamp statistics. */ + } msg; /**< Message payload. 
*/ +}; + +/** + * Callback function for receiving core-to-core transmissions messages. + * + * @datatypes + * nss_tstamp_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_tstamp_msg_callback_t)(void *app_data, struct nss_tstamp_msg *msg); + +/** + * nss_tstamp_notify_register + * Registers a notifier callback for time stamp messages with the NSS. + * + * @datatypes + * nss_tstamp_msg_callback_t + * + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_tstamp_notify_register(nss_tstamp_msg_callback_t cb, void *app_data); + +/** + * @brief Transfer the packet to time stamp NSS module. + * + * @return nss_tx_status + */ +nss_tx_status_t nss_tstamp_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *skb, uint32_t if_num); + +#endif /* __NSS_TSTAMP_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_tun6rd.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_tun6rd.h new file mode 100644 index 000000000..b0675e86d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_tun6rd.h @@ -0,0 +1,198 @@ +/* + ************************************************************************** + * Copyright (c) 2014, 2017-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_tun6rd.h + * NSS TUN6RD interface definitions. + */ + +#ifndef __NSS_TUN6RD_H +#define __NSS_TUN6RD_H + +/** + * @addtogroup nss_tun6rd_subsystem + * @{ + */ + +/** + * nss_tun6rd_metadata_types + * Message types for 6RD (IPv6 in IPv4) tunnel requests and responses. + */ +enum nss_tun6rd_metadata_types { + NSS_TUN6RD_ATTACH_PNODE, + NSS_TUN6RD_RX_STATS_SYNC, + NSS_TUN6RD_ADD_UPDATE_PEER, + NSS_TUN6RD_MAX, +}; + +/** + * nss_tun6rd_attach_tunnel_msg + * Message information for configuring the 6RD tunnel. + */ +struct nss_tun6rd_attach_tunnel_msg { + uint32_t saddr; /**< Source address of the tunnel. */ + uint32_t daddr; /**< Destination address of the tunnel. */ + uint8_t tos; /**< Type Of Service field added to the outer header. */ + uint8_t ttl; /**< Time-to-live value for the tunnel. */ + uint32_t sibling_if_num; /**< Sibling interface number. */ + uint16_t reserved; /**< Reserved field added for alignment. */ +}; + +/** + * nss_tun6rd_sync_stats_msg + * Message information for 6RD tunnel synchronization statistics. + */ +struct nss_tun6rd_sync_stats_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ +}; + +/** + * nss_tun6rd_set_peer_msg + * Message information for the 6RD tunnel peer address. + */ +struct nss_tun6rd_set_peer_msg { + uint32_t ipv6_address[4]; /**< IPv6 address. */ + uint32_t dest; /**< IPv4 address. */ +}; + +/** + * nss_tun6rd_msg + * Data for sending and receiving 6RD tunnel messages. + */ +struct nss_tun6rd_msg { + struct nss_cmn_msg cm; /**< Common message header. 
*/ + + /** + * Payload of a 6RD tunnel message. + */ + union { + struct nss_tun6rd_attach_tunnel_msg tunnel; + /**< Attach a 6RD tunnel. */ + struct nss_tun6rd_sync_stats_msg stats; + /**< Synchronized statistics for the interface. */ + struct nss_tun6rd_set_peer_msg peer; + /**< Add or update the peer. */ + } msg; /**< Message payload for 6RD tunnel messages exchanged with NSS core. */ +}; + +/** + * Callback function for receiving 6RD tunnel messages. + * + * @datatypes + * nss_tun6rd_msg + * + * @param[in] app_data Pointer to the application context of the message + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_tun6rd_msg_callback_t)(void *app_data, struct nss_tun6rd_msg *msg); + +/** + * nss_tun6rd_tx + * Sends a 6RD tunnel message. + * + * @datatypes + * nss_ctx_instance \n + * nss_tun6rd_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_tun6rd_tx(struct nss_ctx_instance *nss_ctx, struct nss_tun6rd_msg *msg); + +/** + * nss_tun6rd_get_context + * Gets the TUN6RD context used in nss_tun6rd_tx(). + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_tun6rd_get_context(void); + +/** + * Callback function for receiving 6RD tunnel data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_tun6rd_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_register_tun6rd_if + * Registers the TUN6RD interface with the NSS for sending and receiving messages. + * + * @datatypes + * nss_tun6rd_callback_t \n + * nss_tun6rd_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. 
+ * @param[in] type NSS interface type. + * @param[in] tun6rd_callback Callback for the data. + * @param[in] msg_callback Callback for the message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_register_tun6rd_if(uint32_t if_num, uint32_t type, nss_tun6rd_callback_t tun6rd_callback, + nss_tun6rd_msg_callback_t msg_callback, struct net_device *netdev, uint32_t features); + +/** + * nss_unregister_tun6rd_if + * Deregisters the TUN6RD interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + * + * @dependencies + * The 6RD tunnel interface must have been previously registered. + */ +extern void nss_unregister_tun6rd_if(uint32_t if_num); + +/** + * nss_tun6rd_msg_init + * Initializes a TUN6RD message. + * + * @datatypes + * nss_tun6rd_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_tun6rd_msg_init(struct nss_tun6rd_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** @} */ /* end_addtogroup nss_tun6rd_subsystem */ + +#endif /* __NSS_TUN6RD_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_tunipip6.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_tunipip6.h new file mode 100644 index 000000000..6edc73ba7 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_tunipip6.h @@ -0,0 +1,293 @@ +/* + ************************************************************************** + * Copyright (c) 2014, 2017-2018, 2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_tunipip6.h + * NSS TUNIPIP6 interface definitions. + */ + +#ifndef __NSS_TUNIPIP6_H +#define __NSS_TUNIPIP6_H + +/** + * Maximum number of supported TUNIPIP6 tunnels. + */ +#define NSS_TUNIPIP6_TUNNEL_MAX 32 + +/** + * @addtogroup nss_tunipip6_subsystem + * @{ + */ + +/** + * nss_tunipip6_map_rule + * Mapping rule (FMR/BMR) for forwarding traffic to the node in the same domain. + */ +struct nss_tunipip6_map_rule { + uint32_t ip6_prefix[4]; /**< An IPv6 prefix assigned by a mapping rule. */ + uint32_t ip4_prefix; /**< An IPv4 prefix assigned by a mapping rule. */ + uint32_t ip6_prefix_len; /**< IPv6 prefix length. */ + uint32_t ip4_prefix_len; /**< IPv4 prefix length. */ + uint32_t ip6_suffix[4]; /**< IPv6 suffix. */ + uint32_t ip6_suffix_len; /**< IPv6 suffix length. */ + uint32_t ea_len; /**< Embedded Address (EA) bits. */ + uint32_t psid_offset; /**< PSID offset default 6. */ +}; + +/* + * nss_tunipip6_err_types + * Error types for response to messages from the host. + */ +enum nss_tunipip6_err_types { + NSS_TUNIPIP6_ERR_TYPE_MAX_TUNNELS, /**< Maximum number of tunnel reached. 
*/ + NSS_TUNIPIP6_ERR_TYPE_TUNNEL_EXIST, /**< Tunnel already exists. */ + NSS_TUNIPIP6_ERR_TYPE_ENCAP_BAD_PARAM, /**< Bad configuration. */ + NSS_TUNIPIP6_ERR_TYPE_ENCAP_FMR_EXIST, /**< FMR already exists. */ + NSS_TUNIPIP6_ERR_TYPE_ENCAP_NO_FMR, /**< No FMR configured.*/ + NSS_TUNIPIP6_ERR_TYPE_ENCAP_FMR_FULL, /**< FMR table is full. */ + NSS_TUNIPIP6_ERR_TYPE_ENCAP_INVALID_FMR, /**< Invalid FMR configured.*/ + NSS_TUNIPIP6_ERR_TYPE_ENCAP_BMR_EXIST, /**< BMR already exists. */ + NSS_TUNIPIP6_ERR_TYPE_ENCAP_NO_BMR, /**< No BMR configured. */ + NSS_TUNIPIP6_ERR_TYPE_ENCAP_FMR_MEM_ALLOC_FAILED, /**< Pool allocation for FMR failed. */ + NSS_TUNIPIP6_ERR_TYPE_UNKNOWN, /**< Unknown message type. */ + NSS_TUNIPIP6_ERROR_MAX, /**< Maximum number of errors. */ +}; + +/** + * nss_tunipip6_metadata_types + * Message types for TUNIPIP6 (IPv4 in IPv6) tunnel requests and responses. + */ +enum nss_tunipip6_metadata_types { + NSS_TUNIPIP6_TX_ENCAP_IF_CREATE, + NSS_TUNIPIP6_TX_DECAP_IF_CREATE, + NSS_TUNIPIP6_STATS_SYNC, + NSS_TUNIPIP6_FMR_RULE_ADD, + NSS_TUNIPIP6_FMR_RULE_DEL, + NSS_TUNIPIP6_FMR_RULE_FLUSH, + NSS_TUNIPIP6_BMR_RULE_ADD, + NSS_TUNIPIP6_BMR_RULE_DEL, + NSS_TUNIPIP6_MAX, +}; + +/** + * nss_tunipip6_create_msg + * Payload for configuring the TUNIPIP6 interface. + */ +struct nss_tunipip6_create_msg { + uint32_t saddr[4]; /**< Tunnel source address. */ + uint32_t daddr[4]; /**< Tunnel destination address. */ + uint32_t flowlabel; /**< Tunnel IPv6 flow label. */ + uint32_t flags; /**< Tunnel additional flags. */ + uint32_t sibling_if_num; /**< Sibling interface number. */ + uint8_t hop_limit; /**< Tunnel IPv6 hop limit. */ + uint8_t draft03; /**< Use MAP-E draft03 specification. */ + uint8_t ttl_inherit; /**< Inherit IPv4 TTL to hoplimit. */ + uint8_t tos_inherit; /**< Inherit IPv4 ToS. */ + uint8_t frag_id_update; /**< Enable update of fragment identifier of IPv4. */ + uint8_t reserved[3]; /**< Reserved bytes. 
*/ + uint32_t fmr_max; /**< Maximum number of FMRs that can be configured. */ +}; + +/** + * nss_tunipip6_debug_stats + * TUNIPIP6 debug statistics. + */ +struct nss_tunipip6_debug_stats { + struct { + struct { + uint32_t low_headroom; /**< Low headroom for encapsulation. */ + uint32_t unhandled_proto; /**< Unhandled protocol for encapsulation. */ + } exp; + + struct { + uint32_t enqueue_fail; /**< Encapsulation enqueue fail. */ + } drop; + + struct { + uint32_t err_tunnel_cfg; /**< Tunnel configuration error. */ + uint32_t total_fmr; /**< Total number of existing FMRs. */ + uint32_t fmr_add_req; /**< FMR add requests. */ + uint32_t fmr_del_req; /**< FMR delete requests. */ + uint32_t fmr_flush_req; /**< FMR flush requests. */ + uint32_t fmr_update_req; /**< FMR update requests. */ + uint32_t fmr_add_fail; /**< FMR addition failed. */ + uint32_t fmr_del_fail; /**< FMR deletion failed. */ + uint32_t err_no_fmr; /**< No FMR configured. */ + uint32_t bmr_add_req; /**< BMR add requests. */ + uint32_t bmr_del_req; /**< BMR delete requests. */ + uint32_t err_bmr_exist; /**< BMR already configured. */ + uint32_t err_no_bmr; /**< No BMR configured. */ + } cfg; + } encap; + + struct { + struct { + uint32_t enqueue_fail; /**< Decapsulation enqueue fail. */ + } drop; + } decap; +}; + +/** + * nss_tunipip6_stats_sync_msg + * Message information for TUNIPIP6 synchronization statistics. + */ +struct nss_tunipip6_stats_sync_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + struct nss_tunipip6_debug_stats tun_stats; /**< TUNIPIP6 debug statistics. */ +}; + +/** + * nss_tunipip6_msg + * Data for sending and receiving TUNIPIP6 messages. + */ +struct nss_tunipip6_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a TUNIPIP6 message. + */ + union { + struct nss_tunipip6_create_msg tunipip6_create; + /**< Create a TUNIPIP6 tunnel. 
*/ + struct nss_tunipip6_stats_sync_msg stats; + /**< Synchronized statistics for the TUNIPIP6 interface. */ + struct nss_tunipip6_map_rule map_rule; + /**< BMR/FMR rule to add/delete, new or existing rules. */ + } msg; /**< Message payload for TUNIPIP6 messages exchanged with NSS core. */ +}; + +/** + * Callback function for receiving TUNIPIP6 messages. + * + * @datatypes + * nss_tunipip6_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_tunipip6_msg_callback_t)(void *app_data, struct nss_tunipip6_msg *msg); + +/** + * nss_tunipip6_tx + * Sends a TUNIPIP6 message to NSS core. + * + * @datatypes + * nss_ctx_instance \n + * nss_tunipip6_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_tunipip6_tx(struct nss_ctx_instance *nss_ctx, struct nss_tunipip6_msg *msg); + +/** + * nss_tunipip6_tx_sync + * Sends a TUNIPIP6 message to NSS core synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_tunipip6_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_tunipip6_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_tunipip6_msg *msg); + +/** + * Callback function for receiving TUNIPIP6 data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_tunipip6_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_register_tunipip6_if + * Registers the TUNIPIP6 interface with the NSS for sending and receiving + * TUNIPIP6 messages. 
+ * + * @datatypes + * nss_tunipip6_callback_t \n + * nss_tunipip6_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] type Dynamic interface type. + * @param[in] tunipip6_callback Callback for the data. + * @param[in] event_callback Callback for the message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_register_tunipip6_if(uint32_t if_num, uint32_t type, nss_tunipip6_callback_t tunipip6_callback, + nss_tunipip6_msg_callback_t event_callback, struct net_device *netdev, uint32_t features); + +/** + * nss_unregister_tunipip6_if + * Deregisters the TUNIPIP6 interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + */ +extern void nss_unregister_tunipip6_if(uint32_t if_num); + +/** + * nss_tunipip6_msg_init + * Initializes a TUNIPIP6 message. + * + * @datatypes + * nss_tunipip6_msg + * + * @param[in,out] ntm Pointer to the IPIP6 tunnel message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the message. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_tunipip6_msg_init(struct nss_tunipip6_msg *ntm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** + * nss_tunipip6_get_context() + * Get TUNIPIP6 context. + * + * @return + * Pointer to the NSS core context. 
+ */ +extern struct nss_ctx_instance *nss_tunipip6_get_context(void); + +/** @} */ /* end_addtogroup nss_tunipip6_subsystem */ + +#endif /* __NSS_TUNIPIP6_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_udp_st.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_udp_st.h new file mode 100755 index 000000000..d6f3aa4dd --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_udp_st.h @@ -0,0 +1,284 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_udp_st.h + * NSS UDP speed test interface definitions. + */ + +#ifndef __NSS_UDP_ST_H +#define __NSS_UDP_ST_H + +/** + * @addtogroup nss_udp_st_subsystem + * @{ + */ + +#define NSS_UDP_ST_TX_CONN_MAX 16 +#define NSS_UDP_ST_FLAG_IPV4 4 /**< L3 Protocol - IPv4. */ +#define NSS_UDP_ST_FLAG_IPV6 6 /**< L3 Protocol - IPv6. */ + +/** + * nss_udp_st_message_types + * UDP speed test message types. + */ +enum nss_udp_st_message_types { + NSS_UDP_ST_START_MSG, /**< Start message. */ + NSS_UDP_ST_STOP_MSG, /**< Stop message. */ + NSS_UDP_ST_CFG_RULE_MSG, /**< Configure IPv4/IPv6 rule. 
*/ + NSS_UDP_ST_UNCFG_RULE_MSG, /**< Unconfigure IPv4/IPv6 rule. */ + NSS_UDP_ST_STATS_SYNC_MSG, /**< Statistics synchronization. */ + NSS_UDP_ST_TX_CREATE_MSG, /**< Create transmit node. */ + NSS_UDP_ST_TX_DESTROY_MSG, /**< Destroy transmit node. */ + NSS_UDP_ST_RESET_STATS_MSG, /**< Reset existing statistics. */ + NSS_UDP_ST_MAX_MSG_TYPES, /**< Maximum message type. */ +}; + +/** + * nss_udp_st_test_types + * Test types of the UDP speed test. + */ +enum nss_udp_st_test_types { + NSS_UDP_ST_TEST_RX, /**< Test type is receive. */ + NSS_UDP_ST_TEST_TX, /**< Test type is transmit. */ + NSS_UDP_ST_TEST_MAX /**< Maximum test type. */ +}; + +/** + * nss_udp_st_error + * UDP speed test error types. + */ +enum nss_udp_st_error { + NSS_UDP_ST_ERROR_NONE, /**< No error. */ + NSS_UDP_ST_ERROR_INCORRECT_RATE, /**< Incorrect Tx rate. */ + NSS_UDP_ST_ERROR_INCORRECT_BUFFER_SIZE, /**< Incorrect buffer size. */ + NSS_UDP_ST_ERROR_MEMORY_FAILURE, /**< Memory allocation failed. */ + NSS_UDP_ST_ERROR_INCORRECT_STATE, /**< Trying to configure during incorrect state. */ + NSS_UDP_ST_ERROR_INCORRECT_FLAGS, /**< Incorrect flag configuration. */ + NSS_UDP_ST_ERROR_ENTRY_EXIST, /**< Given tunnel entry already exists. */ + NSS_UDP_ST_ERROR_ENTRY_ADD_FAILED, /**< UDP ST Encap entry addition failed. */ + NSS_UDP_ST_ERROR_ENTRY_NOT_EXIST, /**< Given tunnel entry does not exist. */ + NSS_UDP_ST_ERROR_WRONG_START_MSG_TYPE, /**< Start message type error. */ + NSS_UDP_ST_ERROR_WRONG_STOP_MSG_TYPE, /**< Stop message type error. */ + NSS_UDP_ST_ERROR_TOO_MANY_USERS, /**< Too many users tried to be added. */ + NSS_UDP_ST_ERROR_UNKNOWN_MSG_TYPE, /**< Unknown message type failure. */ + NSS_UDP_ST_ERROR_PB_ALLOC, /**< Pbuf allocation failed. */ + NSS_UDP_ST_ERROR_PB_SIZE, /**< Pbuf size is too small to fit buffer. */ + NSS_UDP_ST_ERROR_DROP_QUEUE, /**< Packet dropped enqueue next node. */ + UDP_ST_ERROR_TIMER_MISSED, /**< Timer call is missed. */ + NSS_UDP_ST_ERROR_MAX, /**< Maximum error type. 
*/ +}; + +/** + * nss_udp_st_stats_time + * UDP speed test time statistics types. + */ +enum nss_udp_st_stats_time { + NSS_UDP_ST_STATS_TIME_START, /**< Start time of the test. */ + NSS_UDP_ST_STATS_TIME_CURRENT, /**< Current time of the running test. */ + NSS_UDP_ST_STATS_TIME_ELAPSED, /**< Elapsed time of the current test. */ + NSS_UDP_ST_STATS_TIME_MAX /**< Maximum time statistics. */ +}; + +/** + * Create TX node to start pushing rules. + */ +struct nss_udp_st_tx_create { + uint32_t rate; /**< Rate in Mbps. */ + uint32_t buffer_size; /**< UDP buffer size. */ + uint8_t dscp; /**< DSCP value. */ +}; + +/** + * Destroy Tx node. + */ +struct nss_udp_st_tx_destroy { + uint32_t flag; /**< Tx destroy flag. */ +}; + +/** + * NSS UDP speed test start structure. + */ +struct nss_udp_st_start { + uint32_t type; /**< Started test type (for example, receive or transmit). */ + +}; + +/** + * NSS UDP speed test stop structure. + */ +struct nss_udp_st_stop { + uint32_t type; /**< Stopped test type (for example, receive or transmit). */ +}; + +/** + * NSS UDP speed test ip structure + */ +struct nss_udp_st_ip { + union { + uint32_t ipv4; /**< IPv4 address. */ + uint32_t ipv6[4]; /**< IPv6 address. */ + } ip; +}; + +/** + * NSS UDP speed test IPv4/IPv6 configuration structure. + */ +struct nss_udp_st_cfg { + struct nss_udp_st_ip src_ip; /**< Source IP address. */ + int32_t src_port; /**< Source L4 port. */ + struct nss_udp_st_ip dest_ip; /**< Destination IP address. */ + int32_t dest_port; /**< Destination L4 port. */ + uint32_t type; /**< Started test type (for example, receive or transmit). */ + uint16_t ip_version; /**< IP version to indicate IPv4 or IPv6. */ +}; + +/** + * NSS UDP speed test node statistics structure. + */ +struct nss_udp_st_node_stats { + struct nss_cmn_node_stats node_stats; /**< Common node statistics for the UDP speed test. */ + uint32_t errors[NSS_UDP_ST_ERROR_MAX]; /**< Error statistics. */ +}; + +/** + * NSS UDP speed test statistics structure. 
+ */ +struct nss_udp_st_stats { + struct nss_udp_st_node_stats nstats; /**< Node statistics for the UDP speed test. */ + uint32_t time_stats[NSS_UDP_ST_TEST_MAX][NSS_UDP_ST_STATS_TIME_MAX]; + /**< Time statistics. */ +}; + +/** + * NSS UDP speed test reset statistics structure. + */ +struct nss_udp_st_reset_stats { + uint32_t flag; /**< Reset statistics flag. */ +}; + +/** + * Message structure of the UDP speed test commands. + */ +struct nss_udp_st_msg { + struct nss_cmn_msg cm; /**< Message header. */ + union { + struct nss_udp_st_tx_create create; /**< Prepare transmit message. */ + struct nss_udp_st_tx_destroy destroy; /**< Destroy transmit message. */ + struct nss_udp_st_start start; /**< Start message. */ + struct nss_udp_st_stop stop; /**< Stop message. */ + struct nss_udp_st_cfg cfg; /**< IPv4/IPv6 configuration message. */ + struct nss_udp_st_cfg uncfg; /**< IPv4/IPv6 unconfiguration message. */ + struct nss_udp_st_stats stats; /**< Statistics synchronization message. */ + struct nss_udp_st_reset_stats reset_stats; + /**< Reset statistics message. */ + } msg; +}; + +/** + * Callback function for receiving UDP speed test messages. + * + * @datatypes + * nss_udp_st_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_udp_st_msg_callback_t)(void *app_data, struct nss_udp_st_msg *msg); + +/** + * nss_udp_st_register_handler + * Registers the UDP speed test message handler. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * + * @return + * None. + */ +extern void nss_udp_st_register_handler(struct nss_ctx_instance *nss_ctx); + +/** + * nss_udp_st_tx + * Transmits a UDP speed test message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_udp_st_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] num Pointer to the message data. 
+ * + * @return + * Status of the transmit operation. + */ +extern nss_tx_status_t nss_udp_st_tx(struct nss_ctx_instance *nss_ctx, struct nss_udp_st_msg *num); + +/** + * nss_udp_st_tx_sync + * Transmits a synchronous UDP speed test message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_udp_st_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] num Pointer to the message data. + * + * @return + * Status of the transmit operation. + */ +extern nss_tx_status_t nss_udp_st_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_udp_st_msg *num); + +/** + * nss_udp_st_msg_init + * Initializes UDP speed test messages. + * + * @datatypes + * nss_udp_st_msg \n + * nss_udp_st_msg_callback_t + * + * @param[in,out] num Pointer to the NSS interface message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_udp_st_msg_init(struct nss_udp_st_msg *num, uint16_t if_num, uint32_t type, uint32_t len, + nss_udp_st_msg_callback_t cb, void *app_data); + +/** + * nss_udp_st_get_mgr + * Gets the NSS context that is managing UDP speed test processes. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_udp_st_get_mgr(void); + +/** + * @} + */ + +#endif /* __NSS_UDP_ST_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_unaligned.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_unaligned.h new file mode 100644 index 000000000..2eb6e1a4a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_unaligned.h @@ -0,0 +1,121 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_unaligned.h + * NSS unaligned interface definitions. + */ + +#ifndef __NSS_UNALIGNED_H +#define __NSS_UNALIGNED_H + +/** + * @addtogroup nss_unaligned_subsystem + * @{ + */ + +#define NSS_UNALIGNED_OPS_PER_MSG 54 + /**< The number of operations whose statistics are included in a message. */ +#define NSS_UNALIGNED_EMULATED_OPS 64 + /**< The number of operations that are emulated. */ + +/** + * nss_unaligned_msg_types + * Unaligned message types. + */ +enum nss_unaligned_msg_types { + NSS_UNALIGNED_MSG_STATS, /**< Performance statistics message. */ + NSS_UNALIGNED_MSG_MAX, /**< Maximum unaligned message type. */ +}; + +/** + * nss_unaligned_stats_op + * Performance statistics for emulating a single operation. + */ +struct nss_unaligned_stats_op { + uint32_t opcode_primary; + /**< Primary operation code. */ + uint32_t opcode_extension; + /**< Extension operation code, if applicable. */ + uint64_t count; + /**< Number of times operation was emulated. */ + uint32_t ticks_min; + /**< Minimum number of ticks spent emulating operation. */ + uint32_t ticks_avg; + /**< Average number of ticks spent emulating operation. 
*/ + uint32_t ticks_max; + /**< Maximum number of ticks spent emulating operation. */ + uint32_t padding; + /**< Used for consistent alignment, can be re-used. */ +}; + +/** + * nss_unaligned_stats + * Message containing all non-zero operation statistics. + */ +struct nss_unaligned_stats { + uint64_t trap_count; + /**< Number of unaligned traps encountered. */ + struct nss_unaligned_stats_op ops[NSS_UNALIGNED_EMULATED_OPS]; + /**< Statistics for each operation. */ +}; + +/** + * nss_unaligned_stats_msg + * Message containing all non-zero operation statistics. + */ +struct nss_unaligned_stats_msg { + uint64_t trap_count; /**< Number of unaligned traps encountered. */ + struct nss_unaligned_stats_op ops[NSS_UNALIGNED_OPS_PER_MSG]; + /**< Statistics for each operation. */ + uint32_t current_iteration; /**< Number of full statistics messages sent without reaching the end. */ +}; + +/** + * nss_unaligned_msg + * Message from unaligned handler node. + */ +struct nss_unaligned_msg { + struct nss_cmn_msg cm; /**< Message header. */ + + /** + * Unaligned message payload. + */ + union { + struct nss_unaligned_stats_msg stats_msg; + /**< Message containing statistics. */ + } msg; /**< Message payload. */ +}; + +/** + * nss_unaligned_register_handler() + * Registers message handler on the NSS unaligned interface and + * statistics dentry. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS context. + * + * @return + * None. + */ +void nss_unaligned_register_handler(struct nss_ctx_instance *nss_ctx); + +/** + * @} + */ +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_virt_if.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_virt_if.h new file mode 100644 index 000000000..443e6cf86 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_virt_if.h @@ -0,0 +1,436 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2017, 2019, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * @file nss_virt_if.h + * NSS Virtual interface message Structure and APIs + */ + +#ifndef __NSS_VIRT_IF_H +#define __NSS_VIRT_IF_H + +#include "nss_if.h" + +/** + * @addtogroup nss_virtual_if_subsystem + * @{ + */ + +/** + * nss_virt_if_msg_types + * Message types for virtual interface requests and responses. 
+ */ +enum nss_virt_if_msg_types { + NSS_VIRT_IF_OPEN = NSS_IF_OPEN, + NSS_VIRT_IF_CLOSE = NSS_IF_CLOSE, + NSS_VIRT_IF_LINK_STATE_NOTIFY = NSS_IF_LINK_STATE_NOTIFY, + NSS_VIRT_IF_MTU_CHANGE = NSS_IF_MTU_CHANGE, + NSS_VIRT_IF_MAC_ADDR_SET = NSS_IF_MAC_ADDR_SET, + NSS_VIRT_IF_STATS_SYNC = NSS_IF_STATS, + NSS_VIRT_IF_ISHAPER_ASSIGN = NSS_IF_ISHAPER_ASSIGN, + NSS_VIRT_IF_BSHAPER_ASSIGN = NSS_IF_BSHAPER_ASSIGN, + NSS_VIRT_IF_ISHAPER_UNASSIGN = NSS_IF_ISHAPER_UNASSIGN, + NSS_VIRT_IF_BSHAPER_UNASSIGN = NSS_IF_BSHAPER_UNASSIGN, + NSS_VIRT_IF_ISHAPER_CONFIG = NSS_IF_ISHAPER_CONFIG, + NSS_VIRT_IF_BSHAPER_CONFIG = NSS_IF_BSHAPER_CONFIG, + NSS_VIRT_IF_VSI_ASSIGN = NSS_IF_VSI_ASSIGN, + NSS_VIRT_IF_VSI_UNASSIGN = NSS_IF_VSI_UNASSIGN, + NSS_VIRT_IF_TX_CONFIG_MSG = NSS_IF_MAX_MSG_TYPES + 1, + NSS_VIRT_IF_STATS_SYNC_MSG, + NSS_VIRT_IF_MAX_MSG_TYPES, +}; + +/** + * nss_virt_if_error_types + * Error types for the virtual interface. + */ +enum nss_virt_if_error_types { + NSS_VIRT_IF_SUCCESS, + NSS_VIRT_IF_CORE_FAILURE, + NSS_VIRT_IF_ALLOC_FAILURE, + NSS_VIRT_IF_DYNAMIC_IF_FAILURE, + NSS_VIRT_IF_MSG_TX_FAILURE, + NSS_VIRT_IF_REG_FAILURE, + NSS_VIRT_IF_CORE_NOT_INITIALIZED, +}; + +/** + * nss_virt_if_base_node_stats + * Virtual interface statistics of NSS base node. + */ +struct nss_virt_if_base_node_stats { + uint32_t active_interfaces; /**< Number of active virtual interfaces. */ + uint32_t ocm_alloc_failed; /**< Number of interface allocation failure on OCM. */ + uint32_t ddr_alloc_failed; /**< Number of interface allocation failure on DDR. */ +}; + +/** + * nss_virt_if_interface_stats + * Virtual interface statistics of each pair of interfaces. + */ +struct nss_virt_if_interface_stats { + struct nss_cmn_node_stats node_stats; /**< Common statistics. */ + uint32_t tx_enqueue_failed; /**< Tx enqueue failures in the firmware. */ + uint32_t shaper_enqueue_failed; /**< Shaper enqueue failures in the firmware. */ + uint32_t ocm_alloc_failed; /**< Number of allocation failure on OCM. 
*/ +}; + +/** + * nss_virt_if_stats + * Virtual interface statistics received from the NSS. + */ +struct nss_virt_if_stats { + struct nss_virt_if_base_node_stats base_stats; + struct nss_virt_if_interface_stats if_stats; +}; + +/** + * nss_virt_if_config_msg + * Message information for configuring the virtual interface. + */ +struct nss_virt_if_config_msg { + uint32_t flags; /**< Interface flags. */ + uint32_t sibling; /**< Sibling interface number. */ + uint32_t nexthop; /**< Next hop interface number. */ + uint8_t mac_addr[ETH_ALEN]; /**< MAC address. */ +}; + +/** + * nss_virt_if_msg + * Data for sending and receiving virtual interface messages. + */ +struct nss_virt_if_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a virtual interface message. + */ + union { + union nss_if_msgs if_msgs; + /**< NSS interface base message. */ + struct nss_virt_if_config_msg if_config; + /**< Rule for creating a virtual interface. */ + struct nss_virt_if_stats stats; + /**< Virtual interface statistics. */ + } msg; /**< Message payload. */ +}; + +/* + * nss_virt_if_pvt + * Private data information for the virtual interface. + */ +struct nss_virt_if_pvt { + struct semaphore sem; + /**< Semaphore to ensure that only one instance of a message is sent to the NSS. */ + struct completion complete; + /**< Waits for message completion or time out. */ + int response; /**< Message process response from the NSS firmware. */ + int sem_init_done; /**< Semaphore initialization is done. */ +}; + +/** + * Callback to transmit virtual interface data received from NSS + * to the transmit path of the virtual interface. + * + * @datatypes + * net_device \n + * sk_buff + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + */ +typedef void (*nss_virt_if_xmit_callback_t)(struct net_device *netdev, struct sk_buff *skb); + +/** + * Callback function for virtual interface data. 
+ * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_virt_if_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for virtual interface messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_virt_if_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * nss_virt_if_handle + * Context information for WLAN-to-NSS communication. + */ +struct nss_virt_if_handle { + struct nss_ctx_instance *nss_ctx; /**< NSS context. */ + int32_t if_num_n2h; /**< Redirect interface number on NSS-to-host path. */ + int32_t if_num_h2n; /**< Redirect interface number on host-to-NSS path. */ + struct net_device *ndev; /**< Associated network device. */ + struct nss_virt_if_pvt *pvt; /**< Private data structure. */ + struct nss_virt_if_stats stats; /**< Virtual interface statistics. */ + atomic_t refcnt; /**< Reference count. */ + nss_virt_if_msg_callback_t cb; /**< Message callback. */ + void *app_data; /**< Application data to be passed to the callback. */ +}; + +/** + * nss_virt_if_dp_type + * Virtual interface datapath types. Redirect interface on NSS-to-host path will be seen by ECM for rules. + */ +enum nss_virt_if_dp_type { + NSS_VIRT_IF_DP_REDIR_N2H, /**< Redirect interface on NSS-to-host path has zero value. */ + NSS_VIRT_IF_DP_REDIR_H2N, /**< Redirect interface on host-to-NSS path has non-zero value. */ +}; + +/** + * nss_virt_if_create + * Creates a virtual interface asynchronously. + * + * @datatypes + * net_device \n + * nss_virt_if_msg_callback_t + * + * @param[in] netdev Pointer to the associated network device. 
+ * @param[in] cb Callback function for the message. This callback is + invoked when the response from the firmware is received. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Status of the Tx operation. + */ +extern int nss_virt_if_create(struct net_device *netdev, nss_virt_if_msg_callback_t cb, void *app_data); + +/** + * nss_virt_if_create_sync + * Creates a virtual interface synchronously with the default nexthop values + * NSS_N2H_INTERFACE and NSS_ETH_RX_INTERFACE. + * + * @datatypes + * net_device + * + * @param[in] netdev Pointer to the associated network device. + * + * @return + * Pointer to nss_virt_if_handle. + */ +extern struct nss_virt_if_handle *nss_virt_if_create_sync(struct net_device *netdev); + +/** + * nss_virt_if_create_sync_nexthop + * Creates a virtual interface synchronously with specified nexthops. + * + * @datatypes + * net_device + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] nexthop_n2h Nexthop interface number of network-to-host dynamic interface. + * @param[in] nexthop_h2n Nexthop interface number of host-to-network dynamic interface. + * + * @return + * Pointer to NSS virtual interface handle. + */ +extern struct nss_virt_if_handle *nss_virt_if_create_sync_nexthop(struct net_device *netdev, uint32_t nexthop_n2h, uint32_t nexthop_h2n); + +/** + * nss_virt_if_destroy + * Destroys the virtual interface asynchronously. + * + * @datatypes + * nss_virt_if_handle \n + * nss_virt_if_msg_callback_t + * + * @param[in,out] handle Pointer to the virtual interface handle (provided during + * dynamic interface allocation). + * @param[in] cb Callback function for the message. This callback is + * invoked when the response from the firmware is received. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * Status of the Tx operation. + * + * @dependencies + * The interface must have been previously created. 
+ */ +extern nss_tx_status_t nss_virt_if_destroy(struct nss_virt_if_handle *handle, nss_virt_if_msg_callback_t cb, void *app_data); + +/** + * nss_virt_if_destroy_sync + * Destroys the virtual interface synchronously. + * + * @datatypes + * nss_virt_if_handle + * + * @param[in,out] handle Pointer to the virtual interface handle (provided during + * dynamic interface allocation). + * + * @return + * Status of the Tx operation. + * + * @dependencies + * The interface must have been previously created. + */ +extern nss_tx_status_t nss_virt_if_destroy_sync(struct nss_virt_if_handle *handle); + +/** + * nss_virt_if_tx_msg + * Sends a message to the virtual interface. + * + * @datatypes + * nss_ctx_instance \n + * nss_virt_if_msg + * + * @param[in] nss_ctx Pointer to the NSS context (provided during registration). + * @param[in] nvim Pointer to the virtual interface message. + * + * @return + * Command Tx status. + */ +extern nss_tx_status_t nss_virt_if_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_virt_if_msg *nvim); + +/** + * nss_virt_if_tx_buf + * Forwards virtual interface packets to the NSS. + * + * @datatypes + * nss_virt_if_handle \n + * sk_buff + * + * @param[in,out] handle Pointer to the virtual interface handle (provided during + * registration). + * @param[in] skb Pointer to the data socket buffer. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_virt_if_tx_buf(struct nss_virt_if_handle *handle, + struct sk_buff *skb); + +/** + * nss_virt_if_xmit_callback_register + * Registers a transmit callback to a virtual interface. + * + * @datatypes + * nss_virt_if_handle \n + * nss_virt_if_xmit_callback_t + * + * @param[in,out] handle Pointer to the virtual interface handle (provided during + * dynamic interface allocation). + * @param[in] cb Callback handler for virtual data packets. + * + * @return + * None. 
+ */ +extern void nss_virt_if_xmit_callback_register(struct nss_virt_if_handle *handle, + nss_virt_if_xmit_callback_t cb); + +/** + * nss_virt_if_xmit_callback_unregister + * Deregisters the transmit callback from the virtual interface. + * + * @datatypes + * nss_virt_if_handle + * + * @param[in,out] handle Pointer to the virtual interface handle. + * + * @return + * None. + */ +extern void nss_virt_if_xmit_callback_unregister(struct nss_virt_if_handle *handle); + +/** + * nss_virt_if_register + * Registers a virtual Interface with NSS driver. + * + * @datatypes + * nss_virt_if_handle \n + * nss_virt_if_data_callback_t \n + * net_device + * + * @param[in,out] handle Pointer to the virtual interface handle(provided during + * dynamic interface allocation). + * @param[in] data_callback Callback handler for virtual data packets + * @param[in] netdev Pointer to the associated network device. + * + * @return + * Status of the Tx operation. + */ +extern void nss_virt_if_register(struct nss_virt_if_handle *handle, + nss_virt_if_data_callback_t data_callback, + struct net_device *netdev); + +/** + * nss_virt_if_unregister + * Deregisters a virtual interface from the NSS driver. + * + * @datatypes + * nss_virt_if_handle + * + * @param[in,out] handle Pointer to the virtual interface handle. + * + * @return + * None. + */ +extern void nss_virt_if_unregister(struct nss_virt_if_handle *handle); + +/** + * nss_virt_if_get_interface_num + * Returns the virtual interface number associated with the handle. + * + * @datatypes + * nss_virt_if_handle + * + * @param[in] handle Pointer to the virtual interface handle(provided during + dynamic interface allocation). + * + * @return + * Virtual interface number. + */ +extern int32_t nss_virt_if_get_interface_num(struct nss_virt_if_handle *handle); + +/** + * nss_virt_if_verify_if_num + * Verifies if the interface is 802.3 redirect type. + * + * @param[in] if_num Interface number to be verified. 
+ * + * @return + * True if if_num is 802.3 redirect type. + */ +bool nss_virt_if_verify_if_num(uint32_t if_num); + +/** + * nss_virt_if_get_context + * Gets the virtual interface context. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_virt_if_get_context(void); + +/** + * @} + */ + +#endif /* __NSS_VIRT_IF_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_vlan.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_vlan.h new file mode 100644 index 000000000..872d2e042 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_vlan.h @@ -0,0 +1,265 @@ +/* + ************************************************************************** + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_vlan.h + * NSS VLAN interface definitions. + */ + +#ifndef __NSS_VLAN_H +#define __NSS_VLAN_H + +/** + * @addtogroup nss_vlan_subsystem + * @{ + */ + +/** + * nss_vlan_msg_types + * VLAN message types. 
+ */ +enum nss_vlan_msg_types { + NSS_VLAN_MSG_ADD_TAG = NSS_IF_MAX_MSG_TYPES + 1, + NSS_VLAN_MSG_TYPE_MAX, +}; + +/** + * nss_vlan_error_types + * VLAN error types + */ +enum nss_vlan_error_types { + NSS_VLAN_ERROR_UNKNOWN_MSG = NSS_IF_ERROR_TYPE_MAX + 1, + NSS_VLAN_ERROR_TYPE_MAX, +}; + +#define NSS_VLAN_TYPE_SINGLE 0 /**< Single VLAN tag in message. */ +#define NSS_VLAN_TYPE_DOUBLE 1 /**< Double VLAN tag in message. */ + +/** + * nss_vlan_msg_add_tag + * VLAN message data for adding a VLAN tag. + */ +struct nss_vlan_msg_add_tag { + uint32_t vlan_tag; /**< VLAN tag information. */ + uint32_t next_hop; /**< Parent interface. */ + uint32_t if_num; /**< Actual physical interface. */ +}; + +/** + * nss_vlan_msg + * Data for sending and receiving VLAN messages. + */ +struct nss_vlan_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a VLAN message. + */ + union { + union nss_if_msgs if_msg; + /**< NSS interface base messages. */ + struct nss_vlan_msg_add_tag add_tag; + /**< VLAN add-a-tag message. */ + } msg; /**< Message payload. */ +}; + +/** + * nss_vlan_tx_msg + * Sends a VLAN message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_vlan_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_vlan_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_vlan_msg *msg); + +/** + * nss_vlan_tx_msg_sync + * Sends a VLAN message to the NSS synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_vlan_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_vlan_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_vlan_msg *msg); + +/** + * Initializes a VLAN message. + * + * @datatypes + * nss_vlan_msg + * + * @param[in,out] ncm Pointer to the message. 
+ * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +void nss_vlan_msg_init(struct nss_vlan_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** + * nss_vlan_get_context + * Gets the VLAN context used in nss_vlan_tx. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_vlan_get_context(void); + +/** + * Callback when VLAN data is received + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_vlan_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback to receive VLAN messages + * + * @datatypes + * nss_vlan_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_vlan_msg_callback_t)(void *app_data, struct nss_vlan_msg *msg); + +/** + * nss_register_vlan_if + * Register to send/receive VLAN messages to NSS + * + * @datatypes + * nss_vlan_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] vlan_data_callback Callback for the data. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * @param[in] app_ctx Pointer to the application context of the message. + * + * @return + * Pointer to the NSS core context. 
+ */ +struct nss_ctx_instance *nss_register_vlan_if(uint32_t if_num, nss_vlan_callback_t vlan_data_callback, + struct net_device *netdev, uint32_t features, void *app_ctx); + +/** + * Deregisters the VLAN interface from the NSS. + * + * @return + * None. + */ +void nss_unregister_vlan_if(uint32_t if_num); + +/** + * nss_vlan_tx_set_mtu_msg + * Sends a VLAN message to set the MTU. + * + * @param[in] vlan_if_num VLAN interface number. + * @param[in] mtu MTU value to set. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_vlan_tx_set_mtu_msg(uint32_t vlan_if_num, uint32_t mtu); + +/** + * nss_vlan_tx_set_mac_addr_msg + * Sends a VLAN message to set the MAC address. + * + * @param[in] vlan_if_num VLAN interface number. + * @param[in] addr Pointer to the MAC address. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_vlan_tx_set_mac_addr_msg(uint32_t vlan_if_num, uint8_t *addr); + +/** + * nss_vlan_tx_vsi_attach_msg + * Send a VLAN message to attach a VSI. + * + * @param[in] vlan_if_num VLAN interface number. + * @param[in] vsi PPE VSI to attach. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_vlan_tx_vsi_attach_msg(uint32_t vlan_if_num, uint32_t vsi); + +/** + * nss_vlan_tx_vsi_detach_msg + * Sends a VLAN message to detach VSI. + * + * @param[in] vlan_if_num VLAN interface number. + * @param[in] vsi VSI to detach. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_vlan_tx_vsi_detach_msg(uint32_t vlan_if_num, uint32_t vsi); + +/** + * nss_vlan_tx_add_tag_msg + * Sends a VLAN add tag message. + * + * @param[in] vlan_if_num VLAN interface number. + * @param[in] vlan_tag VLAN tag information. + * @param[in] next_hop Parent interface. + * @param[in] physical_dev Physical port to which to add the VLAN tag. + * + * @return + * Status of the Tx operation. 
+ */ +nss_tx_status_t nss_vlan_tx_add_tag_msg(uint32_t vlan_if_num, uint32_t vlan_tag, uint32_t next_hop, uint32_t physical_dev); + +/** + * Registers the VLAN handler with the NSS. + * + * @return + * None. + */ +void nss_vlan_register_handler(void); + +/** + * @} + */ + +#endif /* __NSS_VLAN_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_vxlan.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_vxlan.h new file mode 100644 index 000000000..254f4bee1 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_vxlan.h @@ -0,0 +1,350 @@ +/* + ************************************************************************** + * Copyright (c) 2019, 2021 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_vxlan.h + * NSS VxLAN interface definitions. + */ + +#ifndef __NSS_VXLAN_H +#define __NSS_VXLAN_H + +/** + * @addtogroup nss_vxlan_subsystem + * @{ + */ + +/** + * Maximum number of supported VxLAN tunnel sessions. + */ +#define NSS_VXLAN_MAX_TUNNELS 64 + +/** + * Maximum number of supported VxLAN FDB entries. + */ +#define NSS_VXLAN_MACDB_ENTRIES_MAX 1024 + +/** + * MAC database entries per message. 
+ */ +#define NSS_VXLAN_MACDB_ENTRIES_PER_MSG 20 + +/* + * VxLAN Rule configure message flags + */ +#define NSS_VXLAN_RULE_FLAG_GBP_ENABLED 0x0001 /**< Group Policy ID is eanbled. */ +#define NSS_VXLAN_RULE_FLAG_INHERIT_TOS 0x0002 /**< Use inner TOS for encapsulation. */ +#define NSS_VXLAN_RULE_FLAG_ENCAP_L4_CSUM_REQUIRED 0x0004 /**< Generate transmit checksum. */ +#define NSS_VXLAN_RULE_FLAG_IPV4 0x0010 /**< IPv4 tunnel. */ +#define NSS_VXLAN_RULE_FLAG_IPV6 0x0020 /**< IPv6 tunnel. */ +#define NSS_VXLAN_RULE_FLAG_UDP 0x0100 /**< UDP tunnel. */ + +/** + * nss_vxlan_msg_type + * Message types for VxLAN tunnel. + */ +enum nss_vxlan_msg_type { + NSS_VXLAN_MSG_TYPE_STATS_SYNC, /**< Statistics synchronization message. */ + NSS_VXLAN_MSG_TYPE_TUN_CONFIGURE, /**< Creating tunnel rule. */ + NSS_VXLAN_MSG_TYPE_TUN_UNCONFIGURE, /**< Destroying tunnel rule. */ + NSS_VXLAN_MSG_TYPE_TUN_ENABLE, /**< Enable the tunnel. */ + NSS_VXLAN_MSG_TYPE_TUN_DISABLE, /**< Disable the tunnel. */ + NSS_VXLAN_MSG_TYPE_MAC_ADD, /**< Add MAC rule to the database. */ + NSS_VXLAN_MSG_TYPE_MAC_DEL, /**< Remove MAC rule from the database. */ + NSS_VXLAN_MSG_TYPE_MACDB_STATS, /**< MAC database statistics synchronization message. */ + NSS_VXLAN_MSG_TYPE_MAX, /**< Maximum message type. */ +}; + +/** + * nss_vxlan_error_response_types + * Error types for VxLAN responses to messages from the host. + */ +enum nss_vxlan_error_type { + NSS_VXLAN_ERROR_TYPE_NONE = 1, /**< Unknown type error. */ + NSS_VXLAN_ERROR_TYPE_DECAP_REGISTER_FAIL, /**< Decapsulation node registration failed. */ + NSS_VXLAN_ERROR_TYPE_DEST_IP_MISMATCH, /**< Destination IP address mismatch. */ + NSS_VXLAN_ERROR_TYPE_INVALID_VNI, /**< Invalid virtual network ID. */ + NSS_VXLAN_ERROR_TYPE_INVALID_L3_PROTO, /**< L3 Protocol is invalid error. */ + NSS_VXLAN_ERROR_TYPE_INVALID_UDP_PROTO, /**< UDP Protocol is invalid error. */ + NSS_VXLAN_ERROR_TYPE_INVALID_SRC_PORT, /**< Source port range is invalid. 
*/ + NSS_VXLAN_ERROR_TYPE_MAC_BAD_ENTRY, /**< MAC table has a bad entry. */ + NSS_VXLAN_ERROR_TYPE_MAC_EXISTS, /**< MAC entry exists in the table error. */ + NSS_VXLAN_ERROR_TYPE_MAC_NOT_EXIST, /**< MAC does not exist in the table error. */ + NSS_VXLAN_ERROR_TYPE_MAC_ENTRY_UNHASHED, /**< MAC entry is not hashed in table. */ + NSS_VXLAN_ERROR_TYPE_MAC_ENTRY_ALLOC_FAILED, /**< MAC entry allocation failed. */ + NSS_VXLAN_ERROR_TYPE_MAC_ENTRY_DELETE_FAILED, /**< MAC entry deletion failed. */ + NSS_VXLAN_ERROR_TYPE_MAC_TABLE_FULL, /**< MAC table is full error. */ + NSS_VXLAN_ERROR_TYPE_SIBLING_NODE_NOT_EXIST, /**< Sibling node does not exist. */ + NSS_VXLAN_ERROR_TYPE_TUNNEL_CONFIGURED, /**< Tunnel is already configured. */ + NSS_VXLAN_ERROR_TYPE_TUNNEL_UNCONFIGURED, /**< Tunnel is not configured. */ + NSS_VXLAN_ERROR_TYPE_TUNNEL_ADD_FAILED, /**< Adding tunnel information failed. */ + NSS_VXLAN_ERROR_TYPE_TUNNEL_DISABLED, /**< Tunnel is already disabled error. */ + NSS_VXLAN_ERROR_TYPE_TUNNEL_ENABLED, /**< Tunnel is already enabled error. */ + NSS_VXLAN_ERROR_TYPE_TUNNEL_ENTRY_EXISTS, /**< Tunnel already exists. */ + NSS_VXLAN_ERROR_TYPE_MAX, /**< Maximum error type. */ +}; + +/** + * nss_vxlan_stats_msg + * Per-tunnel statistics messages from the NSS firmware. + */ +struct nss_vxlan_stats_msg { + struct nss_cmn_node_stats node_stats; /**< Common firmware statistics. */ + uint32_t except_mac_db_lookup_failed; /**< MAC database look up failed. */ + uint32_t except_mac_move; /**< User is moved. */ + uint32_t except_low_hroom; /**< Transmit exception due to insufficient headroom. */ + uint32_t except_no_policy_id; /**< Policy ID does not exist. */ + uint32_t except_extra_vxlan_hdr_flags; /**< More flags are set than NSS can process. */ + uint32_t except_vni_lookup_failed; /**< Virtual network ID look up failed. */ + uint32_t dropped_malformed; /**< Packet is malformed. */ + uint32_t dropped_next_node_queue_full; /**< Next node dropped the packet. 
*/ + uint32_t except_inner_hash; /**< Inner hash calculation failed. */ +}; + +/** + * nss_vxlan_rule_msg + * VxLAN rule message. + * + * The same rule structure applies for both encapsulation and decapsulation + * in a tunnel. + */ +struct nss_vxlan_rule_msg { + /* + * VxLAN Rules + */ + uint32_t sibling_if_num; /**< Sibling node interface number. */ + uint32_t vni; /**< Virtual network ID. */ + uint16_t tunnel_flags; /**< VxLAN tunnel flags. */ + + /* + * IP rules + */ + uint16_t flow_label; /**< Flow label. */ + uint8_t tos; /**< Type of service/traffic class. */ + uint8_t ttl; /**< TTL/Hop Limit. */ + + /* + * L4 rules + */ + uint16_t src_port_min; /**< Minimum permissible port number. */ + uint16_t src_port_max; /**< Maximum permissible port number. */ + uint16_t dest_port; /**< UDP destination port. */ +}; + +/** + * nss_vxlan_encap_rule + * Encapsulation information for a VxLAN tunnel. + */ +struct nss_vxlan_encap_rule { + uint32_t src_ip[4]; /**< Source IP. */ + uint32_t dest_ip[4]; /**< Destination IP. */ +}; + +/** + * nss_vxlan_mac_msg + * VxLAN MAC message structure. + */ +struct nss_vxlan_mac_msg { + struct nss_vxlan_encap_rule encap; + /**< Tunnel encapsulation header. */ + uint32_t vni; /**< VxLAN network identifier. */ + uint16_t mac_addr[3]; /**< MAC address. */ +}; + +/** + * nss_vxlan_macdb_stats_entry + * MAC database statistics entry. + */ +struct nss_vxlan_macdb_stats_entry { + uint32_t hits; /**< Total hash hits on this hash entry. */ + uint16_t mac[3]; /**< MAC address. */ +}; + +/** + * nss_vxlan_macdb_stats_msg + * VxLAN MAC database statistics. + */ +struct nss_vxlan_macdb_stats_msg { + uint16_t cnt; /**< Number of MAC database entries copied. */ + uint16_t reserved; /**< Reserved for future use. */ + struct nss_vxlan_macdb_stats_entry entry[NSS_VXLAN_MACDB_ENTRIES_PER_MSG]; + /**< MAC database entries. */ +}; + +/** + * nss_vxlan_msg + * Data structure for sending and receiving VxLAN messages. 
+ */ +struct nss_vxlan_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a VxLAN message. + */ + union { + struct nss_vxlan_stats_msg stats; + /**< Synchronized statistics for the VxLAN interface. */ + struct nss_vxlan_rule_msg vxlan_create; + /**< Allocate VxLAN tunnel node. */ + struct nss_vxlan_rule_msg vxlan_destroy; + /**< Destroy VxLAN tunnel node. */ + struct nss_vxlan_mac_msg mac_add; + /**< MAC add message for UDP encapsulation. */ + struct nss_vxlan_mac_msg mac_del; + /**< MAC delete message. */ + struct nss_vxlan_macdb_stats_msg db_stats; + /**< MAC database statistics. */ + } msg; /**< Payload for VxLAN tunnel messages exchanged with the NSS core. */ +}; + +/** + * Callback function for receiving VxLAN tunnel data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_vxlan_buf_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for receiving VxLAN messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_vxlan_msg_callback_t)(void *app_data, struct nss_cmn_msg *msg); + +/** + * nss_vxlan_tx_msg + * Sends VxLAN tunnel messages to the NSS. + * + * Do not call this function from a softirq or interrupt because it + * might sleep if the NSS firmware is busy serving another host thread. + * + * @datatypes + * nss_ctx_instance \n + * nss_vxlan_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] nvm Pointer to the message data. + * + * @return + * Status of the Tx operation. 
+ */ +extern nss_tx_status_t nss_vxlan_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_vxlan_msg *nvm); + +/** + * nss_vxlan_tx_msg_sync + * Sends a VxLAN message to the NSS synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_vxlan_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] nvm Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_vxlan_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_vxlan_msg *nvm); + +/** + * nss_vxlan_unregister_if + * Deregisters the VxLAN interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + */ +extern bool nss_vxlan_unregister_if(uint32_t if_num); + +/** + * nss_vxlan_register_if + * Registers the VxLAN interface with the NSS. + * + * @datatypes + * nss_vxlan_buf_callback_t \n + * nss_vxlan_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] type Dynamic interface type. + * @param[in] data_cb Callback for the data. + * @param[in] notify_cb Callback for the message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_vxlan_register_if(uint32_t if_num, uint32_t type, nss_vxlan_buf_callback_t data_cb, + nss_vxlan_msg_callback_t notify_cb, struct net_device *netdev, uint32_t features); + +/** + * nss_vxlan_register_handler + * Initializes VxLAN module in NSS + * + * @return + * None. + */ +extern void nss_vxlan_init(void); + +/** + * nss_vxlan_msg_init + * Initializes a VxLAN message. + * + * @datatypes + * nss_vxlan_msg \n + * nss_vxlan_msg_callback_t + * + * @param[in,out] nvm Pointer to the VxLAN tunnel message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Size of the message. 
+ * @param[in] cb Pointer to the message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +extern void nss_vxlan_msg_init(struct nss_vxlan_msg *nvm, uint16_t if_num, uint32_t type, uint32_t len, + nss_vxlan_msg_callback_t cb, void *app_data); + +/** + * nss_vxlan_get_ctx() + * Get VxLAN context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_vxlan_get_ctx(void); + +/** + * @} + */ + +#endif +/* __NSS_VXLAN_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi.h new file mode 100644 index 000000000..4b82904ee --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi.h @@ -0,0 +1,1015 @@ +/* + ************************************************************************** + * Copyright (c) 2015-2018, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_wifi.h + * NSS TO HLOS Wi-Fi interface definitions. 
+ */ + +#ifndef __NSS_WIFI_H +#define __NSS_WIFI_H + +/** + * @addtogroup nss_wifi_subsystem + * @{ + */ + +#define NSS_WIFI_MGMT_DATA_LEN 128 /**< Size of the Wi-Fi management data. */ +#define NSS_WIFI_FW_STATS_DATA_LEN 480 /**< Size of the firmware statictics data. */ +#define NSS_WIFI_RAWDATA_MAX_LEN 64 /**< Maximum size of the raw Wi-Fi data. */ +#define NSS_WIFI_TX_NUM_TOS_TIDS 8 /**< Number of TIDs. */ +#define NSS_WIFI_PEER_STATS_DATA_LEN 232 /**< Size of the peer statistics data. */ +#define NSS_WIFI_IPV6_ADDR_LEN 16 /**< Size of the IPv6 address. */ +#define NSS_WIFI_MAX_RSSI_CHAINS 4 /**< Maximum number of RSSI chains. */ +#define NSS_WIFI_WME_NUM_AC 4 /**< Number of ACs. */ + +/** + * Maximum number of Wi-Fi peers per radio as a sum of + * maximum number of station peers (513), + * maximum numbero of AP VAP peers (16), and + * maximum number of monitor VAP peers (1). + */ +#define NSS_WIFI_MAX_PEER 530 + +/** + * nss_wifi_metadata_types + * Wi-Fi interface request and response message types. 
+ */ +enum nss_wifi_metadata_types { + NSS_WIFI_INIT_MSG, + NSS_WIFI_POST_RECV_MSG, + NSS_WIFI_HTT_INIT_MSG, + NSS_WIFI_TX_INIT_MSG, + NSS_WIFI_RAW_SEND_MSG, + NSS_WIFI_MGMT_SEND_MSG, + NSS_WIFI_WDS_PEER_ADD_MSG, + NSS_WIFI_WDS_PEER_DEL_MSG, + NSS_WIFI_STOP_MSG, + NSS_WIFI_RESET_MSG, + NSS_WIFI_STATS_MSG, + NSS_WIFI_PEER_FREELIST_APPEND_MSG, + NSS_WIFI_RX_REORDER_ARRAY_FREELIST_APPEND_MSG, + NSS_WIFI_SEND_PEER_MEMORY_REQUEST_MSG, + NSS_WIFI_SEND_RRA_MEMORY_REQUEST_MSG, + NSS_WIFI_FW_STATS_MSG, + NSS_WIFI_MONITOR_FILTER_SET_MSG, + NSS_WIFI_PEER_BS_STATE_MSG, + NSS_WIFI_MSDU_TTL_SET_MSG, + NSS_WIFI_RX_VOW_EXTSTATS_SET_MSG, + NSS_WIFI_PKTLOG_CFG_MSG, + NSS_WIFI_ENABLE_PERPKT_TXSTATS_MSG, + NSS_WIFI_IGMP_MLD_TOS_OVERRIDE_MSG, + NSS_WIFI_OL_STATS_CFG_MSG, + NSS_WIFI_OL_STATS_MSG, + NSS_WIFI_TX_QUEUE_CFG_MSG, + NSS_WIFI_TX_MIN_THRESHOLD_CFG_MSG, + NSS_WIFI_DBDC_PROCESS_ENABLE_MSG, + NSS_WIFI_PRIMARY_RADIO_SET_MSG, + NSS_WIFI_FORCE_CLIENT_MCAST_TRAFFIC_SET_MSG, + NSS_WIFI_STORE_OTHER_PDEV_STAVAP_MSG, + NSS_WIFI_STA_KICKOUT_MSG, + NSS_WIFI_WNM_PEER_RX_ACTIVITY_MSG, + NSS_WIFI_PEER_STATS_MSG, + NSS_WIFI_WDS_VENDOR_MSG, + NSS_WIFI_TX_CAPTURE_SET_MSG, + NSS_WIFI_ALWAYS_PRIMARY_SET_MSG, + NSS_WIFI_FLUSH_HTT_CMD_MSG, + NSS_WIFI_CMD_MSG, + NSS_WIFI_ENABLE_OL_STATSV2_MSG, + NSS_WIFI_OL_PEER_TIME_MSG, + NSS_WIFI_PEER_SET_VLAN_ID_MSG, + NSS_WIFI_PEER_ISOLATION_MSG, + NSS_WIFI_MAX_MSG +}; + +/* + * wifi_error_types + * Wi-Fi error types. 
+ */ +enum wifi_error_types { + NSS_WIFI_EMSG_NONE = 0, + NSS_WIFI_EMSG_UNKNOWN, + NSS_WIFI_EMSG_MGMT_DLEN, + NSS_WIFI_EMSG_MGMT_SEND, + NSS_WIFI_EMSG_CE_INIT_FAIL, + NSS_WIFI_EMSG_PDEV_INIT_FAIL, + NSS_WIFI_EMSG_HTT_INIT_FAIL, + NSS_WIFI_EMSG_PEER_ADD, + NSS_WIFI_EMSG_WIFI_START_FAIL, + NSS_WIFI_EMSG_STATE_NOT_RESET, + NSS_WIFI_EMSG_STATE_NOT_INIT_DONE, + NSS_WIFI_EMSG_STATE_NULL_CE_HANDLE, + NSS_WIFI_EMSG_STATE_NOT_CE_READY, + NSS_WIFI_EMSG_STATE_NOT_HTT_READY, + NSS_WIFI_EMSG_FW_STATS_DLEN, + NSS_WIFI_EMSG_FW_STATS_SEND, + NSS_WIFI_EMSG_STATE_TX_INIT_FAILED, + NSS_WIFI_EMSG_IGMP_MLD_TOS_OVERRIDE_CFG, + NSS_WIFI_EMSG_PDEV_INVALID, + NSS_WIFI_EMSG_OTHER_PDEV_STAVAP_INVALID, + NSS_WIFI_EMSG_HTT_SEND_FAIL, + NSS_WIFI_EMSG_CE_RING_INIT, + NSS_WIFI_EMSG_NOTIFY_CB, + NSS_WIFI_EMSG_PEERID_INVALID, + NSS_WIFI_EMSG_PEER_INVALID, + NSS_WIFI_EMSG_UNKNOWN_CMD, + NSS_WIFI_EMSG_MAX, +}; + +/** + * nss_wifi_ext_data_pkt_type + * Exception types for Wi-Fi extended data. + */ +enum nss_wifi_ext_data_pkt_type { + NSS_WIFI_RX_EXT_INV_PEER_TYPE, + NSS_WIFI_RX_EXT_PKTLOG_TYPE, + NSS_WIFI_RX_STATS_V2_EXCEPTION, + NSS_WIFI_RX_MGMT_NULL_TYPE, + NSS_WIFI_RX_EXT_MAX_TYPE, +}; + +/** + * nss_wifi_cmd + * Wi-Fi commands. + */ +enum nss_wifi_cmd { + NSS_WIFI_FILTER_NEIGH_PEERS_CMD, + NSS_WIFI_MAX_CMD +}; + +/** + * nss_wifi_ce_ring_state_msg + * Internal state information for the copy engine ring. + */ +struct nss_wifi_ce_ring_state_msg { + uint32_t nentries; /**< Number of entries in the copy engine ring. */ + uint32_t nentries_mask; /**< Number of entry masks. */ + uint32_t sw_index; /**< Initial software index. */ + uint32_t write_index; /**< Initial write index. */ + uint32_t hw_index; /**< Initial hardware index. */ + uint32_t base_addr_CE_space; + /**< Physical address of the copy engine hardware ring. */ + uint32_t base_addr_owner_space; + /**< Virtual address of the copy engine hardware ring. */ +}; + +/** + * nss_wifi_ce_state_msg + * Internal state information for the copy engine. 
+ */ +struct nss_wifi_ce_state_msg { + struct nss_wifi_ce_ring_state_msg src_ring; + /**< Source ring information. */ + struct nss_wifi_ce_ring_state_msg dest_ring; + /**< Destination ring information. */ + uint32_t ctrl_addr; + /**< Control address relative to PCIe BAR. */ +}; + +/** + * nss_wifi_init_msg + * Wi-Fi initialization data. + */ +struct nss_wifi_init_msg { + uint32_t radio_id ; /**< Radio index. */ + uint32_t pci_mem; /**< PCI memory address. */ + uint32_t target_type; /**< Wi-Fi target type. */ + uint32_t mu_mimo_enhancement_en; + /**< Enable MU-MIMO enhancement. */ + struct nss_wifi_ce_state_msg ce_tx_state; + /**< Transmit copy engine information. */ + struct nss_wifi_ce_state_msg ce_rx_state; + /**< Receive copy engine information. */ + + /** + * Indicates whether network processing is bypassed for this radio. + */ + uint32_t bypass_nw_process; +}; + +/** + * nss_wifi_htt_init_msg + * Wi-Fi Host-to-Target (HTT) initialization data. + */ +struct nss_wifi_htt_init_msg { + uint32_t radio_id; /**< Radio index. */ + uint32_t ringsize; /**< WLAN hardware MAC ring size. */ + uint32_t fill_level; /**< Initial fill level. */ + uint32_t paddrs_ringptr; + /**< Physical address of the WLAN MAC hardware ring. */ + uint32_t paddrs_ringpaddr; + /**< Virtual address of the WLAN MAC hardware ring. */ + uint32_t alloc_idx_vaddr; + /**< Virtual address of the hardware ring index. */ + uint32_t alloc_idx_paddr; + /**< Physical address of the hardware ring index. */ +}; + +/** + * nss_wifi_tx_init_msg + * Wi-Fi Tx initialization data. + */ +struct nss_wifi_tx_init_msg { + uint32_t radio_id; /**< Radio index. */ + uint32_t desc_pool_size; /**< Number of descriptor pools allocated. */ + uint32_t tx_desc_array; + /**< Host-initialized software WLAN descriptor pool memory. */ + uint32_t wlanextdesc_addr; + /**< Starting address of the WLAN MAC extenstion descriptor pool. */ + uint32_t wlanextdesc_size; + /**< Descriptor size of the WLAN MAC extenstion. 
*/ + + /** + * Starting virtual address, as shared by the Wi-Fi firmware, for HTT Tx descriptor memory. + */ + uint32_t htt_tx_desc_base_vaddr; + + /** + * HTT Tx descriptor memory start physical address as shared by Wi-Fi firmware. + */ + uint32_t htt_tx_desc_base_paddr; + + uint32_t htt_tx_desc_offset; + /**< Descriptor size of the firmware shared HTT Tx. */ + uint32_t pmap_addr; + /**< Firmware shared peer or TID map. */ +}; + +/** + * nss_wifi_tx_queue_cfg_msg + * Wi-Fi Tx queue configuration. + */ +struct nss_wifi_tx_queue_cfg_msg { + uint32_t size; /**< Size of the Tx queue. */ + uint32_t range; /**< Peer range. */ +}; + +/** + * nss_wifi_tx_min_threshold_cfg_msg + * Minimum threshold configuration data for the Wi-Fi Tx queue. + */ +struct nss_wifi_tx_min_threshold_cfg_msg { + uint32_t min_threshold; /**< Minimum threshold value of Tx queue. */ +}; + +/** + * nss_wifi_rawsend_msg + * Information for Wi-Fi raw data. + */ +struct nss_wifi_rawsend_msg { + uint32_t radio_id ; /**< Radio index. */ + uint32_t len; /**< Size of the raw data. */ + uint32_t array[NSS_WIFI_RAWDATA_MAX_LEN]; + /**< Array of raw data. */ +}; + +/** + * nss_wifi_mgmtsend_msg + * Information for Wi-Fi management data. + */ +struct nss_wifi_mgmtsend_msg { + uint32_t desc_id; /**< Descriptor index. */ + uint32_t len; /**< Size of the management data. */ + uint8_t array[NSS_WIFI_MGMT_DATA_LEN]; + /**< Array of management data. */ +}; + +/** + * nss_wifi_fw_stats_msg + * Information for Wi-Fi firmware statistics. + */ +struct nss_wifi_fw_stats_msg { + uint32_t len; /**< Size of the statistics data. */ + uint8_t array[NSS_WIFI_FW_STATS_DATA_LEN]; + /**< Array of statistics data. */ +}; + +/** + * nss_wifi_monitor_set_filter_msg + * Wi-Fi Monitor mode for setting filter messages. + */ +struct nss_wifi_monitor_set_filter_msg { + uint32_t filter_type; /**< Type of Monitor mode filter. */ +}; + +/** + * nss_wifi_wds_peer_msg + * Wi-Fi WDS peer-specific message. 
+ */ +struct nss_wifi_wds_peer_msg { + uint8_t dest_mac[ETH_ALEN]; /**< MAC address of the destination. */ + uint8_t reserved[2]; /**< Reserved for 4-byte alignment padding. */ + uint8_t peer_mac[ETH_ALEN]; /**< MAC address of the base peer. */ + uint8_t reserved1[2]; /**< Reserved for 4-byte alignment padding. */ +}; + +/** + * nss_wifi_tx_capture_msg + * Wi-Fi Tx data capture configuration. + */ +struct nss_wifi_tx_capture_msg { + uint32_t tx_capture_enable; /**< Enable or disable Tx data capture. */ +}; + +/** + * nss_wifi_reset_msg + * Message to reset the Wi-Fi Radio. + */ +struct nss_wifi_reset_msg { + uint32_t radio_id; /**< Radio index. */ +}; + +/** + * nss_wifi_stop_msg + * Message to stop the Wi-Fi Radio. + */ +struct nss_wifi_stop_msg { + uint32_t radio_id; /**< Radio index. */ +}; + +/** + * nss_wifi_pktlog_cfg_msg + * Configuration information for a Wi-Fi packet log. + */ +struct nss_wifi_pktlog_cfg_msg { + uint32_t enable; /**< Enables or disables a packet log. */ + uint32_t bufsize; /**< Size of the packet log buffer. */ + uint32_t hdrsize; /**< Size of the packet log header. */ + uint32_t msdu_id_offset; /**< Offset for the MSDU ID in the message. */ +}; + +/** + * nss_wifi_ol_stats_cfg_msg + * Wi-Fi offload statistics configuration. + */ +struct nss_wifi_ol_stats_cfg_msg { + uint32_t stats_cfg; /**< Enable or disable offload statistics configuration. */ +}; + +/** + * nss_wifi_enable_perpkt_txstats_msg + * Wi-Fi per-packet Tx statistics configuration. + */ +struct nss_wifi_enable_perpkt_txstats_msg { + uint32_t perpkt_txstats_flag; /**< Enable or disable Tx statistics. */ +}; + +/** + * nss_wifi_peer_txtime_stats + * Peer Tx timestamp statistics per TID. + */ +struct nss_wifi_peer_txtime_stats { + uint32_t sum_tx; /**< Sum of sojourn for each packet. */ + uint32_t sum_msdus; /**< Number of MSDU per peer per TID. */ +}; + +/** + * nss_wifi_peer_tstamp_stats + * Peer ID and timestamp statistics per TID. 
+ */ +struct nss_wifi_peer_tstamp_stats { + uint32_t peer_id; /**< TID value. */ + struct nss_wifi_peer_txtime_stats sum[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Timestamps. */ + uint32_t avg[NSS_WIFI_TX_NUM_TOS_TIDS]; /**< Exponential weighted average. */ +}; + +/** + * nss_wifi_ol_peer_time_msg + * NSS Wi-Fi Tx timestamp message for n number of peers. + */ +struct nss_wifi_ol_peer_time_msg { + uint32_t npeers; /**< Number of peers. */ + struct nss_wifi_peer_tstamp_stats tstats[1]; + /**< One instance of struct. */ +}; + +/** + * nss_wifi_enable_ol_statsv2 + * Wi-Fi enable/disable send packet to host. + */ +struct nss_wifi_enable_ol_statsv2 { + uint32_t enable_ol_statsv2; /**< Flag to send packet to host. */ +}; + +/** + * nss_wifi_dbdc_process_enable_msg + * Wi-Fi DBDC repeater process configuration. + */ +struct nss_wifi_dbdc_process_enable_msg { + uint32_t dbdc_process_enable; /**< Enable or disable the DBDC process. */ +}; + +/** + * nss_wifi_primary_radio_set_msg + * Wi-Fi primary radio configuration message. + */ +struct nss_wifi_primary_radio_set_msg { + /** + * Enable/Disable Flag to set the current radio as primary. + */ + uint32_t flag; +}; + +/** + * nss_wifi_always_primary_set_msg + * Always set the Wi-Fi primary radio. + * + * The primary radio is set using the nss_wifi_primary_radio_set_msg flag. + * When the nss_wifi_always_primary_set_msg flag is set: + * - Tx -- Do not drop a unicast packet on the secondary station the VAP. Instead, give that + * packet to the primary station the VAP. + * - Rx -- Do not drop a received unicast packet on the secondary station the VAP. Instead, + * give that packet to the bridge by changing the SKB device as the primary station + * VAP. + * + * Primary usage of this feature is to avoid a loopback. + */ +struct nss_wifi_always_primary_set_msg { + /** + * Always use the primary radio for Tx and Rx in the DBDC repeater process. 
+ */ + uint32_t flag; +}; + +/** + * nss_wifi_force_client_mcast_traffic_set_msg + * Wi-Fi message to set the client multi-cast traffic for a radio. + */ +struct nss_wifi_force_client_mcast_traffic_set_msg { + uint32_t flag; /**< Flag to force set the multi-cast traffic in a radio. */ +}; + +/** + * wifi_store_other_pdev_stavap_msg + * Store the other radio's station vap. + */ +struct nss_wifi_store_other_pdev_stavap_msg { + int stavap_ifnum; /**< Station VAP interface number of the other radio. */ +}; + +/** + * nss_wifi_pl_metadata + * Wi-Fi packet log metadata. + */ +struct nss_wifi_pl_metadata { + uint32_t len; /**< Length of single buffer in MSDU. */ + uint32_t msdu_len; /**< Total MSDU length. */ + uint16_t da_tail; /**< Destination address tail bytes. */ + uint16_t sa_tail; /**< Source address tail bytes. */ + uint8_t vdev_id; /**< Virtual device ID. */ + uint8_t res1; /**< Reserved for alignment. */ + uint16_t res2; /**< Reserved for alignment. */ +}; + +/** + * nss_wifi_rx_ext_metadata + * Wi-Fi Rx extended data plane metadata. + */ +struct nss_wifi_rx_ext_metadata{ + uint16_t peer_id; /**< ID of associated Peer. */ + uint8_t htt_rx_status; /**< Rx status of the HTT. */ + uint8_t type; /**< Reserved for 4 byte alignment. */ +}; + +/** + * nss_wifi_mc_enhance_stats + * Wi-Fi multicast enhancement statistics. + */ +struct nss_wifi_mc_enhance_stats { + uint32_t rcvd; /**< Number of multicast frames received for conversion. */ + + /** + * Number of unicast frames sent as part of multicast enhancement conversion. + */ + uint32_t ucast_converted; + + /** + * Number of multicast enhancement frames dropped because of an allocation + * failure. + */ + uint32_t alloc_fail; + + /** + * Number of multicast enhancement frames dropped because of an enqueue failure. + */ + uint32_t enqueue_fail; + + /** + * Number of multicast enhancement frames dropped because of a copy failure. 
+ */ + uint32_t copy_fail; + + /** + * Number of multicast enhancement frames dropped because of a peer flow control + * send failure. + */ + uint32_t peer_flow_ctrl_send_fail; + + /** + * Number of multicast enhancement frames dropped when the destination MAC + * address is the same as the source MAC address. + */ + uint32_t loopback_err; + + /** + * Number of multicast enhancement buffer frames dropped because of an empty + * destination MAC address. + */ + uint32_t dst_addr_err; +}; + +/** + * nss_wifi_stats_sync_msg + * Wi-Fi synchronization statistics. + */ +struct nss_wifi_stats_sync_msg { + struct nss_cmn_node_stats node_stats; /**< Common node statistics. */ + uint32_t tx_transmit_dropped; + /**< Number of packets dropped during transmission. */ + uint32_t tx_transmit_completions; + /**< Number of packets for which Tx completions are received. */ + uint32_t tx_mgmt_rcv_cnt; + /**< Number of management packets received from the host for Tx. */ + uint32_t tx_mgmt_pkts; + /**< Number of management packets transmitted over Wi-Fi. */ + + /** + * Number of management packets dropped because of a Tx failure. + */ + uint32_t tx_mgmt_dropped; + + /** + * Number of management packets for which Tx completions are received. + */ + uint32_t tx_mgmt_completions; + + /** + * Number of packets for which an Tx enqueue failed because of an invalid peer. + */ + uint32_t tx_inv_peer_enq_cnt; + + /** + * Number of packets with an invalid peer ID received from Wi-Fi. + */ + uint32_t rx_inv_peer_rcv_cnt; + + uint32_t rx_pn_check_failed; + /**< Number of Rx packets that failed a packet number check. */ + + /** + * Number of Rx packets that the Wi-Fi driver successfully processed. + */ + uint32_t rx_pkts_deliverd; + + /** + * Number of Rx bytes that the Wi-Fi driver successfully processed. + */ + uint32_t rx_bytes_deliverd; + + uint32_t tx_bytes_transmit_completions; + /**< Number of bytes for which Tx completions are received. 
*/ + + /** + * Number of unaligned data packets that were received from Wi-Fi and dropped. + */ + uint32_t rx_deliver_unaligned_drop_cnt; + + uint32_t tidq_enqueue_cnt[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Number of packets enqueued to TID Queue (TIDQ). */ + uint32_t tidq_dequeue_cnt[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Number of packets dequeued from TIDQ. */ + uint32_t tidq_enqueue_fail_cnt[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Number of enqueue failures. */ + uint32_t tidq_ttl_expire_cnt[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Number of packets expired from TIDQ. */ + uint32_t tidq_dequeue_req_cnt[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Number of dequeue requests from the Wi-Fi firmware. */ + uint32_t total_tidq_depth; + /**< Current queue Depth. */ + + /** + * Total number of HTT fetch messages received from the Wi-Fi firmware. + */ + uint32_t rx_htt_fetch_cnt; + + /** + * Total number of packets that bypassed TIDQ and are sent to the Wi-Fi + * firmware. + */ + uint32_t total_tidq_bypass_cnt; + + /** + * Total number of packets dropped because of a global queue full condition. + */ + uint32_t global_q_full_cnt; + + /** + * Total number of packets dropped because of a TID queue full condition. + */ + uint32_t tidq_full_cnt; + + struct nss_wifi_mc_enhance_stats mc_enhance_stats; + /**< Multicast enhancement statistics. */ + + /** + * Number of times a group entry was not present for multicast enhancement. + */ + uint32_t mc_enhance_group_entry_miss; + + /** + * Number of times a deny list was hit during multicast enhancement. + */ + uint32_t mc_enhance_denylist_hit; +}; + +/** + * nss_wifi_peer_freelist_append_msg + * Information for creating a Wi-Fi peer freelist. + */ +struct nss_wifi_peer_freelist_append_msg { + uint32_t addr; /**< Starting address of peer freelist pool. */ + uint32_t length; /**< Size of peer freelist pool. */ + uint32_t num_peers; /**< Maximum peer entries supported in the pool. 
*/ +}; + +/** + * nss_wifi_rx_reorder_array_freelist_append_msg + * Information for creating a Wi-Fi TIDQ peer freelist array. + */ +struct nss_wifi_rx_reorder_array_freelist_append_msg { + uint32_t addr; /**< Starting address of the TIDQ freelist pool. */ + uint32_t length; /**< Size of the TIDQ freelist pool. */ + + /** + * Maximum number of Rx reorder array entries supported in the freelist pool. + */ + uint32_t num_rra; +}; + +/** + * nss_wifi_bs_peer_activity + * Active state information of the peer. + */ +struct nss_wifi_bs_peer_activity { + uint16_t nentries; /**< Number of entries in the peer ID array. */ + uint16_t peer_id[1]; /**< Array holding the peer IDs. */ +}; + +/** + * nss_wifi_msdu_ttl_set_msg + * Information for setting the Wi-Fi MSDU time-to-live value. + */ +struct nss_wifi_msdu_ttl_set_msg { + uint32_t msdu_ttl; /**< TTL value to be set. */ +}; + +/** + * nss_wifi_rx_vow_extstats_set_msg + * VoW extended statistics set. + */ +struct nss_wifi_rx_vow_extstats_set_msg { + uint32_t vow_extstats_en; /**< VoW extended statistics enable. */ +}; + +/** + * nss_wifi_igmp_mld_override_tos_msg + * Information for overriding TOS. + */ +struct nss_wifi_igmp_mld_override_tos_msg { + uint8_t igmp_mld_ovride_tid_en; + /**< Flag to enable TID override feature for IGMP/MLD configuration. */ + uint8_t igmp_mld_ovride_tid_val; + /**< Value of TID to be overridden for IGMP/MLD. */ + uint8_t res[2]; /**< Reserved for 4-byte alignment. */ +}; + +/** + * nss_wifi_peer_ol_stats + * Wi-Fi offload statistics. + */ +struct nss_wifi_peer_ol_stats { + uint32_t peer_id; /**< ID of associated peer. */ + uint32_t seq_num; /**< Sequence number of the PPDU. */ + uint32_t tx_unaggr; /**< Number of unaggregated packets transmitted. */ + uint32_t tx_aggr; /**< Number of aggregated packets transmitted. */ + uint32_t tx_mcast; /**< Number of multicast packets sent. */ + uint32_t tx_ucast; /**< Number of unicast packets sent. */ + uint32_t tx_data; /**< Number of data packets sent. 
*/ + uint32_t tx_bytes; /**< Number of bytes sent. */ + uint32_t tx_fail; /**< Number of failed Tx packets. */ + uint32_t thrup_bytes; /**< Number of throughput bytes. */ + uint32_t tx_bcast_pkts; /**< Number of broadcast packets sent. */ + uint32_t tx_bcast_bytes;/**< Number of broadcast bytes sent. */ + uint32_t tx_mgmt; /**< Number of Tx management frames. */ + uint32_t tx_wme[NSS_WIFI_WME_NUM_AC]; + /**< Data frames transmitted per AC. */ + uint32_t rx_wme[NSS_WIFI_WME_NUM_AC]; + /**< Data frames received per AC. */ + uint32_t ppdu_retries; /**< Number of PPDU retries. */ + uint32_t rssi_chains[NSS_WIFI_MAX_RSSI_CHAINS]; + /**< Acknowledgment RSSI per chain. */ + uint32_t rx_msdus; /**< Number of MSDUs received. */ + uint32_t rx_bytes; /**< Number of bytes received. */ + uint32_t rx_mpdus; /**< Number of MPDUs received. */ + uint32_t rx_retries; /**< Number of MPDU retries. */ +}; + +/** + * nss_wifi_ol_stats_msg + * Wi-Fi offload statistics. + */ +struct nss_wifi_ol_stats_msg { + uint32_t bawadv_cnt; /**< Number of block-acknowledgment window advancements. */ + uint32_t bcn_cnt; /**< Number of beacons. */ + uint32_t npeers; /**< Number of peer statistics entries. */ + struct nss_wifi_peer_ol_stats peer_ol_stats[1]; + /**< Array to hold the peer statistics. */ +}; + +/** + * nss_wifi_sta_kickout_msg + * Station kickout message from NSS Firmware + */ +struct nss_wifi_sta_kickout_msg { + uint32_t peer_id; /**< Peer ID. */ +}; + +/** + * nss_wifi_peer_isolation_msg + * Peer isolation message + */ +struct nss_wifi_peer_isolation_msg { + uint16_t peer_id; /**< Peer ID. */ + uint16_t isolation; /**< Isolation enabled/disabled. */ +}; + +/** + * nss_wifi_wnm_peer_rx_activity_msg + * Rx active state information for the peer. + */ +struct nss_wifi_wnm_peer_rx_activity_msg { + uint16_t nentries; /**< Number of entries. */ + + /** + * Array to hold the peer IDs for which the activity is reported. 
+ */ + uint16_t peer_id[NSS_WIFI_MAX_PEER]; +}; + +/** + * nss_wifi_append_metaheader + * Append metaheader after pbuf->data for stats_v2. + */ +struct nss_wifi_append_statsv2_metahdr { + uint32_t rxstatsmagic; /**< Magic to be verified on host. */ + uint32_t seq_number; /**< Sequence number of packets sent from NSS. */ + uint16_t peer_id; /**< Peer ID of peer. */ + uint16_t num_msdus; /**< Number of MSDU in PPDU. */ + uint16_t num_retries; /**< Number of retries in PPDU. */ + uint16_t num_mpdus; /**< Number of MPDU in PPDU. */ + uint32_t num_bytes; /**< Number of bytes in PPDU. */ +}; + +/** + * nss_wifi_peer_stats_msg + * Wi-Fi peer statistics. + */ +struct nss_wifi_peer_stats_msg { + uint32_t peer_id; /**< Peer ID. */ + uint32_t tidq_byte_cnt[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Number of bytes in each TIDQ. */ + uint32_t tidq_queue_max[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Maximum depth for the TID queue. */ + uint32_t tidq_enqueue_cnt[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Number of packets enqueued to the TIDQ. */ + uint32_t tidq_dequeue_cnt[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Number of packets dequeued from the TIDQ. */ + uint32_t tidq_ttl_expire_cnt[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Number of expired packets from the TIDQ. */ + uint32_t tidq_dequeue_req_cnt[NSS_WIFI_TX_NUM_TOS_TIDS]; + /**< Number of dequeue requests from the Wi-Fi firmware. */ + + /** + * Total number of packets dropped because the TID queue is full. + */ + uint32_t tidq_full_cnt[NSS_WIFI_TX_NUM_TOS_TIDS]; +}; + +/** + * nss_wifi_wds_extn_peer_cfg_msg + * Configuration information when the WDS extension is enabled. + */ +struct nss_wifi_wds_extn_peer_cfg_msg { + uint8_t mac_addr[ETH_ALEN]; /**< Mac address of the peer. */ + uint8_t wds_flags; /**< WDS flags populated from the host. */ + uint8_t reserved; /**< Alignment padding. */ + uint16_t peer_id; /**< ID of the peer. 
*/ +}; + +/** + * nss_wifi_cmd_msg + * Wi-Fi radio specific special commands to NSS Firmware + */ +struct nss_wifi_cmd_msg { + uint32_t cmd; /**< Type of command message. */ + uint32_t value; /**< Value of the command. */ +}; + +/** + * nss_wifi_msg + * Data for sending and receiving Wi-Fi messages. + */ +struct nss_wifi_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Message Payload. + */ + union { + struct nss_wifi_init_msg initmsg; + /**< Wi-Fi Radio initialization message. */ + struct nss_wifi_stop_msg stopmsg; + /**< Wi-Fi Radio stop message. */ + struct nss_wifi_reset_msg resetmsg; + /**< Wi-Fi Radio reset message. */ + struct nss_wifi_htt_init_msg httinitmsg; + /**< HTT initialization message. */ + struct nss_wifi_tx_init_msg pdevtxinitmsg; + /**< Tx initialization message. */ + struct nss_wifi_rawsend_msg rawmsg; + /**< Wi-Fi raw data send message. */ + struct nss_wifi_mgmtsend_msg mgmtmsg; + /**< Wi-Fi management data send message. */ + struct nss_wifi_wds_peer_msg pdevwdspeermsg; + /**< WDS peer-specific message. */ + struct nss_wifi_stats_sync_msg statsmsg; + /**< Synchronization statistics. */ + struct nss_wifi_peer_freelist_append_msg peer_freelist_append; + /**< Message for creating/appending peer freelist memory. */ + + /** + * Message for creating/appending a reorder array for Wi-Fi Receive Defragmentation. + */ + struct nss_wifi_rx_reorder_array_freelist_append_msg rx_reorder_array_freelist_append; + + struct nss_wifi_fw_stats_msg fwstatsmsg; + /**< Wi-Fi firmware statistics information message. */ + struct nss_wifi_monitor_set_filter_msg monitor_filter_msg; + /**< Set the filter message for Monitor mode. */ + struct nss_wifi_bs_peer_activity peer_activity; + /**< Message to get the active peer for a radio. */ + struct nss_wifi_msdu_ttl_set_msg msdu_ttl_set_msg; + /**< Set MSDU time-to-live. */ + struct nss_wifi_rx_vow_extstats_set_msg vow_extstats_msg; + /**< Enable VoW extended statistics message. 
*/ + struct nss_wifi_pktlog_cfg_msg pcm_msg; + /**< Packet log configuration message. */ + struct nss_wifi_enable_perpkt_txstats_msg ept_msg; + /**< Enable or disable per-packet Tx statistics. */ + struct nss_wifi_igmp_mld_override_tos_msg wigmpmldtm_msg; + /**< Message to enable TID override for IGMP/MLD. */ + struct nss_wifi_ol_stats_cfg_msg scm_msg; + /**< Enable or disable offload statistics configuration. */ + struct nss_wifi_ol_stats_msg ol_stats_msg; + /**< Offload statistics. */ + struct nss_wifi_tx_queue_cfg_msg wtxqcm; + /**< Tx queue configuration. */ + + /** + * Minimum threshold configuration data for the Tx queue. + */ + struct nss_wifi_tx_min_threshold_cfg_msg wtx_min_threshold_cm; + + struct nss_wifi_dbdc_process_enable_msg dbdcpe_msg; + /**< Enable or disable the DBDC repeater process. */ + struct nss_wifi_primary_radio_set_msg wprs_msg; + /**< Set the current radio as the primary radio. */ + struct nss_wifi_force_client_mcast_traffic_set_msg wfcmts_msg; + /**< Message to force multicast traffic for a radio. */ + struct nss_wifi_store_other_pdev_stavap_msg wsops_msg; + /**< Message to store the other radio's station vap. */ + struct nss_wifi_sta_kickout_msg sta_kickout_msg; + /**< Station kickout message from NSS firmware. */ + struct nss_wifi_wnm_peer_rx_activity_msg wprm; + /**< Rx activity for the peer. */ + struct nss_wifi_peer_stats_msg peer_stats_msg; + /**< Peer statistics message. */ + struct nss_wifi_wds_extn_peer_cfg_msg wpeercfg; + /**< Configuration information message when the WDS extension is enabled. */ + struct nss_wifi_tx_capture_msg tx_capture_msg; + /**< Enable or disable Tx data capture. */ + struct nss_wifi_always_primary_set_msg waps_msg; + /**< Message to always set the current radio as primary radio. */ + struct nss_wifi_cmd_msg wcmdm; + /**< Pdev command information. */ + struct nss_wifi_enable_ol_statsv2 wesh_msg; + /**< Enable version 2 tx/rx stats. 
*/ + struct nss_wifi_ol_peer_time_msg wopt_msg; + /**< Send per peer/TID timestamp statistics to host. */ + struct nss_wifi_peer_isolation_msg isolation_msg; + /**< Enable or disable peer isolation. */ + } msg; /**< Message Payload. */ +}; + +/** + * nss_wifi_get_context + * Gets the Wi-Fi context used in nss_gre_tx. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_wifi_get_context(void); + +/** + * nss_wifi_tx_msg + * Sends a Wi-Fi message to the NSS firmware. + * + * @datatypes + * nss_ctx_instance \n + * nss_wifi_if_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +extern nss_tx_status_t nss_wifi_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_wifi_msg *msg); + +/** + * Callback function for receiving Wi-Fi messages. + * + * @datatypes + * nss_wifi_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_wifi_msg_callback_t)(void *app_data, struct nss_wifi_msg *msg); + +/** + * Callback function for receiving Wi-Fi data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_wifi_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_register_wifi_if + * Registers the Wi-Fi interface with the NSS for sending and receiving messages. + * + * @datatypes + * nss_wifi_callback_t \n + * nss_wifi_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] wifi_callback Callback for the data. + * @param[in] wifi_ext_callback Callback for the extended data. + * @param[in] event_callback Callback for the message. 
+ * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this interface. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_register_wifi_if(uint32_t if_num, nss_wifi_callback_t wifi_callback, + nss_wifi_callback_t wifi_ext_callback, nss_wifi_msg_callback_t event_callback, struct net_device *netdev, uint32_t features); + +/** + * nss_unregister_wifi_if + * Deregisters the Wi-Fi interface from the NSS. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + */ +void nss_unregister_wifi_if(uint32_t if_num); + +/** + * @} + */ + +#endif /* __NSS_WIFI_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_ext_vdev_if.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_ext_vdev_if.h new file mode 100644 index 000000000..df48a5848 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_ext_vdev_if.h @@ -0,0 +1,297 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/** + * @file nss_wifi_ext_vdev_if.h + * NSS Wi-Fi extended virtual device interface definitions. + */ + +#ifndef __NSS_WIFI_EXT_VDEV_IF_H +#define __NSS_WIFI_EXT_VDEV_IF_H + +#define NSS_WIFI_EXT_VDEV_MAX 16 + +/* + * nss_wifi_ext_vdev_msg_types + * WiFi extension virtual device message types. + */ +enum nss_wifi_ext_vdev_msg_types { + NSS_WIFI_EXT_VDEV_MSG_CONFIGURE_IF = NSS_IF_MAX_MSG_TYPES + 1, + NSS_WIFI_EXT_VDEV_MSG_CONFIGURE_WDS, + NSS_WIFI_EXT_VDEV_SET_NEXT_HOP, + NSS_WIFI_EXT_VDEV_MSG_STATS_SYNC, + NSS_WIFI_EXT_VDEV_MSG_CONFIGURE_VLAN, + NSS_WIFI_EXT_VDEV_MSG_MAX +}; + +/** + * nss_wifi_ext_vdev_error_types + * WiFi extension error types. + */ +enum nss_wifi_ext_vdev_error_types { + NSS_WIFI_EXT_VDEV_ERROR_NONE = NSS_IF_ERROR_TYPE_MAX + 1, + /** Configuration successful. */ + NSS_WIFI_EXT_VDEV_ERROR_NULL_MAC, /**< NULL MAC received. */ + NSS_WIFI_EXT_VDEV_ERROR_INV_RADIO_ID, /**< Invalid radio interface number. */ + NSS_WIFI_EXT_VDEV_ERROR_INV_PVAP_ID, /**< Invalid parent virtual device interface number. */ + NSS_WIFI_EXT_VDEV_ERROR_RADIO_NOT_PRESENT, /**< Radio node is not present. */ + NSS_WIFI_EXT_VDEV_ERROR_INV_IF, /**< Message sent on invalid interface number. */ + NSS_WIFI_EXT_VDEV_ERROR_INV_VLAN_ID, /**< Invalid VLAN ID. */ + NSS_WIFI_EXT_VDEV_ERROR_INV_CMD, /**< Invalid command. */ + NSS_WIFI_EXT_VDEV_ERROR_PEERID_ALREADY_CONFIGURED, + /**< Peer ID is already configured. */ + NSS_WIFI_EXT_VDEV_ERROR_MAX /**< Maximum error types. */ +}; + +/** + * nss_wifi_ext_vdev_wds_msg + * Extended WDS configuration message. + */ +struct nss_wifi_ext_vdev_wds_msg { + uint16_t wds_peer_id; /**< WDS station peer ID. */ + uint16_t mac_addr[3]; /**< Remote MAC address. */ +}; + +/** + * nss_wifi_ext_vdev_stats + * Statistics message structure. + */ +struct nss_wifi_ext_vdev_stats { + struct nss_cmn_node_stats node_stats; /**< Ethernet node statistics. 
*/ + uint32_t mc_count; /**< Number of multicast counts. */ + uint32_t nxt_hop_drp; /**< Next hop drop. */ +}; + +/** + * nss_wifi_ext_vdev_configure_if_msg + * NSS Wi-Fi virtual device configuration message. + */ +struct nss_wifi_ext_vdev_configure_if_msg { + uint8_t mac_addr[ETH_ALEN]; /**< MAC address. */ + uint16_t radio_ifnum; /**< Radio interface corresponding to virtual AP. */ + uint16_t pvap_ifnum; /**< Parent virtual device interface number. */ +}; + +/** + * nss_wifi_ext_vdev_set_next_hop_msg + * Message to set the next hop. + */ +struct nss_wifi_ext_vdev_set_next_hop_msg { + uint32_t if_num; /**< Interface number. */ +}; + +/** + * nss_wifi_ext_vdev_vlan_msg + * Extended VLAN configuration message. + */ +struct nss_wifi_ext_vdev_vlan_msg { + uint16_t vlan_id; /**< VLAN ID. */ +}; + +/** + * nss_wifi_ext_vdev_msg + * Message structure to Send/Receive commands. + */ +struct nss_wifi_ext_vdev_msg { + struct nss_cmn_msg cm; /**< Cnode message. */ + union { + union nss_if_msgs if_msg; /**< NSS interface base message. */ + struct nss_wifi_ext_vdev_configure_if_msg cmsg; /**< Interface configuration message. */ + struct nss_wifi_ext_vdev_wds_msg wmsg; /**< WDS configure message. */ + struct nss_wifi_ext_vdev_set_next_hop_msg wnhm; /**< Next hop set message. */ + struct nss_wifi_ext_vdev_stats stats; /**< Statistics message. */ + struct nss_wifi_ext_vdev_vlan_msg vmsg; /**< VLAN message. */ + } msg; +}; + +/** + * Callback function for receiving Wi-Fi extended virtual device data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_wifi_ext_vdev_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for receiving Wi-Fi extended virtual device messages. 
+ * + * @datatypes + * nss_wifi_ext_vdev_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] wevm Pointer to the message data. + */ +typedef void (*nss_wifi_ext_vdev_msg_callback_t)(void *app_data, struct nss_cmn_msg *ncm); + +/** + * Callback function for receiving extended data from the Wi-Fi extended virtual device interface. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + * @param[in] netdev Pointer to the associated network device. + */ +typedef void (*nss_wifi_ext_vdev_ext_data_callback_t)(struct net_device *netdev, + struct sk_buff *skb, struct napi_struct *napi); +/** + * nss_wifi_ext_vdev_msg_init + * Initializes a Wi-Fi extended virtual device message. + * + * @datatypes + * nss_wifi_vdev_msg \n + * nss_wifi_vdev_msg_callback_t + * + * @param[in] nim Pointer to the NSS interface message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Length of message. + * @param[in] cb Message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +void nss_wifi_ext_vdev_msg_init(struct nss_wifi_ext_vdev_msg *nim, uint32_t if_num, uint32_t type, uint32_t len, + nss_wifi_ext_vdev_msg_callback_t cb, void *app_data); + +/** + * nss_wifi_ext_vdev_unregister_if + * Deregisters a Wi-Fi extended virtual interface from the NSS. + * + * @param[in] if_num Wi-Fi extended virtual interface number. + * + * @return + * None. + * + * @dependencies + * The Wi-Fi extended virtual interface must have been previously registered. + * + * @return + * True if successful, else false. + */ +extern bool nss_wifi_ext_vdev_unregister_if(uint32_t if_num); + +/** + * nss_wifi_ext_vdev_tx_buf + * Sends data buffers to NSS firmware asynchronously. 
+ * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] os_buf Pointer to the OS buffer (e.g. skbuff). + * @param[in] if_num Wi-Fi extended virtual interface number. + * + * @return + * Status of the transmit operation. + */ +extern nss_tx_status_t nss_wifi_ext_vdev_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, + uint32_t if_num); + +/** + * nss_wifi_ext_vdev_tx_msg + * Sends Wi-Fi extended virtual interface messages. + * + * @datatypes + * nss_ctx_instance \n + * nss_wifi_ext_vdev_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] wevm Pointer to the message data. + * + * @return + * Status of the transmit operation. + */ +extern nss_tx_status_t nss_wifi_ext_vdev_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_wifi_ext_vdev_msg *wevm); + +/** + * nss_wifi_ext_vdev_tx_msg_sync + * Sends messages to NSS firmware synchronously. + * + * @datatypes + * nss_ctx_instance \n + * nss_wifi_ext_vdev_msg + * + * @param[in] nss_ctx NSS core context. + * @param[in] nwevm Pointer to Wi-Fi extended virtual interface message data. + * + * @return + * Status of the transmit operation. + */ +extern nss_tx_status_t nss_wifi_ext_vdev_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_wifi_ext_vdev_msg *nwevm); + +/** + * nss_wifi_ext_vdev_set_next_hop + * Sets the extended virtual interface next hop. + * + * @datatypes + * nss_ctx_instance \n + * + * @param[in] ctx NSS core context. + * @param[in] if_num NSS interface number. + * @param[in] next_hop Next hop interface number. + */ +extern nss_tx_status_t nss_wifi_ext_vdev_set_next_hop(struct nss_ctx_instance *ctx, int if_num, int next_hop); + +/** + * nss_wifi_ext_vdev_get_ctx + * Gets the NSS Wi-Fi extended virtual interface context. + * + * @return + * Pointer to the NSS core context. 
+ */ +extern struct nss_ctx_instance *nss_wifi_ext_vdev_get_ctx(void); + +/** + * nss_wifi_ext_vdev_register_if + * Registers Wi-Fi extended virtual interface with NSS. + * + * @datatypes + * net_device \n + * nss_wifi_ext_vdev_data_callback_t \n + * nss_wifi_ext_vdev_ext_data_callback_t \n + * nss_wifi_ext_vdev_msg_callback_t + * + * @param[in] if_num NSS interface number. + * @param[in] cb_func_data Callback for the data. + * @param[in] cb_func_ext Callback for the message. + * @param[in] cb_func_msg Callback for the event message. + * @param[in] features Data socket buffer types supported by this interface. + * @param[in] netdev Pointer to the associated network device. + * @param[in] app_ctx Pointer to the application context. + * + * @return + * NSS interface number allocated. + */ +extern struct nss_ctx_instance *nss_wifi_ext_vdev_register_if(uint32_t if_num, + nss_wifi_ext_vdev_data_callback_t cb_func_data, nss_wifi_ext_vdev_ext_data_callback_t cb_func_ext, + nss_wifi_ext_vdev_msg_callback_t cb_func_msg, struct net_device *netdev, uint32_t features, + void *app_ctx); +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_mac_db_if.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_mac_db_if.h new file mode 100644 index 000000000..05b942553 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_mac_db_if.h @@ -0,0 +1,277 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + + /** + * @file nss_wifi_mac_db_if.h + * NSS-to-HLOS interface definitions. + */ +#ifndef __NSS_WIFI_MAC_DB_H +#define __NSS_WIFI_MAC_DB_H + +#define NSS_WIFI_MAC_DB_ENTRY_IF_LOCAL 0x1 + +/* + * MAX Wi-Fi MAC database entries sent in group + * is chosen considering the entry size and + * maximum entries a smallest buffer could accommodate. + */ +#define NSS_WIFI_MAC_DB_GROUP_ENTRIES_MAX 48 + +/** + * nss_wifi_mac_db_msg_types + * Wi-Fi MAC database messages. + */ +enum nss_wifi_mac_db_msg_types { + NSS_WIFI_MAC_DB_INIT_MSG, /**< Wi-Fi MAC database initialization message. */ + NSS_WIFI_MAC_DB_ADD_ENTRY_MSG, /**< Wi-Fi MAC database add entry message. */ + NSS_WIFI_MAC_DB_DEL_ENTRY_MSG, /**< Wi-Fi MAC database delete entry message. */ + NSS_WIFI_MAC_DB_UPDATE_ENTRY_MSG, /**< Wi-Fi MAC database update entry message. */ + NSS_WIFI_MAC_DB_DEINIT_MSG, /**< Wi-Fi MAC database deinitialization message. */ + NSS_WIFI_MAC_DB_GROUP_ENTRIES_ADD_MSG, /**< Wi-Fi MAC database group entries add message. */ + NSS_WIFI_MAC_DB_ENTRY_ACTIVITY_MSG, /**< Wi-Fi MAC database entry activity message. */ + NSS_WIFI_MAC_DB_CREATE_ENTRY_MSG, /**< Wi-Fi MAC database entry create message. */ + NSS_WIFI_MAC_DB_MAX_MSG +}; + +/** + * nss_wifi_mac_db_iftype + * Wi-Fi MAC database interface type. + */ +enum nss_wifi_mac_db_iftype { + NSS_WIFI_MAC_DB_ENTRY_IFTYPE_NONE, + NSS_WIFI_MAC_DB_ENTRY_IFTYPE_VAP, /**< Wi-Fi MAC database VAP entry interface. */ + NSS_WIFI_MAC_DB_ENTRY_IFTYPE_NON_VAP, /**< Wi-Fi MAC database non-VAP entry interface. 
*/ + NSS_WIFI_MAC_DB_ENTRY_IFTYPE_MAX /**< Wi-Fi MAC database maximum interface. */ +}; + +/** + * nss_wifi_mac_db_if_opmode + * Wi-Fi MAC database interface operation mode. + */ +enum nss_wifi_mac_db_if_opmode { + NSS_WIFI_MAC_DB_ENTRY_IF_OPMODE_NONE, /**< No entry database interface operation mode. */ + NSS_WIFI_MAC_DB_ENTRY_IF_OPMODE_ETH, /**< Ethernet entry database interface operation mode. */ + NSS_WIFI_MAC_DB_ENTRY_IF_OPMODE_WIFI_AP, /**< Wi-Fi AP entry database interface operation mode. */ + NSS_WIFI_MAC_DB_ENTRY_IF_OPMODE_WIFI_STA, /**< Wi-Fi station entry database interface operation mode. */ + NSS_WIFI_MAC_DB_ENTRY_IF_OPMODE_MAX /**< Maximum entry database interface operation mode. */ +}; + +/** + * Wi-Fi MAC database errors. + */ +enum nss_wifi_mac_db_err_types { + NSS_WIFI_MAC_DB_ERROR_NONE, + /**< Wi-Fi MAC database error none. */ + NSS_WIFI_MAC_DB_ERROR_ENTRY_ALLOC_FAIL, + /**< Error used to report a Wi-Fi MAC database entry pool allocation failure. */ + NSS_WIFI_MAC_DB_ERROR_MAC_EXISTS, + /**< Error used to report that a Wi-Fi MAC database entry already exists. */ + NSS_WIFI_MAC_DB_ERROR_MAC_TABLE_FULL, + /**< Error used to report that a Wi-Fi MAC table is full. */ + NSS_WIFI_MAC_DB_ERROR_MAC_ENTRY_ALLOC_FAILED, + /**< Error used to report a Wi-Fi MAC database entry allocation failure. */ + NSS_WIFI_MAC_DB_ERROR_ENTRY_NOT_FOUND, + /**< Error used to report that a Wi-Fi MAC database entry is not present. */ + NSS_WIFI_MAC_DB_ERROR_MAC_ENTRY_UNHASHED, + /**< Error used to report that a Wi-Fi MAC database entry is unhashed. */ + NSS_WIFI_MAC_DB_ERROR_MAC_ENTRY_DELETE_FAILED, + /**< Error used to report a Wi-Fi MAC database entry delete failure. */ + NSS_WIFI_MAC_DB_ERROR_INVALID_NUM_ENTRIES_FAIL, + /**< Error used to report the number of invalid Wi-Fi MAC database entries. */ + NSS_WIFI_MAC_DB_ERROR_NOT_ALLOCATED_FAIL, + /**< Error used to report that a Wi-Fi MAC database is not allocated. 
*/ + NSS_WIFI_MAC_DB_ERROR_INV_IF_RECVD_FAIL, + /**< Error used to report that a Wi-Fi MAC database entry interface is invalid. */ + NSS_WIFI_MAC_DB_ERROR_INVALID_EVENT, + /**< Error used to report that a Wi-Fi MAC database event is invalid. */ + NSS_WIFI_MAC_DB_ERROR_PN_INVALID, + /**< Error used to report that a Wi-Fi MAC database entry pnode is invalid. */ + NSS_WIFI_MAC_DB_ERROR_PHY_PN_INVALID, + /**< Error used to report that a Wi-Fi MAC database entry radio pnode is invalid. */ + NSS_WIFI_MAC_DB_ERROR_ENTRY_POOL_INVALID, + /**< Error used to report that a Wi-Fi MAC database entry pool is invalid. */ + NSS_WIFI_MAC_DB_ERROR_ENTRY_POOL_ALREADY_ALLOCATED, + /**< Error used to report that a Wi-Fi MAC database entry pool exists. */ + NSS_WIFI_MAC_DB_ERROR_GROUP_ENTRY_ADD_FAIL, + /**< Error used to report that a Wi-Fi MAC database group entry add failure. */ + NSS_WIFI_MAC_DB_ERROR_MAX, + /**< Wi-Fi MAC database error maximum. */ +}; + +/** + * nss_wifi_mac_db_entry_create_msg + * Wi-Fi MAC database entry create message. + */ +struct nss_wifi_mac_db_entry_create_msg { + uint8_t mac_addr[ETH_ALEN]; /**< MAC address. */ + uint16_t reserved; /**< Reserved bytes. */ + int32_t nss_if; /**< NSS interface number. */ +}; + +/** + * nss_wifi_mac_db_entry_activity_info + * Wi-Fi MAC database entry activity information. + */ +struct nss_wifi_mac_db_entry_activity_info { + uint8_t mac_addr[ETH_ALEN]; /**< MAC address. */ + uint16_t reserved; /**< Reserved bytes. */ + int32_t nss_if; /**< NSS interface number. */ +}; + +/** + * nss_wifi_mac_db_entry_activity_info_msg + * Wi-Fi MAC database entry activity information message. + */ +struct nss_wifi_mac_db_entry_activity_info_msg { + uint32_t nentries; /**< Number of entries. */ + struct nss_wifi_mac_db_entry_activity_info info[1]; + /**< Wi-Fi MAC database entry activity information. */ +}; + +/** + * nss_wifi_mac_db_entry_info_msg + * Wi-Fi MAC database entry information. 
+ */ +struct nss_wifi_mac_db_entry_info_msg { + uint8_t mac_addr[ETH_ALEN]; /**< MAC address. */ + uint16_t flag; /**< Flag information about NSS interface. */ + int32_t nss_if; /**< NSS interface number. */ + uint32_t iftype; /**< NSS interface type. */ + uint32_t opmode; /**< NSS interface operation mode. */ + uint32_t wiphy_ifnum; /**< NSS interface for wireless physical device. */ +}; + +/** + * nss_wifi_mac_db_entry_group_info_msg + * Wi-Fi MAC database group of entries information. + */ +struct nss_wifi_mac_db_entry_group_info_msg { + uint32_t num_entries; + /**< Number of entries in group information message. */ + struct nss_wifi_mac_db_entry_info_msg entry[NSS_WIFI_MAC_DB_GROUP_ENTRIES_MAX]; + /**< Wi-Fi MAC database information specific message. */ +}; + +/** + * nss_wifi_mac_db_msg + * Structure that describes Wi-Fi MAC database messages. + */ +struct nss_wifi_mac_db_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of Wi-Fi MAC database message. + */ + union { + struct nss_wifi_mac_db_entry_info_msg nmfdbeimsg; + /**< Wi-Fi MAC database information specific message. */ + struct nss_wifi_mac_db_entry_group_info_msg nmfdbegimsg; + /**< Wi-Fi MAC database information specific message. */ + struct nss_wifi_mac_db_entry_activity_info_msg nmfdbeact_imsg; + /**< Wi-Fi MAC database entry activity information message. */ + struct nss_wifi_mac_db_entry_create_msg nmfdbecmsg; + /**< Wi-Fi MAC database entry create message. */ + } msg; /**< Message payload. */ +}; + +/** + * nss_wifi_mac_db_msg_callback_t + * Callback to receive Wi-Fi MAC database messages. + * + * @datatypes + * nss_wifi_mac_db_msg + * + * @param[in] app_data Application context of the message. + * @param[in] msg Message data. + * + * @return + * void + */ +typedef void (*nss_wifi_mac_db_msg_callback_t)(void *app_data, struct nss_wifi_mac_db_msg *msg); + +/** + * nss_wifi_mac_db_callback_t + * Callback to receive Wi-Fi MAC database messages. 
+ * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + * + * @return + * void + */ +typedef void (*nss_wifi_mac_db_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + + +/** + * nss_wifi_mac_db_tx_msg + * Send Wi-Fi MAC database messages. + * + * @datatypes + * nss_ctx_instance \n + * nss_wifi_mac_db_msg + * + * @param[in] nss_ctx NSS context. + * @param[in] msg NSS Wi-Fi MAC database message. + * + * @return + * nss_tx_status_t Tx status + */ +extern nss_tx_status_t nss_wifi_mac_db_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_wifi_mac_db_msg *msg); + +/** + * nss_register_wifi_mac_db_if + * Register to send/receive Wi-Fi MAC database messages to NSS. + * + * @datatypes + * nss_wifi_mac_db_callback_t \n + * nss_wifi_mac_db_msg_callback_t \n + * net_device + * + * @param[in] if_num NSS interface number. + * @param[in] mfdb_callback Callback for the Wi-Fi MAC database device data. + * @param[in] mfdb_ext_callback Callback for the extended data. + * @param[in] event_callback Callback for the message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this + * interface. + * + * @return + * nss_ctx_instance* NSS context + */ +struct nss_ctx_instance *nss_register_wifi_mac_db_if(uint32_t if_num, nss_wifi_mac_db_callback_t wifi_mac_db_callback, + nss_wifi_mac_db_callback_t wifi_mac_db_ext_callback, nss_wifi_mac_db_msg_callback_t event_callback, struct net_device *netdev, uint32_t features); + +/** + * nss_unregister_wifi_mac_db_if + * Deregister Wi-Fi MAC database SoC interface with NSS. + * + * @param[in] if_num NSS interface number. 
+ * + * @return + * void + */ +void nss_unregister_wifi_mac_db_if(uint32_t if_num); +struct nss_ctx_instance *nss_wifi_mac_db_get_context(void); +#endif /* __NSS_WIFI_MAC_DB_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_mesh.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_mesh.h new file mode 100644 index 000000000..986170a43 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_mesh.h @@ -0,0 +1,1000 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_wifi_mesh.h + * NSS TO HLOS Wi-Fi mesh device interface definitions. + */ + +#ifndef __NSS_WIFI_MESH_H +#define __NSS_WIFI_MESH_H + +/** + * @addtogroup nss_wifi_mesh_subsystem + * @{ + */ +#define NSS_WIFI_MESH_WIFI_HDRLEN_MAX 48 +/* + * Wi-Fi mesh maximum dynamic interface. + */ +#define NSS_WIFI_MESH_MAX_DYNAMIC_INTERFACE 32 + +/** + * Mesh path update flags. 
+ */ +#define NSS_WIFI_MESH_PATH_UPDATE_FLAG_NEXTHOP 0x01 +#define NSS_WIFI_MESH_PATH_UPDATE_FLAG_HOPCOUNT 0x02 +#define NSS_WIFI_MESH_PATH_UPDATE_FLAG_METRIC 0x04 +#define NSS_WIFI_MESH_PATH_UPDATE_FLAG_MESH_FLAGS 0x08 +#define NSS_WIFI_MESH_PATH_UPDATE_FLAG_EXPIRY_TIME 0x10 +#define NSS_WIFI_MESH_PATH_UPDATE_FLAG_MESH_GATE 0x20 +#define NSS_WIFI_MESH_PATH_UPDATE_FLAG_BLOCK_MESH_FWD 0x40 +#define NSS_WIFI_MESH_PATH_UPDATE_FLAG_METADATA_ENABLE_VALID 0x80 + +/** + * Mesh proxy path update flags. + */ +#define NSS_WIFI_MESH_PROXY_PATH_UPDATE_FLAG_MDA 0x1 +#define NSS_WIFI_MESH_PROXY_PATH_UPDATE_FLAG_MESH 0x2 + +/** + * Mesh path update flags. + */ +#define NSS_WIFI_MESH_PATH_FLAG_REFRESH_SENT 0x1 + +/** + * Mesh path maximum entries. + */ +#define NSS_WIFI_MESH_PATH_MAX_ENTRIES 10 + +/** + * Mesh proxy path maximum entries. + */ +#define NSS_WIFI_MESH_PROXY_PATH_MAX_ENTRIES 10 + +/** + * Mesh configuration flags. + */ +#define NSS_WIFI_MESH_CONFIG_FLAG_TTL_VALID 0x01 +#define NSS_WIFI_MESH_CONFIG_FLAG_MPATH_REFRESH_VALID 0x02 +#define NSS_WIFI_MESH_CONFIG_FLAG_BLOCK_MESH_FORWARDING_VALID 0x04 +#define NSS_WIFI_MESH_CONFIG_FLAG_LOCAL_MAC_VALID 0x08 +#define NSS_WIFI_MESH_CONFIG_FLAG_MPP_LEARNING_MODE_VALID 0x10 +#define NSS_WIFI_MESH_CONFIG_FLAG_SIBLING_IF_NUM_VALID 0x20 +#define NSS_WIFI_MESH_CONFIG_FLAG_BLOCK_MESH_FWD_VALID 0x40 +#define NSS_WIFI_MESH_CONFIG_FLAG_METADATA_ENABLE_VALID 0x80 + +/** + * nss_wifi_mesh_path_flags + * Wi-Fi mesh path flags. + */ +#define NSS_WIFI_MESH_PATH_FLAG_ACTIVE 0x01 +#define NSS_WIFI_MESH_PATH_FLAG_RESOLVING 0x02 +#define NSS_WIFI_MESH_PATH_FLAG_RESOLVED 0x04 +#define NSS_WIFI_MESH_PATH_FLAG_FIXED 0x08 + +#define NSS_WIFI_MESH_ENCAP_METADATA_OFFSET_TYPE 4 + +/* + * nss_wifi_mesh_pre_header_type { + * Wi-Fi pre header types. + */ +enum nss_wifi_mesh_pre_header_type { + NSS_WIFI_MESH_PRE_HEADER_NONE = 0xdcb, /**< No preheader. */ + NSS_WIFI_MESH_PRE_HEADER_80211 = 0xabc, /**< 802.11 preheader. 
*/ + NSS_WIFI_MESH_PRE_HEADER_MAX = 0xdea /**< Max preheader. */ +}; + +/* + * nss_wifi_mesh_extended_data_pkt_types + * Wi-Fi mesh extended data pkt types. + */ +enum nss_wifi_mesh_extended_data_pkt_types { + WIFI_MESH_EXT_DATA_PKT_TYPE_NONE, /**< No packet type. */ + WIFI_MESH_EXT_DATA_PKT_TYPE_EXCEPTION /**< Exception packet type. */ +}; + +/* + * nss_wifi_mesh_ieee80211_hdr + * Wi-Fi header + */ +struct nss_wifi_mesh_ieee80211_hdr { + uint16_t frame_ctl; /* Frame control. */ + uint16_t duration_id; /* Duration ID. */ + uint8_t addr1[ETH_ALEN]; /* Address 1. */ + uint8_t addr2[ETH_ALEN]; /* Address 2. */ + uint8_t addr3[ETH_ALEN]; /* Address 3. */ + uint16_t seq_ctrl; /* Sequence control. */ + uint8_t addr4[ETH_ALEN]; /* Address 4. */ +}__packed; + +/* + * nss_wifi_mesh_ieee80211s_hdr + * Wi-Fi mesh header + */ +struct nss_wifi_mesh_ieee80211s_hdr { + uint8_t flags; /* Mesh flags. */ + uint8_t ttl; /* TTL. */ + uint32_t seq_num; /* Sequence number. */ + uint8_t eaddr1[ETH_ALEN]; /* Mesh Address1. */ + uint8_t eaddr2[ETH_ALEN]; /* Mesh Address2. */ +}__packed; + +/* + * nss_wifi_mesh_per_packet_metadata + * Wi-Fi mesh per packet metadata structure. + */ +struct nss_wifi_mesh_per_packet_metadata { + uint16_t pkt_type; /* Packet type of the metadata. */ + uint8_t wifi_hdr_bytes[NSS_WIFI_MESH_WIFI_HDRLEN_MAX]; /* Wi-Fi header byte stream. */ +}; + +/** + * nss_wifi_mesh_dp_type + * Interface datapath types. + * NSS-to-host path will be seen by ECM for rules. + */ +enum nss_wifi_mesh_dp_type { + NSS_WIFI_MESH_DP_INNER, /**< Inner/Encapsulation Interface. */ + NSS_WIFI_MESH_DP_OUTER, /**< Outer/Decapsulation Interface. */ +}; + +/** + * nss_wifi_mesh_msg_types + * Wi-Fi mesh messages. + */ +enum nss_wifi_mesh_msg_types { + NSS_WIFI_MESH_MSG_INTERFACE_CONFIGURE = NSS_IF_MAX_MSG_TYPES + 1, /**< Wi-Fi mesh interface configure message. */ + NSS_WIFI_MESH_MSG_MPATH_ADD, /**< Wi-Fi mesh path add message. 
*/ + NSS_WIFI_MESH_MSG_MPATH_DELETE, /**< Wi-Fi mesh path delete message. */ + NSS_WIFI_MESH_MSG_MPATH_UPDATE, /**< Wi-Fi mesh path update. */ + NSS_WIFI_MESH_MSG_PROXY_PATH_LEARN, /**< Wi-Fi mesh proxy path learn. */ + NSS_WIFI_MESH_MSG_PROXY_PATH_ADD, /**< Wi-Fi mesh proxy path add. */ + NSS_WIFI_MESH_MSG_PROXY_PATH_DELETE, /**< Wi-Fi mesh proxy path delete. */ + NSS_WIFI_MESH_MSG_PROXY_PATH_UPDATE, /**< Wi-Fi mesh proxy path update. */ + NSS_WIFI_MESH_MSG_PATH_NOT_FOUND, /**< Wi-Fi mesh path not found message. */ + NSS_WIFI_MESH_MSG_PATH_REFRESH, /**< Wi-Fi mesh path refresh message. */ + NSS_WIFI_MESH_MSG_PATH_EXPIRY, /**< Wi-Fi mesh path expiry message. */ + NSS_WIFI_MESH_MSG_PATH_TABLE_DUMP, /**< Wi-Fi mesh path table dump. */ + NSS_WIFI_MESH_MSG_PROXY_PATH_TABLE_DUMP, /**< Wi-Fi mesh proxy path table dump. */ + NSS_WIFI_MESH_MSG_STATS_SYNC, /**< Wi-Fi mesh statistics sync message. */ + NSS_WIFI_MESH_MSG_EXCEPTION_FLAG, /**< Wi-Fi mesh exception Flag. */ + NSS_WIFI_MESH_CONFIG_EXCEPTION, /**< Wi-Fi mesh configuration exception. */ + NSS_WIFI_MESH_MSG_MAX /**< Wi-Fi mesh maximum message. */ +}; + +/* + * nss_wifi_mesh_error_types + * Wi-Fi mesh error types. + */ +enum nss_wifi_mesh_error_types { + NSS_WIFI_MESH_ERROR_NONE = NSS_IF_ERROR_TYPE_MAX + 1, /**< Wi-Fi mesh no error type. */ + NSS_WIFI_MESH_ERROR_UNKNOWN_MSG, /**< Wi-Fi mesh unknown message error. */ + NSS_WIFI_MESH_ERROR_TTL_CONFIG, /**< Wi-Fi mesh invalid ttl error. */ + NSS_WIFI_MESH_ERROR_REFRESH_TIME_CONFIG, /**< Wi-Fi mesh invalid refresh time. */ + NSS_WIFI_MESH_ERROR_MPP_LEARNING_MODE_CONFIG, /**< Wi-Fi mesh invalid mpp learning mode. */ + NSS_WIFI_MESH_ERROR_PATH_ADD_MAX_RADIO_CNT, /**< Wi-Fi mesh path add error due to maximum radio count. */ + NSS_WIFI_MESH_ERROR_PATH_ADD_INVALID_INTERFACE_NUM, /**< Wi-Fi mesh path invalid interface number. */ + NSS_WIFI_MESH_ERROR_PATH_ADD_INTERFACE_NUM_NOT_FOUND, /**< Wi-Fi mesh path interface number not found. 
*/ + NSS_WIFI_MESH_ERROR_PATH_TABLE_FULL, /**< Wi-Fi mesh path table full error. */ + NSS_WIFI_MESH_ERROR_PATH_ALLOC_FAIL, /**< Wi-Fi mesh path alloc error. */ + NSS_WIFI_MESH_ERROR_PATH_INSERT_FAIL, /**< Wi-Fi mesh path insert fail. */ + NSS_WIFI_MESH_ERROR_PATH_NOT_FOUND, /**< Wi-Fi mesh path not found error. */ + NSS_WIFI_MESH_ERROR_PATH_UNHASHED, /**< Wi-Fi mesh proxy path unhashed error. */ + NSS_WIFI_MESH_ERROR_PATH_DELETE_FAIL, /**< Wi-Fi mesh proxy path delete error. */ + NSS_WIFI_MESH_ERROR_PROXY_PATH_NOT_FOUND, /**< Wi-Fi mesh proxy path not found error. */ + NSS_WIFI_MESH_ERROR_PROXY_PATH_UNHASHED, /**< Wi-Fi mesh proxy path unhashed error. */ + NSS_WIFI_MESH_ERROR_PROXY_PATH_DELETE_FAIL, /**< Wi-Fi mesh proxy path delete error. */ + NSS_WIFI_MESH_ERROR_PROXY_PATH_EXISTS, /**< Wi-Fi mesh proxy path exists error. */ + NSS_WIFI_MESH_ERROR_PROXY_PATH_ALLOC_FAIL, /**< Wi-Fi mesh proxy path alloc error. */ + NSS_WIFI_MESH_ERROR_PROXY_PATH_INSERT_FAIL, /**< Wi-Fi mesh proxy path insert error. */ + NSS_WIFI_MESH_ERROR_PROXY_PATH_TABLE_FULL, /**< Wi-Fi mesh proxy path table full error. */ + NSS_WIFI_MESH_ERROR_PB_ALLOC_FAIL, /**< Wi-Fi mesh pbuf allocation failures. */ + NSS_WIFI_MESH_ERROR_ENQUEUE_TO_HOST_FAIL, /**< Wi-Fi mesh enqueue to host failures. */ + NSS_WIFI_MESH_ERROR_ENABLE_INTERFACE_FAIL, /**< Wi-Fi mesh enabling interface failures. */ + NSS_WIFI_MESH_ERROR_DISABLE_INTERFACE_FAIL, /**< Wi-Fi mesh disabling interface failures. */ + NSS_WIFI_MESH_ERROR_INVALID_EXCEPTION_NUM, /**< Wi-Fi mesh invalid exception number. */ + NSS_WIFI_MESH_ERROR_ONESHOT_ALREADY_ATTACHED, /**< Wi-Fi mesh oneshot already attached error. */ +}; + +/** + * nss_wifi_mesh_mpp_learning_mode + * Mesh device proxy path learning types. + */ +enum nss_wifi_mesh_mpp_learning_mode { + NSS_WIFI_MESH_MPP_LEARNING_MODE_INDEPENDENT_NSS, /**< Independent NSS learning. */ + NSS_WIFI_MESH_MPP_LEARNING_MODE_NSS_ASSISTED_HOST, /**< NSS assisted host learning. 
*/ + NSS_WIFI_MESH_MPP_LEARNING_MODE_MAX /**< Mesh maximum learning type. */ +}; + +/** + * nss_wifi_mesh_configurable_exceptions + * Mesh configurable exceptions. + */ +enum nss_wifi_mesh_configurable_exceptions { + NSS_WIFI_MESH_DS_MESH_PATH_NOT_FOUND = 1, /**< Downstream (Eth - Wi-Fi) mesh path not found exception. */ + NSS_WIFI_MESH_US_MESH_PROXY_NOT_FOUND = 2, /**< Upstream (Wi-Fi - Eth) mesh proxy path not found exception. */ + NSS_WIFI_MESH_US_MESH_PATH_NOT_FOUND = 3, /**< Upstream (Wi-Fi - Eth) mesh path not found exception. */ + NSS_WIFI_MESH_EXCEPTION_MAX = 4 +}; + +/* + * nss_wifi_mesh_encap_ext_data_pkt_type + * Mesh encap extended data packet type. + */ +enum nss_wifi_mesh_encap_ext_data_pkt_type { + NSS_WIFI_MESH_ENCAP_EXT_DATA_PKT_TYPE_NONE, /**< No packet type. */ + NSS_WIFI_MESH_ENCAP_EXT_DATA_PKT_TYPE_MPATH_NOT_FOUND_EXC, /**< Packet when mesh path is not found. */ + NSS_WIFI_MESH_ENCAP_EXT_DATA_PKT_TYPE_MAX, /**< Maximum packet type. */ +}; + +/** + * nss_wifi_mesh_config_msg + * Mesh device configuration. + */ +struct nss_wifi_mesh_config_msg { + uint8_t local_mac_addr[ETH_ALEN]; /**< Local MAC address. */ + uint16_t reserved; /**< Reserved field. */ + uint32_t ttl; /**< TTL for packet. */ + uint32_t mesh_path_refresh_time; /**< Mesh path refresh time. */ + uint32_t config_flags; /**< Flags indicating which fields are valid. */ + uint32_t sibling_ifnum; /**< Sibling interface number. */ + uint8_t mpp_learning_mode; /**< Mesh proxy path learning mode. */ + uint8_t block_mesh_forwarding; /**< If enabled, blocks packet forwarding. */ + uint8_t reserved_2[2]; /**< Reserved bytes. */ + uint32_t metadata_type; /**< Indicates if metadata should be enabled when block_mesh_forwarding is true. */ +}; + +/** + * nss_wifi_mesh_mpath_add_msg + * Add a mesh path message for a mesh device. + */ +struct nss_wifi_mesh_mpath_add_msg { + uint8_t dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. 
*/ + uint8_t next_hop_mac_addr[ETH_ALEN]; /**< Next hop MAC address. */ + uint32_t metric; /**< Metric for a mesh path. */ + uint32_t link_vap_id; /**< Radio ID of the mesh path. */ + uint32_t expiry_time; /**< Expiry time in order of ms. */ + uint8_t hop_count; /**< Hop count. */ + uint8_t path_flags; /**< Mesh path flags. */ + uint8_t is_mesh_gate; /**< Destination of this path is a mesh gate. */ + uint8_t block_mesh_fwd; /**< Block intra mesh forward. */ + uint32_t metadata_type; /**< Indicates if metadata should be enabled when block_mesh_forwarding is true. */ +}; + +/** + * nss_wifi_mesh_mpath_delete_msg + * Delete a mesh path message for a mesh device. + */ +struct nss_wifi_mesh_mpath_del_msg { + uint32_t link_vap_id; /**< Radio ID of the mesh path. */ + uint8_t mesh_dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. */ + uint8_t next_hop_mac_addr[ETH_ALEN]; /**< Next hop MAC address. */ +}; + +/** + * nss_wifi_mesh_mpath_update_msg + * Update a mesh path message for a mesh device. + */ +struct nss_wifi_mesh_mpath_update_msg { + uint8_t dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. */ + uint8_t next_hop_mac_addr[ETH_ALEN]; /**< Next hop MAC address. */ + uint8_t old_next_hop_mac_addr[ETH_ALEN];/**< Old next hop MAC address. */ + uint32_t metric; /**< Metric for a mesh path. */ + uint32_t link_vap_id; /**< Radio ID of the mesh path. */ + uint32_t expiry_time; /**< Expiration time of mesh path. */ + uint8_t hop_count; /**< Hop count. */ + uint8_t path_flags; /**< Mesh path flags. */ + uint8_t is_mesh_gate; /**< Indicates if the mesh path is a mesh gate. */ + uint8_t update_flags; /**< Update flags. */ + uint8_t block_mesh_fwd; /**< Block intra mesh forward. */ + uint8_t reserved[3]; /**< Reserved bytes. */ + uint8_t metadata_type; /**< Indicates if metadata should be enabled when block_mesh_forwarding is true. */ +}; + +/** + * nss_wifi_mesh_proxy_path_learn_msg + * Learn a mesh proxy path message for a mesh device. 
+ */ +struct nss_wifi_mesh_proxy_path_learn_msg { + uint8_t mesh_dest_mac[ETH_ALEN]; /**< Mesh destination MAC address. */ + uint8_t dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. */ + uint8_t path_flags; /**< Mesh path flags. */ + uint8_t is_update; /**< Indicates if the learn is an update. */ +}; + +/** + * nss_wifi_mesh_proxy_path_add_msg + * Add a mesh proxy path message for a mesh device. + */ +struct nss_wifi_mesh_proxy_path_add_msg { + uint8_t mesh_dest_mac[ETH_ALEN]; /**< Mesh destination MAC address. */ + uint8_t dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. */ + uint8_t path_flags; /**< Mesh path flags. */ +}; + +/** + * nss_wifi_mesh_proxy_path_update_msg + * Update a mesh proxy path message for a mesh device. + */ +struct nss_wifi_mesh_proxy_path_update_msg { + uint8_t mesh_dest_mac[ETH_ALEN]; /**< Mesh destination MAC address. */ + uint8_t dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. */ + uint8_t path_flags; /**< Mesh path flags. */ + uint8_t bitmap; /**< Bitmap indicating valid fields in the update msg. */ +}; + +/** + * nss_wifi_mesh_proxy_path_del_msg + * Delete a mesh proxy path message for a mesh device. + */ +struct nss_wifi_mesh_proxy_path_del_msg { + uint8_t mesh_dest_mac_addr[ETH_ALEN]; /**< Mesh destination MAC. */ + uint8_t dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. */ +}; + +/** + * nss_wifi_mesh_mpath_not_found_msg + * Wi-Fi mesh path not found message. + */ +struct nss_wifi_mesh_mpath_not_found_msg { + uint8_t dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. */ + uint8_t transmitter_mac_addr[ETH_ALEN]; /**< Transmitter address. */ + uint32_t link_vap_id; /**< NSS interface number of the link vap if received from WiFi. */ + uint8_t is_mesh_forward_path; /**< Indicates if the message is from a forward path. */ +}; + +/** + * nss_wifi_mesh_path_refresh_msg + * Refresh mesh path message. + */ +struct nss_wifi_mesh_path_refresh_msg { + uint8_t dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. 
*/ + uint8_t next_hop_mac_addr[ETH_ALEN]; /**< Next hop MAC address. */ + uint32_t link_vap_id; /**< Link VAP of the mesh path. */ + uint8_t path_flags; /**< Mesh path flags. */ +}; + +/** + * nss_wifi_mesh_path_expiry_msg + * Mesh path expiration message. + */ +struct nss_wifi_mesh_path_expiry_msg { + uint8_t mesh_dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. */ + uint8_t next_hop_mac_addr[ETH_ALEN]; /**< Next hop MAC address. */ + uint32_t link_vap_id; /**< Link VAP of the mesh path. */ + uint8_t path_flags; /**< Mesh path flags. */ +}; + +/* + * nss_wifi_mesh_encap_stats + * Encapsulation statistics. + */ +struct nss_wifi_mesh_encap_stats { + uint32_t expiry_notify_sent; /* Number of times expiry notification sent to host. */ + uint32_t mc_count; /* Number of multicast packets. */ + uint32_t mp_not_found; /* Number of times mesh path is not found. */ + uint32_t mp_active; /* Number of times mesh path is active. */ + uint32_t mpp_not_found; /* Number of times proxy path is not found. */ + uint32_t mpp_found; /* Number of times proxy path is found. */ + uint32_t encap_hdr_fail; /* Number of times encapsulating mesh header failed. */ + uint32_t mp_del_notify_fail; /* Number of times notifying mesh path delete failed. */ + uint32_t link_enqueue; /* Number of packets enqueued to the link VAP. */ + uint32_t link_enq_fail; /* Number of times enqueue to link vap failed. */ + uint32_t ra_lup_fail; /* Number of times receiver address look up is failed. */ + uint32_t dummy_add_count; /* Number of times dummy path is added. */ + uint32_t encap_mp_add_notify_fail; /* Number of times add notification failed. */ + uint32_t dummy_add_fail; /* Number of times dummy addition failed. */ + uint32_t dummy_lup_fail; /* Number of times dummy lookup failed. */ + uint32_t send_to_host_failed; /* Number of packets failed to be sent to host. */ + uint32_t sent_to_host; /* Number of packets sent to host. 
*/ + uint32_t expiry_notify_fail; /* Number of times expiry notification to host failed. */ + uint32_t no_headroom; /* Number of packets dropped because there is no headroom. */ + uint32_t path_refresh_sent; /* Number of times path refresh is sent to host. */ + uint32_t linearise_failed; /* Number of packets dropped because pb_linearise. */ +}; + +/* + * nss_wifi_mesh_decap_stats + * Mesh decapsulation statistics. + */ +struct nss_wifi_mesh_decap_stats { + uint32_t path_refresh_sent; /**< Number of times path refresh is sent to host. */ + uint32_t reserved; /**< Reserved field. */ + uint32_t mc_drop; /**< Number of MC drop counts. */ + uint32_t ttl_0; /**< Number of TTL0 counts. */ + uint32_t mpp_lup_fail; /**< Number of mpp lookup failures. */ + uint32_t decap_hdr_fail; /**< Number of decap HDR failures. */ + uint32_t rx_fwd_fail; /**< Number of receive forward failures. */ + uint32_t rx_fwd_success; /**< Number of receive forward success counts. */ + uint32_t mp_fwd_lookup_fail; /**< Number of mpath forward lookup failures. */ + uint32_t mp_fwd_inactive; /**< Number of mpath forward inactive. */ + uint32_t nxt_mnode_fwd_success; /**< Number of next mnode forward successes. */ + uint32_t nxt_mnode_fwd_fail; /**< Number of next mnode forward failures. */ + uint32_t mpp_add_fail; /**< Number of MPP add failures. */ + uint32_t mpp_add_event2host_fail; /**< Number of MPP add event-to-host failures. */ + uint32_t mpp_upate_fail; /**< Number of MPP update failures. */ + uint32_t mpp_update_even2host_fail; /**< Number of MPP update event-to-host failure counts. */ + uint32_t mpp_learn2host_fail; /**< Number of MPP learn-to-host failure counts. */ + uint32_t block_mesh_fwd_packets; /**< Number of packets that are blocked for intra mesh forward. */ + uint32_t no_headroon; /**< Number of packets dropped due to insufficient headroom.. */ + uint32_t linearise_failed; /**< Number of packets dropped due to linear copy failure. 
*/ + uint32_t mpp_learn_events_rate_limited; /**< Number of mesh proxy path learn events dropped due to rate limiting */ + uint32_t mp_missing_events_rate_limited;/**< Number of path missing notifications dropped due to rate limiting */ +}; + +/** + * nss_wifi_mesh_path_dump_entry + * Wi-Fi mesh path dump entry. + */ +struct nss_wifi_mesh_path_dump_entry { + uint8_t dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. */ + uint8_t next_hop_mac_addr[ETH_ALEN]; /**< Next hop MAC address. */ + uint32_t metric; /**< Mesh path metric. */ + uint32_t expiry_time[2]; /**< Mesh path expiration time. */ + uint8_t hop_count; /**< Number of hop counts. */ + uint8_t flags; /**< Mesh path flags. */ + uint8_t is_mesh_gate; /**< Determines whether gateway capability is enabled. */ + uint8_t reserved[1]; /**< Reserved field. */ + uint32_t link_vap_id; /**< Link interface number. */ +}; + +/** + * nss_wifi_mesh_proxy_path_dump_entry + * Wi-Fi mesh proxy path dump entry. + */ +struct nss_wifi_mesh_proxy_path_dump_entry { + uint8_t dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. */ + uint8_t mesh_dest_mac[ETH_ALEN]; /**< Mesh destination address. */ + uint8_t flags; /**< Mesh path flags. */ + uint8_t reserved[3]; /**< Reserved field. */ + uint32_t time_diff; /**< Difference of current time and active time. */ +}; + +/** + * nss_wifi_mesh_path_table_dump + * Wi-Fi mesh path table dump. + */ +struct nss_wifi_mesh_path_table_dump { + uint32_t num_entries; /**< Number of entries. */ + uint32_t more_events; /**< Determines whether more events are pending. */ + struct nss_wifi_mesh_path_dump_entry path_entry[0]; /**< Mesh path entries. */ +}; + +/** + * nss_wifi_mesh_proxy_path_table_dump + * Wi-Fi mesh proxy path table dump. + */ +struct nss_wifi_mesh_proxy_path_table_dump { + uint32_t num_entries; /**< Number of entries. */ + uint32_t more_events; /**< More events are pending. */ + struct nss_wifi_mesh_proxy_path_dump_entry path_entry[0]; /**< Mesh proxy path entry. 
*/ +}; + +/** + * nss_wifi_mesh_assoc_link_vap + * Associate a link VAP to mesh. + */ +struct nss_wifi_mesh_assoc_link_vap { + uint32_t link_vap_id; /**< Link interface number. */ +}; + +/** + * nss_wifi_mesh_path_stats + * Wi-Fi mesh path statistics. + */ +struct nss_wifi_mesh_path_stats { + uint32_t alloc_failures; /**< Mesh path allocation failures. */ + uint32_t error_max_radio_count; /**< Mesh path error maximum radio count. */ + uint32_t invalid_interface_failures; /**< Mesh path invalid interface number failures count. */ + uint32_t add_success; /**< Mesh path add success count. */ + uint32_t table_full_errors; /**< Mesh path table full error count. */ + uint32_t insert_failures; /**< Mesh path insert failure count. */ + uint32_t not_found; /**< Mesh path not found failure count. */ + uint32_t delete_success; /**< Mesh path delete success count. */ + uint32_t update_success; /**< Mesh path update success count. */ + uint32_t mesh_path_expired; /**< Mesh path expired. */ + uint32_t mesh_path_refresh_needed; /**< Mesh path refresh needed. */ + uint32_t add_requests; /**< Mesh path add request. */ + uint32_t del_requests; /**< Mesh path delete request. */ + uint32_t update_requests; /**< Mesh path update requests. */ + uint32_t next_hop_updations; /**< Mesh path next hop updations. */ + uint32_t hop_count_updations; /**< Mesh path hop count updations. */ + uint32_t flag_updations; /**< Mesh path mesh flag updations. */ + uint32_t metric_updations; /**< Mesh path metric updations. */ + uint32_t block_mesh_fwd_updations; /**< Mesh path block mesh forward updations. */ + uint32_t delete_failures; /**< Mesh path delete failures. */ +}; + +/** + * nss_wifi_mesh_proxy_path_stats + * Wi-Fi mesh proxy path statistics. + */ +struct nss_wifi_mesh_proxy_path_stats { + uint32_t alloc_failures; /**< Mesh proxy path alloc failure count. */ + uint32_t entry_exist_failures; /**< Mesh proxy path entry already exists. 
*/ + uint32_t add_success; /**< Mesh proxy path add success count. */ + uint32_t table_full_errors; /**< Mesh proxy path table full count. */ + uint32_t insert_failures; /**< Mesh proxy path insert failure count. */ + uint32_t not_found; /**< Mesh proxy path not found count. */ + uint32_t unhashed_errors; /**< Mesh proxy path unhased erorr count. */ + uint32_t delete_failures; /**< Mesh proxy path delete failure count. */ + uint32_t delete_success; /**< Mesh proxy path delete success count. */ + uint32_t update_success; /**< Mesh proxy path update success count. */ + uint32_t lookup_success; /**< Mesh proxy path lookup success count. */ + uint32_t add_requests; /**< Mesh proxy path addition requests. */ + uint32_t del_requests; /**< Mesh proxy path deletion requests. */ + uint32_t update_requests; /**< Mesh proxy path updation requests. */ + uint32_t mda_updations; /**< Mesh proxy path mda updations. */ + uint32_t flag_updations; /**< Mesh proxy path flags updations. */ +}; + +/** + * nss_wifi_mesh_exception_stats + * Wi-Fi mesh exception statistics. + */ +struct nss_wifi_mesh_exception_stats { + uint32_t packets_success; /**< Mesh exception successful packets count. */ + uint32_t packets_dropped; /**< Mesh exception dropped packets count. */ +}; + +/** + * nss_wifi_mesh_stats_sync_msg + * Message to get mesh device statistics from NSS firmware to the host. + */ +struct nss_wifi_mesh_stats_sync_msg { + struct nss_cmn_node_stats pnode_stats; /**< Common firmware statistics. */ + struct nss_wifi_mesh_encap_stats mesh_encap_stats; /**< Encapsulation statistics. */ + struct nss_wifi_mesh_decap_stats mesh_decap_stats; /**< Decapsulation statistics. */ + struct nss_wifi_mesh_path_stats mesh_path_stats; /**< Mesh path statistics. */ + struct nss_wifi_mesh_proxy_path_stats mesh_proxy_path_stats; /**< Mesh proxy path statistics. */ + struct nss_wifi_mesh_exception_stats mesh_except_stats; /**< Mesh exception statistics. 
*/ +}; + +/* nss_wifi_mesh_exception_flag_msg + * Messsage to send exception packets to host. + */ +struct nss_wifi_mesh_exception_flag_msg { + uint8_t dest_mac_addr[ETH_ALEN]; /**< Destination MAC address. */ + uint8_t exception; /**< Exception flag bit. */ + uint8_t reserved[2]; /**< Reserved field. */ +}; + +/** + * nss_wifi_mesh_rate_limit_config + * Message to configure exceptions + */ +struct nss_wifi_mesh_rate_limit_config { + uint32_t exception_num; /**< Indicates the exception - enum wifi_mesh_configurable_exceptions. */ + uint32_t enable; /**< Indicates if exception is enabled. */ + uint32_t rate_limit; /**< Rate limit value in us. */ +}; + +/** + * nss_wifi_mesh_encap_ext_pkt_metadata + * Metadata to extended data callback + */ +struct nss_wifi_mesh_encap_ext_pkt_metadata { + uint16_t pkt_type; /**< Packet type of the exception packet. */ +}; + +/** + * nss_wifi_mesh_msg + * Data sent and received in mesh device-specific messages. + */ +struct nss_wifi_mesh_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a virtual device specific message. + */ + union { + union nss_if_msgs if_msg; + /**< NSS interface base message. */ + struct nss_wifi_mesh_config_msg mesh_config; + /**< Mesh device configuration. */ + struct nss_wifi_mesh_mpath_add_msg mpath_add; + /**< Add a message for a mesh path addition. */ + struct nss_wifi_mesh_mpath_del_msg mpath_del; + /**< Add a message for a mesh path deletion. */ + struct nss_wifi_mesh_mpath_update_msg mpath_update; + /**< Add a message for a mesh path update. */ + struct nss_wifi_mesh_proxy_path_learn_msg proxy_learn_msg; + /**< Add a message for a mesh proxy path learning. */ + struct nss_wifi_mesh_proxy_path_add_msg proxy_add_msg; + /**< Add a message for a mesh proxy path addition. */ + struct nss_wifi_mesh_proxy_path_update_msg proxy_update_msg; + /**< Add a message for a mesh proxy path update. 
*/ + struct nss_wifi_mesh_proxy_path_del_msg proxy_del_msg; + /**< Add a message for a mesh proxy path deletion. */ + struct nss_wifi_mesh_mpath_not_found_msg mpath_not_found_msg; + /**< Mesh path not found message. */ + struct nss_wifi_mesh_path_refresh_msg path_refresh_msg; + /**< Add a message for a mesh path refresh. */ + struct nss_wifi_mesh_path_expiry_msg path_expiry_msg; + /**< Add a message for a mesh path expiration. */ + struct nss_wifi_mesh_path_table_dump mpath_table_dump; + /**< Add a message to dump mesh path table. */ + struct nss_wifi_mesh_proxy_path_table_dump proxy_path_table_dump; + /**< Add a message to dump mesh proxy path table. */ + struct nss_wifi_mesh_stats_sync_msg stats_sync_msg; + /**< Statistics synchronization message. */ + struct nss_wifi_mesh_exception_flag_msg exception_msg; + /**< Exception to host message. */ + struct nss_wifi_mesh_rate_limit_config exc_cfg; + /**< Add a message to configure the rate limit for exception events. */ + } msg; /**< Virtual device message payload. */ +}; + +/** + * nss_wifi_mesh_encap_stats_type + * Wi-Fi mesh encapsulation statistics types. + */ +enum nss_wifi_mesh_encap_stats_type { + NSS_WIFI_MESH_ENCAP_STATS_TYPE_PNODE_RX_PACKETS, /**< Wi-Fi mesh common node receive packets. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_PNODE_RX_BYTES, /**< Wi-Fi mesh common node receive bytes. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_PNODE_TX_PACKETS, /**< Wi-Fi mesh common node transmit packets. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_PNODE_TX_BYTES, /**< Wi-Fi mesh common node transmit bytes. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_PNODE_RX_DROPPED, /**< Wi-Fi mesh common node receive dropped. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_EXPIRY_NOTIFY_SENT, /**< Wi-Fi mesh encapsulation statistics expiry notify sent. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_MC_COUNT, /**< Wi-Fi mesh encapsulation statistics mc count. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_MP_NOT_FOUND, /**< Wi-Fi mesh encapsulation statistics mpath not found. 
*/ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_MP_ACTIVE, /**< Wi-Fi mesh encapsulation statistics mpath active */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_MPP_NOT_FOUND, /**< Wi-Fi mesh encapsulation statistics mpp not found. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_MPP_FOUND, /**< Wi-Fi mesh encapsulation statistics mpp found. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_HDR_FAIL, /**< Wi-Fi mesh encapsulation statistics header failed. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_MP_DEL_NOTIFY_FAIL, /**< Wi-Fi mesh encapsulation statistics mpath delete notify. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_LINK_ENQUEUE, /**< Wi-Fi mesh encapsulation statistics link enqueue. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_LINK_ENQUEUE_FAIL, /**< Wi-Fi mesh encapsulation statistics link enqueue failed. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_RA_LOOKUP_FAIL, /**< Wi-Fi mesh encapsulation statistics receiver lookup failed. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_DUMMY_ADD_COUNT, /**< Wi-Fi mesh encapsulation statistics dummy add count. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_MP_ADD_NOTIFY_FAIL, /**< Wi-Fi mesh encapsulation statistics mpath add notify failed. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_DUMMY_ADD_FAIL, /**< Wi-Fi mesh encapsulation statistics dummy add failed. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_DUMMY_LOOKUP_FAIL, /**< Wi-Fi mesh encapsulation statistics dummy look-up failed. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_SEND_TO_HOST_FAILED, /**< Wi-Fi mesh encapsulation statistics when a packet fails to send to host. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_SENT_TO_HOST, /**< Wi-Fi mesh encapsulation statistics when packet is sent to host. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_EXPIRY_NOTIFY_FAIL, /**< Wi-Fi mesh encapsulation statistics expiry notified fail. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_NO_HEADROOM, /**< Wi-Fi mesh encapsulation statistics no headroom. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_PATH_REFRESH_SENT, /**< Wi-Fi mesh encapsulation statistics path refresh sent. 
*/ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_LINEARISE_FAILED, /**< Wi-Fi mesh encapsulation statistics when linearisation failed. */ + NSS_WIFI_MESH_ENCAP_STATS_TYPE_MAX /**< Wi-Fi mesh encapsulation statistics maximum. */ +}; + +/** + * nss_wifi_mesh_decap_stats_type + * Wi-Fi mesh decapsulation statistics types. + */ +enum nss_wifi_mesh_decap_stats_type { + NSS_WIFI_MESH_DECAP_STATS_TYPE_PNODE_RX_PACKETS, /**< Wi-Fi mesh common node receive packets. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_PNODE_RX_BYTES, /**< Wi-Fi mesh common node receive bytes. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_PNODE_TX_PACKETS, /**< Wi-Fi mesh common node transmit packets. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_PNODE_TX_BYTES, /**< Wi-Fi mesh common node transmit bytes. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_PNODE_RX_DROPPED, /**< Wi-Fi mesh common node receive dropped. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_PATH_REFRESH_SENT, /**< Wi-Fi mesh decapsulation statistics path refresh sent. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_RESERVED, /**< Wi-Fi mesh decapsulation statistics reserved field. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MC_DROP, /**< Wi-Fi mesh decapsulation statistics MAC dropped count. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_TTL0, /**< Wi-Fi mesh decapsulation statistics ttl0. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MPP_LOOKUP_FAIL, /**< Wi-Fi mesh decapsulation statistics mpp lookup failed. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_HDR_FAIL, /**< Wi-Fi mesh decapsulation statistics header failed. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_RX_FWD_FAIL, /**< Wi-Fi mesh decapsulation statistics receive forward failed. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_RX_FWD_SUCCESS, /**< Wi-Fi mesh decapsulation statistics receive forward successful. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MP_FWD_LOOKUP_FAIL, /**< Wi-Fi mesh decapsulation statistics mpath forward lookup failed. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MP_FWD_INACTIVE, /**< Wi-Fi mesh decapsulation statistics mpath forward inactive. 
*/ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MNODE_FWD_SUCCESS, /**< Wi-Fi mesh decapsulation statistics mnode forward successful. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MNODE_FWD_FAIL, /**< Wi-Fi mesh decapsulation statistics mnode forward failed. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MPP_ADD_FAIL, /**< Wi-Fi mesh decapsulation statistics mpp add failed. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MPP_ADD_EVENT_TO_HOST_FAIL, /**< Wi-Fi mesh decapsulation statistics mpp add event to host failed. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MPP_UPDATE_FAIL, /**< Wi-Fi mesh decapsulation statistics mpp update failed. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MPP_UPDATE_EVENT_TO_HOST_FAIL, /**< Wi-Fi mesh decapsulation statistics mpp update event to host failed. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MPP_LEARN_TO_HOST_FAIL, /**< Wi-Fi mesh decapsulation statistics mpp learn to host failed. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_BLOCK_MESH_FWD_PACKETS, /**< Wi-Fi mesh decapsulation statistics block mesh fwd packets. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_NO_HEADROOM, /**< Wi-Fi mesh decapsulation statistics no headroom. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_LINEARISE_FAILED, /**< Wi-Fi mesh decapsulation statistics linearise failed. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MPP_LEARN_EVENT_RL_DROPPED, /**< Wi-Fi mesh decapsulation statistics mpp learn event rl dropped. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MP_MISSING_EVENT_RL_DROPPED, /**< Wi-Fi mesh decapsulation statistics mp missing event rl dropped. */ + NSS_WIFI_MESH_DECAP_STATS_TYPE_MAX /**< Wi-Fi mesh decapsulation statistics maximum. */ +}; + +/** + * nss_wifi_mesh_path_stats_type + * Wi-Fi mesh path statistics types. + */ +enum nss_wifi_mesh_path_stats_type { + NSS_WIFI_MESH_PATH_STATS_TYPE_ALLOC_FAILURES, /**< Wi-Fi mesh path statistics allocation failures. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_ERROR_MAX_RADIO_COUNT, /**< Wi-Fi mesh path statistics maximum radio error count. 
*/ + NSS_WIFI_MESH_PATH_STATS_TYPE_INVALID_INTERFACE_FAILURES, /**< Wi-Fi mesh path statistics invalid interface failures. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_ADD_SUCCESS, /**< Wi-Fi mesh path statistics add success. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_TABLE_FULL_ERRORS, /**< Wi-Fi mesh path statistics table full errors. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_INSERT_FAILURES, /**< Wi-Fi mesh path statistics insertion failures. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_NOT_FOUND, /**< Wi-Fi mesh path statistics not found. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_DELETE_SUCCESS, /**< Wi-Fi mesh path statistics successful deletion. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_UPDATE_SUCCESS, /**< Wi-Fi mesh path statistics successful updation. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_EXPIRED, /**< Wi-Fi mesh path statistics expired. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_REFRESH_NEEDED, /**< Wi-Fi mesh path statistics refresh needed. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_ADD_REQUESTS, /**< Wi-Fi mesh path statistics add requests. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_DELETE_REQUESTS, /**< Wi-Fi mesh path statistics delete requests. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_UPDATE_REQUESTS, /**< Wi-Fi mesh path statistics update requests. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_NEXT_HOP_UPDATIONS, /**< Wi-Fi mesh path statistics next hop updations. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_HOP_COUNT_UPDATIONS, /**< Wi-Fi mesh path statistics hop count updations. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_MESH_FLAG_UPDATIONS, /**< Wi-Fi mesh path statistics mesh flag updations. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_METRIC_UPDATIONS, /**< Wi-Fi mesh path statistics metric updations. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_BLOCK_MESH_FWD_UPDATIONS, /**< Wi-Fi mesh path statistics block mesh forward updations. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_MESH_PATH_DELETE_FAILURES, /**< Wi-Fi mesh path statistics mesh path delete failures. */ + NSS_WIFI_MESH_PATH_STATS_TYPE_MAX /**< Wi-Fi mesh path statistics maximum. 
*/ +}; + +/** + * nss_wifi_mesh_proxy_path_stats_type + * Wi-Fi mesh proxy path statistics types. + */ +enum nss_wifi_mesh_proxy_path_stats_type { + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_ALLOC_FAILURES, /**< Wi-Fi mesh proxy path statistics allocation failures. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_ENTRY_EXIST_FAILURES, /**< Wi-Fi mesh proxy path statistics entry exist failures. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_ADD_SUCCESS, /**< Wi-Fi mesh proxy path statistics add success. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_TABLE_FULL_ERRORS, /**< Wi-Fi mesh proxy path statistics table full errors. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_INSERT_FAILURES, /**< Wi-Fi mesh proxy path statistics insert failures. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_NOT_FOUND, /**< Wi-Fi mesh proxy path statistics not found. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_UNHASHED_ERRORS, /**< Wi-Fi mesh proxy path statistics unhashed errors. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_DELTE_FAILURES, /**< Wi-Fi mesh proxy path statistics delete failures */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_DELETE_SUCCESS, /**< Wi-Fi mesh proxy path statistics delete success. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_UPDATE_SUCCESS, /**< Wi-Fi mesh proxy path statistics update_success. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_LOOKUP_SUCCESS, /**< Wi-Fi mesh proxy path statistics lookup sccesss. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_ADD_REQUESTS, /**< Wi-Fi mesh proxy path statistics add requests. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_DELETE_REQUESTS, /**< Wi-Fi mesh proxy path statistics delete requests. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_UPDATE_REQUESTS, /**< Wi-Fi mesh proxy path statistics update request. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_MDA_UPDATIONS, /**< Wi-Fi mesh proxy path statistics mda updations. */ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_FLAGS_UPDATIONS, /**< Wi-Fi mesh proxy path statistics flags updations. 
*/ + NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_MAX /**< Wi-Fi mesh proxy path statistics maximum. */ +}; + +/** + * nss_wifi_mesh_exception_stats_type + * Wi-Fi mesh exception statistics types. + */ +enum nss_wifi_mesh_exception_stats_type { + NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_PACKETS_SUCCESS, /**< Wi-Fi mesh exception statistics packets success. */ + NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_PACKETS_DROPPED, /**< Wi-Fi mesh exception statistics packets dropped. */ + NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_MAX /**< Wi-Fi mesh exception statistics maximum. */ +}; + +/** + * nss_wifi_mesh_hdl_stats_sync_msg + * Message to get mesh device statistics from NSS firmware to the host. + */ +struct nss_wifi_mesh_hdl_stats_sync_msg { + uint64_t encap_stats[NSS_WIFI_MESH_ENCAP_STATS_TYPE_MAX]; /**< Encap statistics. */ + uint64_t decap_stats[NSS_WIFI_MESH_DECAP_STATS_TYPE_MAX]; /**< Decap statistics. */ + uint64_t path_stats[NSS_WIFI_MESH_PATH_STATS_TYPE_MAX]; /**< Path statistics. */ + uint64_t proxy_path_stats[NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_MAX]; /**< Proxy path statistics. */ + uint64_t except_stats[NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_MAX]; /**< Exception statistics. */ +}; + +/** + * nss_wifi_mesh_stats_notification + * Wi-Fi mesh statistics structure. + */ +struct nss_wifi_mesh_stats_notification { + uint32_t core_id; /**< Core ID. */ + nss_if_num_t if_num; /**< Interface number. */ + struct nss_wifi_mesh_hdl_stats_sync_msg stats; /**< Encapsulation-decapsulation statistics. */ +}; + +/** + * nss_wifi_mesh_tx_msg + * Sends a Wi-Fi mesh message to the NSS interface. + * + * @datatypes + * nss_ctx_instance \n + * nss_wifi_mesh_msg + * + * @param[in] nss_ctx Pointer to the NSS core context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the transmit operation. + */ +nss_tx_status_t nss_wifi_mesh_tx_msg(struct nss_ctx_instance *nss_ctx, + struct nss_wifi_mesh_msg *msg); + +/** + * nss_wifi_mesh_tx_buf + * Sends a Wi-Fi mesh data packet to the NSS interface. 
+ * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS core context. + * @param[in] os_buf Pointer to the OS data buffer. + * @param[in] if_num NSS interface number. + * + * @return + * Status of the transmit operation. + */ +nss_tx_status_t nss_wifi_mesh_tx_buf(struct nss_ctx_instance *nss_ctx, + struct sk_buff *os_buf, nss_if_num_t if_num); + +/** + * Callback function for receiving Wi-Fi virtual device messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_wifi_mesh_msg_callback_t)(void *app_data, + struct nss_cmn_msg *msg); + +/** + * Callback function for receiving Wi-Fi virtual device data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_wifi_mesh_data_callback_t)(struct net_device *netdev, + struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for receiving extended data plane Wi-Fi virtual device data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + * @param[in] netdev Pointer to the associated network device. + */ +typedef void (*nss_wifi_mesh_ext_data_callback_t)(struct net_device *netdev, + struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_wifi_mesh_msg_init + * Initializes a Wi-Fi mesh device message. + * + * @datatypes + * nss_wifi_mesh_msg \n + * nss_wifi_mesh_msg_callback_t + * + * @param[in] nim Pointer to the NSS interface message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. 
+ * @param[in] len Length of message. + * @param[in] cb Message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +void nss_wifi_mesh_msg_init(struct nss_wifi_mesh_msg *nim, nss_if_num_t if_num, uint32_t type, uint32_t len, + nss_wifi_mesh_msg_callback_t cb, void *app_data); + +/** + * nss_wifi_mesh_get_context + * Gets the NSS Wi-Fi extended virtual interface context. + * + * @return + * Pointer to the NSS core context. + */ +extern struct nss_ctx_instance *nss_wifi_mesh_get_context(void); + +/** + * nss_register_wifi_mesh_if + * Registers a Wi-Fi mesh device interface with the NSS interface. + * + * @datatypes + * nss_if_num_t \n + * nss_wifi_mesh_data_callback_t \n + * nss_wifi_mesh_ext_data_callback_t \n + * nss_wifi_mesh_msg_callback_t \n + * net_device + * @param[in] if_num NSS interface number. + * @param[in] mesh_data_callback Callback for the Wi-Fi virtual device data. + * @param[in] mesh_ext_data_callback Callback for the extended data. + * @param[in] mesh_event_callback Callback for the message. + * @param[in] dp_type Datapath type. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this + * interface. + * + * @return + * NSS_CORE_STATUS_SUCCESS in case of success. + * NSS_CORE_STATUS_FAILURE in case of failure. + */ +uint32_t nss_register_wifi_mesh_if(nss_if_num_t if_num, nss_wifi_mesh_data_callback_t mesh_data_callback, + nss_wifi_mesh_ext_data_callback_t mesh_ext_data_callback, nss_wifi_mesh_msg_callback_t mesh_event_callback, + uint32_t dp_type, struct net_device *netdev, uint32_t features); + +/** + * nss_unregister_wifi_mesh_if + * Deregisters a Wi-Fi mesh device interface from the NSS interface. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. 
+ */ +void nss_unregister_wifi_mesh_if(nss_if_num_t if_num); + +/** + * nss_wifi_mesh_tx_msg_ext + * Sends Wi-Fi mesh data packet along with metadata as a message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in,out] nss_ctx Pointer to the NSS core context. + * @param[in] os_buf Pointer to the OS data buffer. + * + * @return + * Status of the transmit operation. + */ +nss_tx_status_t nss_wifi_mesh_tx_msg_ext(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf); + +/** + * nss_wifi_mesh_verify_if_num + * Verify Wi-Fi mesh interface number. + * + * @datatypes + * interface number \n + * + * @param[in] nss_if_num_t NSS interface number. + * + * @return + * TRUE or FALSE. + */ +extern bool nss_wifi_mesh_verify_if_num(nss_if_num_t if_num); + +/** + * nss_wifi_mesh_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_wifi_mesh_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_wifi_mesh_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or non-zero on failure. + */ +extern int nss_wifi_mesh_stats_unregister_notifier(struct notifier_block *nb); +#endif /* __NSS_WIFI_MESH_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_vdev.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_vdev.h new file mode 100644 index 000000000..1b52f66d0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifi_vdev.h @@ -0,0 +1,1358 @@ +/* + ************************************************************************** + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * @file nss_wifi_vdev.h + * NSS-to-HLOS Wi-Fi virtual device interface definitions. + */ + +#ifndef __NSS_WIFI_VDEV_H +#define __NSS_WIFI_VDEV_H + +/** + * @addtogroup nss_wifi_vdev_subsystem + * @{ + */ +#define NSS_WIFI_HTT_TRANSFER_HDRSIZE_WORD 6 /**< Size of the Host-To-Target (HTT) message transfer header. */ +#define NSS_WIFI_VDEV_PER_PACKET_METADATA_OFFSET 4 +/**< Offset of the metadata in a virtual device message. */ +#define NSS_WIFI_VDEV_DSCP_MAP_LEN 64 /**< Length of the DSCP MAP field. */ +#define NSS_WIFI_VDEV_IPV6_ADDR_LENGTH 16 /**< Size of the IPv6 address field. */ +#define NSS_WIFI_MAX_SRCS 4 /**< Maximum number of multicast sources. */ +#define NSS_WIFI_VDEV_MAX_ME_ENTRIES 32 /**< Maximum number of multicast enhancement entries. */ + +/** + * nss_wifi_vdev_msg_types + * Wi-Fi virtual device messages. 
+ */ +enum nss_wifi_vdev_msg_types { + NSS_WIFI_VDEV_INTERFACE_CONFIGURE_MSG = NSS_IF_MAX_MSG_TYPES + 1, + NSS_WIFI_VDEV_INTERFACE_UP_MSG, + NSS_WIFI_VDEV_INTERFACE_DOWN_MSG, + NSS_WIFI_VDEV_INTERFACE_CMD_MSG, + NSS_WIFI_VDEV_SNOOPLIST_GRP_LIST_CREATE_MSG, + NSS_WIFI_VDEV_SNOOPLIST_GRP_LIST_DELETE_MSG, + NSS_WIFI_VDEV_SNOOPLIST_GRP_MEMBER_ADD_MSG, + NSS_WIFI_VDEV_SNOOPLIST_GRP_MEMBER_REMOVE_MSG, + NSS_WIFI_VDEV_SNOOPLIST_GRP_MEMBER_UPDATE_MSG, + NSS_WIFI_VDEV_SNOOPLIST_DENY_MEMBER_ADD_MSG, + NSS_WIFI_VDEV_SNOOPLIST_DENY_LIST_DELETE_MSG, + NSS_WIFI_VDEV_SNOOPLIST_DENY_LIST_DUMP_MSG, + NSS_WIFI_VDEV_SNOOPLIST_DUMP_MSG, + NSS_WIFI_VDEV_SNOOPLIST_RESET_MSG, + NSS_WIFI_VDEV_SPECIAL_DATA_TX_MSG, + NSS_WIFI_VDEV_VOW_DBG_CFG_MSG, + NSS_WIFI_VDEV_VOW_DBG_STATS_REQ_MSG, + NSS_WIFI_VDEV_DSCP_TID_MAP_MSG, + NSS_WIFI_VDEV_SNOOPLIST_TOGGLE_MSG, + NSS_WIFI_VDEV_UPDATECHDR_MSG, + NSS_WIFI_VDEV_ME_SYNC_MSG, + NSS_WIFI_VDEV_STATS_MSG, + NSS_WIFI_VDEV_SET_NEXT_HOP, + NSS_WIFI_VDEV_DSCP_TID_MAP_ID_MSG, + NSS_WIFI_VDEV_EXTAP_ADD_ENTRY, + NSS_WIFI_VDEV_EXTAP_REMOVE_ENTRY, + NSS_WIFI_VDEV_QWRAP_PSTA_DELETE_ENTRY, + NSS_WIFI_VDEV_QWRAP_PSTA_ADD_ENTRY, + NSS_WIFI_VDEV_QWRAP_ISOLATION_ENABLE, + NSS_WIFI_VDEV_SET_PEER_NEXT_HOP, + NSS_WIFI_VDEV_CONFIG_VLAN_ID_MSG, + NSS_WIFI_VDEV_CONFIG_VLAN_MODE_MSG, + NSS_WIFI_VDEV_INTERFACE_RECOVERY_RESET_MSG, + NSS_WIFI_VDEV_INTERFACE_RECOVERY_RECONF_MSG, + NSS_WIFI_VDEV_SET_GROUP_KEY, + NSS_WIFI_VDEV_HMMC_MEMBER_ADD_MSG, + NSS_WIFI_VDEV_HMMC_MEMBER_DEL_MSG, + NSS_WIFI_VDEV_MAX_MSG +}; + +/** + * nss_wifi_vdev_err_types + * Error types for a Wi-Fi virtual device. 
+ */ +enum nss_wifi_vdev_err_types { + NSS_WIFI_VDEV_ENONE, + NSS_WIFI_VDEV_EUNKNOWN_MSG, + NSS_WIFI_VDEV_EINV_VID_CONFIG, + NSS_WIFI_VDEV_EINV_EPID_CONFIG, + NSS_WIFI_VDEV_EINV_DL_CONFIG, + NSS_WIFI_VDEV_EINV_CMD, + NSS_WIFI_VDEV_EINV_ENCAP, + NSS_WIFI_VDEV_EINV_DECAP, + NSS_WIFI_VDEV_EINV_RX_NXTN, + NSS_WIFI_VDEV_EINV_VID_INDEX, + NSS_WIFI_VDEV_EINV_MC_CFG, + NSS_WIFI_VDEV_SNOOPTABLE_FULL, + NSS_WIFI_VDEV_SNOOPTABLE_ENOMEM, + NSS_WIFI_VDEV_SNOOPTABLE_GRP_LIST_UNAVAILABLE, + NSS_WIFI_VDEV_SNOOPTABLE_GRP_MEMBER_UNAVAILABLE, + NSS_WIFI_VDEV_SNOOPTABLE_PEER_UNAVAILABLE, + NSS_WIFI_VDEV_SNOOPTABLE_GRP_LIST_ENOMEM, + NSS_WIFI_VDEV_SNOOPTABLE_GRP_LIST_EXIST, + NSS_WIFI_VDEV_ME_ENOMEM, + NSS_WIFI_VDEV_EINV_NAWDS_CFG, + NSS_WIFI_VDEV_EINV_EXTAP_CFG, + NSS_WIFI_VDEV_EINV_VOW_DBG_CFG, + NSS_WIFI_VDEV_EINV_DSCP_TID_MAP, + NSS_WIFI_VDEV_INVALID_ETHER_TYPE, + NSS_WIFI_VDEV_SNOOPTABLE_GRP_MEMBER_EXIST, + NSS_WIFI_VDEV_ME_INVALID_NSRCS, + NSS_WIFI_VDEV_EINV_RADIO_ID, + NSS_WIFI_VDEV_RADIO_NOT_PRESENT, + NSS_WIFI_VDEV_CHDRUPD_FAIL, + NSS_WIFI_VDEV_ME_DENY_GRP_MAX_RCHD, + NSS_WIFI_VDEV_EINV_NEXT_HOP, + NSS_WIFI_VDEV_EINV_DSCP_TID_MAP_ID, + NSS_WIFI_VDEV_EINV_TID_VALUE, + NSS_WIFI_VDEV_EINV_EXTAP_TABLE, + NSS_WIFI_VDEV_EXTAP_ENTRY_UPDATE_FAIL, + NSS_WIFI_VDEV_QWRAP_PSTA_ADD_FAIL, + NSS_WIFI_VDEV_QWRAP_PSTA_DEL_FAIL, + NSS_WIFI_VDEV_QWRAP_ISOLATION_EN_FAIL, + NSS_WIFI_VDEV_QWRAP_ALLOC_FAIL, + NSS_WIFI_VDEV_PEER_NOT_FOUND_BY_MAC, + NSS_WIFI_VDEV_PEER_NEXT_HOP_NOT_FOUND, + NSS_VDEV_EUNKNOWN_NEXT_HOP, + NSS_WIFI_VDEV_VLAN_ID_CONFIG_FAIL, + NSS_WIFI_VDEV_VLAN_MODE_CONFIG_FAIL, + NSS_WIFI_VDEV_RECOVERY_RESET_FAIL, + NSS_WIFI_VDEV_RECOVERY_RECONF_FAIL, + NSS_WIFI_VDEV_CONFIG_GROUP_KEY_FAIL, + NSS_WIFI_VDEV_MULTIPASS_NOT_ENABLED, + NSS_WIFI_VDEV_ALLOC_VLAN_MAP_FAILED, + NSS_WIFI_VDEV_MTU_CHANGE_FAIL, + NSS_WIFI_VDEV_MAC_ADDR_CHANGE_FAIL, + NSS_WIFI_VDEV_PPE_PORT_CREATE_FAIL, + NSS_WIFI_VDEV_PPE_PORT_DESTROY_FAIL, + NSS_WIFI_VDEV_PPE_VSI_ASSIGN_FAIL, + NSS_WIFI_VDEV_PPE_VSI_UNASSIGN_FAIL, + 
NSS_WIFI_VDEV_EINV_MAX_CFG +}; + +/** + * nss_wifi_vdev_ext_data_pkt_type + * Types of extended data plane packets sent from the NSS to the host. + */ +enum nss_wifi_vdev_ext_data_pkt_type { + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_NONE = 0, + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_IGMP = 1, /**< IGMP packets. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_MESH = 2, /**< MESH packets. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_INSPECT = 3, /**< Host inspect packets. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_TXINFO = 4, /**< Tx completion information packets. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_MPSTA_TX = 5, /**< MP station Tx metadata. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_MPSTA_RX = 6, /**< MP station Rx metadata. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_RX_ERR = 7, /**< Rx error packets metadata. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_EXTAP_TX = 8, /**< ExtAP Tx metadata. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_EXTAP_RX = 9, /**< ExtAP Rx metadata. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_WNM_TFS = 10, /**< WNM TFS related metadata. */ + NSS_WIFI_VDEV_EXT_TX_COMPL_PKT_TYPE = 11, /**< Tx completion. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_WDS_LEARN = 12, /**< WDS source port learning command. */ + NSS_WIFI_VDEV_EXT_DATA_PPDU_INFO = 13, /**< PPDU metadata information. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_MCBC_RX = 14, /**< Multicast/broadcast packet received. */ + NSS_WIFI_VDEV_MESH_EXT_DATA_PKT_TYPE_RX_SPL_PACKET = 15, + /**< Mesh link VAP special packet. */ + NSS_WIFI_VDEV_MESH_EXT_DATA_PKT_TYPE_RX_MCAST_EXC = 16, + /**< Mesh link VAP multicast packet. */ + NSS_WIFI_VDEV_EXT_DATA_PKT_TYPE_MAX +}; + +/** + * nss_wifi_vdev_cmd + * Commands for the Wi-Fi virtual device. + */ +enum nss_wifi_vdev_cmd { + NSS_WIFI_VDEV_DROP_UNENC_CMD, /**< Configuration to drop unencrypted frames on VAP. */ + NSS_WIFI_VDEV_ENCAP_TYPE_CMD, /**< Configuration to set encapsulation type on VAP. */ + NSS_WIFI_VDEV_DECAP_TYPE_CMD, /**< Configuration to set decapsulation type on VAP. 
*/ + NSS_WIFI_VDEV_ENABLE_ME_CMD, /**< Configuration to set multicast enhancement on VAP. */ + NSS_WIFI_VDEV_NAWDS_MODE_CMD, /**< Configuration to set NAWDS mode on VAP. */ + NSS_WIFI_VDEV_EXTAP_CONFIG_CMD, /**< Configuration to set extended AP mode on VAP. */ + NSS_WIFI_VDEV_CFG_BSTEER_CMD, /**< Configuration to set bandsteering on VAP. */ + NSS_WIFI_VDEV_VOW_DBG_MODE_CMD, /**< Configuration to set video over wireless (VOW) debug mode on VAP. */ + NSS_WIFI_VDEV_VOW_DBG_RST_STATS_CMD, + /**< Configuration to reset video over wireless (VOW) debug mode on VAP. */ + NSS_WIFI_VDEV_CFG_DSCP_OVERRIDE_CMD, + /**< Configuration to set DSCP/TID value override on VAP. */ + NSS_WIFI_VDEV_CFG_WNM_CAP_CMD, /**< Configuration to set wireless network management (WNM) capability on VAP. */ + NSS_WIFI_VDEV_CFG_WNM_TFS_CMD, /**< Configuration to set WNM traffic filtering and sleep mode (TFS) capability on VAP. */ + NSS_WIFI_VDEV_CFG_WDS_EXT_ENABLE_CMD, + /**< Configuration to set WDS extention capability on VAP. */ + NSS_WIFI_VDEV_CFG_WDS_CMD, /**< Configuration to set WDS on VAP. */ + NSS_WIFI_VDEV_CFG_AP_BRIDGE_CMD, /**< Configuration to enable/disable client isolation. */ + NSS_WIFI_VDEV_SECURITY_TYPE_CMD, /**< Configuration to set security type per VAP. */ + NSS_WIFI_VDEV_CFG_AST_OVERRIDE_CMD, /**< Configuration to set AST (Address Search Table) override on VAP. */ + NSS_WIFI_VDEV_CFG_SON_CAP_CMD, /**< Configuration to set software defined network capability on VAP. */ + NSS_WIFI_VDEV_CFG_MULTIPASS_CMD, /**< Configuration to enable multipass phrase capability on VAP. */ + NSS_WIFI_VDEV_CFG_HLOS_TID_OVERRIDE_CMD, + /**< Configuration to enable HLOS TID override on VAP. */ + NSS_WIFI_VDEV_ENABLE_IGMP_ME_CMD, /**< Configuration to set IGMP multicast enhancement on VAP. */ + NSS_WIFI_VDEV_CFG_WDS_BACKHAUL_CMD, + /**< Configuration to set WDS backhaul extension on VAP. */ + NSS_WIFI_VDEV_CFG_MCBC_EXC_TO_HOST_CMD, /**< Configuration to set multicast/broadcast exception to host on VAP. 
*/ + NSS_WIFI_VDEV_CFG_PEER_AUTHORIZE_CMD, + /**< Configuration to enable peer authorization on VAP. */ + NSS_WIFI_VDEV_MAX_CMD +}; + +/** + * nss_wifi_vdev_dp_type + * Virtual device datapath types. + */ +enum nss_wifi_vdev_dp_type { + NSS_WIFI_VDEV_DP_ACCELERATED, /**< Wi-Fi accelerated VAP type. */ + NSS_WIFI_VDEV_DP_NON_ACCELERATED, /**< Wi-Fi non-accelerated VAP type. */ + NSS_WIFI_VDEV_DP_TYPE_MAX /**< Wi-Fi maximum VAP type. */ +}; + +/** + * nss_wifi_vdev_vlan_tagging_mode + * Supported VLAN tagging modes. + */ +enum nss_wifi_vdev_vlan_tagging_mode { + NSS_WIFI_VDEV_VLAN_NONE, /**< VLAN support disabled. */ + + /** + * Default VLAN mode to add VLAN tag in Rx path and + * remove VLAN tag only when matching with configured + * VLAN tag in Tx path. + */ + NSS_WIFI_VDEV_VLAN_INGRESS_ADD_EGRESS_STRIP_ON_ID_MATCH, + + /** + * Port-based VLAN mode to add VLAN tag in Rx path + * and remove any VLAN tag in Tx path. + */ + NSS_WIFI_VDEV_VLAN_INGRESS_ADD_EGRESS_STRIP_ALWAYS, + NSS_WIFI_VDEV_VLAN_MAX /**< Wi-Fi maximum VLAN support type. */ +}; + +enum vap_ext_mode { + WIFI_VDEV_EXT_MODE_MESH_LINK = 1, /* Wi-Fi mesh VAP mode */ + WIFI_VDEV_EXT_MODE_MAX, /* Wi-Fi maximum VAP mode */ +}; + +/** + * nss_wifi_vdev_config_msg + * Virtual device configuration. + */ +struct nss_wifi_vdev_config_msg { + uint8_t mac_addr[ETH_ALEN]; /**< MAC address. */ + uint16_t radio_ifnum; /**< Corresponding radio interface number. */ + uint32_t vdev_id; /**< Virtual device ID. */ + uint32_t epid; /**< Endpoint ID of the copy engine. */ + uint32_t downloadlen; /**< Size of the header download length. */ + uint32_t hdrcachelen; /**< Size of the header cache. */ + uint32_t hdrcache[NSS_WIFI_HTT_TRANSFER_HDRSIZE_WORD]; + /**< Cached per descriptor metadata shared with NSS Firmware. */ + uint32_t opmode; /**< VAP operating mode: Access-Point (AP) or Station (STA). */ + uint32_t mesh_mode_en; /**< Mesh mode is enabled. 
*/ + uint8_t is_mpsta; + /**< Specifies whether the station is a VAP Master-Proxy (MP) station. */ + uint8_t is_psta; + /**< Specifies whether the station is a proxy station. */ + uint8_t special_vap_mode; + /**< Special VAP for monitoring received management packets. */ + uint8_t smartmesh_mode_en; + /**< VAP is configured as a smart monitor VAP. */ + uint8_t is_wrap; /**< Specifies whether the VAP is a WRAP-AP. */ + uint8_t is_nss_qwrap_en; /**< VAP is configured for NSS firmware QWRAP logic. */ + uint8_t tx_per_pkt_vdev_id_check; /**< Transmit per-packet virtual device ID check. */ + uint8_t align_pad; /**< Reserved field. */ + uint32_t vap_ext_mode; /**< Different VAP extended modes. */ +}; + +/** + * nss_wifi_vdev_enable_msg + * Enable a message for a virtual device. + */ +struct nss_wifi_vdev_enable_msg { + uint8_t mac_addr[ETH_ALEN]; /**< MAC address. */ + uint8_t reserved[2]; /**< Reserved for 4-byte alignment padding. */ +}; + +/** + * nss_wifi_vdev_disable_msg + * Disable message for a virtual device. + */ +struct nss_wifi_vdev_disable_msg { + uint32_t reserved; /**< Placeholder for future enhancement. */ +}; + +/** + * nss_wifi_vdev_recovery_msg + * Recovery message for a virtual device. + */ +struct nss_wifi_vdev_recovery_msg { + uint32_t reserved; /**< Placeholder for future enhancement. */ +}; + +/** + * nss_wifi_vdev_set_next_hop_msg + * Set next hop for Wi-Fi virtual device. + */ +struct nss_wifi_vdev_set_next_hop_msg { + uint32_t ifnumber; /**< Next hop interface number. */ +}; + +/** + * nss_wifi_vdev_extap_map + * Wi-Fi EXTAP map for IPv4/IPv6 addresses. + */ +struct nss_wifi_vdev_extap_map { + uint16_t ip_version; /**< IPv4 or IPv6 address. */ + uint8_t h_dest[ETH_ALEN]; /**< MAC address of original backend. */ + union { + uint8_t IPv4[4]; /**< IPv4 address of the backend. */ + uint8_t IPv6[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH]; /**< IPv6 group IP address. */ + } u; +}; + +/** + * nss_wifi_vdev_cmd_msg + * Virtual device commands. 
+ */ +struct nss_wifi_vdev_cmd_msg { + uint32_t cmd; /**< Command type. */ + uint32_t value; /**< Command value. */ +}; + +/** + * nss_wifi_vdev_me_snptbl_grp_create_msg + * Information for creating the snooptable group of a virtual device. + */ +struct nss_wifi_vdev_me_snptbl_grp_create_msg { + uint32_t ether_type; /**< Ether type of the multicast group. */ + + /** + * IP address of a multicast group. + */ + union { + uint32_t grpaddr_ip4; + /**< IPv4 address. */ + uint8_t grpaddr_ip6[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH]; + /**< IPv6 address. */ + } u; /**< IP address of the multicast group. */ + + uint8_t grp_addr[ETH_ALEN]; + /**< MAC address of the multicast group. */ +}; + +/** + * nss_wifi_vdev_me_snptbl_grp_delete_msg + * Information for deleting a snooplist group list. + */ +struct nss_wifi_vdev_me_snptbl_grp_delete_msg { + uint32_t ether_type; /**< Ether type of the multicast group. */ + + /** + * IP address of the multicast group. + */ + union { + uint32_t grpaddr_ip4; + /**< IPv4 address. */ + uint8_t grpaddr_ip6[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH]; + /**< IPv6 address. */ + } u; /**< IP address of the multicast group. */ + + uint8_t grp_addr[ETH_ALEN]; /**< MAC address of the multicast group. */ +}; + +/** + * struct nss_wifi_vdev_me_mbr_ra_info + * Address details of receiver members. + */ +struct nss_wifi_vdev_me_mbr_ra_info { + bool dup; + /**< Duplicate bit to identify if next hop address is present. */ + uint8_t ramac[ETH_ALEN]; + /**< MAC address of receiver. */ +}; + +/** + * nss_wifi_vdev_me_snptbl_grp_mbr_add_msg + * Information for adding a snooplist group member. + */ +struct nss_wifi_vdev_me_snptbl_grp_mbr_add_msg { + uint32_t ether_type; /**< Ether type of the multicast group. */ + + /** + * IP address of the multicast group. + */ + union { + uint32_t grpaddr_ip4; + /**< IPv4 address. */ + uint8_t grpaddr_ip6[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH]; + /**< IPv6 address. */ + } u; /**< IP address of the multicast group. */ + + uint32_t peer_id; /**< Peer ID. 
*/ + uint8_t grp_addr[ETH_ALEN]; + /**< MAC address of the multicast group. */ + uint8_t grp_member_addr[ETH_ALEN]; + /**< MAC address of the multicast group member. */ + uint8_t mode; /**< Multicast enhancement mode - mode 2 and mode 5. */ + uint8_t nsrcs; /**< Number of source IP addresses for selective source multicast. */ + uint8_t src_ip_addr[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH * NSS_WIFI_MAX_SRCS]; + /**< Source IP address. */ + struct nss_wifi_vdev_me_mbr_ra_info ra_entry; + /**< Receiver address entry corresponding to the member. */ +}; + +/** + * nss_wifi_vdev_me_snptbl_grp_mbr_delete_msg + * Information for removing a snooplist group member. + */ +struct nss_wifi_vdev_me_snptbl_grp_mbr_delete_msg { + uint32_t ether_type; /**< Ether type of the multicast group. */ + + /** + * IP address of the multicast group. + */ + union { + uint32_t grpaddr_ip4; + /**< IPv4 address. */ + uint8_t grpaddr_ip6[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH]; + /**< IPv6 address. */ + }u; /**< IP address of the multicast group. */ + uint8_t grp_addr[ETH_ALEN]; + /**< MAC address of the multicast group. */ + uint8_t grp_member_addr[ETH_ALEN]; + /**< MAC address of the multicast group member. */ +}; + +/** + * nss_wifi_vdev_me_snptbl_grp_mbr_update_msg + * Information for updating a snooplist group member. + */ +struct nss_wifi_vdev_me_snptbl_grp_mbr_update_msg { + uint32_t ether_type; /**< Ether type of the multicast group. */ + + /** + * IP address of the multicast group. + */ + union { + uint32_t grpaddr_ip4; + /**< IPv4 address. */ + uint8_t grpaddr_ip6[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH]; + /**< IPv6 address. */ + }u; /**< IP address of the multicast group. */ + + uint8_t grp_addr[ETH_ALEN]; + /**< MAC address of the multicast group. */ + uint8_t grp_member_addr[ETH_ALEN]; + /**< MAC address of the multicast group member. */ + uint8_t mode; /**< Multicast enhancement mode - mode 2 and mode 5. */ + uint8_t nsrcs; /**< Number of source IP addresses for selective source multicast. 
*/ + uint8_t src_ip_addr[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH * NSS_WIFI_MAX_SRCS]; + /**< Source IP address. */ +}; + +/** + * nss_wifi_vdev_me_hmmc_add_msg + * Information for adding an entry into the host-managed multicast list. + */ +struct nss_wifi_vdev_me_hmmc_add_msg { + uint32_t ether_type; /**< IPv4 or IPv6. */ + union { + uint32_t ipv4_addr; + /**< IPv4 multicast group address. */ + uint8_t ipv6_addr[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH]; + /**< IPv6 multicast group address. */ + } u; /**< Type of group addresses. */ + uint32_t netmask; /**< IP subnet netmask. */ +}; + +/** + * nss_wifi_vdev_me_hmmc_del_msg + * Information for deleting an entry from the host-managed multicast list. + */ +struct nss_wifi_vdev_me_hmmc_del_msg { + uint32_t ether_type; /**< IPv4 or IPv6. */ + union { + uint32_t ipv4_addr; + /**< IPv4 multicast group address. */ + uint8_t ipv6_addr[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH]; + /**< IPv6 multicast group address. */ + } u; /**< Type of group addresses. */ + uint32_t netmask; /**< IP subnet netmask. */ +}; + +/** + * nss_wifi_vdev_me_deny_ip_add_msg + * Information for adding an entry into the deny list. + */ +struct nss_wifi_vdev_me_deny_ip_add_msg { + uint32_t ether_type; /**< IPv4 or IPv6. */ + union { + uint32_t ipv4_addr; + /**< IPv4 multicast group address. */ + uint8_t ipv6_addr[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH]; + /**< IPv6 multicast group address. */ + } u; /**< Type of group addresses. */ + uint32_t netmask; /**< IP subnet netmask. */ +}; + +/** + * nss_wifi_vdev_me_deny_ip_del_msg + * Information for deleting an entry from the deny list. + */ +struct nss_wifi_vdev_me_deny_ip_del_msg { + uint32_t ether_type; /**< IPv4 or IPv6. */ + union { + uint32_t ipv4_addr; + /**< IPv4 multicast group address. */ + uint8_t ipv6_addr[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH]; + /**< IPv6 multicast group address. */ + } u; /**< Type of group addresses. */ + uint32_t netmask; /**< IP subnet netmask. 
*/ +}; + +/** + * nss_wifi_vdev_me_snptbl_deny_grp_add_msg + * Information for adding a snooplist member to a deny list. + */ +struct nss_wifi_vdev_me_snptbl_deny_grp_add_msg { + uint32_t grpaddr; /**< IP address of the multicast group. */ +}; + +/** + * nss_wifi_vdev_txmsg + * Information for transmitting special data. + */ +struct nss_wifi_vdev_txmsg { + uint16_t peer_id; /**< Peer ID. */ + uint16_t tid; /**< Traffic ID. */ +}; + +/** + * nss_wifi_vdev_vow_dbg_stats + * Types of VoW debug statistics. + */ +struct nss_wifi_vdev_vow_dbg_stats { + uint32_t rx_vow_dbg_counters; /**< VoW Rx debug counter. */ + uint32_t tx_vow_dbg_counters[8]; /**< VoW Tx debug counter. */ +}; + +/** + * nss_wifi_vdev_vow_dbg_cfg_msg + * Information for configuring VoW debug statistics. + */ +struct nss_wifi_vdev_vow_dbg_cfg_msg { + uint8_t vow_peer_list_idx; /**< Index of the peer list. */ + uint8_t tx_dbg_vow_peer_mac4; /**< MAC address 4 for the peer. */ + uint8_t tx_dbg_vow_peer_mac5; /**< MAC address 5 for the peer. */ +}; + +/** + * nss_wifi_vdev_dscp_tid_map + * DSCP-to-TID mapping. + */ +struct nss_wifi_vdev_dscp_tid_map { + uint32_t dscp_tid_map[NSS_WIFI_VDEV_DSCP_MAP_LEN]; + /**< Array holding the DSCP-to-TID mapping. */ +}; + +/** + * nss_wifi_vdev_dscptid_map_id + * DSCP-to-TID map ID. + */ +struct nss_wifi_vdev_dscptid_map_id { + uint8_t dscp_tid_map_id; + /**< DSCP-to-TID mapping ID to be used. */ +}; + +/** + * nss_wifi_vdev_set_peer_next_hop + * Set per peer next hop. + */ +struct nss_wifi_vdev_set_peer_next_hop_msg { + uint8_t peer_mac_addr[ETH_ALEN]; /**< MAC peer address. */ + uint16_t reserved; /**< Reserved. */ + uint32_t if_num; /**< Next hop interface number. */ +}; + +/** + * nss_wifi_vdev_qwrap_psta_msg + * PSTA VAP entry map in QWRAP mode. + */ +struct nss_wifi_vdev_qwrap_psta_msg { + uint8_t oma[ETH_ALEN]; /**< Original MAC address of PSTA VAP. */ + uint8_t vma[ETH_ALEN]; /**< Virtual MAC address of PSTA VAP. */ + uint8_t vdev_id; /**< ID of PSTA VAP. 
*/ + uint8_t is_wired; /**< Is the entry for wired PSTA VAP. */ + uint8_t reserved[2]; /**< Reserved for 4-byte alignment. */ +}; + +/** + * nss_wifi_vdev_qwrap_isolation_en_msg + * Qwrap isolation mode enable. + */ +struct nss_wifi_vdev_qwrap_isolation_en_msg { + uint8_t isolation_enable; /**< QWRAP isolation mode enable. */ + uint8_t reserved[3]; /**< Reserved for 4-byte alignment. */ +}; + +/** + * nss_wifi_vdev_igmp_per_packet_metadata + * Per-packet metadata for IGMP packets. + */ +struct nss_wifi_vdev_igmp_per_packet_metadata { + uint32_t tid; /**< TID. */ + uint32_t tsf32; /**< TSF value. */ + uint8_t peer_mac_addr[ETH_ALEN]; + /**< Peer MAC address. */ + uint8_t reserved[2]; /**< Reserved for 4-byte alignment. */ +}; + +/** + * nss_wifi_vdev_mesh_per_packet_metadata + * Per-packet metadata for Mesh packets. + */ +struct nss_wifi_vdev_mesh_per_packet_metadata { + uint32_t status; /**< Meshmode Status. */ + uint32_t rssi; /**< Received signal strength indication. */ + uint32_t tsf; /**< Tx expiry time. */ + uint16_t tx_retries; /**< Retry count. */ +}; + +/** + * nss_wifi_vdev_vlan_config_msg + * Enable special handling on this VAP where VLAN tagging is added in Rx and removed in Tx. + */ +struct nss_wifi_vdev_vlan_config_msg { + uint16_t vlan_id; /**< VLAN ID configured. */ + uint8_t reserved[2]; /**< Reserved for 4-byte alignment. */ +}; + +/** + * nss_wifi_vdev_vlan_enable_msg + * Enable VLAN tagging mode on this VAP. + */ +struct nss_wifi_vdev_vlan_enable_msg { + uint8_t vlan_tagging_mode; /**< Flag to enable default or port-based VLAN tagging mode. */ + uint8_t reserved[3]; /**< Reserved for 4-byte alignment. */ +}; + +/** + * nss_wifi_vdev_set_vlan_group_key + * Set VLAN ID for special peer. + */ +struct nss_wifi_vdev_set_vlan_group_key { + uint16_t vlan_id; /**< VLAN ID. */ + uint16_t group_key; /**< Group key. */ +}; + +/** + * nss_wifi_vdev_txinfo_per_packet_metadata + * Per-packet metadata for Tx completion information packets. 
+ */ +struct nss_wifi_vdev_txinfo_per_packet_metadata { + uint32_t status; /**< Tx completion status. */ + uint16_t msdu_count; /**< Count of MSDUs in the MSDU list. */ + uint16_t num_msdu; /**< Sequence Number of MSDU in the MSDU list. */ + uint32_t msdu_q_time; /**< Time spent by an MSDU in the Wi-Fi firmware. */ + uint32_t ppdu_rate; /**< PPDU rate in code rate. */ + uint8_t ppdu_num_mpdus_success; + /**< Number of successful MPDUs. */ + uint8_t ppdu_num_mpdus_fail; + /**< Number of failed MPDUs. */ + uint16_t ppdu_num_msdus_success; + /**< Number of successful MSDUs. */ + uint32_t ppdu_bytes_success; + /**< Number of successful bytes. */ + uint32_t ppdu_duration; /**< Estimated air time. */ + uint8_t ppdu_retries; /**< Number of times a PPDU is retried. */ + uint8_t ppdu_is_aggregate; + /**< Flag to check whether a PPDU is aggregated. */ + uint16_t start_seq_num; /**< Starting MSDU ID for this PPDU. */ + uint16_t version; /**< PPDU statistics version. */ + uint32_t ppdu_ack_timestamp; + /**< Timestamp (in ms) when an acknowledgement was received. */ + uint32_t ppdu_bmap_enqueued_lo; + /**< Bitmap of packets enqueued to the hardware (LSB). */ + uint32_t ppdu_bmap_enqueued_hi; + /**< Bitmap of packets enqueued to the hardware (MSB). */ + uint32_t ppdu_bmap_tried_lo; + /**< Bitmap of packets sent over the air (LSB). */ + uint32_t ppdu_bmap_tried_hi; + /**< Bitmap of packets sent over the air (MSB). */ + uint32_t ppdu_bmap_failed_lo; + /**< Bitmap of packets that failed to be acknowledged (LSB). */ + uint32_t ppdu_bmap_failed_hi; + /**< Bitmap of packets that failed to be acknowledged (MSB). */ +}; + +/** + * nss_wifi_vdev_qwrap_tx_metadata_types + * Per-packet metadata types for Qwrap Tx packets. + */ +enum nss_wifi_vdev_qwrap_tx_metadata_types { + NSS_WIFI_VDEV_QWRAP_TYPE_NONE = 0, + NSS_WIFI_VDEV_QWRAP_TYPE_TX = 1, + NSS_WIFI_VDEV_QWRAP_TYPE_RX_TO_TX = 2 +}; + +/** + * nss_wifi_vdev_extap_pkt_types + * Per-packet metadata types for ExtAP Tx packets. 
+ */ +enum nss_wifi_vdev_extap_pkt_types { + NSS_WIFI_VDEV_EXTAP_PKT_TYPE_NONE = 0, + NSS_WIFI_VDEV_EXTAP_PKT_TYPE_TX = 1, + NSS_WIFI_VDEV_EXTAP_PKT_TYPE_RX_TO_TX = 2 +}; + +/** + * nss_wifi_vdev_mpsta_per_packet_tx_metadata + * Per-packet metadata for transmitting packets to an MP station. + */ +struct nss_wifi_vdev_mpsta_per_packet_tx_metadata { + uint16_t vdev_id; /**< Virtual device ID. */ + uint16_t metadata_type; /**< Tx metadata type. */ +}; + +/** + * nss_wifi_vdev_mpsta_per_packet_rx_metadata + * Per-packet metadata for receiving packets from an MP station. + */ +struct nss_wifi_vdev_mpsta_per_packet_rx_metadata { + uint16_t vdev_id; /**< Virtual device ID. */ + uint16_t peer_id; /**< Peer ID. */ +}; + +/** + * nss_wifi_vdev_rx_err_per_packet_metadata + * Per-packet metadata for error packets received. + */ +struct nss_wifi_vdev_rx_err_per_packet_metadata { + uint8_t peer_mac_addr[ETH_ALEN]; + /**< Peer MAC address. */ + uint8_t tid; /**< TID. */ + uint8_t vdev_id; /**< Virtual device ID. */ + uint8_t err_type; /**< Error type. */ + uint8_t rsvd[3]; /**< Reserved for future enhancement. */ +}; + +/** + * nss_wifi_vdev_extap_per_packet_metadata + * Per-packet metadata for ExtAP. + */ +struct nss_wifi_vdev_extap_per_packet_metadata { + uint16_t pkt_type; /**< ExtAP packet type. */ + uint8_t res[2]; /**< Reserved for 4-byte alignment. */ +}; + +/** + * nss_wifi_vdev_tx_compl_metadata + * Per-packet metadata for Tx completion message. + */ +struct nss_wifi_vdev_tx_compl_metadata { + uint8_t ta[ETH_ALEN]; /**< Transmitter MAC address. */ + uint8_t ra[ETH_ALEN]; /**< Receiver MAC address. */ + uint16_t ppdu_id; /**< PPDU ID. */ + uint16_t peer_id; /**< Peer ID. */ +}; + +/** + * nss_wifi_vdev_wds_info_type + * Specifies the type of WDS notification information. + */ +enum wifi_vdev_ext_wds_info_type { + NSS_WIFI_VDEV_WDS_TYPE_NONE = 0, + NSS_WIFI_VDEV_WDS_TYPE_RX, /**< Rx WDS entry. */ + NSS_WIFI_VDEV_WDS_TYPE_MEC, /**< Multicast Tx WDS entry. 
*/ + NSS_WIFI_VDEV_WDS_TYPE_DA /**< Rx WDS entry for destination address. */ +}; + +/** + * nss_wifi_vdev_per_packet_metadata + * Payload of per-packet metadata. + */ +struct nss_wifi_vdev_wds_per_packet_metadata { + uint16_t peer_id; /**< Peer ID. */ + uint8_t is_sa_valid; /**< Specifies whether source address is valid. */ + uint8_t reserved; /**< Reserve bytes for alignment. */ + enum wifi_vdev_ext_wds_info_type wds_type; + /**< WDS message type. */ + uint8_t addr4_valid; /**< 802.11 4th address valid flag. */ + uint8_t rsvd; /**< Reserve bytes for alignment. */ + uint16_t sa_idx; /**< Source address index. */ + uint16_t sa_sw_peer_id; /**< Software/Address-Search-Table peer ID. */ +}; + +/** + * nss_wifi_vdev_ppdu_mdata_dir + * Physical layer protocol data unit (PPDU) metadata direction. + */ +enum nss_wifi_vdev_ppdu_mdata_dir { + WIFI_VDEV_PPDU_MDATA_TX, /**< PPDU metadata for transmit direction. */ + WIFI_VDEV_PPDU_MDATA_RX /**< PPDU metadata for receive direction. */ +}; + +/** + * nss_wifi_vdev_ppdu_metadata + * PPDU metadata. + */ +struct nss_wifi_vdev_ppdu_metadata { + uint32_t dir; /**< Data direction for metadata. */ + uint32_t ppdu_id; /**< PPDU ID. */ + uint16_t peer_id; /**< Peer ID. */ + uint8_t first_msdu; /**< First MSDU. */ + uint8_t last_msdu; /**< Last MSDU. */ +}; + +/** + * nss_wifi_vdev_per_packet_metadata + * Wi-Fi per packet metadata content. + */ +struct nss_wifi_vdev_per_packet_metadata { + uint32_t pkt_type; /**< Type of packet. */ + + /** + * Metadata payload for special data receive messages. + */ + union { + struct nss_wifi_vdev_igmp_per_packet_metadata igmp_metadata; + /**< Per packet metadata structure for IGMP. */ + struct nss_wifi_vdev_mesh_per_packet_metadata mesh_metadata; + /**< Per packet metadata structure for mesh mode. */ + struct nss_wifi_vdev_txinfo_per_packet_metadata txinfo_metadata; + /**< Per packet metadata structure for Tx information. 
*/ + struct nss_wifi_vdev_mpsta_per_packet_tx_metadata mpsta_tx_metadata; + /**< Per packet Tx metadata structure for master-proxy station. */ + struct nss_wifi_vdev_mpsta_per_packet_rx_metadata mpsta_rx_metadata; + /**< Per packet Rx metadata structure for master-proxy station. */ + struct nss_wifi_vdev_rx_err_per_packet_metadata rx_err_metadata; + /**< Per packet metadata structure for Rx error. */ + struct nss_wifi_vdev_tx_compl_metadata tx_compl_metadata; + /**< Per packet Tx metadata structure for Tx completion. */ + struct nss_wifi_vdev_wds_per_packet_metadata wds_metadata; + /**< Per packet Tx metadata structure for wireless distribution system mode. */ + struct nss_wifi_vdev_ppdu_metadata ppdu_metadata; + /**< Per packet PPDU metadata needed for per PPDU copy mode. */ + } metadata; + /**< Metadata payload for special data receive message. */ +}; + +/** + * nss_wifi_vdev_meshmode_rx_metadata + * Metadata payload for Mesh mode receive. + */ +struct nss_wifi_vdev_meshmode_rx_metadata { + uint16_t rs_ratephy_lo; /**< PHY rate lower order bytes. */ + uint16_t rs_ratephy_hi; /**< PHY rate higher order bytes. */ + uint16_t cntr_chan_freq; /** Center channel frequency. */ + uint16_t vdev_id; /**< Virtual device ID. */ + uint16_t peer_id; /**< Peer ID. */ + uint16_t rs_rssi; /**< Received signal strength indication (noise floor adjusted). */ + uint8_t rs_flags; /**< First/last MSDU flags. */ + uint8_t rs_channel; /**< Operational channel. */ + uint8_t rs_keyix; /**< Key index. */ + uint8_t padd; /**< Padding to ensure alignment. */ +}; + +/** + * nss_wifi_vdev_rawmode_rx_metadata + * Metadata payload for Raw Mode receive. + */ +struct nss_wifi_vdev_rawmode_rx_metadata { + uint16_t vdev_id; /**< Virtual device ID. */ + uint16_t peer_id; /**< Peer ID. */ +}; + +/** + * nss_wifi_vdev_updchdr_msg + * Information for updating a cache header. + */ +struct nss_wifi_vdev_updchdr_msg { + uint32_t hdrcache[NSS_WIFI_HTT_TRANSFER_HDRSIZE_WORD]; + /**< Updated header cache. 
*/ + uint32_t vdev_id; /**< Virtual device ID. */ +}; + +/** + * nss_wifi_vdev_me_host_sync_grp_entry + * Multicast enhancement host synchronization group table. + */ +struct nss_wifi_vdev_me_host_sync_grp_entry { + uint8_t group_addr[ETH_ALEN]; /**< Group address for this list. */ + uint8_t grp_member_addr[ETH_ALEN]; /**< MAC address of the multicast group member. */ + + /** + * Type of group addresses. + */ + union { + uint32_t grpaddr_ip4; + /**< IPv4 group address. */ + uint8_t grpaddr_ip6[NSS_WIFI_VDEV_IPV6_ADDR_LENGTH]; + /**< IPv6 group address. */ + } u; /**< Type of group addresses. */ + + uint32_t src_ip_addr; + /**< Source IP address. */ +}; + +/** + * nss_wifi_vdev_me_host_sync_msg + * Synchronization message for a multicast enhancement host group. + */ +struct nss_wifi_vdev_me_host_sync_msg { + uint16_t vdev_id; /**< Virtual device ID. */ + uint8_t nentries; /**< Number of group entries carried by this message. */ + uint8_t radio_ifnum; /**< Interface number of the Wi-Fi radio. */ + struct nss_wifi_vdev_me_host_sync_grp_entry grp_entry[NSS_WIFI_VDEV_MAX_ME_ENTRIES]; + /**< Array for multicast group entries. */ +}; + +/** + * nss_wifi_vdev_mcast_enhance_stats + * Multicast enhancement-related statistics. + */ +struct nss_wifi_vdev_mcast_enhance_stats { + + /** + * Number of multicast packets received for multicast enhancement conversion. + */ + uint32_t mcast_rcvd; + + /** + * Number of unicast packets sent as part of multicast enhancement conversion. + */ + uint32_t mcast_ucast_converted; + + /** + * Number of multicast enhancement frames dropped because of a + * buffer allocation failure. + */ + uint32_t mcast_alloc_fail; + + /** + * Number of multicast enhancement frames dropped because of a + * buffer enqueue failure. + */ + uint32_t mcast_pbuf_enq_fail; + + /** + * Number of multicast enhancement frames dropped because of a + * buffer copy failure. 
+ */ + uint32_t mcast_pbuf_copy_fail; + + /** + * Number of multicast enhancement frames dropped because of a + * failure in sending flow control to a peer. + */ + uint32_t mcast_peer_flow_ctrl_send_fail; + + /** + * Number of multicast enhancement buffer frames dropped when + * destination MAC is the same as source MAC. + */ + uint32_t mcast_loopback_err; + + /** + * Number of multicast enhancement buffer frames dropped + * because of an empty destination MAC. + */ + uint32_t mcast_dst_address_err; + + /** + * Number of multicast enhancement buffer frames dropped + * because no member is listening on the group. + */ + uint32_t mcast_no_enhance_drop_cnt; + + /** + * Number of multicast bytes received for multicast enhancement. + */ + uint32_t mcast_rcvd_bytes; + + /** + * Number of IGMP packets received for conversion to unicast. + */ + uint32_t igmp_rcvd; + + /** + * Number of IGMP packets converted to unicast as a part of + * VoW IGMP improvements. + */ + uint32_t igmp_ucast_converted; +}; + +/** + * nss_wifi_vdev_stats_sync_msg + * Message to get virtual device statistics from NSS Firmware to Host. + */ +struct nss_wifi_vdev_stats_sync_msg { + uint32_t dropped; /**< Number of dropped packets. */ + uint32_t tx_enqueue_cnt; /**< Transmit pnode enqueue count. */ + uint32_t tx_enqueue_fail_cnt; /**< Transmit pnode enqueue fail count. */ + uint32_t tx_intra_bss_enqueue_cnt; /**< Intra BSS enqueue count. */ + uint32_t tx_intra_bss_enqueue_fail_cnt; + /**< Intra BSS enqueue fail count. */ + uint32_t tx_intra_bss_mcast_send_cnt; + /**< Virtual device multicast/broadcast packet count in AP mode. */ + uint32_t tx_intra_bss_mcast_send_fail_cnt; + /**< Virtual device multicast/broadcast packet fail count in AP mode. */ + uint32_t tx_enqueue_bytes; /**< Transmit enqueue bytes count. */ + uint32_t rx_enqueue_cnt; /**< Ethernet node enqueue count. */ + uint32_t rx_enqueue_fail_cnt; /**< Ethernet node enqueue fail count. 
*/ + uint32_t rx_except_enqueue_cnt; /**< N2H (NSS to Host) node enqueue count. */ + uint32_t rx_except_enqueue_fail_cnt; /**< N2H (NSS to Host) node enqueue fail count. */ + uint32_t rx_enqueue_bytes; /**< Receive enqueue bytes count. */ + uint32_t rx_wds_learn_send_cnt; /**< Virtual device WDS source port learn count. */ + uint32_t rx_wds_learn_send_fail_cnt; /**< Virtual device WDS source count fail. */ + struct nss_wifi_vdev_mcast_enhance_stats wvmes; + /**< Multicast enhancement statistics. */ + uint32_t num_tx_exception; /**< Number of Tx exception to firmware. */ + uint32_t tx_dma_map_fail; /**< DMA map failure. */ + uint32_t tx_desc_alloc_fail; /**< Descriptor allocation failure. */ + uint32_t tx_hw_ring_full; /**< Hardware ring is full. */ + uint32_t tx_tso_pkt; /**< Number of TSO packets. */ + uint32_t tx_num_seg; /**< Number of segments in TSO packets. */ + uint32_t tx_rcvd; /**< Number of packets received from host. */ + uint32_t tx_rcvd_bytes; /**< Number of bytes received from host. */ + uint32_t cce_classified; + /**< Number of packets that are classified and sent to firmware as an exception. */ + uint32_t cce_classified_raw; + /**< Number of raw packets that are classified and sent to firmware as an exception. */ + uint32_t tx_eapol_cnt; /**< Number of EAPoL frames in transmit direction. */ + uint32_t nawds_tx_mcast_cnt; /**< Number of NAWDS packets sent. */ + uint32_t nawds_tx_mcast_bytes; /**< Number of NAWDS bytes sent. */ + uint32_t per_pkt_vdev_check_fail; /**< Number of packets that failed vdev id check in Tx. */ + uint32_t rx_mcast_cnt; /**< Receive multicast packet count. */ + uint32_t rx_mcast_bytes; /**< Receive multicast bytes count. */ + uint32_t rx_decrypt_err; /**< Receive decryption error */ + uint32_t rx_mic_err; /**< Receive MIC error */ + uint32_t mcbc_exc_host_fail_cnt; + /**< Number of multicast/broadcast packets failed to send to host through exception path. 
*/ +}; + +/** + * nss_wifi_vdev_msg + * Data for sending and receiving virtual device specific messages. + */ +struct nss_wifi_vdev_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a virtual device specific message. + */ + union { + struct nss_wifi_vdev_config_msg vdev_config; + /**< Virtual device configuration. */ + struct nss_wifi_vdev_enable_msg vdev_enable; + /**< Enable a message for a virtual device. */ + struct nss_wifi_vdev_cmd_msg vdev_cmd; + /**< Command message for a virtual device. */ + struct nss_wifi_vdev_me_snptbl_grp_create_msg vdev_grp_list_create; + /**< Creates the snooptable group of a virtual device. */ + struct nss_wifi_vdev_me_snptbl_grp_delete_msg vdev_grp_list_delete; + /**< Deletes a snooplist group list. */ + struct nss_wifi_vdev_me_snptbl_grp_mbr_add_msg vdev_grp_member_add; + /**< Adds a snooplist group member. */ + struct nss_wifi_vdev_me_snptbl_grp_mbr_delete_msg vdev_grp_member_remove; + /**< Removes a snooplist group member. */ + struct nss_wifi_vdev_me_snptbl_grp_mbr_update_msg vdev_grp_member_update; + /**< Updates a snooplist group member. */ + struct nss_wifi_vdev_me_snptbl_deny_grp_add_msg vdev_deny_member_add; + /**< Add a snooplist member to the deny list. */ + struct nss_wifi_vdev_me_hmmc_add_msg vdev_hmmc_member_add; + /**< Adds a new member into the HMMC list. */ + struct nss_wifi_vdev_me_hmmc_del_msg vdev_hmmc_member_del; + /**< Delete a member from the HMMC list. */ + struct nss_wifi_vdev_me_deny_ip_add_msg vdev_deny_list_member_add; + /**< Adds a new member into the deny list. */ + struct nss_wifi_vdev_me_deny_ip_del_msg vdev_deny_list_member_del; + /**< Delete a member from the deny list. */ + struct nss_wifi_vdev_txmsg vdev_txmsgext; + /**< Transmits special data. */ + struct nss_wifi_vdev_vow_dbg_cfg_msg vdev_vow_dbg_cfg; + /**< Configures VoW debug statistics. */ + struct nss_wifi_vdev_vow_dbg_stats vdev_vow_dbg_stats; + /**< Types of VoW debug statistics. 
*/ + struct nss_wifi_vdev_dscp_tid_map vdev_dscp_tid_map; + /**< DSCP-to-TID mapping. */ + struct nss_wifi_vdev_updchdr_msg vdev_updchdr; + /**< Updates a cache header. */ + struct nss_wifi_vdev_me_host_sync_msg vdev_me_sync; + /**< Message for a multicast enhancement host group table synchronization. */ + struct nss_wifi_vdev_stats_sync_msg vdev_stats; + /**< Message to get virtual device statistics from NSS firmware to host. */ + struct nss_wifi_vdev_set_next_hop_msg next_hop; + /**< Next hop message for virtual device. */ + struct nss_wifi_vdev_dscptid_map_id vdev_dscp_tid_map_id; + /**< Message to get DSCP-to-TID mapping id to be used on virtual device. */ + struct nss_wifi_vdev_extap_map vdev_extap_map; + /**< Message to add entry in EXTAP table on virtual device. */ + struct nss_wifi_vdev_qwrap_psta_msg vdev_qwrap_psta_map; + /**< Message to get PSTA VAP details in QWRAP mode. */ + struct nss_wifi_vdev_qwrap_isolation_en_msg vdev_qwrap_isolation_en; + /**< Message to enable QWRAP isolation mode. */ + struct nss_wifi_vdev_set_peer_next_hop_msg vdev_set_peer_next_hp; + /**< Message to set next hop per peer. */ + struct nss_wifi_vdev_vlan_config_msg vdev_vlan_config; + /**< Message to set VLAN configured on a particular virtual device. */ + struct nss_wifi_vdev_vlan_enable_msg vdev_vlan_enable; + /**< Message to enable VLAN tagging support on a particular virtual device. */ + struct nss_wifi_vdev_set_vlan_group_key vlan_group_key; + /**< Message to set group key for peer. */ + } msg; /**< Virtual device message payload. */ +}; + +/** + * nss_wifi_vdev_tx_msg + * Sends a Wi-Fi message to the NSS interface. + * + * @datatypes + * nss_ctx_instance \n + * nss_wifi_vdev_msg + * + * @param[in] nss_ctx Pointer to the NSS core context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. 
+ */ +nss_tx_status_t nss_wifi_vdev_tx_msg(struct nss_ctx_instance *nss_ctx, + struct nss_wifi_vdev_msg *msg); + +/** + * nss_wifi_vdev_base_tx_msg + * Sends a Wi-Fi message to the NSS VAP interface. + * + * @datatypes + * nss_ctx_instance \n + * nss_wifi_vdev_msg + * + * @param[in] nss_ctx Pointer to the NSS core context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_wifi_vdev_base_tx_msg(struct nss_ctx_instance *nss_ctx, + struct nss_wifi_vdev_msg *msg); + +/** + * nss_wifi_vdev_tx_buf + * Sends a Wi-Fi data packet to the NSS interface. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in] nss_ctx Pointer to the NSS core context. + * @param[in] os_buf Pointer to the OS data buffer. + * @param[in] if_num NSS interface number. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_wifi_vdev_tx_buf(struct nss_ctx_instance *nss_ctx, + struct sk_buff *os_buf, uint32_t if_num); + +/** + * Callback function for receiving Wi-Fi virtual device messages. + * + * @datatypes + * nss_cmn_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_wifi_vdev_msg_callback_t)(void *app_data, + struct nss_cmn_msg *msg); + +/** + * Callback function for receiving Wi-Fi virtual device data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. + * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + */ +typedef void (*nss_wifi_vdev_callback_t)(struct net_device *netdev, + struct sk_buff *skb, struct napi_struct *napi); + +/** + * Callback function for receiving extended data plane Wi-Fi virtual device data. + * + * @datatypes + * net_device \n + * sk_buff \n + * napi_struct + * + * @param[in] netdev Pointer to the associated network device. 
+ * @param[in] skb Pointer to the data socket buffer. + * @param[in] napi Pointer to the NAPI structure. + * @param[in] netdev Pointer to the associated network device. + */ +typedef void (*nss_wifi_vdev_ext_data_callback_t)(struct net_device *netdev, + struct sk_buff *skb, struct napi_struct *napi); + +/** + * nss_wifi_vdev_msg_init + * Initializes a Wi-Fi virtual device message. + * + * @datatypes + * nss_wifi_vdev_msg \n + * nss_wifi_vdev_msg_callback_t + * + * @param[in] nim Pointer to the NSS interface message. + * @param[in] if_num NSS interface number. + * @param[in] type Type of message. + * @param[in] len Length of message. + * @param[in] cb Message callback. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. + */ +void nss_wifi_vdev_msg_init(struct nss_wifi_vdev_msg *nim, uint32_t if_num, uint32_t type, uint32_t len, + nss_wifi_vdev_msg_callback_t *cb, void *app_data); + +/** + * nss_register_wifi_vdev_if + * Registers a Wi-Fi virtual device interface with the NSS interface. + * + * @datatypes + * nss_ctx_instance \n + * nss_wifi_vdev_callback_t \n + * nss_wifi_vdev_ext_data_callback_t \n + * nss_wifi_vdev_msg_callback_t \n + * net_device + * + * @param[in,out] nss_ctx Pointer to the NSS core context. + * @param[in] if_num NSS interface number. + * @param[in] wifi_data_callback Callback for the Wi-Fi virtual device data. + * @param[in] vdev_ext_data_callback Callback for the extended data. + * @param[in] wifi_event_callback Callback for the message. + * @param[in] netdev Pointer to the associated network device. + * @param[in] features Data socket buffer types supported by this + * interface. + * + * @return + * None. 
+ */ +uint32_t nss_register_wifi_vdev_if(struct nss_ctx_instance *nss_ctx, int32_t if_num, nss_wifi_vdev_callback_t wifi_data_callback, + nss_wifi_vdev_ext_data_callback_t vdev_ext_data_callback, nss_wifi_vdev_msg_callback_t wifi_event_callback, + struct net_device *netdev, uint32_t features); + +/** + * nss_unregister_wifi_vdev_if + * Deregisters a Wi-Fi virtual device interface from the NSS interface. + * + * @param[in] if_num NSS interface number. + * + * @return + * None. + */ +void nss_unregister_wifi_vdev_if(uint32_t if_num); + +/** + * nss_wifi_vdev_tx_msg_ext + * Sends Wi-Fi data packet along with metadata as message to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * sk_buff + * + * @param[in,out] nss_ctx Pointer to the NSS core context. + * @param[in] os_buf Pointer to the OS data buffer. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_wifi_vdev_tx_msg_ext(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf); + +/** + * nss_wifi_vdev_set_next_hop + * Send next hop message to Wi-Fi virtual device. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS core context. + * @param[in] if_num NSS interface number. + * @param[in] next_hop Next hop interface number. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_wifi_vdev_set_next_hop(struct nss_ctx_instance *nss_ctx, int if_num, int next_hop); + +/** + * nss_wifi_vdev_base_set_next_hop + * Sends the next hop message to Wi-Fi virtual access point. + * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS core context. + * @param[in] next_hop Next hop interface number. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_wifi_vdev_base_set_next_hop(struct nss_ctx_instance *nss_ctx, int next_hop); + +/** + * nss_wifi_vdev_set_peer_next_hop + * Sends the peer next hop message to Wi-Fi virtual device. 
+ * + * @datatypes + * nss_ctx_instance + * + * @param[in] nss_ctx Pointer to the NSS core context. + * @param[in] nss_if NSS interface number. + * @param[in] addr Peer MAC address. + * @param[in] next_hop_if Next hop interface number. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_wifi_vdev_set_peer_next_hop(struct nss_ctx_instance *nss_ctx, uint32_t nss_if, uint8_t *addr, uint32_t next_hop_if); + +/* + * nss_wifi_vdev_set_dp_type + * Sets the datapath type for virtual device. + * + * @datatypes + * nss_ctx_instance \n + * net_device \n + * uint32_t \n + * enum nss_wifi_vdev_dp_type + * + * @param[in] nss_ctx Pointer to the NSS core context. + * @param[in] netdev Pointer to the associated network device. + * @param[in] if_num Interface number of the VAP. + * @param[in] dp_type Datapath type of the VAP. + * + * @return + * True if a success, or false if a failure. + */ +bool nss_wifi_vdev_set_dp_type(struct nss_ctx_instance *nss_ctx, struct net_device *netdev, + uint32_t if_num, enum nss_wifi_vdev_dp_type dp_type); +/** + * @} + */ + +#endif /* __NSS_WIFI_VDEV_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifili_if.h b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifili_if.h new file mode 100644 index 000000000..7d5954267 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/exports/nss_wifili_if.h @@ -0,0 +1,2057 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + + /** + * @file nss_wifili_if.h + * NSS TO HLOS interface definitions. + * NOTE: Here we will use wifili as a reference to + * the IPQ807x Wi-Fi object. + */ +#ifndef __NSS_WIFILI_H +#define __NSS_WIFILI_H + + /** + * @addtogroup nss_wifili_subsystem + * @{ + */ + +#define NSS_WIFILI_MAX_SRNG_REG_GROUPS_MSG 2 + /**< Maximum srng (ring) register groups. */ +#define NSS_WIFILI_MAX_NUMBER_OF_PAGE_MSG 32 + /**< Maximum number of pages allocated from host. */ +#define NSS_WIFILI_MAX_TCL_DATA_RINGS_MSG 4 + /**< Maximum number of Transmit Classifier data ring for NSS. */ +#define NSS_WIFILI_MAX_REO_DATA_RINGS_MSG 4 + /**< Maximum number of Rx reorder data ring for NSS. */ +#define NSS_WIFILI_SOC_PER_PACKET_METADATA_OFFSET 4 + /**< Metadata area for storing Rx statistics. */ +#define NSS_WIFILI_MAX_TXDESC_POOLS_MSG 4 + /**< Maximum number of Tx Descriptor software pools. */ +#define NSS_WIFILI_MAX_TX_EXT_DESC_POOLS_MSG 4 + /**< Maximum number of Tx Descriptor Extended software pools. */ +#define NSS_WIFILI_MAX_SOC_NUM 3 + /**< Maximum number of SoC devices. */ +#define NSS_WIFILI_MAX_PDEV_NUM_MSG 3 + /**< Maximum number of pdev devices. */ +#define NSS_WIFILI_MAX_MCS 12 + /**< Maximum Modulation And Coding Scheme (MCS) count. */ +#define NSS_WIFILI_MAX_MCS_11A 8 + /**< Maximum MCS for 11a mode. */ +#define NSS_WIFILI_MAX_MCS_11B 7 + /**< Maximum MCS for 11b mode. */ +#define NSS_WIFILI_MAX_MCS_11AC 10 + /**< Maximum MCS for 11ac mode. */ +#define NSS_WIFILI_MAX_MCS_11AX 10 + /**< Maximum MCS for 11ax mode. 
*/ +#define NSS_WIFILI_SS_COUNT 8 + /**< Maximum spatial streams count. */ +#define NSS_WIFILI_SUPPORTED_BW 4 + /**< Maximum number of bandwidth supported. */ +#define NSS_WIFILI_REPT_MU_MIMO 1 +#define NSS_WIFILI_REPT_MU_OFDMA_MIMO 3 +#define NSS_WIFILI_MAX_RESERVED_TYPE 2 + /**< Maximum reserved type. */ +#define NSS_WIFILI_SOC_PER_PACKET_METADATA_SIZE 60 + /**< Metadata area total size. */ +#define NSS_WIFILI_MEC_PEER_ID 0xDEAD + /**< MEC (Multicast echo check) peer ID. */ +#define NSS_WIFILI_DA_PEER_ID 0xDAAD + /**< Destination address peer ID. */ +#define NSS_WIFILI_MIC_KEY_LEN 8 + /**< MIC (Message integrity code) key length. */ +#define NSS_WIFILI_TQM_RR_MAX 7 + /**< Maximum transmit queue release reasons. */ +#define NSS_WIFILI_HTT_STATUS_MAX 7 + /**< Maximum HTT completion status. */ +#define NSS_WIFILI_TQM_STATUS_MAX 9 + /**< Maximum TQM completion status. */ +#define NSS_WIFILI_REO_CODE_MAX 15 + /**< Maximum Rx reorder error codes. */ +#define NSS_WIFILI_DMA_CODE_MAX 14 + /**< Maximum DMA error codes. */ +#define NSS_WIFILI_MAX_TID 8 + /**< Maximum TID values. */ +#define NSS_WIFILI_DELAY_INDEX_MAX 10 + /**< Maximum software enqueue delay buckets. */ +#define NSS_WIFILI_MAX_NUMBER_OF_ADDTNL_SEG 64 + /**< Maximum number of additional pages allocated from host. */ +#define NSS_WIFILI_SOC_ATTACHED_MAX_PDEV_NUM 1 + /**< Maximum number of physical devices on the external SoC. */ +#define NSS_WIFILI_PEER_AST_FLOWQ_MAX 4 + /**< Maximum number of flow queues. */ +#define NSS_WIFILI_WBM_INTERNAL_ERR_MAX 5 + /**< WBM internal maximum errors. */ + +/* + * Peer Size in Bytes + */ +#define NSS_WIFILI_PEER_SIZE 1600 + +/* + * Radio specific flags + */ +#define NSS_WIFILI_PDEV_FLAG_V3_STATS_ENABLED 0x00000008 + /**< Flag to enable version 3 statistics. */ +/** + * Peer message flags. + */ +#define NSS_WIFILI_PEER_MSG_DISABLE_4ADDR 0x01 + +#ifdef __KERNEL__ /* only kernel will use. */ + +/** + * Wireless Multimedia Extention Access Category to TID. 
@hideinitializer + */ +#define NSS_WIFILI_WME_AC_TO_TID(_ac) ( \ + ((_ac) == NSS_WIFILI_WME_AC_VO) ? 6 : \ + (((_ac) == NSS_WIFILI_WME_AC_VI) ? 5 : \ + (((_ac) == NSS_WIFILI_WME_AC_BK) ? 1 : \ + 0))) + +/** + * Wireless TID to Wireless Multimedia Extension Access Category. @hideinitializer + */ +#define NSS_WIFILI_TID_TO_WME_AC(_tid) ( \ + (((_tid) == 0) || ((_tid) == 3)) ? NSS_WIFILI_WME_AC_BE : \ + ((((_tid) == 1) || ((_tid) == 2)) ? NSS_WIFILI_WME_AC_BK : \ + ((((_tid) == 4) || ((_tid) == 5)) ? NSS_WIFILI_WME_AC_VI : \ + NSS_WIFILI_WME_AC_VO))) +#endif /* __KERNEL__ */ + +/** + * nss_wifili_thread_scheme_id + * List of thread scheme IDs. + */ +enum nss_wifili_thread_scheme_id { + NSS_WIFILI_THREAD_SCHEME_ID_0, /**< High priority scheme index. */ + NSS_WIFILI_THREAD_SCHEME_ID_1, /**< Low priority scheme index. */ + NSS_WIFILI_THREAD_SCHEME_ID_2, /**< High priority scheme index. */ + NSS_WIFILI_THREAD_SCHEME_ID_MAX /**< Maximum value of scheme index. */ +}; + +/* + * nss_wifili_thread_scheme_priority + * List of wifili thread scheme priority. + */ +enum nss_wifili_thread_scheme_priority { + NSS_WIFILI_LOW_PRIORITY_SCHEME, /**< Low priority scheme. */ + NSS_WIFILI_HIGH_PRIORITY_SCHEME, /**< High priority scheme. */ +}; + +/** + * nss_wifili_wme_stream_classes + * WME stream classes. + */ +enum nss_wifili_wme_stream_classes { + NSS_WIFILI_WME_AC_BE, /**< Best effort. */ + NSS_WIFILI_WME_AC_BK, /**< Background. */ + NSS_WIFILI_WME_AC_VI, /**< Video. */ + NSS_WIFILI_WME_AC_VO, /**< Voice. */ + NSS_WIFILI_WME_AC_MAX /**< Maximum AC Value. */ +}; + +/** + * nss_wifili_packet_type + * Different Packet Types. + */ +enum nss_wifili_packet_type { + NSS_WIFILI_DOT11_A, /**< 802.11a packet type. */ + NSS_WIFILI_DOT11_B, /**< 802.11b packet type. */ + NSS_WIFILI_DOT11_N, /**< 802.11n packet type. */ + NSS_WIFILI_DOT11_AC, /**< 802.11ac packet type. */ + NSS_WIFILI_DOT11_AX , /**< 802.11ax packet type. */ + NSS_WIFILI_DOT11_MAX /**< Maximum 802.11 packet types. 
*/ +}; + +/* + * nss_wifili_decap_pkt_type + * Different Decapsulation packet types + */ +enum wifili_decap_pkt_type { + NSS_WIFILI_DECAP_TYPE_RAW, /**< Raw packet type. */ + NSS_WIFILI_DECAP_TYPE_NATIVE_WIFI, /**< Native Wi-Fi packet type. */ + NSS_WIFILI_DECAP_TYPE_ETHERNET, /**< Ethernet packet type. */ + NSS_WIFILI_DECAP_TYPE_MAX, /**< Maximum packet type. */ +}; + +/** + * nss_wifili_msg_types + * NSS wifili messages. + */ +enum nss_wifili_msg_types { + NSS_WIFILI_INIT_MSG, + NSS_WIFILI_SOC_RESET_MSG, + NSS_WIFILI_PDEV_INIT_MSG, + NSS_WIFILI_PDEV_DEINIT_MSG, + NSS_WIFILI_START_MSG, + NSS_WIFILI_STOP_MSG, + NSS_WIFILI_PEER_CREATE_MSG, + NSS_WIFILI_PEER_DELETE_MSG, + NSS_WIFILI_SEND_PEER_MEMORY_REQUEST_MSG, + NSS_WIFILI_PEER_FREELIST_APPEND_MSG, + NSS_WIFILI_STATS_MSG, + NSS_WIFILI_WDS_VENDOR_MSG, + NSS_WIFILI_PEER_STATS_MSG, + NSS_WIFILI_WDS_PEER_ADD_MSG, + NSS_WIFILI_WDS_PEER_DEL_MSG, + NSS_WIFILI_WDS_PEER_MAP_MSG, + NSS_WIFILI_WDS_ACTIVE_INFO_MSG, + NSS_WIFILI_STATS_CFG_MSG, + NSS_WIFILI_TID_REOQ_SETUP_MSG, + NSS_WIFILI_RADIO_CMD_MSG, + NSS_WIFILI_LINK_DESC_INFO_MSG, + NSS_WIFILI_PEER_SECURITY_TYPE_MSG, + NSS_WIFILI_PEER_NAWDS_ENABLE_MSG, + NSS_WIFILI_RADIO_BUF_CFG, + NSS_WIFILI_DBDC_REPEATER_SET_MSG, + NSS_DBDC_REPEATER_AST_FLUSH_MSG, + NSS_WIFILI_SET_HMMC_DSCP_OVERRIDE_MSG, + NSS_WIFILI_SET_HMMC_DSCP_TID_MSG, + NSS_WIFILI_PDEV_STATS_V3_TXRX_SYNC_MSG, + NSS_WIFILI_PDEV_STATS_V3_DELAY_SYNC_MSG, + NSS_WIFILI_ENABLE_V3_STATS_MSG, + NSS_WIFILI_WDS_PEER_UPDATE_MSG, + NSS_WIFILI_STATS_V2_CFG_MSG, + NSS_WIFILI_SOJOURN_STATS_MSG, + NSS_WIFILI_PEER_SET_VLAN_ID, + NSS_WIFILI_UPDATE_PDEV_LMAC_ID_MSG, + NSS_WIFILI_PEER_AST_FLOWID_MAP_MSG, + NSS_WIFILI_PEER_MEC_AGEOUT_MSG, + NSS_WIFILI_JITTER_STATS_MSG, + NSS_WIFILI_ISOLATION_MSG, + NSS_WIFILI_PEER_EXT_STATS_MSG, + NSS_WIFILI_CLR_STATS, + NSS_WIFILI_PEER_4ADDR_EVENT_MSG, + NSS_WIFILI_DBDC_REPEATER_LOOP_DETECTION_MSG, + NSS_WIFILI_PEER_UPDATE_AUTH_FLAG, + NSS_WIFILI_SEND_MESH_CAPABILITY_INFO, + NSS_WIFILI_MAX_MSG +}; + 
+/** + * nss_wifili_error_types + * Wifili error message types for functions. + */ +enum nss_wifili_error_types { + NSS_WIFILI_EMSG_NONE, + /**< No error. */ + NSS_WIFILI_EMSG_INIT_FAIL_IMPROPER_STATE, + /**< Device initialization failure due to improper state of device. */ + NSS_WIFILI_EMSG_RINGS_INIT_FAIL, + /**< Device ring initialization failure. */ + NSS_WIFILI_EMSG_PDEV_INIT_IMPROPER_STATE_FAIL, + /**< Radio initialization failure due to improper state of device. */ + NSS_WIFILI_EMSG_PDEV_INIT_INVALID_RADIOID_FAIL, + /**< Radio initialization failed due to invalid radio ID. */ + WIFILI_EMSG_PDEV_INIT_INVALID_TARGETPDEVID_FAIL, + /**< Radio initialization failed due to invalid target physical device ID. */ + NSS_WIFILI_EMSG_PDEV_TX_IRQ_ALLOC_FAIL, + /**< IRQ line allocation for radio transmission failed. */ + NSS_WIFILI_EMSG_PDEV_RESET_INVALID_RADIOID_FAIL, + /**< Radio reset failed due to invalid radio ID. */ + NSS_WIFILI_EMSG_PDEV_RESET_PDEV_NULL_FAIL, + /**< Radio reset failed due to NULL physical device. */ + NSS_WIFILI_EMSG_PDEV_RESET_IMPROPER_STATE_FAIL, + /**< Radio reset failed due to improper state of pdev. */ + NSS_WIFILI_EMSG_START_IMPROPER_STATE_FAIL, + /**< Device start fail due to improper state */ + NSS_WIFILI_EMSG_PEER_CREATE_FAIL, + /**< Peer creation failed. */ + NSS_WIFILI_EMSG_PEER_DELETE_FAIL, + /**< Peer deletion failed. */ + NSS_WIFILI_EMSG_HASHMEM_INIT_FAIL, + /**< Peer hash memory allocation failed. */ + NSS_WIFILI_EMSG_PEER_FREELIST_APPEND_FAIL, + /**< Appending peer to freelist failed. */ + NSS_WIFILI_EMSG_PEER_CREATE_INVALID_VDEVID_FAIL, + /**< Peer creation failure due to invalid virtual device ID. */ + NSS_WIFILI_EMSG_PEER_CREATE_INVALID_PEER_ID_FAIL, + /**< Peer creation failure due to invalid peer ID. */ + NSS_WIFILI_EMSG_PEER_CREATE_VDEV_NULL_FAIL, + /**< Peer creation failure due to NULL virtual device. */ + NSS_WIFILI_EMSG_PEER_CREATE_PDEV_NULL_FAIL, + /**< Peer creation failure due to NULL physical device. 
*/ + NSS_WIFILI_EMSG_PEER_CREATE_ALLOC_FAIL, + /**< Peer creation failure due to memory allocation failure. */ + NSS_WIFILI_EMSG_PEER_DELETE_VAPID_INVALID_FAIL, + /**< Peer deletion failure due to invalid virtual device ID. */ + NSS_WIFILI_EMSG_PEER_DELETE_INVALID_PEERID_FAIL, + /**< Peer deletion failed due to invalid peer ID. */ + NSS_WIFILI_EMSG_PEER_DELETE_VDEV_NULL_FAIL, + /**< Peer deletion failure due to NULL virtual device. */ + NSS_WIFILI_EMSG_PEER_DELETE_PDEV_NULL_FAIL, + /**< Peer deletion failure due to NULL physical device. */ + NSS_WIFILI_EMSG_PEER_DELETE_PEER_NULL_FAIL, + /**< Peer deletion failure due to NULL peer. */ + NSS_WIFILI_EMSG_PEER_DELETE_PEER_CORRUPTED_FAIL, + /**< Peer creation failure due to corrupted peer. */ + NSS_WIFILI_EMSG_PEER_DUPLICATE_AST_INDEX_PEER_ID_FAIL, + /**< AST index provided is duplicate. */ + NSS_WIFILI_EMSG_GROUP0_TIMER_ALLOC_FAIL, + /**< Timer allocation failure. */ + NSS_WIFILI_EMSG_INSUFFICIENT_WT_FAIL, + /**< Insufficient worker thread error. */ + NSS_WIFILI_EMSG_INVALID_NUM_TCL_RING_FAIL, + /**< Invalid number of Transmit Classifier rings provided in initialization message. */ + NSS_WIFILI_EMSG_INVALID_NUM_REO_DST_RING_FAIL, + /**< Invalid number of Rx reorder destination ring in initialization message. */ + NSS_WIFILI_EMSG_HAL_SRNG_SOC_ALLOC_FAIL, + /**< Srng SoC memory allocation failure. */ + NSS_WIFILI_EMSG_HAL_SRNG_INVALID_RING_INFO_FAIL, + /**< Device ring information is invalid. */ + NSS_WIFILI_EMSG_HAL_SRNG_TCL_ALLOC_FAIL, + /**< Transmit Classifier srng ring allocation failure. */ + NSS_WIFILI_EMSG_HAL_SRNG_TXCOMP_ALLOC_FAIL, + /**< Txcomp srng ring allocation failure. */ + NSS_WIFILI_EMSG_HAL_SRNG_REODST_ALLOC_FAIL, + /**< Rx reorder destination srng ring allocation failure. */ + NSS_WIFILI_EMSG_HAL_SRNG_REOREINJECT_ALLOC_FAIL, + /**< Rx reorder reinject srng ring allocation failure. */ + NSS_WIFILI_EMSG_HAL_SRNG_RXRELEASE_ALLOC_FAIL, + /**< Rx release srng ring allocation failure. 
*/ + NSS_WIFILI_EMSG_HAL_SRNG_RXEXCP_ALLOC_FAIL, + /**< Rx exception srng ring allocation failure. */ + NSS_WIFILI_EMSG_HAL_TX_MEMALLOC_FAIL, + /**< Tx HAL (hardware abstraction layer) srng ring allocation failure. */ + NSS_WIFILI_EMSG_HAL_TX_INVLID_POOL_NUM_FAIL, + /**< Invalid pool number in initialization message. */ + NSS_WIFILI_EMSG_HAL_TX_INVALID_PAGE_NUM_FAIL, + /**< Invalid page number in initialization message. */ + NSS_WIFILI_EMSG_HAL_TX_DESC_MEM_ALLOC_FAIL, + /**< Tx descriptor memory allocation failure. */ + NSS_WIFILI_EMSG_HAL_RX_MEMALLOC_FAIL, + /**< Rx memory allocation failure. */ + NSS_WIFILI_EMSG_PDEV_RXDMA_RING_ALLOC_FAIL, + /**< Rx DMA ring allocation failed. */ + NSS_WIFILI_EMSG_NAWDSEN_PEERID_INVALID, + /**< Peer NAWDS enable failure due to invalid peer ID. */ + NSS_WIFILI_EMSG_NAWDSEN_PEER_NULL, + /**< Peer NAWDS enable failure due to peer being NULL. */ + NSS_WIFILI_EMSG_NAWDSEN_PEER_CORRUPTED, + /**< Peer NAWDS enable failure due to corrupted peer. */ + NSS_WIFILI_EMSG_WDS_PEER_CFG_FAIL, + /**< WDS peer configuration failure. */ + NSS_WIFILI_EMSG_RESET_NO_STOP, + /**< Reset issued without stopping the device. */ + NSS_WIFILI_EMSG_HAL_SRNG_INVALID_RING_BASE_FAIL, + /**< Ring base address is invalid. */ + NSS_WIFILI_EMSG_PDEV_RX_INIT_FAIL, + /**< Pdev Rx initialization failure. */ + NSS_WIFILI_EMESG_AST_ADD_FAIL, + /**< AST entry addition failure for connected peer. */ + NSS_WIFILI_EMESG_AST_REMOVE_FAIL, + /**< AST entry removal failure for connected peer. */ + NSS_WIFILI_EMESG_WDS_ADD_FAIL, + /**< WDS peer AST entry addition failure. */ + NSS_WIFILI_EMESG_WDS_REMOVE_FAIL, + /**< WDS peer AST entry removal failure. */ + NSS_WIFILI_EMESG_WDS_MAP_FAIL, + /**< WDS peer AST entry hardware index mapping failure. */ + NSS_WIFILI_EMSG_WDS_INVALID_PEERID_FAIL, + /**< Invalid peer ID passed in WDS messages. */ + NSS_WIFILI_EMSG_WDS_DUPLICATE_AST_INDEX_PEER_ID_FAIL, + /**< AST entry index is already filled. 
*/ + NSS_WIFILI_EMSG_INVALID_RADIO_CMD, + /**< Radio command is invalid. */ + NSS_WIFILI_EMSG_INVALID_RADIO_IFNUM, + /**< Radio interface number is invalid. */ + NSS_WIFILI_EMSG_PEER_SECURITY_PEER_NULL_FAIL, + /**< Security message failed as peer is NULL for a peer ID. */ + NSS_WIFILI_EMSG_PEER_SECURITY_PEER_CORRUPTED_FAIL, + /**< Security message failed as peer is corrupted. */ + NSS_WIFILI_EMSG_RADIO_INVALID_BUF_CFG, + /**< Buffer configuration message failed as invalid range value is provided. */ + NSS_WIFILI_EMSG_INIT_FAIL_INVALID_TARGET, + /**< Invalid target SoC type from host. */ + NSS_WIFILI_EMSG_PDEV_INIT_FAIL_INVALID_LMAC_ID, + /**< Invalid lower MAC ID from host. */ + NSS_WIFILI_EMSG_STATE_PDEV_NOT_INITIALIZED, + /**< Configured message issued when radio is not initialized. */ + NSS_WIFILI_EMESG_RX_TLV_INVALID, + /**< Invalid TLV length. */ + NSS_WIFILI_EMESG_RX_BUF_LEN_INVALID, + /**< Invalid Rx buffer length. */ + NSS_WIFILI_EMSG_UNKNOWN + /**< Unknown error message. */ +}; + +/** + * nss_wifili_soc_extended_data_types + * Enumeration of extended data type to host. + */ +enum nss_wifili_soc_extended_data_types { + NSS_WIFILI_SOC_EXT_DATA_PKT_TYPE_NONE, /**< Packet type is none. */ + NSS_WIFILI_SOC_EXT_DATA_PKT_MSDU_LINK_DESC, /**< Packet type is MSDU link descriptor. */ + NSS_WIFILI_SOC_EXT_DATA_PKT_INVALID_PEER, /**< Packet type is invalid peer. */ + NSS_WIFILI_SOC_EXT_DATA_PKT_MIC_ERROR, /**< Packet received with MIC error. */ + NSS_WIFILI_SOC_EXT_DATA_PKT_2K_JUMP_ERROR, /**< Packet received with 2K jump in sequence number. */ + NSS_WIFILI_SOC_EXT_DATA_PKT_WIFI_PARSE_ERROR, /**< Packet received with Wi-Fi parse error. */ + NSS_WIFILI_SOC_EXT_DATA_PKT_TYPE_MAX /**< Maximum extended data types. */ +}; + +/** + * nss_wifili_radio_cmd + * Wi-Fi radio commands for wifili. + */ +enum nss_wifili_radio_cmd { + NSS_WIFILI_RADIO_TX_CAPTURE_CMD, /**< Enable Tx capture. */ + NSS_WIFILI_SET_PRIMARY_RADIO, /**< Set current radio as primary. 
*/ + NSS_WIFILI_SET_ALWAYS_PRIMARY, /**< Set always primary flag. */ + NSS_WIFILI_SET_FORCE_CLIENT_MCAST_TRAFFIC, /**< Flag to force multicast traffic for a radio. */ + NSS_WIFILI_SET_DROP_SECONDARY_MCAST, /**< Flag to drop multicast traffic on secondary radio. */ + NSS_WIFILI_SET_DBDC_FASTLANE, /**< Flag to set DBDC fast-lane mode. */ + NSS_WIFILI_SET_DBDC_NOBACKHAUL_RADIO, /**< Flag to set DBDC to no backhaul radio. */ + NSS_WIFILI_RADIO_MAX_CMD /**< Maximum radio command index. */ +}; + +/* + * WARNING: There is a 1:1 mapping between values of enum nss_wifili_stats_txrx and corresponding + * statistics string array in nss_stats.c. + */ + +/** + * nss_wifili_stats_txrx + * Wifili Tx or Rx statistics. + */ +enum nss_wifili_stats_txrx { + NSS_WIFILI_STATS_RX_MSDU_ERROR, + /**< Number of Rx packets received from ring with MSDU error. */ + NSS_WIFILI_STATS_RX_INV_PEER_RCV, + /**< Number of Rx packets with invalid peer ID. */ + NSS_WIFILI_STATS_RX_WDS_SRCPORT_EXCEPTION, + /**< Number of Rx packets exceptioned to host because of source port learn fail. */ + NSS_WIFILI_STATS_RX_WDS_SRCPORT_EXCEPTION_FAIL, + /**< Number of Rx source port learn fail packets failed to get enqueued to host. */ + NSS_WIFILI_STATS_RX_DELIVERD, + /**< Number of packets wifili has given to next node. */ + NSS_WIFILI_STATS_RX_DELIVER_DROPPED, + /**< Number of packets which wifili failed to enqueue to next node. */ + NSS_WIFILI_STATS_RX_INTRA_BSS_UCAST, + /**< Number of packets that wifili sent for intra-BSS unicast packet. */ + NSS_WIFILI_STATS_RX_INTRA_BSS_UCAST_FAIL, + /**< Number of packets that wifili sent for intra-BSS unicast packet failed. */ + NSS_WIFILI_STATS_RX_INTRA_BSS_MCAST, + /**< Number of packets that wifili sent for intra-BSS multicast packet. */ + NSS_WIFILI_STATS_RX_INTRA_BSS_MCAST_FAIL, + /**< Number of packets that wifili sent for intra-BSS multicast packet failed. */ + NSS_WIFILI_STATS_RX_SG_RCV_SEND, + /**< Number of packets scatter-gather sent. 
*/ + NSS_WIFILI_STATS_RX_SG_RCV_FAIL, + /**< Number of packets scatter-gather received failure. */ + NSS_STATS_WIFILI_RX_MCAST_ECHO, + /**< Number of multicast echo packets received. */ + NSS_STATS_WIFILI_RX_INV_TID, + /**< Number of invalid TID. */ + + /* + * TODO: Move per TID based + */ + NSS_WIFILI_STATS_RX_FRAG_INV_SC, + /**< Number of fragments with invalid sequence control. */ + NSS_WIFILI_STATS_RX_FRAG_INV_FC, + /**< Number of fragments with invalid frame control. */ + NSS_WIFILI_STATS_RX_FRAG_NON_FRAG, + /**< Number of non-fragments received in fragments. */ + NSS_WIFILI_STATS_RX_FRAG_RETRY, + /**< Number of retries for fragments. */ + NSS_WIFILI_STATS_RX_FRAG_OOO, + /**< Number of out-of-order fragments. */ + NSS_WIFILI_STATS_RX_FRAG_OOO_SEQ, + /**< Number of out-of-order sequence. */ + NSS_WIFILI_STATS_RX_FRAG_ALL_FRAG_RCV, + /**< Number of times all fragments for a sequence has been received. */ + NSS_WIFILI_STATS_RX_FRAG_DELIVER, + /**< Number of fragments delivered to host. */ + NSS_WIFILI_STATS_TX_ENQUEUE, + /**< Number of packets that got enqueued to wifili. */ + NSS_WIFILI_STATS_TX_ENQUEUE_DROP, + /**< Number of packets that dropped during enqueue to wifili. */ + NSS_WIFILI_STATS_TX_DEQUEUE, + /**< Number of packets that are dequeued by wifili. */ + NSS_WIFILI_STATS_TX_HW_ENQUEUE_FAIL, + /**< Number of Rx packets that NSS Wi-Fi offload path could successfully process. */ + NSS_WIFILI_STATS_TX_SENT_COUNT, + /**< Number of Tx packets sent to hardware. */ + NSS_WIFILI_STATS_TXRX_MAX, + /**< Number of maximum Tx or Rx statistics. */ +}; + +/* + * WARNING: There is a 1:1 mapping between values of enum nss_wifili_stats_tcl and corresponding + * statistics string array in nss_stats.c. + */ + +/** + * nss_wifili_stats_tcl + * Wifili transmit classifier statistics. + */ +enum nss_wifili_stats_tcl { + NSS_WIFILI_STATS_TCL_NO_HW_DESC, /**< Number of transmit classifier hardware descriptor. 
*/ + NSS_WIFILI_STATS_TCL_RING_FULL, /**< Number of times transmit classifier ring was full. */ + NSS_WIFILI_STATS_TCL_RING_SENT, /**< Number of times transmit classifier descriptor sent. */ + NSS_WIFILI_STATS_TCL_MAX, /**< Number of maximum transmit classifier statistics. */ +}; + +/* + * WARNING: There is a 1:1 mapping between values of enum nss_wifili_stats_tx_comp and corresponding + * statistics string array in nss_stats.c. + */ + +/** + * nss_wifili_stats_tx_comp + * Wifili Tx completion statistics. + */ +enum nss_wifili_stats_tx_comp { + NSS_WIFILI_STATS_TX_DESC_FREE_INV_BUFSRC, /**< Number of invalid buffer source packets. */ + NSS_WIFILI_STATS_TX_DESC_FREE_INV_COOKIE, /**< Number of invalid cookie packets. */ + NSS_WIFILI_STATS_TX_DESC_FREE_HW_RING_EMPTY, /**< Number of times hardware ring empty found. */ + NSS_WIFILI_STATS_TX_DESC_FREE_REAPED, /**< Number of Tx packets that are reaped out of the Tx completion ring. */ + NSS_WIFILI_STATS_TX_DESC_FREE_MAX, /**< Number of Tx completion statistics. */ +}; + +/* + * WARNING: There is a 1:1 mapping between values of enum nss_wifili_stats_reo and corresponding + * statistics string array in nss_stats.c. + */ + +/** + * nss_wifili_stats_reo + * Wifili Rx reorder statistics. + */ +enum nss_wifili_stats_reo { + NSS_WIFILI_STATS_REO_ERROR, /**< Number of reorder error. */ + NSS_WIFILI_STATS_REO_REAPED, /**< Number of reorder reaped. */ + NSS_WIFILI_STATS_REO_INV_COOKIE, /**< Number of invalid cookie. */ + NSS_WIFILI_STATS_REO_FRAG_RCV, /**< Number of fragmented packets received. */ + NSS_WIFILI_STATS_REO_MAX, /**< Number of reorder statistics. */ +}; + +/* + * WARNING: There is a 1:1 mapping between values of enum nss_wifili_stats_txsw_pool and corresponding + * statistics string array in nss_stats.c. + */ + +/** + * nss_wifili_stats_txsw_pool + * Wifili Tx descriptor statistics. + */ +enum nss_wifili_stats_txsw_pool { + NSS_WIFILI_STATS_TX_DESC_IN_USE, /**< Number of Tx packets that are currently in flight. 
*/ + NSS_WIFILI_STATS_TX_DESC_ALLOC_FAIL, /**< Number of Tx software descriptor allocation failures. */ + NSS_WIFILI_STATS_TX_DESC_ALREADY_ALLOCATED, /**< Number of Tx software descriptor already allocated. */ + NSS_WIFILI_STATS_TX_DESC_INVALID_FREE, /**< Number of Tx software descriptor invalid free. */ + NSS_WIFILI_STATS_TX_DESC_FREE_SRC_FW, /**< Number of Tx descriptor for which release source is firmware. */ + NSS_WIFILI_STATS_TX_DESC_FREE_COMPLETION, /**< Number of Tx descriptor completion. */ + NSS_WIFILI_STATS_TX_DESC_NO_PB, /**< Number of Tx descriptor pbuf is NULL. */ + NSS_WIFILI_STATS_TX_QUEUELIMIT_DROP, /**< Number of Tx dropped because of queue limit. */ + NSS_WIFILI_STATS_TX_DESC_MAX, /**< Number of Tx descriptor statistics. */ +}; + +/* + * WARNING: There is a 1:1 mapping between values of enum nss_wifili_stats_ext_txsw_pool and corresponding + * statistics string array in nss_stats.c + */ + +/** + * nss_wifili_stats_ext_txsw_pool + * Wifili Rx extended descriptor statistics. + */ +enum nss_wifili_stats_ext_txsw_pool { + NSS_WIFILI_STATS_EXT_TX_DESC_IN_USE, /**< Number of extended Tx packets that are currently in flight. */ + NSS_WIFILI_STATS_EXT_TX_DESC_ALLOC_FAIL, /**< Number of extended Tx software descriptor allocation failures. */ + NSS_WIFILI_STATS_EXT_TX_DESC_ALREADY_ALLOCATED, /**< Number of extended Tx software descriptor already allocated. */ + NSS_WIFILI_STATS_EXT_TX_DESC_INVALID_FREE, /**< Number of extended Tx software descriptor invalid free. */ + NSS_WIFILI_STATS_EXT_TX_DESC_MAX, /**< Number of extended Tx descriptor statistics. */ +}; + +/* + * WARNING: There is a 1:1 mapping between values of enum nss_wifili_stats_rxdma_pool and corresponding + * statistics string array in nss_stats.c + */ + +/** + * nss_wifili_stats_rxdma_pool + * Wifili Rx descriptor statistics. + */ +enum nss_wifili_stats_rxdma_pool { + NSS_WIFILI_STATS_RX_DESC_NO_PB, /**< Number of Rx descriptors that have no pbufs. 
*/ + NSS_WIFILI_STATS_RX_DESC_ALLOC_FAIL, /**< Number of Rx descriptor allocation failures. */ + NSS_WIFILI_STATS_RX_DESC_IN_USE, /**< Number of Rx descriptor allocations in use. */ + NSS_WIFILI_STATS_RX_DESC_MAX, /**< Maximum number of Rx descriptor statistics. */ +}; + +/* + * WARNING: There is a 1:1 mapping between values of enum nss_wifili_stats_rxdma_ring and corresponding + * statistics string array in nss_stats.c. + */ + +/** + * nss_wifili_stats_rxdma_ring + * Wifili Rx DMA(Direct Memory Access) ring statistics. + */ +enum nss_wifili_stats_rxdma_ring { + NSS_WIFILI_STATS_RXDMA_DESC_UNAVAILABLE, /**< Number of Rx DMA descriptor unavailable. */ + NSS_WIFILI_STATS_RXDMA_BUF_REPLENISHED, /**< Number of Rx DMA buffer replenished. */ + NSS_WIFILI_STATS_RXDMA_DESC_MAX, /**< Number of Rx DMA descriptor statistics. */ +}; + +/* + * WARNING: There is a 1:1 mapping between values of enum nss_wifili_stats_wbm and corresponding + * statistics string array in nss_stats.c. + */ + +/** + * nss_wifili_stats_wbm + * Wifili WBM(Wireless Buffer Manager) ring statistics. + */ +enum nss_wifili_stats_wbm { + NSS_WIFILI_STATS_WBM_IE_LOCAL_ALLOC_FAIL, /**< Number of Wireless Buffer Manager internal local allocation failures. */ + NSS_WIFILI_STATS_WBM_SRC_DMA, /**< Number of receive invalid source DMA. */ + NSS_WIFILI_STATS_WBM_SRC_DMA_CODE_INV, /**< Number of receive invalid source DMA. */ + NSS_WIFILI_STATS_WBM_SRC_REO, /**< Number of receive invalid source reorder. */ + NSS_WIFILI_STATS_WBM_SRC_REO_CODE_NULLQ, /**< Number of receive invalid reorder error with NULL queue. */ + NSS_WIFILI_STATS_WBM_SRC_REO_CODE_INV, /**< Number of receive invalid reorder code invalid. */ + NSS_WIFILI_STATS_WBM_SRC_INV, /**< Number of receive invalid source invalid. */ + NSS_WIFILI_STATS_WBM_MAX, /**< Number of receive Wireless Buffer Manager statistics. */ +}; + +/** + * nss_wifili_stats + * NSS wifili statistics. 
+ */ +struct nss_wifili_stats { + uint64_t stats_txrx[NSS_WIFILI_MAX_PDEV_NUM_MSG][NSS_WIFILI_STATS_TXRX_MAX]; + /**< Number of Tx or Rx statistics. */ + uint64_t stats_tcl_ring[NSS_WIFILI_MAX_TCL_DATA_RINGS_MSG][NSS_WIFILI_STATS_TCL_MAX]; + /**< TCL statistics for each ring. */ + uint64_t stats_tx_comp[NSS_WIFILI_MAX_TCL_DATA_RINGS_MSG][NSS_WIFILI_STATS_TX_DESC_FREE_MAX]; + /**< Tx completion ring statistics. */ + uint64_t stats_tx_desc[NSS_WIFILI_MAX_TXDESC_POOLS_MSG][NSS_WIFILI_STATS_TX_DESC_MAX]; + /**< Tx descriptor pool statistics. */ + uint64_t stats_ext_tx_desc[NSS_WIFILI_MAX_TX_EXT_DESC_POOLS_MSG][NSS_WIFILI_STATS_EXT_TX_DESC_MAX]; + /**< Tx extended descriptor pool statistics. */ + uint64_t stats_reo[NSS_WIFILI_MAX_REO_DATA_RINGS_MSG][NSS_WIFILI_STATS_REO_MAX]; + /**< Rx reorder ring statistics. */ + uint64_t stats_rx_desc[NSS_WIFILI_MAX_PDEV_NUM_MSG][NSS_WIFILI_STATS_RX_DESC_MAX]; + /**< Rx software pool statistics. */ + uint64_t stats_rxdma[NSS_WIFILI_MAX_PDEV_NUM_MSG][NSS_WIFILI_STATS_RXDMA_DESC_MAX]; + /**< Rx DMA ring statistics. */ + uint64_t stats_wbm[NSS_WIFILI_STATS_WBM_MAX]; + /**< Wireless Buffer Manager error ring statistics. */ +}; + +/* + * NSS wifili soc stats + */ +struct nss_wifili_soc_stats { + uint32_t soc_maxpdev; /**< Maximum number of radios per SoC. */ + struct nss_wifili_stats stats_wifili; + /**< Per-SoC statistics. */ +}; + +/** + * nss_wifili_stats_notification + * Data for sending wifili statistics. + */ +struct nss_wifili_stats_notification { + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number for this wifili. */ + struct nss_wifili_stats stats; /**< Wifili statistics. */ +}; + +#ifdef __KERNEL__ /* only kernel will use. */ + +/** + * nss_wifili_hal_srng_info + * Wifili HAL srng information. + */ +struct nss_wifili_hal_srng_info{ + uint8_t ring_id; + /**< Ring ID. */ + uint8_t mac_id; + /**< Pdev ID. */ + uint8_t resv[2]; + uint32_t ring_base_paddr; + /**< Physical base address of the ring. 
*/ + uint32_t num_entries; + /**< Number of entries in ring. */ + uint32_t flags; /**< Miscellaneous flags. */ + uint32_t ring_dir; + /**< Ring direction: source or destination. */ + uint32_t entry_size; + /**< Ring entry size. */ + uint32_t low_threshold; + /**< Low threshold – in number of ring entries (valid for source rings only). */ + uint32_t hwreg_base[NSS_WIFILI_MAX_SRNG_REG_GROUPS_MSG]; + /**< Hardware ring base address. */ +}; + +/** + * nss_wifili_hal_srng_soc_msg + * Wifili hal srng message. + */ +struct nss_wifili_hal_srng_soc_msg { + uint32_t dev_base_addr; + /**< Base address of WLAN device. */ + uint32_t shadow_rdptr_mem_addr; + /**< Shadow read pointer address. */ + uint32_t shadow_wrptr_mem_addr; + /**< Shadow write pointer address. */ + uint32_t lmac_rings_start_id; + /**< start id of LMAC rings. */ +}; + +/** + * struct wifili_tx_desc_addtnl_mem_msg + * Wifili additional host memory message for increeased descriptors + */ +struct nss_wifili_tx_desc_addtnl_mem_msg { + uint32_t num_addtnl_addr; + /**< Number of additional memory pages provided. */ + uint32_t addtnl_memory_addr[NSS_WIFILI_MAX_NUMBER_OF_ADDTNL_SEG]; + /**< Physical memory addresse of each additional page. */ + uint32_t addtnl_memory_size[NSS_WIFILI_MAX_NUMBER_OF_ADDTNL_SEG]; + /**< Size of each additional page. */ +}; + +/** + * nss_wifili_tx_desc_init_msg + * Wifili software descriptor pool initialization message. + */ +struct nss_wifili_tx_desc_init_msg { + uint32_t num_tx_desc; + /**< Count of the software descriptors. */ + uint32_t num_tx_desc_ext; + /**< Count of software extented descriptors. */ + uint32_t num_pool; + /**< Number of descriptor pools. */ + uint32_t memory_addr[NSS_WIFILI_MAX_NUMBER_OF_PAGE_MSG]; + /**< Memory start address of each page. */ + uint32_t memory_size[NSS_WIFILI_MAX_NUMBER_OF_PAGE_MSG]; + /**< Memory size. */ + uint32_t num_memaddr; + /**< Number of memory address. */ + uint32_t ext_desc_page_num; + /**< Extended descriptor page number. 
*/ + uint32_t num_tx_desc_2; + /**< Count of the software descriptors for second radio. */ + uint32_t num_tx_desc_ext_2; + /**< Count of software extended descriptors for second radio. */ + uint32_t num_tx_desc_3; + /**< Count of the software descriptors for third radio. */ + uint32_t num_tx_desc_ext_3; + /**< Count of software extended descriptors for third radio. */ + uint32_t num_tx_device_limit; + /**< Count of software Tx descriptors for the device. */ +}; + +/** + * nss_wifili_rx_init_param + * Rx initialization parameters. + */ +struct nss_wifili_rx_init_param { + uint16_t tlv_size; /**< Size of Rx TLV structure. */ + uint16_t rx_buf_len; /**< Rx buffer length programmed to hardware. */ +}; + +/** + * nss_wifili_init_msg + * Wifili SoC initialization message. + */ +struct nss_wifili_init_msg { + struct nss_wifili_hal_srng_soc_msg hssm; + uint8_t num_tcl_data_rings; + /**< Number of Transmit Classifier data rings. */ + uint8_t num_reo_dest_rings; + /**< Number of Rx reorder rings. */ + uint8_t flags; + /**< Flags for SoC initialization */ + uint8_t soc_mem_profile; + /**< SoC memory profile (256M/512M/1G). */ + struct nss_wifili_hal_srng_info tcl_ring_info[NSS_WIFILI_MAX_TCL_DATA_RINGS_MSG]; + /**< Transmit Classifier data ring configuration information. */ + struct nss_wifili_hal_srng_info tx_comp_ring[NSS_WIFILI_MAX_TCL_DATA_RINGS_MSG]; + /**< Tx completion ring configuration information. */ + struct nss_wifili_hal_srng_info reo_dest_ring[NSS_WIFILI_MAX_REO_DATA_RINGS_MSG]; + /**< Rx reorder destination ring configuration information. */ + struct nss_wifili_hal_srng_info reo_exception_ring; + /**< Rx reorder exception ring configuration information. */ + struct nss_wifili_hal_srng_info rx_rel_ring; + /**< Wireless Buffer Manager release ring configuration information. */ + struct nss_wifili_hal_srng_info reo_reinject_ring; + /**< Reinject ring configuration information. 
*/ + struct nss_wifili_tx_desc_init_msg wtdim; + /**< Tx descriptor initialization message. */ + uint32_t target_type; + /**< Target type based on SoC. */ + struct nss_wifili_rx_init_param wrip; + /**< Rx parameters to initialize Rx context. */ + struct nss_wifili_tx_desc_addtnl_mem_msg wtdam; + /**< Tx descriptor additional memory message. */ + uint32_t tx_sw_internode_queue_size; + /**< Tx software internode queue size. */ +}; + +/** + * nss_wifili_pdev_deinit_msg + * Wifili pdev deinit message. + */ +struct nss_wifili_pdev_deinit_msg { + uint32_t ifnum; /**< NSS interface number of pdev. */ +}; + +/** + * nss_wifili_pdev_init_msg + * Wifili pdev initialization message. + */ +struct nss_wifili_pdev_init_msg { + struct nss_wifili_hal_srng_info rxdma_ring; + /**< MAC (Media Access Control) ring configuration. */ + uint32_t radio_id; + /**< MAC radio ID. */ + uint32_t hwmode; + /**< MAC hardware mode. */ + uint32_t lmac_id; + /**< Lower MAC ID. */ + uint32_t num_rx_swdesc; + /**< Number of descriptors per Rx pool. */ + uint32_t target_pdev_id; + /**< Target physical device ID. */ + uint8_t scheme_id; + /**< Radio scheme ID. */ + uint8_t reserved[3]; + /**< Padding for alignment. */ +}; + +/** + * nss_wifili_peer_ast_flowid_map_msg + * Wifili peer AST flow ID map message. + */ +struct nss_wifili_peer_ast_flowid_map_msg { + uint8_t peer_mac_addr[ETH_ALEN]; + /**< Peer MAC address. */ + uint16_t vdev_id; + /**< VAP ID. */ + uint16_t ast_idx[NSS_WIFILI_PEER_AST_FLOWQ_MAX]; + /**< Address search table index. */ + uint8_t tid_valid_mask[NSS_WIFILI_PEER_AST_FLOWQ_MAX]; + /**< TID valid mask for a flow. */ + uint8_t is_valid[NSS_WIFILI_PEER_AST_FLOWQ_MAX]; + /**< Valid bit. */ + uint8_t flowQ[NSS_WIFILI_PEER_AST_FLOWQ_MAX]; + /**< Flow queue. */ + uint16_t peer_id; + /**< Peer ID. */ + uint8_t reserved[2]; + /**< Padding for alignment. */ +}; + +/** + * nss_wifili_peer_ast + * Wifili peer creation message. 
+ */ +struct nss_wifili_peer_msg { + uint8_t peer_mac_addr[6]; + /**< Peer MAC address. */ + uint16_t vdev_id; + /**< VAP ID. */ + uint16_t peer_id; + /**< Peer ID. */ + uint16_t hw_ast_idx; + /**< Hardware address search table index. */ + uint8_t is_nawds; + /**< NAWDS enabled for peer. */ + uint8_t pext_stats_valid; + /**< Peer extended statistics valid. */ + uint16_t psta_vdev_id; + /**< Proxy station VAP ID. */ + uint32_t nss_peer_mem; + /**< Holds peer memory adderss for NSS. */ + uint32_t tx_ast_hash; + /**< AST hash to be used during packet transmission. */ + uint32_t pext_stats_mem; + /**< Peer extended statistics memory. */ + uint32_t flags; + /**< Peer flags. */ +}; + +/** + * nss_wifili_peer_freelist_append_msg + * Peer memory request. + */ +struct nss_wifili_peer_freelist_append_msg { + uint32_t addr; + /**< Starting address of peer_freelist pool. */ + uint32_t length; + /**< Length of peer freelist pool. */ + uint32_t num_peers; + /**< Maximum number of peer entries supported in pool. */ +}; + +/** + * nss_wifili_wds_extn_peer_cfg_msg + * Configuration information when the WDS vendor extension is enabled. + */ +struct nss_wifili_wds_extn_peer_cfg_msg { + uint8_t peer_mac_addr[ETH_ALEN]; /**< Peer MAC address. */ + uint8_t wds_flags; /**< WDS flags populated from the host. */ + uint8_t reserved; /**< Alignment padding. */ + uint16_t peer_id; /**< Peer ID. */ +}; + +/** + * nss_wifili_tx_stats + * Tx statistics. + */ +struct nss_wifili_tx_stats { + uint32_t tx_enqueue_dropped; + /**< Tx enqueue drop count. */ + uint32_t tx_enqueue_cnt; + /**< Tx enqueue succesful count. */ + uint32_t tx_dequeue_cnt; + /**< Tx dequeue count. */ + uint32_t tx_send_fail_cnt; + /**< Hardware send failure count. */ + uint32_t inv_peer; + /**< Invalid peer enqueue count. */ + uint32_t inv_peer_drop_byte_cnt; + /**< Invalid peer drop byte count. */ + uint32_t tx_input_pkt; + /**< Tx packets ready to sent. */ + uint32_t tx_processed_pkt; + /**< Tx numner of packets sent. 
*/ + uint32_t tx_processed_bytes; + /**< Tx number of bytes processed. */ +}; + +/** + * nss_wifili_rx_stats + * Rx statistics. + */ +struct nss_wifili_rx_stats { + uint32_t rx_msdu_err; + /**< Rx msdu error count. */ + uint32_t rx_inv_peer; + /**< Rx invalid peer count. */ + uint32_t rx_scatter_inv_peer; + /**< Rx scatter invalid peer count. */ + uint32_t rx_wds_learn_send; + /**< WDS source port learn packet. */ + uint32_t rx_wds_learn_send_fail; + /**< WDS source port learn exception send failure count. */ + uint32_t rx_send_dropped; + /**< Rx send dropped count. */ + uint32_t rx_deliver_cnt; + /**< Rx deliver count to next node. */ + uint32_t rx_deliver_cnt_fail; + /**< Rx deliver count failure. */ + uint32_t rx_intra_bss_ucast_send; + /**< Intra-BSS unicast sent count. */ + uint32_t rx_intra_bss_ucast_send_fail; + /**< Intra-BSS unicast send failure count. */ + uint32_t rx_intra_bss_mcast_send; + /**< Intra-BSS multicast send count. */ + uint32_t rx_intra_bss_mcast_send_fail; + /**< Intra-BSS multicast send failure count. */ + uint32_t rx_sg_recv_send; + /**< Rx scatter-gather receive send count. */ + uint32_t rx_sg_recv_fail; + /**< Rx scatter-gather receive failure count. */ + uint32_t rx_me_pkts; /**< Rx multicast echo packets count. */ + uint32_t rx_inv_tid; /**< Rx invalid TID. */ + + /* + * TODO: Move per tid based. + */ + uint32_t rx_frag_inv_sc; /**< Rx invalid frame sequence control. */ + uint32_t rx_frag_inv_fc; /**< Rx invalid frame control count. */ + uint32_t rx_non_frag_err; /**< Rx non-fragment received in fragmention. */ + uint32_t rx_repeat_fragno; /**< Rx fragment retry counters. */ + uint32_t rx_ooo_frag; /**< Rx out-of-order fragments count. */ + uint32_t rx_ooo_frag_seq; /**< Rx out-of-order sequence count. */ + uint32_t rx_all_frag_rcv; /**< Rx all fragments received count. */ + uint32_t rx_frag_deliver; /**< Rx fragment deliver counters. */ +}; + +/** + * nss_wifili_tx_tcl_ring_stats + * Transmit Classifier ring specific statistics. 
+ */ +struct nss_wifili_tx_tcl_ring_stats { + uint32_t tcl_no_hw_desc; /**< Number of Transmit Classifier hardware descriptors. */ + uint32_t tcl_ring_full; /**< Number of times Transmit Classifier ring full. */ + uint32_t tcl_ring_sent; /**< Total number of ring sent. */ +}; + +/** + * nss_wifili_tx_comp_ring_stats + * Tx completion ring statistics. + */ +struct nss_wifili_tx_comp_ring_stats { + uint32_t invalid_bufsrc; /**< Tx comp (Completion) ring descriptor invalid buffer source. */ + uint32_t invalid_cookie; /**< Tx comletion ring descriptor has invalid cookies. */ + uint32_t hw_ring_empty; /**< Tx completion hardware ring empty. */ + uint32_t ring_reaped; /**< Tx completion successfull ring reaped. */ +}; + +/** + * nss_wifili_tx_sw_pool_stats + * Tx completion sw statistics. + */ +struct nss_wifili_tx_sw_pool_stats { + uint32_t desc_alloc; /**< Tx descriptor software pool descriptor in use. */ + uint32_t desc_alloc_fail; /**< Tx descriptor software pool allocation failure . */ + uint32_t desc_already_allocated; /**< Tx descriptor re-allocation for allocated descriptor. */ + uint32_t desc_invalid_free; /**< Tx descriptor freeing of allocated descriptor. */ + uint32_t tx_rel_src_fw; /**< Tx descriptor source is firmware. */ + uint32_t tx_rel_ext_desc; /**< Tx descriptor scatter-gather. */ + uint32_t tx_rel_tx_desc; /**< Tx descriptor source is hardware*/ + uint32_t tx_rel_no_pb; /**< Tx descriptor has pbuf present. */ + uint32_t tx_queue_limit_drop; /**< Tx number of packets dropped because of queueing limits. */ +}; + +/** + * wifili_tx_ext_sw_pool_stats + * Tx extended descriptor pool. + */ +struct nss_wifili_tx_ext_sw_pool_stats { + uint32_t desc_alloc; /**< Tx extend (scatter gather) descriptor in use. */ + uint32_t desc_alloc_fail; /**< Tx extend descriptor allocation failure. */ + uint32_t desc_already_allocated; /**< Tx extend descriptor already allocated. */ + uint32_t desc_invalid_free; /**< Tx descriptor invalid source. 
*/ + +}; + +/** + * nss_wifili_rx_wbm_ring_stats + * WBM (Wireless Buffer Manager) release ring statistics. + */ +struct nss_wifili_rx_wbm_ring_stats { + uint32_t invalid_buf_mgr; /**< Invalid buffer manager. */ + uint32_t err_src_rxdma; /**< Wireless Buffer Manager source is Rx DMA ring. */ + uint32_t err_src_rxdma_code_inv; /**< Wireless Buffer Manager source DMA reason unknown. */ + uint32_t err_src_reo; /**< Wireless Buffer Manager source is receive reorder ring. */ + uint32_t err_src_reo_code_nullq; /**< Wireless Buffer Manager source receive reorder ring because of NULL TLV. */ + uint32_t err_src_reo_code_inv; /**< Wireless Buffer Manager source receive reorder ring reason unknown. */ + uint32_t err_src_invalid; /**< Wireless Buffer Manager source is unknown. */ + uint32_t err_reo_codes[NSS_WIFILI_REO_CODE_MAX]; + /**< Receive reoder error codes. */ + uint32_t err_dma_codes[NSS_WIFILI_DMA_CODE_MAX]; + /**< DMA error codes. */ + uint32_t err_internal_codes[NSS_WIFILI_WBM_INTERNAL_ERR_MAX]; + /**< Wireless Buffer Manager error codes. */ +}; + +/** + * nss_wifili_rx_reo_ring_stats + * Rx reorder error statistics. + */ +struct nss_wifili_rx_reo_ring_stats { + uint32_t ring_error; /**< Rx reorder ring error. */ + uint32_t ring_reaped; /**< Number of ring descriptor reaped. */ + uint32_t invalid_cookie; /**< Number of invalid cookie. */ + uint32_t defrag_reaped; /**< Rx defragment receive count. */ +}; + +/** + * nss_wifili_rx sw_pool_stats + * Wifili DMA sw pool statistics. + */ +struct nss_wifili_rx_sw_pool_stats { + uint32_t rx_no_pb; /**< Rx software descriptor number of buffer available. */ + uint32_t desc_alloc; /**< Number of descriptor in use. */ + uint32_t desc_alloc_fail; /**< Number of descriptor allocation failure. */ +}; + +/** + * nss_wifili_rx_dma_ring_stats + * Wifili Rx DMA ring statistics. + */ +struct nss_wifili_rx_dma_ring_stats { + uint32_t rx_hw_desc_unavailable; /**< Number of times hardware descriptor is unavailable. 
*/ + uint32_t rx_buf_replenished; /**< Number of buffers replenished. */ +}; + +/** + * nss_wifili_dbdc_mode_stats + * Wifili DBDC mode statistics. + */ +struct nss_wifili_dbdc_mode_stats { + uint32_t dbdc_flush_ast_failed; + /**< Number of times DBDC AST flush message send has failed. */ + uint32_t dbdc_drop_rx_secmcast; + /**< Number of packets dropped in DBDC Rx for secondary multicast. */ + uint32_t dbdc_drop_tx_secmcast; + /**< Number of packets dropped in DBDC Tx for secondary multicast. */ + uint32_t dbdc_drop_rx_alwaysprimary; + /**< Number of packets dropped in DBDC Rx for always primary. */ + uint32_t dbdc_drop_tx_alwaysprimary; + /**< Number of packets dropped in DBDC Tx for always primary. */ + uint32_t dbdc_drop_loop_rx; + /**< Number of packets dropped in DBDC Rx for DBDC loop. */ + uint32_t dbdc_drop_loop_tx; + /**< Number of packets dropped in DBDC Tx for DBDC loop. */ +}; + +/** + * nss_wifili_delay_stats + * Wifili delay statistics. + */ +struct nss_wifili_delay_stats { + uint32_t delay_bucket[NSS_WIFILI_DELAY_INDEX_MAX]; + /**< Delay buckets for histogram. */ + uint32_t min_delay; + /**< Minimum delay. */ + uint32_t avg_delay; + /**< Average delay. */ + uint32_t max_delay; + /**< Maximum delay. */ +}; + +/** + * nss_wifili_v3_delay_per_tid_stats + * Wifili version 3 delay per TID statistics. + */ +struct nss_wifili_v3_delay_per_tid_stats { + struct nss_wifili_delay_stats swq_delay; + /**< Software enqueue delay. */ + struct nss_wifili_delay_stats hwtx_delay; + /**< Hardware transmit delay. */ + struct nss_wifili_delay_stats tx_intfrm_delay; + /**< Transmit interframe delay at radio entry. */ + struct nss_wifili_delay_stats rx_intfrm_delay; + /**< Receive interframe delay. */ +}; + +/** + * nss_wifili_v3_per_tid_tx_rx_stats + * Wifili version 3 Tx and Rx statistics per TID. + */ +struct nss_wifili_v3_tx_rx_per_tid_stats { + uint32_t radio_ingress_enq_drop_cnt; + /**< Ingress enqueue drop count. 
*/ + uint32_t transmit_succes_cnt; + /**< Total successful transmit count. */ + uint32_t transmit_fwdrop_cnt; + /**< Firmware drop count. */ + uint32_t transmit_hwdrop_cnt; + /**< Hardware drop count. */ + uint32_t transmit_desc_fail_cnt; + /**< Transmit descriptor fail count. */ + uint32_t transmit_complete_cnt; + /**< Total transmit count. */ + uint32_t rx_delivered_cnt; + /**< Total Rx packets delivered to next node. */ + uint32_t rx_deliver_fail_cnt; + /**< Rx deliver fail count. */ + uint32_t rx_intrabss_cnt; + /**< Intra-BSS Rx count. */ + uint32_t rx_intrabss_fail_cnt; + /**< Intra-BSS Rx fail count. */ + uint32_t num_msdu_recived; + /**< Number of MSDU received from hardware. */ + uint32_t num_mcast_msdu_recived; + /**< Number of broadcast MSDU received. */ + uint32_t num_bcast_msdu_recived; + /**< Number of multicast MSDU received. */ + uint32_t transmit_tqm_status_cnt[NSS_WIFILI_TQM_STATUS_MAX]; + /**< Number of frames with this TQM completion status. */ + uint32_t transmit_htt_status_cnt[NSS_WIFILI_HTT_STATUS_MAX]; + /**< Number of frames with this HTT completion status. */ +}; + +/** + * nss_wifili_v3_tx_rx_per_ac_stats + * Wifili version 3 Tx and Rx statistics per AC. + */ +struct nss_wifili_v3_tx_rx_per_ac_stats { + uint32_t radio_ingress_enq_cnt; + /**< Ingress enqueue packet count. */ + uint32_t radio_ingress_deq_cnt; + /**< Ingress dequeue count. */ + uint32_t transmit_enq_cnt; + /**< Transmit enqueue count. */ +}; + +/** + * nss_wifili_radio_tx_rx_stats_v3 + * Wifili version 3 radio Tx and Rx statistics. + */ +struct nss_wifili_radio_tx_rx_stats_v3 { + struct nss_wifili_v3_tx_rx_per_tid_stats tid_stats[NSS_WIFILI_MAX_TID]; + /**< Per-TID Tx and Rx statistics. */ + struct nss_wifili_v3_tx_rx_per_ac_stats ac_stats[NSS_WIFILI_WME_AC_MAX]; + /**< Per-Access Category Tx and Rx statistics. */ +}; + +/** + * nss_wifili_radio_delay_stats_v3 + * Wifili version 3 radio delay statistics. 
+ */ +struct nss_wifili_radio_delay_stats_v3 { + struct nss_wifili_v3_delay_per_tid_stats v3_delay_stats[NSS_WIFILI_MAX_TID]; + /**< Per-TID delay statistics. */ +}; + +/** + * nss_wifili_pdev_v3_tx_rx_stats_sync_msg + * Wifili message to synchronize version 3 Tx and Rx statistics to HLOS. + */ +struct nss_wifili_pdev_v3_tx_rx_stats_sync_msg { + uint32_t radio_id; + /**< Radio ID. */ + struct nss_wifili_radio_tx_rx_stats_v3 wlpv3_txrx_stats; + /**< Wifli version 3 Tx and Rx statistics. */ +}; + +/** + * nss_wifili_pdev_v3_delay_stats_sync_msg + * Wifili message to synchronize version 3 delay statistics to HLOS. + */ +struct nss_wifili_pdev_v3_delay_stats_sync_msg { + uint32_t radio_id; + /**< Radio ID. */ + struct nss_wifili_radio_delay_stats_v3 wlpv3_delay_stats; + /**< Wifli version 3 delay statistics. */ +}; + +/** + * nss_wifili_device_stats + * Wifili specific statistics. + */ +struct nss_wifili_device_stats { + struct nss_wifili_tx_tcl_ring_stats tcl_stats[NSS_WIFILI_MAX_TCL_DATA_RINGS_MSG]; + /**< Transmit Classifier ring statistics. */ + struct nss_wifili_tx_comp_ring_stats txcomp_stats[NSS_WIFILI_MAX_TCL_DATA_RINGS_MSG]; + /**< Tx completion ring statistics. */ + struct nss_wifili_tx_sw_pool_stats tx_sw_pool_stats[NSS_WIFILI_MAX_TXDESC_POOLS_MSG]; + /**< Tx software pool statistics. */ + struct nss_wifili_tx_ext_sw_pool_stats tx_ext_sw_pool_stats[NSS_WIFILI_MAX_TX_EXT_DESC_POOLS_MSG]; + /**< Tx extended software pool statistics. */ + struct nss_wifili_tx_stats tx_data_stats[NSS_WIFILI_MAX_PDEV_NUM_MSG]; + /**< Tx data statistics for each pdev. */ + struct nss_wifili_rx_reo_ring_stats rxreo_stats[NSS_WIFILI_MAX_REO_DATA_RINGS_MSG]; + /**< Rx reorder ring statistics. */ + struct nss_wifili_rx_sw_pool_stats rx_sw_pool_stats[NSS_WIFILI_MAX_PDEV_NUM_MSG]; + /**< Rx DMA software pool statistics. */ + struct nss_wifili_rx_stats rx_data_stats[NSS_WIFILI_MAX_PDEV_NUM_MSG]; + /**< Rx data statistics for each pdev. 
*/ + struct nss_wifili_rx_dma_ring_stats rxdma_stats[NSS_WIFILI_MAX_PDEV_NUM_MSG]; + /**< Rx DMA ring statistics. */ + struct nss_wifili_rx_wbm_ring_stats rxwbm_stats; + /**< Wireless Buffer Manager ring statistics. */ + struct nss_wifili_dbdc_mode_stats dbdc_stats; + /**< DBDC mode statistics. */ +}; + +/** + * nss_wifili_stats_sync_msg + * Wifili SoC statistics synchronization message. + */ +struct nss_wifili_stats_sync_msg { + struct nss_wifili_device_stats stats; + /**< Device statistics. */ +}; + +/** + * nss_wifili_soc_linkdesc_per_packet_metadata + * Link descriptor per packet metadata. + */ +struct nss_wifili_soc_linkdesc_per_packet_metadata +{ + uint32_t desc_addr; /**< Link descriptor address. */ +}; + +/** + * nss_wifili_soc_per_packet_metadata + * Per packet special data that has to be sent to host. + */ +struct nss_wifili_soc_per_packet_metadata { + uint16_t pkt_type; /**< Packet type. */ + uint8_t pool_id; /**< Pool ID of invalid peer packets. */ + uint8_t reserved; /**< Alignment padding. */ + + /** + * Link descriptor per packet metadata. + */ + union { + struct nss_wifili_soc_linkdesc_per_packet_metadata linkdesc_metadata; + } metadata; /**< Per packet link descriptor metadata. */ +}; + +/** + * nss_wifili_tx_dropped + * Tx peer dropped packets. + */ +struct nss_wifili_tx_dropped { + uint32_t drop_stats[NSS_WIFILI_TQM_RR_MAX]; /**< Discarded by firmware. */ + uint32_t tx_nawds_mcast_drop_cnt; /**< Total number of NAWDS multicast packets dropped. */ +}; + +/** + * nss_wifili_tx_ctrl_stats + * Tx peer statistics. + */ +struct nss_wifili_tx_ctrl_stats { + uint32_t ofdma; /**< Number of orthogonal frequency-division multiple + access packets. */ + uint32_t non_amsdu_cnt; /**< Number of MSDUs with no MSDU level aggregation. */ + uint32_t amsdu_cnt; /**< Number of MSDUs part of AMSDU. */ + uint32_t tx_mcast_cnt; /**< Total number of multicast packets sent. */ + uint32_t tx_mcast_bytes; /**< Total number of multicast bytes sent. 
*/ + uint32_t tx_ucast_cnt; /**< Total number of unicast packets sent. */ + uint32_t tx_ucast_bytes; /**< Total number of unicast bytes sent. */ + uint32_t tx_bcast_bytes; /**< Total number of broadcast bytes sent. */ + uint32_t tx_bcast_cnt; /**< Total number of broadcast packets sent. */ + struct nss_wifili_tx_dropped dropped; /**< Tx peer dropped. */ + uint32_t tx_success_cnt; /**< Total number of packets sent successfully. */ + uint32_t tx_success_bytes; /**< Total number of bytes sent successfully. */ + uint32_t tx_nawds_mcast_cnt; /**< Total number of NAWDS multicast packets sent. */ + uint32_t tx_nawds_mcast_bytes; /**< Total number of NAWDS multicast bytes sent. */ + uint32_t retries; /**< Total number of retries. */ +}; + +/** + * nss_wifili_peer_rx_err + * Rx peer errors. + */ +struct nss_wifili_rx_err { + uint32_t mic_err; /**< Rx MIC errors. */ + uint32_t decrypt_err; /**< Rx Decryption errors. */ +}; + +/** + * nss_wifili_rx_ctrl_stats + * Peer Rx statistics. + */ +struct nss_wifili_rx_ctrl_stats { + struct nss_wifili_rx_err err; /**< Rx peer errors. */ + uint32_t multipass_rx_pkt_drop; /**< Total number of multipass packets without a VLAN header. */ + uint32_t peer_unauth_rx_pkt_drop; /**< Number of receive packets dropped due to an authorized peer. */ + uint32_t reserved_type[NSS_WIFILI_MAX_RESERVED_TYPE]; /**< Reserved type for future use. */ + uint32_t non_amsdu_cnt; /**< Number of MSDUs with no MSDU level aggregation. */ + uint32_t amsdu_cnt; /**< Number of MSDUs part of AMSDU. */ + uint32_t mcast_rcv_cnt; /**< Total number of multicast packets received. */ + uint32_t mcast_rcv_bytes; /**< Total number of multicast bytes received. */ + uint32_t rx_recvd; /**< Total Rx received count. */ + uint32_t rx_recvd_bytes; /**< Total Rx received count. */ + uint32_t nawds_mcast_drop; /**< Total NAWDS drop count. */ + uint32_t nawds_mcast_drop_bytes; /**< Total NAWDS drop count. */ + uint32_t rx_intra_bss_pkts_num; /**< Total Intra-BSS packets received. 
*/ + uint32_t rx_intra_bss_pkts_bytes; /**< Total Intra-BSS bytes received. */ + uint32_t rx_intra_bss_fail_num; /**< Total Intra-BSS packets failed. */ + uint32_t rx_intra_bss_fail_bytes; /**< Total Intra-BSS bytes received. */ + uint32_t bcast_rcv_cnt; /**< Total number of broadcast packets received. */ + uint32_t bcast_rcv_bytes; /**< Total number of broadcast bytes received. */ +}; + +/** + * nss_wifili_peer_ctrl_stats + * Wifili peer control statistics. + */ +struct nss_wifili_peer_ctrl_stats { + uint32_t peer_id; /**< Peer ID. */ + struct nss_wifili_tx_ctrl_stats tx; + /**< Peer Tx control statistics. */ + struct nss_wifili_rx_ctrl_stats rx; + /**< Peer Rx control statistics. */ +}; + +/** + * nss_wifili peer_stats + * Wifili peer statistics. + */ +struct nss_wifili_peer_stats { + uint32_t npeers; /**< Number of entries of peer statistics. */ + struct nss_wifili_peer_ctrl_stats wpcs[1]; + /**< Wifili peer control statistics. */ +}; + +/** + * nss_wifili_peer_stats_msg + * Wifili peer statistics message. + */ +struct nss_wifili_peer_stats_msg { + struct nss_wifili_peer_stats stats; + /**< Wifili peer statistics. */ +}; + +/** + * nss_wifili_sojourn_per_tid_stats + * Wifili sojourn per TID statistics. + */ +struct nss_wifili_sojourn_per_tid_stats { + uint32_t avg_sojourn_msdu; /**< Average per-TID of all time difference. */ + uint32_t sum_sojourn_msdu; /**< Sum per-TID of all time difference. */ + uint32_t num_msdus; /**< MSDUs per TID. */ +}; + +/** + * nss_wifili_sojourn_peer_stats + * Wifili sojourn peer statistics. + */ +struct nss_wifili_sojourn_peer_stats { + uint32_t peer_id; /**< Peer ID. **/ + struct nss_wifili_sojourn_per_tid_stats stats[NSS_WIFILI_MAX_TID]; /**< Statistics per TID. **/ +}; + +/** + * nss_wifili_sojourn_stats_msg + * Wifili sojourn statistics message. + */ +struct nss_wifili_sojourn_stats_msg { + uint32_t npeers; /**< Number of peers. */ + struct nss_wifili_sojourn_peer_stats sj_peer_stats[1]; /**< Per-peer sojourn statistics. 
*/ +}; + +/* + * nss_wifili_jitter_tid_stats + * Per TID jitter statistics. + */ +struct nss_wifili_jitter_tid_stats { + uint32_t avg_jitter; /**< Average jitter. */ + uint32_t avg_delay; /**< Average delay. */ + uint32_t avg_err; /**< Average count error. */ + uint32_t success; /**< Transmit success count. */ + uint32_t drop; /**< Transmit drop count. */ +}; + +/* + * nss_wifili_jitter_stats + * Wifili jitter statistics. + */ +struct nss_wifili_jitter_stats { + uint32_t peer_id; /**< Peer ID. */ + struct nss_wifili_jitter_tid_stats stats[NSS_WIFILI_MAX_TID]; /**< Per-TID jitter statistics. */ +}; + +/* + * nss_wifili_jitter_stats_msg + * Wifili jitter message. + */ +struct nss_wifili_jitter_stats_msg { + uint32_t npeers; /**< Number of peers. */ + struct nss_wifili_jitter_stats jitter_stats[1]; /**< Jitter statistics. */ +}; + +/** + * nss_wifili_wds_peer_msg + * Wi-Fi Wireless distribution system (WDS) peer-specific message. + */ +struct nss_wifili_wds_peer_msg { + uint8_t dest_mac[ETH_ALEN]; /**< MAC address of the destination. */ + uint8_t peer_mac[ETH_ALEN]; /**< MAC address of the base peer. */ + uint8_t ast_type; /**< AST (Address Search Table) type for this peer. */ + uint8_t pdev_id; /**< Radio ID for next hop peer. */ + uint16_t peer_id; /**< Peer ID of next hop peer. */ +}; + +/** + * nss_wifili_peer_delay_stats + * Per-peer delay statistics. + */ +struct nss_wifili_peer_delay_stats { + struct nss_wifili_delay_stats swq_delay; /**< Software enqueue delay. */ + struct nss_wifili_delay_stats hwtx_delay; /**< Hardware transmit delay. */ +}; + +/** + * nss_wifili_peer_ext_stats + * Peer extended statistics. + */ +struct nss_wifili_peer_ext_stats { + uint32_t peer_id; /**< Peer ID. */ + struct nss_wifili_peer_delay_stats delay_stats[NSS_WIFILI_MAX_TID]; + /**< Delay statistics. */ +}; + +/** + * nss_wifili_peer_ext_stats_msg + * Peer extended statistics message. + */ +struct nss_wifili_peer_ext_stats_msg { + uint32_t npeers; /**< Number of peers. 
*/ + struct nss_wifili_peer_ext_stats ext_stats[1]; /**< Extended statistics. */ +}; + +/** + * nss_wifili_stats_cfg_msg + * Wifili stats enable/disable configuration message. + */ +struct nss_wifili_stats_cfg_msg { + uint32_t cfg; /**< Enable or disable configuration. */ +}; + +/** + * nss_wifili_wds_peer_map_msg + * Wi-Fi Wireless distribution system(WDS) peer-specific message. + */ +struct nss_wifili_wds_peer_map_msg { + uint8_t dest_mac[ETH_ALEN]; /**< MAC address of the destination. */ + uint16_t peer_id; /**< Connected peer ID for this WDS peer. */ + uint16_t ast_idx; /**< AST (address search table) index for this peer in host. */ + uint16_t vdev_id;; /**< VAP ID. */ +}; + +/** + * nss_wifili_wds_active_info + * Wi-Fi WDS active information. + */ +struct nss_wifili_wds_active_info { + uint16_t ast_idx; /**< Hardware AST index. */ +}; + +/** + * nss_wifili_wds_active_info_msg + * Wi-Fi Wireless distribution system active information message. + */ +struct nss_wifili_wds_active_info_msg { + uint16_t nentries; /**< Number of WDS entries. */ + struct nss_wifili_wds_active_info info[1]; + /**< WDS active information. */ +}; + +/** + * nss_wifili_mec_ageout_info + * Wi-Fi multicast echo check ageout information. + */ +struct nss_wifili_mec_ageout_info { + uint8_t mac_addr[6]; /**< MAC address. */ + uint8_t radio_id; /**< Radio ID. */ + uint8_t pad; /**< Pad for word align structure. */ + +}; + +/** + * nss_wifili_mec_ageout_info_msg + * Wi-Fi multicast echo check ageout information message. + */ +struct nss_wifili_mec_ageout_info_msg { + uint16_t nentries; /**< Number of entries. */ + struct nss_wifili_mec_ageout_info info[1]; + /**< Multicast echo check active information. */ +}; + +/** + * nss_wifili_soc_linkdesc_buf_info_msg + * Link descriptor buffer addresss information. + */ +struct nss_wifili_soc_linkdesc_buf_info_msg { + uint32_t buffer_addr_low; /**< Link descriptor low address. */ + uint32_t buffer_addr_high; /**< Link descriptor high address. 
*/ +}; + +/** + * nss_wifili_peer_security_type_msg + * Wifili security type message. + */ +struct nss_wifili_peer_security_type_msg { + uint16_t peer_id; /**< Peer ID. */ + uint8_t pkt_type; /**< Unicast or broadcast packet type. */ + uint8_t security_type; /**< Security type. */ + uint8_t mic_key[NSS_WIFILI_MIC_KEY_LEN]; + /**< MIC key. */ +}; + +/** + * nss_wifili_peer_nawds_enable_msg + * Wifili NAWDS enable for this peer. + */ +struct nss_wifili_peer_nawds_enable_msg { + uint16_t peer_id; /**< Peer ID. */ + uint16_t is_nawds; /**< Enable NAWDS on this peer. */ +}; + +/** + * nss_wifili_peer_vlan_id_msg + * Wifili peer VLAN ID message. + */ +struct nss_wifili_peer_vlan_id_msg { + uint16_t peer_id; /**< Peer ID. */ + uint16_t vlan_id; /**< VLAN ID. */ +}; + +/** + * nss_wifili_peer_isolation_msg + * Wifili peer isolation message. + */ +struct nss_wifili_peer_isolation_msg { + uint16_t peer_id; /**< Peer ID. */ + uint16_t isolation; /**< Isolation enabled/disabled. */ +}; + +/** + * nss_wifili_dbdc_repeater_loop_detection_msg + * Wifili DBDC repeater loop detection message. + */ +struct nss_wifili_dbdc_repeater_loop_detection_msg { + bool dbdc_loop_detected; /**< DBDC repeater loop detection flag. */ +}; + +/** + * nss_wifili_dbdc_repeater_set_msg + * Wifili DBDC repeater set message. + */ +struct nss_wifili_dbdc_repeater_set_msg { + uint32_t is_dbdc_en; /**< DBDC enable flag. */ +}; + +/** + * nss_wifili_hmmc_dscp_tid_set_msg + * Wifili Hy-Fi managed multicast DSCP TID set message. + */ +struct nss_wifili_hmmc_dscp_tid_set_msg { + uint16_t radio_id; /**< Radio ID. */ + uint16_t value; /**< Hy-Fi managed multicast TID value. */ +}; + +/** + * nss_wifili_hmmc_dscp_override_set_msg + * Wifili Hy-Fi managed multicast DSCP override set message. + */ +struct nss_wifili_hmmc_dscp_override_set_msg { + uint16_t radio_id; /**< Radio ID. */ + uint16_t value; /**< Hy-Fi managed multicast DSCP override value. 
*/ +}; + +/** + * nss_wifili_reo_tidq_msg + * Rx reorder TID queue setup message. + */ +struct nss_wifili_reo_tidq_msg { + uint32_t tid; /**< TID (traffic identification) value. */ + uint16_t peer_id; /**< Peer ID. */ +}; + +/** + * nss_wifili_enable_v3_stats_msg + * Version 3 statistics enable message. + */ +struct nss_wifili_enable_v3_stats_msg { + uint32_t radio_id; /**< Radio ID. */ + uint32_t flag; /**< Flag to enable version 3 statistics. */ +}; + +/** + * nss_wifili_clr_stats_msg + * NSS firmware statistics clear message. + */ +struct nss_wifili_clr_stats_msg { + uint8_t vdev_id;; /**< VAP ID. */ +}; + +/** + * nss_wifili_update_auth_flag + * Peer authentication flag message. + */ +struct nss_wifili_peer_update_auth_flag { + uint16_t peer_id; /**< Peer ID. */ + uint8_t auth_flag; /**< Peer authentication flag. */ + uint8_t reserved; /**< Alignment padding. */ +}; + +/** + * nss_wifili_update_pdev_lmac_id_msg + * Physical device ID and lower MAC ID update message. + */ +struct nss_wifili_update_pdev_lmac_id_msg { + uint32_t pdev_id; /**< Physical device ID. */ + uint32_t lmac_id; /**< Lower MAC ID. */ + uint32_t target_pdev_id; /**< Target physical device ID. */ +}; + +/** + * nss_wifili_radio_cmd_msg + * Wi-Fi radio specific special commands. + */ +struct nss_wifili_radio_cmd_msg { + enum nss_wifili_radio_cmd cmd; + /**< Type of command message. */ + uint32_t value; /**< Value of the command. */ +}; + +/** + * nss_wifili_radio_buf_cfg_msg + * Wi-Fi Radio buffer requirement configuration. + * + * Number of payloads needed in NSS for multi-client scenarios are configured + * from Wi-Fi driver as per following ranges: + * 0-64 peers range 1. + * 64-128 peers range 2. + * 128-256 peers range 3. + * >256 peers range 4. + * Number of payloads needed in for each peer range is configured by Wi-Fi driver + * for flexibility. + */ +struct nss_wifili_radio_buf_cfg_msg { + uint32_t buf_cnt; /**< Number of buffers required. */ + uint32_t range; /**< Peer range. 
*/ +}; + +/** + * nss_wifili_radio_cfg_msg + * Wi-Fi radio specific special configurations. + */ +struct nss_wifili_radio_cfg_msg { + uint32_t radio_if_num; /**< NSS assigned interface number for radio. */ + + /** + * Wi-Fi radio specific special command message. + */ + union { + struct nss_wifili_radio_cmd_msg radiocmdmsg; + /**< Radio specific commands. */ + struct nss_wifili_radio_buf_cfg_msg radiobufcfgmsg; + /**< Radio specific buffer configurations. */ + } radiomsg; /**< Wi-Fi radio command message. */ +}; + +/** + * struct nss_wifili_peer_wds_4addr_allow_msg + * Per-peer four address configuration message. + */ +struct nss_wifili_peer_wds_4addr_allow_msg { + uint32_t peer_id; /**< Peer ID. */ + uint32_t if_num; /**< Associated virtual interface number. */ + bool enable; /**< Boolean flag to enable/disable four address frames. */ +}; + +/** + * struct nss_wifili_mesh_capability_info + * Wi-Fi mesh capability flag. + */ +struct nss_wifili_mesh_capability_info { + bool mesh_enable; /**< Wi-Fi mesh capability flag. */ +}; + +/** + * nss_wifili_msg + * Structure that describes wifili messages. + */ +struct nss_wifili_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of wifili message. + */ + union { + struct nss_wifili_init_msg init; + /**< Wi-Fi initialization data. */ + struct nss_wifili_pdev_init_msg pdevmsg; + /**< Tx initialization data. */ + struct nss_wifili_pdev_deinit_msg pdevdeinit; + /**< Tx de-initialization data. */ + struct nss_wifili_peer_msg peermsg; + /**< Peer-specific data for the physical device. */ + struct nss_wifili_peer_freelist_append_msg peer_freelist_append; + /**< Information for creating a peer freelist. */ + struct nss_wifili_stats_sync_msg wlsoc_stats; + /**< Synchronization statistics. */ + struct nss_wifili_peer_stats_msg peer_stats; + /**< Wifili peer statistics. */ + struct nss_wifili_wds_peer_msg wdspeermsg; + /**< WDS peer-specific message. 
*/ + struct nss_wifili_wds_peer_map_msg wdspeermapmsg; + /**< WDS peer-mapping specific message. */ + struct nss_wifili_wds_active_info_msg wdsinfomsg; + /**< WDS active information specific message. */ + struct nss_wifili_stats_cfg_msg scm; + /**< Wifili peer statistics configuration message. */ + struct nss_wifili_reo_tidq_msg reotidqmsg; + /**< Rx reorder TID queue setup message. */ + struct nss_wifili_radio_cfg_msg radiocfgmsg; + /**< Radio command message. */ + struct nss_wifili_wds_extn_peer_cfg_msg wpeercfg; + /**< WDS vendor configuration message. */ + struct nss_wifili_soc_linkdesc_buf_info_msg linkdescinfomsg; + /**< Link descriptor buffer address information. */ + struct nss_wifili_peer_security_type_msg securitymsg; + /**< Wifili peer security message. */ + struct nss_wifili_peer_nawds_enable_msg nawdsmsg; + /**< Wifili peer enable NAWDS message. */ + struct nss_wifili_dbdc_repeater_set_msg dbdcrptrmsg; + /**< Wifili DBDC repeater enable message. */ + struct nss_wifili_hmmc_dscp_override_set_msg shmmcdscpmsg; + /**< Wifili Hy-Fi managed multicast DSCP override set message. */ + struct nss_wifili_hmmc_dscp_tid_set_msg shmmcdcptidmsg; + /**< Wifili Hy-Fi managed multicast DSCP TID map set message. */ + struct nss_wifili_pdev_v3_tx_rx_stats_sync_msg v3_txrx_stats_msg; + /**< Wifili version 3 Tx and Rx statistics message. */ + struct nss_wifili_pdev_v3_delay_stats_sync_msg v3_delay_stats_msg; + /**< Wifili version 3 delay statistics message. */ + struct nss_wifili_enable_v3_stats_msg enablev3statsmsg; + /**< Wifili version 3 statistics enable message. */ + struct nss_wifili_sojourn_stats_msg sj_stats_msg; + /**< Wifili sojourn statistics message. */ + struct nss_wifili_peer_vlan_id_msg peervlan; + /**< Wifili peer VLAN ID message. */ + struct nss_wifili_update_pdev_lmac_id_msg update_pdev_lmac_id_msg; + /**< Wifili peer update lower MAC ID message. 
*/ + struct nss_wifili_peer_ast_flowid_map_msg peer_ast_flowid_msg; + /**< Wifili peer AST index flow ID map message. */ + struct nss_wifili_mec_ageout_info_msg mecagemsg; + /**< Multicast echo check active information specific message. */ + struct nss_wifili_jitter_stats_msg jt_stats_msg; + /** HLOS messages for bridge + */ +static void nss_bridge_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *app_data) +{ + struct nss_bridge_msg *nbm = (struct nss_bridge_msg *)ncm; + nss_bridge_msg_callback_t cb; + + BUG_ON(!nss_is_dynamic_interface(ncm->interface)); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_BRIDGE_MSG_TYPE_MAX) { + nss_warning("%px: received invalid message %d for bridge interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_bridge_msg)) { + nss_warning("%px: length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Trace Messages + */ + nss_bridge_log_rx_msg(nbm); + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Update the callback and app_data for NOTIFY messages, IPv4 sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->bridge_callback; + ncm->app_data = (nss_ptr_t)nss_ctx->nss_top->bridge_ctx; + } + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_bridge_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, nbm); +} + +/* + * nss_bridge_get_context() + */ +struct nss_ctx_instance *nss_bridge_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.bridge_handler_id]; +} +EXPORT_SYMBOL(nss_bridge_get_context); + +/* + * nss_bridge_callback() + * Callback to handle the completion of NSS->HLOS messages. 
+ */ +static void nss_bridge_callback(void *app_data, struct nss_bridge_msg *nbm) +{ + nss_bridge_msg_callback_t callback = (nss_bridge_msg_callback_t)bridge_pvt.cb; + void *data = bridge_pvt.app_data; + + bridge_pvt.response = NSS_TX_SUCCESS; + bridge_pvt.cb = NULL; + bridge_pvt.app_data = NULL; + + if (nbm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("bridge error response %d\n", nbm->cm.response); + bridge_pvt.response = nbm->cm.response; + } + + if (callback) { + callback(data, nbm); + } + complete(&bridge_pvt.complete); +} + +/* + * nss_bridge_verify_if_num() + * Verify if_num passed to us. + */ +bool nss_bridge_verify_if_num(uint32_t if_num) +{ + if (nss_is_dynamic_interface(if_num) == false) { + return false; + } + + if (nss_dynamic_interface_get_type(nss_bridge_get_context(), if_num) != NSS_DYNAMIC_INTERFACE_TYPE_BRIDGE) { + return false; + } + + return true; +} +EXPORT_SYMBOL(nss_bridge_verify_if_num); + +/* + * nss_bridge_tx_msg() + * Transmit a bridge message to NSSFW + */ +nss_tx_status_t nss_bridge_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_bridge_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Sanity check the message + */ + if (!nss_is_dynamic_interface(ncm->interface)) { + nss_warning("%px: tx request for interface that is not a bridge: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_BRIDGE_MSG_TYPE_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Trace Messages + */ + nss_bridge_log_tx_msg(msg); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_bridge_tx_msg); + +/* + * nss_bridge_tx_msg_sync() + * Transmit a bridge message to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_bridge_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_bridge_msg *nbm) +{ + nss_tx_status_t status; + int ret = 0; + + down(&bridge_pvt.sem); + bridge_pvt.cb = (void *)nbm->cm.cb; + bridge_pvt.app_data = (void *)nbm->cm.app_data; + + nbm->cm.cb = (nss_ptr_t)nss_bridge_callback; + nbm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_bridge_tx_msg(nss_ctx, nbm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: bridge_tx_msg failed\n", nss_ctx); + up(&bridge_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&bridge_pvt.complete, msecs_to_jiffies(NSS_BRIDGE_TX_TIMEOUT)); + + if (!ret) { + nss_warning("%px: bridge msg tx failed due to timeout\n", nss_ctx); + bridge_pvt.response = NSS_TX_FAILURE; + } + + status = bridge_pvt.response; + up(&bridge_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_bridge_tx_msg_sync); + +/* + * nss_bridge_msg_init() + * Initialize nss_bridge_msg. + */ +void nss_bridge_msg_init(struct nss_bridge_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_bridge_msg_init); + +/* + * nss_bridge_tx_vsi_assign_msg + * API to send vsi assign message to NSS FW + */ +nss_tx_status_t nss_bridge_tx_vsi_assign_msg(uint32_t if_num, uint32_t vsi) +{ + struct nss_ctx_instance *nss_ctx = nss_bridge_get_context(); + struct nss_bridge_msg nbm; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_bridge_verify_if_num(if_num) == false) { + nss_warning("%px: invalid interface %d", nss_ctx, if_num); + return NSS_TX_FAILURE; + } + + nss_bridge_msg_init(&nbm, if_num, NSS_IF_VSI_ASSIGN, + sizeof(struct nss_if_vsi_assign), NULL, NULL); + + nbm.msg.if_msg.vsi_assign.vsi = vsi; + + return nss_bridge_tx_msg_sync(nss_ctx, &nbm); +} +EXPORT_SYMBOL(nss_bridge_tx_vsi_assign_msg); + +/* + * nss_bridge_tx_vsi_unassign_msg + * API to send vsi unassign message 
to NSS FW + */ +nss_tx_status_t nss_bridge_tx_vsi_unassign_msg(uint32_t if_num, uint32_t vsi) +{ + struct nss_ctx_instance *nss_ctx = nss_bridge_get_context(); + struct nss_bridge_msg nbm; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_bridge_verify_if_num(if_num) == false) { + nss_warning("%px: invalid interface %d", nss_ctx, if_num); + return NSS_TX_FAILURE; + } + + nss_bridge_msg_init(&nbm, if_num, NSS_IF_VSI_UNASSIGN, + sizeof(struct nss_if_vsi_unassign), NULL, NULL); + + nbm.msg.if_msg.vsi_unassign.vsi = vsi; + + return nss_bridge_tx_msg_sync(nss_ctx, &nbm); +} +EXPORT_SYMBOL(nss_bridge_tx_vsi_unassign_msg); + +/* + * nss_bridge_tx_change_mtu_msg + * API to send change mtu message to NSS FW + */ +nss_tx_status_t nss_bridge_tx_set_mtu_msg(uint32_t bridge_if_num, uint32_t mtu) +{ + struct nss_ctx_instance *nss_ctx = nss_bridge_get_context(); + struct nss_bridge_msg nbm; + struct nss_if_mtu_change *nimc; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_bridge_verify_if_num(bridge_if_num) == false) { + nss_warning("%px: received invalid interface %d", nss_ctx, bridge_if_num); + return NSS_TX_FAILURE; + } + + nss_bridge_msg_init(&nbm, bridge_if_num, NSS_IF_MTU_CHANGE, + sizeof(struct nss_if_mtu_change), NULL, NULL); + + nimc = &nbm.msg.if_msg.mtu_change; + nimc->min_buf_size = (uint16_t)mtu; + + return nss_bridge_tx_msg_sync(nss_ctx, &nbm); +} +EXPORT_SYMBOL(nss_bridge_tx_set_mtu_msg); + +/* + * nss_bridge_tx_set_mac_addr_msg + * API to send change mac addr message to NSS FW + */ +nss_tx_status_t nss_bridge_tx_set_mac_addr_msg(uint32_t bridge_if_num, uint8_t *addr) +{ + struct nss_ctx_instance *nss_ctx = nss_bridge_get_context(); + struct nss_bridge_msg nbm; + struct nss_if_mac_address_set *nmas; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_bridge_verify_if_num(bridge_if_num) == false) { + 
nss_warning("%px: received invalid interface %d", nss_ctx, bridge_if_num); + return NSS_TX_FAILURE; + } + + nss_bridge_msg_init(&nbm, bridge_if_num, NSS_IF_MAC_ADDR_SET, + sizeof(struct nss_if_mac_address_set), NULL, NULL); + + nmas = &nbm.msg.if_msg.mac_address_set; + memcpy(nmas->mac_addr, addr, ETH_ALEN); + return nss_bridge_tx_msg_sync(nss_ctx, &nbm); +} +EXPORT_SYMBOL(nss_bridge_tx_set_mac_addr_msg); + +/* + * nss_bridge_tx_join_msg + * API to send slave join message to NSS FW + */ +nss_tx_status_t nss_bridge_tx_join_msg(uint32_t bridge_if_num, struct net_device *netdev) +{ + struct nss_ctx_instance *nss_ctx = nss_bridge_get_context(); + struct nss_bridge_msg nbm; + int32_t slave_if_num; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_bridge_verify_if_num(bridge_if_num) == false) { + nss_warning("%px: received invalid interface %d\n", nss_ctx, bridge_if_num); + return NSS_TX_FAILURE; + } + + slave_if_num = nss_cmn_get_interface_number_by_dev(netdev); + if (slave_if_num < 0) { + nss_warning("%px: invalid slave device %px\n", nss_ctx, netdev); + return NSS_TX_FAILURE; + } + + nbm.msg.br_join.if_num = slave_if_num; + nss_bridge_msg_init(&nbm, bridge_if_num, NSS_BRIDGE_MSG_JOIN, + sizeof(struct nss_bridge_join_msg), NULL, NULL); + + return nss_bridge_tx_msg_sync(nss_ctx, &nbm); +} +EXPORT_SYMBOL(nss_bridge_tx_join_msg); + +/* + * nss_bridge_tx_leave_msg + * API to send slave leave message to NSS FW + */ +nss_tx_status_t nss_bridge_tx_leave_msg(uint32_t bridge_if_num, struct net_device *netdev) +{ + struct nss_ctx_instance *nss_ctx = nss_bridge_get_context(); + struct nss_bridge_msg nbm; + int32_t slave_if_num; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_bridge_verify_if_num(bridge_if_num) == false) { + nss_warning("%px: received invalid interface %d\n", nss_ctx, bridge_if_num); + return NSS_TX_FAILURE; + } + + slave_if_num = 
nss_cmn_get_interface_number_by_dev(netdev); + if (slave_if_num < 0) { + nss_warning("%px: invalid slave device %px\n", nss_ctx, netdev); + return NSS_TX_FAILURE; + } + + nbm.msg.br_leave.if_num = slave_if_num; + nss_bridge_msg_init(&nbm, bridge_if_num, NSS_BRIDGE_MSG_LEAVE, + sizeof(struct nss_bridge_leave_msg), NULL, NULL); + + return nss_bridge_tx_msg_sync(nss_ctx, &nbm); +} +EXPORT_SYMBOL(nss_bridge_tx_leave_msg); + +/* + * nss_bridge_tx_set_fdb_learn_msg + * API to send FDB learn message to NSS FW + */ +nss_tx_status_t nss_bridge_tx_set_fdb_learn_msg(uint32_t bridge_if_num, enum nss_bridge_fdb_learn_mode fdb_learn) +{ + struct nss_ctx_instance *nss_ctx = nss_bridge_get_context(); + struct nss_bridge_msg nbm; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_bridge_verify_if_num(bridge_if_num) == false) { + nss_warning("%px: received invalid interface %d\n", nss_ctx, bridge_if_num); + return NSS_TX_FAILURE; + } + + if (fdb_learn >= NSS_BRIDGE_FDB_LEARN_MODE_MAX) { + nss_warning("%px: received invalid fdb learn mode %d\n", nss_ctx, fdb_learn); + return NSS_TX_FAILURE; + } + + nss_bridge_msg_init(&nbm, bridge_if_num, NSS_BRIDGE_MSG_SET_FDB_LEARN, + sizeof(struct nss_bridge_set_fdb_learn_msg), NULL, NULL); + + nbm.msg.fdb_learn.mode = fdb_learn; + + return nss_bridge_tx_msg_sync(nss_ctx, &nbm); +} +EXPORT_SYMBOL(nss_bridge_tx_set_fdb_learn_msg); + +/* + * nss_bridge_init() + */ +void nss_bridge_init(void) +{ + sema_init(&bridge_pvt.sem, 1); + init_completion(&bridge_pvt.complete); +} + +/* + * nss_bridge_unregister() + */ +void nss_bridge_unregister(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_bridge_get_context(); + + nss_assert(nss_bridge_verify_if_num(if_num)); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_top_main.bridge_callback = NULL; + + nss_core_unregister_handler(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_bridge_unregister); + +/* + * nss_bridge_register() + */ +struct 
nss_ctx_instance *nss_bridge_register(uint32_t if_num, struct net_device *netdev, + nss_bridge_callback_t bridge_data_cb, + nss_bridge_msg_callback_t bridge_msg_cb, + uint32_t features, + void *app_data) +{ + struct nss_ctx_instance *nss_ctx = nss_bridge_get_context(); + + nss_assert(nss_bridge_verify_if_num(if_num)); + + nss_core_register_subsys_dp(nss_ctx, if_num, bridge_data_cb, NULL, app_data, netdev, features); + + nss_top_main.bridge_callback = bridge_msg_cb; + + nss_core_register_handler(nss_ctx, if_num, nss_bridge_handler, app_data); + return nss_ctx; +} +EXPORT_SYMBOL(nss_bridge_register); + +/* + * nss_bridge_notify_register() + * Register to receive bridge notify messages. + */ +struct nss_ctx_instance *nss_bridge_notify_register(nss_bridge_msg_callback_t cb, void *app_data) +{ + nss_top_main.bridge_callback = cb; + nss_top_main.bridge_ctx = app_data; + return nss_bridge_get_context(); +} +EXPORT_SYMBOL(nss_bridge_notify_register); + +/* + * nss_bridge_notify_unregister() + * Unregister to receive bridge notify messages. + */ +void nss_bridge_notify_unregister(void) +{ + nss_top_main.bridge_callback = NULL; +} +EXPORT_SYMBOL(nss_bridge_notify_unregister); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_bridge_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_bridge_log.c new file mode 100644 index 000000000..3b0cf1e39 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_bridge_log.c @@ -0,0 +1,135 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_bridge_log.c + * NSS Bridge logger file. + */ + +#include "nss_core.h" + +/* + * nss_bridge_log_message_types_str + * NSS Bridge message strings + */ +static int8_t *nss_bridge_log_message_types_str[NSS_BRIDGE_MSG_TYPE_MAX] __maybe_unused = { + "Bridge Join message", + "Bridge Leave message", + "Bridge Set FDB Learn message" +}; + +/* + * nss_bridge_join_msg() + * Log NSS Bridge Join message. + */ +static void nss_bridge_join_msg(struct nss_bridge_msg *nbm) +{ + struct nss_bridge_join_msg *nbjm __maybe_unused = &nbm->msg.br_join; + nss_trace("%px: NSS Bridge Join message \n" + "Interface Number: %d\n", + nbm, nbjm->if_num); +} + +/* + * nss_bridge_leave_msg() + * Log NSS Bridge Leave message. + */ +static void nss_bridge_leave_msg(struct nss_bridge_msg *nbm) +{ + struct nss_bridge_leave_msg *nblm __maybe_unused = &nbm->msg.br_leave; + nss_trace("%px: NSS Bridge Leave message: \n" + "Interface Number: %d\n", + nbm, nblm->if_num); +} + +/* + * nss_bridge_fdb_learn_msg() + * Log NSS Set Bridge FDB Learn message. + */ +static void nss_bridge_fdb_learn_msg(struct nss_bridge_msg *nbm) +{ + struct nss_bridge_set_fdb_learn_msg *nbflm __maybe_unused = + &nbm->msg.fdb_learn; + nss_trace("%px: NSS Bridge Set FDB Learn message: \n" + "Mode: %d\n", + nbm, nbflm->mode); +} + +/* + * nss_bridge_log_verbose() + * Log message contents. 
+ */ +static void nss_bridge_log_verbose(struct nss_bridge_msg *nbm) +{ + switch (nbm->cm.type) { + case NSS_BRIDGE_MSG_JOIN: + nss_bridge_join_msg(nbm); + break; + + case NSS_BRIDGE_MSG_LEAVE: + nss_bridge_leave_msg(nbm); + break; + + case NSS_BRIDGE_MSG_SET_FDB_LEARN: + nss_bridge_fdb_learn_msg(nbm); + break; + + default: + nss_trace("%px: Invalid message type\n", nbm); + break; + } +} + +/* + * nss_bridge_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_bridge_log_tx_msg(struct nss_bridge_msg *nbm) +{ + if (nbm->cm.type >= NSS_BRIDGE_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", nbm); + return; + } + + nss_info("%px: type[%d]:%s\n", nbm, nbm->cm.type, nss_bridge_log_message_types_str[nbm->cm.type - NSS_IF_MAX_MSG_TYPES - 1]); + nss_bridge_log_verbose(nbm); +} + +/* + * nss_bridge_log_rx_msg() + * Log messages received from FW. + */ +void nss_bridge_log_rx_msg(struct nss_bridge_msg *nbm) +{ + if (nbm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nbm); + return; + } + + if (nbm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nbm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nbm, nbm->cm.type, + nss_bridge_log_message_types_str[nbm->cm.type - NSS_IF_MAX_MSG_TYPES - 1], + nbm->cm.response, nss_cmn_response_str[nbm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + nbm, nbm->cm.type, nss_bridge_log_message_types_str[nbm->cm.type - NSS_IF_MAX_MSG_TYPES - 1], + nbm->cm.response, nss_cmn_response_str[nbm->cm.response]); + +verbose: + nss_bridge_log_verbose(nbm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_bridge_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_bridge_log.h new file mode 100644 index 000000000..af9a5f787 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_bridge_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux 
Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_BRIDGE_LOG_H +#define __NSS_BRIDGE_LOG_H + +/* + * nss_bridge.h + * NSS Bridge header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_bridge_log_tx_msg + * Logs a bridge message that is sent to the NSS firmware. + */ +void nss_bridge_log_tx_msg(struct nss_bridge_msg *nbm); + +/* + * nss_bridge_log_rx_msg + * Logs a bridge message that is received from the NSS firmware. + */ +void nss_bridge_log_rx_msg(struct nss_bridge_msg *nbm); + +#endif /* __NSS_BRIDGE_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx.c b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx.c new file mode 100644 index 000000000..4a1d5f8c2 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx.c @@ -0,0 +1,113 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_c2c_rx.c + * NSS C2C_RX APIs + */ + +#include +#include "nss_c2c_rx_stats.h" +#include "nss_c2c_rx_strings.h" + +/* + * nss_c2c_rx_verify_if_num() + * Verify if_num passed to us. + */ +static bool nss_c2c_rx_verify_if_num(uint32_t if_num) +{ + return if_num == NSS_C2C_RX_INTERFACE; +} + +/* + * nss_c2c_rx_interface_handler() + * Handle NSS -> HLOS messages for C2C_RX Statistics + */ +static void nss_c2c_rx_interface_handler(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_c2c_rx_msg *ncrm = (struct nss_c2c_rx_msg *)ncm; + nss_c2c_rx_msg_callback_t cb; + + if (!nss_c2c_rx_verify_if_num(ncm->interface)) { + nss_warning("%px: invalid interface %d for c2c_tx\n", nss_ctx, ncm->interface); + return; + } + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_C2C_RX_MSG_TYPE_MAX) { + nss_warning("%px: received invalid message %d for c2c_rx", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_c2c_rx_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + switch (ncrm->cm.type) { + case NSS_C2C_RX_MSG_TYPE_STATS: + /* + * Update driver statistics and send statistics notifications to the registered modules. 
+ */ + nss_c2c_rx_stats_sync(nss_ctx, &ncrm->msg.stats); + nss_c2c_rx_stats_notify(nss_ctx); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages + * TODO: Add notify callbacks for c2c_rx + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + return; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_c2c_rx_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, ncrm); +} + +/* + * nss_c2c_rx_register_handler() + * Register handler for messaging + */ +void nss_c2c_rx_register_handler(struct nss_ctx_instance *nss_ctx) +{ + nss_core_register_handler(nss_ctx, NSS_C2C_RX_INTERFACE, nss_c2c_rx_interface_handler, NULL); + + if (nss_ctx->id == NSS_CORE_0) { + nss_c2c_rx_stats_dentry_create(); + } + nss_c2c_rx_strings_dentry_create(); +} +EXPORT_SYMBOL(nss_c2c_rx_register_handler); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_stats.c new file mode 100644 index 000000000..d9ea31656 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_stats.c @@ -0,0 +1,173 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_c2c_rx_stats.h" +#include "nss_c2c_rx_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_c2c_rx_stats_notifier); + +/* + * Spinlock to protect C2C_RX statistics update/read + */ +DEFINE_SPINLOCK(nss_c2c_rx_stats_lock); + +/* + * nss_c2c_rx_stats + * c2c_rx statistics + */ +uint64_t nss_c2c_rx_stats[NSS_MAX_CORES][NSS_C2C_RX_STATS_MAX]; + +/* + * nss_c2c_rx_stats_read() + * Read C2C_RX statistics + */ +static ssize_t nss_c2c_rx_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i, core; + + /* + * Max output lines = #stats * NSS_MAX_CORES + + * few blank lines for banner printing + Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_output_lines = NSS_C2C_RX_STATS_MAX * NSS_MAX_CORES + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return -ENOMEM; + } + + stats_shadow = kzalloc(NSS_C2C_RX_STATS_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return -ENOMEM; + } + + /* + * C2C_RX statistics + */ + for (core = 0; core < NSS_MAX_CORES; core++) { + spin_lock_bh(&nss_c2c_rx_stats_lock); + for (i = 0; i < NSS_C2C_RX_STATS_MAX; i++) { + 
stats_shadow[i] = nss_c2c_rx_stats[core][i]; + } + spin_unlock_bh(&nss_c2c_rx_stats_lock); + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "c2c_rx", core); + size_wr += nss_stats_print("c2c_rx", NULL, NSS_STATS_SINGLE_INSTANCE + , nss_c2c_rx_strings_stats + , stats_shadow + , NSS_C2C_RX_STATS_MAX + , lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_c2c_rx_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(c2c_rx); + +/* + * nss_c2c_rx_stats_dentry_create() + * Create C2C_RX statistics debug entry. + */ +void nss_c2c_rx_stats_dentry_create(void) +{ + nss_stats_create_dentry("c2c_rx", &nss_c2c_rx_stats_ops); +} + +/* + * nss_c2c_rx_stats_sync() + * Handle the syncing of NSS C2C_RX statistics. + */ +void nss_c2c_rx_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_c2c_rx_stats *ncrs) +{ + int id = nss_ctx->id; + int j; + + spin_lock_bh(&nss_c2c_rx_stats_lock); + + /* + * Common node stats + */ + nss_c2c_rx_stats[id][NSS_STATS_NODE_RX_PKTS] += (ncrs->pbuf_simple + ncrs->pbuf_sg + ncrs->pbuf_returning); + nss_c2c_rx_stats[id][NSS_STATS_NODE_RX_BYTES] += ncrs->node_stats.rx_bytes; + nss_c2c_rx_stats[id][NSS_STATS_NODE_TX_PKTS] += ncrs->node_stats.tx_packets; + nss_c2c_rx_stats[id][NSS_STATS_NODE_TX_BYTES] += ncrs->node_stats.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_c2c_rx_stats[id][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + j] += ncrs->node_stats.rx_dropped[j]; + } + + /* + * C2C_RX statistics + */ + nss_c2c_rx_stats[id][NSS_C2C_RX_STATS_PBUF_SIMPLE] += ncrs->pbuf_simple; + nss_c2c_rx_stats[id][NSS_C2C_RX_STATS_PBUF_SG] += ncrs->pbuf_sg; + nss_c2c_rx_stats[id][NSS_C2C_RX_STATS_PBUF_RETURNING] += ncrs->pbuf_returning; + nss_c2c_rx_stats[id][NSS_C2C_RX_STATS_INVAL_DEST] += ncrs->inval_dest; + + spin_unlock_bh(&nss_c2c_rx_stats_lock); +} + +/* + * nss_c2c_rx_stats_notify() + * Sends notifications to all the registered 
modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_c2c_rx_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + struct nss_c2c_rx_stats_notification c2c_rx_stats; + + c2c_rx_stats.core_id = nss_ctx->id; + memcpy(c2c_rx_stats.stats, nss_c2c_rx_stats[c2c_rx_stats.core_id], sizeof(c2c_rx_stats.stats)); + atomic_notifier_call_chain(&nss_c2c_rx_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&c2c_rx_stats); +} + +/* + * nss_c2c_rx_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_c2c_rx_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_c2c_rx_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_c2c_rx_stats_register_notifier); + +/* + * nss_c2c_rx_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_c2c_rx_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_c2c_rx_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_c2c_rx_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_stats.h new file mode 100644 index 000000000..c53d08071 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_stats.h @@ -0,0 +1,63 @@ +/* + ****************************************************************************** + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_C2C_RX_STATS_H +#define __NSS_C2C_RX_STATS_H + +#include + +/* + * c2c_rx_msg_type + * Message types supported + */ +enum c2c_rx_msg_type { + NSS_C2C_RX_MSG_TYPE_STATS, /* Statistics synchronization */ + NSS_C2C_RX_MSG_TYPE_MAX +}; + +/* + * nss_c2c_rx_stats + * The NSS c2c_rx node stats structure. + */ +struct nss_c2c_rx_stats { + struct nss_cmn_node_stats node_stats; + /* Common node stats for core-to-core reception. */ + uint32_t pbuf_simple; /* Number of received simple pbuf. */ + uint32_t pbuf_sg; /* Number of S/G pbuf received. */ + uint32_t pbuf_returning; /* Number of returning S/G pbuf. */ + uint32_t inval_dest; /* Number of pbuf enqueue failure because of dest is invalid. 
*/ +}; + +/* + * nss_c2c_rx_msg + * Message structure to send/receive c2c_rx commands + */ +struct nss_c2c_rx_msg { + struct nss_cmn_msg cm; /* Message Header */ + union { + struct nss_c2c_rx_stats stats; /* c2c_rx statistics */ + } msg; +}; + +/* + * C2C_RX statistics APIs + */ +extern void nss_c2c_rx_stats_notify(struct nss_ctx_instance *nss_ctx); +typedef void (*nss_c2c_rx_msg_callback_t)(void *app_data, struct nss_c2c_rx_msg *msg); +extern void nss_c2c_rx_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_c2c_rx_stats *ncrs); +extern void nss_c2c_rx_stats_dentry_create(void); + +#endif /* __NSS_C2C_RX_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_strings.c new file mode 100644 index 000000000..c20754d12 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_strings.c @@ -0,0 +1,61 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" + +/* + * nss_c2c_rx_strings_stats + * Core-to-core Rx statistics strings. + */ +struct nss_stats_info nss_c2c_rx_strings_stats[NSS_C2C_RX_STATS_MAX] = { + {"rx_pkts" , NSS_STATS_TYPE_COMMON}, + {"rx_byts" , NSS_STATS_TYPE_COMMON}, + {"tx_pkts" , NSS_STATS_TYPE_COMMON}, + {"tx_byts" , NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops" , NSS_STATS_TYPE_DROP}, + {"pbuf_simple" , NSS_STATS_TYPE_SPECIAL}, + {"pbuf_sg" , NSS_STATS_TYPE_SPECIAL}, + {"pbuf_returning" , NSS_STATS_TYPE_SPECIAL}, + {"inval_dest" , NSS_STATS_TYPE_DROP} +}; + +/* + * nss_c2c_rx_strings_read() + * Read C2C Rx node statistics names. + */ +static ssize_t nss_c2c_rx_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_c2c_rx_strings_stats, NSS_C2C_RX_STATS_MAX); +} + +/* + * nss_c2c_rx_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(c2c_rx); + +/* + * nss_c2c_rx_strings_dentry_create() + * Create C2C Rx statistics strings debug entry. + */ +void nss_c2c_rx_strings_dentry_create(void) +{ + nss_strings_create_dentry("c2c_rx", &nss_c2c_rx_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_strings.h new file mode 100644 index 000000000..3810f11fa --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_rx_strings.h @@ -0,0 +1,23 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_C2C_RX_STRINGS_H +#define __NSS_C2C_RX_STRINGS_H + +extern struct nss_stats_info nss_c2c_rx_strings_stats[NSS_C2C_RX_STATS_MAX]; +extern void nss_c2c_rx_strings_dentry_create(void); + +#endif /* __NSS_C2C_RX_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx.c b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx.c new file mode 100644 index 000000000..244f4598a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx.c @@ -0,0 +1,439 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_c2c_tx.c + * NSS C2C_TX APIs + */ + +#include +#include "nss_c2c_tx_stats.h" +#include "nss_c2c_tx_log.h" +#include "nss_c2c_tx_strings.h" + +int nss_c2c_tx_test_id = -1; + +/* + * Private data structure. + */ +struct nss_c2c_tx_pvt { + struct semaphore sem; /* Semaphore structure. */ + struct completion complete; /* Completion structure. */ + int response; /* Response from FW. */ + void *cb; /* Original cb for sync msgs. */ + void *app_data; /* Original app_data for sync msgs. */ +}; + +/* + * Notify data structure + */ +struct nss_c2c_tx_notify_data { + nss_c2c_tx_msg_callback_t c2c_tx_callback; + void *app_data; +}; + +static struct nss_c2c_tx_notify_data nss_c2c_tx_notify[NSS_CORE_MAX]; +static struct nss_c2c_tx_pvt nss_c2c_tx_cfg_pvt; + +/* + * nss_c2c_tx_verify_if_num() + * Verify if_num passed to us. + */ +static inline bool nss_c2c_tx_verify_if_num(uint32_t if_num) +{ + return if_num == NSS_C2C_TX_INTERFACE; +} + +/* + * nss_c2c_tx_interface_handler() + * Handle NSS -> HLOS messages for C2C_TX Statistics + */ +static void nss_c2c_tx_msg_handler(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_c2c_tx_msg *nctm = (struct nss_c2c_tx_msg *)ncm; + nss_c2c_tx_msg_callback_t cb; + + if (!nss_c2c_tx_verify_if_num(ncm->interface)) { + nss_warning("%px: invalid interface %d for c2c_tx\n", nss_ctx, ncm->interface); + return; + } + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_C2C_TX_MSG_TYPE_MAX) { + nss_warning("%px: received invalid message %d for c2c_tx", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_c2c_tx_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Trace messages. + */ + nss_c2c_tx_log_rx_msg(nctm); + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + switch (nctm->cm.type) { + case NSS_C2C_TX_MSG_TYPE_TX_MAP: + case NSS_C2C_TX_MSG_TYPE_PERFORMANCE_TEST: + break; + + case NSS_C2C_TX_MSG_TYPE_STATS: + /* + * Update driver statistics and send statistics notifications to the registered modules. + */ + nss_c2c_tx_stats_sync(nss_ctx, &nctm->msg.stats); + nss_c2c_tx_stats_notify(nss_ctx); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_c2c_tx_notify[nss_ctx->id].c2c_tx_callback; + ncm->app_data = (nss_ptr_t)nss_c2c_tx_notify[nss_ctx->id].app_data; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_c2c_tx_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, nctm); +} + +/* + * nss_c2c_tx_register_handler() + * Register handler for messaging + */ +void nss_c2c_tx_register_handler(struct nss_ctx_instance *nss_ctx) +{ + nss_info("%px: nss_c2c_tx_register_handler", nss_ctx); + nss_core_register_handler(nss_ctx, NSS_C2C_TX_INTERFACE, nss_c2c_tx_msg_handler, NULL); + + if (nss_ctx->id == NSS_CORE_0) { + nss_c2c_tx_stats_dentry_create(); + } + nss_c2c_tx_strings_dentry_create(); +} +EXPORT_SYMBOL(nss_c2c_tx_register_handler); + +/* + * nss_c2c_tx_tx_msg() + * Transmit an c2c_tx message to the FW with a specified size. 
+ */ +nss_tx_status_t nss_c2c_tx_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_c2c_tx_msg *nctm) +{ + struct nss_cmn_msg *ncm = &nctm->cm; + + /* + * Sanity check the message + */ + if (!nss_c2c_tx_verify_if_num(ncm->interface)) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_C2C_TX_MSG_TYPE_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Trace messages. + */ + nss_c2c_tx_log_tx_msg(nctm); + + return nss_core_send_cmd(nss_ctx, nctm, sizeof(*nctm), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_c2c_tx_tx_msg); + +/* + * nss_c2c_tx_msg_cfg_map_callback() + * Callback function for tx_map configuration + */ +static void nss_c2c_tx_msg_cfg_map_callback(void *app_data, struct nss_c2c_tx_msg *nctm) +{ + struct nss_ctx_instance *nss_ctx __attribute__((unused)) = (struct nss_ctx_instance *)app_data; + if (nctm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: nss c2c_tx_map configuration failed: %d for NSS core %d\n", + nss_ctx, nctm->cm.error, nss_ctx->id); + } + + nss_info("%px: nss c2c_tx_map configuration succeeded for NSS core %d\n", + nss_ctx, nss_ctx->id); +} + +/* + * nss_c2c_tx_msg_performance_test_start_callback() + * Callback function for c2c_tx test start configuration + */ +static void nss_c2c_tx_msg_performance_test_callback(void *app_data, struct nss_c2c_tx_msg *nctm) +{ + struct nss_ctx_instance *nss_ctx __attribute__((unused)) = (struct nss_ctx_instance *)app_data; + + /* + * Test start has been failed. Restore the value to initial state. 
+ */ + if (nctm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: nss c2c_tx test start failed: %d for NSS core %d\n", + nss_ctx, nctm->cm.error, nss_ctx->id); + nss_c2c_tx_test_id = -1; + return; + } + + nss_info("%px: nss c2c_tx test successfully initialized for NSS core %d\n", + nss_ctx, nss_ctx->id); +} + +/* + * nss_c2c_tx_msg_cfg_map() + * Send NSS to c2c_map + */ +nss_tx_status_t nss_c2c_tx_msg_cfg_map(struct nss_ctx_instance *nss_ctx, uint32_t tx_map, uint32_t c2c_intr_addr) +{ + int32_t status; + struct nss_c2c_tx_msg nctm; + struct nss_c2c_tx_map *cfg_map; + + nss_info("%px: C2C map:%x\n", nss_ctx, tx_map); + nss_c2c_tx_msg_init(&nctm, NSS_C2C_TX_INTERFACE, NSS_C2C_TX_MSG_TYPE_TX_MAP, + sizeof(struct nss_c2c_tx_map), nss_c2c_tx_msg_cfg_map_callback, (void *)nss_ctx); + + cfg_map = &nctm.msg.map; + cfg_map->tx_map = tx_map; + cfg_map->c2c_intr_addr = c2c_intr_addr; + + status = nss_c2c_tx_tx_msg(nss_ctx, &nctm); + if (unlikely(status != NSS_TX_SUCCESS)) { + return status; + } + + return NSS_TX_SUCCESS; +} + +/* + * nss_c2c_tx_msg_performance_test() + * Send NSS c2c peformance test start message. + */ +nss_tx_status_t nss_c2c_tx_msg_performance_test(struct nss_ctx_instance *nss_ctx, uint32_t test_id) +{ + int32_t status; + struct nss_c2c_tx_msg nctm; + struct nss_c2c_tx_test *test; + + nss_info("%px: C2C test message:%x\n", nss_ctx, test_id); + nss_c2c_tx_msg_init(&nctm, NSS_C2C_TX_INTERFACE, NSS_C2C_TX_MSG_TYPE_PERFORMANCE_TEST, + sizeof(struct nss_c2c_tx_test), nss_c2c_tx_msg_performance_test_callback, (void *)nss_ctx); + + test = &nctm.msg.test; + test->test_id = test_id; + + status = nss_c2c_tx_tx_msg(nss_ctx, &nctm); + if (unlikely(status != NSS_TX_SUCCESS)) { + return status; + } + + return NSS_TX_SUCCESS; +} + +/* + * nss_c2c_tx_msg_init() + * Initialize C2C_TX message. 
+ */ +void nss_c2c_tx_msg_init(struct nss_c2c_tx_msg *nctm, uint16_t if_num, uint32_t type, uint32_t len, + nss_c2c_tx_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&nctm->cm, if_num, type, len, (void *)cb, app_data); +} +EXPORT_SYMBOL(nss_c2c_tx_msg_init); + +/* + * nss_c2c_tx_performance_test_handler() + * Handles the performance test. + */ +static int nss_c2c_tx_performance_test_handler(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[0]; + int ret, ret_c2c_tx, current_state; + current_state = nss_c2c_tx_test_id; + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + if (ret != NSS_SUCCESS) { + return ret; + } + + if (!write) { + return ret; + } + + if (current_state != -1) { + nss_warning("%px: Another test is running.\n", nss_ctx); + return -EINVAL; + } + + if (nss_c2c_tx_test_id >= NSS_C2C_TX_TEST_TYPE_MAX || nss_c2c_tx_test_id <= 0) { + nss_warning("%px: Invalid test ID.\n", nss_ctx); + nss_c2c_tx_test_id = current_state; + return -EINVAL; + } + + nss_info("Starting the c2c_tx performance test\n"); + ret_c2c_tx = nss_c2c_tx_msg_performance_test(nss_ctx, nss_c2c_tx_test_id); + + if (ret_c2c_tx != NSS_SUCCESS) { + nss_warning("%px: Starting the test has failed.\n", nss_ctx); + nss_c2c_tx_test_id = -1; + } + + return ret_c2c_tx; +} + +static struct ctl_table nss_c2c_tx_table[] = { + { + .procname = "test_code", + .data = &nss_c2c_tx_test_id, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_c2c_tx_performance_test_handler, + }, + { } +}; + +static struct ctl_table nss_c2c_tx_dir[] = { + { + .procname = "c2c_tx", + .mode = 0555, + .child = nss_c2c_tx_table, + }, + { } +}; + +static struct ctl_table nss_c2c_tx_root_dir[] = { + { + .procname = "nss", + .mode = 0555, + .child = nss_c2c_tx_dir, + }, + { } +}; + +static struct ctl_table nss_c2c_tx_root[] = { + { + .procname = "dev", + .mode = 0555, 
+ .child = nss_c2c_tx_root_dir, + }, + { } +}; + +static struct ctl_table_header *nss_c2c_tx_header; + +/* + * nss_c2c_tx_register_sysctl() + */ +void nss_c2c_tx_register_sysctl(void) +{ + + /* + * c2c_tx sema init. + */ + sema_init(&nss_c2c_tx_cfg_pvt.sem, 1); + init_completion(&nss_c2c_tx_cfg_pvt.complete); + + /* + * Register sysctl table. + */ + nss_c2c_tx_header = register_sysctl_table(nss_c2c_tx_root); +} + +/* + * nss_c2c_tx_unregister_sysctl() + * Unregister sysctl specific to c2c_tx + */ +void nss_c2c_tx_unregister_sysctl(void) +{ + /* + * Unregister sysctl table. + */ + if (nss_c2c_tx_header) { + unregister_sysctl_table(nss_c2c_tx_header); + } +} + +/* + * nss_c2c_tx_notify_register() + * Register to receive c2c_tx notify messages. + */ +struct nss_ctx_instance *nss_c2c_tx_notify_register(int core, nss_c2c_tx_msg_callback_t cb, void *app_data) +{ + if (core >= NSS_CORE_MAX) { + nss_warning("Input core number %d is wrong\n", core); + return NULL; + } + + nss_c2c_tx_notify[core].c2c_tx_callback = cb; + nss_c2c_tx_notify[core].app_data = app_data; + + return (struct nss_ctx_instance *)&nss_top_main.nss[core]; +} +EXPORT_SYMBOL(nss_c2c_tx_notify_register); + +/* + * nss_c2c_tx_notify_unregister() + * Unregister to receive c2c_tx notify messages. 
+ */ +void nss_c2c_tx_notify_unregister(int core) +{ + if (core >= NSS_CORE_MAX) { + nss_warning("Input core number %d is wrong\n", core); + return; + } + + nss_c2c_tx_notify[core].c2c_tx_callback = NULL; + nss_c2c_tx_notify[core].app_data = NULL; +} +EXPORT_SYMBOL(nss_c2c_tx_notify_unregister); + +/* + * nss_c2c_tx_init() + */ +void nss_c2c_tx_init(void) +{ + int core; + + for (core = 0; core < NSS_CORE_MAX; core++) { + nss_c2c_tx_notify_register(core, NULL, NULL); + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_log.c new file mode 100644 index 000000000..088cef353 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_log.c @@ -0,0 +1,121 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_c2c_tx_log.c + * NSS C2C TX logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_c2c_tx_log_message_types_str + * C2C TX message strings + */ +static int8_t *nss_c2c_tx_log_message_types_str[NSS_C2C_TX_MSG_TYPE_MAX] __maybe_unused = { + "C2C TX Stats message", + "C2C TX Map Message", +}; + +/* + * nss_c2c_tx_log_error_response_types_str + * Strings for error types for c2c_tx messages + */ +static int8_t *nss_c2c_tx_log_error_response_types_str[NSS_C2C_TX_MSG_ERROR_MAX] __maybe_unused = { + "No error", + "Invalid Operation" +}; + +/* + * nss_c2c_tx_map_msg()() + * Log NSS C2C TX Map message. + */ +static void nss_c2c_tx_map_msg(struct nss_c2c_tx_msg *nctm) +{ + struct nss_c2c_tx_map *nctmm __maybe_unused = &nctm->msg.map; + nss_trace("%px: NSS C2C TX Map message: \n" + "C2C Receiver Queue Start Address: %d\n" + "C2C Interrupt Register Address: %d\n", + nctm, + nctmm->tx_map, nctmm->c2c_intr_addr); +} + +/* + * nss_c2c_tx_log_verbose() + * Log message contents. + */ +static void nss_c2c_tx_log_verbose(struct nss_c2c_tx_msg *nctm) +{ + switch (nctm->cm.type) { + case NSS_C2C_TX_MSG_TYPE_TX_MAP: + nss_c2c_tx_map_msg(nctm); + break; + + default: + nss_trace("%px: Invalid message type\n", nctm); + break; + } +} + +/* + * nss_c2c_tx_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_c2c_tx_log_tx_msg(struct nss_c2c_tx_msg *nctm) +{ + if (nctm->cm.type >= NSS_C2C_TX_MSG_TYPE_MAX) { + nss_info("%px: Invalid message type\n", nctm); + return; + } + + nss_info("%px: type[%d]:%s\n", nctm, nctm->cm.type, nss_c2c_tx_log_message_types_str[nctm->cm.type]); + nss_c2c_tx_log_verbose(nctm); +} + +/* + * nss_c2c_tx_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_c2c_tx_log_rx_msg(struct nss_c2c_tx_msg *nctm) +{ + if (nctm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nctm); + return; + } + + if (nctm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nctm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nctm, nctm->cm.type, + nss_c2c_tx_log_message_types_str[nctm->cm.type], + nctm->cm.response, nss_cmn_response_str[nctm->cm.response]); + goto verbose; + } + + if (nctm->cm.error >= NSS_C2C_TX_MSG_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nctm, nctm->cm.type, nss_c2c_tx_log_message_types_str[nctm->cm.type], + nctm->cm.response, nss_cmn_response_str[nctm->cm.response], + nctm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nctm, nctm->cm.type, nss_c2c_tx_log_message_types_str[nctm->cm.type], + nctm->cm.response, nss_cmn_response_str[nctm->cm.response], + nctm->cm.error, nss_c2c_tx_log_error_response_types_str[nctm->cm.error]); + +verbose: + nss_c2c_tx_log_verbose(nctm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_log.h new file mode 100644 index 000000000..e6ec47e77 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_log.h @@ -0,0 +1,36 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_C2C_TX_LOG_H +#define __NSS_C2C_TX_LOG_H +/* + * nss_c2c_tx_log.h + * NSS C2C TX Log Header File + */ + +/* + * nss_c2c_tx_log_tx_msg + * Logs an C2C TX message that is sent to the NSS firmware. + */ +void nss_c2c_tx_log_tx_msg(struct nss_c2c_tx_msg *nctm); + +/* + * nss_c2c_tx_log_rx_msg + * Logs an IPv4 message that is received from the NSS firmware. + */ +void nss_c2c_tx_log_rx_msg(struct nss_c2c_tx_msg *nctm); + +#endif /* __NSS_C2C_TX_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_stats.c new file mode 100644 index 000000000..7983f3f8c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_stats.c @@ -0,0 +1,168 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_c2c_tx_stats.h" +#include "nss_c2c_tx_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_c2c_tx_stats_notifier); + +/* + * Spinlock to protect C2C_TX statistics update/read + */ +DEFINE_SPINLOCK(nss_c2c_tx_stats_lock); + +/* + * nss_c2c_tx_stats + * c2c_tx statistics + */ +uint64_t nss_c2c_tx_stats[NSS_MAX_CORES][NSS_C2C_TX_STATS_MAX]; + +/* + * nss_c2c_tx_stats_read() + * Read c2c_tx statistics + */ +static ssize_t nss_c2c_tx_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i, core; + + /* + * Max output lines = #stats * NSS_MAX_CORES + + * few blank lines for banner printing + Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_output_lines = NSS_C2C_TX_STATS_MAX * NSS_MAX_CORES + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return -ENOMEM; + } + + stats_shadow = kzalloc(NSS_C2C_TX_STATS_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return -ENOMEM; + } + + /* + * C2C_TX statistics + */ + for (core = 0; core < NSS_MAX_CORES; core++) { + spin_lock_bh(&nss_c2c_tx_stats_lock); + for (i = 0; i < NSS_C2C_TX_STATS_MAX; i++) { + 
stats_shadow[i] = nss_c2c_tx_stats[core][i]; + } + spin_unlock_bh(&nss_c2c_tx_stats_lock); + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "c2c_tx", core); + size_wr += nss_stats_print("c2c_tx", NULL, NSS_STATS_SINGLE_INSTANCE, nss_c2c_tx_strings_stats, stats_shadow, NSS_C2C_TX_STATS_MAX, lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_c2c_tx_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(c2c_tx); + +/* + * nss_c2c_tx_stats_dentry_create() + * Create c2c_tx statistics debug entry. + */ +void nss_c2c_tx_stats_dentry_create(void) +{ + nss_stats_create_dentry("c2c_tx", &nss_c2c_tx_stats_ops); +} + +/* + * nss_c2c_tx_stats_sync() + * Handle the syncing of NSS C2C_TX statistics. + */ +void nss_c2c_tx_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_c2c_tx_stats *nct) +{ + int id = nss_ctx->id; + int j; + + spin_lock_bh(&nss_c2c_tx_stats_lock); + + /* + * Common node stats + */ + nss_c2c_tx_stats[id][NSS_STATS_NODE_RX_PKTS] += (nct->pbuf_simple + nct->pbuf_sg + nct->pbuf_returning); + nss_c2c_tx_stats[id][NSS_STATS_NODE_RX_BYTES] += nct->node_stats.rx_bytes; + nss_c2c_tx_stats[id][NSS_STATS_NODE_TX_PKTS] += nct->node_stats.tx_packets; + nss_c2c_tx_stats[id][NSS_STATS_NODE_TX_BYTES] += nct->node_stats.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_c2c_tx_stats[id][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + j] += nct->node_stats.rx_dropped[j]; + } + + /* + * C2C_TX statistics + */ + nss_c2c_tx_stats[id][NSS_C2C_TX_STATS_PBUF_SIMPLE] += nct->pbuf_simple; + nss_c2c_tx_stats[id][NSS_C2C_TX_STATS_PBUF_SG] += nct->pbuf_sg; + nss_c2c_tx_stats[id][NSS_C2C_TX_STATS_PBUF_RETURNING] += nct->pbuf_returning; + + spin_unlock_bh(&nss_c2c_tx_stats_lock); +} + +/* + * nss_c2c_tx_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_c2c_tx_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + struct nss_c2c_tx_stats_notification c2c_tx_stats; + + c2c_tx_stats.core_id = nss_ctx->id; + memcpy(c2c_tx_stats.stats, nss_c2c_tx_stats[c2c_tx_stats.core_id], sizeof(c2c_tx_stats.stats)); + atomic_notifier_call_chain(&nss_c2c_tx_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&c2c_tx_stats); +} + +/* + * nss_c2c_tx_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_c2c_tx_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_c2c_tx_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_c2c_tx_stats_register_notifier); + +/* + * nss_c2c_tx_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_c2c_tx_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_c2c_tx_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_c2c_tx_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_stats.h new file mode 100644 index 000000000..f77a3b4d6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_stats.h @@ -0,0 +1,29 @@ +/* + ****************************************************************************** + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_C2C_TX_STATS_H +#define __NSS_C2C_TX_STATS_H + +#include + +/* + * C2C Tx statistics APIs + */ +extern void nss_c2c_tx_stats_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_c2c_tx_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_c2c_tx_stats *nct); +extern void nss_c2c_tx_stats_dentry_create(void); + +#endif /* __NSS_C2C_TX_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_strings.c new file mode 100644 index 000000000..8272e8466 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_strings.c @@ -0,0 +1,61 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" + +/* + * nss_c2c_tx_strings_stats + * C2C Tx statistics strings. + */ +struct nss_stats_info nss_c2c_tx_strings_stats[NSS_C2C_TX_STATS_MAX] = { + {"rx_pkts" , NSS_STATS_TYPE_COMMON}, + {"rx_byts" , NSS_STATS_TYPE_COMMON}, + {"tx_pkts" , NSS_STATS_TYPE_COMMON}, + {"tx_byts" , NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops" , NSS_STATS_TYPE_DROP}, + {"pbuf_simple" , NSS_STATS_TYPE_SPECIAL}, + {"pbuf_sg" , NSS_STATS_TYPE_SPECIAL}, + {"pbuf_returning" , NSS_STATS_TYPE_SPECIAL} +}; + + +/* + * nss_c2c_tx_strings_read() + * Read c2c Tx node statistics names + */ +static ssize_t nss_c2c_tx_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_c2c_tx_strings_stats, NSS_C2C_TX_STATS_MAX); +} + +/* + * nss_c2c_tx_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(c2c_tx); + +/* + * nss_c2c_tx_strings_dentry_create() + * Create C2C Tx statistics strings debug entry. + */ +void nss_c2c_tx_strings_dentry_create(void) +{ + nss_strings_create_dentry("c2c_tx", &nss_c2c_tx_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_strings.h new file mode 100644 index 000000000..483177de5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_c2c_tx_strings.h @@ -0,0 +1,23 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_C2C_TX_STRINGS_H +#define __NSS_C2C_TX_STRINGS_H + +extern struct nss_stats_info nss_c2c_tx_strings_stats[NSS_C2C_TX_STATS_MAX]; +extern void nss_c2c_tx_strings_dentry_create(void); + +#endif /* __NSS_C2C_TX_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_capwap.c b/feeds/ipq807x/qca-nss-drv/src/nss_capwap.c new file mode 100644 index 000000000..597c31814 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_capwap.c @@ -0,0 +1,606 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + + /* + * nss_capwap.c + * NSS CAPWAP driver interface APIs + */ +#include "nss_core.h" +#include "nss_capwap.h" +#include "nss_cmn.h" +#include "nss_tx_rx_common.h" +#include "nss_capwap_stats.h" +#include "nss_capwap_log.h" +#include "nss_capwap_strings.h" + +/* + * Spinlock for protecting tunnel operations colliding with a tunnel destroy + */ +DEFINE_SPINLOCK(nss_capwap_spinlock); + +/* + * Array of pointer for NSS CAPWAP handles. Each handle has per-tunnel + * stats based on the if_num which is an index. + * + * Per CAPWAP tunnel/interface number instance. + */ +struct nss_capwap_handle { + atomic_t refcnt; /**< Reference count on the tunnel */ + uint32_t if_num; /**< Interface number */ + uint32_t tunnel_status; /**< 0=disable, 1=enabled */ + struct nss_ctx_instance *ctx; /**< Pointer to context */ + nss_capwap_msg_callback_t msg_callback; /**< Msg callback */ + void *app_data; /**< App data (argument) */ + struct nss_capwap_tunnel_stats stats; /**< Stats per-interface number */ +}; +static struct nss_capwap_handle *nss_capwap_hdl[NSS_MAX_DYNAMIC_INTERFACES]; + +/* + * nss_capwap_get_interface_type() + * Function to get the type of dynamic interface. + */ +static enum nss_dynamic_interface_type nss_capwap_get_interface_type(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx; + nss_ctx = &nss_top_main.nss[nss_top_main.capwap_handler_id]; + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + return nss_dynamic_interface_get_type(nss_ctx, if_num); +} + +/* + * nss_capwap_verify_if_num() + * Verify if_num passed to us. 
 */
static bool nss_capwap_verify_if_num(uint32_t if_num)
{
	enum nss_dynamic_interface_type type;

	/*
	 * CAPWAP tunnels only live on dynamic interfaces.
	 */
	if (nss_is_dynamic_interface(if_num) == false) {
		return false;
	}

	/*
	 * The dynamic interface must be one of the two CAPWAP node types
	 * (host-inner for encap, outer for decap).
	 */
	type = nss_capwap_get_interface_type(if_num);
	if ((type != NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_HOST_INNER) && (type != NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_OUTER) ) {
		return false;
	}

	return true;
}

/*
 * nss_capwap_refcnt_inc()
 *	Increments refcnt on the tunnel.
 *
 * NOTE(review): dereferences nss_capwap_hdl[] without a NULL check;
 * callers appear to serialize against teardown via nss_capwap_spinlock
 * (see nss_capwap_tx_msg()) — confirm this holds for every call site.
 */
static void nss_capwap_refcnt_inc(int32_t if_num)
{
	/* Convert the interface number into an index into nss_capwap_hdl[]. */
	if_num = if_num - NSS_DYNAMIC_IF_START;
	atomic_inc(&nss_capwap_hdl[if_num]->refcnt);
	nss_assert(atomic_read(&nss_capwap_hdl[if_num]->refcnt) > 0);
}

/*
 * nss_capwap_refcnt_dec()
 *	Decrements refcnt on the tunnel.
 */
static void nss_capwap_refcnt_dec(int32_t if_num)
{
	if_num = if_num - NSS_DYNAMIC_IF_START;
	/* Must never drop below zero; assert before the decrement. */
	nss_assert(atomic_read(&nss_capwap_hdl[if_num]->refcnt) > 0);
	atomic_dec(&nss_capwap_hdl[if_num]->refcnt);
}

/*
 * nss_capwap_refcnt_get()
 *	Get refcnt on the tunnel.
 */
static uint32_t nss_capwap_refcnt_get(int32_t if_num)
{
	if_num = if_num - NSS_DYNAMIC_IF_START;
	return atomic_read(&nss_capwap_hdl[if_num]->refcnt);
}

/*
 * nss_capwap_set_msg_callback()
 *	This sets the message callback handler and its associated context.
 *
 * A NULL handle (tunnel already torn down) is silently ignored.
 */
static void nss_capwap_set_msg_callback(int32_t if_num, nss_capwap_msg_callback_t cb, void *app_data)
{
	struct nss_capwap_handle *h;

	h = nss_capwap_hdl[if_num - NSS_DYNAMIC_IF_START];
	if (!h) {
		return;
	}

	h->app_data = app_data;
	h->msg_callback = cb;
}

/*
 * nss_capwap_get_msg_callback()
 *	This gets the message callback handler and its associated context.
 *
 * Returns NULL (and NULLs *app_data) when no tunnel instance exists
 * for the given interface number.
 */
static nss_capwap_msg_callback_t nss_capwap_get_msg_callback(int32_t if_num, void **app_data)
{
	struct nss_capwap_handle *h;

	h = nss_capwap_hdl[if_num - NSS_DYNAMIC_IF_START];
	if (!h) {
		*app_data = NULL;
		return NULL;
	}

	*app_data = h->app_data;
	return h->msg_callback;
}

/*
 * nss_capwap_update_stats()
 *	Update per-tunnel stats for each CAPWAP interface.
 */
static void nss_capwap_update_stats(struct nss_capwap_handle *handle, struct nss_capwap_stats_msg *fstats)
{
	struct nss_capwap_tunnel_stats *stats;
	enum nss_dynamic_interface_type type;

	stats = &handle->stats;
	type = nss_capwap_get_interface_type(handle->if_num);

	/*
	 * Fold the firmware's incremental counters into the driver's
	 * accumulated per-tunnel stats. Which fields apply depends on
	 * whether this node is the decap (OUTER) or encap (HOST_INNER) side.
	 */
	switch(type) {
	case NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_OUTER:
		stats->rx_segments += fstats->rx_segments;
		stats->dtls_pkts += fstats->dtls_pkts;
		stats->rx_dup_frag += fstats->rx_dup_frag;
		stats->rx_oversize_drops += fstats->rx_oversize_drops;
		stats->rx_frag_timeout_drops += fstats->rx_frag_timeout_drops;
		stats->rx_n2h_drops += fstats->rx_n2h_drops;
		stats->rx_n2h_queue_full_drops += fstats->rx_n2h_queue_full_drops;
		stats->rx_mem_failure_drops += fstats->rx_mem_failure_drops;
		stats->rx_csum_drops += fstats->rx_csum_drops;
		stats->rx_malformed += fstats->rx_malformed;
		stats->rx_frag_gap_drops += fstats->rx_frag_gap_drops;

		/*
		 * Update pnode rx stats for OUTER node.
		 */
		stats->pnode_stats.rx_packets += fstats->pnode_stats.rx_packets;
		stats->pnode_stats.rx_bytes += fstats->pnode_stats.rx_bytes;
		stats->pnode_stats.rx_dropped += nss_cmn_rx_dropped_sum(&fstats->pnode_stats);
		break;

	case NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_HOST_INNER:
		stats->tx_segments += fstats->tx_segments;
		stats->tx_queue_full_drops += fstats->tx_queue_full_drops;
		stats->tx_mem_failure_drops += fstats->tx_mem_failure_drops;
		stats->tx_dropped_sg_ref += fstats->tx_dropped_sg_ref;
		stats->tx_dropped_ver_mis += fstats->tx_dropped_ver_mis;
		stats->tx_dropped_hroom += fstats->tx_dropped_hroom;
		stats->tx_dropped_dtls += fstats->tx_dropped_dtls;
		stats->tx_dropped_nwireless += fstats->tx_dropped_nwireless;

		/*
		 * Update pnode tx stats for INNER node.
		 */
		stats->pnode_stats.tx_packets += fstats->pnode_stats.tx_packets;
		stats->pnode_stats.tx_bytes += fstats->pnode_stats.tx_bytes;
		stats->tx_dropped_inner += nss_cmn_rx_dropped_sum(&fstats->pnode_stats);
		break;

	default:
		nss_warning("%px: Received invalid dynamic interface type: %d", handle, type);
		nss_assert(0);
		return;
	}

	/*
	 * Set to 1 when the tunnel is operating in fast memory.
	 */
	stats->fast_mem = fstats->fast_mem;
}

/*
 * nss_capwap_handler()
 *	Handle NSS -> HLOS messages for CAPWAP
 */
static void nss_capwap_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data)
{
	struct nss_capwap_msg *ntm = (struct nss_capwap_msg *)ncm;
	nss_capwap_msg_callback_t cb;

	/*
	 * Is this a valid request/response packet?
	 * NOTE(review): this uses '>' while nss_capwap_tx_msg() rejects with
	 * '>=' against NSS_CAPWAP_MSG_TYPE_MAX — confirm which bound is intended.
	 */
	if (ncm->type > NSS_CAPWAP_MSG_TYPE_MAX) {
		nss_warning("%px: received invalid message %d for CAPWAP interface", nss_ctx, ncm->type);
		return;
	}

	if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_capwap_msg)) {
		nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm));
		return;
	}

	nss_core_log_msg_failures(nss_ctx, ncm);

	/*
	 * Trace messages.
	 */
	nss_capwap_log_rx_msg(ntm);

	switch (ntm->cm.type) {
	case NSS_CAPWAP_MSG_TYPE_SYNC_STATS: {
		uint32_t if_num;

		/*
		 * NOTE(review): ncm->interface is indexed into nss_capwap_hdl[]
		 * without a range check here; presumably the firmware only sends
		 * stats for registered dynamic interfaces — confirm.
		 */
		if_num = ncm->interface - NSS_DYNAMIC_IF_START;
		if (nss_capwap_hdl[if_num] != NULL) {
			/*
			 * Update driver statistics and send statistics notifications to the registered modules.
			 */
			nss_capwap_update_stats(nss_capwap_hdl[if_num], &ntm->msg.stats);
			nss_capwap_stats_notify(ncm->interface, nss_ctx->id);
		}
	}
	}

	/*
	 * Update the callback and app_data for NOTIFY messages.
	 */
	if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) {
		ncm->cb = (nss_ptr_t)nss_capwap_get_msg_callback(ncm->interface, (void **)&ncm->app_data);
	}

	/*
	 * Do we have a callback?
	 */
	if (!ncm->cb) {
		nss_trace("%px: cb is null for interface %d", nss_ctx, ncm->interface);
		return;
	}

	cb = (nss_capwap_msg_callback_t)ncm->cb;
	cb((void *)ncm->app_data, ntm);
}

/*
 * nss_capwap_instance_alloc()
 *	Allocate CAPWAP tunnel instance
 */
static bool nss_capwap_instance_alloc(struct nss_ctx_instance *nss_ctx, uint32_t if_num)
{
	struct nss_capwap_handle *h;

	/*
	 * Allocate a handle. GFP_ATOMIC because this may run in
	 * non-sleepable context.
	 */
	h = kmalloc(sizeof(struct nss_capwap_handle), GFP_ATOMIC);
	if (h == NULL) {
		nss_warning("%px: no memory for allocating CAPWAP instance for interface : %d", nss_ctx, if_num);
		return false;
	}

	memset(h, 0, sizeof(struct nss_capwap_handle));
	h->if_num = if_num;

	/*
	 * Publish the handle under the spinlock; lose the race (and free)
	 * if another thread installed one first.
	 */
	spin_lock_bh(&nss_capwap_spinlock);
	if (nss_capwap_hdl[if_num - NSS_DYNAMIC_IF_START] != NULL) {
		spin_unlock_bh(&nss_capwap_spinlock);
		kfree(h);
		nss_warning("%px: Another thread is already allocated instance for :%d", nss_ctx, if_num);
		return false;
	}

	nss_capwap_hdl[if_num - NSS_DYNAMIC_IF_START] = h;
	spin_unlock_bh(&nss_capwap_spinlock);

	return true;
}

/*
 * nss_capwap_tx_msg()
 *	Transmit a CAPWAP message to NSS FW. Don't call this from softirq/interrupts.
 */
nss_tx_status_t nss_capwap_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_capwap_msg *msg)
{
	struct nss_cmn_msg *ncm = &msg->cm;
	int32_t status;
	int32_t if_num;

	/* Sleepable-context only: sending a command may block. */
	BUG_ON(in_interrupt());
	BUG_ON(in_softirq());
	BUG_ON(in_serving_softirq());

	if (nss_capwap_verify_if_num(msg->cm.interface) == false) {
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	if (ncm->type >= NSS_CAPWAP_MSG_TYPE_MAX) {
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	/*
	 * Take a reference under the spinlock so the tunnel cannot be
	 * unregistered while the command is in flight (the unregister
	 * paths fail while refcnt != 0).
	 */
	if_num = msg->cm.interface - NSS_DYNAMIC_IF_START;
	spin_lock_bh(&nss_capwap_spinlock);
	if (!nss_capwap_hdl[if_num]) {
		spin_unlock_bh(&nss_capwap_spinlock);
		nss_warning("%px: capwap tunnel if_num is not there: %d", nss_ctx, msg->cm.interface);
		return NSS_TX_FAILURE_BAD_PARAM;
	}
	nss_capwap_refcnt_inc(msg->cm.interface);
	spin_unlock_bh(&nss_capwap_spinlock);

	/*
	 * Trace messages.
	 */
	nss_capwap_log_tx_msg(msg);

	status = nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE);
	nss_capwap_refcnt_dec(msg->cm.interface);
	return status;
}
EXPORT_SYMBOL(nss_capwap_tx_msg);

/*
 * nss_capwap_tx_buf()
 *	Transmit data buffer (skb) to a NSS interface number
 */
nss_tx_status_t nss_capwap_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num)
{
	BUG_ON(!nss_capwap_verify_if_num(if_num));

	return nss_core_send_packet(nss_ctx, os_buf, if_num, H2N_BIT_FLAG_VIRTUAL_BUFFER | H2N_BIT_FLAG_BUFFER_REUSABLE);
}
EXPORT_SYMBOL(nss_capwap_tx_buf);

/*
 ***********************************
 * Register/Unregister/Miscellaneous APIs
 ***********************************
 */

/*
 * nss_capwap_get_stats()
 *	API for getting stats from a CAPWAP tunnel interface stats.
 *
 * Copies a consistent snapshot of the per-tunnel stats into *stats
 * under nss_capwap_spinlock. Returns false if the interface number is
 * not a CAPWAP interface or the tunnel does not exist.
 */
bool nss_capwap_get_stats(uint32_t if_num, struct nss_capwap_tunnel_stats *stats)
{
	if (nss_capwap_verify_if_num(if_num) == false) {
		return false;
	}

	if_num = if_num - NSS_DYNAMIC_IF_START;
	spin_lock_bh(&nss_capwap_spinlock);
	if (nss_capwap_hdl[if_num] == NULL) {
		spin_unlock_bh(&nss_capwap_spinlock);
		return false;
	}

	memcpy(stats, &nss_capwap_hdl[if_num]->stats, sizeof(struct nss_capwap_tunnel_stats));
	spin_unlock_bh(&nss_capwap_spinlock);
	return true;
}
EXPORT_SYMBOL(nss_capwap_get_stats);

/*
 * nss_capwap_notify_register()
 *	Registers a message notifier with NSS FW. It should not be called from
 *	softirq or interrupts.
 *
 * NOTE(review): the cb/app_data parameters are not stored here; the
 * tunnel instance (and its callback) is created later in
 * nss_capwap_data_register() — confirm this is the intended flow.
 */
struct nss_ctx_instance *nss_capwap_notify_register(uint32_t if_num, nss_capwap_msg_callback_t cb, void *app_data)
{
	struct nss_ctx_instance *nss_ctx;

	nss_ctx = &nss_top_main.nss[nss_top_main.capwap_handler_id];

	if (nss_capwap_verify_if_num(if_num) == false) {
		nss_warning("%px: notfiy register received for invalid interface %d", nss_ctx, if_num);
		return NULL;
	}

	/*
	 * Registration is only valid before the tunnel instance exists.
	 */
	spin_lock_bh(&nss_capwap_spinlock);
	if (nss_capwap_hdl[if_num - NSS_DYNAMIC_IF_START] != NULL) {
		spin_unlock_bh(&nss_capwap_spinlock);
		nss_warning("%px: notfiy register tunnel already exists for interface %d", nss_ctx, if_num);
		return NULL;
	}
	spin_unlock_bh(&nss_capwap_spinlock);

	return nss_ctx;
}
EXPORT_SYMBOL(nss_capwap_notify_register);

/*
 * nss_capwap_notify_unregister()
 *	unregister the CAPWAP notifier for the given interface number (if_num).
 *	It shouldn't be called from softirq or interrupts.
 */
nss_tx_status_t nss_capwap_notify_unregister(struct nss_ctx_instance *nss_ctx, uint32_t if_num)
{
	struct nss_top_instance *nss_top;
	int index;

	if (nss_capwap_verify_if_num(if_num) == false) {
		nss_warning("%px: notify unregister received for invalid interface %d", nss_ctx, if_num);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	nss_top = nss_ctx->nss_top;
	if (nss_top == NULL) {
		nss_warning("%px: notify unregister received for invalid nss_top %d", nss_ctx, if_num);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	index = if_num - NSS_DYNAMIC_IF_START;
	spin_lock_bh(&nss_capwap_spinlock);
	if (nss_capwap_hdl[index] == NULL) {
		spin_unlock_bh(&nss_capwap_spinlock);
		nss_warning("%px: notify unregister received for unallocated if_num: %d", nss_ctx, if_num);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	/*
	 * It's the responsibility of caller to wait and call us again. We return failure saying
	 * that we can't remove msg handler now.
	 */
	if (nss_capwap_refcnt_get(if_num) != 0) {
		spin_unlock_bh(&nss_capwap_spinlock);
		nss_warning("%px: notify unregister tunnel %d: has reference", nss_ctx, if_num);
		return NSS_TX_FAILURE_QUEUE;
	}

	nss_capwap_set_msg_callback(if_num, NULL, NULL);
	spin_unlock_bh(&nss_capwap_spinlock);

	return NSS_TX_SUCCESS;
}
EXPORT_SYMBOL(nss_capwap_notify_unregister);

/*
 * nss_capwap_data_register()
 *	Registers a data packet notifier with NSS FW.
 */
struct nss_ctx_instance *nss_capwap_data_register(uint32_t if_num, nss_capwap_buf_callback_t cb, struct net_device *netdev, uint32_t features)
{
	struct nss_ctx_instance *nss_ctx;
	int core_status;

	nss_ctx = nss_capwap_get_ctx();
	if (nss_capwap_verify_if_num(if_num) == false) {
		nss_warning("%px: data register received for invalid interface %d", nss_ctx, if_num);
		return NULL;
	}

	/*
	 * Refuse if a net_device is already bound to this interface.
	 */
	spin_lock_bh(&nss_capwap_spinlock);
	if (nss_ctx->subsys_dp_register[if_num].ndev != NULL) {
		spin_unlock_bh(&nss_capwap_spinlock);
		return NULL;
	}
	spin_unlock_bh(&nss_capwap_spinlock);

	core_status = nss_core_register_handler(nss_ctx, if_num, nss_capwap_msg_handler, NULL);
	if (core_status != NSS_CORE_STATUS_SUCCESS) {
		nss_warning("%px: nss core register handler failed for if_num:%d with error :%d", nss_ctx, if_num, core_status);
		return NULL;
	}

	/*
	 * NOTE(review): on this failure path the core handler registered
	 * just above is not unregistered, so the handler slot appears to
	 * leak — confirm whether nss_core_register_handler needs an
	 * explicit rollback here.
	 */
	if (nss_capwap_instance_alloc(nss_ctx, if_num) == false) {
		nss_warning("%px: couldn't allocate tunnel instance for if_num:%d", nss_ctx, if_num);
		return NULL;
	}

	nss_core_register_subsys_dp(nss_ctx, if_num, cb, NULL, NULL, netdev, features);

	return nss_ctx;
}
EXPORT_SYMBOL(nss_capwap_data_register);

/*
 * nss_capwap_data_unregister()
 *	Unregister a data packet notifier with NSS FW
 */
bool nss_capwap_data_unregister(uint32_t if_num)
{
	struct nss_ctx_instance *nss_ctx;
	struct nss_capwap_handle *h;

	nss_ctx = nss_capwap_get_ctx();
	if (nss_capwap_verify_if_num(if_num) == false) {
		nss_warning("%px: data unregister received for invalid interface %d", nss_ctx, if_num);
		return false;
	}

	spin_lock_bh(&nss_capwap_spinlock);
	/*
	 * It's the responsibility of caller to wait and call us again.
	 */
	if (nss_capwap_refcnt_get(if_num) != 0) {
		spin_unlock_bh(&nss_capwap_spinlock);
		nss_warning("%px: notify unregister tunnel %d: has reference", nss_ctx, if_num);
		return false;
	}

	/*
	 * Detach the handle under the lock, then free it after the core
	 * handler and subsys_dp registrations are gone.
	 */
	h = nss_capwap_hdl[if_num - NSS_DYNAMIC_IF_START];
	nss_capwap_hdl[if_num - NSS_DYNAMIC_IF_START] = NULL;
	spin_unlock_bh(&nss_capwap_spinlock);

	(void) nss_core_unregister_handler(nss_ctx, if_num);

	nss_core_unregister_subsys_dp(nss_ctx, if_num);

	kfree(h);
	return true;
}
EXPORT_SYMBOL(nss_capwap_data_unregister);

/*
 * nss_capwap_get_ctx()
 *	Return a CAPWAP NSS context.
 */
struct nss_ctx_instance *nss_capwap_get_ctx()
{
	struct nss_ctx_instance *nss_ctx;

	nss_ctx = &nss_top_main.nss[nss_top_main.capwap_handler_id];
	return nss_ctx;
}
EXPORT_SYMBOL(nss_capwap_get_ctx);

/*
 * nss_capwap_ifnum_with_core_id()
 *	Append core id to capwap interface num.
 *
 * Returns 0 when if_num is not a dynamic interface.
 */
int nss_capwap_ifnum_with_core_id(int if_num)
{
	struct nss_ctx_instance *nss_ctx = nss_capwap_get_ctx();

	NSS_VERIFY_CTX_MAGIC(nss_ctx);
	if (nss_is_dynamic_interface(if_num) == false) {
		nss_info("%px: Invalid if_num: %d, must be a dynamic interface\n", nss_ctx, if_num);
		return 0;
	}
	return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num);
}
EXPORT_SYMBOL(nss_capwap_ifnum_with_core_id);

/*
 * nss_capwap_get_max_buf_size()
 *	Return a CAPWAP NSS max_buf_size.
 */
uint32_t nss_capwap_get_max_buf_size(struct nss_ctx_instance *nss_ctx)
{
	return nss_core_get_max_buf_size(nss_ctx);
}
EXPORT_SYMBOL(nss_capwap_get_max_buf_size);

/*
 * nss_capwap_init()
 *	Initializes CAPWAP. Gets called from nss_init.c.
 */
void nss_capwap_init()
{
	memset(&nss_capwap_hdl, 0, sizeof(nss_capwap_hdl));
	nss_capwap_stats_dentry_create();
	nss_capwap_strings_dentry_create();
}

/*
 * nss_capwap_msg_init()
 *	Initialize capwap message.
+ */ +void nss_capwap_msg_init(struct nss_capwap_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, + nss_capwap_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, (void*)cb, app_data); +} +EXPORT_SYMBOL(nss_capwap_msg_init); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_capwap_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_log.c new file mode 100644 index 000000000..b0b8564ac --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_log.c @@ -0,0 +1,282 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_capwap_log.c + * NSS CAPWAP logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_capwap_log_message_types_str + * CAPWAP message strings + */ +static int8_t *nss_capwap_log_message_types_str[NSS_CAPWAP_MSG_TYPE_MAX] __maybe_unused = { + "No Message", + "CAPWAP config Rule", + "CAPWAP unconfig Rule", + "CAPWAP Enable Tunnel", + "CAPWAP Disable Tunnel", + "CAPWAP Update Path MTU", + "CAPWAP Sync Stats", + "CAPWAP Version", + "CAPWAP DTLS", + "CAPWAP Add Flow Rule", + "CAPWAP Delete Flow Rule" +}; + +/* + * nss_capwap_log_error_response_types_str + * Strings for error types for CAPWAP messages + */ +static int8_t *nss_capwap_log_error_response_types_str[NSS_CAPWAP_ERROR_MSG_MAX] __maybe_unused = { + "CAPWAP Invalid Reassembly Timeout", + "CAPWAP Invalid PAth MTU", + "CAPWAP Invalid Max Fragment", + "CAPWAP Invalid Buffer Size", + "CAPWAP Invalid L3 Protocool", + "CAPWAP Invalid UDP Protocol", + "CAPWAP Invalid Version", + "CAPWAP Tunnel Disabled", + "CAPWAP Tunnel Enabled", + "CAPWAP Tunnel Not Configured", + "CAPWAP Invalid IP Node", + "CAPWAP Invalid Type Flag", + "CAPWAP Inavlid DTLS Config", + "CAPWAP Flow Table Full", + "CAPWAP Flow Exists", + "CAPWAP Flow Does Not Exist" +}; + +/* + * nss_capwap_rule_msg() + * Log NSS CAPWAP stats message. 
+ */ +static void nss_capwap_rule_msg(struct nss_capwap_msg *ncm) +{ + struct nss_capwap_rule_msg *ncrm __maybe_unused = &ncm->msg.rule; + nss_trace("%px: NSS CAPWAP Rule message \n" + "Encap Rule Src IP: %px\n" + "Encap Rule Src Port: %d\n" + "Encap Rule Dst Ip: %px\n" + "Encap Rule Dst Port: %d\n" + "Encap Rule Path MTU: %d\n" + "Decap Rule Reassembly Timeout: %d\n" + "Decap Rule Max Fragments: %d\n" + "Decap Rule Max Buffer Size: %d\n" + "Stats Timer: %d\n" + "RPS: %d\n" + "Type Flags: %x\n" + "L3 Protocol: %d\n" + "UDP Protocol: %d\n" + "MTU: %d\n" + "GMAC Interface Number: %d\n" + "Enabled Features: %x\n" + "DTLS Interface Number: %d\n" + "BSSID: %px\n" + "Outer Segment Value: %x\n", + ncrm, + &ncrm->encap.src_ip.ip, + ncrm->encap.src_port, + &ncrm->encap.dest_ip.ip, + ncrm->encap.dest_port, + ncrm->encap.path_mtu, + ncrm->decap.reassembly_timeout, + ncrm->decap.max_fragments, + ncrm->decap.max_buffer_size, + ncrm->stats_timer, + ncrm->rps, ncrm->type_flags, + ncrm->l3_proto, ncrm->which_udp, + ncrm->mtu_adjust, ncrm->gmac_ifnum, + ncrm->enabled_features, + ncrm->dtls_inner_if_num, + &ncrm->bssid, ncrm->outer_sgt_value); +} + +/* + * nss_capwap_path_mtu_msg() + * Log NSS CAPWAP path MTU message. + */ +static void nss_capwap_path_mtu_msg(struct nss_capwap_msg *ncm) +{ + struct nss_capwap_path_mtu_msg *ncpmm __maybe_unused = &ncm->msg.mtu; + nss_trace("%px: NSS CAPWAP Path MTU message \n" + "CAPWAP Path MTU: %d\n", + ncpmm, + ncpmm->path_mtu); +} + +/* + * nss_capwap_version_msg() + * Log NSS CAPWAP version message. + */ +static void nss_capwap_version_msg(struct nss_capwap_msg *ncm) +{ + struct nss_capwap_version_msg *ncvm __maybe_unused = &ncm->msg.version; + nss_trace("%px: NSS CAPWAP Version message \n" + "CAPWAP Version: %d\n", + ncvm, + ncvm->version); +} + +/* + * nss_capwap_dtls_msg() + * Log NSS CAPWAP dtls message. 
+ */ +static void nss_capwap_dtls_msg(struct nss_capwap_msg *ncm) +{ + struct nss_capwap_dtls_msg *ncdm __maybe_unused = &ncm->msg.dtls; + nss_trace("%px: NSS CAPWAP dtls message \n" + "CAPWAP DTLS Enable: %d\n" + "CAPWAP DTLS Inner Interface Number: %d\n" + "CAPWAP MTU Adjust: %d\n" + "CAPWAP Reserved: %x\n", + ncdm, + ncdm->enable, ncdm->dtls_inner_if_num, + ncdm->mtu_adjust, ncdm->reserved); +} + +/* + * nss_capwap_flow_rule_msg() + * Log NSS CAPWAP flow rule message. + */ +static void nss_capwap_flow_rule_msg(struct nss_capwap_flow_rule_msg *ncfrm) +{ + nss_trace("%px: NSS CAPWAP Flow Rule message \n" + "CAPWAP IP Version: %d\n" + "CAPWAP Layer 4 Protocol: %d\n" + "CAPWAP Source Port: %d\n" + "CAPWAP Destination Port: %d\n" + "CAPWAP Source IP: %x %x %x %x\n" + "CAPWAP Destination IP: %x %x %x %x" + "CAPWAP Flow ID: %d", + ncfrm, + ncfrm->ip_version, ncfrm->protocol, + ncfrm->src_port, ncfrm->dst_port, + ncfrm->src_ip[0], ncfrm->src_ip[1], + ncfrm->src_ip[2], ncfrm->src_ip[3], + ncfrm->dst_ip[0], ncfrm->dst_ip[1], + ncfrm->dst_ip[2], ncfrm->dst_ip[3], + ncfrm->flow_id); +} + +/* + * nss_capwap_flow_rule_add_msg() + * Log NSS CAPWAP flow rule add message. + */ +static void nss_capwap_flow_rule_add_msg(struct nss_capwap_msg *ncm) +{ + struct nss_capwap_flow_rule_msg *ncfrm __maybe_unused = &ncm->msg.flow_rule_add; + nss_capwap_flow_rule_msg(ncfrm); +} + +/* + * nss_capwap_flow_rule_del_msg() + * Log NSS CAPWAP flow rule del message. + */ +static void nss_capwap_flow_rule_del_msg(struct nss_capwap_msg *ncm) +{ + struct nss_capwap_flow_rule_msg *ncfrm __maybe_unused = &ncm->msg.flow_rule_del; + nss_capwap_flow_rule_msg(ncfrm); +} + +/* + * nss_capwap_log_verbose() + * Log message contents. 
+ */ +static void nss_capwap_log_verbose(struct nss_capwap_msg *ncm) +{ + switch (ncm->cm.type) { + case NSS_CAPWAP_MSG_TYPE_CFG_RULE: + nss_capwap_rule_msg(ncm); + break; + + case NSS_CAPWAP_MSG_TYPE_UPDATE_PATH_MTU: + nss_capwap_path_mtu_msg(ncm); + break; + + case NSS_CAPWAP_MSG_TYPE_VERSION: + nss_capwap_version_msg(ncm); + break; + + case NSS_CAPWAP_MSG_TYPE_DTLS: + nss_capwap_dtls_msg(ncm); + break; + + case NSS_CAPWAP_MSG_TYPE_FLOW_RULE_ADD: + nss_capwap_flow_rule_add_msg(ncm); + break; + + case NSS_CAPWAP_MSG_TYPE_FLOW_RULE_DEL: + nss_capwap_flow_rule_del_msg(ncm); + break; + + default: + nss_trace("%px: Invalid message type\n", ncm); + break; + } +} + +/* + * nss_capwap_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_capwap_log_tx_msg(struct nss_capwap_msg *ncm) +{ + if (ncm->cm.type >= NSS_CAPWAP_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", ncm); + return; + } + + nss_info("%px: type[%d]:%s\n", ncm, ncm->cm.type, nss_capwap_log_message_types_str[ncm->cm.type]); + nss_capwap_log_verbose(ncm); +} + +/* + * nss_capwap_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_capwap_log_rx_msg(struct nss_capwap_msg *ncm) +{ + if (ncm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ncm); + return; + } + + if (ncm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ncm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ncm, ncm->cm.type, + nss_capwap_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response]); + goto verbose; + } + + if (ncm->cm.error >= NSS_CAPWAP_ERROR_MSG_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ncm, ncm->cm.type, nss_capwap_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ncm, ncm->cm.type, nss_capwap_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error, nss_capwap_log_error_response_types_str[ncm->cm.error]); + +verbose: + nss_capwap_log_verbose(ncm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_capwap_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_log.h new file mode 100644 index 000000000..f62098979 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_CAPWAP_LOG_H__ +#define __NSS_CAPWAP_LOG_H__ + +/* + * nss_capwap_log.h + * NSS CAPWAP Log Header File. + */ + +/* + * nss_capwap_log_tx_msg + * Logs a CAPWAP message that is sent to the NSS firmware. + */ +void nss_capwap_log_tx_msg(struct nss_capwap_msg *ncm); + +/* + * nss_capwap_log_rx_msg + * Logs a CAPWAP message that is received from the NSS firmware. + */ +void nss_capwap_log_rx_msg(struct nss_capwap_msg *ncm); + +#endif /* __NSS_CAPWAP_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_capwap_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_stats.c new file mode 100644 index 000000000..1d4387808 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_stats.c @@ -0,0 +1,313 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_drv_stats.h" +#include "nss_core.h" +#include "nss_capwap.h" +#include "nss_capwap_stats.h" +#include "nss_capwap_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_capwap_stats_notifier); + +/* + * nss_capwap_stats_encap() + * Make a row for CAPWAP encap stats. + */ +static ssize_t nss_capwap_stats_encap(char *line, int len, int i, struct nss_capwap_tunnel_stats *s) +{ + uint64_t tcnt = 0; + + switch (i) { + case 0: + tcnt = s->pnode_stats.tx_packets; + break; + case 1: + tcnt = s->pnode_stats.tx_bytes; + break; + case 2: + tcnt = s->tx_segments; + break; + case 3: + tcnt = s->tx_dropped_sg_ref; + break; + case 4: + tcnt = s->tx_dropped_ver_mis; + break; + case 5: + tcnt = s->tx_dropped_inner; + break; + case 6: + tcnt = s->tx_dropped_hroom; + break; + case 7: + tcnt = s->tx_dropped_dtls; + break; + case 8: + tcnt = s->tx_dropped_nwireless; + break; + case 9: + tcnt = s->tx_queue_full_drops; + break; + case 10: + tcnt = s->tx_mem_failure_drops; + break; + case 11: + tcnt = s->fast_mem; + break; + default: + return 0; + } + + return snprintf(line, len, "%s = %llu\n", nss_capwap_strings_encap_stats[i].stats_name, tcnt); +} + +/* + * nss_capwap_stats_decap() + * Make a row for CAPWAP decap stats. 
 */
static ssize_t nss_capwap_stats_decap(char *line, int len, int i, struct nss_capwap_tunnel_stats *s)
{
	uint64_t tcnt = 0;

	/*
	 * Row index i selects the decap counter; names come from
	 * nss_capwap_strings_decap_stats[] in the same order.
	 */
	switch (i) {
	case 0:
		tcnt = s->pnode_stats.rx_packets;
		break;
	case 1:
		tcnt = s->pnode_stats.rx_bytes;
		break;
	case 2:
		tcnt = s->dtls_pkts;
		break;
	case 3:
		tcnt = s->rx_segments;
		break;
	case 4:
		tcnt = s->pnode_stats.rx_dropped;
		break;
	case 5:
		tcnt = s->rx_oversize_drops;
		break;
	case 6:
		tcnt = s->rx_frag_timeout_drops;
		break;
	case 7:
		tcnt = s->rx_dup_frag;
		break;
	case 8:
		tcnt = s->rx_frag_gap_drops;
		break;
	case 9:
		/* This row also reports the n2h queue-full component inline. */
		tcnt = s->rx_n2h_drops;
		return snprintf(line, len, "%s = %llu (n2h = %llu)\n", nss_capwap_strings_decap_stats[i].stats_name, tcnt, s->rx_n2h_queue_full_drops);
	case 10:
		tcnt = s->rx_n2h_queue_full_drops;
		break;
	case 11:
		tcnt = s->rx_mem_failure_drops;
		break;
	case 12:
		tcnt = s->rx_csum_drops;
		break;
	case 13:
		tcnt = s->rx_malformed;
		break;
	case 14:
		tcnt = s->fast_mem;
		break;
	default:
		/* Past the last row: signals the caller to stop. */
		return 0;
	}

	return snprintf(line, len, "%s = %llu\n", nss_capwap_strings_decap_stats[i].stats_name, tcnt);
}

/*
 * nss_capwap_stats_read()
 *	Read CAPWAP stats.
 *
 * type selects the direction: 1 = encap (HOST_INNER nodes),
 * 0 = decap (OUTER nodes).
 */
static ssize_t nss_capwap_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos, uint16_t type)
{
	struct nss_stats_data *data = fp->private_data;
	ssize_t bytes_read = 0;
	struct nss_capwap_tunnel_stats stats;
	size_t bytes;
	char line[80];
	int start;
	uint32_t if_num = NSS_DYNAMIC_IF_START;
	uint32_t max_if_num = NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES;

	/* Resume from where the previous read() left off. */
	if (data) {
		if_num = data->if_num;
	}

	/*
	 * If we are done accommodating all the CAPWAP tunnels.
	 * NOTE(review): max_if_num is one past the last valid index, yet the
	 * checks below use '>' / '<=' — presumably nss_is_dynamic_interface()
	 * rejects the out-of-range value, but confirm the off-by-one.
	 */
	if (if_num > max_if_num) {
		return 0;
	}

	for (; if_num <= max_if_num; if_num++) {
		bool isthere;
		enum nss_dynamic_interface_type dtype;

		if (nss_is_dynamic_interface(if_num) == false) {
			continue;
		}

		dtype = nss_dynamic_interface_get_type(nss_capwap_get_ctx(), if_num);

		/*
		 * Read encap stats from inner node and decap stats from outer node.
		 */
		if ((type == 1) && (dtype != NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_HOST_INNER)) {
			continue;
		}

		if ((type == 0) && (dtype != NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_OUTER)) {
			continue;
		}

		/*
		 * If CAPWAP tunnel does not exist, then isthere will be false.
		 */
		isthere = nss_capwap_get_stats(if_num, &stats);
		if (!isthere) {
			continue;
		}

		bytes = snprintf(line, sizeof(line), "----if_num : %2d----\n", if_num);
		if ((bytes_read + bytes) > sz) {
			break;
		}

		if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
			bytes_read = -EFAULT;
			goto fail;
		}
		bytes_read += bytes;
		start = 0;
		/* Emit one row per counter until the formatter returns 0. */
		while (bytes_read < sz) {
			if (type == 1) {
				bytes = nss_capwap_stats_encap(line, sizeof(line), start, &stats);
			} else {
				bytes = nss_capwap_stats_decap(line, sizeof(line), start, &stats);
			}

			/*
			 * If we don't have any more lines in decap/encap.
			 */
			if (bytes == 0) {
				break;
			}

			if ((bytes_read + bytes) > sz)
				break;

			if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) {
				bytes_read = -EFAULT;
				goto fail;
			}

			bytes_read += bytes;
			start++;
		}
	}

	/*
	 * NOTE(review): *ppos is assigned, not advanced (*ppos += ...);
	 * looks intentional given data->if_num tracks the resume point,
	 * but confirm against the nss_stats open/read conventions.
	 */
	if (bytes_read > 0) {
		*ppos = bytes_read;
	}

	if (data) {
		data->if_num = if_num;
	}
fail:
	return bytes_read;
}

/*
 * nss_capwap_decap_stats_read()
 *	Read CAPWAP decap stats.
 */
static ssize_t nss_capwap_decap_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	return nss_capwap_stats_read(fp, ubuf, sz, ppos, 0);
}

/*
 * nss_capwap_encap_stats_read()
 *	Read CAPWAP encap stats.
 */
static ssize_t nss_capwap_encap_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
{
	return nss_capwap_stats_read(fp, ubuf, sz, ppos, 1);
}

/*
 * nss_capwap_stats_ops
 */
NSS_STATS_DECLARE_FILE_OPERATIONS(capwap_encap);
NSS_STATS_DECLARE_FILE_OPERATIONS(capwap_decap);

/*
 * nss_capwap_stats_dentry_create()
 *	Create CAPWAP statistics debug entry.
 */
void nss_capwap_stats_dentry_create(void)
{
	nss_stats_create_dentry("capwap_encap", &nss_capwap_encap_stats_ops);
	nss_stats_create_dentry("capwap_decap", &nss_capwap_decap_stats_ops);
}

/*
 * nss_capwap_stats_notify()
 *	Sends notifications to the registered modules.
 *
 * Leverage NSS-FW statistics timing to update Netlink.
 */
void nss_capwap_stats_notify(uint32_t if_num, uint32_t core_id)
{
	struct nss_capwap_stats_notification capwap_stats;

	capwap_stats.core_id = core_id;
	capwap_stats.if_num = if_num;
	nss_capwap_get_stats(if_num, &capwap_stats.stats);
	atomic_notifier_call_chain(&nss_capwap_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&capwap_stats);
}

/*
 * nss_capwap_stats_register_notifier()
 *	Registers statistics notifier.
+ */ +int nss_capwap_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_capwap_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_capwap_stats_register_notifier); + +/* + * nss_capwap_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_capwap_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_capwap_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_capwap_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_capwap_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_stats.h new file mode 100644 index 000000000..c1033ec15 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_stats.h @@ -0,0 +1,26 @@ +/* + ****************************************************************************** + * Copyright (c) 2017,2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * **************************************************************************** + */ + +#ifndef __NSS_CAPWAP_STATS_H__ +#define __NSS_CAPWAP_STATS_H__ + +/* + * CAPWAP statistics APIs + */ +extern void nss_capwap_stats_notify(uint32_t if_num, uint32_t core_id); +extern void nss_capwap_stats_dentry_create(void); + +#endif /* __NSS_CAPWAP_STATS_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_capwap_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_strings.c new file mode 100644 index 000000000..266cd50b8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_strings.c @@ -0,0 +1,102 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" +#include "nss_capwap_strings.h" + +/* + * nss_capwap_strings_encap_stats + * CAPWAP encap statistics string. 
+ */ +struct nss_stats_info nss_capwap_strings_encap_stats[NSS_CAPWAP_STATS_ENCAP_MAX] = { + {"tx_packets", NSS_STATS_TYPE_COMMON}, + {"tx_bytes", NSS_STATS_TYPE_COMMON}, + {"tx_segments", NSS_STATS_TYPE_SPECIAL}, + {"tx_drop_seg_ref", NSS_STATS_TYPE_DROP}, + {"tx_drop_ver_mismatch",NSS_STATS_TYPE_DROP}, + {"tx_dropped_inner", NSS_STATS_TYPE_DROP}, + {"tx_drop_hroom", NSS_STATS_TYPE_DROP}, + {"tx_drop_DTLS", NSS_STATS_TYPE_DROP}, + {"tx_drop_nwireless", NSS_STATS_TYPE_DROP}, + {"tx_drop_qfull", NSS_STATS_TYPE_DROP}, + {"tx_drop_mem_fail", NSS_STATS_TYPE_DROP}, + {"fast_mem", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_capwap_encap_strings_read() + * Read CAPWAP encap statistics names. + */ +static ssize_t nss_capwap_encap_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_capwap_strings_encap_stats, NSS_CAPWAP_STATS_ENCAP_MAX); +} + +/* + * nss_capwap_encap_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(capwap_encap); + +/* + * nss_capwap_strings_decap_stats + * CAPWAP decap statistics string. + */ +struct nss_stats_info nss_capwap_strings_decap_stats[NSS_CAPWAP_STATS_DECAP_MAX] = { + {"rx_packets", NSS_STATS_TYPE_COMMON}, + {"rx_bytes", NSS_STATS_TYPE_COMMON}, + {"rx_DTLS_pkts", NSS_STATS_TYPE_SPECIAL}, + {"rx_segments", NSS_STATS_TYPE_SPECIAL}, + {"rx_dropped", NSS_STATS_TYPE_DROP}, + {"rx_drop_oversize", NSS_STATS_TYPE_DROP}, + {"rx_drop_frag_timeout",NSS_STATS_TYPE_DROP}, + {"rx_drop_frag_dup", NSS_STATS_TYPE_DROP}, + {"rx_drop_frag_gap", NSS_STATS_TYPE_DROP}, + {"rx_drop_n2h", NSS_STATS_TYPE_DROP}, + {"rx_drop_n2h_qfull", NSS_STATS_TYPE_DROP}, + {"rx_drop_mem_fail", NSS_STATS_TYPE_DROP}, + {"rx_drop_csum", NSS_STATS_TYPE_DROP}, + {"rx_drop_malformed", NSS_STATS_TYPE_DROP}, + {"fast_mem", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_capwap_decap_strings_read() + * Read CAPWAP decap statistics names. 
+ */ +static ssize_t nss_capwap_decap_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_capwap_strings_decap_stats, NSS_CAPWAP_STATS_DECAP_MAX); +} + +/* + * nss_capwap_decap_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(capwap_decap); + +/* + * nss_capwap_strings_dentry_create() + * Create CAPWAP statistics strings debug entry. + */ +void nss_capwap_strings_dentry_create(void) +{ + nss_strings_create_dentry("capwap_encap", &nss_capwap_encap_strings_ops); + nss_strings_create_dentry("capwap_decap", &nss_capwap_decap_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_capwap_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_strings.h new file mode 100644 index 000000000..96f89cdce --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_capwap_strings.h @@ -0,0 +1,28 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_CAPWAP_STRINGS_H +#define __NSS_CAPWAP_STRINGS_H + +#include "nss_capwap_stats.h" + +extern struct nss_stats_info nss_capwap_strings_encap_stats[NSS_CAPWAP_STATS_ENCAP_MAX]; +extern struct nss_stats_info nss_capwap_strings_decap_stats[NSS_CAPWAP_STATS_DECAP_MAX]; +extern void nss_capwap_strings_dentry_create(void); + +#endif /* __NSS_CAPWAP_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_clmap.c b/feeds/ipq807x/qca-nss-drv/src/nss_clmap.c new file mode 100644 index 000000000..777b71936 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_clmap.c @@ -0,0 +1,346 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + + /* + * nss_clmap.c + * NSS clmap driver interface APIs + */ +#include "nss_core.h" +#include "nss_clmap.h" +#include "nss_cmn.h" +#include "nss_tx_rx_common.h" +#include "nss_clmap_stats.h" +#include "nss_clmap_strings.h" +#include "nss_clmap_log.h" + +#define NSS_CLMAP_TX_TIMEOUT 3000 + +/* + * Private data structure + */ +static struct nss_clmap_pvt { + struct semaphore sem; /* Semaphore structure. */ + struct completion complete; /* Completion structure. */ + int response; /* Response from FW. */ + void *cb; /* Original cb for msgs. */ + void *app_data; /* Original app_data for msgs. */ +} clmap_pvt; + +/* + * nss_clmap_verify_if_num() + * Verify if_num passed to us. + */ +static bool nss_clmap_verify_if_num(uint32_t if_num) +{ + uint32_t type = nss_dynamic_interface_get_type(nss_clmap_get_ctx(), if_num); + + return ((type == NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US) || + (type == NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS)); +} + +/* + * nss_clmap_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_clmap_callback(void *app_data, struct nss_clmap_msg *nclm) +{ + clmap_pvt.response = NSS_TX_SUCCESS; + clmap_pvt.cb = NULL; + clmap_pvt.app_data = NULL; + + if (nclm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("clmap Error response %d\n", nclm->cm.response); + clmap_pvt.response = nclm->cm.response; + } + + /* + * Write memory barrier. + */ + smp_wmb(); + complete(&clmap_pvt.complete); +} + +/* + * nss_clmap_handler() + * Handle NSS -> HLOS messages for clmap. + */ +static void nss_clmap_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_clmap_msg *nclm = (struct nss_clmap_msg *)ncm; + nss_clmap_msg_callback_t cb; + + BUG_ON(!nss_clmap_verify_if_num(ncm->interface)); + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_CLMAP_MSG_TYPE_MAX) { + nss_warning("%px: received invalid message %d for clmap interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_clmap_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Trace messages. + */ + nss_core_log_msg_failures(nss_ctx, ncm); + nss_clmap_log_rx_msg(nclm); + + switch (nclm->cm.type) { + case NSS_CLMAP_MSG_TYPE_SYNC_STATS: + nss_clmap_stats_sync(nss_ctx, &nclm->msg.stats, ncm->interface); + nss_clmap_stats_notify(nss_ctx, ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->nss_rx_interface_handlers[ncm->interface].app_data; + } + + /* + * Do we have a callback + */ + cb = (nss_clmap_msg_callback_t)ncm->cb; + if (!cb) { + nss_trace("%px: cb is null for interface %d", nss_ctx, ncm->interface); + return; + } + + cb((void *)ncm->app_data, ncm); +} + +/* + * nss_clmap_tx_msg() + * Transmit a clmap message to NSS FW. Don't call this from softirq/interrupts. + */ +nss_tx_status_t nss_clmap_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_clmap_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + if (!nss_clmap_verify_if_num(msg->cm.interface)) { + return NSS_TX_FAILURE_BAD_PARAM; + } + + if (ncm->type >= NSS_CLMAP_MSG_TYPE_MAX) { + return NSS_TX_FAILURE_BAD_PARAM; + } + + /* + * Trace messages. + */ + nss_clmap_log_tx_msg(msg); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_clmap_tx_msg); + +/* + * nss_clmap_tx_msg_sync() + * Transmit a clmap message to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_clmap_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_clmap_msg *nclm) +{ + nss_tx_status_t status; + int ret; + + down(&clmap_pvt.sem); + nclm->cm.cb = (nss_ptr_t)nss_clmap_callback; + nclm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_clmap_tx_msg(nss_ctx, nclm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: clmap_tx_msg failed\n", nss_ctx); + up(&clmap_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&clmap_pvt.complete, msecs_to_jiffies(NSS_CLMAP_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: clmap tx sync failed due to timeout\n", nss_ctx); + clmap_pvt.response = NSS_TX_FAILURE; + } + + status = clmap_pvt.response; + up(&clmap_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_clmap_tx_msg_sync); + +/* + * nss_clmap_tx_buf() + * Transmit data buffer (skb) to a NSS interface number + */ +nss_tx_status_t nss_clmap_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *buf, uint32_t if_num) +{ + BUG_ON(!nss_clmap_verify_if_num(if_num)); + + return nss_core_send_packet(nss_ctx, buf, if_num, H2N_BIT_FLAG_VIRTUAL_BUFFER); +} +EXPORT_SYMBOL(nss_clmap_tx_buf); + +/* + * nss_clmap_unregister() + * Un-register a clmap interface from NSS. 
+ */ +bool nss_clmap_unregister(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx; + int status; + + nss_ctx = nss_clmap_get_ctx(); + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_clmap_verify_if_num(if_num)) { + nss_warning("%px: clmap unregister request received for invalid interface %d", nss_ctx, if_num); + return false; + } + + status = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Failed to unregister handler for clmap NSS I/F:%u\n", nss_ctx, if_num); + return false; + } + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + nss_core_unregister_handler(nss_ctx, if_num); + nss_clmap_stats_session_unregister(if_num); + + return true; +} +EXPORT_SYMBOL(nss_clmap_unregister); + +/* + * nss_clmap_register() + * Registers a clmap interface with the NSS. + */ +struct nss_ctx_instance *nss_clmap_register(uint32_t if_num, + uint32_t di_type, + nss_clmap_buf_callback_t data_cb, + nss_clmap_msg_callback_t notify_cb, + struct net_device *netdev, + uint32_t features) +{ + struct nss_ctx_instance *nss_ctx; + int core_status; + bool stats_status = false; + + nss_ctx = nss_clmap_get_ctx(); + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_clmap_verify_if_num(if_num)) { + nss_warning("%px: clmap register request received for invalid interface %d", nss_ctx, if_num); + goto fail; + } + + if (di_type == NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US) { + stats_status = nss_clmap_stats_session_register(if_num, NSS_CLMAP_INTERFACE_TYPE_US, netdev); + } else { + stats_status = nss_clmap_stats_session_register(if_num, NSS_CLMAP_INTERFACE_TYPE_DS, netdev); + } + + if (!stats_status) { + nss_warning("%px: statistics registration failed for interface: %d\n", nss_ctx, if_num); + goto fail; + } + + core_status = nss_core_register_handler(nss_ctx, if_num, nss_clmap_msg_handler, (void *)netdev); + if (core_status != NSS_CORE_STATUS_SUCCESS) { + goto core_reg_fail; + } + + core_status = nss_core_register_msg_handler(nss_ctx, if_num, 
notify_cb); + if (core_status != NSS_CORE_STATUS_SUCCESS) { + goto msg_reg_fail; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, data_cb, NULL, (void *)netdev, netdev, features); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, di_type); + + return nss_ctx; + +msg_reg_fail: + nss_core_unregister_handler(nss_ctx, if_num); +core_reg_fail: + nss_clmap_stats_session_unregister(if_num); + nss_warning("%px: NSS core register handler failed for if_num:%d with error :%d", nss_ctx, if_num, core_status); +fail: + return NULL; + +} +EXPORT_SYMBOL(nss_clmap_register); + +/* + * nss_clmap_ifnum_with_core_id() + * Append core ID to clmap interface num. + */ +int nss_clmap_ifnum_with_core_id(int if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_clmap_get_ctx(); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + if (!nss_is_dynamic_interface(if_num)) { + nss_warning("%px: Invalid if_num: %d, must be a dynamic interface\n", nss_ctx, if_num); + return 0; + } + return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_clmap_ifnum_with_core_id); + +/* + * nss_clmap_msg_init() + * Initialize clmap message. + */ +void nss_clmap_msg_init(struct nss_clmap_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, + nss_clmap_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, (void*)cb, app_data); +} +EXPORT_SYMBOL(nss_clmap_msg_init); + +/* + * nss_clmap_get_ctx() + * Return a clmap NSS context. + */ +struct nss_ctx_instance *nss_clmap_get_ctx() +{ + struct nss_ctx_instance *nss_ctx; + + nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.clmap_handler_id]; + return nss_ctx; +} +EXPORT_SYMBOL(nss_clmap_get_ctx); + +/* + * nss_clmap_init() + * Initializes clmap. Gets called from nss_init.c. 
+ */ +void nss_clmap_init() +{ + sema_init(&clmap_pvt.sem, 1); + init_completion(&clmap_pvt.complete); + + nss_clmap_stats_dentry_create(); + nss_clmap_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_clmap_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_log.c new file mode 100644 index 000000000..45cb0a734 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_log.c @@ -0,0 +1,207 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_clmap_log.c + * NSS clmap logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_clmap_log_message_types_str + * clmap message strings + */ +static char *nss_clmap_log_message_types_str[NSS_CLMAP_MSG_TYPE_MAX] __maybe_unused = { + "Clmap sync stats", + "Clmap enable interface", + "Clmap disable interface", + "Clmap add MAC rule", + "Clmap delete MAC rule", + "Clmap flush MAC rule" +}; + +/* + * nss_clmap_log_error_types_str + * Strings for error types for clmap messages + */ +static char *nss_clmap_log_error_types_str[NSS_CLMAP_ERROR_MAX] __maybe_unused = { + "Clmap unknown error", + "Clmap interface disabled", + "Clmap interface enabled", + "Clmap invalid VLAN", + "Clmap invalid tunnel ID", + "Clmap MAC table full", + "Clmap MAC exists", + "Clmap MAC does not exist", + "Clmap MAC entry unhashed", + "Clmap MAC entry insert failed", + "Clmap MAC entry alloc failed", + "Clmap MAC entry delete failed" +}; + +/* + * nss_clmap_log_mac_msg() + * Log NSS clmap MAC rule message. + */ +static void nss_clmap_log_mac_msg(struct nss_clmap_mac_msg *npvcm) +{ + nss_trace("%px: NSS clmap MAC message \n" + "Clmap Mac Addr: %x : %x : %x" + "Clmap Flags: %u\n" + "Clmap VLAN ID: %u\n" + "Clmap Next-hop Interface Number: %d\n", + npvcm, + npvcm->mac_addr[0], npvcm->mac_addr[1], + npvcm->mac_addr[2], npvcm->flags, + npvcm->vlan_id, npvcm->nexthop_ifnum); +} + +/* + * nss_clmap_log_interface_enable_msg() + * Log NSS clmap rule enable message. + */ +static void nss_clmap_log_interface_enable_msg(struct nss_clmap_msg *npvm) +{ + nss_trace("%px: NSS clmap interface state message: Enable \n", npvm); +} + +/* + * nss_clmap_log_interface_disable_msg() + * Log NSS clmap rule disable message. + */ +static void nss_clmap_log_interface_disable_msg(struct nss_clmap_msg *npvm) +{ + nss_trace("%px: NSS clmap interface state message: Disable \n", npvm); +} + +/* + * nss_clmap_log_mac_add_msg() + * Log NSS clmap mac rule add message. 
+ */ +static void nss_clmap_log_mac_add_msg(struct nss_clmap_msg *npvm) +{ + struct nss_clmap_mac_msg *npvcm __maybe_unused = &npvm->msg.mac_add; + nss_clmap_log_mac_msg(npvcm); +} + +/* + * nss_clmap_log_mac_del_msg() + * Log NSS clmap mac rule del message. + */ +static void nss_clmap_log_mac_del_msg(struct nss_clmap_msg *npvm) +{ + struct nss_clmap_mac_msg *npvcm __maybe_unused = &npvm->msg.mac_del; + nss_clmap_log_mac_msg(npvcm); +} + +/* + * nss_clmap_log_mac_flush_msg() + * Log NSS clmap mac rule flush message. + */ +static void nss_clmap_log_mac_flush_msg(struct nss_clmap_msg *npvm) +{ + struct nss_clmap_flush_mac_msg *npvcm __maybe_unused = &npvm->msg.mac_flush; + nss_trace("%px: NSS clmap MAC flush message \n" + "Clmap Next-hop Interface Number: %d\n", + npvcm, npvcm->nexthop_ifnum); +} + +/* + * nss_clmap_log_verbose() + * Log message contents. + */ +static void nss_clmap_log_verbose(struct nss_clmap_msg *npvm) +{ + switch (npvm->cm.type) { + case NSS_CLMAP_MSG_TYPE_INTERFACE_ENABLE: + nss_clmap_log_interface_enable_msg(npvm); + break; + + case NSS_CLMAP_MSG_TYPE_INTERFACE_DISABLE: + nss_clmap_log_interface_disable_msg(npvm); + break; + + case NSS_CLMAP_MSG_TYPE_MAC_ADD: + nss_clmap_log_mac_add_msg(npvm); + break; + + case NSS_CLMAP_MSG_TYPE_MAC_DEL: + nss_clmap_log_mac_del_msg(npvm); + break; + + case NSS_CLMAP_MSG_TYPE_MAC_FLUSH: + nss_clmap_log_mac_flush_msg(npvm); + break; + + case NSS_CLMAP_MSG_TYPE_SYNC_STATS: + break; + + default: + nss_trace("%px: Invalid message type\n", npvm); + break; + } +} + +/* + * nss_clmap_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_clmap_log_tx_msg(struct nss_clmap_msg *npvm) +{ + if (npvm->cm.type >= NSS_CLMAP_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", npvm); + return; + } + + nss_info("%px: type[%d]:%s\n", npvm, npvm->cm.type, nss_clmap_log_message_types_str[npvm->cm.type]); + nss_clmap_log_verbose(npvm); +} + +/* + * nss_clmap_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_clmap_log_rx_msg(struct nss_clmap_msg *npvm) +{ + if (npvm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", npvm); + return; + } + + if (npvm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (npvm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", npvm, npvm->cm.type, + nss_clmap_log_message_types_str[npvm->cm.type], + npvm->cm.response, nss_cmn_response_str[npvm->cm.response]); + goto verbose; + } + + if (npvm->cm.error >= NSS_CLMAP_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + npvm, npvm->cm.type, nss_clmap_log_message_types_str[npvm->cm.type], + npvm->cm.response, nss_cmn_response_str[npvm->cm.response], + npvm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + npvm, npvm->cm.type, nss_clmap_log_message_types_str[npvm->cm.type], + npvm->cm.response, nss_cmn_response_str[npvm->cm.response], + npvm->cm.error, nss_clmap_log_error_types_str[npvm->cm.error]); + +verbose: + nss_clmap_log_verbose(npvm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_clmap_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_log.h new file mode 100644 index 000000000..6d193d315 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_CLMAP_LOG_H__ +#define __NSS_CLMAP_LOG_H__ + +/* + * nss_clmap_log.h + * NSS clmap Log Header File. + */ + +/* + * nss_clmap_log_tx_msg + * Logs a clmap message that is sent to the NSS firmware. + */ +void nss_clmap_log_tx_msg(struct nss_clmap_msg *ncm); + +/* + * nss_clmap_log_rx_msg + * Logs a clmap message that is received from the NSS firmware. + */ +void nss_clmap_log_rx_msg(struct nss_clmap_msg *ncm); + +#endif /* __NSS_CLMAP_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_clmap_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_stats.c new file mode 100644 index 000000000..a75bffffe --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_stats.c @@ -0,0 +1,296 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_clmap.h" +#include "nss_clmap_stats.h" +#include "nss_clmap_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_clmap_stats_notifier); + +/* + * Spinlock to protect clmap statistics update/read + */ +DEFINE_SPINLOCK(nss_clmap_stats_lock); + +struct nss_clmap_stats *stats_db[NSS_CLMAP_MAX_DEBUG_INTERFACES] = {NULL}; + +/* + * nss_clmap_interface_type_str + * Clmap interface type string. + */ +static char *nss_clmap_interface_type_str[NSS_CLMAP_INTERFACE_TYPE_MAX] = { + "Upstream", + "Downstream" +}; + +/* + * nss_clmap_stats_session_unregister + * Unregister debug statistic for clmap session. + */ +void nss_clmap_stats_session_unregister(uint32_t if_num) +{ + uint32_t i; + + spin_lock_bh(&nss_clmap_stats_lock); + for (i = 0; i < NSS_CLMAP_MAX_DEBUG_INTERFACES; i++) { + if (stats_db[i] && (stats_db[i]->nss_if_num == if_num)) { + kfree(stats_db[i]); + stats_db[i] = NULL; + break; + } + } + spin_unlock_bh(&nss_clmap_stats_lock); +} + +/* + * nss_clmap_stats_session_register + * Register debug statistic for clmap session. 
+ */ +bool nss_clmap_stats_session_register(uint32_t if_num, uint32_t if_type, struct net_device *netdev) +{ + uint32_t i; + bool stats_status = false; + + if (!netdev) { + nss_warning("Could not allocate statistics memory as the net device is NULL!\n"); + return stats_status; + } + + spin_lock_bh(&nss_clmap_stats_lock); + for (i = 0; i < NSS_CLMAP_MAX_DEBUG_INTERFACES; i++) { + if (!stats_db[i]) { + stats_db[i] = (struct nss_clmap_stats *)kzalloc(sizeof(struct nss_clmap_stats), GFP_KERNEL); + if (!stats_db[i]) { + nss_warning("%px: could not allocate memory for statistics database for interface id: %d\n", netdev, if_num); + break; + } + stats_db[i]->valid = true; + stats_db[i]->nss_if_num = if_num; + stats_db[i]->nss_if_type = if_type; + stats_db[i]->if_index = netdev->ifindex; + stats_status = true; + break; + } + } + spin_unlock_bh(&nss_clmap_stats_lock); + return stats_status; +} + +/* + * nss_clmap_get_debug_stats() + * Get clmap debug statistics. + */ +static int nss_clmap_get_debug_stats(struct nss_clmap_stats *stats) +{ + uint32_t i; + int interface_cnt = 0; + + spin_lock_bh(&nss_clmap_stats_lock); + for (i = 0; i < NSS_CLMAP_MAX_DEBUG_INTERFACES; i++) { + if (stats_db[i]) { + memcpy(stats, stats_db[i], sizeof(struct nss_clmap_stats)); + stats++; + interface_cnt++; + } + } + spin_unlock_bh(&nss_clmap_stats_lock); + + return interface_cnt; +} + +/* + * nss_clmap_stats_read() + * Read clmap statistics + */ +static ssize_t nss_clmap_stats_read(struct file *fp, char __user *ubuf, + size_t sz, loff_t *ppos) +{ + uint32_t max_output_lines = (NSS_CLMAP_INTERFACE_STATS_MAX * NSS_CLMAP_MAX_DEBUG_INTERFACES) + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct net_device *dev; + uint32_t id; + struct nss_clmap_stats *clmap_stats = NULL; + int interface_cnt; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory 
for local statistics buffer"); + return 0; + } + + /* + * Allocate statistics memory only for all interfaces. + */ + clmap_stats = kzalloc((NSS_CLMAP_MAX_DEBUG_INTERFACES * sizeof(struct nss_clmap_stats)), GFP_KERNEL); + if (unlikely(!clmap_stats)) { + nss_warning("Could not allocate memory for populating clmap statistics\n"); + kfree(lbuf); + return 0; + } + + /* + * Get clmap statistics. + */ + interface_cnt = nss_clmap_get_debug_stats(clmap_stats); + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "clmap stats", NSS_STATS_SINGLE_CORE); + for (id = 0; id < interface_cnt; id++) { + struct nss_clmap_stats *clmsp = clmap_stats + id; + + if (unlikely(!clmsp->valid)) { + continue; + } + + dev = dev_get_by_index(&init_net, clmsp->if_index); + if (unlikely(!dev)) { + nss_warning("No netdev available for nss interface id:%d\n", clmsp->nss_if_num); + continue; + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, interface type=%s, netdevice=%s\n", id, + clmsp->nss_if_num, nss_clmap_interface_type_str[clmsp->nss_if_type], dev->name); + dev_put(dev); + + size_wr += nss_stats_print("clmap", NULL, NSS_STATS_SINGLE_INSTANCE, nss_clmap_strings_stats, + clmsp->stats, NSS_CLMAP_INTERFACE_STATS_MAX, lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(clmap_stats); + kfree(lbuf); + return bytes_read; +} + +/* + * nss_clmap_stats_sync() + * Sync function for clmap statistics + */ +void nss_clmap_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_clmap_stats_msg *stats_msg, uint32_t if_num) +{ + uint32_t i; + struct nss_clmap_stats *s = NULL; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + spin_lock_bh(&nss_clmap_stats_lock); + for (i = 0; i < NSS_CLMAP_MAX_DEBUG_INTERFACES; i++) { + if (stats_db[i] && (stats_db[i]->nss_if_num == if_num)) { + s = stats_db[i]; + break; + } + } + + if (!s) { + spin_unlock_bh(&nss_clmap_stats_lock); + nss_warning("%px: Interface not found: %u", nss_ctx, 
if_num); + return; + } + + s->stats[NSS_CLMAP_INTERFACE_STATS_RX_PKTS] += stats_msg->node_stats.rx_packets; + s->stats[NSS_CLMAP_INTERFACE_STATS_RX_BYTES] += stats_msg->node_stats.rx_bytes; + s->stats[NSS_CLMAP_INTERFACE_STATS_TX_PKTS] += stats_msg->node_stats.tx_packets; + s->stats[NSS_CLMAP_INTERFACE_STATS_TX_BYTES] += stats_msg->node_stats.tx_bytes; + + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + s->stats[NSS_CLMAP_INTERFACE_STATS_RX_QUEUE_0_DROPPED + i] += stats_msg->node_stats.rx_dropped[i]; + } + + s->stats[NSS_CLMAP_INTERFACE_STATS_DROPPED_MACDB_LOOKUP_FAILED] += stats_msg->dropped_macdb_lookup_failed; + s->stats[NSS_CLMAP_INTERFACE_STATS_DROPPED_INVALID_PACKET_SIZE] += stats_msg->dropped_invalid_packet_size; + s->stats[NSS_CLMAP_INTERFACE_STATS_DROPPED_LOW_HEADROOM] += stats_msg->dropped_low_hroom; + s->stats[NSS_CLMAP_INTERFACE_STATS_DROPPED_NEXT_NODE_QUEUE_FULL] += stats_msg->dropped_next_node_queue_full; + s->stats[NSS_CLMAP_INTERFACE_STATS_DROPPED_PBUF_ALLOC_FAILED] += stats_msg->dropped_pbuf_alloc_failed; + s->stats[NSS_CLMAP_INTERFACE_STATS_DROPPED_LINEAR_FAILED] += stats_msg->dropped_linear_failed; + s->stats[NSS_CLMAP_INTERFACE_STATS_SHARED_PACKET_CNT] += stats_msg->shared_packet_count; + s->stats[NSS_CLMAP_INTERFACE_STATS_ETHERNET_FRAME_ERROR] += stats_msg->ethernet_frame_error; + s->stats[NSS_CLMAP_INTERFACE_STATS_MACDB_CREATE_REQUESTS_CNT] += stats_msg->macdb_create_requests; + s->stats[NSS_CLMAP_INTERFACE_STATS_MACDB_CREATE_MAC_EXISTS_CNT] += stats_msg->macdb_create_mac_exists; + s->stats[NSS_CLMAP_INTERFACE_STATS_MACDB_CREATE_MAC_TABLE_FULL_CNT] += stats_msg->macdb_create_table_full; + s->stats[NSS_CLMAP_INTERFACE_STATS_MACDB_DESTROY_REQUESTS_CNT] += stats_msg->macdb_destroy_requests; + s->stats[NSS_CLMAP_INTERFACE_STATS_MACDB_DESTROY_MAC_NOT_FOUND_CNT] += stats_msg->macdb_destroy_mac_notfound; + s->stats[NSS_CLMAP_INTERFACE_STATS_MACDB_DESTROY_MAC_UNHASHED_CNT] += stats_msg->macdb_destroy_mac_unhashed; + 
s->stats[NSS_CLMAP_INTERFACE_STATS_MACDB_FLUSH_REQUESTS_CNT] += stats_msg->macdb_flush_requests; + spin_unlock_bh(&nss_clmap_stats_lock); +} + +/* + * nss_clmap_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(clmap) + +/* + * nss_clmap_stats_dentry_create() + * Create client map statistics debug entry. + */ +void nss_clmap_stats_dentry_create(void) +{ + nss_stats_create_dentry("clmap", &nss_clmap_stats_ops); +} + +/* + * nss_clmap_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_clmap_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_clmap_stats_notification clmap_stats; + struct nss_clmap_stats *s = NULL; + int i; + + spin_lock_bh(&nss_clmap_stats_lock); + for (i = 0; i < NSS_CLMAP_MAX_DEBUG_INTERFACES; i++) { + if (!stats_db[i] || (stats_db[i]->nss_if_num != if_num)) { + continue; + } + + s = stats_db[i]; + clmap_stats.core_id = nss_ctx->id; + clmap_stats.if_num = if_num; + memcpy(clmap_stats.stats_ctx, s->stats, sizeof(clmap_stats.stats_ctx)); + spin_unlock_bh(&nss_clmap_stats_lock); + atomic_notifier_call_chain(&nss_clmap_stats_notifier, NSS_STATS_EVENT_NOTIFY, &clmap_stats); + return; + } + spin_unlock_bh(&nss_clmap_stats_lock); +} + +/* + * nss_clmap_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_clmap_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_clmap_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_clmap_stats_unregister_notifier); + +/* + * nss_clmap_stats_register_notifier() + * Registers statistics notifier. 
+ */ +int nss_clmap_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_clmap_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_clmap_stats_register_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_clmap_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_stats.h new file mode 100644 index 000000000..89f687dab --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_stats.h @@ -0,0 +1,42 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_CLMAP_STATS_H +#define __NSS_CLMAP_STATS_H + +#define NSS_CLMAP_MAX_DEBUG_INTERFACES 2 * NSS_CLMAP_MAX_INTERFACES + +/* + * Clmap session debug statistics. + */ +struct nss_clmap_stats { + uint64_t stats[NSS_CLMAP_INTERFACE_STATS_MAX]; /* Clmap statistics. */ + int32_t if_index; /* Interface index. */ + uint32_t nss_if_num; /* NSS interface number. */ + enum nss_clmap_interface_type nss_if_type; /* NSS interface type. */ + bool valid; +}; + +/* + * Clmap statistics APIs. 
+ */ +extern void nss_clmap_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern bool nss_clmap_stats_session_register(uint32_t if_num, enum nss_clmap_interface_type if_type, struct net_device *netdev); +extern void nss_clmap_stats_session_unregister(uint32_t if_num); +extern void nss_clmap_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_clmap_stats_msg *stats_msg, uint32_t if_num); +extern void nss_clmap_stats_dentry_create(void); + +#endif /* __NSS_CLMAP_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_clmap_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_strings.c new file mode 100644 index 000000000..4a52b489a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_strings.c @@ -0,0 +1,73 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_clmap_strings.h" + +/* + * nss_clmap_strings_stats + * Clmap statistics strings for nss tunnel stats + */ +struct nss_stats_info nss_clmap_strings_stats[NSS_CLMAP_INTERFACE_STATS_MAX] = { + {"rx_pkts", NSS_STATS_TYPE_COMMON}, + {"rx_bytes", NSS_STATS_TYPE_COMMON}, + {"tx_pkts", NSS_STATS_TYPE_COMMON}, + {"tx_bytes", NSS_STATS_TYPE_COMMON}, + {"rx_queue_0_dropped", NSS_STATS_TYPE_DROP}, + {"rx_queue_1_dropped", NSS_STATS_TYPE_DROP}, + {"rx_queue_2_dropped", NSS_STATS_TYPE_DROP}, + {"rx_queue_3_dropped", NSS_STATS_TYPE_DROP}, + {"MAC DB look up failed", NSS_STATS_TYPE_SPECIAL}, + {"Invalid packet count", NSS_STATS_TYPE_SPECIAL}, + {"Headroom drop", NSS_STATS_TYPE_SPECIAL}, + {"Next node queue full drop", NSS_STATS_TYPE_SPECIAL}, + {"Pbuf alloc failed drop", NSS_STATS_TYPE_SPECIAL}, + {"Linear failed drop", NSS_STATS_TYPE_SPECIAL}, + {"Shared packet count", NSS_STATS_TYPE_SPECIAL}, + {"Ethernet frame error", NSS_STATS_TYPE_SPECIAL}, + {"Macdb create requests count", NSS_STATS_TYPE_SPECIAL}, + {"Macdb create failures MAC exists count", NSS_STATS_TYPE_SPECIAL}, + {"Macdb create failures MAC table full count", NSS_STATS_TYPE_SPECIAL}, + {"Macdb destroy requests count", NSS_STATS_TYPE_SPECIAL}, + {"Macdb destroy failures MAC not found count", NSS_STATS_TYPE_SPECIAL}, + {"Macdb destroy failures MAC unhashed count", NSS_STATS_TYPE_SPECIAL}, + {"Macdb flush requests count", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_clmap_strings_read() + * Read clmap statistics names + */ +static ssize_t nss_clmap_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_clmap_strings_stats, NSS_CLMAP_INTERFACE_STATS_MAX); +} + +/* + * nss_clmap_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(clmap); + +/* + * nss_clmap_strings_dentry_create() + * 
Create clmap statistics strings debug entry. + */ +void nss_clmap_strings_dentry_create(void) +{ + nss_strings_create_dentry("clmap", &nss_clmap_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_clmap_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_strings.h new file mode 100644 index 000000000..dbdffba98 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_clmap_strings.h @@ -0,0 +1,25 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ *************************************************************************** + */ + +#ifndef __NSS_CLMAP_STRINGS_H +#define __NSS_CLMAP_STRINGS_H + +#include "nss_clmap_stats.h" + +extern struct nss_stats_info nss_clmap_strings_stats[NSS_CLMAP_INTERFACE_STATS_MAX]; +extern void nss_clmap_strings_dentry_create(void); + +#endif /* __NSS_CLMAP_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_cmn.c b/feeds/ipq807x/qca-nss-drv/src/nss_cmn.c new file mode 100644 index 000000000..258994f1d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_cmn.c @@ -0,0 +1,345 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_cmn.c + * NSS generic APIs + */ + +#if (NSS_DT_SUPPORT == 1) +#include +#endif + +#include "nss_tx_rx_common.h" + +/* + * nss_cmn_response_str + * Common response structure string + */ +int8_t *nss_cmn_response_str[NSS_CMN_RESPONSE_LAST] = { + "Message Acknowledge without errors", + "Common message version not supported", + "Unknown Interface", + "Length Error", + "Message Error", + "FW Notification Message", +}; + +/* + * nss_cmn_msg_init() + * Initialize the common message of an ASYNC message. + */ +void nss_cmn_msg_init(struct nss_cmn_msg *ncm, uint32_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + ncm->interface = if_num; + ncm->version = NSS_HLOS_MESSAGE_VERSION; + ncm->type = type; + ncm->len = len; + ncm->cb = (nss_ptr_t)cb; + ncm->app_data = (nss_ptr_t)app_data; +} +EXPORT_SYMBOL(nss_cmn_msg_init); + +/* + * nss_cmn_msg_sync_init() + * Initialize the common message of a SYNC message. + */ +void nss_cmn_msg_sync_init(struct nss_cmn_msg *ncm, uint32_t if_num, uint32_t type, uint32_t len) +{ + nss_cmn_msg_init(ncm, if_num, type, len, NULL, NULL); +} +EXPORT_SYMBOL(nss_cmn_msg_sync_init); + +/* + * nss_cmn_get_interface_number() + * Return the interface number of the NSS net_device. + * + * Returns -1 on failure or the interface number of dev is an NSS net_device. 
+ */ +int32_t nss_cmn_get_interface_number(struct nss_ctx_instance *nss_ctx, struct net_device *dev) +{ + int i; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: Interface number could not be found as core not ready\n", nss_ctx); + return -1; + } + + nss_assert(dev != 0); + + /* + * Check physical interface table + */ + for (i = 0; i < NSS_MAX_NET_INTERFACES; i++) { + if (dev == nss_ctx->subsys_dp_register[i].ndev) { + return i; + } + } + + nss_warning("%px: Interface number could not be found as interface has not registered yet\n", nss_ctx); + return -1; +} +EXPORT_SYMBOL(nss_cmn_get_interface_number); + +/* + * nss_cmn_append_core_id() + * Return the NSS interface number with core ID. + */ +int nss_cmn_append_core_id(struct nss_ctx_instance *nss_ctx, int if_num) +{ + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_cmn_append_core_id); + +/* + * nss_cmn_get_interface_dev() + * Return the net_device for NSS interface id. + * + * Returns NULL on failure or the net_device for NSS interface id. + */ +struct net_device *nss_cmn_get_interface_dev(struct nss_ctx_instance *ctx, uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: Interface device could not be found as core not ready\n", nss_ctx); + return NULL; + } + + if (unlikely(if_num >= NSS_MAX_NET_INTERFACES)) { + return NULL; + } + + return nss_ctx->subsys_dp_register[if_num].ndev; +} +EXPORT_SYMBOL(nss_cmn_get_interface_dev); + +/* + * nss_cmn_get_interface_number_by_dev_and_type() + * Return the NSS interface id for the net_device. + * + * Returns < 0 on failure or the NSS interface id for the given device and type. 
+ */ +int32_t nss_cmn_get_interface_number_by_dev_and_type(struct net_device *dev, uint32_t type) +{ + int i, core; + struct nss_subsystem_dataplane_register *nsdr; + + nss_assert(dev != 0); + for (core = 0; core < nss_top_main.num_nss; core++) { + for (i = 0; i < NSS_MAX_NET_INTERFACES; i++) { + nsdr = &nss_top_main.nss[core].subsys_dp_register[i]; + if (dev == nsdr->ndev && type == nsdr->type) { + return i; + } + } + } + + nss_warning("Interface number could not be found for %px (%s) as interface has not registered yet\n", dev, dev->name); + return -1; +} +EXPORT_SYMBOL(nss_cmn_get_interface_number_by_dev_and_type); + +/* + * nss_cmn_get_interface_number_by_dev() + * Return the NSS interface id for the net_device. + * + * Returns < 0 on failure or the NSS interface id for the given device. + */ +int32_t nss_cmn_get_interface_number_by_dev(struct net_device *dev) +{ + return nss_cmn_get_interface_number_by_dev_and_type(dev, 0); +} +EXPORT_SYMBOL(nss_cmn_get_interface_number_by_dev); + +/* + * nss_cmn_get_state() + * return the NSS initialization state + */ +nss_state_t nss_cmn_get_state(struct nss_ctx_instance *ctx) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx; + nss_state_t state = NSS_STATE_UNINITIALIZED; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + spin_lock_bh(&nss_top_main.lock); + if (nss_ctx->state == NSS_CORE_STATE_INITIALIZED) { + state = NSS_STATE_INITIALIZED; + } + spin_unlock_bh(&nss_top_main.lock); + + return state; +} +EXPORT_SYMBOL(nss_cmn_get_state); + +/* + * nss_cmn_interface_is_redirect() + * Return true if the interface is a redirect interface. 
+ */ +bool nss_cmn_interface_is_redirect(struct nss_ctx_instance *nss_ctx, int32_t interface_num) +{ + enum nss_dynamic_interface_type type = nss_dynamic_interface_get_type(nss_ctx, interface_num); + + return type == NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_N2H + || type == NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_H2N + || type == NSS_DYNAMIC_INTERFACE_TYPE_VIRTIF_DEPRECATED; +} +EXPORT_SYMBOL(nss_cmn_interface_is_redirect); + +/* + * nss_cmn_rx_dropped_sum() + * Sum rx_dropped count. + */ +uint32_t nss_cmn_rx_dropped_sum(struct nss_cmn_node_stats *node_stats) +{ + uint32_t sum = 0; + int i; + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + sum += node_stats->rx_dropped[i]; + } + return sum; +} +EXPORT_SYMBOL(nss_cmn_rx_dropped_sum); + +/* + * nss_cmn_register_queue_decongestion() + * Register for queue decongestion event + */ +nss_cb_register_status_t nss_cmn_register_queue_decongestion(struct nss_ctx_instance *nss_ctx, nss_cmn_queue_decongestion_callback_t event_callback, void *app_ctx) +{ + uint32_t i; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + spin_lock_bh(&nss_ctx->decongest_cb_lock); + + /* + * Find vacant location in callback table + */ + for (i = 0; i< NSS_MAX_CLIENTS; i++) { + if (nss_ctx->queue_decongestion_callback[i] == NULL) { + nss_ctx->queue_decongestion_callback[i] = event_callback; + nss_ctx->queue_decongestion_ctx[i] = app_ctx; + spin_unlock_bh(&nss_ctx->decongest_cb_lock); + return NSS_CB_REGISTER_SUCCESS; + } + } + + spin_unlock_bh(&nss_ctx->decongest_cb_lock); + return NSS_CB_REGISTER_FAILED; +} +EXPORT_SYMBOL(nss_cmn_register_queue_decongestion); + +/* + * nss_cmn_unregister_queue_decongestion() + * Unregister for queue decongestion event + */ +nss_cb_unregister_status_t nss_cmn_unregister_queue_decongestion(struct nss_ctx_instance *nss_ctx, nss_cmn_queue_decongestion_callback_t event_callback) +{ + uint32_t i; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + spin_lock_bh(&nss_ctx->decongest_cb_lock); + + /* + * Find actual location in callback table + */ + for (i = 
0; i< NSS_MAX_CLIENTS; i++) { + if (nss_ctx->queue_decongestion_callback[i] == event_callback) { + nss_ctx->queue_decongestion_callback[i] = NULL; + nss_ctx->queue_decongestion_ctx[i] = NULL; + spin_unlock_bh(&nss_ctx->decongest_cb_lock); + return NSS_CB_UNREGISTER_SUCCESS; + } + } + + spin_unlock_bh(&nss_ctx->decongest_cb_lock); + return NSS_CB_UNREGISTER_FAILED; +} +EXPORT_SYMBOL(nss_cmn_unregister_queue_decongestion); + +/* + * nss_cmn_register_service_code() + * Register for service code event + */ +nss_cb_register_status_t nss_cmn_register_service_code(struct nss_ctx_instance *nss_ctx, nss_cmn_service_code_callback_t cb, uint8_t service_code, void *app_data) +{ + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (nss_ctx->service_code_callback[service_code]) { + /* + * We already have a callback registered for this service code. + */ + nss_warning("%px: a callback is registered already for this service code %d\n", nss_ctx, service_code); + + return NSS_CB_REGISTER_FAILED; + } + + nss_ctx->service_code_callback[service_code] = cb; + nss_ctx->service_code_ctx[service_code] = app_data; + return NSS_CB_REGISTER_SUCCESS; +} +EXPORT_SYMBOL(nss_cmn_register_service_code); + +/* + * nss_cmn_unregister_service_code() + * Unregister for service code event + */ +nss_cb_unregister_status_t nss_cmn_unregister_service_code(struct nss_ctx_instance *nss_ctx, nss_cmn_service_code_callback_t cb, uint8_t service_code) +{ + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_ctx->service_code_callback[service_code]) { + /* + * No callback was registered for this service code. 
+ */ + nss_warning("%px: no callback is registered for this service code %d\n", nss_ctx, service_code); + return NSS_CB_UNREGISTER_FAILED; + } + + nss_ctx->service_code_callback[service_code] = NULL; + nss_ctx->service_code_ctx[service_code] = NULL; + return NSS_CB_UNREGISTER_SUCCESS; +} +EXPORT_SYMBOL(nss_cmn_unregister_service_code); + +/* + * nss_cmn_get_nss_enabled() + * Check if NSS mode is supported on platform + * + * This API checks the device tree parameter to decide on whether + * NSS mode is enabled. On older kernels this will always return true + */ +bool nss_cmn_get_nss_enabled(void) +{ +#if (NSS_DT_SUPPORT == 1) + struct device_node *cmn = NULL; + + /* + * Get reference to NSS common device node + */ + cmn = of_find_node_by_name(NULL, "nss-common"); + if (!cmn) { + nss_info_always("nss is not enabled on this platform\n"); + return false; + } +#endif + return true; +} +EXPORT_SYMBOL(nss_cmn_get_nss_enabled); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_core.c b/feeds/ipq807x/qca-nss-drv/src/nss_core.c new file mode 100644 index 000000000..eaea9ec15 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_core.c @@ -0,0 +1,3251 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_core.c + * NSS driver core APIs source file. + */ + +#include "nss_core.h" +#include +#include +#include +#include +#include +#ifdef CONFIG_BRIDGE_NETFILTER +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(5, 0, 0)) +#include +#else +#include +#endif +#endif +#include +#include "nss_tx_rx_common.h" +#include "nss_data_plane.h" + +#define NSS_CORE_JUMBO_LINEAR_BUF_SIZE 128 + +#if (NSS_SKB_REUSE_SUPPORT == 1) +/* + * We have validated the skb recycling code within the NSS for the + * following kernel versions. Before enabling the driver in new kernels, + * the skb recycle code must be checked against Linux skb handling. 
+ * + * Tested on: 3.4, 3.10, 3.14, 3.18, 4.4 and 5.4 + */ +#if (!( \ +(((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)))) || \ +(((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)))) || \ +(((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)))) || \ +(((LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)))) || \ +(((LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)))) || \ +(((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(5, 5, 0)))))) +#error "Check skb recycle code in this file to match Linux version" +#endif + +static atomic_t max_reuse = ATOMIC_INIT(PAGE_SIZE); + +#endif /* NSS_SKB_REUSE_SUPPORT */ + +static int max_ipv4_conn = NSS_DEFAULT_NUM_CONN; +module_param(max_ipv4_conn, int, S_IRUGO); +MODULE_PARM_DESC(max_ipv4_conn, "Max number of IPv4 connections"); + +static int max_ipv6_conn = NSS_DEFAULT_NUM_CONN; +module_param(max_ipv6_conn, int, S_IRUGO); +MODULE_PARM_DESC(max_ipv6_conn, "Max number of IPv6 connections"); + +bool pn_mq_en = false; +module_param(pn_mq_en, bool, S_IRUGO); +MODULE_PARM_DESC(pn_mq_en, "Enable pnode ingress QoS"); + +uint16_t pn_qlimits[NSS_MAX_NUM_PRI] = {[0 ... 
NSS_MAX_NUM_PRI - 1] = NSS_DEFAULT_QUEUE_LIMIT}; +module_param_array(pn_qlimits, short, NULL, 0); +MODULE_PARM_DESC(pn_qlimits, "Queue limit per queue"); + +/* + * Atomic variables to control jumbo_mru & paged_mode + */ +static atomic_t jumbo_mru; +static atomic_t paged_mode; + +/* + * nss_core_update_max_ipv4_conn() + * Update the maximum number of configured IPv4 connections + */ +void nss_core_update_max_ipv4_conn(int conn) +{ + max_ipv4_conn = conn; +} + +/* + * nss_core_update_max_ipv6_conn() + * Update the maximum number of configured IPv6 connections + */ +void nss_core_update_max_ipv6_conn(int conn) +{ + max_ipv6_conn = conn; +} + +#if (NSS_SKB_REUSE_SUPPORT == 1) +/* + * nss_core_set_max_reuse() + * Set the max_reuse to the specified value + */ +void nss_core_set_max_reuse(int max) +{ + atomic_set(&max_reuse, max); +} + +/* + * nss_core_get_max_reuse() + * Does an atomic read of max_reuse + */ +int nss_core_get_max_reuse(void) +{ + return atomic_read(&max_reuse); +} + +/* + * nss_core_get_min_reuse() + * Return min reuse size + */ +uint32_t nss_core_get_min_reuse(struct nss_ctx_instance *nss_ctx) +{ + NSS_VERIFY_CTX_MAGIC(nss_ctx); + return nss_ctx->max_buf_size; +} +#endif /* NSS_SKB_REUSE_SUPPORT */ + +/* + * nss_core_set_jumbo_mru() + * Set the jumbo_mru to the specified value + */ +void nss_core_set_jumbo_mru(int jumbo) +{ + atomic_set(&jumbo_mru, jumbo); + +#if (NSS_SKB_REUSE_SUPPORT == 1) + if (jumbo > nss_core_get_max_reuse()) + nss_core_set_max_reuse(ALIGN(jumbo * 2, PAGE_SIZE)); +#endif +} + +/* + * nss_core_get_jumbo_mru() + * Does an atomic read of jumbo_mru + */ +int nss_core_get_jumbo_mru(void) +{ + return atomic_read(&jumbo_mru); +} + +/* + * nss_core_set_paged_mode() + * Set the paged_mode to the specified value + */ +void nss_core_set_paged_mode(int mode) +{ + atomic_set(&paged_mode, mode); +} + +/* + * nss_core_get_paged_mode() + * Does an atomic read of paged_mode + */ +int nss_core_get_paged_mode(void) +{ + return 
atomic_read(&paged_mode); +} + +/* + * nss_core_register_msg_handler() + * Register a msg callback per interface number. One per interface. + */ +uint32_t nss_core_register_msg_handler(struct nss_ctx_instance *nss_ctx, uint32_t interface, nss_if_rx_msg_callback_t msg_cb) +{ + nss_assert(msg_cb != NULL); + + /* + * Validate interface id + */ + if (interface >= NSS_MAX_NET_INTERFACES) { + nss_warning("Error - Interface %d not Supported\n", interface); + return NSS_CORE_STATUS_FAILURE; + } + + /* + * Check if already registered + */ + if (nss_ctx->nss_rx_interface_handlers[interface].msg_cb) { + nss_warning("Error - Duplicate Interface CB Registered for interface %d\n", interface); + return NSS_CORE_STATUS_FAILURE; + } + + nss_ctx->nss_rx_interface_handlers[interface].msg_cb = msg_cb; + + return NSS_CORE_STATUS_SUCCESS; +} + +/* + * nss_core_unregister_msg_handler() + * Unregister a msg callback per interface number. + */ +uint32_t nss_core_unregister_msg_handler(struct nss_ctx_instance *nss_ctx, uint32_t interface) +{ + /* + * Validate interface id + */ + if (interface >= NSS_MAX_NET_INTERFACES) { + nss_warning("Error - Interface %d not Supported\n", interface); + return NSS_CORE_STATUS_FAILURE; + } + + nss_ctx->nss_rx_interface_handlers[interface].msg_cb = NULL; + + return NSS_CORE_STATUS_SUCCESS; +} + +/* + * nss_core_register_handler() + +-- Register a callback per interface code. Only one per interface. 
+ */ +uint32_t nss_core_register_handler(struct nss_ctx_instance *nss_ctx, uint32_t interface, nss_core_rx_callback_t cb, void *app_data) +{ + nss_assert(cb != NULL); + + /* + * Validate interface id + */ + if (interface >= NSS_MAX_NET_INTERFACES) { + nss_warning("Error - Interface %d not Supported\n", interface); + return NSS_CORE_STATUS_FAILURE; + } + + /* + * Check if already registered + */ + if (nss_ctx->nss_rx_interface_handlers[interface].cb != NULL) { + nss_warning("Error - Duplicate Interface CB Registered for interface %d\n", interface); + return NSS_CORE_STATUS_FAILURE; + } + + nss_ctx->nss_rx_interface_handlers[interface].cb = cb; + nss_ctx->nss_rx_interface_handlers[interface].app_data = app_data; + + return NSS_CORE_STATUS_SUCCESS; +} + +/* + * nss_core_unregister_handler() + * Unegister a callback per interface code. + */ +uint32_t nss_core_unregister_handler(struct nss_ctx_instance *nss_ctx, uint32_t interface) +{ + /* + * Validate interface id + */ + if (interface >= NSS_MAX_NET_INTERFACES) { + nss_warning("Error - Interface %d not Supported\n", interface); + return NSS_CORE_STATUS_FAILURE; + } + + nss_ctx->nss_rx_interface_handlers[interface].cb = NULL; + nss_ctx->nss_rx_interface_handlers[interface].app_data = NULL; + + return NSS_CORE_STATUS_SUCCESS; +} + +/* + * nss_core_set_subsys_dp_type() + * Set the type for the datapath subsystem + */ +void nss_core_set_subsys_dp_type(struct nss_ctx_instance *nss_ctx, struct net_device *ndev, uint32_t if_num, uint32_t type) +{ + struct nss_subsystem_dataplane_register *reg; + + /* + * Check that interface number is in range. + */ + BUG_ON(if_num >= NSS_MAX_NET_INTERFACES); + + reg = &nss_ctx->subsys_dp_register[if_num]; + + /* + * Check if there is already a subsystem registered at this interface number. + */ + BUG_ON(reg->ndev && reg->ndev != ndev); + + reg->type = type; +} + +/* + * nss_core_register_subsys_dp() + * Registers a netdevice and associated information at a given interface. 
+ * + * Can also be used to update an existing registry if the provided net_device + * is equal to the one already registered. Will fail if there is already + * a net_device registered to the interface not equal to the one provided, + * or if the interface number is out of range. + */ +void nss_core_register_subsys_dp(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + nss_phys_if_rx_callback_t cb, + nss_phys_if_rx_ext_data_callback_t ext_cb, + void *app_data, struct net_device *ndev, + uint32_t features) +{ + struct nss_subsystem_dataplane_register *reg; + + /* + * Check that interface number is in range. + */ + BUG_ON(if_num >= NSS_MAX_NET_INTERFACES); + + reg = &nss_ctx->subsys_dp_register[if_num]; + + /* + * Check if there is already a subsystem registered at this interface number. + */ + BUG_ON(reg->ndev && reg->ndev != ndev); + + reg->cb = cb; + reg->ext_cb = ext_cb; + reg->app_data = app_data; + reg->ndev = ndev; + reg->features = features; +} + +/* + * nss_core_unregister_subsys_dp() + * Unregisters the netdevice at the given interface. + * + * Fails if the interface number is not valid. + */ +void nss_core_unregister_subsys_dp(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_subsystem_dataplane_register *reg; + + /* + * Check that interface number is in range. + */ + BUG_ON(if_num >= NSS_MAX_NET_INTERFACES); + + reg = &nss_ctx->subsys_dp_register[if_num]; + + reg->cb = NULL; + reg->ext_cb = NULL; + reg->app_data = NULL; + reg->ndev = NULL; + reg->features = 0; + reg->type = 0; +} + +/* + * nss_core_handle_nss_status_pkt() + * Handle the metadata/status packet. 
+ */ +void nss_core_handle_nss_status_pkt(struct nss_ctx_instance *nss_ctx, struct sk_buff *nbuf) +{ + struct nss_cmn_msg *ncm; + uint32_t expected_version = NSS_HLOS_MESSAGE_VERSION; + nss_core_rx_callback_t cb; + void *app_data; + uint16_t nss_if; + + if (skb_shinfo(nbuf)->nr_frags > 0) { + ncm = (struct nss_cmn_msg *)skb_frag_address(&skb_shinfo(nbuf)->frags[0]); + } else { + ncm = (struct nss_cmn_msg *)nbuf->data; + } + + /* + * Save NSS interface number in local variable + */ + nss_if = ncm->interface; + + /* + * Check for version number + */ + if (ncm->version != expected_version) { + nss_warning("%px: Message %d for interface %d received with invalid version %d, expected version %d", + nss_ctx, ncm->type, nss_if, ncm->version, expected_version); + return; + } + + /* + * Validate message size + */ + if (ncm->len > nbuf->len) { + nss_warning("%px: Message %d for interface %d received with invalid length %d, expected length %d", + nss_ctx, ncm->type, nss_if, nbuf->len, ncm->len); + return; + } + + /* + * Check for validity of interface number + */ + if (nss_if >= NSS_MAX_NET_INTERFACES) { + nss_warning("%px: Message %d received with invalid interface number %d", nss_ctx, ncm->type, nss_if); + return; + } + + cb = nss_ctx->nss_rx_interface_handlers[nss_if].cb; + app_data = nss_ctx->nss_rx_interface_handlers[nss_if].app_data; + + if (!cb) { + nss_warning("%px: Callback not registered for interface %d", nss_ctx, nss_if); + return; + } + + cb(nss_ctx, ncm, app_data); + + if (ncm->interface != nss_if) { + nss_warning("%px: Invalid NSS I/F %d expected %d", nss_ctx, ncm->interface, nss_if); + } + + return; +} + +/* + * nss_core_handle_nss_crypto_pkt() + * Handles crypto packet. 
+ */ +static void nss_core_handle_crypto_pkt(struct nss_ctx_instance *nss_ctx, unsigned int interface_num, + struct sk_buff *nbuf, struct napi_struct *napi) +{ + struct nss_subsystem_dataplane_register *subsys_dp_reg = &nss_ctx->subsys_dp_register[interface_num]; + nss_phys_if_rx_callback_t cb; + struct net_device *ndev; + + ndev = subsys_dp_reg->ndev; + cb = subsys_dp_reg->cb; + if (likely(cb)) { + cb(ndev, nbuf, napi); + return; + } + + dev_kfree_skb_any(nbuf); + return; +} + +/* + * nss_soc_mem_info() + * Getting DDR information for NSS SoC + */ +static uint32_t nss_soc_mem_info(void) +{ + struct device_node *node; + struct device_node *snode; + int addr_cells; + int size_cells; + int n_items; + uint32_t nss_msize = 8 << 20; /* default: 8MB */ + const __be32 *ppp; + + node = of_find_node_by_name(NULL, "reserved-memory"); + if (!node) { + nss_info_always("reserved-memory not found\n"); + return nss_msize; + } + + ppp = (__be32 *)of_get_property(node, "#address-cells", NULL); + addr_cells = ppp ? be32_to_cpup(ppp) : 2; + nss_info("%px addr cells %d\n", ppp, addr_cells); + ppp = (__be32 *)of_get_property(node, "#size-cells", NULL); + size_cells = ppp ? 
be32_to_cpup(ppp) : 2; + nss_info("%px size cells %d\n", ppp, size_cells); + + for_each_child_of_node(node, snode) { + /* + * compare (snode->full_name, "/reserved-memory/nss@40000000") may be safer + */ + nss_info("%px snode %s fn %s\n", snode, snode->name, snode->full_name); + if (strcmp(snode->name, "nss") == 0) + break; + } + of_node_put(node); + if (!snode) { + nss_info_always("nss@node not found: needed to determine NSS reserved DDR\n"); + return nss_msize; + } + + ppp = (__be32 *)of_get_property(snode, "reg", &n_items); + if (ppp) { + n_items /= sizeof(ppp[0]); + nss_msize = be32_to_cpup(ppp + addr_cells + size_cells - 1); + nss_info_always("addr/size storage words %d %d # words %d in DTS, ddr size %x\n", + addr_cells, size_cells, n_items, nss_msize); + } + of_node_put(snode); + return nss_msize; +} + +/* + * nss_get_ddr_info() + * get DDR start address and size from device tree. + */ +static void nss_get_ddr_info(struct nss_mmu_ddr_info *mmu, char *name) +{ + __be32 avail_ddr; + long cached; + struct sysinfo vals; + struct device_node *node; + + si_meminfo(&vals); +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + cached = global_page_state(NR_FILE_PAGES); +#else + cached = global_node_page_state(NR_FILE_PAGES); +#endif + + avail_ddr = (vals.totalram + cached + vals.sharedram) * vals.mem_unit; + mmu->num_active_cores = nss_top_main.num_nss; + + /* + * Since "memory" has not been used by anyone, the format is not final. + * Three (3) possible formats available: one of 1 or 2 will be final. 
+ * 1) item_size stating_address DDR_size : odd # items + * 2) stating_address DDR_size # 32-bit each; total 2 words + * 3) stating_address DDR_size # 64-bit each; total 4 words + */ + node = of_find_node_by_name(NULL, name); + if (node) { + int isize = 0; + int n_items; + const __be32 *ppp = (__be32 *)of_get_property(node, "reg", &n_items); + + n_items /= sizeof(ppp[0]); + nss_info_always("node size %d # items %d\n", + of_n_size_cells(node), n_items); + if (ppp) { + if (n_items & 1) { /* case 1 */ + isize = be32_to_cpup(ppp); + if (isize == 1) + goto case2; + if (isize == 2) + goto case3; + n_items = 0; + } else if (n_items == 2) { +case2: + mmu->start_address = be32_to_cpup(ppp + isize); + mmu->ddr_size = be32_to_cpup(&ppp[isize + 1]); + } else if (n_items == 4) { +case3: + if (!ppp[isize] && !ppp[isize * 2]) { + if (isize) + isize = 1; + mmu->start_address = be32_to_cpup(ppp + isize + 1); + mmu->ddr_size = be32_to_cpup(ppp + isize + 3); + } else + n_items = 0; + } else + n_items = 0; + if (n_items) { + of_node_put(node); + nss_info_always("%s: %x %u (avl %u) items %d active_cores %d\n", + name, mmu->start_address, mmu->ddr_size, + avail_ddr, n_items, mmu->num_active_cores); + /* + * if DTS mechanism goes wrong, use available + * DDR and round it up to 64MB for maximum DDR. + */ + if (avail_ddr > mmu->ddr_size) + mmu->ddr_size = (avail_ddr + (63 << 20)) + & (~63 << 20); + return; + } + } + of_node_put(node); + nss_info_always("incorrect memory info %px items %d\n", + ppp, n_items); + } + + /* + * boilerplate for setting customer values; + * start_address = 0 will not change default start address + * set in NSS FW (likely 0x4000_0000) + * total available RAM + 16 MB NSS FW DDR + ~31 MB kernel mem + * we round it up by 128MB to cover potential NSS DDR increase + * and a slightly large holes. + * The size can be changed to a fixed value as DTS, but simplier. 
+ * mmu->ddr_size = 1024 << 20 + */ + mmu->start_address = 0; + mmu->ddr_size = (avail_ddr + (127 << 20)) & (~127 << 20); + nss_info_always("RAM pages fr %lu buf %lu cached %lu %lu : %lu %u\n", + vals.freeram, vals.bufferram, cached, vals.sharedram, + vals.totalram, mmu->ddr_size); +} + +/* + * nss_send_ddr_info() + * Send DDR info to NSS + */ +static void nss_send_ddr_info(struct nss_ctx_instance *nss_own) +{ + struct nss_n2h_msg nnm; + struct nss_cmn_msg *ncm = &nnm.cm; + uint32_t ret; + nss_info("%px: send DDR info\n", nss_own); + + nss_cmn_msg_init(ncm, NSS_N2H_INTERFACE, NSS_TX_DDR_INFO_VIA_N2H_CFG, + sizeof(struct nss_mmu_ddr_info), NULL, NULL); + + nss_get_ddr_info(&nnm.msg.mmu, "memory"); + nnm.msg.mmu.nss_ddr_size = nss_soc_mem_info(); + + ret = nss_core_send_cmd(nss_own, &nnm, sizeof(nnm), NSS_NBUF_PAYLOAD_SIZE); + if (ret != NSS_TX_SUCCESS) { + nss_info_always("%px: Failed to send DDR info for core %d\n", nss_own, nss_own->id); + } +} + +/* + * nss_core_cause_to_queue() + * Map interrupt cause to queue id + */ +static inline uint16_t nss_core_cause_to_queue(uint16_t cause) +{ + if (likely(cause == NSS_N2H_INTR_DATA_QUEUE_0)) { + return NSS_IF_N2H_DATA_QUEUE_0; + } + + if (likely(cause == NSS_N2H_INTR_DATA_QUEUE_1)) { + return NSS_IF_N2H_DATA_QUEUE_1; + } + + if (likely(cause == NSS_N2H_INTR_DATA_QUEUE_2)) { + return NSS_IF_N2H_DATA_QUEUE_2; + } + + if (likely(cause == NSS_N2H_INTR_DATA_QUEUE_3)) { + return NSS_IF_N2H_DATA_QUEUE_3; + } + + if (likely(cause == NSS_N2H_INTR_EMPTY_BUFFER_QUEUE)) { + return NSS_IF_N2H_EMPTY_BUFFER_RETURN_QUEUE; + } + + /* + * There is no way we can reach here as cause was already identified to be related to valid queue + */ + nss_assert(0); + return 0; +} + +/* + * nss_dump_desc() + * Prints descriptor data + */ +static inline void nss_dump_desc(struct nss_ctx_instance *nss_ctx, struct n2h_descriptor *desc) +{ + printk("bad descriptor dump for nss core = %d\n", nss_ctx->id); + printk("\topaque = %px\n", (void *)desc->opaque); 
+ printk("\tinterface = %d\n", desc->interface_num); + printk("\tbuffer_type = %d\n", desc->buffer_type); + printk("\tbit_flags = %x\n", desc->bit_flags); + printk("\tbuffer_addr = %x\n", desc->buffer); + printk("\tbuffer_len = %d\n", desc->buffer_len); + printk("\tpayload_offs = %d\n", desc->payload_offs); + printk("\tpayload_len = %d\n", desc->payload_len); + printk("\tpri = %d\n", desc->pri); +} + +/* + * nss_core_skb_needs_linearize() + * Looks at if this skb needs to be linearized or not. + */ +static inline int nss_core_skb_needs_linearize(struct sk_buff *skb, uint32_t features) +{ + return ((skb_has_frag_list(skb) && + !(features & NETIF_F_FRAGLIST)) || + (skb_shinfo(skb)->nr_frags && + !(features & NETIF_F_SG))); +} + +/* + * nss_core_handle_bounced_pkt() + * Bounced packet is returned from an interface/bridge bounce operation. + * + * Return the skb to the registrant. + */ +static inline void nss_core_handle_bounced_pkt(struct nss_ctx_instance *nss_ctx, + struct nss_shaper_bounce_registrant *reg, + struct sk_buff *nbuf) +{ + void *app_data; + struct module *owner; + nss_shaper_bounced_callback_t bounced_callback; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + spin_lock_bh(&nss_top->lock); + + /* + * Do we have a registrant? 
+ */ + if (!reg->registered) { + spin_unlock_bh(&nss_top->lock); + dev_kfree_skb_any(nbuf); + return; + } + + /* + * Get handle to the owning registrant + */ + bounced_callback = reg->bounced_callback; + app_data = reg->app_data; + owner = reg->owner; + + /* + * Callback is active, unregistration is not permitted while this is in progress + */ + reg->callback_active = true; + spin_unlock_bh(&nss_top->lock); + if (!try_module_get(owner)) { + spin_lock_bh(&nss_top->lock); + reg->callback_active = false; + spin_unlock_bh(&nss_top->lock); + dev_kfree_skb_any(nbuf); + return; + } + + /* + * Pass bounced packet back to registrant + */ + bounced_callback(app_data, nbuf); + spin_lock_bh(&nss_top->lock); + reg->callback_active = false; + spin_unlock_bh(&nss_top->lock); + module_put(owner); +} + +/* + * nss_core_handle_virt_if_pkt() + * Handle packet destined to virtual interface. + */ +static inline void nss_core_handle_virt_if_pkt(struct nss_ctx_instance *nss_ctx, + unsigned int interface_num, + struct sk_buff *nbuf) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct nss_subsystem_dataplane_register *subsys_dp_reg = &nss_ctx->subsys_dp_register[interface_num]; + struct net_device *ndev = NULL; + + uint32_t xmit_ret; + uint16_t queue_offset = 0; + + NSS_PKT_STATS_INC(&nss_top->stats_drv[NSS_DRV_STATS_RX_VIRTUAL]); + + /* + * Checksum is already done by NSS for packets forwarded to virtual interfaces + */ + nbuf->ip_summed = CHECKSUM_NONE; + + /* + * Obtain net_device pointer + */ + ndev = subsys_dp_reg->ndev; + if (unlikely(!ndev)) { + nss_warning("%px: Received packet for unregistered virtual interface %d", + nss_ctx, interface_num); + + /* + * NOTE: The assumption is that gather support is not + * implemented in fast path and hence we can not receive + * fragmented packets and so we do not need to take care + * of freeing a fragmented packet + */ + dev_kfree_skb_any(nbuf); + return; + } + + /* + * TODO: Need to ensure the ndev is not removed before we take 
dev_hold(). + */ + dev_hold(ndev); + nbuf->dev = ndev; + + /* + * Linearize the skb if needed + * + * Mixing up non linear check with in nss_core_skb_needs_linearize causes + * unencessary performance impact because of netif_skb_features() API call unconditionally + * Hence moved skb_is_nonlinear call outside. + */ + if (unlikely(skb_is_nonlinear(nbuf))) { + if (nss_core_skb_needs_linearize(nbuf, (uint32_t)netif_skb_features(nbuf)) && + __skb_linearize(nbuf)) { + /* + * We needed to linearize, but __skb_linearize() failed. Therefore + * we free the nbuf. + */ + dev_put(ndev); + dev_kfree_skb_any(nbuf); + return; + } + } + + /* + * Check to see if there is a xmit callback is registered + * in this path. The callback will decide the queue mapping. + */ + if (unlikely((subsys_dp_reg->xmit_cb))) { + skb_set_queue_mapping(nbuf, 0); + subsys_dp_reg->xmit_cb(ndev, nbuf); + dev_put(ndev); + return; + } + + /* + * Mimic Linux behavior to allow multi-queue netdev choose which queue to use + */ + if (ndev->netdev_ops->ndo_select_queue) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + queue_offset = ndev->netdev_ops->ndo_select_queue(ndev, nbuf, NULL, NULL); +#else + queue_offset = ndev->netdev_ops->ndo_select_queue(ndev, nbuf, NULL); +#endif + } + + skb_set_queue_mapping(nbuf, queue_offset); + + /* + * Send the packet to virtual interface + * NOTE: Invoking this will BYPASS any assigned QDisc - this is OKAY + * as TX packets out of the NSS will have been shaped inside the NSS. + */ + xmit_ret = ndev->netdev_ops->ndo_start_xmit(nbuf, ndev); + if (unlikely(xmit_ret == NETDEV_TX_BUSY)) { + dev_kfree_skb_any(nbuf); + nss_info("%px: Congestion at virtual interface %d, %px", nss_ctx, interface_num, ndev); + } + dev_put(ndev); +} + +/* + * nss_core_handle_buffer_pkt() + * Handle data packet received on physical or virtual interface. 
+ */ +static inline void nss_core_handle_buffer_pkt(struct nss_ctx_instance *nss_ctx, + unsigned int interface_num, + struct sk_buff *nbuf, + struct napi_struct *napi, + uint16_t flags, uint16_t qid, uint8_t service_code) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct nss_subsystem_dataplane_register *subsys_dp_reg = &nss_ctx->subsys_dp_register[interface_num]; + struct net_device *ndev = NULL; + nss_phys_if_rx_callback_t cb; + uint16_t queue_offset = qid - NSS_IF_N2H_DATA_QUEUE_0; + + NSS_PKT_STATS_INC(&nss_top->stats_drv[NSS_DRV_STATS_RX_PACKET]); + + /* + * Check if NSS was able to obtain checksum + */ + nbuf->ip_summed = CHECKSUM_UNNECESSARY; + if (unlikely(!(flags & N2H_BIT_FLAG_IP_TRANSPORT_CHECKSUM_VALID))) { + nbuf->ip_summed = CHECKSUM_NONE; + } + + ndev = subsys_dp_reg->ndev; + if (!ndev) { + dev_kfree_skb_any(nbuf); + return; + } + + /* + * If we have a non-zero service code, call the corresponding service code + * callback. The callback will consume the skb. + * For service code, we provide the raw packet as it was received. + */ + if (unlikely(service_code)) { + nss_cmn_service_code_callback_t cb = nss_ctx->service_code_callback[service_code]; + if (likely(cb)) { + dev_hold(ndev); + nbuf->dev = ndev; + nbuf->protocol = eth_type_trans(nbuf, ndev); + cb(nss_ctx->service_code_ctx[service_code], nbuf); + dev_put(ndev); + return; + } + } + + /* + * Deliver nbuf to the interface through callback if there is one. + */ + cb = subsys_dp_reg->cb; + if (likely(cb)) { + /* + * linearize or free if requested. + */ + if (unlikely(skb_is_nonlinear(nbuf))) { + if (nss_core_skb_needs_linearize(nbuf, ndev->features) && __skb_linearize(nbuf)) { + dev_kfree_skb_any(nbuf); + return; + } + } + + /* + * Record RX queue if the netdev has that many RX queues + */ + if (queue_offset < ndev->real_num_rx_queues) { + skb_record_rx_queue(nbuf, queue_offset); + } + + cb(ndev, (void *)nbuf, napi); + return; + } + + /* + * Deliver to the stack directly. Ex. 
there is no rule matched for + * redirect interface. + */ + dev_hold(ndev); + nbuf->dev = ndev; + nbuf->protocol = eth_type_trans(nbuf, ndev); + netif_receive_skb(nbuf); + dev_put(ndev); +} + +/* + * nss_core_handle_ext_buffer_pkt() + * Handle Extended data plane packet received on physical or virtual interface. + */ +static inline void nss_core_handle_ext_buffer_pkt(struct nss_ctx_instance *nss_ctx, + unsigned int interface_num, + struct sk_buff *nbuf, + struct napi_struct *napi, + uint16_t flags) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct nss_subsystem_dataplane_register *subsys_dp_reg = &nss_ctx->subsys_dp_register[interface_num]; + struct net_device *ndev = NULL; + nss_phys_if_rx_ext_data_callback_t ext_cb; + + NSS_PKT_STATS_INC(&nss_top->stats_drv[NSS_DRV_STATS_RX_EXT_PACKET]); + + /* + * Check if NSS was able to obtain checksum + */ + nbuf->ip_summed = CHECKSUM_UNNECESSARY; + if (unlikely(!(flags & N2H_BIT_FLAG_IP_TRANSPORT_CHECKSUM_VALID))) { + nbuf->ip_summed = CHECKSUM_NONE; + } + + ndev = subsys_dp_reg->ndev; + ext_cb = subsys_dp_reg->ext_cb; + if (likely(ext_cb) && likely(ndev)) { + + if (unlikely(skb_is_nonlinear(nbuf))) { + if (nss_core_skb_needs_linearize(nbuf, ndev->features) && __skb_linearize(nbuf)) { + /* + * We needed to linearize, but __skb_linearize() failed. So free the nbuf. + */ + dev_kfree_skb_any(nbuf); + return; + } + } + + ext_cb(ndev, (void *)nbuf, napi); + } else { + dev_kfree_skb_any(nbuf); + } +} + +/* + * nss_core_rx_pbuf() + * Receive a pbuf from the NSS into Linux. 
+ */ +static inline void nss_core_rx_pbuf(struct nss_ctx_instance *nss_ctx, struct n2h_descriptor *desc, struct napi_struct *napi, + uint8_t buffer_type, struct sk_buff *nbuf, uint16_t qid) +{ + unsigned int interface_num = NSS_INTERFACE_NUM_GET(desc->interface_num); + unsigned int core_id = NSS_INTERFACE_NUM_GET_COREID(desc->interface_num); + struct nss_shaper_bounce_registrant *reg = NULL; + int32_t status; + + NSS_PKT_STATS_DEC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + + if (interface_num >= NSS_MAX_NET_INTERFACES) { + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_INVALID_INTERFACE]); + nss_warning("%px: Invalid interface_num: %d", nss_ctx, interface_num); + dev_kfree_skb_any(nbuf); + return; + } + + /* + * Check if core_id value is valid. + */ + if (core_id > nss_top_main.num_nss) { + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_INVALID_CORE_ID]); + nss_warning("%px: Invalid core id: %d", nss_ctx, core_id); + dev_kfree_skb_any(nbuf); + return; + } + + /* + * Check if need to convert to local core value. 
+ */ + if (core_id) { + nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[core_id - 1]; + } + + switch (buffer_type) { + case N2H_BUFFER_PACKET: + nss_core_handle_buffer_pkt(nss_ctx, interface_num, nbuf, napi, desc->bit_flags, qid, desc->service_code); + break; + + case N2H_BUFFER_PACKET_VIRTUAL: + nss_core_handle_virt_if_pkt(nss_ctx, interface_num, nbuf); + break; + + case N2H_BUFFER_SHAPER_BOUNCED_INTERFACE: + reg = &nss_ctx->nss_top->bounce_interface_registrants[interface_num]; + nss_core_handle_bounced_pkt(nss_ctx, reg, nbuf); + break; + + case N2H_BUFFER_SHAPER_BOUNCED_BRIDGE: + reg = &nss_ctx->nss_top->bounce_bridge_registrants[interface_num]; + nss_core_handle_bounced_pkt(nss_ctx, reg, nbuf); + break; + + case N2H_BUFFER_PACKET_EXT: + nss_core_handle_ext_buffer_pkt(nss_ctx, interface_num, nbuf, napi, desc->bit_flags); + break; + + case N2H_BUFFER_STATUS: + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_STATUS]); + nss_core_handle_nss_status_pkt(nss_ctx, nbuf); + dev_kfree_skb_any(nbuf); + break; + + case N2H_BUFFER_CRYPTO_RESP: + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_CRYPTO_RESP]); + nss_core_handle_crypto_pkt(nss_ctx, interface_num, nbuf, napi); + break; + + case N2H_BUFFER_RATE_TEST: + + /* + * This is a packet NSS sent for packet rate testing. The test measures the + * maximum PPS we can achieve between the host and NSS. After we process + * the descriptor, we directly send these test packets back to NSS without further process. + * They are again marked with H2N_BUFFER_RATE_TEST buffer type so NSS can process + * and count the test packets properly. 
+ */ + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_STATUS]); + status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_H2N_DATA_QUEUE, H2N_BUFFER_RATE_TEST, H2N_BIT_FLAG_BUFFER_REUSABLE); + if (unlikely(status != NSS_CORE_STATUS_SUCCESS)) { + dev_kfree_skb_any(nbuf); + nss_warning("%px: Unable to enqueue\n", nss_ctx); + } + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE); + break; + + default: + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_INVALID_BUFFER_TYPE]); + nss_warning("%px: Invalid buffer type %d received from NSS", nss_ctx, buffer_type); + dev_kfree_skb_any(nbuf); + } +} + +/* + * nss_core_set_skb_classify() + * Set skb field to avoid ingress shaping. + */ +static inline void nss_core_set_skb_classify(struct sk_buff *nbuf) +{ +#ifdef CONFIG_NET_CLS_ACT +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) + nbuf->tc_verd = SET_TC_NCLS_NSS(nbuf->tc_verd); +#else + skb_set_tc_classify_offload(nbuf); +#endif +#endif +} + +/* + * nss_core_handle_nrfrag_skb() + * Handled the processing of fragmented skb's + */ +static inline bool nss_core_handle_nr_frag_skb(struct nss_ctx_instance *nss_ctx, struct sk_buff **nbuf_ptr, struct sk_buff **jumbo_start_ptr, struct n2h_descriptor *desc, unsigned int buffer_type) +{ + struct sk_buff *nbuf = *nbuf_ptr; + struct sk_buff *jumbo_start = *jumbo_start_ptr; + + uint16_t payload_len = desc->payload_len; + uint16_t payload_offs = desc->payload_offs; + uint16_t bit_flags = desc->bit_flags; + + nss_assert(desc->payload_offs + desc->payload_len <= PAGE_SIZE); + + dma_unmap_page(nss_ctx->dev, (desc->buffer + desc->payload_offs), desc->payload_len, DMA_FROM_DEVICE); + + /* + * The first and last bits are both set. Hence the received frame can't have + * chains (or it's not a scattered one). + */ + if (likely(bit_flags & N2H_BIT_FLAG_FIRST_SEGMENT) && likely(bit_flags & N2H_BIT_FLAG_LAST_SEGMENT)) { + + /* + * We have received another head before we saw the last segment. 
+ * Free the old head as the frag list is corrupt. + */ + if (unlikely(jumbo_start)) { + nss_warning("%px: received a full frame before a last", jumbo_start); + dev_kfree_skb_any(jumbo_start); + *jumbo_start_ptr = NULL; + } + + /* + * NOTE: Need to use __skb_fill since we do not want to + * increment nr_frags again. We just want to adjust the offset + * and the length. + */ + __skb_fill_page_desc(nbuf, 0, skb_frag_page(&skb_shinfo(nbuf)->frags[0]), payload_offs, payload_len); + + /* + * We do not update truesize. We just keep the initial set value. + */ + nbuf->data_len = payload_len; + nbuf->len = payload_len; + nbuf->priority = desc->pri; + +#ifdef CONFIG_NET_CLS_ACT + /* + * Skip the ingress QoS for the packet if the descriptor has + * ingress shaped flag set. + */ + if (unlikely(desc->bit_flags & N2H_BIT_FLAG_INGRESS_SHAPED)) { + nss_core_set_skb_classify(nbuf); + } +#endif + + goto pull; + } + + /* + * Track Number of Fragments processed. First && Last is not true fragment + */ + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_FRAG_SEG_PROCESSED]); + + /* + * NSS sent us an SG chain. + * Build a frags[] out of segments. + */ + if (unlikely((bit_flags & N2H_BIT_FLAG_FIRST_SEGMENT))) { + + /* + * We have received another head before we saw the last segment. + * Free the old head as the frag list is corrupt. + */ + if (unlikely(jumbo_start)) { + nss_warning("%px: received the second head before a last", jumbo_start); + dev_kfree_skb_any(jumbo_start); + } + + /* + * We do not update truesize. We just keep the initial set value. + */ + __skb_fill_page_desc(nbuf, 0, skb_frag_page(&skb_shinfo(nbuf)->frags[0]), payload_offs, payload_len); + nbuf->data_len = payload_len; + nbuf->len = payload_len; + nbuf->priority = desc->pri; + +#ifdef CONFIG_NET_CLS_ACT + /* + * Skip the ingress QoS for the packet if the descriptor has + * ingress shaped flag set. 
+ */ + if (unlikely(desc->bit_flags & N2H_BIT_FLAG_INGRESS_SHAPED)) { + nss_core_set_skb_classify(nbuf); + } +#endif + + /* + * Set jumbo pointer to nbuf + */ + *jumbo_start_ptr = nbuf; + + /* + * Skip sending until last is received. + */ + return false; + } + + NSS_PKT_STATS_DEC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + + /* + * We've received a middle or a last segment. + * Check that we have received a head first to avoid null deferencing. + */ + if (unlikely(jumbo_start == NULL)) { + /* + * Middle before first! Free the middle. + */ + nss_warning("%px: saw a middle skb before head", nbuf); + dev_kfree_skb_any(nbuf); + return false; + } + + /* + * Free the skb after attaching the frag to the head skb. + * Our page is safe although we are freeing it because we + * just took a reference to it. + */ + skb_add_rx_frag(jumbo_start, skb_shinfo(jumbo_start)->nr_frags, skb_frag_page(&skb_shinfo(nbuf)->frags[0]), payload_offs, payload_len, PAGE_SIZE); + skb_frag_ref(jumbo_start, skb_shinfo(jumbo_start)->nr_frags - 1); + dev_kfree_skb_any(nbuf); + + if (!(bit_flags & N2H_BIT_FLAG_LAST_SEGMENT)) { + /* + * Skip sending until last is received. + */ + return false; + } + + /* + * Last is received. Set nbuf pointer to point to + * the jumbo skb so that it continues to get processed. + */ + nbuf = jumbo_start; + *nbuf_ptr = nbuf; + *jumbo_start_ptr = NULL; + prefetch((void *)(nbuf->data)); + +pull: + /* + * We need eth hdr to be in the linear part of the skb + * for data packets. Otherwise eth_type_trans fails. + */ + if (buffer_type != N2H_BUFFER_STATUS) { + if (!pskb_may_pull(nbuf, ETH_HLEN)) { + dev_kfree_skb(nbuf); + nss_warning("%px: could not pull eth header", nbuf); + return false; + } + } + + return true; +} + +/* + * nss_core_handle_linear_skb() + * Handler for processing linear skbs. 
+ */ +static inline bool nss_core_handle_linear_skb(struct nss_ctx_instance *nss_ctx, struct sk_buff **nbuf_ptr, struct sk_buff **head_ptr, + struct sk_buff **tail_ptr, struct n2h_descriptor *desc) +{ + uint16_t bit_flags = desc->bit_flags; + struct sk_buff *nbuf = *nbuf_ptr; + struct sk_buff *head = *head_ptr; + struct sk_buff *tail = *tail_ptr; + + /* + * We are in linear SKB mode. + */ + nbuf->data = nbuf->head + desc->payload_offs; + nbuf->len = desc->payload_len; + skb_set_tail_pointer(nbuf, nbuf->len); + + dma_unmap_single(nss_ctx->dev, (desc->buffer + desc->payload_offs), desc->payload_len, + DMA_FROM_DEVICE); + + prefetch((void *)(nbuf->data)); + + if (likely(bit_flags & N2H_BIT_FLAG_FIRST_SEGMENT) && likely(bit_flags & N2H_BIT_FLAG_LAST_SEGMENT)) { + + /* + * We have received another head before we saw the last segment. + * Free the old head as the frag list is corrupt. + */ + if (unlikely(head)) { + nss_warning("%px: received a full frame before a last", head); + dev_kfree_skb_any(head); + *head_ptr = NULL; + } + + nbuf->priority = desc->pri; + +#ifdef CONFIG_NET_CLS_ACT + /* + * Skip the ingress QoS for the packet if the descriptor has + * ingress shaped flag set. + */ + if (unlikely(desc->bit_flags & N2H_BIT_FLAG_INGRESS_SHAPED)) { + nss_core_set_skb_classify(nbuf); + } +#endif + + /* + * TODO: Check if there is any issue wrt map and unmap, + * NSS should playaround with data area and should not + * touch HEADROOM area + */ + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_SIMPLE]); + return true; + } + + /* + * Track number of skb chain processed. First && Last is not true segment. + */ + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_CHAIN_SEG_PROCESSED]); + + /* + * NSS sent us an SG chain. + * Build a frag list out of segments. + */ + if (unlikely((bit_flags & N2H_BIT_FLAG_FIRST_SEGMENT))) { + + /* + * We have received another head before we saw the last segment. + * Free the old head as the frag list is corrupt. 
+ */ + if (unlikely(head)) { + nss_warning("%px: received the second head before a last", head); + NSS_PKT_STATS_DEC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + dev_kfree_skb_any(head); + } + + /* + * Found head. + */ + if (unlikely(skb_has_frag_list(nbuf))) { + /* + * We don't support chain in a chain. + */ + nss_warning("%px: skb already has a fraglist", nbuf); + NSS_PKT_STATS_DEC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + dev_kfree_skb_any(nbuf); + return false; + } + + skb_frag_list_init(nbuf); + nbuf->data_len = 0; + nbuf->truesize = desc->payload_len; + nbuf->priority = desc->pri; + +#ifdef CONFIG_NET_CLS_ACT + /* + * Skip the ingress QoS for the packet if the descriptor has + * ingress shaped flag set. + */ + if (unlikely(desc->bit_flags & N2H_BIT_FLAG_INGRESS_SHAPED)) { + nss_core_set_skb_classify(nbuf); + } +#endif + + *head_ptr = nbuf; + + /* + * Skip sending until last is received. + */ + return false; + } + + NSS_PKT_STATS_DEC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + + /* + * We've received a middle segment. + * Check that we have received a head first to avoid null deferencing. + */ + if (unlikely(head == NULL)) { + + /* + * Middle before first! Free the middle. + */ + nss_warning("%px: saw a middle skb before head", nbuf); + dev_kfree_skb_any(nbuf); + + return false; + } + + if (!skb_has_frag_list(head)) { + /* + * 2nd skb in the chain. head's frag_list should point to him. + */ + nbuf->next = skb_shinfo(head)->frag_list; + skb_shinfo(head)->frag_list = nbuf; + } else { + /* + * 3rd, 4th... skb in the chain. The chain's previous tail's + * next should point to him. + */ + tail->next = nbuf; + nbuf->next = NULL; + } + *tail_ptr = nbuf; + + /* + * Now we've added a new nbuf to the chain. + * Update the chain length. 
+ */ + head->data_len += desc->payload_len; + head->len += desc->payload_len; + head->truesize += desc->payload_len; + + if (!(bit_flags & N2H_BIT_FLAG_LAST_SEGMENT)) { + /* + * Skip sending until last is received. + */ + return false; + } + + /* + * Last is received. Send the frag_list. + */ + *nbuf_ptr = head; + *head_ptr = NULL; + *tail_ptr = NULL; + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_SKB_FRAGLIST]); + return true; +} + +/* + * nss_core_handle_empty_buffers() + * Handle empty buffer returns. + */ +static inline void nss_core_handle_empty_buffers(struct nss_ctx_instance *nss_ctx, + struct nss_if_mem_map *if_map, + struct hlos_n2h_desc_ring *n2h_desc_ring, + struct n2h_descriptor *desc_ring, + struct n2h_descriptor *desc, + uint32_t count, uint32_t hlos_index, + uint16_t mask) +{ + while (count) { + /* + * Since we only return the primary skb, we have no way to unmap + * properly. Simple skb's are properly mapped but page data skbs + * have the payload mapped (and not the skb->data slab payload). + * + * Warning: On non-Krait HW, we need to unmap fragments. + * + * This only unmaps the first segment either slab payload or + * skb page data. Eventually, we need to unmap all of a frag_list + * or all of page_data however this is not a big concern as of now + * since on Kriats dma_map_single() does not allocate any resource + * and hence dma_unmap_single() is sort off a nop. + * + * No need to invalidate for Tx Completions, so set dma direction = DMA_TO_DEVICE; + * Similarly prefetch is not needed for an empty buffer. + */ + struct sk_buff *nbuf; + + /* + * Prefetch the next cache line of descriptors. 
+ */ + if (((hlos_index & 1) == 0) && likely(count > 2)) { + struct n2h_descriptor *next_cache_desc = &desc_ring[(hlos_index + 2) & mask]; + prefetch(next_cache_desc); + } + + nbuf = (struct sk_buff *)desc->opaque; + + if (unlikely(nbuf < (struct sk_buff *)PAGE_OFFSET)) { + /* + * Invalid opaque pointer + */ + nss_dump_desc(nss_ctx, desc); + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_BAD_DESCRIPTOR]); + goto next; + } + + dma_unmap_single(nss_ctx->dev, (desc->buffer + desc->payload_offs), desc->payload_len, DMA_TO_DEVICE); + dev_kfree_skb_any(nbuf); + + NSS_PKT_STATS_DEC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_EMPTY]); + +next: + hlos_index = (hlos_index + 1) & (mask); + desc = &desc_ring[hlos_index]; + count--; + } + + n2h_desc_ring->hlos_index = hlos_index; + if_map->n2h_hlos_index[NSS_IF_N2H_EMPTY_BUFFER_RETURN_QUEUE] = hlos_index; + + NSS_CORE_DMA_CACHE_MAINT((void *)&if_map->n2h_hlos_index[NSS_IF_N2H_EMPTY_BUFFER_RETURN_QUEUE], sizeof(uint32_t), DMA_TO_DEVICE); + NSS_CORE_DSB(); +} + +/* + * nss_core_handle_cause_queue() + * Handle interrupt cause related to N2H/H2N queues + */ +static int32_t nss_core_handle_cause_queue(struct int_ctx_instance *int_ctx, uint16_t cause, int16_t weight) +{ + int16_t count, count_temp; + uint16_t size, mask, qid; + uint32_t nss_index, hlos_index, start, end; + struct sk_buff *nbuf; + struct hlos_n2h_desc_ring *n2h_desc_ring; + struct n2h_desc_if_instance *desc_if; + struct n2h_descriptor *desc_ring; + struct n2h_descriptor *desc; + struct n2h_descriptor *next_cache_desc; + struct nss_ctx_instance *nss_ctx = int_ctx->nss_ctx; + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + struct nss_if_mem_map *if_map = mem_ctx->if_map; + + qid = nss_core_cause_to_queue(cause); + + /* + * Make sure qid < num_rings + */ + nss_assert(qid < if_map->n2h_rings); + + n2h_desc_ring = &nss_ctx->n2h_desc_ring[qid]; + desc_if = 
&n2h_desc_ring->desc_ring; + desc_ring = desc_if->desc; + NSS_CORE_DMA_CACHE_MAINT((void *)&if_map->n2h_nss_index[qid], sizeof(uint32_t), DMA_FROM_DEVICE); + NSS_CORE_DSB(); + nss_index = if_map->n2h_nss_index[qid]; + + hlos_index = n2h_desc_ring->hlos_index; + size = desc_if->size; + mask = size - 1; + + /* + * Check if there is work to be done for this queue + */ + count = ((nss_index - hlos_index) + size) & (mask); + if (unlikely(count == 0)) { + return 0; + } + + /* + * Restrict ourselves to suggested weight + */ + if (count > weight) { + count = weight; + } + + /* + * Invalidate all the descriptors we are going to read + */ + start = hlos_index; + end = (hlos_index + count) & mask; + if (end > start) { + dmac_inv_range((void *)&desc_ring[start], (void *)&desc_ring[end] + sizeof(struct n2h_descriptor)); + } else { + /* + * We have wrapped around + */ + dmac_inv_range((void *)&desc_ring[start], (void *)&desc_ring[mask] + sizeof(struct n2h_descriptor)); + dmac_inv_range((void *)&desc_ring[0], (void *)&desc_ring[end] + sizeof(struct n2h_descriptor)); + } + + /* + * Prefetch the first descriptor + */ + desc = &desc_ring[hlos_index]; + prefetch(desc); + + /* + * Prefetch the next cache line of descriptors if we are starting with + * the second descriptor in the cache line. If it is the first in the cache line, + * this will be done inside the loop. + */ + if (((hlos_index & 1) == 1) && likely((count > 1))) { + next_cache_desc = &desc_ring[(hlos_index + 2) & mask]; + prefetch(next_cache_desc); + } + + if (qid == NSS_IF_N2H_EMPTY_BUFFER_RETURN_QUEUE) { + nss_core_handle_empty_buffers(nss_ctx, if_map, n2h_desc_ring, desc_ring, desc, count, hlos_index, mask); + return count; + } + + count_temp = count; + while (count_temp) { + unsigned int buffer_type; + nss_ptr_t opaque; + + /* + * Prefetch the next cache line of descriptors. 
+ */ + if (((hlos_index & 1) == 0) && likely(count_temp > 2)) { + next_cache_desc = &desc_ring[(hlos_index + 2) & mask]; + prefetch(next_cache_desc); + } + + buffer_type = desc->buffer_type; + opaque = desc->opaque; + + /* + * Obtain nbuf + */ + nbuf = (struct sk_buff *)opaque; + if (unlikely(nbuf < (struct sk_buff *)PAGE_OFFSET)) { + /* + * Invalid opaque pointer + */ + nss_dump_desc(nss_ctx, desc); + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_BAD_DESCRIPTOR]); + goto next; + } + + /* + * Shaping uses the singleton approach as well. No need to unmap all the segments since only + * one of them is actually looked at. + */ + if ((unlikely(buffer_type == N2H_BUFFER_SHAPER_BOUNCED_INTERFACE)) || (unlikely(buffer_type == N2H_BUFFER_SHAPER_BOUNCED_BRIDGE))) { + dma_unmap_page(nss_ctx->dev, (desc->buffer + desc->payload_offs), desc->payload_len, DMA_TO_DEVICE); + goto consume; + } + + /* + * crypto buffer + * + */ + if (unlikely((buffer_type == N2H_BUFFER_CRYPTO_RESP))) { + dma_unmap_single(NULL, (desc->buffer + desc->payload_offs), desc->payload_len, DMA_FROM_DEVICE); + goto consume; + } + + /* + * Check if we received a paged skb. + */ + if (skb_shinfo(nbuf)->nr_frags > 0) { + /* + * Check if we received paged skb while constructing + * a linear skb chain. If so we need to free. + */ + if (unlikely(n2h_desc_ring->head)) { + nss_warning("%px: we should not have an incomplete paged skb while" + " constructing a linear skb %px", nbuf, n2h_desc_ring->head); + + NSS_PKT_STATS_DEC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + dev_kfree_skb_any(n2h_desc_ring->head); + n2h_desc_ring->head = NULL; + } + + if (!nss_core_handle_nr_frag_skb(nss_ctx, &nbuf, &n2h_desc_ring->jumbo_start, desc, buffer_type)) { + goto next; + } + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_RX_NR_FRAGS]); + goto consume; + } + + /* + * Check if we received a linear skb while constructing + * a paged skb. 
If so we need to free the paged_skb and handle the linear skb. + */ + if (unlikely(n2h_desc_ring->jumbo_start)) { + nss_warning("%px: we should not have an incomplete linear skb while" + " constructing a paged skb %px", nbuf, n2h_desc_ring->jumbo_start); + + NSS_PKT_STATS_DEC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + dev_kfree_skb_any(n2h_desc_ring->jumbo_start); + n2h_desc_ring->jumbo_start = NULL; + } + + /* + * This is a simple linear skb. Use the the linear skb + * handler to process it. + */ + if (!nss_core_handle_linear_skb(nss_ctx, &nbuf, &n2h_desc_ring->head, &n2h_desc_ring->tail, desc)) { + goto next; + } + +consume: + nss_core_rx_pbuf(nss_ctx, desc, &(int_ctx->napi), buffer_type, nbuf, qid); + +next: + + hlos_index = (hlos_index + 1) & (mask); + desc = &desc_ring[hlos_index]; + count_temp--; + } + + n2h_desc_ring->hlos_index = hlos_index; + if_map->n2h_hlos_index[qid] = hlos_index; + + NSS_CORE_DMA_CACHE_MAINT((void *)&if_map->n2h_hlos_index[qid], sizeof(uint32_t), DMA_TO_DEVICE); + NSS_CORE_DSB(); + + return count; +} + +/* + * nss_core_init_nss() + * Initialize NSS core state + */ +static void nss_core_init_nss(struct nss_ctx_instance *nss_ctx, struct nss_if_mem_map *if_map) +{ + struct nss_top_instance *nss_top; + int ret; + + NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(*if_map), DMA_FROM_DEVICE); + NSS_CORE_DSB(); + + /* + * NOTE: A commonly found error is that sizes and start address of per core + * virtual register map do not match in NSS and HLOS builds. This will lead + * to some hard to trace issues such as spinlock magic check failure etc. 
+ * Following checks verify that proper virtual map has been initialized + */ + nss_assert(if_map->magic == DEV_MAGIC); + +#ifdef NSS_DRV_C2C_ENABLE + nss_ctx->c2c_start = nss_ctx->meminfo_ctx.c2c_start_dma; +#endif + + nss_top = nss_ctx->nss_top; + spin_lock_bh(&nss_top->lock); + nss_ctx->state = NSS_CORE_STATE_INITIALIZED; + spin_unlock_bh(&nss_top->lock); + + if (nss_ctx->id) { + ret = nss_n2h_update_queue_config_async(nss_ctx, pn_mq_en, pn_qlimits); + if (ret != NSS_TX_SUCCESS) { + nss_warning("Failed to send pnode queue config to core 1\n"); + } + return; + } + + /* + * If nss core0 is up, then we are ready to hook to nss-gmac + */ + if (nss_data_plane_schedule_registration()) { + + /* + * Configure the maximum number of IPv4/IPv6 + * connections supported by the accelerator. + */ + nss_ipv4_conn_cfg = max_ipv4_conn; +#ifdef NSS_DRV_IPV6_ENABLE + nss_ipv6_conn_cfg = max_ipv6_conn; + nss_ipv6_update_conn_count(max_ipv6_conn); +#endif + nss_ipv4_update_conn_count(max_ipv4_conn); + +#ifdef NSS_MEM_PROFILE_LOW + /* + * For low memory profiles, restrict the number of empty buffer pool + * size to NSS_LOW_MEM_EMPTY_POOL_BUF_SZ. Overwrite the default number + * of empty buffer pool size configured during NSS initialization. + */ + ret = nss_n2h_cfg_empty_pool_size(nss_ctx, NSS_LOW_MEM_EMPTY_POOL_BUF_SZ); + if (ret != NSS_TX_SUCCESS) { + nss_warning("%px: Failed to update empty buffer pool config\n", nss_ctx); + } +#endif + } else { + spin_lock_bh(&nss_top->lock); + nss_ctx->state = NSS_CORE_STATE_UNINITIALIZED; + spin_unlock_bh(&nss_top->lock); + } +} + +/* + * nss_core_alloc_paged_buffers() + * Allocate paged buffers for SOS. 
+ */ +static void nss_core_alloc_paged_buffers(struct nss_ctx_instance *nss_ctx, struct nss_if_mem_map *if_map, + uint16_t count, int16_t mask, int32_t hlos_index, uint32_t alloc_fail_count, + uint32_t buffer_type, uint32_t buffer_queue, uint32_t stats_index) +{ + struct sk_buff *nbuf; + struct page *npage; + struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[buffer_queue]; + struct h2n_desc_if_instance *desc_if = &h2n_desc_ring->desc_ring; + struct h2n_descriptor *desc_ring = desc_if->desc; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + while (count) { + struct h2n_descriptor *desc = &desc_ring[hlos_index]; + dma_addr_t buffer; + + /* + * Alloc an skb AND a page. + */ + nbuf = dev_alloc_skb(NSS_CORE_JUMBO_LINEAR_BUF_SIZE); + if (unlikely(!nbuf)) { + /* + * ERR: + */ + NSS_PKT_STATS_INC(&nss_top->stats_drv[alloc_fail_count]); + nss_warning("%px: Could not obtain empty paged buffer", nss_ctx); + break; + } + + npage = alloc_page(GFP_ATOMIC); + if (unlikely(!npage)) { + /* + * ERR: + */ + dev_kfree_skb_any(nbuf); + NSS_PKT_STATS_INC(&nss_top->stats_drv[alloc_fail_count]); + nss_warning("%px: Could not obtain empty page", nss_ctx); + break; + } + + /* + * When we alloc an skb, initially head = data = tail and len = 0. + * So nobody will try to read the linear part of the skb. 
+ */ + skb_fill_page_desc(nbuf, 0, npage, 0, PAGE_SIZE); + nbuf->data_len += PAGE_SIZE; + nbuf->len += PAGE_SIZE; + nbuf->truesize += PAGE_SIZE; + + /* Map the page for jumbo */ + buffer = dma_map_page(nss_ctx->dev, npage, 0, PAGE_SIZE, DMA_FROM_DEVICE); + desc->buffer_len = PAGE_SIZE; + desc->payload_offs = 0; + + if (unlikely(dma_mapping_error(nss_ctx->dev, buffer))) { + /* + * ERR: + */ + dev_kfree_skb_any(nbuf); + nss_warning("%px: DMA mapping failed for empty buffer", nss_ctx); + break; + } + /* + * We are holding this skb in NSS FW, let kmemleak know about it + */ + kmemleak_not_leak(nbuf); + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + desc->opaque = (nss_ptr_t)nbuf; + desc->buffer = buffer; + desc->buffer_type = buffer_type; + + /* + * Flush the descriptor + */ + NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE); + + hlos_index = (hlos_index + 1) & (mask); + count--; + } + + /* + * Wait for the flushes to be synced before writing the index + */ + NSS_CORE_DSB(); + + h2n_desc_ring->hlos_index = hlos_index; + if_map->h2n_hlos_index[buffer_queue] = hlos_index; + + NSS_CORE_DMA_CACHE_MAINT(&if_map->h2n_hlos_index[buffer_queue], sizeof(uint32_t), DMA_TO_DEVICE); + NSS_CORE_DSB(); + + NSS_PKT_STATS_INC(&nss_top->stats_drv[stats_index]); +} + +/* + * nss_core_alloc_jumbo_mru_buffers() + * Allocate jumbo mru buffers. 
+ */ +static void nss_core_alloc_jumbo_mru_buffers(struct nss_ctx_instance *nss_ctx, struct nss_if_mem_map *if_map, + int jumbo_mru, uint16_t count, int16_t mask, int32_t hlos_index) +{ + + struct sk_buff *nbuf; + struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[NSS_IF_H2N_EMPTY_BUFFER_QUEUE]; + struct h2n_desc_if_instance *desc_if = &h2n_desc_ring->desc_ring; + struct h2n_descriptor *desc_ring = desc_if->desc; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + while (count) { + struct h2n_descriptor *desc = &desc_ring[hlos_index]; + dma_addr_t buffer; + nbuf = dev_alloc_skb(jumbo_mru); + if (unlikely(!nbuf)) { + /* + * ERR: + */ + NSS_PKT_STATS_INC(&nss_top->stats_drv[NSS_DRV_STATS_NBUF_ALLOC_FAILS]); + nss_warning("%px: Could not obtain empty jumbo mru buffer", nss_ctx); + break; + } + + /* + * Map the skb + */ + buffer = dma_map_single(nss_ctx->dev, nbuf->head, jumbo_mru, DMA_FROM_DEVICE); + desc->buffer_len = jumbo_mru; + desc->payload_offs = (uint16_t) (nbuf->data - nbuf->head); + if (unlikely(dma_mapping_error(nss_ctx->dev, buffer))) { + /* + * ERR: + */ + dev_kfree_skb_any(nbuf); + nss_warning("%px: DMA mapping failed for empty buffer", nss_ctx); + break; + } + + /* + * We are holding this skb in NSS FW, let kmemleak know about it + */ + kmemleak_not_leak(nbuf); + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + desc->opaque = (nss_ptr_t)nbuf; + desc->buffer = buffer; + desc->buffer_type = H2N_BUFFER_EMPTY; + + /* + * Flush the descriptor + */ + NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE); + + hlos_index = (hlos_index + 1) & (mask); + count--; + } + + /* + * Wait for the flushes to be synced before writing the index + */ + NSS_CORE_DSB(); + + h2n_desc_ring->hlos_index = hlos_index; + if_map->h2n_hlos_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE] = hlos_index; + + NSS_CORE_DMA_CACHE_MAINT(&if_map->h2n_hlos_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE], sizeof(uint32_t), DMA_TO_DEVICE); + 
NSS_CORE_DSB(); + + NSS_PKT_STATS_INC(&nss_top->stats_drv[NSS_DRV_STATS_TX_EMPTY]); +} + +/* + * nss_core_alloc_max_avail_size_buffers() + * Allocate maximum available sized buffers. + */ +static void nss_core_alloc_max_avail_size_buffers(struct nss_ctx_instance *nss_ctx, struct nss_if_mem_map *if_map, + uint16_t max_buf_size, uint16_t count, int16_t mask, int32_t hlos_index) +{ + struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[NSS_IF_H2N_EMPTY_BUFFER_QUEUE]; + struct h2n_desc_if_instance *desc_if = &h2n_desc_ring->desc_ring; + struct h2n_descriptor *desc_ring = desc_if->desc; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + uint16_t payload_len = max_buf_size + NET_SKB_PAD; + uint16_t start = hlos_index; + uint16_t prev_hlos_index; + + while (count) { + dma_addr_t buffer; + struct h2n_descriptor *desc = &desc_ring[hlos_index]; + + struct sk_buff *nbuf = dev_alloc_skb(max_buf_size); + if (unlikely(!nbuf)) { + /* + * ERR: + */ + NSS_PKT_STATS_INC(&nss_top->stats_drv[NSS_DRV_STATS_NBUF_ALLOC_FAILS]); + nss_warning("%px: Could not obtain empty buffer", nss_ctx); + break; + } + + /* + * Map the skb + */ + buffer = dma_map_single(nss_ctx->dev, nbuf->head, payload_len, DMA_FROM_DEVICE); + + if (unlikely(dma_mapping_error(nss_ctx->dev, buffer))) { + /* + * ERR: + */ + dev_kfree_skb_any(nbuf); + nss_warning("%px: DMA mapping failed for empty buffer", nss_ctx); + break; + } + + /* + * We are holding this skb in NSS FW, let kmemleak know about it + */ + kmemleak_not_leak(nbuf); + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + + desc->opaque = (nss_ptr_t)nbuf; + desc->buffer = buffer; + desc->buffer_len = payload_len; + + hlos_index = (hlos_index + 1) & (mask); + count--; + } + + /* + * Find the last descriptor we need to flush. + */ + prev_hlos_index = (hlos_index - 1) & mask; + + /* + * Flush the descriptors, including the descriptor at prev_hlos_index. 
+ */ + if (prev_hlos_index > start) { + dmac_clean_range((void *)&desc_ring[start], (void *)&desc_ring[prev_hlos_index] + sizeof(struct h2n_descriptor)); + } else { + /* + * We have wrapped around + */ + dmac_clean_range((void *)&desc_ring[start], (void *)&desc_ring[mask] + sizeof(struct h2n_descriptor)); + dmac_clean_range((void *)&desc_ring[0], (void *)&desc_ring[prev_hlos_index] + sizeof(struct h2n_descriptor)); + } + + /* + * Wait for the flushes to be synced before writing the index + */ + NSS_CORE_DSB(); + + h2n_desc_ring->hlos_index = hlos_index; + if_map->h2n_hlos_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE] = hlos_index; + + NSS_CORE_DMA_CACHE_MAINT(&if_map->h2n_hlos_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE], sizeof(uint32_t), DMA_TO_DEVICE); + NSS_CORE_DSB(); + + NSS_PKT_STATS_INC(&nss_top->stats_drv[NSS_DRV_STATS_TX_EMPTY]); +} + +/* + * nss_core_handle_empty_buffer_sos() + * Handle empty buffer SOS interrupt. + */ +static inline void nss_core_handle_empty_buffer_sos(struct nss_ctx_instance *nss_ctx, + struct nss_if_mem_map *if_map, uint16_t max_buf_size) +{ + uint16_t count, size, mask; + int32_t nss_index, hlos_index; + struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[NSS_IF_H2N_EMPTY_BUFFER_QUEUE]; + + int paged_mode = nss_core_get_paged_mode(); + int jumbo_mru = nss_core_get_jumbo_mru(); + + /* + * Check how many empty buffers could be filled in queue + */ + NSS_CORE_DMA_CACHE_MAINT(&if_map->h2n_nss_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE], sizeof(uint32_t), DMA_FROM_DEVICE); + NSS_CORE_DSB(); + nss_index = if_map->h2n_nss_index[NSS_IF_H2N_EMPTY_BUFFER_QUEUE]; + + hlos_index = h2n_desc_ring->hlos_index; + size = h2n_desc_ring->desc_ring.size; + + mask = size - 1; + count = ((nss_index - hlos_index - 1) + size) & (mask); + + nss_trace("%px: Adding %d buffers to empty queue\n", nss_ctx, count); + + /* + * Fill empty buffer queue with buffers leaving one empty descriptor + * Note that total number of descriptors in queue cannot be more than (size - 
1) + */ + if (!count) { + return; + } + + if (paged_mode) { + nss_core_alloc_paged_buffers(nss_ctx, if_map, count, mask, hlos_index, + NSS_DRV_STATS_NBUF_ALLOC_FAILS, H2N_BUFFER_EMPTY, + NSS_IF_H2N_EMPTY_BUFFER_QUEUE, NSS_DRV_STATS_TX_EMPTY); + } else if (jumbo_mru) { + nss_core_alloc_jumbo_mru_buffers(nss_ctx, if_map, jumbo_mru, count, + mask, hlos_index); + } else { + nss_core_alloc_max_avail_size_buffers(nss_ctx, if_map, max_buf_size, + count, mask, hlos_index); + } + + /* + * Inform NSS that new buffers are available + */ + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_EMPTY_BUFFER_QUEUE); +} + +/* + * nss_core_handle_paged_empty_buffer_sos() + * Handle paged empty buffer SOS. + */ +static inline void nss_core_handle_paged_empty_buffer_sos(struct nss_ctx_instance *nss_ctx, + struct nss_if_mem_map *if_map, uint16_t max_buf_size) +{ + uint16_t count, size, mask; + int32_t nss_index, hlos_index; + struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[NSS_IF_H2N_EMPTY_PAGED_BUFFER_QUEUE]; + + /* + * Check how many empty buffers could be filled in queue + */ + NSS_CORE_DMA_CACHE_MAINT((void *)&if_map->h2n_nss_index[NSS_IF_H2N_EMPTY_PAGED_BUFFER_QUEUE], sizeof(uint32_t), DMA_FROM_DEVICE); + NSS_CORE_DSB(); + nss_index = if_map->h2n_nss_index[NSS_IF_H2N_EMPTY_PAGED_BUFFER_QUEUE]; + + hlos_index = h2n_desc_ring->hlos_index; + size = h2n_desc_ring->desc_ring.size; + + mask = size - 1; + count = ((nss_index - hlos_index - 1) + size) & (mask); + nss_trace("%px: Adding %d buffers to paged buffer queue", nss_ctx, count); + + /* + * Fill empty buffer queue with buffers leaving one empty descriptor + * Note that total number of descriptors in queue cannot be more than (size - 1) + */ + if (!count) { + return; + } + + nss_core_alloc_paged_buffers(nss_ctx, if_map, count, mask, hlos_index, + NSS_DRV_STATS_PAGED_BUF_ALLOC_FAILS, H2N_PAGED_BUFFER_EMPTY, + NSS_IF_H2N_EMPTY_PAGED_BUFFER_QUEUE, NSS_DRV_STATS_PAGED_TX_EMPTY); + + /* + * Inform NSS that new buffers are 
available + */ + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_EMPTY_PAGED_BUFFER_QUEUE); +} + +/* + * nss_core_handle_tx_unblocked() + * Handle TX Unblocked. + */ +static inline void nss_core_handle_tx_unblocked(struct nss_ctx_instance *nss_ctx) +{ + int32_t i; + nss_trace("%px: Data queue unblocked", nss_ctx); + + /* + * Call callback functions of drivers that have registered with us + */ + spin_lock_bh(&nss_ctx->decongest_cb_lock); + + for (i = 0; i < NSS_MAX_CLIENTS; i++) { + if (nss_ctx->queue_decongestion_callback[i]) { + nss_ctx->queue_decongestion_callback[i](nss_ctx->queue_decongestion_ctx[i]); + } + } + + spin_unlock_bh(&nss_ctx->decongest_cb_lock); + nss_ctx->h2n_desc_rings[NSS_IF_H2N_DATA_QUEUE].flags &= ~NSS_H2N_DESC_RING_FLAGS_TX_STOPPED; + + /* + * Mask Tx unblocked interrupt and unmask it again when queue full condition is reached + */ + nss_hal_disable_interrupt(nss_ctx, nss_ctx->int_ctx[0].shift_factor, NSS_N2H_INTR_TX_UNBLOCKED); +} + +/* + * nss_core_handle_cause_nonqueue() + * Handle non-queue interrupt causes (e.g. empty buffer SOS, Tx unblocked) + */ +static void nss_core_handle_cause_nonqueue(struct int_ctx_instance *int_ctx, uint32_t cause, int16_t weight) +{ + struct nss_ctx_instance *nss_ctx = int_ctx->nss_ctx; + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + struct nss_if_mem_map *if_map = mem_ctx->if_map; + uint16_t max_buf_size = (uint16_t) nss_ctx->max_buf_size; +#ifdef NSS_DRV_C2C_ENABLE + uint32_t c2c_intr_addr1, c2c_intr_addr2; + int32_t i; +#endif + + nss_assert((cause == NSS_N2H_INTR_EMPTY_BUFFERS_SOS) + || (cause == NSS_N2H_INTR_TX_UNBLOCKED) + || cause == NSS_N2H_INTR_PAGED_EMPTY_BUFFERS_SOS); + + /* + * If this is the first time we are receiving this interrupt then + * we need to initialize local state of NSS core. This helps us save an + * interrupt cause bit. Hopefully, unlikley and branch prediction algorithm + * of processor will prevent any excessive penalties. 
+ */ + if (unlikely(nss_ctx->state == NSS_CORE_STATE_UNINITIALIZED)) { + struct nss_top_instance *nss_top = NULL; + nss_core_init_nss(nss_ctx, if_map); + nss_send_ddr_info(nss_ctx); + + nss_info_always("%px: nss core %d booted successfully\n", nss_ctx, nss_ctx->id); + nss_top = nss_ctx->nss_top; + +#ifdef NSS_DRV_C2C_ENABLE +#if (NSS_MAX_CORES > 1) + /* + * Pass C2C addresses of already brought up cores to the recently brought + * up core. No NSS core knows the state of other other cores in system so + * NSS driver needs to mediate and kick start C2C between them + */ + for (i = 0; i < nss_top_main.num_nss; i++) { + /* + * Loop through all NSS cores and send exchange C2C addresses + * TODO: Current implementation utilizes the fact that there are + * only two cores in current design. And ofcourse ignore + * the core that we are trying to initialize. + */ + if (&nss_top->nss[i] != nss_ctx) { + /* + * Block initialization routine of any other NSS cores running on other + * processors. We do not want them to mess around with their initialization + * state and C2C addresses while we check their state. 
+ */ + spin_lock_bh(&nss_top->lock); + if (nss_top->nss[i].state == NSS_CORE_STATE_INITIALIZED) { + spin_unlock_bh(&nss_top->lock); + c2c_intr_addr1 = (uint32_t)(nss_ctx->nphys) + NSS_REGS_C2C_INTR_SET_OFFSET; + nss_c2c_tx_msg_cfg_map(&nss_top->nss[i], nss_ctx->c2c_start, c2c_intr_addr1); + c2c_intr_addr2 = (uint32_t)(nss_top->nss[i].nphys) + NSS_REGS_C2C_INTR_SET_OFFSET; + nss_c2c_tx_msg_cfg_map(nss_ctx, nss_top->nss[i].c2c_start, c2c_intr_addr2); + continue; + } + spin_unlock_bh(&nss_top->lock); + } + } +#endif +#endif + } + + /* + * TODO: find better mechanism to handle empty buffers + */ + if (likely(cause == NSS_N2H_INTR_EMPTY_BUFFERS_SOS)) { + nss_core_handle_empty_buffer_sos(nss_ctx, if_map, max_buf_size); + } else if (cause == NSS_N2H_INTR_PAGED_EMPTY_BUFFERS_SOS) { + nss_core_handle_paged_empty_buffer_sos(nss_ctx, if_map, max_buf_size); + } else if (cause == NSS_N2H_INTR_TX_UNBLOCKED) { + nss_core_handle_tx_unblocked(nss_ctx); + } +} + +/* + * nss_core_get_prioritized_cause() + * Obtain proritized cause (from multiple interrupt causes) that + * must be handled by NSS driver before other causes + */ +static uint32_t nss_core_get_prioritized_cause(uint32_t cause, uint32_t *type, int16_t *weight) +{ + *type = NSS_INTR_CAUSE_INVALID; + *weight = 0; + + /* + * NOTE: This is a very simple algorithm with fixed weight and strict priority + * + * TODO: Modify the algorithm later with proper weights and Round Robin + */ + + if (cause & NSS_N2H_INTR_EMPTY_BUFFERS_SOS) { + *type = NSS_INTR_CAUSE_NON_QUEUE; + *weight = NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT; + return NSS_N2H_INTR_EMPTY_BUFFERS_SOS; + } + + if (cause & NSS_N2H_INTR_PAGED_EMPTY_BUFFERS_SOS) { + *type = NSS_INTR_CAUSE_NON_QUEUE; + *weight = NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT; + return NSS_N2H_INTR_PAGED_EMPTY_BUFFERS_SOS; + } + + if (cause & NSS_N2H_INTR_EMPTY_BUFFER_QUEUE) { + *type = NSS_INTR_CAUSE_QUEUE; + *weight = NSS_EMPTY_BUFFER_RETURN_PROCESSING_WEIGHT; + return 
NSS_N2H_INTR_EMPTY_BUFFER_QUEUE; + } + + if (cause & NSS_N2H_INTR_TX_UNBLOCKED) { + *type = NSS_INTR_CAUSE_NON_QUEUE; + *weight = NSS_TX_UNBLOCKED_PROCESSING_WEIGHT; + return NSS_N2H_INTR_TX_UNBLOCKED; + } + + if (cause & NSS_N2H_INTR_DATA_QUEUE_0) { + *type = NSS_INTR_CAUSE_QUEUE; + *weight = NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT; + return NSS_N2H_INTR_DATA_QUEUE_0; + } + + if (cause & NSS_N2H_INTR_DATA_QUEUE_1) { + *type = NSS_INTR_CAUSE_QUEUE; + *weight = NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT; + return NSS_N2H_INTR_DATA_QUEUE_1; + } + + if (cause & NSS_N2H_INTR_DATA_QUEUE_2) { + *type = NSS_INTR_CAUSE_QUEUE; + *weight = NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT; + return NSS_N2H_INTR_DATA_QUEUE_2; + } + + if (cause & NSS_N2H_INTR_DATA_QUEUE_3) { + *type = NSS_INTR_CAUSE_QUEUE; + *weight = NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT; + return NSS_N2H_INTR_DATA_QUEUE_3; + } + + if (cause & NSS_N2H_INTR_COREDUMP_COMPLETE) { + *type = NSS_INTR_CAUSE_EMERGENCY; + return NSS_N2H_INTR_COREDUMP_COMPLETE; + } + + if (cause & NSS_N2H_INTR_PROFILE_DMA) { + *type = NSS_INTR_CAUSE_SDMA; + return NSS_N2H_INTR_PROFILE_DMA; + } + + return 0; +} + +/* + * nss_core_handle_napi() + * NAPI handler for NSS + */ +int nss_core_handle_napi(struct napi_struct *napi, int budget) +{ + int16_t processed, weight, count = 0; + uint32_t prio_cause, int_cause = 0, cause_type; + struct int_ctx_instance *int_ctx = container_of(napi, struct int_ctx_instance, napi); + struct nss_ctx_instance *nss_ctx = int_ctx->nss_ctx; + + /* + * Read cause of interrupt + */ + nss_hal_read_interrupt_cause(nss_ctx, int_ctx->shift_factor, &int_cause); + nss_hal_clear_interrupt_cause(nss_ctx, int_ctx->shift_factor, int_cause); + int_ctx->cause |= int_cause; + + do { + while ((int_ctx->cause) && (budget)) { + + /* + * Obtain the cause as per priority. Also obtain the weight + * + * NOTE: The idea is that all causes are processed as per priority and weight + * so that no single cause can overwhelm the system. 
+ */ + prio_cause = nss_core_get_prioritized_cause(int_ctx->cause, &cause_type, &weight); + if (budget < weight) { + weight = budget; + } + + processed = 0; + switch (cause_type) { + case NSS_INTR_CAUSE_QUEUE: + processed = nss_core_handle_cause_queue(int_ctx, prio_cause, weight); + + count += processed; + budget -= processed; + + /* + * If #packets processed were lesser than weight then processing for this queue/cause is + * complete and we can clear this interrupt cause from interrupt context structure + */ + if (processed < weight) { + int_ctx->cause &= ~prio_cause; + } + break; + + case NSS_INTR_CAUSE_NON_QUEUE: + nss_core_handle_cause_nonqueue(int_ctx, prio_cause, weight); + int_ctx->cause &= ~prio_cause; + break; + + case NSS_INTR_CAUSE_SDMA: + nss_core_handle_napi_sdma(napi, budget); + int_ctx->cause &= ~prio_cause; + break; + + case NSS_INTR_CAUSE_EMERGENCY: + nss_info_always("NSS core %d signal COREDUMP COMPLETE %x\n", + nss_ctx->id, int_ctx->cause); + nss_fw_coredump_notify(nss_ctx, prio_cause); + int_ctx->cause &= ~prio_cause; + break; + + default: + nss_warning("%px: Invalid cause %x received from nss", nss_ctx, int_cause); + nss_assert(0); + break; + } + } + + nss_hal_read_interrupt_cause(nss_ctx, int_ctx->shift_factor, &int_cause); + nss_hal_clear_interrupt_cause(nss_ctx, int_ctx->shift_factor, int_cause); + int_ctx->cause |= int_cause; + } while ((int_ctx->cause) && (budget)); + + if (int_ctx->cause == 0) { + napi_complete(napi); + + /* + * Re-enable any further interrupt from this IRQ + */ + nss_hal_enable_interrupt(nss_ctx, int_ctx->shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS); + } + + return count; +} + +/* + * nss_core_handle_napi_emergency() + * NAPI handler for NSS crash + */ +int nss_core_handle_napi_emergency(struct napi_struct *napi, int budget) +{ + struct int_ctx_instance *int_ctx = container_of(napi, struct int_ctx_instance, napi); + + nss_info_always("NSS core %d signal COREDUMP COMPLETE %x\n", + int_ctx->nss_ctx->id, int_ctx->cause); + 
nss_fw_coredump_notify(int_ctx->nss_ctx, 0); + + return 0; +} + +/* + * nss_core_handle_napi_sdma() + * NAPI handler for NSS soft DMA + */ +int nss_core_handle_napi_sdma(struct napi_struct *napi, int budget) +{ + struct int_ctx_instance *int_ctx = container_of(napi, struct int_ctx_instance, napi); + struct nss_ctx_instance *nss_ctx = int_ctx->nss_ctx; + struct nss_profile_sdma_ctrl *ctrl = (struct nss_profile_sdma_ctrl *)nss_ctx->meminfo_ctx.sdma_ctrl; + + if (ctrl->consumer[0].dispatch.fp) + ctrl->consumer[0].dispatch.fp(ctrl->consumer[0].arg.kp); + +#if !defined(NSS_HAL_IPQ806X_SUPPORT) + napi_complete(napi); + enable_irq(int_ctx->irq); +#endif + return 0; +} + +/* + * nss_core_handle_napi_queue() + * NAPI handler for NSS queue cause + */ +int nss_core_handle_napi_queue(struct napi_struct *napi, int budget) +{ + int processed; + struct int_ctx_instance *int_ctx = container_of(napi, struct int_ctx_instance, napi); + + processed = nss_core_handle_cause_queue(int_ctx, int_ctx->cause, budget); + if (processed < budget) { + napi_complete(napi); + enable_irq(int_ctx->irq); + } + + return processed; +} + +/* + * nss_core_handle_napi_non_queue() + * NAPI handler for NSS non queue cause + */ +int nss_core_handle_napi_non_queue(struct napi_struct *napi, int budget) +{ + struct int_ctx_instance *int_ctx = container_of(napi, struct int_ctx_instance, napi); + + nss_core_handle_cause_nonqueue(int_ctx, int_ctx->cause, 0); + napi_complete(napi); + enable_irq(int_ctx->irq); + return 0; +} + +/* + * nss_core_write_one_descriptor() + * Fills-up a descriptor with required fields. 
+ */ +static inline void nss_core_write_one_descriptor(struct h2n_descriptor *desc, + uint16_t buffer_type, uint32_t buffer, uint32_t if_num, + nss_ptr_t opaque, uint16_t payload_off, uint16_t payload_len, uint16_t buffer_len, + uint32_t qos_tag, uint16_t mss, uint16_t bit_flags) +{ + desc->buffer_type = buffer_type; + desc->buffer = buffer; + desc->interface_num = if_num; + desc->opaque = opaque; + desc->payload_offs = payload_off; + desc->payload_len = payload_len; + desc->buffer_len = buffer_len; + desc->qos_tag = qos_tag; + desc->mss = mss; + desc->bit_flags = bit_flags; +} + +/* +* nss_core_send_unwind_dma() +* It unwinds (or unmap) DMA from descriptors +*/ +static inline void nss_core_send_unwind_dma(struct device *dev, struct h2n_desc_if_instance *desc_if, + uint16_t hlos_index, int16_t count, bool is_fraglist) +{ + struct h2n_descriptor *desc_ring = desc_if->desc; + struct h2n_descriptor *desc; + int16_t i, mask; + + mask = desc_if->size - 1; + for (i = 0; i < count; i++) { + desc = &desc_ring[hlos_index]; + if (is_fraglist) { + dma_unmap_single(dev, desc->buffer, desc->buffer_len, DMA_TO_DEVICE); + } else { + dma_unmap_page(dev, desc->buffer, desc->buffer_len, DMA_TO_DEVICE); + } + hlos_index = (hlos_index - 1) & mask; + } +} + +/* + * nss_core_skb_tail_offset() + */ +static inline uint32_t nss_core_skb_tail_offset(struct sk_buff *skb) +{ +#ifdef NET_SKBUFF_DATA_USES_OFFSET + return skb->tail; +#else + return skb->tail - skb->head; +#endif +} + +/* + * nss_core_dma_map_single() + */ +static inline uint32_t nss_core_dma_map_single(struct device *dev, struct sk_buff *skb) +{ + return (uint32_t)dma_map_single(dev, skb->head, nss_core_skb_tail_offset(skb), DMA_TO_DEVICE); +} + +#if (NSS_SKB_REUSE_SUPPORT == 1) +/* + * nss_core_skb_can_reuse + * check if skb can be reuse + */ +static inline bool nss_core_skb_can_reuse(struct nss_ctx_instance *nss_ctx, + uint32_t if_num, struct sk_buff *nbuf, int min_skb_size) +{ + /* + * If we have to call a destructor, we 
can't re-use the buffer? + */ + if (unlikely(nbuf->destructor != NULL)) { + return false; + } + + /* + * Check if skb has more than single user. + */ + if (unlikely(skb_shared(nbuf))) { + return false; + } + +#if IS_ENABLED(CONFIG_NF_CONNTRACK) + /* + * This check is added to avoid deadlock from nf_conntrack + * when ecm is trying to flush a rule. + */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) + if (unlikely(nbuf->nfct)) { + return false; + } +#else + if (unlikely(nbuf->_nfct)) { + return false; + } +#endif +#endif + +#ifdef CONFIG_BRIDGE_NETFILTER + /* + * This check is added to avoid deadlock from nf_bridge + * when ecm is trying to flush a rule. + */ + if (unlikely(nf_bridge_info_get(nbuf))) { + return false; + } +#endif + + /* + * If skb has security parameters set do not reuse + */ + if (unlikely(skb_sec_path(nbuf))) { + return false; + } + + if (unlikely(irqs_disabled())) + return false; + + if (unlikely(skb_shinfo(nbuf)->tx_flags & SKBTX_DEV_ZEROCOPY)) + return false; + + if (unlikely(skb_is_nonlinear(nbuf))) + return false; + + if (unlikely(skb_has_frag_list(nbuf))) + return false; + + if (unlikely(skb_shinfo(nbuf)->nr_frags)) + return false; + + if (unlikely(nbuf->fclone != SKB_FCLONE_UNAVAILABLE)) + return false; + + min_skb_size = SKB_DATA_ALIGN(min_skb_size + NET_SKB_PAD); + if (unlikely(skb_end_pointer(nbuf) - nbuf->head < min_skb_size)) + return false; + + if (unlikely(skb_end_pointer(nbuf) - nbuf->head >= nss_core_get_max_reuse())) + return false; + + if (unlikely(skb_cloned(nbuf))) + return false; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) + if (unlikely(skb_pfmemalloc(nbuf))) + return false; +#endif + + return true; +} + +/* + * nss_skb_reuse - clean up an skb + * Clears the skb to be reused as a receive buffer. + * + * NOTE: This function does any necessary reference count dropping, and + * cleans up the skbuff as if its allocated fresh. 
+ */ +void nss_skb_reuse(struct sk_buff *nbuf) +{ + struct skb_shared_info *shinfo; + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + u8 head_frag = nbuf->head_frag; +#endif + + /* + * Reset all the necessary head state information from skb which + * we found can be recycled for NSS. + */ + skb_dst_drop(nbuf); + + shinfo = skb_shinfo(nbuf); + memset(shinfo, 0, offsetof(struct skb_shared_info, dataref)); + atomic_set(&shinfo->dataref, 1); + + memset(nbuf, 0, offsetof(struct sk_buff, tail)); + nbuf->data = nbuf->head + NET_SKB_PAD; + skb_reset_tail_pointer(nbuf); + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)) + nbuf->head_frag = head_frag; +#endif +} +#endif + +/* + * nss_core_send_buffer_simple_skb() + * Sends one skb to NSS FW + */ +static inline int32_t nss_core_send_buffer_simple_skb(struct nss_ctx_instance *nss_ctx, + struct h2n_desc_if_instance *desc_if, uint32_t if_num, + struct sk_buff *nbuf, uint16_t hlos_index, uint16_t flags, uint8_t buffer_type, uint16_t mss) +{ + struct h2n_descriptor *desc_ring = desc_if->desc; + struct h2n_descriptor *desc; + uint16_t bit_flags; + uint16_t mask; + uint32_t frag0phyaddr; + +#if (NSS_SKB_REUSE_SUPPORT == 1) + uint16_t sz; +#endif + + bit_flags = flags | H2N_BIT_FLAG_FIRST_SEGMENT | H2N_BIT_FLAG_LAST_SEGMENT; + if (likely(nbuf->ip_summed == CHECKSUM_PARTIAL)) { + bit_flags |= H2N_BIT_FLAG_GEN_IP_TRANSPORT_CHECKSUM; + bit_flags |= H2N_BIT_FLAG_GEN_IPV4_IP_CHECKSUM; + } else if (nbuf->ip_summed == CHECKSUM_UNNECESSARY) { + bit_flags |= H2N_BIT_FLAG_GEN_IP_TRANSPORT_CHECKSUM_NONE; + } + + mask = desc_if->size - 1; + desc = &desc_ring[hlos_index]; + +#if (NSS_SKB_REUSE_SUPPORT == 1) + /* + * Check if the caller indicates that the buffer is not to be re-used (kept in the accelerator). + */ + if (unlikely(!(bit_flags & H2N_BIT_FLAG_BUFFER_REUSABLE))) { + goto no_reuse; + } + + /* + * Since the caller is allowing re-use, we now check if the skb meets the criteria. 
+ */ + if (unlikely(!nss_core_skb_can_reuse(nss_ctx, if_num, nbuf, nss_ctx->max_buf_size))) { + goto no_reuse; + } + + /* + * We are going to do both Tx and then Rx on this buffer, unmap the Tx + * and then map Rx over the entire buffer. + */ + sz = max((uint16_t)nss_core_skb_tail_offset(nbuf), (uint16_t)(nss_ctx->max_buf_size + NET_SKB_PAD)); + frag0phyaddr = (uint32_t)dma_map_single(nss_ctx->dev, nbuf->head, sz, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(nss_ctx->dev, frag0phyaddr))) { + goto no_reuse; + } + + /* + * We are allowed to re-use the packet + */ + nss_core_write_one_descriptor(desc, buffer_type, frag0phyaddr, if_num, + (nss_ptr_t)nbuf, (uint16_t)(nbuf->data - nbuf->head), nbuf->len, + sz, (uint32_t)nbuf->priority, mss, bit_flags); + + NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE); + + /* + * We are done using the skb fields and can reuse it now + */ + nss_skb_reuse(nbuf); + + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_BUFFER_REUSE]); + return 1; + +no_reuse: +#endif + + bit_flags &= ~H2N_BIT_FLAG_BUFFER_REUSABLE; + frag0phyaddr = nss_core_dma_map_single(nss_ctx->dev, nbuf); + if (unlikely(dma_mapping_error(nss_ctx->dev, frag0phyaddr))) { + nss_warning("%px: DMA mapping failed for virtual address = %px", nss_ctx, nbuf->head); + return 0; + } + + nss_core_write_one_descriptor(desc, buffer_type, frag0phyaddr, if_num, + (nss_ptr_t)nbuf, (uint16_t)(nbuf->data - nbuf->head), nbuf->len, + (uint16_t)skb_end_offset(nbuf), (uint32_t)nbuf->priority, mss, bit_flags); + + NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE); + + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_SIMPLE]); + return 1; +} + +/* + * nss_core_send_buffer_nr_frags() + * Sends frags array (NETIF_F_SG) to NSS FW + * + * Note - Opaque is set only on LAST fragment, and DISCARD is set for the rest of segments + * Used to differentiate from FRAGLIST + */ +static inline int32_t nss_core_send_buffer_nr_frags(struct 
nss_ctx_instance *nss_ctx, + struct h2n_desc_if_instance *desc_if, uint32_t if_num, + struct sk_buff *nbuf, uint16_t hlos_index, uint16_t flags, uint8_t buffer_type, uint16_t mss) +{ + struct h2n_descriptor *desc_ring = desc_if->desc; + struct h2n_descriptor *desc; + const skb_frag_t *frag; + dma_addr_t buffer; + uint32_t nr_frags; + uint16_t bit_flags; + int16_t i; + uint16_t mask; + + uint32_t frag0phyaddr = nss_core_dma_map_single(nss_ctx->dev, nbuf); + if (unlikely(dma_mapping_error(nss_ctx->dev, frag0phyaddr))) { + nss_warning("%px: DMA mapping failed for virtual address = %px", nss_ctx, nbuf->head); + return 0; + } + + /* + * Set the appropriate flags. + */ + bit_flags = (flags | H2N_BIT_FLAG_DISCARD); + + /* + * Reset the reuse flag for non-linear buffers. + */ + bit_flags &= ~H2N_BIT_FLAG_BUFFER_REUSABLE; + if (likely(nbuf->ip_summed == CHECKSUM_PARTIAL)) { + bit_flags |= H2N_BIT_FLAG_GEN_IP_TRANSPORT_CHECKSUM; + bit_flags |= H2N_BIT_FLAG_GEN_IPV4_IP_CHECKSUM; + } + + mask = desc_if->size - 1; + desc = &desc_ring[hlos_index]; + + /* + * First fragment/descriptor is special + */ + nss_core_write_one_descriptor(desc, buffer_type, frag0phyaddr, if_num, + (nss_ptr_t)NULL, nbuf->data - nbuf->head, nbuf->len - nbuf->data_len, + skb_end_offset(nbuf), (uint32_t)nbuf->priority, mss, bit_flags | H2N_BIT_FLAG_FIRST_SEGMENT); + + NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE); + + /* + * Now handle rest of the fragments. 
+ */ + nr_frags = skb_shinfo(nbuf)->nr_frags; + BUG_ON(nr_frags > MAX_SKB_FRAGS); + for (i = 0; i < nr_frags; i++) { + frag = &skb_shinfo(nbuf)->frags[i]; + + buffer = skb_frag_dma_map(nss_ctx->dev, frag, 0, skb_frag_size(frag), DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(nss_ctx->dev, buffer))) { + nss_warning("%px: DMA mapping failed for fragment", nss_ctx); + nss_core_send_unwind_dma(nss_ctx->dev, desc_if, hlos_index, i + 1, false); + return -(i + 1); + } + + hlos_index = (hlos_index + 1) & (mask); + desc = &(desc_if->desc[hlos_index]); + + nss_core_write_one_descriptor(desc, buffer_type, buffer, if_num, + (nss_ptr_t)NULL, 0, skb_frag_size(frag), skb_frag_size(frag), + nbuf->priority, mss, bit_flags); + + NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE); + } + + /* + * Update bit flag for last descriptor. + * The discard flag shall be set for all fragments except the + * the last one.The NSS returns the last fragment to HLOS + * after the packet processing is done.We do need to send the + * packet buffer address (skb) in the descriptor of last segment + * when the decriptor returns from NSS the HLOS uses the + * opaque field to free the memory allocated. 
+ */ + desc->bit_flags |= H2N_BIT_FLAG_LAST_SEGMENT; + desc->bit_flags &= ~(H2N_BIT_FLAG_DISCARD); + desc->opaque = (nss_ptr_t)nbuf; + + NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE); + + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_NR_FRAGS]); + return i+1; +} + +/* + * nss_core_send_buffer_fraglist() + * Sends fraglist (NETIF_F_FRAGLIST) to NSS FW + * + * Note - Opaque will be set on all fragments, and DISCARD is set for the rest of segments + * Used to differentiate from FRAGS + */ +static inline int32_t nss_core_send_buffer_fraglist(struct nss_ctx_instance *nss_ctx, + struct h2n_desc_if_instance *desc_if, uint32_t if_num, + struct sk_buff *nbuf, uint16_t hlos_index, uint16_t flags, uint8_t buffer_type, uint16_t mss) +{ + struct h2n_descriptor *desc_ring = desc_if->desc; + struct h2n_descriptor *desc; + dma_addr_t buffer; + uint16_t mask; + struct sk_buff *iter; + uint16_t bit_flags; + int16_t i; + + uint32_t frag0phyaddr = nss_core_dma_map_single(nss_ctx->dev, nbuf); + if (unlikely(dma_mapping_error(nss_ctx->dev, frag0phyaddr))) { + nss_warning("%px: DMA mapping failed for virtual address = %px", nss_ctx, nbuf->head); + return 0; + } + + /* + * Copy and Set bit flags + */ + bit_flags = flags; + + /* + * Reset the reuse flag for non-linear buffers. + */ + bit_flags &= ~H2N_BIT_FLAG_BUFFER_REUSABLE; + if (likely(nbuf->ip_summed == CHECKSUM_PARTIAL)) { + bit_flags |= H2N_BIT_FLAG_GEN_IP_TRANSPORT_CHECKSUM; + bit_flags |= H2N_BIT_FLAG_GEN_IPV4_IP_CHECKSUM; + } + + mask = desc_if->size - 1; + desc = &desc_ring[hlos_index]; + + /* + * First fragment/descriptor is special. 
Will hold the Opaque + */ + nss_core_write_one_descriptor(desc, buffer_type, frag0phyaddr, if_num, + (nss_ptr_t)nbuf, nbuf->data - nbuf->head, nbuf->len - nbuf->data_len, + skb_end_offset(nbuf), (uint32_t)nbuf->priority, mss, bit_flags | H2N_BIT_FLAG_FIRST_SEGMENT); + + NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE); + + /* + * Walk the frag_list in nbuf + */ + i = 0; + skb_walk_frags(nbuf, iter) { + uint32_t nr_frags; + + buffer = nss_core_dma_map_single(nss_ctx->dev, iter); + if (unlikely(dma_mapping_error(nss_ctx->dev, buffer))) { + nss_warning("%px: DMA mapping failed for virtual address = %px", nss_ctx, iter->head); + nss_core_send_unwind_dma(nss_ctx->dev, desc_if, hlos_index, i + 1, true); + return -(i+1); + } + + /* + * We currently don't support frags[] array inside a + * fraglist. + */ + nr_frags = skb_shinfo(iter)->nr_frags; + if (unlikely(nr_frags > 0)) { + nss_warning("%px: fraglist with page data are not supported: %px\n", nss_ctx, iter); + nss_core_send_unwind_dma(nss_ctx->dev, desc_if, hlos_index, i + 1, true); + return -(i+1); + } + + /* + * Update index. + */ + hlos_index = (hlos_index + 1) & (mask); + desc = &(desc_if->desc[hlos_index]); + +#ifdef CONFIG_DEBUG_KMEMLEAK + /* + * We are holding this skb in NSS FW, let kmemleak know about it. + * + * If the skb is a fast clone (FCLONE), then nbuf is pointing to the + * cloned skb which is at the middle of the allocated block and kmemleak API + * would backtrace if passed such a pointer. We will need to get to the original + * skb pointer which kmemleak is aware of. 
+ */ + if (iter->fclone == SKB_FCLONE_CLONE) { + kmemleak_not_leak(iter - 1); + } else { + kmemleak_not_leak(iter); + } +#endif + + nss_core_write_one_descriptor(desc, buffer_type, buffer, if_num, + (nss_ptr_t)iter, iter->data - iter->head, iter->len - iter->data_len, + skb_end_offset(iter), iter->priority, mss, bit_flags); + + NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE); + + i++; + } + + /* + * We need to defrag the frag_list, otherwise, if this structure is + * received back we don't know how we can reconstruct the frag_list. + * Therefore, we are clearing skb_has_fraglist. This is safe because all + * information about the segments are already sent to NSS-FW. + * So, the information will be in the NSS-FW. + */ + skb_shinfo(nbuf)->frag_list = NULL; + NSS_PKT_STATS_ADD(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT], i); + + /* + * Update bit flag for last descriptor. + */ + desc->bit_flags |= H2N_BIT_FLAG_LAST_SEGMENT; + NSS_CORE_DMA_CACHE_MAINT((void *)desc, sizeof(*desc), DMA_TO_DEVICE); + + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_FRAGLIST]); + return i+1; +} + +/* + * nss_core_init_handlers() + * Initialize the handlers for all interfaces associated with core + */ +void nss_core_init_handlers(struct nss_ctx_instance *nss_ctx) +{ + struct nss_rx_cb_list *cb_list = nss_ctx->nss_rx_interface_handlers; + memset(cb_list, 0, sizeof(*cb_list) * NSS_MAX_NET_INTERFACES); +} + +/* + * nss_core_send_buffer() + * Send network buffer to NSS + */ +int32_t nss_core_send_buffer(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + struct sk_buff *nbuf, uint16_t qid, + uint8_t buffer_type, uint16_t flags) +{ + int16_t count, hlos_index, nss_index, size, mask; + uint32_t segments; + struct hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[qid]; + struct h2n_desc_if_instance *desc_if = &h2n_desc_ring->desc_ring; + struct h2n_descriptor *desc_ring; + struct h2n_descriptor *desc; + struct nss_meminfo_ctx 
*mem_ctx = &nss_ctx->meminfo_ctx; + struct nss_if_mem_map *if_map = mem_ctx->if_map; + uint16_t mss = 0; + bool is_bounce = ((buffer_type == H2N_BUFFER_SHAPER_BOUNCE_INTERFACE) || (buffer_type == H2N_BUFFER_SHAPER_BOUNCE_BRIDGE)); + + desc_ring = desc_if->desc; + size = desc_if->size; + mask = size - 1; + + /* + * If nbuf does not have fraglist, then update nr_frags + * from frags[] array. Otherwise walk the frag_list. + */ + if (!skb_has_frag_list(nbuf)) { + segments = skb_shinfo(nbuf)->nr_frags; + BUG_ON(segments > MAX_SKB_FRAGS); + } else { + struct sk_buff *iter; + segments = 0; + skb_walk_frags(nbuf, iter) { + segments++; + } + + /* + * Check that segments do not overflow the number of descriptors + */ + if (unlikely(segments > size)) { + nss_warning("%px: Unable to fit in skb - %d segments in our descriptors", nss_ctx, segments); + return NSS_CORE_STATUS_FAILURE; + } + } + + /* + * Take a lock for queue + */ + spin_lock_bh(&h2n_desc_ring->lock); + + /* + * We need to work out if there's sufficent space in our transmit descriptor + * ring to place all the segments of a nbuf. 
+ */ + NSS_CORE_DMA_CACHE_MAINT((void *)&if_map->h2n_nss_index[qid], sizeof(uint32_t), DMA_FROM_DEVICE); + NSS_CORE_DSB(); + nss_index = if_map->h2n_nss_index[qid]; + + hlos_index = h2n_desc_ring->hlos_index; + + count = ((nss_index - hlos_index - 1) + size) & (mask); + + if (unlikely(count < (segments + 1))) { + /* + * NOTE: tx_q_full_cnt and TX_STOPPED flags will be used + * when we will add support for DESC Q congestion management + * in future + */ + h2n_desc_ring->tx_q_full_cnt++; + h2n_desc_ring->flags |= NSS_H2N_DESC_RING_FLAGS_TX_STOPPED; + spin_unlock_bh(&h2n_desc_ring->lock); + nss_warning("%px: Data/Command Queue full reached", nss_ctx); + +#if (NSS_PKT_STATS_ENABLED == 1) + if (nss_ctx->id == NSS_CORE_0) { + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_QUEUE_FULL_0]); + } else if (nss_ctx->id == NSS_CORE_1) { + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_QUEUE_FULL_1]); + } else { + nss_warning("%px: Invalid nss core: %d\n", nss_ctx, nss_ctx->id); + } +#endif + + /* + * Enable de-congestion interrupt from NSS + */ + nss_hal_enable_interrupt(nss_ctx, nss_ctx->int_ctx[0].shift_factor, NSS_N2H_INTR_TX_UNBLOCKED); + + return NSS_CORE_STATUS_FAILURE_QUEUE; + } + + desc = &desc_ring[hlos_index]; + + /* + * Check if segmentation enabled. + * Configure descriptor bit flags accordingly + */ + + /* + * When CONFIG_HIGHMEM is enabled OS is giving a single big chunk buffer without + * any scattered frames. + * + * NOTE: We dont have to perform segmentation offload for packets that are being + * bounced. These packets WILL return to the HLOS for freeing or further processing. + * They will NOT be transmitted by the NSS. + */ + if (skb_is_gso(nbuf) && !is_bounce) { + mss = skb_shinfo(nbuf)->gso_size; + flags |= H2N_BIT_FLAG_SEGMENTATION_ENABLE; + } + + /* + * WARNING! : The following "is_bounce" check has a potential to cause corruption + * if things change in the NSS. 
This check allows fragmented packets to be sent down + * with incomplete payload information since NSS does not care about the payload content + * when packets are bounced for shaping. If it starts caring in future, then this code + * will have to change. + * + * WHY WE ARE DOING THIS - Skipping S/G processing helps with performance. + * + */ + count = 0; + if (likely((segments == 0) || is_bounce)) { + count = nss_core_send_buffer_simple_skb(nss_ctx, desc_if, if_num, + nbuf, hlos_index, flags, buffer_type, mss); + } else if (skb_has_frag_list(nbuf)) { + count = nss_core_send_buffer_fraglist(nss_ctx, desc_if, if_num, + nbuf, hlos_index, flags, buffer_type, mss); + } else { + count = nss_core_send_buffer_nr_frags(nss_ctx, desc_if, if_num, + nbuf, hlos_index, flags, buffer_type, mss); + } + + if (unlikely(count <= 0)) { + /* + * We failed and hence we need to unmap dma regions + */ + nss_warning("%px: failed to map DMA regions:%d", nss_ctx, -count); + spin_unlock_bh(&h2n_desc_ring->lock); + return NSS_CORE_STATUS_FAILURE; + } + + /* + * Sync to ensure all flushing of the descriptors are complete + */ + NSS_CORE_DSB(); + + /* + * Update our host index so the NSS sees we've written a new descriptor. + */ + hlos_index = (hlos_index + count) & mask; + h2n_desc_ring->hlos_index = hlos_index; + if_map->h2n_hlos_index[qid] = hlos_index; + + NSS_CORE_DMA_CACHE_MAINT(&if_map->h2n_hlos_index[qid], sizeof(uint32_t), DMA_TO_DEVICE); + NSS_CORE_DSB(); + +#ifdef CONFIG_DEBUG_KMEMLEAK + /* + * We are holding this skb in NSS FW, let kmemleak know about it. + * + * If the skb is a fast clone (FCLONE), then nbuf is pointing to the + * cloned skb which is at the middle of the allocated block and kmemleak API + * would backtrace if passed such a pointer. We will need to get to the original + * skb pointer which kmemleak is aware of. 
+ */ + if (nbuf->fclone == SKB_FCLONE_CLONE) { + kmemleak_not_leak(nbuf - 1); + } else { + kmemleak_not_leak(nbuf); + } +#endif + + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NSS_SKB_COUNT]); + + spin_unlock_bh(&h2n_desc_ring->lock); + return NSS_CORE_STATUS_SUCCESS; +} + +/* + * nss_core_send_cmd() + * Send command message to NSS + */ +int32_t nss_core_send_cmd(struct nss_ctx_instance *nss_ctx, void *msg, int size, int buf_size) +{ + struct nss_cmn_msg *ncm = (struct nss_cmn_msg *)msg; + int32_t status; + struct sk_buff *nbuf; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: interface: %d type: %d message dropped as core not ready\n", nss_ctx, ncm->interface, ncm->type); + return NSS_TX_FAILURE_NOT_READY; + } + + if (nss_cmn_get_msg_len(ncm) > size) { + nss_warning("%px: interface: %d type: %d message length %d is invalid, size = %d\n", + nss_ctx, ncm->interface, ncm->type, nss_cmn_get_msg_len(ncm), size); + return NSS_TX_FAILURE_TOO_LARGE; + } + + if (buf_size > PAGE_SIZE) { + nss_warning("%px: interface: %d type: %d tx request size too large: %u", + nss_ctx, ncm->interface, ncm->type, buf_size); + return NSS_TX_FAILURE_BAD_PARAM; + } + + nbuf = dev_alloc_skb(buf_size); + if (unlikely(!nbuf)) { + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_NBUF_ALLOC_FAILS]); + nss_warning("%px: interface: %d type: %d msg dropped as command allocation failed", nss_ctx, ncm->interface, ncm->type); + return NSS_TX_FAILURE; + } + + memcpy(skb_put(nbuf, buf_size), (void *)ncm, size); + + status = nss_core_send_buffer(nss_ctx, 0, nbuf, NSS_IF_H2N_CMD_QUEUE, H2N_BUFFER_CTRL, H2N_BIT_FLAG_BUFFER_REUSABLE); + if (status != NSS_CORE_STATUS_SUCCESS) { + dev_kfree_skb_any(nbuf); + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_CMD_QUEUE_FULL]); + nss_warning("%px: interface: %d type: %d unable to enqueue message status %d\n", nss_ctx, ncm->interface, ncm->type, status); 
+ return status; + } + + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE); + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_CMD_REQ]); + return status; +} + +/* + * nss_core_send_packet() + * Send data packet to NSS + */ +int32_t nss_core_send_packet(struct nss_ctx_instance *nss_ctx, struct sk_buff *nbuf, uint32_t if_num, uint32_t flag) +{ + int32_t status; + int32_t queue_id = 0; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: interface: %d packet dropped as core not ready\n", nss_ctx, if_num); + return NSS_TX_FAILURE_NOT_READY; + } + +#ifdef NSS_MULTI_H2N_DATA_RING_SUPPORT + queue_id = (skb_get_queue_mapping(nbuf) & (NSS_HOST_CORES - 1)) << 1; + if (nbuf->priority) { + queue_id++; + } +#endif + status = nss_core_send_buffer(nss_ctx, if_num, nbuf, NSS_IF_H2N_DATA_QUEUE + queue_id, H2N_BUFFER_PACKET, flag); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: interface: %d unable to enqueue packet status %d\n", nss_ctx, if_num, status); + return status; + } + + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE); + +#ifdef NSS_MULTI_H2N_DATA_RING_SUPPORT + /* + * Count per queue and aggregate packet count + */ + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_PACKET_QUEUE_0 + queue_id]); +#endif + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_PACKET]); + return status; +} + +/* + * nss_core_ddr_info() + * Getting DDR information for NSS core + */ +uint32_t nss_core_ddr_info(struct nss_mmu_ddr_info *mmu) +{ + nss_get_ddr_info(mmu, "memory"); + return nss_soc_mem_info(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_core.h b/feeds/ipq807x/qca-nss-drv/src/nss_core.h new file mode 100644 index 000000000..d7f62feff --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_core.h @@ -0,0 +1,1038 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2021, The 
Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * na_core.h + * NSS driver core header file. + */ + +#ifndef __NSS_CORE_H +#define __NSS_CORE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "nss_phys_if.h" +#include "nss_hlos_if.h" +#include "nss_oam.h" +#include "nss_data_plane.h" +#include "nss_gmac_stats.h" +#include "nss_meminfo.h" +#include "nss_stats.h" + +/* + * NSS debug macros + */ +#define nss_info_always(s, ...) pr_alert(s, ##__VA_ARGS__) + +#if (NSS_DEBUG_LEVEL < 1) +#define nss_assert(fmt, args...) +#else +#define nss_assert(c) if (!(c)) { BUG_ON(!(c)); } +#endif + +#if defined(CONFIG_DYNAMIC_DEBUG) +/* + * Compile messages for dynamic enable/disable + */ +#define nss_warning(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__) +#define nss_info(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__) +#define nss_trace(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__) +#else + +/* + * Statically compile messages at different levels + */ +#if (NSS_DEBUG_LEVEL < 2) +#define nss_warning(s, ...) 
+#else +#define nss_warning(s, ...) pr_warn("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__) +#endif + +#if (NSS_DEBUG_LEVEL < 3) +#define nss_info(s, ...) +#else +#define nss_info(s, ...) pr_notice("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__) +#endif + +#if (NSS_DEBUG_LEVEL < 4) +#define nss_trace(s, ...) +#else +#define nss_trace(s, ...) pr_info("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__) +#endif +#endif + +#if (NSS_PKT_STATS_ENABLED == 1) +#define NSS_PKT_STATS_INC(x) nss_pkt_stats_inc((x)) +#define NSS_PKT_STATS_DEC(x) nss_pkt_stats_dec((x)) +#define NSS_PKT_STATS_ADD(x, i) nss_pkt_stats_add((x), (i)) +#define NSS_PKT_STATS_SUB(x, i) nss_pkt_stats_sub((x), (i)) +#define NSS_PKT_STATS_READ(x) nss_pkt_stats_read(x) +#else +#define NSS_PKT_STATS_INC(x) +#define NSS_PKT_STATS_DEC(x) +#define NSS_PKT_STATS_ADD(x, i) +#define NSS_PKT_STATS_SUB(x, i) +#define NSS_PKT_STATS_READ(x) +#endif + +/* + * Cache operation + */ +#define NSS_CORE_DSB() dsb(sy) +#define NSS_CORE_DMA_CACHE_MAINT(start, size, dir) nss_core_dma_cache_maint(start, size, dir) + +/* + * nss_core_dma_cache_maint() + * Perform the appropriate cache op based on direction + */ +static inline void nss_core_dma_cache_maint(void *start, uint32_t size, int direction) +{ + switch (direction) { + case DMA_FROM_DEVICE:/* invalidate only */ + dmac_inv_range(start, start + size); + break; + case DMA_TO_DEVICE:/* writeback only */ + dmac_clean_range(start, start + size); + break; + case DMA_BIDIRECTIONAL:/* writeback and invalidate */ + dmac_flush_range(start, start + size); + break; + default: + BUG(); + } +} + +#define NSS_DEVICE_IF_START NSS_PHYSICAL_IF_START + +#define NSS_IS_IF_TYPE(type, if_num) ((if_num >= NSS_##type##_IF_START) && (if_num < (NSS_##type##_IF_START + NSS_MAX_##type##_INTERFACES))) + +/* + * Default payload size for NSS buffers + */ +#define NSS_NBUF_PAYLOAD_SIZE NSS_EMPTY_BUFFER_SIZE +#define NSS_NBUF_PAD_EXTRA 256 +#define NSS_NBUF_ETH_EXTRA 192 + +/* + * N2H/H2N Queue IDs 
+ */ +#define NSS_IF_N2H_EMPTY_BUFFER_RETURN_QUEUE 0 +#define NSS_IF_N2H_DATA_QUEUE_0 1 +#define NSS_IF_N2H_DATA_QUEUE_1 2 +#define NSS_IF_N2H_DATA_QUEUE_2 3 +#define NSS_IF_N2H_DATA_QUEUE_3 4 + +#define NSS_IF_H2N_EMPTY_BUFFER_QUEUE 0 +#define NSS_IF_H2N_CMD_QUEUE 1 +#define NSS_IF_H2N_EMPTY_PAGED_BUFFER_QUEUE 2 +#define NSS_IF_H2N_DATA_QUEUE 3 + +/* + * NSS Interrupt Causes + */ +#define NSS_INTR_CAUSE_INVALID 0 +#define NSS_INTR_CAUSE_QUEUE 1 +#define NSS_INTR_CAUSE_NON_QUEUE 2 +#define NSS_INTR_CAUSE_EMERGENCY 3 +#define NSS_INTR_CAUSE_SDMA 4 + +/* + * NSS Core Status + */ +#define NSS_CORE_STATUS_SUCCESS 0 +#define NSS_CORE_STATUS_FAILURE 1 +#define NSS_CORE_STATUS_FAILURE_QUEUE 2 + +/* + * NSS context magic + */ +#define NSS_CTX_MAGIC 0xDEDEDEDE + +/* + * Number of n2h descriptor rings + */ +#define NSS_N2H_DESC_RING_NUM 15 +#define NSS_H2N_DESC_RING_NUM 16 + +/* + * NSS maximum data queue per core + */ +#define NSS_MAX_DATA_QUEUE 4 + +/* + * NSS maximum IRQ per interrupt instance/core + */ +#if defined(NSS_HAL_IPQ807x_SUPPORT) || defined(NSS_HAL_IPQ60XX_SUPPORT) +#define NSS_MAX_IRQ_PER_INSTANCE 6 +#define NSS_MAX_IRQ_PER_CORE 10 /* must match with NSS_HAL_N2H_INTR_PURPOSE_MAX */ +#elif defined(NSS_HAL_IPQ50XX_SUPPORT) +#define NSS_MAX_IRQ_PER_CORE 8 +#else +#define NSS_MAX_IRQ_PER_INSTANCE 1 +#define NSS_MAX_IRQ_PER_CORE 2 +#endif + +/* + * NSS maximum clients + */ +#define NSS_MAX_CLIENTS 12 + +/* + * Maximum number of service code NSS supports + */ +#define NSS_MAX_SERVICE_CODE 256 + +/* + * Interrupt cause processing weights + */ +#define NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT 64 +#define NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT 64 +#define NSS_EMPTY_BUFFER_RETURN_PROCESSING_WEIGHT 64 +#define NSS_TX_UNBLOCKED_PROCESSING_WEIGHT 1 + +/* + * Cache line size of the NSS. 
+ */ +#define NSS_CACHE_LINE_SIZE 32 + +/* + * Statistics struct + * + * INFO: These numbers are based on previous generation chip + * These may change in future + */ + +/* + * NSS Frequency Defines and Values + * + * INFO: The LOW and MAX value together describe the "performance" band that we should operate the frequency at. + * + */ +#define NSS_FREQ_SCALE_NA 0xFAADFAAD /* Frequency scale not supported */ +#define NSS_FREQ_NA 0x0 /* Instructions Per ms Min */ + +#define NSS_FREQ_110 110000000 /* Frequency in hz */ +#define NSS_FREQ_110_MIN 0x03000 /* Instructions Per ms Min */ +#define NSS_FREQ_110_MAX 0x07000 /* Instructions Per ms Max */ + +#define NSS_FREQ_187 187200000 /* Frequency in hz */ +#if defined(NSS_HAL_IPQ60XX_SUPPORT) +#define NSS_FREQ_187_MIN 0x03000 /* Instructions Per ms Min */ +#define NSS_FREQ_187_MAX 0x10000 /* Instructions Per ms Max */ +#else +#define NSS_FREQ_187_MIN 0x03000 /* Instructions Per ms Min */ +#define NSS_FREQ_187_MAX 0x07000 /* Instructions Per ms Max */ +#endif + +#define NSS_FREQ_275 275000000 /* Frequency in hz */ +#define NSS_FREQ_275_MIN 0x03000 /* Instructions Per ms Min */ +#define NSS_FREQ_275_MAX 0x07000 /* Instructions Per ms Max */ + +#define NSS_FREQ_550 550000000 /* Frequency in hz */ +#define NSS_FREQ_550_MIN 0x07000 /* Instructions Per ms Min */ +#define NSS_FREQ_550_MAX 0x08000 /* Instructions Per ms Max */ + +#define NSS_FREQ_600 600000000 /* Frequency in hz */ +#define NSS_FREQ_600_MIN 0x07000 /* Instructions Per ms Min */ +#define NSS_FREQ_600_MAX 0x08000 /* Instructions Per ms Max */ + +#define NSS_FREQ_733 733000000 /* Frequency in hz */ +#define NSS_FREQ_733_MIN 0x07000 /* Instructions Per ms Min */ +#define NSS_FREQ_733_MAX 0x25000 /* Instructions Per ms Max */ + +#define NSS_FREQ_748 748800000 /* Frequency in hz */ +#if defined(NSS_HAL_IPQ60XX_SUPPORT) +#define NSS_FREQ_748_MIN 0x10000 /* Instructions Per ms Min */ +#define NSS_FREQ_748_MAX 0x18000 /* Instructions Per ms Max */ +#else +#define 
NSS_FREQ_748_MIN 0x07000 /* Instructions Per ms Min */ +#define NSS_FREQ_748_MAX 0x14000 /* Instructions Per ms Max */ +#endif + +#define NSS_FREQ_800 800000000 /* Frequency in hz */ +#define NSS_FREQ_800_MIN 0x07000 /* Instructions Per ms Min */ +#define NSS_FREQ_800_MAX 0x25000 /* Instructions Per ms Max */ + +#define NSS_FREQ_850 850000000 /* Frequency in hz */ +#define NSS_FREQ_850_MIN 0x07000 /* Instructions Per ms Min */ +#define NSS_FREQ_850_MAX 0x0c000 /* Instructions Per ms Max */ + +#define NSS_FREQ_1000 1000000000 /* Frequency in hz */ +#define NSS_FREQ_1000_MIN 0x0c000 /* Instructions Per ms Min */ +#define NSS_FREQ_1000_MAX 0x25000 /* Instructions Per ms Max */ + +#define NSS_FREQ_1497 1497600000 /* Frequency in hz */ +#if defined(NSS_HAL_IPQ60XX_SUPPORT) +#define NSS_FREQ_1497_MIN 0x18000 /* Instructions Per ms Min */ +#define NSS_FREQ_1497_MAX 0x25000 /* Instructions Per ms Max */ +#else +#define NSS_FREQ_1497_MIN 0x14000 /* Instructions Per ms Min */ +#define NSS_FREQ_1497_MAX 0x25000 /* Instructions Per ms Max */ +#endif + +#define NSS_FREQ_1689 1689600000 /* Frequency in hz */ +#define NSS_FREQ_1689_MIN 0x14000 /* Instructions Per ms Min */ +#define NSS_FREQ_1689_MAX 0x25000 /* Instructions Per ms Max */ + +#if (NSS_DT_SUPPORT == 1) +#define NSSTCM_FREQ 400000000 /* NSS TCM Frequency in Hz */ + +/* + * NSS Clock names + */ +#define NSS_CORE_CLK "nss-core-clk" +#define NSS_TCM_SRC_CLK "nss-tcm-src" +#define NSS_TCM_CLK "nss-tcm-clk" +#define NSS_FABRIC0_CLK "nss-fab0-clk" +#define NSS_FABRIC1_CLK "nss-fab1-clk" + +/* + * NSS Fabric speeds + */ +#define NSS_FABRIC0_TURBO 533000000 +#define NSS_FABRIC1_TURBO 266500000 +#define NSS_FABRIC0_NOMINAL 400000000 +#define NSS_FABRIC1_NOMINAL 200000000 +#define NSS_FABRIC0_IDLE 133333000 +#define NSS_FABRIC1_IDLE 133333000 +#endif + +/* Default NSS packet queue limit. 
*/ +#define NSS_DEFAULT_QUEUE_LIMIT 256 + +/* + * Gives us important data from NSS platform data + */ +extern struct nss_top_instance nss_top_main; + +/* + * NSS core state + */ +enum nss_core_state { + NSS_CORE_STATE_UNINITIALIZED = 0, + NSS_CORE_STATE_INITIALIZED, + /* + * in following cases, only interrupts work + */ + NSS_CORE_STATE_FW_DEAD = 2, + NSS_CORE_STATE_FW_DUMP = 4, + NSS_CORE_STATE_PANIC = 8, +}; + +/* + * Forward declarations + */ +struct nss_top_instance; +struct nss_ctx_instance; +struct int_ctx_instance; +struct net_dev_priv_instance; + +/* + * Network device private data instance + */ +struct netdev_priv_instance { + struct int_ctx_instance *int_ctx; /* Back pointer to interrupt context */ +}; + +/* + * Interrupt context instance (one per queue per NSS core) + */ +struct int_ctx_instance { + struct nss_ctx_instance *nss_ctx; + /* Back pointer to NSS context of core that + owns this interrupt */ + uint32_t irq; /* HLOS IRQ numbers bind to this instance */ + uint32_t shift_factor; /* Shift factor for this IRQ queue */ + uint32_t cause; /* Interrupt cause carried forward to BH */ + struct napi_struct napi;/* NAPI handler */ +}; + +/* + * N2H descriptor ring information + */ +struct hlos_n2h_desc_ring { + struct n2h_desc_if_instance desc_ring; + /* Descriptor ring */ + uint32_t hlos_index; /* Current HLOS index for this ring */ + struct sk_buff *head; /* First segment of an skb fraglist */ + struct sk_buff *tail; /* Last segment received of an skb fraglist */ + struct sk_buff *jumbo_start; /* First segment of an skb with frags[] */ +}; + +/* + * H2N descriptor ring information + */ +struct hlos_h2n_desc_rings { + struct h2n_desc_if_instance desc_ring; /* Descriptor ring */ + uint32_t hlos_index; + spinlock_t lock; /* Lock to save from simultaneous access */ + uint32_t flags; /* Flags */ + uint64_t tx_q_full_cnt; /* Descriptor queue full count */ +}; + +#define NSS_H2N_DESC_RING_FLAGS_TX_STOPPED 0x1 /* Tx has been stopped for this queue */ + +/* + * 
struct nss_shaper_bounce_registrant + * Registrant detail for shaper bounce operations + */ +struct nss_shaper_bounce_registrant { + nss_shaper_bounced_callback_t bounced_callback; /* Invoked for each shaper bounced packet returned from the NSS */ + void *app_data; /* Argument given to the callback */ + struct module *owner; /* Owning module of the callback + arg */ + bool registered; + volatile bool callback_active; /* true when the bounce callback is being called */ +}; + +/* + * CB function declarations + */ +typedef void (*nss_core_rx_callback_t)(struct nss_ctx_instance *, struct nss_cmn_msg *, void *); + +/* + * NSS Rx per interface callback structure + */ +struct nss_rx_cb_list { + nss_if_rx_msg_callback_t msg_cb; + nss_core_rx_callback_t cb; + void *app_data; +}; + +/* + * NSS core <-> subsystem data plane registration related paramaters. + * This struct is filled with if_register/data_plane register APIs and + * retrieved when handling a data packet/skb destined to that subsystem. + */ +struct nss_subsystem_dataplane_register { + nss_phys_if_rx_callback_t cb; /* callback to be invoked */ + nss_phys_if_xmit_callback_t xmit_cb; + /* Callback to be invoked for sending the packets to the transmit path */ + nss_phys_if_rx_ext_data_callback_t ext_cb; + /* Extended data plane callback to be invoked. 
+ This is needed if driver needs extended handling + of data packet before giving to stack */ + void *app_data; /* additional info passed during callback(for future use) */ + struct net_device *ndev; /* Netdevice associated with the interface */ + uint32_t features; /* skb types supported by this subsystem */ + uint32_t type; /* Indicates the type of this data plane */ +}; + +/* + * Holds statistics for every worker thread on a core + */ +struct nss_worker_thread_stats { + struct nss_project_irq_stats *irq_stats; +}; + +/* + * NSS context instance (one per NSS core) + */ +struct nss_ctx_instance { + struct nss_top_instance *nss_top; + /* Back pointer to NSS Top */ + struct device *dev; /* Pointer to the original device from probe */ + struct net_device napi_ndev; /* Dummy_netdev for NAPI */ + uint32_t id; /* Core ID for this instance */ + void __iomem *nmap; /* Pointer to NSS CSM registers */ + void __iomem *vmap; /* Virt mem pointer to virtual register map */ + void __iomem *qgic_map; /* Virt mem pointer to QGIC register */ + uint32_t nphys; /* Phys mem pointer to CSM register map */ + uint32_t vphys; /* Phys mem pointer to virtual register map */ + uint32_t qgic_phys; /* Phys mem pointer to QGIC register map */ + uint32_t load; /* Load address for this core */ + struct nss_meminfo_ctx meminfo_ctx; /* Meminfo context */ + enum nss_core_state state; /* State of NSS core */ + uint32_t c2c_start; /* C2C start address */ + uint32_t num_irq; /* IRQ numbers per queue */ + struct int_ctx_instance int_ctx[NSS_MAX_IRQ_PER_CORE]; + /* Interrupt context instances for each queue */ + struct hlos_h2n_desc_rings h2n_desc_rings[NSS_H2N_DESC_RING_NUM]; + /* Host to NSS descriptor rings */ + struct hlos_n2h_desc_ring n2h_desc_ring[NSS_N2H_DESC_RING_NUM]; + /* NSS to Host descriptor rings */ + uint16_t rps_en; /* N2H Enable Multiple queues for Data Packets */ + uint16_t n2h_mitigate_en; /* N2H mitigation */ + uint32_t max_buf_size; /* Maximum buffer size */ + uint32_t 
buf_sz_allocated; /* size of bufs allocated from host */ + nss_cmn_queue_decongestion_callback_t queue_decongestion_callback[NSS_MAX_CLIENTS]; + /* Queue decongestion callbacks */ + void *queue_decongestion_ctx[NSS_MAX_CLIENTS]; + /* Queue decongestion callback contexts */ + nss_cmn_service_code_callback_t service_code_callback[NSS_MAX_SERVICE_CODE]; + /* Service code callbacks */ + void *service_code_ctx[NSS_MAX_SERVICE_CODE]; + /* Service code callback contexts */ + spinlock_t decongest_cb_lock; /* Lock to protect queue decongestion cb table */ + uint16_t phys_if_mtu[NSS_MAX_PHYSICAL_INTERFACES]; + /* Current MTU value of physical interface */ + uint32_t worker_thread_count; /* Number of NSS core worker threads for statistics */ + uint32_t irq_count; /* Number of NSS core IRQs for statistics */ + struct nss_worker_thread_stats *wt_stats; + /* Worker thread statistics */ + struct nss_unaligned_stats unaligned_stats; + /* Unaligned emulation performance statistics */ + struct nss_rx_cb_list nss_rx_interface_handlers[NSS_MAX_NET_INTERFACES]; + /* NSS interface callback handlers */ + struct nss_subsystem_dataplane_register subsys_dp_register[NSS_MAX_NET_INTERFACES]; + /* Subsystem registration data */ + uint32_t magic; + /* Magic protection */ +}; + +/* + * Main NSS context structure (singleton) + */ +struct nss_top_instance { + uint8_t num_nss; /* Number of NSS cores supported */ + uint8_t num_phys_ports; /* Number of physical ports supported */ + uint32_t clk_src; /* Clock source: default/alternate */ + spinlock_t lock; /* Big lock for NSS driver */ + spinlock_t stats_lock; /* Statistics lock */ + struct mutex wq_lock; /* Mutex for NSS Work queue function */ + struct dentry *top_dentry; /* Top dentry for nss */ + struct dentry *stats_dentry; /* Top dentry for nss stats */ + struct dentry *strings_dentry; /* Top dentry for nss stats strings */ + struct dentry *project_dentry; /* per-project stats dentry */ + struct nss_ctx_instance nss[NSS_MAX_CORES]; + /* NSS 
contexts */ + /* + * Network processing handler core ids (CORE0/CORE1) for various interfaces + */ + uint8_t phys_if_handler_id[NSS_MAX_PHYSICAL_INTERFACES]; + uint8_t virt_if_handler_id; + uint8_t gre_redir_handler_id; + uint8_t gre_redir_lag_us_handler_id; + uint8_t gre_redir_lag_ds_handler_id; + uint8_t gre_tunnel_handler_id; + uint8_t shaping_handler_id; + uint8_t ipv4_handler_id; + uint8_t ipv4_reasm_handler_id; + uint8_t ipv6_handler_id; + uint8_t ipv6_reasm_handler_id; + uint8_t crypto_handler_id; + uint8_t ipsec_handler_id; + uint8_t wlan_handler_id; + uint8_t tun6rd_handler_id; + uint8_t wifi_handler_id; + uint8_t ppe_handler_id; + uint8_t pptp_handler_id; + uint8_t pppoe_handler_id; + uint8_t l2tpv2_handler_id; + uint8_t dtls_handler_id; + uint8_t gre_handler_id; + uint8_t map_t_handler_id; + uint8_t tunipip6_handler_id; + uint8_t frequency_handler_id; + uint8_t sjack_handler_id; + uint8_t capwap_handler_id; + uint8_t tstamp_handler_id; + uint8_t portid_handler_id; + uint8_t oam_handler_id; + uint8_t edma_handler_id; + uint8_t bridge_handler_id; + uint8_t trustsec_tx_handler_id; + uint8_t vlan_handler_id; + uint8_t qvpn_handler_id; + uint8_t pvxlan_handler_id; + uint8_t igs_handler_id; + uint8_t gre_redir_mark_handler_id; + uint8_t clmap_handler_id; + uint8_t vxlan_handler_id; + uint8_t rmnet_rx_handler_id; + uint8_t match_handler_id; + uint8_t tls_handler_id; + uint8_t mirror_handler_id; + uint8_t wmdb_handler_id; + uint8_t dma_handler_id; + uint8_t udp_st_handler_id; + + /* + * Data/Message callbacks for various interfaces + */ + nss_phys_if_msg_callback_t phys_if_msg_callback[NSS_MAX_PHYSICAL_INTERFACES]; + /* Physical interface event callback functions */ + nss_virt_if_msg_callback_t virt_if_msg_callback[NSS_MAX_VIRTUAL_INTERFACES]; + /* Virtual interface messsage callback functions */ + nss_ipv4_msg_callback_t ipv4_callback; + /* IPv4 sync/establish callback function */ + nss_ipv6_msg_callback_t ipv6_callback; + /* IPv6 sync/establish callback 
function */ + nss_ipsec_msg_callback_t ipsec_encap_callback; + nss_ipsec_msg_callback_t ipsec_decap_callback; + /* IPsec event callback function */ + nss_crypto_msg_callback_t crypto_msg_callback; + nss_crypto_cmn_msg_callback_t crypto_cmn_msg_callback; + nss_crypto_buf_callback_t crypto_buf_callback; + nss_crypto_pm_event_callback_t crypto_pm_callback; + /* crypto interface callback functions */ + nss_profiler_callback_t profiler_callback[NSS_MAX_CORES]; + /* Profiler interface callback function */ + nss_tun6rd_msg_callback_t tun6rd_msg_callback; + /* 6rd tunnel interface event callback function */ + nss_wifi_msg_callback_t wifi_msg_callback; + /* wifi interface event callback function */ + nss_l2tpv2_msg_callback_t l2tpv2_msg_callback; + /* l2tP tunnel interface event callback function */ + nss_dtls_msg_callback_t dtls_msg_callback; /* dtls interface event callback */ + + nss_gre_tunnel_msg_callback_t gre_tunnel_msg_callback; /* gre tunnel interface event callback */ + + nss_map_t_msg_callback_t map_t_msg_callback; + /* map-t interface event callback function */ + nss_gre_msg_callback_t gre_msg_callback; + /* gre interface event callback function */ + nss_gre_data_callback_t gre_inner_data_callback; + /* gre inner data callback function */ + nss_gre_data_callback_t gre_outer_data_callback; + /* gre outer data callback function */ + nss_tunipip6_msg_callback_t tunipip6_msg_callback; + /* ipip6 tunnel interface event callback function */ + nss_pptp_msg_callback_t pptp_msg_callback; + /* PPTP tunnel interface event callback function */ + nss_pppoe_msg_callback_t pppoe_msg_callback; + /* PPPoE interface event callback function */ + struct nss_shaper_bounce_registrant bounce_interface_registrants[NSS_MAX_NET_INTERFACES]; + /* Registrants for interface shaper bounce operations */ + struct nss_shaper_bounce_registrant bounce_bridge_registrants[NSS_MAX_NET_INTERFACES]; + /* Registrants for bridge shaper bounce operations */ + nss_lag_event_callback_t lag_event_callback; 
+ /* Registrants for lag operations */ + nss_oam_msg_callback_t oam_callback; + /* OAM call back */ + nss_edma_msg_callback_t edma_callback; + /* EDMA callback */ + nss_bridge_msg_callback_t bridge_callback; + /* Bridge callback */ + nss_vlan_msg_callback_t vlan_callback; + /* Vlan callback */ + nss_wifili_msg_callback_t wifili_msg_callback; + /* wifili interface event callback function */ + nss_ipsec_cmn_msg_callback_t ipsec_cmn_msg_callback; + /* IPSEC common interface event callback function */ + nss_qvpn_msg_callback_t qvpn_msg_callback; + /* QVPN interface event callback function */ + nss_rmnet_rx_msg_callback_t rmnet_rx_msg_callback[NSS_MAX_VIRTUAL_INTERFACES]; + /* Virtual interface messsage callback functions */ + nss_wifi_mac_db_msg_callback_t wifi_mac_db_msg_callback; + /* wifi mac database event callback function */ + + uint32_t dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_MAX]; + + /* + * Interface contexts (non network device) + */ + void *ipv4_ctx; /* IPv4 connection manager context */ + void *ipv6_ctx; /* IPv6 connection manager context */ + void *crypto_ctx; /* Crypto interface context */ + void *crypto_pm_ctx; /* Crypto PM context */ + void *profiler_ctx[NSS_MAX_CORES]; + /* Profiler interface context */ + void *ipsec_encap_ctx; /* IPsec encap context */ + void *ipsec_decap_ctx; /* IPsec decap context */ + void *oam_ctx; /* oam context */ + void *edma_ctx; /* edma context */ + void *bridge_ctx; /* Bridge context */ + void *vlan_ctx; /* Vlan context */ + + /* + * Statistics for various interfaces + */ + atomic64_t stats_drv[NSS_DRV_STATS_MAX]; + /* Hlos driver statistics */ + uint64_t stats_gmac[NSS_MAX_PHYSICAL_INTERFACES][NSS_GMAC_STATS_MAX]; + /* GMAC statistics */ + uint64_t stats_node[NSS_MAX_NET_INTERFACES][NSS_STATS_NODE_MAX]; + /* IPv4 statistics per interface */ + bool nss_hal_common_init_done; + + uint16_t prev_mtu_sz; /* mtu sz needed as of now */ + uint16_t crypto_enabled; /* check if crypto is enabled on the platform */ + + /* + * 
TODO: Review and update following fields + */ + uint64_t last_rx_jiffies; /* Time of the last RX message from the NA in jiffies */ + struct nss_hal_ops *hal_ops; /* nss_hal ops for this target platform */ + struct nss_data_plane_ops *data_plane_ops; + /* nss_data_plane ops for this target platform */ +}; + +#if (NSS_PKT_STATS_ENABLED == 1) +/* + * nss_pkt_stats_inc() + */ +static inline void nss_pkt_stats_inc(atomic64_t *stat) +{ + atomic64_inc(stat); +} + +/* + * nss_pkt_stats_dec() + */ +static inline void nss_pkt_stats_dec(atomic64_t *stat) +{ + atomic64_dec(stat); +} + +/* + * nss_pkt_stats_add() + */ +static inline void nss_pkt_stats_add(atomic64_t *stat, uint32_t pkt) +{ + atomic64_add(pkt, stat); +} + +/* + * nss_pkt_stats_sub() + */ +static inline void nss_pkt_stats_sub(atomic64_t *stat, uint32_t pkt) +{ + atomic64_sub(pkt, stat); +} + +/* + * nss_pkt_stats_read() + */ +static inline uint64_t nss_pkt_stats_read(atomic64_t *stat) +{ + return atomic64_read(stat); +} + +#endif + +/* + * NSS Statistics and Data for User Space + */ +struct nss_cmd_buffer { + uint32_t current_freq; /* Current Running Freq of NSS */ + int32_t auto_scale; /* Enable or Disable auto_scale */ + int32_t max_freq; /* Maximum supported frequency index value */ + uint32_t register_addr; /* register addr buffer */ + uint32_t register_data; /* register data buffer */ + uint32_t average_inst; /* average of inst for nss core */ + uint32_t coredump; /* cmd coredump buffer */ +}; +extern struct nss_cmd_buffer nss_cmd_buf; + +/* + * The scales for NSS + */ +typedef enum nss_freq_scales { + NSS_FREQ_LOW_SCALE = 0, + NSS_FREQ_MID_SCALE = 1, + NSS_FREQ_HIGH_SCALE = 2, + NSS_FREQ_MAX_SCALE = 3, +} nss_freq_scales_t; + +/* + * NSS Core Statistics and Frequencies + */ +#define NSS_SAMPLE_BUFFER_SIZE 4 /* Ring Buffer should be a Size of two */ +#define NSS_SAMPLE_BUFFER_MASK (NSS_SAMPLE_BUFFER_SIZE - 1) +#define NSS_FREQUENCY_SCALE_RATE_LIMIT_UP 2 /* Adjust the Rate of Frequency Switching Up */ 
+#define NSS_FREQUENCY_SCALE_RATE_LIMIT_DOWN 60000 /* Adjust the Rate of Frequency Switching Down */ +#define NSS_MESSAGE_RATE_LIMIT 15000 /* Adjust the Rate of Displaying Statistic Messages */ + +/* + * NSS Frequency Scale Info + * + * INFO: Contains the Scale information per Frequency + * Per Scale information needed to Program PLL and make switching decisions + */ +struct nss_scale_info { + uint32_t frequency; /* Frequency in Mhz */ + uint32_t minimum; /* Minimum INST_CNT per Sec */ + uint32_t maximum; /* Maximum INST_CNT per Sec */ +}; + +/* + * NSS Runtime Sample Structure + * + * INFO: Contains the runtime statistic of the NSS core + * Also contains the per frequency scale array + */ +struct nss_runtime_sampling { + struct nss_scale_info freq_scale[NSS_FREQ_MAX_SCALE]; /* NSS Max Scale Per Freq */ + nss_freq_scales_t freq_scale_index; /* Current Freq Index */ + uint32_t freq_scale_ready; /* Allow Freq Scaling */ + uint32_t freq_scale_rate_limit_up; /* Scaling Change Rate Limit */ + uint32_t freq_scale_rate_limit_down; /* Scaling Change Rate Limit */ + uint32_t buffer[NSS_SAMPLE_BUFFER_SIZE]; /* Sample Ring Buffer */ + uint32_t buffer_index; /* Running Buffer Index */ + uint32_t sum; /* Total INST_CNT SUM */ + uint32_t sample_count; /* Number of Samples stored in Ring Buffer */ + uint32_t average; /* Average of INST_CNT */ + uint32_t message_rate_limit; /* Debug Message Rate Limit */ + uint32_t initialized; /* Flag to check for adequate initial samples */ +}; + +/* + * cpu_utilization + */ +struct nss_freq_cpu_usage { + uint32_t used; /* CPU utilization at a certain frequency percentage */ + uint32_t max_ins; /* Maximum instructions that can be executed in 1ms at the current frequency + This value is calculated by diving frequency by 1000. 
*/ + uint32_t total; /* Total usage added over a time of NSS_FREQ_USG_AVG_FREQUENCY milliseconds*/ + uint32_t max; /* Maximum CPU usage since the boot (%) */ + uint32_t min; /* Minimum CPU usage since the boot (%) */ + uint32_t avg_up; /* Actual upper bound of the CPU USAGE (%)*/ + uint16_t avg_ctr; /* Averaging counter */ +}; + +#if (NSS_DT_SUPPORT == 1) +/* + * nss_feature_enabled + */ +enum nss_feature_enabled { + NSS_FEATURE_NOT_ENABLED = 0, /* Feature is not enabled on this core */ + NSS_FEATURE_ENABLED, /* Feature is enabled on this core */ +}; + +/* + * nss_platform_data + * Platform data per core + */ +struct nss_platform_data { + uint32_t id; /* NSS core ID */ + uint32_t num_queue; /* No. of queues supported per core */ + uint32_t num_irq; /* No. of irq binded per queue */ + uint32_t irq[NSS_MAX_IRQ_PER_CORE]; /* IRQ numbers per queue */ + void __iomem *nmap; /* Virtual addr of NSS CSM space */ + void __iomem *vmap; /* Virtual addr of NSS virtual register map */ + void __iomem *qgic_map; /* Virtual addr of QGIC interrupt register */ + uint32_t nphys; /* Physical addr of NSS CSM space */ + uint32_t vphys; /* Physical addr of NSS virtual register map */ + uint32_t qgic_phys; /* Physical addr of QGIC virtual register map */ + uint32_t load_addr; /* Load address of NSS firmware */ + + enum nss_feature_enabled capwap_enabled; + /* Does this core handle capwap? */ + enum nss_feature_enabled crypto_enabled; + /* Does this core handle crypto? */ + enum nss_feature_enabled dtls_enabled; + /* Does this core handle DTLS sessions ? */ + enum nss_feature_enabled gre_redir_enabled; + /* Does this core handle gre_redir Tunnel ? */ + enum nss_feature_enabled gre_tunnel_enabled; + /* Does this core handle gre_tunnel Tunnel ? */ + enum nss_feature_enabled ipsec_enabled; + /* Does this core handle IPsec? */ + enum nss_feature_enabled ipv4_enabled; + /* Does this core handle IPv4? */ + enum nss_feature_enabled ipv4_reasm_enabled; + /* Does this core handle IPv4 reassembly? 
*/ + enum nss_feature_enabled ipv6_enabled; + /* Does this core handle IPv6? */ + enum nss_feature_enabled ipv6_reasm_enabled; + /* Does this core handle IPv6 reassembly? */ + enum nss_feature_enabled l2tpv2_enabled; + /* Does this core handle l2tpv2 Tunnel ? */ + enum nss_feature_enabled map_t_enabled; + /* Does this core handle map-t */ + enum nss_feature_enabled gre_enabled; + /* Does this core handle GRE */ + enum nss_feature_enabled oam_enabled; + /* Does this core handle oam? */ + enum nss_feature_enabled ppe_enabled; + /* Does this core handle ppe ? */ + enum nss_feature_enabled pppoe_enabled; + /* Does this core handle pppoe? */ + enum nss_feature_enabled pptp_enabled; + /* Does this core handle pptp Tunnel ? */ + enum nss_feature_enabled portid_enabled; + /* Does this core handle portid? */ + enum nss_feature_enabled shaping_enabled; + /* Does this core handle shaping ? */ + enum nss_feature_enabled tstamp_enabled; + /* Does this core handle timestamping? */ + enum nss_feature_enabled turbo_frequency; + /* Does this core support turbo frequencies */ + enum nss_feature_enabled tun6rd_enabled; + /* Does this core handle 6rd Tunnel ? */ + enum nss_feature_enabled tunipip6_enabled; + /* Does this core handle ipip6 Tunnel ? */ + enum nss_feature_enabled wlanredirect_enabled; + /* Does this core handle WLAN redirect? */ + enum nss_feature_enabled wifioffload_enabled; + /* Does this core handle WIFI OFFLOAD? */ + enum nss_feature_enabled bridge_enabled; + /* Does this core handle bridge configuration */ + enum nss_feature_enabled vlan_enabled; + /* Does this core handle vlan configuration */ + enum nss_feature_enabled qvpn_enabled; + /* Does this core handle QVPN Tunnel ? */ + enum nss_feature_enabled pvxlan_enabled; + /* Does this core handle pvxlan? */ + enum nss_feature_enabled igs_enabled; + /* Does this core handle igs? */ + enum nss_feature_enabled gre_redir_mark_enabled; + /* Does this core handle GRE redir mark? 
*/ + enum nss_feature_enabled clmap_enabled; + /* Does this core handle clmap? */ + enum nss_feature_enabled vxlan_enabled; + /* Does this core handle vxlan tunnel? */ + enum nss_feature_enabled rmnet_rx_enabled; + /* Does this core handle rmnet rx? */ + enum nss_feature_enabled match_enabled; + /* Does this core handle match node? */ + enum nss_feature_enabled tls_enabled; + /* Does this core handle TLS Tunnel ? */ + enum nss_feature_enabled mirror_enabled; + /* Does this core handle mirror? */ + enum nss_feature_enabled udp_st_enabled; + /* Does this core handle udp st? */ +}; +#endif + +/* + * nss_core_log_msg_failures() + * Driver function for logging failed messages. + */ +static inline void nss_core_log_msg_failures(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm) +{ + if ((ncm->response == NSS_CMN_RESPONSE_ACK) || (ncm->response == NSS_CMN_RESPONSE_NOTIFY)) { + return; + } + + /* + * TODO: Is it worth doing value to name on these values? + */ + nss_warning("%px: msg failure - interface: %d, type: %d, response: %d, error: %d", + nss_ctx, ncm->interface, ncm->type, ncm->response, ncm->error); +} + +/* + * NSS workqueue to change frequencies + */ +typedef struct { + struct work_struct my_work; /* Work Structure */ + uint32_t frequency; /* Frequency To Change */ + uint32_t stats_enable; /* Auto scale on/off */ +} nss_work_t; + +/* + * APIs provided by nss_core.c + */ +extern int nss_core_handle_napi(struct napi_struct *napi, int budget); +extern int nss_core_handle_napi_queue(struct napi_struct *napi, int budget); +extern int nss_core_handle_napi_non_queue(struct napi_struct *napi, int budget); +extern int nss_core_handle_napi_emergency(struct napi_struct *napi, int budget); +extern int nss_core_handle_napi_sdma(struct napi_struct *napi, int budget); +extern int32_t nss_core_send_buffer(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + struct sk_buff *nbuf, uint16_t qid, + uint8_t buffer_type, uint16_t flags); +extern int32_t 
nss_core_send_cmd(struct nss_ctx_instance *nss_ctx, void *msg, int size, int buf_size); +extern int32_t nss_core_send_packet(struct nss_ctx_instance *nss_ctx, struct sk_buff *nbuf, uint32_t if_num, uint32_t flag); +extern uint32_t nss_core_ddr_info(struct nss_mmu_ddr_info *coreinfo); +extern uint32_t nss_core_register_msg_handler(struct nss_ctx_instance *nss_ctx, uint32_t interface, nss_if_rx_msg_callback_t msg_cb); +extern uint32_t nss_core_unregister_msg_handler(struct nss_ctx_instance *nss_ctx, uint32_t interface); +extern uint32_t nss_core_register_handler(struct nss_ctx_instance *nss_ctx, uint32_t interface, nss_core_rx_callback_t cb, void *app_data); +extern uint32_t nss_core_unregister_handler(struct nss_ctx_instance *nss_ctx, uint32_t interface); +extern void nss_core_init_handlers(struct nss_ctx_instance *nss_ctx); +void nss_core_update_max_ipv4_conn(int conn); +void nss_core_update_max_ipv6_conn(int conn); +extern void nss_core_register_subsys_dp(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + nss_phys_if_rx_callback_t cb, + nss_phys_if_rx_ext_data_callback_t ext_cb, + void *app_data, struct net_device *ndev, + uint32_t features); +extern void nss_core_unregister_subsys_dp(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +void nss_core_set_subsys_dp_type(struct nss_ctx_instance *nss_ctx, struct net_device *ndev, uint32_t if_num, uint32_t type); + +static inline nss_if_rx_msg_callback_t nss_core_get_msg_handler(struct nss_ctx_instance *nss_ctx, uint32_t interface) +{ + return nss_ctx->nss_rx_interface_handlers[interface].msg_cb; +} + +static inline uint32_t nss_core_get_max_buf_size(struct nss_ctx_instance *nss_ctx) +{ + return nss_ctx->max_buf_size; +} + +/* + * APIs provided by nss_tx_rx.c + */ +extern void nss_rx_handle_status_pkt(struct nss_ctx_instance *nss_ctx, struct sk_buff *nbuf); + +/* + * APIs provided by nss_stats.c + */ +extern void nss_stats_init(void); +extern void nss_stats_clean(void); + +/* + * APIs provided by nss_log.c + */ 
+extern void nss_log_init(void); +extern bool nss_debug_log_buffer_alloc(uint8_t nss_id, uint32_t nentry); +extern int nss_logbuffer_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos); + +/* + * APIs to set jumbo_mru & paged_mode + */ +extern void nss_core_set_jumbo_mru(int jumbo_mru); +extern int nss_core_get_jumbo_mru(void); +extern void nss_core_set_paged_mode(int mode); +extern int nss_core_get_paged_mode(void); +#if (NSS_SKB_REUSE_SUPPORT == 1) +extern void nss_core_set_max_reuse(int max); +extern int nss_core_get_max_reuse(void); +extern uint32_t nss_core_get_min_reuse(struct nss_ctx_instance *nss_ctx); +#endif + +/* + * APIs for coredump + */ +extern void nss_coredump_notify_register(void); +extern void nss_fw_coredump_notify(struct nss_ctx_instance *nss_own, int intr); +extern int nss_coredump_init_delay_work(void); + +/* + * APIs provided by nss_freq.c + */ +extern bool nss_freq_sched_change(nss_freq_scales_t index, bool auto_scale); + +/* + * nss_freq_init_cpu_usage + * Initializes the cpu usage computation. + */ +extern void nss_freq_init_cpu_usage(void); + +/* + * APIs for PPE + */ +extern void nss_ppe_init(void); +extern void nss_ppe_free(void); + +/* + * APIs for N2H + */ +extern nss_tx_status_t nss_n2h_cfg_empty_pool_size(struct nss_ctx_instance *nss_ctx, uint32_t pool_sz); +extern nss_tx_status_t nss_n2h_paged_buf_pool_init(struct nss_ctx_instance *nss_ctx); + +#endif /* __NSS_CORE_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_coredump.c b/feeds/ipq807x/qca-nss-drv/src/nss_coredump.c new file mode 100644 index 000000000..691a9a712 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_coredump.c @@ -0,0 +1,257 @@ +/* + ************************************************************************** + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_core.c + * NSS driver core APIs source file. + */ + +#include "nss_core.h" +#include "nss_hal.h" +#include "nss_log.h" +#include +#include /* for panic_notifier_list */ +#include /* for time */ +#include "nss_tx_rx_common.h" + +#if NSS_MAX_CORES > 2 /* see comment in nss_fw_coredump_notify */ +#error too many NSS Cores: should be 1 or 2 +#endif + +static struct delayed_work coredump_queuewait; +static struct workqueue_struct *coredump_workqueue; + +/* + * nss_coredump_wait() + * reboot (panic) if all finished coredump interrupts will not come. + * N2H (C2C) interrupt may get lost during trap, as well NSS may start + * only one core; so timeout if less than desird core sends back finished + * coredump interrupt. + */ +static void nss_coredump_wait(struct work_struct *work) +{ + panic("did not get all coredump finished signals\n"); +} + +/* + * nss_coredump_init_delay_work() + * set a wait function in case coredump finish interrupt lost or + * only one NSS core is up. 
+ */ +int nss_coredump_init_delay_work(void) +{ + coredump_workqueue = create_singlethread_workqueue("coredump_wait"); + if (!coredump_workqueue) { + nss_warning("can't set wait: hopefully all int will come\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&coredump_queuewait, nss_coredump_wait); + return 0; +} + +/* + * nss_panic_handler() + * notification callback register to panic chain + */ +static int nss_panic_handler(struct notifier_block *nb, + unsigned long action, void *data) +{ + int dumped, timed; + int i; + + for (i = 0; i < nss_top_main.num_nss; i++) { + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[i]; + if (nss_ctx->state & NSS_CORE_STATE_FW_DEAD || !nss_ctx->nmap) + continue; + nss_ctx->state |= NSS_CORE_STATE_PANIC; + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_TRIGGER_COREDUMP); + nss_warning("panic call NSS FW %px to dump %x\n", + nss_ctx->nmap, nss_ctx->state); + } + + /* + * wait for FW coredump done: maximum 2 rounds for each core + * 200ms per round -- 16MB * 10 over 200MHz 32-bit memory bus + * panic will take another 3-5 seconds to reboot, so longer enough. 
+ */ + dumped = timed = 0; + do { + mdelay(200); + for (i = 0; i < nss_top_main.num_nss; i++) { + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[i]; + if ((nss_ctx->state & NSS_CORE_STATE_FW_DEAD || + !nss_ctx->nmap) && + !(nss_ctx->state & NSS_CORE_STATE_FW_DUMP)) { + nss_ctx->state |= NSS_CORE_STATE_FW_DUMP; + dumped++; + } + } + if (dumped >= nss_top_main.num_nss) { + nss_warning("NSS FW dump completed\n"); + break; + } + } while (timed++ < nss_top_main.num_nss * 2); + + if (timed >= nss_top_main.num_nss * 2) + nss_warning("might get %d FW dumped", dumped); + + return NOTIFY_DONE; +} + +static struct notifier_block nss_panic_nb = { + .notifier_call = nss_panic_handler, +}; + +/* + * nss_coredump_notify_register() + * API for nss_init to register coredump notifier to panic chain + */ +void nss_coredump_notify_register(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, &nss_panic_nb); +} + +/* + * nss_fw_coredump_notify() + * handler for coredump notification from NSS FW + */ +void nss_fw_coredump_notify(struct nss_ctx_instance *nss_own, + int intr __attribute__ ((unused))) +{ + int i, j, curr_index, useful_entries, num_cores_wait; + struct nss_log_descriptor *nld; + struct nss_log_entry *nle_init, *nle_print; + dma_addr_t dma_addr; + uint32_t offset, index; + + nss_warning("%px: COREDUMP %x Baddr %px stat %x", + nss_own, intr, nss_own->nmap, nss_own->state); + nss_own->state |= NSS_CORE_STATE_FW_DEAD; + queue_delayed_work(coredump_workqueue, &coredump_queuewait, + msecs_to_jiffies(3456)); + + /* + * If external log buffer is not set, use the nss initial log buffer. 
+ */ + nld = (struct nss_log_descriptor *)(nss_rbe[nss_own->id].addr); + dma_addr = nss_rbe[nss_own->id].dma_addr; + if (!nld) { + nld = nss_own->meminfo_ctx.logbuffer; + dma_addr = nss_own->meminfo_ctx.logbuffer_dma; + } + + dma_sync_single_for_cpu(NULL, dma_addr, sizeof(struct nss_log_descriptor), DMA_FROM_DEVICE); + + /* + * If the current entry is smaller than or equal to the number of NSS_LOG_COREDUMP_LINE_NUM, + * only print whatever is in the buffer. Otherwise, dump last NSS_LOG_COREDUMP_LINE_NUM + * to the dmessage. + */ + nss_info_always("%px: Starting NSS-FW logbuffer dump for core %u\n", + nss_own, nss_own->id); + nle_init = nld->log_ring_buffer; + if (nld->current_entry <= NSS_LOG_COREDUMP_LINE_NUM) { + curr_index = 0; + useful_entries = nld->current_entry; + } else { + curr_index = ((nld->current_entry - NSS_LOG_COREDUMP_LINE_NUM) % nld->log_nentries); + useful_entries = NSS_LOG_COREDUMP_LINE_NUM; + } + + nle_print = nle_init + curr_index; + for (j = index = curr_index; j < (curr_index + useful_entries); j++, index++) { + if (j == nld->log_nentries) { + nle_print = nle_init; + index = 0; + } + + offset = (index * sizeof(struct nss_log_entry)) + + offsetof(struct nss_log_descriptor, log_ring_buffer); + dma_sync_single_for_cpu(NULL, dma_addr + offset, + sizeof(struct nss_log_entry), DMA_FROM_DEVICE); + nss_info_always("%px: %s\n", nss_own, nle_print->message); + nle_print++; + } + + if (nss_own->state & NSS_CORE_STATE_PANIC) + return; + + /* + * We need to wait until all other cores finish their dump. + */ + num_cores_wait = (nss_top_main.num_nss - 1); + if (!num_cores_wait) { + /* + * nss_cmd_buf.coredump values: + * 0 == normal coredump and panic + * non-zero value is for debug purpose: + * 1 == force coredump and panic + * otherwise coredump but do not panic. 
+ */ + if (!(nss_cmd_buf.coredump & 0xFFFFFFFE)) { + panic("NSS FW coredump: bringing system down\n"); + } + nss_info_always("NSS core dump completed & use mdump to collect dump to debug\n"); + return; + } + + for (i = 0; i < nss_top_main.num_nss; i++) { + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[i]; + + /* + * Skip waiting for ourselves to coredump, we already have. + */ + if (nss_ctx == nss_own) { + continue; + } + + /* + * Notify any live core to dump. + */ + if (!(nss_ctx->state & NSS_CORE_STATE_FW_DEAD) && nss_ctx->nmap) { + nss_warning("notify NSS FW %px for coredump\n", nss_ctx->nmap); + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_TRIGGER_COREDUMP); + continue; + } + + /* + * bit 1 is used for testing coredump. Any other + * bit(s) (value other than 0/1) disable panic + * in order to use mdump utility: see mdump/src/README + * for more info. + */ + if (nss_cmd_buf.coredump & 0xFFFFFFFE) { + nss_info_always("NSS core dump completed and please use mdump to collect dump data\n"); + continue; + } + + /* + * Ideally we need to unregister ourselves from the panic + * notifier list before calling the panic to prevent infinite calling. + * However, When we tried, we couldn't make it work. Therefore, We just leave the corresponding call here + * if it will be needed in the future. + * + * atomic_notifier_chain_unregister(&panic_notifier_list, &nss_panic_nb); + */ + num_cores_wait--; + if (!num_cores_wait) { + panic("NSS FW coredump: bringing system down\n"); + return; + } + + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_crypto.c b/feeds/ipq807x/qca-nss-drv/src/nss_crypto.c new file mode 100644 index 000000000..96d8c5b2c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_crypto.c @@ -0,0 +1,302 @@ +/* + ************************************************************************** + * Copyright (c) 2013,2015-2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_crypto.c + * NSS Crypto APIs + */ + +#include "nss_tx_rx_common.h" +#include "nss_crypto.h" +#include "nss_crypto_log.h" + +/* + ********************************** + General APIs + ********************************** + */ + +/* + * nss_crypto_set_msg_callback() + * this sets the message callback handler and its associated context + */ +static inline void nss_crypto_set_msg_callback(struct nss_ctx_instance *nss_ctx, nss_crypto_msg_callback_t cb, void *crypto_ctx) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + nss_top->crypto_ctx = crypto_ctx; + nss_top->crypto_msg_callback = cb; +} + +/* + * nss_crypto_get_msg_callback() + * this gets the message callback handler and its associated context + */ +static inline nss_crypto_msg_callback_t nss_crypto_get_msg_callback(struct nss_ctx_instance *nss_ctx, void **crypto_ctx) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + *crypto_ctx = nss_top->crypto_ctx; + return nss_top->crypto_msg_callback; +} + +/* + * nss_crypto_msg_handler() + * this handles all the IPsec events and responses + */ +static void nss_crypto_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, 
void *app_data __attribute((unused))) +{ + struct nss_crypto_msg *nim = (struct nss_crypto_msg *)ncm; + nss_crypto_msg_callback_t cb = NULL; + void *crypto_ctx = NULL; + + /* + * Sanity check the message type + */ + if (ncm->type > NSS_CRYPTO_MSG_TYPE_MAX) { + nss_warning("%px: rx message type out of range: %d", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_crypto_msg)) { + nss_warning("%px: rx message length is invalid: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + if (ncm->interface != NSS_CRYPTO_INTERFACE) { + nss_warning("%px: rx message request for another interface: %d", nss_ctx, ncm->interface); + return; + } + + if (ncm->response == NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: rx message response for if %d, type %d, is invalid: %d", nss_ctx, ncm->interface, + ncm->type, ncm->response); + return; + } + + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_crypto_get_msg_callback(nss_ctx, &crypto_ctx); + ncm->app_data = (nss_ptr_t)crypto_ctx; + } + + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Trace messages. + */ + nss_crypto_log_rx_msg(nim); + + /* + * Load, Test & call + */ + cb = (nss_crypto_msg_callback_t)ncm->cb; + if (unlikely(!cb)) { + nss_trace("%px: rx handler has been unregistered for i/f: %d", nss_ctx, ncm->interface); + return; + } + cb((void *)ncm->app_data, nim); +} +/* + ********************************** + Tx APIs + ********************************** + */ + +/* + * nss_crypto_tx_msg + * Send crypto config to NSS. 
+ */ +nss_tx_status_t nss_crypto_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_crypto_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + nss_info("%px: tx message %d for if %d\n", nss_ctx, ncm->type, ncm->interface); + + BUILD_BUG_ON(NSS_NBUF_PAYLOAD_SIZE < sizeof(struct nss_crypto_msg)); + + if (ncm->interface != NSS_CRYPTO_INTERFACE) { + nss_warning("%px: tx message request for another interface: %d", nss_ctx, ncm->interface); + } + + if (ncm->type > NSS_CRYPTO_MSG_TYPE_MAX) { + nss_warning("%px: tx message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + nss_info("msg params version:%d, interface:%d, type:%d, cb:%px, app_data:%px, len:%d\n", + ncm->version, ncm->interface, ncm->type, (void *)ncm->cb, (void *)ncm->app_data, ncm->len); + + /* + * Trace messages. + */ + nss_crypto_log_tx_msg(msg); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_crypto_tx_data() + * NSS crypto TX data API. Sends a crypto buffer to NSS. + */ +nss_tx_status_t nss_crypto_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, struct sk_buff *skb) +{ + int32_t status; + + nss_trace("%px: tx_data buf=%px", nss_ctx, skb); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: tx_data packet dropped as core not ready", nss_ctx); + return NSS_TX_FAILURE_NOT_READY; + } + + status = nss_core_send_buffer(nss_ctx, if_num, skb, NSS_IF_H2N_DATA_QUEUE, H2N_BUFFER_PACKET, H2N_BIT_FLAG_BUFFER_REUSABLE); + if (unlikely(status != NSS_CORE_STATUS_SUCCESS)) { + nss_warning("%px: tx_data Unable to enqueue packet", nss_ctx); + if (status == NSS_CORE_STATUS_FAILURE_QUEUE) { + return NSS_TX_FAILURE_QUEUE; + } + + return NSS_TX_FAILURE; + } + + /* + * Kick the NSS awake so it can process our new entry. 
+ */ + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE); + + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_CRYPTO_REQ]); + + return NSS_TX_SUCCESS; +} + +/* + ********************************** + Register APIs + ********************************** + */ + +/* + * nss_crypto_notify_register() + * register message notifier for crypto interface + */ +struct nss_ctx_instance *nss_crypto_notify_register(nss_crypto_msg_callback_t cb, void *app_data) +{ + struct nss_ctx_instance *nss_ctx; + + nss_ctx = &nss_top_main.nss[nss_top_main.crypto_handler_id]; + + nss_crypto_set_msg_callback(nss_ctx, cb, app_data); + + return nss_ctx; +} + +/* + * nss_crypto_notify_unregister() + * unregister message notifier for crypto interface + */ +void nss_crypto_notify_unregister(struct nss_ctx_instance *nss_ctx) +{ + nss_crypto_set_msg_callback(nss_ctx, NULL, NULL); +} + +/* + * nss_crypto_data_register() + * register a data callback routine + */ +struct nss_ctx_instance *nss_crypto_data_register(uint32_t if_num, nss_crypto_buf_callback_t cb, + struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx; + + nss_ctx = &nss_top_main.nss[nss_top_main.crypto_handler_id]; + + if ((if_num >= NSS_MAX_NET_INTERFACES) && (if_num < NSS_MAX_PHYSICAL_INTERFACES)) { + nss_warning("%px: data register received for invalid interface %d", nss_ctx, if_num); + return NULL; + } + + /* + * Register subsystem, ensuring that no duplicate registrations occur. 
+ */ + nss_core_register_subsys_dp(nss_ctx, if_num, cb, NULL, NULL, netdev, features); + + return nss_ctx; +} + +/* + * nss_crypto_data_unregister() + * unregister a data callback routine + */ +void nss_crypto_data_unregister(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + if ((if_num >= NSS_MAX_NET_INTERFACES) && (if_num < NSS_MAX_PHYSICAL_INTERFACES)) { + nss_warning("%px: data unregister received for invalid interface %d", nss_ctx, if_num); + return; + } + + nss_core_unregister_subsys_dp(nss_ctx, if_num); +} + +/* + * nss_crypto_pm_notify_register() + * register a PM notify callback routine + */ +void nss_crypto_pm_notify_register(nss_crypto_pm_event_callback_t cb, void *app_data) +{ + nss_top_main.crypto_pm_ctx = app_data; + nss_top_main.crypto_pm_callback = cb; +} + +/* + * nss_crypto_pm_notify_unregister() + * unregister a PM notify callback routine + */ +void nss_crypto_pm_notify_unregister(void) +{ + nss_top_main.crypto_pm_ctx = NULL; + nss_top_main.crypto_pm_callback = NULL; +} + +/* + * nss_crypto_register_handler() + */ +void nss_crypto_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[nss_top_main.crypto_handler_id]; + + nss_core_register_handler(nss_ctx, NSS_CRYPTO_INTERFACE, nss_crypto_msg_handler, NULL); +} + +/* + * nss_crypto_msg_init() + * Initialize crypto message + */ +void nss_crypto_msg_init(struct nss_crypto_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, + nss_crypto_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, (void *)cb, app_data); +} + +EXPORT_SYMBOL(nss_crypto_notify_register); +EXPORT_SYMBOL(nss_crypto_notify_unregister); +EXPORT_SYMBOL(nss_crypto_data_register); +EXPORT_SYMBOL(nss_crypto_data_unregister); +EXPORT_SYMBOL(nss_crypto_pm_notify_register); +EXPORT_SYMBOL(nss_crypto_pm_notify_unregister); +EXPORT_SYMBOL(nss_crypto_tx_msg); +EXPORT_SYMBOL(nss_crypto_tx_buf); +EXPORT_SYMBOL(nss_crypto_msg_init); diff --git 
a/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn.c b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn.c new file mode 100644 index 000000000..35c4c8c86 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn.c @@ -0,0 +1,388 @@ +/* + ************************************************************************** + * Copyright (c) 2013,2015-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_crypto_cmn.c + * NSS Crypto common API implementation + */ + +#include "nss_tx_rx_common.h" +#include "nss_crypto_cmn.h" +#include "nss_crypto_cmn_strings.h" +#include "nss_crypto_cmn_stats.h" +#include "nss_crypto_cmn_log.h" + +/* + * Amount time the synchronous message should wait for response from + * NSS before the timeout happens. After the timeout the message + * response even if it arrives has to be discarded. Typically, the + * time needs to be selected based on the worst case time in case of + * peak throughput between host & NSS. 
+ */ +#define NSS_CRYPTO_CMN_TX_TIMEO_TICKS msecs_to_jiffies(3000) /* milliseconds */ + +/* + * Private data structure to hold state for + * the crypto specific NSS interaction + */ +struct nss_crypto_cmn_pvt { + struct semaphore sem; /* used for synchronizing 'tx_msg_sync' */ + struct completion complete; /* completion callback */ + atomic_t seq_no; /* used for tracking tx_msg_sync requests */ +}; + +/* + * This is a single instance applicable for all crypto synchronous + * messaging interaction with NSS. + */ +static struct nss_crypto_cmn_pvt g_nss_crypto_cmn; + +/* + * nss_crypto_cmn_msg_handler() + * this handles all the IPsec events and responses + */ +static void nss_crypto_cmn_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, + void *app_data __attribute((unused))) +{ + struct nss_crypto_cmn_msg *nim = (struct nss_crypto_cmn_msg *)ncm; + nss_crypto_cmn_msg_callback_t cb = NULL; + + /* + * Sanity check the message type + */ + if (ncm->type > NSS_CRYPTO_CMN_MSG_TYPE_MAX) { + nss_warning("%px: rx message type out of range: %d", nss_ctx, ncm->type); + return; + } + + /* + * Check if the message structure length matches that of Host side. In case + * of failure this indicates ether the structure is different or this is not + * the intended interface. + */ + if (nss_cmn_get_msg_len(ncm) > sizeof(*nim)) { + nss_warning("%px: rx message length is invalid: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + if (ncm->response == NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: rx message response for if %d, type %d, is invalid: %d", nss_ctx, + ncm->interface, ncm->type, ncm->response); + return; + } + + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->crypto_cmn_msg_callback; + ncm->app_data = (nss_ptr_t)nss_ctx->nss_top->crypto_ctx; + } + + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Trace messages. 
+ */ + nss_crypto_cmn_log_rx_msg(nim); + + switch (nim->cm.type) { + case NSS_CRYPTO_CMN_MSG_TYPE_SYNC_NODE_STATS: + case NSS_CRYPTO_CMN_MSG_TYPE_SYNC_ENG_STATS: + case NSS_CRYPTO_CMN_MSG_TYPE_SYNC_CTX_STATS: + /* + * Update driver statistics and send statistics + * notification to the registered modules. + */ + nss_crypto_cmn_stats_sync(nss_ctx, &nim->msg.stats); + nss_crypto_cmn_stats_notify(nss_ctx); + break; + } + /* + * Load, Test & call + */ + cb = (nss_crypto_cmn_msg_callback_t)ncm->cb; + if (unlikely(!cb)) { + nss_warning("%px: rx handler has been unregistered for i/f: %d", nss_ctx, ncm->interface); + return; + } + + cb((void *)ncm->app_data, nim); +} + +/* + * nss_crypto_cmn_tx_msg + * Send crypto config to NSS. + */ +nss_tx_status_t nss_crypto_cmn_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_crypto_cmn_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + uint16_t msg_len = nss_cmn_get_msg_len(ncm); + + nss_info("%px: tx message %d for if %d", nss_ctx, ncm->type, ncm->interface); + + BUILD_BUG_ON(NSS_NBUF_PAYLOAD_SIZE < sizeof(*msg)); + + if (ncm->type > NSS_CRYPTO_CMN_MSG_TYPE_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Check if the message structure length matches the structure length. Otherwise + * the sender accidentally programmed a incorrect length into the message. + */ + if (msg_len != sizeof(*msg)) { + nss_warning("%px: message request len bad: %d", nss_ctx, msg_len); + return NSS_TX_FAILURE_BAD_PARAM; + } + + nss_trace("%px: msg params version:%d, interface:%d, type:%d, cb:%px, app_data:%px, len:%d", + nss_ctx, ncm->version, ncm->interface, ncm->type, + (void *)ncm->cb, (void *)ncm->app_data, ncm->len); + + /* + * Trace messages. 
+ */ + nss_crypto_cmn_log_tx_msg(msg); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_crypto_cmn_tx_msg); + +/* + * nss_crypto_cmn_tx_msg_cb() + * Callback to handle the synchronous completion of messages. + */ +static void nss_crypto_cmn_tx_msg_cb(void *app_data, struct nss_crypto_cmn_msg *nim) +{ + struct nss_crypto_cmn_pvt *pvt = &g_nss_crypto_cmn; + struct nss_crypto_cmn_msg *resp = (struct nss_crypto_cmn_msg *)nim->cm.app_data; + + /* + * Only update the message structure if the sequence no. matches + * Otherwise, a timeout might have happened in between and we + * are probably receiving the completion for an older message + */ + if (atomic_read(&pvt->seq_no) == nim->seq_num) { + memcpy(resp, nim, sizeof(struct nss_crypto_cmn_msg)); + complete(&pvt->complete); + } +} + +/* + * nss_crypto_cmn_tx_msg_sync() + * Transmit a crypto message to NSS firmware synchronously. + */ +nss_tx_status_t nss_crypto_cmn_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_crypto_cmn_msg *msg) +{ + struct nss_crypto_cmn_pvt *pvt = &g_nss_crypto_cmn; + nss_tx_status_t status; + int ret = 0; + + down(&pvt->sem); + atomic_inc(&pvt->seq_no); + + /* + * this is a synchronous message; overload the callback + * and app_data + */ + msg->cm.cb = (nss_ptr_t)nss_crypto_cmn_tx_msg_cb; + msg->cm.app_data = (nss_ptr_t)msg; + msg->seq_num = atomic_read(&pvt->seq_no); + + status = nss_crypto_cmn_tx_msg(nss_ctx, msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: tx_msg failed", nss_ctx); + up(&pvt->sem); + return status; + } + + /* + * Note: This cannot be called in atomic context + */ + ret = wait_for_completion_timeout(&pvt->complete, NSS_CRYPTO_CMN_TX_TIMEO_TICKS); + if (!ret) { + atomic_inc(&pvt->seq_no); + nss_warning("%px: tx_msg_sync timed out", nss_ctx); + up(&pvt->sem); + return NSS_TX_FAILURE; + } + + /* + * This ensures that the even if the response arrives on a different + * CPU core the data copied by the response 
callback will be visible + * to the caller which is sleeping for it on a different core. For + * further details read Linux/Documentation/memory-barrier.txt + */ + smp_rmb(); + up(&pvt->sem); + + return NSS_TX_SUCCESS; +} +EXPORT_SYMBOL(nss_crypto_cmn_tx_msg_sync); + +/* + * nss_crypto_cmn_tx_buf() + * NSS crypto TX data API. Sends a crypto buffer to NSS. + */ +nss_tx_status_t nss_crypto_cmn_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + struct sk_buff *skb) +{ + int32_t status; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: tx_data packet dropped as core not ready", nss_ctx); + return NSS_TX_FAILURE_NOT_READY; + } + + status = nss_core_send_packet(nss_ctx, skb, if_num, H2N_BIT_FLAG_BUFFER_REUSABLE); + switch (status) { + case NSS_CORE_STATUS_SUCCESS: + break; + + case NSS_CORE_STATUS_FAILURE_QUEUE: /* queue full condition */ + nss_warning("%px: H2N queue full for tx_buf", nss_ctx); + return NSS_TX_FAILURE_QUEUE; + + default: + nss_warning("%px: general failure for tx_buf", nss_ctx); + return NSS_TX_FAILURE; + } + + /* + * Kick the NSS awake so it can process our new entry. 
+ */ + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE); + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_CRYPTO_REQ]); + + return NSS_TX_SUCCESS; +} +EXPORT_SYMBOL(nss_crypto_cmn_tx_buf); + +/* + * nss_crypto_cmn_notify_register() + * register message notifier for crypto interface + */ +struct nss_ctx_instance *nss_crypto_cmn_notify_register(nss_crypto_cmn_msg_callback_t cb, void *app_data) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[nss_top->crypto_handler_id]; + + nss_top->crypto_ctx = app_data; + nss_top->crypto_cmn_msg_callback = cb; + + return nss_ctx; +} +EXPORT_SYMBOL(nss_crypto_cmn_notify_register); + +/* + * nss_crypto_cmn_notify_unregister() + * De-register the message notifier for crypto interface + */ +void nss_crypto_cmn_notify_unregister(struct nss_ctx_instance *nss_ctx) +{ + struct nss_top_instance *nss_top = &nss_top_main; + + nss_top->crypto_ctx = NULL; + nss_top->crypto_cmn_msg_callback = NULL; +} +EXPORT_SYMBOL(nss_crypto_cmn_notify_unregister); + +/* + * nss_crypto_cmn_data_register() + * Register the data callback routine + */ +struct nss_ctx_instance *nss_crypto_cmn_data_register(uint32_t if_num, nss_crypto_cmn_buf_callback_t cb, + struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx; + + nss_ctx = &nss_top_main.nss[nss_top_main.crypto_handler_id]; + + if (if_num < NSS_SPECIAL_IF_START) { + nss_warning("%px: interface number is not special interface %d", nss_ctx, if_num); + return NULL; + } + + /* + * avoid multiple registration for same interface number + */ + if (nss_ctx->subsys_dp_register[if_num].cb) + return nss_ctx; + + /* + * Note: no locking is required for updating this as + * the registration is only a module load time operation. 
+ */ + nss_core_register_subsys_dp(nss_ctx, if_num, cb, NULL, NULL, netdev, features); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_crypto_cmn_data_register); + +/* + * nss_crypto_cmn_data_unregister() + * De-register the data callback routine + */ +void nss_crypto_cmn_data_unregister(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + if (if_num < NSS_SPECIAL_IF_START) { + nss_warning("%px: interface number is not special interface %d", nss_ctx, if_num); + return; + } + + /* + * Note: no locking is required for updating this as + * the registration is only a module load time operation. + */ + nss_core_unregister_subsys_dp(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_crypto_cmn_data_unregister); + +/* + * nss_crypto_cmn_get_context() + * get NSS context instance for crypto handle + */ +struct nss_ctx_instance *nss_crypto_cmn_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.crypto_handler_id]; +} +EXPORT_SYMBOL(nss_crypto_cmn_get_context); + +/* + * nss_crypto_cmn_register_handler() + */ +void nss_crypto_cmn_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_crypto_cmn_get_context(); + + sema_init(&g_nss_crypto_cmn.sem, 1); + init_completion(&g_nss_crypto_cmn.complete); + nss_core_register_handler(nss_ctx, NSS_CRYPTO_CMN_INTERFACE, nss_crypto_cmn_msg_handler, NULL); + + nss_crypto_cmn_stats_dentry_create(); + nss_crypto_cmn_strings_dentry_create(); +} + +/* + * nss_crypto_cmn_msg_init() + * Initialize crypto message + */ +void nss_crypto_cmn_msg_init(struct nss_crypto_cmn_msg *ncm, uint16_t if_num, uint32_t type, + uint32_t len, nss_crypto_cmn_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, (void *)cb, app_data); +} +EXPORT_SYMBOL(nss_crypto_cmn_msg_init); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_log.c new file mode 100644 index 000000000..04cd66c47 --- /dev/null +++ 
b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_log.c @@ -0,0 +1,210 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_crypto_cmn_log.c + * NSS Crypto Common logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_crypto_cmn_log_message_types_str + * Crypto Common message strings + */ +static int8_t *nss_crypto_cmn_log_message_types_str[NSS_CRYPTO_CMN_MSG_TYPE_MAX] __maybe_unused = { + "Crypto Common Invalid Message", + "Crypto Common CRYPTO CMN Initialize Node", + "Crypto Common Initialize Engine", + "Crypto Common Initialize DMA Pair", + "Crypto Common Update Context Information", + "Crypto Common Clear Context Information", + "Crypto Common Verify Context Active", + "Crypto Common Synchronous Node Statistics" + "Crypto Common Synchronouts Engine Statistics", + "Crypto Common Synchronous Context Statistics" +}; + +/* + * nss_crypto_cmn_log_error_response_types_str + * Strings for error types for crypto common messages + */ +static int8_t *nss_crypto_cmn_log_error_response_types_str[NSS_CRYPTO_CMN_MSG_ERROR_MAX] __maybe_unused = { + "Crypto Common No Error", + "Crypto Common Header Version Not Supported", + "Crypto Common Context Index out-of-range for node", + "Crypto Common DMA mask out-of-range", + "Crypto Common DMA count exceeds Token", + "Crypto Common Token Allocation failed", + "Crypto Common Context Index out-of-range", + "Crypto Common Context has references", + "Crypto Common Bad Context Size", + "Crypto Common Bad Algorithm", + "Crypto Common Context Allocation failed", + "Crypto Common Context has no references", + "Crypto Common Invalid Context Flags" +}; + +/* + * nss_crypto_cmn_node_msg() + * Log NSS crypto common node message. + */ +static void nss_crypto_cmn_node_msg(struct nss_crypto_cmn_msg *ncm) +{ + struct nss_crypto_cmn_node *ncnm __maybe_unused = &ncm->msg.node; + nss_trace("%px: NSS crypto common node message:\n" + "Crypto Common Max DMA Rings: %d\n" + "Crypto Common Max Contex: %d\n" + "Crypto Common Max Context Size: %d\n", + ncnm, ncnm->max_dma_rings, + ncnm->max_ctx, ncnm->max_ctx_size); +} + +/* + * nss_crypto_cmn_engine_msg() + * Log NSS crypto cmn engine message. 
+ */ +static void nss_crypto_cmn_engine_msg(struct nss_crypto_cmn_msg *ncm) +{ + struct nss_crypto_cmn_engine *ncem __maybe_unused = &ncm->msg.eng; + nss_trace("%px: NSS crypto common engine message \n" + "Crypto Common Firmware Version: %px\n" + "Crypto Common DMA Mask: %x\n" + "Crypto Common Token Count: %d\n", + ncem, &ncem->fw_ver, + ncem->dma_mask, ncem->req_count); +} + +/* + * nss_crypto_cmn_dma_msg() + * Log NSS crypto cmn dma message. + */ +static void nss_crypto_cmn_dma_msg(struct nss_crypto_cmn_msg *ncm) +{ + struct nss_crypto_cmn_dma *ncdm __maybe_unused = &ncm->msg.dma; + nss_trace("%px: NSS crypto common dma message \n" + "Crypto Common DMA Pair ID: %d\n", + ncdm, ncdm->pair_id); +} + +/* + * nss_crypto_cmn_ctx_msg() + * Log NSS crypto cmn context message. + */ +static void nss_crypto_cmn_ctx_msg(struct nss_crypto_cmn_msg *ncm) +{ + struct nss_crypto_cmn_ctx *nccm __maybe_unused = &ncm->msg.ctx; + nss_trace("%px: NSS crypto common context message \n" + "Crypto Common Context Spare Words: %px\n" + "Crypto Common Index: %d\n" + "Crypto Common Secure Offset: %d\n" + "Crypto Common Cipher Key: %px\n" + "Crypto Common Authorization Key: %px\n" + "Crypto Common Nonce Value: %px\n" + "Crypto Common Algorithm: %x\n" + "Crypto Common Context Specific Flags: %x\n", + nccm, &nccm->spare, + nccm->index, nccm->sec_offset, + &nccm->cipher_key, &nccm->auth_key, + &nccm->nonce, nccm->algo, nccm->flags); +} + +/* + * nss_crypto_cmn_log_verbose() + * Log message contents. 
+ */ +static void nss_crypto_cmn_log_verbose(struct nss_crypto_cmn_msg *ncm) +{ + switch (ncm->cm.type) { + case NSS_CRYPTO_CMN_MSG_TYPE_SETUP_NODE: + nss_crypto_cmn_node_msg(ncm); + break; + + case NSS_CRYPTO_CMN_MSG_TYPE_SETUP_ENG: + nss_crypto_cmn_engine_msg(ncm); + break; + + case NSS_CRYPTO_CMN_MSG_TYPE_SETUP_DMA: + nss_crypto_cmn_dma_msg(ncm); + break; + + case NSS_CRYPTO_CMN_MSG_TYPE_SETUP_CTX: + case NSS_CRYPTO_CMN_MSG_TYPE_CLEAR_CTX: + case NSS_CRYPTO_CMN_MSG_TYPE_VERIFY_CTX: + nss_crypto_cmn_ctx_msg(ncm); + break; + + case NSS_CRYPTO_CMN_MSG_TYPE_SYNC_NODE_STATS: + case NSS_CRYPTO_CMN_MSG_TYPE_SYNC_ENG_STATS: + case NSS_CRYPTO_CMN_MSG_TYPE_SYNC_CTX_STATS: + /* Getting logged in stats */ + break; + + default: + nss_warning("%px: Invalid message type\n", ncm); + break; + } +} + +/* + * nss_crypto_cmn_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_crypto_cmn_log_tx_msg(struct nss_crypto_cmn_msg *ncm) +{ + if (ncm->cm.type >= NSS_CRYPTO_CMN_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", ncm); + return; + } + + nss_info("%px: type[%d]:%s\n", ncm, ncm->cm.type, nss_crypto_cmn_log_message_types_str[ncm->cm.type]); + nss_crypto_cmn_log_verbose(ncm); +} + +/* + * nss_crypto_cmn_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_crypto_cmn_log_rx_msg(struct nss_crypto_cmn_msg *ncm) +{ + if (ncm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ncm); + return; + } + + if (ncm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ncm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ncm, ncm->cm.type, + nss_crypto_cmn_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response]); + goto verbose; + } + + if (ncm->cm.error >= NSS_CRYPTO_CMN_MSG_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ncm, ncm->cm.type, nss_crypto_cmn_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ncm, ncm->cm.type, nss_crypto_cmn_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error, nss_crypto_cmn_log_error_response_types_str[ncm->cm.error]); + +verbose: + nss_crypto_cmn_log_verbose(ncm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_log.h new file mode 100644 index 000000000..f78a8ecf7 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_CRYPTO_CMN_LOG_H__ +#define __NSS_CRYPTO_CMN_LOG_H__ + +/* + * nss_crypto_cmn_log.h + * NSS Crypto Common Log header file. + */ + +/* + * nss_crypto_cmn_log_tx_msg + * Logs a crypto common message that is sent to the NSS firmware. + */ +void nss_crypto_cmn_log_tx_msg(struct nss_crypto_cmn_msg *ncm); + +/* + * nss_crypto_cmn_log_rx_msg + * Logs a crypto common message that is received from the NSS firmware. + */ +void nss_crypto_cmn_log_rx_msg(struct nss_crypto_cmn_msg *ncm); + +#endif /* __NSS_CRYPTO_CMN_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_stats.c new file mode 100644 index 000000000..c30416634 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_stats.c @@ -0,0 +1,166 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_crypto_cmn_stats.h" +#include "nss_crypto_cmn_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_crypto_cmn_stats_notifier); + +/* + * Spinlock to protect CRYPTO_CMN statistics update/read + */ +DEFINE_SPINLOCK(nss_crypto_cmn_stats_lock); + +/* + * nss_crypto_cmn_stats + * crypto common statistics + */ +uint64_t nss_crypto_cmn_stats[NSS_CRYPTO_CMN_STATS_MAX]; + +/* + * nss_crypto_cmn_stats_read() + * Read crypto common statistics + */ +static ssize_t nss_crypto_cmn_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * Max output lines = #stats + + * few blank lines for banner printing + Number of Extra outputlines + * for future reference to add new stats + */ + uint32_t max_output_lines = NSS_CRYPTO_CMN_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = vzalloc(size_al); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return -ENOMEM; + } + + stats_shadow = vzalloc(NSS_CRYPTO_CMN_STATS_MAX * 8); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + vfree(lbuf); + return -ENOMEM; + } + + /* + * crypto common statistics + */ + spin_lock_bh(&nss_crypto_cmn_stats_lock); + for (i = 0; i < NSS_CRYPTO_CMN_STATS_MAX; i++) + stats_shadow[i] = nss_crypto_cmn_stats[i]; + + 
spin_unlock_bh(&nss_crypto_cmn_stats_lock); + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "crypto_cmn", NSS_STATS_SINGLE_CORE); + size_wr += nss_stats_print("crypto_cmn", NULL, NSS_STATS_SINGLE_INSTANCE, nss_crypto_cmn_strings_stats, + stats_shadow, NSS_CRYPTO_CMN_STATS_MAX, lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + vfree(lbuf); + vfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_crypto_cmn_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(crypto_cmn); + +/* + * nss_crypto_cmn_stats_dentry_create() + * Create crypto common statistics debug entry. + */ +void nss_crypto_cmn_stats_dentry_create(void) +{ + nss_stats_create_dentry("crypto_cmn", &nss_crypto_cmn_stats_ops); +} + +/* + * nss_crypto_cmn_stats_sync() + * Handle the syncing of NSS crypto common statistics. + */ +void nss_crypto_cmn_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_crypto_cmn_stats *nct) +{ + int j; + + spin_lock_bh(&nss_crypto_cmn_stats_lock); + + /* + * Common node stats + */ + nss_crypto_cmn_stats[NSS_STATS_NODE_RX_PKTS] += nct->nstats.rx_packets; + nss_crypto_cmn_stats[NSS_STATS_NODE_RX_BYTES] += nct->nstats.rx_bytes; + nss_crypto_cmn_stats[NSS_STATS_NODE_TX_PKTS] += nct->nstats.tx_packets; + nss_crypto_cmn_stats[NSS_STATS_NODE_TX_BYTES] += nct->nstats.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) + nss_crypto_cmn_stats[NSS_STATS_NODE_RX_QUEUE_0_DROPPED + j] += nct->nstats.rx_dropped[j]; + + /* + * crypto common statistics + */ + nss_crypto_cmn_stats[NSS_CRYPTO_CMN_STATS_FAIL_VERSION] += nct->fail_version; + nss_crypto_cmn_stats[NSS_CRYPTO_CMN_STATS_FAIL_CTX] += nct->fail_ctx; + nss_crypto_cmn_stats[NSS_CRYPTO_CMN_STATS_FAIL_DMA] += nct->fail_dma; + + spin_unlock_bh(&nss_crypto_cmn_stats_lock); +} + +/* + * nss_crypto_cmn_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_crypto_cmn_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + struct nss_crypto_cmn_stats_notification crypto_cmn_stats; + + crypto_cmn_stats.core_id = nss_ctx->id; + memcpy(crypto_cmn_stats.stats, nss_crypto_cmn_stats, sizeof(crypto_cmn_stats.stats)); + atomic_notifier_call_chain(&nss_crypto_cmn_stats_notifier, NSS_STATS_EVENT_NOTIFY, &crypto_cmn_stats); +} + +/* + * nss_crypto_cmn_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_crypto_cmn_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_crypto_cmn_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_crypto_cmn_stats_register_notifier); + +/* + * nss_crypto_cmn_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_crypto_cmn_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_crypto_cmn_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_crypto_cmn_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_stats.h new file mode 100644 index 000000000..6319c2cbb --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_stats.h @@ -0,0 +1,77 @@ +/* + ****************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_CRYPTO_CMN_STATS_H +#define __NSS_CRYPTO_CMN_STATS_H + +#include + +/** + * nss_crypto_cmn_stats_types + * crypto common transmission node statistics + */ +enum nss_crypto_cmn_stats_types { + NSS_CRYPTO_CMN_STATS_FAIL_VERSION = NSS_STATS_NODE_MAX, /* version mismatch failures */ + NSS_CRYPTO_CMN_STATS_FAIL_CTX, /* context related failures */ + NSS_CRYPTO_CMN_STATS_FAIL_DMA, /* dma descriptor full */ + NSS_CRYPTO_CMN_STATS_MAX, /* Maximum message type */ +}; + +/** + * nss_crypto_cmn_stats_notification + * crypto common transmission statistics structure + */ +struct nss_crypto_cmn_stats_notification { + uint32_t core_id; /* core ID */ + uint64_t stats[NSS_CRYPTO_CMN_STATS_MAX]; /* transmission statistics */ +}; + +/* + * crypto common statistics APIs + */ +extern void nss_crypto_cmn_stats_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_crypto_cmn_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_crypto_cmn_stats *nct); +extern void nss_crypto_cmn_stats_dentry_create(void); + +/** + * nss_crypto_cmn_stats_register_notifier + * Registers a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. + */ +extern int nss_crypto_cmn_stats_register_notifier(struct notifier_block *nb); + +/** + * nss_crypto_cmn_stats_unregister_notifier + * Deregisters a statistics notifier. + * + * @datatypes + * notifier_block + * + * @param[in] nb Notifier block. + * + * @return + * 0 on success or -2 on failure. 
+ */ +extern int nss_crypto_cmn_stats_unregister_notifier(struct notifier_block *nb); + +#endif /* __NSS_CRYPTO_CMN_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_strings.c new file mode 100644 index 000000000..42e8d8b85 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_strings.c @@ -0,0 +1,61 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_crypto_cmn_strings.h" + +/* + * nss_crypto_cmn_strings_stats + * crypto common statistics strings. 
+ */ +struct nss_stats_info nss_crypto_cmn_strings_stats[NSS_CRYPTO_CMN_STATS_MAX] = { + {"rx_pkts", NSS_STATS_TYPE_COMMON}, + {"rx_byts", NSS_STATS_TYPE_COMMON}, + {"tx_pkts", NSS_STATS_TYPE_COMMON}, + {"tx_byts", NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops", NSS_STATS_TYPE_DROP}, + {"fail_version", NSS_STATS_TYPE_SPECIAL}, + {"fail_ctx", NSS_STATS_TYPE_SPECIAL}, + {"fail_dma", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_crypto_cmn_strings_read() + * Read crypto common node statistics names + */ +static ssize_t nss_crypto_cmn_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_crypto_cmn_strings_stats, NSS_CRYPTO_CMN_STATS_MAX); +} + +/* + * nss_crypto_cmn_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(crypto_cmn); + +/* + * nss_crypto_cmn_strings_dentry_create() + * Create crypto common statistics strings debug entry. + */ +void nss_crypto_cmn_strings_dentry_create(void) +{ + nss_strings_create_dentry("crypto_cmn", &nss_crypto_cmn_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_strings.h new file mode 100644 index 000000000..aae067764 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_cmn_strings.h @@ -0,0 +1,25 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_CRYPTO_CMN_STRINGS_H +#define __NSS_CRYPTO_CMN_STRINGS_H + +#include "nss_crypto_cmn_stats.h" + +extern struct nss_stats_info nss_crypto_cmn_strings_stats[NSS_CRYPTO_CMN_STATS_MAX]; +extern void nss_crypto_cmn_strings_dentry_create(void); + +#endif /* __NSS_CRYPTO_CMN_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_crypto_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_log.c new file mode 100644 index 000000000..b5569973b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_log.c @@ -0,0 +1,151 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_crypto_log.c + * NSS Crypto logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_crypto_log_message_types_str + * Crypto message strings + */ +static int8_t *nss_crypto_log_message_types_str[NSS_CRYPTO_MSG_TYPE_MAX] __maybe_unused = { + "Crypto Invalid Message", + "Crypto Open Engine Message", + "Crypto Close Engine Message", + "Crypto Update Session", + "Crypto Stats Sync", +}; + +/* + * nss_crypto_log_error_response_types_str + * Strings for error types for CRYPTO messages + */ +static int8_t *nss_crypto_log_error_response_types_str[NSS_CRYPTO_MSG_ERROR_MAX] __maybe_unused = { + "Crypto No Error", + "Crypto Invalid Engine", + "Crypto Unsupported Operation", + "Crypto Invalid Operation", + "Crypto Invalid Index Range", + "Crypto Index Alloc Failure", +}; + +/* + * nss_crypto_config_eng_msg() + * Log NSS Crypto config engine message. + */ +static void nss_crypto_config_eng_msg(struct nss_crypto_msg *ncm) +{ + struct nss_crypto_config_eng *nccem __maybe_unused = &ncm->msg.eng; + nss_trace("%px: NSS Crypto Config Engine Message:\n" + "Crypto Engine Number: %d\n" + "Crypto BAM Physical Base Address: %x\n" + "Crypto Physical Base Address: %x\n" + "Crypto Pipe Description Address: %px\n" + "Crypto Session Indices: %px\n", + nccem, nccem->eng_id, + nccem->bam_pbase, nccem->crypto_pbase, + &nccem->desc_paddr, &nccem->idx); +} + +/* + * nss_crypto_config_session_msg() + * Log NSS Crypto config session message. + */ +static void nss_crypto_config_session_msg(struct nss_crypto_msg *ncm) +{ + struct nss_crypto_config_session *nccsm __maybe_unused = &ncm->msg.session; + nss_trace("%px: NSS Crypto Config Session message \n" + "Crypto Session Index: %d\n" + "Crypto Session State: %d\n" + "Crypto Session Initialization Vector Length: %d\n", + nccsm, nccsm->idx, + nccsm->state, nccsm->iv_len); +} + +/* + * nss_crypto_log_verbose() + * Log message contents. 
+ */ +static void nss_crypto_log_verbose(struct nss_crypto_msg *ncm) +{ + switch (ncm->cm.type) { + case NSS_CRYPTO_MSG_TYPE_OPEN_ENG: + nss_crypto_config_eng_msg(ncm); + break; + + case NSS_CRYPTO_MSG_TYPE_UPDATE_SESSION: + nss_crypto_config_session_msg(ncm); + break; + + default: + nss_warning("%px: Invalid message type\n", ncm); + break; + } +} + +/* + * nss_crypto_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_crypto_log_tx_msg(struct nss_crypto_msg *ncm) +{ + if (ncm->cm.type >= NSS_CRYPTO_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", ncm); + return; + } + + nss_info("%px: type[%d]:%s\n", ncm, ncm->cm.type, nss_crypto_log_message_types_str[ncm->cm.type]); + nss_crypto_log_verbose(ncm); +} + +/* + * nss_crypto_log_rx_msg() + * Log messages received from FW. + */ +void nss_crypto_log_rx_msg(struct nss_crypto_msg *ncm) +{ + if (ncm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ncm); + return; + } + + if (ncm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ncm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ncm, ncm->cm.type, + nss_crypto_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response]); + goto verbose; + } + + if (ncm->cm.error >= NSS_CRYPTO_MSG_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ncm, ncm->cm.type, nss_crypto_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ncm, ncm->cm.type, nss_crypto_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error, nss_crypto_log_error_response_types_str[ncm->cm.error]); + +verbose: + nss_crypto_log_verbose(ncm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_crypto_log.h 
b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_log.h new file mode 100644 index 000000000..c0d53ddae --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_crypto_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_CRYPTO_LOG_H__ +#define __NSS_CRYPTO_LOG_H__ + +/* + * nss_crypto_log.h + * NSS Crypto Log Header File + */ + +/* + * nss_crypto_log_tx_msg + * Logs a crypto message that is sent to the NSS firmware. + */ +void nss_crypto_log_tx_msg(struct nss_crypto_msg *ncm); + +/* + * nss_crypto_log_rx_msg + * Logs a crypto message that is received from the NSS firmware. 
+ */ +void nss_crypto_log_rx_msg(struct nss_crypto_msg *ncm); + +#endif /* __NSS_CRYPTO_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/include/nss_data_plane_hal.h b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/include/nss_data_plane_hal.h new file mode 100644 index 000000000..aa46eadc7 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/include/nss_data_plane_hal.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "nss_phys_if.h" +#include + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)) +#define NSS_DATA_PLANE_SUPPORTED_FEATURES (NETIF_F_HIGHDMA \ + | NETIF_F_HW_CSUM \ + | NETIF_F_RXCSUM \ + | NETIF_F_SG \ + | NETIF_F_FRAGLIST \ + | (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) +#else +#define NSS_DATA_PLANE_SUPPORTED_FEATURES (NETIF_F_HIGHDMA \ + | NETIF_F_HW_CSUM \ + | NETIF_F_RXCSUM \ + | NETIF_F_SG \ + | NETIF_F_FRAGLIST \ + | (NETIF_F_TSO | NETIF_F_TSO6)) +#endif + +/* + * nss_data_plane_param + */ +struct nss_data_plane_param { + struct nss_dp_data_plane_ctx dpc; /* data plane ctx base class */ + int if_num; /* physical interface number */ + struct nss_ctx_instance *nss_ctx; /* which nss core */ + struct nss_dp_gmac_stats gmac_stats; /* SoC specific stats for GMAC */ + int notify_open; /* This data plane interface has been opened or not */ + uint32_t features; /* skb types supported by this interface */ + uint32_t bypass_nw_process; /* Do we want to bypass NW processing in NSS for this data plane? */ +}; + +void nss_data_plane_hal_add_dp_ops(struct nss_dp_data_plane_ops *dp_ops); +void nss_data_plane_hal_register(struct nss_ctx_instance *nss_ctx); +void nss_data_plane_hal_unregister(struct nss_ctx_instance *nss_ctx); +void nss_data_plane_hal_set_features(struct nss_dp_data_plane_ctx *dpc); +uint16_t nss_data_plane_hal_get_mtu_sz(uint16_t mtu); +void nss_data_plane_hal_stats_sync(struct nss_data_plane_param *ndpp, struct nss_phys_if_stats *stats); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq50xx.c b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq50xx.c new file mode 100644 index 000000000..980a06b62 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq50xx.c @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "nss_core.h" +#include "nss_data_plane_hal.h" + +static DEFINE_SPINLOCK(nss_data_plane_hal_gmac_stats_lock); + +/* + * nss_data_plane_hal_get_stats() + * Called by nss-dp to get GMAC stats + */ +static void nss_data_plane_hal_get_stats(struct nss_dp_data_plane_ctx *dpc, + struct nss_dp_gmac_stats *stats) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + + spin_lock_bh(&nss_data_plane_hal_gmac_stats_lock); + memcpy(stats, &dp->gmac_stats, sizeof(*stats)); + spin_unlock_bh(&nss_data_plane_hal_gmac_stats_lock); +} + +/* + * nss_data_plane_hal_add_dp_ops() + */ +void nss_data_plane_hal_add_dp_ops(struct nss_dp_data_plane_ops *dp_ops) +{ + dp_ops->get_stats = nss_data_plane_hal_get_stats; +} + +/* + * nss_data_plane_hal_register() + */ +void nss_data_plane_hal_register(struct nss_ctx_instance *nss_ctx) +{ +} + +/* + * nss_data_plane_hal_unregister() + */ +void nss_data_plane_hal_unregister(struct nss_ctx_instance *nss_ctx) +{ +} + +/* + * nss_data_plane_hal_set_features + */ +void nss_data_plane_hal_set_features(struct nss_dp_data_plane_ctx *dpc) +{ + dpc->dev->features |= NSS_DATA_PLANE_SUPPORTED_FEATURES; + dpc->dev->hw_features |= NSS_DATA_PLANE_SUPPORTED_FEATURES; + dpc->dev->wanted_features 
|= NSS_DATA_PLANE_SUPPORTED_FEATURES; + + /* + * Synopsys GMAC does not support checksum offload for QinQ VLANs. + * Hence, we do not advertise checksum offload support for VLANs. + */ + dpc->dev->vlan_features |= NSS_DATA_PLANE_SUPPORTED_FEATURES & + (~(NETIF_F_RXCSUM | NETIF_F_HW_CSUM)); +} + +/* + * nss_data_plane_hal_stats_sync() + */ +void nss_data_plane_hal_stats_sync(struct nss_data_plane_param *ndpp, + struct nss_phys_if_stats *stats) +{ + struct nss_dp_hal_gmac_stats *gmac_stats = &ndpp->gmac_stats.stats; + + spin_lock_bh(&nss_data_plane_hal_gmac_stats_lock); + + gmac_stats->rx_bytes += stats->if_stats.rx_bytes; + gmac_stats->rx_packets += stats->if_stats.rx_packets; + gmac_stats->rx_errors += stats->estats.rx_errors; + gmac_stats->rx_receive_errors += stats->estats.rx_receive_errors; + gmac_stats->rx_descriptor_errors += stats->estats.rx_descriptor_errors; + gmac_stats->rx_late_collision_errors += stats->estats.rx_late_collision_errors; + gmac_stats->rx_dribble_bit_errors += stats->estats.rx_dribble_bit_errors; + gmac_stats->rx_length_errors += stats->estats.rx_length_errors; + gmac_stats->rx_ip_header_errors += stats->estats.rx_ip_header_errors; + gmac_stats->rx_ip_payload_errors += stats->estats.rx_ip_payload_errors; + gmac_stats->rx_no_buffer_errors += stats->estats.rx_no_buffer_errors; + gmac_stats->rx_transport_csum_bypassed += stats->estats.rx_transport_csum_bypassed; + + gmac_stats->tx_bytes += stats->if_stats.tx_bytes; + gmac_stats->tx_packets += stats->if_stats.tx_packets; + gmac_stats->tx_collisions += stats->estats.tx_collisions; + gmac_stats->tx_errors += stats->estats.tx_errors; + gmac_stats->tx_jabber_timeout_errors += stats->estats.tx_jabber_timeout_errors; + gmac_stats->tx_frame_flushed_errors += stats->estats.tx_frame_flushed_errors; + gmac_stats->tx_loss_of_carrier_errors += stats->estats.tx_loss_of_carrier_errors; + gmac_stats->tx_no_carrier_errors += stats->estats.tx_no_carrier_errors; + gmac_stats->tx_late_collision_errors += 
stats->estats.tx_late_collision_errors; + gmac_stats->tx_excessive_collision_errors += stats->estats.tx_excessive_collision_errors; + gmac_stats->tx_excessive_deferral_errors += stats->estats.tx_excessive_deferral_errors; + gmac_stats->tx_underflow_errors += stats->estats.tx_underflow_errors; + gmac_stats->tx_ip_header_errors += stats->estats.tx_ip_header_errors; + gmac_stats->tx_ip_payload_errors += stats->estats.tx_ip_payload_errors; + gmac_stats->tx_dropped += stats->estats.tx_dropped; + + gmac_stats->hw_errs[0] += stats->estats.hw_errs[0]; + gmac_stats->hw_errs[1] += stats->estats.hw_errs[1]; + gmac_stats->hw_errs[2] += stats->estats.hw_errs[2]; + gmac_stats->hw_errs[3] += stats->estats.hw_errs[3]; + gmac_stats->hw_errs[4] += stats->estats.hw_errs[4]; + gmac_stats->hw_errs[5] += stats->estats.hw_errs[5]; + gmac_stats->hw_errs[6] += stats->estats.hw_errs[6]; + gmac_stats->hw_errs[7] += stats->estats.hw_errs[7]; + gmac_stats->hw_errs[8] += stats->estats.hw_errs[8]; + gmac_stats->hw_errs[9] += stats->estats.hw_errs[9]; + gmac_stats->rx_missed += stats->estats.rx_missed; + + gmac_stats->fifo_overflows += stats->estats.fifo_overflows; + gmac_stats->rx_scatter_errors += stats->estats.rx_scatter_errors; + gmac_stats->tx_ts_create_errors += stats->estats.tx_ts_create_errors; + gmac_stats->gmac_total_ticks += stats->estats.gmac_total_ticks; + gmac_stats->gmac_worst_case_ticks += stats->estats.gmac_worst_case_ticks; + gmac_stats->gmac_iterations += stats->estats.gmac_iterations; + gmac_stats->tx_pause_frames += stats->estats.tx_pause_frames; + gmac_stats->mmc_rx_overflow_errors += stats->estats.mmc_rx_overflow_errors; + gmac_stats->mmc_rx_watchdog_timeout_errors += stats->estats.mmc_rx_watchdog_timeout_errors; + gmac_stats->mmc_rx_crc_errors += stats->estats.mmc_rx_crc_errors; + gmac_stats->mmc_rx_ip_header_errors += stats->estats.mmc_rx_ip_header_errors; + gmac_stats->mmc_rx_octets_g += stats->estats.mmc_rx_octets_g; + gmac_stats->mmc_rx_ucast_frames += 
stats->estats.mmc_rx_ucast_frames; + gmac_stats->mmc_rx_bcast_frames += stats->estats.mmc_rx_bcast_frames; + gmac_stats->mmc_rx_mcast_frames += stats->estats.mmc_rx_mcast_frames; + gmac_stats->mmc_rx_undersize += stats->estats.mmc_rx_undersize; + gmac_stats->mmc_rx_oversize += stats->estats.mmc_rx_oversize; + gmac_stats->mmc_rx_jabber += stats->estats.mmc_rx_jabber; + gmac_stats->mmc_rx_octets_gb += stats->estats.mmc_rx_octets_gb; + gmac_stats->mmc_rx_frag_frames_g += stats->estats.mmc_rx_frag_frames_g; + gmac_stats->mmc_tx_octets_g += stats->estats.mmc_tx_octets_g; + gmac_stats->mmc_tx_ucast_frames += stats->estats.mmc_tx_ucast_frames; + gmac_stats->mmc_tx_bcast_frames += stats->estats.mmc_tx_bcast_frames; + gmac_stats->mmc_tx_mcast_frames += stats->estats.mmc_tx_mcast_frames; + gmac_stats->mmc_tx_deferred += stats->estats.mmc_tx_deferred; + gmac_stats->mmc_tx_single_col += stats->estats.mmc_tx_single_col; + gmac_stats->mmc_tx_multiple_col += stats->estats.mmc_tx_multiple_col; + gmac_stats->mmc_tx_octets_gb += stats->estats.mmc_tx_octets_gb; + + spin_unlock_bh(&nss_data_plane_hal_gmac_stats_lock); +} + +/* + * nss_data_plane_hal_get_mtu_sz() + */ +uint16_t nss_data_plane_hal_get_mtu_sz(uint16_t mtu) +{ + /* + * GMACs support 3 Modes + * Normal Mode Payloads upto 1522 Bytes ( 1500 + 14 + 4(Vlan) + 4(CRC)) + * Mini Jumbo Mode Payloads upto 2000 Bytes (1978 + 14 + 4(Vlan) + 4 (CRC)) + * Full Jumbo Mode payloads upto 9022 Bytes (9000 + 14 + 4(Vlan) + 4 (CRC)) + */ + + /* + * The configured MTU value on a GMAC interface should be one of these + * cases. Finding the Needed MTU size that is required for GMAC to + * successfully receive the frame. 
+ */ + if (mtu <= NSS_DP_GMAC_NORMAL_FRAME_MTU) { + return NSS_DP_GMAC_NORMAL_FRAME_MTU; + } + if (mtu <= NSS_DP_GMAC_MINI_JUMBO_FRAME_MTU) { + return NSS_DP_GMAC_MINI_JUMBO_FRAME_MTU; + } + if (mtu <= NSS_DP_GMAC_FULL_JUMBO_FRAME_MTU) { + return NSS_DP_GMAC_FULL_JUMBO_FRAME_MTU; + } + return 0; +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq60xx.c b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq60xx.c new file mode 100644 index 000000000..d74df4cf8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq60xx.c @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "nss_core.h" +#include "nss_data_plane_hal.h" + +/* + * nss_data_plane_hal_vsi_assign() + * Called by nss-dp to assign vsi of a data plane + */ +static int nss_data_plane_hal_vsi_assign(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + + return nss_phys_if_vsi_assign(dp->nss_ctx, vsi, dp->if_num); +} + +/* + * nss_data_plane_hal_vsi_unassign() + * Called by nss-dp to unassign vsi of a data plane + */ +static int nss_data_plane_hal_vsi_unassign(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + + return nss_phys_if_vsi_unassign(dp->nss_ctx, vsi, dp->if_num); +} + +/* + * nss_data_plane_hal_get_stats() + * Called by nss-dp to get GMAC stats + */ +static void nss_data_plane_hal_get_stats(struct nss_dp_data_plane_ctx *dpc, + struct nss_dp_gmac_stats *stats) +{ + /* + * EDMA doesn't send extended statistics. + */ +} + +/* + * nss_data_plane_hal_add_dp_ops() + */ +void nss_data_plane_hal_add_dp_ops(struct nss_dp_data_plane_ops *dp_ops) +{ + dp_ops->vsi_assign = nss_data_plane_hal_vsi_assign; + dp_ops->vsi_unassign = nss_data_plane_hal_vsi_unassign; + dp_ops->get_stats = nss_data_plane_hal_get_stats; +} + +/* + * nss_data_plane_hal_register() + */ +void nss_data_plane_hal_register(struct nss_ctx_instance *nss_ctx) +{ + /* + * Packets with the ptp service code should be delivered to + * PHY driver for timestamping. 
+ */ + nss_cmn_register_service_code(nss_ctx, nss_phy_tstamp_rx_buf, + NSS_PTP_EVENT_SERVICE_CODE, nss_ctx); +} + +/* + * nss_data_plane_hal_unregister() + */ +void nss_data_plane_hal_unregister(struct nss_ctx_instance *nss_ctx) +{ + nss_cmn_unregister_service_code(nss_ctx, nss_phy_tstamp_rx_buf, + NSS_PTP_EVENT_SERVICE_CODE); +} + +/* + * nss_data_plane_hal_set_features + */ +void nss_data_plane_hal_set_features(struct nss_dp_data_plane_ctx *dpc) +{ + dpc->dev->features |= NSS_DATA_PLANE_SUPPORTED_FEATURES; + dpc->dev->hw_features |= NSS_DATA_PLANE_SUPPORTED_FEATURES; + dpc->dev->vlan_features |= NSS_DATA_PLANE_SUPPORTED_FEATURES; + dpc->dev->wanted_features |= NSS_DATA_PLANE_SUPPORTED_FEATURES; +} + +/* + * nss_data_plane_hal_stats_sync() + */ +void nss_data_plane_hal_stats_sync(struct nss_data_plane_param *ndpp, + struct nss_phys_if_stats *stats) +{ + /* + * EDMA does not pass sync interface stats through phys_if_stats + */ +} + +/* + * nss_data_plane_hal_get_mtu_sz() + */ +uint16_t nss_data_plane_hal_get_mtu_sz(uint16_t mtu) +{ + /* + * Reserve space for preheader + */ + return mtu + NSS_DP_PREHEADER_SIZE; +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq807x.c b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq807x.c new file mode 100644 index 000000000..82fe2b0a1 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/hal/nss_ipq807x.c @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "nss_core.h" +#include "nss_data_plane_hal.h" + +/* + * nss_data_plane_hal_vsi_assign() + * Called by nss-dp to assign vsi of a data plane + */ +static int nss_data_plane_hal_vsi_assign(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + + return nss_phys_if_vsi_assign(dp->nss_ctx, vsi, dp->if_num); +} + +/* + * nss_data_plane_hal_vsi_unassign() + * Called by nss-dp to unassign vsi of a data plane + */ +static int nss_data_plane_hal_vsi_unassign(struct nss_dp_data_plane_ctx *dpc, uint32_t vsi) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + + return nss_phys_if_vsi_unassign(dp->nss_ctx, vsi, dp->if_num); +} + +/* + * nss_data_plane_hal_get_stats() + * Called by nss-dp to get GMAC stats + */ +static void nss_data_plane_hal_get_stats(struct nss_dp_data_plane_ctx *dpc, + struct nss_dp_gmac_stats *stats) +{ + /* + * EDMA doesn't send extended statistics. + */ +} + +/* + * nss_data_plane_hal_add_dp_ops() + */ +void nss_data_plane_hal_add_dp_ops(struct nss_dp_data_plane_ops *dp_ops) +{ + dp_ops->vsi_assign = nss_data_plane_hal_vsi_assign; + dp_ops->vsi_unassign = nss_data_plane_hal_vsi_unassign; + dp_ops->get_stats = nss_data_plane_hal_get_stats; +} + +/* + * nss_data_plane_hal_register() + */ +void nss_data_plane_hal_register(struct nss_ctx_instance *nss_ctx) +{ + /* + * Packets with the ptp service code should be delivered to + * PHY driver for timestamping. 
+ */ + nss_cmn_register_service_code(nss_ctx, nss_phy_tstamp_rx_buf, + NSS_PTP_EVENT_SERVICE_CODE, nss_ctx); +} + +/* + * nss_data_plane_hal_unregister() + */ +void nss_data_plane_hal_unregister(struct nss_ctx_instance *nss_ctx) +{ + nss_cmn_unregister_service_code(nss_ctx, nss_phy_tstamp_rx_buf, + NSS_PTP_EVENT_SERVICE_CODE); +} + +/* + * nss_data_plane_hal_set_features + */ +void nss_data_plane_hal_set_features(struct nss_dp_data_plane_ctx *dpc) +{ + dpc->dev->features |= NSS_DATA_PLANE_SUPPORTED_FEATURES; + dpc->dev->hw_features |= NSS_DATA_PLANE_SUPPORTED_FEATURES; + dpc->dev->vlan_features |= NSS_DATA_PLANE_SUPPORTED_FEATURES; + dpc->dev->wanted_features |= NSS_DATA_PLANE_SUPPORTED_FEATURES; +} + +/* + * nss_data_plane_hal_stats_sync() + */ +void nss_data_plane_hal_stats_sync(struct nss_data_plane_param *ndpp, + struct nss_phys_if_stats *stats) +{ + /* + * EDMA does not pass sync interface stats through phys_if_stats + */ +} + +/* + * nss_data_plane_hal_get_mtu_sz() + */ +uint16_t nss_data_plane_hal_get_mtu_sz(uint16_t mtu) +{ + /* + * Reserve space for preheader + */ + return mtu + NSS_DP_PREHEADER_SIZE; +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/include/nss_data_plane.h b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/include/nss_data_plane.h new file mode 100644 index 000000000..503a20a40 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/include/nss_data_plane.h @@ -0,0 +1,60 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2017,2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * nss_data_plane + * Data plane used for communication between qca-nss-drv & data plane host + */ + +#ifndef __NSS_DATA_PLANE_H +#define __NSS_DATA_PLANE_H + +#include +#include "nss_phys_if.h" + +/* + * nss_data_plane_schedule_registration() + * Called from nss_init to schedule a work to do data_plane register to data plane host driver + */ +bool nss_data_plane_schedule_registration(void); + +/* + * nss_data_plane_init_delay_work() + * Initialize data_plane workqueue + */ +int nss_data_plane_init_delay_work(void); + +/* + * nss_data_plane_destroy_delay_work() + * Destroy data_plane workqueue + */ +void nss_data_plane_destroy_delay_work(void); + +/* + * nss_data_plane_ops defines the API required to support multiple data plane targets + */ +struct nss_data_plane_ops { + void (*data_plane_register)(struct nss_ctx_instance *nss_ctx); + void (*data_plane_unregister)(void); + void (*data_plane_stats_sync)(struct nss_phys_if_stats *stats, uint16_t interface); + uint16_t (*data_plane_get_mtu_sz)(uint16_t max_mtu); +}; + +extern struct nss_data_plane_ops nss_data_plane_gmac_ops; +extern struct nss_data_plane_ops nss_data_plane_ops; + +extern int nss_skip_nw_process; +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane.c b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane.c new file mode 100644 index 000000000..16b7cbbc6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane.c @@ -0,0 +1,386 @@ +/* + ************************************************************************** + * Copyright (c) 
2016-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_data_plane.h" +#include "nss_core.h" +#include "nss_tx_rx_common.h" +#include "nss_data_plane_hal.h" + +/* + * nss_data_plane_param + */ +struct nss_data_plane_param nss_data_plane_params[NSS_DP_MAX_INTERFACES]; + +/* + * __nss_data_plane_init() + */ +static int __nss_data_plane_init(struct nss_dp_data_plane_ctx *dpc) +{ + struct net_device *netdev = dpc->dev; + netdev->needed_headroom += 32; + return NSS_DP_SUCCESS; +} + +/* + * __nss_data_plane_open() + * Called by nss-dp to notify open to nss-fw + */ +static int __nss_data_plane_open(struct nss_dp_data_plane_ctx *dpc, uint32_t tx_desc_ring, uint32_t rx_desc_ring, uint32_t mode) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + + if (dp->notify_open) { + return NSS_DP_SUCCESS; + } + + if (nss_phys_if_open(dp->nss_ctx, tx_desc_ring, rx_desc_ring, mode, dp->if_num, dp->bypass_nw_process) == NSS_TX_SUCCESS) { + dp->notify_open = 1; + return NSS_DP_SUCCESS; + } + return NSS_DP_FAILURE; +} + +/* + * __nss_data_plane_close() + * Called by nss-dp to notify close to nss-fw + */ +static int __nss_data_plane_close(struct 
nss_dp_data_plane_ctx *dpc) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + + if (!dp->notify_open) { + return NSS_DP_SUCCESS; + } + + if (nss_phys_if_close(dp->nss_ctx, dp->if_num) == NSS_TX_SUCCESS) { + dp->notify_open = 0; + return NSS_DP_SUCCESS; + } + return NSS_DP_FAILURE; +} + +/* + * __nss_data_plane_link_state() + * Called by nss-dp to notify link state change to nss-fw + */ +static int __nss_data_plane_link_state(struct nss_dp_data_plane_ctx *dpc, uint32_t link_state) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + + return nss_phys_if_link_state(dp->nss_ctx, link_state, dp->if_num); +} + +/* + * __nss_data_plane_mac_addr() + * Called by nss-dp to set mac address + */ +static int __nss_data_plane_mac_addr(struct nss_dp_data_plane_ctx *dpc, uint8_t *addr) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + + return nss_phys_if_mac_addr(dp->nss_ctx, addr, dp->if_num); +} + +/* + * __nss_data_plane_change_mtu() + * Called by nss-dp to change mtu of a data plane + */ +static int __nss_data_plane_change_mtu(struct nss_dp_data_plane_ctx *dpc, uint32_t mtu) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + + if (mtu > NSS_DP_MAX_MTU_SIZE) { + nss_warning("%px: MTU exceeds MAX size %d\n", dp, mtu); + return NSS_DP_FAILURE; + } + + return nss_phys_if_change_mtu(dp->nss_ctx, mtu, dp->if_num); +} + +/* + * __nss_data_plane_pause_on_off() + * Called by nss-dp to enable/disable pause frames + */ +static int __nss_data_plane_pause_on_off(struct nss_dp_data_plane_ctx *dpc, uint32_t pause_on) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + + return nss_phys_if_pause_on_off(dp->nss_ctx, pause_on, dp->if_num); +} + +#ifdef NSS_DRV_QRFS_ENABLE +/* + * __nss_data_plane_rx_flow_steer() + * Called by nss-dp to set flow rule of a data plane + */ +static int __nss_data_plane_rx_flow_steer(struct nss_dp_data_plane_ctx *dpc, struct sk_buff 
*skb, + uint32_t cpu, bool is_add) +{ + if (is_add) { + return nss_qrfs_set_flow_rule(skb, cpu, NSS_QRFS_MSG_FLOW_ADD); + } + + return nss_qrfs_set_flow_rule(skb, cpu, NSS_QRFS_MSG_FLOW_DELETE); +} +#endif + +/* + * __nss_data_plane_deinit() + * Place holder for nss-dp ops to free NSS data plane resources + */ +static int __nss_data_plane_deinit(struct nss_dp_data_plane_ctx *dpc) +{ + /* + * TODO: Implement free up of NSS data plane resources + */ + return NSS_TX_SUCCESS; +} + +/* + * __nss_data_plane_buf() + * Called by nss-dp to pass a sk_buff for xmit + */ +static netdev_tx_t __nss_data_plane_buf(struct nss_dp_data_plane_ctx *dpc, struct sk_buff *skb) +{ + struct nss_data_plane_param *dp = (struct nss_data_plane_param *)dpc; + int extra_head = dpc->dev->needed_headroom - skb_headroom(skb); + int extra_tail = 0; + nss_tx_status_t status; + struct net_device *dev = dpc->dev; + + if (skb->len < ETH_HLEN) { + nss_warning("skb->len ( %u ) < ETH_HLEN ( %u ) \n", skb->len, ETH_HLEN); + goto drop; + } + + if (skb->len > NSS_DP_MAX_PACKET_LEN) { + nss_warning("skb->len ( %u ) > Maximum packet length ( %u ) \n", skb->len, NSS_DP_MAX_PACKET_LEN); + goto drop; + } + + if (skb_cloned(skb) || extra_head > 0) { + /* + * If it is a clone and headroom is already enough, + * We just make a copy and clear the clone flag. + */ + if (extra_head <= 0) + extra_head = extra_tail = 0; + /* + * If tailroom is enough to accommodate the added headroom, + * then allocate a buffer of same size and do relocations. + * It might help kmalloc_reserve() not double the size. 
+ */ + if (skb->end - skb->tail >= extra_head) + extra_tail = -extra_head; + + if (pskb_expand_head(skb, extra_head, extra_tail, GFP_ATOMIC)) { + nss_warning("%px: Unable to expand skb for headroom\n", dp); + goto drop; + } + } + + status = nss_phys_if_buf(dp->nss_ctx, skb, dp->if_num); + if (likely(status == NSS_TX_SUCCESS)) { + return NETDEV_TX_OK; + } else if (status == NSS_TX_FAILURE_QUEUE) { + return NETDEV_TX_BUSY; + } + +drop: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + +/* + * __nss_data_plane_set_features() + * Called by nss-dp to allow data plane to modify the set of features it supports + */ +static void __nss_data_plane_set_features(struct nss_dp_data_plane_ctx *dpc) +{ + nss_data_plane_hal_set_features(dpc); +} + +/* + * nss offload data plane ops + */ +static struct nss_dp_data_plane_ops dp_ops = { + .init = __nss_data_plane_init, + .open = __nss_data_plane_open, + .close = __nss_data_plane_close, + .link_state = __nss_data_plane_link_state, + .mac_addr = __nss_data_plane_mac_addr, + .change_mtu = __nss_data_plane_change_mtu, + .xmit = __nss_data_plane_buf, + .set_features = __nss_data_plane_set_features, + .pause_on_off = __nss_data_plane_pause_on_off, +#ifdef NSS_DRV_QRFS_ENABLE + .rx_flow_steer = __nss_data_plane_rx_flow_steer, +#endif + .deinit = __nss_data_plane_deinit, +}; + +/* + * nss_data_plane_register_to_nss_dp() + */ +static bool nss_data_plane_register_to_nss_dp(struct nss_ctx_instance *nss_ctx, int if_num) +{ + struct nss_data_plane_param *ndpp = &nss_data_plane_params[if_num]; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct net_device *netdev; + bool is_open; + int core; + + netdev = nss_dp_get_netdev_by_nss_if_num(if_num); + if (!netdev) { + nss_info("%px: Platform don't have data plane%d enabled, \ + don't bring up nss_phys_if and don't register to nss-dp\n", + nss_ctx, if_num); + return false; + } + + is_open = nss_dp_is_in_open_state(netdev); + ndpp->dpc.dev = netdev; + 
ndpp->nss_ctx = nss_ctx; + ndpp->if_num = if_num; + ndpp->notify_open = 0; + ndpp->features = 0; + + /* + * Add data plane ops applicable to this SoC. + */ + nss_data_plane_hal_add_dp_ops(&dp_ops); + + /* + * Check if NSS NW processing to be bypassed for this data plane + */ + if (nss_skip_nw_process) { + ndpp->bypass_nw_process = 1; + } else { + ndpp->bypass_nw_process = 0; + } + + if (nss_dp_override_data_plane(netdev, &dp_ops, (struct nss_dp_data_plane_ctx *)ndpp) != NSS_DP_SUCCESS) { + nss_info("%px: Override nss-dp data plane for port %dfailed\n", nss_ctx, if_num); + return false; + } + + /* + * Setup the receive callback so that data pkts received form NSS-FW will + * be redirected to the nss-dp driver as we are overriding the data plane + */ + nss_top->phys_if_handler_id[if_num] = nss_ctx->id; + nss_phys_if_register_handler(nss_ctx, if_num); + + /* + * Packets recieved on physical interface can be exceptioned to HLOS + * from any NSS core so we need to register data plane for all + */ + for (core = 0; core < nss_top->num_nss; core++) { + nss_core_register_subsys_dp(&nss_top->nss[core], if_num, nss_dp_receive, NULL, NULL, netdev, ndpp->features); + } + + /* + * Now we are registered and our side is ready, if the data plane was opened, ask it to start again + */ + if (is_open) { + nss_dp_start_data_plane(netdev, (struct nss_dp_data_plane_ctx *)ndpp); + } + return true; +} + +/* + * nss_data_plane_unregister_from_nss_dp() + */ +static void nss_data_plane_unregister_from_nss_dp(int if_num) +{ + /* + * Do any SoC specific un-registrations. 
+ */ + nss_data_plane_hal_unregister(nss_data_plane_params[if_num].nss_ctx); + + nss_dp_restore_data_plane(nss_data_plane_params[if_num].dpc.dev); + nss_data_plane_params[if_num].dpc.dev = NULL; + nss_data_plane_params[if_num].nss_ctx = NULL; + nss_data_plane_params[if_num].if_num = 0; + nss_data_plane_params[if_num].notify_open = 0; + nss_data_plane_params[if_num].bypass_nw_process = 0; +} + +/* + * __nss_data_plane_register() + */ +static void __nss_data_plane_register(struct nss_ctx_instance *nss_ctx) +{ + int i; + + for (i = NSS_DP_START_IFNUM; i < NSS_DP_MAX_INTERFACES; i++) { + if (!nss_data_plane_register_to_nss_dp(nss_ctx, i)) { + nss_warning("%px: Register data plane failed for data plane %d\n", nss_ctx, i); + } else { + nss_info("%px: Register data plan to data plane %d success\n", nss_ctx, i); + } + } + + /* + * Do any SoC specific registrations. + */ + nss_data_plane_hal_register(nss_ctx); +} + +/* + * __nss_data_plane_unregister() + */ +static void __nss_data_plane_unregister(void) +{ + int i, core; + + for (core = 0; core < nss_top_main.num_nss; core++) { + for (i = NSS_DP_START_IFNUM; i < NSS_DP_MAX_INTERFACES; i++) { + if (nss_top_main.nss[core].subsys_dp_register[i].ndev) { + nss_data_plane_unregister_from_nss_dp(i); + nss_core_unregister_subsys_dp(&nss_top_main.nss[core], i); + } + } + } +} + +/* + * __nss_data_plane_stats_sync() + */ +static void __nss_data_plane_stats_sync(struct nss_phys_if_stats *stats, uint16_t interface) +{ + nss_data_plane_hal_stats_sync(&nss_data_plane_params[interface], stats); +} + +/* + * __nss_data_plane_get_mtu_sz() + */ +static uint16_t __nss_data_plane_get_mtu_sz(uint16_t mtu) +{ + return nss_data_plane_hal_get_mtu_sz(mtu); +} + +/* + * nss_data_plane_ops + */ +struct nss_data_plane_ops nss_data_plane_ops = { + .data_plane_register = &__nss_data_plane_register, + .data_plane_unregister = &__nss_data_plane_unregister, + .data_plane_stats_sync = &__nss_data_plane_stats_sync, + .data_plane_get_mtu_sz = 
&__nss_data_plane_get_mtu_sz, +}; diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane_common.c b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane_common.c new file mode 100644 index 000000000..4ffaa9ce4 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane_common.c @@ -0,0 +1,84 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2016,2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_data_plane.h" +#include "nss_core.h" + +static struct delayed_work nss_data_plane_work; +static struct workqueue_struct *nss_data_plane_workqueue; + +extern bool pn_mq_en; +extern uint16_t pn_qlimits[NSS_MAX_NUM_PRI]; + +/* + * nss_data_plane_work_function() + * Work function that gets queued to "install" the data plane overlays + */ +static void nss_data_plane_work_function(struct work_struct *work) +{ + int ret; + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[NSS_CORE_0]; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + /* + * The queue config command is a synchronous command and needs to be issued + * in process context, before NSS data plane switch. + */ + ret = nss_n2h_update_queue_config_sync(nss_ctx, pn_mq_en, pn_qlimits); + if (ret != NSS_TX_SUCCESS) { + nss_warning("Failed to send pnode queue config to core 0\n"); + } + + nss_top->data_plane_ops->data_plane_register(nss_ctx); +} + +/* + * nss_data_plane_schedule_registration() + * Called from nss_init to schedule a work to do data_plane register to data plane host + */ +bool nss_data_plane_schedule_registration(void) +{ + if (!queue_work_on(1, nss_data_plane_workqueue, &nss_data_plane_work.work)) { + nss_warning("Failed to register data plane workqueue on core 1\n"); + return false; + } + + nss_info("Register data plane workqueue on core 1\n"); + return true; +} + +/* + * nss_data_plane_init_delay_work() + */ +int nss_data_plane_init_delay_work(void) +{ + nss_data_plane_workqueue = create_singlethread_workqueue("nss_data_plane_workqueue"); + if (!nss_data_plane_workqueue) { + nss_warning("Can't allocate workqueue\n"); + return -ENOMEM; + } + + INIT_DELAYED_WORK(&nss_data_plane_work, nss_data_plane_work_function); + return 0; +} + +/* + * nss_data_plane_destroy_delay_work() + */ +void nss_data_plane_destroy_delay_work(void) +{ + destroy_workqueue(nss_data_plane_workqueue); +} diff --git 
a/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane_gmac.c b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane_gmac.c new file mode 100644 index 000000000..42e10a91c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_data_plane/nss_data_plane_gmac.c @@ -0,0 +1,396 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_data_plane.h" +#include "nss_phys_if.h" +#include "nss_core.h" +#include "nss_tx_rx_common.h" +#include + +#define NSS_DP_GMAC_SUPPORTED_FEATURES (NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_FRAGLIST | (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) +#define NSS_DATA_PLANE_GMAC_MAX_INTERFACES 4 + +static DEFINE_SPINLOCK(nss_data_plane_gmac_stats_lock); + +/* + * nss_data_plane_gmac_param + * Holds the information that is going to pass to data plane host as a cookie + */ +struct nss_data_plane_gmac_param { + int if_num; /* physical interface number */ + struct net_device *dev; /* net_device instance of this data plane */ + struct nss_ctx_instance *nss_ctx; /* which nss core */ + struct nss_gmac_stats gmac_stats; /* gmac stats */ + int notify_open; /* This data plane interface has been opened or not */ + uint32_t features; /* skb types supported by this interface */ + uint32_t bypass_nw_process; /* Do we want to bypass NW processing in NSS for this data plane? 
*/ +} nss_data_plane_gmac_params[NSS_DATA_PLANE_GMAC_MAX_INTERFACES]; + +/* + * __nss_data_plane_open() + * Called by gmac to notify open to nss-fw + */ +static int __nss_data_plane_open(void *arg, uint32_t tx_desc_ring, uint32_t rx_desc_ring, uint32_t mode) +{ + struct nss_data_plane_gmac_param *dp = (struct nss_data_plane_gmac_param *)arg; + + if (dp->notify_open) { + return NSS_GMAC_SUCCESS; + } + if (nss_phys_if_open(dp->nss_ctx, tx_desc_ring, rx_desc_ring, mode, dp->if_num, dp->bypass_nw_process) == NSS_TX_SUCCESS) { + dp->notify_open = 1; + return NSS_GMAC_SUCCESS; + } + return NSS_GMAC_FAILURE; +} + +/* + * __nss_data_plane_close() + * Called by gmac to notify close to nss-fw + */ +static int __nss_data_plane_close(void *arg) +{ + /* + * We don't actually do synopsys gmac close in fw, just return success + */ + return NSS_GMAC_SUCCESS; +} + +/* + * __nss_data_plane_link_state() + * Called by gmac to notify link state change to nss-fw + */ +static int __nss_data_plane_link_state(void *arg, uint32_t link_state) +{ + struct nss_data_plane_gmac_param *dp = (struct nss_data_plane_gmac_param *)arg; + + return nss_phys_if_link_state(dp->nss_ctx, link_state, dp->if_num); +} + +/* + * __nss_data_plane_mac_addr() + * Called by gmac to set mac address + */ +static int __nss_data_plane_mac_addr(void *arg, uint8_t *addr) +{ + struct nss_data_plane_gmac_param *dp = (struct nss_data_plane_gmac_param *)arg; + + return nss_phys_if_mac_addr(dp->nss_ctx, addr, dp->if_num); +} + +/* + * __nss_data_plane_change_mtu() + * Called by gmac to change mtu of a gmac + */ +static int __nss_data_plane_change_mtu(void *arg, uint32_t mtu) +{ + struct nss_data_plane_gmac_param *dp = (struct nss_data_plane_gmac_param *)arg; + + /* + * MTU size check is already done in nss-gmac driver, just pass to phys_if + */ + return nss_phys_if_change_mtu(dp->nss_ctx, mtu, dp->if_num); +} + +/* + * __nss_data_plane_pause_on_off() + * Called by gmac to enable/disable pause frames + */ +static int 
__nss_data_plane_pause_on_off(void *arg, uint32_t pause_on) +{ + struct nss_data_plane_gmac_param *dp = (struct nss_data_plane_gmac_param *)arg; + + return nss_phys_if_pause_on_off(dp->nss_ctx, pause_on, dp->if_num); +} + +/* + * __nss_data_plane_buf() + * Called by gmac to pass a sk_buff for xmit + */ +static int __nss_data_plane_buf(void *arg, struct sk_buff *os_buf) +{ + struct nss_data_plane_gmac_param *dp = (struct nss_data_plane_gmac_param *)arg; + + return nss_phys_if_buf(dp->nss_ctx, os_buf, dp->if_num); +} + +/* + * __nss_data_plane_set_features() + * Called by gmac to allow data plane to modify the set of features it supports + */ +static void __nss_data_plane_set_features(struct net_device *netdev) +{ + netdev->features |= NSS_DP_GMAC_SUPPORTED_FEATURES; + netdev->hw_features |= NSS_DP_GMAC_SUPPORTED_FEATURES; + netdev->vlan_features |= NSS_DP_GMAC_SUPPORTED_FEATURES; + netdev->wanted_features |= NSS_DP_GMAC_SUPPORTED_FEATURES; +} + +/* + * __nss_data_plane_get_stats() + */ +static void __nss_data_plane_get_stats(void *arg, struct nss_gmac_stats *stats) +{ + struct nss_data_plane_gmac_param *dp = (struct nss_data_plane_gmac_param *)arg; + + spin_lock_bh(&nss_data_plane_gmac_stats_lock); + memcpy(stats, &dp->gmac_stats, sizeof(*stats)); + spin_unlock_bh(&nss_data_plane_gmac_stats_lock); +} + +/* + * nss offload data plane ops + */ +static struct nss_gmac_data_plane_ops dp_ops = { + .open = __nss_data_plane_open, + .close = __nss_data_plane_close, + .link_state = __nss_data_plane_link_state, + .mac_addr = __nss_data_plane_mac_addr, + .change_mtu = __nss_data_plane_change_mtu, + .xmit = __nss_data_plane_buf, + .set_features = __nss_data_plane_set_features, + .pause_on_off = __nss_data_plane_pause_on_off, + .get_stats = __nss_data_plane_get_stats, +}; + +/* + * nss_data_plane_register_to_nss_gmac() + */ +static bool nss_data_plane_register_to_nss_gmac(struct nss_ctx_instance *nss_ctx, int if_num) +{ + struct nss_data_plane_gmac_param *ndpp = 
&nss_data_plane_gmac_params[if_num]; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct net_device *netdev; + bool is_open; + int core; + + netdev = nss_gmac_get_netdev_by_macid(if_num); + if (!netdev) { + nss_info("Platform don't have gmac%d enabled, don't bring up nss_phys_if and don't register to nss-gmac", if_num); + return false; + } + + is_open = nss_gmac_is_in_open_state(netdev); + ndpp->dev = netdev; + ndpp->nss_ctx = nss_ctx; + ndpp->if_num = if_num; + ndpp->notify_open = 0; + ndpp->features = 0; + + /* + * Check if NSS NW processing to be bypassed for this GMAC + */ + if (nss_skip_nw_process) { + ndpp->bypass_nw_process = 1; + } else { + ndpp->bypass_nw_process = 0; + } + + if (nss_gmac_override_data_plane(netdev, &dp_ops, ndpp) != NSS_GMAC_SUCCESS) { + nss_info("Override nss-gmac data plane failed\n"); + return false; + } + + /* + * Setup the receive callback so that data pkts received form NSS-FW will + * be redirected to the gmac driver as we are overriding the data plane + */ + nss_top->phys_if_handler_id[if_num] = nss_ctx->id; + nss_phys_if_register_handler(nss_ctx, if_num); + + /* + * Packets recieved on physical interface can be exceptioned to HLOS + * from any NSS core so we need to register data plane for all + */ + for (core = 0; core < nss_top->num_nss; core++) { + nss_core_register_subsys_dp(&nss_top->nss[core], if_num, nss_gmac_receive, NULL, NULL, netdev, ndpp->features); + } + + /* + * Now we are registered and our side is ready, if the gmac was opened, ask it to start again + */ + if (is_open) { + nss_gmac_start_data_plane(netdev, ndpp); + } + return true; +} + +/* + * nss_data_plane_unregister_from_nss_gmac() + */ +static void nss_data_plane_unregister_from_nss_gmac(int if_num) +{ + nss_gmac_restore_data_plane(nss_data_plane_gmac_params[if_num].dev); + nss_data_plane_gmac_params[if_num].dev = NULL; + nss_data_plane_gmac_params[if_num].nss_ctx = NULL; + nss_data_plane_gmac_params[if_num].if_num = 0; + 
nss_data_plane_gmac_params[if_num].notify_open = 0; + nss_data_plane_gmac_params[if_num].bypass_nw_process = 0; +} + +/* + * __nss_data_plane_register() + */ +static void __nss_data_plane_register(struct nss_ctx_instance *nss_ctx) +{ + int i; + + for (i = 0; i < NSS_DATA_PLANE_GMAC_MAX_INTERFACES; i++) { + if (!nss_data_plane_register_to_nss_gmac(nss_ctx, i)) { + nss_warning("%px: Register data plane failed for gmac:%d\n", nss_ctx, i); + } else { + nss_info("%px: Register data plan to gmac:%d success\n", nss_ctx, i); + } + } +} + +/* + * __nss_data_plane_unregister() + */ +static void __nss_data_plane_unregister(void) +{ + int i, core; + + for (core = 0; core < nss_top_main.num_nss; core++) { + for (i = 0; i < NSS_DATA_PLANE_GMAC_MAX_INTERFACES; i++) { + if (nss_top_main.nss[core].subsys_dp_register[i].ndev) { + nss_data_plane_unregister_from_nss_gmac(i); + nss_core_unregister_subsys_dp(&nss_top_main.nss[core], i); + } + } + } +} + +/* + * __nss_data_plane_stats_sync() + * Handle the syncing of gmac data plane stats. 
+ */ +static void __nss_data_plane_stats_sync(struct nss_phys_if_stats *stats, uint16_t interface) +{ + struct nss_gmac_stats *gmac_stats = &nss_data_plane_gmac_params[interface].gmac_stats; + + spin_lock_bh(&nss_data_plane_gmac_stats_lock); + gmac_stats->rx_bytes += stats->if_stats.rx_bytes; + gmac_stats->rx_packets += stats->if_stats.rx_packets; + gmac_stats->rx_errors += stats->estats.rx_errors; + gmac_stats->rx_receive_errors += stats->estats.rx_receive_errors; + gmac_stats->rx_descriptor_errors += stats->estats.rx_descriptor_errors; + gmac_stats->rx_late_collision_errors += stats->estats.rx_late_collision_errors; + gmac_stats->rx_dribble_bit_errors += stats->estats.rx_dribble_bit_errors; + gmac_stats->rx_length_errors += stats->estats.rx_length_errors; + gmac_stats->rx_ip_header_errors += stats->estats.rx_ip_header_errors; + gmac_stats->rx_ip_payload_errors += stats->estats.rx_ip_payload_errors; + gmac_stats->rx_no_buffer_errors += stats->estats.rx_no_buffer_errors; + gmac_stats->rx_transport_csum_bypassed += stats->estats.rx_transport_csum_bypassed; + + gmac_stats->tx_bytes += stats->if_stats.tx_bytes; + gmac_stats->tx_packets += stats->if_stats.tx_packets; + gmac_stats->tx_collisions += stats->estats.tx_collisions; + gmac_stats->tx_errors += stats->estats.tx_errors; + gmac_stats->tx_jabber_timeout_errors += stats->estats.tx_jabber_timeout_errors; + gmac_stats->tx_frame_flushed_errors += stats->estats.tx_frame_flushed_errors; + gmac_stats->tx_loss_of_carrier_errors += stats->estats.tx_loss_of_carrier_errors; + gmac_stats->tx_no_carrier_errors += stats->estats.tx_no_carrier_errors; + gmac_stats->tx_late_collision_errors += stats->estats.tx_late_collision_errors; + gmac_stats->tx_excessive_collision_errors += stats->estats.tx_excessive_collision_errors; + gmac_stats->tx_excessive_deferral_errors += stats->estats.tx_excessive_deferral_errors; + gmac_stats->tx_underflow_errors += stats->estats.tx_underflow_errors; + gmac_stats->tx_ip_header_errors += 
stats->estats.tx_ip_header_errors; + gmac_stats->tx_ip_payload_errors += stats->estats.tx_ip_payload_errors; + gmac_stats->tx_dropped += stats->estats.tx_dropped; + + gmac_stats->hw_errs[0] += stats->estats.hw_errs[0]; + gmac_stats->hw_errs[1] += stats->estats.hw_errs[1]; + gmac_stats->hw_errs[2] += stats->estats.hw_errs[2]; + gmac_stats->hw_errs[3] += stats->estats.hw_errs[3]; + gmac_stats->hw_errs[4] += stats->estats.hw_errs[4]; + gmac_stats->hw_errs[5] += stats->estats.hw_errs[5]; + gmac_stats->hw_errs[6] += stats->estats.hw_errs[6]; + gmac_stats->hw_errs[7] += stats->estats.hw_errs[7]; + gmac_stats->hw_errs[8] += stats->estats.hw_errs[8]; + gmac_stats->hw_errs[9] += stats->estats.hw_errs[9]; + gmac_stats->rx_missed += stats->estats.rx_missed; + + gmac_stats->fifo_overflows += stats->estats.fifo_overflows; + gmac_stats->rx_scatter_errors += stats->estats.rx_scatter_errors; + gmac_stats->tx_ts_create_errors += stats->estats.tx_ts_create_errors; + gmac_stats->gmac_total_ticks += stats->estats.gmac_total_ticks; + gmac_stats->gmac_worst_case_ticks += stats->estats.gmac_worst_case_ticks; + gmac_stats->gmac_iterations += stats->estats.gmac_iterations; + gmac_stats->tx_pause_frames += stats->estats.tx_pause_frames; + gmac_stats->mmc_rx_overflow_errors += stats->estats.mmc_rx_overflow_errors; + gmac_stats->mmc_rx_watchdog_timeout_errors += stats->estats.mmc_rx_watchdog_timeout_errors; + gmac_stats->mmc_rx_crc_errors += stats->estats.mmc_rx_crc_errors; + gmac_stats->mmc_rx_ip_header_errors += stats->estats.mmc_rx_ip_header_errors; + gmac_stats->mmc_rx_octets_g += stats->estats.mmc_rx_octets_g; + gmac_stats->mmc_rx_ucast_frames += stats->estats.mmc_rx_ucast_frames; + gmac_stats->mmc_rx_bcast_frames += stats->estats.mmc_rx_bcast_frames; + gmac_stats->mmc_rx_mcast_frames += stats->estats.mmc_rx_mcast_frames; + gmac_stats->mmc_rx_undersize += stats->estats.mmc_rx_undersize; + gmac_stats->mmc_rx_oversize += stats->estats.mmc_rx_oversize; + gmac_stats->mmc_rx_jabber += 
stats->estats.mmc_rx_jabber; + gmac_stats->mmc_rx_octets_gb += stats->estats.mmc_rx_octets_gb; + gmac_stats->mmc_rx_frag_frames_g += stats->estats.mmc_rx_frag_frames_g; + gmac_stats->mmc_tx_octets_g += stats->estats.mmc_tx_octets_g; + gmac_stats->mmc_tx_ucast_frames += stats->estats.mmc_tx_ucast_frames; + gmac_stats->mmc_tx_bcast_frames += stats->estats.mmc_tx_bcast_frames; + gmac_stats->mmc_tx_mcast_frames += stats->estats.mmc_tx_mcast_frames; + gmac_stats->mmc_tx_deferred += stats->estats.mmc_tx_deferred; + gmac_stats->mmc_tx_single_col += stats->estats.mmc_tx_single_col; + gmac_stats->mmc_tx_multiple_col += stats->estats.mmc_tx_multiple_col; + gmac_stats->mmc_tx_octets_gb += stats->estats.mmc_tx_octets_gb; + + spin_unlock_bh(&nss_data_plane_gmac_stats_lock); +} + +/* + * __nss_data_plane_get_mtu_sz() + */ +static uint16_t __nss_data_plane_get_mtu_sz(uint16_t max_mtu) +{ + /* + * GMACs support 3 Modes + * Normal Mode Payloads upto 1522 Bytes ( 1500 + 14 + 4(Vlan) + 4(CRC)) + * Mini Jumbo Mode Payloads upto 2000 Bytes (1978 + 14 + 4(Vlan) + 4 (CRC)) + * Full Jumbo Mode payloads upto 9622 Bytes (9600 + 14 + 4(Vlan) + 4 (CRC)) + */ + + /* + * The configured MTU value on a gmac interface should be one of these + * cases. Finding the Needed MTU size that is required for GMAC to + * successfully receive the frame. 
+ */ + if (max_mtu <= NSS_GMAC_NORMAL_FRAME_MTU) { + return NSS_GMAC_NORMAL_FRAME_MTU; + } + if (max_mtu <= NSS_GMAC_MINI_JUMBO_FRAME_MTU) { + return NSS_GMAC_MINI_JUMBO_FRAME_MTU; + } + if (max_mtu <= NSS_GMAC_FULL_JUMBO_FRAME_MTU) { + return NSS_GMAC_FULL_JUMBO_FRAME_MTU; + } + return 0; +} + +/* + * nss_data_plane_gmac_ops + */ +struct nss_data_plane_ops nss_data_plane_gmac_ops = { + .data_plane_register = &__nss_data_plane_register, + .data_plane_unregister = &__nss_data_plane_unregister, + .data_plane_stats_sync = &__nss_data_plane_stats_sync, + .data_plane_get_mtu_sz = &__nss_data_plane_get_mtu_sz, +}; diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dma.c b/feeds/ipq807x/qca-nss-drv/src/nss_dma.c new file mode 100755 index 000000000..e88e7c668 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dma.c @@ -0,0 +1,501 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_dma.c + * NSS DMA APIs + */ + +#include +#include "nss_dma_stats.h" +#include "nss_dma_log.h" +#include "nss_dma_strings.h" + +/* + * Test configuration value + */ +struct nss_dma_test_cfg_val { + int val; /* field value */ + int min; /* Minimum value */ + int max; /* Maximum value */ +}; + +/* + * Test configuration for user + */ +struct nss_dma_test_cfg_user { + struct nss_dma_test_cfg_val run; /* test run state */ + struct nss_dma_test_cfg_val code; /* test run code */ + struct nss_dma_test_cfg_val type; /* test type code */ + struct nss_dma_test_cfg_val packets; /* packet count per loop */ + int result_tx_packets; /* test results TX packets */ + int result_rx_packets; /* test result RX packets */ + int result_time; /* test time */ +}; + +static struct nss_dma_test_cfg_user test_cfg = { + .run = {.val = 0, .min = 0 /* stopped */, .max = 1 /* running */}, + .code = {.val = 1, .min = 1 /* linearize */, .max = 2 /* split */}, + .type = {.val = NSS_DMA_TEST_TYPE_DEFAULT, .min = NSS_DMA_TEST_TYPE_DEFAULT, .max = NSS_DMA_TEST_TYPE_MAX}, + .packets = {.val = 1, .min = 1, .max = 65536}, +}; + +/* + * Private data structure. + */ +struct nss_dma_pvt { + struct semaphore sem; /* Semaphore structure. */ + struct completion complete; /* Completion structure. */ + int response; /* Response from FW. */ + void *cb; /* Original cb for sync msgs. */ + void *app_data; /* Original app_data for sync msgs. */ +}; + +static struct nss_dma_pvt nss_dma_cfg_pvt; + +/* + * nss_dma_verify_if_num() + * Verify if_num passed to us. 
+ */ +static inline bool nss_dma_verify_if_num(uint32_t if_num) +{ + return if_num == NSS_DMA_INTERFACE; +} + +/* + * nss_dma_interface_handler() + * Handle NSS -> HLOS messages for DMA Statistics + */ +static void nss_dma_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, + __attribute__((unused))void *app_data) +{ + struct nss_dma_msg *ndm = (struct nss_dma_msg *)ncm; + nss_dma_msg_callback_t cb; + + if (!nss_dma_verify_if_num(ncm->interface)) { + nss_warning("%px: invalid interface %d for dma\n", nss_ctx, ncm->interface); + return; + } + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_DMA_MSG_TYPE_MAX) { + nss_warning("%px: received invalid message %d for dma", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_dma_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Trace messages. + */ + nss_dma_log_rx_msg(ndm); + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Update driver statistics and send statistics notifications to the registered modules. + */ + if (ndm->cm.type == NSS_DMA_MSG_TYPE_SYNC_STATS) { + nss_dma_stats_sync(nss_ctx, &ndm->msg.stats); + nss_dma_stats_notify(nss_ctx); + } + + /* + * Update the callback and app_data for NOTIFY messages + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->nss_rx_interface_handlers[ncm->interface].app_data; + } + + /* + * Do we have a callback? 
+ */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_dma_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, ncm); +} + +/* + * nss_dma_register_handler() + * Register handler for messaging + */ +void nss_dma_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_dma_get_context(); + + nss_info("%px: nss_dma_register_handler", nss_ctx); + nss_core_register_handler(nss_ctx, NSS_DMA_INTERFACE, nss_dma_msg_handler, NULL); + + nss_dma_stats_dentry_create(); + nss_dma_strings_dentry_create(); +} +EXPORT_SYMBOL(nss_dma_register_handler); + +/* + * nss_dma_tx_msg() + * Transmit an dma message to the FW with a specified size. + */ +nss_tx_status_t nss_dma_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_dma_msg *ndm) +{ + struct nss_cmn_msg *ncm = &ndm->cm; + + /* + * Sanity check the message + */ + if (!nss_dma_verify_if_num(ncm->interface)) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_DMA_MSG_TYPE_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Trace messages. + */ + nss_dma_log_tx_msg(ndm); + + return nss_core_send_cmd(nss_ctx, ndm, sizeof(*ndm), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_dma_tx_msg); + +/* + * nss_dma_msg_test_callback() + * Callback function for dma test start configuration + */ +static void nss_dma_msg_test_callback(void *app_data, struct nss_cmn_msg *ncm) +{ + struct nss_ctx_instance *nss_ctx __attribute__((unused)) = (struct nss_ctx_instance *)app_data; + struct nss_dma_msg *ndm = (struct nss_dma_msg *)ncm; + struct nss_dma_test_cfg *ndtc = &ndm->msg.test_cfg; + struct nss_cmn_node_stats *ncns = &ndtc->node_stats; + + test_cfg.run.val = 0; /* test completed */ + + /* + * Test start has been failed. Restore the value to initial state. 
+ */ + if (ndm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: nss dma test failed: %d \n", nss_ctx, ndm->cm.error); + test_cfg.result_tx_packets = 0; + test_cfg.result_rx_packets = 0; + test_cfg.result_time = 0; + return; + } + + test_cfg.result_tx_packets = ncns->tx_packets; + test_cfg.result_rx_packets = ncns->rx_packets; + test_cfg.result_time = ndtc->time_delta; + + nss_info("%px: nss dma test complete\n", nss_ctx); + nss_info("%px: results tx=%u, rx=%u, time=%u\n", ndm, ncns->tx_packets, ncns->rx_packets, ndtc->time_delta); +} + +/* + * nss_dma_msg_test() + * Send NSS DMA test start message. + */ +static nss_tx_status_t nss_dma_msg_test(struct nss_ctx_instance *nss_ctx) +{ + struct nss_dma_msg ndm; + uint32_t flags = 0; + int32_t status; + size_t len; + + len = sizeof(struct nss_cmn_msg) + sizeof(struct nss_dma_test_cfg); + + nss_info("%px: DMA test message:%x\n", nss_ctx, test_cfg.run.val); + if (test_cfg.code.val == 1) { + flags = NSS_DMA_TEST_FLAGS_LINEARIZE; + } + + nss_dma_msg_init(&ndm, NSS_DMA_INTERFACE, NSS_DMA_MSG_TYPE_TEST_PERF, len, nss_dma_msg_test_callback, nss_ctx); + + ndm.msg.test_cfg.packet_count = test_cfg.packets.val; + ndm.msg.test_cfg.type = test_cfg.type.val; + ndm.msg.test_cfg.flags = flags; + + status = nss_dma_tx_msg(nss_ctx, &ndm); + if (unlikely(status != NSS_TX_SUCCESS)) { + return status; + } + + /* + * Test is now running + */ + test_cfg.run.val = 1; + return NSS_TX_SUCCESS; +} + +/* + * nss_dma_msg_init() + * Initialize DMA message. 
+ */ +void nss_dma_msg_init(struct nss_dma_msg *ndm, uint16_t if_num, uint32_t type, uint32_t len, nss_dma_msg_callback_t cb, + void *app_data) +{ + nss_cmn_msg_init(&ndm->cm, if_num, type, len, (void *)cb, app_data); +} +EXPORT_SYMBOL(nss_dma_msg_init); + +/* + * nss_dma_get_context() + * Get NSS context instance for the DMA handler + */ +struct nss_ctx_instance *nss_dma_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.dma_handler_id]; +} +EXPORT_SYMBOL(nss_dma_get_context); + +/* + * nss_dma_test_handler() + * Handles the performance test. + */ +static int nss_dma_test_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_ctx_instance *nss_ctx = nss_dma_get_context(); + int cur_state = test_cfg.run.val; + int ret; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret != NSS_SUCCESS) { + return ret; + } + + if (!write) { + return ret; + } + + /* + * Check any tests are already scheduled + */ + if (cur_state > 0) { + nss_info("%px: Test is already running, stopping it.\n", nss_ctx); + } else { + nss_info("%px: Test is not running, starting it.\n", nss_ctx); + } + + ret = nss_dma_msg_test(nss_ctx); + if (ret != NSS_SUCCESS) { + nss_warning("%px: Test configuration has failed.\n", nss_ctx); + test_cfg.run.val = 0; + } + + return ret; +} + +static struct ctl_table nss_dma_table[] = { + { + .procname = "test_run", + .data = &test_cfg.run.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = nss_dma_test_handler, + .extra1 = &test_cfg.run.min, + .extra2 = &test_cfg.run.max, + }, + { + .procname = "test_code", + .data = &test_cfg.code.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &test_cfg.code.min, + .extra2 = &test_cfg.code.max, + }, + { + .procname = "test_type", + .data = &test_cfg.type.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = 
&test_cfg.type.min, + .extra2 = &test_cfg.type.max, + }, + { + .procname = "test_packets", + .data = &test_cfg.packets.val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &test_cfg.packets.min, + .extra2 = &test_cfg.packets.max, + }, + { + .procname = "result_tx", + .data = &test_cfg.result_tx_packets, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "result_rx", + .data = &test_cfg.result_rx_packets, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "result_time", + .data = &test_cfg.result_time, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { } +}; + +static struct ctl_table nss_dma_dir[] = { + { + .procname = "dma", + .mode = 0555, + .child = nss_dma_table, + }, + { } +}; + +static struct ctl_table nss_dma_root_dir[] = { + { + .procname = "nss", + .mode = 0555, + .child = nss_dma_dir, + }, + { } +}; + +static struct ctl_table nss_dma_root[] = { + { + .procname = "dev", + .mode = 0555, + .child = nss_dma_root_dir, + }, + { } +}; + +static struct ctl_table_header *nss_dma_header; + +/* + * nss_dma_register_sysctl() + */ +void nss_dma_register_sysctl(void) +{ + + /* + * dma sema init. + */ + sema_init(&nss_dma_cfg_pvt.sem, 1); + init_completion(&nss_dma_cfg_pvt.complete); + + /* + * Register sysctl table. + */ + nss_dma_header = register_sysctl_table(nss_dma_root); +} + +/* + * nss_dma_unregister_sysctl() + * Unregister sysctl specific to dma + */ +void nss_dma_unregister_sysctl(void) +{ + /* + * Unregister sysctl table. + */ + if (nss_dma_header) { + unregister_sysctl_table(nss_dma_header); + } +} + +/* + * nss_dma_notify_register() + * Register to receive dma notify messages. 
+ */ +struct nss_ctx_instance *nss_dma_notify_register(int core, nss_dma_msg_callback_t cb, void *app_data) +{ + struct nss_ctx_instance *nss_ctx = nss_dma_get_context(); + uint32_t ret; + + ret = nss_core_register_handler(nss_ctx, NSS_DMA_INTERFACE, nss_dma_msg_handler, app_data); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to register event handler for DMA interface", nss_ctx); + return NULL; + } + + ret = nss_core_register_msg_handler(nss_ctx, NSS_DMA_INTERFACE, cb); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, NSS_DMA_INTERFACE); + nss_warning("%px: unable to register event handler for DMA interface", nss_ctx); + return NULL; + } + + return nss_ctx; +} +EXPORT_SYMBOL(nss_dma_notify_register); + +/* + * nss_dma_notify_unregister() + * Unregister to receive dma notify messages. + */ +void nss_dma_notify_unregister(int core) +{ + struct nss_ctx_instance *nss_ctx = nss_dma_get_context(); + uint32_t ret; + + BUG_ON(!nss_ctx); + + ret = nss_core_unregister_msg_handler(nss_ctx, NSS_DMA_INTERFACE); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to unregister event handler for DMA interface", nss_ctx); + return; + } + + ret = nss_core_unregister_handler(nss_ctx, NSS_DMA_INTERFACE); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to unregister event handler for DMA interface", nss_ctx); + return; + } + + return; +} +EXPORT_SYMBOL(nss_dma_notify_unregister); + +/* + * nss_dma_init() + */ +void nss_dma_init(void) +{ + nss_dma_register_sysctl(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dma_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_dma_log.c new file mode 100755 index 000000000..7f367c8ca --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dma_log.c @@ -0,0 +1,140 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_dma_log.c + * NSS DMA logger file. + */ + +#include "nss_core.h" + +/* + * nss_dma_log_message_types_str + * DMA message strings + */ +static int8_t *nss_dma_log_message_types_str[NSS_DMA_MSG_TYPE_MAX] __maybe_unused = { + "DMA invalid message", + "DMA Configure message", + "DMA Statistics sync message", + "DMA Test linearization performance", +}; + +/* + * nss_dma_log_error_response_types_str + * Strings for error types for DMA messages + */ +static int8_t *nss_dma_log_error_response_types_str[NSS_DMA_MSG_ERROR_MAX] __maybe_unused = { + "No error", + "HW initialization failed", + "Unhandled message type for node", + "Error performing the test", +}; + +/* + * nss_dma_configure_msg() + * Log NSS DMA configure message. + */ +static void nss_dma_configure_msg(struct nss_dma_msg *ndm) +{ + nss_trace("%px: NSS DMA configure message: \n",ndm); +} + +/* + * nss_dma_test_perf_msg() + * Log NSS DMA performance test message. 
+ */ +static void nss_dma_test_perf_msg(struct nss_dma_msg *ndm) +{ + struct nss_dma_test_cfg *ndtc = &ndm->msg.test_cfg; + struct nss_cmn_node_stats *ncns = &ndtc->node_stats; + + nss_trace("%px: NSS DMA test perf message: \n",ndm); + nss_trace("%px: processed (TX: %u, RX:%u, time:%u)\n", ndm, ncns->tx_packets, ncns->rx_packets, ndtc->time_delta); + nss_trace("%px: test parameters (type:%u, packet_cnt:%u)\n", ndm, ndtc->type, ndtc->packet_count); +} + +/* + * nss_dma_log_verbose() + * Log message contents. + */ +static void nss_dma_log_verbose(struct nss_dma_msg *ndm) +{ + switch (ndm->cm.type) { + case NSS_DMA_MSG_TYPE_CONFIGURE: + nss_dma_configure_msg(ndm); + break; + + case NSS_DMA_MSG_TYPE_TEST_PERF: + nss_dma_test_perf_msg(ndm); + break; + + default: + nss_trace("%px: Invalid message type\n", ndm); + break; + } +} + +/* + * nss_dma_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_dma_log_tx_msg(struct nss_dma_msg *ndm) +{ + if (ndm->cm.type >= NSS_DMA_MSG_TYPE_MAX) { + nss_info("%px: Invalid message type\n", ndm); + return; + } + + nss_info("%px: type[%d]:%s\n", ndm, ndm->cm.type, nss_dma_log_message_types_str[ndm->cm.type]); + nss_dma_log_verbose(ndm); +} + +/* + * nss_dma_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_dma_log_rx_msg(struct nss_dma_msg *ndm) +{ + if (ndm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ndm); + return; + } + + if (ndm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ndm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ndm, ndm->cm.type, + nss_dma_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response]); + goto verbose; + } + + if (ndm->cm.error >= NSS_DMA_MSG_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ndm, ndm->cm.type, nss_dma_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response], + ndm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ndm, ndm->cm.type, nss_dma_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response], + ndm->cm.error, nss_dma_log_error_response_types_str[ndm->cm.error]); + +verbose: + nss_dma_log_verbose(ndm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dma_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_dma_log.h new file mode 100755 index 000000000..4a9b9c3ad --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dma_log.h @@ -0,0 +1,38 @@ +/* + ****************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_DMA_LOG_H +#define __NSS_DMA_LOG_H +/* + * nss_dma_log.h + * NSS DMA Log Header File + */ + +/* + * nss_dma_log_tx_msg + * Logs an DMA message that is sent to the NSS firmware. + */ +void nss_dma_log_tx_msg(struct nss_dma_msg *ndm); + +/* + * nss_dma_log_rx_msg + * Logs an DMA message that is received from the NSS firmware. + */ +void nss_dma_log_rx_msg(struct nss_dma_msg *ndm); + +#endif /* __NSS_DMA_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dma_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_dma_stats.c new file mode 100755 index 000000000..12812e4e5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dma_stats.c @@ -0,0 +1,163 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_dma.h" +#include "nss_dma_stats.h" +#include "nss_dma_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_dma_stats_notifier); + +/* + * Spinlock to protect DMA statistics update/read + */ +DEFINE_SPINLOCK(nss_dma_stats_lock); + +/* + * nss_dma_stats + * DMA statistics + */ +uint64_t nss_dma_stats[NSS_DMA_STATS_MAX]; + +/* + * nss_dma_stats_read() + * Read DMA statistics + */ +static ssize_t nss_dma_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats * NSS_MAX_CORES + + * few blank lines for banner printing + Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_output_lines = NSS_DMA_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + uint64_t *stats_shadow; + ssize_t bytes_read = 0; + size_t size_wr = 0; + char *lbuf; + int32_t i; + + lbuf = vzalloc(size_al); + if (!lbuf) { + nss_warning("Could not allocate memory for local statistics buffer"); + return -ENOMEM; + } + + stats_shadow = vzalloc(NSS_DMA_STATS_MAX * 8); + if (!stats_shadow) { + nss_warning("Could not allocate memory for local shadow buffer"); + vfree(lbuf); + return -ENOMEM; + } + + /* + * DMA statistics + */ + spin_lock_bh(&nss_dma_stats_lock); + for (i = 0; i < NSS_DMA_STATS_MAX; i++) { + stats_shadow[i] = nss_dma_stats[i]; + } + spin_unlock_bh(&nss_dma_stats_lock); + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "dma", nss_top_main.dma_handler_id); + size_wr += nss_stats_print("dma", NULL, NSS_STATS_SINGLE_INSTANCE, nss_dma_strings_stats, + stats_shadow, NSS_DMA_STATS_MAX, lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + + vfree(lbuf); + vfree(stats_shadow); + return bytes_read; +} + +/* + * nss_dma_stats_ops + */ 
+NSS_STATS_DECLARE_FILE_OPERATIONS(dma); + +/* + * nss_dma_stats_dentry_create() + * Create DMA statistics debug entry. + */ +void nss_dma_stats_dentry_create(void) +{ + nss_stats_create_dentry("dma", &nss_dma_stats_ops); +} + +/* + * nss_dma_stats_sync() + * Handle the syncing of NSS DMA statistics. + */ +void nss_dma_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_dma_stats *nds) +{ + uint64_t *dma_stats; + uint32_t *msg_stats; + uint16_t i = 0; + + spin_lock_bh(&nss_dma_stats_lock); + + msg_stats = (uint32_t *)nds; + dma_stats = nss_dma_stats; + + for (i = 0; i < NSS_DMA_STATS_MAX; i++, dma_stats++, msg_stats++) { + *dma_stats += *msg_stats; + } + + spin_unlock_bh(&nss_dma_stats_lock); +} + +/* + * nss_dma_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_dma_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + struct nss_dma_stats_notification dma_stats; + + spin_lock_bh(&nss_dma_stats_lock); + dma_stats.core_id = nss_ctx->id; + memcpy(dma_stats.stats_ctx, nss_dma_stats, sizeof(dma_stats.stats_ctx)); + spin_unlock_bh(&nss_dma_stats_lock); + + atomic_notifier_call_chain(&nss_dma_stats_notifier, NSS_STATS_EVENT_NOTIFY, &dma_stats); +} + +/* + * nss_dma_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_dma_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_dma_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_dma_stats_unregister_notifier); + +/* + * nss_dma_stats_register_notifier() + * Registers statistics notifier. 
+ */ +int nss_dma_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_dma_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_dma_stats_register_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dma_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_dma_stats.h new file mode 100755 index 000000000..a7fc1d859 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dma_stats.h @@ -0,0 +1,31 @@ +/* + ****************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * **************************************************************************** + */ + +#ifndef __NSS_DMA_STATS_H +#define __NSS_DMA_STATS_H + +#include + +/* + * DMA statistics APIs + */ +extern void nss_dma_stats_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_dma_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_dma_stats *nds); +extern void nss_dma_stats_dentry_create(void); + +#endif /* __NSS_DMA_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dma_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_dma_strings.c new file mode 100755 index 000000000..402afc7be --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dma_strings.c @@ -0,0 +1,88 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_dma_stats.h" + +/* + * nss_dma_strings_stats + * DMA statistics strings. 
+ */ +struct nss_stats_info nss_dma_strings_stats[NSS_DMA_STATS_MAX] = { + {"rx_pkts" , NSS_STATS_TYPE_COMMON}, + {"rx_byts" , NSS_STATS_TYPE_COMMON}, + {"tx_pkts" , NSS_STATS_TYPE_COMMON}, + {"tx_byts" , NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops" , NSS_STATS_TYPE_DROP}, + {"no_req" , NSS_STATS_TYPE_SPECIAL}, + {"no_desc" , NSS_STATS_TYPE_SPECIAL}, + {"fail_nexthop" , NSS_STATS_TYPE_SPECIAL}, + {"fail_nexthop_queue" , NSS_STATS_TYPE_SPECIAL}, + {"fail_linear_sz" , NSS_STATS_TYPE_SPECIAL}, + {"fail_linear_alloc" , NSS_STATS_TYPE_SPECIAL}, + {"fail_linear_no_sg" , NSS_STATS_TYPE_SPECIAL}, + {"fail_split_sz" , NSS_STATS_TYPE_SPECIAL}, + {"fail_split_alloc" , NSS_STATS_TYPE_SPECIAL}, + {"fail_sync_alloc" , NSS_STATS_TYPE_SPECIAL}, + {"fail_ctx_active" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[0]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[1]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[2]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[3]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[4]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[5]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[6]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[7]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[8]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[9]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[10]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[11]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[12]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[13]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[14]" , NSS_STATS_TYPE_SPECIAL}, + {"fail_hw[15]" , NSS_STATS_TYPE_SPECIAL}, +}; + + +/* + * nss_dma_strings_read() + * Read DMA node statistics names + */ +static ssize_t nss_dma_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_dma_strings_stats, NSS_DMA_STATS_MAX); +} + +/* + * nss_dma_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(dma); + +/* + * 
nss_dma_strings_dentry_create() + * Create DMA statistics strings debug entry. + */ +void nss_dma_strings_dentry_create(void) +{ + nss_strings_create_dentry("dma", &nss_dma_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dma_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_dma_strings.h new file mode 100755 index 000000000..145021627 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dma_strings.h @@ -0,0 +1,25 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_DMA_STRINGS_H +#define __NSS_DMA_STRINGS_H + +extern struct nss_stats_info nss_dma_strings_stats[NSS_DMA_STATS_MAX]; +extern void nss_dma_strings_dentry_create(void); + +#endif /* __NSS_DMA_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_drv_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_drv_stats.c new file mode 100644 index 000000000..30b8cb5f7 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_drv_stats.c @@ -0,0 +1,166 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2020, The Linux Foundation. 
All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_drv_strings.h" +#include "nss_drv_stats.h" + +/* + * nss_drv_stats_read() + * Read HLOS driver stats. + */ +static ssize_t nss_drv_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * Max output lines = #stats * NSS_MAX_CORES + + * few blank lines for banner printing + Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_output_lines = NSS_DRV_STATS_MAX * NSS_MAX_CORES + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_DRV_STATS_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "drv", NSS_STATS_SINGLE_CORE); + for (i = 0; (i < NSS_DRV_STATS_MAX); i++) 
{ + stats_shadow[i] = NSS_PKT_STATS_READ(&nss_top_main.stats_drv[i]); + } + + size_wr += nss_stats_print("drv", NULL, NSS_STATS_SINGLE_INSTANCE, nss_drv_strings_stats, stats_shadow, NSS_DRV_STATS_MAX, lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * drv_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(drv); + +/* + * nss_drv_stats_dentry_create() + * Create DRV statistics debug entry. + */ +void nss_drv_stats_dentry_create(void) +{ + nss_stats_create_dentry("drv", &nss_drv_stats_ops); +} + +/* + * TODO: Move this (nss_wt_stats_read) function to new file (nss_wt_stats.c) + */ + +/* + * nss_wt_stats_read() + * Reads and formats worker thread statistics and outputs them to ubuf + */ +ssize_t nss_wt_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + struct nss_stats_data *data = fp->private_data; + struct nss_ctx_instance *nss_ctx = data->nss_ctx; + struct nss_project_irq_stats *shadow; + uint32_t thread_count = nss_ctx->worker_thread_count; + uint32_t irq_count = nss_ctx->irq_count; + + /* + * Three lines for each IRQ + */ + uint32_t max_output_lines = thread_count * 3 * irq_count; + size_t size_al = max_output_lines * NSS_STATS_MAX_STR_LENGTH; + size_t size_wr = 0; + ssize_t bytes_read = 0; + char *lbuf; + int i; + int j; + + lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer\n"); + return 0; + } + + shadow = kzalloc(thread_count * irq_count * sizeof(struct nss_project_irq_stats), GFP_KERNEL); + if (unlikely(!shadow)) { + nss_warning("Could not allocate memory for stats shadow\n"); + kfree(lbuf); + return 0; + } + + spin_lock_bh(&nss_top_main.stats_lock); + if (unlikely(!nss_ctx->wt_stats)) { + spin_unlock_bh(&nss_top_main.stats_lock); + nss_warning("Worker thread statistics not allocated\n"); + kfree(lbuf); + kfree(shadow); + return 0; 
+ } + for (i = 0; i < thread_count; ++i) { + + /* + * The statistics shadow is an array with thread_count * irq_count + * items in it. Each item is located at the index: + * (thread number) * (irq_count) + (irq number) + * thus simulating a two-dimensional array. + */ + for (j = 0; j < irq_count; ++j) { + shadow[i * irq_count + j] = nss_ctx->wt_stats[i].irq_stats[j]; + } + } + spin_unlock_bh(&nss_top_main.stats_lock); + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "worker thread", NSS_STATS_SINGLE_CORE); + for (i = 0; i < thread_count; ++i) { + for (j = 0; j < irq_count; ++j) { + struct nss_project_irq_stats *is = &(shadow[i * irq_count + j]); + if (!(is->count)) { + continue; + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "t-%d:irq-%d callback: 0x%x, count: %llu\n", + i, j, is->callback, is->count); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "t-%d:irq-%d tick min: %10u avg: %10u max:%10u\n", + i, j, is->ticks_min, is->ticks_avg, is->ticks_max); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "t-%d:irq-%d insn min: %10u avg: %10u max:%10u\n\n", + i, j, is->insn_min, is->insn_avg, is->insn_max); + } + } + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(shadow); + + return bytes_read; +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_drv_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_drv_stats.h new file mode 100644 index 000000000..543dd5c89 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_drv_stats.h @@ -0,0 +1,80 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_drv_stats.h + * NSS driver stats header file. + */ + +#ifndef __NSS_DRV_STATS_H +#define __NSS_DRV_STATS_H + +#include + +/* + * HLOS driver statistics + * + * WARNING: There is a 1:1 mapping between values below and corresponding + * stats string array in nss_stats.c. + */ +enum NSS_DRV_STATS { + NSS_DRV_STATS_NBUF_ALLOC_FAILS = 0, /* NBUF allocation errors */ + NSS_DRV_STATS_PAGED_BUF_ALLOC_FAILS, /* Paged buf allocation errors */ + NSS_DRV_STATS_TX_QUEUE_FULL_0, /* Tx queue full for Core 0*/ + NSS_DRV_STATS_TX_QUEUE_FULL_1, /* Tx queue full for Core 1*/ + NSS_DRV_STATS_TX_EMPTY, /* H2N Empty buffers */ + NSS_DRV_STATS_PAGED_TX_EMPTY, /* H2N Paged Empty buffers */ + NSS_DRV_STATS_TX_PACKET, /* H2N Data packets */ + NSS_DRV_STATS_TX_CMD_REQ, /* H2N Control packets */ + NSS_DRV_STATS_TX_CRYPTO_REQ, /* H2N Crypto requests */ + NSS_DRV_STATS_TX_BUFFER_REUSE, /* H2N Reuse buffer count */ + NSS_DRV_STATS_RX_EMPTY, /* N2H Empty buffers */ + NSS_DRV_STATS_RX_PACKET, /* N2H Data packets */ + NSS_DRV_STATS_RX_EXT_PACKET, /* N2H EXT type packets */ + NSS_DRV_STATS_RX_CMD_RESP, /* N2H Command responses */ + NSS_DRV_STATS_RX_STATUS, /* N2H Status packets */ + NSS_DRV_STATS_RX_CRYPTO_RESP, /* N2H Crypto responses */ + NSS_DRV_STATS_RX_VIRTUAL, /* N2H Virtual packets */ + NSS_DRV_STATS_TX_SIMPLE, /* H2N Simple SKB Packets */ + NSS_DRV_STATS_TX_NR_FRAGS, /* H2N NR Frags SKB 
Packets */ + NSS_DRV_STATS_TX_FRAGLIST, /* H2N Fraglist SKB Packets */ + NSS_DRV_STATS_RX_SIMPLE, /* N2H Simple SKB Packets */ + NSS_DRV_STATS_RX_NR_FRAGS, /* N2H NR Frags SKB Packets */ + NSS_DRV_STATS_RX_SKB_FRAGLIST, /* N2H Fraglist SKB Packets */ + NSS_DRV_STATS_RX_BAD_DESCRIPTOR, /* N2H Bad descriptor reads */ + NSS_DRV_STATS_RX_INVALID_INTERFACE, /* N2H Received descriptor for invalid interface */ + NSS_DRV_STATS_RX_INVALID_CORE_ID, /* N2H Received packet for invalid core_id */ + NSS_DRV_STATS_RX_INVALID_BUFFER_TYPE, /* N2H Received packet for invalid buffer type */ + NSS_DRV_STATS_NSS_SKB_COUNT, /* NSS SKB Pool Count */ + NSS_DRV_STATS_CHAIN_SEG_PROCESSED, /* N2H SKB Chain Processed Count */ + NSS_DRV_STATS_FRAG_SEG_PROCESSED, /* N2H Frag Processed Count */ + NSS_DRV_STATS_TX_CMD_QUEUE_FULL, /* Tx H2N Control packets fail due to queue full */ +#ifdef NSS_MULTI_H2N_DATA_RING_SUPPORT + NSS_DRV_STATS_TX_PACKET_QUEUE_0, /* H2N Data packets on queue0 */ + NSS_DRV_STATS_TX_PACKET_QUEUE_1, /* H2N Data packets on queue1 */ + NSS_DRV_STATS_TX_PACKET_QUEUE_2, /* H2N Data packets on queue2 */ + NSS_DRV_STATS_TX_PACKET_QUEUE_3, /* H2N Data packets on queue3 */ + NSS_DRV_STATS_TX_PACKET_QUEUE_4, /* H2N Data packets on queue4 */ + NSS_DRV_STATS_TX_PACKET_QUEUE_5, /* H2N Data packets on queue5 */ + NSS_DRV_STATS_TX_PACKET_QUEUE_6, /* H2N Data packets on queue6 */ + NSS_DRV_STATS_TX_PACKET_QUEUE_7, /* H2N Data packets on queue7 */ +#endif + NSS_DRV_STATS_MAX, +}; + +extern void nss_drv_stats_dentry_create(void); +extern ssize_t nss_wt_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos); +#endif /* __NSS_DRV_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_drv_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_drv_strings.c new file mode 100644 index 000000000..259561525 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_drv_strings.c @@ -0,0 +1,92 @@ +/* + ************************************************************************** + * Copyright (c) 
2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" + +/* + * nss_drv_strings_stats + * Host driver stats names. 
+ */ +struct nss_stats_info nss_drv_strings_stats[NSS_DRV_STATS_MAX] = { + {"nbuf_alloc_errors" , NSS_STATS_TYPE_ERROR}, + {"paged_buf_alloc_errors" , NSS_STATS_TYPE_ERROR}, + {"tx_queue_full[0]" , NSS_STATS_TYPE_ERROR}, + {"tx_queue_full[1]" , NSS_STATS_TYPE_ERROR}, + {"tx_buffers_empty" , NSS_STATS_TYPE_SPECIAL}, + {"tx_paged_buffers_empty" , NSS_STATS_TYPE_SPECIAL}, + {"tx_buffer_pkt" , NSS_STATS_TYPE_SPECIAL}, + {"tx_buffers_cmd" , NSS_STATS_TYPE_SPECIAL}, + {"tx_buffers_crypto" , NSS_STATS_TYPE_SPECIAL}, + {"tx_buffers_reuse" , NSS_STATS_TYPE_SPECIAL}, + {"rx_buffers_empty" , NSS_STATS_TYPE_SPECIAL}, + {"rx_buffers_pkt" , NSS_STATS_TYPE_SPECIAL}, + {"rx_buffers_ext_pkt" , NSS_STATS_TYPE_SPECIAL}, + {"rx_buffers_cmd_resp" , NSS_STATS_TYPE_SPECIAL}, + {"rx_buffers_status_sync" , NSS_STATS_TYPE_SPECIAL}, + {"rx_buffers_crypto" , NSS_STATS_TYPE_SPECIAL}, + {"rx_buffers_virtual" , NSS_STATS_TYPE_SPECIAL}, + {"tx_skb_simple" , NSS_STATS_TYPE_SPECIAL}, + {"tx_skb_nr_frags" , NSS_STATS_TYPE_SPECIAL}, + {"tx_skb_fraglist" , NSS_STATS_TYPE_SPECIAL}, + {"rx_skb_simple" , NSS_STATS_TYPE_SPECIAL}, + {"rx_skb_nr_frags" , NSS_STATS_TYPE_SPECIAL}, + {"rx_skb_fraglist" , NSS_STATS_TYPE_SPECIAL}, + {"rx_bad_desciptor" , NSS_STATS_TYPE_ERROR}, + {"invalid_interface" , NSS_STATS_TYPE_ERROR}, + {"invalid_core_id" , NSS_STATS_TYPE_ERROR}, + {"invalid_buffer_type" , NSS_STATS_TYPE_ERROR}, + {"nss_skb_count" , NSS_STATS_TYPE_SPECIAL}, + {"rx_chain_seg_processed" , NSS_STATS_TYPE_SPECIAL}, + {"rx_frag_seg_processed" , NSS_STATS_TYPE_SPECIAL}, + {"tx_buffers_cmd_queue_full" , NSS_STATS_TYPE_ERROR}, +#ifdef NSS_MULTI_H2N_DATA_RING_SUPPORT + {"tx_buffers_data_queue[0]" , NSS_STATS_TYPE_SPECIAL}, + {"tx_buffers_data_queue[1]" , NSS_STATS_TYPE_SPECIAL}, + {"tx_buffers_data_queue[2]" , NSS_STATS_TYPE_SPECIAL}, + {"tx_buffers_data_queue[3]" , NSS_STATS_TYPE_SPECIAL}, + {"tx_buffers_data_queue[4]" , NSS_STATS_TYPE_SPECIAL}, + {"tx_buffers_data_queue[5]" , NSS_STATS_TYPE_SPECIAL}, + 
{"tx_buffers_data_queue[6]" , NSS_STATS_TYPE_SPECIAL}, + {"tx_buffers_data_queue[7]" , NSS_STATS_TYPE_SPECIAL}, +#endif +}; + +/* + * nss_drv_strings_read() + * Read drv node statistics names. + */ +static ssize_t nss_drv_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_drv_strings_stats, NSS_DRV_STATS_MAX); +} + +/* + * nss_drv_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(drv); + +/* + * nss_drv_strings_dentry_create() + * Create drv statistics strings debug entry. + */ +void nss_drv_strings_dentry_create(void) +{ + nss_strings_create_dentry("drv", &nss_drv_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_drv_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_drv_strings.h new file mode 100644 index 000000000..72a1fd7a0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_drv_strings.h @@ -0,0 +1,26 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_DRV_STRINGS_H +#define __NSS_DRV_STRINGS_H + +extern struct nss_stats_info nss_drv_strings_stats[NSS_DRV_STATS_MAX]; + +extern void nss_drv_strings_dentry_create(void); + +#endif /* __NSS_DRV_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dscp_map.h b/feeds/ipq807x/qca-nss-drv/src/nss_dscp_map.h new file mode 100644 index 000000000..441dc11ac --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dscp_map.h @@ -0,0 +1,212 @@ +/* + ************************************************************************** + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_dscp_map.h + * NSS dscp map parse APIs + */ + +#include "nss_tx_rx_common.h" + +#define NSS_DSCP_MAP_PARAM_FIELD_COUNT 3 +#define NSS_DSCP_MAP_ARRAY_SIZE 64 +#define NSS_DSCP_MAP_PRIORITY_MAX NSS_MAX_NUM_PRI + +/* + * nss dscp map entry structure. + */ +struct nss_dscp_map_entry { + uint8_t action; /* Action associated with the DSCP value.*/ + uint8_t priority; /* Priority associated with the DSCP value. */ +}; + +/* + * nss dscp map parse output. 
+ */ +struct nss_dscp_map_parse { + uint8_t dscp; /* Parsed dscp value */ + uint8_t action; /* Parsed action value */ + uint8_t priority; /* Parsed priority value */ +}; + +/* + * nss_dscp_map_print() + * Sysctl handler for printing dscp/pri mapping. + */ +static int nss_dscp_map_print(struct ctl_table *ctl, void __user *buffer, size_t *lenp, + loff_t *ppos, struct nss_dscp_map_entry *mapping) +{ + char *r_buf; + int i, len; + size_t cp_bytes = 0; + + /* + * (64 * 8) + 22 bytes for the buffer size is sufficient to write + * the table including the spaces and new line characters. + */ + r_buf = kzalloc(((NSS_DSCP_MAP_ARRAY_SIZE * 8) + 22) * sizeof(char), GFP_KERNEL); + if (!r_buf) { + nss_warning("Failed to alloc buffer to print dscp map table\n"); + return -EFAULT; + } + + /* + * Write the priority values to the first line of the output. + */ + len = scnprintf(r_buf + cp_bytes, 11, "%s: ", "priority"); + cp_bytes += len; + for (i = 0; i < NSS_DSCP_MAP_ARRAY_SIZE; i++) { + len = scnprintf(r_buf + cp_bytes, 4, "%d ", mapping[i].priority); + if (!len) { + nss_warning("failed to read from buffer %d\n", mapping[i].priority); + kfree(r_buf); + return -EFAULT; + } + cp_bytes += len; + } + + /* + * Add new line character at the end. + */ + len = scnprintf(r_buf + cp_bytes, 4, "\n"); + cp_bytes += len; + + /* + * Write the action values to the second line of the output. + */ + len = scnprintf(r_buf + cp_bytes, 11, "%s: ", "action"); + cp_bytes += len; + for (i = 0; i < NSS_DSCP_MAP_ARRAY_SIZE; i++) { + len = scnprintf(r_buf + cp_bytes, 4, "%d ", mapping[i].action); + if (!len) { + nss_warning("failed to read from buffer %d\n", mapping[i].action); + kfree(r_buf); + return -EFAULT; + } + cp_bytes += len; + } + + /* + * Add new line character at the end. 
+ */ + len = scnprintf(r_buf + cp_bytes, 4, "\n"); + cp_bytes += len; + + cp_bytes = simple_read_from_buffer(buffer, *lenp, ppos, r_buf, cp_bytes); + *lenp = cp_bytes; + kfree(r_buf); + return 0; +} + +/* + * nss_dscp_map_parse() + * Sysctl handler for dscp/pri mappings. + */ +static int nss_dscp_map_parse(struct ctl_table *ctl, void __user *buffer, size_t *lenp, + loff_t *ppos, struct nss_dscp_map_parse *out) +{ + int count; + size_t cp_bytes = 0; + char w_buf[7]; + loff_t w_offset = 0; + char *str; + char *tokens[NSS_DSCP_MAP_PARAM_FIELD_COUNT]; + unsigned int dscp, priority, action; + int ret; + + /* + * Buffer length cannot be more than 7 and less than 6. + */ + if (*lenp < 6 || *lenp > 7) { + nss_warning("Buffer is not correct. Invalid lenght: %d\n", (int)*lenp); + return -EINVAL; + } + + /* + * It's a write operation + */ + cp_bytes = simple_write_to_buffer(w_buf, *lenp, &w_offset, buffer, 7); + if (cp_bytes != *lenp) { + nss_warning("failed to write to buffer\n"); + return -EFAULT; + } + + count = 0; + str = w_buf; + tokens[count] = strsep(&str, " "); + while (tokens[count] != NULL) { + count++; + if (count == NSS_DSCP_MAP_PARAM_FIELD_COUNT) { + nss_warning("maximum allowed field count is %d\n", NSS_DSCP_MAP_PARAM_FIELD_COUNT); + break; + } + tokens[count] = strsep(&str, " "); + } + + /* + * Did we read enough number of parameters from the command line. + * There must be 2 parameters. + */ + if (count != NSS_DSCP_MAP_PARAM_FIELD_COUNT) { + nss_warning("param fields are less than expected: %d\n", count); + return -EINVAL; + } + + /* + * Write the tokens to integers. 
+ */ + ret = sscanf(tokens[0], "%u", &dscp); + if (ret != 1) { + nss_warning("failed to write the dscp token to integer\n"); + return -EFAULT; + } + + ret = sscanf(tokens[1], "%u", &action); + if (ret != 1) { + nss_warning("failed to write the action token to integer\n"); + return -EFAULT; + } + + ret = sscanf(tokens[2], "%u", &priority); + if (ret != 1) { + nss_warning("failed to write the priority token to integer\n"); + return -EFAULT; + } + + /* + * dscp value cannot be higher than 63. + */ + if (dscp >= NSS_DSCP_MAP_ARRAY_SIZE) { + nss_warning("invalid dscp value: %d\n", dscp); + return -EINVAL; + } + + /* + * Priority must be less than NSS_DSCP_MAP_PRIORITY_MAX which is 4. + */ + if (priority >= NSS_DSCP_MAP_PRIORITY_MAX) { + nss_warning("invalid priority value: %d\n", priority); + return -EINVAL; + } + + nss_info("dscp: %d action: %d priority: %d\n", dscp, action, priority); + + out->dscp = dscp; + out->action = action; + out->priority = priority; + + return 0; +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dtls.c b/feeds/ipq807x/qca-nss-drv/src/nss_dtls.c new file mode 100644 index 000000000..6d7c1a1ce --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls.c @@ -0,0 +1,468 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_dtls_stats.h" +#include "nss_dtls_log.h" + +#define NSS_DTLS_TX_TIMEOUT 3000 /* 3 Seconds */ + +/* + * Data structures to store DTLS nss debug stats + */ +static DEFINE_SPINLOCK(nss_dtls_session_stats_lock); +static struct nss_dtls_stats_session session_stats[NSS_MAX_DTLS_SESSIONS]; + +/* + * Private data structure + */ +static struct nss_dtls_pvt { + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; +} dtls_pvt; + +/* + * nss_dtls_verify_if_num() + * Verify if_num passed to us. + */ +static bool nss_dtls_verify_if_num(uint32_t if_num) +{ + if (nss_is_dynamic_interface(if_num) == false) + return false; + + if (nss_dynamic_interface_get_type(nss_dtls_get_context(), if_num) + != NSS_DYNAMIC_INTERFACE_TYPE_DTLS) + return false; + + return true; +} + +/* + * nss_dtls_session_stats_sync + * Per DTLS session debug stats + */ +static void nss_dtls_session_stats_sync(struct nss_ctx_instance *nss_ctx, + struct nss_dtls_session_stats *stats_msg, + uint16_t if_num) +{ + int i; + struct nss_dtls_stats_session *s = NULL; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + spin_lock_bh(&nss_dtls_session_stats_lock); + for (i = 0; i < NSS_MAX_DTLS_SESSIONS; i++) { + if (session_stats[i].if_num != if_num) { + continue; + } + + s = &session_stats[i]; + break; + } + + if (!s) { + spin_unlock_bh(&nss_dtls_session_stats_lock); + return; + } + + s->stats[NSS_DTLS_STATS_SESSION_RX_PKTS] += stats_msg->node_stats.rx_packets; + s->stats[NSS_DTLS_STATS_SESSION_TX_PKTS] += 
stats_msg->node_stats.tx_packets; + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + s->stats[NSS_DTLS_STATS_SESSION_RX_QUEUE_0_DROPPED + i] += stats_msg->node_stats.rx_dropped[i]; + } + s->stats[NSS_DTLS_STATS_SESSION_RX_AUTH_DONE] += stats_msg->rx_auth_done; + s->stats[NSS_DTLS_STATS_SESSION_TX_AUTH_DONE] += stats_msg->tx_auth_done; + s->stats[NSS_DTLS_STATS_SESSION_RX_CIPHER_DONE] += stats_msg->rx_cipher_done; + s->stats[NSS_DTLS_STATS_SESSION_TX_CIPHER_DONE] += stats_msg->tx_cipher_done; + s->stats[NSS_DTLS_STATS_SESSION_RX_CBUF_ALLOC_FAIL] += stats_msg->rx_cbuf_alloc_fail; + s->stats[NSS_DTLS_STATS_SESSION_TX_CBUF_ALLOC_FAIL] += stats_msg->tx_cbuf_alloc_fail; + s->stats[NSS_DTLS_STATS_SESSION_TX_CENQUEUE_FAIL] += stats_msg->tx_cenqueue_fail; + s->stats[NSS_DTLS_STATS_SESSION_RX_CENQUEUE_FAIL] += stats_msg->rx_cenqueue_fail; + s->stats[NSS_DTLS_STATS_SESSION_TX_DROPPED_HROOM] += stats_msg->tx_dropped_hroom; + s->stats[NSS_DTLS_STATS_SESSION_TX_DROPPED_TROOM] += stats_msg->tx_dropped_troom; + s->stats[NSS_DTLS_STATS_SESSION_TX_FORWARD_ENQUEUE_FAIL] += stats_msg->tx_forward_enqueue_fail; + s->stats[NSS_DTLS_STATS_SESSION_RX_FORWARD_ENQUEUE_FAIL] += stats_msg->rx_forward_enqueue_fail; + s->stats[NSS_DTLS_STATS_SESSION_RX_INVALID_VERSION] += stats_msg->rx_invalid_version; + s->stats[NSS_DTLS_STATS_SESSION_RX_INVALID_EPOCH] += stats_msg->rx_invalid_epoch; + s->stats[NSS_DTLS_STATS_SESSION_RX_MALFORMED] += stats_msg->rx_malformed; + s->stats[NSS_DTLS_STATS_SESSION_RX_CIPHER_FAIL] += stats_msg->rx_cipher_fail; + s->stats[NSS_DTLS_STATS_SESSION_RX_AUTH_FAIL] += stats_msg->rx_auth_fail; + s->stats[NSS_DTLS_STATS_SESSION_RX_CAPWAP_CLASSIFY_FAIL] += stats_msg->rx_capwap_classify_fail; + s->stats[NSS_DTLS_STATS_SESSION_RX_SINGLE_REC_DGRAM] += stats_msg->rx_single_rec_dgram; + s->stats[NSS_DTLS_STATS_SESSION_RX_MULTI_REC_DGRAM] += stats_msg->rx_multi_rec_dgram; + s->stats[NSS_DTLS_STATS_SESSION_RX_REPLAY_FAIL] += stats_msg->rx_replay_fail; + 
s->stats[NSS_DTLS_STATS_SESSION_RX_REPLAY_DUPLICATE] += stats_msg->rx_replay_duplicate; + s->stats[NSS_DTLS_STATS_SESSION_RX_REPLAY_OUT_OF_WINDOW] += stats_msg->rx_replay_out_of_window; + s->stats[NSS_DTLS_STATS_SESSION_OUTFLOW_QUEUE_FULL] += stats_msg->outflow_queue_full; + s->stats[NSS_DTLS_STATS_SESSION_DECAP_QUEUE_FULL] += stats_msg->decap_queue_full; + s->stats[NSS_DTLS_STATS_SESSION_PBUF_ALLOC_FAIL] += stats_msg->pbuf_alloc_fail; + s->stats[NSS_DTLS_STATS_SESSION_PBUF_COPY_FAIL] += stats_msg->pbuf_copy_fail; + s->stats[NSS_DTLS_STATS_SESSION_EPOCH] = stats_msg->epoch; + s->stats[NSS_DTLS_STATS_SESSION_TX_SEQ_HIGH] = stats_msg->tx_seq_high; + s->stats[NSS_DTLS_STATS_SESSION_TX_SEQ_LOW] = stats_msg->tx_seq_low; + spin_unlock_bh(&nss_dtls_session_stats_lock); +} + +/* + * nss_dtls_session_stats_get() + * Get session DTLS statitics. + */ +void nss_dtls_session_stats_get(struct nss_dtls_stats_session *stats) +{ + int i; + + if (!stats) { + nss_warning("No memory to copy dtls session stats"); + return; + } + + spin_lock_bh(&nss_dtls_session_stats_lock); + for (i = 0; i < NSS_MAX_DTLS_SESSIONS; i++) { + if (session_stats[i].valid) { + memcpy(stats, &session_stats[i], + sizeof(struct nss_dtls_stats_session)); + stats++; + } + } + spin_unlock_bh(&nss_dtls_session_stats_lock); +} + +/* + * nss_dtls_handler() + * Handle NSS -> HLOS messages for dtls tunnel + */ +static void nss_dtls_handler(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, + __attribute__((unused))void *app_data) +{ + struct nss_dtls_msg *ntm = (struct nss_dtls_msg *)ncm; + void *ctx; + + nss_dtls_msg_callback_t cb; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + BUG_ON(!nss_dtls_verify_if_num(ncm->interface)); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_DTLS_MSG_MAX) { + nss_warning("%px: received invalid message %d " + "for DTLS interface %d", + nss_ctx, ncm->type, ncm->interface); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_dtls_msg)) { + nss_warning("%px: dtls message length is invalid: %d", + nss_ctx, ncm->len); + return; + } + + switch (ntm->cm.type) { + case NSS_DTLS_MSG_SESSION_STATS: + nss_dtls_session_stats_sync(nss_ctx, + &ntm->msg.stats, + ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->dtls_msg_callback; + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].app_data; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Trace messages. + */ + nss_dtls_log_rx_msg(ntm); + + /* + * callback + */ + cb = (nss_dtls_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + /* + * call dtls session callback + */ + if (!cb) { + nss_warning("%px: No callback for dtls session interface %d", + nss_ctx, ncm->interface); + return; + } + + cb(ctx, ntm); +} + +/* + * nss_dtls_callback() + * Callback to handle the completion of NSS->HLOS messages. 
+ */ +static void nss_dtls_callback(void *app_data, struct nss_dtls_msg *nim) +{ + nss_dtls_msg_callback_t callback = (nss_dtls_msg_callback_t)dtls_pvt.cb; + void *data = dtls_pvt.app_data; + + dtls_pvt.cb = NULL; + dtls_pvt.app_data = NULL; + + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("dtls Error response %d\n", nim->cm.response); + + dtls_pvt.response = NSS_TX_FAILURE; + if (callback) { + callback(data, nim); + } + + complete(&dtls_pvt.complete); + return; + } + + dtls_pvt.response = NSS_TX_SUCCESS; + if (callback) { + callback(data, nim); + } + + complete(&dtls_pvt.complete); +} + +/* + * nss_dtls_tx_buf() + * Transmit buffer over DTLS interface + */ +nss_tx_status_t nss_dtls_tx_buf(struct sk_buff *skb, uint32_t if_num, + struct nss_ctx_instance *nss_ctx) +{ + BUG_ON(!nss_dtls_verify_if_num(if_num)); + + return nss_core_send_packet(nss_ctx, skb, if_num, H2N_BIT_FLAG_VIRTUAL_BUFFER | H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_dtls_tx_buf); + +/* + * nss_dtls_tx_msg() + * Transmit a DTLS message to NSS firmware + */ +nss_tx_status_t nss_dtls_tx_msg(struct nss_ctx_instance *nss_ctx, + struct nss_dtls_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Sanity check the message + */ + BUG_ON(!nss_dtls_verify_if_num(ncm->interface)); + + if (ncm->type > NSS_DTLS_MSG_MAX) { + nss_warning("%px: dtls message type out of range: %d", + nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Trace messages. + */ + nss_dtls_log_tx_msg(msg); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_dtls_tx_msg); + +/* + * nss_dtls_tx_msg() + * Transmit a DTLS message to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_dtls_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_dtls_msg *msg) +{ + + nss_tx_status_t status; + int ret = 0; + + down(&dtls_pvt.sem); + dtls_pvt.cb = (void *)msg->cm.cb; + dtls_pvt.app_data = (void *)msg->cm.app_data; + + msg->cm.cb = (nss_ptr_t)nss_dtls_callback; + msg->cm.app_data = (nss_ptr_t)NULL; + + status = nss_dtls_tx_msg(nss_ctx, msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: dtls_tx_msg failed\n", nss_ctx); + up(&dtls_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&dtls_pvt.complete, msecs_to_jiffies(NSS_DTLS_TX_TIMEOUT)); + + if (!ret) { + nss_warning("%px: DTLS msg tx failed due to timeout\n", nss_ctx); + dtls_pvt.response = NSS_TX_FAILURE; + } + + status = dtls_pvt.response; + up(&dtls_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_dtls_tx_msg_sync); + +/* + *********************************** + * Register/Unregister/Miscellaneous APIs + *********************************** + */ + +/* + * nss_dtls_register_if() + */ +struct nss_ctx_instance *nss_dtls_register_if(uint32_t if_num, + nss_dtls_data_callback_t cb, + nss_dtls_msg_callback_t ev_cb, + struct net_device *netdev, + uint32_t features, + void *app_ctx) +{ + int32_t i; + + struct nss_ctx_instance *nss_ctx = nss_dtls_get_context(); + + BUG_ON(!nss_dtls_verify_if_num(if_num)); + + spin_lock_bh(&nss_dtls_session_stats_lock); + for (i = 0; i < NSS_MAX_DTLS_SESSIONS; i++) { + if (!session_stats[i].valid) { + session_stats[i].valid = true; + session_stats[i].if_num = if_num; + session_stats[i].if_index = netdev->ifindex; + break; + } + } + spin_unlock_bh(&nss_dtls_session_stats_lock); + + if (i == NSS_MAX_DTLS_SESSIONS) { + nss_warning("%px: Cannot find free slot for " + "DTLS session stats, I/F:%u\n", nss_ctx, if_num); + return NULL; + } + + if (nss_ctx->subsys_dp_register[if_num].ndev) { + nss_warning("%px: Cannot find free slot for " + "DTLS NSS I/F:%u\n", nss_ctx, if_num); + + return NULL; + } + + 
nss_core_register_subsys_dp(nss_ctx, if_num, cb, NULL, app_ctx, netdev, features); + nss_ctx->subsys_dp_register[if_num].type = NSS_DYNAMIC_INTERFACE_TYPE_DTLS; + + nss_top_main.dtls_msg_callback = ev_cb; + nss_core_register_handler(nss_ctx, if_num, nss_dtls_handler, app_ctx); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_dtls_register_if); + +/* + * nss_dtls_unregister_if() + */ +void nss_dtls_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_dtls_get_context(); + int32_t i; + + BUG_ON(!nss_dtls_verify_if_num(if_num)); + + spin_lock_bh(&nss_dtls_session_stats_lock); + for (i = 0; i < NSS_MAX_DTLS_SESSIONS; i++) { + if (session_stats[i].if_num == if_num) { + memset(&session_stats[i], 0, + sizeof(struct nss_dtls_stats_session)); + break; + } + } + spin_unlock_bh(&nss_dtls_session_stats_lock); + + if (i == NSS_MAX_DTLS_SESSIONS) { + nss_warning("%px: Cannot find debug stats for DTLS session %d\n", nss_ctx, if_num); + return; + } + + if (!nss_ctx->subsys_dp_register[if_num].ndev) { + nss_warning("%px: Cannot find registered netdev for DTLS NSS I/F:%u\n", nss_ctx, if_num); + + return; + } + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_top_main.dtls_msg_callback = NULL; + nss_core_unregister_handler(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_dtls_unregister_if); + +/* + * nss_get_dtls_context() + */ +struct nss_ctx_instance *nss_dtls_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.dtls_handler_id]; +} +EXPORT_SYMBOL(nss_dtls_get_context); + +/* + * nss_dtls_msg_init() + * Initialize nss_dtls msg. 
+ */ +void nss_dtls_msg_init(struct nss_dtls_msg *ncm, uint16_t if_num, + uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_dtls_msg_init); + +/* + * nss_dtls_get_ifnum_with_coreid() + */ +int32_t nss_dtls_get_ifnum_with_coreid(int32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_dtls_get_context(); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_dtls_get_ifnum_with_coreid); + +/* + * nss_dtls_register_handler() + */ +void nss_dtls_register_handler(void) +{ + sema_init(&dtls_pvt.sem, 1); + init_completion(&dtls_pvt.complete); + + nss_dtls_stats_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn.c b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn.c new file mode 100644 index 000000000..024d217d5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn.c @@ -0,0 +1,451 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_dtls_cmn_log.h" +#include "nss_dtls_cmn_stats.h" +#include "nss_dtls_cmn_strings.h" + +#define NSS_DTLS_CMN_TX_TIMEOUT 3000 /* 3 Seconds */ +#define NSS_DTLS_CMN_INTERFACE_MAX_LONG BITS_TO_LONGS(NSS_MAX_NET_INTERFACES) + +/* + * Private data structure. + */ +static struct nss_dtls_cmn_pvt { + struct semaphore sem; + struct completion complete; + enum nss_dtls_cmn_error resp; + unsigned long if_map[NSS_DTLS_CMN_INTERFACE_MAX_LONG]; +} dtls_cmn_pvt; + +/* + * nss_dtls_cmn_verify_ifnum() + * Verify if the interface number is a DTLS interface. + */ +static bool nss_dtls_cmn_verify_ifnum(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + enum nss_dynamic_interface_type type = nss_dynamic_interface_get_type(nss_ctx, if_num); + + if (type == NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_INNER) + return true; + + if (type == NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_OUTER) + return true; + + if (if_num == NSS_DTLS_INTERFACE) + return true; + + return false; +} + +/* + * nss_dtls_cmn_handler() + * Handle NSS -> HLOS messages for dtls tunnel. + */ +static void nss_dtls_cmn_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *data) +{ + nss_dtls_cmn_msg_callback_t cb; + void *app_data; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + nss_trace("%px: handle event for interface num :%u", nss_ctx, ncm->interface); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_DTLS_CMN_MSG_MAX) { + nss_warning("%px:Bad message type(%d) for DTLS interface %d", nss_ctx, ncm->type, ncm->interface); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_dtls_cmn_msg)) { + nss_warning("%px:Bad message length(%d)", nss_ctx, ncm->len); + return; + } + + if (ncm->type == NSS_DTLS_CMN_MSG_TYPE_SYNC_STATS) { + nss_dtls_cmn_stats_sync(nss_ctx, ncm); + nss_dtls_cmn_stats_notify(nss_ctx, ncm->interface); + } + + /* + * Update the callback and app_data for NOTIFY messages. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->nss_rx_interface_handlers[ncm->interface].app_data; + } + + /* + * Log failures. + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Trace messages. + */ + nss_dtls_cmn_log_rx_msg((struct nss_dtls_cmn_msg *)ncm); + + /* + * Callback. + */ + cb = (nss_dtls_cmn_msg_callback_t)ncm->cb; + app_data = (void *)ncm->app_data; + + /* + * Call DTLS session callback. + */ + if (!cb) { + nss_warning("%px: No callback for dtls session interface %d", nss_ctx, ncm->interface); + return; + } + + nss_trace("%px: calling dtlsmgr event handler(%u)", nss_ctx, ncm->interface); + cb(app_data, ncm); +} + +/* + * nss_dtls_cmn_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_dtls_cmn_callback(void *app_data, struct nss_cmn_msg *ncm) +{ + /* + * This callback is for synchronous operation. The caller sends its + * response pointer which needs to be loaded with the response + * data arriving from the NSS. + */ + enum nss_dtls_cmn_error *resp = (enum nss_dtls_cmn_error *)app_data; + + *resp = (ncm->response == NSS_CMN_RESPONSE_ACK) ? NSS_DTLS_CMN_ERROR_NONE : ncm->error; + complete(&dtls_cmn_pvt.complete); + + return; +} + +/* + * nss_dtls_cmn_ifmap_get() + * Return DTLS common active interfaces map. 
+ */ +unsigned long *nss_dtls_cmn_ifmap_get(void) +{ + return dtls_cmn_pvt.if_map; +} + +/* + * nss_dtls_cmn_tx_buf() + * Transmit buffer over DTLS interface. + */ +nss_tx_status_t nss_dtls_cmn_tx_buf(struct sk_buff *skb, uint32_t if_num, struct nss_ctx_instance *nss_ctx) +{ + if (!nss_dtls_cmn_verify_ifnum(nss_ctx, if_num)) + return NSS_TX_FAILURE; + + return nss_core_send_packet(nss_ctx, skb, if_num, H2N_BIT_FLAG_VIRTUAL_BUFFER | H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_dtls_cmn_tx_buf); + +/* + * nss_dtls_cmn_tx_msg() + * Transmit a DTLS message to NSS firmware. + */ +nss_tx_status_t nss_dtls_cmn_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_dtls_cmn_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + if (ncm->type >= NSS_DTLS_CMN_MSG_MAX) { + nss_warning("%px: dtls message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + if (!nss_dtls_cmn_verify_ifnum(nss_ctx, ncm->interface)) { + nss_warning("%px: dtls message interface is bad: %u", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + /* + * Trace messages. + */ + nss_dtls_cmn_log_tx_msg(msg); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_dtls_cmn_tx_msg); + +/* + * nss_dtls_cmn_tx_msg_sync() + * Transmit a DTLS message to NSS firmware synchronously. + */ +nss_tx_status_t nss_dtls_cmn_tx_msg_sync(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + enum nss_dtls_cmn_msg_type type, uint16_t len, + struct nss_dtls_cmn_msg *ndcm, enum nss_dtls_cmn_error *resp) +{ + struct nss_dtls_cmn_msg ndcm_local; + nss_tx_status_t status; + int ret; + + /* + * Length of the message should be the based on type. + */ + if (len > sizeof(ndcm_local.msg)) { + nss_warning("%px: (%u)Bad message length(%u) for type (%d)", nss_ctx, if_num, len, type); + return NSS_TX_FAILURE_TOO_LARGE; + } + + /* + * Response buffer is a required for copying the response for message. 
+ */ + if (!resp) { + nss_warning("%px: (%u)Response buffer is empty, type(%d)", nss_ctx, if_num, type); + return NSS_TX_FAILURE_BAD_PARAM; + } + + /* + * TODO: this can be removed in future as we need to ensure that the response + * memory is only updated when the current outstanding request is waiting. + * This can be solved by introducing sequence no. in messages and only completing + * the message if the sequence no. matches. For now this is solved by passing + * a known memory dtls_cmn_pvt.resp. + */ + down(&dtls_cmn_pvt.sem); + + /* + * We need to copy the message content into the actual message + * to be sent to NSS. + */ + nss_dtls_cmn_msg_init(&ndcm_local, if_num, type, len, nss_dtls_cmn_callback, &dtls_cmn_pvt.resp); + memcpy(&ndcm_local.msg, &ndcm->msg, len); + + status = nss_dtls_cmn_tx_msg(nss_ctx, &ndcm_local); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: dtls_tx_msg failed", nss_ctx); + goto done; + } + + ret = wait_for_completion_timeout(&dtls_cmn_pvt.complete, msecs_to_jiffies(NSS_DTLS_CMN_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: DTLS msg tx failed due to timeout", nss_ctx); + status = NSS_TX_FAILURE_NOT_READY; + goto done; + } + + /* + * Read memory barrier. + */ + smp_rmb(); + + /* + * Copy the response received. + */ + *resp = dtls_cmn_pvt.resp; + + /* + * Only in case of non-error response we will + * indicate success. + */ + if (dtls_cmn_pvt.resp != NSS_DTLS_CMN_ERROR_NONE) + status = NSS_TX_FAILURE; + +done: + up(&dtls_cmn_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_dtls_cmn_tx_msg_sync); + +/* + * nss_dtls_cmn_notify_register() + * Register a handler for notification from NSS firmware. 
+ */ +struct nss_ctx_instance *nss_dtls_cmn_notify_register(uint32_t if_num, nss_dtls_cmn_msg_callback_t ev_cb, + void *app_data) +{ + struct nss_ctx_instance *nss_ctx = nss_dtls_cmn_get_context(); + uint32_t ret; + + BUG_ON(!nss_ctx); + + ret = nss_core_register_handler(nss_ctx, if_num, nss_dtls_cmn_handler, app_data); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to register event handler for interface(%u)", nss_ctx, if_num); + return NULL; + } + + ret = nss_core_register_msg_handler(nss_ctx, if_num, ev_cb); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: unable to register event handler for interface(%u)", nss_ctx, if_num); + return NULL; + } + + return nss_ctx; +} +EXPORT_SYMBOL(nss_dtls_cmn_notify_register); + +/* + * nss_dtls_cmn_notify_unregister() + * Unregister notification callback handler. + */ +void nss_dtls_cmn_notify_unregister(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_dtls_cmn_get_context(); + uint32_t ret; + + BUG_ON(!nss_ctx); + + ret = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to unregister event handler for interface(%u)", nss_ctx, if_num); + return; + } + + ret = nss_core_unregister_handler(nss_ctx, if_num); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to unregister event handler for interface(%u)", nss_ctx, if_num); + return; + } + + return; +} +EXPORT_SYMBOL(nss_dtls_cmn_notify_unregister); + +/* + * nss_dtls_cmn_register_if() + * Register data and event callback handlers for dynamic interface. 
+ */ +struct nss_ctx_instance *nss_dtls_cmn_register_if(uint32_t if_num, + nss_dtls_cmn_data_callback_t data_cb, + nss_dtls_cmn_msg_callback_t ev_cb, + struct net_device *netdev, + uint32_t features, + uint32_t type, + void *app_data) +{ + struct nss_ctx_instance *nss_ctx = nss_dtls_cmn_get_context(); + uint32_t ret; + + if (!nss_dtls_cmn_verify_ifnum(nss_ctx, if_num)) { + nss_warning("%px: DTLS Interface is not dynamic:%u", nss_ctx, if_num); + return NULL; + } + + if (nss_ctx->subsys_dp_register[if_num].ndev) { + nss_warning("%px: Cannot find free slot for DTLS NSS I/F:%u", nss_ctx, if_num); + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, data_cb, NULL, app_data, netdev, features); + nss_ctx->subsys_dp_register[if_num].type = type; + + ret = nss_core_register_handler(nss_ctx, if_num, nss_dtls_cmn_handler, app_data); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to register event handler for interface(%u)", nss_ctx, if_num); + return NULL; + } + + ret = nss_core_register_msg_handler(nss_ctx, if_num, ev_cb); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: unable to register event handler for interface(%u)", nss_ctx, if_num); + return NULL; + } + + /* + * Atomically set the bitmap for the interface number. + */ + set_bit(if_num, dtls_cmn_pvt.if_map); + return nss_ctx; +} +EXPORT_SYMBOL(nss_dtls_cmn_register_if); + +/* + * nss_dtls_cmn_unregister_if() + * Unregister data and event callback handlers for the interface. + */ +void nss_dtls_cmn_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_dtls_cmn_get_context(); + uint32_t ret; + + if (!nss_ctx->subsys_dp_register[if_num].ndev) { + nss_warning("%px: Cannot find registered netdev for DTLS NSS I/F:%u", nss_ctx, if_num); + return; + } + + /* + * Atomically clear the bitmap for the interface number. 
+ */ + clear_bit(if_num, dtls_cmn_pvt.if_map); + + ret = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to unregister event handler for interface(%u)", nss_ctx, if_num); + return; + } + + nss_core_unregister_handler(nss_ctx, if_num); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + nss_ctx->subsys_dp_register[if_num].type = 0; +} +EXPORT_SYMBOL(nss_dtls_cmn_unregister_if); + +/* + * nss_dtls_get_context() + * Return DTLS NSS context. + */ +struct nss_ctx_instance *nss_dtls_cmn_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.dtls_handler_id]; +} +EXPORT_SYMBOL(nss_dtls_cmn_get_context); + +/* + * nss_dtls_cmn_msg_init() + * Initialize nss_dtls_cmn msg. + */ +void nss_dtls_cmn_msg_init(struct nss_dtls_cmn_msg *ncm, uint32_t if_num, + uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_dtls_cmn_msg_init); + +/* + * nss_dtls_cmn_get_ifnum() + * Return DTLS interface number with coreid. + */ +int32_t nss_dtls_cmn_get_ifnum(int32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_dtls_cmn_get_context(); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_dtls_cmn_get_ifnum); + +/* + * nss_dtls_cmn_register_handler() + * DTLS initialization. 
+ */ +void nss_dtls_cmn_register_handler(void) +{ + sema_init(&dtls_cmn_pvt.sem, 1); + init_completion(&dtls_cmn_pvt.complete); + nss_dtls_cmn_stats_dentry_create(); + nss_dtls_cmn_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_log.c new file mode 100644 index 000000000..41ad37c44 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_log.c @@ -0,0 +1,178 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_dtls_cmn_log.c + * NSS DTLS common logger file. 
+ */
+
+#include "nss_core.h"
+
+/*
+ * nss_dtls_cmn_log_message_types_str
+ *	DTLS common message strings
+ */
+static int8_t *nss_dtls_cmn_log_message_types_str[NSS_DTLS_CMN_MSG_MAX] __maybe_unused = {
+	"DTLS_CMN Configure Node",
+	"DTLS_CMN Configure Base Context Parameter",
+	"DTLS_CMN Configure DTLS Parameters",
+	"DTLS_CMN Switch DTLS Transform",
+	"DTLS_CMN Deconfigure Context",
+	"DTLS_CMN Synchronize Stats",
+	"DTLS_CMN Node Statistics"
+};
+
+/*
+ * nss_dtls_cmn_log_error_response_types_str
+ *	Strings for error types for DTLS common messages
+ */
+static int8_t *nss_dtls_cmn_log_error_response_types_str[NSS_DTLS_CMN_ERROR_MAX] __maybe_unused = {
+	"DTLS_CMN No Error",
+	"DTLS_CMN Unknown Message",
+	"DTLS_CMN Invalid Destination Interface",
+	"DTLS_CMN Invalid Source Interface",
+	"DTLS_CMN Invalid Crypto",
+	"DTLS_CMN Invalid Version",
+	"DTLS_CMN Invalid Context Type",
+	"DTLS_CMN Invalid Context Words",
+	"DTLS_CMN Hardware Context Alloc Fail",
+	"DTLS_CMN Copy Context Failure",
+	"DTLS_CMN Switch Hardware Context Fail",
+	"DTLS_CMN Already Configured",
+	"DTLS_CMN No Memory",
+	"DTLS_CMN Copy Nonce Failure"
+};
+
+/*
+ * nss_dtls_cmn_hdr_config_msg()
+ *	Log DTLS common header configure message.
+ */ +static void nss_dtls_cmn_hdr_config_msg(struct nss_dtls_cmn_msg *ndm) +{ + struct nss_dtls_cmn_ctx_config_hdr *ndchm __maybe_unused = &ndm->msg.hdr_cfg; + nss_trace("%px: NSS DTLS_CMN Header Configure Message:\n" + "DTLS_CMN flags: %x\n" + "DTLS_CMN destination interface number: %d\n" + "DTLS_CMN source interface number: %d\n" + "DTLS_CMN source ip: %px\n" + "DTLS_CMN destination ip: %px\n" + "DTLS_CMN source port: %d\n" + "DTLS_CMN destination port: %d\n" + "DTLS_CMN time to live: %d\n" + "DTLS_CMN dscp value: %x\n" + "DTLS_CMN dscp copy value: %x\n" + "DTLS_CMN DF flag: %x\n", + ndchm, ndchm->flags, + ndchm->dest_ifnum, ndchm->src_ifnum, + &ndchm->sip, &ndchm->dip, + ndchm->sport, ndchm->dport, + ndchm->hop_limit_ttl, ndchm->dscp, + ndchm->dscp_copy, ndchm->df); +}; + +/* + * nss_dtls_cmn_dtls_config_msg() + * Log DTLS common dtls configure message. + */ +static void nss_dtls_cmn_dtls_config_msg(struct nss_dtls_cmn_msg *ndm) +{ + struct nss_dtls_cmn_ctx_config_dtls *ndcdm __maybe_unused = &ndm->msg.dtls_cfg; + nss_trace("%px: NSS DTLS_CMN DTLS Configure Message:\n" + "DTLS_CMN version: %d\n" + "DTLS_CMN crypto Index: %d\n" + "DTLS_CMN window size: %d\n" + "DTLS_CMN initial epoch: %d\n" + "DTLS_CMN IV length for encapsulation: %d\n" + "DTLS_CMN authentication hash length for encapsulation: %d\n" + "DTLS_CMN cipher block length: %d\n" + "DTLS_CMN reserved: %x\n", + ndcdm, ndcdm->ver, + ndcdm->crypto_idx, ndcdm->window_size, + ndcdm->epoch, ndcdm->iv_len, + ndcdm->hash_len, ndcdm->blk_len, + ndcdm->res1); +}; + +/* + * nss_dtls_cmn_log_verbose() + * Log message contents. 
+ */ +static void nss_dtls_cmn_log_verbose(struct nss_dtls_cmn_msg *ndm) +{ + switch (ndm->cm.type) { + case NSS_DTLS_CMN_MSG_TYPE_CONFIGURE_HDR: + nss_dtls_cmn_hdr_config_msg(ndm); + break; + + case NSS_DTLS_CMN_MSG_TYPE_CONFIGURE_DTLS: + nss_dtls_cmn_dtls_config_msg(ndm); + break; + + default: + nss_warning("%px: Invalid message type\n", ndm); + break; + } +} + +/* + * nss_dtls_cmn_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_dtls_cmn_log_tx_msg(struct nss_dtls_cmn_msg *ndm) +{ + if (ndm->cm.type >= NSS_DTLS_CMN_MSG_MAX) { + nss_warning("%px: Invalid message type\n", ndm); + return; + } + + nss_info("%px: type[%d]:%s\n", ndm, ndm->cm.type, nss_dtls_cmn_log_message_types_str[ndm->cm.type]); + nss_dtls_cmn_log_verbose(ndm); +} + +/* + * nss_dtls_cmn_log_rx_msg() + * Log messages received from FW. + */ +void nss_dtls_cmn_log_rx_msg(struct nss_dtls_cmn_msg *ndm) +{ + if (ndm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ndm); + return; + } + + if (ndm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ndm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ndm, ndm->cm.type, + nss_dtls_cmn_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response]); + goto verbose; + } + + if (ndm->cm.error >= NSS_DTLS_CMN_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ndm, ndm->cm.type, nss_dtls_cmn_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response], + ndm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ndm, ndm->cm.type, nss_dtls_cmn_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response], + ndm->cm.error, nss_dtls_cmn_log_error_response_types_str[ndm->cm.error]); + +verbose: + nss_dtls_cmn_log_verbose(ndm); +} diff --git 
a/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_log.h
new file mode 100644
index 000000000..3a5f75566
--- /dev/null
+++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_log.h
@@ -0,0 +1,37 @@
+/*
+ ******************************************************************************
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * ****************************************************************************
+ */
+
+#ifndef __NSS_DTLS_CMN_LOG_H
+#define __NSS_DTLS_CMN_LOG_H
+
+/*
+ * nss_dtls_cmn_log.h
+ *	NSS DTLS Common Log Header File.
+ */
+
+/*
+ * nss_dtls_cmn_log_tx_msg
+ *	Logs a DTLS common message that is sent to the NSS firmware.
+ */
+void nss_dtls_cmn_log_tx_msg(struct nss_dtls_cmn_msg *ndm);
+
+/*
+ * nss_dtls_cmn_log_rx_msg
+ *	Logs a DTLS common message that is received from the NSS firmware.
+ */ +void nss_dtls_cmn_log_rx_msg(struct nss_dtls_cmn_msg *ndm); + +#endif /* __NSS_DTLS_CMN_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_stats.c new file mode 100644 index 000000000..2908b28e3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_stats.c @@ -0,0 +1,215 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#include "nss_core.h" +#include "nss_dtls_cmn.h" +#include "nss_dtls_cmn_stats.h" +#include "nss_dtls_cmn_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_dtls_cmn_stats_notifier); + +/* + * Spinlock to protect dtls common statistics update/read + */ +DEFINE_SPINLOCK(nss_dtls_cmn_stats_lock); + +unsigned long *nss_dtls_cmn_ifmap_get(void); + +/* + * nss_dtls_cmn_ctx_stats + * dtls common ctx statistics + */ +uint64_t nss_dtls_cmn_ctx_stats[NSS_MAX_NET_INTERFACES][NSS_DTLS_CMN_CTX_STATS_MAX]; + +/* + * nss_dtls_cmn_stats_iface_type() + * Return a string for each interface type. 
+ */ +static const char *nss_dtls_cmn_stats_iface_type(enum nss_dynamic_interface_type type) +{ + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_INNER: + return "dtls_cmn_inner"; + + case NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_OUTER: + return "dtls_cmn_outer"; + + default: + return "invalid_interface"; + + } +} + +/* + * nss_dtls_cmn_stats_read() + * Read dtls common node statistics. + */ +static ssize_t nss_dtls_cmn_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats + + * few blank lines for banner printing + Number of Extra outputlines + * for future reference to add new stats + */ + uint32_t max_output_lines = NSS_DTLS_CMN_CTX_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + struct nss_ctx_instance *nss_ctx = nss_dtls_cmn_get_context(); + enum nss_dynamic_interface_type type; + unsigned long *ifmap; + uint64_t *stats_shadow; + ssize_t bytes_read = 0; + size_t size_wr = 0; + uint32_t if_num; + int32_t i; + int count; + char *lbuf; + + ifmap = nss_dtls_cmn_ifmap_get(); + count = bitmap_weight(ifmap, NSS_MAX_NET_INTERFACES); + if (count) { + size_al = size_al * count; + } + + lbuf = vzalloc(size_al); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return -ENOMEM; + } + + stats_shadow = vzalloc(NSS_DTLS_CMN_CTX_STATS_MAX * 8); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + vfree(lbuf); + return -ENOMEM; + } + + /* + * Common node stats for each DTLS dynamic interface. 
+ */ + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "dtls_cmn stats", NSS_STATS_SINGLE_CORE); + for_each_set_bit(if_num, ifmap, NSS_MAX_NET_INTERFACES) { + + type = nss_dynamic_interface_get_type(nss_ctx, if_num); + if ((type != NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_INNER) && + (type != NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_OUTER)) { + continue; + } + + spin_lock_bh(&nss_dtls_cmn_stats_lock); + for (i = 0; i < NSS_DTLS_CMN_CTX_STATS_MAX; i++) { + stats_shadow[i] = nss_dtls_cmn_ctx_stats[if_num][i]; + } + spin_unlock_bh(&nss_dtls_cmn_stats_lock); + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n%s if_num:%03u\n", + nss_dtls_cmn_stats_iface_type(type), if_num); + size_wr += nss_stats_print("dtls_cmn", NULL, NSS_STATS_SINGLE_INSTANCE, nss_dtls_cmn_ctx_stats_str, + stats_shadow, NSS_DTLS_CMN_CTX_STATS_MAX, lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + vfree(lbuf); + vfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_dtls_cmn_stats_ops. + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(dtls_cmn); + +/* + * nss_dtls_cmn_stats_dentry_create() + * Create dtls common statistics debug entry. + */ +void nss_dtls_cmn_stats_dentry_create(void) +{ + nss_stats_create_dentry("dtls_cmn", &nss_dtls_cmn_stats_ops); +} + +/* + * nss_dtls_cmn_stats_sync() + * Update dtls common node statistics. 
+ */ +void nss_dtls_cmn_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm) +{ + struct nss_dtls_cmn_msg *ndcm = (struct nss_dtls_cmn_msg *)ncm; + struct nss_dtls_cmn_ctx_stats *ndccs = &ndcm->msg.stats; + uint64_t *ctx_stats; + uint32_t *msg_stats; + uint16_t i = 0; + + spin_lock_bh(&nss_dtls_cmn_stats_lock); + + msg_stats = (uint32_t *)ndccs; + ctx_stats = nss_dtls_cmn_ctx_stats[ncm->interface]; + + for (i = 0; i < NSS_DTLS_CMN_CTX_STATS_MAX; i++, ctx_stats++, msg_stats++) { + *ctx_stats += *msg_stats; + } + + spin_unlock_bh(&nss_dtls_cmn_stats_lock); +} + +/* + * nss_dtls_cmn_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_dtls_cmn_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_dtls_cmn_stats_notification *dtls_cmn_stats; + + dtls_cmn_stats = kmalloc(sizeof(struct nss_dtls_cmn_stats_notification), GFP_ATOMIC); + if (!dtls_cmn_stats) { + nss_warning("Unable to allocate memory for stats notification\n"); + return; + } + + spin_lock_bh(&nss_dtls_cmn_stats_lock); + dtls_cmn_stats->core_id = nss_ctx->id; + dtls_cmn_stats->if_num = if_num; + memcpy(dtls_cmn_stats->stats_ctx, nss_dtls_cmn_ctx_stats[if_num], sizeof(dtls_cmn_stats->stats_ctx)); + spin_unlock_bh(&nss_dtls_cmn_stats_lock); + + atomic_notifier_call_chain(&nss_dtls_cmn_stats_notifier, NSS_STATS_EVENT_NOTIFY, dtls_cmn_stats); + kfree(dtls_cmn_stats); +} + +/* + * nss_dtls_cmn_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_dtls_cmn_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_dtls_cmn_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_dtls_cmn_stats_unregister_notifier); + +/* + * nss_dtls_cmn_stats_register_notifier() + * Registers statistics notifier. 
+ */ +int nss_dtls_cmn_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_dtls_cmn_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_dtls_cmn_stats_register_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_stats.h new file mode 100644 index 000000000..80e6edfa4 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_stats.h @@ -0,0 +1,26 @@ +/* + **************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ **************************************************************************** + */ + +#ifndef __NSS_DTLS_CMN_STATS_H +#define __NSS_DTLS_CMN_STATS_H + +#include + +extern void nss_dtls_cmn_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_dtls_cmn_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm); +extern void nss_dtls_cmn_stats_dentry_create(void); + +#endif /* __NSS_DTLS_CMN_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_strings.c new file mode 100644 index 000000000..8fc91976a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_strings.c @@ -0,0 +1,128 @@ +/* + **************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + **************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_dtls_cmn_strings.h" + +/* + * nss_dtls_cmn_ctx_stats_str + * dtls common ctx statistics strings. 
+ */ +struct nss_stats_info nss_dtls_cmn_ctx_stats_str[NSS_DTLS_CMN_CTX_STATS_MAX] = { + {"rx_pkts", NSS_STATS_TYPE_COMMON}, + {"rx_byts", NSS_STATS_TYPE_COMMON}, + {"tx_pkts", NSS_STATS_TYPE_COMMON}, + {"tx_byts", NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops", NSS_STATS_TYPE_DROP}, + {"rx_single_rec", NSS_STATS_TYPE_SPECIAL}, + {"rx_multi_rec", NSS_STATS_TYPE_SPECIAL}, + {"fail_crypto_resource", NSS_STATS_TYPE_DROP}, + {"fail_crypto_enqueue", NSS_STATS_TYPE_DROP}, + {"fail_headroom", NSS_STATS_TYPE_DROP}, + {"fail_tailroom", NSS_STATS_TYPE_DROP}, + {"fail_ver", NSS_STATS_TYPE_DROP}, + {"fail_epoch", NSS_STATS_TYPE_DROP}, + {"fail_dtls_record", NSS_STATS_TYPE_DROP}, + {"fail_capwap", NSS_STATS_TYPE_DROP}, + {"fail_replay", NSS_STATS_TYPE_DROP}, + {"fail_replay_dup", NSS_STATS_TYPE_DROP}, + {"fail_replay_win", NSS_STATS_TYPE_DROP}, + {"fail_queue", NSS_STATS_TYPE_DROP}, + {"fail_queue_nexthop", NSS_STATS_TYPE_DROP}, + {"fail_pbuf_alloc", NSS_STATS_TYPE_DROP}, + {"fail_pbuf_linear", NSS_STATS_TYPE_DROP}, + {"fail_pbuf_stats", NSS_STATS_TYPE_DROP}, + {"fail_pbuf_align", NSS_STATS_TYPE_DROP}, + {"fail_ctx_active", NSS_STATS_TYPE_DROP}, + {"fail_hwctx_active", NSS_STATS_TYPE_DROP}, + {"fail_cipher", NSS_STATS_TYPE_EXCEPTION}, + {"fail_auth", NSS_STATS_TYPE_EXCEPTION}, + {"fail_seq_ovf", NSS_STATS_TYPE_DROP}, + {"fail_blk_len", NSS_STATS_TYPE_DROP}, + {"fail_hash_len", NSS_STATS_TYPE_DROP}, + {"len_error", NSS_STATS_TYPE_DROP}, + {"token_error", NSS_STATS_TYPE_DROP}, + {"bypass_error", NSS_STATS_TYPE_DROP}, + {"config_error", NSS_STATS_TYPE_DROP}, + {"algo_error", NSS_STATS_TYPE_DROP}, + {"hash_ovf_error", NSS_STATS_TYPE_DROP}, + {"ttl_error", NSS_STATS_TYPE_DROP}, + {"csum_error", NSS_STATS_TYPE_DROP}, + {"timeout_error", NSS_STATS_TYPE_DROP}, + {"fail_cle_[0]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[1]", NSS_STATS_TYPE_DROP}, + 
{"fail_cle_[2]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[3]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[4]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[5]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[6]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[7]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[8]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[9]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[10]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[11]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[12]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[13]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[14]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[15]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[16]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[17]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[18]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[19]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[20]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[21]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[22]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[23]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[24]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[25]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[26]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[27]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[28]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[29]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[30]", NSS_STATS_TYPE_DROP}, + {"fail_cle_[31]", NSS_STATS_TYPE_DROP}, + {"seq_low", NSS_STATS_TYPE_SPECIAL}, + {"seq_high", NSS_STATS_TYPE_SPECIAL}, + {"epoch", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_dtls_cmn_ctx_stats_str_strings_read() + * Read dtls common ctx statistics names + */ +static ssize_t nss_dtls_cmn_ctx_stats_str_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_dtls_cmn_ctx_stats_str, NSS_DTLS_CMN_CTX_STATS_MAX); +} + +/* + * nss_dtls_cmn_ctx_stats_str_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(dtls_cmn_ctx_stats_str); + +/* + * nss_dtls_cmn_strings_dentry_create() + * Create dtls common statistics strings debug entry. 
+ */ +void nss_dtls_cmn_strings_dentry_create(void) +{ + nss_strings_create_dentry("dtls_cmn_ctx_stats_str", &nss_dtls_cmn_ctx_stats_str_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_strings.h new file mode 100644 index 000000000..0c0bc448d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_cmn_strings.h @@ -0,0 +1,25 @@ +/* + **************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ **************************************************************************** + */ + +#ifndef __NSS_DTLS_CMN_STRINGS_H +#define __NSS_DTLS_CMN_STRINGS_H + +#include "nss_dtls_cmn_stats.h" + +extern struct nss_stats_info nss_dtls_cmn_ctx_stats_str[NSS_DTLS_CMN_CTX_STATS_MAX]; +extern void nss_dtls_cmn_strings_dentry_create(void); + +#endif /* __NSS_DTLS_CMN_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dtls_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_log.c new file mode 100644 index 000000000..5e1e33e7a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_log.c @@ -0,0 +1,185 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_dtls_log.c + * NSS DTLS logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_dtls_log_message_types_str + * DTLS message strings + */ +static int8_t *nss_dtls_log_message_types_str[NSS_DTLS_MSG_MAX] __maybe_unused = { + "DTLS Session Configure", + "DTLS Session Destroy", + "DTLS Session Stats", + "DTLS Encap Cipher Update", + "DTLS Encap Cipher Switch", + "DTLS Decap Cipher Update", + "DTLS Decap Cipher Switch" +}; + +/* + * nss_dtls_log_error_response_types_str + * Strings for error types for DTLS messages + */ +static int8_t *nss_dtls_log_error_response_types_str[NSS_DTLS_ERR_MAX] __maybe_unused = { + "DTLS Unknown Message", + "DTLS Invalid APP Interface", + "DTLS Invalid Parameter", + "DTLS Invalid Version", + "DTLS No Memory" +}; + +/* + * nss_dtls_session_config_msg() + * Log DTLS session configure message. + */ +static void nss_dtls_session_config_msg(struct nss_dtls_msg *ndm) +{ + struct nss_dtls_session_configure *ndscm __maybe_unused = &ndm->msg.cfg; + nss_trace("%px: NSS DTLS Session Configure Message:\n" + "DTLS Version: %d\n" + "DTLS Flags: %x\n" + "DTLS crypto index encap: %d\n" + "DTLS crypto index decap: %d\n" + "DTLS IV length for encapsulation: %d\n" + "DTLS IV length for decapsulation: %d\n" + "DTLS authentication hash length for encapsulation: %d\n" + "DTLS authentication hash length for decapsulation: %d\n" + "DTLS cipher algorithm for encapsulation: %x\n" + "DTLS authentication algorithm for encapsulation: %x\n" + "DTLS cipher algorithm for decapsulation: %x\n" + "DTLS authentication algorithm for decapsulation: %x\n" + "DTLS NSS interface: %x\n" + "DTLS source port: %d\n" + "DTLS destination port: %d\n" + "DTLS source ip: %px\n" + "DTLS destination ip: %px\n" + "DTLS window size: %d\n" + "DTLS epoch: %d\n" + "DTLS outer IP TTL: %d\n" + "DTLS reserved1 padding: %x\n" + "DTLS reserved2 padding: %x\n", + ndscm, ndscm->ver, + ndscm->flags, ndscm->crypto_idx_encap, + ndscm->crypto_idx_decap, ndscm->iv_len_encap, + ndscm->iv_len_decap, ndscm->hash_len_encap, + 
ndscm->hash_len_decap, ndscm->cipher_algo_encap,
+		ndscm->auth_algo_encap, ndscm->cipher_algo_decap,
+		ndscm->auth_algo_decap, ndscm->nss_app_if,
+		ndscm->sport, ndscm->dport,
+		&ndscm->sip, &ndscm->dip,
+		ndscm->window_size, ndscm->epoch,
+		ndscm->oip_ttl, ndscm->reserved1,
+		ndscm->reserved2);
+}
+
+/*
+ * nss_dtls_session_cipher_update_msg()
+ *	Log DTLS Session Cipher Update message.
+ */
+static void nss_dtls_session_cipher_update_msg(struct nss_dtls_msg *ndm)
+{
+	struct nss_dtls_session_cipher_update *ndscum __maybe_unused = &ndm->msg.cipher_update;
+	nss_trace("%px: NSS DTLS Session Cipher Update message\n"
+		"DTLS crypto index: %d\n"
+		"DTLS hash length: %d\n"
+		"DTLS crypto IV length for encapsulation: %d\n"
+		"DTLS encapsulation cipher: %x\n"
+		"DTLS encapsulation authentication algorigthm: %x\n"
+		"DTLS epoch: %d\n"
+		"DTLS reserved: %x\n",
+		ndscum, ndscum->crypto_idx,
+		ndscum->hash_len, ndscum->iv_len,
+		ndscum->cipher_algo, ndscum->auth_algo,
+		ndscum->epoch, ndscum->reserved);
+}
+
+/*
+ * nss_dtls_log_verbose()
+ *	Log message contents.
+ */
+static void nss_dtls_log_verbose(struct nss_dtls_msg *ndm)
+{
+	switch (ndm->cm.type) {
+	case NSS_DTLS_MSG_REKEY_DECAP_CIPHER_UPDATE:
+	case NSS_DTLS_MSG_REKEY_ENCAP_CIPHER_UPDATE:
+		nss_dtls_session_cipher_update_msg(ndm);
+		break;
+
+	case NSS_DTLS_MSG_SESSION_CONFIGURE:
+		nss_dtls_session_config_msg(ndm);
+		break;
+
+	default:
+		nss_warning("%px: Invalid message type\n", ndm);
+		break;
+	}
+}
+
+/*
+ * nss_dtls_log_tx_msg()
+ *	Log messages transmitted to FW.
+ */
+void nss_dtls_log_tx_msg(struct nss_dtls_msg *ndm)
+{
+	if (ndm->cm.type >= NSS_DTLS_MSG_MAX) {
+		nss_warning("%px: Invalid message type\n", ndm);
+		return;
+	}
+
+	nss_info("%px: type[%d]:%s\n", ndm, ndm->cm.type, nss_dtls_log_message_types_str[ndm->cm.type]);
+	nss_dtls_log_verbose(ndm);
+}
+
+/*
+ * nss_dtls_log_rx_msg()
+ *	Log messages received from FW.
+ */ +void nss_dtls_log_rx_msg(struct nss_dtls_msg *ndm) +{ + if (ndm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ndm); + return; + } + + if (ndm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ndm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ndm, ndm->cm.type, + nss_dtls_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response]); + goto verbose; + } + + if (ndm->cm.error >= NSS_DTLS_ERR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ndm, ndm->cm.type, nss_dtls_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response], + ndm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ndm, ndm->cm.type, nss_dtls_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response], + ndm->cm.error, nss_dtls_log_error_response_types_str[ndm->cm.error]); + +verbose: + nss_dtls_log_verbose(ndm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dtls_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_log.h new file mode 100644 index 000000000..99fca71a8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_DTLS_LOG_H +#define __NSS_DTLS_LOG_H + +/* + * nss_dtls_log.h + * NSS DTLS Log Header File + */ + +/* + * nss_dtls_log_tx_msg + * Logs a DTLS message that is sent to the NSS firmware. + */ +void nss_dtls_log_tx_msg(struct nss_dtls_msg *ndm); + +/* + * nss_dtls_log_rx_msg + * Logs a DTLS message that is received from the NSS firmware. + */ +void nss_dtls_log_rx_msg(struct nss_dtls_msg *ndm); + +#endif /* __NSS_DTLS_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dtls_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_stats.c new file mode 100644 index 000000000..0aeff87b0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_stats.c @@ -0,0 +1,143 @@ +/* + ************************************************************************** + * Copyright (c) 2017, 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_dtls_stats.h" + +/* + * nss_dtls_stats_session_str + * DTLS statistics strings for nss session stats. + */ +struct nss_stats_info nss_dtls_stats_session_str[NSS_DTLS_STATS_SESSION_MAX] = { + {"rx_pkts" , NSS_STATS_TYPE_COMMON}, + {"tx_pkts" , NSS_STATS_TYPE_COMMON}, + {"rx_drops[0]" , NSS_STATS_TYPE_DROP}, + {"rx_drops[1]" , NSS_STATS_TYPE_DROP}, + {"rx_drops[2]" , NSS_STATS_TYPE_DROP}, + {"rx_drops[3]" , NSS_STATS_TYPE_DROP}, + {"rx_auth_done" , NSS_STATS_TYPE_SPECIAL}, + {"tx_auth_done" , NSS_STATS_TYPE_SPECIAL}, + {"rx_cipher_done" , NSS_STATS_TYPE_SPECIAL}, + {"tx_cipher_done" , NSS_STATS_TYPE_SPECIAL}, + {"rx_cbuf_alloc_fail" , NSS_STATS_TYPE_DROP}, + {"tx_cbuf_alloc_fail" , NSS_STATS_TYPE_DROP}, + {"tx_cenqueue_fail" , NSS_STATS_TYPE_DROP}, + {"rx_cenqueue_fail" , NSS_STATS_TYPE_DROP}, + {"tx_drops_hroom" , NSS_STATS_TYPE_DROP}, + {"tx_drops_troom" , NSS_STATS_TYPE_DROP}, + {"tx_forward_enqueue_fail" , NSS_STATS_TYPE_DROP}, + {"rx_forward_enqueue_fail" , NSS_STATS_TYPE_DROP}, + {"rx_invalid_version" , NSS_STATS_TYPE_DROP}, + {"rx_invalid_epoch" , NSS_STATS_TYPE_DROP}, + {"rx_malformed" , NSS_STATS_TYPE_DROP}, + {"rx_cipher_fail" , NSS_STATS_TYPE_EXCEPTION}, + {"rx_auth_fail" , NSS_STATS_TYPE_EXCEPTION}, + {"rx_capwap_classify_fail" , NSS_STATS_TYPE_DROP}, + {"rx_single_rec_dgram" , NSS_STATS_TYPE_SPECIAL}, + {"rx_multi_rec_dgram" , NSS_STATS_TYPE_SPECIAL}, + {"rx_replay_fail" , NSS_STATS_TYPE_DROP}, + {"rx_replay_duplicate" , NSS_STATS_TYPE_SPECIAL}, + {"rx_replay_out_of_window" , NSS_STATS_TYPE_SPECIAL}, + {"outflow_queue_full" , NSS_STATS_TYPE_DROP}, + {"decap_queue_full" , NSS_STATS_TYPE_DROP}, + {"pbuf_alloc_fail" , NSS_STATS_TYPE_DROP}, + {"pbuf_copy_fail" , NSS_STATS_TYPE_DROP}, + {"epoch" , NSS_STATS_TYPE_DROP}, + {"tx_seq_high" , NSS_STATS_TYPE_SPECIAL}, + {"tx_seq_low" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_dtls_stats_read() 
+ * Read DTLS session statistics. + */ +static ssize_t nss_dtls_stats_read(struct file *fp, char __user *ubuf, + size_t sz, loff_t *ppos) +{ + uint32_t max_output_lines = 2 + (NSS_MAX_DTLS_SESSIONS + * (NSS_DTLS_STATS_SESSION_MAX + 2)) + 2; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct net_device *dev; + int id; + struct nss_dtls_stats_session *dtls_session_stats = NULL; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + dtls_session_stats = kzalloc((sizeof(struct nss_dtls_stats_session) + * NSS_MAX_DTLS_SESSIONS), GFP_KERNEL); + if (unlikely(dtls_session_stats == NULL)) { + nss_warning("Could not allocate memory for populating DTLS stats"); + kfree(lbuf); + return 0; + } + + /* + * Get all stats. + */ + nss_dtls_session_stats_get(dtls_session_stats); + + /* + * Session stats. + */ + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "dtls", NSS_STATS_SINGLE_CORE); + + for (id = 0; id < NSS_MAX_DTLS_SESSIONS; id++) { + if (!dtls_session_stats[id].valid) + break; + + dev = dev_get_by_index(&init_net, dtls_session_stats[id].if_index); + if (likely(dev)) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "%d. nss interface id=%d, netdevice=%s\n", + id, dtls_session_stats[id].if_num, + dev->name); + dev_put(dev); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "%d. nss interface id=%d\n", id, + dtls_session_stats[id].if_num); + } + + size_wr += nss_stats_print("dtls_cmn", NULL, id, nss_dtls_stats_session_str, dtls_session_stats[id].stats, NSS_DTLS_STATS_SESSION_MAX, lbuf, size_wr, size_al); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(dtls_session_stats); + kfree(lbuf); + return bytes_read; +} + +/* + * nss_dtls_stats_ops. 
+ */ +NSS_STATS_DECLARE_FILE_OPERATIONS(dtls) + +/* + * nss_dtls_stats_dentry_create() + * Create DTLS statistics debug entry. + */ +void nss_dtls_stats_dentry_create(void) +{ + nss_stats_create_dentry("dtls", &nss_dtls_stats_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dtls_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_stats.h new file mode 100644 index 000000000..bf6a148b7 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dtls_stats.h @@ -0,0 +1,115 @@ +/* + ****************************************************************************** + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * **************************************************************************** + */ + +#ifndef __NSS_DTLS_STATS_H +#define __NSS_DTLS_STATS_H + +/* + * DTLS session debug statistic counters + */ +enum nss_dtls_stats_session_types { + NSS_DTLS_STATS_SESSION_RX_PKTS, + /* Rx packets */ + NSS_DTLS_STATS_SESSION_TX_PKTS, + /* Tx packets */ + NSS_DTLS_STATS_SESSION_RX_QUEUE_0_DROPPED, + NSS_DTLS_STATS_SESSION_RX_QUEUE_1_DROPPED, + NSS_DTLS_STATS_SESSION_RX_QUEUE_2_DROPPED, + NSS_DTLS_STATS_SESSION_RX_QUEUE_3_DROPPED, + NSS_DTLS_STATS_SESSION_RX_AUTH_DONE, + /* Rx successful authentication */ + NSS_DTLS_STATS_SESSION_TX_AUTH_DONE, + /* Tx authentication done */ + NSS_DTLS_STATS_SESSION_RX_CIPHER_DONE, + /* Rx cipher done */ + NSS_DTLS_STATS_SESSION_TX_CIPHER_DONE, + /* Tx cipher done */ + NSS_DTLS_STATS_SESSION_RX_CBUF_ALLOC_FAIL, + /* Rx crypto buffer alloc fail */ + NSS_DTLS_STATS_SESSION_TX_CBUF_ALLOC_FAIL, + /* Tx crypto buffer alloc fail */ + NSS_DTLS_STATS_SESSION_TX_CENQUEUE_FAIL, + /* Tx enqueue to crypto fail */ + NSS_DTLS_STATS_SESSION_RX_CENQUEUE_FAIL, + /* Rx enqueue to crypto fail */ + NSS_DTLS_STATS_SESSION_TX_DROPPED_HROOM, + /* Tx drop due to insufficient headroom */ + NSS_DTLS_STATS_SESSION_TX_DROPPED_TROOM, + /* Tx drop due to insufficient tailroom */ + NSS_DTLS_STATS_SESSION_TX_FORWARD_ENQUEUE_FAIL, + /* Enqueue failed to Tx node after encap */ + NSS_DTLS_STATS_SESSION_RX_FORWARD_ENQUEUE_FAIL, + /* Enqueue failed to Rx node after decap */ + NSS_DTLS_STATS_SESSION_RX_INVALID_VERSION, + /* Rx invalid DTLS version */ + NSS_DTLS_STATS_SESSION_RX_INVALID_EPOCH, + /* Rx invalid DTLS epoch */ + NSS_DTLS_STATS_SESSION_RX_MALFORMED, + /* Rx malformed DTLS record */ + NSS_DTLS_STATS_SESSION_RX_CIPHER_FAIL, + /* Rx cipher fail */ + NSS_DTLS_STATS_SESSION_RX_AUTH_FAIL, + /* Rx authentication fail */ + NSS_DTLS_STATS_SESSION_RX_CAPWAP_CLASSIFY_FAIL, + /* Rx CAPWAP classification fail */ + NSS_DTLS_STATS_SESSION_RX_SINGLE_REC_DGRAM, + /* Rx single record datagrams 
processed */ + NSS_DTLS_STATS_SESSION_RX_MULTI_REC_DGRAM, + /* Rx multi record datagrams processed */ + NSS_DTLS_STATS_SESSION_RX_REPLAY_FAIL, + /* Rx anti-replay failures */ + NSS_DTLS_STATS_SESSION_RX_REPLAY_DUPLICATE, + /* Rx anti-replay fail due to duplicate record */ + NSS_DTLS_STATS_SESSION_RX_REPLAY_OUT_OF_WINDOW, + /* Rx anti-replay fail due to out of window record */ + NSS_DTLS_STATS_SESSION_OUTFLOW_QUEUE_FULL, + /* Tx drop due to encap queue full */ + NSS_DTLS_STATS_SESSION_DECAP_QUEUE_FULL, + /* Rx drop due to decap queue full */ + NSS_DTLS_STATS_SESSION_PBUF_ALLOC_FAIL, + /* Drops due to buffer allocation failure */ + NSS_DTLS_STATS_SESSION_PBUF_COPY_FAIL, + /* Drops due to buffer copy failure */ + NSS_DTLS_STATS_SESSION_EPOCH, + /* Current Epoch */ + NSS_DTLS_STATS_SESSION_TX_SEQ_HIGH, + /* Upper 16-bits of current sequence number */ + NSS_DTLS_STATS_SESSION_TX_SEQ_LOW, + /* Lower 32-bits of current sequence number */ + NSS_DTLS_STATS_SESSION_MAX, +}; + +/* + * DTLS session statistics + */ +struct nss_dtls_stats_session { + uint64_t stats[NSS_DTLS_STATS_SESSION_MAX]; + int32_t if_index; + uint32_t if_num; /* nss interface number */ + bool valid; +}; + +/* + * Stats APIs provided by nss_dtls.c + */ +extern void nss_dtls_session_stats_get(struct nss_dtls_stats_session *s); + +/* + * DTLS statistics APIs + */ +extern void nss_dtls_stats_dentry_create(void); + +#endif /* __NSS_DTLS_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface.c b/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface.c new file mode 100644 index 000000000..a7286bfd0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface.c @@ -0,0 +1,420 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_dynamic_interface_log.h" +#include "nss_dynamic_interface_stats.h" + +#define NSS_DYNAMIC_INTERFACE_COMP_TIMEOUT 60000 /* 60 Sec */ + +/* + * Declare atomic notifier data structure for statistics. 
+ */ +ATOMIC_NOTIFIER_HEAD(nss_dynamic_interface_stats_notifier); + +void nss_dynamic_interface_stats_notify(uint32_t if_num, uint32_t core_id); + +/* + * Message data structure to store the message result + */ +struct nss_dynamic_interface_msg_data { + struct completion complete; /* completion structure */ + int if_num; /* Interface number */ + enum nss_cmn_response response; /* Message response */ +}; + +static nss_dynamic_interface_assigned nss_dynamic_interface_assigned_types[NSS_CORE_MAX][NSS_MAX_DYNAMIC_INTERFACES]; /* Array of assigned interface types */ + +/* + * nss_dynamic_interface_handler() + * Handle NSS -> HLOS messages for dynamic interfaces + */ +static void nss_dynamic_interface_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + nss_dynamic_interface_msg_callback_t cb; + struct nss_dynamic_interface_msg *ndim = (struct nss_dynamic_interface_msg *)ncm; + int32_t if_num; + + BUG_ON(ncm->interface != NSS_DYNAMIC_INTERFACE); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_DYNAMIC_INTERFACE_MAX) { + nss_warning("%px: received invalid message %d for dynamic interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_dynamic_interface_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Trace messages. + */ + nss_dynamic_interface_log_rx_msg(ndim); + + /* + * Handling dynamic interface messages coming from NSS fw. 
+ */ + switch (ndim->cm.type) { + case NSS_DYNAMIC_INTERFACE_ALLOC_NODE: + if (ncm->response == NSS_CMN_RESPONSE_ACK) { + nss_info("%px alloc_node response ack if_num %d\n", nss_ctx, ndim->msg.alloc_node.if_num); + if_num = ndim->msg.alloc_node.if_num; + if (if_num > 0) { + nss_dynamic_interface_assigned_types[nss_ctx->id][if_num - NSS_DYNAMIC_IF_START] = ndim->msg.alloc_node.type; + } else { + nss_warning("%px: if_num < 0\n", nss_ctx); + } + } + + break; + + case NSS_DYNAMIC_INTERFACE_DEALLOC_NODE: + if (ncm->response == NSS_CMN_RESPONSE_ACK) { + nss_info("%px dealloc_node response ack if_num %d\n", nss_ctx, ndim->msg.dealloc_node.if_num); + if_num = ndim->msg.dealloc_node.if_num; + nss_dynamic_interface_assigned_types[nss_ctx->id][if_num - NSS_DYNAMIC_IF_START] = NSS_DYNAMIC_INTERFACE_TYPE_NONE; + /* + * Send dynamic interface dealloc notifications to the registered modules. + */ + nss_dynamic_interface_stats_notify(ndim->msg.dealloc_node.if_num, nss_ctx->id); + } + + break; + + default: + nss_warning("%px: Received response %d for type %d, interface %d", + nss_ctx, ncm->response, ncm->type, ncm->interface); + return; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + nss_warning("%px: nss_dynamic_interface_handler cb is NULL\n", nss_ctx); + return; + } + + /* + * Callback + */ + cb = (nss_dynamic_interface_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, ncm); +} + +/* + * nss_dynamic_interface_callback + * Callback to handle the message response from NSS FW. + */ +static void nss_dynamic_interface_callback(void *app_data, struct nss_cmn_msg *ncm) +{ + struct nss_dynamic_interface_msg_data *di_data = (struct nss_dynamic_interface_msg_data *)app_data; + struct nss_dynamic_interface_msg *ndim = (struct nss_dynamic_interface_msg *)ncm; + + di_data->response = ncm->response; + di_data->if_num = ndim->msg.alloc_node.if_num; + + /* + * Unblock the sleeping function. 
+ */
+	complete(&di_data->complete);
+}
+
+/*
+ * nss_dynamic_interface_tx()
+ *	Transmit a dynamic interface message to NSSFW, asynchronously.
+ */
+nss_tx_status_t nss_dynamic_interface_tx(struct nss_ctx_instance *nss_ctx, struct nss_dynamic_interface_msg *msg)
+{
+	struct nss_cmn_msg *ncm = &msg->cm;
+
+	/*
+	 * Sanity check the message
+	 */
+	if (ncm->interface != NSS_DYNAMIC_INTERFACE) {
+		nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface);
+		return NSS_TX_FAILURE;
+	}
+
+	if (ncm->type > NSS_DYNAMIC_INTERFACE_MAX) {
+		nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type);
+		return NSS_TX_FAILURE;
+	}
+
+	/*
+	 * Trace messages.
+	 */
+	nss_dynamic_interface_log_tx_msg(msg);
+
+	return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE);
+}
+
+/*
+ * nss_dynamic_interface_tx_sync()
+ *	Send the message to NSS and wait till we get an ACK or NACK for this msg.
+ */
+static nss_tx_status_t nss_dynamic_interface_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_dynamic_interface_msg_data *di_data,
+						struct nss_dynamic_interface_msg *ndim)
+{
+	nss_tx_status_t status;
+	int ret;
+
+	status = nss_dynamic_interface_tx(nss_ctx, ndim);
+	if (status != NSS_TX_SUCCESS) {
+		nss_warning("%px: not able to transmit msg successfully\n", nss_ctx);
+		return status;
+	}
+
+	/*
+	 * Blocking call, wait till we get ACK for this msg.
+	 */
+	ret = wait_for_completion_timeout(&di_data->complete, msecs_to_jiffies(NSS_DYNAMIC_INTERFACE_COMP_TIMEOUT));
+	if (ret == 0) {
+		nss_warning("%px: Waiting for ack timed out\n", nss_ctx);
+		return NSS_TX_FAILURE;
+	}
+
+	return status;
+}
+
+/*
+ * nss_dynamic_interface_alloc_node()
+ *	Allocates node of particular type on NSS and returns interface_num for this node or -1 in case of failure.
+ *
+ * Note: This function should not be called from soft_irq or interrupt context because it blocks till ACK/NACK is
+ * received for the message sent to NSS.
+ */ +int nss_dynamic_interface_alloc_node(enum nss_dynamic_interface_type type) +{ + struct nss_ctx_instance *nss_ctx = NULL; + struct nss_dynamic_interface_msg ndim; + struct nss_dynamic_interface_alloc_node_msg *ndia; + struct nss_dynamic_interface_msg_data di_data; + uint32_t core_id; + nss_tx_status_t status; + + if (type >= NSS_DYNAMIC_INTERFACE_TYPE_MAX) { + nss_warning("Dynamic if msg drooped as type is wrong %d\n", type); + return -1; + } + + core_id = nss_top_main.dynamic_interface_table[type]; + nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[core_id]; + di_data.if_num = -1; + di_data.response = false; + init_completion(&di_data.complete); + + nss_dynamic_interface_msg_init(&ndim, NSS_DYNAMIC_INTERFACE, NSS_DYNAMIC_INTERFACE_ALLOC_NODE, + sizeof(struct nss_dynamic_interface_alloc_node_msg), nss_dynamic_interface_callback, (void *)&di_data); + + ndia = &ndim.msg.alloc_node; + ndia->type = type; + + /* + * Initialize if_num to -1. The allocated if_num is returned by the firmware + * in the response message. + */ + ndia->if_num = -1; + + /* + * Calling synchronous transmit function. + */ + status = nss_dynamic_interface_tx_sync(nss_ctx, &di_data, &ndim); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px not able to transmit alloc node msg\n", nss_ctx); + return -1; + } + + /* + * Check response and return -1 if its a NACK else proceed. + */ + if (di_data.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px Received NACK from NSS - Response:%d\n", nss_ctx, di_data.response); + return -1; + } + + return di_data.if_num; +} + +/* + * nss_dynamic_interface_dealloc_node() + * Deallocate node of particular type and if_num in NSS. + * + * Note: This will just mark the state of node as not active, actual memory will be freed when reference count of that node becomes 0. + * This function should not be called from soft_irq or interrupt context because it blocks till ACK/NACK is received for the message + * sent to NSS. 
+ */ +nss_tx_status_t nss_dynamic_interface_dealloc_node(int if_num, enum nss_dynamic_interface_type type) +{ + struct nss_ctx_instance *nss_ctx = NULL; + struct nss_dynamic_interface_msg ndim; + struct nss_dynamic_interface_dealloc_node_msg *ndid; + struct nss_dynamic_interface_msg_data di_data; + uint32_t core_id; + nss_tx_status_t status; + + if (type >= NSS_DYNAMIC_INTERFACE_TYPE_MAX) { + nss_warning("Dynamic if msg dropped as type is wrong type %d if_num %d\n", type, if_num); + return NSS_TX_FAILURE_BAD_PARAM; + } + + core_id = nss_top_main.dynamic_interface_table[type]; + nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[core_id]; + di_data.response = false; + init_completion(&di_data.complete); + + if (nss_is_dynamic_interface(if_num) == false) { + nss_warning("%px: nss_dynamic_interface if_num is not in range %d\n", nss_ctx, if_num); + return NSS_TX_FAILURE_BAD_PARAM; + } + + nss_dynamic_interface_msg_init(&ndim, NSS_DYNAMIC_INTERFACE, NSS_DYNAMIC_INTERFACE_DEALLOC_NODE, + sizeof(struct nss_dynamic_interface_dealloc_node_msg), nss_dynamic_interface_callback, (void *)&di_data); + + ndid = &ndim.msg.dealloc_node; + ndid->type = type; + ndid->if_num = if_num; + + /* + * Calling synchronous transmit function. 
+ */ + status = nss_dynamic_interface_tx_sync(nss_ctx, &di_data, &ndim); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px not able to transmit alloc node msg\n", nss_ctx); + return status; + } + + if (di_data.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px Received NACK from NSS\n", nss_ctx); + return -1; + } + + return status; +} + +/* + * nss_dynamic_interface_register_handler() + */ +void nss_dynamic_interface_register_handler(struct nss_ctx_instance *nss_ctx) +{ + nss_core_register_handler(nss_ctx, NSS_DYNAMIC_INTERFACE, nss_dynamic_interface_handler, NULL); + nss_dynamic_interface_stats_dentry_create(); +} + +/* + * nss_is_dynamic_interface() + * Judge it is a valid dynamic interface + */ +bool nss_is_dynamic_interface(int if_num) +{ + return (if_num >= NSS_DYNAMIC_IF_START && if_num < NSS_SPECIAL_IF_START); +} + +/* + * nss_dynamic_interface_get_nss_ctx_by_type() + * Gets the NSS context using NSS dynamic interface type. + */ +struct nss_ctx_instance *nss_dynamic_interface_get_nss_ctx_by_type(enum nss_dynamic_interface_type type) +{ + struct nss_ctx_instance *nss_ctx = NULL; + uint32_t core_id; + + if (type >= NSS_DYNAMIC_INTERFACE_TYPE_MAX) { + nss_warning("Invalid param: Type is wrong %d\n", type); + return NULL; + } + + core_id = nss_top_main.dynamic_interface_table[type]; + nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[core_id]; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + return nss_ctx; +} + +/* + * nss_dynamic_interface_get_type() + * Gets the type of dynamic interface + */ +enum nss_dynamic_interface_type nss_dynamic_interface_get_type(struct nss_ctx_instance *nss_ctx, int if_num) +{ + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (nss_is_dynamic_interface(if_num) == false) { + return NSS_DYNAMIC_INTERFACE_TYPE_NONE; + } + + return nss_dynamic_interface_assigned_types[nss_ctx->id][if_num - NSS_DYNAMIC_IF_START]; +} + +/* + * nss_dynamic_interface_msg_init() + * Initialize dynamic interface message. 
+ */ +void nss_dynamic_interface_msg_init(struct nss_dynamic_interface_msg *ndm, uint16_t if_num, uint32_t type, uint32_t len, + void *cb, void *app_data) +{ + nss_cmn_msg_init(&ndm->cm, if_num, type, len, cb, app_data); +} + +/* + * nss_dynamic_interface_stats_notify() + * Sends notifications to all the registered modules. + */ +void nss_dynamic_interface_stats_notify(uint32_t if_num, uint32_t core_id) +{ + struct nss_dynamic_interface_notification stats; + + stats.core_id = core_id; + stats.if_num = if_num; + atomic_notifier_call_chain(&nss_dynamic_interface_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&stats); +} +EXPORT_SYMBOL(nss_dynamic_interface_stats_notify); + +/* + * nss_dynamic_interface_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_dynamic_interface_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_dynamic_interface_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_dynamic_interface_stats_register_notifier); + +/* + * nss_dynamic_interface_stats_unregister_notifier() + * Deregisters statistics notifier. 
+ */ +int nss_dynamic_interface_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_dynamic_interface_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_dynamic_interface_stats_unregister_notifier); + +EXPORT_SYMBOL(nss_dynamic_interface_alloc_node); +EXPORT_SYMBOL(nss_dynamic_interface_dealloc_node); +EXPORT_SYMBOL(nss_is_dynamic_interface); +EXPORT_SYMBOL(nss_dynamic_interface_get_type); +EXPORT_SYMBOL(nss_dynamic_interface_get_nss_ctx_by_type); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_log.c new file mode 100644 index 000000000..0c49aeb7e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_log.c @@ -0,0 +1,145 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_dynamic_interface_log.c + * NSS Dynamic Interface logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_dynamic_interface_log_message_types_str + * Dynamic Interface message strings + */ +static int8_t *nss_dynamic_interface_log_message_types_str[NSS_DYNAMIC_INTERFACE_MAX] __maybe_unused = { + "Dynamic Interface Alloc Node", + "Dynamic Interface Dealloc Node" +}; + +/* + * nss_dynamic_interface_log_error_response_types_str + * Strings for error types for dynamic interface messages + */ +static int8_t *nss_dynamic_interface_log_error_response_types_str[NSS_DYNAMIC_INTERFACE_ERR_MAX] __maybe_unused = { + "Dynamic Interface Error Unknown Interface", + "Dynamic Interface Error Unavailable Interface", + "Dynamic Interface Error Invalid Interface Type", + "Dynamic Interface Error Invalid Interface Number", + "Dynamic Interface Error Alloc Function Unavailable", + "Dynamic Interface Error Dealloc Funciton Unavailable", + "Dynamic Interface Error Allocation Error", + "Dynamic Interface Error Interface Number Mismatch" +}; + +/* + * nss_dynamic_interface_alloc_node_msg() + * Log Dynamic Interface alloc node message. + */ +static void nss_dynamic_interface_alloc_node_log_msg(struct nss_dynamic_interface_msg *ndm) +{ + struct nss_dynamic_interface_alloc_node_msg *ndanm __maybe_unused = &ndm->msg.alloc_node; + nss_trace("%px: NSS Dynamic Interface Alloc Node Message:\n" + "Dynamic Interface Type: %d\n" + "Dynamic Interface Number: %d\n", + ndanm, ndanm->type, + ndanm->if_num); +} + +/* + * nss_dynamic_interface_dealloc_node_msg() + * Log Dynamic Interface dealloc node message. + */ +static void nss_dynamic_interface_dealloc_node_log_msg(struct nss_dynamic_interface_msg *ndm) +{ + struct nss_dynamic_interface_dealloc_node_msg *nddnm __maybe_unused = &ndm->msg.dealloc_node; + nss_trace("%px: NSS Dynamic Interface Alloc Node Message:\n" + "Dynamic Interface Type: %d\n" + "Dynamic Interface Number: %d\n", + nddnm, nddnm->type, + nddnm->if_num); +} + +/* + * nss_dynamic_interface_log_verbose() + * Log message contents. 
+ */ +static void nss_dynamic_interface_log_verbose(struct nss_dynamic_interface_msg *ndm) +{ + switch (ndm->cm.type) { + case NSS_DYNAMIC_INTERFACE_ALLOC_NODE: + nss_dynamic_interface_alloc_node_log_msg(ndm); + break; + + case NSS_DYNAMIC_INTERFACE_DEALLOC_NODE: + nss_dynamic_interface_dealloc_node_log_msg(ndm); + break; + + default: + nss_warning("%px: Invalid message type\n", ndm); + break; + } +} + +/* + * nss_dynamic_interface_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_dynamic_interface_log_tx_msg(struct nss_dynamic_interface_msg *ndm) +{ + if (ndm->cm.type >= NSS_DYNAMIC_INTERFACE_MAX) { + nss_warning("%px: Invalid message type\n", ndm); + return; + } + + nss_info("%px: type[%d]:%s\n", ndm, ndm->cm.type, nss_dynamic_interface_log_message_types_str[ndm->cm.type]); + nss_dynamic_interface_log_verbose(ndm); +} + +/* + * nss_dynamic_interface_log_rx_msg() + * Log messages received from FW. + */ +void nss_dynamic_interface_log_rx_msg(struct nss_dynamic_interface_msg *ndm) +{ + if (ndm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ndm); + return; + } + + if (ndm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ndm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ndm, ndm->cm.type, + nss_dynamic_interface_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response]); + goto verbose; + } + + if (ndm->cm.error >= NSS_DYNAMIC_INTERFACE_ERR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ndm, ndm->cm.type, nss_dynamic_interface_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response], + ndm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n, error[%d]:%s\n", + ndm, ndm->cm.type, nss_dynamic_interface_log_message_types_str[ndm->cm.type], + ndm->cm.response, nss_cmn_response_str[ndm->cm.response], + ndm->cm.error, 
nss_dynamic_interface_log_error_response_types_str[ndm->cm.error]); + +verbose: + nss_dynamic_interface_log_verbose(ndm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_log.h new file mode 100644 index 000000000..266b909c9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_DYNAMIC_INTERFACE_LOG_H +#define __NSS_DYNAMIC_INTERFACE_LOG_H + +/* + * nss_dynamic_interface.h + * NSS Dynamic Interface private header file. + */ + +/* + * nss_dynamic_interface_log_tx_msg + * Logs a dynamic interface message that is sent to the NSS firmware. + */ +void nss_dynamic_interface_log_tx_msg(struct nss_dynamic_interface_msg *ndm); + +/* + * nss_dynamic_interface_log_rx_msg + * Logs a dynamic interface message that is received from the NSS firmware. 
+ */ +void nss_dynamic_interface_log_rx_msg(struct nss_dynamic_interface_msg *ndm); + +#endif /* __NSS_DYNAMIC_INTERFACE_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_stats.c new file mode 100644 index 000000000..1f190b645 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_stats.c @@ -0,0 +1,160 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_dynamic_interface.h" + +/* + * nss_dynamic_interface_type_names + * Name strings for dynamic interface types + */ +const char *nss_dynamic_interface_type_names[NSS_DYNAMIC_INTERFACE_TYPE_MAX] = { + "NSS_DYNAMIC_INTERFACE_TYPE_NONE", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR", + "NSS_DYNAMIC_INTERFACE_TYPE_RESERVED_5", + "NSS_DYNAMIC_INTERFACE_TYPE_TUNIPIP6_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_TUNIPIP6_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_RESERVED", + "NSS_DYNAMIC_INTERFACE_TYPE_VAP", + "NSS_DYNAMIC_INTERFACE_TYPE_RESERVED_0", + "NSS_DYNAMIC_INTERFACE_TYPE_PPPOE", + "NSS_DYNAMIC_INTERFACE_TYPE_VIRTIF_DEPRECATED", + "NSS_DYNAMIC_INTERFACE_TYPE_L2TPV2", + "NSS_DYNAMIC_INTERFACE_TYPE_RESERVED_4", + "NSS_DYNAMIC_INTERFACE_TYPE_PORTID", + "NSS_DYNAMIC_INTERFACE_TYPE_DTLS", + "NSS_DYNAMIC_INTERFACE_TYPE_QVPN_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_QVPN_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_BRIDGE", + "NSS_DYNAMIC_INTERFACE_TYPE_VLAN", + "NSS_DYNAMIC_INTERFACE_TYPE_RESERVED_3", + "NSS_DYNAMIC_INTERFACE_TYPE_WIFILI_INTERNAL", + "NSS_DYNAMIC_INTERFACE_TYPE_MAP_T_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_MAP_T_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_HOST_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_OFFL_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_SJACK_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INLINE_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INLINE_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_N2H", + "NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_H2N", + "NSS_DYNAMIC_INTERFACE_TYPE_TUN6RD_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_TUN6RD_OUTER", + 
"NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INNER_EXCEPTION", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_US", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_DS", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_GRE_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_PPTP_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_PPTP_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_PPTP_HOST_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_MDATA_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_MDATA_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_REDIRECT", + "NSS_DYNAMIC_INTERFACE_TYPE_PVXLAN_HOST_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_PVXLAN_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_IGS", + "NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US", + "NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS", + "NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_OUTER", + "NSS_DYNAMIC_INTERFACE_TYPE_MATCH", + "NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_N2H", + "NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_H2N", + "NSS_DYNAMIC_INTERFACE_TYPE_WIFILI_EXTERNAL0", + "NSS_DYNAMIC_INTERFACE_TYPE_WIFILI_EXTERNAL1", + "NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_HOST_INNER", + "NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_OUTER", +}; + +/* + * nss_dynamic_interface_type_names_stats_read() + * Read and display dynamic interface types names + */ +static ssize_t nss_dynamic_interface_type_names_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int i; + char *lbuf = NULL; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint32_t max_output_lines = 2 /* header & footer for stats */ + + NSS_DYNAMIC_INTERFACE_TYPE_MAX /* maximum number of dynamic interface types */ + + 2; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + + lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not 
allocate memory for local statistics buffer"); + return 0; + } + + /* + * name strings + */ + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n dynamic interface type names start:\n\n"); + + for (i = 0; i < NSS_DYNAMIC_INTERFACE_TYPE_MAX; i++) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "\t%u : %s\n", i, nss_dynamic_interface_type_names[i]); + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n dynamic interface type names end\n"); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(lbuf); + return bytes_read; +} + +/* + * nss_dynamic_interface_type_names_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(dynamic_interface_type_names) + +/* + * nss_dynamic_interface_stats_dentry_create() + * Create dynamic-interface statistics debug entry. + */ +void nss_dynamic_interface_stats_dentry_create(void) +{ + struct dentry *di_dentry = NULL; + struct dentry *di_type_name_d = NULL; + + di_dentry = debugfs_create_dir("dynamic_if", nss_top_main.stats_dentry); + if (unlikely(di_dentry == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/dynamic_if directory"); + return; + } + + di_type_name_d = debugfs_create_file("type_names", 0400, di_dentry, + &nss_top_main, &nss_dynamic_interface_type_names_stats_ops); + if (unlikely(di_type_name_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/dynamic_if/type_names file"); + return; + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_stats.h new file mode 100644 index 000000000..ef16162bc --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_dynamic_interface_stats.h @@ -0,0 +1,33 @@ +/* + ****************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_DYNAMIC_INTERFACE_STATS_H +#define __NSS_DYNAMIC_INTERFACE_STATS_H + +/* + * nss_dynamic_interface.h + * NSS Dynamic Interface private header file. + */ + +/* + * nss_dynamic_interface_stats_dentry_create + * Create dynamic interface debugfs entry. + */ +void nss_dynamic_interface_stats_dentry_create(void); + +#endif /* __NSS_DYNAMIC_INTERFACE_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_edma.c b/feeds/ipq807x/qca-nss-drv/src/nss_edma.c new file mode 100644 index 000000000..f03772689 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_edma.c @@ -0,0 +1,139 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_edma.c + * NSS EDMA APIs + */ +#include "nss_edma_stats.h" +#include "nss_edma_strings.h" + +/* + ********************************** + Rx APIs + ********************************** + */ + +/* + * nss_edma_interface_handler() + * Handle NSS -> HLOS messages for EDMA node + */ +static void nss_edma_interface_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_edma_msg *nem = (struct nss_edma_msg *)ncm; + nss_edma_msg_callback_t cb; + + /* + * Is this a valid request/response packet? + */ + if (nem->cm.type >= NSS_METADATA_TYPE_EDMA_MAX) { + nss_warning("%px: received invalid message %d for edma interface", nss_ctx, nem->cm.type); + return; + } + + /* + * Handle different types of messages + */ + switch (nem->cm.type) { + case NSS_METADATA_TYPE_EDMA_PORT_STATS_SYNC: + /* + * Update driver statistics and send statistics notifications to the registered modules. 
+ */ + nss_edma_metadata_port_stats_sync(nss_ctx, &nem->msg.port_stats); + nss_edma_stats_notify(nss_ctx); + + break; + case NSS_METADATA_TYPE_EDMA_RING_STATS_SYNC: + nss_edma_metadata_ring_stats_sync(nss_ctx, &nem->msg.ring_stats); + break; + case NSS_METADATA_TYPE_EDMA_ERR_STATS_SYNC: + nss_edma_metadata_err_stats_sync(nss_ctx, &nem->msg.err_stats); + break; + default: + if (ncm->response != NSS_CMN_RESPONSE_ACK) { + /* + * Check response + */ + nss_info("%px: Received response %d for type %d, interface %d", + nss_ctx, ncm->response, ncm->type, ncm->interface); + } + } + /* + * Update the callback and app_data for NOTIFY messages, edma sends all notify messages + * to the same callback/app_data. + */ + if (nem->cm.response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->edma_callback; + ncm->app_data = (nss_ptr_t)nss_ctx->nss_top->edma_ctx; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * Callback + */ + cb = (nss_edma_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, nem); +} + +/* + * nss_edma_notify_register() + * Register to received EDMA events. + */ +struct nss_ctx_instance *nss_edma_notify_register(nss_edma_msg_callback_t cb, void *app_data) +{ + nss_top_main.edma_callback = cb; + nss_top_main.edma_ctx = app_data; + return &nss_top_main.nss[nss_top_main.edma_handler_id]; +} +EXPORT_SYMBOL(nss_edma_notify_register); + +/* + * nss_edma_notify_unregister() + * Unregister to received EDMA events. 
+ */ +void nss_edma_notify_unregister(void) +{ + nss_top_main.edma_callback = NULL; +} +EXPORT_SYMBOL(nss_edma_notify_unregister); + +/* + * nss_get_edma_context() + */ +struct nss_ctx_instance *nss_edma_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.edma_handler_id]; +} +EXPORT_SYMBOL(nss_edma_get_context); + +/* + * nss_edma_register_handler() + */ +void nss_edma_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_edma_get_context(); + + nss_core_register_handler(nss_ctx, NSS_EDMA_INTERFACE, nss_edma_interface_handler, NULL); + + nss_edma_stats_dentry_create(); + nss_edma_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_edma_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_edma_stats.c new file mode 100644 index 000000000..abb338e2a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_edma_stats.c @@ -0,0 +1,822 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_edma_stats.c + * NSS EDMA statistics APIs + */ + +#include "nss_edma_stats.h" +#include "nss_edma_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_edma_stats_notifier); + +struct nss_edma_stats edma_stats; + +/* + ********************************** + EDMA statistics APIs + ********************************** + */ + +/* + * nss_edma_port_stats_read() + * Read EDMA port statistics + */ +static ssize_t nss_edma_port_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * Max output lines = #stats * NSS_MAX_CORES + + * few blank lines for banner printing + Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + struct nss_stats_data *data = fp->private_data; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_STATS_NODE_MAX * sizeof(uint64_t), GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "edma", NSS_STATS_SINGLE_CORE); + + /* + * Common node stats + */ + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d stats:\n\n", data->edma_id); + + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_STATS_NODE_MAX); i++) { + stats_shadow[i] = edma_stats.port[data->edma_id].port_stats[i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("edma_port", NULL, data->edma_id + , 
nss_edma_strings_stats_node + , stats_shadow + , NSS_STATS_NODE_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_edma_port_type_stats_read() + * Read EDMA port type + */ +static ssize_t nss_edma_port_type_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * max output lines = #stats + start tag line + end tag line + three blank lines + */ + uint32_t max_output_lines = (1 + 2) + 3; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t port_type; + struct nss_stats_data *data = fp->private_data; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + size_wr = scnprintf(lbuf, size_al, "edma port type start:\n\n"); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d type:\n\n", data->edma_id); + + /* + * Port type + */ + spin_lock_bh(&nss_top_main.stats_lock); + port_type = edma_stats.port[data->edma_id].port_type; + spin_unlock_bh(&nss_top_main.stats_lock); + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "port_type = %s\n", nss_edma_strings_stats_port_type[port_type].stats_name); + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nedma stats end\n"); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + + return bytes_read; +} + +/* + * nss_edma_port_ring_map_stats_read() + * Read EDMA port ring map + */ +static ssize_t nss_edma_port_ring_map_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * max output lines = #stats + start tag line + end tag line + three blank lines + */ + uint32_t max_output_lines = (4 + 2) + 3; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t 
size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + struct nss_stats_data *data = fp->private_data; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_EDMA_PORT_RING_MAP_MAX * sizeof(uint64_t), GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr = scnprintf(lbuf, size_al, "edma port ring map start:\n\n"); + + /* + * Port ring map + */ + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "edma port %d ring map:\n\n", data->edma_id); + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; i < NSS_EDMA_PORT_RING_MAP_MAX; i++) { + stats_shadow[i] = edma_stats.port[data->edma_id].port_ring_map[i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + + size_wr += nss_stats_print("edma_port_ring", NULL, data->edma_id + , nss_edma_strings_stats_port_ring_map + , stats_shadow + , NSS_EDMA_PORT_RING_MAP_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_edma_txring_stats_read() + * Read EDMA Tx ring stats + */ +static ssize_t nss_edma_txring_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * max output lines = #stats + start tag line + end tag line + three blank lines + */ + uint32_t max_output_lines = (NSS_EDMA_STATS_TX_MAX + 2) + 3; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + struct nss_stats_data *data = fp->private_data; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = 
kzalloc(NSS_EDMA_STATS_TX_MAX * sizeof(uint64_t), GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr = scnprintf(lbuf, size_al, "edma Tx ring stats start:\n\n"); + + /* + * Tx ring stats + */ + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Tx ring %d stats:\n\n", data->edma_id); + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; i < NSS_EDMA_STATS_TX_MAX; i++) { + stats_shadow[i] = edma_stats.tx_stats[data->edma_id][i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + + size_wr += nss_stats_print("edma_tx_ring", NULL, data->edma_id + , nss_edma_strings_stats_tx + , stats_shadow + , NSS_EDMA_STATS_TX_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_edma_rxring_stats_read() + * Read EDMA rxring stats + */ +static ssize_t nss_edma_rxring_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * max output lines = #stats + start tag line + end tag line + three blank lines + */ + uint32_t max_output_lines = (NSS_EDMA_STATS_RX_MAX + 2) + 3; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + struct nss_stats_data *data = fp->private_data; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_EDMA_STATS_RX_MAX * sizeof(uint64_t), GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + /* + * RX ring stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; i < NSS_EDMA_STATS_RX_MAX; i++) { + stats_shadow[i] = 
edma_stats.rx_stats[data->edma_id][i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("edma_rx_ring", NULL, data->edma_id + , nss_edma_strings_stats_rx + , stats_shadow + , NSS_EDMA_STATS_RX_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_edma_txcmplring_stats_read() + * Read EDMA txcmplring stats + */ +static ssize_t nss_edma_txcmplring_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * max output lines = #stats + start tag line + end tag line + three blank lines + */ + uint32_t max_output_lines = (NSS_EDMA_STATS_TXCMPL_MAX + 2) + 3; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + struct nss_stats_data *data = fp->private_data; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_EDMA_STATS_TXCMPL_MAX * sizeof(uint64_t), GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr = scnprintf(lbuf, size_al, "edma Tx cmpl ring stats start:\n\n"); + + /* + * Tx cmpl ring stats + */ + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Tx cmpl ring %d stats:\n\n", data->edma_id); + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; i < NSS_EDMA_STATS_TXCMPL_MAX; i++) { + stats_shadow[i] = edma_stats.txcmpl_stats[data->edma_id][i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("edma_tx_cmpl_ring", NULL, data->edma_id + , nss_edma_strings_stats_txcmpl + , stats_shadow + , NSS_EDMA_STATS_TXCMPL_MAX + , lbuf, size_wr, size_al); + size_wr += scnprintf(lbuf + size_wr, size_al - 
size_wr, "\nedma Tx cmpl ring stats end\n\n"); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_edma_rxfillring_stats_read() + * Read EDMA rxfillring stats + */ +static ssize_t nss_edma_rxfillring_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * max output lines = #stats + start tag line + end tag line + three blank lines + */ + uint32_t max_output_lines = (NSS_EDMA_STATS_RXFILL_MAX + 2) + 3; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + struct nss_stats_data *data = fp->private_data; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_EDMA_STATS_RXFILL_MAX * sizeof(uint64_t), GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr = scnprintf(lbuf, size_al, "edma Rx fill ring stats start:\n\n"); + + /* + * Rx fill ring stats + */ + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Rx fill ring %d stats:\n\n", data->edma_id); + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; i < NSS_EDMA_STATS_RXFILL_MAX; i++) { + stats_shadow[i] = edma_stats.rxfill_stats[data->edma_id][i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("edma_rx_fill_ring", NULL + , NSS_STATS_SINGLE_INSTANCE + , nss_edma_strings_stats_rxfill + , stats_shadow + , NSS_EDMA_STATS_RXFILL_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_edma_err_stats_read() + * Read EDMA err stats + */ +static ssize_t 
nss_edma_err_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * max output lines = #stats + start tag line + end tag line + three blank lines + */ + uint32_t max_output_lines = (NSS_EDMA_ERR_STATS_MAX + 2) + 3; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_EDMA_ERR_STATS_MAX * sizeof(uint64_t), GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr = scnprintf(lbuf, size_al, "edma error stats start:\n\n"); + + /* + * Common node stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + + for (i = 0; (i < NSS_EDMA_ERR_STATS_MAX); i++) + stats_shadow[i] = edma_stats.misc_err[i]; + + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("edma_err", NULL, NSS_STATS_SINGLE_INSTANCE + , nss_edma_strings_stats_err_map + , stats_shadow + , NSS_EDMA_ERR_STATS_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * edma_port_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port); + +/* + * edma_port_type_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port_type); + +/* + * edma_port_ring_map_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(edma_port_ring_map); + +/* + * edma_txring_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(edma_txring); + +/* + * edma_rxring_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(edma_rxring); + +/* + * edma_txcmplring_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(edma_txcmplring); + +/* + * edma_rxfillring_stats_ops + */ 
+NSS_STATS_DECLARE_FILE_OPERATIONS(edma_rxfillring); + +/* + * edma_err_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(edma_err); + +/* + * nss_edma_stats_dentry_create() + * Create edma statistics debug entry. + */ +void nss_edma_stats_dentry_create(void) +{ + int i; + struct dentry *edma_d = NULL; + struct dentry *edma_port_dir_d = NULL; + struct dentry *edma_port_d = NULL; + struct dentry *edma_port_type_d = NULL; + struct dentry *edma_port_stats_d = NULL; + struct dentry *edma_port_ring_map_d = NULL; + struct dentry *edma_rings_dir_d = NULL; + struct dentry *edma_tx_dir_d = NULL; + struct dentry *edma_tx_d = NULL; + struct dentry *edma_rx_dir_d = NULL; + struct dentry *edma_rx_d = NULL; + struct dentry *edma_txcmpl_dir_d = NULL; + struct dentry *edma_txcmpl_d = NULL; + struct dentry *edma_rxfill_dir_d = NULL; + struct dentry *edma_rxfill_d = NULL; + struct dentry *edma_err_stats_d = NULL; + char file_name[10]; + + edma_d = debugfs_create_dir("edma", nss_top_main.stats_dentry); + if (unlikely(edma_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma directory"); + return; + } + + /* + * edma port stats + */ + edma_port_dir_d = debugfs_create_dir("ports", edma_d); + if (unlikely(edma_port_dir_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/ports directory"); + return; + } + + for (i = 0; i < NSS_EDMA_NUM_PORTS_MAX; i++) { + memset(file_name, 0, sizeof(file_name)); + snprintf(file_name, sizeof(file_name), "%d", i); + + edma_port_d = debugfs_create_dir(file_name, edma_port_dir_d); + if (unlikely(edma_port_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d directory", i); + return; + } + + edma_port_stats_d = debugfs_create_file("stats", 0400, edma_port_d, (void *)(nss_ptr_t)i, &nss_edma_port_stats_ops); + if (unlikely(edma_port_stats_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/stats file", i); + return; + } + + edma_port_type_d = debugfs_create_file("type", 0400, 
edma_port_d, (void *)(nss_ptr_t)i, &nss_edma_port_type_stats_ops); + if (unlikely(edma_port_type_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/type file", i); + return; + } + + edma_port_ring_map_d = debugfs_create_file("ring_map", 0400, edma_port_d, (void *)(nss_ptr_t)i, &nss_edma_port_ring_map_stats_ops); + if (unlikely(edma_port_ring_map_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/ports/%d/ring_map file", i); + return; + } + } + + /* + * edma error stats + */ + edma_err_stats_d = NULL; + edma_err_stats_d = debugfs_create_file("err_stats", 0400, edma_d, &nss_top_main, &nss_edma_err_stats_ops); + if (unlikely(edma_err_stats_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/%d/err_stats file", 0); + return; + } + + /* + * edma ring stats + */ + edma_rings_dir_d = debugfs_create_dir("rings", edma_d); + if (unlikely(edma_rings_dir_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/rings directory"); + return; + } + + /* + * edma tx ring stats + */ + edma_tx_dir_d = debugfs_create_dir("tx", edma_rings_dir_d); + if (unlikely(edma_tx_dir_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/rings/tx directory"); + return; + } + + for (i = 0; i < NSS_EDMA_NUM_TX_RING_MAX; i++) { + memset(file_name, 0, sizeof(file_name)); + scnprintf(file_name, sizeof(file_name), "%d", i); + edma_tx_d = debugfs_create_file(file_name, 0400, edma_tx_dir_d, (void *)(nss_ptr_t)i, &nss_edma_txring_stats_ops); + if (unlikely(edma_tx_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/rings/tx/%d file", i); + return; + } + } + + /* + * edma rx ring stats + */ + edma_rx_dir_d = debugfs_create_dir("rx", edma_rings_dir_d); + if (unlikely(edma_rx_dir_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rx directory"); + return; + } + + for (i = 0; i < NSS_EDMA_NUM_RX_RING_MAX; i++) { + memset(file_name, 0, sizeof(file_name)); + scnprintf(file_name, 
sizeof(file_name), "%d", i); + edma_rx_d = debugfs_create_file(file_name, 0400, edma_rx_dir_d, (void *)(nss_ptr_t)i, &nss_edma_rxring_stats_ops); + if (unlikely(edma_rx_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rx/%d file", i); + return; + } + } + + /* + * edma tx cmpl ring stats + */ + edma_txcmpl_dir_d = debugfs_create_dir("txcmpl", edma_rings_dir_d); + if (unlikely(edma_txcmpl_dir_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/rings/txcmpl directory"); + return; + } + + for (i = 0; i < NSS_EDMA_NUM_TXCMPL_RING_MAX; i++) { + memset(file_name, 0, sizeof(file_name)); + scnprintf(file_name, sizeof(file_name), "%d", i); + edma_txcmpl_d = debugfs_create_file(file_name, 0400, edma_txcmpl_dir_d, (void *)(nss_ptr_t)i, &nss_edma_txcmplring_stats_ops); + if (unlikely(edma_txcmpl_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/rings/txcmpl/%d file", i); + return; + } + } + + /* + * edma rx fill ring stats + */ + edma_rxfill_dir_d = debugfs_create_dir("rxfill", edma_rings_dir_d); + if (unlikely(edma_rxfill_dir_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rxfill directory"); + return; + } + + for (i = 0; i < NSS_EDMA_NUM_RXFILL_RING_MAX; i++) { + memset(file_name, 0, sizeof(file_name)); + scnprintf(file_name, sizeof(file_name), "%d", i); + edma_rxfill_d = debugfs_create_file(file_name, 0400, edma_rxfill_dir_d, (void *)(nss_ptr_t)i, &nss_edma_rxfillring_stats_ops); + if (unlikely(edma_rxfill_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/edma/rings/rxfill/%d file", i); + return; + } + } +} + +/* + * nss_edma_metadata_port_stats_sync() + * Handle the syncing of EDMA port statistics. 
+ */ +void nss_edma_metadata_port_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_edma_port_stats_sync *nepss) +{ + uint16_t i, j = 0; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + spin_lock_bh(&nss_top->stats_lock); + + /* + * edma port stats + * We process a subset of port stats since msg payload is not enough to hold all ports at once. + */ + for (i = nepss->start_port; i < nepss->end_port; i++) { + int k; + + edma_stats.port[i].port_stats[NSS_STATS_NODE_RX_PKTS] += nepss->port_stats[j].node_stats.rx_packets; + edma_stats.port[i].port_stats[NSS_STATS_NODE_RX_BYTES] += nepss->port_stats[j].node_stats.rx_bytes; + edma_stats.port[i].port_stats[NSS_STATS_NODE_TX_PKTS] += nepss->port_stats[j].node_stats.tx_packets; + edma_stats.port[i].port_stats[NSS_STATS_NODE_TX_BYTES] += nepss->port_stats[j].node_stats.tx_bytes; + + for (k = 0; k < NSS_MAX_NUM_PRI; k++) { + edma_stats.port[i].port_stats[NSS_STATS_NODE_RX_QUEUE_0_DROPPED + k] += nepss->port_stats[j].node_stats.rx_dropped[k]; + } + + edma_stats.port[i].port_type = nepss->port_stats[j].port_type; + edma_stats.port[i].port_ring_map[NSS_EDMA_PORT_RX_RING] = nepss->port_stats[j].edma_rx_ring; + edma_stats.port[i].port_ring_map[NSS_EDMA_PORT_TX_RING] = nepss->port_stats[j].edma_tx_ring; + j++; + } + + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_edma_metadata_ring_stats_sync() + * Handle the syncing of EDMA ring statistics. 
+ */ +void nss_edma_metadata_ring_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_edma_ring_stats_sync *nerss) +{ + int32_t i; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + spin_lock_bh(&nss_top->stats_lock); + + /* + * edma tx ring stats + */ + for (i = 0; i < NSS_EDMA_NUM_TX_RING_MAX; i++) { + edma_stats.tx_stats[i][NSS_EDMA_STATS_TX_ERR] += nerss->tx_ring[i].tx_err; + edma_stats.tx_stats[i][NSS_EDMA_STATS_TX_DROPPED] += nerss->tx_ring[i].tx_dropped; + edma_stats.tx_stats[i][NSS_EDMA_STATS_TX_DESC] += nerss->tx_ring[i].desc_cnt; + } + + /* + * edma rx ring stats + */ + for (i = 0; i < NSS_EDMA_NUM_RX_RING_MAX; i++) { + edma_stats.rx_stats[i][NSS_EDMA_STATS_RX_CSUM_ERR] += nerss->rx_ring[i].rx_csum_err; + edma_stats.rx_stats[i][NSS_EDMA_STATS_RX_DESC] += nerss->rx_ring[i].desc_cnt; + edma_stats.rx_stats[i][NSS_EDMA_STATS_RX_QOS_ERR] += nerss->rx_ring[i].qos_err; + edma_stats.rx_stats[i][NSS_EDMA_STATS_RX_SRC_PORT_INVALID] += nerss->rx_ring[i].rx_src_port_invalid; + edma_stats.rx_stats[i][NSS_EDMA_STATS_RX_SRC_IF_INVALID] += nerss->rx_ring[i].rx_src_if_invalid; + } + + /* + * edma tx cmpl ring stats + */ + for (i = 0; i < NSS_EDMA_NUM_TXCMPL_RING_MAX; i++) { + edma_stats.txcmpl_stats[i][NSS_EDMA_STATS_TXCMPL_DESC] += nerss->txcmpl_ring[i].desc_cnt; + } + + /* + * edma rx fill ring stats + */ + for (i = 0; i < NSS_EDMA_NUM_RXFILL_RING_MAX; i++) { + edma_stats.rxfill_stats[i][NSS_EDMA_STATS_RXFILL_DESC] += nerss->rxfill_ring[i].desc_cnt; + } + + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_edma_metadata_err_stats_sync() + * Handle the syncing of EDMA error statistics. 
+ */ +void nss_edma_metadata_err_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_edma_err_stats_sync *nerss) +{ + + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + spin_lock_bh(&nss_top->stats_lock); + + edma_stats.misc_err[NSS_EDMA_AXI_RD_ERR] += nerss->msg_err_stats.axi_rd_err; + edma_stats.misc_err[NSS_EDMA_AXI_WR_ERR] += nerss->msg_err_stats.axi_wr_err; + edma_stats.misc_err[NSS_EDMA_RX_DESC_FIFO_FULL_ERR] += nerss->msg_err_stats.rx_desc_fifo_full_err; + edma_stats.misc_err[NSS_EDMA_RX_BUF_SIZE_ERR] += nerss->msg_err_stats.rx_buf_size_err; + edma_stats.misc_err[NSS_EDMA_TX_SRAM_FULL_ERR] += nerss->msg_err_stats.tx_sram_full_err; + edma_stats.misc_err[NSS_EDMA_TX_CMPL_BUF_FULL_ERR] += nerss->msg_err_stats.tx_cmpl_buf_full_err; + edma_stats.misc_err[NSS_EDMA_PKT_LEN_LA64K_ERR] += nerss->msg_err_stats.pkt_len_la64k_err; + edma_stats.misc_err[NSS_EDMA_PKT_LEN_LE33_ERR] += nerss->msg_err_stats.pkt_len_le33_err; + edma_stats.misc_err[NSS_EDMA_DATA_LEN_ERR] += nerss->msg_err_stats.data_len_err; + edma_stats.misc_err[NSS_EDMA_ALLOC_FAIL_CNT] += nerss->msg_err_stats.alloc_fail_cnt; + edma_stats.misc_err[NSS_EDMA_QOS_INVAL_DST_DROPS] += nerss->msg_err_stats.qos_inval_dst_drops; + + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_edma_stats_notify() + * Calls statistics notifier. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_edma_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + uint32_t core_id = nss_ctx->id; + + atomic_notifier_call_chain(&nss_edma_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&core_id); +} + +/* + * nss_edma_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_edma_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_edma_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_edma_stats_register_notifier); + +/* + * nss_edma_stats_unregister_notifier() + * Deregisters stats notifier. 
+ */ +int nss_edma_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_edma_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_edma_stats_unregister_notifier); + +/* + * nss_edma_get_stats + * Sends EDMA statistics to NSS clients. + */ +void nss_edma_get_stats(uint64_t *stats, int port_id) +{ + memcpy(stats, edma_stats.port[port_id].port_stats, sizeof(uint64_t) * NSS_STATS_NODE_MAX); +} +EXPORT_SYMBOL(nss_edma_get_stats); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_edma_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_edma_stats.h new file mode 100644 index 000000000..305582b84 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_edma_stats.h @@ -0,0 +1,36 @@ +/* + ****************************************************************************** + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +/* + * nss_edma_stats.h + * NSS EDMA statistics header file. 
+ */ + +#ifndef __NSS_EDMA_STATS_H +#define __NSS_EDMA_STATS_H + +#include "nss_core.h" + +/* + * NSS EDMA statistics APIs + */ +extern void nss_edma_stats_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_edma_metadata_port_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_edma_port_stats_sync *nepss); +extern void nss_edma_metadata_ring_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_edma_ring_stats_sync *nerss); +extern void nss_edma_metadata_err_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_edma_err_stats_sync *nerss); +extern void nss_edma_stats_dentry_create(void); + +#endif /* __NSS_EDMA_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_edma_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_edma_strings.c new file mode 100644 index 000000000..3d9c23352 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_edma_strings.c @@ -0,0 +1,350 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" + +/* + * nss_edma_strings_stats_node + * EDMA statistics strings. + */ +struct nss_stats_info nss_edma_strings_stats_node[NSS_STATS_NODE_MAX] = { + {"rx_pkts" , NSS_STATS_TYPE_COMMON}, + {"rx_byts" , NSS_STATS_TYPE_COMMON}, + {"tx_pkts" , NSS_STATS_TYPE_COMMON}, + {"tx_byts" , NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops" , NSS_STATS_TYPE_DROP} +}; + +/* + * nss_edma_common_stats_strings_read() + * Read EDMA common node statistics names. + */ +static ssize_t nss_edma_common_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_edma_strings_stats_node, NSS_STATS_NODE_MAX); +} + +/* + * nss_edma_common_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(edma_common_stats); + +/* + * nss_edma_strings_stats_tx + */ +struct nss_stats_info nss_edma_strings_stats_tx[NSS_EDMA_STATS_TX_MAX] = { + {"tx_err" , NSS_STATS_TYPE_ERROR}, + {"tx_drops" , NSS_STATS_TYPE_DROP}, + {"desc_cnt" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_edma_txring_strings_read() + * Read EDMA txring names. 
+ */ +static ssize_t nss_edma_txring_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_edma_strings_stats_tx, NSS_EDMA_STATS_TX_MAX); +} + +/* + * edma_txring_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(edma_txring); + +/* + * nss_edma_strings_stats_rx + */ +struct nss_stats_info nss_edma_strings_stats_rx[NSS_EDMA_STATS_RX_MAX] = { + {"rx_csum_err" , NSS_STATS_TYPE_ERROR}, + {"desc_cnt" , NSS_STATS_TYPE_SPECIAL}, + {"qos_err" , NSS_STATS_TYPE_DROP}, + {"rx_src_port_invalid" , NSS_STATS_TYPE_DROP}, + {"rx_src_interface_invalid" , NSS_STATS_TYPE_DROP} +}; + +/* + * nss_edma_rxring_strings_read() + * Read EDMA rxring names. + */ +static ssize_t nss_edma_rxring_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_edma_strings_stats_rx, NSS_EDMA_STATS_RX_MAX); +} + +/* + * edma_rxring_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(edma_rxring); + +/* + * nss_edma_strings_stats_txcmpl + */ +struct nss_stats_info nss_edma_strings_stats_txcmpl[NSS_EDMA_STATS_TXCMPL_MAX] = { + {"desc_cnt" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_edma_txcmplring_strings_read() + * Read EDMA txcmplring names. + */ +static ssize_t nss_edma_txcmplring_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_edma_strings_stats_txcmpl, NSS_EDMA_STATS_TXCMPL_MAX); +} + +/* + * edma_txcmplring_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(edma_txcmplring); + +/* + * nss_edma_strings_stats_rxfill + */ +struct nss_stats_info nss_edma_strings_stats_rxfill[NSS_EDMA_STATS_RXFILL_MAX] = { + {"desc_cnt" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_edma_rxfillring_strings_read() + * Read EDMA rxfillring names. 
+ */ +static ssize_t nss_edma_rxfillring_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_edma_strings_stats_rxfill, NSS_EDMA_STATS_RXFILL_MAX); +} + +/* + * edma_rxfillring_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(edma_rxfillring); + +/* + * nss_edma_strings_stats_port_type + */ +struct nss_stats_info nss_edma_strings_stats_port_type[NSS_EDMA_PORT_TYPE_MAX] = { + {"physical_port", NSS_STATS_TYPE_SPECIAL}, + {"virtual_port" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_edma_port_type_strings_read() + * Read EDMA port type names. + */ +static ssize_t nss_edma_port_type_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_edma_strings_stats_port_type, NSS_EDMA_PORT_TYPE_MAX); +} + +/* + * edma_port_type_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(edma_port_type); + +/* + * nss_edma_strings_stats_port_ring_map + */ +struct nss_stats_info nss_edma_strings_stats_port_ring_map[NSS_EDMA_PORT_RING_MAP_MAX] = { + {"rx_ring" , NSS_STATS_TYPE_SPECIAL}, + {"tx_ring" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_edma_port_ring_map_strings_read() + * Read EDMA port ring map names. 
+ */ +static ssize_t nss_edma_port_ring_map_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_edma_strings_stats_port_ring_map, NSS_EDMA_PORT_RING_MAP_MAX); +} + +/* + * edma_port_ring_map_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(edma_port_ring_map); + +/* + * nss_edma_strings_stats_err_map + */ +struct nss_stats_info nss_edma_strings_stats_err_map[NSS_EDMA_ERR_STATS_MAX] = { + {"axi_rd_err" , NSS_STATS_TYPE_ERROR}, + {"axi_wr_err" , NSS_STATS_TYPE_ERROR}, + {"rx_desc_fifo_full_err", NSS_STATS_TYPE_ERROR}, + {"rx_buf_size_err" , NSS_STATS_TYPE_ERROR}, + {"tx_sram_full_err" , NSS_STATS_TYPE_ERROR}, + {"tx_cmpl_buf_full_err" , NSS_STATS_TYPE_ERROR}, + {"pkt_len_la64k_err" , NSS_STATS_TYPE_ERROR}, + {"pkt_len_le33_err" , NSS_STATS_TYPE_ERROR}, + {"data_len_err" , NSS_STATS_TYPE_ERROR}, + {"alloc_fail_cnt" , NSS_STATS_TYPE_ERROR}, + {"qos_inval_dst_drops" , NSS_STATS_TYPE_DROP} +}; + +/* + * nss_edma_err_strings_read() + * Read EDMA error names. + */ +static ssize_t nss_edma_err_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_edma_strings_stats_err_map, NSS_EDMA_ERR_STATS_MAX); +} + +/* + * edma_err_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(edma_err); + +/* + * nss_edma_strings_dentry_create() + * Create EDMA statistics strings debug entry. 
+ */ +void nss_edma_strings_dentry_create(void) +{ + struct dentry *edma_d; + struct dentry *edma_port_dir_d; + struct dentry *edma_rings_dir_d; + struct dentry *edma_rx_dir_d; + struct dentry *edma_tx_dir_d; + struct dentry *edma_rxfill_dir_d; + struct dentry *edma_txcmpl_dir_d; + struct dentry *file_d; + + if (!nss_top_main.strings_dentry) { + nss_warning("qca-nss-drv/strings is not present"); + return; + } + + edma_d = debugfs_create_dir("edma", nss_top_main.strings_dentry); + if (!edma_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma directory"); + return; + } + + /* + * EDMA port stats. + */ + edma_port_dir_d = debugfs_create_dir("ports", edma_d); + if (!edma_port_dir_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/ports directory"); + goto fail; + } + + file_d = debugfs_create_file("common_stats_str", 0400, edma_port_dir_d, &nss_top_main, &nss_edma_common_stats_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/ports/common_stats_str file"); + goto fail; + } + + file_d = debugfs_create_file("type", 0400, edma_port_dir_d, &nss_top_main, &nss_edma_port_type_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/ports/type file"); + goto fail; + } + + file_d = debugfs_create_file("ring_map", 0400, edma_port_dir_d, &nss_top_main, &nss_edma_port_ring_map_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/ports/ring_map file"); + goto fail; + } + + /* + * edma error stats + */ + file_d = debugfs_create_file("err_stats", 0400, edma_d, &nss_top_main, &nss_edma_err_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/err_stats file"); + goto fail; + } + + /* + * edma ring stats + */ + edma_rings_dir_d = debugfs_create_dir("rings", edma_d); + if (!edma_rings_dir_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/rings directory"); + goto fail; + } + + /* + * edma tx ring stats + */ + 
edma_tx_dir_d = debugfs_create_dir("tx", edma_rings_dir_d); + if (!edma_tx_dir_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/rings/tx directory"); + goto fail; + } + + file_d = debugfs_create_file("tx_str", 0400, edma_tx_dir_d, &nss_top_main, &nss_edma_txring_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/rings/tx file"); + goto fail; + } + + /* + * edma rx ring stats + */ + edma_rx_dir_d = debugfs_create_dir("rx", edma_rings_dir_d); + if (!edma_rx_dir_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/rings/rx directory"); + goto fail; + } + + file_d = debugfs_create_file("rx_str", 0400, edma_rx_dir_d, &nss_top_main, &nss_edma_rxring_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/rings/rx file"); + goto fail; + } + + /* + * edma tx cmpl ring stats + */ + edma_txcmpl_dir_d = debugfs_create_dir("txcmpl", edma_rings_dir_d); + if (!edma_txcmpl_dir_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/rings/txcmpl directory"); + goto fail; + } + + file_d = debugfs_create_file("txcmpl_str", 0400, edma_txcmpl_dir_d, &nss_top_main, &nss_edma_txcmplring_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/rings/txcmpl file"); + goto fail; + } + + /* + * edma rx fill ring stats + */ + edma_rxfill_dir_d = debugfs_create_dir("rxfill", edma_rings_dir_d); + if (!edma_rxfill_dir_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/rings/rxfill directory"); + goto fail; + } + + file_d = debugfs_create_file("rxfill_str", 0400, edma_rxfill_dir_d, &nss_top_main, &nss_edma_rxfillring_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/strings/edma/rings/rxfill file"); + goto fail; + } + + return; +fail: + debugfs_remove_recursive(edma_d); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_edma_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_edma_strings.h new file mode 100644 index 000000000..b211975ff --- 
/dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_edma_strings.h @@ -0,0 +1,30 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_EDMA_STRINGS_H +#define __NSS_EDMA_STRINGS_H + +extern struct nss_stats_info nss_edma_strings_stats_node[NSS_STATS_NODE_MAX]; +extern struct nss_stats_info nss_edma_strings_stats_tx[NSS_EDMA_STATS_TX_MAX]; +extern struct nss_stats_info nss_edma_strings_stats_rx[NSS_EDMA_STATS_RX_MAX]; +extern struct nss_stats_info nss_edma_strings_stats_txcmpl[NSS_EDMA_STATS_TXCMPL_MAX]; +extern struct nss_stats_info nss_edma_strings_stats_rxfill[NSS_EDMA_STATS_RXFILL_MAX]; +extern struct nss_stats_info nss_edma_strings_stats_port_type[NSS_EDMA_PORT_TYPE_MAX]; +extern struct nss_stats_info nss_edma_strings_stats_port_ring_map[NSS_EDMA_PORT_RING_MAP_MAX]; +extern struct nss_stats_info nss_edma_strings_stats_err_map[NSS_EDMA_ERR_STATS_MAX]; +extern void nss_edma_strings_dentry_create(void); + +#endif /* __NSS_EDMA_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx.c b/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx.c new file mode 
100644 index 000000000..eba62afae --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx.c @@ -0,0 +1,77 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2017, 2019-2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_eth_rx.c + * NSS ETH_RX APIs + */ + +#include +#include "nss_eth_rx_stats.h" +#include "nss_eth_rx_strings.h" + +/* + ********************************** + Rx APIs + ********************************** + */ + +/* + * nss_eth_rx_interface_handler() + * Handle NSS -> HLOS messages for ETH_RX node + */ +static void nss_eth_rx_interface_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_eth_rx_msg *nem = (struct nss_eth_rx_msg *)ncm; + + /* + * Is this a valid request/response packet? + */ + if (nem->cm.type >= NSS_METADATA_TYPE_ETH_RX_MAX) { + nss_warning("%px: received invalid message %d for eth_rx interface", nss_ctx, nem->cm.type); + return; + } + + switch (nem->cm.type) { + case NSS_RX_METADATA_TYPE_ETH_RX_STATS_SYNC: + /* + * Update driver statistics and send stats notifications to the registered modules. 
+ */ + nss_eth_rx_metadata_stats_sync(nss_ctx, &nem->msg.node_sync); + nss_eth_rx_stats_notify(nss_ctx); + break; + + default: + if (ncm->response != NSS_CMN_RESPONSE_ACK) { + /* + * Check response + */ + nss_info("%px: Received response %d for type %d, interface %d", + nss_ctx, ncm->response, ncm->type, ncm->interface); + } + } +} + +/* + * nss_eth_rx_register_handler() + */ +void nss_eth_rx_register_handler(struct nss_ctx_instance *nss_ctx) +{ + nss_core_register_handler(nss_ctx, NSS_ETH_RX_INTERFACE, nss_eth_rx_interface_handler, NULL); + + nss_eth_rx_stats_dentry_create(); + nss_eth_rx_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_stats.c new file mode 100644 index 000000000..cfc705773 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_stats.c @@ -0,0 +1,187 @@ +/* + ************************************************************************** + * Copyright (c) 2017, 2019-2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_eth_rx_stats.h" +#include "nss_eth_rx_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_eth_rx_stats_notifier); + +uint64_t nss_eth_rx_stats[NSS_ETH_RX_STATS_MAX]; /* ETH_RX statistics */ +uint64_t nss_eth_rx_exception_stats[NSS_ETH_RX_EXCEPTION_EVENT_MAX]; /* Unknown protocol exception events per interface */ + +/* + * nss_eth_rx_stats_read() + * Read ETH_RX stats. + */ +static ssize_t nss_eth_rx_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * Max output lines = #stats * NSS_MAX_CORES + + * few blank lines for banner printing + Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_ETH_RX_STATS_MAX + NSS_ETH_RX_EXCEPTION_EVENT_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + /* + * Note: The assumption here is that we do not have more than 64 stats. + */ + stats_shadow = kzalloc(64 * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "eth_rx", NSS_STATS_SINGLE_CORE); + + size_wr += nss_stats_fill_common_stats(NSS_ETH_RX_INTERFACE, NSS_STATS_SINGLE_INSTANCE, lbuf, size_wr, size_al, "eth_rx"); + + /* + * eth_rx node stats. 
+ */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_ETH_RX_STATS_MAX); i++) { + stats_shadow[i] = nss_eth_rx_stats[i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + + size_wr += nss_stats_print("eth_rx", "eth_rx node stats" + , NSS_STATS_SINGLE_INSTANCE + , nss_eth_rx_strings_stats + , stats_shadow + , NSS_ETH_RX_STATS_MAX + , lbuf, size_wr, size_al); + + /* + * Exception stats. + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_ETH_RX_EXCEPTION_EVENT_MAX); i++) { + stats_shadow[i] = nss_eth_rx_exception_stats[i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + + size_wr += nss_stats_print("eth_rx", "eth_rx exception stats" + , NSS_STATS_SINGLE_INSTANCE + , nss_eth_rx_strings_exception_stats + , stats_shadow + , NSS_ETH_RX_EXCEPTION_EVENT_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_eth_rx_stats_ops. + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(eth_rx); + +/* + * nss_eth_rx_stats_dentry_create() + * Create eth_rx statistics debug entry. + */ +void nss_eth_rx_stats_dentry_create(void) +{ + nss_stats_create_dentry("eth_rx", &nss_eth_rx_stats_ops); +} + +/* + * nss_eth_rx_metadata_stats_sync() + * Handle the syncing of ETH_RX node statistics. 
+ */ +void nss_eth_rx_metadata_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_eth_rx_node_sync *nens) +{ + int32_t i; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + spin_lock_bh(&nss_top->stats_lock); + + nss_top->stats_node[NSS_ETH_RX_INTERFACE][NSS_STATS_NODE_RX_PKTS] += nens->node_stats.rx_packets; + nss_top->stats_node[NSS_ETH_RX_INTERFACE][NSS_STATS_NODE_RX_BYTES] += nens->node_stats.rx_bytes; + nss_top->stats_node[NSS_ETH_RX_INTERFACE][NSS_STATS_NODE_TX_PKTS] += nens->node_stats.tx_packets; + nss_top->stats_node[NSS_ETH_RX_INTERFACE][NSS_STATS_NODE_TX_BYTES] += nens->node_stats.tx_bytes; + + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + nss_top->stats_node[NSS_ETH_RX_INTERFACE][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + i] += nens->node_stats.rx_dropped[i]; + } + + nss_eth_rx_stats[NSS_ETH_RX_STATS_TOTAL_TICKS] += nens->total_ticks; + nss_eth_rx_stats[NSS_ETH_RX_STATS_WORST_CASE_TICKS] += nens->worst_case_ticks; + nss_eth_rx_stats[NSS_ETH_RX_STATS_ITERATIONS] += nens->iterations; + + for (i = 0; i < NSS_ETH_RX_EXCEPTION_EVENT_MAX; i++) { + nss_eth_rx_exception_stats[i] += nens->exception_events[i]; + } + + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_eth_rx_stats_notify() + * Sends notifications to the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_eth_rx_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + struct nss_eth_rx_stats_notification eth_rx_stats; + + eth_rx_stats.core_id = nss_ctx->id; + memcpy(eth_rx_stats.cmn_node_stats, nss_top_main.stats_node[NSS_ETH_RX_INTERFACE], sizeof(eth_rx_stats.cmn_node_stats)); + memcpy(eth_rx_stats.special_stats, nss_eth_rx_stats, sizeof(eth_rx_stats.special_stats)); + memcpy(eth_rx_stats.exception_stats, nss_eth_rx_exception_stats, sizeof(eth_rx_stats.exception_stats)); + atomic_notifier_call_chain(&nss_eth_rx_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)ð_rx_stats); +} + +/* + * nss_eth_rx_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_eth_rx_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_eth_rx_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_eth_rx_stats_register_notifier); + +/* + * nss_eth_rx_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_eth_rx_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_eth_rx_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_eth_rx_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_stats.h new file mode 100644 index 000000000..c5470ba9c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_stats.h @@ -0,0 +1,65 @@ +/* + ************************************************************************** + * Copyright (c) 2017, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_ETH_RX_STATS_H +#define __NSS_ETH_RX_STATS_H + +#include + +/* + * nss_eth_rx_stats.h + * NSS driver ETH_RX statistics header file. + */ + +/* + * Request/Response types + */ +enum nss_eth_rx_metadata_types { + NSS_RX_METADATA_TYPE_ETH_RX_STATS_SYNC, + NSS_METADATA_TYPE_ETH_RX_MAX, +}; + +/* + * The NSS eth_rx node stats structure. + */ +struct nss_eth_rx_node_sync { + struct nss_cmn_node_stats node_stats; + /* Common node stats for ETH_RX */ + uint32_t total_ticks; /* Total clock ticks spend inside the eth_rx */ + uint32_t worst_case_ticks; /* Worst case iteration of the eth_rx in ticks */ + uint32_t iterations; /* Number of iterations around the eth_rx */ + uint32_t exception_events[NSS_ETH_RX_EXCEPTION_EVENT_MAX]; + /* Number of ETH_RX exception events */ +}; + +/* + * Message structure to send/receive eth_rx commands + */ +struct nss_eth_rx_msg { + struct nss_cmn_msg cm; /* Message Header */ + union { + struct nss_eth_rx_node_sync node_sync; /* Message: node statistics sync */ + } msg; +}; + +/* + * eth_rx statistics APIs + */ +extern void nss_eth_rx_stats_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_eth_rx_metadata_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_eth_rx_node_sync *nens); +extern void nss_eth_rx_stats_dentry_create(void); + +#endif /* __NSS_ETH_RX_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_strings.c new file mode 100644 index 000000000..8412b444d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_strings.c @@ -0,0 
+1,106 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" + +/* + * nss_eth_rx_strings_stats + * Ethernet Rx statistics strings. + */ +struct nss_stats_info nss_eth_rx_strings_stats[NSS_ETH_RX_STATS_MAX] = { + {"ticks" , NSS_STATS_TYPE_SPECIAL}, + {"worst_ticks" , NSS_STATS_TYPE_SPECIAL}, + {"iterations" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_eth_rx_strings_exception_stats + * Interface statistics strings for unknown exceptions. + */ +struct nss_stats_info nss_eth_rx_strings_exception_stats[NSS_ETH_RX_EXCEPTION_EVENT_MAX] = { + {"unknown_l3_protocol" , NSS_STATS_TYPE_EXCEPTION}, + {"eth_hdr_missing" , NSS_STATS_TYPE_EXCEPTION}, + {"vlan_missing" , NSS_STATS_TYPE_EXCEPTION}, + {"trustsec_hdr_missing" , NSS_STATS_TYPE_EXCEPTION} +}; + +/* + * nss_eth_rx_special_stats_strings_read() + * Read Ethernet Rx special node statistics names. 
+ */ +static ssize_t nss_eth_rx_special_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_eth_rx_strings_stats, NSS_ETH_RX_STATS_MAX); +} + +/* + * nss_eth_rx_exception_stats_strings_read() + * Read Ethernet Rx exception statistics names. + */ +static ssize_t nss_eth_rx_exception_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_eth_rx_strings_exception_stats, NSS_ETH_RX_EXCEPTION_EVENT_MAX); +} + +/* + * nss_eth_rx_special_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(eth_rx_special_stats); + +/* + * nss_eth_rx_exception_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(eth_rx_exception_stats); + +/* + * nss_eth_rx_strings_dentry_create() + * Create Ethernet Rx statistics strings debug entry. + */ +void nss_eth_rx_strings_dentry_create(void) +{ + struct dentry *eth_rx_d = NULL; + struct dentry *eth_rx_spcl_stats_d = NULL; + struct dentry *eth_rx_excp_stats_d = NULL; + + if (!nss_top_main.strings_dentry) { + nss_warning("qca-nss-drv/strings is not present"); + return; + } + + eth_rx_d = debugfs_create_dir("eth_rx", nss_top_main.strings_dentry); + if (!eth_rx_d) { + nss_warning("Failed to create qca-nss-drv/strings/eth_rx directory"); + return; + } + + eth_rx_spcl_stats_d = debugfs_create_file("special_stats_str", 0400, eth_rx_d, &nss_top_main, &nss_eth_rx_special_stats_strings_ops); + if (!eth_rx_spcl_stats_d) { + nss_warning("Failed to create qca-nss-drv/stats/eth_rx/special_stats_str file"); + debugfs_remove_recursive(eth_rx_d); + return; + } + + eth_rx_excp_stats_d = debugfs_create_file("exception_stats_str", 0400, eth_rx_d, &nss_top_main, &nss_eth_rx_exception_stats_strings_ops); + if (!eth_rx_excp_stats_d) { + nss_warning("Failed to create qca-nss-drv/stats/eth_rx/exception_stats_str file"); + debugfs_remove_recursive(eth_rx_d); + return; + } +} diff --git 
a/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_strings.h new file mode 100644 index 000000000..2f40440f3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_eth_rx_strings.h @@ -0,0 +1,26 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_ETH_RX_STRINGS_H +#define __NSS_ETH_RX_STRINGS_H + +extern struct nss_stats_info nss_eth_rx_strings_stats[NSS_ETH_RX_STATS_MAX]; +extern struct nss_stats_info nss_eth_rx_strings_exception_stats[NSS_ETH_RX_EXCEPTION_EVENT_MAX]; +extern void nss_eth_rx_strings_dentry_create(void); + +#endif /* __NSS_ETH_RX_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_freq.c b/feeds/ipq807x/qca-nss-drv/src/nss_freq.c new file mode 100644 index 000000000..d55bd63b6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_freq.c @@ -0,0 +1,467 @@ +/* + ************************************************************************** + * Copyright (c) 2013, 2015-2020 The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_freq.c + * NSS frequency change APIs + */ + +#include "nss_stats.h" +#include "nss_tx_rx_common.h" +#include "nss_freq_log.h" +#include "nss_freq_stats.h" + +#define NSS_ACK_STARTED 0 +#define NSS_ACK_FINISHED 1 + +#define NSS_FREQ_USG_AVG_FREQUENCY 1000 /* Time in ms over which CPU Usage is averaged */ +#define NSS_FREQ_CPU_USAGE_MAX_BOUND 75 /* MAX CPU usage equivalent to running max instructions excluding all the hazards */ +#define NSS_FREQ_CPU_USAGE_MAX 100 /* MAX CPU usage equivalent to running max instructions including all the hazards. + This is also the ideal maximum usage value. */ + +/* + * Spinlock to protect the global data structure nss_freq_cpu_status + */ +DEFINE_SPINLOCK(nss_freq_cpu_usage_lock); + +/* + * At any point, this object has the latest data about CPU utilization. 
+ */ +struct nss_freq_cpu_usage nss_freq_cpu_status; + +extern struct nss_runtime_sampling nss_runtime_samples; +extern struct workqueue_struct *nss_wq; +extern nss_work_t *nss_work; + +/* + * nss_freq_msg_init() + * Initialize the freq message + */ +static void nss_freq_msg_init(struct nss_corefreq_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, + void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} + +/* + * nss_freq_handle_ack() + * Handle the nss ack of frequency change. + */ +static void nss_freq_handle_ack(struct nss_ctx_instance *nss_ctx, struct nss_freq_msg *nfa) +{ + if (nfa->ack == NSS_ACK_STARTED) { + /* + * NSS finished start noficiation - HW change clocks and send end notification + */ + nss_info("%px: NSS ACK Received: %d - Change HW CLK/Send Finish to NSS\n", nss_ctx, nfa->ack); + + return; + } + + if (nfa->ack == NSS_ACK_FINISHED) { + /* + * NSS finished end notification - Done + */ + nss_info("%px: NSS ACK Received: %d - End Notification ACK - Running: %dmhz\n", nss_ctx, nfa->ack, nfa->freq_current); + nss_runtime_samples.freq_scale_ready = 1; + return; + } + + nss_info("%px: NSS had an error - Running: %dmhz\n", nss_ctx, nfa->freq_current); +} + +/* + * nss_freq_queue_work() + * Queue Work to the NSS Workqueue based on Current index. + */ +static bool nss_freq_queue_work(void) +{ + nss_freq_scales_t index = nss_runtime_samples.freq_scale_index; + + BUG_ON(!nss_wq); + + nss_info("frequency:%d index:%d sample count:%x\n", nss_runtime_samples.freq_scale[index].frequency, + index, nss_runtime_samples.average); + + /* + * schedule freq change with autoscale ON + */ + return nss_freq_sched_change(index, true); +} + +/* + * nss_freq_get_cpu_usage() + * Returns the CPU usage value in percentage at any instance for a required core. Returns -1 in case of an error. + * + * Calculation frequency is 1 second. Range of usage is 0-100. This API returns -1 if CPU usage is requested for core 1. 
+ * TODO: Extend this API to get CPU usage for core 1. + */ +int8_t nss_freq_get_cpu_usage(uint32_t core_id) +{ + int8_t usage; + + if (core_id == 0) { + spin_lock_bh(&nss_freq_cpu_usage_lock); + usage = nss_freq_cpu_status.used; + spin_unlock_bh(&nss_freq_cpu_usage_lock); + + return usage; + } + + nss_warning("CPU usage functionality is not supported for core %u\n", core_id); + return -1; +} + +/* + * nss_freq_compute_cpu_usage() + * Computes the CPU utilization and maximum-minumun cpu utilization since boot. + */ +static void nss_freq_compute_cpu_usage(struct nss_ctx_instance *nss_ctx, uint32_t inst_cnt) +{ + uint32_t estimated_ins_capacity; + uint8_t actual_usage; + uint8_t usage; + + spin_lock_bh(&nss_freq_cpu_usage_lock); + + /* + * If actual CPU usage turns up higher than 100, there is something wrong with the received data. + * Upper bound average varies between 80% usage to 100% usage. + * + * TODO: To improve estimation algorithm for calculating how many actual instructions are executed. 
+ */ + actual_usage = (inst_cnt * 100) / nss_freq_cpu_status.max_ins; + if ((actual_usage > NSS_FREQ_CPU_USAGE_MAX) || (actual_usage == 0)) { + spin_unlock_bh(&nss_freq_cpu_usage_lock); + return; + } + + /* + * Simpler version of below math: This is calculating the reduced number of maximum instructions + * estimated_ins_capacity = nss_freq_cpu_status.avg_up% of nss_freq_cpu_status.max_ins + * Calculating usage percentage: usage = (inst_cnt/estimated_ins_capacity) * 100 + */ + estimated_ins_capacity = ((NSS_FREQ_CPU_USAGE_MAX_BOUND * nss_freq_cpu_status.max_ins) / 100); + if (estimated_ins_capacity == 0) { + spin_unlock_bh(&nss_freq_cpu_usage_lock); + return; + } + usage = (inst_cnt * 100) / estimated_ins_capacity; + + /* + * Average the instructions over NSS_FREQ_USG_AVG_FREQUENCY ms + */ + if (nss_freq_cpu_status.avg_ctr == NSS_FREQ_USG_AVG_FREQUENCY) { + nss_freq_cpu_status.used = nss_freq_cpu_status.total / NSS_FREQ_USG_AVG_FREQUENCY; + + /* + * Due to our estimation, this could go beyond the end limit of 100% + */ + if (nss_freq_cpu_status.used > NSS_FREQ_CPU_USAGE_MAX) { + nss_freq_cpu_status.used = NSS_FREQ_CPU_USAGE_MAX; + } + + /* + * Getting the all time max and min usage + */ + if (nss_freq_cpu_status.used > nss_freq_cpu_status.max) { + nss_freq_cpu_status.max = nss_freq_cpu_status.used; + } + + if (nss_freq_cpu_status.used < nss_freq_cpu_status.min) { + nss_freq_cpu_status.min = nss_freq_cpu_status.used; + } + + nss_trace("%px: max_instructions:%d cpu_usage:%d max_usage:%d min_usage:%d\n", nss_ctx, + nss_freq_cpu_status.max_ins, nss_freq_cpu_status.used, nss_freq_cpu_status.max, nss_freq_cpu_status.min); + + nss_freq_cpu_status.total = 0; + nss_freq_cpu_status.avg_ctr = 0; + } + + nss_freq_cpu_status.total += usage; + nss_freq_cpu_status.avg_ctr++; + + spin_unlock_bh(&nss_freq_cpu_usage_lock); +} + +/* + * nss_freq_scale_frequency() + * Frequency scaling algorithm to scale frequency. 
+ */ +void nss_freq_scale_frequency(struct nss_ctx_instance *nss_ctx, uint32_t inst_cnt) +{ + uint32_t b_index; + uint32_t minimum; + uint32_t maximum; + uint32_t index = nss_runtime_samples.freq_scale_index; + + /* + * We do not accept any statistics if auto scaling is off, + * we start with a fresh sample set when scaling is + * eventually turned on. + */ + if (!nss_cmd_buf.auto_scale && nss_runtime_samples.initialized) { + return; + } + + /* + * Delete Current Index Value, Add New Value, Recalculate new Sum, Shift Index + */ + b_index = nss_runtime_samples.buffer_index; + + nss_runtime_samples.sum = nss_runtime_samples.sum - nss_runtime_samples.buffer[b_index]; + nss_runtime_samples.buffer[b_index] = inst_cnt; + nss_runtime_samples.sum = nss_runtime_samples.sum + nss_runtime_samples.buffer[b_index]; + nss_runtime_samples.buffer_index = (b_index + 1) & NSS_SAMPLE_BUFFER_MASK; + + if (nss_runtime_samples.sample_count < NSS_SAMPLE_BUFFER_SIZE) { + nss_runtime_samples.sample_count++; + + /* + * Samples Are All Ready, Start Auto Scale + */ + if (nss_runtime_samples.sample_count == NSS_SAMPLE_BUFFER_SIZE ) { + nss_cmd_buf.auto_scale = 1; + nss_runtime_samples.freq_scale_ready = 1; + nss_runtime_samples.initialized = 1; + } + + return; + } + + nss_runtime_samples.average = nss_runtime_samples.sum / nss_runtime_samples.sample_count; + + /* + * Print out statistics every 10 samples + */ + if (nss_runtime_samples.message_rate_limit++ >= NSS_MESSAGE_RATE_LIMIT) { + nss_trace("%px: Running AVG:%x Sample:%x Divider:%d\n", nss_ctx, nss_runtime_samples.average, inst_cnt, nss_runtime_samples.sample_count); + nss_trace("%px: Current Frequency Index:%d\n", nss_ctx, index); + nss_trace("%px: Auto Scale Ready:%d Auto Scale:%d\n", nss_ctx, nss_runtime_samples.freq_scale_ready, nss_cmd_buf.auto_scale); + nss_trace("%px: Current Rate:%x\n", nss_ctx, nss_runtime_samples.average); + + nss_runtime_samples.message_rate_limit = 0; + } + + /* + * Don't scale if we are not ready or auto 
scale is disabled. + */ + if ((nss_runtime_samples.freq_scale_ready != 1) || (nss_cmd_buf.auto_scale != 1)) { + return; + } + + /* + * Scale Algorithmn + * Algorithmn will limit how fast it will transition each scale, by the number of samples seen. + * If any sample is out of scale during the idle count, the rate_limit will reset to 0. + * Scales are limited to the max number of cpu scales we support. + */ + if (nss_runtime_samples.freq_scale_rate_limit_up++ >= NSS_FREQUENCY_SCALE_RATE_LIMIT_UP) { + maximum = nss_runtime_samples.freq_scale[index].maximum; + if ((nss_runtime_samples.average > maximum) && (index < (NSS_FREQ_MAX_SCALE - 1))) { + nss_runtime_samples.freq_scale_index++; + nss_runtime_samples.freq_scale_ready = 0; + + /* + * If fail to increase frequency, decrease index + */ + nss_trace("frequency increase to %d inst:%x > maximum:%x\n", nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency, inst_cnt, maximum); + if (!nss_freq_queue_work()) { + nss_runtime_samples.freq_scale_index--; + } + } + + /* + * Reset the down scale counter based on running average, so can idle properly + */ + if (nss_runtime_samples.average > maximum) { + nss_trace("down scale timeout reset running average:%x\n", nss_runtime_samples.average); + nss_runtime_samples.freq_scale_rate_limit_down = 0; + } + + nss_runtime_samples.freq_scale_rate_limit_up = 0; + return; + } + + if (nss_runtime_samples.freq_scale_rate_limit_down++ >= NSS_FREQUENCY_SCALE_RATE_LIMIT_DOWN) { + minimum = nss_runtime_samples.freq_scale[index].minimum; + + /* + * Check if we need to lower the frequency. For some SoC like IPQ50xx, low frequency + * is not supported. 
So check if the next lower frequency is configured before shifting down + */ + if ((nss_runtime_samples.average < minimum) && (index > 0) && nss_runtime_samples.freq_scale[index - 1].maximum) { + nss_runtime_samples.freq_scale_index--; + nss_runtime_samples.freq_scale_ready = 0; + + /* + * If fail to decrease frequency, increase index + */ + nss_trace("frequency decrease to %d inst:%x < minumum:%x\n", nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency, nss_runtime_samples.average, minimum); + if (!nss_freq_queue_work()) { + nss_runtime_samples.freq_scale_index++; + } + } + nss_runtime_samples.freq_scale_rate_limit_down = 0; + return; + } +} + +/* + * nss_freq_handle_core_stats() + * Handle the core stats. + */ +static void nss_freq_handle_core_stats(struct nss_ctx_instance *nss_ctx, struct nss_core_stats *core_stats) +{ + uint32_t inst_cnt = core_stats->inst_cnt_total; + + /* + * compute CPU utilization by using the instruction count + */ + nss_freq_compute_cpu_usage(nss_ctx, inst_cnt); + + /* + * Perform frequency scaling + */ + nss_freq_scale_frequency(nss_ctx, inst_cnt); +} + +/* + * nss_freq_interface_handler() + * Handle NSS -> HLOS messages for Frequency Changes and Statistics. 
+ */ +static void nss_freq_interface_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) { + + struct nss_corefreq_msg *ncfm = (struct nss_corefreq_msg *)ncm; + + /* + * Trace Messages + */ + nss_freq_log_rx_msg(ncfm); + + switch (ncfm->cm.type) { + case COREFREQ_METADATA_TYPE_TX_FREQ_ACK: + nss_freq_handle_ack(nss_ctx, &ncfm->msg.nfc); + break; + case COREFREQ_METADATA_TYPE_TX_CORE_STATS: + nss_freq_handle_core_stats(nss_ctx, &ncfm->msg.ncs); + break; + + default: + if (ncm->response != NSS_CMN_RESPONSE_ACK) { + /* + * Check response + */ + nss_info("%px: Received response %d for type %d, interface %d", nss_ctx, ncm->response, ncm->type, ncm->interface); + } + } +} + +/* + * nss_freq_change() + * NSS frequency change API. + */ +nss_tx_status_t nss_freq_change(struct nss_ctx_instance *nss_ctx, uint32_t eng, uint32_t stats_enable, uint32_t start_or_end) +{ + struct nss_corefreq_msg ncm; + struct nss_freq_msg *nfc; + + nss_info("%px: frequency changing to: %d\n", nss_ctx, eng); + + /* + * Update the max instruction count for a frequency during down scaling. + * Better to update this as late as possible in the frequency update call. + */ + spin_lock_bh(&nss_freq_cpu_usage_lock); + nss_freq_cpu_status.max_ins = eng / 1000; + spin_unlock_bh(&nss_freq_cpu_usage_lock); + + nss_freq_msg_init(&ncm, NSS_COREFREQ_INTERFACE, NSS_TX_METADATA_TYPE_NSS_FREQ_CHANGE, + sizeof(struct nss_freq_msg), NULL, NULL); + nfc = &ncm.msg.nfc; + nfc->frequency = eng; + nfc->start_or_end = start_or_end; + nfc->stats_enable = stats_enable; + + return nss_core_send_cmd(nss_ctx, &ncm, sizeof(ncm), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_freq_sched_change() + * Schedule a frequency work. 
+ */ +bool nss_freq_sched_change(nss_freq_scales_t index, bool auto_scale) +{ + if (index >= NSS_FREQ_MAX_SCALE) { + nss_info("NSS freq scale beyond limit\n"); + return false; + } + + nss_work = (nss_work_t *)kmalloc(sizeof(nss_work_t), GFP_ATOMIC); + if (!nss_work) { + nss_info("NSS Freq WQ kmalloc fail"); + return false; + } + + INIT_WORK((struct work_struct *)nss_work, nss_hal_wq_function); + + nss_work->frequency = nss_runtime_samples.freq_scale[index].frequency; + + nss_work->stats_enable = auto_scale; + nss_cmd_buf.current_freq = nss_work->frequency; + queue_work(nss_wq, (struct work_struct *)nss_work); + + return true; +} + +/* + * nss_freq_get_context() + * Get NSS context instance for frequency. + */ +struct nss_ctx_instance *nss_freq_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.frequency_handler_id]; +} +EXPORT_SYMBOL(nss_freq_get_context); + +/* + * nss_freq_register_handler() + */ +void nss_freq_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_freq_get_context(); + nss_core_register_handler(nss_ctx, NSS_COREFREQ_INTERFACE, nss_freq_interface_handler, NULL); +} + +/* + * nss_freq_cpu_usage_init() + * Initialize cpu usage computing. + * + * TODO: Add support to retrieve CPU usage even if frequency scaling is disabled. 
+ */ +void nss_freq_init_cpu_usage(void) +{ + nss_freq_cpu_status.used = 0; + nss_freq_cpu_status.max_ins = nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency / 1000; + nss_freq_cpu_status.total = 0; + nss_freq_cpu_status.max = 0; /* Initial value is 0 to capture the highest most value during the run */ + nss_freq_cpu_status.min = NSS_FREQ_CPU_USAGE_MAX; /* Initial value is 100 to capture the lowest most value during the run */ + nss_freq_cpu_status.avg_up = NSS_FREQ_CPU_USAGE_MAX_BOUND; + nss_freq_cpu_status.avg_ctr = 0; + + nss_freq_stats_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_freq_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_freq_log.c new file mode 100644 index 000000000..9b96184cd --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_freq_log.c @@ -0,0 +1,100 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_freq_log.c + * NSS Freq logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_freq_log_message_types_str + * Freq message strings + */ +static int8_t *nss_freq_log_message_types_str[COREFREQ_METADATA_TYPE_MAX] __maybe_unused = { + "Freq Error Message", + "Freq Change", + "Freq ACK", + "TX Core Stats", +}; + +/* + * nss_freq_log_msg() + * Log NSS Freq message. + */ +static void nss_freq_log_msg(struct nss_corefreq_msg *ncm) +{ + struct nss_freq_msg *nfm __maybe_unused = &ncm->msg.nfc; + nss_trace("%px: NSS Freq Message:\n" + "Frequency request: %d\n" + "Frequency start/end: %d\n" + "Frequency stats enable: %d\n" + "Current Frequency: %d\n" + "Frequency ACK: %d\n", + nfm, nfm->frequency, nfm->start_or_end, + nfm->stats_enable, nfm->freq_current, + nfm->ack); +} + +/* + * nss_freq_log_verbose() + * Log message contents. + */ +static void nss_freq_log_verbose(struct nss_corefreq_msg *ncm) +{ + switch (ncm->cm.type) { + case COREFREQ_METADATA_TYPE_RX_FREQ_CHANGE: + case COREFREQ_METADATA_TYPE_TX_FREQ_ACK: + nss_freq_log_msg(ncm); + break; + + case COREFREQ_METADATA_TYPE_TX_CORE_STATS: + /* + * No log for a valid stats message. + */ + break; + + default: + nss_warning("%px: Invalid message type\n", ncm); + break; + } +} + +/* + * nss_freq_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_freq_log_rx_msg(struct nss_corefreq_msg *ncm) +{ + if (ncm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ncm); + return; + } + + if (ncm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ncm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d], response[%d]:%s\n", ncm, ncm->cm.type, + ncm->cm.response, nss_cmn_response_str[ncm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + ncm, ncm->cm.type, nss_freq_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response]); + +verbose: + nss_freq_log_verbose(ncm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_freq_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_freq_log.h new file mode 100644 index 000000000..ab7d7d4f6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_freq_log.h @@ -0,0 +1,35 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_FREQ_LOG_H +#define __NSS_FREQ_LOG_H + +/* + * nss_freq_log.h + * NSS frequency log header file. 
+ */ + +/* + * Logger APIs + */ + +/* + * nss_freq_log_rx_msg + * Logs a frequency message that is received from the NSS firmware. + */ +void nss_freq_log_rx_msg(struct nss_corefreq_msg *nbm); + +#endif /* __NSS_FREQ_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_freq_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_freq_stats.c new file mode 100644 index 000000000..32e62c3c6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_freq_stats.c @@ -0,0 +1,86 @@ +/* + ************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_freq_stats.c + * NSS Frequency statistics APIs. + */ + +#include "nss_stats.h" +#include "nss_tx_rx_common.h" + +/* + * At any point, this object has the latest data about CPU utilization. + */ +extern struct nss_freq_cpu_usage nss_freq_cpu_status; + +/* + * Spinlock to protect the global data structure nss_freq_cpu_status + */ +extern spinlock_t nss_freq_cpu_usage_lock; + +/* + * nss_freq_stats_read() + * Read frequency stats and display CPU information. 
+ */ +static ssize_t nss_freq_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * max output lines = Should change in case of number of lines below. + */ + uint32_t max_output_lines = (2 + 3) + 5; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint32_t avg, max, min; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + size_wr = scnprintf(lbuf, size_al, "CPU Utilization:\n"); + + spin_lock_bh(&nss_freq_cpu_usage_lock); + avg = nss_freq_cpu_status.used; + max = nss_freq_cpu_status.max; + min = nss_freq_cpu_status.min; + spin_unlock_bh(&nss_freq_cpu_usage_lock); + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Note: Averaged over 1 second\n\n"); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Core 0:\n"); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "Min\tAvg\tMax\n"); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, " %u%%\t %u%%\t %u%%\n\n", min, avg, max); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + + return bytes_read; +} + +/* + * nss_freq_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(freq) + +/* + * nss_freq_dentry_create() + */ +void nss_freq_stats_dentry_create(void) +{ + nss_stats_create_dentry("cpu_load_ubi", &nss_freq_stats_ops); +} \ No newline at end of file diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_freq_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_freq_stats.h new file mode 100644 index 000000000..72e00cc86 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_freq_stats.h @@ -0,0 +1,29 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +/* + * nss_freq_stats.h + * NSS Frequency statistics header file. + */ + +#ifndef __NSS_FREQ_STATS_H +#define __NSS_FREQ_STATS_H + +#include "nss_core.h" + +extern void nss_freq_stats_dentry_create(void); + +#endif /* __NSS_FREQ_STATS_H */ \ No newline at end of file diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gmac_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_gmac_stats.c new file mode 100644 index 000000000..23f5924e2 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gmac_stats.c @@ -0,0 +1,83 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_gmac_stats.h" + +/* + * nss_gmac_stats_str + * GMAC stats strings. + */ +struct nss_stats_info nss_gmac_stats_str[NSS_GMAC_STATS_MAX] = { + {"ticks" , NSS_STATS_TYPE_SPECIAL}, + {"worst_ticks" , NSS_STATS_TYPE_SPECIAL}, + {"iterations" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_gmac_stats_read() + * Read GMAC stats. + */ +ssize_t nss_gmac_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + uint32_t i, id; + + /* + * max output lines = ((#stats + start tag + one blank) * #GMACs) Number of Extra outputlines for future + * reference to add new stats + start/end tag + 3 blank + */ + uint32_t max_output_lines = NSS_GMAC_STATS_MAX * NSS_MAX_PHYSICAL_INTERFACES + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_GMAC_STATS_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "gmac", NSS_STATS_SINGLE_CORE); + + for (id = 0; id < NSS_MAX_PHYSICAL_INTERFACES; id++) { + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_GMAC_STATS_MAX); i++) { + stats_shadow[i] = nss_top_main.stats_gmac[id][i]; + } + + 
spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("gmac", "gmac stats", id + , nss_gmac_stats_str + , stats_shadow + , NSS_GMAC_STATS_MAX + , lbuf, size_wr, size_al); + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ngmac stats end\n\n"); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gmac_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_gmac_stats.h new file mode 100644 index 000000000..646143ed0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gmac_stats.h @@ -0,0 +1,33 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_GMAC_STATS_H +#define __NSS_GMAC_STATS_H + +#include + +/* + * GMAC node statistics + */ +enum nss_stats_gmac { + NSS_GMAC_STATS_TOTAL_TICKS, /* Total clock ticks spend inside the GMAC */ + NSS_GMAC_STATS_WORST_CASE_TICKS, /* Worst case iteration of the GMAC in ticks */ + NSS_GMAC_STATS_ITERATIONS, /* Number of iterations around the GMAC */ + NSS_GMAC_STATS_MAX, +}; + +extern ssize_t nss_gmac_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos); +#endif /* __NSS_GMAC_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre.c new file mode 100644 index 000000000..46cd72c71 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre.c @@ -0,0 +1,411 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_gre_stats.h" +#include "nss_gre_log.h" +#include "nss_gre_strings.h" + +#define NSS_GRE_TX_TIMEOUT 3000 /* 3 Seconds */ + +/* + * Private data structure + */ +static struct { + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; +} nss_gre_pvt; + +/* + * TODO: Register separate callbacks for inner and outer GRE nodes. + */ +static atomic64_t pkt_cb_addr = ATOMIC64_INIT(0); + +/* + * nss_gre_inner_rx_handler() + * GRE inner rx handler. + */ +static void nss_gre_inner_rx_handler(struct net_device *dev, struct sk_buff *skb, + __attribute__((unused)) struct napi_struct *napi) +{ + nss_gre_data_callback_t cb; + + nss_gre_pkt_callback_t scb = (nss_gre_pkt_callback_t)(unsigned long)atomic64_read(&pkt_cb_addr); + if (unlikely(scb)) { + struct nss_gre_info *info = (struct nss_gre_info *)netdev_priv(dev); + if (likely(info->next_dev_inner)) { + scb(info->next_dev_inner, skb); + } + } + + cb = nss_top_main.gre_inner_data_callback; + cb(dev, skb, 0); +} + +/* + * nss_gre_outer_rx_handler() + * GRE outer rx handler. 
+ */ +static void nss_gre_outer_rx_handler(struct net_device *dev, struct sk_buff *skb, + __attribute__((unused)) struct napi_struct *napi) +{ + nss_gre_data_callback_t cb; + + nss_gre_pkt_callback_t scb = (nss_gre_pkt_callback_t)(unsigned long)atomic64_read(&pkt_cb_addr); + if (unlikely(scb)) { + struct nss_gre_info *info = (struct nss_gre_info *)netdev_priv(dev); + if (likely(info->next_dev_outer)) { + scb(info->next_dev_outer, skb); + } + } + + cb = nss_top_main.gre_outer_data_callback; + cb(dev, skb, 0); +} + +/* + * nss_gre_msg_handler() + * Handle NSS -> HLOS messages for GRE + */ +static void nss_gre_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_gre_msg *ntm = (struct nss_gre_msg *)ncm; + void *ctx; + + nss_gre_msg_callback_t cb; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + BUG_ON(!(nss_is_dynamic_interface(ncm->interface) || ncm->interface == NSS_GRE_INTERFACE)); + + /* + * Trace Messages + */ + nss_gre_log_rx_msg(ntm); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_GRE_MSG_MAX) { + nss_warning("%px: received invalid message %d for GRE STD interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_gre_msg)) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return; + } + + switch (ntm->cm.type) { + case NSS_GRE_MSG_SESSION_STATS: + /* + * debug stats embedded in stats msg + */ + nss_gre_stats_session_sync(nss_ctx, &ntm->msg.sstats, ncm->interface); + nss_gre_stats_session_notify(nss_ctx, ncm->interface); + break; + + case NSS_GRE_MSG_BASE_STATS: + nss_gre_stats_base_sync(nss_ctx, &ntm->msg.bstats); + nss_gre_stats_base_notify(nss_ctx); + break; + + default: + break; + + } + + /* + * Update the callback and app_data for NOTIFY messages, gre sends all notify messages + * to the same callback/app_data. 
+ */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->gre_msg_callback; + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].app_data; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * callback + */ + cb = (nss_gre_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + /* + * call gre-std callback + */ + if (!cb) { + nss_warning("%px: No callback for gre-std interface %d", + nss_ctx, ncm->interface); + return; + } + + cb(ctx, ntm); +} + +/* + * nss_gre_callback() + * Callback to handle the completion of HLOS-->NSS messages. + */ +static void nss_gre_callback(void *app_data, struct nss_gre_msg *nim) +{ + nss_gre_msg_callback_t callback = (nss_gre_msg_callback_t)nss_gre_pvt.cb; + void *data = nss_gre_pvt.app_data; + + nss_gre_pvt.cb = NULL; + nss_gre_pvt.app_data = NULL; + + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("gre Error response %d\n", nim->cm.response); + nss_gre_pvt.response = NSS_TX_FAILURE; + } else { + nss_gre_pvt.response = NSS_TX_SUCCESS; + } + + if (callback) { + callback(data, nim); + } + + complete(&nss_gre_pvt.complete); +} + +/* + * nss_gre_register_pkt_callback() + * Register for data callback. + */ +void nss_gre_register_pkt_callback(nss_gre_pkt_callback_t cb) +{ + atomic64_set(&pkt_cb_addr, (unsigned long)cb); +} +EXPORT_SYMBOL(nss_gre_register_pkt_callback); + +/* + * nss_gre_unregister_pkt_callback() + * Unregister for data callback. 
+ */ +void nss_gre_unregister_pkt_callback() +{ + atomic64_set(&pkt_cb_addr, 0); +} +EXPORT_SYMBOL(nss_gre_unregister_pkt_callback); + +/* + * nss_gre_tx_msg() + * Transmit a GRE message to NSS firmware + */ +nss_tx_status_t nss_gre_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Sanity check the message + */ + if (!nss_is_dynamic_interface(ncm->interface)) { + nss_warning("%px: tx request for non dynamic interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_GRE_MSG_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Trace Messages + */ + nss_gre_log_tx_msg(msg); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_gre_tx_msg); + +/* + * nss_gre_tx_msg_sync() + * Transmit a GRE message to NSS firmware synchronously. + */ +nss_tx_status_t nss_gre_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_msg *msg) +{ + nss_tx_status_t status; + int ret = 0; + + down(&nss_gre_pvt.sem); + nss_gre_pvt.cb = (void *)msg->cm.cb; + nss_gre_pvt.app_data = (void *)msg->cm.app_data; + + msg->cm.cb = (nss_ptr_t)nss_gre_callback; + msg->cm.app_data = (nss_ptr_t)NULL; + + status = nss_gre_tx_msg(nss_ctx, msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: gre_tx_msg failed\n", nss_ctx); + up(&nss_gre_pvt.sem); + return status; + } + ret = wait_for_completion_timeout(&nss_gre_pvt.complete, msecs_to_jiffies(NSS_GRE_TX_TIMEOUT)); + + if (!ret) { + nss_warning("%px: GRE STD tx sync failed due to timeout\n", nss_ctx); + nss_gre_pvt.response = NSS_TX_FAILURE; + } + + status = nss_gre_pvt.response; + up(&nss_gre_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_gre_tx_msg_sync); + +/* + * nss_gre_tx_buf() + * Send packet to GRE interface owned by NSS + */ +nss_tx_status_t nss_gre_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, struct sk_buff 
*skb) +{ + return nss_core_send_packet(nss_ctx, skb, if_num, H2N_BIT_FLAG_VIRTUAL_BUFFER | H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_gre_tx_buf); + +/* + *********************************** + * Register/Unregister/Miscellaneous APIs + *********************************** + */ + +/* + * nss_gre_register_if() + * Register data and message handlers for GRE. + */ +struct nss_ctx_instance *nss_gre_register_if(uint32_t if_num, uint32_t type, nss_gre_data_callback_t data_callback, + nss_gre_msg_callback_t event_callback, struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_handler_id]; + + nss_assert(nss_ctx); + nss_assert(nss_is_dynamic_interface(if_num)); + + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_INNER: + nss_core_register_subsys_dp(nss_ctx, if_num, nss_gre_inner_rx_handler, NULL, netdev, netdev, features); + nss_top_main.gre_inner_data_callback = data_callback; + break; + + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_OUTER: + nss_core_register_subsys_dp(nss_ctx, if_num, nss_gre_outer_rx_handler, NULL, netdev, netdev, features); + nss_top_main.gre_outer_data_callback = data_callback; + break; + + default: + nss_warning("%px: Unable to register. Wrong interface type %d\n", nss_ctx, type); + return NULL; + } + + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, type); + + nss_top_main.gre_msg_callback = event_callback; + + nss_core_register_handler(nss_ctx, if_num, nss_gre_msg_handler, NULL); + + nss_gre_stats_session_register(if_num, netdev); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_gre_register_if); + +/* + * nss_gre_unregister_if() + * Unregister data and message handler. 
+ */ +void nss_gre_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_handler_id]; + struct net_device *dev; + + nss_assert(nss_ctx); + nss_assert(nss_is_dynamic_interface(if_num)); + + dev = nss_cmn_get_interface_dev(nss_ctx, if_num); + if (!dev) { + nss_warning("%px: Unable to find net device for the interface %d\n", nss_ctx, if_num); + return; + } + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + nss_core_set_subsys_dp_type(nss_ctx, dev, if_num, NSS_DYNAMIC_INTERFACE_TYPE_NONE); + nss_top_main.gre_msg_callback = NULL; + + nss_core_unregister_handler(nss_ctx, if_num); + + nss_gre_stats_session_unregister(if_num); +} +EXPORT_SYMBOL(nss_gre_unregister_if); + +/* + * nss_get_gre_context() + */ +struct nss_ctx_instance *nss_gre_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_handler_id]; +} +EXPORT_SYMBOL(nss_gre_get_context); + +/* + * nss_gre_ifnum_with_core_id() + * Append core id to GRE interface num. + */ +int nss_gre_ifnum_with_core_id(int if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_gre_get_context(); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + if (!nss_is_dynamic_interface(if_num)) { + nss_warning("%px: Invalid if_num: %d, must be a dynamic interface\n", nss_ctx, if_num); + return 0; + } + + return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_gre_ifnum_with_core_id); + +/* + * nss_gre_msg_init() + * Initialize nss_gre msg. 
+ */ +void nss_gre_msg_init(struct nss_gre_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_gre_msg_init); + +/* + * nss_gre_register_handler() + * debugfs stats msg handler received on static gre interface + */ +void nss_gre_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_gre_get_context(); + + nss_info("nss_gre_register_handler"); + sema_init(&nss_gre_pvt.sem, 1); + init_completion(&nss_gre_pvt.complete); + nss_core_register_handler(nss_ctx, NSS_GRE_INTERFACE, nss_gre_msg_handler, NULL); + nss_gre_stats_dentry_create(); + nss_gre_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_log.c new file mode 100644 index 000000000..c2f752ba5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_log.c @@ -0,0 +1,187 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_gre_log.c + * NSS GRE logger file. 
+ */ + +#include "nss_core.h" + +#define NSS_GRE_LOG_MESSAGE_TYPE_INDEX(type) ((type) - NSS_IF_MAX_MSG_TYPES - 1) + +/* + * nss_gre_log_message_types_str + * NSS GRE message strings + */ +static int8_t *nss_gre_log_message_types_str[NSS_GRE_MSG_MAX] __maybe_unused = { + "GRE Message Configure", + "GRE Message Deconfigure", + "GRE Session Stats", + "GRE Base Stats" +}; + +/* + * nss_gre_log_config_msg() + * Log NSS GRE Config message. + */ +static void nss_gre_log_config_msg(struct nss_gre_msg *ngm) +{ + struct nss_gre_config_msg *ngcm __maybe_unused = &ngm->msg.cmsg; + nss_trace("%px: NSS GRE Config message\n" + "GRE flags: %d\n" + "GRE ikey: %d\n" + "GRE okey: %d\n" + "GRE mode: %d\n" + "GRE ip type: %d\n" + "GRE interface number: %d\n" + "GRE Src MAC: %pM\n" + "GRE Dst MAC: %pM\n" + "GRE ttl: %d\n" + "GRE tos: %d\n" + "GRE metadata size: %d\n", + ngcm, ngcm->flags, ngcm->ikey, ngcm->okey, + ngcm->mode, ngcm->ip_type, ngcm->next_node_if_num, + ngcm->src_mac, ngcm->dest_mac, ngcm->ttl, ngcm->tos, + ngcm->metadata_size); + /* + * Continuation of the log message. Different identifiers based on IP type. + */ + if (ngcm->ip_type == NSS_GRE_IP_IPV6) { + nss_trace("GRE Source IP: %pI6\n" + "GRE Dest IP: %pI6\n", + ngcm->src_ip, ngcm->dest_ip); + } else { + nss_trace("GRE Source IP: %pI4\n" + "GRE Dest IP: %pI4\n", + ngcm->src_ip, ngcm->dest_ip); + } +} + +/* + * nss_gre_log_deconfig_msg() + * Log NSS GRE deconfig message. + */ +static void nss_gre_log_deconfig_msg(struct nss_gre_msg *ngm) +{ + struct nss_gre_deconfig_msg *ngdm __maybe_unused = &ngm->msg.dmsg; + nss_trace("%px: NSS GRE deconfig message\n" + "GRE interface number: %d\n", + ngdm, ngdm->if_number); +} + +/* + * nss_gre_log_linkup_msg() + * Log NSS GRE linkup message. 
+ */ +static void nss_gre_log_linkup_msg(struct nss_gre_msg *ngm) +{ + struct nss_gre_linkup_msg *nglm __maybe_unused = &ngm->msg.linkup; + nss_trace("%px: NSS GRE linkup message\n" + "GRE interface number: %d\n", + nglm, nglm->if_number); +} + +/* + * nss_gre_log_linkdown_msg() + * Log NSS GRE linkdown message. + */ +static void nss_gre_log_linkdown_msg(struct nss_gre_msg *ngm) +{ + struct nss_gre_linkdown_msg *ngdm __maybe_unused = &ngm->msg.linkdown; + nss_trace("%px: NSS GRE linkdown message\n" + "GRE interface number: %d\n", + ngdm, ngdm->if_number); +} + +/* + * nss_gre_log_verbose() + * Log message contents. + */ +static void nss_gre_log_verbose(struct nss_gre_msg *ngm) +{ + switch (ngm->cm.type) { + case NSS_GRE_MSG_ENCAP_CONFIGURE: + case NSS_GRE_MSG_DECAP_CONFIGURE: + nss_gre_log_config_msg(ngm); + break; + + case NSS_GRE_MSG_ENCAP_DECONFIGURE: + case NSS_GRE_MSG_DECAP_DECONFIGURE: + nss_gre_log_deconfig_msg(ngm); + break; + + case NSS_IF_OPEN: + nss_gre_log_linkup_msg(ngm); + break; + + case NSS_IF_CLOSE: + nss_gre_log_linkdown_msg(ngm); + break; + + case NSS_GRE_MSG_SESSION_STATS: + case NSS_GRE_MSG_BASE_STATS: + /* + * No log for valid stats messages. + */ + break; + + default: + nss_trace("%px: Invalid message type\n", ngm); + break; + } +} + +/* + * nss_gre_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_gre_log_tx_msg(struct nss_gre_msg *ngm) +{ + if (ngm->cm.type >= NSS_GRE_MSG_MAX) { + nss_warning("%px: Invalid message type\n", ngm); + return; + } + + nss_info("%px: type[%d]:%s\n", ngm, ngm->cm.type, nss_gre_log_message_types_str[NSS_GRE_LOG_MESSAGE_TYPE_INDEX(ngm->cm.type)]); + nss_gre_log_verbose(ngm); +} + +/* + * nss_gre_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_gre_log_rx_msg(struct nss_gre_msg *ngm) +{ + if (ngm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ngm); + return; + } + + if (ngm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ngm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ngm, ngm->cm.type, + nss_gre_log_message_types_str[NSS_GRE_LOG_MESSAGE_TYPE_INDEX(ngm->cm.type)], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + ngm, ngm->cm.type, nss_gre_log_message_types_str[NSS_GRE_LOG_MESSAGE_TYPE_INDEX(ngm->cm.type)], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response]); + +verbose: + nss_gre_log_verbose(ngm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_log.h new file mode 100644 index 000000000..2a2111785 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_GRE_LOG_H +#define __NSS_GRE_LOG_H + +/* + * nss_gre_log.h + * NSS GRE header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_gre_log_tx_msg + * Logs a gre message that is sent to the NSS firmware. + */ +void nss_gre_log_tx_msg(struct nss_gre_msg *ngm); + +/* + * nss_gre_log_rx_msg + * Logs a gre message that is received from the NSS firmware. + */ +void nss_gre_log_rx_msg(struct nss_gre_msg *ngm); + +#endif /* __NSS_GRE_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir.c new file mode 100644 index 000000000..73e7c9fc3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir.c @@ -0,0 +1,673 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
 **************************************************************************
 */

#include "nss_tx_rx_common.h"
#include "nss_gre_redir_stats.h"
#include "nss_gre_redir_strings.h"
#include "nss_gre_redir_log.h"
#define NSS_GRE_REDIR_TX_TIMEOUT 3000 /* 3 Seconds */

/* debugfs directory holding the gre_redir statistics entries */
static struct dentry *gre_redir_dentry;

/*
 * Private data structure for handling synchronous messaging.
 */
static struct {
	struct semaphore sem;		/* serializes synchronous senders */
	struct completion complete;	/* signalled by the sync msg callback */
	int response;			/* NSS_TX_SUCCESS/NSS_TX_FAILURE of last sync msg */
} nss_gre_redir_pvt;

/*
 * Array to hold tunnel stats along with if_num
 */
struct nss_gre_redir_tunnel_stats tun_stats[NSS_GRE_REDIR_MAX_INTERFACES];

/*
 * nss_gre_redir_msg_sync_callback()
 *	Callback to handle the completion of HLOS-->NSS messages.
 *	(Original header comment named this "nss_gre_callback()"; fixed to
 *	match the actual function name.)
 *
 * app_data is unused; the result is recorded in nss_gre_redir_pvt.response
 * and the waiter in nss_gre_redir_tx_msg_sync() is woken via the completion.
 */
static void nss_gre_redir_msg_sync_callback(void *app_data, struct nss_gre_redir_msg *nim)
{
	nss_gre_redir_pvt.response = NSS_TX_SUCCESS;
	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
		nss_warning("gre Error response %d\n", nim->cm.response);
		nss_gre_redir_pvt.response = NSS_TX_FAILURE;
	}

	complete(&nss_gre_redir_pvt.complete);
}

/*
 * nss_gre_redir_verify_ifnum()
 *	Verify interface type.
 *	Returns true when if_num is one of the six gre_redir dynamic
 *	interface types.
 */
bool nss_gre_redir_verify_ifnum(uint32_t if_num)
{
	uint32_t type;

	type = nss_dynamic_interface_get_type(nss_gre_redir_get_context(), if_num);
	return type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_HOST_INNER ||
		type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_OFFL_INNER ||
		type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_SJACK_INNER ||
		type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_OUTER ||
		type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_US ||
		type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_DS;
}

/*
 * nss_gre_redir_handler()
 *	Handle NSS -> HLOS messages for GRE tunnel.
 */
static void nss_gre_redir_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data)
{
	struct nss_gre_redir_msg *ngrm = (struct nss_gre_redir_msg *)ncm;
	void *ctx;
	nss_gre_redir_msg_callback_t cb;

	/*
	 * interface should either be dynamic interface for receiving tunnel msg or GRE_REDIR interface for
	 * receiving base node messages.
	 */
	BUG_ON(((ncm->interface < NSS_DYNAMIC_IF_START) || (ncm->interface >= (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))) &&
		ncm->interface != NSS_GRE_REDIR_INTERFACE);

	/*
	 * Trace Messages
	 */
	nss_gre_redir_log_rx_msg(ngrm);

	/*
	 * Is this a valid request/response packet?
	 */
	if (ncm->type >= NSS_GRE_REDIR_MAX_MSG_TYPES) {
		nss_warning("%px: Received invalid message %d for gre interface", nss_ctx, ncm->type);
		return;
	}

	/* Drop messages whose declared length exceeds the message struct. */
	if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_gre_redir_msg)) {
		nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm));
		return;
	}

	/*
	 * Update the callback and app_data for NOTIFY messages, gre sends all notify messages
	 * to the same callback/app_data.
	 */
	if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) {
		ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface);
		ncm->app_data = (nss_ptr_t)nss_ctx->nss_rx_interface_handlers[ncm->interface].app_data;
	}

	/*
	 * Log failures
	 */
	nss_core_log_msg_failures(nss_ctx, ncm);

	/* Stats-sync messages update the local mirror before any callback runs. */
	switch (ncm->type) {
	case NSS_GRE_REDIR_RX_STATS_SYNC_MSG:
		nss_gre_redir_stats_sync(nss_ctx, ncm->interface, &ngrm->msg.stats_sync);
		nss_gre_redir_stats_notify(nss_ctx, ncm->interface);
		break;
	}

	/*
	 * Do we have a call back
	 */
	if (!ncm->cb) {
		return;
	}

	/*
	 * callback
	 */
	cb = (nss_gre_redir_msg_callback_t)ncm->cb;
	ctx = (void *)ncm->app_data;

	/*
	 * call gre tunnel callback
	 */
	cb(ctx, ncm);
}

/*
 * nss_gre_redir_register_if()
 *	Register dynamic node for GRE redir.
+ */ +static struct nss_ctx_instance *nss_gre_redir_register_if(uint32_t if_num, struct net_device *netdev, + nss_gre_redir_data_callback_t cb_func_data, nss_gre_redir_msg_callback_t cb_func_msg, uint32_t features, + uint32_t type, void *app_ctx) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_redir_handler_id]; + uint32_t status; + int i, idx = -1; + + nss_assert(nss_ctx); + nss_assert((if_num >= NSS_DYNAMIC_IF_START) && (if_num < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))); + + spin_lock_bh(&nss_gre_redir_stats_lock); + for (i = 0; i < NSS_GRE_REDIR_MAX_INTERFACES; i++) { + if (tun_stats[i].dev == netdev) { + idx = i; + break; + } + + if ((idx == -1) && (tun_stats[i].ref_count == 0)) { + idx = i; + } + } + + if (idx == -1) { + spin_unlock_bh(&nss_gre_redir_stats_lock); + nss_warning("%px: Maximum number of gre_redir tunnel_stats instances are already allocated\n", nss_ctx); + return NULL; + } + + if (!tun_stats[idx].ref_count) { + tun_stats[idx].dev = netdev; + } + tun_stats[idx].ref_count++; + + spin_unlock_bh(&nss_gre_redir_stats_lock); + + /* + * Registering handler for sending tunnel interface msgs to NSS. + */ + status = nss_core_register_handler(nss_ctx, if_num, nss_gre_redir_msg_handler, app_ctx); + if (status != NSS_CORE_STATUS_SUCCESS) { + spin_lock_bh(&nss_gre_redir_stats_lock); + tun_stats[idx].ref_count--; + if (!tun_stats[idx].ref_count) { + tun_stats[idx].dev = NULL; + } + spin_unlock_bh(&nss_gre_redir_stats_lock); + + nss_warning("%px: Not able to register handler for gre_redir interface %d with NSS core\n", nss_ctx, if_num); + return NULL; + } + + /* + * Registering handler for sending tunnel interface msgs to NSS. 
+ */ + status = nss_core_register_msg_handler(nss_ctx, if_num, cb_func_msg); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + spin_lock_bh(&nss_gre_redir_stats_lock); + tun_stats[idx].ref_count--; + if (!tun_stats[idx].ref_count) { + tun_stats[idx].dev = NULL; + } + spin_unlock_bh(&nss_gre_redir_stats_lock); + + nss_warning("%px: Not able to register handler for gre_redir interface %d with NSS core\n", nss_ctx, if_num); + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, cb_func_data, NULL, NULL, netdev, features); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, type); + return nss_ctx; +} + +/* + * nss_gre_redir_get_context() + * Retrieve context for GRE redir. + */ +struct nss_ctx_instance *nss_gre_redir_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_redir_handler_id]; +} +EXPORT_SYMBOL(nss_gre_redir_get_context); + +/* + * nss_gre_redir_alloc_and_register_node() + * Allocates and registers GRE Inner/Outer type dynamic nodes with NSS. 
+ */ +int nss_gre_redir_alloc_and_register_node(struct net_device *dev, + nss_gre_redir_data_callback_t data_cb, + nss_gre_redir_msg_callback_t msg_cb, + uint32_t type, void *app_ctx) +{ + int ifnum; + nss_tx_status_t status; + struct nss_ctx_instance *nss_ctx; + + if ((type != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_HOST_INNER) && + (type != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_OFFL_INNER) && + (type != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_SJACK_INNER) && + (type != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_OUTER) && + (type != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_US) && + (type != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_DS)) { + + nss_warning("%px: Unknown type %u\n", dev, type); + return -1; + } + + ifnum = nss_dynamic_interface_alloc_node(type); + if (ifnum == -1) { + nss_warning("%px: Unable to allocate GRE_REDIR node of type = %u\n", dev, type); + return -1; + } + + nss_ctx = nss_gre_redir_register_if(ifnum, dev, data_cb, + msg_cb, 0, type, app_ctx); + if (!nss_ctx) { + nss_warning("Unable to register GRE_REDIR node of type = %u\n", type); + status = nss_dynamic_interface_dealloc_node(ifnum, type); + if (status != NSS_TX_SUCCESS) { + nss_warning("Unable to deallocate node.\n"); + } + + return -1; + } + + return ifnum; +} +EXPORT_SYMBOL(nss_gre_redir_alloc_and_register_node); + +/* + * nss_gre_redir_configure_inner_node() + * Configure an inner type gre_redir dynamic node. 
 */
nss_tx_status_t nss_gre_redir_configure_inner_node(int ifnum,
		struct nss_gre_redir_inner_configure_msg *ngrcm)
{
	struct nss_gre_redir_msg config;
	uint32_t len, iftype, outerif_type;
	nss_tx_status_t status;

	struct nss_ctx_instance *nss_ctx __maybe_unused = nss_gre_redir_get_context();
	if (!nss_ctx) {
		nss_warning("Unable to retrieve NSS context.\n");
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	/* Only IPv4 and IPv6 outer headers are accepted. */
	if (ngrcm->ip_hdr_type != NSS_GRE_REDIR_IP_HDR_TYPE_IPV4 &&
		ngrcm->ip_hdr_type != NSS_GRE_REDIR_IP_HDR_TYPE_IPV6) {
		nss_warning("%px: Unknown IP header type %u\n", nss_ctx, ngrcm->ip_hdr_type);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	if (ngrcm->gre_version != NSS_GRE_REDIR_HEADER_VERSION) {
		nss_warning("%px: Incorrect header version %u\n", nss_ctx, ngrcm->gre_version);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	/* The target node must be one of the three inner types. */
	iftype = nss_dynamic_interface_get_type(nss_ctx, ifnum);
	if (!((iftype == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_HOST_INNER) ||
		(iftype == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_OFFL_INNER) ||
		(iftype == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_SJACK_INNER))) {

		nss_warning("%px: Incorrect interface type %u\n", nss_ctx, iftype);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	/* Exception traffic must be steered to an outer node. */
	outerif_type = nss_dynamic_interface_get_type(nss_ctx, ngrcm->except_outerif);
	if (outerif_type != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_OUTER) {
		nss_warning("%px: Incorrect type for exception interface %u\n", nss_ctx, outerif_type);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	len = sizeof(struct nss_gre_redir_inner_configure_msg);

	/*
	 * Configure the node
	 */
	nss_cmn_msg_init(&config.cm, ifnum, NSS_GRE_REDIR_TX_TUNNEL_INNER_CONFIGURE_MSG, len, NULL, NULL);
	config.msg.inner_configure.ip_hdr_type = ngrcm->ip_hdr_type;
	config.msg.inner_configure.ip_df_policy = ngrcm->ip_df_policy;
	config.msg.inner_configure.gre_version = ngrcm->gre_version;
	config.msg.inner_configure.ip_ttl = ngrcm->ip_ttl;
	config.msg.inner_configure.except_outerif = ngrcm->except_outerif;
	memcpy((void *)config.msg.inner_configure.ip_src_addr, (void *)(ngrcm->ip_src_addr), sizeof(ngrcm->ip_src_addr));
	memcpy((void *)config.msg.inner_configure.ip_dest_addr, (void *)(ngrcm->ip_dest_addr), sizeof(ngrcm->ip_dest_addr));

	status = nss_gre_redir_tx_msg_sync(nss_ctx, &config);
	if (status != NSS_TX_SUCCESS) {
		nss_warning("%px: Unable to configure inner node %d.\n", nss_ctx, ifnum);
	}

	return status;
}
EXPORT_SYMBOL(nss_gre_redir_configure_inner_node);

/*
 * nss_gre_redir_exception_ds_reg_cb()
 *	Configure a callback on VAP for downstream exception tunnel flows.
 */
nss_tx_status_t nss_gre_redir_exception_ds_reg_cb(int ifnum,
		struct nss_gre_redir_exception_ds_reg_cb_msg *ngrcm)
{
	struct nss_gre_redir_msg config;
	struct nss_ctx_instance *nss_ctx __maybe_unused = nss_gre_redir_get_context();
	nss_tx_status_t status;
	uint32_t vap_type, iftype;
	uint32_t len = sizeof(struct nss_gre_redir_exception_ds_reg_cb_msg);

	if (!nss_ctx) {
		nss_warning("Unable to retrieve NSS context.\n");
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	/* Only an EXCEPTION_DS node can carry this registration. */
	iftype = nss_dynamic_interface_get_type(nss_ctx, ifnum);
	if (iftype != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_DS) {
		nss_warning("%px: Incorrect interface type %u\n", nss_ctx, iftype);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	/* The destination must be a VAP interface. */
	vap_type = nss_dynamic_interface_get_type(nss_ctx, ngrcm->dst_vap_nssif);
	if ((vap_type != NSS_DYNAMIC_INTERFACE_TYPE_VAP)) {
		nss_warning("%px: Incorrect type for vap interface type = %u", nss_ctx, vap_type);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	/*
	 * Configure the node
	 */
	nss_cmn_msg_init(&config.cm, ifnum, NSS_GRE_REDIR_EXCEPTION_DS_REG_CB_MSG, len, NULL, NULL);
	config.msg.exception_ds_configure.dst_vap_nssif = ngrcm->dst_vap_nssif;

	status = nss_gre_redir_tx_msg_sync(nss_ctx, &config);
	if (status != NSS_TX_SUCCESS) {
		nss_warning("%px: Unable to register callback from gre redir exception ds %d\n", nss_ctx, ifnum);
	}

	return status;
}
EXPORT_SYMBOL(nss_gre_redir_exception_ds_reg_cb);

/*
 * nss_gre_redir_configure_outer_node()
 *	Configure an outer type gre_redir dynamic node.
 */
nss_tx_status_t nss_gre_redir_configure_outer_node(int ifnum,
		struct nss_gre_redir_outer_configure_msg *ngrcm)
{
	struct nss_gre_redir_msg config;
	struct nss_ctx_instance *nss_ctx __maybe_unused = nss_gre_redir_get_context();
	nss_tx_status_t status;
	uint32_t hostif_type, offlif_type, sjackif_type, iftype;
	uint32_t len = sizeof(struct nss_gre_redir_outer_configure_msg);

	if (!nss_ctx) {
		nss_warning("Unable to retrieve NSS context.\n");
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	if (ngrcm->ip_hdr_type != NSS_GRE_REDIR_IP_HDR_TYPE_IPV4 &&
		ngrcm->ip_hdr_type != NSS_GRE_REDIR_IP_HDR_TYPE_IPV6) {
		nss_warning("%px: Unknown IP header type %u\n", nss_ctx, ngrcm->ip_hdr_type);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	iftype = nss_dynamic_interface_get_type(nss_ctx, ifnum);
	if (iftype != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_OUTER) {
		nss_warning("%px: Incorrect interface type %u\n", nss_ctx, iftype);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	/*
	 * Host and offload exception interfaces are mandatory; the sjack
	 * interface is only validated when a non-zero ifnum is supplied.
	 */
	hostif_type = nss_dynamic_interface_get_type(nss_ctx, ngrcm->except_hostif);
	offlif_type = nss_dynamic_interface_get_type(nss_ctx, ngrcm->except_offlif);
	sjackif_type = nss_dynamic_interface_get_type(nss_ctx, ngrcm->except_sjackif);
	if ((hostif_type != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_HOST_INNER) ||
		(offlif_type != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_OFFL_INNER) ||
		(ngrcm->except_sjackif
		&& sjackif_type != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_SJACK_INNER)) {

		nss_warning("%px: Incorrect type for exception interface hostif_type = %u"
				"offlif_type = %u sjackif_type = %u\n", nss_ctx, hostif_type,
				offlif_type, sjackif_type);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	/*
	 * Configure the node
	 */
	nss_cmn_msg_init(&config.cm, ifnum, NSS_GRE_REDIR_TX_TUNNEL_OUTER_CONFIGURE_MSG, len, NULL, NULL);
	config.msg.outer_configure.ip_hdr_type = ngrcm->ip_hdr_type;
	config.msg.outer_configure.rps_hint = ngrcm->rps_hint;
	config.msg.outer_configure.except_hostif = ngrcm->except_hostif;
	config.msg.outer_configure.except_offlif = ngrcm->except_offlif;
	config.msg.outer_configure.except_sjackif = ngrcm->except_sjackif;

	status = nss_gre_redir_tx_msg_sync(nss_ctx, &config);
	if (status != NSS_TX_SUCCESS) {
		nss_warning("%px: Unable to configure outer node %d\n", nss_ctx, ifnum);
	}

	return status;
}
EXPORT_SYMBOL(nss_gre_redir_configure_outer_node);

/*
 * nss_gre_redir_tx_msg()
 *	Transmit a GRE message to NSS FW.
 */
nss_tx_status_t nss_gre_redir_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_msg *msg)
{
	struct nss_cmn_msg *ncm = &msg->cm;

	/*
	 * Trace Messages
	 */
	nss_gre_redir_log_tx_msg(msg);

	/*
	 * Sanity check the message
	 */

	/*
	 * interface should either be dynamic interface to transmit tunnel msg or GRE_REDIR interface to transmit
	 * base node messages.
	 */
	if (((ncm->interface < NSS_DYNAMIC_IF_START) || (ncm->interface >= (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))) &&
		ncm->interface != NSS_GRE_REDIR_INTERFACE) {
		nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface);
		return NSS_TX_FAILURE;
	}

	if (ncm->type >= NSS_GRE_REDIR_MAX_MSG_TYPES) {
		nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type);
		return NSS_TX_FAILURE;
	}

	return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE);
}
EXPORT_SYMBOL(nss_gre_redir_tx_msg);

/*
 * nss_gre_redir_tx_msg_sync()
 *	Transmit a GRE redir message to NSS firmware synchronously.
+ */ +nss_tx_status_t nss_gre_redir_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_msg *ngrm) +{ + nss_tx_status_t status; + int ret = 0; + + down(&nss_gre_redir_pvt.sem); + ngrm->cm.cb = (nss_ptr_t)nss_gre_redir_msg_sync_callback; + ngrm->cm.app_data = (nss_ptr_t)NULL; + status = nss_gre_redir_tx_msg(nss_ctx, ngrm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: gre_tx_msg failed\n", nss_ctx); + up(&nss_gre_redir_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&nss_gre_redir_pvt.complete, msecs_to_jiffies(NSS_GRE_REDIR_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: GRE tx sync failed due to timeout\n", nss_ctx); + nss_gre_redir_pvt.response = NSS_TX_FAILURE; + } + + status = nss_gre_redir_pvt.response; + up(&nss_gre_redir_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_gre_redir_tx_msg_sync); + +/* + * nss_gre_redir_tx_buf() + * Send packet to gre_redir interface owned by NSS. + */ +nss_tx_status_t nss_gre_redir_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num) +{ + uint32_t type; + + nss_trace("%px: gre_redir If Tx packet, id:%d, data=%px", nss_ctx, if_num, os_buf->data); + + /* + * We expect Tx packets to the tunnel only from an interface of + * type GRE_REDIR_WIFI_HOST_INNER. + */ + type = nss_dynamic_interface_get_type(nss_ctx, if_num); + if (!((type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_HOST_INNER) + || (type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_DS))) { + nss_warning("%px: Unknown type for interface %u\n", nss_ctx, type); + return NSS_TX_FAILURE_BAD_PARAM; + } + + return nss_core_send_packet(nss_ctx, os_buf, if_num, H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_gre_redir_tx_buf); + +/* + * nss_gre_redir_tx_buf_noreuse() + * Send packet to gre_redir interface owned by NSS. 
 */
nss_tx_status_t nss_gre_redir_tx_buf_noreuse(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num)
{
	uint32_t type;

	nss_trace("%px: gre_redir If Tx packet, id:%d, data=%px", nss_ctx, if_num, os_buf->data);

	/*
	 * We expect Tx packets to the tunnel only from an interface of
	 * type GRE_REDIR_WIFI_HOST_INNER.
	 */
	type = nss_dynamic_interface_get_type(nss_ctx, if_num);
	if (!((type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_HOST_INNER)
		|| (type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_DS))) {
		nss_warning("%px: Unknown type for interface %u\n", nss_ctx, type);
		return NSS_TX_FAILURE_BAD_PARAM;
	}

	/* Same as nss_gre_redir_tx_buf() but without the buffer-reuse flag. */
	return nss_core_send_packet(nss_ctx, os_buf, if_num, 0);
}
EXPORT_SYMBOL(nss_gre_redir_tx_buf_noreuse);

/*
 * nss_gre_redir_unregister_if()
 *	Unregister dynamic node for GRE redir.
 */
bool nss_gre_redir_unregister_if(uint32_t if_num)
{
	struct nss_ctx_instance *nss_ctx __maybe_unused = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_redir_handler_id];
	uint32_t status;
	struct net_device *dev;
	int i;

	nss_assert(nss_ctx);
	nss_assert((if_num >= NSS_DYNAMIC_IF_START) && (if_num < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES)));

	dev = nss_cmn_get_interface_dev(nss_ctx, if_num);
	if (!dev) {
		nss_warning("%px: Unable to find net device for the interface %d\n", nss_ctx, if_num);
		return false;
	}

	status = nss_core_unregister_msg_handler(nss_ctx, if_num);
	if (status != NSS_CORE_STATUS_SUCCESS) {
		nss_warning("%px: Not able to unregister handler for gre_redir interface %d with NSS core\n", nss_ctx, if_num);
		return false;
	}

	status = nss_core_unregister_handler(nss_ctx, if_num);
	if (status != NSS_CORE_STATUS_SUCCESS) {
		nss_warning("%px: Not able to unregister handler for gre_redir interface %d with NSS core\n", nss_ctx, if_num);
		return false;
	}

	nss_core_set_subsys_dp_type(nss_ctx, dev, if_num, NSS_DYNAMIC_INTERFACE_TYPE_NONE);
	nss_core_unregister_subsys_dp(nss_ctx, if_num);
	spin_lock_bh(&nss_gre_redir_stats_lock);

	/*
	 * Update/Clear the tunnel stats entry for this tunnel.
	 * The slot's netdev binding is released on the last reference.
	 */
	for (i = 0; i < NSS_GRE_REDIR_MAX_INTERFACES; i++) {
		if (tun_stats[i].dev == dev) {
			tun_stats[i].ref_count--;
			if (!tun_stats[i].ref_count) {
				tun_stats[i].dev = NULL;
			}

			break;
		}
	}

	spin_unlock_bh(&nss_gre_redir_stats_lock);
	return true;
}
EXPORT_SYMBOL(nss_gre_redir_unregister_if);

/*
 * nss_gre_redir_get_device()
 *	Gets the original device from probe.
 */
struct device *nss_gre_redir_get_device(void)
{
	struct nss_ctx_instance *nss_ctx = nss_gre_redir_get_context();
	return nss_ctx->dev;
}
EXPORT_SYMBOL(nss_gre_redir_get_device);

/*
 * nss_gre_redir_get_dentry()
 *	Returns directory entry created in debugfs for statistics.
 */
struct dentry *nss_gre_redir_get_dentry(void)
{
	return gre_redir_dentry;
}

/*
 * nss_gre_redir_register_handler()
 *	Registering handler for sending msg to base gre_redir node on NSS.
 */
void nss_gre_redir_register_handler(void)
{
	struct nss_ctx_instance *nss_ctx __maybe_unused = nss_gre_redir_get_context();
	uint32_t status;

	/*
	 * NOTE(review): a debugfs creation failure aborts registration of the
	 * base gre_redir handler entirely — confirm this hard dependency on
	 * debugfs is intended.
	 */
	gre_redir_dentry = nss_gre_redir_stats_dentry_create();
	if (!gre_redir_dentry) {
		nss_warning("%px: Not able to create debugfs entry\n", nss_ctx);
		return;
	}

	/* Sync-messaging state must be ready before the handler can fire. */
	sema_init(&nss_gre_redir_pvt.sem, 1);
	init_completion(&nss_gre_redir_pvt.complete);
	memset(tun_stats, 0, sizeof(struct nss_gre_redir_tunnel_stats) * NSS_GRE_REDIR_MAX_INTERFACES);
	status = nss_core_register_handler(nss_ctx, NSS_GRE_REDIR_INTERFACE, nss_gre_redir_msg_handler, NULL);
	if (status != NSS_CORE_STATUS_SUCCESS) {
		debugfs_remove_recursive(gre_redir_dentry);
		gre_redir_dentry = NULL;
		nss_warning("%px: Not able to register handler for gre_redir base interface with NSS core\n", nss_ctx);
		return;
	}

	nss_gre_redir_strings_dentry_create();
}

/*
 ****************************************************************************
 * Copyright (c) 2018, 2020-2021, The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 ****************************************************************************
 */

#include "nss_tx_rx_common.h"
#include "nss_gre_redir_lag.h"
#include "nss_gre_redir_lag_ds_stats.h"
#include "nss_gre_redir_lag_ds_log.h"
#include "nss_gre_redir_lag_ds_strings.h"

#define NSS_GRE_REDIR_LAG_DS_TX_TIMEOUT 3000 /* 3 Seconds */

/* Per-node statistics contexts, matched by ifnum while the slot is valid. */
struct nss_gre_redir_lag_ds_tun_stats tun_ds_stats[NSS_GRE_REDIR_LAG_MAX_NODE];

/*
 * Private data structure
 *
 * NOTE(review): 'cb' is declared as a pointer to the callback typedef but is
 * cast to the plain typedef when invoked below — this works only if the
 * typedef is itself a function-pointer type; verify the declaration.
 */
static struct {
	struct semaphore sem;		/* serializes synchronous senders */
	struct completion complete;	/* signalled by the sync callback */
	int response;			/* result of the last sync message */
	nss_gre_redir_lag_ds_msg_callback_t *cb;	/* caller's callback, forwarded on completion */
	void *app_data;			/* caller's context for cb */
} nss_gre_redir_lag_ds_pvt;

/*
 * nss_gre_redir_lag_ds_callback()
 *	Callback to handle the completion of HLOS-->NSS messages.
 *	Forwards the response to the stashed caller callback (if any),
 *	records success/failure and wakes the synchronous sender.
 */
static void nss_gre_redir_lag_ds_callback(void *app_data, struct nss_gre_redir_lag_ds_msg *nim)
{
	nss_gre_redir_lag_ds_msg_callback_t callback = (nss_gre_redir_lag_ds_msg_callback_t)nss_gre_redir_lag_ds_pvt.cb;
	void *data = nss_gre_redir_lag_ds_pvt.app_data;

	/* Consume the stashed callback so it fires at most once. */
	nss_gre_redir_lag_ds_pvt.cb = NULL;
	nss_gre_redir_lag_ds_pvt.app_data = NULL;

	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
		nss_warning("GRE LAG DS: error response %d\n", nim->cm.response);
		nss_gre_redir_lag_ds_pvt.response = NSS_TX_FAILURE;
	} else {
		nss_gre_redir_lag_ds_pvt.response = NSS_TX_SUCCESS;
	}

	if (callback) {
		callback(data, &nim->cm);
	}

	complete(&nss_gre_redir_lag_ds_pvt.complete);
}

/*
 * nss_gre_redir_lag_ds_get_node_idx()
 *	Returns index of statistics context.
+ */ +bool nss_gre_redir_lag_ds_get_node_idx(uint32_t ifnum, uint32_t *idx) +{ + uint32_t node_idx; + for (node_idx = 0; node_idx < NSS_GRE_REDIR_LAG_MAX_NODE; node_idx++) { + if ((tun_ds_stats[node_idx].valid) && (tun_ds_stats[node_idx].ifnum == ifnum)) { + *idx = node_idx; + return true; + } + } + + return false; +} + +/* + * nss_gre_redir_lag_ds_verify_ifnum() + * Verify interface type. + */ +bool nss_gre_redir_lag_ds_verify_ifnum(uint32_t if_num) +{ + return nss_dynamic_interface_get_type(nss_gre_redir_lag_ds_get_context(), if_num) == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS; +} + +/* + * nss_gre_redir_lag_ds_handler() + * Handle NSS -> HLOS messages for gre tunnel + */ +static void nss_gre_redir_lag_ds_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + void *ctx; + struct nss_gre_redir_lag_ds_msg *ngrm = (struct nss_gre_redir_lag_ds_msg *)ncm; + nss_gre_redir_lag_ds_msg_callback_t cb; + + /* + * Interface should be a dynamic interface of type NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS. + */ + BUG_ON(!nss_gre_redir_lag_ds_verify_ifnum(ncm->interface)); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_GRE_REDIR_LAG_DS_MAX_MSG_TYPES) { + nss_warning("%px: received invalid message %d for gre interface\n", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_gre_redir_lag_ds_msg)) { + nss_warning("%px: Length of message is greater than required: %d\n", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Update the callback and app_data for NOTIFY messages, gre sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->nss_rx_interface_handlers[ncm->interface].app_data; + } + + /* + * Trace messages. 
+ */ + nss_gre_redir_lag_ds_log_rx_msg(ngrm); + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + switch (ncm->type) { + case NSS_GRE_REDIR_LAG_DS_STATS_SYNC_MSG: + nss_gre_redir_lag_ds_stats_sync(nss_ctx, &ngrm->msg.ds_sync_stats, ncm->interface); + nss_gre_redir_lag_ds_stats_notify(nss_ctx, ncm->interface); + break; + } + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_gre_redir_lag_ds_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + /* + * call gre tunnel callback + */ + cb(ctx, ncm); +} + +/* + * nss_gre_redir_lag_ds_unregister_if() + * Unregister GRE redirect LAG downstream node. + */ +static enum nss_gre_redir_lag_err_types nss_gre_redir_lag_ds_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx __maybe_unused = nss_gre_redir_lag_ds_get_context(); + uint32_t idx, status; + + nss_assert(nss_ctx); + nss_assert(!nss_gre_redir_lag_ds_verify_ifnum(if_num)); + + status = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for gre_lag interface %d with NSS core\n", nss_ctx, if_num); + return NSS_GRE_REDIR_LAG_ERR_CORE_UNREGISTER_FAILED; + } + + status = nss_core_unregister_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for gre_lag interface %d with NSS core\n", nss_ctx, if_num); + return NSS_GRE_REDIR_LAG_ERR_CORE_UNREGISTER_FAILED; + } + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + spin_lock_bh(&nss_gre_redir_lag_ds_stats_lock); + if (!nss_gre_redir_lag_ds_get_node_idx(if_num, &idx)) { + spin_unlock_bh(&nss_gre_redir_lag_ds_stats_lock); + nss_warning("%px: Stats context not found.\n", nss_ctx); + return NSS_GRE_REDIR_LAG_ERR_STATS_INDEX_NOT_FOUND; + } + + tun_ds_stats[idx].valid = false; + spin_unlock_bh(&nss_gre_redir_lag_ds_stats_lock); + return NSS_GRE_REDIR_LAG_SUCCESS; +} + +/* + * 
nss_gre_redir_lag_ds_register_if() + * Register GRE redirect LAG downstream node. + */ +static struct nss_ctx_instance *nss_gre_redir_lag_ds_register_if(uint32_t if_num, struct net_device *netdev, + nss_gre_redir_lag_ds_data_callback_t cb_func_data, + nss_gre_redir_lag_ds_msg_callback_t cb_func_msg, uint32_t features, uint32_t type, void *app_ctx) +{ + struct nss_ctx_instance *nss_ctx = nss_gre_redir_lag_ds_get_context(); + uint32_t status, i; + nss_assert(nss_ctx); + nss_assert(!nss_gre_redir_lag_ds_verify_ifnum(if_num)); + + /* + * Registering handler for sending tunnel interface msgs to NSS. + */ + status = nss_core_register_handler(nss_ctx, if_num, nss_gre_redir_lag_ds_msg_handler, app_ctx); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to register handler for gre_lag interface %d with NSS core\n", nss_ctx, if_num); + return NULL; + } + + /* + * Registering handler for sending tunnel interface msgs to NSS. + */ + status = nss_core_register_msg_handler(nss_ctx, if_num, cb_func_msg); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: Not able to register handler for gre_lag interface %d with NSS core\n", nss_ctx, if_num); + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, cb_func_data, NULL, NULL, netdev, features); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, type); + spin_lock_bh(&nss_gre_redir_lag_ds_stats_lock); + for (i = 0; i < NSS_GRE_REDIR_LAG_MAX_NODE; i++) { + if (!tun_ds_stats[i].valid) { + tun_ds_stats[i].ifnum = if_num; + tun_ds_stats[i].valid = true; + break; + } + } + + spin_unlock_bh(&nss_gre_redir_lag_ds_stats_lock); + + return nss_ctx; +} + +/* + * nss_gre_redir_lag_ds_get_context() + * Retrieves context GRE redirect LAG downstream node. 
+ */ +struct nss_ctx_instance *nss_gre_redir_lag_ds_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_redir_lag_ds_handler_id]; +} +EXPORT_SYMBOL(nss_gre_redir_lag_ds_get_context); + +/* + * nss_gre_redir_lag_ds_tx_msg() + * Transmit a gre message to NSS. + */ +nss_tx_status_t nss_gre_redir_lag_ds_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_lag_ds_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace messages. + */ + nss_gre_redir_lag_ds_log_tx_msg(msg); + + /* + * Sanity check the message. Interface should be a dynamic interface + * of type NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS. + */ + if (!nss_gre_redir_lag_ds_verify_ifnum(ncm->interface)) { + nss_warning("%px: tx request for another interface: %d\n", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_GRE_REDIR_LAG_DS_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d\n", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_gre_redir_lag_ds_tx_msg); + +/* + * nss_gre_redir_lag_ds_tx_msg_sync() + * Transmit a GRE lag message to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_gre_redir_lag_ds_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_lag_ds_msg *ngrm) +{ + nss_tx_status_t status; + int ret = 0; + + down(&nss_gre_redir_lag_ds_pvt.sem); + nss_gre_redir_lag_ds_pvt.cb = (void *)ngrm->cm.cb; + nss_gre_redir_lag_ds_pvt.app_data = (void *)ngrm->cm.app_data; + ngrm->cm.cb = (nss_ptr_t)nss_gre_redir_lag_ds_callback; + ngrm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_gre_redir_lag_ds_tx_msg(nss_ctx, ngrm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: GRE LAG DS msg tx failed\n", nss_ctx); + up(&nss_gre_redir_lag_ds_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&nss_gre_redir_lag_ds_pvt.complete, msecs_to_jiffies(NSS_GRE_REDIR_LAG_DS_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: GRE LAG DS tx sync failed due to timeout\n", nss_ctx); + nss_gre_redir_lag_ds_pvt.response = NSS_TX_FAILURE; + } + + status = nss_gre_redir_lag_ds_pvt.response; + up(&nss_gre_redir_lag_ds_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_gre_redir_lag_ds_tx_msg_sync); + +/* + * nss_gre_redir_lag_ds_unregister_and_dealloc() + * Unregister and deallocate nss gre redirect LAG DS node. 
+ */ +enum nss_gre_redir_lag_err_types nss_gre_redir_lag_ds_unregister_and_dealloc(uint32_t ifnum) +{ + uint32_t ret; + struct nss_ctx_instance *nss_ctx __maybe_unused = nss_gre_redir_lag_ds_get_context(); + nss_tx_status_t status; + + if (!nss_gre_redir_lag_ds_verify_ifnum(ifnum)) { + nss_warning("%px: Unknown interface type %u.\n", nss_ctx, ifnum); + return NSS_GRE_REDIR_LAG_ERR_INCORRECT_IFNUM; + } + + ret = nss_gre_redir_lag_ds_unregister_if(ifnum); + if (ret) { + nss_warning("%px: Unable to unregister interface %u.\n", nss_ctx, ifnum); + return ret; + } + + status = nss_dynamic_interface_dealloc_node(ifnum, NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Unable to deallocate node %u\n", nss_ctx, ifnum); + return NSS_GRE_REDIR_LAG_ERR_DEALLOC_FAILED; + } + + return NSS_GRE_REDIR_LAG_SUCCESS; +} +EXPORT_SYMBOL(nss_gre_redir_lag_ds_unregister_and_dealloc); + +/* + * nss_gre_redir_lag_ds_alloc_and_register_node() + * Allocates and registers GRE downstream type dynamic nodes with NSS. 
+ */ +int nss_gre_redir_lag_ds_alloc_and_register_node(struct net_device *dev, + nss_gre_redir_lag_ds_data_callback_t cb_func_data, + nss_gre_redir_lag_ds_msg_callback_t cb_func_msg, void *app_ctx) +{ + int ifnum; + nss_tx_status_t status; + struct nss_ctx_instance *nss_ctx; + + ifnum = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS); + if (ifnum == -1) { + nss_warning("%px: Unable to allocate GRE_LAG node of type = %u\n", dev, NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS); + return -1; + } + + nss_ctx = nss_gre_redir_lag_ds_register_if(ifnum, dev, cb_func_data, + cb_func_msg, 0, NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS, app_ctx); + if (!nss_ctx) { + nss_warning("%px: Unable to register GRE_LAG node of type = %u\n", dev, NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS); + status = nss_dynamic_interface_dealloc_node(ifnum, NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Unable to deallocate node of type = %u.\n", dev, NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_DS); + } + + return -1; + } + + return ifnum; +} +EXPORT_SYMBOL(nss_gre_redir_lag_ds_alloc_and_register_node); + +/* + * nss_gre_redir_lag_ds_register_handler() + * Registering handler for sending msg to base gre_lag node on NSS. 
+ */ +void nss_gre_redir_lag_ds_register_handler(void) +{ + if (!nss_gre_redir_lag_ds_stats_dentry_create()) { + nss_warning(" Unable to create debugfs entry for LAG DS node.\n"); + return; + } + + nss_gre_redir_lag_ds_strings_dentry_create(); + nss_gre_redir_lag_ds_pvt.cb = NULL; + nss_gre_redir_lag_ds_pvt.app_data = NULL; + sema_init(&nss_gre_redir_lag_ds_pvt.sem, 1); + init_completion(&nss_gre_redir_lag_ds_pvt.complete); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_log.c new file mode 100644 index 000000000..158cb9d44 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_log.c @@ -0,0 +1,164 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_gre_redir_lag_ds_log.c + * NSS GRE REDIR LAG DS logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_gre_redir_lag_ds_log_message_types_str + * GRE REDIR LAG DS message strings + */ +static int8_t *nss_gre_redir_lag_ds_log_message_types_str[NSS_GRE_REDIR_LAG_DS_MAX_MSG_TYPES] __maybe_unused = { + "GRE REDIR LAG DS add station Message", + "GRE REDIR LAG DS delete station message", + "GRE REDIR LAG DS update station message", + "GRE REDIR LAG DS stats sync message", +}; + +/* + * nss_gre_redir_lag_ds_log_error_response_types_str + * Strings for error types for GRE REDIR LAG DS messages + */ +static int8_t *nss_gre_redir_lag_ds_log_error_response_types_str[NSS_GRE_REDIR_LAG_ERR_MAX] __maybe_unused = { + "GRE REDIR LAG Success", + "GRE REDIR LAG Incorrect Interface", + "GRE REDIR LAG DS Core Unregister Failed", + "GRE REDIR LAG DS STats Index Not Found", + "GRE REDIR LAG Dealloc Failed", +}; + +/* + * nss_gre_redir_lag_ds_log_add_sta_msg() + * Log NSS GRE REDIR LAG DS add STA message. + */ +static void nss_gre_redir_lag_ds_log_add_sta_msg(struct nss_gre_redir_lag_ds_msg *ngm) +{ + struct nss_gre_redir_lag_ds_add_sta_msg *ngasm __maybe_unused = &ngm->msg.add_sta; + nss_trace("%px: NSS GRE REDIR LAG DS Add STA Message:\n" + "GRE REDIR LAG DS Station MAC Address: %px\n" + "GRE REDIR LAG DS Reorder Type: %d\n", + ngasm, ngasm->mac, ngasm->reorder_type); +} + +/* + * nss_gre_redir_lag_ds_log_del_sta_msg() + * Log NSS GRE REDIR LAG DS del STA message. + */ +static void nss_gre_redir_lag_ds_log_del_sta_msg(struct nss_gre_redir_lag_ds_msg *ngm) +{ + struct nss_gre_redir_lag_ds_delete_sta_msg *ngdsm __maybe_unused = &ngm->msg.del_sta; + nss_trace("%px: NSS GRE REDIR LAG DS Del STA Message:\n" + "GRE REDIR LAG DS Station MAC Address: %px\n", + ngdsm, ngdsm->mac); +} + +/* + * nss_gre_redir_lag_ds_log_add_sta_msg() + * Log NSS GRE REDIR LAG DS add STA message. 
+ */ +static void nss_gre_redir_lag_ds_log_update_sta_msg(struct nss_gre_redir_lag_ds_msg *ngm) +{ + struct nss_gre_redir_lag_ds_update_sta_msg *ngusm __maybe_unused = &ngm->msg.update_sta; + nss_trace("%px: NSS GRE REDIR LAG DS Update STA Message:\n" + "GRE REDIR LAG DS Station MAC Address: %px\n" + "GRE REDIR LAG DS Reorder Type: %d\n", + ngusm, ngusm->mac, ngusm->reorder_type); +} + +/* + * nss_gre_redir_lag_ds_log_verbose() + * Log message contents. + */ +static void nss_gre_redir_lag_ds_log_verbose(struct nss_gre_redir_lag_ds_msg *ngm) +{ + switch (ngm->cm.type) { + case NSS_GRE_REDIR_LAG_DS_ADD_STA_MSG: + nss_gre_redir_lag_ds_log_add_sta_msg(ngm); + break; + + case NSS_GRE_REDIR_LAG_DS_DEL_STA_MSG: + nss_gre_redir_lag_ds_log_del_sta_msg(ngm); + break; + + case NSS_GRE_REDIR_LAG_DS_UPDATE_STA_MSG: + nss_gre_redir_lag_ds_log_update_sta_msg(ngm); + break; + + case NSS_GRE_REDIR_LAG_DS_STATS_SYNC_MSG: + /* + * No log for valid stats message. + */ + break; + + default: + nss_warning("%px: Invalid message type\n", ngm); + break; + } +} + +/* + * nss_gre_redir_lag_ds_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_gre_redir_lag_ds_log_tx_msg(struct nss_gre_redir_lag_ds_msg *ngm) +{ + if (ngm->cm.type >= NSS_GRE_REDIR_LAG_DS_MAX_MSG_TYPES) { + nss_warning("%px: Invalid message type\n", ngm); + return; + } + + nss_info("%px: type[%d]:%s\n", ngm, ngm->cm.type, nss_gre_redir_lag_ds_log_message_types_str[ngm->cm.type]); + nss_gre_redir_lag_ds_log_verbose(ngm); +} + +/* + * nss_gre_redir_lag_ds_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_gre_redir_lag_ds_log_rx_msg(struct nss_gre_redir_lag_ds_msg *ngm) +{ + if (ngm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ngm); + return; + } + + if (ngm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ngm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ngm, ngm->cm.type, + nss_gre_redir_lag_ds_log_message_types_str[ngm->cm.type], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response]); + goto verbose; + } + + if (ngm->cm.error >= NSS_GRE_REDIR_LAG_ERR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ngm, ngm->cm.type, nss_gre_redir_lag_ds_log_message_types_str[ngm->cm.type], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response], + ngm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ngm, ngm->cm.type, nss_gre_redir_lag_ds_log_message_types_str[ngm->cm.type], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response], + ngm->cm.error, nss_gre_redir_lag_ds_log_error_response_types_str[ngm->cm.error]); + +verbose: + nss_gre_redir_lag_ds_log_verbose(ngm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_log.h new file mode 100644 index 000000000..b0918760e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_GRE_REDIR_LAG_DS_LOG_H__ +#define __NSS_GRE_REDIR_LAG_DS_LOG_H__ + +/* + * nss_gre_redir_lag_ds_log.h + * NSS GRE REDIR LAG DS Log Header File + */ + +/* + * nss_gre_redir_lag_ds_log_tx_msg + * Logs a gre redir lag ds message that is sent to the NSS firmware. + */ +void nss_gre_redir_lag_ds_log_tx_msg(struct nss_gre_redir_lag_ds_msg *ngm); + +/* + * nss_gre_redir_lag_ds_log_rx_msg + * Logs a gre redir lag ds message that is received from the NSS firmware. + */ +void nss_gre_redir_lag_ds_log_rx_msg(struct nss_gre_redir_lag_ds_msg *ngm); + +#endif /* __NSS_GRE_REDIR_LAG_DS_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_stats.c new file mode 100644 index 000000000..76b3d7f83 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_stats.c @@ -0,0 +1,211 @@ +/* + ************************************************************************** + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_gre_redir_lag.h" +#include "nss_gre_redir_lag_ds_stats.h" +#include "nss_gre_redir_lag_ds_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_gre_redir_lag_ds_stats_notifier); + +/* + * Spinlock to protect GRE redirect lag ds statistics update/read + */ +DEFINE_SPINLOCK(nss_gre_redir_lag_ds_stats_lock); + +extern struct nss_gre_redir_lag_ds_tun_stats tun_ds_stats[NSS_GRE_REDIR_LAG_MAX_NODE]; + +/* + * nss_gre_redir_lag_ds_stats_get() + * Get statistics for downstream LAG node. + */ +bool nss_gre_redir_lag_ds_stats_get(struct nss_gre_redir_lag_ds_tun_stats *cmn_stats, uint32_t index) +{ + if (index >= NSS_GRE_REDIR_LAG_MAX_NODE) + return false; + + spin_lock_bh(&nss_gre_redir_lag_ds_stats_lock); + if (!tun_ds_stats[index].valid) { + spin_unlock_bh(&nss_gre_redir_lag_ds_stats_lock); + return false; + } + + memcpy((void *)cmn_stats, (void *)&tun_ds_stats[index], sizeof(*cmn_stats)); + spin_unlock_bh(&nss_gre_redir_lag_ds_stats_lock); + return true; +} + +/* + * nss_gre_redir_lag_ds_stats_read() + * Read gre_redir_lag_ds tunnel stats. 
+ */ +static ssize_t nss_gre_redir_lag_ds_cmn_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats + + * few blank lines for banner printing + Number of Extra outputlines + * for future reference to add new stats + */ + uint32_t max_output_lines = NSS_GRE_REDIR_LAG_DS_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + struct nss_stats_data *data = fp->private_data; + struct nss_gre_redir_lag_ds_tun_stats stats; + ssize_t bytes_read = 0; + size_t size_wr = 0; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + while (data->index < NSS_GRE_REDIR_LAG_MAX_NODE) { + if (nss_gre_redir_lag_ds_stats_get(&stats, data->index)) { + break; + } + + data->index++; + } + + if (data->index >= NSS_GRE_REDIR_LAG_MAX_NODE) { + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "gre_redir_lag_ds stats", NSS_STATS_SINGLE_CORE); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nTunnel stats for %03u\n", stats.ifnum); + size_wr += nss_stats_print("gre_redir_lag_ds", NULL, NSS_STATS_SINGLE_INSTANCE, nss_gre_redir_lag_ds_strings_stats, + &stats.rx_packets, NSS_GRE_REDIR_LAG_DS_STATS_MAX, lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + data->index++; + kfree(lbuf); + return bytes_read; +} + +/* + * nss_gre_redir_lag_ds_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(gre_redir_lag_ds_cmn) + +/* + * nss_gre_redir_lag_ds_stats_dentry_create() + * Create debugfs directory entry. 
+ */ +struct dentry *nss_gre_redir_lag_ds_stats_dentry_create(void) +{ + struct dentry *gre_redir; + struct dentry *cmn_stats; + + gre_redir = nss_gre_redir_get_dentry(); + if (unlikely(!gre_redir)) { + nss_warning("Failed to retrieve directory entry qca-nss-drv/stats/gre_redir/\n"); + return NULL; + } + + cmn_stats = debugfs_create_file("lag_ds_cmn_stats", 0400, gre_redir, + &nss_top_main, &nss_gre_redir_lag_ds_cmn_stats_ops); + if (unlikely(!cmn_stats)) { + nss_warning("Failed to create qca-nss-drv/stats/gre_redir/lag_ds_cmn_stats file\n"); + return NULL; + } + + return cmn_stats; +} + +/* + * nss_gre_redir_lag_ds_stats_sync() + * Update synchonized statistics. + */ +void nss_gre_redir_lag_ds_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_lag_ds_sync_stats_msg *ngss, uint32_t ifnum) +{ + int idx, j; + + spin_lock_bh(&nss_gre_redir_lag_ds_stats_lock); + if (!nss_gre_redir_lag_ds_get_node_idx(ifnum, &idx)) { + spin_unlock_bh(&nss_gre_redir_lag_ds_stats_lock); + nss_warning("%px: Unable to update hash stats msg. Stats context not found.\n", nss_ctx); + return; + } + + tun_ds_stats[idx].tx_packets += ngss->node_stats.tx_packets; + tun_ds_stats[idx].tx_bytes += ngss->node_stats.tx_bytes; + tun_ds_stats[idx].rx_packets += ngss->node_stats.rx_packets; + tun_ds_stats[idx].rx_bytes += ngss->node_stats.rx_bytes; + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + tun_ds_stats[idx].rx_dropped[j] += ngss->node_stats.rx_dropped[j]; + } + + tun_ds_stats[idx].dst_invalid += ngss->ds_stats.dst_invalid; + tun_ds_stats[idx].exception_cnt += ngss->ds_stats.exception_cnt; + spin_unlock_bh(&nss_gre_redir_lag_ds_stats_lock); +} + +/* + * nss_gre_redir_lag_ds_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_gre_redir_lag_ds_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_gre_redir_lag_ds_stats_notification *stats_notify; + int idx; + + stats_notify = kzalloc(sizeof(struct nss_gre_redir_lag_ds_stats_notification), GFP_ATOMIC); + if (!stats_notify) { + nss_warning("Unable to allocate memory for stats notification\n"); + return; + } + + spin_lock_bh(&nss_gre_redir_lag_ds_stats_lock); + if (!nss_gre_redir_lag_ds_get_node_idx(if_num, &idx)) { + spin_unlock_bh(&nss_gre_redir_lag_ds_stats_lock); + nss_warning("%px: Unable to update hash stats msg. Stats context not found.\n", nss_ctx); + kfree(stats_notify); + return; + } + + stats_notify->core_id = nss_ctx->id; + stats_notify->if_num = if_num; + memcpy(&(stats_notify->stats_ctx), &(tun_ds_stats[idx]), sizeof(stats_notify->stats_ctx)); + spin_unlock_bh(&nss_gre_redir_lag_ds_stats_lock); + atomic_notifier_call_chain(&nss_gre_redir_lag_ds_stats_notifier, NSS_STATS_EVENT_NOTIFY, stats_notify); + kfree(stats_notify); +} + +/* + * nss_gre_redir_lag_ds_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_gre_redir_lag_ds_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_gre_redir_lag_ds_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_redir_lag_ds_stats_unregister_notifier); + +/* + * nss_gre_redir_lag_ds_stats_register_notifier() + * Registers statistics notifier. 
+ */ +int nss_gre_redir_lag_ds_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_gre_redir_lag_ds_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_redir_lag_ds_stats_register_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_stats.h new file mode 100644 index 000000000..127f0082a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_stats.h @@ -0,0 +1,28 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * ************************************************************************ + */ + +#ifndef __NSS_GRE_REDIR_LAG_DS_STATS_H__ +#define __NSS_GRE_REDIR_LAG_DS_STATS_H__ + +extern spinlock_t nss_gre_redir_lag_ds_stats_lock; +extern void nss_gre_redir_lag_ds_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern bool nss_gre_redir_lag_ds_verify_ifnum(uint32_t if_num); +extern bool nss_gre_redir_lag_ds_get_node_idx(uint32_t ifnum, uint32_t *idx); +extern void nss_gre_redir_lag_ds_stats_sync(struct nss_ctx_instance *nss_ctx, + struct nss_gre_redir_lag_ds_sync_stats_msg *ngss, uint32_t ifnum); +extern struct dentry *nss_gre_redir_lag_ds_stats_dentry_create(void); + +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_strings.c new file mode 100644 index 000000000..185189996 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_strings.c @@ -0,0 +1,60 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ *************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_gre_redir_lag_ds_strings.h" + +/* + * nss_gre_redir_lag_ds_strings_stats + * GRE REDIR LAG DS common statistics strings. + */ +struct nss_stats_info nss_gre_redir_lag_ds_strings_stats[NSS_GRE_REDIR_LAG_DS_STATS_MAX] = { + {"rx_packets", NSS_STATS_TYPE_COMMON}, + {"rx_bytes", NSS_STATS_TYPE_COMMON}, + {"tx_packets", NSS_STATS_TYPE_COMMON}, + {"tx_bytes", NSS_STATS_TYPE_COMMON}, + {"rx_dropped_0", NSS_STATS_TYPE_DROP}, + {"rx_dropped_1", NSS_STATS_TYPE_DROP}, + {"rx_dropped_2", NSS_STATS_TYPE_DROP}, + {"rx_dropped_3", NSS_STATS_TYPE_DROP}, + {"dst_invalid", NSS_STATS_TYPE_EXCEPTION}, + {"exception_packets", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_gre_redir_lag_ds_strings_read() + * Read gre_redir_lag_ds statistics names + */ +static ssize_t nss_gre_redir_lag_ds_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_gre_redir_lag_ds_strings_stats, NSS_GRE_REDIR_LAG_DS_STATS_MAX); +} + +/* + * nss_gre_redir_lag_ds_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(gre_redir_lag_ds); + +/* + * nss_gre_redir_lag_ds_strings_dentry_create() + * Create gre_redir_lag_ds statistics strings debug entry. + */ +void nss_gre_redir_lag_ds_strings_dentry_create(void) +{ + nss_strings_create_dentry("gre_redir_lag_ds", &nss_gre_redir_lag_ds_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_strings.h new file mode 100644 index 000000000..c85bc7721 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_ds_strings.h @@ -0,0 +1,25 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#ifndef __NSS_GRE_REDIR_LAG_DS_STRINGS_H +#define __NSS_GRE_REDIR_LAG_DS_STRINGS_H + +#include "nss_gre_redir_lag_ds_stats.h" + +extern struct nss_stats_info nss_gre_redir_lag_ds_strings_stats[NSS_GRE_REDIR_LAG_DS_STATS_MAX]; +extern void nss_gre_redir_lag_ds_strings_dentry_create(void); + +#endif /* __NSS_GRE_REDIR_LAG_DS_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us.c new file mode 100644 index 000000000..8e1b7588e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us.c @@ -0,0 +1,665 @@ +/* + **************************************************************************** + * Copyright (c) 2018, 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + **************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_gre_redir_lag_us_stats.h" +#include "nss_gre_redir_lag_us_log.h" +#include "nss_gre_redir_lag_us_strings.h" + +#define NSS_GRE_REDIR_LAG_US_TX_TIMEOUT 3000 /* 3 Seconds */ +#define NSS_GRE_REDIR_LAG_US_STATS_SYNC_PERIOD msecs_to_jiffies(4000) +#define NSS_GRE_REDIR_LAG_US_STATS_SYNC_UDELAY 4000 + +struct nss_gre_redir_lag_us_cmn_ctx cmn_ctx; + +/* + * Sync response context. + */ +static struct { + struct semaphore sem; + struct completion complete; + int response; + nss_gre_redir_lag_us_msg_callback_t *cb; + void *app_data; +} nss_gre_redir_lag_us_sync_ctx; + +/* + * nss_gre_redir_lag_us_callback() + * Callback to handle the completion of HLOS-->NSS messages. + */ +static void nss_gre_redir_lag_us_callback(void *app_data, struct nss_gre_redir_lag_us_msg *nim) +{ + nss_gre_redir_lag_us_msg_callback_t callback = (nss_gre_redir_lag_us_msg_callback_t)nss_gre_redir_lag_us_sync_ctx.cb; + void *data = nss_gre_redir_lag_us_sync_ctx.app_data; + + nss_gre_redir_lag_us_sync_ctx.cb = NULL; + nss_gre_redir_lag_us_sync_ctx.app_data = NULL; + + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("GRE redir LAG US Error response %d\n", nim->cm.response); + nss_gre_redir_lag_us_sync_ctx.response = NSS_TX_FAILURE; + } else { + nss_gre_redir_lag_us_sync_ctx.response = NSS_TX_SUCCESS; + } + + if (callback) { + callback(data, &nim->cm); + } + + complete(&nss_gre_redir_lag_us_sync_ctx.complete); +} + +/* + * nss_gre_redir_lag_us_hash_update_stats_req() + * Update query hash message's index for next request. 
+ */ +static void nss_gre_redir_lag_us_hash_update_stats_req(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_lag_us_msg *ngrm) +{ + uint32_t ifnum = ngrm->cm.interface; + uint32_t idx, sync_delay = NSS_GRE_REDIR_LAG_US_STATS_SYNC_PERIOD; + struct nss_gre_redir_lag_us_hash_stats_query_msg *nim = &ngrm->msg.hash_stats; + + spin_lock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + if (!nss_gre_redir_lag_us_get_node_idx(ifnum, &idx)) { + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + nss_warning("%px: Unable to update hash stats msg. Stats context not found.\n", nss_ctx); + return; + } + + /* + * Update start index for next iteration of the query. + */ + if (ngrm->cm.response == NSS_CMN_RESPONSE_ACK) { + cmn_ctx.stats_ctx[idx].db_sync_msg.msg.hash_stats.db_entry_idx = nim->db_entry_next; + } else { + cmn_ctx.stats_ctx[idx].db_sync_msg.msg.hash_stats.db_entry_idx = 0; + } + + /* + * If more hash entries are to be fetched from FW, queue work with delay of one eighth of + * the polling period. Else, schedule work with a delay of polling period. + */ + if (cmn_ctx.stats_ctx[idx].db_sync_msg.msg.hash_stats.db_entry_idx) + sync_delay = NSS_GRE_REDIR_LAG_US_STATS_SYNC_PERIOD / 8; + + queue_delayed_work(cmn_ctx.nss_gre_redir_lag_us_wq, &(cmn_ctx.stats_ctx[idx].nss_gre_redir_lag_us_work), sync_delay); + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); +} + +/* + * nss_gre_redir_lag_us_handler() + * Handle NSS -> HLOS messages for gre tunnel + */ +static void nss_gre_redir_lag_us_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + void *ctx; + struct nss_gre_redir_lag_us_msg *ngrm = (struct nss_gre_redir_lag_us_msg *)ncm; + nss_gre_redir_lag_us_msg_callback_t cb; + + /* + * Interface should be a dynamic interface of type NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US. + */ + BUG_ON(!nss_gre_redir_lag_us_verify_ifnum(ncm->interface)); + + /* + * Trace messages. 
+ */ + nss_gre_redir_lag_us_log_rx_msg(ngrm); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_GRE_REDIR_LAG_US_MAX_MSG_TYPES) { + nss_warning("%px: received invalid message %d for gre interface\n", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_gre_redir_lag_us_msg)) { + nss_warning("%px: Length of message is greater than required: %d\n", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Update the callback and app_data for NOTIFY messages, GRE sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->nss_rx_interface_handlers[ncm->interface].app_data; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + switch (ncm->type) { + case NSS_GRE_REDIR_LAG_US_CMN_STATS_SYNC_MSG: + nss_gre_redir_lag_us_stats_sync(nss_ctx, &ngrm->msg.us_sync_stats, ncm->interface); + nss_gre_redir_lag_us_stats_notify(nss_ctx, ncm->interface); + break; + + case NSS_GRE_REDIR_LAG_US_DB_HASH_NODE_MSG: + nss_gre_redir_lag_us_hash_update_stats_req(nss_ctx, ngrm); + break; + } + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_gre_redir_lag_us_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + /* + * call gre tunnel callback + */ + cb(ctx, ncm); +} + +/* + * nss_gre_redir_lag_us_tx_msg_with_size() + * Transmit a GRE message to NSSFW with size. + */ +static nss_tx_status_t nss_gre_redir_lag_us_tx_msg_with_size(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_lag_us_msg *msg, uint32_t size) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Sanity check the message. Interface should be a dynamic + * interface of type NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US. 
+ */ + if (!nss_gre_redir_lag_us_verify_ifnum(ncm->interface)) { + nss_warning("%px: tx request for another interface: %d\n", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_GRE_REDIR_LAG_US_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d\n", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), size); +} + +/* + * nss_gre_redir_lag_us_tx_msg_sync_with_size() + * Transmit a GRE LAG message to NSS firmware synchronously with size. + */ +static nss_tx_status_t nss_gre_redir_lag_us_tx_msg_sync_with_size(struct nss_ctx_instance *nss_ctx, + struct nss_gre_redir_lag_us_msg *ngrm, uint32_t size) +{ + nss_tx_status_t status; + int ret = 0; + + down(&nss_gre_redir_lag_us_sync_ctx.sem); + + /* + * Save the client's callback, and initialize the message + * with the callback which releases the semaphore after message + * response is received, This callback will inturn call the client's + * callback. + */ + nss_gre_redir_lag_us_sync_ctx.cb = (void *)ngrm->cm.cb; + nss_gre_redir_lag_us_sync_ctx.app_data = (void *)ngrm->cm.app_data; + ngrm->cm.cb = (nss_ptr_t)nss_gre_redir_lag_us_callback; + ngrm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_gre_redir_lag_us_tx_msg_with_size(nss_ctx, ngrm, size); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: gre_tx_msg failed\n", nss_ctx); + up(&nss_gre_redir_lag_us_sync_ctx.sem); + return status; + } + + ret = wait_for_completion_timeout(&nss_gre_redir_lag_us_sync_ctx.complete, msecs_to_jiffies(NSS_GRE_REDIR_LAG_US_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: GRE LAG US tx sync failed due to timeout\n", nss_ctx); + nss_gre_redir_lag_us_sync_ctx.response = NSS_TX_FAILURE; + } + + status = nss_gre_redir_lag_us_sync_ctx.response; + up(&nss_gre_redir_lag_us_sync_ctx.sem); + return status; +} + +/* + * nss_gre_redir_lag_us_stats_sync_req_work() + * Work function for hash statistics synchronization. 
+ */ +static void nss_gre_redir_lag_us_stats_sync_req_work(struct work_struct *work) +{ + struct delayed_work *d_work = container_of(work, struct delayed_work, work); + struct nss_gre_redir_lag_us_pvt_sync_stats *sync_ctx = container_of(d_work, struct nss_gre_redir_lag_us_pvt_sync_stats, + nss_gre_redir_lag_us_work); + struct nss_gre_redir_lag_us_hash_stats_query_msg *nicsm_req = &(sync_ctx->db_sync_msg.msg.hash_stats); + nss_tx_status_t nss_tx_status; + nss_gre_redir_lag_us_msg_callback_t cb; + void *app_data; + struct nss_ctx_instance *nss_ctx __maybe_unused = nss_gre_redir_lag_us_get_context(); + int retry = NSS_GRE_REDIR_LAG_US_STATS_SYNC_RETRY; + + spin_lock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + cb = sync_ctx->cb; + app_data = sync_ctx->app_data; + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + + nss_cmn_msg_init(&(sync_ctx->db_sync_msg.cm), sync_ctx->ifnum, + NSS_GRE_REDIR_LAG_US_DB_HASH_NODE_MSG, sizeof(struct nss_gre_redir_lag_us_hash_stats_query_msg), + cb, app_data); + while (retry) { + nss_tx_status = nss_gre_redir_lag_us_tx_msg_sync_with_size(nss_ctx, &(sync_ctx->db_sync_msg), PAGE_SIZE); + if (nss_tx_status == NSS_TX_SUCCESS) { + return; + } + + retry--; + nss_warning("%px: TX_NOT_OKAY, try again later\n", nss_ctx); + usleep_range(100, 200); + } + + /* + * TX failed after retries, take fresh start. + */ + nicsm_req->count = 0; + nicsm_req->db_entry_idx = 0; + queue_delayed_work(cmn_ctx.nss_gre_redir_lag_us_wq, &(sync_ctx->nss_gre_redir_lag_us_work), NSS_GRE_REDIR_LAG_US_STATS_SYNC_PERIOD); +} + +/* + * nss_gre_redir_lag_us_sync_work_init() + * Initialize work. 
+ */ +static bool nss_gre_redir_lag_us_sync_work_init(uint32_t ifnum) +{ + struct nss_gre_redir_lag_us_hash_stats_query_msg *hash_stats_msg; + struct nss_ctx_instance __maybe_unused *nss_ctx = nss_gre_redir_lag_us_get_context(); + int ret, idx; + + spin_lock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + if (!nss_gre_redir_lag_us_get_node_idx(ifnum, &idx)) { + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + nss_warning("%px: Unable to init work. Stats context not found.\n", nss_ctx); + return false; + } + + hash_stats_msg = &(cmn_ctx.stats_ctx[idx].db_sync_msg.msg.hash_stats); + hash_stats_msg->db_entry_idx = 0; + INIT_DELAYED_WORK(&(cmn_ctx.stats_ctx[idx].nss_gre_redir_lag_us_work), nss_gre_redir_lag_us_stats_sync_req_work); + ret = queue_delayed_work(cmn_ctx.nss_gre_redir_lag_us_wq, + &(cmn_ctx.stats_ctx[idx].nss_gre_redir_lag_us_work), NSS_GRE_REDIR_LAG_US_STATS_SYNC_PERIOD); + if (!ret) { + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + nss_warning("%px: Unable to queue work function to work queue\n", nss_ctx); + return false; + } + + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + return true; +} + +/* + * nss_gre_redir_lag_us_unregister_if() + * Unregister GRE redirect LAG upstream node. 
+ */ +static enum nss_gre_redir_lag_err_types nss_gre_redir_lag_us_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx __maybe_unused = nss_gre_redir_lag_us_get_context(); + uint32_t status; + int idx; + + nss_assert(nss_ctx); + nss_assert(!nss_gre_redir_lag_us_verify_ifnum(if_num)); + + status = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for gre_lag interface %d with NSS core\n", nss_ctx, if_num); + return NSS_GRE_REDIR_LAG_ERR_CORE_UNREGISTER_FAILED; + } + + status = nss_core_unregister_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for gre_lag interface %d with NSS core\n", nss_ctx, if_num); + return NSS_GRE_REDIR_LAG_ERR_CORE_UNREGISTER_FAILED; + } + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + spin_lock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + if (!nss_gre_redir_lag_us_get_node_idx(if_num, &idx)) { + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + nss_warning("Stats context not found.\n"); + return NSS_GRE_REDIR_LAG_ERR_STATS_INDEX_NOT_FOUND; + } + + cmn_ctx.stats_ctx[idx].cb = NULL; + cmn_ctx.stats_ctx[idx].app_data = NULL; + cmn_ctx.stats_ctx[idx].valid = false; + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + + /* + * Work is per LAG US node. Cancel works for this node. + */ + cancel_delayed_work_sync(&(cmn_ctx.stats_ctx[idx].nss_gre_redir_lag_us_work)); + return NSS_GRE_REDIR_LAG_SUCCESS; +} + +/* + * nss_gre_redir_lag_us_register_if() + * Register GRE redirect LAG upstream node. 
+ */ +static struct nss_ctx_instance *nss_gre_redir_lag_us_register_if(uint32_t if_num, struct net_device *netdev, + nss_gre_redir_lag_us_data_callback_t cb_func_data, + nss_gre_redir_lag_us_msg_callback_t cb_func_msg, uint32_t features, uint32_t type, void *app_ctx) +{ + struct nss_ctx_instance *nss_ctx = nss_gre_redir_lag_us_get_context(); + uint32_t status; + int i; + nss_assert(nss_ctx); + nss_assert(!nss_gre_redir_lag_us_verify_ifnum(if_num)); + + spin_lock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + for (i = 0; i < NSS_GRE_REDIR_LAG_MAX_NODE; i++) { + if (!cmn_ctx.stats_ctx[i].valid) { + cmn_ctx.stats_ctx[i].ifnum = if_num; + cmn_ctx.stats_ctx[i].valid = true; + cmn_ctx.stats_ctx[i].cb = cb_func_msg; + cmn_ctx.stats_ctx[i].app_data = app_ctx; + break; + } + } + + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + if (i == NSS_GRE_REDIR_LAG_MAX_NODE) { + nss_warning("Maximum number of LAG nodes are already present.\n"); + return NULL; + } + + /* + * Registering handler for sending tunnel interface msgs to NSS. + */ + status = nss_core_register_handler(nss_ctx, if_num, nss_gre_redir_lag_us_msg_handler, app_ctx); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to register handler for gre_lag interface %d with NSS core\n", nss_ctx, if_num); + spin_lock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + cmn_ctx.stats_ctx[i].valid = false; + cmn_ctx.stats_ctx[i].cb = NULL; + cmn_ctx.stats_ctx[i].app_data = NULL; + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + return NULL; + } + + /* + * Registering handler for sending tunnel interface msgs to NSS. 
+ */ + status = nss_core_register_msg_handler(nss_ctx, if_num, cb_func_msg); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: Not able to register handler for gre_lag interface %d with NSS core\n", nss_ctx, if_num); + spin_lock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + cmn_ctx.stats_ctx[i].valid = false; + cmn_ctx.stats_ctx[i].cb = NULL; + cmn_ctx.stats_ctx[i].app_data = NULL; + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, cb_func_data, NULL, NULL, netdev, features); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, type); + return nss_ctx; +} + +/* + * nss_gre_redir_lag_us_get_node_idx() + * Returns index of statistics context. + */ +bool nss_gre_redir_lag_us_get_node_idx(uint32_t ifnum, uint32_t *idx) +{ + uint32_t node_idx; + for (node_idx = 0; node_idx < NSS_GRE_REDIR_LAG_MAX_NODE; node_idx++) { + if ((cmn_ctx.stats_ctx[node_idx].valid) && (cmn_ctx.stats_ctx[node_idx].ifnum == ifnum)) { + *idx = node_idx; + return true; + } + } + + return false; +} + +/* + * nss_gre_redir_lag_us_verify_ifnum() + * Verify interface type. + */ +bool nss_gre_redir_lag_us_verify_ifnum(uint32_t if_num) +{ + return nss_dynamic_interface_get_type(nss_gre_redir_lag_us_get_context(), if_num) == NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US; +} + +/* + * nss_gre_redir_lag_us_get_context() + * Retrieve context for GRE redirect LAG upstream node. + */ +struct nss_ctx_instance *nss_gre_redir_lag_us_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_redir_lag_us_handler_id]; +} +EXPORT_SYMBOL(nss_gre_redir_lag_us_get_context); + +/* + * nss_gre_redir_lag_us_configure_node() + * Configure upstream lag node. 
+ */ +bool nss_gre_redir_lag_us_configure_node(uint32_t ifnum, + struct nss_gre_redir_lag_us_config_msg *ngluc) +{ + struct nss_gre_redir_lag_us_msg *config; + uint32_t len, iftype, idx = 0, i; + bool ret; + nss_tx_status_t status; + struct nss_ctx_instance *nss_ctx __maybe_unused = nss_ctx = nss_gre_redir_lag_us_get_context(); + + if (!nss_ctx) { + nss_warning("Unable to retrieve NSS context.\n"); + return false; + } + + config = (struct nss_gre_redir_lag_us_msg *) kzalloc(sizeof(struct nss_gre_redir_lag_us_msg), GFP_KERNEL); + if (!config) { + nss_warning("%px: Unable to allocate memory to send configure message.\n", nss_ctx); + return false; + } + + iftype = nss_dynamic_interface_get_type(nss_ctx, ifnum); + if (iftype != NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US) { + nss_warning("%px: Incorrect interface type %u\n", nss_ctx, iftype); + kfree(config); + return false; + } + + if (!ngluc) { + nss_warning("%px: Pointer to GRE redir LAG US message is NULL.\n", nss_ctx); + kfree(config); + return false; + } + + if ((ngluc->num_slaves < NSS_GRE_REDIR_LAG_MIN_SLAVE) || (ngluc->num_slaves > NSS_GRE_REDIR_LAG_MAX_SLAVE)) { + nss_warning("%px: Number of slaves is not in reange\n", nss_ctx); + kfree(config); + return false; + } + + ret = nss_gre_redir_lag_us_sync_work_init(ifnum); + if (!ret) { + nss_warning("%px: Unable to initialize work queue\n", nss_ctx); + kfree(config); + return false; + } + + len = sizeof(struct nss_gre_redir_lag_us_msg) - sizeof(struct nss_cmn_msg); + nss_cmn_msg_init(&config->cm, ifnum, NSS_GRE_REDIR_LAG_US_CONFIG_MSG, len, NULL, NULL); + config->msg.config_us.hash_mode = ngluc->hash_mode; + config->msg.config_us.num_slaves = ngluc->num_slaves; + for (i = 0; i < ngluc->num_slaves; i++) { + config->msg.config_us.if_num[i] = ngluc->if_num[i]; + } + + status = nss_gre_redir_lag_us_tx_msg_sync(nss_ctx, config); + kfree(config); + if (status == NSS_TX_SUCCESS) { + return true; + } + + spin_lock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + if 
(nss_gre_redir_lag_us_get_node_idx(ifnum, &idx)) { + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + nss_warning("%px: Stats context not found.\n", nss_ctx); + return false; + } + + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + + /* + * Work is per LAG US node. Cancel work as configuration failed. + */ + cancel_delayed_work_sync(&(cmn_ctx.stats_ctx[idx].nss_gre_redir_lag_us_work)); + nss_warning("%px: Unable to configure upstream lag node %d.\n", nss_ctx, ifnum); + return false; +} +EXPORT_SYMBOL(nss_gre_redir_lag_us_configure_node); + +/* + * nss_gre_redir_lag_us_tx_msg() + * Transmit a GRE LAG message to NSS firmware asynchronously. + */ +nss_tx_status_t nss_gre_redir_lag_us_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_lag_us_msg *ngrm) +{ + /* + * Trace messages. + */ + nss_gre_redir_lag_us_log_tx_msg(ngrm); + + return nss_gre_redir_lag_us_tx_msg_with_size(nss_ctx, ngrm, NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_gre_redir_lag_us_tx_msg); + +/* + * nss_gre_redir_lag_us_tx_msg_sync() + * Transmit a GRE lag message to NSS firmware synchronously. + */ +nss_tx_status_t nss_gre_redir_lag_us_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_lag_us_msg *ngrm) +{ + return nss_gre_redir_lag_us_tx_msg_sync_with_size(nss_ctx, ngrm, NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_gre_redir_lag_us_tx_msg_sync); + +/* + * nss_gre_redir_lag_us_unregister_and_dealloc() + * Unregister and deallocate nss gre redirect LAG US node. 
+ */ +enum nss_gre_redir_lag_err_types nss_gre_redir_lag_us_unregister_and_dealloc(uint32_t ifnum) +{ + uint32_t ret; + struct nss_ctx_instance *nss_ctx __maybe_unused = nss_gre_redir_lag_us_get_context(); + nss_tx_status_t status; + + if (!nss_gre_redir_lag_us_verify_ifnum(ifnum)) { + nss_warning("%px: Unknown interface type %u.\n", nss_ctx, ifnum); + return NSS_GRE_REDIR_LAG_ERR_INCORRECT_IFNUM; + } + + ret = nss_gre_redir_lag_us_unregister_if(ifnum); + if (ret) { + nss_warning("%px: Unable to unregister interface %u.\n", nss_ctx, ifnum); + return ret; + } + + status = nss_dynamic_interface_dealloc_node(ifnum, NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Unable to deallocate node %u\n", nss_ctx, ifnum); + return NSS_GRE_REDIR_LAG_ERR_DEALLOC_FAILED; + } + + return NSS_GRE_REDIR_LAG_SUCCESS; +} +EXPORT_SYMBOL(nss_gre_redir_lag_us_unregister_and_dealloc); + +/* + * nss_gre_redir_lag_us_alloc_and_register_node() + * Allocates and registers GRE upstream type dynamic nodes with NSS. 
+ */ +int nss_gre_redir_lag_us_alloc_and_register_node(struct net_device *dev, + nss_gre_redir_lag_us_data_callback_t cb_func_data, + nss_gre_redir_lag_us_msg_callback_t cb_func_msg, void *app_ctx) +{ + int ifnum; + nss_tx_status_t status; + struct nss_ctx_instance *nss_ctx; + + ifnum = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US); + if (ifnum == -1) { + nss_warning("%px: Unable to allocate GRE_LAG node of type = %u\n", dev, NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US); + return -1; + } + + nss_ctx = nss_gre_redir_lag_us_register_if(ifnum, dev, cb_func_data, + cb_func_msg, 0, NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US, app_ctx); + if (!nss_ctx) { + nss_warning("%px: Unable to register GRE_LAG node of type = %u\n", dev, NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US); + status = nss_dynamic_interface_dealloc_node(ifnum, NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_LAG_US); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Unable to deallocate node.\n", dev); + } + + return -1; + } + + return ifnum; +} +EXPORT_SYMBOL(nss_gre_redir_lag_us_alloc_and_register_node); + +/* + * nss_gre_redir_lag_us_register_handler() + * Registering handler for sending msg to base gre_lag node on NSS. 
+ */ +void nss_gre_redir_lag_us_register_handler(void) +{ + struct dentry *d_entry = nss_gre_redir_lag_us_stats_dentry_create(); + + if (!d_entry) { + nss_warning(" Unable to create debugfs entry for LAG US node.\n"); + return; + } + + cmn_ctx.nss_gre_redir_lag_us_wq = create_singlethread_workqueue("nss_gre_redir_lag_us_workqueue"); + if (!cmn_ctx.nss_gre_redir_lag_us_wq) { + debugfs_remove_recursive(d_entry); + nss_warning("Unable to create workqueue for LAG US node.\n"); + return; + } + + nss_gre_redir_lag_us_strings_dentry_create(); + nss_gre_redir_lag_us_sync_ctx.cb = NULL; + nss_gre_redir_lag_us_sync_ctx.app_data = NULL; + sema_init(&nss_gre_redir_lag_us_sync_ctx.sem, 1); + init_completion(&nss_gre_redir_lag_us_sync_ctx.complete); + spin_lock_init(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_log.c new file mode 100644 index 000000000..8601979d4 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_log.c @@ -0,0 +1,191 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_gre_redir_lag_us_log.c + * NSS GRE REDIR LAG US logger file. + */ + +#include "nss_core.h" + +/* + * nss_gre_redir_lag_us_log_message_types_str + * GRE REDIR LAG US message strings + */ +static int8_t *nss_gre_redir_lag_us_log_message_types_str[NSS_GRE_REDIR_LAG_US_MAX_MSG_TYPES] __maybe_unused = { + "GRE REDIR LAG US config Message", + "GRE REDIR LAG US add hash node message", + "GRE REDIR LAG US delete hash node message", + "GRE REDIR LAG US query hash node message", + "GRE REDIR LAG US stats sync message", + "GRE REDIR LAG US DB hash node message", +}; + +/* + * nss_gre_redir_lag_us_log_error_response_types_str + * Strings for error types for GRE REDIR LAG US messages + */ +static int8_t *nss_gre_redir_lag_us_log_error_response_types_str[NSS_GRE_REDIR_LAG_ERR_MAX] __maybe_unused = { + "GRE REDIR LAG Success", + "GRE REDIR LAG Incorrect Interface", + "GRE REDIR LAG US Core Unregister Failed", + "GRE REDIR LAG US STats Index Not Found", + "GRE REDIR LAG Dealloc Failed", +}; + +/* + * nss_gre_redir_lag_us_log_config_msg() + * Log NSS GRE REDIR LAG US config message. + */ +static void nss_gre_redir_lag_us_log_config_msg(struct nss_gre_redir_lag_us_msg *ngm) +{ + struct nss_gre_redir_lag_us_config_msg *ngcm __maybe_unused = &ngm->msg.config_us; + nss_trace("%px: NSS GRE REDIR LAG Config Message:\n" + "GRE REDIR LAG US Hash Mode: %d\n" + "GRE REDIR LAG US Number of Slaves: %d\n" + "GRE REDIR LAG US Interface Number: %px\n", + ngcm, ngcm->hash_mode, ngcm->num_slaves, + ngcm->if_num); +} + +/* + * nss_gre_redir_lag_us_log_add_hash_node_msg() + * Log NSS GRE REDIR LAG US add hash node message. 
+ */ +static void nss_gre_redir_lag_us_log_add_hash_node_msg(struct nss_gre_redir_lag_us_msg *ngm) +{ + struct nss_gre_redir_lag_us_add_hash_node_msg *ngam __maybe_unused = &ngm->msg.add_hash; + nss_trace("%px: NSS GRE REDIR LAG Add Hash Node Message:\n" + "GRE REDIR LAG US Interface Number: %d\n" + "GRE REDIR LAG US Source MAC: %px\n" + "GRE REDIR LAG US Destination MAC: %px\n", + ngam, ngam->if_num, ngam->src_mac, + ngam->dest_mac); +} + +/* + * nss_gre_redir_lag_us_log_del_hash_node_msg() + * Log NSS GRE REDIR LAG US del hash node message. + */ +static void nss_gre_redir_lag_us_log_del_hash_node_msg(struct nss_gre_redir_lag_us_msg *ngm) +{ + struct nss_gre_redir_lag_us_del_hash_node_msg *ngdm __maybe_unused = &ngm->msg.del_hash; + nss_trace("%px: NSS GRE REDIR LAG Del Hash Node Message:\n" + "GRE REDIR LAG US Source MAC: %px\n" + "GRE REDIR LAG US Destination MAC: %px\n", + ngdm, ngdm->src_mac,ngdm->dest_mac); +} + +/* + * nss_gre_redir_lag_us_log_query_hash_node_msg() + * Log NSS GRE REDIR LAG US query hash node message. + */ +static void nss_gre_redir_lag_us_log_query_hash_node_msg(struct nss_gre_redir_lag_us_msg *ngm) +{ + struct nss_gre_redir_lag_us_query_hash_node_msg *ngqm __maybe_unused = &ngm->msg.query_hash; + nss_trace("%px: NSS GRE REDIR LAG Query Hash Node Message:\n" + "GRE REDIR LAG US Source MAC: %px\n" + "GRE REDIR LAG US Destination MAC: %px\n" + "GRE REDIR LAG US Interface Number: %d\n", + ngqm, ngqm->src_mac, ngqm->dest_mac, + ngqm->ifnum); +} + +/* + * nss_gre_redir_lag_us_log_verbose() + * Log message contents. 
+ */ +static void nss_gre_redir_lag_us_log_verbose(struct nss_gre_redir_lag_us_msg *ngm) +{ + switch (ngm->cm.type) { + case NSS_GRE_REDIR_LAG_US_CONFIG_MSG: + nss_gre_redir_lag_us_log_config_msg(ngm); + break; + + case NSS_GRE_REDIR_LAG_US_ADD_HASH_NODE_MSG: + nss_gre_redir_lag_us_log_add_hash_node_msg(ngm); + break; + + case NSS_GRE_REDIR_LAG_US_DEL_HASH_NODE_MSG: + nss_gre_redir_lag_us_log_del_hash_node_msg(ngm); + break; + + case NSS_GRE_REDIR_LAG_US_QUERY_HASH_NODE_MSG: + nss_gre_redir_lag_us_log_query_hash_node_msg(ngm); + break; + + case NSS_GRE_REDIR_LAG_US_CMN_STATS_SYNC_MSG: + case NSS_GRE_REDIR_LAG_US_DB_HASH_NODE_MSG: + /* + * No log for valid stats message. + */ + break; + + default: + nss_warning("%px: Invalid message type\n", ngm); + break; + } +} + +/* + * nss_gre_redir_lag_us_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_gre_redir_lag_us_log_tx_msg(struct nss_gre_redir_lag_us_msg *ngm) +{ + if (ngm->cm.type >= NSS_GRE_REDIR_LAG_US_MAX_MSG_TYPES) { + nss_warning("%px: Invalid message type\n", ngm); + return; + } + + nss_info("%px: type[%d]:%s\n", ngm, ngm->cm.type, nss_gre_redir_lag_us_log_message_types_str[ngm->cm.type]); + nss_gre_redir_lag_us_log_verbose(ngm); +} + +/* + * nss_gre_redir_lag_us_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_gre_redir_lag_us_log_rx_msg(struct nss_gre_redir_lag_us_msg *ngm) +{ + if (ngm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ngm); + return; + } + + if (ngm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ngm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ngm, ngm->cm.type, + nss_gre_redir_lag_us_log_message_types_str[ngm->cm.type], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response]); + goto verbose; + } + + if (ngm->cm.error >= NSS_GRE_REDIR_LAG_ERR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ngm, ngm->cm.type, nss_gre_redir_lag_us_log_message_types_str[ngm->cm.type], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response], + ngm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ngm, ngm->cm.type, nss_gre_redir_lag_us_log_message_types_str[ngm->cm.type], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response], + ngm->cm.error, nss_gre_redir_lag_us_log_error_response_types_str[ngm->cm.error]); + +verbose: + nss_gre_redir_lag_us_log_verbose(ngm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_log.h new file mode 100644 index 000000000..cbda8d93a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_GRE_REDIR_LAG_US_LOG_H__ +#define __NSS_GRE_REDIR_LAG_US_LOG_H__ + +/* + * nss_gre_redir_lag_us_log.h + * NSS GRE REDIR LAG US Log Header File + */ + +/* + * nss_gre_redir_lag_us_log_tx_msg + * Logs a gre redir lag us message that is sent to the NSS firmware. + */ +void nss_gre_redir_lag_us_log_tx_msg(struct nss_gre_redir_lag_us_msg *ngm); + +/* + * nss_gre_redir_lag_us_log_rx_msg + * Logs a gre redir lag us message that is received from the NSS firmware. + */ +void nss_gre_redir_lag_us_log_rx_msg(struct nss_gre_redir_lag_us_msg *ngm); + +#endif /* __NSS_GRE_REDIR_LAG_US_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_stats.c new file mode 100644 index 000000000..2b291bfd3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_stats.c @@ -0,0 +1,226 @@ +/* + ************************************************************************** + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_gre_redir_lag.h" +#include "nss_gre_redir_lag_us_stats.h" +#include "nss_gre_redir_lag_us_strings.h" + +#define NSS_GRE_REDIR_LAG_US_STATS_SYNC_PERIOD msecs_to_jiffies(4000) +#define NSS_GRE_REDIR_LAG_US_STATS_SYNC_UDELAY 4000 + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_gre_redir_lag_us_stats_notifier); + +extern struct nss_gre_redir_lag_us_cmn_ctx cmn_ctx; + +/* + * nss_gre_redir_lag_us_stats_get + * Get the common upstream statistics. + */ +bool nss_gre_redir_lag_us_stats_get(struct nss_gre_redir_lag_us_tunnel_stats *cmn_stats, uint32_t index) +{ + if (index >= NSS_GRE_REDIR_LAG_MAX_NODE) { + nss_warning("Index is out of valid range %u\n", index); + return false; + } + + spin_lock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + if (!cmn_ctx.stats_ctx[index].valid) { + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + nss_warning("Common context not found for the index %u\n", index); + return false; + } + + memcpy((void *)cmn_stats, (void *)&(cmn_ctx.stats_ctx[index].tun_stats), sizeof(*cmn_stats)); + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + return true; +} + +/* + * nss_gre_redir_lag_us_cmn_stats_read() + * Read and copy stats to user buffer. 
+ */ +static ssize_t nss_gre_redir_lag_us_cmn_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats + + * few blank lines for banner printing + Number of Extra outputlines + * for future reference to add new stats + */ + uint32_t max_output_lines = NSS_GRE_REDIR_LAG_US_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + struct nss_stats_data *data = fp->private_data; + struct nss_gre_redir_lag_us_tunnel_stats stats; + ssize_t bytes_read = 0; + size_t size_wr = 0; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + while (data->index < NSS_GRE_REDIR_LAG_MAX_NODE) { + if (nss_gre_redir_lag_us_stats_get(&stats, data->index)) { + break; + } + + data->index++; + } + + if (data->index == NSS_GRE_REDIR_LAG_MAX_NODE) { + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "gre_redir_lag_us stats", NSS_STATS_SINGLE_CORE); + size_wr += nss_stats_print("gre_redir_lag_us", NULL, NSS_STATS_SINGLE_INSTANCE, nss_gre_redir_lag_us_strings_stats, + &stats.rx_packets, NSS_GRE_REDIR_LAG_US_STATS_MAX, lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + data->index++; + kfree(lbuf); + return bytes_read; +} + +/* + * nss_gre_redir_lag_us_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(gre_redir_lag_us_cmn) + +/* + * nss_gre_redir_lag_us_stats_dentry_create() + * Create debugfs directory for stats. 
+ */ +struct dentry *nss_gre_redir_lag_us_stats_dentry_create(void) +{ + struct dentry *gre_redir; + struct dentry *cmn_stats; + + gre_redir = nss_gre_redir_get_dentry(); + if (unlikely(!gre_redir)) { + nss_warning("Failed to retrieve directory entry qca-nss-drv/stats/gre_redir/\n"); + return NULL; + } + + cmn_stats = debugfs_create_file("lag_us_cmn_stats", 0400, gre_redir, + &nss_top_main, &nss_gre_redir_lag_us_cmn_stats_ops); + if (unlikely(!cmn_stats)) { + nss_warning("Failed to create qca-nss-drv/stats/gre_redir/lag_us_cmn_stats file\n"); + return NULL; + } + + return cmn_stats; +} + +/* + * nss_gre_redir_lag_us_stats_sync() + * Update synchronized statistics. + */ +void nss_gre_redir_lag_us_stats_sync(struct nss_ctx_instance *nss_ctx, + struct nss_gre_redir_lag_us_cmn_sync_stats_msg *ngss, uint32_t ifnum) +{ + int idx, j; + struct nss_gre_redir_lag_us_tunnel_stats *node_stats; + + spin_lock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + if (!nss_gre_redir_lag_us_get_node_idx(ifnum, &idx)) { + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + nss_warning("%px: Unable to update hash stats msg. 
Stats context not found.\n", nss_ctx); + return; + } + + node_stats = &cmn_ctx.stats_ctx[idx].tun_stats; + + node_stats->tx_packets += ngss->node_stats.tx_packets; + node_stats->tx_bytes += ngss->node_stats.tx_bytes; + node_stats->rx_packets += ngss->node_stats.rx_packets; + node_stats->rx_bytes += ngss->node_stats.rx_bytes; + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + node_stats->rx_dropped[j] += ngss->node_stats.rx_dropped[j]; + } + + node_stats->us_stats.amsdu_pkts += ngss->us_stats.amsdu_pkts; + node_stats->us_stats.amsdu_pkts_enqueued += ngss->us_stats.amsdu_pkts_enqueued; + node_stats->us_stats.amsdu_pkts_exceptioned += ngss->us_stats.amsdu_pkts_exceptioned; + node_stats->us_stats.exceptioned += ngss->us_stats.exceptioned; + node_stats->us_stats.freed += ngss->us_stats.freed; + node_stats->db_stats.add_attempt += ngss->db_stats.add_attempt; + node_stats->db_stats.add_success += ngss->db_stats.add_success; + node_stats->db_stats.add_fail_table_full += ngss->db_stats.add_fail_table_full; + node_stats->db_stats.add_fail_exists += ngss->db_stats.add_fail_exists; + node_stats->db_stats.del_attempt += ngss->db_stats.del_attempt; + node_stats->db_stats.del_success += ngss->db_stats.del_success; + node_stats->db_stats.del_fail_not_found += ngss->db_stats.del_fail_not_found; + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); +} + +/* + * nss_gre_redir_lag_us_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_gre_redir_lag_us_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_gre_redir_lag_us_stats_notification *stats_notify; + int idx; + + stats_notify = kzalloc(sizeof(struct nss_gre_redir_lag_us_stats_notification), GFP_ATOMIC); + if (!stats_notify) { + nss_warning("Unable to allocate memory for stats notification\n"); + return; + } + + spin_lock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + if (!nss_gre_redir_lag_us_get_node_idx(if_num, &idx)) { + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + nss_warning("%px: Unable to update hash stats msg. Stats context not found.\n", nss_ctx); + kfree(stats_notify); + return; + } + + stats_notify->core_id = nss_ctx->id; + stats_notify->if_num = if_num; + memcpy(&(stats_notify->stats_ctx), &(cmn_ctx.stats_ctx[idx].tun_stats), sizeof(stats_notify->stats_ctx)); + spin_unlock_bh(&cmn_ctx.nss_gre_redir_lag_us_stats_lock); + atomic_notifier_call_chain(&nss_gre_redir_lag_us_stats_notifier, NSS_STATS_EVENT_NOTIFY, stats_notify); + kfree(stats_notify); +} + +/* + * nss_gre_redir_lag_us_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_gre_redir_lag_us_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_gre_redir_lag_us_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_redir_lag_us_stats_unregister_notifier); + +/* + * nss_gre_redir_lag_us_stats_register_notifier() + * Registers statistics notifier. 
+ */ +int nss_gre_redir_lag_us_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_gre_redir_lag_us_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_redir_lag_us_stats_register_notifier); + diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_stats.h new file mode 100644 index 000000000..9f223122d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_stats.h @@ -0,0 +1,50 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_GRE_REDIR_LAG_US_STATS_H__ +#define __NSS_GRE_REDIR_LAG_US_STATS_H__ + +/* + * nss_gre_redir_lag_us_pvt_sync_stats + * Hash statistics synchronization context. + */ +struct nss_gre_redir_lag_us_pvt_sync_stats { + struct delayed_work nss_gre_redir_lag_us_work; /**< Delayed work per LAG US node. */ + struct nss_gre_redir_lag_us_msg db_sync_msg; /**< Hash statistics message. */ + struct nss_gre_redir_lag_us_tunnel_stats tun_stats; /**< GRE redirect LAG common statistics. 
*/ + nss_gre_redir_lag_us_msg_callback_t cb; /**< Callback for hash query message. */ + void *app_data; /**< app_data for hash query message. */ + uint32_t ifnum; /**< NSS interface number. */ + bool valid; /**< Valid flag. */ +}; + +/* + * Common context for stats update. + */ +struct nss_gre_redir_lag_us_cmn_ctx { + struct workqueue_struct *nss_gre_redir_lag_us_wq; /**< Work queue. */ + spinlock_t nss_gre_redir_lag_us_stats_lock; /**< Spin lock. */ + struct nss_gre_redir_lag_us_pvt_sync_stats stats_ctx[NSS_GRE_REDIR_LAG_MAX_NODE]; +}; + +extern void nss_gre_redir_lag_us_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern bool nss_gre_redir_lag_us_get_node_idx(uint32_t ifnum, uint32_t *idx); +extern bool nss_gre_redir_lag_us_verify_ifnum(uint32_t if_num); +extern void nss_gre_redir_lag_us_stats_sync(struct nss_ctx_instance *nss_ctx, + struct nss_gre_redir_lag_us_cmn_sync_stats_msg *ngss, uint32_t ifnum); +extern struct dentry *nss_gre_redir_lag_us_stats_dentry_create(void); + +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_strings.c new file mode 100644 index 000000000..c1dca2bad --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_strings.c @@ -0,0 +1,71 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_gre_redir_lag_us_strings.h" + +/* + * nss_gre_redir_lag_us_strings_stats + * GRE REDIR LAG US common statistics strings. + */ +struct nss_stats_info nss_gre_redir_lag_us_strings_stats[NSS_GRE_REDIR_LAG_US_STATS_MAX] = { + {"rx_packets", NSS_STATS_TYPE_COMMON}, + {"rx_bytes", NSS_STATS_TYPE_COMMON}, + {"tx_packets", NSS_STATS_TYPE_COMMON}, + {"tx_bytes", NSS_STATS_TYPE_COMMON}, + {"rx_dropped_0", NSS_STATS_TYPE_DROP}, + {"rx_dropped_1", NSS_STATS_TYPE_DROP}, + {"rx_dropped_2", NSS_STATS_TYPE_DROP}, + {"rx_dropped_3", NSS_STATS_TYPE_DROP}, + {"Amsdu pkts", NSS_STATS_TYPE_SPECIAL}, + {"Amsdu pkts enqueued", NSS_STATS_TYPE_SPECIAL}, + {"Amsdu pkts exceptioned", NSS_STATS_TYPE_EXCEPTION}, + {"Exceptioned", NSS_STATS_TYPE_EXCEPTION}, + {"Freed", NSS_STATS_TYPE_SPECIAL}, + {"add attempt", NSS_STATS_TYPE_SPECIAL}, + {"add success", NSS_STATS_TYPE_SPECIAL}, + {"add fail table full", NSS_STATS_TYPE_SPECIAL}, + {"add fail exists", NSS_STATS_TYPE_SPECIAL}, + {"del attempt", NSS_STATS_TYPE_SPECIAL}, + {"del success", NSS_STATS_TYPE_SPECIAL}, + {"del fail not found", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_gre_redir_lag_us_strings_read() + * Read gre_redir_lag_us statistics names + */ +static ssize_t nss_gre_redir_lag_us_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_gre_redir_lag_us_strings_stats, NSS_GRE_REDIR_LAG_US_STATS_MAX); +} + +/* + * nss_gre_redir_lag_us_strings_ops + */ 
+NSS_STRINGS_DECLARE_FILE_OPERATIONS(gre_redir_lag_us); + +/* + * nss_gre_redir_lag_us_strings_dentry_create() + * Create gre_redir_lag_us statistics strings debug entry. + */ +void nss_gre_redir_lag_us_strings_dentry_create(void) +{ + nss_strings_create_dentry("gre_redir_lag_us", &nss_gre_redir_lag_us_strings_ops); +} + diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_strings.h new file mode 100644 index 000000000..74c1054de --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_lag_us_strings.h @@ -0,0 +1,25 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ *************************************************************************** + */ + +#ifndef __NSS_GRE_REDIR_LAG_US_STRINGS_H +#define __NSS_GRE_REDIR_LAG_US_STRINGS_H + +#include "nss_gre_redir_lag_us_stats.h" + +extern struct nss_stats_info nss_gre_redir_lag_us_strings_stats[NSS_GRE_REDIR_LAG_US_STATS_MAX]; +extern void nss_gre_redir_lag_us_strings_dentry_create(void); + +#endif /* __NSS_GRE_REDIR_LAG_US_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_log.c new file mode 100644 index 000000000..1ac6afbd2 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_log.c @@ -0,0 +1,242 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_gre_redir_log.c + * NSS GRE REDIR logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_gre_redir_log_message_types_str + * NSS GRE REDIR message strings + */ +static int8_t *nss_gre_redir_log_message_types_str[NSS_GRE_REDIR_MAX_MSG_TYPES] __maybe_unused = { + "GRE REDIR Tunnel Inner Configure", + "GRE REDIR Tunnel Outer Configure", + "GRE REDIR Interface Map", + "GRE REDIR Interface Unmap", + "GRE REDIR SJACK Map", + "GRE REDIR SJACK Unmap", + "GRE REDIR Stats Sync", + "GRE REDIR Exception DS register cb" +}; + +/* + * nss_gre_redir_log_inner_configure_msg() + * Log NSS GRE Redir inner configure message. + */ +static void nss_gre_redir_log_inner_configure_msg(struct nss_gre_redir_msg *ngm) +{ + struct nss_gre_redir_inner_configure_msg *ngicm __maybe_unused = &ngm->msg.inner_configure; + nss_trace("%px: NSS GRE Redir Inner Configure message" + "GRE REDIR IP Header Type: %d\n" + "GRE REDIR Source IP: %px\n" + "GRE REDIR Destination IP: %px\n" + "GRE REDIR Outer Interface: %d\n" + "GRE REDIR Do not Fragment: %d\n" + "GRE REDIR IP TTL: %d\n" + "GRE REDIR Version: %d\n", + ngicm, ngicm->ip_hdr_type, + ngicm->ip_src_addr, ngicm->ip_dest_addr, + ngicm->except_outerif, ngicm->ip_df_policy, + ngicm->ip_ttl, ngicm->gre_version); +} + +/* + * nss_gre_redir_log_interface_map_msg() + * Log NSS GRE Redir interface map message. 
+ */ +static void nss_gre_redir_log_interface_map_msg(struct nss_gre_redir_msg *ngm) +{ + struct nss_gre_redir_interface_map_msg *ngicm __maybe_unused = &ngm->msg.interface_map; + nss_trace("%px: NSS GRE Redir Interface Map message" + "GRE REDIR NSS VAP Interface: %d\n" + "GRE REDIR Next Hop NSS Interface: %d\n" + "GRE REDIR Radio ID: %d\n" + "GRE REDIR VAP ID: %d\n" + "GRE REDIR LAG Flags: %x\n" + "GRE REDIR Tunnel Type: %d\n" + "GRE REDIR IPsec pattern: %d\n", + ngicm, ngicm->vap_nssif, + ngicm->nexthop_nssif, ngicm->radio_id, + ngicm->vap_id, ngicm->lag_en, + ngicm->tunnel_type, ngicm->ipsec_pattern); +} + +/* + * nss_gre_redir_log_interface_unmap_msg() + * Log NSS GRE Redir interface unmap message. + */ +static void nss_gre_redir_log_interface_unmap_msg(struct nss_gre_redir_msg *ngm) +{ + struct nss_gre_redir_interface_unmap_msg *ngicm __maybe_unused = &ngm->msg.interface_unmap; + nss_trace("%px: NSS GRE Redir Interface Map message" + "GRE REDIR NSS VAP Interface: %d\n" + "GRE REDIR Radio ID: %d\n" + "GRE REDIR VAP ID: %d\n", + ngicm, ngicm->vap_nssif, + ngicm->radio_id, ngicm->vap_id); +} + +/* + * nss_gre_redir_log_sjack_map_msg() + * Log NSS GRE Redir interface map message. + */ +static void nss_gre_redir_log_sjack_map_msg(struct nss_gre_redir_msg *ngm) +{ + struct nss_gre_redir_sjack_map_msg *ngscm __maybe_unused = &ngm->msg.sjack_map; + nss_trace("%px: NSS GRE Redir SJACK Map message" + "GRE REDIR Eth NSS Interface: %d\n" + "GRE REDIR Eth Interface ID: %d\n" + "GRE REDIR IPSec pattern: %x\n", + ngscm, ngscm->eth_nssif, + ngscm->eth_id, ngscm->ipsec_pattern); +} + +/* + * nss_gre_redir_log_sjack_unmap_msg() + * Log NSS GRE Redir interface unmap message. 
+ */ +static void nss_gre_redir_log_sjack_unmap_msg(struct nss_gre_redir_msg *ngm) +{ + struct nss_gre_redir_sjack_unmap_msg *ngscm __maybe_unused = &ngm->msg.sjack_unmap; + nss_trace("%px: NSS GRE Redir SJACK Map message" + "GRE REDIR Eth NSS Interface: %d\n" + "GRE REDIR Eth Interface ID: %d\n", + ngscm, ngscm->eth_nssif, + ngscm->eth_id); +} + +/* + * nss_gre_redir_log_outer_configure_msg() + * Log NSS GRE Redir outer configure message. + */ +static void nss_gre_redir_log_outer_configure_msg(struct nss_gre_redir_msg *ngm) +{ + struct nss_gre_redir_outer_configure_msg *ngocm __maybe_unused = &ngm->msg.outer_configure; + nss_trace("%px: NSS GRE Redir Outer Configure message" + "GRE REDIR IP Header Type: %d\n" + "GRE REDIR Host Inner Interface: %d\n" + "GRE REDIR NSS Inner Interface: %d\n" + "GRE REDIR SJACK Inner Interface: %d\n" + "GRE REDIR RPS: %d\n" + "GRE REDIR RPS Valid: %d\n", + ngocm, ngocm->ip_hdr_type, + ngocm->except_hostif, ngocm->except_offlif, + ngocm->except_sjackif, ngocm->rps_hint, + ngocm->rps_hint_valid); +} + +/* + * nss_gre_redir_log_exception_ds_reg_cb_msg() + * Log GRE exception downstream callback registration message. + */ +static void nss_gre_redir_log_exception_ds_reg_cb_msg(struct nss_gre_redir_msg *ngm) +{ + struct nss_gre_redir_exception_ds_reg_cb_msg *exception_ds_configure __maybe_unused = &ngm->msg.exception_ds_configure; + nss_trace("%px: NSS GRE redir exception completion callback registration message\n" + "vap_if_num: %d\n", ngm, exception_ds_configure->dst_vap_nssif); +} + +/* + * nss_gre_redir_log_verbose() + * Log message contents. 
+ */ +static void nss_gre_redir_log_verbose(struct nss_gre_redir_msg *ngm) +{ + switch (ngm->cm.type) { + case NSS_GRE_REDIR_TX_TUNNEL_INNER_CONFIGURE_MSG: + nss_gre_redir_log_inner_configure_msg(ngm); + break; + + case NSS_GRE_REDIR_TX_TUNNEL_OUTER_CONFIGURE_MSG: + nss_gre_redir_log_outer_configure_msg(ngm); + break; + + case NSS_GRE_REDIR_TX_INTERFACE_MAP_MSG: + nss_gre_redir_log_interface_map_msg(ngm); + break; + + case NSS_GRE_REDIR_TX_INTERFACE_UNMAP_MSG: + nss_gre_redir_log_interface_unmap_msg(ngm); + break; + + case NSS_GRE_REDIR_TX_SJACK_MAP_MSG: + nss_gre_redir_log_sjack_map_msg(ngm); + break; + + case NSS_GRE_REDIR_TX_SJACK_UNMAP_MSG: + nss_gre_redir_log_sjack_unmap_msg(ngm); + break; + + case NSS_GRE_REDIR_RX_STATS_SYNC_MSG: + /* + * No log for valid stats message. + */ + break; + + case NSS_GRE_REDIR_EXCEPTION_DS_REG_CB_MSG: + nss_gre_redir_log_exception_ds_reg_cb_msg(ngm); + break; + + default: + nss_warning("%px: Invalid message type\n", ngm); + break; + } +} + +/* + * nss_gre_redir_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_gre_redir_log_tx_msg(struct nss_gre_redir_msg *ngm) +{ + if (ngm->cm.type >= NSS_GRE_REDIR_MAX_MSG_TYPES) { + nss_warning("%px: Invalid message type\n", ngm); + return; + } + + nss_info("%px: type[%d]:%s\n", ngm, ngm->cm.type, nss_gre_redir_log_message_types_str[ngm->cm.type]); + nss_gre_redir_log_verbose(ngm); +} + +/* + * nss_gre_redir_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_gre_redir_log_rx_msg(struct nss_gre_redir_msg *ngm) +{ + if (ngm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ngm); + return; + } + + if (ngm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ngm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ngm, ngm->cm.type, + nss_gre_redir_log_message_types_str[ngm->cm.type], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + ngm, ngm->cm.type, nss_gre_redir_log_message_types_str[ngm->cm.type], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response]); + +verbose: + nss_gre_redir_log_verbose(ngm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_log.h new file mode 100644 index 000000000..7e1fb793e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_GRE_REDIR_LOG_H +#define __NSS_GRE_REDIR_LOG_H + +/* + * nss_gre_redir_log.h + * NSS GRE REDIR Log header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_gre_redir_log_tx_msg + * Logs a gre_redir message that is sent to the NSS firmware. + */ +void nss_gre_redir_log_tx_msg(struct nss_gre_redir_msg *ngm); + +/* + * nss_gre_redir_log_rx_msg + * Logs a gre_redir message that is received from the NSS firmware. + */ +void nss_gre_redir_log_rx_msg(struct nss_gre_redir_msg *ngm); + +#endif /* __NSS_GRE_REDIR_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark.c new file mode 100644 index 000000000..0b8524f7c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark.c @@ -0,0 +1,341 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_gre_redir_mark_strings.h" +#include "nss_gre_redir_mark_stats.h" +#include "nss_gre_redir_mark_log.h" +#define NSS_GRE_REDIR_MARK_TX_TIMEOUT 3000 /* 3 Seconds */ + +/* + * Private data structure for handling synchronous messaging. + */ +static struct { + struct semaphore sem; + struct completion complete; + int response; +} nss_gre_redir_mark_pvt; + +/* + * nss_gre_redir_mark_msg_sync_callback() + * Callback to handle the completion of HLOS-->NSS messages. + */ +static void nss_gre_redir_mark_msg_sync_callback(void *app_data, struct nss_gre_redir_mark_msg *nim) +{ + nss_gre_redir_mark_pvt.response = NSS_TX_SUCCESS; + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("GRE mark Error response %d\n", nim->cm.response); + nss_gre_redir_mark_pvt.response = NSS_TX_FAILURE; + } + + complete(&nss_gre_redir_mark_pvt.complete); +} + +/* + * nss_gre_redir_mark_handler() + * Handle NSS to HLOS messages for GRE redir mark + */ +static void nss_gre_redir_mark_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *app_data) +{ + struct nss_gre_redir_mark_msg *ngrm = (struct nss_gre_redir_mark_msg *)ncm; + nss_gre_redir_mark_msg_callback_t cb; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_GRE_REDIR_MARK_MSG_MAX) { + nss_warning("%px: received invalid message %d for GRE redir mark interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_gre_redir_mark_msg)) { + nss_warning("%px: length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Trace messages. 
+ */ + nss_gre_redir_mark_log_rx_msg((struct nss_gre_redir_mark_msg *)ncm); + + if (ncm->type == NSS_GRE_REDIR_MARK_STATS_SYNC_MSG) { + nss_gre_redir_mark_stats_sync(nss_ctx, ncm->interface, &ngrm->msg.stats_sync); + nss_gre_redir_mark_stats_notify(nss_ctx, ncm->interface); + } + + /* + * Update the callback and app_data for NOTIFY messages, GRE redir mark sends all notify messages + * to the same callback/app_data. The app data here represent the netdevice of the GRE redir mark + * interface. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].ndev; + } + + /* + * load and call the registered synchronous message callback. + */ + cb = (nss_gre_redir_mark_msg_callback_t)ncm->cb; + if (unlikely(!cb)) { + return; + } + + cb((void *)ncm->app_data, ncm); +} + +/* + * nss_gre_redir_mark_reg_cb() + * Configure a callback on VAP. + */ +nss_tx_status_t nss_gre_redir_mark_reg_cb(int ifnum, + struct nss_gre_redir_mark_register_cb_msg *ngrcm) +{ + struct nss_gre_redir_mark_msg config; + struct nss_ctx_instance *nss_ctx __maybe_unused = nss_gre_redir_mark_get_context(); + nss_tx_status_t status; + uint32_t vap_type; + uint32_t len = sizeof(struct nss_gre_redir_mark_register_cb_msg); + + if (!nss_ctx) { + nss_warning("Unable to retrieve NSS context.\n"); + return NSS_TX_FAILURE_BAD_PARAM; + } + + vap_type = nss_dynamic_interface_get_type(nss_ctx, ngrcm->nss_if_num); + if ((vap_type != NSS_DYNAMIC_INTERFACE_TYPE_VAP)) { + nss_warning("%px: Incorrect type for vap interface type = %u", nss_ctx, vap_type); + return NSS_TX_FAILURE_BAD_PARAM; + } + + /* + * Configure the node + */ + nss_cmn_msg_init(&config.cm, NSS_GRE_REDIR_MARK_INTERFACE, NSS_GRE_REDIR_MARK_REG_CB_MSG, len, NULL, NULL); + config.msg.reg_cb_msg.nss_if_num = ngrcm->nss_if_num; + + status = nss_gre_redir_mark_tx_msg_sync(nss_ctx, &config); + if (status != NSS_TX_SUCCESS) 
{ + nss_warning("%px: Unable to register callback from GRE redir mark interface %d\n", nss_ctx, ifnum); + } + + return status; +} +EXPORT_SYMBOL(nss_gre_redir_mark_reg_cb); + +/* + * nss_gre_redir_mark_tx_msg() + * Transmit a GRE MARK configuration message to NSS FW. + */ +nss_tx_status_t nss_gre_redir_mark_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_mark_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace Messages + */ + nss_gre_redir_mark_log_tx_msg(msg); + + /* + * interface should be of type of redir mark + */ + if (ncm->interface != NSS_GRE_REDIR_MARK_INTERFACE) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_GRE_REDIR_MARK_MSG_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_gre_redir_mark_tx_msg); + +/* + * nss_gre_redir_mark_tx_msg_sync() + * Transmit a GRE redir mark message to NSS firmware synchronously. + */ +nss_tx_status_t nss_gre_redir_mark_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_redir_mark_msg *ngrm) +{ + nss_tx_status_t status; + int ret = 0; + + /* + * Decrease the semaphore count to send the message exclusively. 
+ */ + down(&nss_gre_redir_mark_pvt.sem); + ngrm->cm.cb = (nss_ptr_t)nss_gre_redir_mark_msg_sync_callback; + ngrm->cm.app_data = (nss_ptr_t)NULL; + status = nss_gre_redir_mark_tx_msg(nss_ctx, ngrm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: GRE redir mark tx_msg failed\n", nss_ctx); + up(&nss_gre_redir_mark_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&nss_gre_redir_mark_pvt.complete, msecs_to_jiffies(NSS_GRE_REDIR_MARK_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: GRE redir mark message tx sync failed due to timeout\n", nss_ctx); + nss_gre_redir_mark_pvt.response = NSS_TX_FAILURE; + } + + status = nss_gre_redir_mark_pvt.response; + up(&nss_gre_redir_mark_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_gre_redir_mark_tx_msg_sync); + +/* + * nss_gre_redir_mark_tx_buf() + * Send packet to GRE redir mark interface owned by NSS. + */ +nss_tx_status_t nss_gre_redir_mark_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num) +{ + nss_trace("%px: GRE redir mark If Tx packet, interface id:%d, data=%px", nss_ctx, if_num, os_buf->data); + + /* + * We expect Tx packets to the GRE redir mark interface only. + */ + if (if_num != NSS_GRE_REDIR_MARK_INTERFACE) { + nss_warning("%px: Invalid interface:%d for GRE redir mark packets\n", nss_ctx, if_num); + return NSS_TX_FAILURE_BAD_PARAM; + } + + return nss_core_send_packet(nss_ctx, os_buf, if_num, 0); +} +EXPORT_SYMBOL(nss_gre_redir_mark_tx_buf); + +/* + * nss_gre_redir_mark_get_context() + * Return NSS GRE redir mark context. + */ +struct nss_ctx_instance *nss_gre_redir_mark_get_context(void) +{ + return &nss_top_main.nss[nss_top_main.gre_redir_mark_handler_id]; +} +EXPORT_SYMBOL(nss_gre_redir_mark_get_context); + +/* + * nss_gre_redir_mark_unregister_if() + * Unregister dynamic node for GRE_REDIR_MARK redir. 
+ */ +bool nss_gre_redir_mark_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx __maybe_unused = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_redir_handler_id]; + struct net_device *dev; + uint32_t status; + + nss_assert(nss_ctx); + nss_assert(if_num == NSS_GRE_REDIR_MARK_INTERFACE); + + dev = nss_cmn_get_interface_dev(nss_ctx, if_num); + + BUG_ON(!dev); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + status = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for gre_redir_mark interface %d with NSS core\n", + nss_ctx, if_num); + return false; + } + + nss_ctx->nss_rx_interface_handlers[if_num].msg_cb = NULL; + return true; +} +EXPORT_SYMBOL(nss_gre_redir_mark_unregister_if); + +/* + * nss_gre_redir_mark_register_if() + * Register staticr GRE redir mark interface with data-plane. + */ +struct nss_ctx_instance *nss_gre_redir_mark_register_if(struct net_device *netdev, uint32_t if_num, + nss_gre_redir_mark_data_callback_t cb_func_data, nss_gre_redir_mark_msg_callback_t cb_func_msg, + uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_redir_handler_id]; + uint32_t status; + + nss_assert(nss_ctx); + nss_assert(if_num == NSS_GRE_REDIR_MARK_INTERFACE); + + /* + * Registering the interface with network data path. + */ + nss_core_register_subsys_dp(nss_ctx, if_num, cb_func_data, NULL, NULL, netdev, features); + status = nss_core_register_msg_handler(nss_ctx, NSS_GRE_REDIR_MARK_INTERFACE, cb_func_msg); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to register handler for gre_redir_mark interface %d with NSS core\n", + nss_ctx, if_num); + return NULL; + } + + return nss_ctx; +} +EXPORT_SYMBOL(nss_gre_redir_mark_register_if); + +/* + * nss_gre_redir_mark_get_device() + * Gets the original device from probe. 
+ */ +struct device *nss_gre_redir_mark_get_device(void) +{ + struct nss_ctx_instance *nss_ctx = nss_gre_redir_mark_get_context(); + return nss_ctx->dev; +} +EXPORT_SYMBOL(nss_gre_redir_mark_get_device); + +/* + * nss_gre_redir_mark_register_handler() + * Register GRE redir mark and register handler + */ +void nss_gre_redir_mark_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_gre_redir_mark_get_context(); + struct dentry *gre_redir_mark_dentry = NULL; + uint32_t status = NSS_CORE_STATUS_FAILURE; + + /* + * Create the debug fs entry for the stats. + */ + gre_redir_mark_dentry = nss_gre_redir_mark_stats_dentry_create(); + if (!gre_redir_mark_dentry) { + nss_warning("%px: Not able to create debugfs entry\n", nss_ctx); + return; + } + + nss_gre_redir_mark_strings_dentry_create(); + sema_init(&nss_gre_redir_mark_pvt.sem, 1); + init_completion(&nss_gre_redir_mark_pvt.complete); + + nss_info("nss_gre_redir_mark_register_handler\n"); + status = nss_core_register_handler(nss_ctx, NSS_GRE_REDIR_MARK_INTERFACE, nss_gre_redir_mark_handler, NULL); + if (status != NSS_CORE_STATUS_SUCCESS) { + debugfs_remove_recursive(gre_redir_mark_dentry); + gre_redir_mark_dentry = NULL; + nss_warning("%px: Not able to register handler for GRE redir mark with NSS core\n", nss_ctx); + return; + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_log.c new file mode 100644 index 000000000..580c5a851 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_log.c @@ -0,0 +1,119 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" + +/* + * nss_gre_redir_mark_log_message_types_str + * GRE redir mark message strings + */ +static int8_t *nss_gre_redir_mark_log_message_types_str[NSS_GRE_REDIR_MARK_MSG_MAX] __maybe_unused = { + "GRE redir mark register callback message", + "GRE redir mark statistics synchronization" +}; + +/* + * nss_gre_redir_mark_log_error_response_types_str + * Strings for error types for GRE redir mark messages + */ +static int8_t *nss_gre_redir_mark_log_error_response_types_str[NSS_GRE_REDIR_MARK_ERROR_TYPE_MAX] __maybe_unused = { + "GRE redir mark No error", + "GRE redir mark Invalid interface for callback registration", + "GRE redir mark Invalid ethertype for Tx interface" +}; + +/* + * nss_gre_redir_mark_log_reg_cb_msg() + * Log NSS GRE redir mark configuration message + */ +static void nss_gre_redir_mark_log_reg_cb_msg(struct nss_gre_redir_mark_msg *ncm) +{ + struct nss_gre_redir_mark_register_cb_msg *reg_cb_msg __maybe_unused = &ncm->msg.reg_cb_msg; + nss_trace("%px: NSS GRE redir mark callback registration message \n" + "nss_if_num: %d\n", ncm, reg_cb_msg->nss_if_num); +} + +/* + * nss_gre_redir_mark_log_verbose() + * Log message contents. 
+ */ +static void nss_gre_redir_mark_log_verbose(struct nss_gre_redir_mark_msg *ncm) +{ + switch (ncm->cm.type) { + case NSS_GRE_REDIR_MARK_REG_CB_MSG: + nss_gre_redir_mark_log_reg_cb_msg(ncm); + break; + + case NSS_GRE_REDIR_MARK_STATS_SYNC_MSG: + /* + * No log for valid stats message. + */ + break; + + default: + nss_trace("%px: Invalid message type\n", ncm); + break; + } +} + +/* + * nss_gre_redir_mark_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_gre_redir_mark_log_tx_msg(struct nss_gre_redir_mark_msg *ngm) +{ + if (ngm->cm.type >= NSS_GRE_REDIR_MARK_MSG_MAX) { + nss_warning("%px: Invalid message type\n", ngm); + return; + } + + nss_info("%px: type[%d]:%s\n", ngm, ngm->cm.type, nss_gre_redir_mark_log_message_types_str[ngm->cm.type]); + nss_gre_redir_mark_log_verbose(ngm); +} +/* + * nss_gre_redir_mark_log_rx_msg() + * Log messages received from FW. + */ +void nss_gre_redir_mark_log_rx_msg(struct nss_gre_redir_mark_msg *ncm) +{ + if (ncm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ncm); + return; + } + + if (ncm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ncm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ncm, ncm->cm.type, + nss_gre_redir_mark_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response]); + goto verbose; + } + + if (ncm->cm.error >= NSS_GRE_REDIR_MARK_ERROR_TYPE_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ncm, ncm->cm.type, nss_gre_redir_mark_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ncm, ncm->cm.type, nss_gre_redir_mark_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error, 
nss_gre_redir_mark_log_error_response_types_str[ncm->cm.error]); + +verbose: + nss_gre_redir_mark_log_verbose(ncm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_log.h new file mode 100644 index 000000000..27e2ffb16 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_GRE_REDIR_MARK_LOG_H__ +#define __NSS_GRE_REDIR_MARK_LOG_H__ + +/* + * nss_gre_redir_mark_log.h + * NSS GRE_REDIR_MARK Log Header File. + */ + +/* + * nss_gre_redir_mark_log_tx_msg + * Logs GRE_REDIR_MARK message that is sent to the NSS firmware. + */ +void nss_gre_redir_mark_log_tx_msg(struct nss_gre_redir_mark_msg *ncm); + +/* + * nss_gre_redir_mark_log_rx_msg + * Logs GRE_REDIR_MARK message that is received from the NSS firmware. 
+ */ +void nss_gre_redir_mark_log_rx_msg(struct nss_gre_redir_mark_msg *ncm); + +#endif /* __NSS_GRE_REDIR_MARK_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_stats.c new file mode 100644 index 000000000..da0f64621 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_stats.c @@ -0,0 +1,230 @@ +/* + ************************************************************************** + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_stats.h" +#include "nss_gre_redir_mark.h" +#include "nss_gre_redir_mark_stats.h" +#include "nss_gre_redir_mark_strings.h" + +#define NSS_GRE_REDIR_MARK_STATS_STR_LEN 50 +#define NSS_GRE_REDIR_MARK_STATS_LEN ((NSS_GRE_REDIR_MARK_STATS_MAX + 7 ) * NSS_GRE_REDIR_MARK_STATS_STR_LEN) + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_gre_redir_mark_stats_notifier); + +/* + * Spinlock to protect GRE redirect mark statistics update/read + */ +DEFINE_SPINLOCK(nss_gre_redir_mark_stats_lock); + +/* + * Global GRE redirect mark stats structure. 
+ */ +struct nss_gre_redir_mark_stats gre_mark_stats; + +/* + * nss_gre_redir_mark_stats_get() + * Get gre_redir tunnel stats. + */ +bool nss_gre_redir_mark_stats_get(struct nss_gre_redir_mark_stats *stats_mem) +{ + if (!stats_mem) { + nss_warning("No memory to copy GRE redir mark stats"); + return false; + } + + /* + * Copy the GRE redir mark stats in the memory. + */ + spin_lock_bh(&nss_gre_redir_mark_stats_lock); + memcpy(stats_mem, &gre_mark_stats, sizeof(struct nss_gre_redir_mark_stats)); + spin_unlock_bh(&nss_gre_redir_mark_stats_lock); + return true; +} +EXPORT_SYMBOL(nss_gre_redir_mark_stats_get); + +/** + * nss_gre_redir_mark_stats_read() + * READ GRE redir mark stats. + */ +static ssize_t nss_gre_redir_mark_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats + + * few blank lines for banner printing + Number of Extra outputlines + * for future reference to add new stats + */ + uint32_t max_output_lines = NSS_GRE_REDIR_MARK_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + struct nss_gre_redir_mark_stats stats; + size_t size_wr = 0; + ssize_t bytes_read = 0; + bool isthere; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + /* + * If GRE redir mark does not exists, then (isthere) will be false. 
+ */ + isthere = nss_gre_redir_mark_stats_get(&stats); + if (!isthere) { + nss_warning("Could not get GRE redirect stats"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "gre_redir_mark stats", NSS_STATS_SINGLE_CORE); + size_wr += nss_stats_print("gre_redir_mark", NULL, NSS_STATS_SINGLE_INSTANCE, nss_gre_redir_mark_strings_stats, + stats.stats, NSS_GRE_REDIR_MARK_STATS_MAX, lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(lbuf); + return bytes_read; +} + +/* + * nss_gre_redir_mark_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(gre_redir_mark) + +/* + * nss_gre_redir_mark_stats_dentry_create() + * Create debugfs directory entry for stats. + */ +struct dentry *nss_gre_redir_mark_stats_dentry_create(void) +{ + struct dentry *gre_redir_mark; + + gre_redir_mark = debugfs_create_file("gre_redir_mark", 0400, nss_top_main.stats_dentry, + &nss_top_main, &nss_gre_redir_mark_stats_ops); + if (unlikely(!gre_redir_mark)) { + nss_warning("Failed to create file entry qca-nss-drv/stats/gre_redir_mark/\n"); + return NULL; + } + + return gre_redir_mark; +} + +/* + * nss_gre_redir_mark_stats_sync() + * Update GRE redir mark stats. + */ +void nss_gre_redir_mark_stats_sync(struct nss_ctx_instance *nss_ctx, int if_num, struct nss_gre_redir_mark_stats_sync_msg *ngss) +{ + int i; + struct net_device *dev; + dev = nss_cmn_get_interface_dev(nss_ctx, if_num); + if (!dev) { + nss_warning("%px: Unable to find net device for the interface %d\n", nss_ctx, if_num); + return; + } + + if (if_num != NSS_GRE_REDIR_MARK_INTERFACE) { + nss_warning("%px: Unknown type for interface %d\n", nss_ctx, if_num); + return; + } + + /* + * Update the stats in exclusive mode to prevent the read from the process + * context through debug fs. 
+ */ + spin_lock_bh(&nss_gre_redir_mark_stats_lock); + + /* + * Update the common node stats + */ + gre_mark_stats.stats[NSS_STATS_NODE_TX_PKTS] += ngss->node_stats.tx_packets; + gre_mark_stats.stats[NSS_STATS_NODE_TX_BYTES] += ngss->node_stats.tx_bytes; + gre_mark_stats.stats[NSS_STATS_NODE_RX_PKTS] += ngss->node_stats.rx_packets; + gre_mark_stats.stats[NSS_STATS_NODE_RX_BYTES] += ngss->node_stats.rx_bytes; + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + gre_mark_stats.stats[NSS_STATS_NODE_RX_QUEUE_0_DROPPED + i] += ngss->node_stats.rx_dropped[i]; + } + + /* + * Update the GRE redir mark specific stats + */ + gre_mark_stats.stats[NSS_GRE_REDIR_MARK_STATS_HLOS_MAGIC_FAILED] += ngss->hlos_magic_fail; + gre_mark_stats.stats[NSS_GRE_REDIR_MARK_STATS_INV_DST_IF_DROPS] += ngss->invalid_dst_drop; + gre_mark_stats.stats[NSS_GRE_REDIR_MARK_STATS_DST_IF_ENQUEUE] += ngss->dst_enqueue_success; + gre_mark_stats.stats[NSS_GRE_REDIR_MARK_STATS_DST_IF_ENQUEUE_DROPS] += ngss->dst_enqueue_drop; + gre_mark_stats.stats[NSS_GRE_REDIR_MARK_STATS_INV_APPID] += ngss->inv_appid; + gre_mark_stats.stats[NSS_GRE_REDIR_MARK_STATS_HEADROOM_UNAVAILABLE] += ngss->headroom_unavail; + gre_mark_stats.stats[NSS_GRE_REDIR_MARK_STATS_TX_COMPLETION_SUCCESS] += ngss->tx_completion_success; + gre_mark_stats.stats[NSS_GRE_REDIR_MARK_STATS_TX_COMPLETION_DROPS] += ngss->tx_completion_drop; + + spin_unlock_bh(&nss_gre_redir_mark_stats_lock); +} + +/* + * nss_gre_redir_mark_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_gre_redir_mark_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_gre_redir_mark_stats_notification *stats_notify; + + stats_notify = kzalloc(sizeof(struct nss_gre_redir_mark_stats_notification), GFP_ATOMIC); + if (!stats_notify) { + nss_warning("Unable to allocate memory for stats notification\n"); + return; + } + + if (if_num != NSS_GRE_REDIR_MARK_INTERFACE) { + nss_warning("%px: Unknown type for interface %d\n", nss_ctx, if_num); + kfree(stats_notify); + return; + } + + spin_lock_bh(&nss_gre_redir_mark_stats_lock); + stats_notify->core_id = nss_ctx->id; + stats_notify->if_num = if_num; + memcpy(stats_notify->stats_ctx, gre_mark_stats.stats, sizeof(stats_notify->stats_ctx)); + spin_unlock_bh(&nss_gre_redir_mark_stats_lock); + + atomic_notifier_call_chain(&nss_gre_redir_mark_stats_notifier, NSS_STATS_EVENT_NOTIFY, stats_notify); + kfree(stats_notify); +} + +/* + * nss_gre_redir_mark_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_gre_redir_mark_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_gre_redir_mark_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_redir_mark_stats_unregister_notifier); + +/* + * nss_gre_redir_mark_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_gre_redir_mark_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_gre_redir_mark_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_redir_mark_stats_register_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_stats.h new file mode 100644 index 000000000..cacb3d218 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_stats.h @@ -0,0 +1,35 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_GRE_REDIR_MARK_STATS_H__ +#define __NSS_GRE_REDIR_MARK_STATS_H__ + +/* + * NSS core stats -- for H2N/N2H gre_redir_mark debug stats + */ +struct nss_gre_redir_mark_stats { + uint64_t stats[NSS_GRE_REDIR_MARK_STATS_MAX]; +}; + +/* + * NSS GRE REDIR Mark statistics APIs + */ +extern void nss_gre_redir_mark_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_gre_redir_mark_stats_sync(struct nss_ctx_instance *nss_ctx, int if_num, + struct nss_gre_redir_mark_stats_sync_msg *ngss); +extern struct dentry *nss_gre_redir_mark_stats_dentry_create(void); + +#endif /* __NSS_GRE_REDIR_MARK_STATS_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_strings.c new file mode 100644 index 000000000..a8d5a9859 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_strings.c @@ -0,0 +1,66 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_gre_redir_mark_strings.h" + +/* + * nss_gre_redir_mark_strings_stats + * GRE redir mark statistics string + */ +struct nss_stats_info nss_gre_redir_mark_strings_stats[NSS_GRE_REDIR_MARK_STATS_MAX] = { + {"rx Packets", NSS_STATS_TYPE_COMMON}, + {"rx Bytes", NSS_STATS_TYPE_COMMON}, + {"tx Packets", NSS_STATS_TYPE_COMMON}, + {"tx Bytes", NSS_STATS_TYPE_COMMON}, + {"rx_dropped_0", NSS_STATS_TYPE_DROP}, + {"rx_dropped_1", NSS_STATS_TYPE_DROP}, + {"rx_dropped_2", NSS_STATS_TYPE_DROP}, + {"rx_dropped_3", NSS_STATS_TYPE_DROP}, + {"HLOS Magic Failed", NSS_STATS_TYPE_SPECIAL}, + {"tx Inv_dst_if Drops", NSS_STATS_TYPE_DROP}, + {"tx Dst_if Enqueue", NSS_STATS_TYPE_SPECIAL}, + {"tx Dst_if Enqueue Drops", NSS_STATS_TYPE_DROP}, + {"Invalid Appid", NSS_STATS_TYPE_SPECIAL}, + {"Headroom Unavailable", NSS_STATS_TYPE_EXCEPTION}, + {"tx Completion Host Enqueue Success", NSS_STATS_TYPE_SPECIAL}, + {"tx Completion Host Enqueue Drops", NSS_STATS_TYPE_DROP} +}; + +/* + * nss_gre_redir_mark_strings_read() + * Read gre_redir_mark statistics names + */ +static ssize_t 
nss_gre_redir_mark_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_gre_redir_mark_strings_stats, NSS_GRE_REDIR_MARK_STATS_MAX); +} + +/* + * nss_gre_redir_mark_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(gre_redir_mark); + +/* + * nss_gre_redir_mark_strings_dentry_create() + * Create gre_redir_mark statistics strings debug entry. + */ +void nss_gre_redir_mark_strings_dentry_create(void) +{ + nss_strings_create_dentry("gre_redir_mark", &nss_gre_redir_mark_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_strings.h new file mode 100644 index 000000000..98ed33204 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_mark_strings.h @@ -0,0 +1,25 @@ +/* + **************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ **************************************************************************** + */ + +#ifndef __NSS_GRE_REDIR_MARK_STRINGS_H +#define __NSS_GRE_REDIR_MARK_STRINGS_H + +#include "nss_gre_redir_mark_stats.h" + +extern struct nss_stats_info nss_gre_redir_mark_strings_stats[NSS_GRE_REDIR_MARK_STATS_MAX]; +extern void nss_gre_redir_mark_strings_dentry_create(void); + +#endif /* __NSS_GRE_REDIR_MARK_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_stats.c new file mode 100644 index 000000000..6adb3534c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_stats.c @@ -0,0 +1,312 @@ +/* + **************************************************************************** + * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + **************************************************************************** + */ + +#include "nss_core.h" +#include "nss_gre_redir.h" +#include "nss_gre_redir_stats.h" +#include "nss_gre_redir_strings.h" + +/* + * Declare atomic notifier data structure for statistics. 
+ */ +ATOMIC_NOTIFIER_HEAD(nss_gre_redir_stats_notifier); + +/* + * Spinlock to protect GRE redirect statistics update/read + */ +DEFINE_SPINLOCK(nss_gre_redir_stats_lock); + +/* + * Array to hold tunnel stats along with if_num + */ +extern struct nss_gre_redir_tunnel_stats tun_stats[NSS_GRE_REDIR_MAX_INTERFACES]; + +/* + * nss_gre_redir_stats_get() + * Get GRE redirect tunnel stats. + */ +bool nss_gre_redir_stats_get(int index, struct nss_gre_redir_tunnel_stats *stats) +{ + spin_lock_bh(&nss_gre_redir_stats_lock); + if (tun_stats[index].ref_count == 0) { + spin_unlock_bh(&nss_gre_redir_stats_lock); + return false; + } + + memcpy(stats, &tun_stats[index], sizeof(struct nss_gre_redir_tunnel_stats)); + spin_unlock_bh(&nss_gre_redir_stats_lock); + return true; +} +EXPORT_SYMBOL(nss_gre_redir_stats_get); + +/* + * nss_gre_redir_stats_read() + * READ gre_redir tunnel stats. + */ +static ssize_t nss_gre_redir_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats + + * few blank lines for banner printing + Number of Extra outputlines + * for future reference to add new stats + */ + uint32_t max_output_lines = NSS_GRE_REDIR_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines * NSS_GRE_REDIR_MAX_INTERFACES; + struct nss_stats_data *data = fp->private_data; + struct nss_gre_redir_tunnel_stats stats; + ssize_t bytes_read = 0; + size_t size_wr = 0; + int index = 0; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + if (data) { + index = data->index; + } + + /* + * If we are done accomodating all the GRE_REDIR tunnels. + */ + if (index >= NSS_GRE_REDIR_MAX_INTERFACES) { + kfree(lbuf); + return 0; + } + + for (; index < NSS_GRE_REDIR_MAX_INTERFACES; index++) { + bool isthere; + + /* + * If gre_redir tunnel does not exists, then isthere will be false. 
+ */ + isthere = nss_gre_redir_stats_get(index, &stats); + if (!isthere) { + continue; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "gre_redir stats", NSS_STATS_SINGLE_CORE); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nTunnel stats for %s\n", stats.dev->name); + size_wr += nss_stats_print("gre_redir", NULL, NSS_STATS_SINGLE_INSTANCE, nss_gre_redir_strings_stats, + &stats.tstats.rx_packets, NSS_GRE_REDIR_STATS_MAX, lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + if (data) { + data->index = index; + } + + kfree(lbuf); + return bytes_read; +} + +/* + * nss_gre_redir_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(gre_redir) + +/* + * nss_gre_redir_stats_dentry_create() + * Create debugfs directory entry for stats. + */ +struct dentry *nss_gre_redir_stats_dentry_create(void) +{ + struct dentry *gre_redir; + struct dentry *tun_stats; + + gre_redir = debugfs_create_dir("gre_redir", nss_top_main.stats_dentry); + if (unlikely(!gre_redir)) { + nss_warning("Failed to create directory entry qca-nss-drv/stats/gre_redir/\n"); + return NULL; + } + + tun_stats = debugfs_create_file("tun_stats", 0400, gre_redir, + &nss_top_main, &nss_gre_redir_stats_ops); + if (unlikely(!tun_stats)) { + debugfs_remove_recursive(gre_redir); + nss_warning("Failed to create file entry qca-nss-drv/stats/gre_redir/tun_stats\n"); + return NULL; + } + + return gre_redir; +} + +/* + * nss_gre_redir_stats_sync() + * Update gre_redir tunnel stats. 
+ */ +void nss_gre_redir_stats_sync(struct nss_ctx_instance *nss_ctx, int if_num, struct nss_gre_redir_stats_sync_msg *ngss) +{ + int i, j; + uint32_t type; + struct net_device *dev; + struct nss_gre_redir_tun_stats *node_stats; + + type = nss_dynamic_interface_get_type(nss_ctx, if_num); + dev = nss_cmn_get_interface_dev(nss_ctx, if_num); + if (!dev) { + nss_warning("%px: Unable to find net device for the interface %d\n", nss_ctx, if_num); + return; + } + + if (!nss_gre_redir_verify_ifnum(if_num)) { + nss_warning("%px: Unknown type for interface %d\n", nss_ctx, if_num); + return; + } + + spin_lock_bh(&nss_gre_redir_stats_lock); + for (i = 0; i < NSS_GRE_REDIR_MAX_INTERFACES; i++) { + if (tun_stats[i].dev == dev) { + break; + } + } + + if (i == NSS_GRE_REDIR_MAX_INTERFACES) { + nss_warning("%px: Unable to find tunnel stats instance for interface %d\n", nss_ctx, if_num); + spin_unlock_bh(&nss_gre_redir_stats_lock); + return; + } + + nss_assert(tun_stats[i].ref_count); + node_stats = &tun_stats[i].tstats; + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_HOST_INNER: + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_OFFL_INNER: + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_SJACK_INNER: + node_stats->tx_packets += ngss->node_stats.tx_packets; + node_stats->tx_bytes += ngss->node_stats.tx_bytes; + node_stats->sjack_tx_packets += ngss->sjack_rx_packets; + node_stats->encap_sg_alloc_drop += ngss->encap_sg_alloc_drop; + node_stats->tx_dropped += nss_cmn_rx_dropped_sum(&(ngss->node_stats)); + for (j = 0; j < NSS_GRE_REDIR_MAX_RADIO; j++) { + node_stats->offl_tx_pkts[j] += ngss->offl_rx_pkts[j]; + } + + break; + + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_OUTER: + node_stats->rx_packets += ngss->node_stats.rx_packets; + node_stats->rx_bytes += ngss->node_stats.rx_bytes; + node_stats->sjack_rx_packets += ngss->sjack_rx_packets; + node_stats->decap_fail_drop += ngss->decap_fail_drop; + node_stats->decap_split_drop += ngss->decap_split_drop; + 
node_stats->split_sg_alloc_fail += ngss->split_sg_alloc_fail; + node_stats->split_linear_copy_fail += ngss->split_linear_copy_fail; + node_stats->split_not_enough_tailroom += ngss->split_not_enough_tailroom; + node_stats->decap_eapol_frames += ngss->decap_eapol_frames; + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + node_stats->rx_dropped[j] += ngss->node_stats.rx_dropped[j]; + } + + for (j = 0; j < NSS_GRE_REDIR_MAX_RADIO; j++) { + node_stats->offl_rx_pkts[j] += ngss->offl_rx_pkts[j]; + } + + break; + + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_US: + node_stats->exception_us_rx += ngss->node_stats.rx_packets; + node_stats->exception_us_tx += ngss->node_stats.tx_packets; + break; + + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_EXCEPTION_DS: + node_stats->exception_ds_rx += ngss->node_stats.rx_packets; + node_stats->exception_ds_tx += ngss->node_stats.tx_packets; + node_stats->exception_ds_invalid_dst_drop += ngss->exception_ds_invalid_dst_drop; + node_stats->exception_ds_inv_appid += ngss->exception_ds_inv_appid; + node_stats->headroom_unavail += ngss->headroom_unavail; + node_stats->tx_completion_success += ngss->tx_completion_success; + node_stats->tx_completion_drop += ngss->tx_completion_drop; + break; + } + + spin_unlock_bh(&nss_gre_redir_stats_lock); +} + +/* + * nss_gre_redir_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_gre_redir_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_gre_redir_stats_notification *stats_notify; + struct net_device *dev; + int i; + + stats_notify = kzalloc(sizeof(struct nss_gre_redir_stats_notification), GFP_ATOMIC); + if (!stats_notify) { + nss_warning("Unable to allocate memory for stats notification\n"); + return; + } + + dev = nss_cmn_get_interface_dev(nss_ctx, if_num); + if (!dev) { + nss_warning("%px: Unable to find net device for the interface %d\n", nss_ctx, if_num); + kfree(stats_notify); + return; + } + + if (!nss_gre_redir_verify_ifnum(if_num)) { + nss_warning("%px: Unknown type for interface %d\n", nss_ctx, if_num); + kfree(stats_notify); + return; + } + + spin_lock_bh(&nss_gre_redir_stats_lock); + for (i = 0; i < NSS_GRE_REDIR_MAX_INTERFACES; i++) { + if (tun_stats[i].dev == dev) { + break; + } + } + + if (i == NSS_GRE_REDIR_MAX_INTERFACES) { + nss_warning("%px: Unable to find tunnel stats instance for interface %d\n", nss_ctx, if_num); + spin_unlock_bh(&nss_gre_redir_stats_lock); + kfree(stats_notify); + return; + } + + stats_notify->core_id = nss_ctx->id; + stats_notify->if_num = if_num; + memcpy(&(stats_notify->stats_ctx), &(tun_stats[i]), sizeof(stats_notify->stats_ctx)); + spin_unlock_bh(&nss_gre_redir_stats_lock); + atomic_notifier_call_chain(&nss_gre_redir_stats_notifier, NSS_STATS_EVENT_NOTIFY, stats_notify); + kfree(stats_notify); +} + +/* + * nss_gre_redir_stats_unregister_notifier() + * Degisters statistics notifier. + */ +int nss_gre_redir_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_gre_redir_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_redir_stats_unregister_notifier); + +/* + * nss_gre_redir_stats_register_notifier() + * Registers statistics notifier. 
+ */ +int nss_gre_redir_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_gre_redir_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_redir_stats_register_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_stats.h new file mode 100644 index 000000000..28f8fae3e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_stats.h @@ -0,0 +1,30 @@ +/* + ****************************************************************************** + * Copyright (c) 2017-2019, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * **************************************************************************** + */ + +#ifndef __NSS_GRE_REDIR_STATS_H__ +#define __NSS_GRE_REDIR_STATS_H__ + +/* + * NSS GRE REDIR statistics APIs + */ +extern spinlock_t nss_gre_redir_stats_lock; +extern bool nss_gre_redir_verify_ifnum(uint32_t if_num); +extern void nss_gre_redir_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_gre_redir_stats_sync(struct nss_ctx_instance *nss_ctx, int if_num, + struct nss_gre_redir_stats_sync_msg *ngss); +extern struct dentry *nss_gre_redir_stats_dentry_create(void); + +#endif /* __NSS_GRE_REDIR_STATS_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_strings.c new file mode 100644 index 000000000..319be274e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_strings.c @@ -0,0 +1,87 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ *************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_gre_redir_strings.h" + +/* + * nss_gre_redir_strings_stats + * GRE redirect statistics string. + */ +struct nss_stats_info nss_gre_redir_strings_stats[NSS_GRE_REDIR_STATS_MAX] = { + {"RX Packets", NSS_STATS_TYPE_COMMON}, + {"RX Bytes", NSS_STATS_TYPE_COMMON}, + {"TX Packets", NSS_STATS_TYPE_COMMON}, + {"TX Bytes", NSS_STATS_TYPE_COMMON}, + {"RX Drops_[0]", NSS_STATS_TYPE_DROP}, + {"RX Drops_[1]", NSS_STATS_TYPE_DROP}, + {"RX Drops_[2]", NSS_STATS_TYPE_DROP}, + {"RX Drops_[3]", NSS_STATS_TYPE_DROP}, + {"TX Drops", NSS_STATS_TYPE_DROP}, + {"RX Sjack Packets", NSS_STATS_TYPE_SPECIAL}, + {"TX Sjack packets", NSS_STATS_TYPE_SPECIAL}, + {"RX Offload Packets_[0]", NSS_STATS_TYPE_SPECIAL}, + {"RX Offload Packets_[1]", NSS_STATS_TYPE_SPECIAL}, + {"RX Offload Packets_[2]", NSS_STATS_TYPE_SPECIAL}, + {"RX Offload Packets_[3]", NSS_STATS_TYPE_SPECIAL}, + {"RX Offload Packets_[4]", NSS_STATS_TYPE_SPECIAL}, + {"TX Offload Packets_[0]", NSS_STATS_TYPE_SPECIAL}, + {"TX Offload Packets_[1]", NSS_STATS_TYPE_SPECIAL}, + {"TX Offload Packets_[2]", NSS_STATS_TYPE_SPECIAL}, + {"TX Offload Packets_[3]", NSS_STATS_TYPE_SPECIAL}, + {"TX Offload Packets_[4]", NSS_STATS_TYPE_SPECIAL}, + {"US exception RX Packets", NSS_STATS_TYPE_EXCEPTION}, + {"US exception TX Packets", NSS_STATS_TYPE_EXCEPTION}, + {"DS exception RX Packets", NSS_STATS_TYPE_EXCEPTION}, + {"DS exception TX Packets", NSS_STATS_TYPE_EXCEPTION}, + {"Encap SG alloc drop", NSS_STATS_TYPE_DROP}, + {"Decap fail drop", NSS_STATS_TYPE_DROP}, + {"Decap split drop", NSS_STATS_TYPE_SPECIAL}, + {"Split SG alloc fail", NSS_STATS_TYPE_SPECIAL}, + {"Split linear copy fail", NSS_STATS_TYPE_SPECIAL}, + {"Split not enough tailroom", NSS_STATS_TYPE_EXCEPTION}, + {"Exception ds invalid dst", NSS_STATS_TYPE_SPECIAL}, + {"Decap eapol frames", NSS_STATS_TYPE_SPECIAL}, + 
{"Exception ds invalid appid", NSS_STATS_TYPE_EXCEPTION}, + {"Headroom Unavailable", NSS_STATS_TYPE_EXCEPTION}, + {"Exception ds Tx completion Success", NSS_STATS_TYPE_SPECIAL}, + {"Exception ds Tx completion drop", NSS_STATS_TYPE_DROP} +}; + +/* + * nss_gre_redir_strings_read() + * Read GRE redirect statistics names. + */ +static ssize_t nss_gre_redir_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_gre_redir_strings_stats, NSS_GRE_REDIR_STATS_MAX); +} + +/* + * nss_gre_redir_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(gre_redir); + +/* + * nss_gre_redir_strings_dentry_create() + * Create GRE redirect statistics strings debug entry. + */ +void nss_gre_redir_strings_dentry_create(void) +{ + nss_strings_create_dentry("gre_redir", &nss_gre_redir_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_strings.h new file mode 100644 index 000000000..b0f0ba340 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_redir_strings.h @@ -0,0 +1,25 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#ifndef __NSS_GRE_REDIR_STRINGS_H +#define __NSS_GRE_REDIR_STRINGS_H + +#include "nss_gre_redir_stats.h" + +extern struct nss_stats_info nss_gre_redir_strings_stats[NSS_GRE_REDIR_STATS_MAX]; +extern void nss_gre_redir_strings_dentry_create(void); + +#endif /* __NSS_GRE_REDIR_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_stats.c new file mode 100644 index 000000000..3808e5e93 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_stats.c @@ -0,0 +1,338 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_gre_stats.c + * NSS GRE statistics APIs + * + */ + +#include "nss_tx_rx_common.h" +#include "nss_gre.h" +#include "nss_gre_stats.h" +#include "nss_gre_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_gre_stats_notifier); + +/* + * Data structures to store GRE nss debug stats + */ +static DEFINE_SPINLOCK(nss_gre_stats_lock); +static struct nss_gre_stats_session session_stats[NSS_GRE_MAX_DEBUG_SESSION_STATS]; +static struct nss_gre_stats_base base_stats; + +/* + * GRE statistics APIs + */ + +/* + * nss_gre_stats_session_unregister() + * Unregister debug statistic for GRE session. + */ +void nss_gre_stats_session_unregister(uint32_t if_num) +{ + int i; + + spin_lock_bh(&nss_gre_stats_lock); + for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) { + if (session_stats[i].if_num == if_num) { + memset(&session_stats[i], 0, sizeof(struct nss_gre_stats_session)); + break; + } + } + spin_unlock_bh(&nss_gre_stats_lock); +} + +/* + * nss_gre_stats_session_register() + * Register debug statistic for GRE session. + */ +void nss_gre_stats_session_register(uint32_t if_num, struct net_device *netdev) +{ + int i; + + spin_lock_bh(&nss_gre_stats_lock); + for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) { + if (!session_stats[i].valid) { + session_stats[i].valid = true; + session_stats[i].if_num = if_num; + session_stats[i].if_index = netdev->ifindex; + break; + } + } + spin_unlock_bh(&nss_gre_stats_lock); +} + +/* + * nss_gre_stats_session_sync() + * debug statistics sync for GRE session. 
+ */ +void nss_gre_stats_session_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_session_stats_msg *sstats, uint16_t if_num) +{ + int i, j; + enum nss_dynamic_interface_type interface_type = nss_dynamic_interface_get_type(nss_ctx, if_num); + + spin_lock_bh(&nss_gre_stats_lock); + for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) { + if (session_stats[i].if_num == if_num) { + for (j = 0; j < NSS_GRE_SESSION_DEBUG_MAX; j++) { + session_stats[i].stats[j] += sstats->stats[j]; + } + + if (interface_type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_INNER) { + session_stats[i].stats[NSS_GRE_SESSION_ENCAP_RX_RECEIVED] += sstats->node_stats.rx_packets; + } else if (interface_type == NSS_DYNAMIC_INTERFACE_TYPE_GRE_OUTER) { + session_stats[i].stats[NSS_GRE_SESSION_DECAP_TX_FORWARDED] += sstats->node_stats.tx_packets; + } + break; + } + } + spin_unlock_bh(&nss_gre_stats_lock); +} + +/* + * nss_gre_stats_base_sync() + * Debug statistics sync for GRE base node. + */ +void nss_gre_stats_base_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_base_stats_msg *bstats) +{ + int i; + + spin_lock_bh(&nss_gre_stats_lock); + for (i = 0; i < NSS_GRE_BASE_DEBUG_MAX; i++) { + base_stats.stats[i] += bstats->stats[i]; + } + spin_unlock_bh(&nss_gre_stats_lock); +} + +/* + * nss_gre_stats_session_get() + * Get GRE session debug statistics. + */ +static void nss_gre_stats_session_get(void *stats_mem, int size) +{ + struct nss_gre_stats_session *stats = (struct nss_gre_stats_session *)stats_mem; + int i; + + if (!stats || (size < (sizeof(struct nss_gre_stats_session) * NSS_GRE_MAX_DEBUG_SESSION_STATS))) { + nss_warning("No memory to copy gre stats"); + return; + } + + spin_lock_bh(&nss_gre_stats_lock); + for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) { + if (session_stats[i].valid) { + memcpy(stats, &session_stats[i], sizeof(struct nss_gre_stats_session)); + stats++; + } + } + spin_unlock_bh(&nss_gre_stats_lock); +} + +/* + * nss_gre_stats_base_get() + * Get GRE debug base statistics. 
+ */ +static void nss_gre_stats_base_get(void *stats_mem, int size) +{ + struct nss_gre_stats_base *stats = (struct nss_gre_stats_base *)stats_mem; + + if (!stats) { + nss_warning("No memory to copy GRE base stats\n"); + return; + } + + if (size < sizeof(struct nss_gre_stats_base)) { + nss_warning("Not enough memory to copy GRE base stats\n"); + return; + } + + spin_lock_bh(&nss_gre_stats_lock); + memcpy(stats, &base_stats, sizeof(struct nss_gre_stats_base)); + spin_unlock_bh(&nss_gre_stats_lock); +} + +/* + * nss_gre_stats_read() + * Read GRE statistics + */ +static ssize_t nss_gre_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + uint32_t max_output_lines = 2 /* header & footer for base debug stats */ + + 2 /* header & footer for session debug stats */ + + NSS_GRE_BASE_DEBUG_MAX /* Base debug */ + + NSS_GRE_MAX_DEBUG_SESSION_STATS * (NSS_GRE_SESSION_DEBUG_MAX + 2) /*session stats */ + + 2; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct net_device *dev; + struct nss_gre_stats_session *sstats; + struct nss_gre_stats_base *bstats; + int id; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + bstats = kzalloc(sizeof(struct nss_gre_stats_base), GFP_KERNEL); + if (unlikely(!bstats)) { + nss_warning("Could not allocate memory for base debug statistics buffer"); + kfree(lbuf); + return 0; + } + + sstats = kzalloc(sizeof(struct nss_gre_stats_session) * NSS_GRE_MAX_DEBUG_SESSION_STATS, GFP_KERNEL); + if (unlikely(!sstats)) { + nss_warning("Could not allocate memory for base debug statistics buffer"); + kfree(lbuf); + kfree(bstats); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "gre", NSS_STATS_SINGLE_CORE); + + /* + * Get all base stats + */ + nss_gre_stats_base_get((void *)bstats, sizeof(struct nss_gre_stats_base)); + + size_wr += 
nss_stats_print("gre", NULL, NSS_STATS_SINGLE_INSTANCE + , nss_gre_strings_base_stats + , bstats->stats + , NSS_GRE_BASE_DEBUG_MAX + , lbuf, size_wr, size_al); + + /* + * Get all session stats + */ + nss_gre_stats_session_get(sstats, sizeof(struct nss_gre_stats_session) * NSS_GRE_MAX_DEBUG_SESSION_STATS); + + for (id = 0; id < NSS_GRE_MAX_DEBUG_SESSION_STATS; id++) { + + if (!((sstats + id)->valid)) { + continue; + } + + dev = dev_get_by_index(&init_net, (sstats + id)->if_index); + if (likely(dev)) { + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id, + (sstats + id)->if_num, dev->name); + dev_put(dev); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id, + (sstats + id)->if_num); + } + size_wr += nss_stats_print("gre_session", NULL, id + , nss_gre_strings_session_stats + , (sstats + id)->stats + , NSS_GRE_SESSION_DEBUG_MAX + , lbuf, size_wr, size_al); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(sstats); + kfree(bstats); + kfree(lbuf); + return bytes_read; +} + +/* + * nss_gre_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(gre) + +/* + * nss_gre_stats_dentry_create() + * Create gre statistics debug entry. + */ +void nss_gre_stats_dentry_create(void) +{ + nss_stats_create_dentry("gre", &nss_gre_stats_ops); +} + +/* + * nss_gre_stats_base_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_gre_stats_base_notify(struct nss_ctx_instance *nss_ctx) +{ + struct nss_gre_base_stats_notification gre_stats; + + spin_lock_bh(&nss_gre_stats_lock); + gre_stats.core_id = nss_ctx->id; + memcpy(gre_stats.stats_base_ctx, base_stats.stats, sizeof(gre_stats.stats_base_ctx)); + spin_unlock_bh(&nss_gre_stats_lock); + + atomic_notifier_call_chain(&nss_gre_stats_notifier, NSS_STATS_EVENT_NOTIFY, &gre_stats); +} + +/* + * nss_gre_stats_session_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_gre_stats_session_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_gre_session_stats_notification gre_stats; + int i; + + spin_lock_bh(&nss_gre_stats_lock); + for (i = 0; i < NSS_GRE_MAX_DEBUG_SESSION_STATS; i++) { + if (session_stats[i].if_num != if_num) { + continue; + } + + memcpy(gre_stats.stats_session_ctx, session_stats[i].stats, sizeof(gre_stats.stats_session_ctx)); + gre_stats.core_id = nss_ctx->id; + gre_stats.if_num = if_num; + spin_unlock_bh(&nss_gre_stats_lock); + atomic_notifier_call_chain(&nss_gre_stats_notifier, NSS_STATS_EVENT_NOTIFY, &gre_stats); + return; + } + spin_unlock_bh(&nss_gre_stats_lock); +} + +/* + * nss_gre_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_gre_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_gre_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_stats_unregister_notifier); + +/* + * nss_gre_stats_register_notifier() + * Registers statistics notifier. 
+ */ +int nss_gre_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_gre_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_stats_register_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_stats.h new file mode 100644 index 000000000..7feb1d679 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_stats.h @@ -0,0 +1,55 @@ +/* + ************************************************************************** + * Copyright (c) 2017, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_gre_stats.h + * NSS GRE statistics header file. + */ + +#ifndef __NSS_GRE_STATS_H +#define __NSS_GRE_STATS_H + +#include + +/* + * GRE base debug statistics + */ +struct nss_gre_stats_base { + uint64_t stats[NSS_GRE_BASE_DEBUG_MAX]; /**< GRE debug statistics. */ +}; + +/* + * GRE session debug statistics + */ +struct nss_gre_stats_session { + uint64_t stats[NSS_GRE_SESSION_DEBUG_MAX]; /**< Session debug statistics. */ + int32_t if_index; /**< Netdevice's ifindex. */ + uint32_t if_num; /**< NSS interface number. */ + bool valid; /**< Is node valid ? 
*/ +}; + +/* + * GRE statistics APIs + */ +extern void nss_gre_stats_base_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_gre_stats_session_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_gre_stats_session_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_session_stats_msg *sstats, uint16_t if_num); +extern void nss_gre_stats_base_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_base_stats_msg *bstats); +extern void nss_gre_stats_session_register(uint32_t if_num, struct net_device *netdev); +extern void nss_gre_stats_session_unregister(uint32_t if_num); +extern void nss_gre_stats_dentry_create(void); + +#endif /* __NSS_GRE_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_strings.c new file mode 100644 index 000000000..26c652d75 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_strings.c @@ -0,0 +1,124 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ *************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_gre_strings.h" + +/* + * nss_gre_strings_base_stats + * GRE debug statistics strings for base types + */ +struct nss_stats_info nss_gre_strings_base_stats[NSS_GRE_BASE_DEBUG_MAX] = { + {"base_rx_pkts", NSS_STATS_TYPE_COMMON}, + {"base_rx_drops", NSS_STATS_TYPE_DROP}, + {"base_exp_eth_hdr_missing", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_eth_type_non_ip", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_ip_unknown_protocol", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_ip_header_incomplete", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_ip_bad_total_length", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_ip_bad_checksum", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_ip_datagram_incomplete", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_ip_fragment", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_ip_options_incomplete", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_ip_with_options", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_ipv6_unknown_protocol", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_ipv6_header_incomplete", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_unknown_session", NSS_STATS_TYPE_EXCEPTION}, + {"base_exp_node_inactive", NSS_STATS_TYPE_EXCEPTION} +}; + +/* + * nss_gre_base_strings_read() + * Read GRE base debug statistics names + */ +static ssize_t nss_gre_base_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_gre_strings_base_stats, NSS_GRE_BASE_DEBUG_MAX); +} + +/* + * nss_gre_base_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(gre_base); + +/* + * nss_gre_strings_session_stats + * GRE debug statistics strings for sessions + */ +struct nss_stats_info nss_gre_strings_session_stats[NSS_GRE_SESSION_DEBUG_MAX] = { + {"session_pbuf_alloc_fail", NSS_STATS_TYPE_ERROR}, + {"session_decap_forward_enqueue_fail", NSS_STATS_TYPE_DROP}, + {"session_encap_forward_enqueue_fail", 
NSS_STATS_TYPE_DROP}, + {"session_decap_tx_forwarded", NSS_STATS_TYPE_SPECIAL}, + {"session_encap_rx_received", NSS_STATS_TYPE_SPECIAL}, + {"session_encap_rx_drops", NSS_STATS_TYPE_DROP}, + {"session_encap_rx_linear_fail", NSS_STATS_TYPE_DROP}, + {"session_exp_rx_key_error", NSS_STATS_TYPE_EXCEPTION}, + {"session_exp_rx_seq_error", NSS_STATS_TYPE_EXCEPTION}, + {"session_exp_rx_cs_error", NSS_STATS_TYPE_EXCEPTION}, + {"session_exp_rx_flag_mismatch", NSS_STATS_TYPE_EXCEPTION}, + {"session_exp_rx_malformed", NSS_STATS_TYPE_EXCEPTION}, + {"session_exp_rx_invalid_protocol", NSS_STATS_TYPE_EXCEPTION}, + {"session_exp_rx_no_headroom", NSS_STATS_TYPE_EXCEPTION} +}; + +/* + * nss_gre_session_strings_read() + * Read GRE session debug statistics names + */ +static ssize_t nss_gre_session_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_gre_strings_session_stats, NSS_GRE_SESSION_DEBUG_MAX); +} + +/* + * nss_gre_session_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(gre_session); + +/* + * nss_gre_strings_dentry_create() + * Create gre statistics strings debug entry. 
+ */ +void nss_gre_strings_dentry_create(void) +{ + struct dentry *gre_d = NULL; + + if (!nss_top_main.strings_dentry) { + nss_warning("qca-nss-drv/strings is not present"); + return; + } + + gre_d = debugfs_create_dir("gre", nss_top_main.strings_dentry); + if (!gre_d) { + nss_warning("Failed to create qca-nss-drv/strings/gre directory"); + return; + } + + if (!debugfs_create_file("gre_base", 0400, gre_d, &nss_top_main, &nss_gre_base_strings_ops)) { + nss_warning("Failed to create qca-nss-drv/strings/gre/gre_base file"); + debugfs_remove_recursive(gre_d); + return; + } + + if (!debugfs_create_file("gre_session", 0400, gre_d, &nss_top_main, &nss_gre_session_strings_ops)) { + nss_warning("Failed to create qca-nss-drv/strings/gre/gre_session file"); + debugfs_remove_recursive(gre_d); + return; + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_strings.h new file mode 100644 index 000000000..e8a421fce --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_strings.h @@ -0,0 +1,26 @@ +/* + **************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ **************************************************************************** + */ + +#ifndef __NSS_GRE_STRINGS_H +#define __NSS_GRE_STRINGS_H + +#include "nss_gre_stats.h" + +extern struct nss_stats_info nss_gre_strings_base_stats[NSS_GRE_BASE_DEBUG_MAX]; +extern struct nss_stats_info nss_gre_strings_session_stats[NSS_GRE_SESSION_DEBUG_MAX]; +extern void nss_gre_strings_dentry_create(void); + +#endif /* __NSS_GRE_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel.c new file mode 100644 index 000000000..1e9a22a72 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel.c @@ -0,0 +1,395 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_gre_tunnel_stats.h" +#include "nss_gre_tunnel_log.h" +#include "nss_gre_tunnel_strings.h" + +#define NSS_GRE_TUNNEL_TX_TIMEOUT 3000 /* 3 Seconds */ + +/* + * Private data structure + */ +static struct nss_gre_tunnel_pvt { + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; +} gre_tunnel_pvt; + +/* + * nss_gre_tunnel_verify_if_num() + * Verify if_num passed to us. + */ +static bool nss_gre_tunnel_verify_if_num(uint32_t if_num) +{ + uint32_t type = nss_dynamic_interface_get_type(nss_gre_tunnel_get_ctx(), if_num); + + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INNER: + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INLINE_INNER: + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_OUTER: + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INLINE_OUTER: + case NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INNER_EXCEPTION: + return true; + default: + return false; + } +} + +/* + * nss_gre_tunnel_handler() + * Handle NSS to HLOS messages for gre_tunnel + */ +static void nss_gre_tunnel_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_gre_tunnel_msg *ngtm = (struct nss_gre_tunnel_msg *)ncm; + void *ctx; + + nss_gre_tunnel_msg_callback_t cb; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + BUG_ON(!nss_gre_tunnel_verify_if_num(ncm->interface)); + + /* + * Trace Messages + */ + nss_gre_tunnel_log_rx_msg(ngtm); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_GRE_TUNNEL_MSG_MAX) { + nss_warning("%px: received invalid message %d for GRE_TUNNEL interface %d", nss_ctx, ncm->type, ncm->interface); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_gre_tunnel_msg)) { + nss_warning("%px: gre_tunnel message length is invalid: %d", nss_ctx, ncm->len); + return; + } + + /* + * Check messages + */ + switch (ngtm->cm.type) { + case NSS_GRE_TUNNEL_MSG_STATS: + nss_gre_tunnel_stats_session_sync(nss_ctx, &ngtm->msg.stats, ncm->interface); + nss_gre_tunnel_stats_notify(nss_ctx, ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->gre_tunnel_msg_callback; + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].app_data; + } + + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * callback + */ + cb = (nss_gre_tunnel_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + /* + * call GRE Tunnel session callback + */ + if (!cb) { + return; + } + + cb(ctx, ngtm); +} + +/* + * nss_gre_tunnel_inquiry() + * Inquiry if a GRE tunnel has been established in NSS FW. + * + * Input parameters: + * inquiry_info->ip_type + * inquiry_info->src_ip + * inquiry_info->dest_ip + * inquiry_info->gre_mode + * if (gre_mode == NSS_GRE_TUNNEL_MODE_GRE_UDP) + * inquiry_info->src_port + * inquiry_info->dest_port + * inquiry_info->encrypt_type -- currently not checked in FW, + */ +nss_tx_status_t nss_gre_tunnel_inquiry( + struct nss_gre_tunnel_configure *inquiry_info, + nss_gre_tunnel_msg_callback_t cb, void *app_data) +{ + nss_tx_status_t nss_tx_status; + struct nss_gre_tunnel_msg nim; + struct nss_ctx_instance *nss_ctx = nss_gre_tunnel_get_ctx(); + + /* + * Initialize inquiry message structure. + * This is async message and the result will be returned + * to the caller by the msg_callback passed in. 
+ */ + memset(&nim, 0, sizeof(nim)); + nss_gre_tunnel_msg_init(&nim, NSS_GRE_TUNNEL_INTERFACE, + NSS_GRE_TUNNEL_MSG_INQUIRY, + sizeof(struct nss_gre_tunnel_configure), + cb, app_data); + nim.msg.configure = *inquiry_info; + nss_tx_status = nss_gre_tunnel_tx_msg(nss_ctx, &nim); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: Send GT inquiry message failed\n", inquiry_info); + } + + return nss_tx_status; +} +EXPORT_SYMBOL(nss_gre_tunnel_inquiry); + +/* + * nss_get_gre_tunnel_context() + * Return the core ctx which the feature is on + */ +struct nss_ctx_instance *nss_gre_tunnel_get_ctx(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.gre_tunnel_handler_id]; +} +EXPORT_SYMBOL(nss_gre_tunnel_get_ctx); + +/* + * nss_gre_tunnel_ifnum_with_core_id() + * Append core id to GRE tunnel interface num + */ +int nss_gre_tunnel_ifnum_with_core_id(int if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_gre_tunnel_get_ctx(); + BUG_ON(!nss_gre_tunnel_verify_if_num(if_num)); + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (nss_is_dynamic_interface(if_num) == false) { + nss_info("%px: Invalid if_num: %d, must be a dynamic interface\n", nss_ctx, if_num); + return 0; + } + + return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_gre_tunnel_ifnum_with_core_id); + +/* + * nss_gre_tunnel_tx_buf() + * Transmit buffer over GRE Tunnel interface + */ +nss_tx_status_t nss_gre_tunnel_tx_buf(struct sk_buff *skb, uint32_t if_num, + struct nss_ctx_instance *nss_ctx) +{ + BUG_ON(!nss_gre_tunnel_verify_if_num(if_num)); + + return nss_core_send_packet(nss_ctx, skb, if_num, H2N_BIT_FLAG_VIRTUAL_BUFFER | H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_gre_tunnel_tx_buf); + +/* + * nss_gre_tunnel_tx_msg() + * Transmit a gre_tunnel message to NSS firmware + */ +nss_tx_status_t nss_gre_tunnel_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_gre_tunnel_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace Messages + */ + 
nss_gre_tunnel_log_tx_msg(msg); + + /* + * Sanity check message + */ + if (ncm->type >= NSS_GRE_TUNNEL_MSG_MAX) { + nss_warning("%px: gre_tunnel message type out of range: %d", + nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + BUG_ON(!nss_gre_tunnel_verify_if_num(ncm->interface)); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_gre_tunnel_tx_msg); + +/* + * nss_gre_tunnel_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_gre_tunnel_callback(void *app_data, struct nss_gre_tunnel_msg *ngtm) +{ + nss_gre_tunnel_msg_callback_t callback = (nss_gre_tunnel_msg_callback_t)gre_tunnel_pvt.cb; + void *data = gre_tunnel_pvt.app_data; + + gre_tunnel_pvt.response = NSS_TX_SUCCESS; + gre_tunnel_pvt.cb = NULL; + gre_tunnel_pvt.app_data = NULL; + + if (ngtm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("gre tunnel Error response %d\n", ngtm->cm.response); + gre_tunnel_pvt.response = ngtm->cm.response; + } + + if (callback) { + callback(data, ngtm); + } + complete(&gre_tunnel_pvt.complete); +} + +/* + * nss_gre_tunnel_tx_msg() + * Transmit a GRE Tunnel message to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_gre_tunnel_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_tunnel_msg *ngtm) +{ + nss_tx_status_t status; + int ret = 0; + + down(&gre_tunnel_pvt.sem); + gre_tunnel_pvt.cb = (void *)ngtm->cm.cb; + gre_tunnel_pvt.app_data = (void *)ngtm->cm.app_data; + + ngtm->cm.cb = (nss_ptr_t)nss_gre_tunnel_callback; + ngtm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_gre_tunnel_tx_msg(nss_ctx, ngtm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: gre_tunnel_tx_msg failed\n", nss_ctx); + up(&gre_tunnel_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&gre_tunnel_pvt.complete, msecs_to_jiffies(NSS_GRE_TUNNEL_TX_TIMEOUT)); + + if (!ret) { + nss_warning("%px: GRE Tunnel msg tx failed due to timeout\n", nss_ctx); + gre_tunnel_pvt.response = NSS_TX_FAILURE; + } + + status = gre_tunnel_pvt.response; + up(&gre_tunnel_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_gre_tunnel_tx_msg_sync); + +/* + * nss_gre_tunnel_msg_init() + * Initialize gre_tunnel msg. 
+ */ +void nss_gre_tunnel_msg_init(struct nss_gre_tunnel_msg *ngtm, uint16_t if_num, + uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ngtm->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_gre_tunnel_msg_init); + +/* + * nss_gre_tunnel_register_if() + * Register netdev + */ +struct nss_ctx_instance *nss_gre_tunnel_register_if(uint32_t if_num, + nss_gre_tunnel_data_callback_t cb, + nss_gre_tunnel_msg_callback_t ev_cb, + struct net_device *netdev, + uint32_t features, + void *app_ctx) +{ + int32_t i; + + struct nss_ctx_instance *nss_ctx = nss_gre_tunnel_get_ctx(); + + BUG_ON(!nss_gre_tunnel_verify_if_num(if_num)); + + spin_lock_bh(&nss_gre_tunnel_stats_lock); + for (i = 0; i < NSS_MAX_GRE_TUNNEL_SESSIONS; i++) { + if (!session_stats[i].valid) { + session_stats[i].valid = true; + session_stats[i].if_num = if_num; + session_stats[i].if_index = netdev->ifindex; + break; + } + } + spin_unlock_bh(&nss_gre_tunnel_stats_lock); + + if (i == NSS_MAX_GRE_TUNNEL_SESSIONS) { + nss_warning("%px: Cannot find free slot for GRE Tunnel session stats, I/F:%u\n", nss_ctx, if_num); + return NULL; + } + + if (nss_ctx->subsys_dp_register[if_num].ndev) { + nss_warning("%px: Cannot find free slot for GRE Tunnel NSS I/F:%u\n", nss_ctx, if_num); + session_stats[i].valid = false; + session_stats[i].if_num = 0; + session_stats[i].if_index = 0; + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, cb, NULL, app_ctx, netdev, features); + + nss_top_main.gre_tunnel_msg_callback = ev_cb; + nss_core_register_handler(nss_ctx, if_num, nss_gre_tunnel_handler, app_ctx); + nss_gre_tunnel_stats_dentry_create(); + nss_gre_tunnel_strings_dentry_create(); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_gre_tunnel_register_if); + +/* + * nss_gre_tunnel_unregister_if() + * Unregister netdev + */ +void nss_gre_tunnel_unregister_if(uint32_t if_num) +{ + int32_t i; + struct nss_ctx_instance *nss_ctx = nss_gre_tunnel_get_ctx(); + + 
BUG_ON(!nss_gre_tunnel_verify_if_num(if_num)); + + spin_lock_bh(&nss_gre_tunnel_stats_lock); + for (i = 0; i < NSS_MAX_GRE_TUNNEL_SESSIONS; i++) { + if (session_stats[i].if_num == if_num) { + memset(&session_stats[i], 0, + sizeof(struct nss_gre_tunnel_stats_session)); + break; + } + } + spin_unlock_bh(&nss_gre_tunnel_stats_lock); + + if (i == NSS_MAX_GRE_TUNNEL_SESSIONS) { + nss_warning("%px: Cannot find debug stats for GRE Tunnel session: %d\n", nss_ctx, if_num); + return; + } + + if (!nss_ctx->subsys_dp_register[if_num].ndev) { + nss_warning("%px: Cannot find registered netdev for GRE Tunnel NSS I/F: %d\n", nss_ctx, if_num); + + return; + } + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_top_main.gre_tunnel_msg_callback = NULL; + nss_core_unregister_handler(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_gre_tunnel_unregister_if); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_log.c new file mode 100644 index 000000000..6af7b56b9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_log.c @@ -0,0 +1,168 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_gre_tunnel_log.c + * NSS GRE Tunnel logger file. + */ + +#include "nss_core.h" + +/* + * nss_gre_tunnel_log_message_types_str + * NSS GRE Tunnel message strings + */ +static int8_t *nss_gre_tunnel_log_message_types_str[NSS_GRE_TUNNEL_MSG_MAX] __maybe_unused = { + "GRE Tunnel configure", + "GRE Tunnel session destroy", + "GRE Tunnel stats", + "GRE Tunnel configure DI to WLAN ID", + "GRE Tunnel message inquiry" +}; + +/* + * nss_gre_tunnel_log_configure_msg() + * Log NSS GRE Tunnel configure message. + */ +static void nss_gre_tunnel_log_configure_msg(struct nss_gre_tunnel_msg *ngm) +{ + struct nss_gre_tunnel_configure *ngcm __maybe_unused = &ngm->msg.configure; + nss_trace("%px: NSS GRE Tunnel configure message \n" + "Meta Header Version: %d\n" + "GRE Mode: %x\n" + "IP Type: %x\n" + "Encryption Type: %d\n" + "Source Port: %d\n" + "Destination Port: %d\n" + "Crypto Node Identifier: %d\n" + "Encryption Crypto Index: %d\n" + "Decryption Crypto Index: %d\n" + "Word0 header: %d\n" + "Initialization Vector: %px\n" + "Sibling Interface Number: %d\n" + "TTL: %d\n" + "RPS: %d\n" + "Reserved: %x\n" + "Word1 Header: %x\n" + "Word2 Header: %x\n" + "Word3 Header: %x\n", + ngcm, ngcm->mh_version, ngcm->gre_mode, + ngcm->ip_type, ngcm->encrypt_type, + ngcm->src_port, ngcm->dest_port, + ngcm->crypto_node_id, ngcm->crypto_idx_encrypt, + ngcm->crypto_idx_decrypt, ngcm->word0, + ngcm->iv_val, ngcm->sibling_if, + ngcm->ttl, ngcm->rps, + ngcm->reserved, ngcm->word1, + ngcm->word2, ngcm->word3); + + /* + * Continuation of log message. 
Different identifiers based on ip_type + */ + if (ngcm->ip_type == NSS_GRE_TUNNEL_IP_IPV6) { + nss_trace("Source IP: %pI6\n" + "Destination IP: %pI6\n", + ngcm->src_ip, ngcm->dest_ip); + } else if (ngcm->ip_type == NSS_GRE_TUNNEL_IP_IPV4) { + nss_trace("Source IP: %pI4\n" + "Destination IP: %pI4\n", + ngcm->src_ip, ngcm->dest_ip); + } +} + +/* + * nss_gre_tunnel_log_di_to_wlan_id_msg() + * Log NSS GRE Tunnel Dynamic Interface to WLAN ID message. + */ +static void nss_gre_tunnel_log_di_to_wlan_id_msg(struct nss_gre_tunnel_msg *ngm) +{ + struct nss_gre_tunnel_di_to_wlan_id *ngdm __maybe_unused = &ngm->msg.dtwi; + nss_trace("%px: NSS GRE Dynamic Interface to WLAN ID message: \n" + "Dynamic Interface Number: %d\n" + "WLAN ID: %x\n", + ngdm, ngdm->dynamic_interface_num, + ngdm->wlan_id); +} + +/* + * nss_gre_tunnel_log_verbose() + * Log message contents. + */ +static void nss_gre_tunnel_log_verbose(struct nss_gre_tunnel_msg *ngm) +{ + switch (ngm->cm.type) { + case NSS_GRE_TUNNEL_MSG_CONFIGURE: + case NSS_GRE_TUNNEL_MSG_INQUIRY: + nss_gre_tunnel_log_configure_msg(ngm); + break; + + case NSS_GRE_TUNNEL_MSG_CONFIGURE_DI_TO_WLAN_ID: + nss_gre_tunnel_log_di_to_wlan_id_msg(ngm); + break; + + case NSS_GRE_TUNNEL_MSG_SESSION_DESTROY: + case NSS_GRE_TUNNEL_MSG_STATS: + /* + * No log for these valid messages. + */ + break; + + default: + nss_trace("%px: Invalid message type\n", ngm); + break; + } +} + +/* + * nss_gre_tunnel_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_gre_tunnel_log_tx_msg(struct nss_gre_tunnel_msg *ngm) +{ + if (ngm->cm.type >= NSS_GRE_TUNNEL_MSG_MAX) { + nss_warning("%px: Invalid message type\n", ngm); + return; + } + + nss_info("%px: type[%d]:%s\n", ngm, ngm->cm.type, nss_gre_tunnel_log_message_types_str[ngm->cm.type]); + nss_gre_tunnel_log_verbose(ngm); +} + +/* + * nss_gre_tunnel_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_gre_tunnel_log_rx_msg(struct nss_gre_tunnel_msg *ngm) +{ + if (ngm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ngm); + return; + } + + if (ngm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ngm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ngm, ngm->cm.type, + nss_gre_tunnel_log_message_types_str[ngm->cm.type], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + ngm, ngm->cm.type, nss_gre_tunnel_log_message_types_str[ngm->cm.type], + ngm->cm.response, nss_cmn_response_str[ngm->cm.response]); + +verbose: + nss_gre_tunnel_log_verbose(ngm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_log.h new file mode 100644 index 000000000..be0751301 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_GRE_TUNNEL_LOG_H +#define __NSS_GRE_TUNNEL_LOG_H + +/* + * nss_gre_tunnel.h + * NSS GRE Tunnel header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_gre_tunnel_log_tx_msg + * Logs a gre_tunnel message that is sent to the NSS firmware. + */ +void nss_gre_tunnel_log_tx_msg(struct nss_gre_tunnel_msg *ngm); + +/* + * nss_gre_tunnel_log_rx_msg + * Logs a gre_tunnel message that is received from the NSS firmware. + */ +void nss_gre_tunnel_log_rx_msg(struct nss_gre_tunnel_msg *ngm); + +#endif /* __NSS_GRE_TUNNEL_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_stats.c new file mode 100644 index 000000000..c3e53bf85 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_stats.c @@ -0,0 +1,282 @@ +/* + **************************************************************************** + * Copyright (c) 2017, 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ **************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_gre_tunnel.h" +#include "nss_gre_tunnel_stats.h" +#include "nss_gre_tunnel_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_gre_tunnel_stats_notifier); + +/* + * Spinlock to protect gre tunnel statistics update/read + */ +DEFINE_SPINLOCK(nss_gre_tunnel_stats_lock); + +struct nss_gre_tunnel_stats_session session_stats[NSS_MAX_GRE_TUNNEL_SESSIONS]; + +/* + * nss_gre_tunnel_stats_session_sync() + * Sync function for GRE Tunnel statistics + */ +void nss_gre_tunnel_stats_session_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_tunnel_stats *stats_msg, + uint16_t if_num) +{ + int i; + struct nss_gre_tunnel_stats_session *s = NULL; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + spin_lock_bh(&nss_gre_tunnel_stats_lock); + for (i = 0; i < NSS_MAX_GRE_TUNNEL_SESSIONS; i++) { + if (session_stats[i].if_num == if_num) { + s = &session_stats[i]; + break; + } + } + + if (!s) { + spin_unlock_bh(&nss_gre_tunnel_stats_lock); + nss_warning("%px: Session not found: %u", nss_ctx, if_num); + return; + } + + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_PKTS] += stats_msg->node_stats.rx_packets; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_TX_PKTS] += stats_msg->node_stats.tx_packets; + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_QUEUE_0_DROPPED + i] += stats_msg->node_stats.rx_dropped[i]; + } + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_MALFORMED] += stats_msg->rx_malformed; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_INVALID_PROT] += stats_msg->rx_invalid_prot; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_DECAP_QUEUE_FULL] += stats_msg->decap_queue_full; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_SINGLE_REC_DGRAM] += stats_msg->rx_single_rec_dgram; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_INVALID_REC_DGRAM] += stats_msg->rx_invalid_rec_dgram; + 
s->stats[NSS_GRE_TUNNEL_STATS_SESSION_BUFFER_ALLOC_FAIL] += stats_msg->buffer_alloc_fail; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_BUFFER_COPY_FAIL] += stats_msg->buffer_copy_fail; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_OUTFLOW_QUEUE_FULL] += stats_msg->outflow_queue_full; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_DROPPED_HROOM] += stats_msg->rx_dropped_hroom; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_CBUFFER_ALLOC_FAIL] += stats_msg->rx_cbuf_alloc_fail; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_CENQUEUE_FAIL] += stats_msg->rx_cenqueue_fail; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_DECRYPT_DONE] += stats_msg->rx_decrypt_done; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_FORWARD_ENQUEUE_FAIL] += stats_msg->rx_forward_enqueue_fail; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_TX_CBUFFER_ALLOC_FAIL] += stats_msg->tx_cbuf_alloc_fail; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_TX_CENQUEUE_FAIL] += stats_msg->tx_cenqueue_fail; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_DROPPED_TROOM] += stats_msg->rx_dropped_troom; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_TX_FORWARD_ENQUEUE_FAIL] += stats_msg->tx_forward_enqueue_fail; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_TX_CIPHER_DONE] += stats_msg->tx_cipher_done; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_CRYPTO_NOSUPP] += stats_msg->crypto_nosupp; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_DROPPED_MH_VERSION] += stats_msg->rx_dropped_mh_ver; + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_RX_UNALIGNED_PKT] += stats_msg->rx_unaligned_pkt; + + /* + * Copy crypto resp err stats. + */ + for (i = 0; i < NSS_CRYPTO_CMN_RESP_ERROR_MAX; i++) { +#if defined(NSS_HAL_IPQ807x_SUPPORT) + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_MAX + i] += stats_msg->crypto_resp_error[i]; +#else + s->stats[NSS_GRE_TUNNEL_STATS_SESSION_MAX + i] = 0; +#endif + } + + spin_unlock_bh(&nss_gre_tunnel_stats_lock); +} + +/* + * nss_gre_tunnel_stats_session_get() + * Get session GRE Tunnel statitics. 
+ */ +static void nss_gre_tunnel_stats_session_get(struct nss_gre_tunnel_stats_session *stats) +{ + int i; + + if (!stats) { + nss_warning("No memory to copy gre_tunnel session stats"); + return; + } + + spin_lock_bh(&nss_gre_tunnel_stats_lock); + for (i = 0; i < NSS_MAX_GRE_TUNNEL_SESSIONS; i++) { + if (session_stats[i].valid) { + memcpy(stats, &session_stats[i], + sizeof(struct nss_gre_tunnel_stats_session)); + stats++; + } + } + spin_unlock_bh(&nss_gre_tunnel_stats_lock); +} + +/* + * nss_gre_tunnel_stats_read() + * Read GRE Tunnel session statistics + */ +static ssize_t nss_gre_tunnel_stats_read(struct file *fp, char __user *ubuf, + size_t sz, loff_t *ppos) +{ + uint32_t max_output_lines = 2 + (NSS_MAX_GRE_TUNNEL_SESSIONS + * (NSS_GRE_TUNNEL_STATS_SESSION_MAX + 2)) + 2; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + struct net_device *dev; + int id, i; + struct nss_gre_tunnel_stats_session *gre_tunnel_session_stats = NULL; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_CRYPTO_CMN_RESP_ERROR_MAX * 8, GFP_KERNEL); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + gre_tunnel_session_stats = kzalloc((sizeof(struct nss_gre_tunnel_stats_session) + * NSS_MAX_GRE_TUNNEL_SESSIONS), GFP_KERNEL); + if (unlikely(gre_tunnel_session_stats == NULL)) { + nss_warning("Could not allocate memory for populating GRE Tunnel stats"); + kfree(lbuf); + kfree(stats_shadow); + return 0; + } + + /* + * Get all stats + */ + nss_gre_tunnel_stats_session_get(gre_tunnel_session_stats); + + /* + * Session stats + */ + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "GRE tunnel stats", NSS_STATS_SINGLE_CORE); + + for (id = 0; id < NSS_MAX_GRE_TUNNEL_SESSIONS; id++) { 
+ if (!gre_tunnel_session_stats[id].valid) + break; + + dev = dev_get_by_index(&init_net, gre_tunnel_session_stats[id].if_index); + if (likely(dev)) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "%d. nss interface id=%d, netdevice=%s\n", + id, gre_tunnel_session_stats[id].if_num, + dev->name); + dev_put(dev); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "%d. nss interface id=%d\n", id, + gre_tunnel_session_stats[id].if_num); + } + + size_wr += nss_stats_print("gre_tunnel", NULL, NSS_STATS_SINGLE_INSTANCE, + nss_gre_tunnel_strings_stats, gre_tunnel_session_stats[id].stats, + NSS_GRE_TUNNEL_STATS_SESSION_MAX, lbuf, size_wr, size_al); + + /* + * Print crypto resp err stats. + * TODO: We are not printing with the right enum string for crypto. This + * is intentional since we atleast want to see some stats for now. + */ + spin_lock_bh(&nss_gre_tunnel_stats_lock); + for (i = 0; i < NSS_CRYPTO_CMN_RESP_ERROR_MAX; i++) { + stats_shadow[i] = gre_tunnel_session_stats[id].stats[NSS_GRE_TUNNEL_STATS_SESSION_MAX + i]; + } + + spin_unlock_bh(&nss_gre_tunnel_stats_lock); + size_wr += nss_stats_print("gre_tunnel", NULL, NSS_STATS_SINGLE_INSTANCE, + nss_gre_tunnel_strings_stats, stats_shadow, + NSS_CRYPTO_CMN_RESP_ERROR_MAX, lbuf, size_wr, size_al); + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(gre_tunnel_session_stats); + kfree(lbuf); + kfree(stats_shadow); + return bytes_read; +} + +/* + * nss_gre_tunnel_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(gre_tunnel) + +/* + * nss_gre_tunnel_stats_dentry_create() + * Create gre tunnel statistics debug entry. + */ +void nss_gre_tunnel_stats_dentry_create(void) +{ + nss_stats_create_dentry("gre_tunnel", &nss_gre_tunnel_stats_ops); +} + +/* + * nss_gre_tunnel_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_gre_tunnel_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_gre_tunnel_stats_notification gre_tunnel_stats; + struct nss_gre_tunnel_stats_session *s = NULL; + int i; + + spin_lock_bh(&nss_gre_tunnel_stats_lock); + for (i = 0; i < NSS_MAX_GRE_TUNNEL_SESSIONS; i++) { + if (session_stats[i].if_num != if_num) { + continue; + } + + s = &session_stats[i]; + gre_tunnel_stats.core_id = nss_ctx->id; + gre_tunnel_stats.if_num = if_num; + memcpy(gre_tunnel_stats.stats_ctx, s->stats, sizeof(gre_tunnel_stats.stats_ctx)); + spin_unlock_bh(&nss_gre_tunnel_stats_lock); + atomic_notifier_call_chain(&nss_gre_tunnel_stats_notifier, NSS_STATS_EVENT_NOTIFY, &gre_tunnel_stats); + return; + } + spin_unlock_bh(&nss_gre_tunnel_stats_lock); +} + +/* + * nss_gre_tunnel_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_gre_tunnel_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_gre_tunnel_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_tunnel_stats_unregister_notifier); + +/* + * nss_gre_tunnel_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_gre_tunnel_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_gre_tunnel_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_gre_tunnel_stats_register_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_stats.h new file mode 100644 index 000000000..cdee4788d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_stats.h @@ -0,0 +1,44 @@ +/* + ****************************************************************************** + * Copyright (c) 2016-2017, 2021, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_GRE_TUNNEL_STATS_H +#define __NSS_GRE_TUNNEL_STATS_H + +/* + * GRE Tunnel session debug statistics + */ +struct nss_gre_tunnel_stats_session { + uint64_t stats[NSS_GRE_TUNNEL_STATS_SESSION_MAX + NSS_CRYPTO_CMN_RESP_ERROR_MAX]; + /* GRE tunnel statistics */ + int32_t if_index; /* Interface index */ + uint32_t if_num; /* NSS interface number */ + bool valid; +}; + +/* + * Data structures to store GRE Tunnel nss debug stats + */ +extern spinlock_t nss_gre_tunnel_stats_lock; +extern struct nss_gre_tunnel_stats_session session_stats[NSS_MAX_GRE_TUNNEL_SESSIONS]; + +/* + * GRE Tunnel statistics APIs + */ +extern void nss_gre_tunnel_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_gre_tunnel_stats_session_sync(struct nss_ctx_instance *nss_ctx, struct nss_gre_tunnel_stats *stats_msg, uint16_t if_num); +extern void nss_gre_tunnel_stats_dentry_create(void); + +#endif /* __NSS_GRE_TUNNEL_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_strings.c new file mode 100644 index 000000000..402182e61 --- /dev/null +++ 
b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_strings.c @@ -0,0 +1,77 @@ +/* + **************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + **************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_gre_tunnel_strings.h" + +/* + * nss_gre_tunnel_strings_stats + * GRE Tunnel statistics strings for nss session stats + */ +struct nss_stats_info nss_gre_tunnel_strings_stats[NSS_GRE_TUNNEL_STATS_SESSION_MAX] = { + {"rx_pkts", NSS_STATS_TYPE_COMMON}, + {"tx_pkts", NSS_STATS_TYPE_COMMON}, + {"rx_queue_0_dropped", NSS_STATS_TYPE_DROP}, + {"rx_queue_1_dropped", NSS_STATS_TYPE_DROP}, + {"rx_queue_2_dropped", NSS_STATS_TYPE_DROP}, + {"rx_queue_3_dropped", NSS_STATS_TYPE_DROP}, + {"rx_malformed", NSS_STATS_TYPE_SPECIAL}, + {"rx_invalid_prot", NSS_STATS_TYPE_SPECIAL}, + {"decap_queue_full", NSS_STATS_TYPE_SPECIAL}, + {"rx_single_rec_dgram", NSS_STATS_TYPE_SPECIAL}, + {"rx_invalid_rec_dgram", NSS_STATS_TYPE_SPECIAL}, + {"buffer_alloc_fail", NSS_STATS_TYPE_SPECIAL}, + {"buffer_copy_fail", NSS_STATS_TYPE_SPECIAL}, + {"outflow_queue_full", 
NSS_STATS_TYPE_SPECIAL}, + {"tx_dropped_hroom", NSS_STATS_TYPE_DROP}, + {"rx_cbuffer_alloc_fail", NSS_STATS_TYPE_SPECIAL}, + {"rx_cenqueue_fail", NSS_STATS_TYPE_SPECIAL}, + {"rx_decrypt_done", NSS_STATS_TYPE_SPECIAL}, + {"rx_forward_enqueue_fail", NSS_STATS_TYPE_SPECIAL}, + {"tx_cbuffer_alloc_fail", NSS_STATS_TYPE_SPECIAL}, + {"tx_cenqueue_fail", NSS_STATS_TYPE_SPECIAL}, + {"rx_dropped_troom", NSS_STATS_TYPE_DROP}, + {"tx_forward_enqueue_fail", NSS_STATS_TYPE_SPECIAL}, + {"tx_cipher_done", NSS_STATS_TYPE_SPECIAL}, + {"crypto_nosupp", NSS_STATS_TYPE_SPECIAL}, + {"rx_dropped_mh_version", NSS_STATS_TYPE_SPECIAL}, + {"rx_unaligned_pkt", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_gre_tunnel_strings_read() + * Read gre_tunnel session debug statistics names + */ +static ssize_t nss_gre_tunnel_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_gre_tunnel_strings_stats, NSS_GRE_TUNNEL_STATS_SESSION_MAX); +} + +/* + * nss_gre_tunnel_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(gre_tunnel); + +/* + * nss_gre_tunnel_strings_dentry_create() + * Create gre_tunnel statistics strings debug entry. + */ +void nss_gre_tunnel_strings_dentry_create(void) +{ + nss_strings_create_dentry("gre_tunnel", &nss_gre_tunnel_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_strings.h new file mode 100644 index 000000000..829469492 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_gre_tunnel_strings.h @@ -0,0 +1,25 @@ +/* + **************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + **************************************************************************** + */ + +#ifndef __NSS_GRE_TUNNEL_STRINGS_H +#define __NSS_GRE_TUNNEL_STRINGS_H + +#include "nss_gre_tunnel_stats.h" + +extern struct nss_stats_info nss_gre_tunnel_strings_stats[NSS_GRE_TUNNEL_STATS_SESSION_MAX]; +extern void nss_gre_tunnel_strings_dentry_create(void); + +#endif /* __NSS_GRE_TUNNEL_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_hal/fsm9010/nss_hal_pvt.c b/feeds/ipq807x/qca-nss-drv/src/nss_hal/fsm9010/nss_hal_pvt.c new file mode 100644 index 000000000..4a72d3d82 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_hal/fsm9010/nss_hal_pvt.c @@ -0,0 +1,342 @@ +/* + ************************************************************************** + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * nss_hal_pvt.c + * NSS HAL private APIs. + */ + +#include +#include +#include +#include +#include +#include "nss_hal.h" +#include "nss_core.h" + +#define NSS_H2N_INTR_EMPTY_BUFFER_QUEUE_BIT 0 +#define NSS_H2N_INTR_DATA_COMMAND_QUEUE_BIT 1 +#define NSS_H2N_INTR_TX_UNBLOCKED_BIT 11 +#define NSS_H2N_INTR_TRIGGER_COREDUMP_BIT 15 + +/* + * Interrupt type to cause vector. + */ +static uint32_t intr_cause[] = {(1 << NSS_H2N_INTR_EMPTY_BUFFER_QUEUE_BIT), + (1 << NSS_H2N_INTR_DATA_COMMAND_QUEUE_BIT), + (1 << NSS_H2N_INTR_TX_UNBLOCKED_BIT), + (1 << NSS_H2N_INTR_TRIGGER_COREDUMP_BIT)}; + +/* + * nss_hal_wq_function() + * Added to Handle BH requests to kernel + */ +void nss_hal_wq_function(struct work_struct *work) +{ + /* + * Not supported in FSM9010 + */ + kfree((void *)work); +} + +/* + * nss_hal_get_num_irqs() + * get number of irqs from interrupt resource of device tree + */ +static inline int nss_hal_get_num_irqs(struct device_node *np) +{ + int num_irqs = 0; + + while (of_irq_to_resource(np, num_irqs, NULL)) { + num_irqs++; + } + + return num_irqs; +} + +/* + * nss_hal_handle_irq() + * HLOS interrupt handler for nss interrupts + */ +static irqreturn_t nss_hal_handle_irq(int irq, void *ctx) +{ + struct int_ctx_instance *int_ctx = (struct int_ctx_instance *) ctx; + struct nss_ctx_instance *nss_ctx = int_ctx->nss_ctx; + + /* + * Mask interrupt until our bottom half re-enables it + */ + nss_hal_disable_interrupt(nss_ctx, int_ctx->shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS); + + /* + * Schedule tasklet to process interrupt cause + */ + 
napi_schedule(&int_ctx->napi); + return IRQ_HANDLED; +} + +/* + * nss_hal_of_get_pdata() + * Retrieve platform data from device node. + */ +static struct nss_platform_data *__nss_hal_of_get_pdata(struct platform_device *pdev) +{ + struct device_node *np = of_node_get(pdev->dev.of_node); + struct nss_platform_data *npd = NULL; + struct nss_ctx_instance *nss_ctx = NULL; + struct nss_top_instance *nss_top = &nss_top_main; + struct resource res_nphys, res_vphys; + int32_t i; + + npd = devm_kzalloc(&pdev->dev, sizeof(struct nss_platform_data), GFP_KERNEL); + if (!npd) { + return NULL; + } + + if (of_property_read_u32(np, "qcom,id", &npd->id) + || of_property_read_u32(np, "qcom,num-queue", &npd->num_queue)) { + pr_err("%s: error reading critical device node properties\n", np->name); + goto out; + } + + if (of_property_read_u32(np, "qcom,num-irq", &npd->num_irq)) { + npd->num_irq = nss_hal_get_num_irqs(np); + } + + if (npd->num_irq < npd->num_queue) { + pr_err("%s: not enough interrupts configured for all the queues\n", np->name); + goto out; + } + + if (npd->num_irq > NSS_MAX_IRQ_PER_CORE) { + pr_err("%s: exceeds maximum interrupt numbers per core\n", np->name); + goto out; + } + + nss_ctx = &nss_top->nss[npd->id]; + nss_ctx->id = npd->id; + + if (of_address_to_resource(np, 0, &res_nphys) != 0) { + nss_info_always("%px: nss%d: of_address_to_resource() fail for nphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + if (of_address_to_resource(np, 1, &res_vphys) != 0) { + nss_info_always("%px: nss%d: of_address_to_resource() fail for vphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + /* + * Save physical addresses + */ + npd->nphys = res_nphys.start; + npd->vphys = res_vphys.start; + + npd->nmap = ioremap_nocache(npd->nphys, resource_size(&res_nphys)); + if (!npd->nmap) { + nss_info_always("%px: nss%d: ioremap() fail for nphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + nss_assert(npd->vphys); + npd->vmap = ioremap_cache(npd->vphys, resource_size(&res_vphys)); + if 
(!npd->vmap) { + nss_info_always("%px: nss%d: ioremap() fail for vphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + /* + * Get IRQ numbers + */ + for (i = 0 ; i < npd->num_irq; i++) { + npd->irq[i] = irq_of_parse_and_map(np, i); + if (!npd->irq[i]) { + nss_info_always("%px: nss%d: irq_of_parse_and_map() fail for irq %d\n", nss_ctx, nss_ctx->id, i); + goto out; + } + } + + nss_hal_dt_parse_features(np, npd); + + of_node_put(np); + return npd; + +out: + if (npd->nmap) { + iounmap((void *)npd->nmap); + } + + if (npd->vmap) { + iounmap((void *)npd->vmap); + } + + devm_kfree(&pdev->dev, npd); + of_node_put(np); + return NULL; +} + +/* + * __nss_hal_debug_enable() + * Enable NSS debug + */ +static void __nss_hal_debug_enable(void) +{ + return; +} + +/* + * __nss_hal_common_reset() + */ +static int __nss_hal_common_reset(struct platform_device *nss_dev) +{ + return 0; +} + +/* + * __nss_hal_core_reset() + */ +static int __nss_hal_core_reset(struct platform_device *nss_dev, void __iomem *map, uint32_t addr, uint32_t clk_src) +{ + return 0; +} + +/* + * __nss_hal_firmware_load() + */ +static int __nss_hal_firmware_load(struct nss_ctx_instance *nss_ctx, struct platform_device *nss_dev, struct nss_platform_data *npd) +{ + return 0; +} + +/* + * __nss_hal_clock_configure() + */ +static int __nss_hal_clock_configure(struct nss_ctx_instance *nss_ctx, struct platform_device *nss_dev, struct nss_platform_data *npd) +{ + return 0; +} + +/* + * __nss_hal_read_interrupt_cause() + */ +static void __nss_hal_read_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t *cause) +{ + uint32_t value = nss_read_32(nss_ctx->nmap, NSS_REGS_N2H_INTR_STATUS_OFFSET); + *cause = (((value) >> shift_factor) & 0x7FFF); +} + +/* + * __nss_hal_clear_interrupt_cause() + */ +static void __nss_hal_clear_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ + nss_write_32(nss_ctx->nmap, NSS_REGS_N2H_INTR_CLR_OFFSET, (cause << shift_factor)); 
+} + +/* + * __nss_hal_disable_interrupt() + */ +static void __nss_hal_disable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ + nss_write_32(nss_ctx->nmap, NSS_REGS_N2H_INTR_MASK_CLR_OFFSET, (cause << shift_factor)); +} + +/* + * __nss_hal_enable_interrupt() + */ +static void __nss_hal_enable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ + nss_write_32(nss_ctx->nmap, NSS_REGS_N2H_INTR_MASK_SET_OFFSET, (cause << shift_factor)); +} + +/* + * __nss_hal_send_interrupt() + */ +static void __nss_hal_send_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t type) +{ + nss_write_32(nss_ctx->nmap, NSS_REGS_C2C_INTR_SET_OFFSET, intr_cause[type]); +} + +/* + * __nss_hal_request_irq() + */ +static int __nss_hal_request_irq(struct nss_ctx_instance *nss_ctx, struct nss_platform_data *npd, int irq_num) +{ + struct int_ctx_instance *int_ctx = &nss_ctx->int_ctx[irq_num]; + int err; + + if (irq_num == 1) { + int_ctx->shift_factor = 15; + err = request_irq(npd->irq[irq_num], nss_hal_handle_irq, 0, "nss_queue1", int_ctx); + } else { + int_ctx->shift_factor = 0; + err = request_irq(npd->irq[irq_num], nss_hal_handle_irq, 0, "nss", int_ctx); + } + if (err) { + nss_warning("%px: IRQ%d request failed", nss_ctx, npd->irq[irq_num]); + return err; + } + + int_ctx->irq = npd->irq[irq_num]; + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi, 64); + return 0; +} + +/* + * __nss_hal_init_imem + */ +void __nss_hal_init_imem(struct nss_ctx_instance *nss_ctx) +{ + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + + mem_ctx->imem_head = NSS_IMEM_START + NSS_IMEM_SIZE * nss_ctx->id; + mem_ctx->imem_end = mem_ctx->imem_head + NSS_IMEM_SIZE; + mem_ctx->imem_tail = mem_ctx->imem_head; + + nss_info("%px: IMEM init: head: 0x%x end: 0x%x tail: 0x%x\n", nss_ctx, + mem_ctx->imem_head, mem_ctx->imem_end, mem_ctx->imem_tail); +} + +/* + * __nss_hal_init_utcm_shared + */ +bool 
__nss_hal_init_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t *meminfo_start) +{ + /* + * Nothing to be done as there are no UTCM_SHARED defined for fsm9010 + */ + return true; +} + +/* + * nss_hal_fsm9010_ops + */ +struct nss_hal_ops nss_hal_fsm9010_ops = { + .common_reset = __nss_hal_common_reset, + .core_reset = __nss_hal_core_reset, + .clock_configure = __nss_hal_clock_configure, + .firmware_load = __nss_hal_firmware_load, + .debug_enable = __nss_hal_debug_enable, + .of_get_pdata = __nss_hal_of_get_pdata, + .request_irq = __nss_hal_request_irq, + .send_interrupt = __nss_hal_send_interrupt, + .enable_interrupt = __nss_hal_enable_interrupt, + .disable_interrupt = __nss_hal_disable_interrupt, + .clear_interrupt_cause = __nss_hal_clear_interrupt_cause, + .read_interrupt_cause = __nss_hal_read_interrupt_cause, + .init_imem = __nss_hal_init_imem, + .init_utcm_shared = __nss_hal_init_utcm_shared, +}; diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_hal.h b/feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_hal.h new file mode 100644 index 000000000..d9591a781 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_hal.h @@ -0,0 +1,129 @@ +/* + ************************************************************************** + * Copyright (c) 2013, 2016-2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * nss_hal.h + * NSS HAL public declarations. + */ + +#ifndef __NSS_HAL_H +#define __NSS_HAL_H + +#include +#include +#include +#include + +extern struct clk *nss_core0_clk; +extern struct clk *nss_core1_clk; +extern struct nss_runtime_sampling nss_runtime_samples; +extern struct clk *nss_fab0_clk; +extern struct clk *nss_fab1_clk; +extern void nss_hal_wq_function(struct work_struct *work); + +#if defined(NSS_HAL_IPQ806X_SUPPORT) +extern struct nss_hal_ops nss_hal_ipq806x_ops; +#endif +#if defined(NSS_HAL_IPQ807x_SUPPORT) +extern struct nss_hal_ops nss_hal_ipq807x_ops; +#endif +#if defined(NSS_HAL_IPQ60XX_SUPPORT) +extern struct nss_hal_ops nss_hal_ipq60xx_ops; +#endif +#if defined(NSS_HAL_IPQ50XX_SUPPORT) +extern struct nss_hal_ops nss_hal_ipq50xx_ops; +#endif +#if defined(NSS_HAL_FSM9010_SUPPORT) +extern struct nss_hal_ops nss_hal_fsm9010_ops; +#endif + +#define NSS_HAL_SUPPORTED_INTERRUPTS (NSS_N2H_INTR_EMPTY_BUFFER_QUEUE | \ + NSS_N2H_INTR_DATA_QUEUE_0 | \ + NSS_N2H_INTR_DATA_QUEUE_1 | \ + NSS_N2H_INTR_EMPTY_BUFFERS_SOS | \ + NSS_N2H_INTR_TX_UNBLOCKED | \ + NSS_N2H_INTR_COREDUMP_COMPLETE | \ + NSS_N2H_INTR_PROFILE_DMA | \ + NSS_N2H_INTR_PAGED_EMPTY_BUFFERS_SOS) + +/* + * nss_hal_read_interrupt_cause() + */ +static inline void nss_hal_read_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t *cause) +{ + nss_top_main.hal_ops->read_interrupt_cause(nss_ctx, shift_factor, cause); +} + +/* + * nss_hal_clear_interrupt_cause() + */ +static inline void nss_hal_clear_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t 
shift_factor, uint32_t cause) +{ + nss_top_main.hal_ops->clear_interrupt_cause(nss_ctx, shift_factor, cause); +} + +/* + * nss_hal_disable_interrupt() + */ +static inline void nss_hal_disable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ + nss_top_main.hal_ops->disable_interrupt(nss_ctx, shift_factor, cause); +} + +/* + * nss_hal_enable_interrupt() + */ +static inline void nss_hal_enable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ + nss_top_main.hal_ops->enable_interrupt(nss_ctx, shift_factor, cause); +} + +/* + * nss_hal_send_interrupt() + */ +static inline void nss_hal_send_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t cause) +{ + nss_top_main.hal_ops->send_interrupt(nss_ctx, cause); +} + +/* + * nss_hal_debug_enable() + */ +static inline void nss_hal_debug_enable(void) +{ + nss_top_main.hal_ops->debug_enable(); +} + +/* + * nss_hal_probe() + */ +int nss_hal_probe(struct platform_device *nss_dev); + +/* + * nss_hal_remove() + */ +int nss_hal_remove(struct platform_device *nss_dev); + +/* + * nss_hal_firmware_load() + */ +int nss_hal_firmware_load(struct nss_ctx_instance *nss_ctx, struct platform_device *nss_dev, struct nss_platform_data *npd); + +/* + * nss_hal_dt_parse_features() + */ +void nss_hal_dt_parse_features(struct device_node *np, struct nss_platform_data *npd); +#endif /* __NSS_HAL_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_hal_ops.h b/feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_hal_ops.h new file mode 100644 index 000000000..736d37e6c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_hal_ops.h @@ -0,0 +1,49 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * nss_hal_ops.h + * NSS HAL ops structure declaration. + */ + +#ifndef __NSS_HAL_OPS_H +#define __NSS_HAL_OPS_H + +#if (NSS_DT_SUPPORT != 1) +#include +#include +#endif + +/* + * nss_hal_ops defines the HAL layer API required to support multiple targets + */ +struct nss_hal_ops { + int (*common_reset)(struct platform_device *pdev); + int (*core_reset)(struct platform_device *nss_dev, void __iomem *map, uint32_t addr, uint32_t clk_src); + int (*clock_configure)(struct nss_ctx_instance *nss_ctx, struct platform_device *nss_dev, struct nss_platform_data *npd); + void (*debug_enable)(void); + struct nss_platform_data * (*of_get_pdata)(struct platform_device *pdev); + int (*firmware_load)(struct nss_ctx_instance *nss_ctx, struct platform_device *nss_dev, struct nss_platform_data *npd); + void (*read_interrupt_cause)(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t *cause); + int (*request_irq)(struct nss_ctx_instance *nss_ctx, struct nss_platform_data *npd, int irq_num); + void (*clear_interrupt_cause)(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause); + void (*send_interrupt)(struct nss_ctx_instance *nss_ctx, uint32_t 
type); + void (*enable_interrupt)(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause); + void (*disable_interrupt)(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause); + void (*init_imem)(struct nss_ctx_instance *nss_ctx); + bool (*init_utcm_shared)(struct nss_ctx_instance *nss_ctx, uint32_t *meminfo_start); +}; +#endif /* __NSS_HAL_OPS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_regs.h b/feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_regs.h new file mode 100644 index 000000000..765e8a197 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_hal/include/nss_regs.h @@ -0,0 +1,108 @@ +/* + ************************************************************************** + * Copyright (c) 2013, 2015-2017, 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * nss_regs.h + * NSS register definitions. 
+ */ + +#ifndef __NSS_REGS_H +#define __NSS_REGS_H + +#include +#include + +/* + * CSM register offsets + */ +#define NSS_REGS_CORE_ID_OFFSET 0x0000 +#define NSS_REGS_RESET_CTRL_OFFSET 0x0004 +#define NSS_REGS_CORE_BAR_OFFSET 0x0008 +#define NSS_REGS_CORE_AMC_OFFSET 0x000c +#define NSS_REGS_CORE_BOOT_ADDR_OFFSET 0x0010 +#define NSS_REGS_C2C_INTR_STATUS_OFFSET 0x0014 +#define NSS_REGS_C2C_INTR_SET_OFFSET 0x0018 +#define NSS_REGS_C2C_INTR_CLR_OFFSET 0x001c +#define NSS_REGS_N2H_INTR_STATUS_OFFSET 0x0020 +#define NSS_REGS_N2H_INTR_SET_OFFSET 0x0024 +#define NSS_REGS_N2H_INTR_CLR_OFFSET 0x0028 +#define NSS_REGS_N2H_INTR_MASK_OFFSET 0x002c +#define NSS_REGS_N2H_INTR_MASK_SET_OFFSET 0x0030 +#define NSS_REGS_N2H_INTR_MASK_CLR_OFFSET 0x0034 +#define NSS_REGS_CORE_INT_STAT0_TYPE_OFFSET 0x0038 +#define NSS_REGS_CORE_INT_STAT1_TYPE_OFFSET 0x003c +#define NSS_REGS_CORE_INT_STAT2_TYPE_OFFSET 0x0040 +#define NSS_REGS_CORE_INT_STAT3_TYPE_OFFSET 0x0044 +#define NSS_REGS_CORE_IFETCH_RANGE_OFFSET 0x0048 + +/* + * FPB register offsets + */ +#define NSS_REGS_FPB_CSR_CFG_OFFSET 0x0004 + +/* + * Defines for N2H interrupts + */ +#define NSS_N2H_INTR_EMPTY_BUFFER_QUEUE (1 << 0) +#define NSS_N2H_INTR_DATA_QUEUE_0 (1 << 1) +#define NSS_N2H_INTR_DATA_QUEUE_1 (1 << 2) +#define NSS_N2H_INTR_DATA_QUEUE_2 (1 << 3) +#define NSS_N2H_INTR_DATA_QUEUE_3 (1 << 4) +#define NSS_N2H_INTR_EMPTY_BUFFERS_SOS (1 << 10) +#define NSS_N2H_INTR_TX_UNBLOCKED (1 << 11) +#define NSS_N2H_INTR_PAGED_EMPTY_BUFFERS_SOS (1 << 12) +#define NSS_N2H_INTR_PROFILE_DMA (1 << 13) +#define NSS_N2H_INTR_COREDUMP_COMPLETE (1 << 14) + +/* + * Types of H2N interrupts + */ +enum nss_h2n_intr_type { + NSS_H2N_INTR_EMPTY_BUFFER_QUEUE = 0, + NSS_H2N_INTR_DATA_COMMAND_QUEUE = 1, + NSS_H2N_INTR_TX_UNBLOCKED = 2, + NSS_H2N_INTR_TRIGGER_COREDUMP = 3, + NSS_H2N_INTR_EMPTY_PAGED_BUFFER_QUEUE = 4, + NSS_H2N_INTR_TYPE_MAX = 5, +}; + +/* + * clock source for NSS cores + */ +enum nss_regs_clk_src_select { + NSS_REGS_CLK_SRC_DEFAULT, + 
NSS_REGS_CLK_SRC_ALTERNATE +}; + +/* + * nss_read_32() + * Read NSS register + */ +static inline uint32_t nss_read_32(void __iomem *addr, uint32_t offs) +{ + return readl(addr + offs); +} + +/* + * nss_write_32() + * Write NSS register + */ +static inline void nss_write_32(void __iomem *addr, uint32_t offs, uint32_t val) +{ + writel(val, addr + offs); +} + +#endif /* __NSS_REGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq50xx/nss_hal_pvt.c b/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq50xx/nss_hal_pvt.c new file mode 100644 index 000000000..3d6dfd02d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq50xx/nss_hal_pvt.c @@ -0,0 +1,667 @@ +/* + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * nss_hal_pvt.c + * NSS HAL private APIs. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nss_hal.h" +#include "nss_core.h" + +#define NSS_QGIC_IPC_REG_OFFSET 0x8 + +#define NSS0_H2N_INTR_BASE 13 + +/* + * N2H interrupts + */ +#define NSS_IRQ_NAME_EMPTY_BUF_SOS "nss_empty_buf_sos" +#define NSS_IRQ_NAME_EMPTY_BUF_QUEUE "nss_empty_buf_queue" +#define NSS_IRQ_NAME_TX_UNBLOCK "nss-tx-unblock" +#define NSS_IRQ_NAME_QUEUE0 "nss_queue0" +#define NSS_IRQ_NAME_QUEUE1 "nss_queue1" +#define NSS_IRQ_NAME_COREDUMP_COMPLETE "nss_coredump_complete" +#define NSS_IRQ_NAME_PAGED_EMPTY_BUF_SOS "nss_paged_empty_buf_sos" +#define NSS_IRQ_NAME_PROFILE_DMA "nss_profile_dma" + +/* + * CLKs + */ +#define NSS_CFG_CLK "nss-cfg-clk" +#define NSS_DBG_CLK "nss-dbg-clk" +#define NSS_CORE_CLK "nss-core-clk" +#define NSS_AXI_CLK "nss-axi-clk" +#define NSS_SNOC_AXI_CLK "nss-snoc-axi-clk" +#define NSS_NC_AXI_CLK "nss-nc-axi-clk" +#define NSS_UTCM_CLK "nss-utcm-clk" + +/* + * Core GCC reset + */ +#define NSS_CORE_GCC_RESET 0x00000007 + +/* + * GCC reset + */ +void __iomem *nss_misc_reset; +void __iomem *nss_misc_reset_flag; + +/* + * Purpose of each interrupt index: This should match the order defined in the NSS firmware + */ +enum nss_hal_n2h_intr_purpose { + NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_SOS = 0, + NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_QUEUE = 1, + NSS_HAL_N2H_INTR_PURPOSE_TX_UNBLOCKED = 2, + NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_0 = 3, + NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_1 = 4, + NSS_HAL_N2H_INTR_PURPOSE_COREDUMP_COMPLETE = 5, + NSS_HAL_N2H_INTR_PURPOSE_PAGED_EMPTY_BUFFER_SOS = 6, + NSS_HAL_N2H_INTR_PURPOSE_PROFILE_DMA = 7, + NSS_HAL_N2H_INTR_PURPOSE_MAX +}; + +/* + * Interrupt type to cause vector. 
+ */ +static uint32_t intr_cause[NSS_MAX_CORES][NSS_H2N_INTR_TYPE_MAX] = { + /* core0 */ + {(1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_EMPTY_BUFFER_QUEUE)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_DATA_COMMAND_QUEUE)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_TX_UNBLOCKED)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_TRIGGER_COREDUMP)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_EMPTY_PAGED_BUFFER_QUEUE))} +}; + +/* + * nss_hal_wq_function() + * Added to Handle BH requests to kernel + */ +void nss_hal_wq_function(struct work_struct *work) +{ + nss_work_t *my_work = (nss_work_t *)work; + + mutex_lock(&nss_top_main.wq_lock); + + nss_freq_change(&nss_top_main.nss[NSS_CORE_0], my_work->frequency, my_work->stats_enable, 0); + clk_set_rate(nss_core0_clk, my_work->frequency); + + nss_freq_change(&nss_top_main.nss[NSS_CORE_0], my_work->frequency, my_work->stats_enable, 1); + + mutex_unlock(&nss_top_main.wq_lock); + kfree((void *)work); +} + +/* + * nss_hal_handle_irq() + */ +static irqreturn_t nss_hal_handle_irq(int irq, void *ctx) +{ + struct int_ctx_instance *int_ctx = (struct int_ctx_instance *) ctx; + + disable_irq_nosync(irq); + napi_schedule(&int_ctx->napi); + + return IRQ_HANDLED; +} + +/* + * __nss_hal_of_get_pdata() + * Retrieve platform data from device node. 
+ */ +static struct nss_platform_data *__nss_hal_of_get_pdata(struct platform_device *pdev) +{ + struct device_node *np = of_node_get(pdev->dev.of_node); + struct nss_platform_data *npd; + struct nss_ctx_instance *nss_ctx = NULL; + struct nss_top_instance *nss_top = &nss_top_main; + struct resource res_nphys, res_qgic_phys; + int32_t i; + + npd = devm_kzalloc(&pdev->dev, sizeof(struct nss_platform_data), GFP_KERNEL); + if (!npd) { + return NULL; + } + + if (of_property_read_u32(np, "qcom,id", &npd->id) + || of_property_read_u32(np, "qcom,load-addr", &npd->load_addr) + || of_property_read_u32(np, "qcom,num-queue", &npd->num_queue) + || of_property_read_u32(np, "qcom,num-irq", &npd->num_irq)) { + pr_err("%s: error reading critical device node properties\n", np->name); + goto out; + } + + /* + * Read frequencies. If failure, load default values. + */ + of_property_read_u32(np, "qcom,mid-frequency", &nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency); + of_property_read_u32(np, "qcom,max-frequency", &nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency); + + if (npd->num_irq > NSS_MAX_IRQ_PER_CORE) { + pr_err("%s: exceeds maximum interrupt numbers per core\n", np->name); + goto out; + } + + nss_ctx = &nss_top->nss[npd->id]; + nss_ctx->id = npd->id; + + if (of_address_to_resource(np, 0, &res_nphys) != 0) { + nss_info_always("%px: nss%d: of_address_to_resource() fail for nphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + if (of_address_to_resource(np, 1, &res_qgic_phys) != 0) { + nss_info_always("%px: nss%d: of_address_to_resource() fail for qgic_phys\n", nss_ctx, nss_ctx->id); + goto out; + } + + /* + * Save physical addresses + */ + npd->nphys = res_nphys.start; + npd->qgic_phys = res_qgic_phys.start; + + npd->nmap = ioremap_nocache(npd->nphys, resource_size(&res_nphys)); + if (!npd->nmap) { + nss_info_always("%px: nss%d: ioremap() fail for nphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + npd->qgic_map = ioremap_nocache(npd->qgic_phys, 
resource_size(&res_qgic_phys)); + if (!npd->qgic_map) { + nss_info_always("%px: nss%d: ioremap() fail for qgic map\n", nss_ctx, nss_ctx->id); + goto out; + } + + NSS_CORE_DSB(); + + /* + * Get IRQ numbers + */ + for (i = 0 ; i < npd->num_irq; i++) { + npd->irq[i] = irq_of_parse_and_map(np, i); + if (!npd->irq[i]) { + nss_info_always("%px: nss%d: irq_of_parse_and_map() fail for irq %d\n", nss_ctx, nss_ctx->id, i); + goto out; + } + } + + nss_hal_dt_parse_features(np, npd); + + of_node_put(np); + return npd; + +out: + if (npd->nmap) { + iounmap(npd->nmap); + } + + if (npd->vmap) { + iounmap(npd->vmap); + } + + devm_kfree(&pdev->dev, npd); + of_node_put(np); + return NULL; +} + +/* + * nss_hal_clock_set_and_enable() + */ +static int nss_hal_clock_set_and_enable(struct device *dev, const char *id, unsigned long rate) +{ + struct clk *nss_clk = NULL; + int err; + + nss_clk = devm_clk_get(dev, id); + if (IS_ERR(nss_clk)) { + pr_err("%px: cannot get clock: %s\n", dev, id); + return -EFAULT; + } + + if (rate) { + err = clk_set_rate(nss_clk, rate); + if (err) { + pr_err("%px: cannot set %s freq\n", dev, id); + return -EFAULT; + } + } + + err = clk_prepare_enable(nss_clk); + if (err) { + pr_err("%px: cannot enable clock: %s\n", dev, id); + return -EFAULT; + } + + return 0; +} + +/* + * __nss_hal_core_reset() + */ +static int __nss_hal_core_reset(struct platform_device *nss_dev, void __iomem *map, uint32_t addr, uint32_t clk_src) +{ + uint32_t value; + + /* + * Apply ubi32 core reset + */ + nss_write_32(map, NSS_REGS_RESET_CTRL_OFFSET, 0x1); + + /* + * De-assert reset + */ + value = nss_read_32(nss_misc_reset, 0x0); + value &= ~NSS_CORE_GCC_RESET; + nss_write_32(nss_misc_reset, 0x0, value); + + /* + * Program address configuration + */ + nss_write_32(map, NSS_REGS_CORE_AMC_OFFSET, 0x1); + nss_write_32(map, NSS_REGS_CORE_BAR_OFFSET, 0x3C000000); + nss_write_32(map, NSS_REGS_CORE_BOOT_ADDR_OFFSET, addr); + + /* + * Set crypto interrupt as level sensitive + */ + 
nss_write_32(map, NSS_REGS_CORE_INT_STAT2_TYPE_OFFSET, 0x80000000); + nss_write_32(map, NSS_REGS_CORE_INT_STAT3_TYPE_OFFSET, 0x00200000); + + /* + * Enable Instruction Fetch range checking between 0x4000 0000 to 0xBFFF FFFF. + */ + nss_write_32(map, NSS_REGS_CORE_IFETCH_RANGE_OFFSET, 0xBF004001); + + /* + * De-assert ubi32 core reset + */ + nss_write_32(map, NSS_REGS_RESET_CTRL_OFFSET, 0x0); + + /* + * Set values only once for core0. Grab the proper clock. + */ + nss_core0_clk = clk_get(&nss_dev->dev, NSS_CORE_CLK); + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency)) { + return -EFAULT; + } + + return 0; +} + +/* + * __nss_hal_debug_enable() + * Enable NSS debug + */ +static void __nss_hal_debug_enable(void) +{ + +} + +/* + * __nss_hal_common_reset + * Do reset/clock configuration common to all cores + */ +static int __nss_hal_common_reset(struct platform_device *nss_dev) +{ + struct device_node *cmn = NULL; + struct resource res_nss_misc_reset; + + /* + * Get reference to NSS common device node + */ + cmn = of_find_node_by_name(NULL, "nss-common"); + if (!cmn) { + pr_err("%px: Unable to find nss-common node\n", nss_dev); + return -EFAULT; + } + + if (of_address_to_resource(cmn, 0, &res_nss_misc_reset) != 0) { + pr_err("%px: of_address_to_resource() return error for nss_misc_reset\n", nss_dev); + of_node_put(cmn); + return -EFAULT; + } + + of_node_put(cmn); + + nss_misc_reset = ioremap_nocache(res_nss_misc_reset.start, resource_size(&res_nss_misc_reset)); + if (!nss_misc_reset) { + pr_err("%px: ioremap fail for nss_misc_reset\n", nss_dev); + return -EFAULT; + } + + nss_top_main.nss_hal_common_init_done = true; + nss_info("nss_hal_common_reset Done\n"); + + return 0; +} + +/* + * __nss_hal_clock_configure() + */ +static int __nss_hal_clock_configure(struct nss_ctx_instance *nss_ctx, struct platform_device *nss_dev, struct nss_platform_data *npd) +{ + int32_t i; + + if 
(nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_DBG_CLK, 150000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CFG_CLK, 100000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_AXI_CLK, 400000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_SNOC_AXI_CLK, 400000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NC_AXI_CLK, 266670000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_UTCM_CLK, 266670000)) { + return -EFAULT; + } + + /* + * No entries, then just load default + */ + if ((nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency == 0) || + (nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency == 0)) { + nss_runtime_samples.freq_scale[NSS_FREQ_LOW_SCALE].frequency = NSS_FREQ_SCALE_NA; + nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency = NSS_FREQ_850; + nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency = NSS_FREQ_1000; + nss_info_always("%px: Running default frequencies\n", nss_ctx); + } + + /* + * Maple low frequency not applicable, set it accordingly + */ + nss_runtime_samples.freq_scale[NSS_FREQ_LOW_SCALE].frequency = NSS_FREQ_SCALE_NA; + + /* + * Test frequency from dtsi, if fail, try to set default frequency. + */ + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency)) { + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, NSS_FREQ_1000)) { + return -EFAULT; + } + } + + /* + * Setup ranges, test frequency, and display. 
+ */ + for (i = 0; i < NSS_FREQ_MAX_SCALE; i++) { + switch (nss_runtime_samples.freq_scale[i].frequency) { + case NSS_FREQ_850: + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_850_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_850_MAX; + break; + + case NSS_FREQ_1000: + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_1000_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_1000_MAX; + break; + + case NSS_FREQ_SCALE_NA: + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_NA; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_NA; + continue; + + default: + nss_info_always("%px: Frequency not found %d\n", nss_ctx, nss_runtime_samples.freq_scale[i].frequency); + return -EFAULT; + } + + /* + * Test the frequency, if fail, then default to safe frequency and abort + */ + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, nss_runtime_samples.freq_scale[i].frequency)) { + return -EFAULT; + } + } + + nss_info_always("Supported Frequencies - "); + for (i = 0; i < NSS_FREQ_MAX_SCALE; i++) { + switch (nss_runtime_samples.freq_scale[i].frequency) { + case NSS_FREQ_850: + nss_info_always("850 MHz "); + break; + + case NSS_FREQ_1000: + nss_info_always("1 GHz "); + break; + + case NSS_FREQ_SCALE_NA: + continue; + + default: + nss_info_always("%px: Error\nNo Table/Invalid Frequency Found\n", nss_ctx); + return -EFAULT; + } + } + nss_info_always("\n"); + + /* + * Set values only once for core0. Grab the proper clock. 
+ */ + nss_core0_clk = clk_get(&nss_dev->dev, NSS_CORE_CLK); + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency)) { + return -EFAULT; + } + return 0; +} + +/* + * __nss_hal_read_interrupt_cause() + */ +static void __nss_hal_read_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t *cause) +{ +} + +/* + * __nss_hal_clear_interrupt_cause() + */ +static void __nss_hal_clear_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ +} + +/* + * __nss_hal_disable_interrupt() + */ +static void __nss_hal_disable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ +} + +/* + * __nss_hal_enable_interrupt() + */ +static void __nss_hal_enable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ +} + +/* + * __nss_hal_send_interrupt() + */ +static void __nss_hal_send_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t type) +{ + /* + * Check if core and type is Valid + */ + nss_assert(nss_ctx->id < nss_top_main.num_nss); + nss_assert(type < NSS_H2N_INTR_TYPE_MAX); + + nss_write_32(nss_ctx->qgic_map, NSS_QGIC_IPC_REG_OFFSET, intr_cause[nss_ctx->id][type]); +} + +/* + * __nss_hal_request_irq() + */ +static int __nss_hal_request_irq(struct nss_ctx_instance *nss_ctx, struct nss_platform_data *npd, int irq_num) +{ + struct int_ctx_instance *int_ctx = &nss_ctx->int_ctx[irq_num]; + uint32_t cause, napi_wgt; + int err = -1, irq = npd->irq[irq_num]; + int (*napi_poll_cb)(struct napi_struct *, int) = NULL; + const char *irq_name; + + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); + + switch (irq_num) { + case NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_SOS: + napi_poll_cb = nss_core_handle_napi_non_queue; + napi_wgt = NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT; + cause = NSS_N2H_INTR_EMPTY_BUFFERS_SOS; + irq_name = NSS_IRQ_NAME_EMPTY_BUF_SOS; + break; + + case 
NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_QUEUE: + napi_poll_cb = nss_core_handle_napi_queue; + napi_wgt = NSS_EMPTY_BUFFER_RETURN_PROCESSING_WEIGHT; + cause = NSS_N2H_INTR_EMPTY_BUFFER_QUEUE; + irq_name = NSS_IRQ_NAME_EMPTY_BUF_QUEUE; + break; + + case NSS_HAL_N2H_INTR_PURPOSE_TX_UNBLOCKED: + napi_poll_cb = nss_core_handle_napi_non_queue; + napi_wgt = NSS_TX_UNBLOCKED_PROCESSING_WEIGHT; + cause = NSS_N2H_INTR_TX_UNBLOCKED; + irq_name = NSS_IRQ_NAME_TX_UNBLOCK; + break; + + case NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_0: + napi_poll_cb = nss_core_handle_napi_queue; + napi_wgt = NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT; + cause = NSS_N2H_INTR_DATA_QUEUE_0; + irq_name = NSS_IRQ_NAME_QUEUE0; + break; + + case NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_1: + napi_poll_cb = nss_core_handle_napi_queue; + napi_wgt = NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT; + cause = NSS_N2H_INTR_DATA_QUEUE_1; + irq_name = NSS_IRQ_NAME_QUEUE1; + break; + + case NSS_HAL_N2H_INTR_PURPOSE_COREDUMP_COMPLETE: + napi_poll_cb = nss_core_handle_napi_emergency; + napi_wgt = NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT; + cause = NSS_N2H_INTR_COREDUMP_COMPLETE; + irq_name = NSS_IRQ_NAME_COREDUMP_COMPLETE; + break; + + case NSS_HAL_N2H_INTR_PURPOSE_PAGED_EMPTY_BUFFER_SOS: + napi_poll_cb = nss_core_handle_napi_non_queue; + napi_wgt = NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT; + cause = NSS_N2H_INTR_PAGED_EMPTY_BUFFERS_SOS; + irq_name = NSS_IRQ_NAME_PAGED_EMPTY_BUF_SOS; + break; + + case NSS_HAL_N2H_INTR_PURPOSE_PROFILE_DMA: + napi_poll_cb = nss_core_handle_napi_sdma; + napi_wgt = NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT; + cause = NSS_N2H_INTR_PROFILE_DMA; + irq_name = NSS_IRQ_NAME_PROFILE_DMA; + break; + + default: + nss_warning("%px: nss%d: unsupported irq# %d\n", nss_ctx, nss_ctx->id, irq_num); + return err; + } + + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, napi_poll_cb, napi_wgt); + int_ctx->cause = cause; + err = request_irq(irq, nss_hal_handle_irq, 0, irq_name, int_ctx); + if (err) { + nss_warning("%px: 
nss%d: request_irq failed for irq# %d\n", nss_ctx, nss_ctx->id, irq_num); + return err; + } + int_ctx->irq = irq; + return 0; +} + +/* + * __nss_hal_init_imem + */ +void __nss_hal_init_imem(struct nss_ctx_instance *nss_ctx) +{ + /* + * Nothing to be done as there are no TCM in ipq50xx + */ +} + +/* + * __nss_hal_init_utcm_shared + */ +bool __nss_hal_init_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t *meminfo_start) +{ + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + uint32_t utcm_shared_map_magic = meminfo_start[2]; + uint32_t utcm_shared_start = meminfo_start[3]; + uint32_t utcm_shared_size = meminfo_start[4]; + + /* + * Check meminfo utcm_shared map magic + */ + if ((uint16_t)utcm_shared_map_magic != NSS_MEMINFO_RESERVE_AREA_UTCM_SHARED_MAP_MAGIC) { + nss_info_always("%px: failed to verify UTCM_SHARED map magic\n", nss_ctx); + return false; + } + + mem_ctx->utcm_shared_head = utcm_shared_start; + mem_ctx->utcm_shared_end = mem_ctx->utcm_shared_head + utcm_shared_size; + mem_ctx->utcm_shared_tail = mem_ctx->utcm_shared_head; + + nss_info("%px: UTCM_SHARED init: head: 0x%x end: 0x%x tail: 0x%x\n", nss_ctx, + mem_ctx->utcm_shared_head, mem_ctx->utcm_shared_end, mem_ctx->utcm_shared_tail); + return true; +} + +/* + * nss_hal_ipq50xx_ops + */ +struct nss_hal_ops nss_hal_ipq50xx_ops = { + .common_reset = __nss_hal_common_reset, + .core_reset = __nss_hal_core_reset, + .clock_configure = __nss_hal_clock_configure, + .firmware_load = nss_hal_firmware_load, + .debug_enable = __nss_hal_debug_enable, + .of_get_pdata = __nss_hal_of_get_pdata, + .request_irq = __nss_hal_request_irq, + .send_interrupt = __nss_hal_send_interrupt, + .enable_interrupt = __nss_hal_enable_interrupt, + .disable_interrupt = __nss_hal_disable_interrupt, + .clear_interrupt_cause = __nss_hal_clear_interrupt_cause, + .read_interrupt_cause = __nss_hal_read_interrupt_cause, + .init_imem = __nss_hal_init_imem, + .init_utcm_shared = __nss_hal_init_utcm_shared, +}; diff --git 
a/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq60xx/nss_hal_pvt.c b/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq60xx/nss_hal_pvt.c new file mode 100644 index 000000000..4c84cb958 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq60xx/nss_hal_pvt.c @@ -0,0 +1,739 @@ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/** + * nss_hal_pvt.c + * NSS HAL private APIs. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nss_hal.h" +#include "nss_core.h" + +#define NSS_QGIC_IPC_REG_OFFSET 0x8 + +#define NSS0_H2N_INTR_BASE 13 + +/* + * Common CLKs + */ +#define NSS_NOC_CLK "nss-noc-clk" +#define NSS_PTP_REF_CLK "nss-ptp-ref-clk" +#define NSS_CSR_CLK "nss-csr-clk" +#define NSS_CFG_CLK "nss-cfg-clk" +#define NSS_NSSNOC_QOSGEN_REF_CLK "nss-nssnoc-qosgen-ref-clk" +#define NSS_NSSNOC_SNOC_CLK "nss-nssnoc-snoc-clk" +#define NSS_NSSNOC_TIMEOUT_REF_CLK "nss-nssnoc-timeout-ref-clk" +#define NSS_CE_AXI_CLK "nss-ce-axi-clk" +#define NSS_CE_APB_CLK "nss-ce-apb-clk" +#define NSS_NSSNOC_CE_AXI_CLK "nss-nssnoc-ce-axi-clk" +#define NSS_NSSNOC_CE_APB_CLK "nss-nssnoc-ce-apb-clk" +#define NSS_MEM_NOC_UBI32_CLK "nss-mem-noc-ubi32-clk" +#define NSS_SNOC_NSSNOC_CLK "nss-snoc-nssnoc-clk" + +/* + * Per-core CLKS + */ +#define NSS_NSSNOC_AHB_CLK "nss-nssnoc-ahb-clk" +#define NSS_CORE_CLK "nss-core-clk" +#define NSS_AHB_CLK "nss-ahb-clk" +#define NSS_AXI_CLK "nss-axi-clk" +#define NSS_NC_AXI_CLK "nss-nc-axi-clk" +#define NSS_UTCM_CLK "nss-utcm-clk" + +/* + * Voltage values + */ +#define NOMINAL_VOLTAGE 1 +#define TURBO_VOLTAGE 2 + +/* + * Core reset part 1 + */ +#define NSS_CORE_GCC_RESET_1 0x00000020 + +/* + * Core reset part 2 + */ +#define NSS_CORE_GCC_RESET_2 0x00000017 + +/* + * Voltage regulator + */ +struct regulator *npu_reg; + +/* + * GCC reset + */ +void __iomem *nss_misc_reset; +void __iomem *nss_misc_reset_flag; + +/* + * Purpose of each interrupt index: This should match the order defined in the NSS firmware + */ +enum nss_hal_n2h_intr_purpose { + NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_SOS = 0, + NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_QUEUE = 1, + NSS_HAL_N2H_INTR_PURPOSE_TX_UNBLOCKED = 2, + NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_0 = 3, + NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_1 = 4, + NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_2 = 5, + NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_3 = 6, + 
NSS_HAL_N2H_INTR_PURPOSE_COREDUMP_COMPLETE = 7, + NSS_HAL_N2H_INTR_PURPOSE_PAGED_EMPTY_BUFFER_SOS = 8, + NSS_HAL_N2H_INTR_PURPOSE_PROFILE_DMA = 9, + NSS_HAL_N2H_INTR_PURPOSE_MAX +}; + +/* + * Interrupt type to cause vector. + */ +static uint32_t intr_cause[NSS_MAX_CORES][NSS_H2N_INTR_TYPE_MAX] = { + /* core0 */ + {(1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_EMPTY_BUFFER_QUEUE)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_DATA_COMMAND_QUEUE)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_TX_UNBLOCKED)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_TRIGGER_COREDUMP)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_EMPTY_PAGED_BUFFER_QUEUE))} +}; + +/* + * nss_hal_wq_function() + * Added to Handle BH requests to kernel + */ +void nss_hal_wq_function(struct work_struct *work) +{ + nss_work_t *my_work = (nss_work_t *)work; + + mutex_lock(&nss_top_main.wq_lock); + + nss_freq_change(&nss_top_main.nss[NSS_CORE_0], my_work->frequency, my_work->stats_enable, 0); + clk_set_rate(nss_core0_clk, my_work->frequency); + + nss_freq_change(&nss_top_main.nss[NSS_CORE_0], my_work->frequency, my_work->stats_enable, 1); + + mutex_unlock(&nss_top_main.wq_lock); + kfree((void *)work); +} + +/* + * nss_hal_handle_irq() + */ +static irqreturn_t nss_hal_handle_irq(int irq, void *ctx) +{ + struct int_ctx_instance *int_ctx = (struct int_ctx_instance *) ctx; + + disable_irq_nosync(irq); + napi_schedule(&int_ctx->napi); + + return IRQ_HANDLED; +} + +/* + * __nss_hal_of_get_pdata() + * Retrieve platform data from device node. 
+ */ +static struct nss_platform_data *__nss_hal_of_get_pdata(struct platform_device *pdev) +{ + struct device_node *np = of_node_get(pdev->dev.of_node); + struct nss_platform_data *npd; + struct nss_ctx_instance *nss_ctx = NULL; + struct nss_top_instance *nss_top = &nss_top_main; + struct resource res_nphys, res_qgic_phys; + int32_t i; + + npd = devm_kzalloc(&pdev->dev, sizeof(struct nss_platform_data), GFP_KERNEL); + if (!npd) { + return NULL; + } + + if (of_property_read_u32(np, "qcom,id", &npd->id) + || of_property_read_u32(np, "qcom,load-addr", &npd->load_addr) + || of_property_read_u32(np, "qcom,num-queue", &npd->num_queue) + || of_property_read_u32(np, "qcom,num-irq", &npd->num_irq)) { + pr_err("%s: error reading critical device node properties\n", np->name); + goto out; + } + + /* + * Read frequencies. If failure, load default values. + */ + of_property_read_u32(np, "qcom,low-frequency", &nss_runtime_samples.freq_scale[NSS_FREQ_LOW_SCALE].frequency); + of_property_read_u32(np, "qcom,mid-frequency", &nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency); + of_property_read_u32(np, "qcom,max-frequency", &nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency); + + if (npd->num_irq > NSS_MAX_IRQ_PER_CORE) { + pr_err("%s: exceeds maximum interrupt numbers per core\n", np->name); + goto out; + } + + nss_ctx = &nss_top->nss[npd->id]; + nss_ctx->id = npd->id; + + if (of_address_to_resource(np, 0, &res_nphys) != 0) { + nss_info_always("%px: nss%d: of_address_to_resource() fail for nphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + if (of_address_to_resource(np, 1, &res_qgic_phys) != 0) { + nss_info_always("%px: nss%d: of_address_to_resource() fail for qgic_phys\n", nss_ctx, nss_ctx->id); + goto out; + } + + /* + * Save physical addresses + */ + npd->nphys = res_nphys.start; + npd->qgic_phys = res_qgic_phys.start; + + npd->nmap = ioremap_nocache(npd->nphys, resource_size(&res_nphys)); + if (!npd->nmap) { + nss_info_always("%px: nss%d: ioremap() fail 
for nphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + npd->qgic_map = ioremap_nocache(npd->qgic_phys, resource_size(&res_qgic_phys)); + if (!npd->qgic_map) { + nss_info_always("%px: nss%d: ioremap() fail for qgic map\n", nss_ctx, nss_ctx->id); + goto out; + } + + NSS_CORE_DSB(); + + /* + * Get IRQ numbers + */ + for (i = 0 ; i < npd->num_irq; i++) { + npd->irq[i] = irq_of_parse_and_map(np, i); + if (!npd->irq[i]) { + nss_info_always("%px: nss%d: irq_of_parse_and_map() fail for irq %d\n", nss_ctx, nss_ctx->id, i); + goto out; + } + } + + nss_hal_dt_parse_features(np, npd); + + of_node_put(np); + return npd; + +out: + if (npd->nmap) { + iounmap(npd->nmap); + } + + if (npd->vmap) { + iounmap(npd->vmap); + } + + devm_kfree(&pdev->dev, npd); + of_node_put(np); + return NULL; +} + +/* + * nss_hal_clock_set_and_enable() + */ +static int nss_hal_clock_set_and_enable(struct device *dev, const char *id, unsigned long rate) +{ + struct clk *nss_clk = NULL; + int err; + + nss_clk = devm_clk_get(dev, id); + if (IS_ERR(nss_clk)) { + pr_err("%px: cannot get clock: %s\n", dev, id); + return -EFAULT; + } + + if (rate) { + err = clk_set_rate(nss_clk, rate); + if (err) { + pr_err("%px: cannot set %s freq\n", dev, id); + return -EFAULT; + } + } + + err = clk_prepare_enable(nss_clk); + if (err) { + pr_err("%px: cannot enable clock: %s\n", dev, id); + return -EFAULT; + } + + return 0; +} + +/* + * __nss_hal_core_reset() + */ +static int __nss_hal_core_reset(struct platform_device *nss_dev, void __iomem *map, uint32_t addr, uint32_t clk_src) +{ + uint32_t value; + + /* + * Apply ubi32 core reset + */ + nss_write_32(map, NSS_REGS_RESET_CTRL_OFFSET, 0x1); + + /* + * De-assert reset for first set + */ + value = nss_read_32(nss_misc_reset, 0x0); + value &= ~NSS_CORE_GCC_RESET_1; + nss_write_32(nss_misc_reset, 0x0, value); + + /* + * Minimum 10 - 20 cycles delay is required after + * de-asserting NSS reset clamp + */ + usleep_range(10, 20); + + /* + * De-assert reset for second set + */ + 
value &= ~NSS_CORE_GCC_RESET_2; + nss_write_32(nss_misc_reset, 0x0, value); + + /* + * Program address configuration + */ + nss_write_32(map, NSS_REGS_CORE_AMC_OFFSET, 0x1); + nss_write_32(map, NSS_REGS_CORE_BAR_OFFSET, 0x3C000000); + nss_write_32(map, NSS_REGS_CORE_BOOT_ADDR_OFFSET, addr); + + /* + * Enable Instruction Fetch range checking between 0x4000 0000 to 0xBFFF FFFF. + */ + nss_write_32(map, NSS_REGS_CORE_IFETCH_RANGE_OFFSET, 0xBF004001); + + /* + * De-assert ubi32 core reset + */ + nss_write_32(map, NSS_REGS_RESET_CTRL_OFFSET, 0x0); + + /* + * Set values only once for core0. Grab the proper clock. + */ + nss_core0_clk = clk_get(&nss_dev->dev, NSS_CORE_CLK); + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency)) { + return -EFAULT; + } + + return 0; +} + +/* + * __nss_hal_debug_enable() + * Enable NSS debug + */ +static void __nss_hal_debug_enable(void) +{ + +} + +/* + * __nss_hal_common_reset + * Do reset/clock configuration common to all cores + */ +static int __nss_hal_common_reset(struct platform_device *nss_dev) +{ + + struct device_node *cmn = NULL; + struct resource res_nss_misc_reset; + struct resource res_nss_misc_reset_flag; + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NOC_CLK, 266670000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_PTP_REF_CLK, 150000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CSR_CLK, 200000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CFG_CLK, 100000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_QOSGEN_REF_CLK, 24000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_SNOC_CLK, 266600000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_SNOC_NSSNOC_CLK, 266670000)) { + return -EFAULT; + } + + if 
(nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_TIMEOUT_REF_CLK, 6000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CE_AXI_CLK, 200000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CE_APB_CLK, 200000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_CE_AXI_CLK, 200000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_CE_APB_CLK, 200000000)) { + return -EFAULT; + } + + /* + * Get reference to NSS common device node + */ + cmn = of_find_node_by_name(NULL, "nss-common"); + if (!cmn) { + pr_err("%px: Unable to find nss-common node\n", nss_dev); + return -EFAULT; + } + + if (of_address_to_resource(cmn, 0, &res_nss_misc_reset) != 0) { + pr_err("%px: of_address_to_resource() return error for nss_misc_reset\n", nss_dev); + of_node_put(cmn); + return -EFAULT; + } + + if (of_address_to_resource(cmn, 1, &res_nss_misc_reset_flag) != 0) { + pr_err("%px: of_address_to_resource() return error for nss_misc_reset_flag\n", nss_dev); + of_node_put(cmn); + return -EFAULT; + } + + of_node_put(cmn); + + nss_misc_reset = ioremap_nocache(res_nss_misc_reset.start, resource_size(&res_nss_misc_reset)); + if (!nss_misc_reset) { + pr_err("%px: ioremap fail for nss_misc_reset\n", nss_dev); + return -EFAULT; + } + + nss_misc_reset_flag = ioremap_nocache(res_nss_misc_reset_flag.start, resource_size(&res_nss_misc_reset_flag)); + if (!nss_misc_reset_flag) { + pr_err("%px: ioremap fail for nss_misc_reset_flag\n", nss_dev); + return -EFAULT; + } + + nss_top_main.nss_hal_common_init_done = true; + nss_info("nss_hal_common_reset Done\n"); + + return 0; +} + +/* + * __nss_hal_clock_configure() + */ +static int __nss_hal_clock_configure(struct nss_ctx_instance *nss_ctx, struct platform_device *nss_dev, struct nss_platform_data *npd) +{ + uint32_t i; + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_AHB_CLK, 200000000)) { + 
return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_AHB_CLK, 200000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_AXI_CLK, 533330000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NC_AXI_CLK, 266670000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_UTCM_CLK, 266670000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_MEM_NOC_UBI32_CLK, 533330000)) { + return -EFAULT; + } + + /* + * No entries, then just load default + */ + if ((nss_runtime_samples.freq_scale[NSS_FREQ_LOW_SCALE].frequency == 0) || + (nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency == 0) || + (nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency == 0)) { + nss_runtime_samples.freq_scale[NSS_FREQ_LOW_SCALE].frequency = NSS_FREQ_187; + nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency = NSS_FREQ_748; + nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency = NSS_FREQ_1497; + nss_info_always("Running default frequencies\n"); + } + + /* + * Test frequency from dtsi, if fail, try to set default frequency. + */ + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency)) { + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, NSS_FREQ_1497)) { + return -EFAULT; + } + } + + /* + * Setup ranges, test frequency, and display. 
+ */ + for (i = 0; i < NSS_FREQ_MAX_SCALE; i++) { + if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_187) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_187_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_187_MAX; + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_748) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_748_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_748_MAX; + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_1497) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_1497_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_1497_MAX; + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_1689) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_1689_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_1689_MAX; + } else { + nss_info_always("Frequency not found %d\n", nss_runtime_samples.freq_scale[i].frequency); + return -EFAULT; + } + + /* + * Test the frequency, if fail, then default to safe frequency and abort + */ + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, nss_runtime_samples.freq_scale[i].frequency)) { + return -EFAULT; + } + } + + nss_info_always("Supported Frequencies - "); + for (i = 0; i < NSS_FREQ_MAX_SCALE; i++) { + if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_187) { + nss_info_always("187.2 MHz "); + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_748) { + nss_info_always("748.8 MHz "); + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_1497) { + nss_info_always("1.4976 GHz "); + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_1689) { + nss_info_always("1.6896 GHz "); + } else { + nss_info_always("Error\nNo Table/Invalid Frequency Found\n"); + return -EFAULT; + } + } + nss_info_always("\n"); + + /* + * Set values only once for core0. Grab the proper clock. 
+ */ + nss_core0_clk = clk_get(&nss_dev->dev, NSS_CORE_CLK); + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency)) { + return -EFAULT; + } + + return 0; +} + +/* + * __nss_hal_read_interrupt_cause() + */ +static void __nss_hal_read_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t *cause) +{ +} + +/* + * __nss_hal_clear_interrupt_cause() + */ +static void __nss_hal_clear_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ +} + +/* + * __nss_hal_disable_interrupt() + */ +static void __nss_hal_disable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ +} + +/* + * __nss_hal_enable_interrupt() + */ +static void __nss_hal_enable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ +} + +/* + * __nss_hal_send_interrupt() + */ +static void __nss_hal_send_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t type) +{ + /* + * Check if core and type is Valid + */ + nss_assert(nss_ctx->id < nss_top_main.num_nss); + nss_assert(type < NSS_H2N_INTR_TYPE_MAX); + + nss_write_32(nss_ctx->qgic_map, NSS_QGIC_IPC_REG_OFFSET, intr_cause[nss_ctx->id][type]); +} + +/* + * __nss_hal_request_irq() + */ +static int __nss_hal_request_irq(struct nss_ctx_instance *nss_ctx, struct nss_platform_data *npd, int irq_num) +{ + struct int_ctx_instance *int_ctx = &nss_ctx->int_ctx[irq_num]; + int err = -1, irq = npd->irq[irq_num]; + + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_SOS) { + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_non_queue, NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT); + int_ctx->cause = NSS_N2H_INTR_EMPTY_BUFFERS_SOS; + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_empty_buf_sos", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_QUEUE) { + 
netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_queue, NSS_EMPTY_BUFFER_RETURN_PROCESSING_WEIGHT); + int_ctx->cause = NSS_N2H_INTR_EMPTY_BUFFER_QUEUE; + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_empty_buf_queue", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_TX_UNBLOCKED) { + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_non_queue, NSS_TX_UNBLOCKED_PROCESSING_WEIGHT); + int_ctx->cause = NSS_N2H_INTR_TX_UNBLOCKED; + err = request_irq(irq, nss_hal_handle_irq, 0, "nss-tx-unblock", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_0) { + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_queue, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + int_ctx->cause = NSS_N2H_INTR_DATA_QUEUE_0; + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_queue0", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_1) { + int_ctx->cause = NSS_N2H_INTR_DATA_QUEUE_1; + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_queue, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_queue1", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_2) { + int_ctx->cause = NSS_N2H_INTR_DATA_QUEUE_2; + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_queue, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_queue2", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_3) { + int_ctx->cause = NSS_N2H_INTR_DATA_QUEUE_3; + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_queue, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_queue3", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_COREDUMP_COMPLETE) { + int_ctx->cause = NSS_N2H_INTR_COREDUMP_COMPLETE; + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, 
nss_core_handle_napi_emergency, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_coredump_complete", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_PAGED_EMPTY_BUFFER_SOS) { + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_non_queue, NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT); + int_ctx->cause = NSS_N2H_INTR_PAGED_EMPTY_BUFFERS_SOS; + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_paged_empty_buf_sos", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_PROFILE_DMA) { + int_ctx->cause = NSS_N2H_INTR_PROFILE_DMA; + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_sdma, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_profile_dma", int_ctx); + } + + if (err) { + return err; + } + + int_ctx->irq = irq; + return 0; +} + +/* + * __nss_hal_init_imem + */ +void __nss_hal_init_imem(struct nss_ctx_instance *nss_ctx) +{ + /* + * Nothing to be done as there are no TCM in ipq60xx + */ +} + +/* + * __nss_hal_init_utcm_shared + */ +bool __nss_hal_init_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t *meminfo_start) +{ + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + uint32_t utcm_shared_map_magic = meminfo_start[2]; + uint32_t utcm_shared_start = meminfo_start[3]; + uint32_t utcm_shared_size = meminfo_start[4]; + + /* + * Check meminfo utcm_shared map magic + */ + if ((uint16_t)utcm_shared_map_magic != NSS_MEMINFO_RESERVE_AREA_UTCM_SHARED_MAP_MAGIC) { + nss_info_always("%px: failed to verify UTCM_SHARED map magic\n", nss_ctx); + return false; + } + + mem_ctx->utcm_shared_head = utcm_shared_start; + mem_ctx->utcm_shared_end = mem_ctx->utcm_shared_head + utcm_shared_size; + mem_ctx->utcm_shared_tail = mem_ctx->utcm_shared_head; + + nss_info("%px: UTCM_SHARED init: head: 0x%x end: 0x%x tail: 0x%x\n", nss_ctx, + mem_ctx->utcm_shared_head, mem_ctx->utcm_shared_end, mem_ctx->utcm_shared_tail); + return 
true; +} + +/* + * nss_hal_ipq60xx_ops + */ +struct nss_hal_ops nss_hal_ipq60xx_ops = { + .common_reset = __nss_hal_common_reset, + .core_reset = __nss_hal_core_reset, + .clock_configure = __nss_hal_clock_configure, + .firmware_load = nss_hal_firmware_load, + .debug_enable = __nss_hal_debug_enable, + .of_get_pdata = __nss_hal_of_get_pdata, + .request_irq = __nss_hal_request_irq, + .send_interrupt = __nss_hal_send_interrupt, + .enable_interrupt = __nss_hal_enable_interrupt, + .disable_interrupt = __nss_hal_disable_interrupt, + .clear_interrupt_cause = __nss_hal_clear_interrupt_cause, + .read_interrupt_cause = __nss_hal_read_interrupt_cause, + .init_imem = __nss_hal_init_imem, + .init_utcm_shared = __nss_hal_init_utcm_shared, +}; diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq806x/nss_clocks.h b/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq806x/nss_clocks.h new file mode 100644 index 000000000..1e7af1a4d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq806x/nss_clocks.h @@ -0,0 +1,131 @@ +/* * Copyright (c) 2013 The Linux Foundation. All rights reserved.* */ +/* + * Copyright (c) 2013 The Linux Foundation. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all + * copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL + * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE + * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL + * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR + * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER + * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR + * PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef __NSS_CLOCKS_H +#define __NSS_CLOCKS_H + +#if (NSS_DT_SUPPORT != 1) +#include +#include + +#define REG(off) (MSM_CLK_CTL_BASE + (off)) +#define REG_GCC(off) (MSM_APCS_GCC_BASE + (off)) + +/* Peripheral clock registers. */ +#define PLL18_ACR REG(0x1234) +#define PLL18_MODE REG(0x31A0) +#define PLL18_L_VAL REG(0x31A4) +#define PLL18_M_VAL REG(0x31A8) +#define PLL18_N_VAL REG(0x31AC) +#define PLL18_TEST_CTL REG(0x31B0) +#define PLL18_CONFIG REG(0x31B4) +#define PLL18_STATUS REG(0x31B8) +#define PLL_LOCK_DET_STATUS REG(0x3420) +#define PLL_LOCK_DET_MASK REG(0x3424) +#define CE5_CORE_CLK_SRC_CTL REG(0x36C0) +#define CE5_CORE_CLK_SRC0_NS REG(0x36C4) +#define NSS_ACC_REG REG(0x28EC) +#define NSS_RESET_SPARE REG(0x3B60) +#define NSSFB0_CLK_SRC_CTL REG(0x3B80) +#define NSSFB0_CLK_SRC0_NS REG(0x3B84) +#define NSSFB0_CLK_SRC1_NS REG(0x3B88) +#define NSSFB0_CLK_CTL REG(0x3BA0) +#define NSSFAB_GLOBAL_BUS_NS REG(0x3BC0) +#define NSSFB1_CLK_SRC_CTL REG(0x3BE0) +#define NSSFB1_CLK_SRC0_NS REG(0x3BE4) +#define NSSFB1_CLK_SRC1_NS REG(0x3BE8) +#define NSSFB1_CLK_CTL REG(0x3C00) +#define CLK_HALT_NSSFAB0_NSSFAB1_STATEA REG(0x3C20) +#define UBI32_MPT0_CLK_CTL REG(0x3C40) +#define UBI32_MPT1_CLK_CTL REG(0x3C44) +#define CE5_HCLK_SRC_CTL REG(0x3C60) +#define CE5_HCLK_SRC0_NS REG(0x3C64) +#define CE5_HCLK_SRC1_NS REG(0x3C68) +#define CE5_HCLK_CTL REG(0x3C6C) +#define NSSFPB_CLK_CTL REG(0x3C80) +#define NSSFPB_CLK_SRC_CTL REG(0x3C84) +#define NSSFPB_CLK_SRC0_NS REG(0x3C88) +#define NSSFPB_CLK_SRC1_NS REG(0x3C8C) +#define GMAC_COREn_CLK_SRC_CTL(n) REG(0x3CA0+32*(n)) +#define GMAC_CORE1_CLK_SRC_CTL REG(0x3CA0) +#define GMAC_COREn_CLK_SRC0_MD(n) REG(0x3CA4+32*(n)) +#define GMAC_CORE1_CLK_SRC0_MD REG(0x3CA4) +#define GMAC_COREn_CLK_SRC1_MD(n) REG(0x3CA8+32*(n)) +#define GMAC_CORE1_CLK_SRC1_MD REG(0x3CA8) +#define GMAC_COREn_CLK_SRC0_NS(n) REG(0x3CAC+32*(n)) +#define GMAC_CORE1_CLK_SRC0_NS REG(0x3CAC) +#define GMAC_COREn_CLK_SRC1_NS(n) REG(0x3CB0+32*(n)) +#define 
GMAC_CORE1_CLK_SRC1_NS REG(0x3CB0) +#define GMAC_COREn_CLK_CTL(n) REG(0x3CB4+32*(n)) +#define GMAC_CORE1_CLK_CTL REG(0x3CB4) +#define GMAC_COREn_CLK_FS(n) REG(0x3CB8+32*(n)) +#define GMAC_CORE1_CLK_FS REG(0x3CB8) +#define GMAC_COREn_RESET(n) REG(0x3CBC+32*(n)) +#define GMAC_CORE1_RESET REG(0x3CBC) +#define UBI32_COREn_CLK_SRC_CTL(n) REG(0x3D20+32*(n)) +#define UBI32_CORE1_CLK_SRC_CTL REG(0x3D20) +#define UBI32_COREn_CLK_SRC0_MD(n) REG(0x3D24+32*(n)) +#define UBI32_CORE1_CLK_SRC0_MD REG(0x3D24) +#define UBI32_COREn_CLK_SRC1_MD(n) REG(0x3D28+32*(n)) +#define UBI32_CORE1_CLK_SRC1_MD REG(0x3D28) +#define UBI32_COREn_CLK_SRC0_NS(n) REG(0x3D2C+32*(n)) +#define UBI32_CORE1_CLK_SRC0_NS REG(0x3D2C) +#define UBI32_COREn_CLK_SRC1_NS(n) REG(0x3D30+32*(n)) +#define UBI32_CORE1_CLK_SRC1_NS REG(0x3D30) +#define UBI32_COREn_CLK_CTL(n) REG(0x3D34+32*(n)) +#define UBI32_CORE1_CLK_CTL REG(0x3D34) +#define UBI32_COREn_CLK_FS(n) REG(0x3D38+32*(n)) +#define UBI32_CORE1_CLK_FS REG(0x3D38) +#define UBI32_COREn_RESET_CLAMP(n) REG(0x3D3C+32*(n)) +#define UBI32_CORE1_RESET_CLAMP REG(0x3D3C) +#define NSS_250MHZ_CLK_SRC_CTL REG(0x3D60) +#define NSS_250MHZ_CLK_SRC0_NS REG(0x3D64) +#define NSS_250MHZ_CLK_SRC1_NS REG(0x3D68) +#define NSS_250MHZ_CLK_SRC0_MD REG(0x3D6C) +#define NSS_250MHZ_CLK_SRC1_MD REG(0x3D70) +#define NSS_250MHZ_CLK_CTL REG(0x3D74) +#define CE5_ACLK_SRC_CTL REG(0x3D80) +#define CE5_ACLK_SRC0_NS REG(0x3D84) +#define CE5_ACLK_SRC1_NS REG(0x3D88) +#define CE5_ACLK_CTL REG(0x3D8C) +#define PLL_ENA_NSS REG(0x3DA0) +#define NSSTCM_CLK_SRC_CTL REG(0x3DC0) +#define NSSTCM_CLK_SRC0_NS REG(0x3DC4) +#define NSSTCM_CLK_SRC1_NS REG(0x3DC8) +#define NSSTCM_CLK_FS REG(0x3DCC) +#define NSSTCM_CLK_CTL REG(0x3DD0) +#define CE5_CORE_0_RESET REG(0x3E00) +#define CE5_CORE_1_RESET REG(0x3E04) +#define CE5_CORE_2_RESET REG(0x3E08) +#define CE5_CORE_3_RESET REG(0x3E0C) +#define CE5_AHB_RESET REG(0x3E10) +#define NSS_RESET REG(0x3E20) +#define GMAC_AHB_RESET REG(0x3E24) +#define MACSEC_CORE1_RESET 
REG(0x3E28) +#define MACSEC_CORE2_RESET REG(0x3E2C) +#define MACSEC_CORE3_RESET REG(0x3E30) +#define NSS_TCM_RESET REG(0x3E40) + +enum nss_hal_pvt_pll_status { + PLL_NOT_LOCKED, + PLL_LOCKED +}; + +#endif +#endif /* __NSS_CLOCKS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq806x/nss_hal_pvt.c b/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq806x/nss_hal_pvt.c new file mode 100644 index 000000000..b8733e04b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq806x/nss_hal_pvt.c @@ -0,0 +1,1237 @@ +/* + ************************************************************************** + * Copyright (c) 2013, 2015-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * nss_hal_pvt.c + * NSS HAL private APIs. 
+ */ + +#include +#include +#include +#include +#include +#if (NSS_DT_SUPPORT != 1) +#include +#include +#else +#include +#include +#include +#include +#include +#endif +#include "nss_hal.h" +#include "nss_clocks.h" +#include "nss_core.h" +#if (NSS_PM_SUPPORT == 1) +#include "nss_pm.h" +#endif +#if (NSS_FABRIC_SCALING_SUPPORT == 1) +#include +#endif + +#define NSS_H2N_INTR_EMPTY_BUFFER_QUEUE_BIT 0 +#define NSS_H2N_INTR_DATA_COMMAND_QUEUE_BIT 1 +#define NSS_H2N_INTR_TX_UNBLOCKED_BIT 11 +#define NSS_H2N_INTR_EMPTY_PAGED_BUFFER_QUEUE_BIT 12 +#define NSS_H2N_INTR_TRIGGER_COREDUMP_BIT 15 + +/* + * Interrupt type to cause vector. + */ +static uint32_t intr_cause[] = {(1 << NSS_H2N_INTR_EMPTY_BUFFER_QUEUE_BIT), + (1 << NSS_H2N_INTR_DATA_COMMAND_QUEUE_BIT), + (1 << NSS_H2N_INTR_TX_UNBLOCKED_BIT), + (1 << NSS_H2N_INTR_TRIGGER_COREDUMP_BIT), + (1 << NSS_H2N_INTR_EMPTY_PAGED_BUFFER_QUEUE_BIT)}; + +#if (NSS_DT_SUPPORT == 1) +bool nss_crypto_is_scaled = false; +#endif + +#if (NSS_FW_DBG_SUPPORT == 1) +/* + * NSS debug pins configuration + */ + +/* + * Core 0, Data + * No pull up, Function 2 + */ +static struct gpiomux_setting nss_spi_data_0 = { + .func = GPIOMUX_FUNC_2, + .drv = GPIOMUX_DRV_8MA, + .pull = GPIOMUX_PULL_NONE, + .dir = GPIOMUX_IN, +}; + +/* + * Core 0, CLK, CS + * Pull up high, Function 2 + */ +static struct gpiomux_setting nss_spi_cs_clk_0 = { + .func = GPIOMUX_FUNC_2, + .drv = GPIOMUX_DRV_8MA, + .pull = GPIOMUX_PULL_UP, + .dir = GPIOMUX_IN, +}; + +/* + * Core 1, CS + * Pull up high, Function 4 + */ +static struct gpiomux_setting nss_spi_cs_1 = { + .func = GPIOMUX_FUNC_4, + .drv = GPIOMUX_DRV_8MA, + .pull = GPIOMUX_PULL_UP, + .dir = GPIOMUX_IN, +}; + +/* + * Core 1, CLK + * Pull up high, Function 5 + */ +static struct gpiomux_setting nss_spi_clk_1 = { + .func = GPIOMUX_FUNC_5, + .drv = GPIOMUX_DRV_8MA, + .pull = GPIOMUX_PULL_UP, + .dir = GPIOMUX_IN, +}; + +/* + * Core 1, Data + * Pull up none, Function 5 + */ +static struct gpiomux_setting nss_spi_data_1 = { + 
.func = GPIOMUX_FUNC_5, + .drv = GPIOMUX_DRV_8MA, + .pull = GPIOMUX_PULL_NONE, + .dir = GPIOMUX_IN, +}; + +static struct msm_gpiomux_config nss_spi_gpiomux[] = { + { + .gpio = 14, + .settings = { + [GPIOMUX_ACTIVE] = &nss_spi_data_0, + [GPIOMUX_SUSPENDED] = &nss_spi_data_0, + }, + }, + { + .gpio = 15, + .settings = { + [GPIOMUX_ACTIVE] = &nss_spi_data_0, + [GPIOMUX_SUSPENDED] = &nss_spi_data_0, + }, + }, + { + .gpio = 16, + .settings = { + [GPIOMUX_ACTIVE] = &nss_spi_cs_clk_0, + [GPIOMUX_SUSPENDED] = &nss_spi_cs_clk_0, + }, + }, + { + .gpio = 17, + .settings = { + [GPIOMUX_ACTIVE] = &nss_spi_cs_clk_0, + [GPIOMUX_SUSPENDED] = &nss_spi_cs_clk_0, + }, + }, + { + .gpio = 55, + .settings = { + [GPIOMUX_ACTIVE] = &nss_spi_data_1, + [GPIOMUX_SUSPENDED] = &nss_spi_data_1, + }, + }, + { + .gpio = 56, + .settings = { + [GPIOMUX_ACTIVE] = &nss_spi_data_1, + [GPIOMUX_SUSPENDED] = &nss_spi_data_1, + }, + }, + { + .gpio = 57, + .settings = { + [GPIOMUX_ACTIVE] = &nss_spi_cs_1, + [GPIOMUX_SUSPENDED] = &nss_spi_cs_1, + }, + }, + { + .gpio = 58, + .settings = { + [GPIOMUX_ACTIVE] = &nss_spi_clk_1, + [GPIOMUX_SUSPENDED] = &nss_spi_clk_1, + }, + }, +}; +#endif /* NSS_FW_DBG_SUPPORT */ + +/* + * nss_hal_scale_fabric() + * DT supported fabric scaling + */ +void nss_hal_scale_fabric(uint32_t work_frequency) +{ +#if (NSS_DT_SUPPORT == 1) + nss_crypto_pm_event_callback_t crypto_pm_cb; + bool auto_scale; + bool turbo; + +#if (NSS_FABRIC_SCALING_SUPPORT == 1) + /* + * PM framework + */ + scale_fabrics(); +#endif + if ((nss_fab0_clk != NULL) && (nss_fab1_clk != NULL)) { + if (work_frequency >= NSS_FREQ_733) { + clk_set_rate(nss_fab0_clk, NSS_FABRIC0_TURBO); + clk_set_rate(nss_fab1_clk, NSS_FABRIC1_TURBO); + } else if (work_frequency > NSS_FREQ_110) { + clk_set_rate(nss_fab0_clk, NSS_FABRIC0_NOMINAL); + clk_set_rate(nss_fab1_clk, NSS_FABRIC1_NOMINAL); + } else { + clk_set_rate(nss_fab0_clk, NSS_FABRIC0_IDLE); + clk_set_rate(nss_fab1_clk, NSS_FABRIC1_IDLE); + } + + /* + * notify crypto about 
the clock change + */ + crypto_pm_cb = nss_top_main.crypto_pm_callback; + if (crypto_pm_cb) { + turbo = (work_frequency >= NSS_FREQ_733); + auto_scale = nss_cmd_buf.auto_scale; + nss_crypto_is_scaled = crypto_pm_cb(nss_top_main.crypto_pm_ctx, turbo, auto_scale); + } + } +#endif +} + +/* + * nss_hal_pm_support() + * Supported in 3.4 + */ +void nss_hal_pm_support(uint32_t work_frequency) +{ +#if (NSS_PM_SUPPORT == 1) + if (!pm_client) { + return; + } + + if (work_frequency >= NSS_FREQ_733) { + nss_pm_set_perf_level(pm_client, NSS_PM_PERF_LEVEL_TURBO); + } else if (work_frequency > NSS_FREQ_110) { + nss_pm_set_perf_level(pm_client, NSS_PM_PERF_LEVEL_NOMINAL); + } else { + nss_pm_set_perf_level(pm_client, NSS_PM_PERF_LEVEL_IDLE); + } +#endif +} + +/* + * nss_hal_freq_change() + * Send frequency change message, and clock adjustment + */ +void nss_hal_freq_change(nss_work_t *my_work) +{ + nss_freq_change(&nss_top_main.nss[NSS_CORE_0], my_work->frequency, my_work->stats_enable, 0); + if (nss_top_main.nss[NSS_CORE_1].state == NSS_CORE_STATE_INITIALIZED) { + nss_freq_change(&nss_top_main.nss[NSS_CORE_1], my_work->frequency, my_work->stats_enable, 0); + } + clk_set_rate(nss_core0_clk, my_work->frequency); + + nss_freq_change(&nss_top_main.nss[NSS_CORE_0], my_work->frequency, my_work->stats_enable, 1); + if (nss_top_main.nss[NSS_CORE_1].state == NSS_CORE_STATE_INITIALIZED) { + nss_freq_change(&nss_top_main.nss[NSS_CORE_1], my_work->frequency, my_work->stats_enable, 1); + } +} + +/* + * nss_hal_wq_function() + * Added to Handle BH requests to kernel + */ +void nss_hal_wq_function(struct work_struct *work) +{ + nss_work_t *my_work = (nss_work_t *)work; + + mutex_lock(&nss_top_main.wq_lock); +#if (NSS_DT_SUPPORT == 1) + /* + * If crypto clock is in Turbo, disable scaling for other + * NSS subsystem components and retain them at turbo + */ + if (nss_crypto_is_scaled) { + nss_cmd_buf.current_freq = nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency; + 
mutex_unlock(&nss_top_main.wq_lock); + return; + } +#endif + + nss_hal_freq_change(my_work); + + /* + * Supported in 3.4 + */ + nss_hal_pm_support(my_work->frequency); + + nss_hal_scale_fabric(my_work->frequency); + + mutex_unlock(&nss_top_main.wq_lock); + kfree((void *)work); +} + +/* + * nss_hal_handle_irq() + * HLOS interrupt handler for nss interrupts + */ +static irqreturn_t nss_hal_handle_irq(int irq, void *ctx) +{ + struct int_ctx_instance *int_ctx = (struct int_ctx_instance *) ctx; + struct nss_ctx_instance *nss_ctx = int_ctx->nss_ctx; + + /* + * Mask interrupt until our bottom half re-enables it + */ + nss_hal_disable_interrupt(nss_ctx, int_ctx->shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS); + + /* + * Schedule tasklet to process interrupt cause + */ + napi_schedule(&int_ctx->napi); + return IRQ_HANDLED; +} + +#if (NSS_DT_SUPPORT != 1) +#if defined(NSS_ENABLE_CLK) +/* + * nss_hal_pvt_enable_pll18() + * Enable PLL18 + */ +static uint32_t nss_hal_pvt_enable_pll18(uint32_t speed) +{ + uint32_t retries = 100; + + /* + * Prevent Compiler from commenting out the loop. 
+ */ + uint32_t value; + uint32_t mask = (1 << 2); + + /* + * Start with clean slate + */ + writel(0, PLL18_MODE); + + /* + * Effective VCO Frequency = 1100 MHz Post Divide 2 + */ + if (speed == 1100) { + writel(0x4000042C, PLL18_L_VAL); + writel(0x0, PLL18_M_VAL); + writel(0x1, PLL18_N_VAL); + + /* + * PLL configuration (as provided by HW team) + */ + writel(0x01495625, PLL18_CONFIG); + writel(0x00003080, PLL18_TEST_CTL); + } else if (speed == 1466) { + /* + * Effective VCO Frequency = 1466 MHz Post Divide 2 + */ + + writel(0x4000043A, PLL18_L_VAL); + writel(0x10, PLL18_M_VAL); + writel(0x19, PLL18_N_VAL); + + /* + * PLL configuration (as provided by HW team) + */ + writel(0x014B5625, PLL18_CONFIG); + writel(0x00003080, PLL18_TEST_CTL); + } else { + BUG_ON(1); + } + + /* + * Enable PLL18 output (sequence provided by HW team) + */ + writel(0x2, PLL18_MODE); + mdelay(1); + writel(0x6, PLL18_MODE); + writel(0x7, PLL18_MODE); + + /* + * Enable NSS Vote for PLL18. + */ + writel(mask, PLL_ENA_NSS); + do { + value = readl(PLL_LOCK_DET_STATUS); + if (value & mask) { + return PLL_LOCKED; + } + + mdelay(1); + } while (retries-- > 0); + + return PLL_NOT_LOCKED; +} +#endif +#else +/* + * __nss_hal_of_get_pdata() + * Retrieve platform data from device node. 
+ */ +static struct nss_platform_data *__nss_hal_of_get_pdata(struct platform_device *pdev) +{ + struct device_node *np = of_node_get(pdev->dev.of_node); + struct nss_platform_data *npd; + struct nss_ctx_instance *nss_ctx = NULL; + struct nss_top_instance *nss_top = &nss_top_main; + struct resource res_nphys, res_vphys; + int32_t i; + + npd = devm_kzalloc(&pdev->dev, sizeof(struct nss_platform_data), GFP_KERNEL); + if (!npd) { + return NULL; + } + + if (of_property_read_u32(np, "qcom,id", &npd->id) + || of_property_read_u32(np, "qcom,load-addr", &npd->load_addr) + || of_property_read_u32(np, "qcom,num-queue", &npd->num_queue) + || of_property_read_u32(np, "qcom,num-irq", &npd->num_irq)) { + pr_err("%s: error reading critical device node properties\n", np->name); + goto out; + } + + /* + * Read frequencies. If failure, load default values. + */ + of_property_read_u32(np, "qcom,low-frequency", &nss_runtime_samples.freq_scale[NSS_FREQ_LOW_SCALE].frequency); + of_property_read_u32(np, "qcom,mid-frequency", &nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency); + of_property_read_u32(np, "qcom,max-frequency", &nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency); + + if (npd->num_irq < npd->num_queue) { + pr_err("%s: not enough interrupts configured for all the queues\n", np->name); + goto out; + } + + if (npd->num_irq > NSS_MAX_IRQ_PER_CORE) { + pr_err("%s: exceeds maximum interrupt numbers per core\n", np->name); + goto out; + } + + nss_ctx = &nss_top->nss[npd->id]; + nss_ctx->id = npd->id; + + if (of_address_to_resource(np, 0, &res_nphys) != 0) { + nss_info_always("%px: nss%d: of_address_to_resource() fail for nphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + if (of_address_to_resource(np, 1, &res_vphys) != 0) { + nss_info_always("%px: nss%d: of_address_to_resource() fail for vphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + /* + * Save physical addresses + */ + npd->nphys = res_nphys.start; + npd->vphys = res_vphys.start; + + npd->nmap = 
ioremap_nocache(npd->nphys, resource_size(&res_nphys)); + if (!npd->nmap) { + nss_info_always("%px: nss%d: ioremap() fail for nphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + nss_assert(npd->vphys); + npd->vmap = ioremap_cache(npd->vphys, resource_size(&res_vphys)); + if (!npd->vmap) { + nss_info_always("%px: nss%d: ioremap() fail for vphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + /* + * Clear TCM memory used by this core + */ + for (i = 0; i < resource_size(&res_vphys) ; i += 4) { + nss_write_32(npd->vmap, i, 0); + NSS_CORE_DMA_CACHE_MAINT((npd->vmap + i), 4, DMA_TO_DEVICE); + } + NSS_CORE_DSB(); + + /* + * Get IRQ numbers + */ + for (i = 0 ; i < npd->num_irq; i++) { + npd->irq[i] = irq_of_parse_and_map(np, i); + if (!npd->irq[i]) { + nss_info_always("%px: nss%d: irq_of_parse_and_map() fail for irq %d\n", nss_ctx, nss_ctx->id, i); + goto out; + } + } + + nss_hal_dt_parse_features(np, npd); + + of_node_put(np); + return npd; + +out: + if (npd->nmap) { + iounmap(npd->nmap); + } + + if (npd->vmap) { + iounmap(npd->vmap); + } + + devm_kfree(&pdev->dev, npd); + of_node_put(np); + return NULL; +} +#endif + +/* + * __nss_hal_core_reset() + */ +static int __nss_hal_core_reset(struct platform_device *nss_dev, void __iomem *map, uint32_t addr, uint32_t clk_src) +{ +#if (NSS_DT_SUPPORT == 1) + struct reset_control *rstctl = NULL; + + /* + * Remove UBI32 reset clamp + */ + rstctl = devm_reset_control_get(&nss_dev->dev, "clkrst-clamp"); + if (IS_ERR(rstctl)) { + nss_info_always("%px: Deassert UBI32 core%d reset clamp failed", nss_dev, nss_dev->id); + return -EFAULT; + } + reset_control_deassert(rstctl); + + /* + * Remove UBI32 core clamp + */ + rstctl = devm_reset_control_get(&nss_dev->dev, "clamp"); + if (IS_ERR(rstctl)) { + nss_info_always("%px: Deassert UBI32 core%d clamp failed", nss_dev, nss_dev->id); + return -EFAULT; + } + reset_control_deassert(rstctl); + + /* + * Remove UBI32 AHB reset + */ + rstctl = devm_reset_control_get(&nss_dev->dev, "ahb"); + if 
(IS_ERR(rstctl)) { + nss_info_always("%px: Deassert AHB core%d reset failed", nss_dev, nss_dev->id); + return -EFAULT; + } + reset_control_deassert(rstctl); + + /* + * Remove UBI32 AXI reset + */ + rstctl = devm_reset_control_get(&nss_dev->dev, "axi"); + if (IS_ERR(rstctl)) { + nss_info_always("%px: Deassert core%d AXI reset failed", nss_dev, nss_dev->id); + return -EFAULT; + } + reset_control_deassert(rstctl); +#else +#if defined(NSS_ENABLE_CLOCK) + /* + * Enable mpt clock + */ + writel(0x10, UBI32_MPT0_CLK_CTL); + + /* + * UBI coren clock root enable + */ + if (clk_src == NSS_REGS_CLK_SRC_DEFAULT) { + /* select Src0 */ + writel(0x02, UBI32_COREn_CLK_SRC_CTL(nss_dev->id)); + } else { + /* select Src1 */ + writel(0x03, UBI32_COREn_CLK_SRC_CTL(nss_dev->id)); + } + + /* + * Src0: Bypass M value configuration. + */ + + /* + * Src1: M val is 0x01 and NOT_2D value is 0xfd, 400 MHz with PLL0. + */ + writel(0x100fd, UBI32_COREn_CLK_SRC1_MD(nss_dev->id)); + + /* + * Bypass, pll18 + * Effective frequency = 550 MHz + */ + writel(0x00000001, UBI32_COREn_CLK_SRC0_NS(nss_dev->id)); + + /* + * Dual edge, pll0, NOT(N_M) = 0xfe. + * Effective frequency = 400 MHz + */ + writel(0x00fe0142, UBI32_COREn_CLK_SRC1_NS(nss_dev->id)); + + /* + * UBI32 coren clock control branch. + */ + writel(0x4f, UBI32_COREn_CLK_FS(nss_dev->id)); + + /* + * UBI32 coren clock control branch. 
+ */ + writel(0x10, UBI32_COREn_CLK_CTL(nss_dev->id)); +#endif + /* + * Remove UBI32 reset clamp + */ + writel(0xB, UBI32_COREn_RESET_CLAMP(nss_dev->id)); + + /* + * Busy wait for few cycles + */ + mdelay(1); + + /* + * Remove UBI32 core clamp + */ + writel(0x3, UBI32_COREn_RESET_CLAMP(nss_dev->id)); + + mdelay(1); + + /* + * Remove UBI32 AHB reset + */ + writel(0x1, UBI32_COREn_RESET_CLAMP(nss_dev->id)); + + mdelay(1); + + /* + * Remove UBI32 AXI reset + */ + writel(0x0, UBI32_COREn_RESET_CLAMP(nss_dev->id)); + + mdelay(1); +#endif /* NSS_DT_SUPPORT */ + + /* + * Apply ubi32 core reset + */ + nss_write_32(map, NSS_REGS_RESET_CTRL_OFFSET, 1); + + /* + * Program address configuration + */ + nss_write_32(map, NSS_REGS_CORE_AMC_OFFSET, 1); + nss_write_32(map, NSS_REGS_CORE_BAR_OFFSET, 0x3c000000); + nss_write_32(map, NSS_REGS_CORE_BOOT_ADDR_OFFSET, addr); + + /* + * C2C interrupts are level sensitive + */ + nss_write_32(map, NSS_REGS_CORE_INT_STAT2_TYPE_OFFSET, 0xFFFF); + + /* + * Enable Instruction Fetch range checking between 0x4000 0000 to 0xBFFF FFFF. 
+ */ + nss_write_32(map, NSS_REGS_CORE_IFETCH_RANGE_OFFSET, 0xBF004001); + + /* + * De-assert ubi32 core reset + */ + nss_write_32(map, NSS_REGS_RESET_CTRL_OFFSET, 0); + + return 0; +} + +/* + * __nss_hal_debug_enable() + * Enable NSS debug + */ +static void __nss_hal_debug_enable(void) +{ +#if (NSS_FW_DBG_SUPPORT == 1) + msm_gpiomux_install(nss_spi_gpiomux, + ARRAY_SIZE(nss_spi_gpiomux)); +#endif +} + +/* + * __nss_hal_common_reset + * Do reset/clock configuration common to all cores + */ +static int __nss_hal_common_reset(struct platform_device *nss_dev) +{ +#if (NSS_DT_SUPPORT == 1) + struct device_node *cmn = NULL; + struct resource res_nss_fpb_base; + struct clk *nss_tcm_src = NULL; + struct clk *nss_tcm_clk = NULL; + void __iomem *fpb_base; + int err; + + /* + * Get reference to NSS common device node + */ + cmn = of_find_node_by_name(NULL, "nss-common"); + if (!cmn) { + pr_err("%px: Unable to find nss-common node\n", nss_dev); + return -EFAULT; + } + + if (of_address_to_resource(cmn, 0, &res_nss_fpb_base) != 0) { + pr_err("%px: of_address_to_resource() return error for nss_fpb_base\n", nss_dev); + of_node_put(cmn); + return -EFAULT; + } + of_node_put(cmn); + + fpb_base = ioremap_nocache(res_nss_fpb_base.start, resource_size(&res_nss_fpb_base)); + if (!fpb_base) { + pr_err("%px: ioremap fail for nss_fpb_base\n", nss_dev); + return -EFAULT; + } + + /* + * Attach debug interface to TLMM + */ + nss_write_32(fpb_base, NSS_REGS_FPB_CSR_CFG_OFFSET, 0x360); + + /* + * NSS TCM CLOCK + */ + nss_tcm_src = clk_get(&nss_dev->dev, NSS_TCM_SRC_CLK); + if (IS_ERR(nss_tcm_src)) { + pr_err("%px: cannot get clock: %s\n", nss_dev, NSS_TCM_SRC_CLK); + return -EFAULT; + } + + err = clk_set_rate(nss_tcm_src, NSSTCM_FREQ); + if (err) { + pr_err("%px: cannot set NSSTCM freq\n", nss_dev); + return -EFAULT; + } + + err = clk_prepare_enable(nss_tcm_src); + if (err) { + pr_err("%px: cannot enable NSSTCM clock source\n", nss_dev); + return -EFAULT; + } + + nss_tcm_clk = 
clk_get(&nss_dev->dev, NSS_TCM_CLK); + if (IS_ERR(nss_tcm_clk)) { + pr_err("%px: cannot get clock: %s\n", nss_dev, NSS_TCM_CLK); + return -EFAULT; + } + + err = clk_prepare_enable(nss_tcm_clk); + if (err) { + pr_err("%px: cannot enable NSSTCM clock\n", nss_dev); + return -EFAULT; + } + + /* + * NSS Fabric Clocks. + */ + nss_fab0_clk = clk_get(&nss_dev->dev, NSS_FABRIC0_CLK); + if (IS_ERR(nss_fab0_clk)) { + pr_err("%px: cannot get clock: %s\n", nss_dev, NSS_FABRIC0_CLK); + nss_fab0_clk = NULL; + } else { + err = clk_prepare_enable(nss_fab0_clk); + if (err) { + pr_err("%px: cannot enable clock: %s\n", nss_dev, NSS_FABRIC0_CLK); + return -EFAULT; + } + } + + nss_fab1_clk = clk_get(&nss_dev->dev, NSS_FABRIC1_CLK); + if (IS_ERR(nss_fab1_clk)) { + pr_err("%px: cannot get clock: %s\n", nss_dev, NSS_FABRIC1_CLK); + nss_fab1_clk = NULL; + } else { + err = clk_prepare_enable(nss_fab1_clk); + if (err) { + pr_err("%px: cannot enable clock: %s\n", nss_dev, NSS_FABRIC1_CLK); + return -EFAULT; + } + } + + nss_top_main.nss_hal_common_init_done = true; + nss_info("nss_hal_common_reset Done\n"); + return 0; +} +#else + uint32_t i; + uint32_t value; + uint32_t status_mask = 0x1; + uint32_t wait_cycles = 100; + +#if defined(NSS_ENABLE_CLK) + /* + * NSS FPB CLOCK + */ + + /* + * Enable clock root and Divider 0 + * NOTE: Default value is good so no work here + */ + + /* + * PLL0 (800 MHZ). SRC_SEL is 2 (3'b010) + * src_div selected is Div-6 (4'b0101). + * + * Effective frequency (Divider 0) = 133 MHz + */ + writel(0x2a, NSSFPB_CLK_SRC0_NS); + + /* + * Enable clock branch + */ + writel(0x50, NSSFPB_CLK_CTL); + + /* + * NSS FABRIC0 CLOCK + */ + + /* + * Enable clock root and Divider 0 + * NOTE: Default value is good so no work here + */ + + /* + * PLL0 (800 MHZ) and div is set to 2. + * Effective frequency = 400 MHZ. + */ + writel(0x0a, NSSFB0_CLK_SRC0_NS); + + /* + * NSS Fabric0 Branch and dynamic clock gating enabled. 
+ */ + writel(0x50, NSSFB0_CLK_CTL); + + /* + * Enable clock root and Divider 0 + * NOTE: Default value is good so no work here + */ + + /* + * PLL0 (800 MHZ) and div is set to 4. + * Effective frequency = 200 MHZ. + */ + writel(0x1a, NSSFB1_CLK_SRC0_NS); + + /* + * NSS Fabric1 Branch enable and fabric clock gating enabled. + */ + writel(0x50, NSSFB1_CLK_CTL); + + /* + * NSS TCM CLOCK + */ + + /* + * Enable NSS TCM clock root source and select divider 0. + * + * NOTE: Default value is not good here + */ + writel(0x2, NSSTCM_CLK_SRC_CTL); + + /* + * PLL0 (800 MHZ) and div is set to 2. + * Effective frequency = 400 MHZ + */ + writel(0xa, NSSTCM_CLK_SRC0_NS); + + /* + * NSS TCM Branch enable and fabric clock gating enabled. + */ + writel(0x50, NSSTCM_CLK_CTL); + + /* + * Enable global NSS clock branches. + * NSS global Fab Branch enable and fabric clock gating enabled. + */ + writel(0xf, NSSFAB_GLOBAL_BUS_NS); + + /* + * Send reset interrupt to NSS + */ + writel(0x0, NSS_RESET); + + /* + * Enable PLL18 + */ + pll18_status = nss_hal_pvt_enable_pll18(); + if (!pll18_status) { + /* + * Select alternate good source (Src1/pll0) + */ + nss_top->clk_src = NSS_REGS_CLK_SRC_ALTERNATE; + return; + } + + /* + * Select default source (Src0/pll18) + */ + nss_top->clk_src = NSS_REGS_CLK_SRC_DEFAULT; +#endif + + /* + * Attach debug interface to TLMM + */ + nss_write_32((uint32_t)MSM_NSS_FPB_BASE, NSS_REGS_FPB_CSR_CFG_OFFSET, 0x360); + + /* + * NSS TCM CLOCK + */ + + /* + * Enable NSS TCM clock root source - SRC1. + * + */ + writel(0x3, NSSTCM_CLK_SRC_CTL); + + /* Enable PLL Voting for 0 */ + writel((readl(PLL_ENA_NSS) | 0x1), PLL_ENA_NSS); + do { + value = readl(PLL_LOCK_DET_STATUS); + if (value & status_mask) { + break; + } + mdelay(1); + } while (wait_cycles-- > 0); + + /* + * PLL0 (800 MHZ) and div is set to 3/4. 
+ * Effective frequency = 266/400 Mhz for SRC0/1 + */ + writel(0x12, NSSTCM_CLK_SRC0_NS); + writel(0xa, NSSTCM_CLK_SRC1_NS); + + /* + * NSS TCM Branch enable and fabric clock gating enabled. + */ + writel(0x50, NSSTCM_CLK_CTL); + + /* + * Clear TCM memory + */ + for (i = 0; i < IPQ806X_NSS_TCM_SIZE; i += 4) { + nss_write_32((uint32_t)MSM_NSS_TCM_BASE, i, 0); + } + + return 0; +} +#endif /* NSS_DT_SUPPORT */ + +/* + * __nss_hal_clock_configure() + */ +static int __nss_hal_clock_configure(struct nss_ctx_instance *nss_ctx, struct platform_device *nss_dev, struct nss_platform_data *npd) +{ +#if (NSS_FABRIC_SCALING_SUPPORT == 1) + struct fab_scaling_info fab_data; +#endif + int i, err; + + /* + * Both ubi core on ipq806x attach to the same clock, configure just the core0 + */ + if (nss_ctx->id) { + return 0; + } + + nss_core0_clk = clk_get(&nss_dev->dev, NSS_CORE_CLK); + if (IS_ERR(nss_core0_clk)) { + err = PTR_ERR(nss_core0_clk); + nss_info_always("%px: Regulator %s get failed, err=%d\n", nss_ctx, dev_name(&nss_dev->dev), err); + return err; + } + + /* + * Check if turbo is supported + */ + if (npd->turbo_frequency) { + nss_info_always("nss_driver - Turbo Support %d\n", npd->turbo_frequency); +#if (NSS_PM_SUPPORT == 1) + nss_pm_set_turbo(); +#endif + } else { + nss_info_always("nss_driver - Turbo No Support %d\n", npd->turbo_frequency); + } + + /* + * If valid entries - from dtsi - then just init clks. + * Otherwise query for clocks. + */ + if ((nss_runtime_samples.freq_scale[NSS_FREQ_LOW_SCALE].frequency != 0) && + (nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency != 0) && + (nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency != 0)) { + goto clk_complete; + } + + /* + * Load default scales, then query for higher. 
+ * If basic set cannot be set, then go to error, and abort + * Two set of defaults, 110, 550, 733 or 110, 275 and 550 + */ + if (clk_set_rate(nss_core0_clk, NSS_FREQ_110) != 0) { + return -EFAULT; + } + nss_runtime_samples.freq_scale[NSS_FREQ_LOW_SCALE].frequency = NSS_FREQ_110; + + if (npd->turbo_frequency) { + /* + * Figure out the middle scale + */ + if (clk_set_rate(nss_core0_clk, NSS_FREQ_600) == 0) { + nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency = NSS_FREQ_600; + } else if (clk_set_rate(nss_core0_clk, NSS_FREQ_550) == 0) { + nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency = NSS_FREQ_550; + } else { + return -EFAULT; + } + + /* + * Figure out the max scale + */ + if (clk_set_rate(nss_core0_clk, NSS_FREQ_800) == 0) { + nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency = NSS_FREQ_800; + } else if (clk_set_rate(nss_core0_clk, NSS_FREQ_733) == 0) { + nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency = NSS_FREQ_733; + } else { + return -EFAULT; + } + + } else { + if (clk_set_rate(nss_core0_clk, NSS_FREQ_275) != 0) { + return -EFAULT; + } + nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency = NSS_FREQ_275; + + if (clk_set_rate(nss_core0_clk, NSS_FREQ_550) != 0) { + return -EFAULT; + } + nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency = NSS_FREQ_550; + } + +clk_complete: +#if (NSS_FABRIC_SCALING_SUPPORT == 1) + if (npd->turbo_frequency) { + fab_data.idle_freq = nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency; + } else { + fab_data.idle_freq = nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency; + } + fab_data.clk = nss_core0_clk; + fab_scaling_register(&fab_data); +#endif + + /* + * Setup Ranges + */ + for (i = 0; i < NSS_FREQ_MAX_SCALE; i++) { + if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_110) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_110_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_110_MAX; + } + if 
(nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_275) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_275_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_275_MAX; + } + if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_550) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_550_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_550_MAX; + } + if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_600) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_600_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_600_MAX; + } + if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_733) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_733_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_733_MAX; + } + if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_800) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_800_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_800_MAX; + } + } + + nss_info_always("Supported Frequencies - "); + for (i = 0; i < NSS_FREQ_MAX_SCALE; i++) { + if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_110) { + nss_info_always("110Mhz "); + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_275) { + nss_info_always("275Mhz "); + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_550) { + nss_info_always("550Mhz "); + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_600) { + nss_info_always("600Mhz "); + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_733) { + nss_info_always("733Mhz "); + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_800) { + nss_info_always("800Mhz "); + } else { + nss_info_always("Error\nNo Table/Invalid Frequency Found - Loading Old Tables -"); + return -EFAULT; + } + } + nss_info_always("\n"); + + /* + * Set default frequency + */ + err = clk_set_rate(nss_core0_clk, 
nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency); + if (err) { + nss_info_always("%px: cannot set nss core0 clock\n", nss_ctx); + return -EFAULT; + } + + err = clk_prepare_enable(nss_core0_clk); + if (err) { + nss_info_always("%px: cannot enable nss core0 clock\n", nss_ctx); + return -EFAULT; + } + + return 0; +} + +/* + * __nss_hal_read_interrupt_cause() + */ +static void __nss_hal_read_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t *cause) +{ + uint32_t value = nss_read_32(nss_ctx->nmap, NSS_REGS_N2H_INTR_STATUS_OFFSET); + *cause = (((value) >> shift_factor) & 0x7FFF); +} + +/* + * __nss_hal_clear_interrupt_cause() + */ +static void __nss_hal_clear_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ + nss_write_32(nss_ctx->nmap, NSS_REGS_N2H_INTR_CLR_OFFSET, (cause << shift_factor)); +} + +/* + * __nss_hal_disable_interrupt() + */ +static void __nss_hal_disable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ + nss_write_32(nss_ctx->nmap, NSS_REGS_N2H_INTR_MASK_CLR_OFFSET, (cause << shift_factor)); +} + +/* + * __nss_hal_enable_interrupt() + */ +static void __nss_hal_enable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ + nss_write_32(nss_ctx->nmap, NSS_REGS_N2H_INTR_MASK_SET_OFFSET, (cause << shift_factor)); +} + +/* + * __nss_hal_send_interrupt() + */ +static void __nss_hal_send_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t type) +{ + nss_write_32(nss_ctx->nmap, NSS_REGS_C2C_INTR_SET_OFFSET, intr_cause[type]); +} + +/* + * __nss_hal_request_irq() + */ +static int __nss_hal_request_irq(struct nss_ctx_instance *nss_ctx, struct nss_platform_data *npd, int irq_num) +{ + struct int_ctx_instance *int_ctx = &nss_ctx->int_ctx[irq_num]; + int err; + + if (irq_num == 1) { + int_ctx->shift_factor = 15; + err = request_irq(npd->irq[irq_num], nss_hal_handle_irq, 0, "nss_queue1", int_ctx); + } else { + 
int_ctx->shift_factor = 0; + err = request_irq(npd->irq[irq_num], nss_hal_handle_irq, 0, "nss", int_ctx); + } + if (err) { + nss_info_always("%px: IRQ%d request failed", nss_ctx, npd->irq[irq_num]); + return err; + } + + int_ctx->irq = npd->irq[irq_num]; + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi, 64); + + return 0; +} + +/* + * __nss_hal_init_imem + */ +void __nss_hal_init_imem(struct nss_ctx_instance *nss_ctx) +{ + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + + mem_ctx->imem_head = NSS_IMEM_START + NSS_IMEM_SIZE * nss_ctx->id; + mem_ctx->imem_end = mem_ctx->imem_head + NSS_IMEM_SIZE; + mem_ctx->imem_tail = mem_ctx->imem_head; + + nss_info("%px: IMEM init: head: 0x%x end: 0x%x tail: 0x%x\n", nss_ctx, + mem_ctx->imem_head, mem_ctx->imem_end, mem_ctx->imem_tail); +} + +/* + * __nss_hal_init_utcm_shared + */ +bool __nss_hal_init_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t *meminfo_start) +{ + /* + * Nothing to be done as there are no UTCM_SHARED defined for ipq806x + */ + return true; +} + +/* + * nss_hal_ipq806x_ops + */ +struct nss_hal_ops nss_hal_ipq806x_ops = { + .common_reset = __nss_hal_common_reset, + .core_reset = __nss_hal_core_reset, + .clock_configure = __nss_hal_clock_configure, + .firmware_load = nss_hal_firmware_load, + .debug_enable = __nss_hal_debug_enable, +#if (NSS_DT_SUPPORT == 1) + .of_get_pdata = __nss_hal_of_get_pdata, +#endif + .request_irq = __nss_hal_request_irq, + .send_interrupt = __nss_hal_send_interrupt, + .enable_interrupt = __nss_hal_enable_interrupt, + .disable_interrupt = __nss_hal_disable_interrupt, + .clear_interrupt_cause = __nss_hal_clear_interrupt_cause, + .read_interrupt_cause = __nss_hal_read_interrupt_cause, + .init_imem = __nss_hal_init_imem, + .init_utcm_shared = __nss_hal_init_utcm_shared, +}; diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq807x/nss_hal_pvt.c b/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq807x/nss_hal_pvt.c new file mode 100644 index 
000000000..b95a23c4b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_hal/ipq807x/nss_hal_pvt.c @@ -0,0 +1,771 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * nss_hal_pvt.c + * NSS HAL private APIs. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nss_hal.h" +#include "nss_core.h" + +#define NSS_QGIC_IPC_REG_OFFSET 0x8 + +#define NSS0_H2N_INTR_BASE 13 +#define NSS1_H2N_INTR_BASE 19 + +/* + * Common CLKs + */ +#define NSS_NOC_CLK "nss-noc-clk" +#define NSS_PTP_REF_CLK "nss-ptp-ref-clk" +#define NSS_CSR_CLK "nss-csr-clk" +#define NSS_CFG_CLK "nss-cfg-clk" +#define NSS_IMEM_CLK "nss-imem-clk" +#define NSS_NSSNOC_QOSGEN_REF_CLK "nss-nssnoc-qosgen-ref-clk" +#define NSS_MEM_NOC_NSS_AXI_CLK "nss-mem-noc-nss-axi-clk" +#define NSS_NSSNOC_SNOC_CLK "nss-nssnoc-snoc-clk" +#define NSS_NSSNOC_TIMEOUT_REF_CLK "nss-nssnoc-timeout-ref-clk" +#define NSS_CE_AXI_CLK "nss-ce-axi-clk" +#define NSS_CE_APB_CLK "nss-ce-apb-clk" +#define NSS_NSSNOC_CE_AXI_CLK "nss-nssnoc-ce-axi-clk" +#define NSS_NSSNOC_CE_APB_CLK "nss-nssnoc-ce-apb-clk" + +/* + * Per-core CLKS + */ +#define NSS_NSSNOC_AHB_CLK "nss-nssnoc-ahb-clk" +#define NSS_CORE_CLK "nss-core-clk" +#define NSS_AHB_CLK "nss-ahb-clk" +#define NSS_AXI_CLK "nss-axi-clk" +#define NSS_MPT_CLK "nss-mpt-clk" +#define NSS_NC_AXI_CLK "nss-nc-axi-clk" + +/* + * Voltage values + */ +#define NOMINAL_VOLTAGE 1 +#define TURBO_VOLTAGE 2 + +/* + * Core reset part 1 + */ +#define NSS_CORE_GCC_RESET_1 0x00000020 + +/* + * Core reset part 2 + */ +#define NSS_CORE_GCC_RESET_2 0x00000017 + +/* + * Voltage regulator + */ +struct regulator *npu_reg; + +/* + * GCC reset + */ +void __iomem *nss_misc_reset; + +/* + * Purpose of each interrupt index: This should match the order defined in the NSS firmware + */ +enum nss_hal_n2h_intr_purpose { + NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_SOS = 0, + NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_QUEUE = 1, + NSS_HAL_N2H_INTR_PURPOSE_TX_UNBLOCKED = 2, + NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_0 = 3, + NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_1 = 4, + NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_2 = 5, + NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_3 = 6, + 
NSS_HAL_N2H_INTR_PURPOSE_COREDUMP_COMPLETE = 7, + NSS_HAL_N2H_INTR_PURPOSE_PAGED_EMPTY_BUFFER_SOS = 8, + NSS_HAL_N2H_INTR_PURPOSE_PROFILE_DMA = 9, + NSS_HAL_N2H_INTR_PURPOSE_MAX +}; + +/* + * Interrupt type to cause vector. + */ +static uint32_t intr_cause[NSS_MAX_CORES][NSS_H2N_INTR_TYPE_MAX] = { + /* core0 */ + {(1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_EMPTY_BUFFER_QUEUE)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_DATA_COMMAND_QUEUE)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_TX_UNBLOCKED)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_TRIGGER_COREDUMP)), + (1 << (NSS0_H2N_INTR_BASE + NSS_H2N_INTR_EMPTY_PAGED_BUFFER_QUEUE))}, + /* core 1 */ + {(1 << (NSS1_H2N_INTR_BASE + NSS_H2N_INTR_EMPTY_BUFFER_QUEUE)), + (1 << (NSS1_H2N_INTR_BASE + NSS_H2N_INTR_DATA_COMMAND_QUEUE)), + (1 << (NSS1_H2N_INTR_BASE + NSS_H2N_INTR_TX_UNBLOCKED)), + (1 << (NSS1_H2N_INTR_BASE + NSS_H2N_INTR_TRIGGER_COREDUMP)), + (1 << (NSS1_H2N_INTR_BASE + NSS_H2N_INTR_EMPTY_PAGED_BUFFER_QUEUE))} +}; + +/* + * nss_hal_wq_function() + * Added to Handle BH requests to kernel + */ +void nss_hal_wq_function(struct work_struct *work) +{ + nss_work_t *my_work = (nss_work_t *)work; + + mutex_lock(&nss_top_main.wq_lock); + + if (my_work->frequency > NSS_FREQ_1497) { + regulator_set_voltage(npu_reg, TURBO_VOLTAGE, TURBO_VOLTAGE); + } + + nss_freq_change(&nss_top_main.nss[NSS_CORE_0], my_work->frequency, my_work->stats_enable, 0); + if (nss_top_main.nss[NSS_CORE_1].state == NSS_CORE_STATE_INITIALIZED) { + nss_freq_change(&nss_top_main.nss[NSS_CORE_1], my_work->frequency, my_work->stats_enable, 0); + } + clk_set_rate(nss_core0_clk, my_work->frequency); + + nss_freq_change(&nss_top_main.nss[NSS_CORE_0], my_work->frequency, my_work->stats_enable, 1); + if (nss_top_main.nss[NSS_CORE_1].state == NSS_CORE_STATE_INITIALIZED) { + nss_freq_change(&nss_top_main.nss[NSS_CORE_1], my_work->frequency, my_work->stats_enable, 1); + } + + clk_set_rate(nss_core1_clk, my_work->frequency); + if (my_work->frequency <= 
NSS_FREQ_1497) { + regulator_set_voltage(npu_reg, NOMINAL_VOLTAGE, NOMINAL_VOLTAGE); + } + + mutex_unlock(&nss_top_main.wq_lock); + kfree((void *)work); +} + +/* + * nss_hal_handle_irq() + */ +static irqreturn_t nss_hal_handle_irq(int irq, void *ctx) +{ + struct int_ctx_instance *int_ctx = (struct int_ctx_instance *) ctx; + + disable_irq_nosync(irq); + napi_schedule(&int_ctx->napi); + + return IRQ_HANDLED; +} + +/* + * __nss_hal_of_get_pdata() + * Retrieve platform data from device node. + */ +static struct nss_platform_data *__nss_hal_of_get_pdata(struct platform_device *pdev) +{ + struct device_node *np = of_node_get(pdev->dev.of_node); + struct nss_platform_data *npd; + struct nss_ctx_instance *nss_ctx = NULL; + struct nss_top_instance *nss_top = &nss_top_main; + struct resource res_nphys, res_vphys, res_qgic_phys; + int32_t i; + + npd = devm_kzalloc(&pdev->dev, sizeof(struct nss_platform_data), GFP_KERNEL); + if (!npd) { + return NULL; + } + + if (of_property_read_u32(np, "qcom,id", &npd->id) + || of_property_read_u32(np, "qcom,load-addr", &npd->load_addr) + || of_property_read_u32(np, "qcom,num-queue", &npd->num_queue) + || of_property_read_u32(np, "qcom,num-irq", &npd->num_irq)) { + pr_err("%s: error reading critical device node properties\n", np->name); + goto out; + } + + /* + * Read frequencies. If failure, load default values. 
+ */ + of_property_read_u32(np, "qcom,low-frequency", &nss_runtime_samples.freq_scale[NSS_FREQ_LOW_SCALE].frequency); + of_property_read_u32(np, "qcom,mid-frequency", &nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency); + of_property_read_u32(np, "qcom,max-frequency", &nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency); + + if (npd->num_irq > NSS_MAX_IRQ_PER_CORE) { + pr_err("%s: exceeds maximum interrupt numbers per core\n", np->name); + goto out; + } + + nss_ctx = &nss_top->nss[npd->id]; + nss_ctx->id = npd->id; + + if (of_address_to_resource(np, 0, &res_nphys) != 0) { + nss_info_always("%px: nss%d: of_address_to_resource() fail for nphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + if (of_address_to_resource(np, 1, &res_vphys) != 0) { + nss_info_always("%px: nss%d: of_address_to_resource() fail for vphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + if (of_address_to_resource(np, 2, &res_qgic_phys) != 0) { + nss_info_always("%px: nss%d: of_address_to_resource() fail for qgic_phys\n", nss_ctx, nss_ctx->id); + goto out; + } + + /* + * Save physical addresses + */ + npd->nphys = res_nphys.start; + npd->vphys = res_vphys.start; + npd->qgic_phys = res_qgic_phys.start; + + npd->nmap = ioremap_nocache(npd->nphys, resource_size(&res_nphys)); + if (!npd->nmap) { + nss_info_always("%px: nss%d: ioremap() fail for nphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + nss_assert(npd->vphys); + npd->vmap = ioremap_cache(npd->vphys, resource_size(&res_vphys)); + if (!npd->vmap) { + nss_info_always("%px: nss%d: ioremap() fail for vphys\n", nss_ctx, nss_ctx->id); + goto out; + } + + npd->qgic_map = ioremap_nocache(npd->qgic_phys, resource_size(&res_qgic_phys)); + if (!npd->qgic_map) { + nss_info_always("%px: nss%d: ioremap() fail for qgic map\n", nss_ctx, nss_ctx->id); + goto out; + } + + /* + * Clear TCM memory used by this core + */ + for (i = 0; i < resource_size(&res_vphys) ; i += 4) { + nss_write_32(npd->vmap, i, 0); + 
NSS_CORE_DMA_CACHE_MAINT((npd->vmap + i), 4, DMA_TO_DEVICE); + } + NSS_CORE_DSB(); + + /* + * Get IRQ numbers + */ + for (i = 0 ; i < npd->num_irq; i++) { + npd->irq[i] = irq_of_parse_and_map(np, i); + if (!npd->irq[i]) { + nss_info_always("%px: nss%d: irq_of_parse_and_map() fail for irq %d\n", nss_ctx, nss_ctx->id, i); + goto out; + } + } + + nss_hal_dt_parse_features(np, npd); + + of_node_put(np); + return npd; + +out: + if (npd->nmap) { + iounmap(npd->nmap); + } + + if (npd->vmap) { + iounmap(npd->vmap); + } + + devm_kfree(&pdev->dev, npd); + of_node_put(np); + return NULL; +} + +/* + * __nss_hal_core_reset() + */ +static int __nss_hal_core_reset(struct platform_device *nss_dev, void __iomem *map, uint32_t addr, uint32_t clk_src) +{ + uint32_t value; + + /* + * De-assert reset for first set + */ + value = nss_read_32(nss_misc_reset, 0x0); + value &= ~(NSS_CORE_GCC_RESET_1 << (nss_dev->id << 3)); + nss_write_32(nss_misc_reset, 0x0, value); + + /* + * Minimum 10 - 20 cycles delay is required after + * de-asserting UBI reset clamp + */ + usleep_range(10, 20); + + /* + * De-assert reset for second set + */ + value &= ~(NSS_CORE_GCC_RESET_2 << (nss_dev->id << 3)); + nss_write_32(nss_misc_reset, 0x0, value); + + /* + * Apply ubi32 core reset + */ + nss_write_32(map, NSS_REGS_RESET_CTRL_OFFSET, 1); + + /* + * Program address configuration + */ + nss_write_32(map, NSS_REGS_CORE_AMC_OFFSET, 1); + nss_write_32(map, NSS_REGS_CORE_BAR_OFFSET, 0x3c000000); + nss_write_32(map, NSS_REGS_CORE_BOOT_ADDR_OFFSET, addr); + + /* + * C2C interrupts are level sensitive + * Copy engine interrupts are level sensitive + */ + nss_write_32(map, NSS_REGS_CORE_INT_STAT2_TYPE_OFFSET, 0xFFFF); + nss_write_32(map, NSS_REGS_CORE_INT_STAT3_TYPE_OFFSET, 0xFF); + + /* + * Enable Instruction Fetch range checking between 0x4000 0000 to 0xBFFF FFFF. 
+ */ + nss_write_32(map, NSS_REGS_CORE_IFETCH_RANGE_OFFSET, 0xBF004001); + + /* + * De-assert ubi32 core reset + */ + nss_write_32(map, NSS_REGS_RESET_CTRL_OFFSET, 0); + + return 0; +} + +/* + * __nss_hal_debug_enable() + * Enable NSS debug + */ +static void __nss_hal_debug_enable(void) +{ + +} + +/* + * nss_hal_clock_set_and_enable() + */ +static int nss_hal_clock_set_and_enable(struct device *dev, const char *id, unsigned long rate) +{ + struct clk *nss_clk = NULL; + int err; + + nss_clk = devm_clk_get(dev, id); + if (IS_ERR(nss_clk)) { + pr_err("%px: cannot get clock: %s\n", dev, id); + return -EFAULT; + } + + if (rate) { + err = clk_set_rate(nss_clk, rate); + if (err) { + pr_err("%px: cannot set %s freq\n", dev, id); + return -EFAULT; + } + } + + err = clk_prepare_enable(nss_clk); + if (err) { + pr_err("%px: cannot enable clock: %s\n", dev, id); + return -EFAULT; + } + + return 0; +} + +/* + * __nss_hal_common_reset + * Do reset/clock configuration common to all cores + */ +static int __nss_hal_common_reset(struct platform_device *nss_dev) +{ + struct device_node *cmn = NULL; + struct resource res_nss_misc_reset; + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NOC_CLK, 461500000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_PTP_REF_CLK, 150000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CSR_CLK, 200000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CFG_CLK, 100000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_IMEM_CLK, 400000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_QOSGEN_REF_CLK, 19200000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_MEM_NOC_NSS_AXI_CLK, 461500000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_SNOC_CLK, 266600000)) { + return -EFAULT; + } + + if 
(nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_TIMEOUT_REF_CLK, 4800000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CE_AXI_CLK, 200000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CE_APB_CLK, 200000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_CE_AXI_CLK, 200000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_CE_APB_CLK, 200000000)) { + return -EFAULT; + } + + /* + * Get reference to NSS common device node + */ + cmn = of_find_node_by_name(NULL, "nss-common"); + if (!cmn) { + pr_err("%px: Unable to find nss-common node\n", nss_dev); + return -EFAULT; + } + + if (of_address_to_resource(cmn, 0, &res_nss_misc_reset) != 0) { + pr_err("%px: of_address_to_resource() return error for nss_misc_reset\n", nss_dev); + of_node_put(cmn); + return -EFAULT; + } + of_node_put(cmn); + + nss_misc_reset = ioremap_nocache(res_nss_misc_reset.start, resource_size(&res_nss_misc_reset)); + if (!nss_misc_reset) { + pr_err("%px: ioremap fail for nss_misc_reset\n", nss_dev); + return -EFAULT; + } + + nss_top_main.nss_hal_common_init_done = true; + nss_info("nss_hal_common_reset Done\n"); + return 0; +} + +/* + * __nss_hal_clock_configure() + */ +static int __nss_hal_clock_configure(struct nss_ctx_instance *nss_ctx, struct platform_device *nss_dev, struct nss_platform_data *npd) +{ + uint32_t i; + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NSSNOC_AHB_CLK, 200000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_AHB_CLK, 200000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_AXI_CLK, 461500000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_MPT_CLK, 25000000)) { + return -EFAULT; + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_NC_AXI_CLK, 461500000)) { + return -EFAULT; + } + + /* + * For 
IPQ807x, any rate above 1497 is Turbo Voltage + * Temporary set the voltage to turbo till we start scaling frequenices. + * This is to ensure probing is safe and autoscaling will correct the voltage. + */ + if (!nss_ctx->id) { + npu_reg = devm_regulator_get(&nss_dev->dev, "npu"); + if (IS_ERR(npu_reg)) { + return PTR_ERR(npu_reg); + } + if (regulator_enable(npu_reg)) { + return -EFAULT; + } + regulator_set_voltage(npu_reg, TURBO_VOLTAGE, TURBO_VOLTAGE); + } + + /* + * No entries, then just load default + */ + if ((nss_runtime_samples.freq_scale[NSS_FREQ_LOW_SCALE].frequency == 0) || + (nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency == 0) || + (nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency == 0)) { + nss_runtime_samples.freq_scale[NSS_FREQ_LOW_SCALE].frequency = NSS_FREQ_187; + nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency = NSS_FREQ_748; + nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency = NSS_FREQ_1497; + nss_info_always("Running default frequencies\n"); + } + + /* + * Test frequency from dtsi, if fail, try to set default frequency. + */ + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, nss_runtime_samples.freq_scale[NSS_FREQ_HIGH_SCALE].frequency)) { + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, NSS_FREQ_1497)) { + return -EFAULT; + } + } + + /* + * Setup ranges, test frequency, and display. 
+ */ + for (i = 0; i < NSS_FREQ_MAX_SCALE; i++) { + if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_187) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_187_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_187_MAX; + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_748) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_748_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_748_MAX; + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_1497) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_1497_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_1497_MAX; + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_1689) { + nss_runtime_samples.freq_scale[i].minimum = NSS_FREQ_1689_MIN; + nss_runtime_samples.freq_scale[i].maximum = NSS_FREQ_1689_MAX; + } else { + nss_info_always("Frequency not found %d\n", nss_runtime_samples.freq_scale[i].frequency); + return -EFAULT; + } + + /* + * Test the frequency, if fail, then default to safe frequency and abort + */ + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, nss_runtime_samples.freq_scale[i].frequency)) { + return -EFAULT; + } + } + + nss_info_always("Supported Frequencies - "); + for (i = 0; i < NSS_FREQ_MAX_SCALE; i++) { + if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_187) { + nss_info_always("187.2 MHz "); + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_748) { + nss_info_always("748.8 MHz "); + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_1497) { + nss_info_always("1.4976 GHz "); + } else if (nss_runtime_samples.freq_scale[i].frequency == NSS_FREQ_1689) { + nss_info_always("1.6896 GHz "); + } else { + nss_info_always("Error\nNo Table/Invalid Frequency Found\n"); + return -EFAULT; + } + } + nss_info_always("\n"); + + /* + * Set values only once for core0. Grab the proper clock. 
+ */ + if (nss_ctx->id) { + nss_core1_clk = clk_get(&nss_dev->dev, NSS_CORE_CLK); + } else { + nss_core0_clk = clk_get(&nss_dev->dev, NSS_CORE_CLK); + } + + if (nss_hal_clock_set_and_enable(&nss_dev->dev, NSS_CORE_CLK, nss_runtime_samples.freq_scale[NSS_FREQ_MID_SCALE].frequency)) { + return -EFAULT; + } + + return 0; +} + +/* + * __nss_hal_read_interrupt_cause() + */ +static void __nss_hal_read_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t *cause) +{ +} + +/* + * __nss_hal_clear_interrupt_cause() + */ +static void __nss_hal_clear_interrupt_cause(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ +} + +/* + * __nss_hal_disable_interrupt() + */ +static void __nss_hal_disable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ +} + +/* + * __nss_hal_enable_interrupt() + */ +static void __nss_hal_enable_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t shift_factor, uint32_t cause) +{ +} + +/* + * __nss_hal_send_interrupt() + */ +static void __nss_hal_send_interrupt(struct nss_ctx_instance *nss_ctx, uint32_t type) +{ + /* + * Check if core and type is Valid + */ + nss_assert(nss_ctx->id < nss_top_main.num_nss); + nss_assert(type < NSS_H2N_INTR_TYPE_MAX); + + nss_write_32(nss_ctx->qgic_map, NSS_QGIC_IPC_REG_OFFSET, intr_cause[nss_ctx->id][type]); +} + +/* + * __nss_hal_request_irq() + */ +static int __nss_hal_request_irq(struct nss_ctx_instance *nss_ctx, struct nss_platform_data *npd, int irq_num) +{ + struct int_ctx_instance *int_ctx = &nss_ctx->int_ctx[irq_num]; + int err = -1, irq = npd->irq[irq_num]; + + irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_SOS) { + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_non_queue, NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT); + int_ctx->cause = NSS_N2H_INTR_EMPTY_BUFFERS_SOS; + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_empty_buf_sos", int_ctx); + } 
+ + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_EMPTY_BUFFER_QUEUE) { + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_queue, NSS_EMPTY_BUFFER_RETURN_PROCESSING_WEIGHT); + int_ctx->cause = NSS_N2H_INTR_EMPTY_BUFFER_QUEUE; + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_empty_buf_queue", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_TX_UNBLOCKED) { + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_non_queue, NSS_TX_UNBLOCKED_PROCESSING_WEIGHT); + int_ctx->cause = NSS_N2H_INTR_TX_UNBLOCKED; + err = request_irq(irq, nss_hal_handle_irq, 0, "nss-tx-unblock", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_0) { + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_queue, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + int_ctx->cause = NSS_N2H_INTR_DATA_QUEUE_0; + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_queue0", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_1) { + int_ctx->cause = NSS_N2H_INTR_DATA_QUEUE_1; + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_queue, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_queue1", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_2) { + int_ctx->cause = NSS_N2H_INTR_DATA_QUEUE_2; + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_queue, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_queue2", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_DATA_QUEUE_3) { + int_ctx->cause = NSS_N2H_INTR_DATA_QUEUE_3; + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_queue, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_queue3", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_COREDUMP_COMPLETE) { + int_ctx->cause = NSS_N2H_INTR_COREDUMP_COMPLETE; + 
netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_emergency, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_coredump_complete", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_PAGED_EMPTY_BUFFER_SOS) { + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_non_queue, NSS_EMPTY_BUFFER_SOS_PROCESSING_WEIGHT); + int_ctx->cause = NSS_N2H_INTR_PAGED_EMPTY_BUFFERS_SOS; + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_paged_empty_buf_sos", int_ctx); + } + + if (irq_num == NSS_HAL_N2H_INTR_PURPOSE_PROFILE_DMA) { + int_ctx->cause = NSS_N2H_INTR_PROFILE_DMA; + netif_napi_add(&nss_ctx->napi_ndev, &int_ctx->napi, nss_core_handle_napi_sdma, NSS_DATA_COMMAND_BUFFER_PROCESSING_WEIGHT); + err = request_irq(irq, nss_hal_handle_irq, 0, "nss_profile_dma", int_ctx); + } + + if (err) { + return err; + } + + int_ctx->irq = irq; + return 0; +} + +/* + * __nss_hal_init_imem + */ +void __nss_hal_init_imem(struct nss_ctx_instance *nss_ctx) +{ + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + + mem_ctx->imem_head = NSS_IMEM_START + NSS_IMEM_SIZE * nss_ctx->id; + mem_ctx->imem_end = mem_ctx->imem_head + NSS_IMEM_SIZE; + mem_ctx->imem_tail = mem_ctx->imem_head; + + nss_info("%px: IMEM init: head: 0x%x end: 0x%x tail: 0x%x\n", nss_ctx, + mem_ctx->imem_head, mem_ctx->imem_end, mem_ctx->imem_tail); +} + +/* + * __nss_hal_init_utcm_shared + */ +bool __nss_hal_init_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t *meminfo_start) +{ + /* + * Nothing to be done as there are no TCM in ipq807x + */ + return true; +} + +/* + * nss_hal_ipq807x_ops + */ +struct nss_hal_ops nss_hal_ipq807x_ops = { + .common_reset = __nss_hal_common_reset, + .core_reset = __nss_hal_core_reset, + .clock_configure = __nss_hal_clock_configure, + .firmware_load = nss_hal_firmware_load, + .debug_enable = __nss_hal_debug_enable, + .of_get_pdata = __nss_hal_of_get_pdata, + .request_irq = 
__nss_hal_request_irq, + .send_interrupt = __nss_hal_send_interrupt, + .enable_interrupt = __nss_hal_enable_interrupt, + .disable_interrupt = __nss_hal_disable_interrupt, + .clear_interrupt_cause = __nss_hal_clear_interrupt_cause, + .read_interrupt_cause = __nss_hal_read_interrupt_cause, + .init_imem = __nss_hal_init_imem, + .init_utcm_shared = __nss_hal_init_utcm_shared, +}; diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_hal/nss_hal.c b/feeds/ipq807x/qca-nss-drv/src/nss_hal/nss_hal.c new file mode 100644 index 000000000..30085e010 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_hal/nss_hal.c @@ -0,0 +1,834 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * nss_hal.c + * NSS HAL general APIs. 
+ */ + +#include +#include +#include +#include +#include + +#include "nss_hal.h" +#include "nss_arch.h" +#include "nss_core.h" +#include "nss_tx_rx_common.h" +#include "nss_data_plane.h" +#if (NSS_PM_SUPPORT == 1) +#include "nss_pm.h" +#endif +#if (NSS_FABRIC_SCALING_SUPPORT == 1) +#include +#endif + +/* + * Macros + */ +#define MIN_IMG_SIZE (64*1024) +#define NSS_AP0_IMAGE "qca-nss0.bin" +#define NSS_AP1_IMAGE "qca-nss1.bin" + +/* + * File local/Static variables/functions + */ +static const struct net_device_ops nss_netdev_ops; +static const struct ethtool_ops nss_ethtool_ops; + +int nss_hal_firmware_load(struct nss_ctx_instance *nss_ctx, struct platform_device *nss_dev, struct nss_platform_data *npd) +{ + const struct firmware *nss_fw; + void __iomem *load_mem; + int rc; + + if (nss_ctx->id == 0) { + rc = request_firmware(&nss_fw, NSS_AP0_IMAGE, &(nss_dev->dev)); + } else if (nss_ctx->id == 1) { + rc = request_firmware(&nss_fw, NSS_AP1_IMAGE, &(nss_dev->dev)); + } else { + nss_warning("%px: Invalid nss dev: %d\n", nss_ctx, nss_ctx->id); + return -EINVAL; + } + + /* + * Check if the file read is successful + */ + if (rc) { + nss_info_always("%px: request_firmware failed with err code: %d", nss_ctx, rc); + return rc; + } + + if (nss_fw->size < MIN_IMG_SIZE) { + nss_info_always("%px: nss firmware is truncated, size:%d", nss_ctx, (int)nss_fw->size); + return rc; + } + + load_mem = ioremap_nocache(npd->load_addr, nss_fw->size); + if (!load_mem) { + nss_info_always("%px: ioremap_nocache failed: %x", nss_ctx, npd->load_addr); + release_firmware(nss_fw); + return rc; + } + + nss_info_always("nss_driver - fw of size %d bytes copied to load addr: %x, nss_id : %d\n", (int)nss_fw->size, npd->load_addr, nss_dev->id); + memcpy_toio(load_mem, nss_fw->data, nss_fw->size); + release_firmware(nss_fw); + iounmap(load_mem); + return 0; +} + +/* + * nss_hal_dt_parse_features() + */ +void nss_hal_dt_parse_features(struct device_node *np, struct nss_platform_data *npd) +{ + /* + * Read 
the features in + */ + npd->bridge_enabled = of_property_read_bool(np, "qcom,bridge-enabled"); + npd->capwap_enabled = of_property_read_bool(np, "qcom,capwap-enabled"); + npd->clmap_enabled = of_property_read_bool(np, "qcom,clmap-enabled"); + npd->crypto_enabled = of_property_read_bool(np, "qcom,crypto-enabled"); + npd->dtls_enabled = of_property_read_bool(np, "qcom,dtls-enabled"); + npd->gre_enabled = of_property_read_bool(np, "qcom,gre-enabled"); + npd->gre_redir_enabled = of_property_read_bool(np, "qcom,gre-redir-enabled"); + npd->gre_tunnel_enabled = of_property_read_bool(np, "qcom,gre_tunnel_enabled"); + npd->gre_redir_mark_enabled = of_property_read_bool(np, "qcom,gre-redir-mark-enabled"); + npd->igs_enabled = of_property_read_bool(np, "qcom,igs-enabled"); + npd->ipsec_enabled = of_property_read_bool(np, "qcom,ipsec-enabled"); + npd->ipv4_enabled = of_property_read_bool(np, "qcom,ipv4-enabled"); + npd->ipv4_reasm_enabled = of_property_read_bool(np, "qcom,ipv4-reasm-enabled"); + npd->ipv6_enabled = of_property_read_bool(np, "qcom,ipv6-enabled"); + npd->ipv6_reasm_enabled = of_property_read_bool(np, "qcom,ipv6-reasm-enabled"); + npd->l2tpv2_enabled = of_property_read_bool(np, "qcom,l2tpv2-enabled"); + npd->map_t_enabled = of_property_read_bool(np, "qcom,map-t-enabled"); + npd->oam_enabled = of_property_read_bool(np, "qcom,oam-enabled"); + npd->ppe_enabled = of_property_read_bool(np, "qcom,ppe-enabled"); + npd->pppoe_enabled = of_property_read_bool(np, "qcom,pppoe-enabled"); + npd->pptp_enabled = of_property_read_bool(np, "qcom,pptp-enabled"); + npd->portid_enabled = of_property_read_bool(np, "qcom,portid-enabled"); + npd->pvxlan_enabled = of_property_read_bool(np, "qcom,pvxlan-enabled"); + npd->qvpn_enabled = of_property_read_bool(np, "qcom,qvpn-enabled"); + npd->rmnet_rx_enabled = of_property_read_bool(np, "qcom,rmnet_rx-enabled"); + npd->shaping_enabled = of_property_read_bool(np, "qcom,shaping-enabled"); + npd->tls_enabled = of_property_read_bool(np, 
"qcom,tls-enabled"); + npd->tstamp_enabled = of_property_read_bool(np, "qcom,tstamp-enabled"); + npd->turbo_frequency = of_property_read_bool(np, "qcom,turbo-frequency"); + npd->tun6rd_enabled = of_property_read_bool(np, "qcom,tun6rd-enabled"); + npd->tunipip6_enabled = of_property_read_bool(np, "qcom,tunipip6-enabled"); + npd->vlan_enabled = of_property_read_bool(np, "qcom,vlan-enabled"); + npd->vxlan_enabled = of_property_read_bool(np, "qcom,vxlan-enabled"); + npd->wlanredirect_enabled = of_property_read_bool(np, "qcom,wlanredirect-enabled"); + npd->wifioffload_enabled = of_property_read_bool(np, "qcom,wlan-dataplane-offload-enabled"); + npd->match_enabled = of_property_read_bool(np, "qcom,match-enabled"); + npd->mirror_enabled = of_property_read_bool(np, "qcom,mirror-enabled"); + npd->udp_st_enabled = of_property_read_bool(np, "qcom,udp-st-enabled"); +} +/* + * nss_hal_clean_up_irq() + */ +static void nss_hal_clean_up_irq(struct int_ctx_instance *int_ctx) +{ + if (!int_ctx->irq) { + return; + } + + /* + * Wait here till the poll is complete. + */ + napi_disable(&int_ctx->napi); + + /* + * Interrupt can be raised here before free_irq() but as napi is + * already disabled, it will be never sheduled from hard_irq + * context. 
+ */ + irq_clear_status_flags(int_ctx->irq, IRQ_DISABLE_UNLAZY); + free_irq(int_ctx->irq, int_ctx); + int_ctx->irq = 0; + + netif_napi_del(&int_ctx->napi); +} + +/* + * nss_hal_register_irq() + */ +static int nss_hal_register_irq(struct nss_ctx_instance *nss_ctx, struct nss_platform_data *npd, + struct net_device *netdev, int irq_num) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct int_ctx_instance *int_ctx = &nss_ctx->int_ctx[irq_num]; + int err = 0; + + /* + * request for IRQs + */ + int_ctx->nss_ctx = nss_ctx; + err = nss_top->hal_ops->request_irq(nss_ctx, npd, irq_num); + if (err) { + nss_warning("%px: IRQ request for queue %d failed", nss_ctx, irq_num); + return err; + } + + /* + * Register NAPI for NSS core interrupt + */ + napi_enable(&int_ctx->napi); + return 0; +} + +/* + * nss_hal_probe() + * HLOS device probe callback + */ +int nss_hal_probe(struct platform_device *nss_dev) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = NULL; + struct nss_platform_data *npd = NULL; + int i, err = 0; +#ifdef NSS_DRV_TSTAMP_ENABLE + struct net_device *tstamp_ndev = NULL; +#endif + + if (nss_top_main.nss_hal_common_init_done == false) { + err = nss_top->hal_ops->common_reset(nss_dev); + if (err) { + nss_info_always("NSS HAL common init failed\n"); + return -EFAULT; + } + } + +#if (NSS_DT_SUPPORT == 1) + if (!nss_dev->dev.of_node) { + pr_err("nss-driver: Device tree not available\n"); + return -ENODEV; + } + + npd = nss_top->hal_ops->of_get_pdata(nss_dev); + if (!npd) { + return -EFAULT; + } + + nss_ctx = &nss_top->nss[npd->id]; + nss_ctx->id = npd->id; + nss_dev->id = nss_ctx->id; +#else + npd = (struct nss_platform_data *) nss_dev->dev.platform_data; + nss_ctx = &nss_top->nss[nss_dev->id]; + nss_ctx->id = nss_dev->id; +#endif + nss_ctx->num_irq = npd->num_irq; + nss_ctx->nss_top = nss_top; + + /* + * dev is required for dma map/unmap + */ + nss_ctx->dev = &nss_dev->dev; + + nss_info("%px: NSS_DEV_ID %s\n", 
nss_ctx, dev_name(&nss_dev->dev)); + + /* + * Do firmware load from nss-drv if required + */ + err = nss_top->hal_ops->firmware_load(nss_ctx, nss_dev, npd); + if (err) { + nss_info_always("%px: firmware load from driver failed\n", nss_ctx); + goto err_init; + } + + err = nss_top->hal_ops->clock_configure(nss_ctx, nss_dev, npd); + if (err) { + nss_info_always("%px: clock configure failed\n", nss_ctx); + goto err_init; + } + + /* + * Get load address of NSS firmware + */ + nss_info("%px: Setting NSS%d Firmware load address to %x\n", nss_ctx, nss_ctx->id, npd->load_addr); + nss_top->nss[nss_ctx->id].load = npd->load_addr; + + /* + * Get virtual and physical memory addresses for nss logical/hardware address maps + */ + + /* + * Virtual address of CSM space + */ + nss_ctx->nmap = npd->nmap; + + /* + * Physical address of CSM space + */ + nss_ctx->nphys = npd->nphys; + nss_assert(nss_ctx->nphys); + + /* + * Virtual address of logical registers space + */ + nss_ctx->vmap = npd->vmap; + + /* + * Virtual address of QGIC interrupt space + */ + nss_ctx->qgic_map = npd->qgic_map; + + /* + * Physical address of logical registers space + */ + nss_ctx->vphys = npd->vphys; + nss_info("%d:ctx=%px, vphys=%x, vmap=%px, nphys=%x, nmap=%px", nss_ctx->id, + nss_ctx, nss_ctx->vphys, nss_ctx->vmap, nss_ctx->nphys, nss_ctx->nmap); + + if (!nss_meminfo_init(nss_ctx)) { + nss_info_always("%px: meminfo init failed\n", nss_ctx); + err = -EFAULT; + goto err_init; + } + + /* + * Initialize the dummy netdevice. 
+ */ + init_dummy_netdev(&nss_ctx->napi_ndev); + + for (i = 0; i < npd->num_irq; i++) { + err = nss_hal_register_irq(nss_ctx, npd, &nss_ctx->napi_ndev, i); + if (err) { + goto err_register_irq; + } + } + +#ifdef NSS_DRV_TSTAMP_ENABLE + /* + * Allocate tstamp net_device and register the net_device + */ + if (npd->tstamp_enabled == NSS_FEATURE_ENABLED) { + tstamp_ndev = nss_tstamp_register_netdev(); + if (!tstamp_ndev) { + nss_warning("%px: Unable to register the TSTAMP net_device", nss_ctx); + npd->tstamp_enabled = NSS_FEATURE_NOT_ENABLED; + } + nss_top->tstamp_handler_id = nss_dev->id; + nss_tstamp_register_handler(tstamp_ndev); + } +#endif + /* + * Initialize the handlers for all interfaces associated with core + */ + nss_core_init_handlers(nss_ctx); + + /* + * Features that will always be enabled on both cores + */ + nss_dynamic_interface_register_handler(nss_ctx); + nss_n2h_register_handler(nss_ctx); + nss_project_register_handler(nss_ctx); +#ifdef NSS_DRV_QRFS_ENABLE + nss_qrfs_register_handler(nss_ctx); +#endif + +#ifdef NSS_DRV_C2C_ENABLE + nss_c2c_tx_register_handler(nss_ctx); + nss_c2c_rx_register_handler(nss_ctx); +#endif + nss_unaligned_register_handler(nss_ctx); + + /* + * Check functionalities are supported by this NSS core + */ +#ifdef NSS_DRV_SHAPER_ENABLE + if (npd->shaping_enabled == NSS_FEATURE_ENABLED) { + nss_top->shaping_handler_id = nss_dev->id; + nss_info("%d: NSS shaping is enabled", nss_dev->id); + } +#endif + + if (npd->ipv4_enabled == NSS_FEATURE_ENABLED) { + nss_top->ipv4_handler_id = nss_dev->id; + nss_ipv4_register_handler(); + +#ifdef NSS_DRV_EDMA_ENABLE + nss_top->edma_handler_id = nss_dev->id; + nss_edma_register_handler(); +#endif + nss_eth_rx_register_handler(nss_ctx); +#ifdef NSS_DRV_LAG_ENABLE + nss_lag_register_handler(); +#endif +#ifdef NSS_DRV_TRUSTSEC_ENABLE + nss_top->trustsec_tx_handler_id = nss_dev->id; + nss_trustsec_tx_register_handler(); +#endif + + nss_top->virt_if_handler_id = nss_dev->id; + + 
nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_N2H] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_H2N] = nss_dev->id; + } + +#ifdef NSS_DRV_CAPWAP_ENABLE + if (npd->capwap_enabled == NSS_FEATURE_ENABLED) { + nss_top->capwap_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_OUTER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_HOST_INNER] = nss_dev->id; + } +#endif + +#ifdef NSS_DRV_IPV4_REASM_ENABLE + if (npd->ipv4_reasm_enabled == NSS_FEATURE_ENABLED) { + nss_top->ipv4_reasm_handler_id = nss_dev->id; + nss_ipv4_reasm_register_handler(); + } +#endif + +#ifdef NSS_DRV_IPV6_ENABLE + if (npd->ipv6_enabled == NSS_FEATURE_ENABLED) { + nss_top->ipv6_handler_id = nss_dev->id; + nss_ipv6_register_handler(); + } + +#ifdef NSS_DRV_IPV6_REASM_ENABLE + if (npd->ipv6_reasm_enabled == NSS_FEATURE_ENABLED) { + nss_top->ipv6_reasm_handler_id = nss_dev->id; + nss_ipv6_reasm_register_handler(); + } +#endif +#endif + +#ifdef NSS_DRV_CRYPTO_ENABLE + /* + * TODO: when Crypto is moved to Core-1 it needs to + * flush based on nss_top->crypto_enabled + */ + if (npd->crypto_enabled == NSS_FEATURE_ENABLED) { + nss_top->crypto_handler_id = nss_dev->id; +#if defined(NSS_HAL_IPQ807x_SUPPORT) || defined(NSS_HAL_IPQ60XX_SUPPORT) || defined(NSS_HAL_IPQ50XX_SUPPORT) + nss_crypto_cmn_register_handler(); +#else + nss_top->crypto_enabled = 1; + nss_crypto_register_handler(); +#endif + +#if defined(NSS_HAL_IPQ807x_SUPPORT) || defined(NSS_HAL_IPQ60XX_SUPPORT) + nss_top->dma_handler_id = nss_dev->id; + nss_dma_register_handler(); +#endif + } +#endif + +#ifdef NSS_DRV_IPSEC_ENABLE + if (npd->ipsec_enabled == NSS_FEATURE_ENABLED) { + nss_top->ipsec_handler_id = nss_dev->id; +#if defined(NSS_HAL_IPQ807x_SUPPORT) || defined(NSS_HAL_IPQ60XX_SUPPORT) || defined(NSS_HAL_IPQ50XX_SUPPORT) + 
nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_OUTER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_MDATA_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_MDATA_OUTER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_REDIRECT] = nss_dev->id; + nss_ipsec_cmn_register_handler(); +#else + nss_ipsec_register_handler(); +#endif + } +#endif + + if (npd->wlanredirect_enabled == NSS_FEATURE_ENABLED) { + nss_top->wlan_handler_id = nss_dev->id; + } + +#ifdef NSS_DRV_TUN6RD_ENABLE + if (npd->tun6rd_enabled == NSS_FEATURE_ENABLED) { + nss_top->tun6rd_handler_id = nss_dev->id; + } +#endif + +#ifdef NSS_DRV_PPTP_ENABLE + if (npd->pptp_enabled == NSS_FEATURE_ENABLED) { + nss_top->pptp_handler_id = nss_dev->id; + nss_pptp_register_handler(); + } +#endif + + if (npd->pppoe_enabled == NSS_FEATURE_ENABLED) { + nss_top->pppoe_handler_id = nss_dev->id; + nss_pppoe_register_handler(); + } + +#ifdef NSS_DRV_PPE_ENABLE + if (npd->ppe_enabled == NSS_FEATURE_ENABLED) { + nss_top->ppe_handler_id = nss_dev->id; + nss_ppe_register_handler(); + nss_ppe_vp_register_handler(); + } +#endif + +#ifdef NSS_DRV_L2TP_ENABLE + if (npd->l2tpv2_enabled == NSS_FEATURE_ENABLED) { + nss_top->l2tpv2_handler_id = nss_dev->id; + nss_l2tpv2_register_handler(); + } +#endif + +#ifdef NSS_DRV_DTLS_ENABLE + if (npd->dtls_enabled == NSS_FEATURE_ENABLED) { + nss_top->dtls_handler_id = nss_dev->id; +#if defined(NSS_HAL_IPQ807x_SUPPORT) || defined(NSS_HAL_IPQ60XX_SUPPORT) || defined(NSS_HAL_IPQ50XX_SUPPORT) + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_OUTER] = nss_dev->id; + nss_dtls_cmn_register_handler(); +#else + 
nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_DTLS] = nss_dev->id; + nss_dtls_register_handler(); +#endif + } +#endif + +#ifdef NSS_DRV_MAPT_ENABLE + if (npd->map_t_enabled == NSS_FEATURE_ENABLED) { + nss_top->map_t_handler_id = nss_dev->id; + nss_map_t_register_handler(); + } +#endif + +#ifdef NSS_DRV_TUNIPIP6_ENABLE + if (npd->tunipip6_enabled == NSS_FEATURE_ENABLED) { + nss_top->tunipip6_handler_id = nss_dev->id; + nss_tunipip6_register_handler(); + } +#endif + +#ifdef NSS_DRV_GRE_ENABLE + if (npd->gre_enabled == NSS_FEATURE_ENABLED) { + nss_top->gre_handler_id = nss_dev->id; + nss_gre_register_handler(); + } +#endif + +#ifdef NSS_DRV_GRE_REDIR_ENABLE + if (npd->gre_redir_enabled == NSS_FEATURE_ENABLED) { + nss_top->gre_redir_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_HOST_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_WIFI_OFFL_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_SJACK_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR_OUTER] = nss_dev->id; + nss_gre_redir_register_handler(); + nss_gre_redir_lag_us_register_handler(); + nss_gre_redir_lag_ds_register_handler(); +#ifdef NSS_DRV_SJACK_ENABLE + nss_top->sjack_handler_id = nss_dev->id; + nss_sjack_register_handler(); +#endif + + } + + if (npd->gre_redir_mark_enabled == NSS_FEATURE_ENABLED) { + nss_top->gre_redir_mark_handler_id = nss_dev->id; + nss_gre_redir_mark_register_handler(); + } +#endif + +#ifdef NSS_DRV_GRE_TUNNEL_ENABLE + if (npd->gre_tunnel_enabled == NSS_FEATURE_ENABLED) { + nss_top->gre_tunnel_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_OUTER] = 
nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INLINE_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INLINE_OUTER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_TUNNEL_INNER_EXCEPTION] = nss_dev->id; + } +#endif + +#ifdef NSS_DRV_PORTID_ENABLE + if (npd->portid_enabled == NSS_FEATURE_ENABLED) { + nss_top->portid_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_PORTID] = nss_dev->id; + nss_portid_register_handler(); + } +#endif + + if (npd->wifioffload_enabled == NSS_FEATURE_ENABLED) { + nss_top->wifi_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_VAP] = nss_dev->id; + nss_wifi_register_handler(); + nss_wifili_register_handler(); +#ifdef NSS_DRV_WIFI_EXT_VDEV_ENABLE + nss_wifi_ext_vdev_register_handler(); +#endif + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_WIFILI_INTERNAL] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_WIFILI_EXTERNAL0] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_WIFILI_EXTERNAL1] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_WIFI_EXT_VDEV_WDS] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_WIFI_EXT_VDEV_VLAN] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_OUTER] = nss_dev->id; + + /* + * Register wifi mac database when offload enabled + */ + nss_top->wmdb_handler_id = nss_dev->id; + nss_wifi_mac_db_register_handler(); + + /* + * Initialize wifili thread scheme database + */ + nss_wifili_thread_scheme_db_init(nss_dev->id); + } + +#ifdef NSS_DRV_OAM_ENABLE + if (npd->oam_enabled == NSS_FEATURE_ENABLED) { + nss_top->oam_handler_id = nss_dev->id; + nss_oam_register_handler(); + } 
+#endif + +#ifdef NSS_DRV_BRIDGE_ENABLE + if (npd->bridge_enabled == NSS_FEATURE_ENABLED) { + nss_top->bridge_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_BRIDGE] = nss_dev->id; + nss_bridge_init(); + } +#endif + + if (npd->vlan_enabled == NSS_FEATURE_ENABLED) { + nss_top->vlan_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_VLAN] = nss_dev->id; + nss_vlan_register_handler(); + } + +#ifdef NSS_DRV_QVPN_ENABLE +#if defined(NSS_HAL_IPQ807x_SUPPORT) || defined(NSS_HAL_IPQ60XX_SUPPORT) + if (npd->qvpn_enabled == NSS_FEATURE_ENABLED) { + nss_top->qvpn_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_QVPN_OUTER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_QVPN_INNER] = nss_dev->id; + nss_qvpn_register_handler(); + } +#endif +#endif + +#ifdef NSS_DRV_PVXLAN_ENABLE + if (npd->pvxlan_enabled == NSS_FEATURE_ENABLED) { + nss_top->pvxlan_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_PVXLAN_HOST_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_PVXLAN_OUTER] = nss_dev->id; + } +#endif + +#ifdef NSS_DRV_RMNET_ENABLE + if (npd->rmnet_rx_enabled == NSS_FEATURE_ENABLED) { + nss_top->rmnet_rx_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_N2H] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_H2N] = nss_dev->id; + } +#endif + +#ifdef NSS_DRV_IGS_ENABLE + if (npd->igs_enabled == NSS_FEATURE_ENABLED) { + nss_top->igs_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_IGS] = nss_dev->id; + nss_info("%d: NSS IGS is enabled", nss_dev->id); + } +#endif + +#ifdef NSS_DRV_CLMAP_ENABLE + if (npd->clmap_enabled == NSS_FEATURE_ENABLED) { + nss_top->clmap_handler_id = nss_dev->id; + 
nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS] = nss_dev->id; + } +#endif + +#ifdef NSS_DRV_VXLAN_ENABLE + if (npd->vxlan_enabled == NSS_FEATURE_ENABLED) { + nss_top->vxlan_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_OUTER] = nss_dev->id; + nss_vxlan_init(); + } +#endif + +#ifdef NSS_DRV_MATCH_ENABLE + if (npd->match_enabled == NSS_FEATURE_ENABLED) { + nss_top->match_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_MATCH] = nss_dev->id; + nss_match_init(); + } +#endif + +#ifdef NSS_DRV_TLS_ENABLE +#if defined(NSS_HAL_IPQ807x_SUPPORT) || defined(NSS_HAL_IPQ60XX_SUPPORT) + if (npd->tls_enabled == NSS_FEATURE_ENABLED) { + nss_top->tls_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_TLS_INNER] = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_TLS_OUTER] = nss_dev->id; + nss_tls_register_handler(); + } +#endif +#endif + +#ifdef NSS_DRV_MIRROR_ENABLE + if (npd->mirror_enabled == NSS_FEATURE_ENABLED) { + nss_top->mirror_handler_id = nss_dev->id; + nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_MIRROR] = nss_dev->id; + nss_mirror_register_handler(); + nss_info("%d: NSS mirror is enabled", nss_dev->id); + } + +#endif + +#ifdef NSS_DRV_UDP_ST_ENABLE + if (npd->udp_st_enabled == NSS_FEATURE_ENABLED) { + nss_top->udp_st_handler_id = nss_dev->id; + nss_udp_st_register_handler(nss_ctx); + } +#endif + + if (nss_ctx->id == 0) { +#if (NSS_FREQ_SCALE_SUPPORT == 1) + nss_freq_register_handler(); + + /* + * Init CPU usage detail + * Note: As of now, ubi cpu usage is supported only for core0 + */ + nss_freq_init_cpu_usage(); +#endif + + nss_lso_rx_register_handler(nss_ctx); + } + + nss_top->frequency_handler_id = 
nss_dev->id; + + /* + * Initialize decongestion callbacks to NULL + */ + for (i = 0; i < NSS_MAX_CLIENTS; i++) { + nss_ctx->queue_decongestion_callback[i] = 0; + nss_ctx->queue_decongestion_ctx[i] = 0; + } + + spin_lock_init(&(nss_ctx->decongest_cb_lock)); + nss_ctx->magic = NSS_CTX_MAGIC; + + nss_info("%px: Reseting NSS core %d now", nss_ctx, nss_ctx->id); + + /* + * Enable clocks and bring NSS core out of reset + */ + err = nss_top->hal_ops->core_reset(nss_dev, nss_ctx->nmap, nss_ctx->load, nss_top->clk_src); + if (err) { + goto err_register_irq; + } + + /* + * Initialize max buffer size for NSS core + */ + nss_ctx->max_buf_size = NSS_NBUF_PAYLOAD_SIZE; + + /* + * Initialize S/G status pointers to NULL + */ + for (i = 0; i < NSS_N2H_DESC_RING_NUM; i++) { + nss_ctx->n2h_desc_ring[i].head = NULL; + nss_ctx->n2h_desc_ring[i].tail = NULL; + nss_ctx->n2h_desc_ring[i].jumbo_start = NULL; + } + + /* + * Enable interrupts for NSS core. + */ + for (i = 0; i < npd->num_irq; i++) { + nss_hal_enable_interrupt(nss_ctx, nss_ctx->int_ctx[i].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS); + } + + nss_info("%px: All resources initialized and nss core%d has been brought out of reset", nss_ctx, nss_dev->id); + goto out; + +err_register_irq: + for (i = 0; i < npd->num_irq; i++) { + nss_hal_clean_up_irq(&nss_ctx->int_ctx[i]); + } + +err_init: + if (nss_dev->dev.of_node) { + if (npd->nmap) { + iounmap(npd->nmap); + } + + if (npd->vmap) { + iounmap(npd->vmap); + } + } + +out: + if (nss_dev->dev.of_node) { + devm_kfree(&nss_dev->dev, npd); + } + return err; +} + +/* + * nss_hal_remove() + * HLOS device remove callback + */ +int nss_hal_remove(struct platform_device *nss_dev) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[nss_dev->id]; + int i; + + /* + * Clean up debugfs + */ + nss_stats_clean(); + + /* + * Clear up the resources associated with the interrupt + */ + for (i = 0; i < nss_ctx->num_irq; i++) { + 
nss_hal_disable_interrupt(nss_ctx, nss_ctx->int_ctx[i].shift_factor, + NSS_HAL_SUPPORTED_INTERRUPTS); + nss_hal_clean_up_irq(&nss_ctx->int_ctx[i]); + } + + /* + * nss-drv is exiting, unregister and restore host data plane + */ + nss_top->data_plane_ops->data_plane_unregister(); + +#if (NSS_FABRIC_SCALING_SUPPORT == 1) + fab_scaling_unregister(nss_core0_clk); +#endif + + if (nss_dev->dev.of_node) { + if (nss_ctx->nmap) { + iounmap(nss_ctx->nmap); + nss_ctx->nmap = 0; + } + + if (nss_ctx->vmap) { + iounmap(nss_ctx->vmap); + nss_ctx->vmap = 0; + } + } + + nss_info("%px: All resources freed for nss core%d", nss_ctx, nss_dev->id); + return 0; +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_hlos_if.h b/feeds/ipq807x/qca-nss-drv/src/nss_hlos_if.h new file mode 100644 index 000000000..fdcf21b8c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_hlos_if.h @@ -0,0 +1,381 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2019, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_hlos_if.h + * NSS to HLOS interface definitions. 
+ */ + +#ifndef __NSS_HLOS_IF_H +#define __NSS_HLOS_IF_H + +#define NSS_MIN_NUM_CONN 256 /* MIN Connection shared between IPv4 and IPv6 */ +#define NSS_FW_DEFAULT_NUM_CONN 1024 /* Firmware default number of connections for IPv4 and IPv6 */ +#define NSS_NUM_CONN_QUANTA_MASK (1024 - 1) /* Quanta of number of connections 1024 */ +#define NSS_CONN_CFG_TIMEOUT 6000 /* 6 sec timeout for connection cfg message */ + +/* + * The following definitions sets the maximum number of connections + * based on the type of memory profile that the system is operating with + */ +#if defined (NSS_MEM_PROFILE_LOW) +#define NSS_DEFAULT_NUM_CONN 512 /* Default number of connections for IPv4 and IPv6 each, for low memory profile */ +#if defined (NSS_DRV_IPV6_ENABLE) +#define NSS_MAX_TOTAL_NUM_CONN_IPV4_IPV6 1024 /* MAX Connection shared between IPv4 and IPv6 for low memory profile */ +#else +#define NSS_MAX_TOTAL_NUM_CONN_IPV4_IPV6 512 /* MAX Connection for IPv4 for low memory profile */ +#endif +#define NSS_LOW_MEM_EMPTY_POOL_BUF_SZ 4096 /* Default empty buffer pool size for low profile */ +#elif defined (NSS_MEM_PROFILE_MEDIUM) +#define NSS_DEFAULT_NUM_CONN 2048 /* Default number of connections for IPv4 and IPv6 each, for medium memory profile */ +#define NSS_MAX_TOTAL_NUM_CONN_IPV4_IPV6 4096 /* MAX Connection shared between IPv4 and IPv6 for medium memory profile */ +#else +#define NSS_DEFAULT_NUM_CONN 4096 /* Default number of connections for each IPv4 and IPv6 */ +#define NSS_MAX_TOTAL_NUM_CONN_IPV4_IPV6 8192 /* MAX Connection shared between IPv4 and IPv6 */ +#endif + +#if defined(NSS_SKB_FIXED_SIZE_2K) && !defined(__LP64__) +#define NSS_EMPTY_BUFFER_SIZE 1792 /* Default buffer size for reduced memory profiles. */ +#define NSS_FIXED_BUFFER_SIZE /* For low memory profiles, maximum buffer size/MTU is fixed */ +#else +#define NSS_EMPTY_BUFFER_SIZE 1984 /* Default buffer size for regular memory profiles. 
*/ +#undef NSS_FIXED_BUFFER_SIZE +#endif + +enum { + NSS_SUCCESS = 0, + NSS_FAILURE = 1, +}; + +/* + * Request/Response types + */ +enum nss_if_metadata_types { + NSS_TX_METADATA_TYPE_INTERFACE_OPEN, + NSS_TX_METADATA_TYPE_INTERFACE_CLOSE, + NSS_TX_METADATA_TYPE_INTERFACE_LINK_STATE_NOTIFY, + NSS_TX_METADATA_TYPE_INTERFACE_MTU_CHANGE, + NSS_TX_METADATA_TYPE_INTERFACE_MAC_ADDR_SET, + NSS_TX_METADATA_TYPE_INTERFACE_MSS_SET, + NSS_RX_METADATA_TYPE_INTERFACE_STATS_SYNC, + NSS_METADATA_TYPE_INTERFACE_MAX, +}; + +/* + * General statistics messages + */ + +/* + * IPv4 reasm node stats + */ +struct nss_ipv4_reasm_stats_sync { + struct nss_cmn_node_stats node_stats; + /* Common node stats for ipv4_reasm */ + uint32_t ipv4_reasm_evictions; + uint32_t ipv4_reasm_alloc_fails; + uint32_t ipv4_reasm_timeouts; +}; + +/* + * IPv4 reasm message types + */ +enum nss_ipv4_reasm_message_types { + NSS_IPV4_REASM_STATS_SYNC_MSG, +}; + +/* + * IPv4 reassembly message structure + */ +struct nss_ipv4_reasm_msg { + struct nss_cmn_msg cm; + union { + struct nss_ipv4_reasm_stats_sync stats_sync; + } msg; +}; + +/* + * IPv6 reasm node stats + */ +struct nss_ipv6_reasm_stats_sync { + struct nss_cmn_node_stats node_stats; + /* Common node stats for ipv6_reasm */ + uint32_t ipv6_reasm_alloc_fails; + uint32_t ipv6_reasm_timeouts; + uint32_t ipv6_reasm_discards; +}; + +/* + * IPv6 reasm message types + */ +enum nss_ipv6_reasm_message_types { + NSS_IPV6_REASM_STATS_SYNC_MSG, +}; + +/* + * IPv6 reassembly message structure + */ +struct nss_ipv6_reasm_msg { + struct nss_cmn_msg cm; + union { + struct nss_ipv6_reasm_stats_sync stats_sync; + } msg; +}; + +/* + * Generic interface messages + */ +enum nss_generic_metadata_types { + NSS_TX_METADATA_TYPE_GENERIC_IF_PARAMS, + NSS_METADATA_TYPE_GENERIC_IF_MAX +}; + +/* + * Interface params command + */ +struct nss_generic_if_params { + uint8_t buf[1]; /* Buffer */ +}; + +/* + * Message structure to send/receive ipsec messages + */ +struct nss_generic_msg { + 
struct nss_cmn_msg cm; /* Message Header */ + union { + struct nss_generic_if_params rule; /* Message: generic rule */ + } msg; +}; + +/* + * NSS frequency scaling messages + */ +enum nss_freq_stats_metadata_types { + COREFREQ_METADATA_TYPE_ERROR, + COREFREQ_METADATA_TYPE_RX_FREQ_CHANGE, + COREFREQ_METADATA_TYPE_TX_FREQ_ACK, + COREFREQ_METADATA_TYPE_TX_CORE_STATS, + COREFREQ_METADATA_TYPE_MAX, +}; + + /* + * Types of TX metadata -- legacy code needs to be removed + */ +enum nss_tx_metadata_types { + NSS_TX_METADATA_TYPE_LEGACY_0, + NSS_TX_METADATA_TYPE_NSS_FREQ_CHANGE, + NSS_TX_METADATA_TYPE_SHAPER_CONFIGURE, +}; + +/* + * The NSS freq start or stop strcture + */ +struct nss_freq_msg { + /* Request */ + uint32_t frequency; + uint32_t start_or_end; + uint32_t stats_enable; + + /* Response */ + uint32_t freq_current; + int32_t ack; +}; + +/* + * NSS core stats + */ +struct nss_core_stats { + uint32_t inst_cnt_total; +}; + +/* + * Message structure to send/receive NSS Freq commands + */ +struct nss_corefreq_msg { + struct nss_cmn_msg cm; /* Message Header */ + union { + struct nss_freq_msg nfc; /* Message: freq stats */ + struct nss_core_stats ncs; /* Message: NSS stats sync */ + } msg; +}; + +/* + * H2N Buffer Types + */ +#define H2N_BUFFER_EMPTY 0 +#define H2N_PAGED_BUFFER_EMPTY 1 +#define H2N_BUFFER_PACKET 2 +#define H2N_BUFFER_CTRL 4 +#define H2N_BUFFER_NATIVE_WIFI 8 +#define H2N_BUFFER_SHAPER_BOUNCE_INTERFACE 9 +#define H2N_BUFFER_SHAPER_BOUNCE_BRIDGE 10 +#define H2N_BUFFER_RATE_TEST 14 +#define H2N_BUFFER_MAX 16 + +/* + * H2N Bit Flag Definitions + */ +#define H2N_BIT_FLAG_GEN_IPV4_IP_CHECKSUM 0x0001 +#define H2N_BIT_FLAG_GEN_IP_TRANSPORT_CHECKSUM 0x0002 +#define H2N_BIT_FLAG_FIRST_SEGMENT 0x0004 +#define H2N_BIT_FLAG_LAST_SEGMENT 0x0008 + +#define H2N_BIT_FLAG_GEN_IP_TRANSPORT_CHECKSUM_NONE 0x0010 +#define H2N_BIT_FLAG_TX_TS_REQUIRED 0x0040 +#define H2N_BIT_FLAG_DISCARD 0x0080 +#define H2N_BIT_FLAG_SEGMENTATION_ENABLE 0x0100 + +#define 
H2N_BIT_FLAG_VIRTUAL_BUFFER 0x2000 +#define H2N_BIT_FLAG_BUFFER_REUSABLE 0x8000 + +/* + * HLOS to NSS descriptor structure. + */ +struct h2n_descriptor { + uint32_t interface_num; /* Interface number to which the buffer is to be sent (where appropriate) */ + uint32_t buffer; /* Physical buffer address. This is the address of the start of the usable buffer being provided by the HLOS */ + uint32_t qos_tag; /* QoS tag information of the buffer (where appropriate) */ + uint16_t buffer_len; /* Length of the buffer (in bytes) */ + uint16_t payload_len; /* Length of the active payload of the buffer (in bytes) */ + uint16_t mss; /* MSS to be used with TSO/UFO */ + uint16_t payload_offs; /* Offset from the start of the buffer to the start of the payload (in bytes) */ + uint16_t bit_flags; /* Bit flags associated with the buffer */ + uint8_t buffer_type; /* Type of buffer */ + uint8_t reserved; /* Reserved for future use */ + nss_ptr_t opaque; /* 32 or 64-bit value provided by the HLOS to associate with the buffer. 
The cookie has no meaning to the NSS */ +#ifndef __LP64__ + uint32_t padding; /* Pad to fit 64bits, do not reuse */ +#endif +}; + +/* + * N2H Buffer Types + */ +#define N2H_BUFFER_EMPTY 1 +#define N2H_BUFFER_PACKET 3 +#define N2H_BUFFER_COMMAND_RESP 5 +#define N2H_BUFFER_STATUS 6 +#define N2H_BUFFER_CRYPTO_RESP 8 +#define N2H_BUFFER_PACKET_VIRTUAL 10 +#define N2H_BUFFER_SHAPER_BOUNCED_INTERFACE 11 +#define N2H_BUFFER_SHAPER_BOUNCED_BRIDGE 12 +#define N2H_BUFFER_PACKET_EXT 13 +#define N2H_BUFFER_RATE_TEST 14 +#define N2H_BUFFER_MAX 16 + +/* + * Command Response Types + */ +#define N2H_COMMAND_RESP_OK 0 +#define N2H_COMMAND_RESP_BUFFER_TOO_SMALL 1 +#define N2H_COMMAND_RESP_BUFFER_NOT_WRITEABLE 2 +#define N2H_COMMAND_RESP_UNSUPPORTED_COMMAND 3 +#define N2H_COMMAND_RESP_INVALID_PARAMETERS 4 +#define N2H_COMMAND_RESP_INACTIVE_SUBSYSTEM 5 + +/* + * N2H Bit Flag Definitions + */ +#define N2H_BIT_FLAG_IPV4_IP_CHECKSUM_VALID 0x0001 +#define N2H_BIT_FLAG_IP_TRANSPORT_CHECKSUM_VALID 0x0002 +#define N2H_BIT_FLAG_FIRST_SEGMENT 0x0004 +#define N2H_BIT_FLAG_LAST_SEGMENT 0x0008 +#define N2H_BIT_FLAG_INGRESS_SHAPED 0x0010 + +/* + * NSS to HLOS descriptor structure + */ +struct n2h_descriptor { + uint32_t interface_num; /* Interface number to which the buffer is to be sent (where appropriate) */ + uint32_t buffer; /* Physical buffer address. 
This is the address of the start of the usable buffer being provided by the HLOS */ + uint16_t buffer_len; /* Length of the buffer (in bytes) */ + uint16_t payload_len; /* Length of the active payload of the buffer (in bytes) */ + uint16_t payload_offs; /* Offset from the start of the buffer to the start of the payload (in bytes) */ + uint16_t bit_flags; /* Bit flags associated with the buffer */ + uint8_t buffer_type; /* Type of buffer */ + uint8_t response_type; /* Response type if the buffer is a command response */ + uint8_t pri; /* Packet priority */ + uint8_t service_code; /* Service code */ + uint32_t reserved; /* Reserved for future use */ + nss_ptr_t opaque; /* 32 or 64-bit value provided by the HLOS to associate with the buffer. The cookie has no meaning to the NSS */ +#ifndef __LP64__ + uint32_t padding; /* Pad to fit 64 bits, do not reuse */ +#endif +}; + +/* + * Device Memory Map Definitions + */ +#define DEV_MAGIC 0x4e52522e +#define DEV_INTERFACE_VERSION 1 +#define DEV_DESCRIPTORS 256 /* Do we need it here? 
*/ + +/** + * H2N descriptor METADATA + */ +struct h2n_desc_if_meta { + uint32_t desc_addr; + uint16_t size; + uint16_t padding; +}; + +/** + * H2N descriptor ring + */ +struct h2n_desc_if_instance { + struct h2n_descriptor *desc; + uint16_t size; /* Size in entries of the H2N0 descriptor ring */ +}; + +/** + * N2H descriptor METADATA + */ +struct n2h_desc_if_meta { + uint32_t desc_addr; + uint16_t size; + uint16_t padding; +}; + +/** + * N2H descriptor ring + */ +struct n2h_desc_if_instance { + struct n2h_descriptor *desc; + uint16_t size; /* Size in entries of the H2N0 descriptor ring */ +}; + +/** + * NSS virtual interface map + */ +struct nss_if_mem_map { + struct h2n_desc_if_meta h2n_desc_if[16];/* Base address of H2N0 descriptor ring */ + struct n2h_desc_if_meta n2h_desc_if[15];/* Base address of N2H0 descriptor ring */ + uint32_t magic; /* Magic value used to identify NSS implementations (must be 0x4e52522e) */ + uint16_t if_version; /* Interface version number (must be 1 for this version) */ + uint8_t h2n_rings; /* Number of descriptor rings in the H2N direction */ + uint8_t n2h_rings; /* Number of descriptor rings in the N2H direction */ + uint32_t h2n_nss_index[16]; + /* Index number for the next descriptor that will be read by the NSS in the H2N0 descriptor ring (NSS owned) */ + volatile uint32_t n2h_nss_index[15]; + /* Index number for the next descriptor that will be written by the NSS in the N2H0 descriptor ring (NSS owned) */ + uint8_t num_phys_ports; + uint8_t reserved1[3]; /* Reserved for future use */ + uint32_t h2n_hlos_index[16]; + /* Index number for the next descriptor that will be written by the HLOS in the H2N0 descriptor ring (HLOS owned) */ + volatile uint32_t n2h_hlos_index[15]; + /* Index number for the next descriptor that will be read by the HLOS in the N2H0 descriptor ring (HLOS owned) */ + uint32_t reserved; /* Reserved for future use */ +}; +#endif /* __NSS_HLOS_IF_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_if.c 
b/feeds/ipq807x/qca-nss-drv/src/nss_if.c new file mode 100644 index 000000000..0c370b119 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_if.c @@ -0,0 +1,354 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2016, 2018-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_if.c + * NSS base interfaces + */ + +#include "nss_tx_rx_common.h" +#include "nss_if_log.h" + +/* + * nss_if_pvt + * NSS private structure to handle the completion of NSS -> HLOS messages. + */ +static struct nss_if_pvt { + struct semaphore sem; + struct completion complete; + int response; +} nss_if; + +static bool nss_if_sem_init_done; + +/* + * nss_if_callback + * Callback to handle the completion of NSS ->HLOS messages. 
+ */ +static void nss_if_callback(void *app_data, struct nss_if_msg *nim) +{ + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("nss_if Error response %d\n", nim->cm.response); + nss_if.response = NSS_TX_FAILURE; + complete(&nss_if.complete); + return; + } + + nss_if.response = NSS_TX_SUCCESS; + complete(&nss_if.complete); +} + +/* + * nss_if_msg_sync() + * Send a message to an interface and wait for the response. + */ +nss_tx_status_t nss_if_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_if_msg *nim) +{ + nss_tx_status_t status; + int ret = 0; + + if (!nss_if_sem_init_done) { + sema_init(&nss_if.sem, 1); + init_completion(&nss_if.complete); + nss_if_sem_init_done = 1; + } + + down(&nss_if.sem); + + status = nss_if_tx_msg(nss_ctx, nim); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_if_msg failed\n", nss_ctx); + up(&nss_if.sem); + return status; + } + + ret = wait_for_completion_timeout(&nss_if.complete, msecs_to_jiffies(NSS_IF_TX_TIMEOUT)); + + if (!ret) { + nss_warning("%px: nss_if tx failed due to timeout\n", nss_ctx); + nss_if.response = NSS_TX_FAILURE; + } + + status = nss_if.response; + up(&nss_if.sem); + + return status; +} +EXPORT_SYMBOL(nss_if_msg_sync); + +/* + * nss_if_msg_handler() + * Handle NSS -> HLOS messages for base class interfaces + */ +void nss_if_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, + __attribute__((unused))void *app_data) +{ + struct nss_if_msg *nim = (struct nss_if_msg *)ncm; + nss_if_msg_callback_t cb; + + /* + * We only support base class messages with this interface + */ + if (ncm->type > NSS_IF_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return; + } + + if (!nss_is_dynamic_interface(ncm->interface) && + !((ncm->interface >= NSS_PHYSICAL_IF_START) && (ncm->interface < NSS_VIRTUAL_IF_START))) { + nss_warning("%px: interface %d not in physical or dynamic if range\n", nss_ctx, ncm->interface); + return; + } + + if 
(nss_cmn_get_msg_len(ncm) > sizeof(struct nss_if_msg)) { + nss_warning("%px: message length too big: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Trace messages. + */ + nss_if_log_rx_msg(nim); + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * Callback + */ + cb = (nss_if_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, nim); +} + +/* + * nss_if_tx_buf() + * Send packet to interface owned by NSS + */ +nss_tx_status_t nss_if_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num) +{ + nss_trace("%px: If Tx packet, id:%d, data=%px", nss_ctx, if_num, os_buf->data); + + if (!nss_is_dynamic_interface(if_num) && + !((if_num >= NSS_PHYSICAL_IF_START) && (if_num < NSS_VIRTUAL_IF_START))) { + nss_warning("%px: interface %d not in physical or dynamic if range\n", nss_ctx, if_num); + return NSS_TX_FAILURE_BAD_PARAM; + } + + return nss_core_send_packet(nss_ctx, os_buf, if_num, H2N_BIT_FLAG_BUFFER_REUSABLE); +} + +/* + * nss_if_tx_msg() + * Transmit a message to the specific interface on this core. + */ +nss_tx_status_t nss_if_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_if_msg *nim) +{ + struct nss_cmn_msg *ncm = &nim->cm; + struct net_device *dev; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + /* + * Sanity check the message + */ + if (ncm->type >= NSS_IF_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Sanity check the message for valid interfaces. + */ + if (ncm->interface < NSS_PHYSICAL_IF_START || + ncm->interface >= NSS_MAX_NET_INTERFACES ) { + nss_warning("%px: Tx request for invalid interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE_BAD_PARAM; + } + + /* + * Trace messages. 
+ */ + nss_if_log_tx_msg(nim); + + dev = nss_ctx->subsys_dp_register[ncm->interface].ndev; + if (!dev) { + nss_warning("%px: Unregister interface %d: no context", nss_ctx, ncm->interface); + return NSS_TX_FAILURE_BAD_PARAM; + } + + return nss_core_send_cmd(nss_ctx, nim, sizeof(*nim), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_if_register() + * Primary registration for receiving data and msgs from an interface. + */ +struct nss_ctx_instance *nss_if_register(uint32_t if_num, + nss_if_rx_callback_t rx_callback, + nss_if_msg_callback_t msg_callback, + struct net_device *if_ctx) +{ + return NULL; +} + +/* + * nss_if_unregister() + * Unregisteer the callback for this interface + */ +void nss_if_unregister(uint32_t if_num) +{ +} + +/* + * nss_if_reset_nexthop() + * De-configures the nexthop for an interface + */ +nss_tx_status_t nss_if_reset_nexthop(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_if_msg nim; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + nss_trace("Resetting Nexthop. nss_ctx: %px ifnum: %u", nss_ctx, if_num); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_IF_RESET_NEXTHOP, 0, nss_if_callback, NULL); + + return nss_if_msg_sync(nss_ctx, &nim); +} +EXPORT_SYMBOL(nss_if_reset_nexthop); + +/* + * nss_if_set_nexthop() + * Configures the nexthop for an interface + */ +nss_tx_status_t nss_if_set_nexthop(struct nss_ctx_instance *nss_ctx, uint32_t if_num, uint32_t nexthop) +{ + struct nss_if_msg nim; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (nexthop >= NSS_MAX_NET_INTERFACES) { + nss_warning("%px: Invalid nexthop interface number: %d", nss_ctx, nexthop); + return NSS_TX_FAILURE_BAD_PARAM; + } + + nss_trace("%px: NSS If nexthop will be set to %d, id:%d\n", nss_ctx, nexthop, if_num); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_IF_SET_NEXTHOP, + sizeof(struct nss_if_set_nexthop), nss_if_callback, NULL); + + nim.msg.set_nexthop.nexthop = nexthop; + + return nss_if_msg_sync(nss_ctx, &nim); +} +EXPORT_SYMBOL(nss_if_set_nexthop); + +/* + * nss_if_change_mtu() + * 
Change the MTU of the interface. + */ +nss_tx_status_t nss_if_change_mtu(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num, uint16_t mtu) +{ + struct nss_if_msg nim; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + nss_trace("%px: NSS If MTU will be changed to %u, of NSS if num: %u\n", nss_ctx, mtu, if_num); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_IF_MTU_CHANGE, + sizeof(struct nss_if_mtu_change), nss_if_callback, NULL); + + nim.msg.mtu_change.min_buf_size = mtu; + + return nss_if_msg_sync(nss_ctx, &nim); +} +EXPORT_SYMBOL(nss_if_change_mtu); + +/* + * nss_if_change_mac_addr() + * Change the MAC address of the interface. + */ +nss_tx_status_t nss_if_change_mac_addr(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num, uint8_t *mac_addr) +{ + struct nss_if_msg nim; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + nss_trace("%px: NSS If MAC address will be changed to %s, of NSS if num: %u\n", nss_ctx, mac_addr, if_num); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_IF_MAC_ADDR_SET, + sizeof(struct nss_if_mac_address_set), nss_if_callback, NULL); + + memcpy(nim.msg.mac_address_set.mac_addr, mac_addr, ETH_ALEN); + + return nss_if_msg_sync(nss_ctx, &nim); +} +EXPORT_SYMBOL(nss_if_change_mac_addr); + +/* + * nss_if_vsi_unassign() + * API to send VSI detach message to NSS FW. + */ +nss_tx_status_t nss_if_vsi_unassign(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num, uint32_t vsi) +{ + struct nss_if_msg nim; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + nss_trace("%px: VSI to be unassigned is %u\n", nss_ctx, vsi); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_IF_VSI_UNASSIGN, + sizeof(struct nss_if_vsi_unassign), nss_if_callback, NULL); + + nim.msg.vsi_unassign.vsi = vsi; + + return nss_if_msg_sync(nss_ctx, &nim); +} +EXPORT_SYMBOL(nss_if_vsi_unassign); + +/* + * nss_if_vsi_assign() + * API to send VSI attach message to NSS FW. 
+ */ +nss_tx_status_t nss_if_vsi_assign(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num, uint32_t vsi) +{ + struct nss_if_msg nim; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + nss_trace("%px: VSI to be assigned is %u\n", nss_ctx, vsi); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_IF_VSI_ASSIGN, + sizeof(struct nss_if_vsi_assign), nss_if_callback, NULL); + + nim.msg.vsi_assign.vsi = vsi; + + return nss_if_msg_sync(nss_ctx, &nim); +} +EXPORT_SYMBOL(nss_if_vsi_assign); + +EXPORT_SYMBOL(nss_if_tx_msg); +EXPORT_SYMBOL(nss_if_register); +EXPORT_SYMBOL(nss_if_unregister); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_if_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_if_log.c new file mode 100644 index 000000000..a551a4205 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_if_log.c @@ -0,0 +1,429 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_if_log.c + * NSS Interface logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_if_log_message_types_str + * NSS interface rule message strings + */ +static int8_t *nss_if_log_message_types_str[NSS_IF_MAX_MSG_TYPES] __maybe_unused = { + "NSS interface Open message", + "NSS interface close message", + "NSS interface link state notify message", + "NSS interface MTU change message", + "NSS interface MAC address set message", + "NSS interface stats message", + "NSS interface ishaper assign message", + "NSS interface bshaper assign message", + "NSS interface ishaper unassign message", + "NSS interface bshaper unassign message", + "NSS interface ishaper config message", + "NSS interface bshaper config message", + "NSS interface pause on off message", + "NSS interface VSI assign message", + "NSS interface VSI unassign message", + "NSS interface set next hop message", + "NSS interface set IGS node message", + "NSS interface clear IGS node message", + "NSS interface reset next hop message", +}; + +/* + * nss_if_log_error_response_types_str + * Strings for error types for NSS interface messages + */ +static int8_t *nss_if_log_error_response_types_str[NSS_IF_ERROR_TYPE_MAX] __maybe_unused = { + "No Ishapers", + "No Bshapers", + "No Ishaper", + "No Bshaper", + "No Old Ishaper", + "No Old Bshaper", + "Ishaper config failed", + "Bshaper config failed", + "Unknown error", + "Interface open error", + "Interface invalid MTU error", + "Invalid MAC address error", + "VSI no match error", + "VSI reassign error", + "Invalid VSI error", + "Max error", +}; + +/* + * nss_if_log_rule_open() + * Log NSS open interface message. 
+ */ +static void nss_if_log_rule_open(struct nss_if_msg *nim) +{ + struct nss_if_open *niom __maybe_unused = &nim->msg.open; + nss_trace("%px: NSS open interface message \n" + "tx_desc_ring: %X\n" + "rx_desc_ring: %X\n" + "rx_forward_if: %u\n" + "alignment_mode: %u\n", + nim, + niom->tx_desc_ring, + niom->rx_desc_ring, + niom->rx_forward_if, + niom->alignment_mode); +} + +/* + * nss_if_log_rule_close() + * Log NSS close interface message. + */ +static void nss_if_log_rule_close(struct nss_if_msg *nim) +{ + nss_trace("%px: NSS close interface message \n", nim); +} + +/* + * nss_if_log_rule_link_state_notify() + * Log NSS interface link state notify message. + */ +static void nss_if_log_rule_link_state_notify(struct nss_if_msg *nim) +{ + struct nss_if_link_state_notify *nilstm __maybe_unused = &nim->msg.link_state_notify; + nss_trace("%px: NSS interface link state notify interface message \n" + "state: %u\n", + nim, + nilstm->state); +} + +/* + * nss_if_log_rule_mtu_change() + * Log NSS interface MTU change message. + */ +static void nss_if_log_rule_mtu_change(struct nss_if_msg *nim) +{ + struct nss_if_mtu_change *nimcm __maybe_unused = &nim->msg.mtu_change; + nss_trace("%px: NSS interface MTU change message \n" + "min_buf_size: %u\n", + nim, + nimcm->min_buf_size); +} + +/* + * nss_if_log_rule_mac_addr_set() + * Log NSS interface MAC address set message. + */ +static void nss_if_log_rule_mac_addr_set(struct nss_if_msg *nim) +{ + struct nss_if_mac_address_set *nimasm __maybe_unused = &nim->msg.mac_address_set; + nss_trace("%px: NSS interface MAC address set message \n" + "MAC address: %X:%X:%X:%X:%X:%X\n", + nim, + nimasm->mac_addr[0], nimasm->mac_addr[1], nimasm->mac_addr[2], + nimasm->mac_addr[3], nimasm->mac_addr[4], nimasm->mac_addr[5]); +} + +/* + * nss_if_log_rule_stats() + * Log NSS interface stats message. 
+ */ +static void nss_if_log_rule_stats(struct nss_if_msg *nim) +{ + uint16_t i; + struct nss_cmn_node_stats *nism __maybe_unused = &nim->msg.stats; + + nss_trace("%px: NSS interface stats message \n" + "rx_packets: %u\n" + "rx_bytes: %u\n" + "tx_packets: %u\n" + "tx_bytes: %u\n", + nim, + nism->rx_packets, + nism->rx_bytes, + nism->tx_packets, + nism->tx_bytes); + + for(i=0; i < NSS_MAX_NUM_PRI; i++) + { + nss_trace("rx_dropped[%u]: %u\n", i, nism->rx_dropped[i]); + } +} + +/* + * nss_if_log_rule_shaper_assign() + * Log NSS interface shaper assignment message. + */ +static void nss_if_log_rule_shaper_assign(struct nss_if_msg *nim) +{ + struct nss_if_shaper_assign *shaper_assign_msg __maybe_unused = &nim->msg.shaper_assign; + nss_trace("%px: NSS interface shaper assign message \n" + "shaper_id: %u\n" + "new_shaper_id: %u\n", + nim, + shaper_assign_msg->shaper_id, + shaper_assign_msg->new_shaper_id); +} + +/* + * nss_if_log_rule_shaper_unassign() + * Log NSS interface shaper unassignment message. + */ +static void nss_if_log_rule_shaper_unassign(struct nss_if_msg *nim) +{ + struct nss_if_shaper_unassign *shaper_unassign_msg __maybe_unused = &nim->msg.shaper_unassign; + nss_trace("%px: NSS interface shaper unassign message \n" + "shaper_id: %u\n", + nim, + shaper_unassign_msg->shaper_id); +} + +/* + * nss_if_log_rule_shaper_config() + * Log NSS interface shaper configuration message. + */ +static void nss_if_log_rule_shaper_config(struct nss_if_msg *nim) +{ + struct nss_if_shaper_configure *shaper_configure_msg __maybe_unused = &nim->msg.shaper_configure; + nss_trace("%px: NSS interface shaper configuration message \n" + "request_type: %u\n" + "response_type: %u\n", + nim, + shaper_configure_msg->config.request_type, + shaper_configure_msg->config.response_type); +} + +/* + * nss_if_log_rule_pause_on_off() + * Log NSS interface pause on off message. 
+ */ +static void nss_if_log_rule_pause_on_off(struct nss_if_msg *nim) +{ + struct nss_if_pause_on_off *pause_on_off_msg __maybe_unused = &nim->msg.pause_on_off; + nss_trace("%px: NSS interface pause ON/OFF message \n" + "pause_on: %u\n", + nim, + pause_on_off_msg->pause_on); +} + +/* + * nss_if_log_rule_vsi_assign() + * Log NSS interface VSI assignment message. + */ +static void nss_if_log_rule_vsi_assign(struct nss_if_msg *nim) +{ + struct nss_if_vsi_assign *vsi_assign_msg __maybe_unused = &nim->msg.vsi_assign; + nss_trace("%px: NSS interface VSI assignment message \n" + "VSI: %u\n", + nim, + vsi_assign_msg->vsi); +} + +/* + * nss_if_log_rule_vsi_unassign() + * Log NSS interface VSI unassignment message. + */ +static void nss_if_log_rule_vsi_unassign(struct nss_if_msg *nim) +{ + struct nss_if_vsi_unassign *vsi_unassign_msg __maybe_unused = &nim->msg.vsi_unassign; + nss_trace("%px: NSS interface VSI unassignment message \n" + "VSI: %u\n", + nim, + vsi_unassign_msg->vsi); +} + +/* + * nss_if_log_rule_set_nexthop() + * Log NSS interface set nexthop message. + */ +static void nss_if_log_rule_set_nexthop(struct nss_if_msg *nim) +{ + struct nss_if_set_nexthop *nisn __maybe_unused = &nim->msg.set_nexthop; + nss_trace("%px: NSS interface set nethop message \n" + "Nexthop: %u\n", + nim, + nisn->nexthop); +} + +/* + * nss_if_log_rule_set_igs_node() + * Log NSS interface set IGS node message. + */ +static void nss_if_log_rule_set_igs_node(struct nss_if_msg *nim) +{ + struct nss_if_igs_config *igs_config_msg __maybe_unused = &nim->msg.config_igs; + nss_trace("%px: NSS interface set IGS node message \n" + "igs_num: %d\n", + nim, + igs_config_msg->igs_num); +} + +/* + * nss_if_log_rule_clear_igs_node() + * Log NSS interface clear IGS node message. 
+ */ +static void nss_if_log_rule_clear_igs_node(struct nss_if_msg *nim) +{ + struct nss_if_igs_config *igs_config_msg __maybe_unused = &nim->msg.config_igs; + nss_trace("%px: NSS interface clear IGS node message \n" + "igs_num: %d\n", + nim, + igs_config_msg->igs_num); +} + +/* + * nss_if_log_rule_reset_nexthop() + * Log NSS interface reset nexthop message. + */ +static void nss_if_log_rule_reset_nexthop(struct nss_if_msg *nim) +{ + nss_trace("%px: NSS interface reset nexthop message \n", nim); +} + +/* + * nss_if_log_verbose() + * Log message contents. + */ +static void nss_if_log_verbose(struct nss_if_msg *nim) +{ + nss_trace("NSS interface number: %u\n", nim->cm.interface); + + switch (nim->cm.type) { + case NSS_IF_OPEN: + nss_if_log_rule_open(nim); + break; + + case NSS_IF_CLOSE: + nss_if_log_rule_close(nim); + break; + + case NSS_IF_LINK_STATE_NOTIFY: + nss_if_log_rule_link_state_notify(nim); + break; + + case NSS_IF_MTU_CHANGE: + nss_if_log_rule_mtu_change(nim); + break; + + case NSS_IF_MAC_ADDR_SET: + nss_if_log_rule_mac_addr_set(nim); + break; + + case NSS_IF_STATS: + nss_if_log_rule_stats(nim); + break; + + case NSS_IF_ISHAPER_ASSIGN: + case NSS_IF_BSHAPER_ASSIGN: + nss_if_log_rule_shaper_assign(nim); + break; + + case NSS_IF_ISHAPER_UNASSIGN: + case NSS_IF_BSHAPER_UNASSIGN: + nss_if_log_rule_shaper_unassign(nim); + break; + + case NSS_IF_ISHAPER_CONFIG: + case NSS_IF_BSHAPER_CONFIG: + nss_if_log_rule_shaper_config(nim); + break; + + case NSS_IF_PAUSE_ON_OFF: + nss_if_log_rule_pause_on_off(nim); + break; + + case NSS_IF_VSI_ASSIGN: + nss_if_log_rule_vsi_assign(nim); + break; + + case NSS_IF_VSI_UNASSIGN: + nss_if_log_rule_vsi_unassign(nim); + break; + + case NSS_IF_SET_NEXTHOP: + nss_if_log_rule_set_nexthop(nim); + break; + + case NSS_IF_SET_IGS_NODE: + nss_if_log_rule_set_igs_node(nim); + break; + + case NSS_IF_CLEAR_IGS_NODE: + nss_if_log_rule_clear_igs_node(nim); + break; + + case NSS_IF_RESET_NEXTHOP: + nss_if_log_rule_reset_nexthop(nim); + break; + + 
default: + nss_trace("%px: Invalid message type\n", nim); + break; + } +} + +/* + * nss_if_log_rx_msg() + * Log messages received from FW. + */ +void nss_if_log_rx_msg(struct nss_if_msg *nim) +{ + if (nim->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_info("%px: Invalid response\n", nim); + return; + } + + if (nim->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nim->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nim, nim->cm.type, + nss_if_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response]); + goto verbose; + } + + if (nim->cm.error >= NSS_IF_ERROR_TYPE_MAX) { + nss_info("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nim, nim->cm.type, nss_if_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response], + nim->cm.error); + goto verbose; + } + + nss_info("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nim, nim->cm.type, nss_if_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response], + nim->cm.error, nss_if_log_error_response_types_str[nim->cm.error]); + +verbose: + nss_if_log_verbose(nim); +} + +/* + * nss_if_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_if_log_tx_msg(struct nss_if_msg *nim) +{ + if (nim->cm.type >= NSS_IF_MAX_MSG_TYPES) { + nss_info("%px: Invalid message type\n", nim); + return; + } + + nss_info("%px: type[%d]:%s\n", nim, nim->cm.type, nss_if_log_message_types_str[nim->cm.type]); + nss_if_log_verbose(nim); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_if_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_if_log.h new file mode 100644 index 000000000..0f2a7d27b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_if_log.h @@ -0,0 +1,40 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_IF_LOG_H +#define __NSS_IF_LOG_H + +/* + * nss_if_log.h + * NSS Interface header file. + */ + +/* + * nss_if_log_tx_msg + * Logs an NSS interface message that is sent to the NSS firmware. + */ +void nss_if_log_tx_msg(struct nss_if_msg *nim); + +/* + * nss_if_log_rx_msg + * Logs an NSS interface message that is received from the NSS firmware. + */ +void nss_if_log_rx_msg(struct nss_if_msg *nim); + + +#endif /* __NSS_IF_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_igs.c b/feeds/ipq807x/qca-nss-drv/src/nss_igs.c new file mode 100644 index 000000000..8153a4653 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_igs.c @@ -0,0 +1,207 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_igs_stats.h" + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)) +#ifdef CONFIG_NET_CLS_ACT +#include +#endif +#endif + +static struct module *nss_igs_module; + +/* + * nss_igs_verify_if_num() + * Verify interface number passed to us. + */ +bool nss_igs_verify_if_num(uint32_t if_num) +{ + enum nss_dynamic_interface_type if_type; + + if_type = nss_dynamic_interface_get_type(nss_igs_get_context(), if_num); + + if (if_type == NSS_DYNAMIC_INTERFACE_TYPE_IGS) { + return true; + } + return false; +} +EXPORT_SYMBOL(nss_igs_verify_if_num); + +/* + * nss_igs_handler() + * Handle NSS -> HLOS messages for igs device + */ +static void nss_igs_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, + void *app_data) +{ + void *ctx; + nss_igs_msg_callback_t cb; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + BUG_ON(!nss_igs_verify_if_num(ncm->interface)); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_IGS_MSG_MAX) { + nss_warning("%px: received invalid message %d for IGS interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_igs_msg)) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return; + } + + switch (ncm->type) { + case NSS_IGS_MSG_SYNC_STATS: + /* + * Debug stats embedded in stats msg. 
+ */ + nss_igs_stats_sync(nss_ctx, ncm, ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)app_data; + } + + /* + * callback + */ + cb = (nss_igs_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + /* + * call igs callback + */ + if (!cb) { + nss_warning("%px: No callback for igs interface %d", + nss_ctx, ncm->interface); + return; + } + + cb(ctx, ncm); +} + +/* + * nss_igs_unregister_if() + * Un-registers IGS interface from the NSS firmware. + */ +void nss_igs_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.igs_handler_id]; + uint32_t status; + + nss_assert(nss_ctx); + nss_assert(nss_igs_verify_if_num(if_num)); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_core_unregister_handler(nss_ctx, if_num); + status = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for interface %d with NSS core\n", nss_ctx, if_num); + } + + nss_igs_stats_reset(if_num); +} +EXPORT_SYMBOL(nss_igs_unregister_if); + +/* + * nss_igs_register_if() + * Registers the IGS interface with NSS FW. 
+ */ +struct nss_ctx_instance *nss_igs_register_if(uint32_t if_num, uint32_t type, + nss_igs_msg_callback_t event_callback, struct net_device *netdev, + uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.igs_handler_id]; + uint32_t status; + + nss_assert(nss_ctx); + nss_assert(nss_igs_verify_if_num(if_num)); + + nss_core_register_handler(nss_ctx, if_num, nss_igs_handler, netdev); + status = nss_core_register_msg_handler(nss_ctx, if_num, event_callback); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: Not able to register handler for interface %d with NSS core\n", nss_ctx, if_num); + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, NULL, 0, netdev, netdev, features); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, type); + + nss_igs_stats_dentry_create(); + nss_igs_stats_init(if_num, netdev); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_igs_register_if); + +/* + * nss_igs_get_context() + * Get the IGS context. + */ +struct nss_ctx_instance *nss_igs_get_context() +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.igs_handler_id]; +} +EXPORT_SYMBOL(nss_igs_get_context); + +#ifdef CONFIG_NET_CLS_ACT +/* + * nss_igs_module_save() + * Save the ingress shaping module reference. + */ +void nss_igs_module_save(struct tc_action_ops *act, struct module *module) +{ + nss_assert(act); + nss_assert(act->type == TCA_ACT_MIRRED_NSS); + + nss_igs_module = module; +} +EXPORT_SYMBOL(nss_igs_module_save); +#endif + +/* + * nss_igs_module_get() + * Get the ingress shaping module reference. + */ +bool nss_igs_module_get() +{ + nss_assert(nss_igs_module); + return try_module_get(nss_igs_module); +} +EXPORT_SYMBOL(nss_igs_module_get); + +/* + * nss_igs_module_put() + * Release the ingress shaping module reference. 
+ */ +void nss_igs_module_put() +{ + nss_assert(nss_igs_module); + module_put(nss_igs_module); +} +EXPORT_SYMBOL(nss_igs_module_put); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_igs_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_igs_stats.c new file mode 100644 index 000000000..a6b511a28 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_igs_stats.c @@ -0,0 +1,307 @@ +/* + ************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_igs_stats.h" + +/* + * nss_igs_stats + * IGS debug statistics. 
+ */ +enum nss_igs_stats { + NSS_IGS_STATS_TX_DROP, + NSS_IGS_STATS_SHAPER_DROP, + NSS_IGS_STATS_IPV4_PARSE_FAIL, + NSS_IGS_STATS_IPV4_UNKNOWN_GRE_TYPE, + NSS_IGS_STATS_IPV4_UNKNOWN_L4, + NSS_IGS_STATS_IPV4_NO_CME, + NSS_IGS_STATS_IPV4_FRAG_INITIAL, + NSS_IGS_STATS_IPV4_FRAG_NON_INITIAL, + NSS_IGS_STATS_IPV4_MALFORMED_UDP, + NSS_IGS_STATS_IPV4_MALFORMED_TCP, + NSS_IGS_STATS_IPV4_MALFORMED_UDPL, + NSS_IGS_STATS_IPV4_MALFORMED_GRE, + NSS_IGS_STATS_IPV6_PARSE_FAIL, + NSS_IGS_STATS_IPV6_UNKNOWN_L4, + NSS_IGS_STATS_IPV6_NO_CME, + NSS_IGS_STATS_IPV6_FRAG_INITIAL, + NSS_IGS_STATS_IPV6_FRAG_NON_INITIAL, + NSS_IGS_STATS_IPV6_MALFORMED_UDP, + NSS_IGS_STATS_IPV6_MALFORMED_TCP, + NSS_IGS_STATS_IPV6_MALFORMED_UDPL, + NSS_IGS_STATS_IPV6_MALFORMED_FRAG, + NSS_IGS_STATS_EVENT_NO_SI, + NSS_IGS_STATS_ETH_PARSE_FAIL, + NSS_IGS_STATS_ETH_UNKNOWN_TYPE, + NSS_IGS_STATS_PPPOE_NON_IP, + NSS_IGS_STATS_PPPOE_MALFORMED, + NSS_IGS_STATS_MAX +}; + +/* + * nss_igs_stats_debug_instance + * Structure for H2N/N2H IGS debug stats + */ +static struct nss_igs_stats_debug_instance { + uint64_t stats[NSS_IGS_STATS_MAX]; /* IGS statistics for each instance. */ + int32_t if_index; /* IFB instance netdev index. */ + uint32_t if_num; /* IFB instance NSS interface number */ + bool valid; /* IFB statistics valid bit. */ +} nss_igs_stats_debug[NSS_MAX_IGS_DYNAMIC_INTERFACES]; + +/* + * Data structures to store IGS interface stats. 
+ */ +static DEFINE_SPINLOCK(nss_igs_stats_debug_lock); + +/* + * nss_igs_stats_str + * IGS statistics strings for nss session stats + */ +struct nss_stats_info nss_igs_stats_str[NSS_IGS_STATS_MAX] = { + {"IGS_SHAPER_TX_DROP" , NSS_STATS_TYPE_DROP}, + {"IGS_SHAPER_DROP" , NSS_STATS_TYPE_DROP}, + {"IGS_EXCEPTION_IPV4_PARSE_FAIL" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV4_UNKNOWN_GRE_TYPE" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV4_UNKNOWN_L4" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV4_NO_CME" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV4_FRAG_INITIAL" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV4_FRAG_NON_INITIAL" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV4_MALFORMED_UDP" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV4_MALFORMED_TCP" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV4_MALFORMED_UDPL" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV4_MALFORMED_GRE" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV6_PARSE_FAIL" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV6_UNKNOWN_L4" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV6_NO_CME" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV6_FRAG_INITIAL" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV6_FRAG_NON_INITIAL" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV6_MALFORMED_UDP" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV6_MALFORMED_TCP" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV6_MALFORMED_UDPL" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_IPV6_MALFORMED_FRAG" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_EVENT_NO_SI" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_ETH_PARSE_FAIL" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_ETH_UNKNOWN_TYPE" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_PPPOE_NON_IP" , NSS_STATS_TYPE_EXCEPTION}, + {"IGS_EXCEPTION_PPPOE_MALFORMED" , NSS_STATS_TYPE_EXCEPTION} +}; + +/* + * nss_igs_stats_get() + * Get IGS statistics. 
+ */ +static void nss_igs_stats_get(void *stats_mem) +{ + struct nss_igs_stats_debug_instance *stats = (struct nss_igs_stats_debug_instance *)stats_mem; + int i; + + if (!stats) { + nss_warning("No memory to copy IGS stats"); + return; + } + + spin_lock_bh(&nss_igs_stats_debug_lock); + for (i = 0; i < NSS_MAX_IGS_DYNAMIC_INTERFACES; i++) { + if (nss_igs_stats_debug[i].valid) { + memcpy(stats, &nss_igs_stats_debug[i], sizeof(struct nss_igs_stats_debug_instance)); + stats++; + } + } + spin_unlock_bh(&nss_igs_stats_debug_lock); +} + +/* + * nss_igs_stats_read() + * Read IGS statistics + */ +static ssize_t nss_igs_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + + uint32_t max_output_lines = 2 /* header & footer for instance stats */ + + NSS_MAX_IGS_DYNAMIC_INTERFACES * + ((NSS_STATS_NODE_MAX + 3 ) + (NSS_IGS_STATS_MAX + 3)) /*instance stats */ + + 2; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct net_device *dev; + struct nss_igs_stats_debug_instance *igs_shadow_stats; + int id; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + igs_shadow_stats = kzalloc(sizeof(struct nss_igs_stats_debug_instance) * + NSS_MAX_IGS_DYNAMIC_INTERFACES, GFP_KERNEL); + if (unlikely(!igs_shadow_stats)) { + nss_warning("Could not allocate memory for base debug statistics buffer"); + kfree(lbuf); + return 0; + } + + /* + * Get all stats + */ + nss_igs_stats_get((void *)igs_shadow_stats); + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "igs", NSS_STATS_SINGLE_CORE); + + /* + * Session stats + */ + for (id = 0; id < NSS_MAX_IGS_DYNAMIC_INTERFACES; id++) { + + if (!igs_shadow_stats[id].valid) { + continue; + } + + dev = dev_get_by_index(&init_net, igs_shadow_stats[id].if_index); + if (likely(dev)) { + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. 
nss interface id=%d, netdevice=%s\n", id, + igs_shadow_stats[id].if_num, dev->name); + dev_put(dev); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id, + igs_shadow_stats[id].if_num); + } + size_wr += nss_stats_fill_common_stats(igs_shadow_stats[id].if_num, id, lbuf, size_wr, size_al, "igs"); + + /* + * IGS exception stats. + */ + size_wr += nss_stats_print("igs", "igs exception stats start" + , id + , nss_igs_stats_str + , igs_shadow_stats[id].stats + , NSS_IGS_STATS_MAX + , lbuf, size_wr, size_al); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(igs_shadow_stats); + kfree(lbuf); + return bytes_read; +} + +/* + * nss_igs_stats_sync + * API to sync statistics for IGS + */ +void nss_igs_stats_sync(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, uint16_t if_num) +{ + uint8_t i, j; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct nss_igs_msg *nim = (struct nss_igs_msg *)ncm; + struct nss_igs_stats_sync_msg *stats_msg = &nim->msg.stats; + struct nss_cmn_node_stats *node_stats_ptr = &stats_msg->node_stats; + uint32_t *igs_stats_ptr = (uint32_t *)&stats_msg->igs_stats; + + spin_lock_bh(&nss_igs_stats_debug_lock); + for (i = 0; i < NSS_MAX_IGS_DYNAMIC_INTERFACES; i++) { + if (nss_igs_stats_debug[i].if_num != if_num) { + continue; + } + + for (j = 0; j < NSS_IGS_STATS_MAX; j++) { + /* + * sync stats. + */ + nss_igs_stats_debug[i].stats[j] += igs_stats_ptr[j]; + } + spin_unlock_bh(&nss_igs_stats_debug_lock); + goto sync_cmn_stats; + } + + spin_unlock_bh(&nss_igs_stats_debug_lock); + return; + +sync_cmn_stats: + spin_lock_bh(&nss_top->stats_lock); + + /* + * sync common stats. 
+ */ + nss_top->stats_node[if_num][NSS_STATS_NODE_RX_PKTS] += node_stats_ptr->rx_packets; + nss_top->stats_node[if_num][NSS_STATS_NODE_RX_BYTES] += node_stats_ptr->rx_bytes; + nss_top->stats_node[if_num][NSS_STATS_NODE_TX_PKTS] += node_stats_ptr->tx_packets; + nss_top->stats_node[if_num][NSS_STATS_NODE_TX_BYTES] += node_stats_ptr->tx_bytes; + + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + nss_top->stats_node[if_num][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + i] += + node_stats_ptr->rx_dropped[i]; + } + + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_igs_stats_reset() + * API to reset the IGS stats. + */ +void nss_igs_stats_reset(uint32_t if_num) +{ + uint8_t i; + + spin_lock_bh(&nss_igs_stats_debug_lock); + for (i = 0; i < NSS_MAX_IGS_DYNAMIC_INTERFACES; i++) { + if (nss_igs_stats_debug[i].if_num == if_num) { + memset(&nss_igs_stats_debug[i], 0, sizeof(struct nss_igs_stats_debug_instance)); + break; + } + } + spin_unlock_bh(&nss_igs_stats_debug_lock); +} + +/* + * nss_igs_stats_init() + * API to initialize IGS debug instance statistics. + */ +void nss_igs_stats_init(uint32_t if_num, struct net_device *netdev) +{ + uint8_t i; + + spin_lock_bh(&nss_igs_stats_debug_lock); + for (i = 0; i < NSS_MAX_IGS_DYNAMIC_INTERFACES; i++) { + if (!nss_igs_stats_debug[i].valid) { + nss_igs_stats_debug[i].valid = true; + nss_igs_stats_debug[i].if_num = if_num; + nss_igs_stats_debug[i].if_index = netdev->ifindex; + break; + } + } + spin_unlock_bh(&nss_igs_stats_debug_lock); +} + +/* + * nss_igs_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(igs) + +/* + * nss_igs_stats_dentry_create() + * Create igs statistics debug entry. 
+ */ +void nss_igs_stats_dentry_create(void) +{ + nss_stats_create_dentry("igs", &nss_igs_stats_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_igs_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_igs_stats.h new file mode 100644 index 000000000..08f9c79f7 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_igs_stats.h @@ -0,0 +1,45 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_IGS_STATS_H +#define __NSS_IGS_STATS_H + +/* + * nss_igs_stats_sync + * API to sync statistics for IGS + */ +extern void nss_igs_stats_sync(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, uint16_t if_num); + +/* + * nss_igs_stats_reset() + * API to reset the IGS stats. + */ +extern void nss_igs_stats_reset(uint32_t if_num); + +/* + * nss_igs_stats_init() + * API to initialize IGS debug instance statistics. 
+ */ +extern void nss_igs_stats_init(uint32_t if_num, struct net_device *netdev); + + +/* + * IGS statistics APIs + */ +extern void nss_igs_stats_dentry_create(void); + +#endif /* __NSS_IGS_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_init.c b/feeds/ipq807x/qca-nss-drv/src/nss_init.c new file mode 100644 index 000000000..ebd2a12fb --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_init.c @@ -0,0 +1,950 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_init.c + * NSS init APIs + * + */ +#include "nss_core.h" +#if (NSS_PM_SUPPORT == 1) +#include "nss_pm.h" +#endif +#include "nss_tx_rx_common.h" +#include "nss_data_plane.h" +#include "nss_capwap.h" +#include "nss_strings.h" + +#include + +#include +#include +#include +#include + +#if (NSS_DT_SUPPORT == 1) +#if (NSS_FABRIC_SCALING_SUPPORT == 1) +#include +#endif +#include +#include +#include +#include +#include +#else +#include +#endif + +#include +#include +#include + +/* + * Global declarations + */ +int nss_ctl_redirect __read_mostly = 0; +int nss_ctl_debug __read_mostly = 0; +int nss_ctl_logbuf __read_mostly = 0; +int nss_jumbo_mru __read_mostly = 0; +int nss_paged_mode __read_mostly = 0; +#if (NSS_SKB_REUSE_SUPPORT == 1) +int nss_max_reuse __read_mostly = PAGE_SIZE; +#endif +int nss_skip_nw_process = 0x0; +module_param(nss_skip_nw_process, int, S_IRUGO); + +/* + * PM client handle + */ +#if (NSS_PM_SUPPORT == 1) +static void *pm_client; +#endif + +/* + * Handler to send NSS messages + */ +struct clk *nss_core0_clk; +struct clk *nss_core1_clk; + +/* + * Handle fabric requests - only on new kernel + */ +#if (NSS_DT_SUPPORT == 1) +struct clk *nss_fab0_clk; +struct clk *nss_fab1_clk; +#endif + +/* + * Top level nss context structure + */ +struct nss_top_instance nss_top_main; +struct nss_cmd_buffer nss_cmd_buf; +struct nss_runtime_sampling nss_runtime_samples; +struct workqueue_struct *nss_wq; + +/* + * Work Queue to handle messages to Kernel + */ +nss_work_t *nss_work; + +extern struct of_device_id nss_dt_ids[]; + +/* + * nss_probe() + * HLOS device probe callback + */ +static inline int nss_probe(struct platform_device *nss_dev) +{ + return nss_hal_probe(nss_dev); +} + +/* + * nss_remove() + * HLOS device remove callback + */ +static inline int nss_remove(struct platform_device *nss_dev) +{ + return nss_hal_remove(nss_dev); +} + +#if (NSS_DT_SUPPORT == 1) +/* + * 
Platform Device ID for NSS core. + */ +struct of_device_id nss_dt_ids[] = { + { .compatible = "qcom,nss" }, + { .compatible = "qcom,nss0" }, + { .compatible = "qcom,nss1" }, + {}, +}; +MODULE_DEVICE_TABLE(of, nss_dt_ids); +#endif + +/* + * nss_driver + * Platform driver structure for NSS + */ +struct platform_driver nss_driver = { + .probe = nss_probe, + .remove = nss_remove, + .driver = { + .name = "qca-nss", + .owner = THIS_MODULE, +#if (NSS_DT_SUPPORT == 1) + .of_match_table = of_match_ptr(nss_dt_ids), +#endif + }, +}; + +#if (NSS_FREQ_SCALE_SUPPORT == 1) +/* + * nss_reset_frequency_stats_samples() + * Reset all frequency sampling state when auto scaling is turned off. + */ +static void nss_reset_frequency_stats_samples(void) +{ + nss_runtime_samples.buffer_index = 0; + nss_runtime_samples.sum = 0; + nss_runtime_samples.average = 0; + nss_runtime_samples.sample_count = 0; + nss_runtime_samples.message_rate_limit = 0; + nss_runtime_samples.freq_scale_rate_limit_down = 0; +} + +/* + * nss_current_freq_handler() + * Handle Userspace Frequency Change Requests + */ +static int nss_current_freq_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret, i; + + BUG_ON(!nss_wq); + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + if (!*lenp || (*ppos && !write)) { + printk("Frequency Set to %d\n", nss_cmd_buf.current_freq); + *lenp = 0; + return ret; + } + + /* + * Check if frequency exists in frequency Table + */ + i = 0; + while (i < NSS_FREQ_MAX_SCALE) { + if (nss_runtime_samples.freq_scale[i].frequency == nss_cmd_buf.current_freq) { + break; + } + i++; + } + if (i == NSS_FREQ_MAX_SCALE) { + printk("Frequency not found. 
Please check Frequency Table\n"); + nss_cmd_buf.current_freq = nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency; + return ret; + } + + /* + * Turn off Auto Scale + */ + nss_cmd_buf.auto_scale = 0; + nss_runtime_samples.freq_scale_ready = 0; + nss_runtime_samples.freq_scale_index = i; + + nss_work = (nss_work_t *)kmalloc(sizeof(nss_work_t), GFP_ATOMIC); + if (!nss_work) { + nss_info("NSS Freq WQ kmalloc fail"); + return ret; + } + INIT_WORK((struct work_struct *)nss_work, nss_hal_wq_function); + nss_work->frequency = nss_cmd_buf.current_freq; + nss_work->stats_enable = 0; + + /* + * Ensure we start with a fresh set of samples later + */ + nss_reset_frequency_stats_samples(); + + queue_work(nss_wq, (struct work_struct *)nss_work); + + return ret; +} + +/* + * nss_auto_scale_handler() + * Enables or Disable Auto Scaling + */ +static int nss_auto_scale_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + if (!*lenp || (*ppos && !write)) { + return ret; + } + + if (nss_cmd_buf.auto_scale != 1) { + /* + * Is auto scaling currently enabled? 
If so, send the command to + * disable stats reporting to NSS + */ + if (nss_runtime_samples.freq_scale_ready != 0) { + nss_cmd_buf.current_freq = nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency; + nss_work = (nss_work_t *)kmalloc(sizeof(nss_work_t), GFP_ATOMIC); + if (!nss_work) { + nss_info("NSS Freq WQ kmalloc fail"); + return ret; + } + INIT_WORK((struct work_struct *)nss_work, nss_hal_wq_function); + nss_work->frequency = nss_cmd_buf.current_freq; + nss_work->stats_enable = 0; + queue_work(nss_wq, (struct work_struct *)nss_work); + nss_runtime_samples.freq_scale_ready = 0; + + /* + * The current samples would be stale later when scaling is + * enabled again, hence reset them + */ + nss_reset_frequency_stats_samples(); + } + return ret; + } + + /* + * Setup default values - Middle of Freq Scale Band + */ + nss_runtime_samples.freq_scale_index = 1; + nss_runtime_samples.sample_count = 0; + nss_runtime_samples.initialized = 0; + nss_cmd_buf.current_freq = nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency; + + nss_work = (nss_work_t *)kmalloc(sizeof(nss_work_t), GFP_ATOMIC); + if (!nss_work) { + nss_info("NSS Freq WQ kmalloc fail"); + return ret; + } + INIT_WORK((struct work_struct *)nss_work, nss_hal_wq_function); + nss_work->frequency = nss_cmd_buf.current_freq; + nss_work->stats_enable = 1; + queue_work(nss_wq, (struct work_struct *)nss_work); + + nss_cmd_buf.auto_scale = 0; + nss_runtime_samples.freq_scale_ready = 1; + + return ret; +} + +/* + * nss_get_freq_table_handler() + * Display Support Freq and Ex how to Change. 
+ */ +static int nss_get_freq_table_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret, i; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + if (write) { + return ret; + } + + printk("Frequency Supported - "); + + i = 0; + while (i < NSS_FREQ_MAX_SCALE) { + if (nss_runtime_samples.freq_scale[i].frequency != NSS_FREQ_SCALE_NA) { + printk("%d Hz ", nss_runtime_samples.freq_scale[i].frequency); + } + i++; + } + printk("\n"); + + *lenp = 0; + return ret; +} + +/* + * nss_get_average_inst_handler() + * Display AVG Inst Per Ms. + */ +static int nss_get_average_inst_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + if (write) { + return ret; + } + + printk("Current Inst Per Ms %x\n", nss_runtime_samples.average); + + *lenp = 0; + return ret; +} +#endif + +#if (NSS_FW_DBG_SUPPORT == 1) +/* + * nss_debug_handler() + * Enable NSS debug output + */ +static int nss_debug_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (!ret) { + if ((write) && (nss_ctl_debug != 0)) { + printk("Enabling NSS SPI Debug\n"); + nss_hal_debug_enable(); + } + } + + return ret; +} +#endif + +/* + * nss_coredump_handler() + * Send Signal To Coredump NSS Cores + */ +static int nss_coredump_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[NSS_CORE_0]; + int ret; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (!ret) { + /* + * if nss_cmd_buf.coredump is not 0 or 1, panic will be disabled + * when NSS FW crashes, so OEM/ODM have a chance to use mdump + * to dump crash dump (coredump) and send dump to us for analysis. 
+ */ + if ((write) && (nss_ctl_debug != 0) && nss_cmd_buf.coredump == 1) { + printk("Coredumping to DDR\n"); + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_TRIGGER_COREDUMP); + } + } + + return ret; +} + +/* + * nss_jumbo_mru_handler() + * Sysctl to modify nss_jumbo_mru + */ +static int nss_jumbo_mru_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + return ret; + } + + if (write) { + nss_core_set_jumbo_mru(nss_jumbo_mru); + nss_info("jumbo_mru set to %d\n", nss_jumbo_mru); + } + + return ret; +} + +/* nss_paged_mode_handler() + * Sysctl to modify nss_paged_mode. + */ + +static int nss_paged_mode_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + return ret; + } + + if (write) { + nss_core_set_paged_mode(nss_paged_mode); + nss_info("paged_mode set to %d\n", nss_paged_mode); + } + + return ret; +} + +#if (NSS_SKB_REUSE_SUPPORT == 1) +/* + * nss_get_min_reuse_handler() + * Sysctl to get min reuse sizes + */ +static int nss_get_min_reuse_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + struct nss_ctx_instance *nss_ctx = NULL; + uint32_t core_id; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + return ret; + } + + printk("Min SKB reuse sizes - "); + + for (core_id = 0; core_id < NSS_CORE_MAX; core_id++) { + nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[core_id]; + printk("core %d: %d ", core_id, nss_core_get_min_reuse(nss_ctx)); + } + + printk("\n"); + *lenp = 0; + return ret; +} + +/* + * nss_max_reuse_handler() + * Sysctl to modify nss_max_reuse + */ +static int nss_max_reuse_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + + nss_max_reuse = nss_core_get_max_reuse(); + ret = 
proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + return ret; + } + + if (write) { + nss_core_set_max_reuse(nss_max_reuse); + nss_info("max_reuse set to %d\n", nss_max_reuse); + } + + return ret; +} + +/* + * sysctl-tuning for NSS driver SKB reuse + */ +static struct ctl_table nss_skb_reuse_table[] = { + { + .procname = "min_sizes", + .data = NULL, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_get_min_reuse_handler, + }, + { + .procname = "max_size", + .data = &nss_max_reuse, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_max_reuse_handler, + }, + { } +}; +#endif + +#if (NSS_FREQ_SCALE_SUPPORT == 1) +/* + * sysctl-tuning infrastructure. + */ +static struct ctl_table nss_freq_table[] = { + { + .procname = "current_freq", + .data = &nss_cmd_buf.current_freq, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_current_freq_handler, + }, + { + .procname = "freq_table", + .data = &nss_cmd_buf.max_freq, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_get_freq_table_handler, + }, + { + .procname = "auto_scale", + .data = &nss_cmd_buf.auto_scale, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_auto_scale_handler, + }, + { + .procname = "inst_per_sec", + .data = &nss_cmd_buf.average_inst, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_get_average_inst_handler, + }, + { } +}; +#endif + +static struct ctl_table nss_general_table[] = { + { + .procname = "redirect", + .data = &nss_ctl_redirect, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, +#if (NSS_FW_DBG_SUPPORT == 1) + { + .procname = "debug", + .data = &nss_ctl_debug, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_debug_handler, + }, +#endif + { + .procname = "coredump", + .data = &nss_cmd_buf.coredump, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_coredump_handler, + }, + { + .procname = "logbuf", + .data = &nss_ctl_logbuf, + .maxlen = sizeof(int), + 
.mode = 0644, + .proc_handler = &nss_logbuffer_handler, + }, + { + .procname = "jumbo_mru", + .data = &nss_jumbo_mru, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_jumbo_mru_handler, + }, + { + .procname = "paged_mode", + .data = &nss_paged_mode, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_paged_mode_handler, + }, + { } +}; + +static struct ctl_table nss_init_dir[] = { +#if (NSS_FREQ_SCALE_SUPPORT == 1) + { + .procname = "clock", + .mode = 0555, + .child = nss_freq_table, + }, +#endif + { + .procname = "general", + .mode = 0555, + .child = nss_general_table, + }, +#if (NSS_SKB_REUSE_SUPPORT == 1) + { + .procname = "skb_reuse", + .mode = 0555, + .child = nss_skb_reuse_table, + }, +#endif + { } +}; + +static struct ctl_table nss_root_dir[] = { + { + .procname = "nss", + .mode = 0555, + .child = nss_init_dir, + }, + { } +}; + +static struct ctl_table nss_root[] = { + { + .procname = "dev", + .mode = 0555, + .child = nss_root_dir, + }, + { } +}; + +static struct ctl_table_header *nss_dev_header; + +/* + * nss_init() + * Registers nss driver + */ +static int __init nss_init(void) +{ +#if (NSS_DT_SUPPORT == 1) + struct device_node *cmn = NULL; +#endif + nss_info("Init NSS driver"); + +#if (NSS_DT_SUPPORT == 1) + /* + * Get reference to NSS common device node + */ + cmn = of_find_node_by_name(NULL, "nss-common"); + if (!cmn) { + nss_info_always("qca-nss-drv.ko is loaded for symbol link\n"); + return 0; + } + of_node_put(cmn); + + /* + * Pick up HAL by target information + */ +#if defined(NSS_HAL_IPQ806X_SUPPORT) + if (of_machine_is_compatible("qcom,ipq8064") || of_machine_is_compatible("qcom,ipq8062")) { + nss_top_main.hal_ops = &nss_hal_ipq806x_ops; + nss_top_main.data_plane_ops = &nss_data_plane_gmac_ops; + nss_top_main.num_nss = 2; + } +#endif +#if defined(NSS_HAL_IPQ807x_SUPPORT) + if (of_machine_is_compatible("qcom,ipq807x") || of_machine_is_compatible("qcom,ipq8074")) { + nss_top_main.hal_ops = &nss_hal_ipq807x_ops; + 
nss_top_main.data_plane_ops = &nss_data_plane_ops; +#if defined(NSS_MEM_PROFILE_LOW) + nss_top_main.num_nss = 1; +#else + nss_top_main.num_nss = 2; +#endif + } +#endif +#if defined(NSS_HAL_IPQ60XX_SUPPORT) + if (of_machine_is_compatible("qcom,ipq6018")) { + nss_top_main.hal_ops = &nss_hal_ipq60xx_ops; + nss_top_main.data_plane_ops = &nss_data_plane_ops; + nss_top_main.num_nss = 1; + } +#endif +#if defined(NSS_HAL_IPQ50XX_SUPPORT) + if (of_machine_is_compatible("qcom,ipq5018")) { + nss_top_main.hal_ops = &nss_hal_ipq50xx_ops; + nss_top_main.data_plane_ops = &nss_data_plane_ops; + nss_top_main.num_nss = 1; + } +#endif +#if defined(NSS_HAL_FSM9010_SUPPORT) + if (of_machine_is_compatible("qcom,fsm9010")) { + nss_top_main.hal_ops = &nss_hal_fsm9010_ops; + nss_top_main.data_plane_ops = &nss_data_plane_gmac_ops; + nss_top_main.num_nss = 1; + } +#endif + if (!nss_top_main.hal_ops) { + nss_info_always("No supported HAL compiled on this platform\n"); + return -EFAULT; + } +#else + /* + * For banana, only ipq806x is supported + */ + nss_top_main.hal_ops = &nss_hal_ipq806x_ops; + nss_top_main.data_plane_ops = &nss_data_plane_gmac_ops; + nss_top_main.num_nss = 2; + +#endif /* NSS_DT_SUPPORT */ + nss_top_main.nss_hal_common_init_done = false; + + /* + * Initialize data_plane workqueue + */ + if (nss_data_plane_init_delay_work()) { + nss_warning("Error initializing nss_data_plane_workqueue\n"); + return -EFAULT; + } + + /* + * Enable spin locks + */ + spin_lock_init(&(nss_top_main.lock)); + spin_lock_init(&(nss_top_main.stats_lock)); + mutex_init(&(nss_top_main.wq_lock)); + + /* + * Enable NSS statistics + */ + nss_stats_init(); + + /* + * Enable NSS statistics names. + */ + nss_strings_init(); + + /* + * Register sysctl table. + */ + nss_dev_header = register_sysctl_table(nss_root); + + /* + * Registering sysctl for ipv4/6 specific config. 
+ */ + nss_ipv4_register_sysctl(); +#ifdef NSS_DRV_IPV6_ENABLE + nss_ipv6_register_sysctl(); +#endif + + /* + * Registering sysctl for n2h specific config. + */ + if (nss_top_main.num_nss == 1) { + nss_n2h_single_core_register_sysctl(); + } else { + nss_n2h_multi_core_register_sysctl(); + } + + /* + * Registering sysctl for rps specific config. + */ + nss_rps_register_sysctl(); + +#ifdef NSS_DRV_C2C_ENABLE + /* + * Registering sysctl for c2c_tx specific config. + */ + nss_c2c_tx_register_sysctl(); +#endif + + /* + * Registering sysctl for for printing non zero stats. + */ + nss_stats_register_sysctl(); + + /* + * Register sysctl for project config + */ + nss_project_register_sysctl(); + + /* + * Registering sysctl for pppoe specific config. + */ + nss_pppoe_register_sysctl(); + + /* + * Setup Runtime Sample values + */ + nss_runtime_samples.freq_scale_index = 1; + nss_runtime_samples.freq_scale_ready = 0; + nss_runtime_samples.freq_scale_rate_limit_down = 0; + nss_runtime_samples.buffer_index = 0; + nss_runtime_samples.sum = 0; + nss_runtime_samples.sample_count = 0; + nss_runtime_samples.average = 0; + nss_runtime_samples.message_rate_limit = 0; + nss_runtime_samples.initialized = 0; + + nss_cmd_buf.current_freq = nss_runtime_samples.freq_scale[nss_runtime_samples.freq_scale_index].frequency; + + /* + * Initial Workqueue + */ + nss_wq = create_workqueue("nss_freq_queue"); + +#if (NSS_PM_SUPPORT == 1) + /* + * Initialize NSS Bus PM module + */ + nss_pm_init(); + + /* + * Register with Bus driver + */ + pm_client = nss_pm_client_register(NSS_PM_CLIENT_NETAP); + if (!pm_client) { + nss_warning("Error registering with PM driver"); + } +#endif + + /* + * Initialize mtu size needed as start + */ + nss_top_main.prev_mtu_sz = ETH_DATA_LEN; + + /* + * register panic handler and timeout control + */ + nss_coredump_notify_register(); + nss_coredump_init_delay_work(); + +#ifdef NSS_DRV_CAPWAP_ENABLE + /* + * Init capwap + */ + nss_capwap_init(); +#endif + +#ifdef 
NSS_DRV_QRFS_ENABLE + /* + * Init QRFS + */ + nss_qrfs_init(); +#endif + +#ifdef NSS_DRV_C2C_ENABLE + /* + * Init c2c_tx + */ + nss_c2c_tx_init(); +#endif + +#ifdef NSS_DRV_PVXLAN_ENABLE + /* + * Init pvxlan + */ + nss_pvxlan_init(); +#endif + +#ifdef NSS_DRV_CLMAP_ENABLE + /* + * Init clmap + */ + nss_clmap_init(); +#endif + + /* + * INIT ppe on supported platform + */ +#ifdef NSS_DRV_PPE_ENABLE + nss_ppe_init(); +#endif + +#ifdef NSS_DRV_DMA_ENABLE + nss_dma_init(); +#endif + + /* + * Init Wi-Fi mesh + */ +#ifdef NSS_DRV_WIFI_MESH_ENABLE + nss_wifi_mesh_init(); +#endif + + /* + * Register platform_driver + */ + return platform_driver_register(&nss_driver); +} + +/* + * nss_cleanup() + * Unregisters nss driver + */ +static void __exit nss_cleanup(void) +{ + nss_info("Exit NSS driver"); + + if (nss_dev_header) + unregister_sysctl_table(nss_dev_header); + + /* + * Unregister n2h specific sysctl + */ + nss_n2h_unregister_sysctl(); + + /* + * Unregister rps specific sysctl + */ + nss_rps_unregister_sysctl(); + +#ifdef NSS_DRV_C2C_ENABLE + /* + * Unregister c2c_tx specific sysctl + */ + nss_c2c_tx_unregister_sysctl(); +#endif + + /* + * Unregister pppoe specific sysctl + */ + nss_pppoe_unregister_sysctl(); + + /* + * Unregister ipv4/6 specific sysctl and free allocated to connection tables + */ + nss_ipv4_unregister_sysctl(); + nss_ipv4_free_conn_tables(); + +#ifdef NSS_DRV_IPV6_ENABLE + nss_ipv6_unregister_sysctl(); + nss_ipv6_free_conn_tables(); +#endif + + nss_project_unregister_sysctl(); + nss_data_plane_destroy_delay_work(); + + /* + * cleanup ppe on supported platform + */ +#ifdef NSS_DRV_PPE_ENABLE + nss_ppe_free(); +#endif + + platform_driver_unregister(&nss_driver); +} + +module_init(nss_init); +module_exit(nss_cleanup); + +MODULE_DESCRIPTION("QCA NSS Driver"); +MODULE_AUTHOR("Qualcomm Atheros Inc"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipsec.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec.c new file mode 100644 index 
000000000..49c7805f3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec.c @@ -0,0 +1,597 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ipsec.c + * NSS IPsec APIs + */ + +#include "nss_tx_rx_common.h" +#include "nss_ipsec.h" +#include "nss_ppe.h" +#include "nss_ipsec_log.h" + +#if defined(NSS_HAL_IPQ806X_SUPPORT) +#define NSS_IPSEC_ENCAP_INTERFACE_NUM NSS_IPSEC_ENCAP_IF_NUMBER +#define NSS_IPSEC_DECAP_INTERFACE_NUM NSS_IPSEC_DECAP_IF_NUMBER +#define NSS_IPSEC_DATA_INTERFACE_NUM NSS_C2C_TX_INTERFACE + +#elif defined(NSS_HAL_FSM9010_SUPPORT) +#define NSS_IPSEC_ENCAP_INTERFACE_NUM NSS_IPSEC_ENCAP_IF_NUMBER +#define NSS_IPSEC_DECAP_INTERFACE_NUM NSS_IPSEC_DECAP_IF_NUMBER +#define NSS_IPSEC_DATA_INTERFACE_NUM NSS_IPSEC_CMN_INTERFACE + +#elif defined(NSS_HAL_IPQ807x_SUPPORT) +#define NSS_IPSEC_ENCAP_INTERFACE_NUM NSS_IPSEC_CMN_INTERFACE +#define NSS_IPSEC_DECAP_INTERFACE_NUM NSS_IPSEC_CMN_INTERFACE +#define NSS_IPSEC_DATA_INTERFACE_NUM NSS_IPSEC_CMN_INTERFACE + +#elif defined(NSS_HAL_IPQ60XX_SUPPORT) +#define NSS_IPSEC_ENCAP_INTERFACE_NUM 
NSS_IPSEC_CMN_INTERFACE +#define NSS_IPSEC_DECAP_INTERFACE_NUM NSS_IPSEC_CMN_INTERFACE +#define NSS_IPSEC_DATA_INTERFACE_NUM NSS_IPSEC_CMN_INTERFACE + +#elif defined(NSS_HAL_IPQ50XX_SUPPORT) +#define NSS_IPSEC_ENCAP_INTERFACE_NUM NSS_IPSEC_CMN_INTERFACE +#define NSS_IPSEC_DECAP_INTERFACE_NUM NSS_IPSEC_CMN_INTERFACE +#define NSS_IPSEC_DATA_INTERFACE_NUM NSS_IPSEC_CMN_INTERFACE + +#else +#define NSS_IPSEC_ENCAP_INTERFACE_NUM -1 +#define NSS_IPSEC_DECAP_INTERFACE_NUM -1 +#define NSS_IPSEC_DATA_INTERFACE_NUM -1 + +#endif + +/* + * Amount time the synchronous message should wait for response from + * NSS before the timeout happens. After the timeout the message + * response even if it arrives has to be discarded. Typically, the + * time needs to be selected based on the worst case time in case of + * peak throughput between host & NSS. + */ +#define NSS_IPSEC_TX_TIMEO_TICKS msecs_to_jiffies(3000) /* 3 Seconds */ + +/* + * Private data structure to hold state for + * the ipsec specific NSS interaction + */ +struct nss_ipsec_pvt { + struct semaphore sem; /* used for synchronizing 'tx_msg_sync' */ + struct completion complete; /* completion callback */ + atomic_t resp; /* Response error type */ +} nss_ipsec; + +/* + * nss_ipsec_get_msg_ctx() + * return ipsec message context assoicated with the callback + * + * Note: certain SOC the decap interface specially programmed + */ +static inline nss_ptr_t nss_ipsec_get_msg_ctx(struct nss_ctx_instance *nss_ctx, uint32_t interface_num) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + /* + * the encap is primary interface + */ + if (interface_num == NSS_IPSEC_ENCAP_INTERFACE_NUM) + return (nss_ptr_t)nss_top->ipsec_encap_ctx; + + return (nss_ptr_t)nss_top->ipsec_decap_ctx; +} + +/* + * nss_ipsec_get_msg_callback() + * this gets the message callback handler + */ +static inline nss_ptr_t nss_ipsec_get_msg_callback(struct nss_ctx_instance *nss_ctx, uint32_t interface_num) +{ + struct nss_top_instance *nss_top = 
nss_ctx->nss_top; + + /* + * the encap is primary interface + */ + if (interface_num == NSS_IPSEC_ENCAP_INTERFACE_NUM) + return (nss_ptr_t)nss_top->ipsec_encap_callback; + + return (nss_ptr_t)nss_top->ipsec_decap_callback; +} + +/* + ********************************** + Rx APIs + ********************************** + */ + +/* + * nss_ipsec_msg_handler() + * this handles all the IPsec events and responses + */ +static void nss_ipsec_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *app_data __attribute((unused))) +{ + struct nss_ipsec_msg *nim = (struct nss_ipsec_msg *)ncm; + nss_ipsec_msg_callback_t cb = NULL; + uint32_t if_num = ncm->interface; + + /* + * Trace messages. + */ + nss_ipsec_log_rx_msg(nim); + + /* + * Sanity check the message type + */ + if (ncm->type > NSS_IPSEC_MSG_TYPE_MAX) { + nss_warning("%px: rx message type out of range: %d", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_ipsec_msg)) { + nss_warning("%px: rx message length is invalid: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + BUG_ON((if_num != NSS_IPSEC_ENCAP_INTERFACE_NUM) && (if_num != NSS_IPSEC_DECAP_INTERFACE_NUM)); + + if (ncm->response == NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: rx message response for if %d, type %d, is invalid: %d", nss_ctx, ncm->interface, + ncm->type, ncm->response); + return; + } + + /* + * Is this a notification? 
if, yes then fill up the callback and app_data from + * locally stored state + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = nss_ipsec_get_msg_callback(nss_ctx, if_num); + ncm->app_data = nss_ipsec_get_msg_ctx(nss_ctx, if_num); + } + + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * load, test & call + */ + cb = (nss_ipsec_msg_callback_t)ncm->cb; + if (unlikely(!cb)) { + nss_trace("%px: rx handler has been unregistered for i/f: %d", nss_ctx, ncm->interface); + return; + } + + cb((void *)ncm->app_data, nim); +} + +/* + ********************************** + Tx APIs + ********************************** + */ + +/* + * nss_ipsec_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_ipsec_callback(void *app_data, struct nss_ipsec_msg *nim) +{ + struct nss_cmn_msg *ncm = &nim->cm; + + /* + * This callback is for synchronous operation. The caller sends its + * response pointer which needs to be loaded with the response + * data arriving from the NSS + */ + atomic_t *resp = (atomic_t *)app_data; + + if (ncm->response == NSS_CMN_RESPONSE_ACK) { + atomic_set(resp, NSS_IPSEC_ERROR_TYPE_NONE); + complete(&nss_ipsec.complete); + return; + } + + atomic_set(resp, ncm->error); + complete(&nss_ipsec.complete); +} + +/* + * nss_ipsec_tx_msg + * Send ipsec rule to NSS. + */ +nss_tx_status_t nss_ipsec_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_ipsec_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + nss_info("%px: message %d for if %d\n", nss_ctx, ncm->type, ncm->interface); + + BUILD_BUG_ON(NSS_NBUF_PAYLOAD_SIZE < sizeof(struct nss_ipsec_msg)); + + /* + * Trace messages. 
+ */ + nss_ipsec_log_tx_msg(msg); + + if ((ncm->interface != NSS_IPSEC_ENCAP_INTERFACE_NUM) && (ncm->interface != NSS_IPSEC_DECAP_INTERFACE_NUM)) { + nss_warning("%px: tx message request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_IPSEC_MSG_TYPE_MAX) { + nss_warning("%px: tx message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + nss_info("msg params version:%d, interface:%d, type:%d, cb:%px, app_data:%px, len:%d\n", + ncm->version, ncm->interface, ncm->type, (void *)ncm->cb, (void *)ncm->app_data, ncm->len); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_ipsec_tx_msg); + +/* + * nss_ipsec_tx_msg_sync() + * Transmit a ipsec message to NSS firmware synchronously. + */ +nss_tx_status_t nss_ipsec_tx_msg_sync(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + enum nss_ipsec_msg_type type, uint16_t len, + struct nss_ipsec_msg *nim, enum nss_ipsec_error_type *resp) +{ + struct nss_ipsec_msg nim_local = { {0} }; + nss_tx_status_t status; + int ret; + + /* + * Length of the message should be the based on type + */ + if (len > sizeof(nim_local.msg)) { + nss_warning("%px: (%u)Bad message length(%u) for type (%d)", nss_ctx, if_num, len, type); + return NSS_TX_FAILURE_TOO_LARGE; + } + + /* + * Response buffer is a required for copying the response for message + */ + if (!resp) { + nss_warning("%px: (%u)Response buffer is empty, type(%d)", nss_ctx, if_num, type); + return NSS_TX_FAILURE_BAD_PARAM; + } + + /* + * TODO: this can be removed in future as we need to ensure that the response + * memory is only updated when the current outstanding request is waiting. + * This can be solved by introducing sequence no. in messages and only completing + * the message if the sequence no. matches. 
For now this is solved by passing + * a known memory nss_ipsec.resp + */ + down(&nss_ipsec.sem); + + /* + * Initializing it to a fail error type + */ + atomic_set(&nss_ipsec.resp, NSS_IPSEC_ERROR_TYPE_UNHANDLED_MSG); + + /* + * We need to copy the message content into the actual message + * to be sent to NSS + * + * Note: Here pass the nss_ipsec.resp as the pointer. Since, the caller + * provided pointer is not allocated by us and may go away when this function + * returns with failure. The callback is not aware of this and may try to + * access the pointer incorrectly potentially resulting in a crash. + */ + nss_ipsec_msg_init(&nim_local, if_num, type, len, nss_ipsec_callback, &nss_ipsec.resp); + memcpy(&nim_local.msg, &nim->msg, len); + + status = nss_ipsec_tx_msg(nss_ctx, &nim_local); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: ipsec_tx_msg failed", nss_ctx); + goto done; + } + + ret = wait_for_completion_timeout(&nss_ipsec.complete, NSS_IPSEC_TX_TIMEO_TICKS); + if (!ret) { + nss_warning("%px: IPsec msg tx failed due to timeout", nss_ctx); + status = NSS_TX_FAILURE_NOT_ENABLED; + goto done; + } + + /* + * Read memory barrier + */ + smp_rmb(); + + /* + * Copy the response received + */ + *resp = atomic_read(&nss_ipsec.resp); + + /* + * Only in case of non-error response we will + * indicate success + */ + if (*resp != NSS_IPSEC_ERROR_TYPE_NONE) + status = NSS_TX_FAILURE; + +done: + up(&nss_ipsec.sem); + return status; +} +EXPORT_SYMBOL(nss_ipsec_tx_msg_sync); + +/* + * nss_ipsec_tx_buf + * Send data packet for ipsec processing + */ +nss_tx_status_t nss_ipsec_tx_buf(struct sk_buff *skb, uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[nss_top_main.ipsec_handler_id]; + + nss_trace("%px: IPsec If Tx packet, id:%d, data=%px", nss_ctx, if_num, skb->data); + + return nss_core_send_packet(nss_ctx, skb, if_num, H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_ipsec_tx_buf); + +/* + ********************************** + Register APIs 
+ ********************************** + */ + +/* + * nss_ipsec_notify_register() + * register message notifier for the given interface (if_num) + */ +struct nss_ctx_instance *nss_ipsec_notify_register(uint32_t if_num, nss_ipsec_msg_callback_t cb, void *app_data) +{ + struct nss_top_instance *nss_top = &nss_top_main; + uint8_t core_id = nss_top->ipsec_handler_id; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[core_id]; + + if (if_num >= NSS_MAX_NET_INTERFACES) { + nss_warning("%px: notfiy register received for invalid interface %d", nss_ctx, if_num); + return NULL; + } + + /* + * the encap is primary interface + */ + if (if_num == NSS_IPSEC_ENCAP_INTERFACE_NUM) { + nss_top->ipsec_encap_callback = cb; + nss_top->ipsec_encap_ctx = app_data; + return nss_ctx; + } + + nss_top->ipsec_decap_callback = cb; + nss_top->ipsec_decap_ctx = app_data; + return nss_ctx; +} +EXPORT_SYMBOL(nss_ipsec_notify_register); + +/* + * nss_ipsec_notify_unregister() + * unregister the IPsec notifier for the given interface number (if_num) + */ +void nss_ipsec_notify_unregister(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + if (if_num >= NSS_MAX_NET_INTERFACES) { + nss_warning("%px: notify unregister received for invalid interface %d", nss_ctx, if_num); + return; + } + + /* + * the encap is primary interface + */ + if (if_num == NSS_IPSEC_ENCAP_INTERFACE_NUM) { + nss_top->ipsec_encap_callback = NULL; + nss_top->ipsec_encap_ctx = NULL; + return; + } + + nss_top->ipsec_decap_callback = NULL; + nss_top->ipsec_decap_ctx = NULL; +} +EXPORT_SYMBOL(nss_ipsec_notify_unregister); + +/* + * nss_ipsec_data_register() + * register a data callback routine + */ +struct nss_ctx_instance *nss_ipsec_data_register(uint32_t if_num, nss_ipsec_buf_callback_t cb, struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx, *nss_ctx0; + + nss_ctx = &nss_top_main.nss[nss_top_main.ipsec_handler_id]; + + if ((if_num >= 
NSS_MAX_NET_INTERFACES) && (if_num < NSS_MAX_PHYSICAL_INTERFACES)){ + nss_warning("%px: data register received for invalid interface %d", nss_ctx, if_num); + return NULL; + } + + /* + * avoid multiple registeration for multiple tunnels + */ + if (nss_ctx->subsys_dp_register[if_num].cb) { + return nss_ctx; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, cb, NULL, NULL, netdev, features); + + if (nss_top_main.ipsec_handler_id == 1) { + nss_ctx0 = &nss_top_main.nss[0]; + + nss_core_register_subsys_dp(nss_ctx0, if_num, cb, NULL, NULL, netdev, features); + } + + return nss_ctx; +} +EXPORT_SYMBOL(nss_ipsec_data_register); + +/* + * nss_ipsec_data_unregister() + * unregister a data callback routine + */ +void nss_ipsec_data_unregister(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx0; + + if ((if_num >= NSS_MAX_NET_INTERFACES) && (if_num < NSS_MAX_PHYSICAL_INTERFACES)){ + nss_warning("%px: data unregister received for invalid interface %d", nss_ctx, if_num); + return; + } + + if (nss_top_main.ipsec_handler_id == 1) { + nss_ctx0 = &nss_top_main.nss[0]; + + nss_core_unregister_subsys_dp(nss_ctx0, if_num); + } + + nss_core_unregister_subsys_dp(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_ipsec_data_unregister); + +/* + * nss_ipsec_get_encap_interface() + * Get the NSS interface number for encap message + */ +int32_t nss_ipsec_get_encap_interface(void) +{ + return NSS_IPSEC_ENCAP_INTERFACE_NUM; +} +EXPORT_SYMBOL(nss_ipsec_get_encap_interface); + +/* + * nss_ipsec_get_decap_interface() + * Get the NSS interface number for decap message + */ +int32_t nss_ipsec_get_decap_interface(void) +{ + return NSS_IPSEC_DECAP_INTERFACE_NUM; +} +EXPORT_SYMBOL(nss_ipsec_get_decap_interface); + +/* + * nss_ipsec_get_data_interface() + * Get the NSS interface number used for data path + */ +int32_t nss_ipsec_get_data_interface(void) +{ + return NSS_IPSEC_DATA_INTERFACE_NUM; +} +EXPORT_SYMBOL(nss_ipsec_get_data_interface); + +/* + * nss_ipsec_get_context() 
+ * Get NSS context instance for IPsec handle + */ +struct nss_ctx_instance *nss_ipsec_get_context(void) +{ + return &nss_top_main.nss[nss_top_main.ipsec_handler_id]; +} +EXPORT_SYMBOL(nss_ipsec_get_context); + +/* + * nss_ipsec_get_ifnum() + * Return IPsec interface number with coreid. + */ +int32_t nss_ipsec_get_ifnum(int32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_ipsec_get_context(); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_ipsec_get_ifnum); + +/* + * nss_ipsec_ppe_port_config() + * Configure PPE port for IPsec inline + */ +bool nss_ipsec_ppe_port_config(struct nss_ctx_instance *nss_ctx, struct net_device *netdev, + uint32_t if_num, uint32_t vsi_num) +{ +#ifdef NSS_PPE_SUPPORTED + if_num = NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); + + if (nss_ppe_tx_ipsec_config_msg(if_num, vsi_num, netdev->mtu, netdev->mtu) != NSS_TX_SUCCESS) { + nss_warning("%px: Failed to configure PPE IPsec port", nss_ctx); + return false; + } + + return true; +#else + return false; +#endif +} +EXPORT_SYMBOL(nss_ipsec_ppe_port_config); + +/* + * nss_ipsec_ppe_mtu_update() + * Update PPE MTU for IPsec inline + */ +bool nss_ipsec_ppe_mtu_update(struct nss_ctx_instance *nss_ctx, uint32_t if_num, uint16_t mtu, uint16_t mru) +{ +#ifdef NSS_PPE_SUPPORTED + if_num = NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); + + if (nss_ppe_tx_ipsec_mtu_msg(if_num, mtu, mru) != NSS_TX_SUCCESS) { + nss_warning("%px: Failed to update PPE MTU for IPsec port", nss_ctx); + return false; + } + + return true; +#else + return false; +#endif +} +EXPORT_SYMBOL(nss_ipsec_ppe_mtu_update); + +/* + * nss_ipsec_register_handler() + */ +void nss_ipsec_register_handler() +{ + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[nss_top_main.ipsec_handler_id]; + + BUILD_BUG_ON(NSS_IPSEC_ENCAP_INTERFACE_NUM < 0); + BUILD_BUG_ON(NSS_IPSEC_DECAP_INTERFACE_NUM < 0); + + sema_init(&nss_ipsec.sem, 1); + 
init_completion(&nss_ipsec.complete); + atomic_set(&nss_ipsec.resp, NSS_IPSEC_ERROR_TYPE_NONE); + + nss_ctx->nss_top->ipsec_encap_callback = NULL; + nss_ctx->nss_top->ipsec_decap_callback = NULL; + + nss_ctx->nss_top->ipsec_encap_ctx = NULL; + nss_ctx->nss_top->ipsec_decap_ctx = NULL; + + nss_core_register_handler(nss_ctx, NSS_IPSEC_ENCAP_INTERFACE_NUM, nss_ipsec_msg_handler, NULL); + nss_core_register_handler(nss_ctx, NSS_IPSEC_DECAP_INTERFACE_NUM, nss_ipsec_msg_handler, NULL); +} + +/* + * nss_ipsec_msg_init() + * Initialize ipsec message. + */ +void nss_ipsec_msg_init(struct nss_ipsec_msg *nim, uint16_t if_num, uint32_t type, uint32_t len, + nss_ipsec_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data); +} +EXPORT_SYMBOL(nss_ipsec_msg_init); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn.c new file mode 100644 index 000000000..c5f520da1 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn.c @@ -0,0 +1,525 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_dynamic_interface.h" +#include "nss_ipsec_cmn.h" +#include "nss_ppe.h" +#include "nss_ipsec_cmn_log.h" +#include "nss_ipsec_cmn_stats.h" +#include "nss_ipsec_cmn_strings.h" + +#define NSS_IPSEC_CMN_TX_TIMEOUT 3000 /* 3 Seconds */ +#define NSS_IPSEC_CMN_INTERFACE_MAX_LONG BITS_TO_LONGS(NSS_MAX_NET_INTERFACES) + +/* + * Private data structure for handling synchronous messaging. + */ +static struct nss_ipsec_cmn_pvt { + struct semaphore sem; + struct completion complete; + struct nss_ipsec_cmn_msg nicm; + unsigned long if_map[NSS_IPSEC_CMN_INTERFACE_MAX_LONG]; +} ipsec_cmn_pvt; + +/* + * nss_ipsec_cmn_verify_ifnum() + * Verify if the interface number is a IPsec interface. + */ +static bool nss_ipsec_cmn_verify_ifnum(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + enum nss_dynamic_interface_type type = nss_dynamic_interface_get_type(nss_ctx, if_num); + + if (if_num == NSS_IPSEC_CMN_INTERFACE) + return true; + + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_INNER: + case NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_OUTER: + case NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_MDATA_INNER: + case NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_MDATA_OUTER: + case NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_REDIRECT: + return true; + + default: + return false; + } + + return false; +} + +/* + * nss_ipsec_cmn_msg_handler() + * Handle NSS -> HLOS messages for IPSEC tunnel. + */ +static void nss_ipsec_cmn_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *app_data) +{ + nss_ipsec_cmn_msg_callback_t cb; + struct nss_ipsec_cmn_msg *nim; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + /* + * Trace messages. + */ + nim = (struct nss_ipsec_cmn_msg *)ncm; + nss_ipsec_cmn_log_rx_msg(nim); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_IPSEC_CMN_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type(%u) for interface(%u)\n", nss_ctx, ncm->type, ncm->interface); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_ipsec_cmn_msg)) { + nss_warning("%px: Invalid message length(%d)\n", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + if (ncm->type == NSS_IPSEC_CMN_MSG_TYPE_CTX_SYNC) { + nss_ipsec_cmn_stats_sync(nss_ctx, ncm); + nss_ipsec_cmn_stats_notify(nss_ctx, ncm->interface); + } + + /* + * Update the callback and app_data for NOTIFY messages, ipsec_cmn sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->nss_rx_interface_handlers[ncm->interface].app_data; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Callback + */ + cb = (nss_ipsec_cmn_msg_callback_t)ncm->cb; + app_data = (void *)ncm->app_data; + + /* + * Call IPsec message callback + */ + if (!cb) { + nss_warning("%px: No callback for IPsec interface %d\n", nss_ctx, ncm->interface); + return; + } + + nss_trace("%px: calling ipsecsmgr message handler(%u)\n", nss_ctx, ncm->interface); + cb(app_data, ncm); +} + +/* + * nss_ipsec_cmn_sync_resp() + * Callback to handle the completion of HLOS-->NSS messages. + */ +static void nss_ipsec_cmn_sync_resp(void *app_data, struct nss_cmn_msg *ncm) +{ + struct nss_ipsec_cmn_msg *pvt_msg = app_data; + struct nss_ipsec_cmn_msg *resp_msg = container_of(ncm, struct nss_ipsec_cmn_msg, cm); + + /* + * Copy response message to pvt message + */ + memcpy(pvt_msg, resp_msg, sizeof(*resp_msg)); + + /* + * Write memory barrier + */ + smp_wmb(); + + complete(&ipsec_cmn_pvt.complete); +} + +/* + * nss_ipsec_cmn_ifmap_get() + * Return IPsec common active interfaces map. 
+ */ +unsigned long *nss_ipsec_cmn_ifmap_get(void) +{ + return ipsec_cmn_pvt.if_map; +} + +/* + * nss_ipsec_cmn_get_context() + * Retrieve context for IPSEC redir. + */ +struct nss_ctx_instance *nss_ipsec_cmn_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.ipsec_handler_id]; +} +EXPORT_SYMBOL(nss_ipsec_cmn_get_context); + +/* + * nss_ipsec_cmn_get_ifnum_with_coreid() + * Return IPsec interface number with coreid. + */ +uint32_t nss_ipsec_cmn_get_ifnum_with_coreid(int32_t ifnum) +{ + struct nss_ctx_instance *nss_ctx = nss_ipsec_cmn_get_context(); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, ifnum); +} +EXPORT_SYMBOL(nss_ipsec_cmn_get_ifnum_with_coreid); + +/* + * nss_ipsec_cmn_msg_init() + * Initialize message + */ +void nss_ipsec_cmn_msg_init(struct nss_ipsec_cmn_msg *nim, uint16_t if_num, enum nss_ipsec_cmn_msg_type type, + uint16_t len, nss_ipsec_cmn_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&nim->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_ipsec_cmn_msg_init); + +/* + * nss_ipsec_cmn_tx_msg() + * Transmit a IPSEC message to NSS FW. + */ +nss_tx_status_t nss_ipsec_cmn_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_ipsec_cmn_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace messages. 
+ */ + nss_ipsec_cmn_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (ncm->type >= NSS_IPSEC_CMN_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type(%u)\n", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + if (!nss_ipsec_cmn_verify_ifnum(nss_ctx, ncm->interface)) { + nss_warning("%px: Invalid message interface(%u)\n", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_ipsec_cmn_msg)) { + nss_warning("%px: Invalid message length(%u)\n", nss_ctx, nss_cmn_get_msg_len(ncm)); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_ipsec_cmn_tx_msg); + +/* + * nss_ipsec_cmn_tx_msg_sync() + * Transmit a IPSEC redir message to NSS firmware synchronously. + */ +nss_tx_status_t nss_ipsec_cmn_tx_msg_sync(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + enum nss_ipsec_cmn_msg_type type, uint16_t len, + struct nss_ipsec_cmn_msg *nicm) +{ + struct nss_ipsec_cmn_msg *local_nicm = &ipsec_cmn_pvt.nicm; + nss_tx_status_t status; + int ret = 0; + + /* + * Length of the message should be the based on type + */ + if (len > sizeof(struct nss_ipsec_cmn_msg)) { + nss_warning("%px: Invalid message length(%u), type (%d), I/F(%u)\n", nss_ctx, len, type, if_num); + return NSS_TX_FAILURE; + } + + down(&ipsec_cmn_pvt.sem); + + /* + * We need to copy the message content into the actual message + * to be sent to NSS + */ + memset(local_nicm, 0, sizeof(*local_nicm)); + + nss_ipsec_cmn_msg_init(local_nicm, if_num, type, len, nss_ipsec_cmn_sync_resp, local_nicm); + memcpy(&local_nicm->msg, &nicm->msg, len); + + status = nss_ipsec_cmn_tx_msg(nss_ctx, local_nicm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Failed to send message\n", nss_ctx); + goto done; + } + + ret = wait_for_completion_timeout(&ipsec_cmn_pvt.complete, msecs_to_jiffies(NSS_IPSEC_CMN_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: Failed to receive response, 
timeout(%d)\n", nss_ctx, ret); + status = NSS_TX_FAILURE_NOT_READY; + goto done; + } + + /* + * Read memory barrier + */ + smp_rmb(); + + if (local_nicm->cm.response != NSS_CMN_RESPONSE_ACK) { + status = NSS_TX_FAILURE; + nicm->cm.response = local_nicm->cm.response; + nicm->cm.error = local_nicm->cm.error; + goto done; + } + + /* + * Copy the message received + */ + memcpy(&nicm->msg, &local_nicm->msg, len); + +done: + up(&ipsec_cmn_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_ipsec_cmn_tx_msg_sync); + +/* + * nss_ipsec_cmn_tx_buf() + * Send packet to IPsec interface in NSS. + */ +nss_tx_status_t nss_ipsec_cmn_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num) +{ + nss_trace("%px: Send to IPsec I/F(%u), skb(%px)\n", nss_ctx, if_num, os_buf); + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_ipsec_cmn_verify_ifnum(nss_ctx, if_num)) { + nss_warning("%px: Interface number(%d) is not IPSec type\n", nss_ctx, if_num); + return NSS_TX_FAILURE; + } + + return nss_core_send_packet(nss_ctx, os_buf, if_num, H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_ipsec_cmn_tx_buf); + +/* + * nss_ipsec_cmn_register_if() + * Register dynamic node for IPSEC redir. 
+ */ +struct nss_ctx_instance *nss_ipsec_cmn_register_if(uint32_t if_num, struct net_device *netdev, + nss_ipsec_cmn_data_callback_t cb_data, + nss_ipsec_cmn_msg_callback_t cb_msg, + uint32_t features, enum nss_dynamic_interface_type type, void *app_ctx) +{ + struct nss_ctx_instance *nss_ctx = nss_ipsec_cmn_get_context(); + uint32_t status; + + if (!nss_ipsec_cmn_verify_ifnum(nss_ctx, if_num)) { + nss_warning("%px: Invalid IPsec interface(%u)\n", nss_ctx, if_num); + return NULL; + } + + if (nss_ctx->subsys_dp_register[if_num].ndev) { + nss_warning("%px: Failed find free slot for IPsec NSS I/F:%u\n", nss_ctx, if_num); + return NULL; + } + +#ifdef NSS_DRV_PPE_ENABLE + if (features & NSS_IPSEC_CMN_FEATURE_INLINE_ACCEL) + nss_ppe_tx_ipsec_add_intf_msg(nss_ipsec_cmn_get_ifnum_with_coreid(if_num)); +#endif + + /* + * Registering handler for sending tunnel interface msgs to NSS. + */ + status = nss_core_register_handler(nss_ctx, if_num, nss_ipsec_cmn_msg_handler, app_ctx); + if (status != NSS_CORE_STATUS_SUCCESS){ + nss_warning("%px: Failed to register message handler for IPsec NSS I/F:%u\n", nss_ctx, if_num); + return NULL; + } + + status = nss_core_register_msg_handler(nss_ctx, if_num, cb_msg); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: Failed to register message handler for IPsec NSS I/F:%u\n", nss_ctx, if_num); + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, cb_data, NULL, app_ctx, netdev, features); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, type); + + /* + * Atomically set the bitmap for the interface number + */ + set_bit(if_num, ipsec_cmn_pvt.if_map); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_ipsec_cmn_register_if); + +/* + * nss_ipsec_cmn_unregister_if() + * Unregister dynamic node for IPSEC redir. 
+ */ +bool nss_ipsec_cmn_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_ipsec_cmn_get_context(); + struct net_device *dev; + uint32_t status; + + nss_assert(nss_ctx); + + if (!nss_ipsec_cmn_verify_ifnum(nss_ctx, if_num)) { + nss_warning("%px: Invalid IPsec interface(%u)\n", nss_ctx, if_num); + return false; + } + + dev = nss_cmn_get_interface_dev(nss_ctx, if_num); + if (!dev) { + nss_warning("%px: Failed to find registered netdev for IPsec NSS I/F:%u\n", nss_ctx, if_num); + return false; + } + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + /* + * Atomically clear the bitmap for the interface number + */ + clear_bit(if_num, ipsec_cmn_pvt.if_map); + + status = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Failed to unregister handler for IPsec NSS I/F:%u\n", nss_ctx, if_num); + return false; + } + + status = nss_core_unregister_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Failed to unregister handler for IPsec NSS I/F:%u\n", nss_ctx, if_num); + return false; + } + + return true; +} +EXPORT_SYMBOL(nss_ipsec_cmn_unregister_if); + +/* + * nss_ipsec_cmn_notify_register() + * Register a handler for notification from NSS firmware. 
+ */ +struct nss_ctx_instance *nss_ipsec_cmn_notify_register(uint32_t if_num, nss_ipsec_cmn_msg_callback_t cb, void *app_data) +{ + struct nss_ctx_instance *nss_ctx = nss_ipsec_cmn_get_context(); + uint32_t ret; + + BUG_ON(!nss_ctx); + + ret = nss_core_register_handler(nss_ctx, if_num, nss_ipsec_cmn_msg_handler, app_data); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to register event handler for interface(%u)\n", nss_ctx, if_num); + return NULL; + } + + ret = nss_core_register_msg_handler(nss_ctx, if_num, cb); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: Failed to register message handler for IPsec NSS I/F:%u\n", nss_ctx, if_num); + return NULL; + } + + return nss_ctx; +} +EXPORT_SYMBOL(nss_ipsec_cmn_notify_register); + +/* + * nss_ipsec_cmn_notify_unregister() + * unregister the IPsec notifier for the given interface number (if_num) + */ +void nss_ipsec_cmn_notify_unregister(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + uint32_t ret; + + if (if_num >= NSS_MAX_NET_INTERFACES) { + nss_warning("%px: notify unregister received for invalid interface %d\n", nss_ctx, if_num); + return; + } + + ret = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to unregister event handler for interface(%u)\n", nss_ctx, if_num); + return; + } + + ret = nss_core_unregister_handler(nss_ctx, if_num); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to unregister event handler for interface(%u)\n", nss_ctx, if_num); + return; + } +} +EXPORT_SYMBOL(nss_ipsec_cmn_notify_unregister); + +/* + * nss_ipsec_cmn_ppe_port_config() + * Configure PPE port for IPsec inline + */ +bool nss_ipsec_cmn_ppe_port_config(struct nss_ctx_instance *nss_ctx, struct net_device *netdev, + uint32_t if_num, uint32_t vsi_num) +{ +#ifdef NSS_PPE_SUPPORTED + if_num = NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); + + if 
(nss_ppe_tx_ipsec_config_msg(if_num, vsi_num, netdev->mtu, netdev->mtu) != NSS_TX_SUCCESS) { + nss_warning("%px: Failed to configure PPE IPsec port\n", nss_ctx); + return false; + } + + return true; +#else + return false; +#endif +} +EXPORT_SYMBOL(nss_ipsec_cmn_ppe_port_config); + +/* + * nss_ipsec_cmn_ppe_mtu_update() + * Update PPE MTU for IPsec inline + */ +bool nss_ipsec_cmn_ppe_mtu_update(struct nss_ctx_instance *nss_ctx, uint32_t if_num, uint16_t mtu, uint16_t mru) +{ +#ifdef NSS_PPE_SUPPORTED + if_num = NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); + + if (nss_ppe_tx_ipsec_mtu_msg(if_num, mtu, mru) != NSS_TX_SUCCESS) { + nss_warning("%px: Failed to update PPE MTU for IPsec port\n", nss_ctx); + return false; + } + + return true; +#else + return false; +#endif +} +EXPORT_SYMBOL(nss_ipsec_cmn_ppe_mtu_update); + +/* + * nss_ipsec_cmn_register_handler() + * Registering handler for sending msg to base ipsec_cmn node on NSS. + */ +void nss_ipsec_cmn_register_handler(void) +{ + sema_init(&ipsec_cmn_pvt.sem, 1); + init_completion(&ipsec_cmn_pvt.complete); + nss_ipsec_cmn_stats_dentry_create(); + nss_ipsec_cmn_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_log.c new file mode 100644 index 000000000..8ae7928f9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_log.c @@ -0,0 +1,354 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ipsec_cmn_log.c + * NSS IPSEC logger file. + */ + +#include "nss_core.h" + +#define NSS_IPSEC_LOG_IPV4 4 +#define NSS_IPSEC_LOG_IPV6 6 + +/* + * nss_ipsec_cmn_log_msg_types_str + * IPSEC message strings + */ +static int8_t *nss_ipsec_cmn_log_msg_types_str[NSS_IPSEC_CMN_MSG_TYPE_MAX] __maybe_unused = { + "IPSEC CMN Msg None", + "IPSEC CMN Node Config", + "IPSEC CMN CTX Config", + "IPSEC CMN CTX Sync", + "IPSEC CMN SA Create", + "IPSEC CMN SA Destroy", + "IPSEC CMN SA Sync", + "IPSEC CMN Flow Create", + "IPSEC CMN Flow Destroy", +}; + +/* + * nss_ipsec_cmn_log_node_msg_types_str + * IPSEC cmn node message strings + */ +static int8_t *nss_ipsec_cmn_log_node_str[] __maybe_unused = { + "IPSEC CMN Node DMA Redirect", + "IPSEC CMN Node DMA Lookaside", + "IPSEC CMN Node Maximum SA", +}; + +/* + * nss_ipsec_cmn_log_ctx_msg_types_str + * IPSEC cmn ctx message strings + */ +static int8_t *nss_ipsec_cmn_log_ctx_str[] __maybe_unused = { + "IPSEC CMN CTX Type", + "IPSEC CMN CTX Exception Interface", +}; + +/* + * nss_ipsec_cmn_log_ctx_types_str + * IPSEC cmn context strings + */ +static int8_t *nss_ipsec_cmn_ctx_types_str[] __maybe_unused = { + "IPSEC CMN CTX NONE", + "IPSEC CMN CTX INNER", + "IPSEC CMN CTX INNER BOUNCE", + "IPSEC CMN CTX OUTER", + "IPSEC CMN CTX OUTER BOUNCE", + "IPSEC CMN CTX REDIRECT", +}; + +/* + * nss_ipsec_cmn_log_flow_tuple_str + * IPSEC cmn flow tuple strings + */ +static int8_t *nss_ipsec_cmn_log_flow_tuple_str[] __maybe_unused = { + "Dest IP", + "Src IP", + "Spi Index", + "Dest Port", + "Src Port", + "User Pattern", + "User 
Protocol", + "IP Version", +}; + +/* + * nss_ipsec_cmn_log_sa_tuple_str + * IPSEC cmn SA tuple strings + */ +static int8_t *nss_ipsec_cmn_log_sa_tuple_str[] __maybe_unused = { + "Dest IP", + "Src IP", + "Spi Index", + "Dest Port", + "Src Port", + "Crypto Index", + "Protocol", + "IP Version", + "Hop Limit", +}; + +/* + * nss_ipsec_cmn_log_sa_data_str + * IPSEC cmn SA tuple strings + */ +static int8_t *nss_ipsec_cmn_log_sa_data_str[] __maybe_unused = { + "Sequence Start", + "Flags", + "Window Size", + "DSCP", + "DF", + "Block Length", + "IV length", + "ICV length", +}; + +/* + * nss_ipsec_cmn_log_error_str + * Strings for error types for IPSEC messages + */ +static int8_t *nss_ipsec_cmn_log_error_str[NSS_IPSEC_CMN_MSG_ERROR_MAX] __maybe_unused = { + "IPSEC No Error", + "IPSEC Invalid Context", + "IPSEC SA allocation Error", + "IPSEC Invalid SA", + "IPSEC Duplicate SA", + "IPSEC SA is in Use", + "IPSEC Error in Flow Allocation", + "IPSEC Invalid Flow", + "IPSEC Duplicate Flow", + "IPSEC Failure to find SA for Flow", + "IPSEC Failed to Register Dynamic Interface", + "IPSEC Unhandled Message", +}; + +/* + * nss_ipsec_cmn_log_node_msg() + * Log NSS IPSEC node message. + */ +static void nss_ipsec_cmn_log_node_msg(struct nss_ipsec_cmn_msg *nim) +{ + struct nss_ipsec_cmn_node *node_msg __maybe_unused = &nim->msg.node; + + nss_trace("%px: NSS IPSEC Node Message:\n" + "%s: %d\n" + "%s: %d\n" + "%s: %d\n", nim, + nss_ipsec_cmn_log_node_str[0], node_msg->dma_redirect, + nss_ipsec_cmn_log_node_str[1], node_msg->dma_lookaside, + nss_ipsec_cmn_log_node_str[2], node_msg->max_sa); +} + +/* + * nss_ipsec_cmn_log_ctx_msg() + * Log NSS IPSEC ctx message. 
+ */ +static void nss_ipsec_cmn_log_ctx_msg(struct nss_ipsec_cmn_msg *nim) +{ + struct nss_ipsec_cmn_ctx *ctx_msg __maybe_unused = &nim->msg.ctx; + + nss_trace("%px: NSS IPSEC CTX Message:\n" + "%s: %s\n" + "%s: %d\n", nim, + nss_ipsec_cmn_log_ctx_str[0], nss_ipsec_cmn_ctx_types_str[ctx_msg->type], + nss_ipsec_cmn_log_ctx_str[1], ctx_msg->except_ifnum); +} + +/* + * nss_ipsec_cmn_log_sa_msg() + * Log NSS IPSEC SA message. + */ +static void nss_ipsec_cmn_log_sa_msg(struct nss_ipsec_cmn_msg *nim) +{ + struct nss_ipsec_cmn_sa *sa_msg __maybe_unused = &nim->msg.sa; + struct nss_ipsec_cmn_sa_tuple *tuple = &sa_msg->sa_tuple; + struct nss_ipsec_cmn_sa_data *data __maybe_unused = &sa_msg->sa_data; + + nss_trace("%px: NSS IPSEC SA Message:\n", nim); + + if (tuple->ip_ver == 4) { + nss_trace("%s: %pI4\n%s: %pI4\n", + nss_ipsec_cmn_log_sa_tuple_str[0], tuple->dest_ip, + nss_ipsec_cmn_log_sa_tuple_str[1], tuple->src_ip); + } else { + nss_trace("%s: %pI6\n%s: %pI6\n", + nss_ipsec_cmn_log_sa_tuple_str[0], tuple->dest_ip, + nss_ipsec_cmn_log_sa_tuple_str[1], tuple->src_ip); + } + + nss_trace( "%s: %x\n%s: %d\n%s: %d\n%s: %d\n" + "%s: %d\n%s: %d\n%s: %d\n" + "%s: %d\n%s: %x\n%s: %d\n%s: %d\n" + "%s: %d\n%s: %d\n%s: %d\n%s: %d\n", + nss_ipsec_cmn_log_sa_tuple_str[2], tuple->spi_index, + nss_ipsec_cmn_log_sa_tuple_str[3], tuple->dest_port, + nss_ipsec_cmn_log_sa_tuple_str[4], tuple->src_port, + nss_ipsec_cmn_log_sa_tuple_str[5], tuple->crypto_index, + nss_ipsec_cmn_log_sa_tuple_str[6], tuple->protocol, + nss_ipsec_cmn_log_sa_tuple_str[7], tuple->ip_ver, + nss_ipsec_cmn_log_sa_tuple_str[8], tuple->hop_limit, + + nss_ipsec_cmn_log_sa_data_str[0], data->seq_start, + nss_ipsec_cmn_log_sa_data_str[1], data->flags, + nss_ipsec_cmn_log_sa_data_str[2], data->window_size, + nss_ipsec_cmn_log_sa_data_str[3], data->dscp, + nss_ipsec_cmn_log_sa_data_str[4], data->df, + nss_ipsec_cmn_log_sa_data_str[5], data->blk_len, + nss_ipsec_cmn_log_sa_data_str[6], data->iv_len, + 
nss_ipsec_cmn_log_sa_data_str[7], data->icv_len); + +} + +/* + * nss_ipsec_cmn_log_flow_msg() + * Log NSS IPSEC Flow message. + */ +static void nss_ipsec_cmn_log_flow_msg(struct nss_ipsec_cmn_msg *nim) +{ + struct nss_ipsec_cmn_flow *flow_msg __maybe_unused = &nim->msg.flow; + struct nss_ipsec_cmn_flow_tuple *flow = &flow_msg->flow_tuple; + struct nss_ipsec_cmn_sa_tuple *sa = &flow_msg->sa_tuple; + + nss_trace("%px: NSS IPSEC Flow Message:\n", nim); + + if (sa->ip_ver == 4) { + nss_trace("%s: %pI4\n%s: %pI4\n", + nss_ipsec_cmn_log_sa_tuple_str[0], sa->dest_ip, + nss_ipsec_cmn_log_sa_tuple_str[1], sa->src_ip); + } else { + nss_trace("%s: %pI6\n%s: %pI6\n", + nss_ipsec_cmn_log_sa_tuple_str[0], sa->dest_ip, + nss_ipsec_cmn_log_sa_tuple_str[1], sa->src_ip); + } + + if (flow->ip_ver == 4) { + nss_trace("%s: %pI4\n%s: %pI4\n", + nss_ipsec_cmn_log_sa_tuple_str[0], flow->dest_ip, + nss_ipsec_cmn_log_sa_tuple_str[1], flow->src_ip); + } else { + nss_trace("%s: %pI6\n%s: %pI6\n", + nss_ipsec_cmn_log_sa_tuple_str[0], flow->dest_ip, + nss_ipsec_cmn_log_sa_tuple_str[1], flow->src_ip); + } + + nss_trace( "%s: %x\n%s: %d\n%s: %d\n%s: %d\n" + "%s: %d\n", + nss_ipsec_cmn_log_flow_tuple_str[2], flow->spi_index, + nss_ipsec_cmn_log_flow_tuple_str[3], flow->dst_port, + nss_ipsec_cmn_log_flow_tuple_str[4], flow->src_port, + nss_ipsec_cmn_log_flow_tuple_str[5], flow->user_pattern, + nss_ipsec_cmn_log_flow_tuple_str[6], flow->protocol); + + nss_trace( "%s: %x\n%s: %d\n%s: %d\n%s: %d\n" + "%s: %d\n%s: %d\n%s: %d\n", + nss_ipsec_cmn_log_sa_tuple_str[2], sa->spi_index, + nss_ipsec_cmn_log_sa_tuple_str[3], sa->dest_port, + nss_ipsec_cmn_log_sa_tuple_str[4], sa->src_port, + nss_ipsec_cmn_log_sa_tuple_str[5], sa->crypto_index, + nss_ipsec_cmn_log_sa_tuple_str[6], sa->protocol, + nss_ipsec_cmn_log_sa_tuple_str[7], sa->ip_ver, + nss_ipsec_cmn_log_sa_tuple_str[8], sa->hop_limit); +} + +/* + * nss_ipsec_cmn_log_verbose() + * Log message contents. 
+ */ +static void nss_ipsec_cmn_log_verbose(struct nss_ipsec_cmn_msg *nim) +{ + switch (nim->cm.type) { + case NSS_IPSEC_CMN_MSG_TYPE_NODE_CONFIG: + nss_ipsec_cmn_log_node_msg(nim); + break; + + case NSS_IPSEC_CMN_MSG_TYPE_CTX_CONFIG: + nss_ipsec_cmn_log_ctx_msg(nim); + break; + + case NSS_IPSEC_CMN_MSG_TYPE_SA_CREATE: + case NSS_IPSEC_CMN_MSG_TYPE_SA_DESTROY: + nss_ipsec_cmn_log_sa_msg(nim); + break; + + case NSS_IPSEC_CMN_MSG_TYPE_FLOW_CREATE: + case NSS_IPSEC_CMN_MSG_TYPE_FLOW_DESTROY: + nss_ipsec_cmn_log_flow_msg(nim); + break; + + case NSS_IPSEC_CMN_MSG_TYPE_CTX_SYNC: + case NSS_IPSEC_CMN_MSG_TYPE_SA_SYNC: + /* + * No log for these valid messages. + */ + break; + + default: + nss_warning("%px: Invalid message type\n", nim); + break; + } +} + +/* + * nss_ipsec_cmn_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_ipsec_cmn_log_tx_msg(struct nss_ipsec_cmn_msg *nim) +{ + if (nim->cm.type >= NSS_IPSEC_CMN_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", nim); + return; + } + + nss_info("%px: type[%d]:%s\n", nim, nim->cm.type, nss_ipsec_cmn_log_msg_types_str[nim->cm.type]); + nss_ipsec_cmn_log_verbose(nim); +} + +/* + * nss_ipsec_cmn_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_ipsec_cmn_log_rx_msg(struct nss_ipsec_cmn_msg *nim) +{ + if (nim->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nim); + return; + } + + if (nim->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nim->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nim, nim->cm.type, + nss_ipsec_cmn_log_msg_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response]); + goto verbose; + } + + if (nim->cm.error >= NSS_IPSEC_CMN_MSG_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nim, nim->cm.type, nss_ipsec_cmn_log_msg_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response], + nim->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nim, nim->cm.type, nss_ipsec_cmn_log_msg_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response], + nim->cm.error, nss_ipsec_cmn_log_error_str[nim->cm.error]); + +verbose: + nss_ipsec_cmn_log_verbose(nim); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_log.h new file mode 100644 index 000000000..d99c8be4c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_IPSEC_CMN_LOG_H__ +#define __NSS_IPSEC_CMN_LOG_H__ + +/* + * nss_ipsec_cmn_log.h + * NSS Crypto Log Header File + */ + +/* + * nss_ipsec_cmn_log_tx_msg + * Logs a ipsec message that is sent to the NSS firmware. + */ +void nss_ipsec_cmn_log_tx_msg(struct nss_ipsec_cmn_msg *nim); + +/* + * nss_ipsec_cmn_log_rx_msg + * Logs a ipsec message that is received from the NSS firmware. + */ +void nss_ipsec_cmn_log_rx_msg(struct nss_ipsec_cmn_msg *nim); + +#endif /* __NSS_IPSEC_CMN_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_stats.c new file mode 100644 index 000000000..192f2291a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_stats.c @@ -0,0 +1,219 @@ +/* + *************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#include "nss_core.h" +#include "nss_ipsec_cmn.h" +#include "nss_ipsec_cmn_stats.h" +#include "nss_ipsec_cmn_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_ipsec_cmn_stats_notifier); + +/* + * Spinlock to protect IPsec common statistics update/read + */ +DEFINE_SPINLOCK(nss_ipsec_cmn_stats_lock); + +unsigned long *nss_ipsec_cmn_ifmap_get(void); +const char *nss_ipsec_cmn_stats_iface_type(enum nss_dynamic_interface_type type); + +/* + * nss_ipsec_cmn_stats + * ipsec common statistics + */ +uint64_t nss_ipsec_cmn_stats[NSS_MAX_NET_INTERFACES][NSS_IPSEC_CMN_STATS_MAX]; + +/* + * nss_ipsec_cmn_stats_read() + * Read ipsec_cmn node statistics. 
+ */ +static ssize_t nss_ipsec_cmn_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats + + * few blank lines for banner printing + Number of Extra outputlines + * for future reference to add new stats + */ + uint32_t max_output_lines = NSS_IPSEC_CMN_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + struct nss_ctx_instance *nss_ctx = nss_ipsec_cmn_get_context(); + enum nss_dynamic_interface_type type; + unsigned long *ifmap; + uint64_t *stats_shadow; + ssize_t bytes_read = 0; + size_t size_wr = 0; + uint32_t if_num; + int32_t i; + int count; + char *lbuf; + + ifmap = nss_ipsec_cmn_ifmap_get(); + count = bitmap_weight(ifmap, NSS_MAX_NET_INTERFACES); + if (count) { + size_al = size_al * count; + } + + lbuf = vzalloc(size_al); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return -ENOMEM; + } + + stats_shadow = vzalloc(NSS_IPSEC_CMN_STATS_MAX * 8); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + vfree(lbuf); + return -ENOMEM; + } + + /* + * Common node stats for each IPSEC dynamic interface. 
+ */ + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "ipsec_cmn stats", NSS_STATS_SINGLE_CORE); + for_each_set_bit(if_num, ifmap, NSS_MAX_NET_INTERFACES) { + + type = nss_dynamic_interface_get_type(nss_ctx, if_num); + if ((type < NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_INNER) || + (type > NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_REDIRECT)) { + continue; + } + + spin_lock_bh(&nss_ipsec_cmn_stats_lock); + for (i = 0; i < NSS_IPSEC_CMN_STATS_MAX; i++) { + stats_shadow[i] = nss_ipsec_cmn_stats[if_num][i]; + } + spin_unlock_bh(&nss_ipsec_cmn_stats_lock); + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n%s if_num:%03u\n", + nss_ipsec_cmn_stats_iface_type(type), if_num); + size_wr += nss_stats_print("ipsec_cmn", NULL, NSS_STATS_SINGLE_INSTANCE, nss_ipsec_cmn_strings_stats, + stats_shadow, NSS_IPSEC_CMN_STATS_MAX, lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + vfree(lbuf); + vfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_ipsec_cmn_stats_ops. + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ipsec_cmn); + +/* + * nss_ipsec_cmn_stats_sync() + * Update ipsec_cmn node statistics. + */ +void nss_ipsec_cmn_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm) +{ + struct nss_ipsec_cmn_msg *nicm = (struct nss_ipsec_cmn_msg *)ncm; + struct nss_ipsec_cmn_ctx_stats *ndccs = &nicm->msg.ctx_sync.stats; + uint64_t *ctx_stats; + uint32_t *msg_stats; + uint16_t i = 0; + + spin_lock_bh(&nss_ipsec_cmn_stats_lock); + + msg_stats = (uint32_t *)ndccs; + ctx_stats = nss_ipsec_cmn_stats[ncm->interface]; + + for (i = 0; i < NSS_IPSEC_CMN_STATS_MAX; i++, ctx_stats++, msg_stats++) { + *ctx_stats += *msg_stats; + } + + spin_unlock_bh(&nss_ipsec_cmn_stats_lock); +} + +/* + * nss_ipsec_cmn_stats_iface_type() + * Return a string for each interface type. 
+ */ +const char *nss_ipsec_cmn_stats_iface_type(enum nss_dynamic_interface_type type) +{ + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_INNER: + return "ipsec_cmn_inner"; + + case NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_MDATA_INNER: + return "ipsec_cmn_mdata_inner"; + + case NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_OUTER: + return "ipsec_cmn_outer"; + + case NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_MDATA_OUTER: + return "ipsec_cmn_mdata_outer"; + + case NSS_DYNAMIC_INTERFACE_TYPE_IPSEC_CMN_REDIRECT: + return "ipsec_cmn_redirect"; + + default: + return "invalid_interface"; + } +} + +/* + * nss_ipsec_cmn_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_ipsec_cmn_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_ipsec_cmn_stats_notification ipsec_cmn_stats; + + spin_lock_bh(&nss_ipsec_cmn_stats_lock); + ipsec_cmn_stats.core_id = nss_ctx->id; + ipsec_cmn_stats.if_num = if_num; + memcpy(ipsec_cmn_stats.stats_ctx, nss_ipsec_cmn_stats[if_num], sizeof(ipsec_cmn_stats.stats_ctx)); + spin_unlock_bh(&nss_ipsec_cmn_stats_lock); + + atomic_notifier_call_chain(&nss_ipsec_cmn_stats_notifier, NSS_STATS_EVENT_NOTIFY, &ipsec_cmn_stats); +} + +/* + * nss_ipsec_cmn_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_ipsec_cmn_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_ipsec_cmn_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ipsec_cmn_stats_unregister_notifier); + +/* + * nss_ipsec_cmn_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_ipsec_cmn_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_ipsec_cmn_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ipsec_cmn_stats_register_notifier); + +/* + * nss_ipsec_cmn_stats_dentry_create() + * Create ipsec common statistics debug entry. 
+ */ +void nss_ipsec_cmn_stats_dentry_create(void) +{ + nss_stats_create_dentry("ipsec_cmn", &nss_ipsec_cmn_stats_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_stats.h new file mode 100644 index 000000000..511056802 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_stats.h @@ -0,0 +1,28 @@ +/* + *************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ *************************************************************************** + */ + +#ifndef __NSS_IPSEC_CMN_STATS_H +#define __NSS_IPSEC_CMN_STATS_H + +#include + +extern void nss_ipsec_cmn_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_ipsec_cmn_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm); +extern void nss_ipsec_cmn_stats_dentry_create(void); + +#endif /* __NSS_IPSEC_CMN_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_strings.c new file mode 100644 index 000000000..bf2cff6c5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_strings.c @@ -0,0 +1,82 @@ +/* + *************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_ipsec_cmn_strings.h" + +/* + * nss_ipsec_cmn_strings_stats + * ipsec common statistics strings. 
+ */ +struct nss_stats_info nss_ipsec_cmn_strings_stats[NSS_IPSEC_CMN_STATS_MAX] = { + {"rx_pkts", NSS_STATS_TYPE_COMMON}, + {"rx_byts", NSS_STATS_TYPE_COMMON}, + {"tx_pkts", NSS_STATS_TYPE_COMMON}, + {"tx_byts", NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops", NSS_STATS_TYPE_DROP}, + {"fail_headroom", NSS_STATS_TYPE_DROP}, + {"fail_tailroom", NSS_STATS_TYPE_DROP}, + {"fail_replay", NSS_STATS_TYPE_DROP}, + {"fail_replay_dup", NSS_STATS_TYPE_DROP}, + {"fail_replay_win", NSS_STATS_TYPE_DROP}, + {"fail_pbuf_crypto", NSS_STATS_TYPE_DROP}, + {"fail_queue", NSS_STATS_TYPE_DROP}, + {"fail_queue_crypto", NSS_STATS_TYPE_DROP}, + {"fail_queue_nexthop", NSS_STATS_TYPE_DROP}, + {"fail_pbuf_alloc", NSS_STATS_TYPE_DROP}, + {"fail_pbuf_linear", NSS_STATS_TYPE_DROP}, + {"fail_pbuf_stats", NSS_STATS_TYPE_DROP}, + {"fail_pbuf_align", NSS_STATS_TYPE_DROP}, + {"fail_cipher", NSS_STATS_TYPE_EXCEPTION}, + {"fail_auth", NSS_STATS_TYPE_EXCEPTION}, + {"fail_seq_ovf", NSS_STATS_TYPE_DROP}, + {"fail_blk_len", NSS_STATS_TYPE_DROP}, + {"fail_hash_len", NSS_STATS_TYPE_DROP}, + {"fail_transform", NSS_STATS_TYPE_DROP}, + {"fail_crypto", NSS_STATS_TYPE_DROP}, + {"fail_cle", NSS_STATS_TYPE_DROP}, + {"is_stopped", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_ipsec_cmn_strings_read() + * Read ipsec common statistics names + */ +static ssize_t nss_ipsec_cmn_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ipsec_cmn_strings_stats, NSS_IPSEC_CMN_STATS_MAX); +} + +/* + * nss_ipsec_cmn_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ipsec_cmn); + +/* + * nss_ipsec_cmn_strings_dentry_create() + * Create ipsec common statistics strings debug entry. 
+ */ +void nss_ipsec_cmn_strings_dentry_create(void) +{ + nss_strings_create_dentry("ipsec_cmn", &nss_ipsec_cmn_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_strings.h new file mode 100644 index 000000000..c22f4c0f1 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_cmn_strings.h @@ -0,0 +1,27 @@ +/* + *************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ *************************************************************************** + */ + +#ifndef __NSS_IPSEC_CMN_STRINGS_H +#define __NSS_IPSEC_CMN_STRINGS_H + +#include "nss_ipsec_cmn_stats.h" + +extern struct nss_stats_info nss_ipsec_cmn_strings_stats[NSS_IPSEC_CMN_STATS_MAX]; +extern void nss_ipsec_cmn_strings_dentry_create(void); + +#endif /* __NSS_IPSEC_CMN_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_log.c new file mode 100644 index 000000000..2f1570efb --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_log.c @@ -0,0 +1,205 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ipsec_log.c + * NSS IPSEC logger file. 
+ */ + +#include "nss_core.h" + +#define NSS_IPSEC_LOG_IPV4 4 +#define NSS_IPSEC_LOG_IPV6 6 + +/* + * nss_ipsec_log_message_types_str + * IPSEC message strings + */ +static int8_t *nss_ipsec_log_message_types_str[NSS_IPSEC_MSG_TYPE_MAX] __maybe_unused = { + "IPSEC Msg None", + "IPSEC ADD Rule", + "IPSEC DEL Rule", + "IPSEC Flush Tunnel", + "IPSEC SA Stats", + "IPSEC Flow Stats", + "IPSEC Node Stats", + "IPSEC Configure Node", +}; + +/* + * nss_ipsec_log_error_response_types_str + * Strings for error types for IPSEC messages + */ +static int8_t *nss_ipsec_log_error_response_types_str[NSS_IPSEC_ERROR_TYPE_MAX] __maybe_unused = { + "IPSEC No Error", + "IPSEC Hash Duplicate", + "IPSEC Hash Collision", + "IPSEC Unhandled Message", + "IPSEC Invalid Rule", + "IPSEC MAX SA", + "IPSEC MAX Flow", + "IPSEC Invalid CINDEX", + "IPSEC Invalid IP Version", +}; + +/* + * nss_ipsec_log_rule_msg() + * Log NSS IPSEC rule message. + */ +static void nss_ipsec_log_rule_msg(struct nss_ipsec_msg *nim) +{ + struct nss_ipsec_rule *nir __maybe_unused = &nim->msg.rule; + + nss_trace("%px: NSS IPSEC Rule Message:\n" + "IPSEC ESP SPI Index: %dn" + "IPSEC TTL Hop Limit: %dn" + "IPSEC IP Version: %x\n" + "IPSEC Crypto Index: %d\n" + "IPSEC Window Size: %d\n" + "IPSEC Cipher Block Len: %d\n" + "IPSEC Initialization Vector Length: %d\n" + "IPSEC NAT-T Required: %d\n" + "IPSEC ICV Length: %d\n" + "IPSEC Skip Seq Number: %d\n" + "IPSEC Skip ESP Trailer: %d\n" + "IPSEC Use Pattern: %d\n" + "IPSEC Enable Extended Sequence Number: %d\n" + "IPSEC DSCP Value: %d\n" + "IPSEC Don't Fragment Flag: %d\n" + "IPSEC DSCP Copy %d\n" + "IPSEC DF Copy: %d\n" + "IPSEC NSS Index: %d\n" + "IPSEC SA Index: %d\n", + nir, nir->oip.esp_spi, + nir->oip.ttl_hop_limit, nir->oip.ip_ver, + nir->data.crypto_index, nir->data.window_size, + nir->data.cipher_blk_len, nir->data.iv_len, + nir->data.nat_t_req, nir->data.esp_icv_len, + nir->data.esp_seq_skip, nir->data.esp_tail_skip, + nir->data.use_pattern, nir->data.enable_esn, + 
nir->data.dscp, nir->data.df, + nir->data.copy_dscp, nir->data.copy_df, + nir->index, nir->sa_idx); + + /* + * Continuation of previous log. Different identifiers based on ip_ver + */ + if (nir->oip.ip_ver == NSS_IPSEC_LOG_IPV6) { + nss_trace("IPSEC Destination Address: %pI6\n" + "IPSEC Source Address: %pI6\n", + nir->oip.dst_addr, nir->oip.src_addr); + } else if (nir->oip.ip_ver == NSS_IPSEC_LOG_IPV4) { + nss_trace("IPSEC Destination Address: %pI4\n" + "IPSEC Source Address: %pI4\n", + nir->oip.dst_addr, nir->oip.src_addr); + } +} + +/* + * nss_ipsec_log_configure_node_msg() + * Log NSS IPSEC configure node message. + */ +static void nss_ipsec_log_configure_node_msg(struct nss_ipsec_msg *nim) +{ + struct nss_ipsec_configure_node *nicn __maybe_unused = &nim->msg.node; + nss_trace("%px: NSS IPSEC Configure Node\n" + "IPSEC DMA Redirect: %d\n" + "IPSEC DMA Lookaside: %d\n", + nicn, nicn->dma_redirect, + nicn->dma_lookaside); +} + +/* + * nss_ipsec_log_verbose() + * Log message contents. + */ +static void nss_ipsec_log_verbose(struct nss_ipsec_msg *nim) +{ + switch (nim->cm.type) { + case NSS_IPSEC_MSG_TYPE_ADD_RULE: + case NSS_IPSEC_MSG_TYPE_DEL_RULE: + nss_ipsec_log_rule_msg(nim); + break; + + case NSS_IPSEC_MSG_TYPE_CONFIGURE_NODE: + nss_ipsec_log_configure_node_msg(nim); + break; + + case NSS_IPSEC_MSG_TYPE_NONE: + case NSS_IPSEC_MSG_TYPE_FLUSH_TUN: + case NSS_IPSEC_MSG_TYPE_SYNC_SA_STATS: + case NSS_IPSEC_MSG_TYPE_SYNC_FLOW_STATS: + case NSS_IPSEC_MSG_TYPE_SYNC_NODE_STATS: + /* + * No log for these valid messages. + */ + break; + + default: + nss_warning("%px: Invalid message type\n", nim); + break; + } +} + +/* + * nss_ipsec_log_tx_msg() + * Log messages transmitted to FW. 
+ */ +void nss_ipsec_log_tx_msg(struct nss_ipsec_msg *nim) +{ + if (nim->cm.type >= NSS_IPSEC_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", nim); + return; + } + + nss_info("%px: type[%d]:%s\n", nim, nim->cm.type, nss_ipsec_log_message_types_str[nim->cm.type]); + nss_ipsec_log_verbose(nim); +} + +/* + * nss_ipsec_log_rx_msg() + * Log messages received from FW. + */ +void nss_ipsec_log_rx_msg(struct nss_ipsec_msg *nim) +{ + if (nim->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nim); + return; + } + + if (nim->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nim->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nim, nim->cm.type, + nss_ipsec_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response]); + goto verbose; + } + + if (nim->cm.error >= NSS_IPSEC_ERROR_TYPE_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nim, nim->cm.type, nss_ipsec_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response], + nim->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nim, nim->cm.type, nss_ipsec_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response], + nim->cm.error, nss_ipsec_log_error_response_types_str[nim->cm.error]); + +verbose: + nss_ipsec_log_verbose(nim); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_log.h new file mode 100644 index 000000000..5342c208f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipsec_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_IPSEC_LOG_H__ +#define __NSS_IPSEC_LOG_H__ + +/* + * nss_ipsec_log.h + * NSS Crypto Log Header File + */ + +/* + * nss_ipsec_log_tx_msg + * Logs a ipsec message that is sent to the NSS firmware. + */ +void nss_ipsec_log_tx_msg(struct nss_ipsec_msg *nim); + +/* + * nss_ipsec_log_rx_msg + * Logs a ipsec message that is received from the NSS firmware. + */ +void nss_ipsec_log_rx_msg(struct nss_ipsec_msg *nim); + +#endif /* __NSS_IPSEC_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv4.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4.c new file mode 100644 index 000000000..e1e045206 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4.c @@ -0,0 +1,782 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ipv4.c + * NSS IPv4 APIs + */ +#include +#include "nss_dscp_map.h" +#include "nss_ipv4_stats.h" +#include "nss_ipv4_strings.h" + +#define NSS_IPV4_TX_MSG_TIMEOUT 1000 /* 1 sec timeout for IPv4 messages */ + +/* + * Private data structure for ipv4 configuration + */ +struct nss_ipv4_pvt { + struct semaphore sem; /* Semaphore structure */ + struct completion complete; /* completion structure */ + int response; /* Response from FW */ + void *cb; /* Original cb for sync msgs */ + void *app_data; /* Original app_data for sync msgs */ +} nss_ipv4_pvt; + +/* + * Private data structure for ipv4 connection information. + */ +struct nss_ipv4_conn_table_info { + uint32_t ce_table_size; /* Size of connection table entry in NSS FW */ + uint32_t cme_table_size; /* Size of connection match table entry in NSS FW */ + unsigned long ce_mem; /* Start address for connection entry table */ + unsigned long cme_mem; /* Start address for connection match entry table */ +} nss_ipv4_ct_info; + +int nss_ipv4_conn_cfg = NSS_DEFAULT_NUM_CONN; +int nss_ipv4_accel_mode_cfg __read_mostly = 1; + +static struct nss_dscp_map_entry mapping[NSS_DSCP_MAP_ARRAY_SIZE]; + +/* + * Callback for conn_sync_many request message. + */ +nss_ipv4_msg_callback_t nss_ipv4_conn_sync_many_msg_cb = NULL; + +/* + * nss_ipv4_dscp_map_usage() + * Help function shows the usage of the command. 
+ */ +static inline void nss_ipv4_dscp_map_usage(void) +{ + nss_info_always("\nUsage:\n"); + nss_info_always("echo > /proc/sys/dev/nss/ipv4cfg/ipv4_dscp_map\n\n"); + nss_info_always("dscp[0-63] action[0-%u] prio[0-%u]:\n\n", + NSS_IPV4_DSCP_MAP_ACTION_MAX - 1, + NSS_DSCP_MAP_PRIORITY_MAX - 1); +} + +/* + * nss_ipv4_get_total_conn_count() + * Returns the sum of IPv4 and IPv6 connections. + */ +static uint32_t nss_ipv4_get_total_conn_count(int ipv4_num_conn) +{ + +#ifdef NSS_DRV_IPV6_ENABLE + return ipv4_num_conn + nss_ipv6_conn_cfg; +#else + return ipv4_num_conn; +#endif +} + +/* + * nss_ipv4_rx_msg_handler() + * Handle NSS -> HLOS messages for IPv4 bridge/route + */ +static void nss_ipv4_rx_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_ipv4_msg *nim = (struct nss_ipv4_msg *)ncm; + nss_ipv4_msg_callback_t cb; + + BUG_ON(ncm->interface != NSS_IPV4_RX_INTERFACE); + + /* + * Sanity check the message type + */ + if (ncm->type >= NSS_IPV4_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_ipv4_msg)) { + nss_warning("%px: message length is invalid: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Trace messages. + */ + nss_ipv4_log_rx_msg(nim); + + switch (nim->cm.type) { + case NSS_IPV4_RX_NODE_STATS_SYNC_MSG: + /* + * Update driver statistics on node sync and send statistics notifications to the registered modules. + */ + nss_ipv4_stats_node_sync(nss_ctx, &nim->msg.node_stats); + nss_ipv4_stats_notify(nss_ctx); + break; + + case NSS_IPV4_RX_CONN_STATS_SYNC_MSG: + /* + * Update driver statistics on connection sync. + */ + nss_ipv4_stats_conn_sync(nss_ctx, &nim->msg.conn_stats); + break; + + case NSS_IPV4_TX_CONN_STATS_SYNC_MANY_MSG: + /* + * Update driver statistics on connection sync many. 
+ */ + nss_ipv4_stats_conn_sync_many(nss_ctx, &nim->msg.conn_stats_many); + ncm->cb = (nss_ptr_t)nss_ipv4_conn_sync_many_msg_cb; + break; + } + + /* + * Update the callback and app_data for NOTIFY messages, IPv4 sends all notify messages + * to the same callback/app_data. + */ + if (nim->cm.response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->ipv4_callback; + ncm->app_data = (nss_ptr_t)nss_ctx->nss_top->ipv4_ctx; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * Callback + */ + cb = (nss_ipv4_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, nim); +} + +/* + * nss_ipv4_tx_sync_callback() + * Callback to handle the completion of synchronous tx messages. + */ +static void nss_ipv4_tx_sync_callback(void *app_data, struct nss_ipv4_msg *nim) +{ + nss_ipv4_msg_callback_t callback = (nss_ipv4_msg_callback_t)nss_ipv4_pvt.cb; + void *data = nss_ipv4_pvt.app_data; + + nss_ipv4_pvt.cb = NULL; + nss_ipv4_pvt.app_data = NULL; + + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("ipv4 error response %d\n", nim->cm.response); + nss_ipv4_pvt.response = NSS_TX_FAILURE; + } else { + nss_ipv4_pvt.response = NSS_TX_SUCCESS; + } + + if (callback) { + callback(data, nim); + } + + complete(&nss_ipv4_pvt.complete); +} + +/* + * nss_ipv4_dscp_action_get() + * Gets the action mapped to dscp. + */ +enum nss_ipv4_dscp_map_actions nss_ipv4_dscp_action_get(uint8_t dscp) +{ + if (dscp >= NSS_DSCP_MAP_ARRAY_SIZE) { + nss_warning("dscp:%u invalid\n", dscp); + return NSS_IPV4_DSCP_MAP_ACTION_MAX; + } + + return mapping[dscp].action; +} +EXPORT_SYMBOL(nss_ipv4_dscp_action_get); + +/* + * nss_ipv4_max_conn_count() + * Return the maximum number of IPv4 connections that the NSS acceleration engine supports. 
+ */ +int nss_ipv4_max_conn_count(void) +{ + return nss_ipv4_conn_cfg; +} +EXPORT_SYMBOL(nss_ipv4_max_conn_count); + +/* + * nss_ipv4_conn_inquiry() + * Inquiry if a connection has been established in NSS FW + */ +nss_tx_status_t nss_ipv4_conn_inquiry(struct nss_ipv4_5tuple *ipv4_5t_p, + nss_ipv4_msg_callback_t cb) +{ + nss_tx_status_t nss_tx_status; + struct nss_ipv4_msg nim; + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[0]; + + /* + * Initialize inquiry message structure. + * This is async message and the result will be returned + * to the caller by the msg_callback passed in. + */ + memset(&nim, 0, sizeof(nim)); + nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, + NSS_IPV4_TX_CONN_CFG_INQUIRY_MSG, + sizeof(struct nss_ipv4_inquiry_msg), + cb, NULL); + nim.msg.inquiry.rr.tuple = *ipv4_5t_p; + nss_tx_status = nss_ipv4_tx(nss_ctx, &nim); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: Send inquiry message failed\n", ipv4_5t_p); + } + + return nss_tx_status; +} +EXPORT_SYMBOL(nss_ipv4_conn_inquiry); + +/* + * nss_ipv4_tx_with_size() + * Transmit an ipv4 message to the FW with a specified size. + */ +nss_tx_status_t nss_ipv4_tx_with_size(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_msg *nim, uint32_t size) +{ + struct nss_cmn_msg *ncm = &nim->cm; + + /* + * Sanity check the message + */ + if (ncm->interface != NSS_IPV4_RX_INTERFACE) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_IPV4_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Trace messages. + */ + nss_ipv4_log_tx_msg(nim); + + return nss_core_send_cmd(nss_ctx, nim, sizeof(*nim), size); +} +EXPORT_SYMBOL(nss_ipv4_tx_with_size); + +/* + * nss_ipv4_tx() + * Transmit an ipv4 message to the FW. 
+ */ +nss_tx_status_t nss_ipv4_tx(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_msg *nim) +{ + return nss_ipv4_tx_with_size(nss_ctx, nim, NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_ipv4_tx); + +/* + * nss_ipv4_tx_sync() + * Transmit a synchronous ipv4 message to the FW. + */ +nss_tx_status_t nss_ipv4_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_msg *nim) +{ + nss_tx_status_t status; + int ret = 0; + + down(&nss_ipv4_pvt.sem); + nss_ipv4_pvt.cb = (void *)nim->cm.cb; + nss_ipv4_pvt.app_data = (void *)nim->cm.app_data; + + nim->cm.cb = (nss_ptr_t)nss_ipv4_tx_sync_callback; + nim->cm.app_data = (nss_ptr_t)NULL; + + status = nss_ipv4_tx(nss_ctx, nim); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: nss ipv4 msg tx failed\n", nss_ctx); + up(&nss_ipv4_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&nss_ipv4_pvt.complete, msecs_to_jiffies(NSS_IPV4_TX_MSG_TIMEOUT)); + if (!ret) { + nss_warning("%px: IPv4 tx sync failed due to timeout\n", nss_ctx); + nss_ipv4_pvt.response = NSS_TX_FAILURE; + } + + status = nss_ipv4_pvt.response; + up(&nss_ipv4_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_ipv4_tx_sync); + +/* + ********************************** + Register/Unregister/Miscellaneous APIs + ********************************** + */ + +/* + * nss_ipv4_notify_register() + * Register to received IPv4 events. + * + * NOTE: Do we want to pass an nss_ctx here so that we can register for ipv4 on any core? + */ +struct nss_ctx_instance *nss_ipv4_notify_register(nss_ipv4_msg_callback_t cb, void *app_data) +{ + /* + * TODO: We need to have a new array in support of the new API + * TODO: If we use a per-context array, we would move the array into nss_ctx based. + */ + nss_top_main.ipv4_callback = cb; + nss_top_main.ipv4_ctx = app_data; + return &nss_top_main.nss[nss_top_main.ipv4_handler_id]; +} +EXPORT_SYMBOL(nss_ipv4_notify_register); + +/* + * nss_ipv4_notify_unregister() + * Unregister to received IPv4 events. 
+ * + * NOTE: Do we want to pass an nss_ctx here so that we can register for ipv4 on any core? + */ +void nss_ipv4_notify_unregister(void) +{ + nss_top_main.ipv4_callback = NULL; +} +EXPORT_SYMBOL(nss_ipv4_notify_unregister); + +/* + * nss_ipv4_conn_sync_many_notify_register() + * Register to receive IPv4 conn_sync_many message response. + */ +void nss_ipv4_conn_sync_many_notify_register(nss_ipv4_msg_callback_t cb) +{ + nss_ipv4_conn_sync_many_msg_cb = cb; +} +EXPORT_SYMBOL(nss_ipv4_conn_sync_many_notify_register); + +/* + * nss_ipv4_conn_sync_many_notify_unregister() + * Unregister to receive IPv4 conn_sync_many message response. + */ +void nss_ipv4_conn_sync_many_notify_unregister(void) +{ + nss_ipv4_conn_sync_many_msg_cb = NULL; +} +EXPORT_SYMBOL(nss_ipv4_conn_sync_many_notify_unregister); + +/* + * nss_ipv4_get_mgr() + * + * TODO: This only suppports a single ipv4, do we ever want to support more? + */ +struct nss_ctx_instance *nss_ipv4_get_mgr(void) +{ + return (void *)&nss_top_main.nss[nss_top_main.ipv4_handler_id]; +} +EXPORT_SYMBOL(nss_ipv4_get_mgr); + +/* + * nss_ipv4_register_handler() + * Register our handler to receive messages for this interface + */ +void nss_ipv4_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_ipv4_get_mgr(); + + if (nss_core_register_handler(nss_ctx, NSS_IPV4_RX_INTERFACE, nss_ipv4_rx_msg_handler, NULL) != NSS_CORE_STATUS_SUCCESS) { + nss_warning("IPv4 handler failed to register"); + } + + nss_ipv4_stats_dentry_create(); + nss_ipv4_strings_dentry_create(); +} + +/* + * nss_ipv4_conn_cfg_process_callback() + * Call back function for the ipv4 connection configure process + */ +static void nss_ipv4_conn_cfg_process_callback(void *app_data, struct nss_ipv4_msg *nim) +{ + struct nss_ipv4_rule_conn_cfg_msg *nirccm = &nim->msg.rule_conn_cfg; + struct nss_ctx_instance *nss_ctx __maybe_unused = nss_ipv4_get_mgr(); + + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: IPv4 connection configuration failed 
with error: %d\n", nss_ctx, nim->cm.error); + nss_core_update_max_ipv4_conn(NSS_FW_DEFAULT_NUM_CONN); + nss_ipv4_free_conn_tables(); + return; + } + + nss_ipv4_conn_cfg = ntohl(nirccm->num_conn); + nss_info("%px: IPv4 connection configuration success: %d\n", nss_ctx, nim->cm.error); +} + +/* + * nss_ipv4_conn_cfg_process() + * Process request to configure number of ipv4 connections + */ +static int nss_ipv4_conn_cfg_process(struct nss_ctx_instance *nss_ctx, int conn) +{ + struct nss_ipv4_msg nim; + struct nss_ipv4_rule_conn_cfg_msg *nirccm; + nss_tx_status_t nss_tx_status; + + if ((!nss_ipv4_ct_info.ce_table_size) || (!nss_ipv4_ct_info.cme_table_size)) { + nss_warning("%px: connection entry or connection match entry table size not available\n", + nss_ctx); + return -EINVAL; + } + + nss_info("%px: IPv4 supported connections: %d\n", nss_ctx, conn); + + nss_ipv4_ct_info.ce_mem = __get_free_pages(GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO, + get_order(nss_ipv4_ct_info.ce_table_size)); + if (!nss_ipv4_ct_info.ce_mem) { + nss_warning("%px: Memory allocation failed for IPv4 Connections: %d\n", + nss_ctx, + conn); + goto fail; + } + + nss_ipv4_ct_info.cme_mem = __get_free_pages(GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO, + get_order(nss_ipv4_ct_info.cme_table_size)); + if (!nss_ipv4_ct_info.ce_mem) { + nss_warning("%px: Memory allocation failed for IPv4 Connections: %d\n", + nss_ctx, + conn); + goto fail; + } + + memset(&nim, 0, sizeof(struct nss_ipv4_msg)); + nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, NSS_IPV4_TX_CONN_CFG_RULE_MSG, + sizeof(struct nss_ipv4_rule_conn_cfg_msg), nss_ipv4_conn_cfg_process_callback, NULL); + + nirccm = &nim.msg.rule_conn_cfg; + nirccm->num_conn = htonl(conn); + nirccm->ce_mem = dma_map_single(nss_ctx->dev, (void *)nss_ipv4_ct_info.ce_mem, nss_ipv4_ct_info.ce_table_size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(nss_ctx->dev, nirccm->ce_mem))) { + nss_warning("%px: DMA mapping failed for virtual address = %px", nss_ctx, (void 
*)nss_ipv4_ct_info.ce_mem); + goto fail; + } + + nirccm->cme_mem = dma_map_single(nss_ctx->dev, (void *)nss_ipv4_ct_info.cme_mem, nss_ipv4_ct_info.cme_table_size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(nss_ctx->dev, nirccm->cme_mem))) { + nss_warning("%px: DMA mapping failed for virtual address = %px", nss_ctx, (void *)nss_ipv4_ct_info.cme_mem); + goto fail; + } + + nss_tx_status = nss_ipv4_tx(nss_ctx, &nim); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_tx error setting IPv4 Connections: %d\n", + nss_ctx, + conn); + goto fail; + } + + return 0; + +fail: + nss_ipv4_free_conn_tables(); + return -EINVAL;; +} + +/* + * nss_ipv4_update_conn_count_callback() + * Callback function for the ipv4 get connection info message. + */ +static void nss_ipv4_update_conn_count_callback(void *app_data, struct nss_ipv4_msg *nim) +{ + struct nss_ipv4_rule_conn_get_table_size_msg *nircgts = &nim->msg.size; + struct nss_ctx_instance *nss_ctx = nss_ipv4_get_mgr(); + + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: IPv4 fetch connection info failed with error: %d\n", nss_ctx, nim->cm.error); + nss_core_update_max_ipv4_conn(NSS_FW_DEFAULT_NUM_CONN); + return; + } + + nss_info("IPv4 get connection info success\n"); + + nss_ipv4_ct_info.ce_table_size = ntohl(nircgts->ce_table_size); + nss_ipv4_ct_info.cme_table_size = ntohl(nircgts->cme_table_size); + + if (nss_ipv4_conn_cfg_process(nss_ctx, ntohl(nircgts->num_conn)) != 0) { + nss_warning("%px: IPv4 connection entry or connection match entry table size\ + not available\n", nss_ctx); + } + + return; +} + +/* + * nss_ipv4_update_conn_count() + * Sets the maximum number of IPv4 connections. + * + * It first gets the connection tables size information from NSS FW + * and then configures the connections in NSS FW. 
+ */ +int nss_ipv4_update_conn_count(int ipv4_num_conn) +{ + struct nss_ctx_instance *nss_ctx = nss_ipv4_get_mgr(); + struct nss_ipv4_msg nim; + struct nss_ipv4_rule_conn_get_table_size_msg *nircgts; + nss_tx_status_t nss_tx_status; + uint32_t sum_of_conn; + + /* + * By default, NSS FW is configured with default number of connections. + */ + if (ipv4_num_conn == NSS_FW_DEFAULT_NUM_CONN) { + nss_info("%px: Default number of connections (%d) already configured\n", nss_ctx, ipv4_num_conn); + return 0; + } + + /* + * The input should be multiple of 1024. + * Input for ipv4 and ipv6 sum together should not exceed 8k + * Min. value should be at least 256 connections. This is the + * minimum connections we will support for each of them. + */ + sum_of_conn = nss_ipv4_get_total_conn_count(ipv4_num_conn); + + if ((ipv4_num_conn & NSS_NUM_CONN_QUANTA_MASK) || + (sum_of_conn > NSS_MAX_TOTAL_NUM_CONN_IPV4_IPV6) || + (ipv4_num_conn < NSS_MIN_NUM_CONN)) { + nss_warning("%px: input supported connections (%d) does not adhere\ + specifications\n1) not multiple of 1024,\n2) is less than \ + min val: %d, OR\n IPv4/6 total exceeds %d\n", + nss_ctx, + ipv4_num_conn, + NSS_MIN_NUM_CONN, + NSS_MAX_TOTAL_NUM_CONN_IPV4_IPV6); + return -EINVAL; + } + + memset(&nim, 0, sizeof(struct nss_ipv4_msg)); + nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, NSS_IPV4_TX_CONN_TABLE_SIZE_MSG, + sizeof(struct nss_ipv4_rule_conn_get_table_size_msg), nss_ipv4_update_conn_count_callback, NULL); + + nircgts = &nim.msg.size; + nircgts->num_conn = htonl(ipv4_num_conn); + nss_tx_status = nss_ipv4_tx(nss_ctx, &nim); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: Send fetch connection info message failed\n", nss_ctx); + return -EINVAL; + } + + return 0; +} + +/* + * nss_ipv4_free_conn_tables() + * Frees memory allocated for connection tables + */ +void nss_ipv4_free_conn_tables(void) +{ + if (nss_ipv4_ct_info.ce_mem) { + free_pages(nss_ipv4_ct_info.ce_mem, get_order(nss_ipv4_ct_info.ce_table_size)); 
+ } + + if (nss_ipv4_ct_info.cme_mem) { + free_pages(nss_ipv4_ct_info.cme_mem, get_order(nss_ipv4_ct_info.cme_table_size)); + } + + memset(&nss_ipv4_ct_info, 0, sizeof(struct nss_ipv4_conn_table_info)); + return; +} + +/* + * nss_ipv4_accel_mode_cfg_handler() + * Configure acceleration mode for IPv4 + */ +static int nss_ipv4_accel_mode_cfg_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[0]; + struct nss_ipv4_msg nim; + struct nss_ipv4_accel_mode_cfg_msg *nipcm; + nss_tx_status_t nss_tx_status; + int ret = NSS_FAILURE; + int current_value; + + /* + * Take snap shot of current value + */ + current_value = nss_ipv4_accel_mode_cfg; + + /* + * Write the variable with user input + */ + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret || (!write)) { + return ret; + } + + memset(&nim, 0, sizeof(struct nss_ipv4_msg)); + nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, NSS_IPV4_TX_ACCEL_MODE_CFG_MSG, + sizeof(struct nss_ipv4_accel_mode_cfg_msg), NULL, NULL); + + nipcm = &nim.msg.accel_mode_cfg; + nipcm->mode = htonl(nss_ipv4_accel_mode_cfg); + + nss_tx_status = nss_ipv4_tx_sync(nss_ctx, &nim); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: Send acceleration mode message failed\n", nss_ctx); + nss_ipv4_accel_mode_cfg = current_value; + return -EIO; + } + + return 0; +} + +/* + * nss_ipv4_dscp_map_cfg_handler() + * Sysctl handler for dscp/pri mappings. 
+ */ +static int nss_ipv4_dscp_map_cfg_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[0]; + struct nss_dscp_map_parse out; + struct nss_ipv4_msg nim; + struct nss_ipv4_dscp2pri_cfg_msg *nipd2p; + nss_tx_status_t status; + int ret; + + if (!write) { + return nss_dscp_map_print(ctl, buffer, lenp, ppos, mapping); + } + + ret = nss_dscp_map_parse(ctl, buffer, lenp, ppos, &out); + if (ret) { + nss_warning("failed to parse dscp mapping:%d\n", ret); + nss_ipv4_dscp_map_usage(); + return ret; + } + + if (out.action >= NSS_IPV4_DSCP_MAP_ACTION_MAX) { + nss_warning("invalid action value: %d\n", out.action); + nss_ipv4_dscp_map_usage(); + return -EINVAL; + } + + memset(&nim, 0, sizeof(struct nss_ipv4_msg)); + nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, NSS_IPV4_TX_DSCP2PRI_CFG_MSG, + sizeof(struct nss_ipv4_dscp2pri_cfg_msg), NULL, NULL); + + nipd2p = &nim.msg.dscp2pri_cfg; + nipd2p->dscp = out.dscp; + nipd2p->priority = out.priority; + + status = nss_ipv4_tx_sync(nss_ctx, &nim); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: ipv4 dscp2pri config message failed\n", nss_ctx); + return -EFAULT; + } + + /* + * NSS firmware acknowleged the configuration, so update the mapping + * table on HOST side as well. 
+ */ + mapping[out.dscp].action = out.action; + mapping[out.dscp].priority = out.priority; + + return 0; +} + +static struct ctl_table nss_ipv4_table[] = { + { + .procname = "ipv4_accel_mode", + .data = &nss_ipv4_accel_mode_cfg, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_ipv4_accel_mode_cfg_handler, + }, + { + .procname = "ipv4_dscp_map", + .data = &mapping[NSS_DSCP_MAP_ARRAY_SIZE], + .maxlen = sizeof(struct nss_dscp_map_entry), + .mode = 0644, + .proc_handler = &nss_ipv4_dscp_map_cfg_handler, + }, + { } +}; + +static struct ctl_table nss_ipv4_dir[] = { + { + .procname = "ipv4cfg", + .mode = 0555, + .child = nss_ipv4_table, + }, + { } +}; + +static struct ctl_table nss_ipv4_root_dir[] = { + { + .procname = "nss", + .mode = 0555, + .child = nss_ipv4_dir, + }, + { } +}; + +static struct ctl_table nss_ipv4_root[] = { + { + .procname = "dev", + .mode = 0555, + .child = nss_ipv4_root_dir, + }, + { } +}; + +static struct ctl_table_header *nss_ipv4_header; + +/* + * nss_ipv4_register_sysctl() + * Register sysctl specific to ipv4 + */ +void nss_ipv4_register_sysctl(void) +{ + sema_init(&nss_ipv4_pvt.sem, 1); + init_completion(&nss_ipv4_pvt.complete); + + /* + * Register sysctl table. + */ + nss_ipv4_header = register_sysctl_table(nss_ipv4_root); +} + +/* + * nss_ipv4_unregister_sysctl() + * Unregister sysctl specific to ipv4 + */ +void nss_ipv4_unregister_sysctl(void) +{ + /* + * Unregister sysctl table. + */ + if (nss_ipv4_header) { + unregister_sysctl_table(nss_ipv4_header); + } +} + +/* + * nss_ipv4_msg_init() + * Initialize IPv4 message. 
+ */ +void nss_ipv4_msg_init(struct nss_ipv4_msg *nim, uint16_t if_num, uint32_t type, uint32_t len, + nss_ipv4_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data); +} +EXPORT_SYMBOL(nss_ipv4_msg_init); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_log.c new file mode 100644 index 000000000..08414051d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_log.c @@ -0,0 +1,355 @@ +/* + ************************************************************************** + * Copyright (c) 2016, 2018, 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ipv4_log.c + * NSS IPv4 logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_ipv4_log_message_types_str + * IPv4 bridge/route rule message strings + */ +static int8_t *nss_ipv4_log_message_types_str[NSS_IPV4_MAX_MSG_TYPES] __maybe_unused = { + "IPv4 create rule message", + "IPv4 destroy rule message", + "Deprecated: NSS_IPV4_RX_ESTABLISH_RULE_MSG", + "IPv4 connection stats sync message", + "IPv4 generic statistics sync message", + "IPv4 number of connections supported rule message", + "IPv4 multicast create rule message", + "IPv4 request FW to send many conn sync message", +}; + +/* + * nss_ipv4_log_error_response_types_str + * Strings for error types for ipv4 messages + */ +static int8_t *nss_ipv4_log_error_response_types_str[NSS_IPV4_LAST] __maybe_unused = { + "No error", + "Unknown error", + "Invalid interface number", + "Missing connection rule", + "Buffer allocation failure", + "No connection found to delete", + "Conn cfg already done once", + "Conn cfg input is not multiple of quanta", + "Conn cfg input exceeds max supported connections", + "Conn cfg mem alloc fail at NSS FW", + "Invalid L4 protocol for multicast rule create", + "Invalid multicast flags for multicast update", + "Invalid interface for multicast update", +}; + +/* + * nss_ipv4_log_rule_create_msg() + * Log IPv4 create rule message. 
+ */ +static void nss_ipv4_log_rule_create_msg(struct nss_ipv4_msg *nim) +{ + struct nss_ipv4_rule_create_msg *nircm __maybe_unused = &nim->msg.rule_create; + nss_trace("%px: IPv4 create rule message \n" + "Protocol: %d\n" + "from_mtu: %u\n" + "to_mtu: %u\n" + "from_ip: %pI4h:%d\n" + "to_ip: %pI4h:%d\n" + "from_ip_xlate: %pI4h:%d\n" + "to_ip_xlate: %pI4h:%d\n" + "from_mac: %pM\n" + "to_mac: %pM\n" + "src_iface_num: %u\n" + "dest_iface_num: %u\n" + "ingress_inner_vlan_tag: %u\n" + "egress_inner_vlan_tag: %u\n" + "ingress_outer_vlan_tag: %u\n" + "egress_outer_vlan_tag: %u\n" + "rule_flags: %x\n" + "valid_flags: %x\n" + "return_pppoe_if_exist: %u\n" + "return_pppoe_if_num: %u\n" + "flow_pppoe_if_exist: %u\n" + "flow_pppoe_if_num: %u\n" + "flow_qos_tag: %x (%u)\n" + "return_qos_tag: %x (%u)\n" + "flow_dscp: %x\n" + "return_dscp: %x\n" + "flow_mirror_ifnum: %u\n" + "return_mirror_ifnum: %u\n", + nim, + nircm->tuple.protocol, + nircm->conn_rule.flow_mtu, + nircm->conn_rule.return_mtu, + &nircm->tuple.flow_ip, nircm->tuple.flow_ident, + &nircm->tuple.return_ip, nircm->tuple.return_ident, + &nircm->conn_rule.flow_ip_xlate, nircm->conn_rule.flow_ident_xlate, + &nircm->conn_rule.return_ip_xlate, nircm->conn_rule.return_ident_xlate, + nircm->conn_rule.flow_mac, + nircm->conn_rule.return_mac, + nircm->conn_rule.flow_interface_num, + nircm->conn_rule.return_interface_num, + nircm->vlan_primary_rule.ingress_vlan_tag, + nircm->vlan_primary_rule.egress_vlan_tag, + nircm->vlan_secondary_rule.ingress_vlan_tag, + nircm->vlan_secondary_rule.egress_vlan_tag, + nircm->rule_flags, + nircm->valid_flags, + nircm->pppoe_rule.return_if_exist, + nircm->pppoe_rule.return_if_num, + nircm->pppoe_rule.flow_if_exist, + nircm->pppoe_rule.flow_if_num, + nircm->qos_rule.flow_qos_tag, nircm->qos_rule.flow_qos_tag, + nircm->qos_rule.return_qos_tag, nircm->qos_rule.return_qos_tag, + nircm->dscp_rule.flow_dscp, + nircm->dscp_rule.return_dscp, + nircm->mirror_rule.flow_ifnum, + 
nircm->mirror_rule.return_ifnum); +} + +/* + * nss_ipv4_log_destroy_rule_msg() + * Log IPv4 destroy rule message. + */ +static void nss_ipv4_log_destroy_rule_msg(struct nss_ipv4_msg *nim) +{ + struct nss_ipv4_rule_destroy_msg *nirdm __maybe_unused = &nim->msg.rule_destroy; + nss_trace("%px: IPv4 destroy rule message: \n" + "flow_ip: %pI4h:%d\n" + "return_ip: %pI4h:%d\n" + "protocol: %d\n", + nim, + &nirdm->tuple.flow_ip, nirdm->tuple.flow_ident, + &nirdm->tuple.return_ip, nirdm->tuple.return_ident, + nirdm->tuple.protocol); +} + +/* + * nss_ipv4_log_conn_sync() + * Log IPv4 connection stats sync message. + */ +static void nss_ipv4_log_conn_sync(struct nss_ipv4_msg *nim) +{ + struct nss_ipv4_conn_sync *sync = &nim->msg.conn_stats; + if (sync->flow_tx_packet_count || sync->return_tx_packet_count) { + nss_trace("%px: IPv4 connection stats sync message: \n" + "Protocol: %d\n" + "src_addr: %pI4h:%d\n" + "dest_addr: %pI4h:%d\n" + "flow_rx_packet_count: %u\n" + "flow_rx_byte_count: %u\n" + "return_rx_packet_count: %u\n" + "return_rx_byte_count: %u\n" + "flow_tx_packet_count: %u\n" + "flow_tx_byte_count: %u\n" + "return_tx_packet_count: %u\n" + "return_tx_byte_count: %u\n", + nim, + (int)sync->protocol, + &sync->flow_ip, (int)sync->flow_ident, + &sync->return_ip_xlate, (int)sync->return_ident_xlate, + sync->flow_rx_packet_count, + sync->flow_rx_byte_count, + sync->return_rx_packet_count, + sync->return_rx_byte_count, + sync->flow_tx_packet_count, + sync->flow_tx_byte_count, + sync->return_tx_packet_count, + sync->return_tx_byte_count); + } +} + +/* + * nss_ipv4_log_conn_cfg_msg() + * Log IPv4 number of connections supported rule message. 
+ */ +static void nss_ipv4_log_conn_cfg_msg(struct nss_ipv4_msg *nim) +{ + struct nss_ipv4_rule_conn_cfg_msg *nirccm __maybe_unused = &nim->msg.rule_conn_cfg; + nss_trace("%px: IPv4 number of connections supported rule message: \n" + "num_conn: %d\n", + nim, + nirccm->num_conn); +} + +/* + * nss_ipv4_log_mc_rule_create_msg() + * Log IPv4 multicast create rule message. + */ +static void nss_ipv4_log_mc_rule_create_msg(struct nss_ipv4_msg *nim) +{ + uint16_t vif; + struct nss_ipv4_mc_rule_create_msg *nimrcm = &nim->msg.mc_rule_create; + for (vif = 0; vif < nimrcm->if_count ; vif++) { + nss_trace("%px: IPv4 multicast create rule message \n" + "Rule flag: %x\n" + "Vif: %d\n" + "Protocol: %d\n" + "to_mtu: %u\n" + "from_ip: %pI4h:%d\n" + "to_ip: %pI4h:%d\n" + "to_mac: %pM\n" + "dest_iface_num: %u\n" + "out_vlan[0] %x\n" + "out_vlan[1] %x\n", + nim, + nimrcm->if_rule[vif].rule_flags, + vif, + nimrcm->tuple.protocol, + nimrcm->if_rule[vif].if_mtu, + &nimrcm->tuple.flow_ip, nimrcm->tuple.flow_ident, + &nimrcm->tuple.return_ip, nimrcm->tuple.return_ident, + nimrcm->if_rule[vif].if_mac, + nimrcm->if_rule[vif].if_num, + nimrcm->if_rule[vif].egress_vlan_tag[0], + nimrcm->if_rule[vif].egress_vlan_tag[1]); + } +} + +/* + * nss_ipv4_log_conn_sync_many_msg() + * Log IPv4 many conn sync message. 
+ */ +static void nss_ipv4_log_conn_sync_many_msg(struct nss_ipv4_msg *nim) +{ + uint16_t i; + struct nss_ipv4_conn_sync_many_msg *nicsm = &nim->msg.conn_stats_many; + for (i = 0; i < nicsm->count; i++) { + struct nss_ipv4_conn_sync *sync = &nicsm->conn_sync[i]; + if (sync->flow_tx_packet_count || sync->return_tx_packet_count) { + nss_trace("%px: IPv4 many conn sync message \n" + "count: %d\n" + "i: %d\n" + "Protocol: %d\n" + "src_addr: %pI4h:%d\n" + "dest_addr: %pI4h:%d\n" + "flow_rx_packet_count: %u\n" + "flow_rx_byte_count: %u\n" + "return_rx_packet_count: %u\n" + "return_rx_byte_count: %u\n" + "flow_tx_packet_count: %u\n" + "flow_tx_byte_count: %u\n" + "return_tx_packet_count: %u\n" + "return_tx_byte_count: %u\n", + nim, + nicsm->count, + i, + (int)sync->protocol, + &sync->flow_ip, (int)sync->flow_ident, + &sync->return_ip_xlate, (int)sync->return_ident_xlate, + sync->flow_rx_packet_count, + sync->flow_rx_byte_count, + sync->return_rx_packet_count, + sync->return_rx_byte_count, + sync->flow_tx_packet_count, + sync->flow_tx_byte_count, + sync->return_tx_packet_count, + sync->return_tx_byte_count); + } + } +} + +/* + * nss_ipv4_log_verbose() + * Log message contents. 
+ */ +static void nss_ipv4_log_verbose(struct nss_ipv4_msg *nim) +{ + switch (nim->cm.type) { + case NSS_IPV4_TX_CREATE_RULE_MSG: + nss_ipv4_log_rule_create_msg(nim); + break; + + case NSS_IPV4_TX_DESTROY_RULE_MSG: + nss_ipv4_log_destroy_rule_msg(nim); + break; + + case NSS_IPV4_RX_CONN_STATS_SYNC_MSG: + nss_ipv4_log_conn_sync(nim); + break; + + case NSS_IPV4_RX_NODE_STATS_SYNC_MSG: + /* Getting logged in stats */ + break; + + case NSS_IPV4_TX_CONN_CFG_RULE_MSG: + nss_ipv4_log_conn_cfg_msg(nim); + break; + + case NSS_IPV4_TX_CREATE_MC_RULE_MSG: + nss_ipv4_log_mc_rule_create_msg(nim); + break; + + case NSS_IPV4_TX_CONN_STATS_SYNC_MANY_MSG: + nss_ipv4_log_conn_sync_many_msg(nim); + break; + + default: + nss_trace("%px: Invalid message type\n", nim); + break; + } +} + +/* + * nss_ipv4_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_ipv4_log_tx_msg(struct nss_ipv4_msg *nim) +{ + if (nim->cm.type >= NSS_IPV4_MAX_MSG_TYPES) { + nss_info("%px: Invalid message type\n", nim); + return; + } + + nss_info("%px: type[%d]:%s\n", nim, nim->cm.type, nss_ipv4_log_message_types_str[nim->cm.type]); + nss_ipv4_log_verbose(nim); +} + +/* + * nss_ipv4_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_ipv4_log_rx_msg(struct nss_ipv4_msg *nim) +{ + if (nim->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_info("%px: Invalid response\n", nim); + return; + } + + if (nim->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nim->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nim, nim->cm.type, + nss_ipv4_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response]); + goto verbose; + } + + if (nim->cm.error >= NSS_IPV4_LAST) { + nss_info("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nim, nim->cm.type, nss_ipv4_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response], + nim->cm.error); + goto verbose; + } + + nss_info("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nim, nim->cm.type, nss_ipv4_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response], + nim->cm.error, nss_ipv4_log_error_response_types_str[nim->cm.error]); + +verbose: + nss_ipv4_log_verbose(nim); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm.c new file mode 100644 index 000000000..402a46c4a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm.c @@ -0,0 +1,76 @@ +/* + ************************************************************************** + * Copyright (c) 2014,2017,2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ipv4_reasm.c + * NSS IPv4 Reassembly APIs + */ +#include +#include "nss_ipv4_reasm_stats.h" +#include "nss_ipv4_reasm_strings.h" + +/* + * nss_ipv4_reasm_msg_handler() + * Handle NSS -> HLOS messages for IPv4 reasm + */ +static void nss_ipv4_reasm_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_ipv4_reasm_msg *nim = (struct nss_ipv4_reasm_msg *)ncm; + + BUG_ON(ncm->interface != NSS_IPV4_REASM_INTERFACE); + + /* + * Handle deprecated messages. Eventually these messages should be removed. + */ + switch (nim->cm.type) { + case NSS_IPV4_REASM_STATS_SYNC_MSG: + /* + * Update Ipv4 reasm driver statistics and send statistics notifications to the registered modules. 
+ */ + nss_ipv4_reasm_stats_sync(nss_ctx, &nim->msg.stats_sync); + nss_ipv4_reasm_stats_notify(nss_ctx); + + break; + default: + nss_warning("IPv4 reasm received an unknown message type"); + } +} + +/* + * nss_ipv4_reasm_get_context() + * get NSS context instance for ipv4 reassembly + */ +struct nss_ctx_instance *nss_ipv4_reasm_get_context(void) +{ + return &nss_top_main.nss[nss_top_main.ipv4_reasm_handler_id]; +} +EXPORT_SYMBOL(nss_ipv4_reasm_get_context); + +/* + * nss_ipv4_reasm_register_handler() + * Register our handler to receive messages for this interface + */ +void nss_ipv4_reasm_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_ipv4_reasm_get_context(); + + if (nss_core_register_handler(nss_ctx, NSS_IPV4_REASM_INTERFACE, nss_ipv4_reasm_msg_handler, NULL) != NSS_CORE_STATUS_SUCCESS) { + nss_warning("IPv4 reasm handler failed to register"); + } + + nss_ipv4_reasm_stats_dentry_create(); + nss_ipv4_reasm_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_stats.c new file mode 100644 index 000000000..350e61962 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_stats.c @@ -0,0 +1,167 @@ +/* + ************************************************************************** + * Copyright (c) 2017,2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_ipv4_reasm_stats.h" +#include "nss_ipv4_reasm.h" +#include "nss_ipv4_reasm_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_ipv4_reasm_stats_notifier); + +uint64_t nss_ipv4_reasm_stats[NSS_IPV4_REASM_STATS_MAX]; /* IPv4 reasm statistics */ + +/* + * nss_ipv4_reasm_stats_read() + * Read IPV4 reassembly stats + */ +static ssize_t nss_ipv4_reasm_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * Max output lines = #stats + few blank lines for banner printing + + * Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_IPV4_REASM_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_IPV4_REASM_STATS_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "ipv4_reasm", NSS_STATS_SINGLE_CORE); + + size_wr += nss_stats_fill_common_stats(NSS_IPV4_REASM_INTERFACE, NSS_STATS_SINGLE_INSTANCE, lbuf, size_wr, size_al, "ipv4_reasm"); + + /* + * IPv4 reasm 
node stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_IPV4_REASM_STATS_MAX); i++) { + stats_shadow[i] = nss_ipv4_reasm_stats[i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("ipv4_reasm", NULL, NSS_STATS_SINGLE_INSTANCE + , nss_ipv4_reasm_strings_stats + , stats_shadow + , NSS_IPV4_REASM_STATS_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_ipv4_reasm_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4_reasm); + +/* + * nss_ipv4_reasm_stats_dentry_create() + * Create the IPv4 reasm statistics debug entry + */ +void nss_ipv4_reasm_stats_dentry_create(void) +{ + nss_stats_create_dentry("ipv4_reasm", &nss_ipv4_reasm_stats_ops); +} + +/* + * nss_ipv4_reasm_stats_sync() + * Update driver specific information from the messsage. + */ +void nss_ipv4_reasm_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_reasm_stats_sync *nirs) +{ + int i; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + spin_lock_bh(&nss_top->stats_lock); + + /* + * Common node stats + */ + nss_top->stats_node[NSS_IPV4_REASM_INTERFACE][NSS_STATS_NODE_RX_PKTS] += nirs->node_stats.rx_packets; + nss_top->stats_node[NSS_IPV4_REASM_INTERFACE][NSS_STATS_NODE_RX_BYTES] += nirs->node_stats.rx_bytes; + nss_top->stats_node[NSS_IPV4_REASM_INTERFACE][NSS_STATS_NODE_TX_PKTS] += nirs->node_stats.tx_packets; + nss_top->stats_node[NSS_IPV4_REASM_INTERFACE][NSS_STATS_NODE_TX_BYTES] += nirs->node_stats.tx_bytes; + + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + nss_top->stats_node[NSS_IPV4_REASM_INTERFACE][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + i] += nirs->node_stats.rx_dropped[i]; + } + + /* + * IPv4 reasm node stats + */ + nss_ipv4_reasm_stats[NSS_IPV4_REASM_STATS_EVICTIONS] += nirs->ipv4_reasm_evictions; + nss_ipv4_reasm_stats[NSS_IPV4_REASM_STATS_ALLOC_FAILS] += nirs->ipv4_reasm_alloc_fails; + 
nss_ipv4_reasm_stats[NSS_IPV4_REASM_STATS_TIMEOUTS] += nirs->ipv4_reasm_timeouts; + + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_ipv4_reasm_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_ipv4_reasm_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + struct nss_ipv4_reasm_stats_notification ipv4_reasm_stats; + + ipv4_reasm_stats.core_id = nss_ctx->id; + memcpy(ipv4_reasm_stats.cmn_node_stats, nss_top_main.stats_node[NSS_IPV4_REASM_INTERFACE], sizeof(ipv4_reasm_stats.cmn_node_stats)); + memcpy(ipv4_reasm_stats.ipv4_reasm_stats, nss_ipv4_reasm_stats, sizeof(ipv4_reasm_stats.ipv4_reasm_stats)); + atomic_notifier_call_chain(&nss_ipv4_reasm_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&ipv4_reasm_stats); +} + +/* + * nss_ipv4_reasm_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_ipv4_reasm_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_ipv4_reasm_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ipv4_reasm_stats_register_notifier); + +/* + * nss_ipv4_reasm_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_ipv4_reasm_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_ipv4_reasm_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ipv4_reasm_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_stats.h new file mode 100644 index 000000000..f8c5f39f3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_stats.h @@ -0,0 +1,27 @@ +/* + ************************************************************************** + * Copyright (c) 2017,2019-2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_IPV4_REASM_STATS_H +#define __NSS_IPV4_REASM_STATS_H + +/* + * IPV4 reasm statistics APIs + */ +extern void nss_ipv4_reasm_stats_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_ipv4_reasm_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_reasm_stats_sync *nirs); +extern void nss_ipv4_reasm_stats_dentry_create(void); + +#endif /* __NSS_IPV4_REASM_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_strings.c new file mode 100644 index 000000000..445d1349d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_strings.c @@ -0,0 +1,55 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" + +/* + * nss_ipv4_reasm_strings_stats + * IPv4 reassembly statistics strings. + */ +struct nss_stats_info nss_ipv4_reasm_strings_stats[NSS_IPV4_REASM_STATS_MAX] = { + {"evictions" , NSS_STATS_TYPE_DROP}, + {"alloc_fails" , NSS_STATS_TYPE_DROP}, + {"timeouts" , NSS_STATS_TYPE_DROP} +}; + +/* + * nss_ipv4_reasm_strings_read() + * Read IPv4 reassembly node statistics names. + */ +static ssize_t nss_ipv4_reasm_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ipv4_reasm_strings_stats, NSS_IPV4_REASM_STATS_MAX); +} + +/* + * nss_ipv4_reasm_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ipv4_reasm); + +/* + * nss_ipv4_reasm_strings_dentry_create() + * Create IPv4 reassembly statistics strings debug entry. 
+ */ +void nss_ipv4_reasm_strings_dentry_create(void) +{ + nss_strings_create_dentry("ipv4_reasm", &nss_ipv4_reasm_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_strings.h new file mode 100644 index 000000000..9a0b362c2 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_reasm_strings.h @@ -0,0 +1,25 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_IPV4_REASM_STRINGS_H +#define __NSS_IPV4_REASM_STRINGS_H + +extern struct nss_stats_info nss_ipv4_reasm_strings_stats[NSS_IPV4_REASM_STATS_MAX]; +extern void nss_ipv4_reasm_strings_dentry_create(void); + +#endif /* __NSS_IPV4_REASM_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_stats.c new file mode 100644 index 000000000..39b162c7e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_stats.c @@ -0,0 +1,239 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2019-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include +#include "nss_ipv4_stats.h" +#include "nss_ipv4_strings.h" + +/* + * Declare atomic notifier data structure for statistics. 
+ */ +ATOMIC_NOTIFIER_HEAD(nss_ipv4_stats_notifier); + +uint64_t nss_ipv4_stats[NSS_IPV4_STATS_MAX]; +uint64_t nss_ipv4_exception_stats[NSS_IPV4_EXCEPTION_EVENT_MAX]; + +/* + * nss_ipv4_stats_read() + * Read IPV4 stats + */ +static ssize_t nss_ipv4_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * max output lines = #stats + Number of Extra outputlines for future reference to add new stats + + * start tag line + end tag line + three blank lines + */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_IPV4_STATS_MAX + NSS_IPV4_EXCEPTION_EVENT_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + /* + * Note: The assumption here is that exception event count is larger than other statistics count for IPv4 + */ + stats_shadow = kzalloc(NSS_IPV4_EXCEPTION_EVENT_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "ipv4", NSS_STATS_SINGLE_CORE); + size_wr += nss_stats_fill_common_stats(NSS_IPV4_RX_INTERFACE, NSS_STATS_SINGLE_INSTANCE, lbuf, size_wr, size_al, "ipv4"); + + /* + * IPv4 node stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; i < NSS_IPV4_STATS_MAX; i++) { + stats_shadow[i] = nss_ipv4_stats[i]; + } + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("ipv4", "ipv4 special stats" + , NSS_STATS_SINGLE_INSTANCE + , nss_ipv4_strings_stats + , stats_shadow + , NSS_IPV4_STATS_MAX + , lbuf, size_wr, size_al); + + /* + * Exception stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_IPV4_EXCEPTION_EVENT_MAX); i++) 
{ + stats_shadow[i] = nss_ipv4_exception_stats[i]; + } + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("ipv4", "ipv4 exception stats" + , NSS_STATS_SINGLE_INSTANCE + , nss_ipv4_strings_exception_stats + , stats_shadow + , NSS_IPV4_EXCEPTION_EVENT_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_ipv4_stats_conn_sync() + * Update driver specific information from the messsage. + */ +void nss_ipv4_stats_conn_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_conn_sync *nirs) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + /* + * Update statistics maintained by NSS driver + */ + spin_lock_bh(&nss_top->stats_lock); + nss_ipv4_stats[NSS_IPV4_STATS_ACCELERATED_RX_PKTS] += nirs->flow_rx_packet_count + nirs->return_rx_packet_count; + nss_ipv4_stats[NSS_IPV4_STATS_ACCELERATED_RX_BYTES] += nirs->flow_rx_byte_count + nirs->return_rx_byte_count; + nss_ipv4_stats[NSS_IPV4_STATS_ACCELERATED_TX_PKTS] += nirs->flow_tx_packet_count + nirs->return_tx_packet_count; + nss_ipv4_stats[NSS_IPV4_STATS_ACCELERATED_TX_BYTES] += nirs->flow_tx_byte_count + nirs->return_tx_byte_count; + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_ipv4_stats_conn_sync_many() + * Update driver specific information from the conn_sync_many messsage. + */ +void nss_ipv4_stats_conn_sync_many(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_conn_sync_many_msg *nicsm) +{ + int i; + + /* + * Sanity check for the stats count + */ + if (nicsm->count * sizeof(struct nss_ipv4_conn_sync) >= nicsm->size) { + nss_warning("%px: stats sync count %u exceeds the size of this msg %u", nss_ctx, nicsm->count, nicsm->size); + return; + } + + for (i = 0; i < nicsm->count; i++) { + nss_ipv4_stats_conn_sync(nss_ctx, &nicsm->conn_sync[i]); + } +} + +/* + * nss_ipv4_stats_node_sync() + * Update driver specific information from the messsage. 
+ */ +void nss_ipv4_stats_node_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_node_sync *nins) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + uint32_t i; + + /* + * Update statistics maintained by NSS driver + */ + spin_lock_bh(&nss_top->stats_lock); + nss_top->stats_node[NSS_IPV4_RX_INTERFACE][NSS_STATS_NODE_RX_PKTS] += nins->node_stats.rx_packets; + nss_top->stats_node[NSS_IPV4_RX_INTERFACE][NSS_STATS_NODE_RX_BYTES] += nins->node_stats.rx_bytes; + nss_top->stats_node[NSS_IPV4_RX_INTERFACE][NSS_STATS_NODE_TX_PKTS] += nins->node_stats.tx_packets; + nss_top->stats_node[NSS_IPV4_RX_INTERFACE][NSS_STATS_NODE_TX_BYTES] += nins->node_stats.tx_bytes; + + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + nss_top->stats_node[NSS_IPV4_RX_INTERFACE][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + i] += nins->node_stats.rx_dropped[i]; + } + + nss_ipv4_stats[NSS_IPV4_STATS_CONNECTION_CREATE_REQUESTS] += nins->ipv4_connection_create_requests; + nss_ipv4_stats[NSS_IPV4_STATS_CONNECTION_CREATE_COLLISIONS] += nins->ipv4_connection_create_collisions; + nss_ipv4_stats[NSS_IPV4_STATS_CONNECTION_CREATE_INVALID_INTERFACE] += nins->ipv4_connection_create_invalid_interface; + nss_ipv4_stats[NSS_IPV4_STATS_CONNECTION_DESTROY_REQUESTS] += nins->ipv4_connection_destroy_requests; + nss_ipv4_stats[NSS_IPV4_STATS_CONNECTION_DESTROY_MISSES] += nins->ipv4_connection_destroy_misses; + nss_ipv4_stats[NSS_IPV4_STATS_CONNECTION_HASH_HITS] += nins->ipv4_connection_hash_hits; + nss_ipv4_stats[NSS_IPV4_STATS_CONNECTION_HASH_REORDERS] += nins->ipv4_connection_hash_reorders; + nss_ipv4_stats[NSS_IPV4_STATS_CONNECTION_FLUSHES] += nins->ipv4_connection_flushes; + nss_ipv4_stats[NSS_IPV4_STATS_CONNECTION_EVICTIONS] += nins->ipv4_connection_evictions; + nss_ipv4_stats[NSS_IPV4_STATS_FRAGMENTATIONS] += nins->ipv4_fragmentations; + nss_ipv4_stats[NSS_IPV4_STATS_MC_CONNECTION_CREATE_REQUESTS] += nins->ipv4_mc_connection_create_requests; + nss_ipv4_stats[NSS_IPV4_STATS_MC_CONNECTION_UPDATE_REQUESTS] += 
nins->ipv4_mc_connection_update_requests; + nss_ipv4_stats[NSS_IPV4_STATS_MC_CONNECTION_CREATE_INVALID_INTERFACE] += nins->ipv4_mc_connection_create_invalid_interface; + nss_ipv4_stats[NSS_IPV4_STATS_MC_CONNECTION_DESTROY_REQUESTS] += nins->ipv4_mc_connection_destroy_requests; + nss_ipv4_stats[NSS_IPV4_STATS_MC_CONNECTION_DESTROY_MISSES] += nins->ipv4_mc_connection_destroy_misses; + nss_ipv4_stats[NSS_IPV4_STATS_MC_CONNECTION_FLUSHES] += nins->ipv4_mc_connection_flushes; + nss_ipv4_stats[NSS_IPV4_STATS_CONNECTION_CREATE_INVALID_MIRROR_IFNUM] += nins->ipv4_connection_create_invalid_mirror_ifnum; + nss_ipv4_stats[NSS_IPV4_STATS_CONNECTION_CREATE_INVALID_MIRROR_IFTYPE] += nins->ipv4_connection_create_invalid_mirror_iftype; + nss_ipv4_stats[NSS_IPV4_STATS_MIRROR_FAILURES] += nins->ipv4_mirror_failures; + + for (i = 0; i < NSS_IPV4_EXCEPTION_EVENT_MAX; i++) { + nss_ipv4_exception_stats[i] += nins->exception_events[i]; + } + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_ipv4_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ipv4); + +/* + * nss_ipv4_stats_dentry_create() + * Create IPv4 statistics debug entry. + */ +void nss_ipv4_stats_dentry_create(void) +{ + nss_stats_create_dentry("ipv4", &nss_ipv4_stats_ops); +} + +/* + * nss_ipv4_stats_notify() + * Sends notifications to the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_ipv4_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + struct nss_ipv4_stats_notification ipv4_stats; + + ipv4_stats.core_id = nss_ctx->id; + memcpy(ipv4_stats.cmn_node_stats, nss_top_main.stats_node[NSS_IPV4_RX_INTERFACE], sizeof(ipv4_stats.cmn_node_stats)); + memcpy(ipv4_stats.special_stats, nss_ipv4_stats, sizeof(ipv4_stats.special_stats)); + memcpy(ipv4_stats.exception_stats, nss_ipv4_exception_stats, sizeof(ipv4_stats.exception_stats)); + atomic_notifier_call_chain(&nss_ipv4_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&ipv4_stats); +} + +/* + * nss_ipv4_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_ipv4_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_ipv4_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ipv4_stats_register_notifier); + +/* + * nss_ipv4_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_ipv4_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_ipv4_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ipv4_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_stats.h new file mode 100644 index 000000000..ad85c35a2 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_stats.h @@ -0,0 +1,29 @@ +/* + ************************************************************************** + * Copyright (c) 2017, 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_IPV4_STATS_H +#define __NSS_IPV4_STATS_H + +/* + * NSS IPV4 statistics APIs + */ +extern void nss_ipv4_stats_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_ipv4_stats_node_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_node_sync *nins); +extern void nss_ipv4_stats_conn_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_conn_sync *nirs); +extern void nss_ipv4_stats_conn_sync_many(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_conn_sync_many_msg *nicsm); +extern void nss_ipv4_stats_dentry_create(void); + +#endif /* __NSS_IPV4_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_strings.c new file mode 100644 index 000000000..77ff3520b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_strings.c @@ -0,0 +1,208 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" + +/* + * nss_ipv4_strings_exception_stats + * Interface statistics strings for ipv4 exceptions. + */ +struct nss_stats_info nss_ipv4_strings_exception_stats[NSS_IPV4_EXCEPTION_EVENT_MAX] = { + {"icmp_hdr_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_unhandled_type" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_ipv4_hdr_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_ipv4_udp_hdr_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_ipv4_tcp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_sipv4_unknown_protocol" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_flush_to_host" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_ip_option" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_ip_fragment" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_small_ttl" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_flags" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_seq_exceeds_right_edge" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_small_data_offs" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_bad_sack" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_big_data_offs" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_seq_before_left_edge" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_ack_exceeds_right_edge" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_ack_before_left_edge" , NSS_STATS_TYPE_EXCEPTION}, + {"udp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"udp_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + 
{"udp_ip_option" , NSS_STATS_TYPE_EXCEPTION}, + {"udp_ip_fragment" , NSS_STATS_TYPE_EXCEPTION}, + {"udp_small_ttl" , NSS_STATS_TYPE_EXCEPTION}, + {"udp_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"wrong_target_mac" , NSS_STATS_TYPE_EXCEPTION}, + {"header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"bad_total_length" , NSS_STATS_TYPE_EXCEPTION}, + {"bad_checksum" , NSS_STATS_TYPE_EXCEPTION}, + {"non_initial_fragment" , NSS_STATS_TYPE_EXCEPTION}, + {"datagram_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"options_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"unknown_protocol" , NSS_STATS_TYPE_EXCEPTION}, + {"esp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"esp_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"esp_ip_option" , NSS_STATS_TYPE_EXCEPTION}, + {"esp_ip_fragment" , NSS_STATS_TYPE_EXCEPTION}, + {"esp_small_ttl" , NSS_STATS_TYPE_EXCEPTION}, + {"esp_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"ingress_vid_mismatch" , NSS_STATS_TYPE_EXCEPTION}, + {"ingress_vid_missing" , NSS_STATS_TYPE_EXCEPTION}, + {"6rd_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"6rd_ip_option" , NSS_STATS_TYPE_EXCEPTION}, + {"6rd_ip_fragment" , NSS_STATS_TYPE_EXCEPTION}, + {"6rd_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"dscp_marking_mismatch" , NSS_STATS_TYPE_EXCEPTION}, + {"vlan_marking_mismatch" , NSS_STATS_TYPE_EXCEPTION}, + {"interface_mismatch" , NSS_STATS_TYPE_EXCEPTION}, + {"gre_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"gre_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"gre_ip_option" , NSS_STATS_TYPE_EXCEPTION}, + {"gre_ip_fragment" , NSS_STATS_TYPE_EXCEPTION}, + {"gre_small_ttl" , NSS_STATS_TYPE_EXCEPTION}, + {"gre_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"pptp_gre_session_match_fail" , NSS_STATS_TYPE_EXCEPTION}, + {"pptp_gre_invalid_proto" , NSS_STATS_TYPE_EXCEPTION}, + {"pptp_gre_no_cme" , NSS_STATS_TYPE_EXCEPTION}, + {"pptp_gre_ip_option" , NSS_STATS_TYPE_EXCEPTION}, + {"pptp_gre_ip_fragment" , NSS_STATS_TYPE_EXCEPTION}, + 
{"pptp_gre_small_ttl" , NSS_STATS_TYPE_EXCEPTION}, + {"pptp_gre_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"destroy" , NSS_STATS_TYPE_EXCEPTION}, + {"frag_df_set" , NSS_STATS_TYPE_EXCEPTION}, + {"frag_fail" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_ipv4_udplite_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"udplite_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"udplite_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"udplite_ip_option" , NSS_STATS_TYPE_EXCEPTION}, + {"udplite_ip_fragment" , NSS_STATS_TYPE_EXCEPTION}, + {"udplite_small_ttl" , NSS_STATS_TYPE_EXCEPTION}, + {"udplite_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"mc_udp_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"mc_mem_alloc_failure" , NSS_STATS_TYPE_EXCEPTION}, + {"mc_update_failure" , NSS_STATS_TYPE_EXCEPTION}, + {"mc_pbuf_alloc_failure" , NSS_STATS_TYPE_EXCEPTION}, + {"pppoe_bridge_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"pppoe_no_session" , NSS_STATS_TYPE_DROP}, + {"icmp_ipv4_gre_hdr_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_ipv4_esp_hdr_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"emesh_prio_mismatch" , NSS_STATS_TYPE_EXCEPTION}, +}; + +/* + * nss_ipv4_strings_stats + * IPv4 statistics strings. 
+ */ +struct nss_stats_info nss_ipv4_strings_stats[NSS_IPV4_STATS_MAX] = { + {"rx_pkts" , NSS_STATS_TYPE_SPECIAL}, + {"rx_bytes" , NSS_STATS_TYPE_SPECIAL}, + {"tx_pkts" , NSS_STATS_TYPE_SPECIAL}, + {"tx_bytes" , NSS_STATS_TYPE_SPECIAL}, + {"create_requests" , NSS_STATS_TYPE_SPECIAL}, + {"create_collisions" , NSS_STATS_TYPE_SPECIAL}, + {"create_invalid_interface" , NSS_STATS_TYPE_SPECIAL}, + {"destroy_requests" , NSS_STATS_TYPE_SPECIAL}, + {"destroy_misses" , NSS_STATS_TYPE_SPECIAL}, + {"hash_hits" , NSS_STATS_TYPE_SPECIAL}, + {"hash_reorders" , NSS_STATS_TYPE_SPECIAL}, + {"flushes" , NSS_STATS_TYPE_SPECIAL}, + {"evictions" , NSS_STATS_TYPE_SPECIAL}, + {"fragmentations" , NSS_STATS_TYPE_SPECIAL}, + {"by_rule_drops" , NSS_STATS_TYPE_DROP}, + {"mc_create_requests" , NSS_STATS_TYPE_SPECIAL}, + {"mc_update_requests" , NSS_STATS_TYPE_SPECIAL}, + {"mc_create_invalid_interface" , NSS_STATS_TYPE_SPECIAL}, + {"mc_destroy_requests" , NSS_STATS_TYPE_SPECIAL}, + {"mc_destroy_misses" , NSS_STATS_TYPE_SPECIAL}, + {"mc_flushes" , NSS_STATS_TYPE_SPECIAL}, + {"mirror_invalid_ifnum_conn_create_req" , NSS_STATS_TYPE_SPECIAL}, + {"mirror_invalid_iftype_conn_create_req" , NSS_STATS_TYPE_SPECIAL}, + {"mirror_failures" , NSS_STATS_TYPE_SPECIAL}, +}; + +/* + * nss_ipv4_special_stats_strings_read() + * Read IPV4 special node statistics names. + */ +static ssize_t nss_ipv4_special_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ipv4_strings_stats, NSS_IPV4_STATS_MAX); +} + +/* + * nss_ipv4_exception_stats_strings_read() + * Read IPV4 exception statistics names. 
+ */ +static ssize_t nss_ipv4_exception_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ipv4_strings_exception_stats, NSS_IPV4_EXCEPTION_EVENT_MAX); +} + +/* + * nss_ipv4_special_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ipv4_special_stats); + +/* + * nss_ipv4_exception_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ipv4_exception_stats); + +/* + * nss_ipv4_strings_dentry_create() + * Create IPv4 statistics strings debug entry. + */ +void nss_ipv4_strings_dentry_create(void) +{ + struct dentry *dir_d; + struct dentry *file_d; + + if (!nss_top_main.strings_dentry) { + nss_warning("qca-nss-drv/strings is not present"); + return; + } + + dir_d = debugfs_create_dir("ipv4", nss_top_main.strings_dentry); + if (!dir_d) { + nss_warning("Failed to create qca-nss-drv/strings/ipv4 directory"); + return; + } + + file_d = debugfs_create_file("special_stats_str", 0400, dir_d, &nss_top_main, &nss_ipv4_special_stats_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/stats/ipv4/special_stats_str file"); + goto fail; + } + + file_d = debugfs_create_file("exception_stats_str", 0400, dir_d, &nss_top_main, &nss_ipv4_exception_stats_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/stats/ipv4/exception_stats_str file"); + goto fail; + } + + return; +fail: + debugfs_remove_recursive(dir_d); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_strings.h new file mode 100644 index 000000000..fd819c769 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv4_strings.h @@ -0,0 +1,26 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_IPV4_STRINGS_H +#define __NSS_IPV4_STRINGS_H + +extern struct nss_stats_info nss_ipv4_strings_stats[NSS_IPV4_STATS_MAX]; +extern struct nss_stats_info nss_ipv4_strings_exception_stats[NSS_IPV4_EXCEPTION_EVENT_MAX]; +extern void nss_ipv4_strings_dentry_create(void); + +#endif /* __NSS_IPV4_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv6.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6.c new file mode 100644 index 000000000..2f9f14b9a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6.c @@ -0,0 +1,776 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ipv6.c + * NSS IPv6 APIs + */ +#include +#include "nss_dscp_map.h" +#include "nss_ipv6_stats.h" +#include "nss_ipv6_strings.h" + +#define NSS_IPV6_TX_MSG_TIMEOUT 1000 /* 1 sec timeout for IPv6 messages */ + +/* + * Private data structure for ipv6 configure messages + */ +struct nss_ipv6_cfg_pvt { + struct semaphore sem; /* Semaphore structure */ + struct completion complete; /* Completion structure */ + int response; /* Response from FW */ + void *cb; /* Original cb for sync msgs */ + void *app_data; /* Original app_data for sync msgs */ +} nss_ipv6_pvt; + +/* + * Private data structure for ipv6 connection information. + */ +struct nss_ipv6_conn_table_info { + uint32_t ce_table_size; /* Size of connection entry table in NSS FW */ + uint32_t cme_table_size; /* Size of connection match entry table in NSS FW */ + unsigned long ce_mem; /* Start address for connection entry table */ + unsigned long cme_mem; /* Start address for connection match entry table */ +} nss_ipv6_ct_info; + +int nss_ipv6_conn_cfg = NSS_DEFAULT_NUM_CONN; +int nss_ipv6_accel_mode_cfg __read_mostly = 1; + +static struct nss_dscp_map_entry mapping[NSS_DSCP_MAP_ARRAY_SIZE]; + +/* + * Callback for conn_sync_many request message. + */ +nss_ipv6_msg_callback_t nss_ipv6_conn_sync_many_msg_cb = NULL; + +/* + * nss_ipv6_dscp_map_usage() + * Help function shows the usage of the command. 
+ */ +static inline void nss_ipv6_dscp_map_usage(void) +{ + nss_info_always("\nUsage:\n"); + nss_info_always("echo > /proc/sys/dev/nss/ipv6cfg/ipv6_dscp_map\n\n"); + nss_info_always("dscp[0-63] action[0-%u] prio[0-%u]:\n\n", + NSS_IPV6_DSCP_MAP_ACTION_MAX - 1, + NSS_DSCP_MAP_PRIORITY_MAX - 1); +} + +/* + * nss_ipv6_rx_msg_handler() + * Handle NSS -> HLOS messages for IPv6 bridge/route + */ +static void nss_ipv6_rx_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_ipv6_msg *nim = (struct nss_ipv6_msg *)ncm; + nss_ipv6_msg_callback_t cb; + + BUG_ON(ncm->interface != NSS_IPV6_RX_INTERFACE); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_IPV6_MAX_MSG_TYPES) { + nss_warning("%px: received invalid message %d for IPv6 interface", nss_ctx, nim->cm.type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_ipv6_msg)) { + nss_warning("%px: message length is invalid: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Trace messages. + */ + nss_ipv6_log_rx_msg(nim); + + /* + * Handle deprecated messages. Eventually these messages should be removed. + */ + switch (nim->cm.type) { + case NSS_IPV6_RX_NODE_STATS_SYNC_MSG: + /* + * Update driver statistics on node sync and send statistics notifications to the registered modules. + */ + nss_ipv6_stats_node_sync(nss_ctx, &nim->msg.node_stats); + nss_ipv6_stats_notify(nss_ctx); + break; + + case NSS_IPV6_RX_CONN_STATS_SYNC_MSG: + /* + * Update driver statistics on connection sync. + */ + nss_ipv6_stats_conn_sync(nss_ctx, &nim->msg.conn_stats); + break; + + case NSS_IPV6_TX_CONN_STATS_SYNC_MANY_MSG: + /* + * Update driver statistics on connection sync many. 
+ */ + nss_ipv6_stats_conn_sync_many(nss_ctx, &nim->msg.conn_stats_many); + ncm->cb = (nss_ptr_t)nss_ipv6_conn_sync_many_msg_cb; + break; + } + + /* + * Update the callback and app_data for NOTIFY messages, IPv6 sends all notify messages + * to the same callback/app_data. + */ + if (nim->cm.response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->ipv6_callback; + ncm->app_data = (nss_ptr_t)nss_ctx->nss_top->ipv6_ctx; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * Callback + */ + cb = (nss_ipv6_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, nim); +} + +/* + * nss_ipv6_tx_sync_callback() + * Callback to handle the completion of synchronous tx messages. + */ +static void nss_ipv6_tx_sync_callback(void *app_data, struct nss_ipv6_msg *nim) +{ + nss_ipv6_msg_callback_t callback = (nss_ipv6_msg_callback_t)nss_ipv6_pvt.cb; + void *data = nss_ipv6_pvt.app_data; + + nss_ipv6_pvt.cb = NULL; + nss_ipv6_pvt.app_data = NULL; + + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("ipv6 error response %d\n", nim->cm.response); + nss_ipv6_pvt.response = NSS_TX_FAILURE; + } else { + nss_ipv6_pvt.response = NSS_TX_SUCCESS; + } + + if (callback) { + callback(data, nim); + } + + complete(&nss_ipv6_pvt.complete); +} + +/* + * nss_ipv6_dscp_action_get() + * Gets the action mapped to dscp. + */ +enum nss_ipv6_dscp_map_actions nss_ipv6_dscp_action_get(uint8_t dscp) +{ + if (dscp >= NSS_DSCP_MAP_ARRAY_SIZE) { + nss_warning("dscp:%u invalid\n", dscp); + return NSS_IPV6_DSCP_MAP_ACTION_MAX; + } + + return mapping[dscp].action; +} +EXPORT_SYMBOL(nss_ipv6_dscp_action_get); + +/* + * nss_ipv6_max_conn_count() + * Return the maximum number of IPv6 connections that the NSS acceleration engine supports. 
+ */ +int nss_ipv6_max_conn_count(void) +{ + return nss_ipv6_conn_cfg; +} +EXPORT_SYMBOL(nss_ipv6_max_conn_count); + +/* + * nss_ipv6_conn_inquiry() + * Inquiry if a connection has been established in NSS FW + */ +nss_tx_status_t nss_ipv6_conn_inquiry(struct nss_ipv6_5tuple *ipv6_5t_p, + nss_ipv6_msg_callback_t cb) +{ + nss_tx_status_t nss_tx_status; + struct nss_ipv6_msg nim; + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[0]; + + /* + * Initialize inquiry message structure. + * This is async message and the result will be returned + * to the caller by the msg_callback passed in. + */ + memset(&nim, 0, sizeof(nim)); + nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, + NSS_IPV6_TX_CONN_CFG_INQUIRY_MSG, + sizeof(struct nss_ipv6_inquiry_msg), + cb, NULL); + nim.msg.inquiry.rr.tuple = *ipv6_5t_p; + nss_tx_status = nss_ipv6_tx(nss_ctx, &nim); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: Send inquiry message failed\n", ipv6_5t_p); + } + + return nss_tx_status; +} +EXPORT_SYMBOL(nss_ipv6_conn_inquiry); + +/* + * nss_ipv6_tx_with_size() + * Transmit an ipv6 message to the FW with a specified size. + */ +nss_tx_status_t nss_ipv6_tx_with_size(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_msg *nim, uint32_t size) +{ + struct nss_cmn_msg *ncm = &nim->cm; + + /* + * Sanity check the message + */ + if (ncm->interface != NSS_IPV6_RX_INTERFACE) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_IPV6_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Trace messages. + */ + nss_ipv6_log_tx_msg(nim); + + return nss_core_send_cmd(nss_ctx, nim, sizeof(*nim), size); +} +EXPORT_SYMBOL(nss_ipv6_tx_with_size); + +/* + * nss_ipv6_tx() + * Transmit an ipv6 message to the FW. 
+ */ +nss_tx_status_t nss_ipv6_tx(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_msg *nim) +{ + return nss_ipv6_tx_with_size(nss_ctx, nim, NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_ipv6_tx); + +/* + * nss_ipv6_tx_sync() + * Transmit a synchronous ipv6 message to the FW. + */ +nss_tx_status_t nss_ipv6_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_msg *nim) +{ + nss_tx_status_t status; + int ret = 0; + + down(&nss_ipv6_pvt.sem); + nss_ipv6_pvt.cb = (void *)nim->cm.cb; + nss_ipv6_pvt.app_data = (void *)nim->cm.app_data; + + nim->cm.cb = (nss_ptr_t)nss_ipv6_tx_sync_callback; + nim->cm.app_data = (nss_ptr_t)NULL; + + status = nss_ipv6_tx(nss_ctx, nim); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: nss ipv6 msg tx failed\n", nss_ctx); + up(&nss_ipv6_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&nss_ipv6_pvt.complete, msecs_to_jiffies(NSS_IPV6_TX_MSG_TIMEOUT)); + if (!ret) { + nss_warning("%px: IPv6 tx sync failed due to timeout\n", nss_ctx); + nss_ipv6_pvt.response = NSS_TX_FAILURE; + } + + status = nss_ipv6_pvt.response; + up(&nss_ipv6_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_ipv6_tx_sync); + +/* + ********************************** + Register/Unregister/Miscellaneous APIs + ********************************** + */ + +/* + * nss_ipv6_notify_register() + * Register to received IPv6 events. + * + * NOTE: Do we want to pass an nss_ctx here so that we can register for ipv6 on any core? + */ +struct nss_ctx_instance *nss_ipv6_notify_register(nss_ipv6_msg_callback_t cb, void *app_data) +{ + /* + * TODO: We need to have a new array in support of the new API + * TODO: If we use a per-context array, we would move the array into nss_ctx based. + */ + nss_top_main.ipv6_callback = cb; + nss_top_main.ipv6_ctx = app_data; + return &nss_top_main.nss[nss_top_main.ipv6_handler_id]; +} +EXPORT_SYMBOL(nss_ipv6_notify_register); + +/* + * nss_ipv6_notify_unregister() + * Unregister to received IPv6 events. 
+ * + * NOTE: Do we want to pass an nss_ctx here so that we can register for ipv6 on any core? + */ +void nss_ipv6_notify_unregister(void) +{ + nss_top_main.ipv6_callback = NULL; +} +EXPORT_SYMBOL(nss_ipv6_notify_unregister); + +/* + * nss_ipv6_conn_sync_many_notify_register() + * Register to receive IPv6 conn_sync_many message response. + */ +void nss_ipv6_conn_sync_many_notify_register(nss_ipv6_msg_callback_t cb) +{ + nss_ipv6_conn_sync_many_msg_cb = cb; +} +EXPORT_SYMBOL(nss_ipv6_conn_sync_many_notify_register); + +/* + * nss_ipv6_conn_sync_many_notify_unregister() + * Unregister to receive IPv6 conn_sync_many message response. + */ +void nss_ipv6_conn_sync_many_notify_unregister(void) +{ + nss_ipv6_conn_sync_many_msg_cb = NULL; +} +EXPORT_SYMBOL(nss_ipv6_conn_sync_many_notify_unregister); + +/* + * nss_ipv6_get_mgr() + * + * TODO: This only suppports a single ipv6, do we ever want to support more? + */ +struct nss_ctx_instance *nss_ipv6_get_mgr(void) +{ + return (void *)&nss_top_main.nss[nss_top_main.ipv6_handler_id]; +} +EXPORT_SYMBOL(nss_ipv6_get_mgr); + +/* + * nss_ipv6_register_handler() + * Register our handler to receive messages for this interface + */ +void nss_ipv6_register_handler() +{ + struct nss_ctx_instance *nss_ctx = nss_ipv6_get_mgr(); + + if (nss_core_register_handler(nss_ctx, NSS_IPV6_RX_INTERFACE, nss_ipv6_rx_msg_handler, NULL) != NSS_CORE_STATUS_SUCCESS) { + nss_warning("IPv6 handler failed to register"); + } + + nss_ipv6_stats_dentry_create(); + nss_ipv6_strings_dentry_create(); +} + +/* + * nss_ipv6_conn_cfg_process_callback() + * Call back function for the ipv6 connection configuration process. 
+ */ +static void nss_ipv6_conn_cfg_process_callback(void *app_data, struct nss_ipv6_msg *nim) +{ + struct nss_ipv6_rule_conn_cfg_msg *nirccm = &nim->msg.rule_conn_cfg; + struct nss_ctx_instance *nss_ctx __maybe_unused = nss_ipv6_get_mgr(); + + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: IPv6 connection configuration failed with error: %d\n", nss_ctx, nim->cm.error); + nss_core_update_max_ipv6_conn(NSS_FW_DEFAULT_NUM_CONN); + nss_ipv6_free_conn_tables(); + return; + } + + nss_ipv6_conn_cfg = ntohl(nirccm->num_conn); + + nss_info("%px: IPv6 connection configuration success: %d\n", nss_ctx, nim->cm.error); +} + +/* + * nss_ipv6_conn_cfg_process() + * Process request to configure number of ipv6 connections + */ +static int nss_ipv6_conn_cfg_process(struct nss_ctx_instance *nss_ctx, int conn) +{ + struct nss_ipv6_msg nim; + struct nss_ipv6_rule_conn_cfg_msg *nirccm; + nss_tx_status_t nss_tx_status; + + if ((!nss_ipv6_ct_info.ce_table_size) || (!nss_ipv6_ct_info.cme_table_size)) { + nss_warning("%px: connection entry or connection match entry table size not available\n", + nss_ctx); + return -EINVAL; + } + + nss_info("%px: IPv6 supported connections: %d\n", nss_ctx, conn); + + nss_ipv6_ct_info.ce_mem = __get_free_pages(GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO, + get_order(nss_ipv6_ct_info.ce_table_size)); + if (!nss_ipv6_ct_info.ce_mem) { + nss_warning("%px: Memory allocation failed for IPv6 Connections: %d\n", + nss_ctx, + conn); + goto fail; + } + nss_info("%px: CE Memory allocated for IPv6 Connections: %d\n", + nss_ctx, + conn); + + nss_ipv6_ct_info.cme_mem = __get_free_pages(GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO, + get_order(nss_ipv6_ct_info.cme_table_size)); + if (!nss_ipv6_ct_info.cme_mem) { + nss_warning("%px: Memory allocation failed for IPv6 Connections: %d\n", + nss_ctx, + conn); + goto fail; + } + nss_info("%px: CME Memory allocated for IPv6 Connections: %d\n", + nss_ctx, + conn); + + memset(&nim, 0, sizeof(struct nss_ipv6_msg)); + 
nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_CONN_CFG_RULE_MSG, + sizeof(struct nss_ipv6_rule_conn_cfg_msg), nss_ipv6_conn_cfg_process_callback, NULL); + + nirccm = &nim.msg.rule_conn_cfg; + nirccm->num_conn = htonl(conn); + nirccm->ce_mem = dma_map_single(nss_ctx->dev, (void *)nss_ipv6_ct_info.ce_mem, nss_ipv6_ct_info.ce_table_size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(nss_ctx->dev, nirccm->ce_mem))) { + nss_warning("%px: DMA mapping failed for virtual address = %px", nss_ctx, (void *)nss_ipv6_ct_info.ce_mem); + goto fail; + } + + nirccm->cme_mem = dma_map_single(nss_ctx->dev, (void *)nss_ipv6_ct_info.cme_mem, nss_ipv6_ct_info.cme_table_size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(nss_ctx->dev, nirccm->cme_mem))) { + nss_warning("%px: DMA mapping failed for virtual address = %px", nss_ctx, (void *)nss_ipv6_ct_info.cme_mem); + goto fail; + } + + nss_tx_status = nss_ipv6_tx(nss_ctx, &nim); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_tx error setting IPv6 Connections: %d\n", + nss_ctx, + conn); + goto fail; + } + + return 0; + +fail: + nss_ipv6_free_conn_tables(); + return -EINVAL; +} + +/* + * nss_ipv6_update_conn_count_callback() + * Call back function for the ipv6 get connection info message. 
+ */ +static void nss_ipv6_update_conn_count_callback(void *app_data, struct nss_ipv6_msg *nim) +{ + struct nss_ipv6_rule_conn_get_table_size_msg *nircgts = &nim->msg.size; + struct nss_ctx_instance *nss_ctx = nss_ipv6_get_mgr(); + + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: IPv6 fetch connection info failed with error: %d\n", nss_ctx, nim->cm.error); + nss_core_update_max_ipv6_conn(NSS_FW_DEFAULT_NUM_CONN); + return; + } + + nss_info("IPv6 get connection info success\n"); + + nss_ipv6_ct_info.ce_table_size = ntohl(nircgts->ce_table_size); + nss_ipv6_ct_info.cme_table_size = ntohl(nircgts->cme_table_size); + + if (nss_ipv6_conn_cfg_process(nss_ctx, ntohl(nircgts->num_conn)) != 0) { + nss_warning("%px: IPv6 connection entry or connection match entry table size\ + not available\n", nss_ctx); + } + + return; +} + +/* + * nss_ipv6_update_conn_count() + * Sets the maximum number of IPv6 connections. + * + * It first gets the connection tables size information from NSS FW + * and then configures the connections in NSS FW. + */ +int nss_ipv6_update_conn_count(int ipv6_num_conn) +{ + struct nss_ctx_instance *nss_ctx = nss_ipv6_get_mgr(); + struct nss_ipv6_msg nim; + struct nss_ipv6_rule_conn_get_table_size_msg *nircgts; + nss_tx_status_t nss_tx_status; + uint32_t sum_of_conn; + + /* + * By default, NSS FW is configured with default number of connections. + */ + if (ipv6_num_conn == NSS_FW_DEFAULT_NUM_CONN) { + nss_info("%px: Default number of connections (%d) already configured\n", nss_ctx, ipv6_num_conn); + return 0; + } + + /* + * Specifications for input + * 1) The input should be power of 2. + * 2) Input for ipv4 and ipv6 sum togther should not exceed 8k + * 3) Min. value should be at leat 256 connections. This is the + * minimum connections we will support for each of them. 
+ */ + sum_of_conn = nss_ipv4_conn_cfg + ipv6_num_conn; + if ((ipv6_num_conn & NSS_NUM_CONN_QUANTA_MASK) || + (sum_of_conn > NSS_MAX_TOTAL_NUM_CONN_IPV4_IPV6) || + (ipv6_num_conn < NSS_MIN_NUM_CONN)) { + nss_warning("%px: input supported connections (%d) does not adhere\ + specifications\n1) not power of 2,\n2) is less than \ + min val: %d, OR\n IPv4/6 total exceeds %d\n", + nss_ctx, + ipv6_num_conn, + NSS_MIN_NUM_CONN, + NSS_MAX_TOTAL_NUM_CONN_IPV4_IPV6); + return -EINVAL; + } + + memset(&nim, 0, sizeof(struct nss_ipv6_msg)); + nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_CONN_TABLE_SIZE_MSG, + sizeof(struct nss_ipv6_rule_conn_get_table_size_msg), nss_ipv6_update_conn_count_callback, NULL); + + nircgts = &nim.msg.size; + nircgts->num_conn = htonl(ipv6_num_conn); + nss_tx_status = nss_ipv6_tx(nss_ctx, &nim); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: Send acceleration mode message failed\n", nss_ctx); + return -EINVAL; + } + + return 0; +} + +/* + * nss_ipv6_free_conn_tables() + * Frees memory allocated for connection tables + */ +void nss_ipv6_free_conn_tables(void) +{ + if (nss_ipv6_ct_info.ce_mem) { + free_pages(nss_ipv6_ct_info.ce_mem, get_order(nss_ipv6_ct_info.ce_table_size)); + } + + if (nss_ipv6_ct_info.cme_mem) { + free_pages(nss_ipv6_ct_info.cme_mem, get_order(nss_ipv6_ct_info.cme_table_size)); + } + + memset(&nss_ipv6_ct_info, 0, sizeof(struct nss_ipv6_conn_table_info)); + return; +} + +/* + * nss_ipv6_accel_mode_cfg_handler() + * Configure acceleration mode for IPv6 + */ +static int nss_ipv6_accel_mode_cfg_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[0]; + struct nss_ipv6_msg nim; + struct nss_ipv6_accel_mode_cfg_msg *nipcm; + nss_tx_status_t nss_tx_status; + int ret = NSS_FAILURE; + int current_value; + + /* + * Take snap shot of current value + */ + current_value = 
nss_ipv6_accel_mode_cfg; + + /* + * Write the variable with user input + */ + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret || (!write)) { + return ret; + } + + memset(&nim, 0, sizeof(struct nss_ipv6_msg)); + nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_ACCEL_MODE_CFG_MSG, + sizeof(struct nss_ipv6_accel_mode_cfg_msg), NULL, NULL); + + nipcm = &nim.msg.accel_mode_cfg; + nipcm->mode = htonl(nss_ipv6_accel_mode_cfg); + + nss_tx_status = nss_ipv6_tx_sync(nss_ctx, &nim); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: Send acceleration mode message failed\n", nss_ctx); + nss_ipv6_accel_mode_cfg = current_value; + return -EIO; + } + + return 0; +} + +/* + * nss_ipv6_dscp_map_cfg_handler() + * Sysctl handler for dscp/pri mappings. + */ +static int nss_ipv6_dscp_map_cfg_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[0]; + struct nss_dscp_map_parse out; + struct nss_ipv6_msg nim; + struct nss_ipv6_dscp2pri_cfg_msg *nipd2p; + nss_tx_status_t status; + int ret; + + if (!write) { + return nss_dscp_map_print(ctl, buffer, lenp, ppos, mapping); + } + + ret = nss_dscp_map_parse(ctl, buffer, lenp, ppos, &out); + if (ret) { + nss_warning("failed to parse dscp mapping:%d\n", ret); + return ret; + } + + if (out.action >= NSS_IPV6_DSCP_MAP_ACTION_MAX) { + nss_warning("invalid action value: %d\n", out.action); + nss_ipv6_dscp_map_usage(); + return -EINVAL; + } + + memset(&nim, 0, sizeof(struct nss_ipv6_msg)); + nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_DSCP2PRI_CFG_MSG, + sizeof(struct nss_ipv6_dscp2pri_cfg_msg), NULL, NULL); + + nipd2p = &nim.msg.dscp2pri_cfg; + nipd2p->dscp = out.dscp; + nipd2p->priority = out.priority; + + status = nss_ipv6_tx_sync(nss_ctx, &nim); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: ipv6 dscp2pri config message failed\n", nss_ctx); + return 
-EFAULT; + } + + /* + * NSS firmware acknowleged the configuration, so update the mapping + * table on HOST side as well. + */ + mapping[out.dscp].action = out.action; + mapping[out.dscp].priority = out.priority; + + return 0; +} + +static struct ctl_table nss_ipv6_table[] = { + { + .procname = "ipv6_accel_mode", + .data = &nss_ipv6_accel_mode_cfg, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_ipv6_accel_mode_cfg_handler, + }, + { + .procname = "ipv6_dscp_map", + .data = &mapping[NSS_DSCP_MAP_ARRAY_SIZE], + .maxlen = sizeof(struct nss_dscp_map_entry), + .mode = 0644, + .proc_handler = &nss_ipv6_dscp_map_cfg_handler, + }, + { } +}; + +static struct ctl_table nss_ipv6_dir[] = { + { + .procname = "ipv6cfg", + .mode = 0555, + .child = nss_ipv6_table, + }, + { } +}; + +static struct ctl_table nss_ipv6_root_dir[] = { + { + .procname = "nss", + .mode = 0555, + .child = nss_ipv6_dir, + }, + { } +}; + +static struct ctl_table nss_ipv6_root[] = { + { + .procname = "dev", + .mode = 0555, + .child = nss_ipv6_root_dir, + }, + { } +}; + +static struct ctl_table_header *nss_ipv6_header; + +/* + * nss_ipv6_register_sysctl() + * Register sysctl specific to ipv6 + */ +void nss_ipv6_register_sysctl(void) +{ + sema_init(&nss_ipv6_pvt.sem, 1); + init_completion(&nss_ipv6_pvt.complete); + + /* + * Register sysctl table. + */ + nss_ipv6_header = register_sysctl_table(nss_ipv6_root); +} + +/* + * nss_ipv6_unregister_sysctl() + * Unregister sysctl specific to ipv6 + */ +void nss_ipv6_unregister_sysctl(void) +{ + /* + * Unregister sysctl table. + */ + if (nss_ipv6_header) { + unregister_sysctl_table(nss_ipv6_header); + } +} + +/* + * nss_ipv6_msg_init() + * Initialize IPv6 message. 
+ */ +void nss_ipv6_msg_init(struct nss_ipv6_msg *nim, uint16_t if_num, uint32_t type, uint32_t len, + nss_ipv6_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data); +} +EXPORT_SYMBOL(nss_ipv6_msg_init); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_log.c new file mode 100644 index 000000000..ed606104e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_log.c @@ -0,0 +1,387 @@ +/* + ************************************************************************** + * Copyright (c) 2016, 2018, 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ipv6_log.c + * NSS IPv6 logger file. 
+ */ + +#include "nss_core.h" + +/* + * This macro converts IPv6 address to network format + */ +#define NSS_IPV6_ADDR_TO_NW(nss6, nw) \ + { \ + nw[0] = htonl(nss6[0]); \ + nw[1] = htonl(nss6[1]); \ + nw[2] = htonl(nss6[2]); \ + nw[3] = htonl(nss6[3]); \ + } + +/* + * nss_ipv6_log_message_types_str + * IPv6 bridge/route rule messages strings + */ +static int8_t *nss_ipv6_log_message_types_str[NSS_IPV6_MAX_MSG_TYPES] __maybe_unused = { + "IPv6 create rule message", + "IPv6 destroy rule message", + "Deprecated: NSS_IPV4_RX_ESTABLISH_RULE_MSG", + "IPv6 connection stats sync message", + "IPv6 generic statistics sync message", + "IPv6 number of connections supported rule message", + "IPv6 multicast create rule message", + "IPv6 request FW to send many conn sync message", +}; + +/* + * nss_ipv6_log_error_response_types_str + * Strings for error types for ipv6 messages + */ +static int8_t *nss_ipv6_log_error_response_types_str[] __maybe_unused = { + "No error", + "Unknown error", + "Invalid interface number", + "Missing connection rule", + "Buffer allocation failure", + "No connection found to delete", + "Conn cfg already done once", + "Conn cfg input is not multiple of quanta", + "Conn cfg input exceeds max supported connections", + "Conn cfg mem alloc fail at NSS FW", + "Invalid L4 protocol for multicast rule create", + "Invalid multicast flags for multicast update", + "Invalid interface for multicast update", +}; + +/* + * nss_ipv6_log_rule_create_msg() + * Log IPv6 create rule message. 
+ */ +static void nss_ipv6_log_rule_create_msg(struct nss_ipv6_msg *nim) +{ + uint32_t src_ip[4]; + uint32_t dest_ip[4]; + struct nss_ipv6_rule_create_msg *nircm = &nim->msg.rule_create; + + NSS_IPV6_ADDR_TO_NW(nircm->tuple.flow_ip, src_ip); + NSS_IPV6_ADDR_TO_NW(nircm->tuple.return_ip, dest_ip); + + nss_trace("%px: IPv6 create rule message \n" + "Protocol: %d\n" + "from_mtu: %u\n" + "to_mtu: %u\n" + "from_ip: %pI6:%d\n" + "to_ip: %pI6:%d\n" + "from_mac: %pM\n" + "to_mac: %pM\n" + "src_iface_num: %u\n" + "dest_iface_num: %u\n" + "ingress_inner_vlan_tag: %u\n" + "egress_inner_vlan_tag: %u\n" + "ingress_outer_vlan_tag: %u\n" + "egress_outer_vlan_tag: %u\n" + "rule_flags: %x\n" + "valid_flags: %x\n" + "return_pppoe_if_exist: %u\n" + "return_pppoe_if_num: %u\n" + "flow_pppoe_if_exist: %u\n" + "flow_pppoe_if_num: %u\n" + "flow_qos_tag: %x (%u)\n" + "return_qos_tag: %x (%u)\n" + "flow_dscp: %x\n" + "return_dscp: %x\n" + "flow_mirror_ifnum: %u\n" + "return_mirror_ifnum: %u\n", + nim, + nircm->tuple.protocol, + nircm->conn_rule.flow_mtu, + nircm->conn_rule.return_mtu, + src_ip, nircm->tuple.flow_ident, + dest_ip, nircm->tuple.return_ident, + nircm->conn_rule.flow_mac, + nircm->conn_rule.return_mac, + nircm->conn_rule.flow_interface_num, + nircm->conn_rule.return_interface_num, + nircm->vlan_primary_rule.ingress_vlan_tag, + nircm->vlan_primary_rule.egress_vlan_tag, + nircm->vlan_secondary_rule.ingress_vlan_tag, + nircm->vlan_secondary_rule.egress_vlan_tag, + nircm->rule_flags, + nircm->valid_flags, + nircm->pppoe_rule.return_if_exist, + nircm->pppoe_rule.return_if_num, + nircm->pppoe_rule.flow_if_exist, + nircm->pppoe_rule.flow_if_num, + nircm->qos_rule.flow_qos_tag, nircm->qos_rule.flow_qos_tag, + nircm->qos_rule.return_qos_tag, nircm->qos_rule.return_qos_tag, + nircm->dscp_rule.flow_dscp, + nircm->dscp_rule.return_dscp, + nircm->mirror_rule.flow_ifnum, + nircm->mirror_rule.return_ifnum); +} + +/* + * nss_ipv6_log_destroy_rule_msg() + * Log IPv6 destroy rule message. 
+ */ +static void nss_ipv6_log_destroy_rule_msg(struct nss_ipv6_msg *nim) +{ + uint32_t src_ip[4]; + uint32_t dest_ip[4]; + struct nss_ipv6_rule_destroy_msg *nirdm = &nim->msg.rule_destroy; + + NSS_IPV6_ADDR_TO_NW(nirdm->tuple.flow_ip, src_ip); + NSS_IPV6_ADDR_TO_NW(nirdm->tuple.return_ip, dest_ip); + + nss_trace("%px: IPv6 destroy rule message: \n" + "flow_ip: %pI6:%d\n" + "return_ip: %pI6:%d\n" + "protocol: %d\n", + nim, + src_ip, nirdm->tuple.flow_ident, + dest_ip, nirdm->tuple.return_ident, + nirdm->tuple.protocol); +} + +/* + * nss_ipv6_log_conn_sync() + * Log IPv6 connection stats sync message. + */ +static void nss_ipv6_log_conn_sync(struct nss_ipv6_msg *nim) +{ + struct nss_ipv6_conn_sync *sync = &nim->msg.conn_stats; + if (sync->flow_tx_packet_count || sync->return_tx_packet_count) { + uint32_t src_ip[4]; + uint32_t dest_ip[4]; + + NSS_IPV6_ADDR_TO_NW(sync->flow_ip, src_ip); + NSS_IPV6_ADDR_TO_NW(sync->return_ip, dest_ip); + + nss_trace("%px: IPv6 connection stats sync message: \n" + "Protocol: %d\n" + "src_addr: %pI6:%d\n" + "dest_addr: %pI6:%d\n" + "flow_rx_packet_count: %u\n" + "flow_rx_byte_count: %u\n" + "return_rx_packet_count: %u\n" + "return_rx_byte_count: %u\n" + "flow_tx_packet_count: %u\n" + "flow_tx_byte_count: %u\n" + "return_tx_packet_count: %u\n" + "return_tx_byte_count: %u\n", + nim, + (int)sync->protocol, + src_ip, (int)sync->flow_ident, + dest_ip, (int)sync->return_ident, + sync->flow_rx_packet_count, + sync->flow_rx_byte_count, + sync->return_rx_packet_count, + sync->return_rx_byte_count, + sync->flow_tx_packet_count, + sync->flow_tx_byte_count, + sync->return_tx_packet_count, + sync->return_tx_byte_count); + } +} + +/* + * nss_ipv6_log_conn_cfg_msg() + * Log IPv6 number of connections supported rule message. 
+ */ +static void nss_ipv6_log_conn_cfg_msg(struct nss_ipv6_msg *nim) +{ + struct nss_ipv6_rule_conn_cfg_msg *nirccm __maybe_unused = &nim->msg.rule_conn_cfg; + nss_trace("%px: IPv6 number of connections supported rule message: \n" + "num_conn: %d\n", + nim, + nirccm->num_conn); +} + +/* + * nss_ipv6_log_mc_rule_create_msg() + * Log IPv6 multicast create rule message. + */ +static void nss_ipv6_log_mc_rule_create_msg(struct nss_ipv6_msg *nim) +{ + uint16_t vif; + uint32_t src_ip[4]; + uint32_t dest_ip[4]; + struct nss_ipv6_mc_rule_create_msg *nimrcm = &nim->msg.mc_rule_create; + + NSS_IPV6_ADDR_TO_NW(nimrcm->tuple.flow_ip, src_ip); + NSS_IPV6_ADDR_TO_NW(nimrcm->tuple.return_ip, dest_ip); + + for (vif = 0; vif < nimrcm->if_count ; vif++) { + nss_trace("%px: IPv6 multicast create rule message \n" + "Rule flag: %x\n" + "Vif: %d\n" + "Protocol: %d\n" + "to_mtu: %u\n" + "from_ip: %pI6:%d\n" + "to_ip: %pI6:%d\n" + "to_mac: %pM\n" + "dest_iface_num: %u\n" + "out_vlan[0] %x\n" + "out_vlan[1] %x\n", + nim, + nimrcm->if_rule[vif].rule_flags, + vif, + nimrcm->tuple.protocol, + nimrcm->if_rule[vif].if_mtu, + src_ip, nimrcm->tuple.flow_ident, + dest_ip, nimrcm->tuple.return_ident, + nimrcm->if_rule[vif].if_mac, + nimrcm->if_rule[vif].if_num, + nimrcm->if_rule[vif].egress_vlan_tag[0], + nimrcm->if_rule[vif].egress_vlan_tag[1]); + } +} + +/* + * nss_ipv6_log_conn_sync_many_msg() + * Log IPv6 many conn sync message. 
+ */ +static void nss_ipv6_log_conn_sync_many_msg(struct nss_ipv6_msg *nim) +{ + uint16_t i; + struct nss_ipv6_conn_sync_many_msg *nicsm = &nim->msg.conn_stats_many; + for (i = 0; i < nicsm->count; i++) { + struct nss_ipv6_conn_sync *sync = &nicsm->conn_sync[i]; + if (sync->flow_tx_packet_count || sync->return_tx_packet_count) { + uint32_t src_ip[4]; + uint32_t dest_ip[4]; + + NSS_IPV6_ADDR_TO_NW(sync->flow_ip, src_ip); + NSS_IPV6_ADDR_TO_NW(sync->return_ip, dest_ip); + + nss_trace("%px: IPv6 many conn sync message \n" + "count: %d\n" + "i: %d\n" + "Protocol: %d\n" + "src_addr: %pI6:%d\n" + "dest_addr: %pI6:%d\n" + "flow_rx_packet_count: %u\n" + "flow_rx_byte_count: %u\n" + "return_rx_packet_count: %u\n" + "return_rx_byte_count: %u\n" + "flow_tx_packet_count: %u\n" + "flow_tx_byte_count: %u\n" + "return_tx_packet_count: %u\n" + "return_tx_byte_count: %u\n", + nim, + nicsm->count, + i, + (int)sync->protocol, + src_ip, (int)sync->flow_ident, + dest_ip, (int)sync->return_ident, + sync->flow_rx_packet_count, + sync->flow_rx_byte_count, + sync->return_rx_packet_count, + sync->return_rx_byte_count, + sync->flow_tx_packet_count, + sync->flow_tx_byte_count, + sync->return_tx_packet_count, + sync->return_tx_byte_count); + } + } +} + +/* + * nss_ipv6_log_verbose() + * Log message contents. 
+ */ +static void nss_ipv6_log_verbose(struct nss_ipv6_msg *nim) +{ + switch (nim->cm.type) { + case NSS_IPV6_TX_CREATE_RULE_MSG: + nss_ipv6_log_rule_create_msg(nim); + break; + + case NSS_IPV6_TX_DESTROY_RULE_MSG: + nss_ipv6_log_destroy_rule_msg(nim); + break; + + case NSS_IPV6_RX_CONN_STATS_SYNC_MSG: + nss_ipv6_log_conn_sync(nim); + break; + + case NSS_IPV6_RX_NODE_STATS_SYNC_MSG: + /* Getting logged in stats */ + break; + + case NSS_IPV6_TX_CONN_CFG_RULE_MSG: + nss_ipv6_log_conn_cfg_msg(nim); + break; + + case NSS_IPV6_TX_CREATE_MC_RULE_MSG: + nss_ipv6_log_mc_rule_create_msg(nim); + break; + + case NSS_IPV6_TX_CONN_STATS_SYNC_MANY_MSG: + nss_ipv6_log_conn_sync_many_msg(nim); + break; + + default: + nss_trace("%px: Invalid message type\n", nim); + break; + } +} + +/* + * nss_ipv6_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_ipv6_log_tx_msg(struct nss_ipv6_msg *nim) +{ + nss_info("%px: type[%d]: %s\n", nim, nim->cm.type, nss_ipv6_log_message_types_str[nim->cm.type]); + nss_ipv6_log_verbose(nim); +} + +/* + * nss_ipv6_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_ipv6_log_rx_msg(struct nss_ipv6_msg *nim) +{ + if (nim->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_info("%px: Invalid response\n", nim); + return; + } + + if (nim->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nim->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]: %s, response[%d]: %s\n", nim, nim->cm.type, + nss_ipv6_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response]); + goto verbose; + } + + if (nim->cm.error > NSS_IPV6_CR_MULTICAST_UPDATE_INVALID_IF) { + nss_info("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nim, nim->cm.type, nss_ipv6_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response], + nim->cm.error); + goto verbose; + } + + nss_info("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nim, nim->cm.type, nss_ipv6_log_message_types_str[nim->cm.type], + nim->cm.response, nss_cmn_response_str[nim->cm.response], + nim->cm.error, nss_ipv6_log_error_response_types_str[nim->cm.error]); + +verbose: + nss_ipv6_log_verbose(nim); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm.c new file mode 100644 index 000000000..4ad8a7c24 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm.c @@ -0,0 +1,72 @@ +/* + ************************************************************************** + * Copyright (c) 2015,2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ipv6_reasm.c + * NSS IPv6 Reassembly APIs + */ +#include +#include "nss_ipv6_reasm_stats.h" +#include "nss_ipv6_reasm_strings.h" + +/* + * nss_ipv6_reasm_msg_handler() + * Handle NSS -> HLOS messages for IPv6 reasm + */ +static void nss_ipv6_reasm_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_ipv6_reasm_msg *nim = (struct nss_ipv6_reasm_msg *)ncm; + + BUG_ON(ncm->interface != NSS_IPV6_REASM_INTERFACE); + + switch (nim->cm.type) { + case NSS_IPV6_REASM_STATS_SYNC_MSG: + /* + * Update driver statistics on node sync and send statistics notifications to the registered modules. 
+ */ + nss_ipv6_reasm_stats_sync(nss_ctx, &nim->msg.stats_sync); + nss_ipv6_reasm_stats_notify(nss_ctx); + break; + default: + nss_warning("IPv6 reasm received an unknown message type"); + } +} + +/* + * nss_ipv6_reasm_get_context() + * get NSS context instance for ipv6 reassembly + */ +struct nss_ctx_instance *nss_ipv6_reasm_get_context(void) +{ + return &nss_top_main.nss[nss_top_main.ipv6_reasm_handler_id]; +} +EXPORT_SYMBOL(nss_ipv6_reasm_get_context); + +/* + * nss_ipv6_reasm_register_handler() + * Register our handler to receive messages for this interface + */ +void nss_ipv6_reasm_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_ipv6_reasm_get_context(); + + if (nss_core_register_handler(nss_ctx, NSS_IPV6_REASM_INTERFACE, nss_ipv6_reasm_msg_handler, NULL) != NSS_CORE_STATUS_SUCCESS) { + nss_warning("IPv6 reasm handler failed to register"); + } + + nss_ipv6_reasm_stats_dentry_create(); + nss_ipv6_reasm_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_stats.c new file mode 100644 index 000000000..d376f5af9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_stats.c @@ -0,0 +1,167 @@ +/* + ************************************************************************** + * Copyright (c) 2017,2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_ipv6_reasm_stats.h" +#include "nss_ipv6_reasm.h" +#include "nss_ipv6_reasm_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_ipv6_reasm_stats_notifier); + +uint64_t nss_ipv6_reasm_stats[NSS_IPV6_REASM_STATS_MAX]; /* IPv6 reasm statistics */ + +/* + * nss_ipv6_reasm_stats_read() + * Read IPV6 reassembly stats + */ +static ssize_t nss_ipv6_reasm_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + /* + * Max output lines = #stats + few blank lines for banner printing + + * Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_IPV6_REASM_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_IPV6_REASM_STATS_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "ipv6_reasm", NSS_STATS_SINGLE_CORE); + + size_wr += nss_stats_fill_common_stats(NSS_IPV6_REASM_INTERFACE, NSS_STATS_SINGLE_INSTANCE, lbuf, size_wr, size_al, "ipv6_reasm"); + + /* + * Ipv6 reasm node stats + */ + + 
spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_IPV6_REASM_STATS_MAX); i++) { + stats_shadow[i] = nss_ipv6_reasm_stats[i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + + size_wr += nss_stats_print("ipv6_reasm", NULL, NSS_STATS_SINGLE_INSTANCE + , nss_ipv6_reasm_strings_stats + , stats_shadow + , NSS_IPV6_REASM_STATS_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_ipv6_reasm_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ipv6_reasm); + +/* + * nss_ipv6_reasm_stats_dentry_create() + * Create IPv6 reasm statistics debug entry. + */ +void nss_ipv6_reasm_stats_dentry_create(void) +{ + nss_stats_create_dentry("ipv6_reasm", &nss_ipv6_reasm_stats_ops); +} + +/* + * nss_ipv6_reasm_stats_sync() + * Update driver specific information from the messsage. + */ +void nss_ipv6_reasm_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_reasm_stats_sync *nirs) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + int j; + + spin_lock_bh(&nss_top->stats_lock); + + /* + * Common node stats + */ + nss_top->stats_node[NSS_IPV6_REASM_INTERFACE][NSS_STATS_NODE_RX_PKTS] += nirs->node_stats.rx_packets; + nss_top->stats_node[NSS_IPV6_REASM_INTERFACE][NSS_STATS_NODE_RX_BYTES] += nirs->node_stats.rx_bytes; + nss_top->stats_node[NSS_IPV6_REASM_INTERFACE][NSS_STATS_NODE_TX_PKTS] += nirs->node_stats.tx_packets; + nss_top->stats_node[NSS_IPV6_REASM_INTERFACE][NSS_STATS_NODE_TX_BYTES] += nirs->node_stats.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_top->stats_node[NSS_IPV6_REASM_INTERFACE][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + j] += nirs->node_stats.rx_dropped[j]; + } + + /* + * IPv6 reasm node stats + */ + nss_ipv6_reasm_stats[NSS_IPV6_REASM_STATS_ALLOC_FAILS] += nirs->ipv6_reasm_alloc_fails; + nss_ipv6_reasm_stats[NSS_IPV6_REASM_STATS_TIMEOUTS] += nirs->ipv6_reasm_timeouts; + 
nss_ipv6_reasm_stats[NSS_IPV6_REASM_STATS_DISCARDS] += nirs->ipv6_reasm_discards; + + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_ipv6_reasm_stats_notify() + * Sends notifications to the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_ipv6_reasm_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + struct nss_ipv6_reasm_stats_notification ipv6_reasm_stats; + + ipv6_reasm_stats.core_id = nss_ctx->id; + memcpy(ipv6_reasm_stats.cmn_node_stats, nss_top_main.stats_node[NSS_IPV6_REASM_INTERFACE], sizeof(ipv6_reasm_stats.cmn_node_stats)); + memcpy(ipv6_reasm_stats.ipv6_reasm_stats, nss_ipv6_reasm_stats, sizeof(ipv6_reasm_stats.ipv6_reasm_stats)); + atomic_notifier_call_chain(&nss_ipv6_reasm_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&ipv6_reasm_stats); +} + +/* + * nss_ipv6_reasm_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_ipv6_reasm_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_ipv6_reasm_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ipv6_reasm_stats_register_notifier); + +/* + * nss_ipv6_reasm_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_ipv6_reasm_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_ipv6_reasm_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ipv6_reasm_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_stats.h new file mode 100644 index 000000000..cdae31405 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_stats.h @@ -0,0 +1,27 @@ +/* + ************************************************************************** + * Copyright (c) 2017,2019-2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_IPV6_REASM_STATS_H +#define __NSS_IPV6_REASM_STATS_H + +/* + * NSS IPv6 reasm statistics APIs + */ +extern void nss_ipv6_reasm_stats_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_ipv6_reasm_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_reasm_stats_sync *nirs); +extern void nss_ipv6_reasm_stats_dentry_create(void); + +#endif /* __NSS_IPV6_REASM_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_strings.c new file mode 100644 index 000000000..7b6436ab5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_strings.c @@ -0,0 +1,55 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" + +/* + * nss_ipv6_reasm_strings_stats + * IPv6 reassembly statistics strings. + */ +struct nss_stats_info nss_ipv6_reasm_strings_stats[NSS_IPV6_REASM_STATS_MAX] = { + {"alloc_fails" , NSS_STATS_TYPE_DROP}, + {"timeouts" , NSS_STATS_TYPE_DROP}, + {"discards" , NSS_STATS_TYPE_DROP} +}; + +/* + * nss_ipv6_reasm_strings_read() + * Read IPv6 reassembly node statistics names. + */ +static ssize_t nss_ipv6_reasm_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ipv6_reasm_strings_stats, NSS_IPV6_REASM_STATS_MAX); +} + +/* + * nss_ipv6_reasm_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ipv6_reasm); + +/* + * nss_ipv6_reasm_strings_dentry_create() + * Create IPv6 reassembly statistics strings debug entry. 
+ */ +void nss_ipv6_reasm_strings_dentry_create(void) +{ + nss_strings_create_dentry("ipv6_reasm", &nss_ipv6_reasm_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_strings.h new file mode 100644 index 000000000..6cac544d0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_reasm_strings.h @@ -0,0 +1,25 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_IPV6_REASM_STRINGS_H +#define __NSS_IPV6_REASM_STRINGS_H + +extern struct nss_stats_info nss_ipv6_reasm_strings_stats[NSS_IPV6_REASM_STATS_MAX]; +extern void nss_ipv6_reasm_strings_dentry_create(void); + +#endif /* __NSS_IPV6_REASM_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_stats.c new file mode 100644 index 000000000..617f55b73 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_stats.c @@ -0,0 +1,243 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2019-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include +#include "nss_ipv6_stats.h" +#include "nss_ipv6_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_ipv6_stats_notifier); + +uint64_t nss_ipv6_stats[NSS_IPV6_STATS_MAX]; +uint64_t nss_ipv6_exception_stats[NSS_IPV6_EXCEPTION_EVENT_MAX]; + +/* + * nss_ipv6_stats_read() + * Read IPV6 stats. 
+ */ +static ssize_t nss_ipv6_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * max output lines = #stats + Number of Extra outputlines for future reference to add new stats + + * start tag line + end tag line + three blank lines. + */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_IPV6_STATS_MAX + NSS_IPV6_EXCEPTION_EVENT_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + /* + * Note: The assumption here is that exception event count is larger than other statistics count for IPv6. + */ + stats_shadow = kzalloc(NSS_IPV6_EXCEPTION_EVENT_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "ipv6", NSS_STATS_SINGLE_CORE); + size_wr += nss_stats_fill_common_stats(NSS_IPV6_RX_INTERFACE, NSS_STATS_SINGLE_INSTANCE, lbuf, size_wr, size_al, "ipv6"); + + /* + * IPv6 node stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_IPV6_STATS_MAX); i++) { + stats_shadow[i] = nss_ipv6_stats[i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + + size_wr += nss_stats_print("ipv6", "ipv6 node stats", NSS_STATS_SINGLE_INSTANCE + , nss_ipv6_strings_stats + , stats_shadow + , NSS_IPV6_STATS_MAX + , lbuf, size_wr, size_al); + + /* + * Exception stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_IPV6_EXCEPTION_EVENT_MAX); i++) { + stats_shadow[i] = nss_ipv6_exception_stats[i]; + } + spin_unlock_bh(&nss_top_main.stats_lock); + + size_wr += nss_stats_print("ipv6", "ipv6 exception stats", NSS_STATS_SINGLE_INSTANCE + , 
nss_ipv6_strings_exception_stats + , stats_shadow + , NSS_IPV6_EXCEPTION_EVENT_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_ipv6_stats_conn_sync() + * Update driver specific information from the messsage. + */ +void nss_ipv6_stats_conn_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_conn_sync *nics) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + + /* + * Update statistics maintained by NSS driver + */ + spin_lock_bh(&nss_top->stats_lock); + nss_ipv6_stats[NSS_IPV6_STATS_ACCELERATED_RX_PKTS] += nics->flow_rx_packet_count + nics->return_rx_packet_count; + nss_ipv6_stats[NSS_IPV6_STATS_ACCELERATED_RX_BYTES] += nics->flow_rx_byte_count + nics->return_rx_byte_count; + nss_ipv6_stats[NSS_IPV6_STATS_ACCELERATED_TX_PKTS] += nics->flow_tx_packet_count + nics->return_tx_packet_count; + nss_ipv6_stats[NSS_IPV6_STATS_ACCELERATED_TX_BYTES] += nics->flow_tx_byte_count + nics->return_tx_byte_count; + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_ipv6_stats_conn_sync_many() + * Update driver specific information from the conn_sync_many messsage. + */ +void nss_ipv6_stats_conn_sync_many(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_conn_sync_many_msg *nicsm) +{ + uint32_t i; + + /* + * Sanity check for the stats count + */ + if (nicsm->count * sizeof(struct nss_ipv6_conn_sync) >= nicsm->size) { + nss_warning("%px: stats sync count %u exceeds the size of this msg %u", nss_ctx, nicsm->count, nicsm->size); + return; + } + + for (i = 0; i < nicsm->count; i++) { + nss_ipv6_stats_conn_sync(nss_ctx, &nicsm->conn_sync[i]); + } +} + +/* + * nss_ipv6_stats_node_sync() + * Update driver specific information from the messsage. 
+ */ +void nss_ipv6_stats_node_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_node_sync *nins) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + uint32_t i; + + /* + * Update statistics maintained by NSS driver + */ + spin_lock_bh(&nss_top->stats_lock); + nss_top->stats_node[NSS_IPV6_RX_INTERFACE][NSS_STATS_NODE_RX_PKTS] += nins->node_stats.rx_packets; + nss_top->stats_node[NSS_IPV6_RX_INTERFACE][NSS_STATS_NODE_RX_BYTES] += nins->node_stats.rx_bytes; + nss_top->stats_node[NSS_IPV6_RX_INTERFACE][NSS_STATS_NODE_TX_PKTS] += nins->node_stats.tx_packets; + nss_top->stats_node[NSS_IPV6_RX_INTERFACE][NSS_STATS_NODE_TX_BYTES] += nins->node_stats.tx_bytes; + + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + nss_top->stats_node[NSS_IPV6_RX_INTERFACE][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + i] += nins->node_stats.rx_dropped[i]; + } + + nss_ipv6_stats[NSS_IPV6_STATS_CONNECTION_CREATE_REQUESTS] += nins->ipv6_connection_create_requests; + nss_ipv6_stats[NSS_IPV6_STATS_CONNECTION_CREATE_COLLISIONS] += nins->ipv6_connection_create_collisions; + nss_ipv6_stats[NSS_IPV6_STATS_CONNECTION_CREATE_INVALID_INTERFACE] += nins->ipv6_connection_create_invalid_interface; + nss_ipv6_stats[NSS_IPV6_STATS_CONNECTION_DESTROY_REQUESTS] += nins->ipv6_connection_destroy_requests; + nss_ipv6_stats[NSS_IPV6_STATS_CONNECTION_DESTROY_MISSES] += nins->ipv6_connection_destroy_misses; + nss_ipv6_stats[NSS_IPV6_STATS_CONNECTION_HASH_HITS] += nins->ipv6_connection_hash_hits; + nss_ipv6_stats[NSS_IPV6_STATS_CONNECTION_HASH_REORDERS] += nins->ipv6_connection_hash_reorders; + nss_ipv6_stats[NSS_IPV6_STATS_CONNECTION_FLUSHES] += nins->ipv6_connection_flushes; + nss_ipv6_stats[NSS_IPV6_STATS_CONNECTION_EVICTIONS] += nins->ipv6_connection_evictions; + nss_ipv6_stats[NSS_IPV6_STATS_FRAGMENTATIONS] += nins->ipv6_fragmentations; + nss_ipv6_stats[NSS_IPV6_STATS_FRAG_FAILS] += nins->ipv6_frag_fails; + nss_ipv6_stats[NSS_IPV6_STATS_MC_CONNECTION_CREATE_REQUESTS] += nins->ipv6_mc_connection_create_requests; + 
nss_ipv6_stats[NSS_IPV6_STATS_MC_CONNECTION_UPDATE_REQUESTS] += nins->ipv6_mc_connection_update_requests; + nss_ipv6_stats[NSS_IPV6_STATS_MC_CONNECTION_CREATE_INVALID_INTERFACE] += nins->ipv6_mc_connection_create_invalid_interface; + nss_ipv6_stats[NSS_IPV6_STATS_MC_CONNECTION_DESTROY_REQUESTS] += nins->ipv6_mc_connection_destroy_requests; + nss_ipv6_stats[NSS_IPV6_STATS_MC_CONNECTION_DESTROY_MISSES] += nins->ipv6_mc_connection_destroy_misses; + nss_ipv6_stats[NSS_IPV6_STATS_MC_CONNECTION_FLUSHES] += nins->ipv6_mc_connection_flushes; + nss_ipv6_stats[NSS_IPV6_STATS_CONNECTION_CREATE_INVALID_MIRROR_IFNUM] += nins->ipv6_connection_create_invalid_mirror_ifnum; + nss_ipv6_stats[NSS_IPV6_STATS_CONNECTION_CREATE_INVALID_MIRROR_IFTYPE] += nins->ipv6_connection_create_invalid_mirror_iftype; + nss_ipv6_stats[NSS_IPV6_STATS_MIRROR_FAILURES] += nins->ipv6_mirror_failures; + + for (i = 0; i < NSS_IPV6_EXCEPTION_EVENT_MAX; i++) { + nss_ipv6_exception_stats[i] += nins->exception_events[i]; + } + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_ipv6_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ipv6); + +/* + * nss_ipv6_stats_dentry_create() + * Create IPv6 statistics debug entry. + */ +void nss_ipv6_stats_dentry_create(void) +{ + nss_stats_create_dentry("ipv6", &nss_ipv6_stats_ops); +} + +/* + * nss_ipv6_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_ipv6_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + struct nss_ipv6_stats_notification ipv6_stats; + + ipv6_stats.core_id = nss_ctx->id; + memcpy(ipv6_stats.cmn_node_stats, nss_top_main.stats_node[NSS_IPV6_RX_INTERFACE], sizeof(ipv6_stats.cmn_node_stats)); + memcpy(ipv6_stats.special_stats, nss_ipv6_stats, sizeof(ipv6_stats.special_stats)); + memcpy(ipv6_stats.exception_stats, nss_ipv6_exception_stats, sizeof(ipv6_stats.exception_stats)); + + atomic_notifier_call_chain(&nss_ipv6_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&ipv6_stats); +} + +/* + * nss_ipv6_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_ipv6_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_ipv6_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ipv6_stats_register_notifier); + +/* + * nss_ipv6_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_ipv6_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_ipv6_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ipv6_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_stats.h new file mode 100644 index 000000000..1eaff5e2b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_stats.h @@ -0,0 +1,29 @@ +/* + ************************************************************************** + * Copyright (c) 2017,2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_IPV6_STATS_H +#define __NSS_IPV6_STATS_H + +/* + * IPV6 statistics APIs + */ +extern void nss_ipv6_stats_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_ipv6_stats_node_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_node_sync *nins); +extern void nss_ipv6_stats_conn_sync(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_conn_sync *nics); +extern void nss_ipv6_stats_conn_sync_many(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_conn_sync_many_msg *nicsm); +extern void nss_ipv6_stats_dentry_create(void); + +#endif /* __NSS_IPV6_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_strings.c new file mode 100644 index 000000000..57b100f7b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_strings.c @@ -0,0 +1,185 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" + +/* + * nss_ipv6_strings_exception_stats + * Interface statistics strings for IPv6 exceptions. + */ +struct nss_stats_info nss_ipv6_strings_exception_stats[NSS_IPV6_EXCEPTION_EVENT_MAX] = { + {"icmp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_unhandled_type" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_udp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_tcp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_unknown_protocol" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_flush_to_host" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_small_hop_limit" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_flags" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_seq_exceeds_right_edge" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_small_data_offs" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_bad_sack" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_big_data_offs" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_seq_before_left_edge" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_ack_exceeds_right_edge" , NSS_STATS_TYPE_EXCEPTION}, + {"tcp_ack_before_left_edge" , NSS_STATS_TYPE_EXCEPTION}, + {"udp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"udp_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"udp_small_hop_limit" , NSS_STATS_TYPE_EXCEPTION}, + {"udp_needs_fragmentation" , 
NSS_STATS_TYPE_EXCEPTION}, + {"wrong_target_mac" , NSS_STATS_TYPE_EXCEPTION}, + {"header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"unknown_protocol" , NSS_STATS_TYPE_EXCEPTION}, + {"ingress_vid_mismatch" , NSS_STATS_TYPE_EXCEPTION}, + {"ingress_vid_missing" , NSS_STATS_TYPE_EXCEPTION}, + {"dscp_marking_mismatch" , NSS_STATS_TYPE_EXCEPTION}, + {"vlan_marking_mismatch" , NSS_STATS_TYPE_EXCEPTION}, + {"interface_mismatch" , NSS_STATS_TYPE_EXCEPTION}, + {"gre_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"gre_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"gre_small_hop_limit" , NSS_STATS_TYPE_EXCEPTION}, + {"destroy" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_udplite_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"udplite_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"udplite_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"udplite_small_hop_limit" , NSS_STATS_TYPE_EXCEPTION}, + {"udplite_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"mc_udp_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"mc_mem_alloc_failure" , NSS_STATS_TYPE_EXCEPTION}, + {"mc_update_failure" , NSS_STATS_TYPE_EXCEPTION}, + {"mc_pbuf_alloc_failure" , NSS_STATS_TYPE_EXCEPTION}, + {"esp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"esp_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"esp_ip_fragment" , NSS_STATS_TYPE_EXCEPTION}, + {"esp_small_hop_limit" , NSS_STATS_TYPE_EXCEPTION}, + {"esp_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"tunipip6_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"tunipip6_small_hop_limit" , NSS_STATS_TYPE_EXCEPTION}, + {"tunipip6_needs_fragmentation" , NSS_STATS_TYPE_EXCEPTION}, + {"pppoe_bridge_no_icme" , NSS_STATS_TYPE_EXCEPTION}, + {"dont_frag_set" , NSS_STATS_TYPE_EXCEPTION}, + {"reassembly_not_supported" , NSS_STATS_TYPE_EXCEPTION}, + {"pppoe_no_session" , NSS_STATS_TYPE_DROP}, + {"icmp_gre_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"icmp_esp_header_incomplete" , NSS_STATS_TYPE_EXCEPTION}, + {"emesh_prio_mismatch" , NSS_STATS_TYPE_EXCEPTION}, +}; + +/* + * 
nss_ipv6_strings_stats + * IPv6 stats strings. + */ +struct nss_stats_info nss_ipv6_strings_stats[NSS_IPV6_STATS_MAX] = { + {"rx_pkts" ,NSS_STATS_TYPE_SPECIAL}, + {"rx_bytes" ,NSS_STATS_TYPE_SPECIAL}, + {"tx_pkts" ,NSS_STATS_TYPE_SPECIAL}, + {"tx_bytes" ,NSS_STATS_TYPE_SPECIAL}, + {"create_requests" ,NSS_STATS_TYPE_SPECIAL}, + {"create_collisions" ,NSS_STATS_TYPE_SPECIAL}, + {"create_invalid_interface" ,NSS_STATS_TYPE_SPECIAL}, + {"destroy_requests" ,NSS_STATS_TYPE_SPECIAL}, + {"destroy_misses" ,NSS_STATS_TYPE_SPECIAL}, + {"hash_hits" ,NSS_STATS_TYPE_SPECIAL}, + {"hash_reorders" ,NSS_STATS_TYPE_SPECIAL}, + {"flushes" ,NSS_STATS_TYPE_SPECIAL}, + {"evictions" ,NSS_STATS_TYPE_SPECIAL}, + {"fragmentations" ,NSS_STATS_TYPE_SPECIAL}, + {"frag_fails" ,NSS_STATS_TYPE_SPECIAL}, + {"by_rule_drops" ,NSS_STATS_TYPE_DROP}, + {"mc_create_requests" ,NSS_STATS_TYPE_SPECIAL}, + {"mc_update_requests" ,NSS_STATS_TYPE_SPECIAL}, + {"mc_create_invalid_interface" ,NSS_STATS_TYPE_SPECIAL}, + {"mc_destroy_requests" ,NSS_STATS_TYPE_SPECIAL}, + {"mc_destroy_misses" ,NSS_STATS_TYPE_SPECIAL}, + {"mc_flushes" ,NSS_STATS_TYPE_SPECIAL}, + {"mirror_invalid_ifnum_conn_create_req" ,NSS_STATS_TYPE_SPECIAL}, + {"mirror_invalid_iftype_conn_create_req" ,NSS_STATS_TYPE_SPECIAL}, + {"mirror_failures" ,NSS_STATS_TYPE_SPECIAL}, +}; + +/* + * nss_ipv6_special_stats_strings_read() + * Read IPv6 special node statistics names. + */ +static ssize_t nss_ipv6_special_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ipv6_strings_stats, NSS_IPV6_STATS_MAX); +} + +/* + * nss_ipv6_exception_stats_strings_read() + * Read IPv6 exception statistics names. 
+ */ +static ssize_t nss_ipv6_exception_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ipv6_strings_exception_stats, NSS_IPV6_EXCEPTION_EVENT_MAX); +} + +/* + * nss_ipv6_special_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ipv6_special_stats); + +/* + * nss_ipv6_exception_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ipv6_exception_stats); + +/* + * nss_ipv6_strings_dentry_create() + * Create IPv6 statistics strings debug entry. + */ +void nss_ipv6_strings_dentry_create(void) +{ + struct dentry *ipv6_d = NULL; + struct dentry *ipv6_spcl_stats_d = NULL; + struct dentry *ipv6_excp_stats_d = NULL; + + if (!nss_top_main.strings_dentry) { + nss_warning("qca-nss-drv/strings is not present"); + return; + } + + ipv6_d = debugfs_create_dir("ipv6", nss_top_main.strings_dentry); + if (!ipv6_d) { + nss_warning("Failed to create qca-nss-drv/strings/ipv6 directory"); + return; + } + + ipv6_spcl_stats_d = debugfs_create_file("special_stats_str", 0400, ipv6_d, &nss_top_main, &nss_ipv6_special_stats_strings_ops); + if (!ipv6_spcl_stats_d) { + nss_warning("Failed to create qca-nss-drv/stats/ipv6/special_stats_str file"); + debugfs_remove_recursive(ipv6_d); + return; + } + + ipv6_excp_stats_d = debugfs_create_file("exception_stats_str", 0400, ipv6_d, &nss_top_main, &nss_ipv6_exception_stats_strings_ops); + if (!ipv6_excp_stats_d) { + nss_warning("Failed to create qca-nss-drv/stats/ipv6/exception_stats_str file"); + debugfs_remove_recursive(ipv6_d); + return; + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_strings.h new file mode 100644 index 000000000..4f582e581 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ipv6_strings.h @@ -0,0 +1,26 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_IPV6_STRINGS_H +#define __NSS_IPV6_STRINGS_H + +extern struct nss_stats_info nss_ipv6_strings_stats[NSS_IPV6_STATS_MAX]; +extern struct nss_stats_info nss_ipv6_strings_exception_stats[NSS_IPV6_EXCEPTION_EVENT_MAX]; +extern void nss_ipv6_strings_dentry_create(void); + +#endif /* __NSS_IPV6_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2.c b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2.c new file mode 100644 index 000000000..2c73b4860 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2.c @@ -0,0 +1,284 @@ +/* + ************************************************************************** + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include +#include +#include "nss_tx_rx_common.h" +#include "nss_l2tpv2_stats.h" +#include "nss_l2tpv2_log.h" +#include "nss_l2tpv2_strings.h" + +/* + * Data structures to store l2tpv2 nss debug stats + */ +static DEFINE_SPINLOCK(nss_l2tpv2_session_debug_stats_lock); +static struct nss_l2tpv2_stats_session_debug nss_l2tpv2_session_debug_stats[NSS_MAX_L2TPV2_DYNAMIC_INTERFACES]; + +/* + * nss_l2tpv2_session_debug_stats_sync + * Per session debug stats for l2tpv2 + */ +void nss_l2tpv2_session_debug_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_l2tpv2_sync_session_stats_msg *stats_msg, uint16_t if_num) +{ + int i; + spin_lock_bh(&nss_l2tpv2_session_debug_stats_lock); + for (i = 0; i < NSS_MAX_L2TPV2_DYNAMIC_INTERFACES; i++) { + if (nss_l2tpv2_session_debug_stats[i].if_num == if_num) { + nss_l2tpv2_session_debug_stats[i].stats[NSS_L2TPV2_STATS_SESSION_RX_PPP_LCP_PKTS] += stats_msg->debug_stats.rx_ppp_lcp_pkts; + nss_l2tpv2_session_debug_stats[i].stats[NSS_L2TPV2_STATS_SESSION_RX_EXP_DATA_PKTS] += stats_msg->debug_stats.rx_exception_data_pkts; + nss_l2tpv2_session_debug_stats[i].stats[NSS_L2TPV2_STATS_SESSION_ENCAP_PBUF_ALLOC_FAIL_PKTS] += stats_msg->debug_stats.encap_pbuf_alloc_fail; + nss_l2tpv2_session_debug_stats[i].stats[NSS_L2TPV2_STATS_SESSION_DECAP_PBUF_ALLOC_FAIL_PKTS] += stats_msg->debug_stats.decap_pbuf_alloc_fail; + nss_l2tpv2_session_debug_stats[i].stats[NSS_L2TPV2_STATS_SESSION_DECAP_L2TPOIPSEC_SRC_ERR] += stats_msg->debug_stats.decap_l2tpoipsec_src_error; + break; + } + } + spin_unlock_bh(&nss_l2tpv2_session_debug_stats_lock); +} + 
+/* + * nss_l2tpv2_global_session_stats_get() + * Get session l2tpv2 statitics. + */ +void nss_l2tpv2_session_debug_stats_get(void *stats_mem) +{ + struct nss_l2tpv2_stats_session_debug *stats = (struct nss_l2tpv2_stats_session_debug *)stats_mem; + int i; + + if (!stats) { + nss_warning("No memory to copy l2tpv2 session stats"); + return; + } + + spin_lock_bh(&nss_l2tpv2_session_debug_stats_lock); + for (i = 0; i < NSS_MAX_L2TPV2_DYNAMIC_INTERFACES; i++) { + if (nss_l2tpv2_session_debug_stats[i].valid) { + memcpy(stats, &nss_l2tpv2_session_debug_stats[i], sizeof(struct nss_l2tpv2_stats_session_debug)); + stats++; + } + } + spin_unlock_bh(&nss_l2tpv2_session_debug_stats_lock); +} + +/* + * nss_l2tpv2_handler() + * Handle NSS -> HLOS messages for l2tpv2 tunnel + */ + +static void nss_l2tpv2_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_l2tpv2_msg *ntm = (struct nss_l2tpv2_msg *)ncm; + void *ctx; + + nss_l2tpv2_msg_callback_t cb; + + BUG_ON(!(nss_is_dynamic_interface(ncm->interface) || ncm->interface == NSS_L2TPV2_INTERFACE)); + + /* + * Trace Messages + */ + nss_l2tpv2_log_rx_msg(ntm); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_L2TPV2_MSG_MAX) { + nss_warning("%px: received invalid message %d for L2TP interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_l2tpv2_msg)) { + nss_warning("%px: message length is invalid: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + switch (ntm->cm.type) { + + case NSS_L2TPV2_MSG_SYNC_STATS: + /* + * Update session debug stats in session stats msg and send statistics notifications to the registered modules + */ + nss_l2tpv2_session_debug_stats_sync(nss_ctx, &ntm->msg.stats, ncm->interface); + nss_l2tpv2_stats_notify(nss_ctx, ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages, l2tpv2 sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->l2tpv2_msg_callback; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_l2tpv2_msg_callback_t)ncm->cb; + ctx = nss_ctx->subsys_dp_register[ncm->interface].ndev; + + /* + * call l2tpv2 tunnel callback + */ + if (!ctx) { + nss_warning("%px: Event received for l2tpv2 tunnel interface %d before registration", nss_ctx, ncm->interface); + return; + } + + cb(ctx, ntm); +} + +/* + * nss_l2tpv2_tx() + * Transmit a l2tpv2 message to NSS firmware + */ +nss_tx_status_t nss_l2tpv2_tx(struct nss_ctx_instance *nss_ctx, struct nss_l2tpv2_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace Messages + */ + nss_l2tpv2_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (!nss_is_dynamic_interface(ncm->interface)) { + nss_warning("%px: tx request for non dynamic interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_L2TPV2_MSG_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return 
NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + *********************************** + * Register/Unregister/Miscellaneous APIs + *********************************** + */ + +/* + * nss_register_l2tpv2_if() + */ +struct nss_ctx_instance *nss_register_l2tpv2_if(uint32_t if_num, nss_l2tpv2_callback_t l2tpv2_callback, + nss_l2tpv2_msg_callback_t event_callback, struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.l2tpv2_handler_id]; + int i = 0; + + nss_assert(nss_ctx); + nss_assert(nss_is_dynamic_interface(if_num)); + + nss_core_register_subsys_dp(nss_ctx, if_num, l2tpv2_callback, NULL, NULL, netdev, features); + + nss_top_main.l2tpv2_msg_callback = event_callback; + + nss_core_register_handler(nss_ctx, if_num, nss_l2tpv2_handler, NULL); + + spin_lock_bh(&nss_l2tpv2_session_debug_stats_lock); + for (i = 0; i < NSS_MAX_L2TPV2_DYNAMIC_INTERFACES; i++) { + if (!nss_l2tpv2_session_debug_stats[i].valid) { + nss_l2tpv2_session_debug_stats[i].valid = true; + nss_l2tpv2_session_debug_stats[i].if_num = if_num; + nss_l2tpv2_session_debug_stats[i].if_index = netdev->ifindex; + break; + } + } + spin_unlock_bh(&nss_l2tpv2_session_debug_stats_lock); + + return nss_ctx; +} + +/* + * nss_unregister_l2tpv2_if() + */ +void nss_unregister_l2tpv2_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.l2tpv2_handler_id]; + int i; + + nss_assert(nss_ctx); + nss_assert(nss_is_dynamic_interface(if_num)); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_top_main.l2tpv2_msg_callback = NULL; + + nss_core_unregister_handler(nss_ctx, if_num); + + spin_lock_bh(&nss_l2tpv2_session_debug_stats_lock); + for (i = 0; i < NSS_MAX_L2TPV2_DYNAMIC_INTERFACES; i++) { + if (nss_l2tpv2_session_debug_stats[i].if_num == if_num) { + memset(&nss_l2tpv2_session_debug_stats[i], 0, 
sizeof(struct nss_l2tpv2_stats_session_debug)); + break; + } + } + spin_unlock_bh(&nss_l2tpv2_session_debug_stats_lock); +} + +/* + * nss_get_l2tpv2_context() + */ +struct nss_ctx_instance *nss_l2tpv2_get_context() +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.l2tpv2_handler_id]; +} + +/* + * nss_l2tpv2_msg_init() + * Initialize nss_l2tpv2 msg. + */ +void nss_l2tpv2_msg_init(struct nss_l2tpv2_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} + +/* nss_l2tpv2_register_handler() + * debugfs stats msg handler received on static l2tpv2 interface + */ +void nss_l2tpv2_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_l2tpv2_get_context(); + + nss_info("nss_l2tpv2_register_handler"); + nss_core_register_handler(nss_ctx, NSS_L2TPV2_INTERFACE, nss_l2tpv2_handler, NULL); + + nss_l2tpv2_stats_dentry_create(); + nss_l2tpv2_strings_dentry_create(); +} + +EXPORT_SYMBOL(nss_l2tpv2_get_context); +EXPORT_SYMBOL(nss_l2tpv2_tx); +EXPORT_SYMBOL(nss_unregister_l2tpv2_if); +EXPORT_SYMBOL(nss_l2tpv2_msg_init); +EXPORT_SYMBOL(nss_register_l2tpv2_if); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_log.c new file mode 100644 index 000000000..fc1b31fc5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_log.c @@ -0,0 +1,143 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_l2tpv2_log.c + * NSS L2TPV2 logger file. + */ + +#include "nss_core.h" + +/* + * nss_l2tpv2_log_message_types_str + * NSS L2TPV2 message strings + */ +static int8_t *nss_l2tpv2_log_message_types_str[NSS_L2TPV2_MSG_MAX] __maybe_unused = { + "L2TPV2 Sesstion Create", + "L2TPV2 Session Destroy", + "L2TPV2 Stats", +}; + +/* + * nss_l2tpv2_log_session_create_msg() + * Log NSS L2TPV2 Session Create. + */ +static void nss_l2tpv2_log_session_create_msg(struct nss_l2tpv2_msg *nlm) +{ + struct nss_l2tpv2_session_create_msg *nlcm __maybe_unused = &nlm->msg.session_create_msg; + nss_trace("%px: NSS L2TPV2 Session Create message \n" + "L2TPV2 Local Tunnel ID: %x\n" + "L2TPV2 Local Session ID: %x\n" + "L2TPV2 Peer Tunnel ID: %x\n" + "L2TPV2 Peer Session ID: %x\n" + "L2TPV2 Source IP: %x\n" + "L2TPV2 Destnation IP: %x\n" + "L2TPV2 Reorder Timeout: %d\n" + "L2TPV2 Source Port: %d\n" + "L2TPV2 Destination Port: %d\n" + "L2TPV2 Received Sequence Number: %d\n" + "L2TPV2 Outer IP Packet TTL: %d\n" + "L2TPV2 UDP Checksum: %d\n", + nlcm, nlcm->local_tunnel_id, + nlcm->local_session_id, nlcm->peer_tunnel_id, + nlcm->peer_session_id, nlcm->sip, + nlcm->dip, nlcm->reorder_timeout, + nlcm->sport, nlcm->dport, + nlcm->recv_seq, nlcm->oip_ttl, + nlcm->udp_csum); +} + +/* + * nss_l2tpv2_log_session_destroy_msg() + * Log NSS L2TPV2 Session Destroy. 
+ */ +static void nss_l2tpv2_log_session_destroy_msg(struct nss_l2tpv2_msg *nlm) +{ + struct nss_l2tpv2_session_destroy_msg *nldm __maybe_unused = &nlm->msg.session_destroy_msg; + nss_trace("%px: NSS L2TPV2 Session Destroy message \n" + "L2TPV2 Local Tunnel ID: %x\n" + "L2TPV2 Local Session ID: %x\n", + nldm, nldm->local_tunnel_id, + nldm->local_session_id); +} + +/* + * nss_l2tpv2_log_verbose() + * Log message contents. + */ +static void nss_l2tpv2_log_verbose(struct nss_l2tpv2_msg *nlm) +{ + switch (nlm->cm.type) { + case NSS_L2TPV2_MSG_SESSION_CREATE: + nss_l2tpv2_log_session_create_msg(nlm); + break; + + case NSS_L2TPV2_MSG_SESSION_DESTROY: + nss_l2tpv2_log_session_destroy_msg(nlm); + break; + + case NSS_L2TPV2_MSG_SYNC_STATS: + /* + * No log for valid stats message. + */ + break; + + default: + nss_trace("%px: Invalid message type\n", nlm); + break; + } +} + +/* + * nss_l2tpv2_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_l2tpv2_log_tx_msg(struct nss_l2tpv2_msg *nlm) +{ + if (nlm->cm.type >= NSS_L2TPV2_MSG_MAX) { + nss_warning("%px: Invalid message type\n", nlm); + return; + } + + nss_info("%px: type[%d]:%s\n", nlm, nlm->cm.type, nss_l2tpv2_log_message_types_str[nlm->cm.type]); + nss_l2tpv2_log_verbose(nlm); +} + +/* + * nss_l2tpv2_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_l2tpv2_log_rx_msg(struct nss_l2tpv2_msg *nlm) +{ + if (nlm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nlm); + return; + } + + if (nlm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nlm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nlm, nlm->cm.type, + nss_l2tpv2_log_message_types_str[nlm->cm.type], + nlm->cm.response, nss_cmn_response_str[nlm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + nlm, nlm->cm.type, nss_l2tpv2_log_message_types_str[nlm->cm.type], + nlm->cm.response, nss_cmn_response_str[nlm->cm.response]); + +verbose: + nss_l2tpv2_log_verbose(nlm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_log.h new file mode 100644 index 000000000..56cc9dee3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_L2TPV2_LOG_H +#define __NSS_L2TPV2_LOG_H + +/* + * nss_l2tpv2.h + * NSS L2TPV2 header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_l2tpv2_log_tx_msg + * Logs a l2tpv2 message that is sent to the NSS firmware. + */ +void nss_l2tpv2_log_tx_msg(struct nss_l2tpv2_msg *ntm); + +/* + * nss_l2tpv2_log_rx_msg + * Logs a l2tpv2 message that is received from the NSS firmware. + */ +void nss_l2tpv2_log_rx_msg(struct nss_l2tpv2_msg *ntm); + +#endif /* __NSS_L2TPV2_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_stats.c new file mode 100644 index 000000000..0784b54b8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_stats.c @@ -0,0 +1,156 @@ +/* + ************************************************************************** + * Copyright (c) 2017,2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_l2tpv2_stats.h" +#include "nss_l2tpv2_strings.h" + +/* + * Declare atomic notifier data structure for statistics. 
+ */ +ATOMIC_NOTIFIER_HEAD(nss_l2tpv2_stats_notifier); + +/* + * nss_l2tpv2_stats_read() + * Read l2tpv2 statistics. + */ +static ssize_t nss_l2tpv2_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats * NSS_MAX_CORES + + * Few output lines for banner printing + Number of Extra outputlines for future reference to add new stats. + */ + uint32_t max_output_lines = NSS_MAX_L2TPV2_DYNAMIC_INTERFACES * (NSS_L2TPV2_STATS_SESSION_MAX + 2) /*session stats */ + + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines ; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct net_device *dev; + struct nss_l2tpv2_stats_session_debug l2tpv2_session_stats[NSS_MAX_L2TPV2_DYNAMIC_INTERFACES]; + int id; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + memset(&l2tpv2_session_stats, 0, sizeof(struct nss_l2tpv2_stats_session_debug) * NSS_MAX_L2TPV2_DYNAMIC_INTERFACES); + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "l2tpv2", NSS_STATS_SINGLE_CORE); + + /* + * Get all stats + */ + nss_l2tpv2_session_debug_stats_get((void *)&l2tpv2_session_stats); + + /* + * Session stats + */ + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nl2tp v2 session stats start:\n\n"); + for (id = 0; id < NSS_MAX_L2TPV2_DYNAMIC_INTERFACES; id++) { + + if (!l2tpv2_session_stats[id].valid) { + break; + } + + dev = dev_get_by_index(&init_net, l2tpv2_session_stats[id].if_index); + if (likely(dev)) { + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id, + l2tpv2_session_stats[id].if_num, dev->name); + dev_put(dev); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. 
nss interface id=%d\n", id, + l2tpv2_session_stats[id].if_num); + } + + size_wr += nss_stats_print("l2tpv2", "l2tp v2 session stats" + , id + , nss_l2tpv2_strings_session_stats + , l2tpv2_session_stats[id].stats + , NSS_L2TPV2_STATS_SESSION_MAX + , lbuf, size_wr, size_al); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(lbuf); + return bytes_read; +} + +/* + * nss_l2tpv2_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(l2tpv2); + +/* + * nss_l2tpv2_stats_dentry_create() + * Create l2tpv2 statistics debug entry. + */ +void nss_l2tpv2_stats_dentry_create(void) +{ + nss_stats_create_dentry("l2tpv2", &nss_l2tpv2_stats_ops); +} + +/* + * nss_l2tpv2_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_l2tpv2_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_l2tpv2_stats_notification l2tpv2_stats; + struct nss_l2tpv2_stats_session_debug l2tpv2_session_stats[NSS_MAX_L2TPV2_DYNAMIC_INTERFACES]; + int id; + + memset(&l2tpv2_session_stats, 0, sizeof(l2tpv2_session_stats)); + + /* + * Get all stats + */ + nss_l2tpv2_session_debug_stats_get((void *)&l2tpv2_session_stats); + + for (id = 0; id < NSS_MAX_L2TPV2_DYNAMIC_INTERFACES; id++) { + if (l2tpv2_session_stats[id].if_num == if_num) { + memcpy(&l2tpv2_stats.stats, &l2tpv2_session_stats[id].stats, sizeof(l2tpv2_stats.stats)); + } + } + l2tpv2_stats.core_id = nss_ctx->id; + l2tpv2_stats.if_num = if_num; + atomic_notifier_call_chain(&nss_l2tpv2_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&l2tpv2_stats); +} + +/* + * nss_l2tpv2_stats_register_notifier() + * Registers statistics notifier. 
+ */ +int nss_l2tpv2_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_l2tpv2_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_l2tpv2_stats_register_notifier); + +/* + * nss_l2tpv2_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_l2tpv2_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_l2tpv2_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_l2tpv2_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_stats.h new file mode 100644 index 000000000..0c8ecda4d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_stats.h @@ -0,0 +1,33 @@ +/* + ****************************************************************************** + * Copyright (c) 2017,2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * **************************************************************************** + */ + +#ifndef __NSS_L2TPV2_STATS_H +#define __NSS_L2TPV2_STATS_H + +struct nss_l2tpv2_stats_session_debug { + uint64_t stats[NSS_L2TPV2_STATS_SESSION_MAX]; + int32_t if_index; + uint32_t if_num; /* nss interface number */ + bool valid; +}; + +/* + * l2tpv2 statistics APIs + */ +extern void nss_l2tpv2_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_l2tpv2_stats_dentry_create(void); + +#endif /* __NSS_L2TPV2_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_strings.c new file mode 100644 index 000000000..d5db00c0b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_strings.c @@ -0,0 +1,57 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" + +/* + * nss_l2tpv2_strings_session_stats + * L2TPv2 statistics strings for NSS session statistics. 
+ */ +struct nss_stats_info nss_l2tpv2_strings_session_stats[NSS_L2TPV2_STATS_SESSION_MAX] = { + {"rx_ppp_lcp_pkts" , NSS_STATS_TYPE_EXCEPTION}, + {"rx_exp_pkts" , NSS_STATS_TYPE_EXCEPTION}, + {"encap_pbuf_alloc_fails" , NSS_STATS_TYPE_SPECIAL}, + {"decap_pbuf_alloc_fails" , NSS_STATS_TYPE_SPECIAL}, + {"decap_l2tpoipsec_src_err" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_l2tpv2_strings_read() + * Read L2TPv2 node statistics names. + */ +static ssize_t nss_l2tpv2_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_l2tpv2_strings_session_stats, NSS_L2TPV2_STATS_SESSION_MAX); +} + +/* + * nss_l2tpv2_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(l2tpv2); + +/* + * nss_l2tpv2_strings_dentry_create() + * Create L2TPv2 statistics strings debug entry. + */ +void nss_l2tpv2_strings_dentry_create(void) +{ + nss_strings_create_dentry("l2tpv2", &nss_l2tpv2_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_strings.h new file mode 100644 index 000000000..2e8f871c1 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_l2tpv2_strings.h @@ -0,0 +1,25 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_L2TPV2_STRINGS_H +#define __NSS_L2TPV2_STRINGS_H + +extern struct nss_stats_info nss_l2tpv2_strings_session_stats[NSS_L2TPV2_STATS_SESSION_MAX]; +extern void nss_l2tpv2_strings_dentry_create(void); + +#endif /* __NSS_L2TPV2_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_lag.c b/feeds/ipq807x/qca-nss-drv/src/nss_lag.c new file mode 100644 index 000000000..02362e173 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_lag.c @@ -0,0 +1,273 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_lag.c + * NSS LAG Tx APIs + */ + +#include + +#include "nss_tx_rx_common.h" +#include "nss_lag_log.h" + +#define NSS_LAG_RESP_TIMEOUT 60000 /* 60 Sec */ + +/* + * Private data structure of dynamic interface + */ +struct nss_lag_pvt { + struct completion complete; /* completion structure */ + enum nss_cmn_response response; /* Message response */ +}; + +/* + * nss_lag_state_callback() + * Call back function for nss LAG State + */ +void nss_lag_state_callback(void *arg, struct nss_lag_msg *nm) +{ + struct nss_lag_pvt *lag_msg_state = arg; + + /* + * Unblock the sleeping function. + */ + lag_msg_state->response = nm->cm.response; + complete(&lag_msg_state->complete); +} + +/* + * nss_lag_verify_ifnum() + * + */ +static void nss_lag_verify_ifnum(uint32_t if_num) +{ + nss_assert((if_num == NSS_LAG0_INTERFACE_NUM) || + (if_num == NSS_LAG1_INTERFACE_NUM) || + (if_num == NSS_LAG2_INTERFACE_NUM) || + (if_num == NSS_LAG3_INTERFACE_NUM)); +} + +/* + * nss_lag_get_context() + */ +static struct nss_ctx_instance *nss_lag_get_context(void) +{ + uint8_t ipv4_handler_id = nss_top_main.ipv4_handler_id; + + return (struct nss_ctx_instance *)&nss_top_main.nss[ipv4_handler_id]; +} + +/* + * nss_lag_tx() + * Transmit a LAG msg to the firmware. 
+ */ +nss_tx_status_t nss_lag_tx(struct nss_ctx_instance *nss_ctx, struct nss_lag_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace Messages + */ + nss_lag_log_tx_msg(msg); + + /* + * Sanity check the message + */ + nss_lag_verify_ifnum(ncm->interface); + + if (ncm->type > NSS_TX_METADATA_LAG_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_lag_tx); + +/** + * nss_register_lag_if() + */ +void *nss_register_lag_if(uint32_t if_num, + nss_lag_callback_t lag_cb, + nss_lag_event_callback_t lag_ev_cb, + struct net_device *netdev) +{ + struct nss_ctx_instance *nss_ctx = nss_lag_get_context(); + uint32_t features = 0; + + nss_assert(nss_ctx); + nss_lag_verify_ifnum(if_num); + + nss_core_register_subsys_dp(nss_ctx, if_num, lag_cb, NULL, NULL, netdev, features); + + nss_top_main.lag_event_callback = lag_ev_cb; + + /* + * Return the NSS driver context for LAG (same as for ipv4 functions) + */ + return (void *)nss_ctx; +} +EXPORT_SYMBOL(nss_register_lag_if); + +/** + * nss_unregister_lag_if() + */ +void nss_unregister_lag_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_lag_get_context(); + + nss_assert(nss_ctx); + nss_lag_verify_ifnum(if_num); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_top_main.lag_event_callback = NULL; +} +EXPORT_SYMBOL(nss_unregister_lag_if); + +/** + * nss_lag_handler() + */ +void nss_lag_handler(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, + void *app_data) +{ + struct nss_lag_msg *lm = (struct nss_lag_msg *)ncm; + void *ctx = NULL; + nss_lag_event_callback_t cb; + + BUG_ON(ncm->interface != NSS_LAG0_INTERFACE_NUM + && ncm->interface != NSS_LAG1_INTERFACE_NUM + && ncm->interface != NSS_LAG2_INTERFACE_NUM + && ncm->interface != NSS_LAG3_INTERFACE_NUM); + + /* + * Trace Messages + */ + nss_lag_log_rx_msg(lm); + + if (ncm->type >= 
NSS_TX_METADATA_LAG_MAX) { + nss_warning("%px: received invalid message %d for LAG interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_lag_msg)) { + nss_warning("%px: invalid length for LAG message: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /** + * Update the callback and app_data for NOTIFY messages. + * LAG sends all notify messages to the same callback. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->lag_event_callback; + } + + /** + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /** + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /** + * callback + */ + cb = (nss_lag_event_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + cb(ctx, lm); +} + +/** + * nss_lag_register_handler() + */ +void nss_lag_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_lag_get_context(); + + nss_core_register_handler(nss_ctx, NSS_LAG0_INTERFACE_NUM, nss_lag_handler, NULL); + nss_core_register_handler(nss_ctx, NSS_LAG1_INTERFACE_NUM, nss_lag_handler, NULL); + nss_core_register_handler(nss_ctx, NSS_LAG2_INTERFACE_NUM, nss_lag_handler, NULL); + nss_core_register_handler(nss_ctx, NSS_LAG3_INTERFACE_NUM, nss_lag_handler, NULL); +} + +/** + * nss_lag_msg_init() + * Initialize lag message + */ +void nss_lag_msg_init(struct nss_lag_msg *nlm, uint16_t lag_num, uint32_t type, uint32_t len, + nss_lag_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&nlm->cm, lag_num, type, len, (void *)cb, app_data); +} +EXPORT_SYMBOL(nss_lag_msg_init); + +/** + * nss_lag_tx_slave_state() + */ +nss_tx_status_t nss_lag_tx_slave_state(uint16_t lagid, int32_t slave_ifnum, + enum nss_lag_state_change_ev slave_state) +{ + struct nss_lag_msg nm; + struct nss_lag_state_change *nlsc = NULL; + nss_tx_status_t status; + int ret; + struct nss_ctx_instance *nss_ctx = nss_lag_get_context(); + struct nss_lag_pvt lag_msg_state; + + 
init_completion(&lag_msg_state.complete); + lag_msg_state.response = false; + + /* + * Construct a message to the NSS to update it + */ + nss_lag_msg_init(&nm, lagid, + NSS_TX_METADATA_LAG_STATE_CHANGE, + sizeof(struct nss_lag_state_change), + nss_lag_state_callback, &lag_msg_state); + + nlsc = &nm.msg.state; + nlsc->event = slave_state; + nlsc->interface = slave_ifnum; + + status = nss_lag_tx(nss_ctx, &nm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Send LAG update failed, status: %d\n", nss_ctx, + status); + return NSS_TX_FAILURE; + } + + /* + * Blocking call, wait till we get ACK for this msg. + */ + ret = wait_for_completion_timeout(&lag_msg_state.complete, + msecs_to_jiffies(NSS_LAG_RESP_TIMEOUT)); + if (!ret) { + nss_warning("%px: Waiting for ack timed out\n", nss_ctx); + return NSS_TX_FAILURE; + } + + return lag_msg_state.response; +} +EXPORT_SYMBOL(nss_lag_tx_slave_state); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_lag_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_lag_log.c new file mode 100644 index 000000000..da83df0c6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_lag_log.c @@ -0,0 +1,103 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_lag_log.c + * NSS LAG logger file. + */ + +#include "nss_core.h" + +/* + * nss_lag_log_message_types_str + * NSS LAG message strings + */ +static int8_t *nss_lag_log_message_types_str[NSS_TX_METADATA_LAG_MAX] __maybe_unused = { + "LAG State Change", +}; + +/* + * nss_lag_log_state_change_msg() + * Log NSS LAG State Change. + */ +static void nss_lag_log_state_change_msg(struct nss_lag_msg *nlm) +{ + struct nss_lag_state_change *nlcm __maybe_unused = &nlm->msg.state; + nss_trace("%px: NSS LAG State Change message \n" + "LAG ID: %x\n" + "LAG Interface: %x\n" + "LAG Event: %d\n", + nlcm, nlcm->lagid, + nlcm->interface, nlcm->event); +} + +/* + * nss_lag_log_verbose() + * Log message contents. + */ +static void nss_lag_log_verbose(struct nss_lag_msg *nlm) +{ + switch (nlm->cm.type) { + case NSS_TX_METADATA_LAG_STATE_CHANGE: + nss_lag_log_state_change_msg(nlm); + break; + + default: + nss_trace("%px: Invalid message type\n", nlm); + break; + } +} + +/* + * nss_lag_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_lag_log_tx_msg(struct nss_lag_msg *nlm) +{ + if (nlm->cm.type >= NSS_TX_METADATA_LAG_MAX) { + nss_warning("%px: Invalid message type\n", nlm); + return; + } + + nss_info("%px: type[%d]:%s\n", nlm, nlm->cm.type, nss_lag_log_message_types_str[nlm->cm.type]); + nss_lag_log_verbose(nlm); +} + +/* + * nss_lag_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_lag_log_rx_msg(struct nss_lag_msg *nlm) +{ + if (nlm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nlm); + return; + } + + if (nlm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nlm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nlm, nlm->cm.type, + nss_lag_log_message_types_str[nlm->cm.type], + nlm->cm.response, nss_cmn_response_str[nlm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + nlm, nlm->cm.type, nss_lag_log_message_types_str[nlm->cm.type], + nlm->cm.response, nss_cmn_response_str[nlm->cm.response]); + +verbose: + nss_lag_log_verbose(nlm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_lag_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_lag_log.h new file mode 100644 index 000000000..4efe393fc --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_lag_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_LAG_LOG_H +#define __NSS_LAG_LOG_H + +/* + * nss_lag.h + * NSS LAG header file. 
+ */ + +/* + * Logger APIs + */ + +/* + * nss_lag_log_tx_msg + * Logs a lag message that is sent to the NSS firmware. + */ +void nss_lag_log_tx_msg(struct nss_lag_msg *nlm); + +/* + * nss_lag_log_rx_msg + * Logs a lag message that is received from the NSS firmware. + */ +void nss_lag_log_rx_msg(struct nss_lag_msg *nlm); + +#endif /* __NSS_LAG_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_log.c new file mode 100644 index 000000000..bfdca6575 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_log.c @@ -0,0 +1,602 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ +/* + * nss_log.c + * NSS FW debug logger retrieval from DDR (memory) + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "nss_core.h" +#include "nss_log.h" + +/* + * Private data for each device file open instance + */ +struct nss_log_data { + void *load_mem; /* Pointer to struct nss_log_descriptor - descriptor data */ + dma_addr_t dma_addr; /* Handle to DMA */ + uint32_t last_entry; /* Last known sampled entry (or index) */ + uint32_t nentries; /* Caches the total number of entries of log buffer */ + int nss_id; /* NSS Core id being used */ + struct nss_ctx_instance *nss_ctx; + /* NSS ctx instance */ +}; + +struct nss_log_ring_buffer_addr nss_rbe[NSS_MAX_CORES]; + +static DEFINE_MUTEX(nss_log_mutex); +static wait_queue_head_t nss_log_wq; +static nss_log_msg_callback_t nss_debug_interface_cb; +static void *nss_debug_interface_app_data = NULL; + +static wait_queue_head_t msg_wq; +enum nss_cmn_response msg_response; +static bool msg_event; + +/* + * nss_log_llseek() + * Seek operation. + */ +static loff_t nss_log_llseek(struct file *file, loff_t offset, int origin) +{ + struct nss_log_data *data = file->private_data; + + switch (origin) { + case SEEK_SET: + break; + case SEEK_CUR: + offset += file->f_pos; + break; + case SEEK_END: + offset = ((data->nentries * sizeof(struct nss_log_entry)) + sizeof(struct nss_log_descriptor)) - offset; + break; + default: + return -EINVAL; + } + + return (offset >= 0) ? (file->f_pos = offset) : -EINVAL; +} + +/* + * nss_log_open() + * Open operation for our device. 
We let as many instance run together + */ +static int nss_log_open(struct inode *inode, struct file *filp) +{ + struct nss_log_data *data = NULL; + struct nss_top_instance *nss_top; + struct nss_ctx_instance *nss_ctx; + int nss_id; + + /* + * i_private is passed to us by debug_fs_create() + */ + nss_id = (int)(nss_ptr_t)inode->i_private; + if (nss_id < 0 || nss_id >= nss_top_main.num_nss) { + nss_warning("nss_id is not valid :%d\n", nss_id); + return -ENODEV; + } + + nss_top = &nss_top_main; + nss_ctx = &nss_top->nss[nss_id]; + + data = kzalloc(sizeof(struct nss_log_data), GFP_KERNEL); + if (!data) { + nss_warning("%px: Failed to allocate memory for log_data", nss_ctx); + return -ENOMEM; + } + + mutex_lock(&nss_log_mutex); + if (!nss_rbe[nss_id].addr) { + mutex_unlock(&nss_log_mutex); + kfree(data); + nss_warning("%px: Ring buffer not configured yet for nss_id:%d", nss_ctx, nss_id); + return -EIO; + } + + /* + * Actual ring buffer. + */ + data->load_mem = nss_rbe[nss_id].addr; + data->last_entry = 0; + data->nentries = nss_rbe[nss_id].nentries; + data->dma_addr = nss_rbe[nss_id].dma_addr; + data->nss_ctx = nss_ctx; + + /* + * Increment the reference count so that we don't free + * the memory + */ + nss_rbe[nss_id].ref_cnt++; + data->nss_id = nss_id; + filp->private_data = data; + mutex_unlock(&nss_log_mutex); + + return 0; +} + +/* + * nss_log_release() + * release gets called when close() is called on the file + * descriptor. We unmap the IO region. + */ +static int nss_log_release(struct inode *inode, struct file *filp) +{ + struct nss_log_data *data = filp->private_data; + + if (!data) { + return -EINVAL; + } + + mutex_lock(&nss_log_mutex); + nss_rbe[data->nss_id].ref_cnt--; + BUG_ON(nss_rbe[data->nss_id].ref_cnt < 0); + if (!nss_rbe[data->nss_id].ref_cnt) { + wake_up(&nss_log_wq); + } + mutex_unlock(&nss_log_mutex); + kfree(data); + return 0; +} + +/* + * nss_log_current_entry() + * Reads current entry index from NSS log descriptor. 
+ */ +static uint32_t nss_log_current_entry(struct nss_log_descriptor *desc) +{ + rmb(); + return desc->current_entry; +} + +/* + * nss_log_read() + * Read operation lets command like cat and tail read our memory log buffer data. + */ +static ssize_t nss_log_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) +{ + struct nss_log_data *data = filp->private_data; + struct nss_log_descriptor *desc; + size_t bytes = 0; + size_t b; + struct nss_log_entry *rb; + uint32_t entry; + uint32_t offset, index; + char msg[NSS_LOG_OUTPUT_LINE_SIZE]; + + if (!data) { + return -EINVAL; + } + + desc = data->load_mem; + if (!desc) { + nss_warning("%px: load_mem is NULL", data); + return -EINVAL; + } + + /* + * If buffer is too small to fit even one entry. + */ + if (size < NSS_LOG_OUTPUT_LINE_SIZE) { + return 0; + } + + /* + * Get the current index + */ + dma_sync_single_for_cpu(data->nss_ctx->dev, data->dma_addr, sizeof(struct nss_log_descriptor), DMA_FROM_DEVICE); + + entry = nss_log_current_entry(desc); + + /* + * If the current and last sampled indexes are same then bail out. + */ + if (unlikely(data->last_entry == entry)) { + return 0; + } + + /* + * If this is the first read (after open) on our device file. + */ + if (unlikely(!(*ppos))) { + /* + * If log buffer has rolled over. Almost all the time + * it will be true. + */ + if (likely(entry > data->nentries)) { + /* + * Determine how much we can stuff in one + * buffer passed to us and accordingly + * reduce our index. + */ + data->last_entry = entry - data->nentries; + } else { + data->last_entry = 0; + } + } else if (unlikely(entry > data->nentries && ((entry - data->nentries) > data->last_entry))) { + /* + * If FW is producing debug buffer at a pace faster than + * we can consume, then we restrict our iteration. + */ + data->last_entry = entry - data->nentries; + } + + /* + * Iterate over indexes. 
+ */ + while (entry > data->last_entry) { + index = offset = (data->last_entry % data->nentries); + offset = (offset * sizeof(struct nss_log_entry)) + + offsetof(struct nss_log_descriptor, log_ring_buffer); + + dma_sync_single_for_cpu(data->nss_ctx->dev, data->dma_addr + offset, + sizeof(struct nss_log_entry), DMA_FROM_DEVICE); + rb = &desc->log_ring_buffer[index]; + + b = scnprintf(msg, sizeof(msg), NSS_LOG_LINE_FORMAT, + rb->thread_num, rb->timestamp, rb->message); + + data->last_entry++; + + /* + * Copy to user buffer and if we fail then we return + * failure. + */ + if (copy_to_user(buf + bytes, msg, b)) { + return -EFAULT; + } + + bytes += b; + + /* + * If we ran out of space in the buffer. + */ + if ((bytes + NSS_LOG_OUTPUT_LINE_SIZE) >= size) + break; + } + + if (bytes > 0) + *ppos = bytes; + + return bytes; +} + +struct file_operations nss_logs_core_ops = { + .owner = THIS_MODULE, + .open = nss_log_open, + .read = nss_log_read, + .release = nss_log_release, + .llseek = nss_log_llseek, +}; + +/* + * nss_debug_interface_set_callback() + * Sets the callback + */ +void nss_debug_interface_set_callback(nss_log_msg_callback_t cb, void *app_data) +{ + nss_debug_interface_cb = cb; + nss_debug_interface_app_data = app_data; +} + +/* + * nss_debug_interface_event() + * Received an event from NSS FW + */ +static void nss_debug_interface_event(void *app_data, struct nss_log_debug_interface_msg *nim) +{ + struct nss_cmn_msg *ncm = (struct nss_cmn_msg *)nim; + + msg_response = ncm->response; + msg_event = true; + wake_up(&msg_wq); +} + +/* + * nss_debug_interface_handler() + * handle NSS -> HLOS messages for debug interfaces + */ +static void nss_debug_interface_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_log_debug_interface_msg *ntm = (struct nss_log_debug_interface_msg *)ncm; + nss_log_msg_callback_t cb; + + BUG_ON(ncm->interface != NSS_DEBUG_INTERFACE); + + /* + * Is this a valid 
request/response packet? + */ + if (ncm->type > NSS_DEBUG_INTERFACE_TYPE_MAX) { + nss_warning("%px: received invalid message %d for CAPWAP interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_log_debug_interface_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Update the callback and app_data for NOTIFY messages. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_debug_interface_cb; + ncm->app_data = (nss_ptr_t)nss_debug_interface_app_data; + } + + /* + * Do we have a callback + */ + if (!ncm->cb) { + nss_trace("%px: cb is null for interface %d", nss_ctx, ncm->interface); + return; + } + + cb = (nss_log_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, ntm); +} + +/* + * nss_debug_interface_tx() + * Transmit a debug interface message to NSS FW + */ +static nss_tx_status_t nss_debug_interface_tx(struct nss_ctx_instance *nss_ctx, struct nss_log_debug_interface_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Sanity check the message + */ + if (ncm->interface != NSS_DEBUG_INTERFACE) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_DEBUG_INTERFACE_TYPE_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_debug_log_buffer_alloc() + * Allocates and Initializes log buffer for the use in NSS FW (logging) + */ +bool nss_debug_log_buffer_alloc(uint8_t nss_id, uint32_t nentry) +{ + struct nss_log_debug_interface_msg msg; + struct nss_log_debug_memory_msg *dbg; + struct nss_top_instance *nss_top; + struct nss_ctx_instance *nss_ctx; + dma_addr_t dma_addr; + uint32_t size; + void *addr = NULL; + nss_tx_status_t 
status; + + if (nss_id >= nss_top_main.num_nss) { + return false; + } + + nss_top = &nss_top_main; + nss_ctx = &nss_top->nss[nss_id]; + + if (nss_ctx->state != NSS_CORE_STATE_INITIALIZED) { + nss_warning("%px: NSS Core:%d is not initialized yet\n", nss_ctx, nss_id); + return false; + } + + size = sizeof(struct nss_log_descriptor) + (sizeof(struct nss_log_entry) * nentry); + addr = kmalloc(size, GFP_ATOMIC); + if (!addr) { + nss_warning("%px: Failed to allocate memory for logging (size:%d)\n", nss_ctx, size); + return false; + } + + memset(addr, 0, size); + dma_addr = (uint32_t)dma_map_single(nss_ctx->dev, addr, size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(nss_ctx->dev, dma_addr))) { + nss_warning("%px: Failed to map address in DMA", nss_ctx); + kfree(addr); + return false; + } + + /* + * If we already have ring buffer associated with nss_id, then + * we must wait before we attach a new ring buffer. + */ + mutex_lock(&nss_log_mutex); + if (nss_rbe[nss_id].addr) { + mutex_unlock(&nss_log_mutex); + + /* + * Someone is using the current logbuffer. Wait until ref count become 0. + * We have to return mutex here, because the current user requires it to + * release the reference. 
+ */ + if (!wait_event_timeout(nss_log_wq, !nss_rbe[nss_id].ref_cnt, 5 * HZ)) { + nss_warning("%px: Timeout waiting for refcnt to become 0\n", nss_ctx); + goto fail; + } + + mutex_lock(&nss_log_mutex); + if (!nss_rbe[nss_id].addr) { + mutex_unlock(&nss_log_mutex); + goto fail; + } + if (nss_rbe[nss_id].ref_cnt > 0) { + mutex_unlock(&nss_log_mutex); + nss_warning("%px: Some other thread is contending..opting out\n", nss_ctx); + goto fail; + } + } + + memset(&msg, 0, sizeof(struct nss_log_debug_interface_msg)); + nss_cmn_msg_init(&msg.cm, NSS_DEBUG_INTERFACE, NSS_DEBUG_INTERFACE_TYPE_LOG_BUF_INIT, + sizeof(struct nss_log_debug_memory_msg), nss_debug_interface_event, NULL); + + dbg = &msg.msg.addr; + dbg->nentry = nentry; + dbg->version = NSS_DEBUG_LOG_VERSION; + dbg->phy_addr = dma_addr; + + msg_event = false; + status = nss_debug_interface_tx(nss_ctx, &msg); + if (status != NSS_TX_SUCCESS) { + mutex_unlock(&nss_log_mutex); + nss_warning("%px: Failed to send message to debug interface:%d\n", nss_ctx, status); + goto fail; + } + + /* + * Wait for 5 seconds since this is a critical operation. + * Mutex is not unlocked here because we do not want someone to acquire the mutex and use the logbuffer + * while we are waiting message from NSS. + */ + if (!wait_event_timeout(msg_wq, msg_event, 5 * HZ)) { + mutex_unlock(&nss_log_mutex); + nss_warning("%px: Timeout send message to debug interface\n", nss_ctx); + goto fail; + } + + if (msg_response != NSS_CMN_RESPONSE_ACK) { + mutex_unlock(&nss_log_mutex); + nss_warning("%px: Response error for send message to debug interface:%d\n", nss_ctx, msg_response); + goto fail; + } + + /* + * If we had to free the previous allocation for ring buffer. 
+ */ + if (nss_rbe[nss_id].addr) { + uint32_t old_size; + old_size = sizeof(struct nss_log_descriptor) + + (sizeof(struct nss_log_entry) * nss_rbe[nss_id].nentries); + dma_unmap_single(nss_ctx->dev, nss_rbe[nss_id].dma_addr, old_size, DMA_FROM_DEVICE); + kfree(nss_rbe[nss_id].addr); + } + + nss_rbe[nss_id].addr = addr; + nss_rbe[nss_id].nentries = nentry; + nss_rbe[nss_id].ref_cnt = 0; + nss_rbe[nss_id].dma_addr = dma_addr; + mutex_unlock(&nss_log_mutex); + wake_up(&nss_log_wq); + return true; + +fail: + dma_unmap_single(nss_ctx->dev, dma_addr, size, DMA_FROM_DEVICE); + kfree(addr); + wake_up(&nss_log_wq); + return false; +} + +/* + * nss_logbuffer_handler() + * Enable NSS debug output + */ +int nss_logbuffer_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + int core_status; + int i; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + return ret; + } + + if (!write) { + return ret; + } + + if (nss_ctl_logbuf < 32) { + nss_warning("Invalid NSS FW logbuffer size:%d (must be > 32)\n", nss_ctl_logbuf); + nss_ctl_logbuf = 0; + return ret; + } + + for (i = 0; i < nss_top_main.num_nss; i++) { + /* + * Register the callback handler and allocate the debug log buffers + */ + core_status = nss_core_register_handler(&nss_top_main.nss[i], NSS_DEBUG_INTERFACE, nss_debug_interface_handler, NULL); + if (core_status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("NSS logbuffer init failed with register handler:%d\n", core_status); + } + + if (!nss_debug_log_buffer_alloc(i, nss_ctl_logbuf)) { + nss_warning("%d: Failed to set debug log buffer on NSS core", i); + } + } + + return ret; +} + +/* + * nss_log_init() + * Initializes NSS FW logs retrieval logic from /sys + */ +void nss_log_init(void) +{ + int i; + struct dentry *logs_dentry; + struct dentry *core_log_dentry; + + memset(nss_rbe, 0, sizeof(nss_rbe)); + init_waitqueue_head(&nss_log_wq); + init_waitqueue_head(&msg_wq); + + /* + * Create directory for 
obtaining NSS FW logs from each core + */ + logs_dentry = debugfs_create_dir("logs", nss_top_main.top_dentry); + if (unlikely(!logs_dentry)) { + nss_warning("Failed to create qca-nss-drv/logs directory in debugfs"); + return; + } + + for (i = 0; i < nss_top_main.num_nss; i++) { + char file[16]; + extern struct file_operations nss_logs_core_ops; + + snprintf(file, sizeof(file), "core%d", i); + core_log_dentry = debugfs_create_file(file, 0400, + logs_dentry, (void *)(nss_ptr_t)i, &nss_logs_core_ops); + if (unlikely(!core_log_dentry)) { + nss_warning("Failed to create qca-nss-drv/logs/%s file in debugfs", file); + return; + } + } + + nss_debug_interface_set_callback(nss_debug_interface_event, NULL); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_log.h new file mode 100644 index 000000000..1d27e9498 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_log.h @@ -0,0 +1,115 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2015, 2018, 2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ +/* + * nss_log.h + * NSS FW debug log memory header file + */ + +#ifndef __NSS_LOG_H +#define __NSS_LOG_H + +#define NSS_DEBUG_LOG_VERSION 0x1 + +/** + * Dynamic Interface types + */ +enum nss_debug_interface_msg_type { + NSS_DEBUG_INTERFACE_TYPE_NONE = 0, + NSS_DEBUG_INTERFACE_TYPE_LOG_BUF_INIT = 1, + NSS_DEBUG_INTERFACE_TYPE_MAX, +}; + +/* + * The size of each log entry to be displayed. + */ +#define NSS_LOG_OUTPUT_LINE_SIZE 151 /* 5 + 12 + 132 + '\n' + '\0' (see below) */ +#define NSS_LOG_LINE_FORMAT "%3d: %010u: %s\n" +#define NSS_LOG_LINE_WIDTH 132 +#define NSS_LOG_COOKIE 0xFF785634 + +/* + * Dump last N entry during the coredump. + * This number should be lower than the minimum size of the logbuf + * which 32 right now. + */ +#define NSS_LOG_COREDUMP_LINE_NUM 25 + +/* + * Saves the ring buffer address for logging per NSS core + */ +struct nss_log_ring_buffer_addr { + void *addr; /* Pointer to struct nss_log_descriptor */ + dma_addr_t dma_addr; /* DMA Handle */ + uint32_t nentries; /* Number of entries in the ring buffer */ + int ref_cnt; /* Reference count */ +}; + +/* + * nss_log_entry is shared between Host and NSS FW + */ +struct nss_log_entry { + uint64_t sequence_num; /* Sequence number */ + uint32_t cookie; /* Magic for verification */ + uint32_t thread_num; /* thread-id */ + uint32_t timestamp; /* timestamp in ticks */ + char message[NSS_LOG_LINE_WIDTH]; /* actual debug message */ +} __attribute__((aligned(NSS_CACHE_LINE_SIZE))); + +/* + * The NSS log descripts holds ring-buffer along with other variables and + * it is shared between NSS FW and Host. + * + * NSS FW writes to ring buffer and current_entry but read by only Host. 
+ */ +struct nss_log_descriptor { + uint32_t cookie; /* Magic for verification */ + uint32_t log_nentries; /* No.of log entries */ + uint32_t current_entry; /* pointer to current log entry */ + uint8_t pad[20]; /* pad to align ring buffer at cacheline boundary */ + struct nss_log_entry log_ring_buffer[0]; /* The actual log entry ring buffer */ +} __attribute__((aligned(NSS_CACHE_LINE_SIZE))); + +struct nss_log_debug_memory_msg { + uint32_t version; + uint32_t nentry; + uint32_t phy_addr; +}; + +struct nss_log_debug_interface_msg { + struct nss_cmn_msg cm; + union { + struct nss_log_debug_memory_msg addr; + } msg; +}; + +/** + * @brief Callback to receive debug interface messages + * + * @param app_data Application context of the message + * @param msg Message data + * + * @return void + */ +typedef void (*nss_log_msg_callback_t)(void *app_data, struct nss_log_debug_interface_msg *msg); + +/* + * Exported by nss_init.c and used in nss_log.c + */ +extern int nss_ctl_logbuf; + +extern struct nss_log_ring_buffer_addr nss_rbe[NSS_MAX_CORES]; + +#endif /* __NSS_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx.c b/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx.c new file mode 100644 index 000000000..3abda0101 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx.c @@ -0,0 +1,62 @@ +/* + ************************************************************************** + * Copyright (c) 2014,2017,2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_lso_rx.c + * NSS LSO_RX APIs + */ + +#include +#include "nss_lso_rx_stats.h" +#include "nss_lso_rx_strings.h" + +/* + * nss_rx_lso_rx_interface_handler() + * Handle NSS -> HLOS messages for LSO_RX Changes and Statistics + */ +static void nss_rx_lso_rx_interface_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) { + + struct nss_lso_rx_msg *nlrm = (struct nss_lso_rx_msg *)ncm; + + switch (nlrm->cm.type) { + case NSS_LSO_RX_STATS_SYNC_MSG: + /* + * Update LSO_RX driver statistics and send statistics notifications to the registered modules + */ + nss_lso_rx_stats_sync(nss_ctx, &nlrm->msg.stats_sync); + nss_lso_rx_stats_notify(nss_ctx); + break; + + default: + if (ncm->response != NSS_CMN_RESPONSE_ACK) { + /* + * Check response + */ + nss_info("%px: Received response %d for type %d, interface %d", nss_ctx, ncm->response, ncm->type, ncm->interface); + } + } +} + +/* + * nss_lso_rx_register_handler() + * Register handler for messaging + */ +void nss_lso_rx_register_handler(struct nss_ctx_instance *nss_ctx) +{ + nss_core_register_handler(nss_ctx, NSS_LSO_RX_INTERFACE, nss_rx_lso_rx_interface_handler, NULL); + nss_lso_rx_stats_dentry_create(); + nss_lso_rx_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_stats.c new file mode 100644 index 000000000..2763a2a8c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_stats.c @@ -0,0 +1,172 @@ +/* + 
************************************************************************** + * Copyright (c) 2017,2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_lso_rx_stats.h" +#include "nss_lso_rx_strings.h" + +/* + * Declare atomic notifier data structure for statistics. 
+ */ +ATOMIC_NOTIFIER_HEAD(nss_lso_rx_stats_notifier); + +uint64_t nss_lso_rx_stats[NSS_LSO_RX_STATS_MAX]; /* LSO_RX statistics */ + +/* + * nss_lso_rx_stats_read() + * Read LSO_RX stats + */ +static ssize_t nss_lso_rx_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * Max output lines = #stats + few blank lines for banner printing + + * Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_LSO_RX_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_LSO_RX_STATS_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "lso_rx", NSS_STATS_SINGLE_CORE); + size_wr += nss_stats_fill_common_stats(NSS_LSO_RX_INTERFACE, NSS_STATS_SINGLE_INSTANCE, lbuf, size_wr, size_al, "lso_rx"); + + /* + * lso_rx node stats + */ + + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_LSO_RX_STATS_MAX); i++) { + stats_shadow[i] = nss_lso_rx_stats[i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("lso_rx", "lso_rx node stats" + , NSS_STATS_SINGLE_INSTANCE + , nss_lso_rx_strings_stats + , stats_shadow + , NSS_LSO_RX_STATS_MAX + , lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_lso_rx_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(lso_rx); + +/* + * nss_lso_rx_stats_dentry_create() + * Create lso_rx statistics debug entry. 
+ */ +void nss_lso_rx_stats_dentry_create(void) +{ + nss_stats_create_dentry("lso_rx", &nss_lso_rx_stats_ops); +} + +/* + * nss_lso_rx_stats_sync() + * Handle the syncing of lso_rx node statistics. + */ +void nss_lso_rx_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_lso_rx_stats_sync *nlrss) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + int j; + + spin_lock_bh(&nss_top->stats_lock); + + /* + * common node stats + */ + nss_top->stats_node[NSS_LSO_RX_INTERFACE][NSS_STATS_NODE_RX_PKTS] += nlrss->node_stats.rx_packets; + nss_top->stats_node[NSS_LSO_RX_INTERFACE][NSS_STATS_NODE_RX_BYTES] += nlrss->node_stats.rx_bytes; + nss_top->stats_node[NSS_LSO_RX_INTERFACE][NSS_STATS_NODE_TX_PKTS] += nlrss->node_stats.tx_packets; + nss_top->stats_node[NSS_LSO_RX_INTERFACE][NSS_STATS_NODE_TX_BYTES] += nlrss->node_stats.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_top->stats_node[NSS_LSO_RX_INTERFACE][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + j] += nlrss->node_stats.rx_dropped[j]; + } + + /* + * General LSO_RX stats + */ + nss_lso_rx_stats[NSS_LSO_RX_STATS_TX_DROPPED] += nlrss->tx_dropped; + nss_lso_rx_stats[NSS_LSO_RX_STATS_DROPPED] += nlrss->dropped; + + /* + * pbuf + */ + nss_lso_rx_stats[NSS_LSO_RX_STATS_PBUF_ALLOC_FAIL] += nlrss->pbuf_alloc_fail; + nss_lso_rx_stats[NSS_LSO_RX_STATS_PBUF_REFERENCE_FAIL] += nlrss->pbuf_reference_fail; + + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_lso_rx_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_lso_rx_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + struct nss_lso_rx_stats_notification lso_rx_stats; + + lso_rx_stats.core_id = nss_ctx->id; + memcpy(lso_rx_stats.cmn_node_stats, nss_top_main.stats_node[NSS_LSO_RX_INTERFACE], sizeof(lso_rx_stats.cmn_node_stats)); + memcpy(lso_rx_stats.node_stats, nss_lso_rx_stats, sizeof(lso_rx_stats.node_stats)); + atomic_notifier_call_chain(&nss_lso_rx_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&lso_rx_stats); +} + +/* + * nss_lso_rx_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_lso_rx_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_lso_rx_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_lso_rx_stats_register_notifier); + +/* + * nss_lso_rx_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_lso_rx_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_lso_rx_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_lso_rx_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_stats.h new file mode 100644 index 000000000..54ab6dc51 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_stats.h @@ -0,0 +1,67 @@ +/* + ************************************************************************** + * Copyright (c) 2017,2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_LSO_RX_STATS_H +#define __NSS_LSO_RX_STATS_H + +#include + +/* + * lso_rx_node statistics. + */ +struct nss_lso_rx_stats_sync { + struct nss_cmn_node_stats node_stats; + + uint32_t tx_dropped; /* Number of packets dropped because lso_rx transmit queue is full */ + uint32_t dropped; /* Total of packets dropped by the node internally */ + uint32_t pbuf_alloc_fail; /* Count number of pbuf alloc fails */ + uint32_t pbuf_reference_fail; /* Count number of pbuf ref fails */ + + /* + * If we're generating per-packet statistics then we count total lso_rx processing ticks + * worst-case ticks and the number of iterations around the lso_rx handler that we take. 
+ */ + uint32_t total_ticks; /* Total clock ticks spend inside the lso_rx handler */ + uint32_t worst_case_ticks; + /* Worst case iteration of the lso_rx handler in ticks */ + uint32_t iterations; /* Number of iterations around the lso_rx handler */ +}; + +/* + * Message types for lso_rx + */ +enum nss_lso_rx_metadata_types { + NSS_LSO_RX_STATS_SYNC_MSG, /* Message type - stats sync message */ +}; + +/* + * Message structure to send receive LSO_RX commands + */ +struct nss_lso_rx_msg { + struct nss_cmn_msg cm; /* Message header */ + union { + struct nss_lso_rx_stats_sync stats_sync; /* Stats sub-message */ + } msg; +}; + +/* + * lso_rx statistics APIs + */ +extern void nss_lso_rx_stats_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_lso_rx_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_lso_rx_stats_sync *nlrss); +extern void nss_lso_rx_stats_dentry_create(void); + +#endif /* __NSS_LSO_RX_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_strings.c new file mode 100644 index 000000000..627f1de08 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_strings.c @@ -0,0 +1,57 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" +#include "nss_lso_rx_strings.h" + +/* + * nss_lso_rx_strings_stats + * LSO Rx statistics strings. + */ +struct nss_stats_info nss_lso_rx_strings_stats[NSS_LSO_RX_STATS_MAX] = { + {"tx_drops" ,NSS_STATS_TYPE_DROP}, + {"drops" ,NSS_STATS_TYPE_DROP}, + {"pbuf_alloc_fail" ,NSS_STATS_TYPE_ERROR}, + {"pbuf_reference_fail" ,NSS_STATS_TYPE_ERROR} +}; + +/* + * nss_lso_rx_strings_read() + * Read LSO Rx node statistics names. + */ +static ssize_t nss_lso_rx_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_lso_rx_strings_stats, NSS_LSO_RX_STATS_MAX); +} + +/* + * nss_lso_rx_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(lso_rx); + +/* + * nss_lso_rx_strings_dentry_create() + * Create LSO Rx statistics strings debug entry. + */ +void nss_lso_rx_strings_dentry_create(void) +{ + nss_strings_create_dentry("lso_rx", &nss_lso_rx_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_strings.h new file mode 100644 index 000000000..901ed3073 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_lso_rx_strings.h @@ -0,0 +1,25 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_LSO_RX_STRINGS_H +#define __NSS_LSO_RX_STRINGS_H + +extern struct nss_stats_info nss_lso_rx_strings_stats[NSS_LSO_RX_STATS_MAX]; +extern void nss_lso_rx_strings_dentry_create(void); + +#endif /* __NSS_LSO_RX_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_map_t.c b/feeds/ipq807x/qca-nss-drv/src/nss_map_t.c new file mode 100644 index 000000000..cfa7ab967 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_map_t.c @@ -0,0 +1,412 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_map_t_stats.h" +#include "nss_map_t_log.h" +#include "nss_map_t_strings.h" + +#define NSS_MAP_T_TX_TIMEOUT 3000 /* 3 Seconds */ + +/* + * Private data structure + */ +static struct { + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; +} nss_map_t_pvt; + +/* + * Data structures to store map_t nss debug stats + */ +static DEFINE_SPINLOCK(nss_map_t_debug_stats_lock); +static struct nss_map_t_stats_instance_debug nss_map_t_debug_stats[NSS_MAX_MAP_T_DYNAMIC_INTERFACES]; + +/* + * nss_map_t_verify_if_num() + * Verify if_num passed to us. 
+ */ +static bool nss_map_t_verify_if_num(uint32_t if_num) +{ + enum nss_dynamic_interface_type if_type; + + if (nss_is_dynamic_interface(if_num) == false) { + return false; + } + + if_type = nss_dynamic_interface_get_type(nss_map_t_get_context(), if_num); + switch (if_type) { + case NSS_DYNAMIC_INTERFACE_TYPE_MAP_T_INNER: + case NSS_DYNAMIC_INTERFACE_TYPE_MAP_T_OUTER: + return true; + + default: + return false; + } +} + +/* + * nss_map_t_instance_debug_stats_sync + * debug stats for map_t + */ +void nss_map_t_instance_debug_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_map_t_sync_stats_msg *stats_msg, uint16_t if_num) +{ + int i; + enum nss_dynamic_interface_type if_type; + + if_type = nss_dynamic_interface_get_type(nss_ctx, if_num); + + spin_lock_bh(&nss_map_t_debug_stats_lock); + for (i = 0; i < NSS_MAX_MAP_T_DYNAMIC_INTERFACES; i++) { + if (nss_map_t_debug_stats[i].if_num != if_num) { + continue; + } + switch (if_type) { + case NSS_DYNAMIC_INTERFACE_TYPE_MAP_T_INNER: + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V4_TO_V6_PBUF_EXCEPTION] += + stats_msg->debug_stats.v4_to_v6.exception_pkts; + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V4_TO_V6_PBUF_NO_MATCHING_RULE] += + stats_msg->debug_stats.v4_to_v6.no_matching_rule; + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V4_TO_V6_PBUF_NOT_TCP_OR_UDP] += + stats_msg->debug_stats.v4_to_v6.not_tcp_or_udp; + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V4_TO_V6_RULE_ERR_LOCAL_PSID] += + stats_msg->debug_stats.v4_to_v6.rule_err_local_psid; + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V4_TO_V6_RULE_ERR_LOCAL_IPV6] += + stats_msg->debug_stats.v4_to_v6.rule_err_local_ipv6; + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V4_TO_V6_RULE_ERR_REMOTE_PSID] += + stats_msg->debug_stats.v4_to_v6.rule_err_remote_psid; + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V4_TO_V6_RULE_ERR_REMOTE_EA_BITS] += + stats_msg->debug_stats.v4_to_v6.rule_err_remote_ea_bits; + 
nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V4_TO_V6_RULE_ERR_REMOTE_IPV6] += + stats_msg->debug_stats.v4_to_v6.rule_err_remote_ipv6; + break; + + case NSS_DYNAMIC_INTERFACE_TYPE_MAP_T_OUTER: + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V6_TO_V4_PBUF_EXCEPTION] += + stats_msg->debug_stats.v6_to_v4.exception_pkts; + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V6_TO_V4_PBUF_NO_MATCHING_RULE] += + stats_msg->debug_stats.v6_to_v4.no_matching_rule; + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V6_TO_V4_PBUF_NOT_TCP_OR_UDP] += + stats_msg->debug_stats.v6_to_v4.not_tcp_or_udp; + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V6_TO_V4_RULE_ERR_LOCAL_IPV4] += + stats_msg->debug_stats.v6_to_v4.rule_err_local_ipv4; + nss_map_t_debug_stats[i].stats[NSS_MAP_T_STATS_V6_TO_V4_RULE_ERR_REMOTE_IPV4] += + stats_msg->debug_stats.v6_to_v4.rule_err_remote_ipv4; + break; + + default: + nss_warning("Invalid MAP-T interface encountered: %u\n", if_type); + break; + } + } + spin_unlock_bh(&nss_map_t_debug_stats_lock); +} + +/* + * nss_map_t_instance_debug_stats_get() + * Get map_t statitics. 
+ */ +void nss_map_t_instance_debug_stats_get(void *stats_mem) +{ + struct nss_map_t_stats_instance_debug *stats = (struct nss_map_t_stats_instance_debug *)stats_mem; + int i; + + if (!stats) { + nss_warning("No memory to copy map_t stats"); + return; + } + + spin_lock_bh(&nss_map_t_debug_stats_lock); + for (i = 0; i < NSS_MAX_MAP_T_DYNAMIC_INTERFACES; i++) { + if (nss_map_t_debug_stats[i].valid) { + memcpy(stats, &nss_map_t_debug_stats[i], sizeof(struct nss_map_t_stats_instance_debug)); + stats++; + } + } + spin_unlock_bh(&nss_map_t_debug_stats_lock); +} + +/* + * nss_map_t_handler() + * Handle NSS -> HLOS messages for map_t tunnel + */ +static void nss_map_t_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_map_t_msg *ntm = (struct nss_map_t_msg *)ncm; + void *ctx; + + nss_map_t_msg_callback_t cb; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + BUG_ON(!nss_map_t_verify_if_num(ncm->interface)); + + /* + * Trace Messages + */ + nss_map_t_log_rx_msg(ntm); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_MAP_T_MSG_MAX) { + nss_warning("%px: received invalid message %d for MAP-T interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_map_t_msg)) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return; + } + + switch (ntm->cm.type) { + case NSS_MAP_T_MSG_SYNC_STATS: + /* + * Update debug stats in stats msg and send statistics notifications to the registered modules + */ + nss_map_t_instance_debug_stats_sync(nss_ctx, &ntm->msg.stats, ncm->interface); + nss_map_t_stats_notify(nss_ctx, ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages, map_t sends all notify messages + * to the same callback/app_data. 
+ */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->map_t_msg_callback; + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].app_data; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * callback + */ + cb = (nss_map_t_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + /* + * call map-t callback + */ + if (!cb) { + nss_warning("%px: No callback for map-t interface %d", + nss_ctx, ncm->interface); + return; + } + + cb(ctx, ntm); +} + +/* + * nss_map_t_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_map_t_callback(void *app_data, struct nss_map_t_msg *nim) +{ + nss_map_t_msg_callback_t callback = (nss_map_t_msg_callback_t)nss_map_t_pvt.cb; + void *data = nss_map_t_pvt.app_data; + + nss_map_t_pvt.cb = NULL; + nss_map_t_pvt.app_data = NULL; + + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("map_t Error response %d\n", nim->cm.response); + nss_map_t_pvt.response = NSS_TX_FAILURE; + } else { + nss_map_t_pvt.response = NSS_TX_SUCCESS; + } + + if (callback) { + callback(data, nim); + } + + complete(&nss_map_t_pvt.complete); +} + +/* + * nss_map_t_tx() + * Transmit a map_t message to NSS firmware + */ +nss_tx_status_t nss_map_t_tx(struct nss_ctx_instance *nss_ctx, struct nss_map_t_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace Messages + */ + nss_map_t_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (!nss_map_t_verify_if_num(ncm->interface)) { + nss_warning("%px: tx request is not for a MAP-T dynamic interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_MAP_T_MSG_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_map_t_tx); + +/* + * nss_map_t_tx_sync() + * Transmit a MAP-T 
message to NSS firmware synchronously. + */ +nss_tx_status_t nss_map_t_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_map_t_msg *msg) +{ + nss_tx_status_t status; + int ret = 0; + + down(&nss_map_t_pvt.sem); + nss_map_t_pvt.cb = (void *)msg->cm.cb; + nss_map_t_pvt.app_data = (void *)msg->cm.app_data; + + msg->cm.cb = (nss_ptr_t)nss_map_t_callback; + msg->cm.app_data = (nss_ptr_t)NULL; + + status = nss_map_t_tx(nss_ctx, msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: map_t_tx_msg failed\n", nss_ctx); + up(&nss_map_t_pvt.sem); + return status; + } + ret = wait_for_completion_timeout(&nss_map_t_pvt.complete, msecs_to_jiffies(NSS_MAP_T_TX_TIMEOUT)); + + if (!ret) { + nss_warning("%px: MAP-T tx sync failed due to timeout\n", nss_ctx); + nss_map_t_pvt.response = NSS_TX_FAILURE; + } + + status = nss_map_t_pvt.response; + up(&nss_map_t_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_map_t_tx_sync); + +/* + *********************************** + * Register/Unregister/Miscellaneous APIs + *********************************** + */ + +/* + * nss_map_t_register_if() + */ +struct nss_ctx_instance *nss_map_t_register_if(uint32_t if_num, uint32_t type, nss_map_t_callback_t map_t_callback, + nss_map_t_msg_callback_t event_callback, struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.map_t_handler_id]; + int i = 0; + + nss_assert(nss_ctx); + nss_assert(nss_map_t_verify_if_num(if_num)); + + nss_core_register_subsys_dp(nss_ctx, if_num, map_t_callback, 0, netdev, netdev, features); + nss_ctx->subsys_dp_register[if_num].type = type; + + nss_top_main.map_t_msg_callback = event_callback; + + nss_core_register_handler(nss_ctx, if_num, nss_map_t_handler, NULL); + + spin_lock_bh(&nss_map_t_debug_stats_lock); + for (i = 0; i < NSS_MAX_MAP_T_DYNAMIC_INTERFACES; i++) { + if (!nss_map_t_debug_stats[i].valid) { + nss_map_t_debug_stats[i].valid = true; + nss_map_t_debug_stats[i].if_num = 
if_num; + nss_map_t_debug_stats[i].if_index = netdev->ifindex; + break; + } + } + spin_unlock_bh(&nss_map_t_debug_stats_lock); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_map_t_register_if); + +/* + * nss_map_t_unregister_if() + */ +void nss_map_t_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.map_t_handler_id]; + int i; + + nss_assert(nss_ctx); + nss_assert(nss_map_t_verify_if_num(if_num)); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_top_main.map_t_msg_callback = NULL; + + nss_core_unregister_handler(nss_ctx, if_num); + + spin_lock_bh(&nss_map_t_debug_stats_lock); + for (i = 0; i < NSS_MAX_MAP_T_DYNAMIC_INTERFACES; i++) { + if (nss_map_t_debug_stats[i].if_num == if_num) { + memset(&nss_map_t_debug_stats[i], 0, sizeof(struct nss_map_t_stats_instance_debug)); + break; + } + } + spin_unlock_bh(&nss_map_t_debug_stats_lock); +} +EXPORT_SYMBOL(nss_map_t_unregister_if); + +/* + * nss_get_map_t_context() + */ +struct nss_ctx_instance *nss_map_t_get_context() +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.map_t_handler_id]; +} +EXPORT_SYMBOL(nss_map_t_get_context); + +/* + * nss_map_t_msg_init() + * Initialize nss_map_t msg. 
+ */ +void nss_map_t_msg_init(struct nss_map_t_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_map_t_msg_init); + +/* + * nss_map_t_register_handler() + * debugfs stats msg handler received on static map_t interface + */ +void nss_map_t_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_map_t_get_context(); + + nss_info("nss_map_t_register_handler"); + sema_init(&nss_map_t_pvt.sem, 1); + init_completion(&nss_map_t_pvt.complete); + nss_core_register_handler(nss_ctx, NSS_MAP_T_INTERFACE, nss_map_t_handler, NULL); + + nss_map_t_stats_dentry_create(); + nss_map_t_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_map_t_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_log.c new file mode 100644 index 000000000..5f1ef397b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_log.c @@ -0,0 +1,151 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_map_t_log.c + * NSS MAP_T logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_map_t_log_message_types_str + * NSS MAP_T message strings + */ +static int8_t *nss_map_t_log_message_types_str[NSS_MAP_T_MSG_MAX] __maybe_unused = { + "MAP_T Rule Configure", + "MAP_T Rule Deconfigure", + "MAP_T Stats", +}; + +/* + * nss_map_t_log_rule_configure_msg() + * Log NSS MAP_T Rule Configure. + */ +static void nss_map_t_log_rule_configure_msg(struct nss_map_t_msg *ntm) +{ + struct nss_map_t_instance_rule_config_msg *ntcm __maybe_unused = &ntm->msg.create_msg; + nss_trace("%px: NSS MAP_T Rule Configure message \n" + "MAP_T Rule Seq Number: %d\n" + "MAP_T Total Number of Rules: %d\n" + "MAP_T Local IPv6 Prefix Length: %d\n" + "MAP_T Local IPv4 Prefix: %d\n" + "MAP_T Local IPv4 Prefix Length: %d\n" + "MAP_T Local EA Bits Length: %d\n" + "MAP_T Local PSID Offset: %d\n" + "MAP_T Reserved A: %d\n" + "MAP_T Remote IPv6 Prefix Length: %d\n" + "MAP_T Remote IPv4 Prefix: %d\n" + "MAP_T Remote IPv4 Prefix Length: %d\n" + "MAP_T Remote EA Bits Length: %d\n" + "MAP_T Remote PSID Offset: %d\n" + "MAP_T Local MAP Style: %d\n" + "MAP_T Remote Map Style: %d\n" + "MAP_T Local IPv6 Prefix: %px\n" + "MAP_T Reserved B: %px\n" + "MAP_T Remote IPv6 Prefix: %px\n" + "MAP_T Valid Rule: %d\n", + ntcm, ntcm->rule_num, ntcm->total_rules, + ntcm->local_ipv6_prefix_len, ntcm->local_ipv4_prefix, + ntcm->local_ipv4_prefix_len, ntcm->local_ea_len, + ntcm->local_psid_offset, ntcm->reserve_a, + ntcm->remote_ipv6_prefix_len, + ntcm->remote_ipv4_prefix, ntcm->remote_ipv4_prefix_len, + ntcm->remote_ea_len, ntcm->remote_psid_offset, + ntcm->local_map_style, ntcm->remote_map_style, + ntcm->local_ipv6_prefix, ntcm->reserve_b, + ntcm->remote_ipv6_prefix, ntcm->valid_rule); +} + +/* + * nss_map_t_log_rule_deconfig_msg() + * Log NSS MAP_T Rule Deconfigure. 
+ */ +static void nss_map_t_log_rule_deconfig_msg(struct nss_map_t_msg *ntm) +{ + struct nss_map_t_instance_rule_deconfig_msg *ntdm __maybe_unused = &ntm->msg.destroy_msg; + nss_trace("%px: NSS MAP_T Rule Deconfigure message \n" + "MAP_T Interface Number: %d\n", + ntdm, ntdm->if_number); +} + +/* + * nss_map_t_log_verbose() + * Log message contents. + */ +static void nss_map_t_log_verbose(struct nss_map_t_msg *ntm) +{ + switch (ntm->cm.type) { + case NSS_MAP_T_MSG_INSTANCE_RULE_CONFIGURE: + nss_map_t_log_rule_configure_msg(ntm); + break; + + case NSS_MAP_T_MSG_INSTANCE_RULE_DECONFIGURE: + nss_map_t_log_rule_deconfig_msg(ntm); + break; + + case NSS_MAP_T_MSG_SYNC_STATS: + /* + * No log for valid stats message. + */ + break; + + default: + nss_trace("%px: Invalid message type\n", ntm); + break; + } +} + +/* + * nss_map_t_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_map_t_log_tx_msg(struct nss_map_t_msg *ntm) +{ + if (ntm->cm.type >= NSS_MAP_T_MSG_MAX) { + nss_warning("%px: Invalid message type\n", ntm); + return; + } + + nss_info("%px: type[%d]:%s\n", ntm, ntm->cm.type, nss_map_t_log_message_types_str[ntm->cm.type]); + nss_map_t_log_verbose(ntm); +} + +/* + * nss_map_t_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_map_t_log_rx_msg(struct nss_map_t_msg *ntm) +{ + if (ntm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ntm); + return; + } + + if (ntm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ntm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ntm, ntm->cm.type, + nss_map_t_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + ntm, ntm->cm.type, nss_map_t_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response]); + +verbose: + nss_map_t_log_verbose(ntm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_map_t_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_log.h new file mode 100644 index 000000000..39448898d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_MAP_T_LOG_H +#define __NSS_MAP_T_LOG_H + +/* + * nss_map_t.h + * NSS MAP_T header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_map_t_log_tx_msg + * Logs a map_t message that is sent to the NSS firmware. + */ +void nss_map_t_log_tx_msg(struct nss_map_t_msg *ntm); + +/* + * nss_map_t_log_rx_msg + * Logs a map_t message that is received from the NSS firmware. + */ +void nss_map_t_log_rx_msg(struct nss_map_t_msg *ntm); + +#endif /* __NSS_MAP_T_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_map_t_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_stats.c new file mode 100644 index 000000000..2dc4d9fa6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_stats.c @@ -0,0 +1,154 @@ +/* + ************************************************************************** + * Copyright (c) 2017,2019-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_map_t_stats.h" +#include "nss_map_t_strings.h" + +/* + * Declare atomic notifier data structure for statistics. 
+ */ +ATOMIC_NOTIFIER_HEAD(nss_map_t_stats_notifier); + +/* + * nss_map_t_stats_read() + * Read map_t statistics + */ +static ssize_t nss_map_t_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + + uint32_t max_output_lines = 2 /* header & footer for instance stats */ + + NSS_MAX_MAP_T_DYNAMIC_INTERFACES * (NSS_MAP_T_STATS_MAX + 2) /*instance stats */ + + 2; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct net_device *dev; + struct nss_map_t_stats_instance_debug map_t_instance_stats[NSS_MAX_MAP_T_DYNAMIC_INTERFACES]; + int id, i; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + memset(&map_t_instance_stats, 0, sizeof(struct nss_map_t_stats_instance_debug) * NSS_MAX_MAP_T_DYNAMIC_INTERFACES); + + /* + * Get all stats + */ + nss_map_t_instance_debug_stats_get((void *)&map_t_instance_stats); + + /* + * Session stats + */ + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nmap_t instance stats start:\n\n"); + for (id = 0; id < NSS_MAX_MAP_T_DYNAMIC_INTERFACES; id++) { + + if (!map_t_instance_stats[id].valid) { + continue; + } + + dev = dev_get_by_index(&init_net, map_t_instance_stats[id].if_index); + if (likely(dev)) { + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id, + map_t_instance_stats[id].if_num, dev->name); + dev_put(dev); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. 
nss interface id=%d\n", id, + map_t_instance_stats[id].if_num); + } + + for (i = 0; i < NSS_MAP_T_STATS_MAX; i++) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "\t%s = %llu\n", nss_map_t_strings_instance_stats[i].stats_name, + map_t_instance_stats[id].stats[i]); + } + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nmap_t instance stats end\n"); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(lbuf); + return bytes_read; +} + +/* + * nss_map_t_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(map_t); + +/* + * nss_map_t_stats_dentry_create() + * Create map_t statistics debug entry. + */ +void nss_map_t_stats_dentry_create(void) +{ + nss_stats_create_dentry("map_t", &nss_map_t_stats_ops); +} + +/* + * nss_map_t_stats_notify() + * Sends notifications to the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_map_t_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_map_t_stats_notification map_t_stats; + struct nss_map_t_stats_instance_debug map_t_instance_stats[NSS_MAX_MAP_T_DYNAMIC_INTERFACES]; + int id; + + memset(&map_t_instance_stats, 0, sizeof(map_t_instance_stats)); + + /* + * Get all stats + */ + nss_map_t_instance_debug_stats_get((void *)&map_t_instance_stats); + + for (id = 0; id < NSS_MAX_MAP_T_DYNAMIC_INTERFACES; id++) { + if (map_t_instance_stats[id].if_num == if_num) { + memcpy(&map_t_stats.stats, &map_t_instance_stats[id].stats, sizeof(map_t_stats.stats)); + } + } + map_t_stats.if_type = nss_dynamic_interface_get_type(nss_ctx, if_num); + map_t_stats.core_id = nss_ctx->id; + map_t_stats.if_num = if_num; + atomic_notifier_call_chain(&nss_map_t_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&map_t_stats); +} + +/* + * nss_map_t_stats_register_notifier() + * Registers statistics notifier. 
+ */ +int nss_map_t_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_map_t_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_map_t_stats_register_notifier); + +/* + * nss_map_t_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_map_t_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_map_t_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_map_t_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_map_t_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_stats.h new file mode 100644 index 000000000..8fa623afc --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_stats.h @@ -0,0 +1,36 @@ +/* + ****************************************************************************** + * Copyright (c) 2017,2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * **************************************************************************** + */ + +#ifndef __NSS_MAP_T_STATS_H +#define __NSS_MAP_T_STATS_H + +/* + * NSS core stats -- for H2N/N2H map_t debug stats + */ +struct nss_map_t_stats_instance_debug { + uint64_t stats[NSS_MAP_T_STATS_MAX]; + int32_t if_index; + uint32_t if_num; /* nss interface number */ + bool valid; +}; + +/* + * MAP-T statistics APIs + */ +extern void nss_map_t_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_map_t_stats_dentry_create(void); + +#endif /* __NSS_MAP_T_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_map_t_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_strings.c new file mode 100644 index 000000000..90fbf7ec3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_strings.c @@ -0,0 +1,65 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" + +/* + * nss_map_t_strings_instance_stats + * MAP-T statistics strings for NSS session statistics. 
+ */ +struct nss_stats_info nss_map_t_strings_instance_stats[NSS_MAP_T_STATS_MAX] = { + {"V4_TO_V6_PBUF_EXCEPTION_PKTS", NSS_STATS_TYPE_EXCEPTION}, + {"V4_TO_V6_PBUF_NO_MATCHING_RULE", NSS_STATS_TYPE_SPECIAL}, + {"V4_TO_V6_PBUF_NOT_TCP_OR_UDP", NSS_STATS_TYPE_SPECIAL}, + {"V4_TO_V6_RULE_ERR_LOCAL_PSID", NSS_STATS_TYPE_ERROR}, + {"V4_TO_V6_RULE_ERR_LOCAL_IPV6", NSS_STATS_TYPE_ERROR}, + {"V4_TO_V6_RULE_ERR_REMOTE_PSID", NSS_STATS_TYPE_ERROR}, + {"V4_TO_V6_RULE_ERR_REMOTE_EA_BITS", NSS_STATS_TYPE_ERROR}, + {"V4_TO_V6_RULE_ERR_REMOTE_IPV6", NSS_STATS_TYPE_ERROR}, + {"V6_TO_V4_PBUF_EXCEPTION_PKTS", NSS_STATS_TYPE_EXCEPTION}, + {"V6_TO_V4_PBUF_NO_MATCHING_RULE", NSS_STATS_TYPE_SPECIAL}, + {"V6_TO_V4_PBUF_NOT_TCP_OR_UDP", NSS_STATS_TYPE_SPECIAL}, + {"V6_TO_V4_RULE_ERR_LOCAL_IPV4", NSS_STATS_TYPE_ERROR}, + {"V6_TO_V4_RULE_ERR_REMOTE_IPV4", NSS_STATS_TYPE_ERROR} +}; + +/* + * nss_map_t_strings_read() + * Read MAP-T node statistics names. + */ +static ssize_t nss_map_t_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_map_t_strings_instance_stats, NSS_MAP_T_STATS_MAX); +} + +/* + * nss_map_t_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(map_t); + +/* + * nss_map_t_strings_dentry_create() + * Create MAP-T statistics strings debug entry. + */ +void nss_map_t_strings_dentry_create(void) +{ + nss_strings_create_dentry("map_t", &nss_map_t_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_map_t_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_strings.h new file mode 100644 index 000000000..acf2cd0b2 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_map_t_strings.h @@ -0,0 +1,25 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_MAP_T_STRINGS_H +#define __NSS_MAP_T_STRINGS_H + +extern struct nss_stats_info nss_map_t_strings_instance_stats[NSS_MAP_T_STATS_MAX]; +extern void nss_map_t_strings_dentry_create(void); + +#endif /* __NSS_MAP_T_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_match.c b/feeds/ipq807x/qca-nss-drv/src/nss_match.c new file mode 100644 index 000000000..dcdfa6cba --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_match.c @@ -0,0 +1,299 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +/* + * nss_match.c + */ + +#include "nss_tx_rx_common.h" +#include "nss_match_log.h" +#include "nss_match_stats.h" +#include "nss_match_strings.h" + +#define NSS_MATCH_TX_TIMEOUT 1000 /* 1 Seconds */ + +/* + * Private data structure for match interface + */ +static struct nss_match_pvt { + struct semaphore sem; + struct completion complete; + int32_t response; +} match_pvt; + +/* + * nss_get_match_context() + */ +struct nss_ctx_instance *nss_match_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.match_handler_id]; +} +EXPORT_SYMBOL(nss_match_get_context); + +/* + * nss_match_verify_if_num() + */ +static bool nss_match_verify_if_num(uint32_t if_num) +{ + if (nss_is_dynamic_interface(if_num) == false) { + return false; + } + + if (nss_dynamic_interface_get_type(nss_match_get_context(), if_num) + != NSS_DYNAMIC_INTERFACE_TYPE_MATCH) { + return false; + } + + return true; +} + +/* + * nss_match_msg_sync_callback + * Callback to handle the completion of NSS to HLOS messages. + */ +static void nss_match_msg_sync_callback(void *app_data, struct nss_match_msg *matchm) +{ + match_pvt.response = NSS_TX_SUCCESS; + + if (matchm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("Match Error response %d\n", matchm->cm.response); + match_pvt.response = NSS_TX_FAILURE; + } + + complete(&match_pvt.complete); +} + +/* + * nss_match_msg_tx() + * Sends message to NSS. 
+ */ +static nss_tx_status_t nss_match_msg_tx(struct nss_ctx_instance *nss_ctx, struct nss_match_msg *matchm) +{ + struct nss_cmn_msg *ncm = &matchm->cm; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + /* + * Trace Messages + */ + nss_match_log_tx_msg(matchm); + + /* + * Sanity check the message + */ + if (!nss_match_verify_if_num(ncm->interface)) { + nss_warning("%px: Tx request for non dynamic interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_MATCH_MSG_MAX) { + nss_warning("%px: Message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, matchm, sizeof(*matchm), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_match_handler() + * Handle NSS to HLOS messages for Match node + */ +static void nss_match_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_match_msg *nem = (struct nss_match_msg *)ncm; + + nss_match_msg_sync_callback_t cb; + void *ctx; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + BUG_ON(!nss_match_verify_if_num(ncm->interface)); + + /* + * Trace Messages + */ + nss_match_log_rx_msg(nem); + + /* + * Is this a valid request/response packet? 
+ */ + if (nem->cm.type >= NSS_MATCH_MSG_MAX) { + nss_warning("%px: Received invalid message %d for MATCH interface", nss_ctx, nem->cm.type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_match_msg)) { + nss_warning("%px: Unexpected message length: %d, on interface: %d", + nss_ctx, nss_cmn_get_msg_len(ncm), ncm->interface); + return; + } + + switch (nem->cm.type) { + case NSS_MATCH_STATS_SYNC: + + /* + * Update common node statistics + */ + nss_match_stats_sync(nss_ctx, nem); + nss_match_stats_notify(nss_ctx, nem->cm.interface); + } + + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_match_msg_sync_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + cb(ctx, nem); +} + +/* + * nss_match_msg_tx_sync() + * Send a message to match node and wait for the response. + */ +nss_tx_status_t nss_match_msg_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_match_msg *matchm) +{ + nss_tx_status_t status; + int ret = 0; + down(&match_pvt.sem); + + matchm->cm.cb = (nss_ptr_t)nss_match_msg_sync_callback; + matchm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_match_msg_tx(nss_ctx, matchm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_match_msg_tx failed\n", nss_ctx); + up(&match_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&match_pvt.complete, msecs_to_jiffies(NSS_MATCH_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: MATCH tx failed due to timeout\n", nss_ctx); + match_pvt.response = NSS_TX_FAILURE; + } + + status = match_pvt.response; + up(&match_pvt.sem); + + return status; +} +EXPORT_SYMBOL(nss_match_msg_tx_sync); + +/* + * nss_match_unregister_instance() + * Unregisters match instance. 
+ */ +bool nss_match_unregister_instance(int if_num) +{ + struct nss_ctx_instance *nss_ctx; + uint32_t status; + + nss_ctx = nss_match_get_context(); + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_match_verify_if_num(if_num)) { + nss_warning("%px: Incorrect interface number: %d", nss_ctx, if_num); + return false; + } + + nss_core_unregister_handler(nss_ctx, if_num); + status = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for interface %d with NSS core\n", nss_ctx, if_num); + return false; + } + + nss_match_ifnum_delete(if_num); + + return true; +} +EXPORT_SYMBOL(nss_match_unregister_instance); + +/* + * nss_match_register_instance() + * Registers match instance. + */ +struct nss_ctx_instance *nss_match_register_instance(int if_num, nss_match_msg_sync_callback_t notify_cb) +{ + struct nss_ctx_instance *nss_ctx; + uint32_t status; + + nss_ctx = nss_match_get_context(); + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_match_verify_if_num(if_num)) { + nss_warning("%px: Incorrect interface number: %d", nss_ctx, if_num); + return NULL; + } + + nss_core_register_handler(nss_ctx, if_num, nss_match_handler, NULL); + status = nss_core_register_msg_handler(nss_ctx, if_num, (nss_if_rx_msg_callback_t)notify_cb); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to register handler for interface %d with NSS core\n", nss_ctx, if_num); + return NULL; + } + + if (!nss_match_ifnum_add(if_num)) { + nss_warning("%px: Unable to add match inteface : %u\n", nss_ctx, if_num); + nss_core_unregister_handler(nss_ctx, if_num); + nss_core_unregister_msg_handler(nss_ctx, if_num); + return NULL; + } + + return nss_ctx; +} +EXPORT_SYMBOL(nss_match_register_instance); + +/* + * nss_match_msg_init() + * Initialize match message. 
+ */ +void nss_match_msg_init(struct nss_match_msg *nmm, uint16_t if_num, uint32_t type, uint32_t len, + nss_match_msg_sync_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&nmm->cm, if_num, type, len, (void*)cb, app_data); +} +EXPORT_SYMBOL(nss_match_msg_init); + +/* + * nss_match_init() + * Initialize match. + */ +void nss_match_init() +{ + nss_match_stats_dentry_create(); + nss_match_strings_dentry_create(); + sema_init(&match_pvt.sem, 1); + init_completion(&match_pvt.complete); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_match_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_match_log.c new file mode 100644 index 000000000..2afdf6425 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_match_log.c @@ -0,0 +1,225 @@ +/* + *************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +/* + * nss_match_log.c + * NSS match logger file. + */ + +#include "nss_core.h" + +/* + * nss_match_log_message_types_str + * Match message strings. 
+ */ +static char *nss_match_log_message_types_str[NSS_MATCH_MSG_MAX] __maybe_unused = { + "Match no message", + "Match profile configure", + "Match add VoW rule", + "Match add L2 rule", + "Match delete VoW rule", + "Match delete L2 rule", + "Match sync stats" +}; + +/* + * nss_match_log_error_types_str + * Strings for error types for match messages + */ +static char *nss_match_log_error_types_str[NSS_MATCH_ERROR_MAX] __maybe_unused = { + "Match success", + "Match unknown message", + "Match DSCP is not in the range", + "Match 802.1p outer is not in the range", + "Match 802.1p inner is not in the range", + "Match rule ID is not in the range", + "Match action type is not in the range", + "Match rule ID already exists", + "Match rule ID doesn't exists", + "Match instance already configured", + "Match profile configuration message is invalid", + "Match database initialzation failed", + "Match table ID is not in the range", + "Match error in adding rule", + "Match error in deleting rule", + "Match error in adding table", + "Match error in deleting table", + "Match error mask ID is not in the range", + "Match error next node interface number is invalid", +}; + +/* + * nss_match_log_profile_configure_msg() + * Log NSS match profile configuration message. + */ +static void nss_match_log_profile_configure_msg(struct nss_match_msg *nmm) +{ + struct nss_match_profile_configure_msg *nmcm __maybe_unused = &nmm->msg.configure_msg; + int mask_num, mask_word; + + nss_trace("%px: NSS match configuration message \n" + "Match profile type: %u \n" + "Match mask flag: %u \n", + nmcm, + nmcm->profile_type, + nmcm->valid_mask_flag); + + for (mask_num = 0; mask_num < NSS_MATCH_MASK_MAX; mask_num++) { + nss_trace("Match mask number %d\n", mask_num + 1); + for (mask_word = 0; mask_word < NSS_MATCH_MASK_WORDS_MAX; mask_word++) { + nss_trace("%x ", nmcm->maskset[mask_num][mask_word]); + } + } +} + +/* + * nss_match_log_vow_rule_msg() + * Log NSS match VoW rule message. 
+ */ +static void nss_match_log_vow_rule_msg(struct nss_match_msg *nmm) +{ + struct nss_match_rule_vow_msg *nmvrm __maybe_unused = &nmm->msg.vow_rule; + nss_trace("%px: NSS match VoW rule message \n" + "Match rule id: %hu \n" + "Match mask id: %hu \n" + "Match action: action flag = %u, next node = %u, priority = %hu \n" + "Match interface number: %u \n" + "Match DSCP: %hu \n" + "Match outer_8021p: %hu \n" + "Match inner_8021p: %hu \n", + nmvrm, + nmvrm->rule_id, + nmvrm->mask_id, + nmvrm->action.action_flag, nmvrm->action.forward_ifnum, nmvrm->action.setprio, + nmvrm->if_num, + nmvrm->dscp, + nmvrm->outer_8021p, + nmvrm->inner_8021p); +} + +/* + * nss_match_log_l2_rule_msg() + * Log NSS match L2 rule message. + */ +static void nss_match_log_l2_rule_msg(struct nss_match_msg *nmm) +{ + struct nss_match_rule_l2_msg *nmlrm __maybe_unused = &nmm->msg.l2_rule; + nss_trace("%px: NSS match L2 rule message \n" + "Match rule id: %hu \n" + "Match mask id: %hu \n" + "Match action: action flag = %u, next node = %u, priority = %hu \n" + "Match interface number: %u \n" + "Match destination mac address: %x :%x :%x \n" + "Match source mac address: %x :%x :%x \n" + "Match ether type: %x \n", + nmlrm, + nmlrm->rule_id, + nmlrm->mask_id, + nmlrm->action.action_flag, nmlrm->action.forward_ifnum, nmlrm->action.setprio, + nmlrm->if_num, + nmlrm->dmac[0], nmlrm->dmac[1], nmlrm->dmac[2], + nmlrm->smac[0], nmlrm->smac[1], nmlrm->smac[2], + nmlrm->ethertype); + +} + +/* + * nss_clmap_log_verbose() + * Log message contents. 
+ */ +static void nss_match_log_verbose(struct nss_match_msg *nmm) +{ + switch (nmm->cm.type) { + case NSS_MATCH_TABLE_CONFIGURE_MSG: + nss_match_log_profile_configure_msg(nmm); + break; + + case NSS_MATCH_ADD_VOW_RULE_MSG: + nss_match_log_vow_rule_msg(nmm); + break; + + case NSS_MATCH_ADD_L2_RULE_MSG: + nss_match_log_l2_rule_msg(nmm); + break; + + case NSS_MATCH_DELETE_VOW_RULE_MSG: + nss_match_log_vow_rule_msg(nmm); + break; + + case NSS_MATCH_DELETE_L2_RULE_MSG: + nss_match_log_l2_rule_msg(nmm); + break; + + case NSS_MATCH_STATS_SYNC: + break; + + default: + nss_trace("%px: Invalid message type\n", nmm); + break; + } +} + +/* + * nss_match_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_match_log_tx_msg(struct nss_match_msg *nmm) +{ + if (nmm->cm.type >= NSS_MATCH_MSG_MAX) { + nss_warning("%px: Invalid message type\n", nmm); + return; + } + + nss_info("%px: type[%d]:%s\n", nmm, nmm->cm.type, nss_match_log_message_types_str[nmm->cm.type]); + nss_match_log_verbose(nmm); +} + +/* + * nss_match_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_match_log_rx_msg(struct nss_match_msg *nmm) +{ + if (nmm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nmm); + return; + } + + if (nmm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nmm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nmm, nmm->cm.type, + nss_match_log_message_types_str[nmm->cm.type], + nmm->cm.response, nss_cmn_response_str[nmm->cm.response]); + goto verbose; + } + + if (nmm->cm.error >= NSS_MATCH_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nmm, nmm->cm.type, nss_match_log_message_types_str[nmm->cm.type], + nmm->cm.response, nss_cmn_response_str[nmm->cm.response], + nmm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nmm, nmm->cm.type, nss_match_log_message_types_str[nmm->cm.type], + nmm->cm.response, nss_cmn_response_str[nmm->cm.response], + nmm->cm.error, nss_match_log_error_types_str[nmm->cm.error]); + +verbose: + nss_match_log_verbose(nmm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_match_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_match_log.h new file mode 100644 index 000000000..df5b8a9e3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_match_log.h @@ -0,0 +1,39 @@ +/* + ****************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_MATCH_LOG_H__ +#define __NSS_MATCH_LOG_H__ + +/** + * nss_match_log.h + * NSS match Log Header File. + */ + +/* + * nss_match_log_tx_msg + * Logs a match message that is sent to the NSS firmware. + */ +void nss_match_log_tx_msg(struct nss_match_msg *nmm); + +/* + * nss_match_log_rx_msg + * Logs a match message that is received from the NSS firmware. + */ +void nss_match_log_rx_msg(struct nss_match_msg *nmm); + +#endif /* __NSS_MATCH_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_match_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_match_stats.c new file mode 100644 index 000000000..29782342f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_match_stats.c @@ -0,0 +1,245 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +/* + * nss_match_stats.c + */ +#include "nss_core.h" +#include "nss_stats.h" +#include +#include "nss_match_stats.h" +#include "nss_match_strings.h" + +#define NSS_MATCH_STATS_SIZE_PER_IF (NSS_STATS_MAX_STR_LENGTH * NSS_STATS_NODE_MAX) + /* Total number of statistics per match interface. */ + +int match_ifnum[NSS_MATCH_INSTANCE_MAX] = {0}; +uint64_t nss_match_stats[NSS_MATCH_INSTANCE_MAX][NSS_MATCH_STATS_MAX]; +static DEFINE_SPINLOCK(nss_match_stats_lock); + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_match_stats_notifier); + +/* + * nss_match_stats_read() + * Read match node statiistics. + */ +static ssize_t nss_match_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats + + * few blank lines for banner printing + Number of Extra outputlines + * for future reference to add new stats + */ + uint32_t max_output_lines = NSS_MATCH_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines * NSS_MATCH_INSTANCE_MAX; + ssize_t bytes_read = 0; + size_t size_wr = 0; + uint32_t if_num; + int index; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "match stats", NSS_STATS_SINGLE_CORE); + + /* + * Common node stats for each match dynamic interface. 
+ */ + for (index = 0; index < NSS_MATCH_INSTANCE_MAX; index++) { + + spin_lock_bh(&nss_match_stats_lock); + if_num = match_ifnum[index]; + spin_unlock_bh(&nss_match_stats_lock); + + if (if_num) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\nMatch node if_num:%03u", if_num); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n ---------------------- \n"); + size_wr += nss_stats_print("match", NULL, NSS_STATS_SINGLE_INSTANCE, nss_match_strings_stats, + nss_match_stats[index], NSS_MATCH_STATS_MAX, lbuf, size_wr, size_al); + continue; + } + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + return bytes_read; +} + + +/* + * nss_match_stats_sync() + * Update match common node statistics. + */ +void nss_match_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_match_msg *nmm) +{ + struct nss_match_stats_sync *ndccs = &nmm->msg.stats; + uint64_t *ctx_stats; + uint32_t *msg_stats; + uint32_t if_num; + uint16_t i = 0; + int index; + + for (index = 0; index < NSS_MATCH_INSTANCE_MAX; index++) { + spin_lock_bh(&nss_match_stats_lock); + if_num = match_ifnum[index]; + spin_unlock_bh(&nss_match_stats_lock); + + if (if_num == nmm->cm.interface) { + break; + } + } + + if (index == NSS_MATCH_INSTANCE_MAX) { + nss_warning("Invalid Match index\n"); + return; + } + + spin_lock_bh(&nss_match_stats_lock); + msg_stats = (uint32_t *)ndccs; + ctx_stats = nss_match_stats[index]; + + for (i = 0; i < NSS_MATCH_STATS_MAX; i++, ctx_stats++, msg_stats++) { + *ctx_stats += *msg_stats; + } + + spin_unlock_bh(&nss_match_stats_lock); +} + +/* + * nss_match_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(match) + +/* + * nss_match_ifnum_add() + * Add match node interface ID. 
+ */ +bool nss_match_ifnum_add(int if_num) +{ + int index = 0; + + spin_lock_bh(&nss_match_stats_lock); + + for (index = 0; index < NSS_MATCH_INSTANCE_MAX; index++) { + if (match_ifnum[index]) { + continue; + } + + match_ifnum[index] = if_num; + + spin_unlock_bh(&nss_match_stats_lock); + return true; + } + + spin_unlock_bh(&nss_match_stats_lock); + return false; +} + +/* + * nss_match_ifnum_delete() + * Delete match node interface ID. + */ +bool nss_match_ifnum_delete(int if_num) +{ + int index = 0; + + spin_lock_bh(&nss_match_stats_lock); + + for (index = 0; index < NSS_MATCH_INSTANCE_MAX; index++) { + if (match_ifnum[index] != if_num) { + continue; + } + + match_ifnum[index] = 0; + + spin_unlock_bh(&nss_match_stats_lock); + return true; + } + + spin_unlock_bh(&nss_match_stats_lock); + return false; +} + +/* + * nss_match_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_match_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_match_stats_notification match_stats; + uint32_t interface; + int index; + + match_stats.core_id = nss_ctx->id; + match_stats.if_num = if_num; + + for (index = 0; index < NSS_MATCH_INSTANCE_MAX; index++) { + spin_lock_bh(&nss_match_stats_lock); + interface = match_ifnum[index]; + spin_unlock_bh(&nss_match_stats_lock); + + if (interface == if_num) { + break; + } + } + + if (index == NSS_MATCH_INSTANCE_MAX) { + nss_warning("Invalid Match index\n"); + return; + } + + spin_lock_bh(&nss_match_stats_lock); + memcpy(match_stats.stats_ctx, nss_match_stats[index], sizeof(match_stats.stats_ctx)); + spin_unlock_bh(&nss_match_stats_lock); + atomic_notifier_call_chain(&nss_match_stats_notifier, NSS_STATS_EVENT_NOTIFY, &match_stats); +} + +/* + * nss_match_stats_unregister_notifier() + * Deregisters statistics notifier. 
+ */ +int nss_match_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_match_stats_notifier, nb); +} + +/* + * nss_match_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_match_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_match_stats_notifier, nb); +} + +/* + * nss_match_stats_dentry_create() + * Create match statistics debug entry. + */ +void nss_match_stats_dentry_create(void) +{ + nss_stats_create_dentry("match", &nss_match_stats_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_match_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_match_stats.h new file mode 100644 index 000000000..3cbc74629 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_match_stats.h @@ -0,0 +1,81 @@ +/* + *************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#ifndef __NSS_MATCH_STATS_H__ +#define __NSS_MATCH_STATS_H__ + +/** + * nss_match_stats_types + * Match statistics types. 
+ */ +enum nss_match_stats_types { + NSS_MATCH_STATS_HIT_COUNT_0 = NSS_STATS_NODE_MAX, + /**< Hit count of rule ID 1. */ + NSS_MATCH_STATS_HIT_COUNT_1, /**< Hit count of rule ID 2. */ + NSS_MATCH_STATS_HIT_COUNT_2, /**< Hit count of rule ID 3. */ + NSS_MATCH_STATS_HIT_COUNT_3, /**< Hit count of rule ID 4. */ + NSS_MATCH_STATS_HIT_COUNT_4, /**< Hit count of rule ID 5. */ + NSS_MATCH_STATS_HIT_COUNT_5, /**< Hit count of rule ID 6. */ + NSS_MATCH_STATS_HIT_COUNT_6, /**< Hit count of rule ID 7. */ + NSS_MATCH_STATS_HIT_COUNT_7, /**< Hit count of rule ID 8. */ + NSS_MATCH_STATS_HIT_COUNT_8, /**< Hit count of rule ID 9. */ + NSS_MATCH_STATS_HIT_COUNT_9, /**< Hit count of rule ID 10. */ + NSS_MATCH_STATS_HIT_COUNT_10, /**< Hit count of rule ID 11. */ + NSS_MATCH_STATS_HIT_COUNT_11, /**< Hit count of rule ID 12. */ + NSS_MATCH_STATS_HIT_COUNT_12, /**< Hit count of rule ID 13. */ + NSS_MATCH_STATS_HIT_COUNT_13, /**< Hit count of rule ID 14. */ + NSS_MATCH_STATS_HIT_COUNT_14, /**< Hit count of rule ID 15. */ + NSS_MATCH_STATS_HIT_COUNT_15, /**< Hit count of rule ID 16. */ + NSS_MATCH_STATS_HIT_COUNT_16, /**< Hit count of rule ID 17. */ + NSS_MATCH_STATS_HIT_COUNT_17, /**< Hit count of rule ID 18. */ + NSS_MATCH_STATS_HIT_COUNT_18, /**< Hit count of rule ID 19. */ + NSS_MATCH_STATS_HIT_COUNT_19, /**< Hit count of rule ID 20. */ + NSS_MATCH_STATS_HIT_COUNT_20, /**< Hit count of rule ID 21. */ + NSS_MATCH_STATS_HIT_COUNT_21, /**< Hit count of rule ID 22. */ + NSS_MATCH_STATS_HIT_COUNT_22, /**< Hit count of rule ID 23. */ + NSS_MATCH_STATS_HIT_COUNT_23, /**< Hit count of rule ID 24. */ + NSS_MATCH_STATS_HIT_COUNT_24, /**< Hit count of rule ID 25. */ + NSS_MATCH_STATS_HIT_COUNT_25, /**< Hit count of rule ID 26. */ + NSS_MATCH_STATS_HIT_COUNT_26, /**< Hit count of rule ID 27. */ + NSS_MATCH_STATS_HIT_COUNT_27, /**< Hit count of rule ID 28. */ + NSS_MATCH_STATS_HIT_COUNT_28, /**< Hit count of rule ID 29. */ + NSS_MATCH_STATS_HIT_COUNT_29, /**< Hit count of rule ID 30. 
*/ + NSS_MATCH_STATS_HIT_COUNT_30, /**< Hit count of rule ID 31. */ + NSS_MATCH_STATS_HIT_COUNT_31, /**< Hit count of rule ID 32. */ + NSS_MATCH_STATS_MAX, /**< Maximum statistics type. */ +}; + +/** + * nss_match_stats_notification + * Match transmission statistics structure. + */ +struct nss_match_stats_notification { + uint64_t stats_ctx[NSS_MATCH_STATS_MAX]; /**< Context transmission statistics. */ + uint32_t core_id; /**< Core ID. */ + uint32_t if_num; /**< Interface number. */ +}; + +extern bool nss_match_ifnum_add(int if_num); +extern bool nss_match_ifnum_delete(int if_num); +extern void nss_match_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_match_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_match_msg *nmm); +extern void nss_match_stats_dentry_create(void); +extern int nss_match_stats_unregister_notifier(struct notifier_block *nb); +extern int nss_match_stats_register_notifier(struct notifier_block *nb); + +#endif /* __NSS_MATCH_STATS_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_match_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_match_strings.c new file mode 100644 index 000000000..67d8451dd --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_match_strings.c @@ -0,0 +1,92 @@ +/* + *************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_match_strings.h" + +/* + * nss_match_strings_stats + * match statistics strings. + */ +struct nss_stats_info nss_match_strings_stats[NSS_MATCH_STATS_MAX] = { + {"rx_pkts", NSS_STATS_TYPE_COMMON}, + {"rx_byts", NSS_STATS_TYPE_COMMON}, + {"tx_pkts", NSS_STATS_TYPE_COMMON}, + {"tx_byts", NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops", NSS_STATS_TYPE_DROP}, + {"hit_count[0]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[1]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[2]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[3]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[4]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[5]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[6]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[7]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[8]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[9]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[10]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[11]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[12]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[13]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[14]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[15]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[16]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[17]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[18]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[19]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[20]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[21]", NSS_STATS_TYPE_SPECIAL}, + 
{"hit_count[22]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[23]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[24]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[25]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[26]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[27]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[28]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[29]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[30]", NSS_STATS_TYPE_SPECIAL}, + {"hit_count[31]", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_match_stats_strings_read() + * Read match statistics names + */ +static ssize_t nss_match_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_match_strings_stats, NSS_MATCH_STATS_MAX); +} + +/* + * nss_match_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(match); + +/* + * nss_match_strings_dentry_create() + * Create match statistics strings debug entry. + */ +void nss_match_strings_dentry_create(void) +{ + nss_strings_create_dentry("match", &nss_match_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_match_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_match_strings.h new file mode 100644 index 000000000..9eb9f621a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_match_strings.h @@ -0,0 +1,27 @@ +/* + *************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#ifndef __NSS_MATCH_STRINGS_H +#define __NSS_MATCH_STRINGS_H + +#include "nss_match_stats.h" + +extern struct nss_stats_info nss_match_strings_stats[NSS_MATCH_STATS_MAX]; +extern void nss_match_strings_dentry_create(void); + +#endif /* __NSS_MATCH_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_meminfo.c b/feeds/ipq807x/qca-nss-drv/src/nss_meminfo.c new file mode 100644 index 000000000..e24e6be4e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_meminfo.c @@ -0,0 +1,798 @@ +/* + * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* + * nss_meminfo.c + * NSS meminfo subsystem + */ + +#include +#include "nss_tx_rx_common.h" +#include "nss_core.h" +#include "nss_arch.h" +#include "nss_meminfo.h" + +/* + * Store user configuration + */ +static char nss_meminfo_user_config[NSS_MEMINFO_USER_CONFIG_MAXLEN]; +module_param_string(meminfo_user_config, nss_meminfo_user_config, + NSS_MEMINFO_USER_CONFIG_MAXLEN, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP); +MODULE_PARM_DESC(nss_meminfo_user_config, "meminfo user configuration"); + +static bool nss_meminfo_debugfs_exist; + +/* + * Name table of memory type presented to user. + */ +char *nss_meminfo_memtype_table[NSS_MEMINFO_MEMTYPE_MAX] = {"IMEM", "SDRAM", "UTCM_SHARED"}; + +/* + * nss_meminfo_alloc_sdram() + * Allocate a SDRAM block. + */ +static void *nss_meminfo_alloc_sdram(struct nss_ctx_instance *nss_ctx, uint32_t size) +{ + void *addr = 0; + + /* + * kmalloc() return cache line aligned buffer. + */ + addr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); + if (!addr) + nss_info_always("%px: failed to alloc a sdram block of size %u\n", nss_ctx, size); + + kmemleak_not_leak((void *)addr); + return addr; +} + +/* + * nss_meminfo_free_sdram() + * Free SDRAM memory. + */ +static inline void nss_meminfo_free_sdram(struct nss_ctx_instance *nss_ctx, uint32_t dma_addr, + void *kern_addr, uint32_t size) +{ + /* + * Unmap it since every SDRAM memory had been mapped. + */ + dma_unmap_single(nss_ctx->dev, dma_addr, size, DMA_FROM_DEVICE); + kfree(kern_addr); +} + +/* + * nss_meminfo_alloc_imem() + * Allocate an IMEM block in a sequential way. + */ +static uint32_t nss_meminfo_alloc_imem(struct nss_ctx_instance *nss_ctx, uint32_t size, int alignment) +{ + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + uint32_t new_tail; + uint32_t addr = 0; + int mask; + + mask = alignment - 1; + + /* + * Alignment has to be a power of 2. + */ + nss_assert(!(alignment & mask)); + + new_tail = mem_ctx->imem_tail; + + /* + * Align up the address if it not aligned. 
+ */ + if (new_tail & mask) + new_tail = (new_tail + mask) & ~mask; + + if (size > (mem_ctx->imem_end - new_tail)) { + nss_info_always("%px: failed to alloc an IMEM block of size %u\n", nss_ctx, size); + return addr; + } + + addr = new_tail; + mem_ctx->imem_tail = new_tail + size; + + return addr; +} + +/* + * nss_meminfo_free_imem() + * Free an IMEM block. Ignore the padding bytes for alignment requirement. + */ +static void nss_meminfo_free_imem(struct nss_ctx_instance *nss_ctx, uint32_t addr, uint32_t size) +{ + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + mem_ctx->imem_tail -= size; +} + +/* + * nss_meminfo_alloc_utcm_shared() + * Allocate an UTCM_SHARED block in a sequential way. + */ +static uint32_t nss_meminfo_alloc_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t size, int alignment) +{ + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + uint32_t new_tail; + uint32_t addr = 0; + int mask; + + mask = alignment - 1; + + /* + * Alignment has to be a power of 2. + */ + nss_assert(!(alignment & mask)); + + new_tail = mem_ctx->utcm_shared_tail; + + /* + * Align up the address if it not aligned. + */ + if (new_tail & mask) + new_tail = (new_tail + mask) & ~mask; + + if (size > (mem_ctx->utcm_shared_end - new_tail)) { + nss_info_always("%px: failed to alloc an UTCM_SHARED block of size %u\n", nss_ctx, size); + return addr; + } + + addr = new_tail; + mem_ctx->utcm_shared_tail = new_tail + size; + + return addr; +} + +/* + * nss_meminfo_free_utcm_shared() + * Free an UTCM_SHARED block. Ignore the padding bytes for alignment requirement. + */ +static void nss_meminfo_free_utcm_shared(struct nss_ctx_instance *nss_ctx, uint32_t addr, uint32_t size) +{ + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + mem_ctx->utcm_shared_tail -= size; +} + +/* + * nss_meminfo_if_user_overwrite() + * Return user configured memory type. Otherwise, return -1. 
+ */ +static int nss_meminfo_if_user_overwrite(struct nss_ctx_instance *nss_ctx, const char *name) +{ + char *user_config; + char **mtype_table; + char needle[NSS_MEMINFO_BLOCK_NAME_MAXLEN + 6]; + char user_choice[NSS_MEMINFO_MEMTYPE_NAME_MAXLEN]; + int i; + char *p; + + user_config = nss_meminfo_user_config; + mtype_table = nss_meminfo_memtype_table; + + snprintf(needle, sizeof(needle), "<%1d, %s, ", nss_ctx->id, name); + + p = strstr(user_config, needle); + if (!p) + return -1; + + p += strlen(needle); + + for (i = 0; i < NSS_MEMINFO_MEMTYPE_NAME_MAXLEN - 1; i++) { + /* + * Each user config is like , + * it starts with '<' and ends with '>'. + */ + if (*p == '>' || *p == '\0') + break; + user_choice[i] = *p; + p++; + } + + user_choice[i] = '\0'; + + for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++) + if (!strcmp(mtype_table[i], user_choice)) + return i; + + return -1; +} + +/* + * nss_meminfo_free_block_lists() + * Free block node and memory associated with each each memory object. + */ +static void nss_meminfo_free_block_lists(struct nss_ctx_instance *nss_ctx) +{ + struct nss_meminfo_ctx *mem_ctx; + struct nss_meminfo_block_list *l; + int i; + + mem_ctx = &nss_ctx->meminfo_ctx; + for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++) { + struct nss_meminfo_block *b; + l = &mem_ctx->block_lists[i]; + b = l->head; + while (b) { + struct nss_meminfo_block *tmp; + /* + * Free IMEM/SDRAM/UTCM_SHARED memory. + */ + switch (i) { + case NSS_MEMINFO_MEMTYPE_IMEM: + nss_meminfo_free_imem(nss_ctx, b->dma_addr, b->size); + break; + case NSS_MEMINFO_MEMTYPE_SDRAM: + nss_meminfo_free_sdram(nss_ctx, b->dma_addr, b->kern_addr, b->size); + break; + case NSS_MEMINFO_MEMTYPE_UTCM_SHARED: + nss_meminfo_free_utcm_shared(nss_ctx, b->dma_addr, b->size); + break; + } + + /* + * Free the struct nss_meminfo_block itself. + */ + tmp = b; + b = b->next; + kfree(tmp); + } + } +} + +/* + * nss_meminfo_init_block_lists() + * Initialize block lists and allocate memory for each block. 
+ */ +static bool nss_meminfo_init_block_lists(struct nss_ctx_instance *nss_ctx) +{ + /* + * There is no corresponding mapped address in kernel for UTCM_SHARED. + * UTCM_SHARED access from kernel is not allowed. Mem Objects requesting + * UTCM_SHARED are not expected to use any kernel mapped address. + * Was for UTCM_SHARED, but move to here as default especially for KW scan. + * Thus, NSS_MEMINFO_POISON is the default value for non-mappable memory request. + */ + void *kern_addr = (void *)NSS_MEMINFO_POISON; + uint32_t dma_addr = 0; + struct nss_meminfo_ctx *mem_ctx; + struct nss_meminfo_block_list *l; + struct nss_meminfo_request *r; + struct nss_meminfo_map *map; + int mtype; + int i; + + mem_ctx = &nss_ctx->meminfo_ctx; + + /* + * Fill memory type for each block list. + */ + for (i = 0; i < NSS_MEMINFO_MEMTYPE_MAX; i++) + mem_ctx->block_lists[i].memtype = i; + + map = &mem_ctx->meminfo_map; + + /* + * Loop through all meminfo requests by checking the per-request magic. + */ + for (r = map->requests; r->magic == NSS_MEMINFO_REQUEST_MAGIC; r++) { + struct nss_meminfo_block *b = (struct nss_meminfo_block *) + kmalloc(sizeof(struct nss_meminfo_block), GFP_KERNEL); + if (!b) { + nss_info_always("%px: failed to allocate meminfo block\n", nss_ctx); + goto cleanup; + } + + b->index = map->num_requests++; + b->size = r->size; + + /* + * Look up the user-defined memory type. + * Return user-defined memory type if exists. Otherwise, return -1. + */ + mtype = nss_meminfo_if_user_overwrite(nss_ctx, r->name); + if (mtype == -1) + mtype = r->memtype_default; + r->memtype_user = mtype; + + switch (mtype) { + case NSS_MEMINFO_MEMTYPE_IMEM: + /* + * For SOC's where TCM is not present + */ + if (!nss_ctx->vphys) { + nss_info_always("%px:IMEM requested but TCM not defined " + "for this SOC\n", nss_ctx); + goto cleanup; + } + + /* + * Return SoC real address for IMEM as DMA address. 
+ */ + dma_addr = nss_meminfo_alloc_imem(nss_ctx, r->size, r->alignment); + if (!dma_addr) { + nss_info_always("%px: failed to alloc IMEM block\n", nss_ctx); + goto cleanup; + } + + /* + * Calculate offset to the kernel address (vmap) where the + * whole IMEM is mapped onto instead of calling ioremap(). + */ + kern_addr = nss_ctx->vmap + dma_addr - nss_ctx->vphys; + break; + case NSS_MEMINFO_MEMTYPE_SDRAM: + kern_addr = nss_meminfo_alloc_sdram(nss_ctx, r->size); + if (!kern_addr) { + nss_info_always("%px: failed to alloc SDRAM block\n", nss_ctx); + goto cleanup; + } + + dma_addr = dma_map_single(nss_ctx->dev, kern_addr, r->size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(nss_ctx->dev, dma_addr))) { + nss_info_always("%px: failed to map SDRAM block\n", nss_ctx); + goto cleanup; + } + break; + case NSS_MEMINFO_MEMTYPE_UTCM_SHARED: + /* + * Return SoC real address for UTCM_SHARED as DMA address. + */ + dma_addr = nss_meminfo_alloc_utcm_shared(nss_ctx, r->size, r->alignment); + if (!dma_addr) { + nss_info_always("%px: failed to alloc UTCM_SHARED block\n", nss_ctx); + goto cleanup; + } + break; + case NSS_MEMINFO_MEMTYPE_INFO: + /* + * if FW request heap_ddr_size, fill it in from DTS values. + */ + if (!strcmp(r->name, "heap_ddr_size")) { + struct nss_mmu_ddr_info coreinfo; + r->size = nss_core_ddr_info(&coreinfo); + + /* + * split memory among the number of cores + */ + r->size /= coreinfo.num_active_cores; + dma_addr = coreinfo.start_address + nss_ctx->id * r->size; + nss_info_always("%px: NSS core %d DDR from %x to %x\n", nss_ctx, + nss_ctx->id, dma_addr, dma_addr + r->size); + } + break; + default: + nss_info_always("%px: %d unsupported memory type\n", nss_ctx, mtype); + goto cleanup; + } + + /* + * Update the request with DMA address for the memory that only be used by FW. 
+ */ + r->addr = dma_addr; + + /* + * nss_if_mem_map settings + */ + if (!strcmp(r->name, "nss_if_mem_map_inst")) { + BUG_ON(mtype == NSS_MEMINFO_MEMTYPE_UTCM_SHARED); + mem_ctx->if_map_memtype = mtype; + mem_ctx->if_map_dma = dma_addr; + mem_ctx->if_map = (struct nss_if_mem_map *)kern_addr; + } + + if (!strcmp(r->name, "debug_boot_log_desc")) { + BUG_ON(mtype == NSS_MEMINFO_MEMTYPE_UTCM_SHARED); + mem_ctx->logbuffer_memtype = mtype; + mem_ctx->logbuffer_dma = dma_addr; + mem_ctx->logbuffer = (struct nss_log_descriptor *)kern_addr; + } + + if (!strcmp(r->name, "c2c_descs_if_mem_map")) { + mem_ctx->c2c_start_memtype = mtype; + mem_ctx->c2c_start_dma = dma_addr; + } + + if (strcmp(r->name, "profile_dma_ctrl") == 0) { + mem_ctx->sdma_ctrl = kern_addr; + nss_info_always("%px: set sdma %px\n", nss_ctx, kern_addr); + } + + /* + * Flush the updated meminfo request. + */ + NSS_CORE_DMA_CACHE_MAINT(r, sizeof(struct nss_meminfo_request), DMA_TO_DEVICE); + NSS_CORE_DSB(); + + /* + * Update the list + */ + l = &mem_ctx->block_lists[mtype]; + l->num_blks++; + l->total_size += r->size; + + b->next = l->head; + l->head = b; + } + + /* + * Verify memory map end magic + */ + if (*((uint16_t *)r) != NSS_MEMINFO_MAP_END_MAGIC) + goto cleanup; + + return true; + +cleanup: + nss_meminfo_free_block_lists(nss_ctx); + return false; +} + +/* + * nss_meminfo_allocate_n2h_h2n_rings() + * Allocate N2H/H2N rings. 
+ */ +static bool nss_meminfo_allocate_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx, + struct nss_meminfo_n2h_h2n_info *info) +{ + switch (info->memtype) { + case NSS_MEMINFO_MEMTYPE_SDRAM: + info->kern_addr = nss_meminfo_alloc_sdram(nss_ctx, info->total_size); + if (!info->kern_addr) + return false; + + info->dma_addr = dma_map_single(nss_ctx->dev, (void *)info->kern_addr, + info->total_size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(nss_ctx->dev, info->dma_addr))) { + kfree((void *)info->kern_addr); + return false; + } + break; + case NSS_MEMINFO_MEMTYPE_IMEM: + /* + * For SOC's where TCM is not present + */ + if (!nss_ctx->vphys) { + nss_info_always("%px:IMEM requested but TCM not defined " + "for this SOC\n", nss_ctx); + return false; + } + + info->dma_addr = nss_meminfo_alloc_imem(nss_ctx, info->total_size, L1_CACHE_BYTES); + if (!info->dma_addr) + return false; + + info->kern_addr = nss_ctx->vmap + info->dma_addr - nss_ctx->vphys; + break; + default: + return false; + } + + return true; +} + +/* + * nss_meminfo_configure_n2h_h2n_rings() + * Configure N2H/H2N rings and if_map. + */ +static bool nss_meminfo_configure_n2h_h2n_rings(struct nss_ctx_instance *nss_ctx) +{ + struct nss_meminfo_ctx *mem_ctx = &nss_ctx->meminfo_ctx; + struct nss_meminfo_n2h_h2n_info *h2n_info; + struct nss_meminfo_n2h_h2n_info *n2h_info; + struct nss_if_mem_map *if_map; + int i; + int mtype; + + h2n_info = &mem_ctx->h2n_info; + n2h_info = &mem_ctx->n2h_info; + + /* + * Check memory type. SDRAM is the default option. 
+ */ + mtype = nss_meminfo_if_user_overwrite(nss_ctx, "h2n_rings"); + if (mtype == -1) + mtype = NSS_MEMINFO_MEMTYPE_SDRAM; + + h2n_info->memtype = mtype; + + mtype = nss_meminfo_if_user_overwrite(nss_ctx, "n2h_rings"); + if (mtype == -1) + mtype = NSS_MEMINFO_MEMTYPE_SDRAM; + + n2h_info->memtype = mtype; + + n2h_info->total_size = sizeof(struct n2h_descriptor) * NSS_N2H_RING_COUNT * (NSS_RING_SIZE + 2); + h2n_info->total_size = sizeof(struct h2n_descriptor) * NSS_H2N_RING_COUNT * (NSS_RING_SIZE + 2); + + /* + * N2H ring allocations + */ + if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, n2h_info))) { + nss_info_always("%px: failed to allocate/map n2h rings\n", nss_ctx); + return false; + } + + /* + * H2N ring allocations + */ + if (!(nss_meminfo_allocate_n2h_h2n_rings(nss_ctx, h2n_info))) { + nss_info_always("%px: failed to allocate/map h2n_rings\n", nss_ctx); + goto cleanup; + } + + /* + * Bring a fresh copy of if_map from memory in order to read it correctly. + */ + if_map = mem_ctx->if_map; + NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_FROM_DEVICE); + NSS_CORE_DSB(); + + if_map->n2h_rings = NSS_N2H_RING_COUNT; + if_map->h2n_rings = NSS_H2N_RING_COUNT; + + /* + * N2H ring settings + */ + for (i = 0; i < NSS_N2H_RING_COUNT; i++) { + struct hlos_n2h_desc_ring *n2h_desc_ring = &nss_ctx->n2h_desc_ring[i]; + n2h_desc_ring->desc_ring.desc = (struct n2h_descriptor *)(n2h_info->kern_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2)); + n2h_desc_ring->desc_ring.size = NSS_RING_SIZE; + n2h_desc_ring->hlos_index = if_map->n2h_hlos_index[i]; + + if_map->n2h_desc_if[i].size = NSS_RING_SIZE; + if_map->n2h_desc_if[i].desc_addr = n2h_info->dma_addr + i * sizeof(struct n2h_descriptor) * (NSS_RING_SIZE + 2); + nss_info("%px: N2H ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->n2h_desc_if[i].size, if_map->n2h_desc_if[i].desc_addr); + } + + /* + * H2N ring settings + */ + for (i = 0; i < NSS_H2N_RING_COUNT; i++) { + struct 
hlos_h2n_desc_rings *h2n_desc_ring = &nss_ctx->h2n_desc_rings[i]; + h2n_desc_ring->desc_ring.desc = (struct h2n_descriptor *)(h2n_info->kern_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2)); + h2n_desc_ring->desc_ring.size = NSS_RING_SIZE; + h2n_desc_ring->hlos_index = if_map->h2n_hlos_index[i]; + spin_lock_init(&h2n_desc_ring->lock); + + if_map->h2n_desc_if[i].size = NSS_RING_SIZE; + if_map->h2n_desc_if[i].desc_addr = h2n_info->dma_addr + i * sizeof(struct h2n_descriptor) * (NSS_RING_SIZE + 2); + nss_info("%px: H2N ring %d, size %d, addr = %x\n", nss_ctx, i, if_map->h2n_desc_if[i].size, if_map->h2n_desc_if[i].desc_addr); + } + + /* + * Flush the updated nss_if_mem_map. + */ + NSS_CORE_DMA_CACHE_MAINT((void *)if_map, sizeof(struct nss_if_mem_map), DMA_TO_DEVICE); + NSS_CORE_DSB(); + + return true; + +cleanup: + if (n2h_info->memtype == NSS_MEMINFO_MEMTYPE_SDRAM) + nss_meminfo_free_sdram(nss_ctx, n2h_info->dma_addr, n2h_info->kern_addr, n2h_info->total_size); + else + nss_meminfo_free_imem(nss_ctx, n2h_info->dma_addr, n2h_info->total_size); + + nss_meminfo_free_block_lists(nss_ctx); + return false; +} + +/* + * nss_meminfo_config_show() + * function to show meminfo configuration per core. 
+ */ +static int nss_meminfo_config_show(struct seq_file *seq, void *v) +{ + struct nss_ctx_instance *nss_ctx; + struct nss_meminfo_ctx *mem_ctx; + struct nss_meminfo_n2h_h2n_info *n2h_info; + struct nss_meminfo_n2h_h2n_info *h2n_info; + struct nss_meminfo_map *map; + struct nss_meminfo_request *r; + int nss_id; + int i; + + /* + * i_private is passed to us by debug_fs_create() + */ + nss_id = (int)(nss_ptr_t)seq->private; + if (nss_id < 0 || nss_id >= nss_top_main.num_nss) { + nss_warning("nss_id: %d is not valid\n", nss_id); + return -ENODEV; + } + + nss_ctx = &nss_top_main.nss[nss_id]; + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + mem_ctx = &nss_ctx->meminfo_ctx; + map = &mem_ctx->meminfo_map; + n2h_info = &mem_ctx->n2h_info; + h2n_info = &mem_ctx->h2n_info; + + seq_printf(seq, "%-5s %-32s %-7s %-7s %-10s %-10s\n", + "Index", "Name", "Default", "User", "Size", "DMA Addr"); + seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n", + "N/A", "n2h_rings", "SDRAM", + nss_meminfo_memtype_table[n2h_info->memtype], + n2h_info->total_size, n2h_info->dma_addr); + seq_printf(seq, "%-5s %-32s %-7s %-7s 0x%-8x 0x%-8x\n", + "N/A", "h2n_rings", "SDRAM", + nss_meminfo_memtype_table[h2n_info->memtype], + h2n_info->total_size, h2n_info->dma_addr); + + r = map->requests; + for (i = 0; i < map->num_requests; i++) { + seq_printf(seq, "%-5d %-32s %-7s %-7s 0x%-8x 0x%-8x\n", + i, r[i].name, + nss_meminfo_memtype_table[r[i].memtype_default], + nss_meminfo_memtype_table[r[i].memtype_user], + r[i].size, r[i].addr); + } + + seq_printf(seq, "Available IMEM: 0x%x\n", mem_ctx->imem_end - mem_ctx->imem_tail); + seq_printf(seq, "How to configure? 
\n"); + seq_printf(seq, "Overwrite the /etc/modules.d/32-qca-nss-drv with following contents then reboot\n\n"); + seq_printf(seq, "qca-nss-drv meminfo_user_config=\", ..\"\n\n"); + seq_printf(seq, "For example, <1, h2n_rings, IMEM> stands for: h2n_rings of core 1 is on IMEM\n"); + seq_printf(seq, "Note:UTCM_SHARED cannot be used for n2h_rings, h2n_rings and debug_log_boot_desc.\n"); + + return 0; +} + +/* + * nss_meminfo_debugfs_file_open() + * function to open meminfo debugfs. + */ +static int nss_meminfo_debugfs_file_open(struct inode *inode, struct file *file) +{ + return single_open(file, nss_meminfo_config_show, inode->i_private); +} + +static struct file_operations nss_meminfo_debugfs_ops = { + .owner = THIS_MODULE, + .open = nss_meminfo_debugfs_file_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * nss_meminfo_init_debugfs() + * Init meminfo debugfs. + */ +static void nss_meminfo_init_debugfs(struct nss_ctx_instance *nss_ctx) +{ + int i; + struct dentry *meminfo_main_dentry; + struct dentry *meminfo_core_dentries[NSS_MAX_CORES]; + + if (nss_meminfo_debugfs_exist) + return; + + /* + * Create directory for showing meminfo configuration of each core. 
+ */ + meminfo_main_dentry = debugfs_create_dir("meminfo", nss_top_main.top_dentry); + if (unlikely(!meminfo_main_dentry)) { + nss_warning("Failed to create qca-nss-drv/meminfo directory in debugfs\n"); + return; + } + + for (i = 0; i < nss_top_main.num_nss; i++) { + char file[10]; + snprintf(file, sizeof(file), "core%d", i); + meminfo_core_dentries[i] = debugfs_create_file(file, 0400, meminfo_main_dentry, + (void *)(nss_ptr_t)i, &nss_meminfo_debugfs_ops); + if (unlikely(!meminfo_core_dentries[i])) { + int j; + for (j = 0; j < i; j++) + debugfs_remove(meminfo_core_dentries[j]); + debugfs_remove(meminfo_main_dentry); + nss_warning("Failed to create qca-nss-drv/meminfo/%s file in debugfs", file); + return; + } + } + + nss_meminfo_debugfs_exist = true; + nss_info("nss meminfo user config: %s\n", nss_meminfo_user_config); +} + +/* + * nss_meminfo_init + * Initialization + * + */ +bool nss_meminfo_init(struct nss_ctx_instance *nss_ctx) +{ + struct nss_meminfo_ctx *mem_ctx; + uint32_t *meminfo_start; + struct nss_meminfo_map *map; + struct nss_top_instance *nss_top = &nss_top_main; + + mem_ctx = &nss_ctx->meminfo_ctx; + + /* + * meminfo_start is the label where the start address of meminfo map is stored. 
+ */ + meminfo_start = (uint32_t *)ioremap_nocache(nss_ctx->load + NSS_MEMINFO_MAP_START_OFFSET, + NSS_MEMINFO_RESERVE_AREA_SIZE); + if (!meminfo_start) { + nss_info_always("%px: cannot remap meminfo start\n", nss_ctx); + return false; + } + + /* + * Check meminfo start magic + */ + if ((uint16_t)meminfo_start[0] != NSS_MEMINFO_RESERVE_AREA_MAGIC) { + nss_info_always("%px: failed to verify meminfo start magic\n", nss_ctx); + return false; + } + + map = &mem_ctx->meminfo_map; + map->start = (uint32_t *)ioremap_cache(meminfo_start[1], NSS_MEMINFO_MAP_SIZE); + if (!map->start) { + nss_info_always("%px: failed to remap meminfo map\n", nss_ctx); + return false; + } + + /* + * Check meminfo map magic + */ + if ((uint16_t)map->start[0] != NSS_MEMINFO_MAP_START_MAGIC) { + nss_info_always("%px: failed to verify meminfo map magic\n", nss_ctx); + return false; + } + + /* + * Meminfo map settings + */ + map->num_requests = 0; + map->requests = (struct nss_meminfo_request *)(map->start + 1); + + /* + * Init IMEM + */ + nss_top->hal_ops->init_imem(nss_ctx); + + /* + * Init UTCM_SHARED if supported + */ + if (!nss_top->hal_ops->init_utcm_shared(nss_ctx, meminfo_start)) { + nss_info_always("%px: failed to initialize UTCM_SHARED meminfo\n", nss_ctx); + return false; + } + + /* + * Init meminfo block lists + */ + if (!nss_meminfo_init_block_lists(nss_ctx)) { + nss_info_always("%px: failed to initialize meminfo block lists\n", nss_ctx); + return false; + } + + /* + * Configure N2H/H2N rings and nss_if_mem_map + */ + if (!nss_meminfo_configure_n2h_h2n_rings(nss_ctx)) + return false; + + nss_meminfo_init_debugfs(nss_ctx); + + nss_info_always("%px: meminfo init succeed\n", nss_ctx); + return true; +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_meminfo.h b/feeds/ipq807x/qca-nss-drv/src/nss_meminfo.h new file mode 100644 index 000000000..5c006cc54 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_meminfo.h @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2018-2020, The Linux Foundation. 
All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * nss_meminfo.h + * nss meminfo header file. + */ + +#ifndef __NSS_MEMINFO_H +#define __NSS_MEMINFO_H + +#define NSS_MEMINFO_RESERVE_AREA_SIZE 0x1000 /* Size of reserved space in firmware start code aligned to one page */ +#define NSS_MEMINFO_RESERVE_AREA_MAGIC 0x9526 /* Magic at the beginning of reserved space */ +#define NSS_MEMINFO_MAP_START_OFFSET 8 /* Offset of memory map start address in reserved space */ +#define NSS_MEMINFO_MAP_SIZE 0x1000 /* Size of memory map per core aligned to one page */ +#define NSS_MEMINFO_MAP_START_MAGIC 0x9527 +#define NSS_MEMINFO_REQUEST_MAGIC 0X9528 +#define NSS_MEMINFO_MAP_END_MAGIC 0x9529 +#define NSS_MEMINFO_RESERVE_AREA_UTCM_SHARED_MAP_MAGIC 0x9530 /* Magic at the beginning of UTCM_SHARED reserved space */ +#define NSS_MEMINFO_BLOCK_NAME_MAXLEN 48 +#define NSS_MEMINFO_MEMTYPE_NAME_MAXLEN 32 +#define NSS_MEMINFO_USER_CONFIG_MAXLEN 1024 +#define NSS_MEMINFO_POISON 0x95 /* Invalid kernel memory address assigned for non mapable mem types */ + +/* + * Memory types available + */ +enum nss_meminfo_memtype { + NSS_MEMINFO_MEMTYPE_IMEM, /* NSS-IMEM also called TCM */ + NSS_MEMINFO_MEMTYPE_SDRAM, /* SDRAM also called DDR */ + NSS_MEMINFO_MEMTYPE_UTCM_SHARED, /* UTCM 
memory allocated for DMA objects */ + NSS_MEMINFO_MEMTYPE_INFO, /* Exchange information during boot up */ + NSS_MEMINFO_MEMTYPE_MAX +}; + +/* + * Memory request + * Firmware package defines each request asking host to feed the request. + */ +struct nss_meminfo_request { + uint16_t magic; /* Request magic */ + char name[NSS_MEMINFO_BLOCK_NAME_MAXLEN]; /* Memory block name */ + uint16_t memtype_default; /* Memory type requested */ + uint16_t memtype_user; /* User-defined memory type */ + uint32_t alignment; /* Alignment requirement */ + uint32_t size; /* Size requested */ + uint32_t addr; /* Memory block address got from host */ +}; + +/* + * Memory map + * It starts with a magic then an array of memory request and end with a checksum. + * Firmware creates the map for host to parse. + */ +struct nss_meminfo_map { + uint32_t *start; /* Start address */ + uint32_t num_requests; /* Number of requests */ + struct nss_meminfo_request *requests; /* Start of Request array */ +}; + +/* + * Memory block + * Block node for each request. + */ +struct nss_meminfo_block { + struct nss_meminfo_block *next; /* Next block in the same list */ + uint32_t index; /* Index to request array */ + uint32_t size; /* Size of memory block */ + uint32_t dma_addr; /* DMA address */ + void *kern_addr; /* Kernel address */ +}; + +/* + * Memory block list + * List of block node of same memory type. 
+ */ +struct nss_meminfo_block_list { + enum nss_meminfo_memtype memtype; /* memory type */ + uint32_t num_blks; /* Number of blocks */ + uint32_t total_size; /* Size of all memory blocks in this list */ + struct nss_meminfo_block *head; /* list head */ +}; + +/* + * H2N/N2H rings information + */ +struct nss_meminfo_n2h_h2n_info { + enum nss_meminfo_memtype memtype; /* Memory type */ + uint32_t total_size; /* Total size */ + uint32_t dma_addr; /* DMA address */ + void *kern_addr; /* Kernel address */ +}; + +/* + * Memory context + */ +struct nss_meminfo_ctx { + struct nss_meminfo_n2h_h2n_info n2h_info; /* N2H rings info*/ + struct nss_meminfo_n2h_h2n_info h2n_info; /* H2N rings info */ + uint32_t imem_head; /* IMEM start address */ + uint32_t imem_end; /* IMEM end address */ + uint32_t imem_tail; /* IMEM data end */ + uint32_t utcm_shared_head; /* UTCM_SHARED start address */ + uint32_t utcm_shared_end; /* UTCM_SHARED end address */ + uint32_t utcm_shared_tail; /* UTCM_SHARED data end */ + struct nss_if_mem_map *if_map; /* nss_if_mem_map_inst virtual address */ + uint32_t if_map_dma; /* nss_if_mem_map_inst physical address */ + enum nss_meminfo_memtype if_map_memtype; /* Memory type for nss_if_mem_map */ + struct nss_log_descriptor *logbuffer; /* nss_logbuffer virtual address */ + uint32_t logbuffer_dma; /* nss_logbuffer physical address */ + enum nss_meminfo_memtype logbuffer_memtype; /* Memory type for logbuffer */ + uint32_t c2c_start_dma; /* nss_c2c start physical address */ + enum nss_meminfo_memtype c2c_start_memtype; /* Memory type for c2c_start */ + void *sdma_ctrl; /* Soft DMA controller */ + + struct nss_meminfo_map meminfo_map; /* Meminfo map */ + struct nss_meminfo_block_list block_lists[NSS_MEMINFO_MEMTYPE_MAX]; + /* Block lists for each memory type */ +}; + +bool nss_meminfo_init(struct nss_ctx_instance *nss_ctx); +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_mirror.c b/feeds/ipq807x/qca-nss-drv/src/nss_mirror.c new file mode 100644 index 
000000000..a2e506128 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_mirror.c @@ -0,0 +1,296 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_mirror_stats.h" +#include "nss_mirror_strings.h" +#include "nss_mirror_log.h" + +#define NSS_MIRROR_TX_TIMEOUT 3000 /* 3 Seconds */ + +/* + * Private data structure + */ +static struct { + struct semaphore sem; + struct completion complete; + int response; +} nss_mirror_pvt; + +atomic_t nss_mirror_num_instances; /* Number of active mirror stats instances. */ + +/* + * nss_mirror_verify_if_num() + * Verify interface number passed to us. + */ +bool nss_mirror_verify_if_num(uint32_t if_num) +{ + enum nss_dynamic_interface_type if_type; + + if_type = nss_dynamic_interface_get_type(nss_mirror_get_context(), if_num); + if (if_type == NSS_DYNAMIC_INTERFACE_TYPE_MIRROR) { + return true; + } + + return false; +} +EXPORT_SYMBOL(nss_mirror_verify_if_num); + +/* + * nss_mirror_handler() + * Handle NSS -> HLOS messages for mirror device. 
+ */ +static void nss_mirror_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, + void *app_data) +{ + struct nss_mirror_msg *nmm = (struct nss_mirror_msg *)ncm; + void *ctx; + nss_mirror_msg_callback_t cb; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + nss_assert(nss_mirror_verify_if_num(ncm->interface)); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_MIRROR_MSG_MAX) { + nss_warning("%px: received invalid message %d for mirror interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_mirror_msg)) { + nss_warning("%px: Length of message is greater than expected.", nss_ctx); + return; + } + + /* + * Log messages. + */ + nss_core_log_msg_failures(nss_ctx, ncm); + nss_mirror_log_rx_msg(nmm); + + switch (ncm->type) { + case NSS_MIRROR_MSG_SYNC_STATS: + /* + * Debug stats embedded in stats msg. + */ + nss_mirror_stats_sync(nss_ctx, nmm, ncm->interface); + nss_mirror_stats_notify(nss_ctx, ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)app_data; + } + + /* + * Callback. + */ + cb = (nss_mirror_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + /* + * Call mirror interface callback. + */ + if (!cb) { + nss_warning("%px: No callback for mirror interface %d", + nss_ctx, ncm->interface); + return; + } + + cb(ctx, ncm); +} + +/* + * nss_mirror_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_mirror_callback(void *app_data, struct nss_cmn_msg *ncm) +{ + nss_mirror_pvt.response = NSS_TX_SUCCESS; + + if (ncm->response != NSS_CMN_RESPONSE_ACK) { + nss_warning("mirror interface error response %d\n", ncm->response); + nss_mirror_pvt.response = NSS_TX_FAILURE; + } + + /* + * Write memory barrier. 
+ */ + smp_wmb(); + complete(&nss_mirror_pvt.complete); +} + +/* + * nss_mirror_tx_msg() + * Transmit a mirror interface message to NSS firmware. + */ +nss_tx_status_t nss_mirror_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_mirror_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Sanity check the message. + */ + if (!nss_mirror_verify_if_num(ncm->interface)) { + nss_warning("%px: tx request for non mirror interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_MIRROR_MSG_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Trace messages. + */ + nss_mirror_log_tx_msg(msg); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_mirror_tx_msg); + +/* + * nss_mirror_tx_msg_sync() + * Transmit a mirror interface message to NSS firmware synchronously. + */ +nss_tx_status_t nss_mirror_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_mirror_msg *msg) +{ + nss_tx_status_t status; + int ret = 0; + + down(&nss_mirror_pvt.sem); + msg->cm.cb = (nss_ptr_t)nss_mirror_callback; + msg->cm.app_data = (nss_ptr_t)NULL; + + status = nss_mirror_tx_msg(nss_ctx, msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: mirror_tx_msg failed\n", nss_ctx); + up(&nss_mirror_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&nss_mirror_pvt.complete, msecs_to_jiffies(NSS_MIRROR_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: Mirror interface tx sync failed due to timeout\n", nss_ctx); + nss_mirror_pvt.response = NSS_TX_FAILURE; + } + + status = nss_mirror_pvt.response; + up(&nss_mirror_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_mirror_tx_msg_sync); + +/* + * nss_mirror_unregister_if() + * Un-registers mirror interface from the NSS. 
+ */ +void nss_mirror_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.mirror_handler_id]; + uint32_t status; + + nss_assert(nss_ctx); + nss_assert(nss_mirror_verify_if_num(if_num)); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_core_unregister_handler(nss_ctx, if_num); + + status = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for interface %d with NSS core\n", nss_ctx, if_num); + } + + atomic_dec(&nss_mirror_num_instances); + nss_mirror_stats_reset(if_num); +} +EXPORT_SYMBOL(nss_mirror_unregister_if); + +/* + * nss_mirror_register_if() + * Registers the mirror interface with NSS. + */ +struct nss_ctx_instance *nss_mirror_register_if(uint32_t if_num, + nss_mirror_data_callback_t data_callback, + nss_mirror_msg_callback_t event_callback, + struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.mirror_handler_id]; + int ret; + + nss_assert(nss_ctx); + nss_assert(netdev); + nss_assert(nss_mirror_verify_if_num(if_num)); + + if (atomic_read(&nss_mirror_num_instances) == NSS_MAX_MIRROR_DYNAMIC_INTERFACES) { + nss_warning("%px: Maximum number of mirror interfaces are already allocated\n", nss_ctx); + return NULL; + } + + ret = nss_mirror_stats_init(if_num, netdev); + if (ret < 0) { + nss_warning("%px: Error in initializaing mirror stats.\n", nss_ctx); + return NULL; + } + + nss_core_register_handler(nss_ctx, if_num, nss_mirror_handler, netdev); + ret = nss_core_register_msg_handler(nss_ctx, if_num, event_callback); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: Not able to register handler for mirror interface %d with NSS core\n", nss_ctx, if_num); + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, data_callback, NULL, 
NULL, netdev, features); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, NSS_DYNAMIC_INTERFACE_TYPE_MIRROR); + + atomic_inc(&nss_mirror_num_instances); + return nss_ctx; +} +EXPORT_SYMBOL(nss_mirror_register_if); + +/* + * nss_mirror_get_context() + * Get the mirror instance context. + */ +struct nss_ctx_instance *nss_mirror_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.mirror_handler_id]; +} +EXPORT_SYMBOL(nss_mirror_get_context); + +/* + * nss_mirror_register_handler() + * Initialize and register mirror instance handler. + */ +void nss_mirror_register_handler(void) +{ + nss_info("nss_mirror_register_handler"); + sema_init(&nss_mirror_pvt.sem, 1); + init_completion(&nss_mirror_pvt.complete); + + nss_mirror_stats_dentry_create(); + nss_mirror_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_mirror_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_log.c new file mode 100644 index 000000000..5fb8858f3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_log.c @@ -0,0 +1,198 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_mirror_log.c + * NSS Mirror logger file. + */ + +#include "nss_core.h" + +/* + * nss_mirror_log_message_types_str + * MIRROR message strings + */ +static int8_t *nss_mirror_log_message_types_str[NSS_MIRROR_MSG_MAX] __maybe_unused = { + "Mirror Configure Msg", + "Mirror Enable Msg", + "Mirror Disable Msg", + "Mirror Set Nexthop Msg", + "Mirror Reset Nexthop Msg", + "Mirror Stats Sync Msg", +}; + +/* + * nss_mirror_log_error_response_types_str + * Strings for error types for Mirror messages + */ +static int8_t *nss_mirror_log_error_response_types_str[NSS_MIRROR_ERROR_TYPE_MAX] __maybe_unused = { + "Mirror no error", + "Mirror No Memory", + "Mirror Transmit Failure", + "Mirror Bad Parameter", + "Mirror Bad Clone Point", + "Mirror Intance Configured", + "Mirror Intance Disabled", + "Mirror Bad Nexthop", + "Mirror Nexthop Configured", + "Mirror Nexthop Reset", + "Mirror Unknown Message", +}; + +/* + * nss_mirror_log_configure_msg() + * Log NSS Mirror Configure message. + */ +static void nss_mirror_log_configure_msg(struct nss_mirror_msg *nmm) +{ + struct nss_mirror_configure_msg *config_msg __maybe_unused = &nmm->msg.config; + + nss_trace("%px: NSS Mirror Config message \n" + "Packet clone size: %u\n" + "Packet clone point: %hu\n", + config_msg, + config_msg->pkt_clone_size, + config_msg->pkt_clone_point); +} + +/* + * nss_mirror_log_set_nexthop_msg() + * Log NSS Mirror Set Nexthop message. + */ +static void nss_mirror_log_set_nexthop_msg(struct nss_mirror_msg *nmm) +{ + struct nss_mirror_set_nexthop_msg *nexthop_msg __maybe_unused = &nmm->msg.nexthop; + + nss_trace("%px: NSS Mirror Nexthop message \n" + "Nexthop interface number: %u\n", + nexthop_msg, + nexthop_msg->if_num); +} + +/* + * nss_mirror_log_enable_msg() + * Log NSS Mirror Enable message. 
+ */ +static void nss_mirror_log_enable_msg(struct nss_mirror_msg *nmm) +{ + nss_trace("%px: NSS Mirror message: Enable \n", nmm); +} + +/* + * nss_mirror_log_disable_msg() + * Log NSS Mirror Disable message. + */ +static void nss_mirror_log_disable_msg(struct nss_mirror_msg *nmm) +{ + nss_trace("%px: NSS Mirror message: Disable \n", nmm); +} + +/* + * nss_mirror_log_reset_nexthop_msg() + * Log NSS Mirror Reset Nexthop message. + */ +static void nss_mirror_log_reset_nexthop_msg(struct nss_mirror_msg *nmm) +{ + nss_trace("%px: NSS Mirror message: Reset Nexthop \n", nmm); +} + +/* + * nss_mirror_log_verbose() + * Log message contents. + */ +static void nss_mirror_log_verbose(struct nss_mirror_msg *nmm) +{ + switch (nmm->cm.type) { + case NSS_MIRROR_MSG_CONFIGURE: + nss_mirror_log_configure_msg(nmm); + break; + + case NSS_MIRROR_MSG_ENABLE: + nss_mirror_log_enable_msg(nmm); + break; + + case NSS_MIRROR_MSG_DISABLE: + nss_mirror_log_disable_msg(nmm); + break; + + case NSS_MIRROR_MSG_SET_NEXTHOP: + nss_mirror_log_set_nexthop_msg(nmm); + break; + + case NSS_MIRROR_MSG_RESET_NEXTHOP: + nss_mirror_log_reset_nexthop_msg(nmm); + break; + + case NSS_MIRROR_MSG_SYNC_STATS: + break; + + default: + nss_trace("%px: Invalid message type\n", nmm); + break; + } +} + +/* + * nss_mirror_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_mirror_log_tx_msg(struct nss_mirror_msg *nmm) +{ + if (nmm->cm.type >= NSS_MIRROR_MSG_MAX) { + nss_warning("%px: Invalid message type\n", nmm); + return; + } + + nss_info("%px: type[%d]:%s\n", nmm, nmm->cm.type, nss_mirror_log_message_types_str[nmm->cm.type]); + nss_mirror_log_verbose(nmm); +} + +/* + * nss_mirror_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_mirror_log_rx_msg(struct nss_mirror_msg *nmm) +{ + if (nmm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nmm); + return; + } + + if (nmm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nmm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nmm, nmm->cm.type, + nss_mirror_log_message_types_str[nmm->cm.type], + nmm->cm.response, nss_cmn_response_str[nmm->cm.response]); + goto verbose; + } + + if (nmm->cm.error >= NSS_MIRROR_ERROR_TYPE_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nmm, nmm->cm.type, nss_mirror_log_message_types_str[nmm->cm.type], + nmm->cm.response, nss_cmn_response_str[nmm->cm.response], + nmm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nmm, nmm->cm.type, nss_mirror_log_message_types_str[nmm->cm.type], + nmm->cm.response, nss_cmn_response_str[nmm->cm.response], + nmm->cm.error, nss_mirror_log_error_response_types_str[nmm->cm.error]); + +verbose: + nss_mirror_log_verbose(nmm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_mirror_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_log.h new file mode 100644 index 000000000..a81a4a022 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_log.h @@ -0,0 +1,39 @@ +/* + ****************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_MIRROR_LOG_H__ +#define __NSS_MIRROR_LOG_H__ + +/* + * nss_mirror_log.h + * NSS Mirror Log Header File. + */ + +/* + * nss_mirror_log_tx_msg + * Logs a Mirror message that is sent to the NSS firmware. + */ +void nss_mirror_log_tx_msg(struct nss_mirror_msg *nmm); + +/* + * nss_mirror_log_rx_msg + * Logs a Mirror message that is received from the NSS firmware. + */ +void nss_mirror_log_rx_msg(struct nss_mirror_msg *nmm); + +#endif /* __NSS_MIRROR_LOG_H__*/ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_mirror_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_stats.c new file mode 100644 index 000000000..51fa93c38 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_stats.c @@ -0,0 +1,324 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_mirror.h" +#include "nss_mirror_stats.h" +#include "nss_mirror_strings.h" + +static struct nss_mirror_stats_debug_instance *stats_db[NSS_MAX_MIRROR_DYNAMIC_INTERFACES]; + /* Mirror stats data structure. */ + +/* + * Atomic notifier data structure for statistics + */ +ATOMIC_NOTIFIER_HEAD(nss_mirror_stats_notifier); + +static DEFINE_SPINLOCK(nss_mirror_stats_lock); + +/* + * nss_mirror_stats_get() + * Get mirror interface statistics. + */ +static void nss_mirror_stats_get(void *stats_mem, uint32_t stats_num) +{ + struct nss_mirror_stats_debug_instance *stats = (struct nss_mirror_stats_debug_instance *)stats_mem; + int i; + + if (!stats) { + nss_warning("No memory to copy mirror interface stats"); + return; + } + + spin_lock_bh(&nss_mirror_stats_lock); + for (i = 0; i < NSS_MAX_MIRROR_DYNAMIC_INTERFACES; i++) { + + /* + * Copy maximum for given number of instances only. + */ + if (likely(stats_db[i])) { + if (likely(stats_num)) { + memcpy(stats, stats_db[i], sizeof(struct nss_mirror_stats_debug_instance)); + stats++; + stats_num--; + } else { + break; + } + } + } + spin_unlock_bh(&nss_mirror_stats_lock); +} + +/* + * nss_mirror_stats_read() + * Read mirror interface statistics. 
+ */ +static ssize_t nss_mirror_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + + uint32_t max_output_lines = 2 /* header & footer for instance stats */ + + NSS_MAX_MIRROR_DYNAMIC_INTERFACES * + ((NSS_STATS_NODE_MAX + 3 ) + (NSS_MIRROR_STATS_MAX + 3)) /*instance stats */ + + 2; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct net_device *dev; + struct nss_mirror_stats_debug_instance *mirror_shadow_stats; + uint32_t id, mirror_active_instances = atomic_read(&nss_mirror_num_instances); + char *lbuf; + + if (!mirror_active_instances) { + return 0; + } + + lbuf = vzalloc(size_al); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + mirror_shadow_stats = vzalloc(sizeof(struct nss_mirror_stats_debug_instance) * + mirror_active_instances); + if (unlikely(!mirror_shadow_stats)) { + nss_warning("Could not allocate memory for base debug statistics buffer"); + vfree(lbuf); + return 0; + } + + /* + * Get all stats + */ + nss_mirror_stats_get((void *)mirror_shadow_stats, mirror_active_instances); + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "mirror stats", NSS_STATS_SINGLE_CORE); + + /* + * Session stats + */ + for (id = 0; id < mirror_active_instances; id++) { + dev = dev_get_by_index(&init_net, mirror_shadow_stats[id].if_index); + if (likely(dev)) { + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id, + mirror_shadow_stats[id].if_num, dev->name); + dev_put(dev); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d\n", id, + mirror_shadow_stats[id].if_num); + } + + size_wr += nss_stats_fill_common_stats(mirror_shadow_stats[id].if_num, id, lbuf, size_wr, size_al, "mirror"); + + /* + * Mirror interface exception stats. 
+ */ + size_wr += nss_stats_print("mirror", "mirror exception stats", + id, + nss_mirror_strings_stats, + mirror_shadow_stats[id].stats, + NSS_MIRROR_STATS_MAX, + lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + vfree(mirror_shadow_stats); + vfree(lbuf); + return bytes_read; +} + +/* + * nss_mirror_stats_sync() + * API to sync statistics for mirror interface. + */ +void nss_mirror_stats_sync(struct nss_ctx_instance *nss_ctx, + struct nss_mirror_msg *nmm, uint16_t if_num) +{ + uint8_t i, j; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct nss_mirror_stats_sync_msg *stats_msg = &nmm->msg.stats; + struct nss_cmn_node_stats *node_stats_ptr = &stats_msg->node_stats; + uint32_t *mirror_stats_ptr = (uint32_t *)&stats_msg->mirror_stats; + + spin_lock_bh(&nss_mirror_stats_lock); + for (i = 0; i < NSS_MAX_MIRROR_DYNAMIC_INTERFACES; i++) { + if (!stats_db[i] || (stats_db[i]->if_num != if_num)) { + continue; + } + + for (j = 0; j < NSS_MIRROR_STATS_MAX; j++) { + /* + * Sync stats. + */ + stats_db[i]->stats[j] += mirror_stats_ptr[j]; + } + spin_unlock_bh(&nss_mirror_stats_lock); + goto sync_cmn_stats; + } + + spin_unlock_bh(&nss_mirror_stats_lock); + nss_warning("Invalid mirror stats sync message received for %d interface\n", if_num); + return; + +sync_cmn_stats: + spin_lock_bh(&nss_mirror_stats_lock); + + /* + * Sync common stats. 
+ */ + nss_top->stats_node[if_num][NSS_STATS_NODE_RX_PKTS] += node_stats_ptr->rx_packets; + nss_top->stats_node[if_num][NSS_STATS_NODE_RX_BYTES] += node_stats_ptr->rx_bytes; + nss_top->stats_node[if_num][NSS_STATS_NODE_TX_PKTS] += node_stats_ptr->tx_packets; + nss_top->stats_node[if_num][NSS_STATS_NODE_TX_BYTES] += node_stats_ptr->tx_bytes; + + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + nss_top->stats_node[if_num][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + i] += + node_stats_ptr->rx_dropped[i]; + } + + spin_unlock_bh(&nss_mirror_stats_lock); +} + +/* + * nss_mirror_stats_reset() + * API to reset the mirror interface stats. + */ +void nss_mirror_stats_reset(uint32_t if_num) +{ + struct nss_mirror_stats_debug_instance *mirror_debug_instance = NULL; + uint8_t i; + + /* + * Reset common node stats. + */ + nss_stats_reset_common_stats(if_num); + + /* + * Reset mirror stats. + */ + spin_lock_bh(&nss_mirror_stats_lock); + for (i = 0; i < NSS_MAX_MIRROR_DYNAMIC_INTERFACES; i++) { + if (!stats_db[i] || (stats_db[i]->if_num != if_num)) { + continue; + } + + mirror_debug_instance = stats_db[i]; + stats_db[i] = NULL; + break; + } + spin_unlock_bh(&nss_mirror_stats_lock); + + if (mirror_debug_instance) { + vfree(mirror_debug_instance); + } +} + +/* + * nss_mirror_stats_init() + * API to initialize mirror debug instance statistics. 
+ */ +int nss_mirror_stats_init(uint32_t if_num, struct net_device *netdev) +{ + struct nss_mirror_stats_debug_instance *mirror_debug_instance = NULL; + uint8_t i; + + mirror_debug_instance = + (struct nss_mirror_stats_debug_instance *)vzalloc(sizeof(struct nss_mirror_stats_debug_instance)); + if (!mirror_debug_instance) { + nss_warning("Memory alloc failed for mirror stats instance.\n"); + return -1; + } + + spin_lock_bh(&nss_mirror_stats_lock); + for (i = 0; i < NSS_MAX_MIRROR_DYNAMIC_INTERFACES; i++) { + if (stats_db[i] != NULL) { + continue; + } + + stats_db[i] = mirror_debug_instance; + stats_db[i]->if_num = if_num; + stats_db[i]->if_index = netdev->ifindex; + spin_unlock_bh(&nss_mirror_stats_lock); + return 0; + } + spin_unlock_bh(&nss_mirror_stats_lock); + vfree(mirror_debug_instance); + return -1; +} + +/* + * nss_mirror_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(mirror) + +/* + * nss_mirror_stats_dentry_create() + * Create mirror interface statistics debug entry. + */ +void nss_mirror_stats_dentry_create(void) +{ + nss_stats_create_dentry("mirror", &nss_mirror_stats_ops); +} + +/* + * nss_mirror_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_mirror_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_mirror_stats_notification mirror_stats; + int i; + + spin_lock_bh(&nss_mirror_stats_lock); + for (i = 0; i < NSS_MAX_MIRROR_DYNAMIC_INTERFACES; i++) { + if (!stats_db[i] || (stats_db[i]->if_num != if_num)) { + continue; + } + + memcpy(mirror_stats.stats_ctx, stats_db[i]->stats, sizeof(mirror_stats.stats_ctx)); + mirror_stats.core_id = nss_ctx->id; + mirror_stats.if_num = if_num; + spin_unlock_bh(&nss_mirror_stats_lock); + atomic_notifier_call_chain(&nss_mirror_stats_notifier, NSS_STATS_EVENT_NOTIFY, &mirror_stats); + return; + } + spin_unlock_bh(&nss_mirror_stats_lock); +} + +/* + * nss_mirror_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_mirror_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_mirror_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_mirror_stats_unregister_notifier); + +/* + * nss_mirror_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_mirror_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_mirror_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_mirror_stats_register_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_mirror_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_stats.h new file mode 100644 index 000000000..22622a550 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_stats.h @@ -0,0 +1,44 @@ +/* + ****************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_MIRROR_STATS_H +#define __NSS_MIRROR_STATS_H + +/* + * Number of active mirror stats instances. + */ +extern atomic_t nss_mirror_num_instances; + +/* + * nss_mirror_stats_debug_instance + * Stucture for H2N/N2H mirror interface debug stats. + */ +struct nss_mirror_stats_debug_instance { + uint64_t stats[NSS_MIRROR_STATS_MAX]; /* Mirror statistics for each instance. */ + int32_t if_index; /* Mirror instance netdev index. */ + uint32_t if_num; /* Mirror instance NSS interface number */ +}; + +extern void nss_mirror_stats_sync(struct nss_ctx_instance *nss_ctx, + struct nss_mirror_msg *nmm, uint16_t if_num); +extern void nss_mirror_stats_reset(uint32_t if_num); +extern int nss_mirror_stats_init(uint32_t if_num, struct net_device *netdev); +extern void nss_mirror_stats_dentry_create(void); +extern void nss_mirror_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); + +#endif /* __NSS_MIRROR_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_mirror_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_strings.c new file mode 100644 index 000000000..fb68e0461 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_strings.c @@ -0,0 +1,58 @@ +/* + *************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_mirror_strings.h" + +/* + * nss_mirror_strings_stats + * Mirror statistics strings for nss session stats. + */ +struct nss_stats_info nss_mirror_strings_stats[NSS_MIRROR_STATS_MAX] = { + {"pkts", NSS_STATS_TYPE_SPECIAL}, + {"bytes", NSS_STATS_TYPE_SPECIAL}, + {"tx_fail", NSS_STATS_TYPE_DROP}, + {"dest_lookup_fail", NSS_STATS_TYPE_DROP}, + {"mem_alloc_fail", NSS_STATS_TYPE_ERROR}, + {"copy_fail", NSS_STATS_TYPE_ERROR}, +}; + +/* + * nss_mirror_strings_read() + * Read mirror statistics names + */ +static ssize_t nss_mirror_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_mirror_strings_stats, NSS_MIRROR_STATS_MAX); +} + +/* + * nss_mirror_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(mirror); + +/* + * nss_mirror_strings_dentry_create() + * Create mirror statistics strings debug entry. 
+ */ +void nss_mirror_strings_dentry_create(void) +{ + nss_strings_create_dentry("mirror", &nss_mirror_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_mirror_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_strings.h new file mode 100644 index 000000000..24b73f4c9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_mirror_strings.h @@ -0,0 +1,27 @@ +/* + *************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + *************************************************************************** + */ + +#ifndef __NSS_MIRROR_STRINGS_H +#define __NSS_MIRROR_STRINGS_H + +#include "nss_mirror_stats.h" + +extern struct nss_stats_info nss_mirror_strings_stats[NSS_MIRROR_STATS_MAX]; +extern void nss_mirror_strings_dentry_create(void); + +#endif /* __NSS_MIRROR_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_n2h.c b/feeds/ipq807x/qca-nss-drv/src/nss_n2h.c new file mode 100644 index 000000000..ea5c2d04b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_n2h.c @@ -0,0 +1,2250 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_n2h.c + * NSS N2H node APIs + */ + +#include "nss_tx_rx_common.h" +#include "nss_n2h_stats.h" +#include "nss_n2h_strings.h" +#include "nss_drv_strings.h" + +#define NSS_N2H_MAX_BUF_POOL_SIZE (1024 * 1024 * 20) /* 20MB */ +#define NSS_N2H_MIN_EMPTY_POOL_BUF_SZ 32 +#define NSS_N2H_MAX_EMPTY_POOL_BUF_SZ 131072 +#define NSS_N2H_DEFAULT_EMPTY_POOL_BUF_SZ 8192 +#define NSS_N2H_TX_TIMEOUT 3000 /* 3 Seconds */ + +int nss_n2h_empty_pool_buf_cfg[NSS_MAX_CORES] __read_mostly = {-1, -1}; +int nss_n2h_empty_paged_pool_buf_cfg[NSS_MAX_CORES] __read_mostly = {-1, -1}; +int nss_n2h_water_mark[NSS_MAX_CORES][2] __read_mostly = {{-1, -1}, {-1, -1} }; +int nss_n2h_paged_water_mark[NSS_MAX_CORES][2] __read_mostly = {{-1, -1}, {-1, -1} }; +int nss_n2h_wifi_pool_buf_cfg __read_mostly = -1; +int nss_n2h_core0_mitigation_cfg __read_mostly = 1; +int nss_n2h_core1_mitigation_cfg __read_mostly = 1; +int nss_n2h_core0_add_buf_pool_size __read_mostly; +int nss_n2h_core1_add_buf_pool_size __read_mostly; +int nss_n2h_queue_limit[NSS_MAX_CORES] __read_mostly = {NSS_DEFAULT_QUEUE_LIMIT, NSS_DEFAULT_QUEUE_LIMIT}; +int nss_n2h_host_bp_config[NSS_MAX_CORES] __read_mostly; + +struct 
nss_n2h_registered_data { + nss_n2h_msg_callback_t n2h_callback; + void *app_data; +}; + +static struct nss_n2h_cfg_pvt nss_n2h_nepbcfgp[NSS_MAX_CORES]; +static struct nss_n2h_registered_data nss_n2h_rd[NSS_MAX_CORES]; +static struct nss_n2h_cfg_pvt nss_n2h_rcp; +static struct nss_n2h_cfg_pvt nss_n2h_mitigationcp[NSS_CORE_MAX]; +static struct nss_n2h_cfg_pvt nss_n2h_bufcp[NSS_CORE_MAX]; +static struct nss_n2h_cfg_pvt nss_n2h_wp; +static struct nss_n2h_cfg_pvt nss_n2h_q_cfg_pvt; +static struct nss_n2h_cfg_pvt nss_n2h_q_lim_pvt; +static struct nss_n2h_cfg_pvt nss_n2h_host_bp_cfg_pvt; + +/* + * nss_n2h_interface_handler() + * Handle NSS -> HLOS messages for N2H node + */ +static void nss_n2h_interface_handler(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, + void *app_data) +{ + struct nss_n2h_msg *nnm = (struct nss_n2h_msg *)ncm; + nss_n2h_msg_callback_t cb; + + BUG_ON(ncm->interface != NSS_N2H_INTERFACE); + + /* + * Is this a valid request/response packet? + */ + if (nnm->cm.type >= NSS_METADATA_TYPE_N2H_MAX) { + nss_warning("%px: received invalid message %d for Offload stats interface", nss_ctx, nnm->cm.type); + return; + } + + switch (nnm->cm.type) { + case NSS_TX_METADATA_TYPE_N2H_RPS_CFG: + nss_info("NSS N2H rps_en %d \n",nnm->msg.rps_cfg.enable); + break; + + case NSS_TX_METADATA_TYPE_N2H_MITIGATION_CFG: + nss_info("NSS N2H mitigation_dis %d \n",nnm->msg.mitigation_cfg.enable); + break; + + case NSS_TX_METADATA_TYPE_N2H_EMPTY_POOL_BUF_CFG: + nss_info("%px: empty pool buf cfg response from FW", nss_ctx); + break; + + case NSS_TX_METADATA_TYPE_N2H_FLUSH_PAYLOADS: + nss_info("%px: flush payloads cmd response from FW", nss_ctx); + break; + + case NSS_RX_METADATA_TYPE_N2H_STATS_SYNC: + /* + * Update driver statistics and send statistics notifications to the registered modules. 
+ */ + nss_n2h_stats_sync(nss_ctx, &nnm->msg.stats_sync); + nss_n2h_stats_notify(nss_ctx); + break; + + default: + if (ncm->response != NSS_CMN_RESPONSE_ACK) { + /* + * Check response + */ + nss_info("%px: Received response %d for type %d, interface %d", + nss_ctx, ncm->response, ncm->type, ncm->interface); + } + } + + /* + * Update the callback and app_data for NOTIFY messages, n2h sends all notify messages + * to the same callback/app_data. + */ + if (nnm->cm.response == NSS_CMN_RESPONSE_NOTIFY) { + /* + * Place holder for the user to create right call + * back and app data when response is NSS_CMN_RESPONSE_NOTIFY + */ + ncm->cb = (nss_ptr_t)nss_n2h_rd[nss_ctx->id].n2h_callback; + ncm->app_data = (nss_ptr_t)nss_n2h_rd[nss_ctx->id].app_data; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * Callback + */ + cb = (nss_n2h_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, nnm); +} + +/* + * nss_n2h_mitigation_cfg_callback() + * call back function for mitigation configuration + */ +static void nss_n2h_mitigation_cfg_callback(void *app_data, struct nss_n2h_msg *nnm) +{ + uint32_t core_num = (uint32_t)(nss_ptr_t)app_data; + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[core_num]; + + if (nnm->cm.response != NSS_CMN_RESPONSE_ACK) { + + /* + * Error, hence we are not updating the nss_n2h_mitigate_en + */ + nss_n2h_mitigationcp[core_num].response = NSS_FAILURE; + complete(&nss_n2h_mitigationcp[core_num].complete); + nss_warning("core%d: MITIGATION configuration failed : %d\n", core_num, nnm->cm.error); + return; + } + + nss_info("core%d: MITIGATION configuration succeeded: %d\n", core_num, nnm->cm.error); + + nss_ctx->n2h_mitigate_en = nnm->msg.mitigation_cfg.enable; + nss_n2h_mitigationcp[core_num].response = NSS_SUCCESS; + complete(&nss_n2h_mitigationcp[core_num].complete); +} + +/* + * nss_n2h_buf_cfg_callback() + * call back function for pbuf configuration + */ +static void 
nss_n2h_bufs_cfg_callback(void *app_data, struct nss_n2h_msg *nnm) +{ + uint32_t core_num = (uint32_t)(nss_ptr_t)app_data; + unsigned int allocated_sz; + + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[core_num]; + + if (nnm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_n2h_bufcp[core_num].response = NSS_FAILURE; + nss_warning("core%d: buf configuration failed : %d\n", core_num, nnm->cm.error); + goto done; + } + + nss_info("core%d: buf configuration succeeded: %d\n", core_num, nnm->cm.error); + + allocated_sz = nnm->msg.buf_pool.nss_buf_page_size * nnm->msg.buf_pool.nss_buf_num_pages; + nss_ctx->buf_sz_allocated += allocated_sz; + + nss_n2h_bufcp[core_num].response = NSS_SUCCESS; + +done: + complete(&nss_n2h_bufcp[core_num].complete); +} + +/* + * nss_n2h_payload_stats_callback() + * It gets called response to payload accounting. + */ +static void nss_n2h_payload_stats_callback(void *app_data, + struct nss_n2h_msg *nnm) +{ + uint32_t core_num = (uint32_t)(nss_ptr_t)app_data; + + if (nnm->cm.response != NSS_CMN_RESPONSE_ACK) { + struct nss_n2h_empty_pool_buf *nnepbcm; + nnepbcm = &nnm->msg.empty_pool_buf_cfg; + + nss_warning("%d: core empty pool buf set failure: %d\n", + core_num, nnm->cm.error); + nss_n2h_nepbcfgp[core_num].response = NSS_FAILURE; + complete(&nss_n2h_nepbcfgp[core_num].complete); + return; + } + + if (nnm->cm.type == NSS_TX_METADATA_TYPE_GET_WATER_MARK) { + nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.pool_size = + ntohl(nnm->msg.payload_info.pool_size); + nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.low_water = + ntohl(nnm->msg.payload_info.low_water); + nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.high_water = + ntohl(nnm->msg.payload_info.high_water); + } + + if (nnm->cm.type == NSS_TX_METADATA_TYPE_GET_PAGED_WATER_MARK) { + nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.pool_size = + ntohl(nnm->msg.paged_payload_info.pool_size); + 
nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.low_water = + ntohl(nnm->msg.paged_payload_info.low_water); + nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.high_water = + ntohl(nnm->msg.paged_payload_info.high_water); + } + + nss_n2h_nepbcfgp[core_num].response = NSS_SUCCESS; + complete(&nss_n2h_nepbcfgp[core_num].complete); +} + +/* + * nss_n2h_set_wifi_payloads_callback() + * call back function for response to wifi pool configuration + * + */ +static void nss_n2h_set_wifi_payloads_callback(void *app_data, + struct nss_n2h_msg *nnm) +{ + struct nss_ctx_instance *nss_ctx __maybe_unused = (struct nss_ctx_instance *)app_data; + if (nnm->cm.response != NSS_CMN_RESPONSE_ACK) { + + nss_n2h_wp.response = NSS_FAILURE; + complete(&nss_n2h_wp.complete); + nss_warning("%px: wifi pool configuration failed : %d\n", nss_ctx, + nnm->cm.error); + return; + } + + nss_info("%px: wifi payload configuration succeeded: %d\n", nss_ctx, + nnm->cm.error); + nss_n2h_wp.response = NSS_SUCCESS; + nss_n2h_wp.wifi_pool = ntohl(nnm->msg.wp.payloads); + complete(&nss_n2h_wp.complete); +} + +/* + * nss_n2h_get_payload_info() + * Gets Payload information. + */ +static int nss_n2h_get_payload_info(nss_ptr_t core_num, struct nss_n2h_msg *nnm, struct nss_n2h_payload_info *nnepbcm) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[core_num]; + nss_tx_status_t nss_tx_status; + int ret = NSS_FAILURE; + + /* + * Note that semaphore should be already held. + */ + + nss_tx_status = nss_n2h_tx_msg(nss_ctx, nnm); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: core %d nss_tx error errorn", nss_ctx, (int)core_num); + return NSS_FAILURE; + } + + /* + * Blocking call, wait till we get ACK for this msg. 
+ */ + ret = wait_for_completion_timeout(&nss_n2h_nepbcfgp[core_num].complete, + msecs_to_jiffies(NSS_CONN_CFG_TIMEOUT)); + if (ret == 0) { + nss_warning("%px: core %d waiting for ack timed out\n", nss_ctx, (int)core_num); + return NSS_FAILURE; + } + + if (NSS_FAILURE == nss_n2h_nepbcfgp[core_num].response) { + nss_warning("%px: core %d response returned failure\n", nss_ctx, (int)core_num); + return NSS_FAILURE; + } + + return NSS_SUCCESS; +} + +/* + * nss_n2h_get_default_payload_info() + * Gets the default payload information. + */ +static int nss_n2h_get_default_payload_info(nss_ptr_t core_num) +{ + struct nss_n2h_msg nnm; + + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_GET_WATER_MARK, + sizeof(struct nss_n2h_payload_info), + nss_n2h_payload_stats_callback, + (void *)core_num); + + return nss_n2h_get_payload_info(core_num, &nnm, + &nnm.msg.payload_info); +} + +/* + * nss_n2h_get_paged_payload_info() + * Gets the paged payload information. + */ +static int nss_n2h_get_paged_payload_info(nss_ptr_t core_num) +{ + struct nss_n2h_msg nnm; + + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_GET_PAGED_WATER_MARK, + sizeof(struct nss_n2h_payload_info), + nss_n2h_payload_stats_callback, + (void *)core_num); + + return nss_n2h_get_payload_info(core_num, &nnm, + &nnm.msg.paged_payload_info); +} + +/* + * nss_n2h_set_empty_buf_pool() + * Sets empty pool buffer + */ +static int nss_n2h_set_empty_buf_pool(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos, + nss_ptr_t core_num, int *new_val) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[core_num]; + struct nss_n2h_msg nnm; + struct nss_n2h_empty_pool_buf *nnepbcm; + nss_tx_status_t nss_tx_status; + int ret = NSS_FAILURE; + + /* + * Acquiring semaphore + */ + down(&nss_n2h_nepbcfgp[core_num].sem); + + /* + * Take snap shot of current value + */ + 
nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.pool_size = *new_val; + + if (!write) { + ret = nss_n2h_get_default_payload_info(core_num); + *new_val = nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.pool_size; + if (ret == NSS_FAILURE) { + up(&nss_n2h_nepbcfgp[core_num].sem); + return -EBUSY; + } + + up(&nss_n2h_nepbcfgp[core_num].sem); + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + return ret; + } + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + up(&nss_n2h_nepbcfgp[core_num].sem); + return ret; + } + + if ((*new_val < NSS_N2H_MIN_EMPTY_POOL_BUF_SZ)) { + nss_warning("%px: core %d setting %d < min number of buffer", + nss_ctx, (int)core_num, *new_val); + goto failure; + } + + nss_info("%px: core %d number of empty pool buffer is : %d\n", + nss_ctx, (int)core_num, *new_val); + + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_N2H_EMPTY_POOL_BUF_CFG, + sizeof(struct nss_n2h_empty_pool_buf), + nss_n2h_payload_stats_callback, + (nss_ptr_t *)core_num); + + nnepbcm = &nnm.msg.empty_pool_buf_cfg; + nnepbcm->pool_size = htonl(*new_val); + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: core %d nss_tx error empty pool buffer: %d\n", + nss_ctx, (int)core_num, *new_val); + goto failure; + } + + /* + * Blocking call, wait till we get ACK for this msg. 
+ */ + ret = wait_for_completion_timeout(&nss_n2h_nepbcfgp[core_num].complete, + msecs_to_jiffies(NSS_CONN_CFG_TIMEOUT)); + if (ret == 0) { + nss_warning("%px: core %d Waiting for ack timed out\n", nss_ctx, (int)core_num); + goto failure; + } + + /* + * ACK/NACK received from NSS FW + * If ACK: Callback function will update nss_n2h_empty_pool_buf with + * nss_n2h_nepbcfgp.num_conn_valid, which holds the user input + */ + if (NSS_FAILURE == nss_n2h_nepbcfgp[core_num].response) { + goto failure; + } + + up(&nss_n2h_nepbcfgp[core_num].sem); + return 0; + +failure: + /* + * Restore the current_value to its previous state + */ + *new_val = nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.pool_size; + up(&nss_n2h_nepbcfgp[core_num].sem); + return NSS_FAILURE; +} + +/* + * nss_n2h_set_empty_paged_pool_buf() + * Sets empty paged pool buffer + */ +static int nss_n2h_set_empty_paged_pool_buf(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos, + nss_ptr_t core_num, int *new_val) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[core_num]; + struct nss_n2h_msg nnm; + struct nss_n2h_empty_pool_buf *nneppbcm; + nss_tx_status_t nss_tx_status; + int ret = NSS_FAILURE; + + /* + * Acquiring semaphore + */ + down(&nss_n2h_nepbcfgp[core_num].sem); + + /* + * Take snap shot of current value + */ + nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.pool_size = *new_val; + + if (!write) { + ret = nss_n2h_get_paged_payload_info(core_num); + *new_val = nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.pool_size; + if (ret == NSS_FAILURE) { + up(&nss_n2h_nepbcfgp[core_num].sem); + return -EBUSY; + } + + up(&nss_n2h_nepbcfgp[core_num].sem); + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + return ret; + } + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + up(&nss_n2h_nepbcfgp[core_num].sem); + return ret; + } + + if ((*new_val < NSS_N2H_MIN_EMPTY_POOL_BUF_SZ)) { + 
nss_warning("%px: core %d setting %d < min number of buffer", + nss_ctx, (int)core_num, *new_val); + goto failure; + } + + nss_info("%px: core %d number of empty paged pool buffer is : %d\n", + nss_ctx, (int)core_num, *new_val); + + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_N2H_EMPTY_PAGED_POOL_BUF_CFG, + sizeof(struct nss_n2h_empty_pool_buf), + nss_n2h_payload_stats_callback, + (nss_ptr_t *)core_num); + + nneppbcm = &nnm.msg.empty_pool_buf_cfg; + nneppbcm->pool_size = htonl(*new_val); + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: core %d nss_tx error empty paged pool buffer: %d\n", + nss_ctx, (int)core_num, *new_val); + goto failure; + } + + /* + * Blocking call, wait till we get ACK for this msg. + */ + ret = wait_for_completion_timeout(&nss_n2h_nepbcfgp[core_num].complete, + msecs_to_jiffies(NSS_CONN_CFG_TIMEOUT)); + if (ret == 0) { + nss_warning("%px: core %d Waiting for ack timed out\n", nss_ctx, (int)core_num); + goto failure; + } + + /* + * ACK/NACK received from NSS FW + * If ACK: Callback function will update nss_n2h_empty_pool_buf with + * nss_n2h_nepbcfgp.num_conn_valid, which holds the user input + */ + if (NSS_FAILURE == nss_n2h_nepbcfgp[core_num].response) { + goto failure; + } + + up(&nss_n2h_nepbcfgp[core_num].sem); + return 0; + +failure: + /* + * Restore the current_value to its previous state + */ + *new_val = nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.pool_size; + up(&nss_n2h_nepbcfgp[core_num].sem); + return NSS_FAILURE; +} + +/* + * nss_n2h_set_water_mark() + * Sets water mark for N2H SOS + */ +static int nss_n2h_set_water_mark(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos, + uint32_t core_num, int *low, int *high) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[core_num]; + struct nss_n2h_msg nnm; + struct nss_n2h_water_mark *wm; + nss_tx_status_t 
nss_tx_status; + int ret = NSS_FAILURE; + + /* + * Acquiring semaphore + */ + down(&nss_n2h_nepbcfgp[core_num].sem); + + /* + * Take snap shot of current value + */ + nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.low_water = *low; + nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.high_water = *high; + + if (!write || *low == -1 || *high == -1) { + ret = nss_n2h_get_default_payload_info(core_num); + if (ret == NSS_FAILURE) { + up(&nss_n2h_nepbcfgp[core_num].sem); + return -EBUSY; + } + + *low = nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.low_water; + *high = nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.high_water; + } + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (!write || ret) { + up(&nss_n2h_nepbcfgp[core_num].sem); + return ret; + } + + if ((*low < NSS_N2H_MIN_EMPTY_POOL_BUF_SZ) || + (*high < NSS_N2H_MIN_EMPTY_POOL_BUF_SZ)) { + nss_warning("%px: core %d setting %d, %d < min number of buffer", + nss_ctx, core_num, *low, *high); + goto failure; + } + + if ((*low > NSS_N2H_MAX_EMPTY_POOL_BUF_SZ) || + (*high > NSS_N2H_MAX_EMPTY_POOL_BUF_SZ)) { + nss_warning("%px: core %d setting %d, %d is > upper limit", + nss_ctx, core_num, *low, *high); + goto failure; + } + + if (*low > *high) { + nss_warning("%px: core %d setting low %d is more than high %d", + nss_ctx, core_num, *low, *high); + goto failure; + } + + nss_info("%px: core %d number of low : %d and high : %d\n", + nss_ctx, core_num, *low, *high); + + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_SET_WATER_MARK, + sizeof(struct nss_n2h_water_mark), + nss_n2h_payload_stats_callback, + (void *)(nss_ptr_t)core_num); + + wm = &nnm.msg.wm; + wm->low_water = htonl(*low); + wm->high_water = htonl(*high); + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: core %d nss_tx error setting : %d, %d\n", + nss_ctx, core_num, *low, *high); + goto failure; + } + + /* + * Blocking call, wait till we get ACK for this msg. 
+ */ + ret = wait_for_completion_timeout(&nss_n2h_nepbcfgp[core_num].complete, + msecs_to_jiffies(NSS_CONN_CFG_TIMEOUT)); + if (ret == 0) { + nss_warning("%px: core %d Waiting for ack timed out\n", nss_ctx, + core_num); + goto failure; + } + + /* + * ACK/NACK received from NSS FW + */ + if (NSS_FAILURE == nss_n2h_nepbcfgp[core_num].response) + goto failure; + + up(&nss_n2h_nepbcfgp[core_num].sem); + return NSS_SUCCESS; + +failure: + /* + * Restore the current_value to its previous state + */ + *low = nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.low_water; + *high = nss_n2h_nepbcfgp[core_num].empty_buf_pool_info.high_water; + up(&nss_n2h_nepbcfgp[core_num].sem); + return -EINVAL; +} + +/* + * nss_n2h_set_paged_water_mark() + * Sets water mark for paged pool N2H SOS + */ +static int nss_n2h_set_paged_water_mark(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos, + uint32_t core_num, int *low, int *high) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[core_num]; + struct nss_n2h_msg nnm; + struct nss_n2h_water_mark *pwm; + nss_tx_status_t nss_tx_status; + int ret = NSS_FAILURE; + + /* + * Acquiring semaphore + */ + down(&nss_n2h_nepbcfgp[core_num].sem); + + /* + * Take snap shot of current value + */ + nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.low_water = *low; + nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.high_water = *high; + + if (!write || *low == -1 || *high == -1) { + ret = nss_n2h_get_paged_payload_info(core_num); + if (ret == NSS_FAILURE) { + up(&nss_n2h_nepbcfgp[core_num].sem); + return -EBUSY; + } + + *low = nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.low_water; + *high = nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.high_water; + } + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (!write || ret) { + up(&nss_n2h_nepbcfgp[core_num].sem); + return ret; + } + + if ((*low < NSS_N2H_MIN_EMPTY_POOL_BUF_SZ) || + (*high < 
NSS_N2H_MIN_EMPTY_POOL_BUF_SZ)) { + nss_warning("%px: core %d setting %d, %d < min number of buffer", + nss_ctx, core_num, *low, *high); + goto failure; + } + + if ((*low > NSS_N2H_MAX_EMPTY_POOL_BUF_SZ) || + (*high > NSS_N2H_MAX_EMPTY_POOL_BUF_SZ)) { + nss_warning("%px: core %d setting %d, %d is > upper limit", + nss_ctx, core_num, *low, *high); + goto failure; + } + + if (*low > *high) { + nss_warning("%px: core %d setting low %d is more than high %d", + nss_ctx, core_num, *low, *high); + goto failure; + } + + nss_info("%px: core %d number of low : %d and high : %d\n", + nss_ctx, core_num, *low, *high); + + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_SET_PAGED_WATER_MARK, + sizeof(struct nss_n2h_water_mark), + nss_n2h_payload_stats_callback, + (void *)(nss_ptr_t)core_num); + + pwm = &nnm.msg.wm_paged; + pwm->low_water = htonl(*low); + pwm->high_water = htonl(*high); + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: core %d nss_tx error setting : %d, %d\n", + nss_ctx, core_num, *low, *high); + goto failure; + } + + /* + * Blocking call, wait till we get ACK for this msg. 
+ */ + ret = wait_for_completion_timeout(&nss_n2h_nepbcfgp[core_num].complete, + msecs_to_jiffies(NSS_CONN_CFG_TIMEOUT)); + if (ret == 0) { + nss_warning("%px: core %d Waiting for ack timed out\n", nss_ctx, + core_num); + goto failure; + } + + /* + * ACK/NACK received from NSS FW + */ + if (NSS_FAILURE == nss_n2h_nepbcfgp[core_num].response) + goto failure; + + up(&nss_n2h_nepbcfgp[core_num].sem); + return NSS_SUCCESS; + +failure: + /* + * Restore the current_value to its previous state + */ + *low = nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.low_water; + *high = nss_n2h_nepbcfgp[core_num].empty_paged_buf_pool_info.high_water; + up(&nss_n2h_nepbcfgp[core_num].sem); + return -EINVAL; +} + +/* + * nss_n2h_cfg_wifi_pool() + * Sets number of wifi payloads to adjust high water mark for N2H SoS + */ +static int nss_n2h_cfg_wifi_pool(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos, + int *payloads) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[0]; + struct nss_n2h_msg nnm; + struct nss_n2h_wifi_payloads *wp; + nss_tx_status_t nss_tx_status; + int ret = NSS_FAILURE; + + /* + * Acquiring semaphore + */ + down(&nss_n2h_wp.sem); + + if (!write) { + *payloads = nss_n2h_wp.wifi_pool; + + up(&nss_n2h_wp.sem); + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + return ret; + } + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + up(&nss_n2h_wp.sem); + return ret; + } + + /* + * If payloads parameter is not set, we do + * nothing. 
+ */ + if (*payloads == -1) + goto failure; + + if ((*payloads < NSS_N2H_MIN_EMPTY_POOL_BUF_SZ)) { + nss_warning("%px: wifi setting %d < min number of buffer", + nss_ctx, *payloads); + goto failure; + } + + if ((*payloads > NSS_N2H_MAX_EMPTY_POOL_BUF_SZ)) { + nss_warning("%px: wifi setting %d > max number of buffer", + nss_ctx, *payloads); + goto failure; + } + + nss_info("%px: wifi payloads : %d\n", + nss_ctx, *payloads); + + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_N2H_WIFI_POOL_BUF_CFG, + sizeof(struct nss_n2h_wifi_payloads), + nss_n2h_set_wifi_payloads_callback, + (void *)nss_ctx); + + wp = &nnm.msg.wp; + wp->payloads = htonl(*payloads); + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: wifi setting %d nss_tx error", + nss_ctx, *payloads); + goto failure; + } + + /* + * Blocking call, wait till we get ACK for this msg. + */ + ret = wait_for_completion_timeout(&nss_n2h_wp.complete, + msecs_to_jiffies(NSS_CONN_CFG_TIMEOUT)); + if (ret == 0) { + nss_warning("%px: Waiting for ack timed out\n", nss_ctx); + goto failure; + } + + /* + * ACK/NACK received from NSS FW + */ + if (NSS_FAILURE == nss_n2h_wp.response) + goto failure; + + up(&nss_n2h_wp.sem); + return NSS_SUCCESS; + +failure: + up(&nss_n2h_wp.sem); + return -EINVAL; +} + +/* + * nss_n2h_empty_pool_buf_core1_handler() + * Sets the number of empty buffer for core 1 + */ +static int nss_n2h_empty_pool_buf_cfg_core1_handler(struct ctl_table *ctl, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return nss_n2h_set_empty_buf_pool(ctl, write, buffer, lenp, ppos, + NSS_CORE_1, &nss_n2h_empty_pool_buf_cfg[NSS_CORE_1]); +} + +/* + * nss_n2h_empty_pool_buf_core0_handler() + * Sets the number of empty buffer for core 0 + */ +static int nss_n2h_empty_pool_buf_cfg_core0_handler(struct ctl_table *ctl, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return nss_n2h_set_empty_buf_pool(ctl, write, 
buffer, lenp, ppos, + NSS_CORE_0, &nss_n2h_empty_pool_buf_cfg[NSS_CORE_0]); +} + +/* + * nss_n2h_empty_paged_pool_buf_cfg_core1_handler() + * Sets the number of empty paged buffer for core 1 + */ +static int nss_n2h_empty_paged_pool_buf_cfg_core1_handler(struct ctl_table *ctl, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return nss_n2h_set_empty_paged_pool_buf(ctl, write, buffer, lenp, ppos, + NSS_CORE_1, &nss_n2h_empty_paged_pool_buf_cfg[NSS_CORE_1]); +} + +/* + * nss_n2h_empty_paged_pool_buf_cfg_core0_handler() + * Sets the number of empty paged buffer for core 0 + */ +static int nss_n2h_empty_paged_pool_buf_cfg_core0_handler(struct ctl_table *ctl, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return nss_n2h_set_empty_paged_pool_buf(ctl, write, buffer, lenp, ppos, + NSS_CORE_0, &nss_n2h_empty_paged_pool_buf_cfg[NSS_CORE_0]); +} + +/* + * nss_n2h_water_mark_core1_handler() + * Sets water mark for core 1 + */ +static int nss_n2h_water_mark_core1_handler(struct ctl_table *ctl, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return nss_n2h_set_water_mark(ctl, write, buffer, lenp, ppos, + NSS_CORE_1, &nss_n2h_water_mark[NSS_CORE_1][0], + &nss_n2h_water_mark[NSS_CORE_1][1]); +} + +/* + * nss_n2h_water_mark_core0_handler() + * Sets water mark for core 0 + */ +static int nss_n2h_water_mark_core0_handler(struct ctl_table *ctl, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return nss_n2h_set_water_mark(ctl, write, buffer, lenp, ppos, + NSS_CORE_0, &nss_n2h_water_mark[NSS_CORE_0][0], + &nss_n2h_water_mark[NSS_CORE_0][1]); +} + +/* + * nss_n2h_paged_water_mark_core1_handler() + * Sets paged water mark for core 1 + */ +static int nss_n2h_paged_water_mark_core1_handler(struct ctl_table *ctl, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return nss_n2h_set_paged_water_mark(ctl, write, buffer, lenp, ppos, + NSS_CORE_1, &nss_n2h_paged_water_mark[NSS_CORE_1][0], + 
&nss_n2h_paged_water_mark[NSS_CORE_1][1]); +} + +/* + * nss_n2h_paged_water_mark_core0_handler() + * Sets paged water mark for core 0 + */ +static int nss_n2h_paged_water_mark_core0_handler(struct ctl_table *ctl, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return nss_n2h_set_paged_water_mark(ctl, write, buffer, lenp, ppos, + NSS_CORE_0, &nss_n2h_paged_water_mark[NSS_CORE_0][0], + &nss_n2h_paged_water_mark[NSS_CORE_0][1]); +} + +/* + * nss_n2h_wifi_payloads_handler() + * Sets number of wifi payloads + */ +static int nss_n2h_wifi_payloads_handler(struct ctl_table *ctl, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return nss_n2h_cfg_wifi_pool(ctl, write, buffer, lenp, ppos, + &nss_n2h_wifi_pool_buf_cfg); +} + +/* + * nss_n2h_update_queue_config_callback() + * Callback to handle the completion of queue config command + */ +static void nss_n2h_update_queue_config_callback(void *app_data, struct nss_n2h_msg *nim) +{ + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("n2h Error response %d\n", nim->cm.response); + nss_n2h_q_cfg_pvt.response = NSS_TX_FAILURE; + } else { + nss_n2h_q_cfg_pvt.response = NSS_TX_SUCCESS; + } + + complete(&nss_n2h_q_cfg_pvt.complete); +} + +/* + * nss_n2h_update_queue_config_async() + * Asynchronous call to send pnode queue configuration. 
+ */ +nss_tx_status_t nss_n2h_update_queue_config_async(struct nss_ctx_instance *nss_ctx, bool mq_en, uint16_t *qlimits) +{ + + struct nss_n2h_msg nnm; + struct nss_n2h_pnode_queue_config *cfg; + nss_tx_status_t status; + int i; + + if (!mq_en) { + return NSS_TX_SUCCESS; + } + + /* + * MQ mode doesnot make any sense if number of priority queues in NSS + * is 1 + */ + if (NSS_MAX_NUM_PRI <= 1) { + return NSS_TX_SUCCESS; + } + + memset(&nnm, 0, sizeof(struct nss_n2h_msg)); + + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_N2H_SET_PNODE_QUEUE_CFG, + sizeof(struct nss_n2h_pnode_queue_config), NULL, 0); + + cfg = &nnm.msg.pn_q_cfg; + + /* + * Update limits + */ + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + cfg->qlimits[i] = qlimits[i]; + } + cfg->mq_en = true; + + status = nss_n2h_tx_msg(nss_ctx, &nnm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_tx error to send pnode queue config\n", nss_ctx); + return status; + } + + return NSS_TX_SUCCESS; +} +EXPORT_SYMBOL(nss_n2h_update_queue_config_async); + +/* + * nss_n2h_update_queue_config_sync() + * Synchronous call to send pnode queue configuration. 
+ */ +nss_tx_status_t nss_n2h_update_queue_config_sync(struct nss_ctx_instance *nss_ctx, bool mq_en, uint16_t *qlimits) +{ + + struct nss_n2h_msg nnm; + struct nss_n2h_pnode_queue_config *cfg; + nss_tx_status_t status; + int ret, i; + + if (!mq_en) { + return NSS_TX_SUCCESS; + } + + /* + * MQ mode doesnot make any sense if number of priority queues in NSS + * is 1 + */ + if (NSS_MAX_NUM_PRI <= 1) { + return NSS_TX_SUCCESS; + } + + memset(&nnm, 0, sizeof(struct nss_n2h_msg)); + + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_N2H_SET_PNODE_QUEUE_CFG, + sizeof(struct nss_n2h_pnode_queue_config), nss_n2h_update_queue_config_callback, 0); + + cfg = &nnm.msg.pn_q_cfg; + + /* + * Update limits + */ + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + cfg->qlimits[i] = qlimits[i]; + } + cfg->mq_en = true; + + down(&nss_n2h_q_cfg_pvt.sem); + + status = nss_n2h_tx_msg(nss_ctx, &nnm); + + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: n2h_tx_msg failed\n", nss_ctx); + up(&nss_n2h_q_cfg_pvt.sem); + return status; + } + ret = wait_for_completion_timeout(&nss_n2h_q_cfg_pvt.complete, msecs_to_jiffies(NSS_N2H_TX_TIMEOUT)); + + if (!ret) { + nss_warning("%px: Timeout expired for pnode queue config sync message\n", nss_ctx); + nss_n2h_q_cfg_pvt.response = NSS_TX_FAILURE; + } + + status = nss_n2h_q_cfg_pvt.response; + up(&nss_n2h_q_cfg_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_n2h_update_queue_config_sync); + +/* + * nss_n2h_mitigation_cfg() + * Send Message to NSS to disable MITIGATION. 
+ */ +static nss_tx_status_t nss_n2h_mitigation_cfg(struct nss_ctx_instance *nss_ctx, int enable_mitigation, nss_core_id_t core_num) +{ + struct nss_n2h_msg nnm; + struct nss_n2h_mitigation *mitigation_cfg; + nss_tx_status_t nss_tx_status; + int ret; + + nss_assert(core_num < NSS_CORE_MAX); + + down(&nss_n2h_mitigationcp[core_num].sem); + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, NSS_TX_METADATA_TYPE_N2H_MITIGATION_CFG, + sizeof(struct nss_n2h_mitigation), + nss_n2h_mitigation_cfg_callback, + (void *)core_num); + + mitigation_cfg = &nnm.msg.mitigation_cfg; + mitigation_cfg->enable = enable_mitigation; + + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_tx error setting mitigation\n", nss_ctx); + goto failure; + } + + /* + * Blocking call, wait till we get ACK for this msg. + */ + ret = wait_for_completion_timeout(&nss_n2h_mitigationcp[core_num].complete, msecs_to_jiffies(NSS_CONN_CFG_TIMEOUT)); + if (ret == 0) { + nss_warning("%px: Waiting for ack timed out\n", nss_ctx); + goto failure; + } + + /* + * ACK/NACK received from NSS FW + */ + if (NSS_FAILURE == nss_n2h_mitigationcp[core_num].response) { + goto failure; + } + + up(&nss_n2h_mitigationcp[core_num].sem); + return NSS_SUCCESS; + +failure: + up(&nss_n2h_mitigationcp[core_num].sem); + return NSS_FAILURE; +} + +static inline void nss_n2h_buf_pool_free(struct nss_n2h_buf_pool *buf_pool) +{ + int page_count; + for (page_count = 0; page_count < buf_pool->nss_buf_num_pages; page_count++) { + kfree((void *)buf_pool->nss_buf_pool_vaddr[page_count]); + } +} + +/* + * nss_n2h_buf_cfg() + * Send Message to NSS to enable pbufs. 
+ */ +static nss_tx_status_t nss_n2h_buf_pool_cfg(struct nss_ctx_instance *nss_ctx, + int buf_pool_size, nss_core_id_t core_num) +{ + static struct nss_n2h_msg nnm; + struct nss_n2h_buf_pool *buf_pool; + nss_tx_status_t nss_tx_status; + int ret; + int page_count; + int num_pages = ALIGN(buf_pool_size, PAGE_SIZE)/PAGE_SIZE; + + nss_assert(core_num < NSS_CORE_MAX); + + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, NSS_METADATA_TYPE_N2H_ADD_BUF_POOL, + sizeof(struct nss_n2h_buf_pool), + nss_n2h_bufs_cfg_callback, + (void *)core_num); + + do { + + down(&nss_n2h_bufcp[core_num].sem); + + buf_pool = &nnm.msg.buf_pool; + buf_pool->nss_buf_page_size = PAGE_SIZE; + + for (page_count = 0; page_count < MAX_PAGES_PER_MSG && num_pages; page_count++, num_pages--) { + void *kern_addr = kzalloc(PAGE_SIZE, GFP_ATOMIC); + if (!kern_addr) { + BUG_ON(!page_count); + break; + } + + kmemleak_not_leak(kern_addr); + buf_pool->nss_buf_pool_vaddr[page_count] = (nss_ptr_t)kern_addr; + buf_pool->nss_buf_pool_addr[page_count] = dma_map_single(nss_ctx->dev, kern_addr, PAGE_SIZE, DMA_TO_DEVICE); + } + + buf_pool->nss_buf_num_pages = page_count; + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + if (nss_tx_status != NSS_TX_SUCCESS) { + + nss_n2h_buf_pool_free(buf_pool); + nss_warning("%px: nss_tx error setting pbuf\n", nss_ctx); + goto failure; + } + + /* + * Blocking call, wait till we get ACK for this msg. 
+ */ + ret = wait_for_completion_timeout(&nss_n2h_bufcp[core_num].complete, msecs_to_jiffies(NSS_CONN_CFG_TIMEOUT)); + if (ret == 0) { + nss_warning("%px: Waiting for ack timed out\n", nss_ctx); + goto failure; + } + + /* + * ACK/NACK received from NSS FW + */ + if (NSS_FAILURE == nss_n2h_bufcp[core_num].response) { + + nss_n2h_buf_pool_free(buf_pool); + goto failure; + } + + up(&nss_n2h_bufcp[core_num].sem); + } while(num_pages); + + return NSS_SUCCESS; +failure: + up(&nss_n2h_bufcp[core_num].sem); + return NSS_FAILURE; +} + +/* + * nss_mitigation_handler() + * Enable NSS MITIGATION + */ +static int nss_n2h_mitigationcfg_core0_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[NSS_CORE_0]; + int ret; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + return ret; + } + + /* + * It's a read operation + */ + if (!write) { + return ret; + } + + if (!nss_n2h_core0_mitigation_cfg) { + printk(KERN_INFO "Disabling NSS MITIGATION\n"); + nss_n2h_mitigation_cfg(nss_ctx, 0, NSS_CORE_0); + return 0; + } + printk(KERN_INFO "Invalid input value.Valid value is 0, Runtime re-enabling not supported\n"); + return -EINVAL; +} + +/* + * nss_mitigation_handler() + * Enable NSS MITIGATION + */ +static int nss_n2h_mitigationcfg_core1_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[NSS_CORE_1]; + int ret; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + return ret; + } + + /* + * It's a read operation + */ + if (!write) { + return ret; + } + + if (!nss_n2h_core1_mitigation_cfg) { + printk(KERN_INFO "Disabling NSS MITIGATION\n"); + nss_n2h_mitigation_cfg(nss_ctx, 0, NSS_CORE_1); + return 0; + } + printk(KERN_INFO "Invalid input value.Valid value is 0, 
Runtime re-enabling not supported\n"); + return -EINVAL; +} + +/* + * nss_buf_handler() + * Add extra NSS bufs from host memory + */ +static int nss_n2h_buf_cfg_core0_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[NSS_CORE_0]; + int ret; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + return ret; + } + + /* + * It's a read operation + */ + if (!write) { + return ret; + } + + if (nss_ctx->buf_sz_allocated) { + nss_n2h_core0_add_buf_pool_size = nss_ctx->buf_sz_allocated; + return -EPERM; + } + + if ((nss_n2h_core0_add_buf_pool_size >= 1) && (nss_n2h_core0_add_buf_pool_size <= NSS_N2H_MAX_BUF_POOL_SIZE)) { + printk(KERN_INFO "configuring additional NSS pbufs\n"); + ret = nss_n2h_buf_pool_cfg(nss_ctx, nss_n2h_core0_add_buf_pool_size, NSS_CORE_0); + nss_n2h_core0_add_buf_pool_size = nss_ctx->buf_sz_allocated; + printk(KERN_INFO "additional pbufs of size %d got added to NSS\n", nss_ctx->buf_sz_allocated); + return ret; + } + + printk(KERN_INFO "Invalid input value. 
should be greater than 1 and less than %d\n", NSS_N2H_MAX_BUF_POOL_SIZE); + return -EINVAL; +} + +/* + * nss_n2h_buf_handler() + * Add extra NSS bufs from host memory + */ +static int nss_n2h_buf_cfg_core1_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[NSS_CORE_1]; + int ret; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret) { + return ret; + } + + /* + * It's a read operation + */ + if (!write) { + return ret; + } + + if (nss_ctx->buf_sz_allocated) { + nss_n2h_core1_add_buf_pool_size = nss_ctx->buf_sz_allocated; + return -EPERM; + } + + if ((nss_n2h_core1_add_buf_pool_size >= 1) && (nss_n2h_core1_add_buf_pool_size <= NSS_N2H_MAX_BUF_POOL_SIZE)) { + printk(KERN_INFO "configuring additional NSS pbufs\n"); + ret = nss_n2h_buf_pool_cfg(nss_ctx, nss_n2h_core1_add_buf_pool_size, NSS_CORE_1); + nss_n2h_core1_add_buf_pool_size = nss_ctx->buf_sz_allocated; + printk(KERN_INFO "additional pbufs of size %d got added to NSS\n", nss_ctx->buf_sz_allocated); + return ret; + } + + printk(KERN_INFO "Invalid input value. should be greater than 1 and less than %d\n", NSS_N2H_MAX_BUF_POOL_SIZE); + return -EINVAL; +} + +/* + * nss_n2h_queue_limit_callback() + * Callback to handle the completion of queue limit command. + */ +static void nss_n2h_queue_limit_callback(void *app_data, struct nss_n2h_msg *nim) +{ + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("n2h error response %d\n", nim->cm.response); + } + + nss_n2h_q_lim_pvt.response = nim->cm.response; + complete(&nss_n2h_q_lim_pvt.complete); +} + +/* + * nss_n2h_set_queue_limit_sync() + * Sets the n2h queue size limit synchronously. 
+ */ +static int nss_n2h_set_queue_limit_sync(struct ctl_table *ctl, int write, void __user *buffer, + size_t *lenp, loff_t *ppos, uint32_t core_id) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[core_id]; + struct nss_n2h_msg nim; + struct nss_n2h_queue_limit_config *nnqlc = NULL; + int ret, current_val; + nss_tx_status_t nss_tx_status; + + /* + * Take a snap shot of current value + */ + current_val = nss_n2h_queue_limit[core_id]; + + /* + * Write the variable with user input + */ + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret || (!write)) { + return ret; + } + + /* + * We dont allow shortening of the queue size at run-time + */ + if (nss_n2h_queue_limit[core_id] < current_val) { + nss_warning("%px: New queue limit %d less than previous value %d. Cant allow shortening\n", + nss_ctx, nss_n2h_queue_limit[core_id], current_val); + nss_n2h_queue_limit[core_id] = current_val; + return NSS_TX_FAILURE; + } + + memset(&nim, 0, sizeof(struct nss_n2h_msg)); + nss_n2h_msg_init(&nim, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_N2H_QUEUE_LIMIT_CFG, + sizeof(struct nss_n2h_queue_limit_config), nss_n2h_queue_limit_callback, NULL); + + nnqlc = &nim.msg.ql_cfg; + nnqlc->qlimit = nss_n2h_queue_limit[core_id]; + + /* + * Send synchronous message to firmware + */ + down(&nss_n2h_q_lim_pvt.sem); + + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nim); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: n2h queue limit message send failed\n", nss_ctx); + nss_n2h_queue_limit[core_id] = current_val; + up(&nss_n2h_q_lim_pvt.sem); + return nss_tx_status; + } + + ret = wait_for_completion_timeout(&nss_n2h_q_lim_pvt.complete, msecs_to_jiffies(NSS_N2H_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: Timeout expired for queue limit sync message\n", nss_ctx); + nss_n2h_queue_limit[core_id] = current_val; + up(&nss_n2h_q_lim_pvt.sem); + return NSS_TX_FAILURE; + } + + /* + * If setting the queue limit failed, reset the 
value to original value + */ + if (nss_n2h_q_lim_pvt.response != NSS_CMN_RESPONSE_ACK) { + nss_n2h_queue_limit[core_id] = current_val; + } + + up(&nss_n2h_q_lim_pvt.sem); + return NSS_TX_SUCCESS; +} + +/* + * nss_n2h_queue_limit_core0_handler() + * Sets the n2h queue size limit for core0 + */ +static int nss_n2h_queue_limit_core0_handler(struct ctl_table *ctl, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return nss_n2h_set_queue_limit_sync(ctl, write, buffer, lenp, ppos, + NSS_CORE_0); +} + +/* + * nss_n2h_queue_limit_core1_handler() + * Sets the n2h queue size limit for core1 + */ +static int nss_n2h_queue_limit_core1_handler(struct ctl_table *ctl, + int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + return nss_n2h_set_queue_limit_sync(ctl, write, buffer, lenp, ppos, + NSS_CORE_1); +} + +/* + * nss_n2h_host_bp_cfg_callback() + * Callback function for back pressure configuration. + */ +static void nss_n2h_host_bp_cfg_callback(void *app_data, struct nss_n2h_msg *nnm) +{ + struct nss_ctx_instance *nss_ctx __maybe_unused = (struct nss_ctx_instance *)app_data; + if (nnm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_n2h_host_bp_cfg_pvt.response = NSS_FAILURE; + complete(&nss_n2h_host_bp_cfg_pvt.complete); + nss_warning("%px: n2h back pressure configuration failed : %d\n", nss_ctx, nnm->cm.error); + return; + } + + nss_info("%px: n2h back pressure configuration succeeded: %d\n", nss_ctx, nnm->cm.error); + nss_n2h_host_bp_cfg_pvt.response = NSS_SUCCESS; + complete(&nss_n2h_host_bp_cfg_pvt.complete); +} + +/* + * nss_n2h_host_bp_cfg() + * Send Message to n2h to enable back pressure. 
+ */ +static nss_tx_status_t nss_n2h_host_bp_cfg_sync(struct nss_ctx_instance *nss_ctx, int enable_bp) +{ + struct nss_n2h_msg nnm; + nss_tx_status_t nss_tx_status; + int ret; + + down(&nss_n2h_host_bp_cfg_pvt.sem); + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, NSS_TX_METADATA_TYPE_N2H_HOST_BACK_PRESSURE_CFG, + sizeof(struct nss_n2h_host_back_pressure), + nss_n2h_host_bp_cfg_callback, + (void *)nss_ctx); + + nnm.msg.host_bp_cfg.enable = enable_bp; + + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_tx error setting back pressure\n", nss_ctx); + up(&nss_n2h_host_bp_cfg_pvt.sem); + return NSS_FAILURE; + } + + /* + * Blocking call, wait till we get ACK for this msg. + */ + ret = wait_for_completion_timeout(&nss_n2h_host_bp_cfg_pvt.complete, msecs_to_jiffies(NSS_CONN_CFG_TIMEOUT)); + if (ret == 0) { + nss_warning("%px: Waiting for ack timed out\n", nss_ctx); + up(&nss_n2h_host_bp_cfg_pvt.sem); + return NSS_FAILURE; + } + + /* + * Response received from NSS FW + */ + if (nss_n2h_host_bp_cfg_pvt.response == NSS_FAILURE) { + up(&nss_n2h_host_bp_cfg_pvt.sem); + return NSS_FAILURE; + } + + up(&nss_n2h_host_bp_cfg_pvt.sem); + return NSS_SUCCESS; +} + +/* + * nss_n2h_host_bp_cfg_handler() + * Enable n2h back pressure. + */ +static int nss_n2h_host_bp_cfg_handler(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos, uint32_t core_id) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[core_id]; + int ret, ret_bp, current_state; + current_state = nss_n2h_host_bp_config[core_id]; + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + if (ret != NSS_SUCCESS) { + return ret; + } + + if (!write) { + return ret; + } + + if ((nss_n2h_host_bp_config[core_id] != 0) && (nss_n2h_host_bp_config[core_id] != 1)) { + nss_info_always("Invalid input value. 
Valid values are 0 and 1\n"); + nss_n2h_host_bp_config[core_id] = current_state; + return ret; + } + + nss_info("Configuring n2h back pressure\n"); + ret_bp = nss_n2h_host_bp_cfg_sync(nss_ctx, nss_n2h_host_bp_config[core_id]); + + if (ret_bp != NSS_SUCCESS) { + nss_warning("%px: n2h back pressure config failed\n", nss_ctx); + nss_n2h_host_bp_config[core_id] = current_state; + } + + return ret_bp; +} + +/* + * nss_n2h_host_bp_cfg_core0_handler() + * Enable n2h back pressure in core 0. + */ +static int nss_n2h_host_bp_cfg_core0_handler(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + return nss_n2h_host_bp_cfg_handler(ctl, write, buffer, lenp, ppos, NSS_CORE_0); +} + +/* + * nss_n2h_host_bp_cfg_core1_handler() + * Enable n2h back pressure in core 1. + */ +static int nss_n2h_host_bp_cfg_core1_handler(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + return nss_n2h_host_bp_cfg_handler(ctl, write, buffer, lenp, ppos, NSS_CORE_1); +} + +static struct ctl_table nss_n2h_table_single_core[] = { + { + .procname = "n2h_empty_pool_buf_core0", + .data = &nss_n2h_empty_pool_buf_cfg[NSS_CORE_0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_empty_pool_buf_cfg_core0_handler, + }, + { + .procname = "n2h_empty_paged_pool_buf_core0", + .data = &nss_n2h_empty_paged_pool_buf_cfg[NSS_CORE_0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_empty_paged_pool_buf_cfg_core0_handler, + }, + { + .procname = "n2h_low_water_core0", + .data = &nss_n2h_water_mark[NSS_CORE_0][0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_water_mark_core0_handler, + }, + { + .procname = "n2h_high_water_core0", + .data = &nss_n2h_water_mark[NSS_CORE_0][1], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_water_mark_core0_handler, + }, + { + .procname = "n2h_paged_low_water_core0", + .data = &nss_n2h_paged_water_mark[NSS_CORE_0][0], + .maxlen = 
sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_paged_water_mark_core0_handler, + }, + { + .procname = "n2h_paged_high_water_core0", + .data = &nss_n2h_paged_water_mark[NSS_CORE_0][1], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_paged_water_mark_core0_handler, + }, + { + .procname = "n2h_wifi_pool_buf", + .data = &nss_n2h_wifi_pool_buf_cfg, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_wifi_payloads_handler, + }, + { + .procname = "mitigation_core0", + .data = &nss_n2h_core0_mitigation_cfg, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_mitigationcfg_core0_handler, + }, + { + .procname = "extra_pbuf_core0", + .data = &nss_n2h_core0_add_buf_pool_size, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_buf_cfg_core0_handler, + }, + { + .procname = "n2h_queue_limit_core0", + .data = &nss_n2h_queue_limit[NSS_CORE_0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_queue_limit_core0_handler, + }, + { + .procname = "host_bp_enable0", + .data = &nss_n2h_host_bp_config[NSS_CORE_0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_host_bp_cfg_core0_handler, + }, + + { } +}; + +static struct ctl_table nss_n2h_table_multi_core[] = { + { + .procname = "n2h_empty_pool_buf_core0", + .data = &nss_n2h_empty_pool_buf_cfg[NSS_CORE_0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_empty_pool_buf_cfg_core0_handler, + }, + { + .procname = "n2h_empty_pool_buf_core1", + .data = &nss_n2h_empty_pool_buf_cfg[NSS_CORE_1], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_empty_pool_buf_cfg_core1_handler, + }, + { + .procname = "n2h_empty_paged_pool_buf_core0", + .data = &nss_n2h_empty_paged_pool_buf_cfg[NSS_CORE_0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_empty_paged_pool_buf_cfg_core0_handler, + }, + { + .procname = "n2h_empty_paged_pool_buf_core1", + .data = 
&nss_n2h_empty_paged_pool_buf_cfg[NSS_CORE_1], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_empty_paged_pool_buf_cfg_core1_handler, + }, + + { + .procname = "n2h_low_water_core0", + .data = &nss_n2h_water_mark[NSS_CORE_0][0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_water_mark_core0_handler, + }, + { + .procname = "n2h_low_water_core1", + .data = &nss_n2h_water_mark[NSS_CORE_1][0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_water_mark_core1_handler, + }, + { + .procname = "n2h_high_water_core0", + .data = &nss_n2h_water_mark[NSS_CORE_0][1], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_water_mark_core0_handler, + }, + { + .procname = "n2h_high_water_core1", + .data = &nss_n2h_water_mark[NSS_CORE_1][1], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_water_mark_core1_handler, + }, + { + .procname = "n2h_paged_low_water_core0", + .data = &nss_n2h_paged_water_mark[NSS_CORE_0][0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_paged_water_mark_core0_handler, + }, + { + .procname = "n2h_paged_low_water_core1", + .data = &nss_n2h_paged_water_mark[NSS_CORE_1][0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_paged_water_mark_core1_handler, + }, + { + .procname = "n2h_paged_high_water_core0", + .data = &nss_n2h_paged_water_mark[NSS_CORE_0][1], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_paged_water_mark_core0_handler, + }, + { + .procname = "n2h_paged_high_water_core1", + .data = &nss_n2h_paged_water_mark[NSS_CORE_1][1], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_paged_water_mark_core1_handler, + }, + { + .procname = "n2h_wifi_pool_buf", + .data = &nss_n2h_wifi_pool_buf_cfg, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_wifi_payloads_handler, + }, + { + .procname = "mitigation_core0", + .data = &nss_n2h_core0_mitigation_cfg, + .maxlen = 
sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_mitigationcfg_core0_handler, + }, + { + .procname = "mitigation_core1", + .data = &nss_n2h_core1_mitigation_cfg, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_mitigationcfg_core1_handler, + }, + { + .procname = "extra_pbuf_core0", + .data = &nss_n2h_core0_add_buf_pool_size, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_buf_cfg_core0_handler, + }, + { + .procname = "extra_pbuf_core1", + .data = &nss_n2h_core1_add_buf_pool_size, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_buf_cfg_core1_handler, + }, + { + .procname = "n2h_queue_limit_core0", + .data = &nss_n2h_queue_limit[NSS_CORE_0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_queue_limit_core0_handler, + }, + { + .procname = "n2h_queue_limit_core1", + .data = &nss_n2h_queue_limit[NSS_CORE_1], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_queue_limit_core1_handler, + }, + { + .procname = "host_bp_enable0", + .data = &nss_n2h_host_bp_config[NSS_CORE_0], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_host_bp_cfg_core0_handler, + }, + { + .procname = "host_bp_enable1", + .data = &nss_n2h_host_bp_config[NSS_CORE_1], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_n2h_host_bp_cfg_core1_handler, + }, + { } +}; + +/* + * This table will be overwritten during single-core registration + */ +static struct ctl_table nss_n2h_dir[] = { + { + .procname = "n2hcfg", + .mode = 0555, + .child = nss_n2h_table_multi_core, + }, + { } +}; + +static struct ctl_table nss_n2h_root_dir[] = { + { + .procname = "nss", + .mode = 0555, + .child = nss_n2h_dir, + }, + { } +}; + +static struct ctl_table nss_n2h_root[] = { + { + .procname = "dev", + .mode = 0555, + .child = nss_n2h_root_dir, + }, + { } +}; + +static struct ctl_table_header *nss_n2h_header; + +/* + * nss_n2h_cfg_empty_pool_size() + * Config empty buffer pool + */ +nss_tx_status_t 
nss_n2h_cfg_empty_pool_size(struct nss_ctx_instance *nss_ctx, uint32_t pool_sz) +{ + struct nss_n2h_msg nnm; + struct nss_n2h_empty_pool_buf *nnepbcm; + nss_tx_status_t nss_tx_status; + + if (pool_sz < NSS_N2H_MIN_EMPTY_POOL_BUF_SZ) { + nss_warning("%px: setting pool size %d < min number of buffer", + nss_ctx, pool_sz); + return NSS_TX_FAILURE; + } + + if (pool_sz > NSS_N2H_MAX_EMPTY_POOL_BUF_SZ) { + nss_warning("%px: setting pool size %d > max number of buffer", + nss_ctx, pool_sz); + return NSS_TX_FAILURE; + } + + nss_info("%px: update number of empty buffer pool size: %d\n", + nss_ctx, pool_sz); + + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_N2H_EMPTY_POOL_BUF_CFG, + sizeof(struct nss_n2h_empty_pool_buf), NULL, 0); + + nnepbcm = &nnm.msg.empty_pool_buf_cfg; + nnepbcm->pool_size = htonl(pool_sz); + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_tx error empty buffer pool: %d\n", nss_ctx, pool_sz); + return nss_tx_status; + } + + return nss_tx_status; +} + +/* + * nss_n2h_paged_buf_pool_init() + * Sends a command down to NSS to initialize paged buffer pool + */ +nss_tx_status_t nss_n2h_paged_buf_pool_init(struct nss_ctx_instance *nss_ctx) +{ + struct nss_n2h_msg nnm; + nss_tx_status_t nss_tx_status; + + /* + * No additional information needed at this point + */ + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_N2H_PAGED_BUFFER_POOL_INIT, + sizeof(struct nss_n2h_paged_buffer_pool_init), + NULL, + NULL); + + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: failed to send paged buf configuration init command to NSS\n", + nss_ctx); + return NSS_TX_FAILURE; + } + + return NSS_TX_SUCCESS; +} + +/* + * nss_n2h_flush_payloads() + * Sends a command down to NSS for flushing all payloads + */ +nss_tx_status_t nss_n2h_flush_payloads(struct nss_ctx_instance *nss_ctx) +{ + struct nss_n2h_msg nnm; + struct 
nss_n2h_flush_payloads *nnflshpl; + nss_tx_status_t nss_tx_status; + + nnflshpl = &nnm.msg.flush_payloads; + + /* + * TODO: No additional information sent in message + * as of now. Need to initialize message content accordingly + * if needed. + */ + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, + NSS_TX_METADATA_TYPE_N2H_FLUSH_PAYLOADS, + sizeof(struct nss_n2h_flush_payloads), + NULL, + NULL); + + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: failed to send flush payloads command to NSS\n", + nss_ctx); + + return NSS_TX_FAILURE; + } + + return NSS_TX_SUCCESS; +} + +/* + * nss_n2h_msg_init() + * Initialize n2h message. + */ +void nss_n2h_msg_init(struct nss_n2h_msg *nim, uint16_t if_num, uint32_t type, + uint32_t len, nss_n2h_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data); +} + +/* + * nss_n2h_tx_msg() + * Send messages to NSS n2h package. + */ +nss_tx_status_t nss_n2h_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_n2h_msg *nnm) +{ + struct nss_cmn_msg *ncm = &nnm->cm; + + /* + * Sanity check the message + */ + if (ncm->interface != NSS_N2H_INTERFACE) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_METADATA_TYPE_N2H_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, nnm, sizeof(*nnm), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_n2h_notify_register() + * Register to received N2H events. + * + * NOTE: Do we want to pass an nss_ctx here so that we can register for n2h on any core? 
+ */ +struct nss_ctx_instance *nss_n2h_notify_register(int core, nss_n2h_msg_callback_t cb, void *app_data) +{ + if (core >= nss_top_main.num_nss) { + nss_warning("Input core number %d is wrong \n", core); + return NULL; + } + /* + * TODO: We need to have a new array in support of the new API + * TODO: If we use a per-context array, we would move the array into nss_ctx based. + */ + nss_n2h_rd[core].n2h_callback = cb; + nss_n2h_rd[core].app_data = app_data; + return &nss_top_main.nss[core]; +} + +/* + * nss_n2h_register_handler() + */ +void nss_n2h_register_handler(struct nss_ctx_instance *nss_ctx) +{ + sema_init(&nss_n2h_q_cfg_pvt.sem, 1); + init_completion(&nss_n2h_q_cfg_pvt.complete); + + nss_core_register_handler(nss_ctx, NSS_N2H_INTERFACE, nss_n2h_interface_handler, NULL); + + if (nss_ctx->id == NSS_CORE_0) { + nss_n2h_stats_dentry_create(); + } + nss_n2h_strings_dentry_create(); + + nss_drv_strings_dentry_create(); +} + +/* + * nss_n2h_single_core_register_sysctl() + */ +void nss_n2h_single_core_register_sysctl(void) +{ + /* + * RPS sema init + */ + sema_init(&nss_n2h_rcp.sem, 1); + init_completion(&nss_n2h_rcp.complete); + + /* + * MITIGATION sema init for core0 + */ + sema_init(&nss_n2h_mitigationcp[NSS_CORE_0].sem, 1); + init_completion(&nss_n2h_mitigationcp[NSS_CORE_0].complete); + + /* + * PBUF addition sema init for core0 + */ + sema_init(&nss_n2h_bufcp[NSS_CORE_0].sem, 1); + init_completion(&nss_n2h_bufcp[NSS_CORE_0].complete); + + /* + * Core0 + */ + sema_init(&nss_n2h_nepbcfgp[NSS_CORE_0].sem, 1); + init_completion(&nss_n2h_nepbcfgp[NSS_CORE_0].complete); + nss_n2h_nepbcfgp[NSS_CORE_0].empty_buf_pool_info.pool_size = + nss_n2h_empty_pool_buf_cfg[NSS_CORE_0]; + nss_n2h_nepbcfgp[NSS_CORE_0].empty_buf_pool_info.low_water = + nss_n2h_water_mark[NSS_CORE_0][0]; + nss_n2h_nepbcfgp[NSS_CORE_0].empty_buf_pool_info.high_water = + nss_n2h_water_mark[NSS_CORE_0][1]; + nss_n2h_nepbcfgp[NSS_CORE_0].empty_paged_buf_pool_info.pool_size = + 
nss_n2h_empty_paged_pool_buf_cfg[NSS_CORE_0]; + nss_n2h_nepbcfgp[NSS_CORE_0].empty_paged_buf_pool_info.low_water = + nss_n2h_paged_water_mark[NSS_CORE_0][0]; + nss_n2h_nepbcfgp[NSS_CORE_0].empty_paged_buf_pool_info.high_water = + nss_n2h_paged_water_mark[NSS_CORE_0][1]; + + /* + * WiFi pool buf cfg sema init + */ + sema_init(&nss_n2h_wp.sem, 1); + init_completion(&nss_n2h_wp.complete); + + /* + * N2H queue config sema init + */ + sema_init(&nss_n2h_q_lim_pvt.sem, 1); + init_completion(&nss_n2h_q_lim_pvt.complete); + + /* + * Back pressure config sema init + */ + sema_init(&nss_n2h_host_bp_cfg_pvt.sem, 1); + init_completion(&nss_n2h_host_bp_cfg_pvt.complete); + + nss_n2h_notify_register(NSS_CORE_0, NULL, NULL); + + /* + * Register sysctl table. + */ + nss_n2h_dir[0].child = nss_n2h_table_single_core; + nss_n2h_header = register_sysctl_table(nss_n2h_root); +} + +/* + * nss_n2h_multi_core_register_sysctl() + */ +void nss_n2h_multi_core_register_sysctl(void) +{ + /* + * RPS sema init + */ + sema_init(&nss_n2h_rcp.sem, 1); + init_completion(&nss_n2h_rcp.complete); + + /* + * MITIGATION sema init for core0 + */ + sema_init(&nss_n2h_mitigationcp[NSS_CORE_0].sem, 1); + init_completion(&nss_n2h_mitigationcp[NSS_CORE_0].complete); + + /* + * MITIGATION sema init for core1 + */ + sema_init(&nss_n2h_mitigationcp[NSS_CORE_1].sem, 1); + init_completion(&nss_n2h_mitigationcp[NSS_CORE_1].complete); + + /* + * PBUF addition sema init for core0 + */ + sema_init(&nss_n2h_bufcp[NSS_CORE_0].sem, 1); + init_completion(&nss_n2h_bufcp[NSS_CORE_0].complete); + + /* + * PBUF addition sema init for core1 + */ + sema_init(&nss_n2h_bufcp[NSS_CORE_1].sem, 1); + init_completion(&nss_n2h_bufcp[NSS_CORE_1].complete); + + /* + * Core0 + */ + sema_init(&nss_n2h_nepbcfgp[NSS_CORE_0].sem, 1); + init_completion(&nss_n2h_nepbcfgp[NSS_CORE_0].complete); + nss_n2h_nepbcfgp[NSS_CORE_0].empty_buf_pool_info.pool_size = + nss_n2h_empty_pool_buf_cfg[NSS_CORE_0]; + 
nss_n2h_nepbcfgp[NSS_CORE_0].empty_buf_pool_info.low_water = + nss_n2h_water_mark[NSS_CORE_0][0]; + nss_n2h_nepbcfgp[NSS_CORE_0].empty_buf_pool_info.high_water = + nss_n2h_water_mark[NSS_CORE_0][1]; + nss_n2h_nepbcfgp[NSS_CORE_0].empty_paged_buf_pool_info.pool_size = + nss_n2h_empty_paged_pool_buf_cfg[NSS_CORE_0]; + nss_n2h_nepbcfgp[NSS_CORE_0].empty_paged_buf_pool_info.low_water = + nss_n2h_paged_water_mark[NSS_CORE_0][0]; + nss_n2h_nepbcfgp[NSS_CORE_0].empty_paged_buf_pool_info.high_water = + nss_n2h_paged_water_mark[NSS_CORE_0][1]; + + /* + * Core1 + */ + sema_init(&nss_n2h_nepbcfgp[NSS_CORE_1].sem, 1); + init_completion(&nss_n2h_nepbcfgp[NSS_CORE_1].complete); + nss_n2h_nepbcfgp[NSS_CORE_1].empty_buf_pool_info.pool_size = + nss_n2h_empty_pool_buf_cfg[NSS_CORE_1]; + nss_n2h_nepbcfgp[NSS_CORE_1].empty_buf_pool_info.low_water = + nss_n2h_water_mark[NSS_CORE_1][0]; + nss_n2h_nepbcfgp[NSS_CORE_1].empty_buf_pool_info.high_water = + nss_n2h_water_mark[NSS_CORE_1][1]; + nss_n2h_nepbcfgp[NSS_CORE_1].empty_paged_buf_pool_info.pool_size = + nss_n2h_empty_paged_pool_buf_cfg[NSS_CORE_1]; + nss_n2h_nepbcfgp[NSS_CORE_1].empty_paged_buf_pool_info.low_water = + nss_n2h_paged_water_mark[NSS_CORE_1][0]; + nss_n2h_nepbcfgp[NSS_CORE_1].empty_paged_buf_pool_info.high_water = + nss_n2h_paged_water_mark[NSS_CORE_1][1]; + + /* + * WiFi pool buf cfg sema init + */ + sema_init(&nss_n2h_wp.sem, 1); + init_completion(&nss_n2h_wp.complete); + + /* + * N2H queue config sema init + */ + sema_init(&nss_n2h_q_lim_pvt.sem, 1); + init_completion(&nss_n2h_q_lim_pvt.complete); + + /* + * Back pressure config sema init + */ + sema_init(&nss_n2h_host_bp_cfg_pvt.sem, 1); + init_completion(&nss_n2h_host_bp_cfg_pvt.complete); + + nss_n2h_notify_register(NSS_CORE_0, NULL, NULL); + nss_n2h_notify_register(NSS_CORE_1, NULL, NULL); + + /* + * Register sysctl table. 
+ */ + nss_n2h_header = register_sysctl_table(nss_n2h_root); +} + +/* + * nss_n2h_unregister_sysctl() + * Unregister sysctl specific to n2h + */ +void nss_n2h_unregister_sysctl(void) +{ + /* + * Unregister sysctl table. + */ + if (nss_n2h_header) { + unregister_sysctl_table(nss_n2h_header); + } +} + +EXPORT_SYMBOL(nss_n2h_notify_register); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_n2h_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_n2h_stats.c new file mode 100644 index 000000000..60ff88ba9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_n2h_stats.c @@ -0,0 +1,214 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_n2h_stats.h" +#include "nss_n2h.h" +#include "nss_n2h_strings.h" + +/* + * Declare atomic notifier data structure for statistics. 
+ */ +ATOMIC_NOTIFIER_HEAD(nss_n2h_stats_notifier); + +uint64_t nss_n2h_stats[NSS_MAX_CORES][NSS_N2H_STATS_MAX]; + +/* + * nss_n2h_stats_read() + * Read N2H stats + */ +static ssize_t nss_n2h_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i, core; + + /* + * Max output lines = #stats + few blank lines for banner printing + + * Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_output_lines = (NSS_N2H_STATS_MAX + 3) * NSS_MAX_CORES + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_N2H_STATS_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + /* + * N2H node stats + */ + for (core = 0; core < nss_top_main.num_nss; core++) { + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; i < NSS_N2H_STATS_MAX; i++) { + stats_shadow[i] = nss_n2h_stats[core][i]; + } + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "n2h", core); + size_wr += nss_stats_print("n2h", NULL, NSS_STATS_SINGLE_INSTANCE + , nss_n2h_strings_stats + , stats_shadow + , NSS_N2H_STATS_MAX + , lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_n2h_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(n2h); + +/* + * nss_n2h_stats_dentry_create() + * Create N2H statistics debug entry. 
+ */ +void nss_n2h_stats_dentry_create(void) +{ + nss_stats_create_dentry("n2h", &nss_n2h_stats_ops); +} + +/* + * nss_n2h_stats_sync() + * Handle the syncing of NSS statistics. + */ +void nss_n2h_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_n2h_stats_sync *nnss) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + int id = nss_ctx->id; + int j; + + spin_lock_bh(&nss_top->stats_lock); + + /* + * common node stats + */ + nss_n2h_stats[id][NSS_STATS_NODE_RX_PKTS] += nnss->node_stats.rx_packets; + nss_n2h_stats[id][NSS_STATS_NODE_RX_BYTES] += nnss->node_stats.rx_bytes; + nss_n2h_stats[id][NSS_STATS_NODE_TX_PKTS] += nnss->node_stats.tx_packets; + nss_n2h_stats[id][NSS_STATS_NODE_TX_BYTES] += nnss->node_stats.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_n2h_stats[id][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + j] += nnss->node_stats.rx_dropped[j]; + } + + /* + * General N2H stats + */ + nss_n2h_stats[id][NSS_N2H_STATS_QUEUE_DROPPED] += nnss->queue_dropped; + nss_n2h_stats[id][NSS_N2H_STATS_TOTAL_TICKS] += nnss->total_ticks; + nss_n2h_stats[id][NSS_N2H_STATS_WORST_CASE_TICKS] += nnss->worst_case_ticks; + nss_n2h_stats[id][NSS_N2H_STATS_ITERATIONS] += nnss->iterations; + + /* + * pbuf manager ocm and default pool stats + */ + nss_n2h_stats[id][NSS_N2H_STATS_PBUF_OCM_ALLOC_FAILS_WITH_PAYLOAD] += nnss->pbuf_ocm_stats.pbuf_alloc_fails_with_payload; + nss_n2h_stats[id][NSS_N2H_STATS_PBUF_OCM_FREE_COUNT] = nnss->pbuf_ocm_stats.pbuf_free_count; + nss_n2h_stats[id][NSS_N2H_STATS_PBUF_OCM_TOTAL_COUNT] = nnss->pbuf_ocm_stats.pbuf_total_count; + nss_n2h_stats[id][NSS_N2H_STATS_PBUF_OCM_ALLOC_FAILS_NO_PAYLOAD] += nnss->pbuf_ocm_stats.pbuf_alloc_fails_no_payload; + + nss_n2h_stats[id][NSS_N2H_STATS_PBUF_DEFAULT_ALLOC_FAILS_WITH_PAYLOAD] += nnss->pbuf_default_stats.pbuf_alloc_fails_with_payload; + nss_n2h_stats[id][NSS_N2H_STATS_PBUF_DEFAULT_FREE_COUNT] = nnss->pbuf_default_stats.pbuf_free_count; + nss_n2h_stats[id][NSS_N2H_STATS_PBUF_DEFAULT_TOTAL_COUNT] = 
nnss->pbuf_default_stats.pbuf_total_count; + nss_n2h_stats[id][NSS_N2H_STATS_PBUF_DEFAULT_ALLOC_FAILS_NO_PAYLOAD] += nnss->pbuf_default_stats.pbuf_alloc_fails_no_payload; + + /* + * payload mgr stats + */ + nss_n2h_stats[id][NSS_N2H_STATS_PAYLOAD_ALLOC_FAILS] += nnss->payload_alloc_fails; + nss_n2h_stats[id][NSS_N2H_STATS_PAYLOAD_FREE_COUNT] = nnss->payload_free_count; + + /* + * Host <=> NSS control traffic stats + */ + nss_n2h_stats[id][NSS_N2H_STATS_H2N_CONTROL_PACKETS] += nnss->h2n_ctrl_pkts; + nss_n2h_stats[id][NSS_N2H_STATS_H2N_CONTROL_BYTES] += nnss->h2n_ctrl_bytes; + nss_n2h_stats[id][NSS_N2H_STATS_N2H_CONTROL_PACKETS] += nnss->n2h_ctrl_pkts; + nss_n2h_stats[id][NSS_N2H_STATS_N2H_CONTROL_BYTES] += nnss->n2h_ctrl_bytes; + + /* + * Host <=> NSS control data traffic stats + */ + nss_n2h_stats[id][NSS_N2H_STATS_H2N_DATA_PACKETS] += nnss->h2n_data_pkts; + nss_n2h_stats[id][NSS_N2H_STATS_H2N_DATA_BYTES] += nnss->h2n_data_bytes; + nss_n2h_stats[id][NSS_N2H_STATS_N2H_DATA_PACKETS] += nnss->n2h_data_pkts; + nss_n2h_stats[id][NSS_N2H_STATS_N2H_DATA_BYTES] += nnss->n2h_data_bytes; + + /* + * Payloads related stats + */ + nss_n2h_stats[id][NSS_N2H_STATS_N2H_TOT_PAYLOADS] = nnss->tot_payloads; + + nss_n2h_stats[id][NSS_N2H_STATS_N2H_INTERFACE_INVALID] += nnss->data_interface_invalid; + nss_n2h_stats[id][NSS_N2H_STATS_ENQUEUE_RETRIES] += nnss->enqueue_retries; + + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_n2h_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_n2h_stats_notify(struct nss_ctx_instance *nss_ctx) +{ + int i; + struct nss_n2h_stats_notification stats; + + for (i = 0; (i < NSS_STATS_DRV_MAX); i++) { + stats.drv_stats[i] = NSS_PKT_STATS_READ(&nss_top_main.stats_drv[i]); + } + + stats.core_id = nss_ctx->id; + memcpy(stats.n2h_stats, nss_n2h_stats[stats.core_id], sizeof(stats.n2h_stats)); + atomic_notifier_call_chain(&nss_n2h_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&stats); +} + +/* + * nss_n2h_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_n2h_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_n2h_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_n2h_stats_register_notifier); + +/* + * nss_n2h_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_n2h_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_n2h_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_n2h_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_n2h_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_n2h_stats.h new file mode 100644 index 000000000..96d065eb1 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_n2h_stats.h @@ -0,0 +1,27 @@ +/* + ****************************************************************************** + * Copyright (c) 2017,2019-2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_N2H_STATS_H +#define __NSS_N2H_STATS_H + +/* + * N2H statistics APIs + */ +extern void nss_n2h_stats_notify(struct nss_ctx_instance *nss_ctx); +extern void nss_n2h_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_n2h_stats_sync *nnss); +extern void nss_n2h_stats_dentry_create(void); + +#endif /* __NSS_N2H_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_n2h_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_n2h_strings.c new file mode 100644 index 000000000..c4c2ce525 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_n2h_strings.c @@ -0,0 +1,85 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" + +/* + * nss_n2h_strings_stats + * N2H statistics strings. + */ +struct nss_stats_info nss_n2h_strings_stats[NSS_N2H_STATS_MAX] = { + {"rx_pkts" , NSS_STATS_TYPE_COMMON}, + {"rx_byts" , NSS_STATS_TYPE_COMMON}, + {"tx_pkts" , NSS_STATS_TYPE_COMMON}, + {"tx_byts" , NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops" , NSS_STATS_TYPE_DROP}, + {"queue_drops" , NSS_STATS_TYPE_DROP}, + {"ticks" , NSS_STATS_TYPE_SPECIAL}, + {"worst_ticks" , NSS_STATS_TYPE_SPECIAL}, + {"iterations" , NSS_STATS_TYPE_SPECIAL}, + {"pbuf_ocm_total_count" , NSS_STATS_TYPE_SPECIAL}, + {"pbuf_ocm_free_count" , NSS_STATS_TYPE_SPECIAL}, + {"pbuf_ocm_alloc_fail_payload" , NSS_STATS_TYPE_SPECIAL}, + {"pbuf_ocm_alloc_fail_nopayload", NSS_STATS_TYPE_SPECIAL}, + {"pbuf_def_total_count" , NSS_STATS_TYPE_SPECIAL}, + {"pbuf_def_free_count" , NSS_STATS_TYPE_SPECIAL}, + {"pbuf_def_alloc_fail_payload" , NSS_STATS_TYPE_SPECIAL}, + {"pbuf_def_alloc_fail_nopayload", NSS_STATS_TYPE_SPECIAL}, + {"payload_alloc_fails" , NSS_STATS_TYPE_SPECIAL}, + {"payload_free_count" , NSS_STATS_TYPE_SPECIAL}, + {"h2n_control_pkts" , NSS_STATS_TYPE_SPECIAL}, + {"h2n_control_byts" , NSS_STATS_TYPE_SPECIAL}, + {"n2h_control_pkts" , NSS_STATS_TYPE_SPECIAL}, + {"n2h_control_byts" , NSS_STATS_TYPE_SPECIAL}, + {"h2n_data_pkts" , NSS_STATS_TYPE_SPECIAL}, + {"h2n_data_byts" , NSS_STATS_TYPE_SPECIAL}, + {"n2h_data_pkts" , NSS_STATS_TYPE_SPECIAL}, + {"n2h_data_byts" , NSS_STATS_TYPE_SPECIAL}, + {"n2h_tot_payloads" , NSS_STATS_TYPE_SPECIAL}, + {"n2h_data_interface_invalid" , NSS_STATS_TYPE_SPECIAL}, + {"n2h_enqueue_retries" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_n2h_strings_read() + * Read N2H node statistics names. 
+ */ +static ssize_t nss_n2h_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_n2h_strings_stats, NSS_N2H_STATS_MAX); +} + +/* + * nss_n2h_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(n2h); + +/* + * nss_n2h_strings_dentry_create() + * Create N2H statistics strings debug entry. + */ +void nss_n2h_strings_dentry_create(void) +{ + nss_strings_create_dentry("n2h", &nss_n2h_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_n2h_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_n2h_strings.h new file mode 100644 index 000000000..5d8c2131c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_n2h_strings.h @@ -0,0 +1,25 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_N2H_STRINGS_H +#define __NSS_N2H_STRINGS_H + +extern struct nss_stats_info nss_n2h_strings_stats[NSS_N2H_STATS_MAX]; +extern void nss_n2h_strings_dentry_create(void); + +#endif /* __NSS_N2H_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_oam.c b/feeds/ipq807x/qca-nss-drv/src/nss_oam.c new file mode 100644 index 000000000..baf43d352 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_oam.c @@ -0,0 +1,141 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_oam.c + * OAM - Operations, Administration and Maintenance Service for NSS + * + * This adapter module is responsible for sending and + * receiving to and from NSS FW + * This file contains the API for communicating NSS FW to send/receive + * commands OAM commands. 
+ */ + +#include "nss_tx_rx_common.h" +#include "nss_oam_log.h" + +/* + * nss_oam_rx_msg_handler() + * Message handler for OAM messages from NSS + */ +static void nss_oam_rx_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused)) void *app_data) +{ + struct nss_oam_msg *nom = (struct nss_oam_msg *)ncm; + nss_oam_msg_callback_t cb; + + /* + * Trace Messages + */ + nss_oam_log_rx_msg(nom); + + /* + * Sanity check the message type + */ + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_oam_msg)) { + nss_warning("%px: recevied with invalid msg size: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + if (ncm->type > NSS_OAM_MSG_TYPE_MAX) { + nss_warning("%px: received with invalid resp type: %d", nss_ctx, ncm->type); + return; + } + + /* + * Log the failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_top_main.oam_callback; + ncm->app_data = (nss_ptr_t)nss_top_main.oam_ctx; + } + + cb = (nss_oam_msg_callback_t)ncm->cb; + if (unlikely(!cb)) { + nss_trace("%px: rx handler has been unregistered for i/f: %d", nss_ctx, ncm->interface); + return; + } + cb((void *)ncm->app_data, nom); +} + +/* + * nss_oam_tx() + * Transmit an oam message to the FW. 
+ */ +nss_tx_status_t nss_oam_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_oam_msg *nom) +{ + struct nss_cmn_msg *ncm = &nom->cm; + + /* + * Trace Messages + */ + nss_oam_log_tx_msg(nom); + + if (ncm->type > NSS_OAM_MSG_TYPE_MAX) { + nss_warning("%px: CMD type for oam module is invalid - %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE_BAD_PARAM; + } + + if (ncm->interface != NSS_OAM_INTERFACE) { + nss_warning("%px: tx message request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, nom, sizeof(*nom), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_oam_tx_msg); + +/* + * nss_oam_notify_register() + * Register to receive OAM events. + */ +struct nss_ctx_instance *nss_oam_notify_register(nss_oam_msg_callback_t cb, void *app_data) +{ + if (nss_top_main.oam_ctx || nss_top_main.oam_callback) { + nss_warning("Failed to register notify callback - already registered\n"); + return NULL; + } + + nss_top_main.oam_ctx = app_data; + nss_top_main.oam_callback = cb; + return &nss_top_main.nss[nss_top_main.oam_handler_id]; +} +EXPORT_SYMBOL(nss_oam_notify_register); + +/* + * nss_oam_notify_unregister() + * Unregister to received OAM events. 
+ */ +void nss_oam_notify_unregister(void) +{ + nss_top_main.oam_callback = NULL; + nss_top_main.oam_ctx = NULL; +} +EXPORT_SYMBOL(nss_oam_notify_unregister); + +/* + * nss_register_oam_handler() + * Register our handler to receive messages for this interface + */ +void nss_oam_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[nss_top_main.oam_handler_id]; + + if (nss_core_register_handler(nss_ctx, NSS_OAM_INTERFACE, nss_oam_rx_msg_handler, NULL) != NSS_CORE_STATUS_SUCCESS) { + nss_warning("OAM handler failed to register"); + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_oam_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_oam_log.c new file mode 100644 index 000000000..08ffec483 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_oam_log.c @@ -0,0 +1,101 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_oam_log.c + * NSS OAM logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_oam_log_message_types_str + * NSS OAM message strings + */ +static int8_t *nss_oam_log_message_types_str[NSS_OAM_MSG_TYPE_MAX] __maybe_unused = { + "OAM Message None", + "OAM Get FW Version", +}; + +/* + * nss_oam_log_get_fw_version_msg() + * Log NSS OAM GET FW Version. + */ +static void nss_oam_log_get_fw_version_msg(struct nss_oam_msg *nom) +{ + struct nss_oam_fw_ver *nofm __maybe_unused = &nom->msg.fw_ver; + nss_trace("%px: NSS OAM Get FW Version message \n" + "OAM FW Version: %px\n", + nofm, nofm->string); +} + +/* + * nss_oam_log_verbose() + * Log message contents. + */ +static void nss_oam_log_verbose(struct nss_oam_msg *nom) +{ + switch (nom->cm.type) { + case NSS_OAM_MSG_TYPE_GET_FW_VER: + nss_oam_log_get_fw_version_msg(nom); + break; + + default: + nss_trace("%px: Invalid message type\n", nom); + break; + } +} + +/* + * nss_oam_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_oam_log_tx_msg(struct nss_oam_msg *nom) +{ + if (nom->cm.type >= NSS_OAM_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", nom); + return; + } + + nss_info("%px: type[%d]:%s\n", nom, nom->cm.type, nss_oam_log_message_types_str[nom->cm.type]); + nss_oam_log_verbose(nom); +} + +/* + * nss_oam_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_oam_log_rx_msg(struct nss_oam_msg *nom) +{ + if (nom->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nom); + return; + } + + if (nom->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nom->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nom, nom->cm.type, + nss_oam_log_message_types_str[nom->cm.type], + nom->cm.response, nss_cmn_response_str[nom->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + nom, nom->cm.type, nss_oam_log_message_types_str[nom->cm.type], + nom->cm.response, nss_cmn_response_str[nom->cm.response]); + +verbose: + nss_oam_log_verbose(nom); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_oam_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_oam_log.h new file mode 100644 index 000000000..b02611ba2 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_oam_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_OAM_LOG_H +#define __NSS_OAM_LOG_H + +/* + * nss_oam.h + * NSS OAM header file. 
+ */ + +/* + * Logger APIs + */ + +/* + * nss_oam_log_tx_msg + * Logs a oam message that is sent to the NSS firmware. + */ +void nss_oam_log_tx_msg(struct nss_oam_msg *nom); + +/* + * nss_oam_log_rx_msg + * Logs a oam message that is received from the NSS firmware. + */ +void nss_oam_log_rx_msg(struct nss_oam_msg *nom); + +#endif /* __NSS_OAM_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_phys_if.c b/feeds/ipq807x/qca-nss-drv/src/nss_phys_if.c new file mode 100644 index 000000000..4e5811a3e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_phys_if.c @@ -0,0 +1,629 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_phy_if.c + * NSS physical interface functions + */ + +#include "nss_tx_rx_common.h" +#include "nss_tstamp.h" +#if defined(NSS_HAL_IPQ807x_SUPPORT) || defined(NSS_HAL_IPQ60XX_SUPPORT) +#include +#endif + +#define NSS_PHYS_IF_TX_TIMEOUT 3000 /* 3 Seconds */ + +/* + * NSS phys_if modes + */ +#define NSS_PHYS_IF_MODE0 0 /* phys_if mode 0 */ +#define NSS_PHYS_IF_MODE1 1 /* phys_if mode 1 */ +#define NSS_PHYS_IF_MODE2 2 /* phys_if mode 2 */ + +/* + * Private data structure for phys_if interface + */ +static struct nss_phys_if_pvt { + struct semaphore sem; + struct completion complete; + int response; +} phif; + +static int nss_phys_if_sem_init_done; + +/* + * nss_phys_if_update_driver_stats() + * Snoop the extended message and update driver statistics. + */ +static void nss_phys_if_update_driver_stats(struct nss_ctx_instance *nss_ctx, uint32_t id, struct nss_phys_if_stats *stats) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + uint64_t *top_stats = &(nss_top->stats_gmac[id][0]); + + spin_lock_bh(&nss_top->stats_lock); + top_stats[NSS_GMAC_STATS_TOTAL_TICKS] += stats->estats.gmac_total_ticks; + if (unlikely(top_stats[NSS_GMAC_STATS_WORST_CASE_TICKS] < stats->estats.gmac_worst_case_ticks)) { + top_stats[NSS_GMAC_STATS_WORST_CASE_TICKS] = stats->estats.gmac_worst_case_ticks; + } + top_stats[NSS_GMAC_STATS_ITERATIONS] += stats->estats.gmac_iterations; + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_phys_if_msg_handler() + * Handle NSS -> HLOS messages for physical interface/gmacs + */ +static void nss_phys_if_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, + __attribute__((unused))void *app_data) +{ + struct nss_phys_if_msg *nim = (struct nss_phys_if_msg *)ncm; + nss_phys_if_msg_callback_t cb; + + /* + * Sanity check the message type + */ + if (ncm->type > NSS_PHYS_IF_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", 
nss_ctx, ncm->type); + return; + } + + if (!NSS_IS_IF_TYPE(PHYSICAL, ncm->interface)) { + nss_warning("%px: response for another interface: %d", nss_ctx, ncm->interface); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_phys_if_msg)) { + nss_warning("%px: message length too big: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Messages value that are within the base class are handled by the base class. + */ + if (ncm->type < NSS_IF_MAX_MSG_TYPES) { + return nss_if_msg_handler(nss_ctx, ncm, app_data); + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Snoop messages for local driver and handle deprecated interfaces. + */ + switch (nim->cm.type) { + case NSS_PHYS_IF_EXTENDED_STATS_SYNC: + /* + * To create the old API gmac statistics, we use the new extended GMAC stats. + */ + nss_phys_if_update_driver_stats(nss_ctx, ncm->interface, &nim->msg.stats); + nss_top_main.data_plane_ops->data_plane_stats_sync(&nim->msg.stats, ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages, IPv4 sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->phys_if_msg_callback[ncm->interface]; + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].ndev; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * Callback + */ + cb = (nss_phys_if_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, nim); +} + +/* + * nss_phys_if_callback + * Callback to handle the completion of NSS ->HLOS messages. 
+ */ +static void nss_phys_if_callback(void *app_data, struct nss_phys_if_msg *nim) +{ + if(nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("phys_if Error response %d\n", nim->cm.response); + phif.response = NSS_TX_FAILURE; + complete(&phif.complete); + return; + } + + phif.response = NSS_TX_SUCCESS; + complete(&phif.complete); +} + +/* + * nss_phys_if_buf() + * Send packet to physical interface owned by NSS + */ +nss_tx_status_t nss_phys_if_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num) +{ + nss_trace("%px: Phys If Tx packet, id:%d, data=%px", nss_ctx, if_num, os_buf->data); + +#ifdef NSS_DRV_TSTAMP_ENABLE + /* + * If we need the packet to be timestamped by GMAC Hardware at Tx + * send the packet to tstamp NSS module + */ + if (unlikely(skb_shinfo(os_buf)->tx_flags & SKBTX_HW_TSTAMP)) { + /* try PHY Driver hook for transmit timestamping firstly */ +#if defined(NSS_HAL_IPQ807x_SUPPORT) || defined(NSS_HAL_IPQ60XX_SUPPORT) + nss_phy_tstamp_tx_buf(os_buf->dev, os_buf); +#endif + if (!(skb_shinfo(os_buf)->tx_flags & SKBTX_IN_PROGRESS)) + return nss_tstamp_tx_buf(nss_ctx, os_buf, if_num); + } +#endif + + return nss_core_send_packet(nss_ctx, os_buf, if_num, H2N_BIT_FLAG_BUFFER_REUSABLE); +} + +/* + * nss_phys_if_msg() + */ +nss_tx_status_t nss_phys_if_msg(struct nss_ctx_instance *nss_ctx, struct nss_phys_if_msg *nim) +{ + struct nss_cmn_msg *ncm = &nim->cm; + struct net_device *dev; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + /* + * Sanity check the message + */ + if (!NSS_IS_IF_TYPE(PHYSICAL, ncm->interface)) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_PHYS_IF_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + dev = nss_ctx->subsys_dp_register[ncm->interface].ndev; + if (!dev) { + nss_warning("%px: Unregister physical interface %d: no context", nss_ctx, 
ncm->interface); + return NSS_TX_FAILURE_BAD_PARAM; + } + + return nss_core_send_cmd(nss_ctx, nim, sizeof(*nim), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_phys_if_tx_msg_sync() + * Send a message to physical interface & wait for the response. + */ +nss_tx_status_t nss_phys_if_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_phys_if_msg *nim) +{ + nss_tx_status_t status; + int ret = 0; + + down(&phif.sem); + + status = nss_phys_if_msg(nss_ctx, nim); + if(status != NSS_TX_SUCCESS) + { + nss_warning("%px: nss_phys_if_msg failed\n", nss_ctx); + up(&phif.sem); + return status; + } + + ret = wait_for_completion_timeout(&phif.complete, msecs_to_jiffies(NSS_PHYS_IF_TX_TIMEOUT)); + + if(!ret) + { + nss_warning("%px: phys_if tx failed due to timeout\n", nss_ctx); + phif.response = NSS_TX_FAILURE; + } + + status = phif.response; + up(&phif.sem); + + return status; +} + +/* + ********************************** + Register/Unregister/Miscellaneous APIs + ********************************** + */ + +/* + * nss_phys_if_register() + */ +struct nss_ctx_instance *nss_phys_if_register(uint32_t if_num, + nss_phys_if_rx_callback_t rx_callback, + nss_phys_if_msg_callback_t msg_callback, + struct net_device *netdev, + uint32_t features) +{ + uint8_t id = nss_top_main.phys_if_handler_id[if_num]; + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[id]; + + nss_assert(nss_ctx); + nss_assert(if_num <= NSS_MAX_PHYSICAL_INTERFACES); + + nss_core_register_subsys_dp(nss_ctx, if_num, rx_callback, NULL, NULL, netdev, features); + + nss_top_main.phys_if_msg_callback[if_num] = msg_callback; + + nss_ctx->phys_if_mtu[if_num] = ETH_DATA_LEN; + return nss_ctx; +} + +/* + * nss_phys_if_unregister() + */ +void nss_phys_if_unregister(uint32_t if_num) +{ + uint8_t id = nss_top_main.phys_if_handler_id[if_num]; + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[id]; + + nss_assert(nss_ctx); + nss_assert(if_num < NSS_MAX_PHYSICAL_INTERFACES); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + 
nss_top_main.phys_if_msg_callback[if_num] = NULL; + + nss_top_main.nss[0].phys_if_mtu[if_num] = 0; + nss_top_main.nss[1].phys_if_mtu[if_num] = 0; +} + +/* + * nss_phys_if_register_handler() + */ +void nss_phys_if_register_handler(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + uint32_t ret; + + ret = nss_core_register_handler(nss_ctx, if_num, nss_phys_if_msg_handler, NULL); + + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("Message handler FAILED to be registered for interface %d", if_num); + return; + } + + if(!nss_phys_if_sem_init_done) { + sema_init(&phif.sem, 1); + init_completion(&phif.complete); + nss_phys_if_sem_init_done = 1; + } +} + +/* + * nss_phys_if_open() + * Send open command to physical interface + */ +nss_tx_status_t nss_phys_if_open(struct nss_ctx_instance *nss_ctx, uint32_t tx_desc_ring, uint32_t rx_desc_ring, uint32_t mode, uint32_t if_num, uint32_t bypass_nw_process) +{ + struct nss_phys_if_msg nim; + struct nss_if_open *nio; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + nss_info("%px: Phys If Open, id:%d, TxDesc: %x, RxDesc: %x\n", nss_ctx, if_num, tx_desc_ring, rx_desc_ring); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_PHYS_IF_OPEN, + sizeof(struct nss_if_open), nss_phys_if_callback, NULL); + + nio = &nim.msg.if_msg.open; + nio->tx_desc_ring = tx_desc_ring; + nio->rx_desc_ring = rx_desc_ring; + + if (mode == NSS_PHYS_IF_MODE0) { + nio->rx_forward_if = NSS_ETH_RX_INTERFACE; + nio->alignment_mode = NSS_IF_DATA_ALIGN_2BYTE; + } else if (mode == NSS_PHYS_IF_MODE1) { + nio->rx_forward_if = NSS_SJACK_INTERFACE; + nio->alignment_mode = NSS_IF_DATA_ALIGN_4BYTE; + } else if (mode == NSS_PHYS_IF_MODE2) { + nio->rx_forward_if = NSS_PORTID_INTERFACE; + nio->alignment_mode = NSS_IF_DATA_ALIGN_2BYTE; + } else { + nss_info("%px: Phys If Open, unknown mode %d\n", nss_ctx, mode); + return NSS_TX_FAILURE; + } + + /* + * If Network processing in NSS is bypassed + * update next hop and alignment accordingly + */ + if (bypass_nw_process) { + nio->rx_forward_if 
= NSS_N2H_INTERFACE; + nio->alignment_mode = NSS_IF_DATA_ALIGN_2BYTE; + } + + return nss_phys_if_msg_sync(nss_ctx, &nim); +} + +/* + * nss_phys_if_close() + * Send close command to physical interface + */ +nss_tx_status_t nss_phys_if_close(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_phys_if_msg nim; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + nss_info("%px: Phys If Close, id:%d \n", nss_ctx, if_num); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_PHYS_IF_CLOSE, + sizeof(struct nss_if_close), nss_phys_if_callback, NULL); + + return nss_phys_if_msg_sync(nss_ctx, &nim); +} + +/* + * nss_phys_if_link_state() + * Send link state to physical interface + */ +nss_tx_status_t nss_phys_if_link_state(struct nss_ctx_instance *nss_ctx, uint32_t link_state, uint32_t if_num) +{ + struct nss_phys_if_msg nim; + struct nss_if_link_state_notify *nils; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + nss_info("%px: Phys If Link State, id:%d, State: %x\n", nss_ctx, if_num, link_state); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_PHYS_IF_LINK_STATE_NOTIFY, + sizeof(struct nss_if_link_state_notify), nss_phys_if_callback, NULL); + + nils = &nim.msg.if_msg.link_state_notify; + nils->state = link_state; + return nss_phys_if_msg_sync(nss_ctx, &nim); +} + +/* + * nss_phys_if_mac_addr() + * Send a MAC address to physical interface + */ +nss_tx_status_t nss_phys_if_mac_addr(struct nss_ctx_instance *nss_ctx, uint8_t *addr, uint32_t if_num) +{ + struct nss_phys_if_msg nim; + struct nss_if_mac_address_set *nmas; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + nss_info("%px: Phys If MAC Address, id:%d\n", nss_ctx, if_num); + nss_assert(addr != 0); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_PHYS_IF_MAC_ADDR_SET, + sizeof(struct nss_if_mac_address_set), nss_phys_if_callback, NULL); + + nmas = &nim.msg.if_msg.mac_address_set; + memcpy(nmas->mac_addr, addr, ETH_ALEN); + return nss_phys_if_msg_sync(nss_ctx, &nim); +} + +/* + * nss_phys_if_change_mtu() + * Send a MTU change command + */ +nss_tx_status_t 
nss_phys_if_change_mtu(struct nss_ctx_instance *nss_ctx, uint32_t mtu, uint32_t if_num) +{ + struct nss_phys_if_msg nim; + struct nss_if_mtu_change *nimc; + uint16_t mtu_sz, max_mtu; + int i; + nss_tx_status_t status; + +/* + * We disallow MTU changes for low memory profiles in order to keep the buffer size constant + */ +#ifdef NSS_FIXED_BUFFER_SIZE + if (mtu > ETH_DATA_LEN) { + nss_info_always("MTU change beyond 1500 restricted for low memory profile \n"); + return NSS_TX_FAILURE; + } +#endif + NSS_VERIFY_CTX_MAGIC(nss_ctx); + nss_info("%px: Phys If Change MTU, id:%d, mtu=%d\n", nss_ctx, if_num, mtu); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_PHYS_IF_MTU_CHANGE, + sizeof(struct nss_if_mtu_change), nss_phys_if_callback, NULL); + + nimc = &nim.msg.if_msg.mtu_change; + nimc->min_buf_size = mtu; + + status = nss_phys_if_msg_sync(nss_ctx, &nim); + if (status != NSS_TX_SUCCESS) { + return status; + } + + /* + * Update the mtu and max_buf_size accordingly + */ + nss_ctx->phys_if_mtu[if_num] = (uint16_t)mtu; + + /* + * Loop through MTU values of all Physical + * interfaces and get the maximum one of all + */ + max_mtu = nss_ctx->phys_if_mtu[0]; + for (i = 1; i < NSS_MAX_PHYSICAL_INTERFACES; i++) { + if (max_mtu < nss_ctx->phys_if_mtu[i]) { + max_mtu = nss_ctx->phys_if_mtu[i]; + } + } + + mtu_sz = nss_top_main.data_plane_ops->data_plane_get_mtu_sz(max_mtu); + +/* + * We need to ensure the max_buf_size for 256MB profile stays + * constant at NSS_EMPTY_BUFFER_SIZE. We do this by disallowing changes + * to it due to MTU changes. 
Also, NSS_EMPTY_BUFFER_SIZE includes the + * PAD and ETH_HLEN, and is aligned to SMP_CACHE_BYTES + */ +#ifndef NSS_FIXED_BUFFER_SIZE + nss_ctx->max_buf_size = ((mtu_sz + ETH_HLEN + SMP_CACHE_BYTES - 1) & ~(SMP_CACHE_BYTES - 1)) + NSS_NBUF_ETH_EXTRA + NSS_NBUF_PAD_EXTRA; + + /* + * max_buf_size should not be lesser than NSS_NBUF_PAYLOAD_SIZE + */ + if (nss_ctx->max_buf_size < NSS_NBUF_PAYLOAD_SIZE) { + nss_ctx->max_buf_size = NSS_NBUF_PAYLOAD_SIZE; + } +#else + nss_ctx->max_buf_size = NSS_EMPTY_BUFFER_SIZE; +#endif + +#if (NSS_SKB_REUSE_SUPPORT == 1) + if (nss_ctx->max_buf_size > nss_core_get_max_reuse()) + nss_core_set_max_reuse(ALIGN(nss_ctx->max_buf_size * 2, PAGE_SIZE)); +#endif + + nss_info("Current mtu:%u mtu_sz:%u max_buf_size:%d\n", mtu, mtu_sz, nss_ctx->max_buf_size); + + if (mtu_sz > nss_ctx->nss_top->prev_mtu_sz) { + + /* If crypto is enabled on platform + * Send the flush payloads message + */ + if (nss_ctx->nss_top->crypto_enabled) { + if (nss_n2h_flush_payloads(nss_ctx) != NSS_TX_SUCCESS) { + nss_info("Unable to send flush payloads command to NSS\n"); + } + } + } + nss_ctx->nss_top->prev_mtu_sz = mtu_sz; + + return status; +} + +/* + * nss_phys_if_vsi_assign() + * Send a vsi assign to physical interface + */ +nss_tx_status_t nss_phys_if_vsi_assign(struct nss_ctx_instance *nss_ctx, uint32_t vsi, uint32_t if_num) +{ + struct nss_phys_if_msg nim; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + nss_info("%px: Phys If VSI Assign, id:%d\n", nss_ctx, if_num); + + memset(&nim, 0, sizeof(struct nss_phys_if_msg)); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_PHYS_IF_VSI_ASSIGN, + sizeof(struct nss_if_vsi_assign), nss_phys_if_callback, NULL); + + nim.msg.if_msg.vsi_assign.vsi = vsi; + return nss_phys_if_msg_sync(nss_ctx, &nim); +} + +/* + * nss_phys_if_vsi_unassign() + * Send a vsi unassign to physical interface + */ +nss_tx_status_t nss_phys_if_vsi_unassign(struct nss_ctx_instance *nss_ctx, uint32_t vsi, uint32_t if_num) +{ + struct nss_phys_if_msg nim; + + 
NSS_VERIFY_CTX_MAGIC(nss_ctx); + nss_info("%px: Phys If VSI Unassign, id:%d\n", nss_ctx, if_num); + + memset(&nim, 0, sizeof(struct nss_phys_if_msg)); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_PHYS_IF_VSI_UNASSIGN, + sizeof(struct nss_if_vsi_unassign), nss_phys_if_callback, NULL); + + nim.msg.if_msg.vsi_unassign.vsi = vsi; + return nss_phys_if_msg_sync(nss_ctx, &nim); +} + +/* + * nss_phys_if_pause_on_off() + * Send a pause enabled/disabled message to GMAC + */ +nss_tx_status_t nss_phys_if_pause_on_off(struct nss_ctx_instance *nss_ctx, uint32_t pause_on, uint32_t if_num) +{ + struct nss_phys_if_msg nim; + struct nss_if_pause_on_off *nipe; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + nss_info("%px: phys if pause is set to %d, id:%d\n", nss_ctx, pause_on, if_num); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_PHYS_IF_PAUSE_ON_OFF, + sizeof(struct nss_if_pause_on_off), nss_phys_if_callback, NULL); + + nipe = &nim.msg.if_msg.pause_on_off; + nipe->pause_on = pause_on; + + return nss_phys_if_msg_sync(nss_ctx, &nim); +} + +/* + * nss_phys_if_reset_nexthop() + * De-configures nexthop for an interface + */ +nss_tx_status_t nss_phys_if_reset_nexthop(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_phys_if_msg nim; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + nss_cmn_msg_init(&nim.cm, if_num, NSS_PHYS_IF_RESET_NEXTHOP, + 0, nss_phys_if_callback, NULL); + + return nss_phys_if_msg_sync(nss_ctx, &nim); +} +EXPORT_SYMBOL(nss_phys_if_reset_nexthop); + +/* + * nss_phys_if_set_nexthop() + * Configures nexthop for an interface + */ +nss_tx_status_t nss_phys_if_set_nexthop(struct nss_ctx_instance *nss_ctx, uint32_t if_num, uint32_t nexthop) +{ + struct nss_phys_if_msg nim; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (nexthop >= NSS_MAX_NET_INTERFACES) { + nss_warning("%px: Invalid nexthop interface number: %d", nss_ctx, nexthop); + return NSS_TX_FAILURE_BAD_PARAM; + } + + nss_info("%px: Phys If nexthop will be set to %d, id:%d\n", nss_ctx, nexthop, if_num); + + nss_cmn_msg_init(&nim.cm, 
if_num, NSS_PHYS_IF_SET_NEXTHOP, + sizeof(struct nss_if_set_nexthop), nss_phys_if_callback, NULL); + nim.msg.if_msg.set_nexthop.nexthop = nexthop; + + return nss_phys_if_msg_sync(nss_ctx, &nim); +} +EXPORT_SYMBOL(nss_phys_if_set_nexthop); + +/* + * nss_get_state() + * Return the NSS initialization state + */ +nss_state_t nss_get_state(void *ctx) +{ + return nss_cmn_get_state(ctx); +} + +EXPORT_SYMBOL(nss_get_state); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_phys_if.h b/feeds/ipq807x/qca-nss-drv/src/nss_phys_if.h new file mode 100644 index 000000000..0df6bc32f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_phys_if.h @@ -0,0 +1,326 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/** + * nss_phys_if + * Physical interface message structure + */ + +#ifndef __NSS_PHYS_IF_H +#define __NSS_PHYS_IF_H + +/** + * Physical IF + */ + +/** + * The NSS per-GMAC statistics sync structure. 
+ */ +struct nss_phys_if_estats { + uint32_t rx_errors; /**< Number of RX errors */ + uint32_t rx_receive_errors; /**< Number of RX receive errors */ + uint32_t rx_descriptor_errors; /**< Number of RX descriptor errors */ + uint32_t rx_late_collision_errors; + /**< Number of RX late collision errors */ + uint32_t rx_dribble_bit_errors; /**< Number of RX dribble bit errors */ + uint32_t rx_length_errors; /**< Number of RX length errors */ + uint32_t rx_ip_header_errors; /**< Number of RX IP header errors */ + uint32_t rx_ip_payload_errors; /**< Number of RX IP payload errors */ + uint32_t rx_no_buffer_errors; /**< Number of RX no-buffer errors */ + uint32_t rx_transport_csum_bypassed; + /**< Number of RX packets where the transport checksum was bypassed */ + uint32_t tx_collisions; /**< Number of TX collisions */ + uint32_t tx_errors; /**< Number of TX errors */ + uint32_t tx_jabber_timeout_errors; + /**< Number of TX jabber timeout errors */ + uint32_t tx_frame_flushed_errors; + /**< Number of TX frame flushed errors */ + uint32_t tx_loss_of_carrier_errors; + /**< Number of TX loss of carrier errors */ + uint32_t tx_no_carrier_errors; /**< Number of TX no carrier errors */ + uint32_t tx_late_collision_errors; + /**< Number of TX late collision errors */ + uint32_t tx_excessive_collision_errors; + /**< Number of TX excessive collision errors */ + uint32_t tx_excessive_deferral_errors; + /**< Number of TX excessive deferral errors */ + uint32_t tx_underflow_errors; /**< Number of TX underflow errors */ + uint32_t tx_ip_header_errors; /**< Number of TX IP header errors */ + uint32_t tx_ip_payload_errors; /**< Number of TX IP payload errors */ + uint32_t tx_dropped; /**< Number of TX dropped packets */ + uint32_t hw_errs[10]; /**< GMAC DMA error counters */ + uint32_t rx_missed; /**< Number of RX packets missed by the DMA */ + uint32_t fifo_overflows; /**< Number of RX FIFO overflows signalled by the DMA */ + uint32_t rx_scatter_errors; /**< Number of scattered frames 
received by the DMA */ + uint32_t tx_ts_create_errors; /**< Number of tx timestamp creation errors */ + uint32_t gmac_total_ticks; /**< Total clock ticks spend inside the GMAC */ + uint32_t gmac_worst_case_ticks; /**< Worst case iteration of the GMAC in ticks */ + uint32_t gmac_iterations; /**< Number of iterations around the GMAC */ + uint32_t tx_pause_frames; /**< Number of pause frames sent by the GMAC */ + uint32_t mmc_rx_overflow_errors; + /**< Number of RX overflow errors */ + uint32_t mmc_rx_watchdog_timeout_errors; + /**< Number of RX watchdog timeout errors */ + uint32_t mmc_rx_crc_errors; /**< Number of RX CRC errors */ + uint32_t mmc_rx_ip_header_errors; + /**< Number of RX IP header errors */ + uint32_t mmc_rx_octets_g; /* Number of good octets received */ + uint32_t mmc_rx_ucast_frames; /* Number of Unicast frames received */ + uint32_t mmc_rx_bcast_frames; /* Number of Bcast frames received */ + uint32_t mmc_rx_mcast_frames; /* Number of Mcast frames received */ + uint32_t mmc_rx_undersize; /* Number of RX undersize frames */ + uint32_t mmc_rx_oversize; /* Number of RX oversize frames */ + uint32_t mmc_rx_jabber; /* Number of jabber frames */ + uint32_t mmc_rx_octets_gb; /* Number of good/bad octets */ + uint32_t mmc_rx_frag_frames_g; /* Number of good ipv4 frag frames */ + uint32_t mmc_tx_octets_g; /* Number of good octets sent */ + uint32_t mmc_tx_ucast_frames; /* Number of Unicast frames sent*/ + uint32_t mmc_tx_bcast_frames; /* Number of Broadcast frames sent */ + uint32_t mmc_tx_mcast_frames; /* Number of Multicast frames sent */ + uint32_t mmc_tx_deferred; /* Number of Deferred frames sent */ + uint32_t mmc_tx_single_col; /* Number of single collisions */ + uint32_t mmc_tx_multiple_col; /* Number of multiple collisions */ + uint32_t mmc_tx_octets_gb; /* Number of good/bad octets sent*/ +}; + +/** + * The NSS GMAC statistics sync structure. 
+ */ +struct nss_phys_if_stats { + struct nss_cmn_node_stats if_stats; /**< Generic interface stats */ + struct nss_phys_if_estats estats; /**< Extended Statistics specific to GMAC */ +}; + +/** + * @brief Request/Response types + */ +enum nss_phys_if_msg_types { + NSS_PHYS_IF_OPEN = NSS_IF_OPEN, + NSS_PHYS_IF_CLOSE = NSS_IF_CLOSE, + NSS_PHYS_IF_LINK_STATE_NOTIFY = NSS_IF_LINK_STATE_NOTIFY, + NSS_PHYS_IF_MTU_CHANGE = NSS_IF_MTU_CHANGE, + NSS_PHYS_IF_MAC_ADDR_SET = NSS_IF_MAC_ADDR_SET, + NSS_PHYS_IF_STATS = NSS_IF_STATS, + NSS_PHYS_IF_ISHAPER_ASSIGN = NSS_IF_ISHAPER_ASSIGN, + NSS_PHYS_IF_BSHAPER_ASSIGN = NSS_IF_BSHAPER_ASSIGN, + NSS_PHYS_IF_ISHAPER_UNASSIGN = NSS_IF_ISHAPER_UNASSIGN, + NSS_PHYS_IF_BSHAPER_UNASSIGN = NSS_IF_BSHAPER_UNASSIGN, + NSS_PHYS_IF_ISHAPER_CONFIG = NSS_IF_ISHAPER_CONFIG, + NSS_PHYS_IF_BSHAPER_CONFIG = NSS_IF_BSHAPER_CONFIG, + NSS_PHYS_IF_PAUSE_ON_OFF = NSS_IF_PAUSE_ON_OFF, + NSS_PHYS_IF_VSI_ASSIGN = NSS_IF_VSI_ASSIGN, + NSS_PHYS_IF_VSI_UNASSIGN = NSS_IF_VSI_UNASSIGN, + NSS_PHYS_IF_SET_NEXTHOP = NSS_IF_SET_NEXTHOP, + NSS_PHYS_IF_RESET_NEXTHOP = NSS_IF_RESET_NEXTHOP, + NSS_PHYS_IF_EXTENDED_STATS_SYNC = NSS_IF_MAX_MSG_TYPES + 1, + NSS_PHYS_IF_MAX_MSG_TYPES +}; + +/** + * Message structure to send/receive physical interface commands + * + * NOTE: Do not adjust the location of if_msg relative to new + * message types as it represents the base messages for all + * intefaces. 
+ */ +struct nss_phys_if_msg { + struct nss_cmn_msg cm; /**< Message Header */ + union { + union nss_if_msgs if_msg; /**< Interfaces messages */ + struct nss_phys_if_stats stats; /**< Phys If Statistics */ + } msg; +}; + +/** + * @brief Callback to receive physical interface messages + * + * @param app_data Application context for this message + * @param msg NSS physical interface message + * + * @return void + */ +typedef void (*nss_phys_if_msg_callback_t)(void *app_data, struct nss_phys_if_msg *msg); + +/** + * @brief Callback to send physical interface data to the tranmsit path. + * + * @param netdev Net device + * @param skb Data buffer + * + * @return void + */ +typedef void (*nss_phys_if_xmit_callback_t)(struct net_device *netdev, struct sk_buff *skb); + +/** + * @brief Callback to receive physical interface data + * TODO: Adjust to pass app_data as unknown to the + * list layer and netdev/sk as known. + * + * @param app_data Application context for this message + * @param os_buf Data buffer + * + * @return void + */ +typedef void (*nss_phys_if_rx_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * @brief Callback to recieve extended data plane packet on interface. + * + * @param app_data Application context for this message + * @param skb Data buffer + * @param napi napi pointer + * + * @return void + */ +typedef void (*nss_phys_if_rx_ext_data_callback_t)(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi); + +/** + * @brief Register to send/receive GMAC packets/messages + * + * @param if_num GMAC i/f number + * @param rx_callback Receive callback for packets + * @param event_callback Receive callback for events + * @param netdev netdevice associated with this interface. 
+ * @param features denote the skb types supported by this interface + * + * @return void* NSS context + */ +struct nss_ctx_instance *nss_phys_if_register(uint32_t if_num, + nss_phys_if_rx_callback_t rx_callback, + nss_phys_if_msg_callback_t msg_callback, + struct net_device *netdev, + uint32_t features); + +/** + * @brief Send GMAC packet + * + * @param nss_ctx NSS context + * @param os_buf OS buffer (e.g. skbuff) + * @param if_num GMAC i/f number + * + * @return nss_tx_status_t Tx status + */ +nss_tx_status_t nss_phys_if_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num); + +/** + * @brief Send message to physical interface + * + * @param nim Physical interface message + * + * @return command Tx status + */ +nss_tx_status_t nss_phys_if_msg(struct nss_ctx_instance *nss_ctx, struct nss_phys_if_msg *nim); + +/** + * @brief Send a message to physical interface & wait for the response. + * + * @param nim Physical interface message + * + * @return command Tx status + */ +nss_tx_status_t nss_phys_if_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_phys_if_msg *nim); + +/** + * @brief Open GMAC interface on NSS + * + * @param nss_ctx NSS context + * @param tx_desc_ring Tx descriptor ring address + * @param rx_desc_ring Rx descriptor ring address + * @param if_num GMAC i/f number + * @param bypass_nw_process network processing in nss is bypassed for GMAC + * + * @return nss_tx_status_t Tx status + */ +nss_tx_status_t nss_phys_if_open(struct nss_ctx_instance *nss_ctx, uint32_t tx_desc_ring, uint32_t rx_desc_ring, uint32_t mode, uint32_t if_num, + uint32_t bypass_nw_process); + +/** + * @brief Close GMAC interface on NSS + * + * @param nss_ctx NSS context + * @param if_num GMAC i/f number + * + * @return nss_tx_status_t Tx status + */ +nss_tx_status_t nss_phys_if_close(struct nss_ctx_instance *nss_ctx, uint32_t if_num); + +/** + * @brief Send link state message to NSS + * + * @param nss_ctx NSS context + * @param link_state Link state + * 
@param if_num GMAC i/f number + * + * @return nss_tx_status_t Tx status + */ +nss_tx_status_t nss_phys_if_link_state(struct nss_ctx_instance *nss_ctx, uint32_t link_state, uint32_t if_num); + +/** + * @brief Send MAC address to NSS + * + * @param nss_ctx NSS context + * @param addr MAC address pointer + * @param if_num GMAC i/f number + * + * @return nss_tx_status_t Tx status + */ +nss_tx_status_t nss_phys_if_mac_addr(struct nss_ctx_instance *nss_ctx, uint8_t *addr, uint32_t if_num); + +/** + * @brief Send MTU change notification to NSS + * + * @param nss_ctx NSS context + * @param mtu MTU + * @param if_num GMAC i/f number + * + * @return nss_tx_status_t Tx status + */ +nss_tx_status_t nss_phys_if_change_mtu(struct nss_ctx_instance *nss_ctx, uint32_t mtu, uint32_t if_num); + +/** + * @brief Send vsi assign to NSS + * + * @param nss_ctx NSS context + * @param vsi VSI number + * @param if_num GMAC i/f number + * + * @return nss_tx_status_t Tx status + */ +nss_tx_status_t nss_phys_if_vsi_assign(struct nss_ctx_instance *nss_ctx, uint32_t vsi, uint32_t if_num); + +/** + * @brief Send vsi unassign to NSS + * + * @param nss_ctx NSS context + * @param vsi VSI number + * @param if_num GMAC i/f number + * + * @return nss_tx_status_t Tx status + */ +nss_tx_status_t nss_phys_if_vsi_unassign(struct nss_ctx_instance *nss_ctx, uint32_t vsi, uint32_t if_num); + +/** + * @brief Send pause frame enabled notification to NSS + * + * @param nss_ctx NSS context + * @param pause_on Pause on or off + * @param if_num GMAC i/f number + * + * @return nss_tx_status_t Tx status + */ +nss_tx_status_t nss_phys_if_pause_on_off(struct nss_ctx_instance *nss_ctx, uint32_t pause_on, uint32_t if_num); + +#endif /* __NSS_PHYS_IF_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pm.c b/feeds/ipq807x/qca-nss-drv/src/nss_pm.c new file mode 100644 index 000000000..75527cddb --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pm.c @@ -0,0 +1,447 @@ +/* + 
************************************************************************** + * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_pm.c + * NSS Power Management APIs + * + */ +#include +#include +#include +#include +#include + +#if (NSS_DT_SUPPORT != 1) +#include +#endif + +#if (NSS_PM_SUPPORT == 1) +#include "nss_pm.h" + +/* + * Global NSS PM structure + */ +struct nss_pm_global_ctx ctx; + +/* + * Bus vector table for GMAC driver + */ +static struct msm_bus_paths nss_gmac_bw_level_tbl[NSS_PM_PERF_MAX_LEVELS] = { + [NSS_PM_PERF_LEVEL_SUSPEND] = GMAC_BW_MBPS(0, 0), + /* 0 MHz to DDR, 0 MHz to TCM */ + [NSS_PM_PERF_LEVEL_IDLE] = GMAC_BW_MBPS(133, 5), + /* 133 MHz to DDR, 5 MHz to TCM */ + [NSS_PM_PERF_LEVEL_NOMINAL] = GMAC_BW_MBPS(200, 400), + /* 200 MHz to DDR, 10 MHz to TCM */ + [NSS_PM_PERF_LEVEL_TURBO] = GMAC_BW_MBPS(266, 533), + /* 266 MHz to DDR, 20 MHz to TCM */ +}; + +/* + * Bus vector table for Crypto driver + */ +static struct msm_bus_paths nss_crypto_bw_level_tbl[NSS_PM_PERF_MAX_LEVELS] = { + [NSS_PM_PERF_LEVEL_SUSPEND] = CRYPTO_BW_MBPS(0, 0), + /* 0 MHz to DDR, 0 MHz to TCM */ + [NSS_PM_PERF_LEVEL_IDLE] = 
CRYPTO_BW_MBPS(133, 5), + /* 133 MHz to DDR, 5 MHz to TCM */ + [NSS_PM_PERF_LEVEL_NOMINAL] = CRYPTO_BW_MBPS(200, 400), + /* 200 MHz to DDR, 10 MHz to TCM */ + [NSS_PM_PERF_LEVEL_TURBO] = CRYPTO_BW_MBPS(266, 533), + /* 266 MHz to DDR, 20 MHz to TCM */ +}; + +#ifdef NSS_PM_NETAP_GMAC_SCALING + +/* + * Bus vector table for NSS HLOS driver + * This requests bw for both NSS Fab0 and Fab1 on behalf of GMAC and NSS Drivers + */ +static struct msm_bus_paths nss_netap_bw_level_tbl[NSS_PM_PERF_MAX_LEVELS] = { + [NSS_PM_PERF_LEVEL_SUSPEND] = GMAC_BW_MBPS(0, 0), + /* 0 MHz to DDR, 0 MHz to TCM */ + [NSS_PM_PERF_LEVEL_IDLE] = GMAC_BW_MBPS(122, 122), + /* 133 MHz to DDR and TCM */ + [NSS_PM_PERF_LEVEL_NOMINAL] = GMAC_BW_MBPS(200, 200), + /* 400 MHz to DDR and TCM */ + [NSS_PM_PERF_LEVEL_TURBO] = GMAC_BW_MBPS(400, 400), + /* 533 MHz to DDR and TCM */ +}; + +#else + +/* + * Bus vector table for NSS HLOS driver + */ +static struct msm_bus_paths nss_netap_bw_level_tbl[NSS_PM_PERF_MAX_LEVELS] = { + [NSS_PM_PERF_LEVEL_SUSPEND] = NETAP_BW_MBPS(0, 0), + /* 0 MHz to DDR, 0 MHz to TCM */ + [NSS_PM_PERF_LEVEL_IDLE] = NETAP_BW_MBPS(133, 133), + /* 133 MHz to DDR and TCM */ + [NSS_PM_PERF_LEVEL_NOMINAL] = NETAP_BW_MBPS(400, 400), + /* 400 MHz to DDR and TCM */ + [NSS_PM_PERF_LEVEL_TURBO] = NETAP_BW_MBPS(533, 533), + /* 533 MHz to DDR and TCM */ +}; + +#endif + +/* + * Bus Driver Platform data for GMAC, Crypto and Netap clients + */ +static struct msm_bus_scale_pdata nss_bus_scale[] = { + [NSS_PM_CLIENT_GMAC] = { + .usecase = nss_gmac_bw_level_tbl, + .num_usecases = ARRAY_SIZE(nss_gmac_bw_level_tbl), + .active_only = 1, + .name = "qca-nss-gmac", + }, + + [NSS_PM_CLIENT_CRYPTO] = { + .usecase = nss_crypto_bw_level_tbl, + .num_usecases = ARRAY_SIZE(nss_crypto_bw_level_tbl), + .active_only = 1, + .name = "qca-nss-crypto", + }, + + [NSS_PM_CLIENT_NETAP] = { + .usecase = nss_netap_bw_level_tbl, + .num_usecases = ARRAY_SIZE(nss_netap_bw_level_tbl), + .active_only = 1, + .name = "qca-nss-drv", + }, 
+}; + +/* + * nss_pm_dbg_perf_level_get + * debugfs hook to get the current performance level + */ +static int nss_pm_dbg_perf_level_get(void *data, u64 *val) +{ + nss_pm_client_data_t *pm_client; + + pm_client = (nss_pm_client_data_t *)data; + *val = pm_client->current_perf_lvl; + + return NSS_PM_API_SUCCESS; +} + +/* + * nss_pm_dbg_autoscale_get + * debugfs hook to get the current autoscale setting + */ +static int nss_pm_dbg_autoscale_get(void *data, u64 *val) +{ + nss_pm_client_data_t *pm_client; + + pm_client = (nss_pm_client_data_t *)data; + *val = pm_client->auto_scale; + + return NSS_PM_API_SUCCESS; +} + +/* + * nss_pm_dbg_perf_level_set + * debugfs hook to set perf level for a client + */ +static int nss_pm_dbg_perf_level_set(void *data, u64 val) +{ + uint32_t perf_level; + + perf_level = (uint32_t) val; + + if (perf_level >= NSS_PM_PERF_MAX_LEVELS || + perf_level < NSS_PM_PERF_LEVEL_IDLE) { + nss_pm_warning("unsupported performance level %d \n", perf_level); + return NSS_PM_API_FAILED; + } + + nss_pm_set_perf_level(data, perf_level); + return NSS_PM_API_SUCCESS; +} + +/* + * nss_pm_dbg_autoscale_set + * debugfs hook to enable auto scaling for a client + */ +static int nss_pm_dbg_autoscale_set(void *data, u64 val) +{ + nss_pm_client_data_t *pm_client; + + if (val > 1) { + nss_pm_warning(" Invalid set value, valid values are 0/1 \n"); + return NSS_PM_API_FAILED; + } + + pm_client->auto_scale = (uint32_t)val; + return NSS_PM_API_SUCCESS; +} + +DEFINE_SIMPLE_ATTRIBUTE(perf_level_fops, nss_pm_dbg_perf_level_get, nss_pm_dbg_perf_level_set, "%llu\n"); + +DEFINE_SIMPLE_ATTRIBUTE(autoscale_fops, nss_pm_dbg_autoscale_get, nss_pm_dbg_autoscale_set, "%llu\n"); +#endif /** (NSS_PM_SUPPORT == 1) */ + +/* + * nss_pm_client_register + * Initialize GMAC specific PM parameters + * + * Creates debugfs hooks for user-space control of NSS Client PM + * Initializes Bus BW to Idle Perf level + * Returns PM handle to the caller. 
+ * + */ +void *nss_pm_client_register(nss_pm_client_t client_id) +{ +#if (NSS_PM_SUPPORT == 1) + int ret; + struct dentry *pm_dentry; + nss_pm_client_data_t *pm_client; + + if (unlikely(client_id >= NSS_PM_MAX_CLIENTS)) { + nss_pm_warning("nss_pm_client_register invalid client id %d \n", client_id); + goto error; + } + + pm_client = &ctx.nss_pm_client[client_id]; + + pm_client->bus_perf_client = msm_bus_scale_register_client(&nss_bus_scale[client_id]); + if (!pm_client->bus_perf_client) { + nss_pm_warning("unable to register bus client \n"); + goto error; + } + + ret = msm_bus_scale_client_update_request(pm_client->bus_perf_client, NSS_PM_PERF_LEVEL_IDLE); + if (ret) { + nss_pm_warning("initial bandwidth req failed (%d)\n", ret); + msm_bus_scale_unregister_client((uint32_t) pm_client->bus_perf_client); + goto error; + } + + pm_client->current_perf_lvl = NSS_PM_PERF_LEVEL_IDLE; + + switch (client_id) { + case NSS_PM_CLIENT_GMAC: + pm_dentry = debugfs_create_dir("gmac" , ctx.pm_dentry); + break; + + case NSS_PM_CLIENT_CRYPTO: + pm_dentry = debugfs_create_dir("crypto" , ctx.pm_dentry); + break; + + case NSS_PM_CLIENT_NETAP: + pm_dentry = debugfs_create_dir("netap" , ctx.pm_dentry); + break; + + default: + nss_pm_warning("debugfs create failed invalid client id %d \n", client_id); + msm_bus_scale_unregister_client((uint32_t) pm_client->bus_perf_client); + goto error; + + } + + if (unlikely(pm_dentry == NULL)) { + nss_pm_info("debugfs not created for %d client pm \n", client_id); + goto out; + } + + pm_client->dentry = pm_dentry; + pm_client->client_id = client_id; + + if (!debugfs_create_file("perf_level", S_IRUGO | S_IWUSR, pm_dentry, pm_client, &perf_level_fops)) { + nss_pm_info("debugfs perf_level file not created for %d client pm \n", client_id); + } + + if (!debugfs_create_file("auto-scale", S_IRUGO | S_IWUSR, pm_dentry, pm_client, &autoscale_fops)) { + nss_pm_info("debugfs auto-scale file not created for %d client pm \n", client_id); + } + +out: + return (void 
*)pm_client; +error: +#endif + return NULL; +} +EXPORT_SYMBOL(nss_pm_client_register); + +/* + * nss_pm_client_unregister + * Unregister the client for any PM operations + */ +int nss_pm_client_unregister(nss_pm_client_t client_id) +{ +#if (NSS_PM_SUPPORT == 1) + nss_pm_client_data_t *pm_client; + + if (unlikely(client_id >= NSS_PM_MAX_CLIENTS)) { + nss_pm_warning("nss_pm_client_unregister invalid client id %d \n", client_id); + goto error; + } + + pm_client = &ctx.nss_pm_client[client_id]; + + if (unlikely(pm_client == NULL)) { + nss_pm_warning("nss_pm_client_unregister client not registered %d \n", client_id); + goto error; + } + + if (pm_client->bus_perf_client) { + msm_bus_scale_unregister_client((uint32_t) pm_client->bus_perf_client); + } else { + nss_pm_info("nss_pm_client_unregister: client not registered \n"); + } + + if (likely(pm_client->dentry != NULL)) { + debugfs_remove_recursive(pm_client->dentry); + } + + return NSS_PM_API_SUCCESS; + +error: +#endif + return NSS_PM_API_FAILED; +} + +/* + * nss_pm_set_perf_level() + * Sets the performance level of client specific Fabrics and Clocks to requested level + */ +nss_pm_interface_status_t nss_pm_set_perf_level(void *handle, nss_pm_perf_level_t lvl) +{ +#if ((NSS_DT_SUPPORT == 1) && (NSS_FREQ_SCALE_SUPPORT == 1)) + nss_freq_scales_t index; + + switch (lvl) { + case NSS_PM_PERF_LEVEL_TURBO: + index = NSS_FREQ_HIGH_SCALE; + break; + + case NSS_PM_PERF_LEVEL_NOMINAL: + index = NSS_FREQ_MID_SCALE; + break; + + default: + index = NSS_PM_PERF_LEVEL_IDLE; + } + +#if !defined(NSS_HAL_IPQ807x_SUPPORT) + nss_freq_sched_change(index, false); +#endif + +#elif (NSS_PM_SUPPORT == 1) + + int ret = 0; + nss_pm_client_data_t *pm_client; + + pm_client = (nss_pm_client_data_t *) handle; + if (pm_client->current_perf_lvl == lvl) { + nss_pm_trace("Already at perf level %d , ignoring request \n", lvl); + return NSS_PM_API_SUCCESS; + } + + if (!pm_client->bus_perf_client) { + nss_pm_warning("Bus driver client not registered.request 
failed \n"); + return NSS_PM_API_FAILED; + } + + /* + * Do client specific operations here + */ + if (pm_client->client_id == NSS_PM_CLIENT_NETAP) { + if ((lvl == NSS_PM_PERF_LEVEL_TURBO) && (ctx.turbo_support == true)) { + /* + * For turbo perf level, switch TCM source to + * SRC1 to set TCM clock = 400 MHz + * SRC0 and SRC1 are set to 266 and 400 MHz resp. + * in nss_hal/ipq806x/nss_hal_pvt.c + */ + writel(0x3, NSSTCM_CLK_SRC_CTL); + } else { + /* + * For Nominal and Idle perf level, switch to SRC0 to + * set TCM clock = 266 MHz + */ + writel(0x2, NSSTCM_CLK_SRC_CTL); + + if (lvl == NSS_PM_PERF_LEVEL_TURBO) { + lvl = NSS_PM_PERF_LEVEL_NOMINAL; + } + } + } + + if (pm_client->client_id == NSS_PM_CLIENT_CRYPTO) { + if ((lvl == NSS_PM_PERF_LEVEL_TURBO) && (ctx.turbo_support == true)) { + /* + * For Turbo mode, set Crypto core and + * Fabric port clocks to 213 MHz + */ + writel(0x23, CE5_ACLK_SRC0_NS); + writel(0x23, CE5_HCLK_SRC0_NS); + writel(0x23, CE5_CORE_CLK_SRC0_NS); + + writel(0x2, CE5_ACLK_SRC_CTL); + writel(0x2, CE5_HCLK_SRC_CTL); + writel(0x2, CE5_CORE_CLK_SRC_CTL); + } else { + lvl = NSS_PM_PERF_LEVEL_NOMINAL; + } + } + + /* Update bandwidth if request has changed. This may sleep. 
*/ + ret = msm_bus_scale_client_update_request(pm_client->bus_perf_client, lvl); + if (ret) { + nss_pm_warning("bandwidth request failed (%d)\n", ret); + return NSS_PM_API_FAILED; + } + + nss_pm_info("perf level request, current: %d new: %d \n", pm_client->current_perf_lvl, lvl); + pm_client->current_perf_lvl = lvl; +#endif + + return NSS_PM_API_SUCCESS; +} +EXPORT_SYMBOL(nss_pm_set_perf_level); + +#if (NSS_PM_SUPPORT == 1) +/* + * nss_pm_set_turbo() + * Sets the turbo support flag globally for all clients + */ +void nss_pm_set_turbo() { + + nss_pm_info("NSS Bus PM - Platform supports Turbo Mode \n"); + ctx.turbo_support = true; +} + +/* + * nss_pm_init() + * Initialize NSS PM top level structures + */ +void nss_pm_init(void) { + + nss_pm_info("NSS Bus PM (platform - IPQ806x, build - %s:%s)\n", __DATE__, __TIME__); + + ctx.pm_dentry = debugfs_create_dir("qca-nss-pm", NULL); + + /* Default turbo support is set to off */ + ctx.turbo_support = false; + + if (unlikely(ctx.pm_dentry == NULL)) { + nss_pm_warning("Failed to create qca-nss-drv directory in debugfs"); + } +} +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pm.h b/feeds/ipq807x/qca-nss-drv/src/nss_pm.h new file mode 100644 index 000000000..aaca293cb --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pm.h @@ -0,0 +1,164 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_pm.h + * NSS PM Driver header file + */ + +#ifndef __NSS_PM_H +#define __NSS_PM_H + +#include + +#include +#include +#include +#include + +#include + +/* + * NSS PM debug macros + */ +#if (NSS_PM_DEBUG_LEVEL < 1) +#define nss_pm_assert(fmt, args...) +#else +#define nss_pm_assert(c) if (!(c)) { BUG_ON(!(c)); } +#endif + +#if (NSS_PM_DEBUG_LEVEL < 2) +#define nss_pm_warning(fmt, args...) +#else +#define nss_pm_warning(fmt, args...) printk(KERN_WARNING "nss_pm:"fmt, ##args) +#endif + +#if (NSS_PM_DEBUG_LEVEL < 3) +#define nss_pm_info(fmt, args...) +#else +#define nss_pm_info(fmt, args...) printk(KERN_INFO "nss_pm:"fmt, ##args) +#endif + +#if (NSS_PM_DEBUG_LEVEL < 4) +#define nss_pm_trace(fmt, args...) +#else +#define nss_pm_trace(fmt, args...) 
printk(KERN_DEBUG "nss_pm:"fmt, ##args) +#endif + +/* + * Define this to use NETAP driver also request for NSS Fab1 BW on behalf of GMAC driver + */ +#define NSS_PM_NETAP_GMAC_SCALING 1 + +/* + * PM Client data structure + */ +typedef struct { + uint32_t bus_perf_client; + uint32_t clk_handle; + uint32_t current_perf_lvl; + uint32_t auto_scale; + struct dentry *dentry; + nss_pm_client_t client_id; +} nss_pm_client_data_t; + +/* + * NSS PM driver context + */ +struct nss_pm_global_ctx { + struct dentry *pm_dentry; + bool turbo_support; + nss_pm_client_data_t nss_pm_client[NSS_PM_MAX_CLIENTS]; +}; + +/* + * Macro defining Bus vector for GMAC driver + */ +#define GMAC_BW_MBPS(_data_bw, _desc_bw) \ +{ \ + .vectors = (struct msm_bus_vectors[]){ \ + {\ + .src = MSM_BUS_MASTER_NSS_GMAC_0, \ + .dst = MSM_BUS_SLAVE_EBI_CH0, \ + .ab = (_data_bw) * 16 * 1000000ULL, \ + .ib = (_data_bw) * 16 * 1000000ULL, \ + }, \ + { \ + .src = MSM_BUS_MASTER_NSS_GMAC_0, \ + .dst = MSM_BUS_SLAVE_NSS_TCM, \ + .ab = (_desc_bw) * 8 * 1000000ULL, \ + .ib = (_desc_bw) * 8 * 1000000ULL, \ + }, \ + }, \ + .num_paths = 2, \ +} + +/* + * Macro defining Bus vector for NSS crypto driver + */ +#define CRYPTO_BW_MBPS(_data_bw, _desc_bw) \ +{ \ + .vectors = (struct msm_bus_vectors[]){ \ + {\ + .src = MSM_BUS_MASTER_NSS_CRYPTO5_0, \ + .dst = MSM_BUS_SLAVE_EBI_CH0, \ + .ab = 0, \ + .ib = 0, \ + }, \ + { \ + .src = MSM_BUS_MASTER_NSS_CRYPTO5_0, \ + .dst = MSM_BUS_SLAVE_NSS_TCM, \ + .ab = (_desc_bw) * 8 * 1000000ULL, \ + .ib = (_desc_bw) * 8 * 1000000ULL, \ + }, \ + }, \ + .num_paths = 2, \ +} + +/* + * Macro defining Bus vector for NSS driver + * + */ +#define NETAP_BW_MBPS(_data_bw, _desc_bw) \ +{ \ + .vectors = (struct msm_bus_vectors[]){ \ + {\ + .src = MSM_BUS_MASTER_UBI32_0, \ + .dst = MSM_BUS_SLAVE_EBI_CH0, \ + .ab = (_data_bw) * 16 * 1000000ULL, \ + .ib = (_data_bw) * 16 * 1000000ULL, \ + }, \ + { \ + .src = MSM_BUS_MASTER_UBI32_0, \ + .dst = MSM_BUS_SLAVE_NSS_TCM, \ + .ab = (_desc_bw) * 8 * 
1000000ULL, \ + .ib = (_desc_bw) * 8 * 1000000ULL, \ + }, \ + }, \ + .num_paths = 2, \ +} + +/* + * Initialize NSS PM top level structures + */ +void nss_pm_init(void); + +/* + * Sets the turbo support flag globally for all PM clients + */ +void nss_pm_set_turbo(void); + +#endif /** __NSS_PM_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_portid.c b/feeds/ipq807x/qca-nss-drv/src/nss_portid.c new file mode 100644 index 000000000..65982f09a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_portid.c @@ -0,0 +1,423 @@ +/* + ************************************************************************** + * Copyright (c) 2015-2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_portid_stats.h" +#include "nss_portid_log.h" + +/* + * Spinlock to protect portid interface create/destroy/update + */ +DEFINE_SPINLOCK(nss_portid_spinlock); + +#define NSS_PORTID_TX_TIMEOUT 3000 /* 3 Seconds */ + +/* + * Private data structure for phys_if interface + */ +static struct nss_portid_pvt { + struct semaphore sem; + struct completion complete; + int response; +} pid; + +/* + * Array of portid interface handles. 
Indexing based on the physical port_id + */ +struct nss_portid_handle nss_portid_hdl[NSS_PORTID_MAX_SWITCH_PORT]; + +/* + * nss_portid_handler() + * Handle NSS -> HLOS messages for portid + */ +static void nss_portid_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, + __attribute__((unused))void *app_data) +{ + nss_portid_msg_callback_t cb; + struct nss_portid_msg *npm = (struct nss_portid_msg *)ncm; + + BUG_ON(ncm->interface != NSS_PORTID_INTERFACE); + + /* + * Trace Messages + */ + nss_portid_log_rx_msg(npm); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_PORTID_MAX_MSG_TYPE) { + nss_warning("%px: received invalid message %d for portid interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_portid_msg)) { + nss_warning("%px: message size incorrect: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + switch (ncm->type) { + case NSS_PORTID_STATS_SYNC_MSG: + /* + * Update portid statistics. + */ + nss_portid_stats_sync(nss_ctx, &npm->msg.stats_sync); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages, portid sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].ndev; + } + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_portid_msg_callback_t)ncm->cb; + + cb((void *)ncm->app_data, npm); +} + +/* + * nss_portid_get_ctx() + * Return a portid's NSS context. + */ +struct nss_ctx_instance *nss_portid_get_ctx(void) +{ + struct nss_ctx_instance *nss_ctx; + + nss_ctx = &nss_top_main.nss[nss_top_main.portid_handler_id]; + return nss_ctx; +} + +/* + * nss_portid_verify_if_num() + * Verify if_num passed to us. 
+ */ +static bool nss_portid_verify_if_num(uint32_t if_num) +{ + if (nss_is_dynamic_interface(if_num) == false) { + return false; + } + + if (nss_dynamic_interface_get_type(nss_portid_get_ctx(), if_num) != NSS_DYNAMIC_INTERFACE_TYPE_PORTID) { + return false; + } + + return true; +} + +/* + * nss_portid_get_stats() + * API for getting stats from a port interface + */ +bool nss_portid_get_stats(uint32_t if_num, struct rtnl_link_stats64 *stats) +{ + int i; + + spin_lock_bh(&nss_portid_spinlock); + for (i = 0; i < NSS_PORTID_MAX_SWITCH_PORT; i++) { + if (nss_portid_hdl[i].if_num == if_num) { + memcpy(stats, &nss_portid_hdl[i].stats, sizeof(*stats)); + spin_unlock_bh(&nss_portid_spinlock); + return true; + } + } + spin_unlock_bh(&nss_portid_spinlock); + return false; +} +EXPORT_SYMBOL(nss_portid_get_stats); + +/* + * nss_portid_if_tx_data() + * Transmit data buffer (skb) to a NSS interface number + */ +nss_tx_status_t nss_portid_if_tx_data(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num) +{ + return nss_if_tx_buf(nss_ctx, os_buf, if_num); +} +EXPORT_SYMBOL(nss_portid_if_tx_data); + +/* + * nss_portid_tx_msg() + * Transmit a portid message to NSSFW + */ +nss_tx_status_t nss_portid_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_portid_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace Messages + */ + nss_portid_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (ncm->interface != NSS_PORTID_INTERFACE) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_PORTID_MAX_MSG_TYPE) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_portid_tx_msg); + +/* + * nss_portid_callback + * Callback to handle the completion of NSS ->HLOS messages. 
+ */ +static void nss_portid_callback(void *app_data, struct nss_portid_msg *npm) +{ + if(npm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("portid error response %d\n", npm->cm.response); + pid.response = NSS_TX_FAILURE; + complete(&pid.complete); + return; + } + + pid.response = NSS_TX_SUCCESS; + complete(&pid.complete); +} + +/* + * nss_portid_tx_msg_sync() + * Send a message to portid interface & wait for the response. + */ +nss_tx_status_t nss_portid_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_portid_msg *msg) +{ + nss_tx_status_t status; + int ret = 0; + + down(&pid.sem); + + status = nss_portid_tx_msg(nss_ctx, msg); + if(status != NSS_TX_SUCCESS) + { + nss_warning("%px: nss_phys_if_msg failed\n", nss_ctx); + up(&pid.sem); + return status; + } + + ret = wait_for_completion_timeout(&pid.complete, msecs_to_jiffies(NSS_PORTID_TX_TIMEOUT)); + + if(!ret) + { + nss_warning("%px: portid tx failed due to timeout\n", nss_ctx); + pid.response = NSS_TX_FAILURE; + } + + status = pid.response; + up(&pid.sem); + + return status; +} +EXPORT_SYMBOL(nss_portid_tx_msg_sync); + +/* + * nss_portid_msg_init() + * Initialize portid message. 
+ */ +void nss_portid_msg_init(struct nss_portid_msg *npm, uint16_t if_num, uint32_t type, uint32_t len, + nss_portid_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&npm->cm, if_num, type, len, (void*)cb, app_data); +} +EXPORT_SYMBOL(nss_portid_msg_init); + +/* + * nss_portid_tx_configure_port_if_msg + * API to send configure port message to NSS FW + */ +nss_tx_status_t nss_portid_tx_configure_port_if_msg(struct nss_ctx_instance *nss_ctx, uint32_t port_if_num, uint8_t port_id, uint8_t gmac_id) +{ + struct nss_portid_msg npm; + struct nss_portid_configure_msg *npcm; + + if (nss_portid_verify_if_num(port_if_num) == false) { + nss_warning("received invalid interface %d", port_if_num); + return NSS_TX_FAILURE; + } + + if (port_id >= NSS_PORTID_MAX_SWITCH_PORT) { + nss_warning("port_id %d exceeds NSS_PORTID_MAX_SWITCH_PORT\n", port_id); + return NSS_TX_FAILURE; + } + + if (gmac_id >= NSS_MAX_PHYSICAL_INTERFACES) { + nss_warning("gmac_id %d not valid\n", gmac_id); + return NSS_TX_FAILURE; + } + + /* + * Prepare message to configure a port interface + */ + npcm = &npm.msg.configure; + npcm->port_if_num = port_if_num; + npcm->port_id = port_id; + npcm->gmac_id = gmac_id; + + nss_portid_msg_init(&npm, NSS_PORTID_INTERFACE, NSS_PORTID_CONFIGURE_MSG, + sizeof(struct nss_portid_configure_msg), nss_portid_callback, NULL); + nss_info("Dynamic interface allocated, sending message to FW with port_if_num %d port_id %d gmac_id %d\n", + npcm->port_if_num, npcm->port_id, npcm->gmac_id); + return nss_portid_tx_msg_sync(nss_ctx, &npm); +} +EXPORT_SYMBOL(nss_portid_tx_configure_port_if_msg); + +/* + * nss_portid_tx_unconfigure_port_if_msg + * API to send unconfigure port message to NSS FW + */ +nss_tx_status_t nss_portid_tx_unconfigure_port_if_msg(struct nss_ctx_instance *nss_ctx, uint32_t port_if_num, uint8_t port_id) +{ + struct nss_portid_msg npm; + struct nss_portid_unconfigure_msg *npum; + + if (nss_portid_verify_if_num(port_if_num) == false) { + nss_warning("received 
invalid interface %d", port_if_num); + return NSS_TX_FAILURE; + } + + if (port_id >= NSS_PORTID_MAX_SWITCH_PORT) { + nss_warning("port_id %d exceeds NSS_PORTID_MAX_SWITCH_PORT\n", port_id); + return NSS_TX_FAILURE; + } + + /* + * Prepare message to unconfigure a port interface + */ + npum = &npm.msg.unconfigure; + npum->port_if_num = port_if_num; + npum->port_id = port_id; + + nss_portid_msg_init(&npm, NSS_PORTID_INTERFACE, NSS_PORTID_UNCONFIGURE_MSG, + sizeof(struct nss_portid_configure_msg), nss_portid_callback, NULL); + + return nss_portid_tx_msg_sync(nss_ctx, &npm); +} +EXPORT_SYMBOL(nss_portid_tx_unconfigure_port_if_msg); + +/* + * nss_portid_register_port_if() + * Register with portid node and get back nss_ctx + */ +struct nss_ctx_instance *nss_portid_register_port_if(uint32_t if_num, uint32_t port_id, struct net_device *netdev, + nss_portid_buf_callback_t buf_callback) +{ + struct nss_ctx_instance *nss_ctx = nss_portid_get_ctx(); + + if (nss_portid_verify_if_num(if_num) == false) { + nss_warning("nss portid register received invalid interface %d", if_num); + return NULL; + } + + if (port_id >= NSS_PORTID_MAX_SWITCH_PORT) { + nss_warning("nss portid register received invalid port number %d", port_id); + return NULL; + } + + spin_lock(&nss_portid_spinlock); + if (nss_portid_hdl[port_id].if_num != 0) { + nss_warning("nss portid failed: port already registered %d", port_id); + spin_unlock(&nss_portid_spinlock); + return NULL; + } + nss_portid_hdl[port_id].if_num = if_num; + spin_unlock(&nss_portid_spinlock); + + nss_core_register_subsys_dp(nss_ctx, if_num, buf_callback, NULL, NULL, netdev, 0); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_portid_register_port_if); + +/* + * nss_portid_unregister_port_if() + * Unregister portid node with NSS FW + */ +bool nss_portid_unregister_port_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx; + int i; + + nss_ctx = nss_portid_get_ctx(); + if (nss_portid_verify_if_num(if_num) == false) { + nss_warning("%px: unregister 
received for invalid interface %d", nss_ctx, if_num); + return false; + } + + spin_lock(&nss_portid_spinlock); + for (i = 0; i < NSS_PORTID_MAX_SWITCH_PORT; i++) { + if (nss_portid_hdl[i].if_num == if_num) { + nss_portid_hdl[i].if_num = 0; + } + } + spin_unlock(&nss_portid_spinlock); + + nss_core_unregister_handler(nss_ctx, if_num); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + return true; +} +EXPORT_SYMBOL(nss_portid_unregister_port_if); + +/* + * nss_portid_init() + * Initializes portid node. Gets called from nss_init.c + */ +void nss_portid_init(void) +{ + memset(&nss_portid_hdl, 0, sizeof(struct nss_portid_handle) * NSS_PORTID_MAX_SWITCH_PORT); +} + +/* + * nss_portid_register_handler() + * Registering handler for sending msg to portid node on NSS. + */ +void nss_portid_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_portid_get_ctx(); + + nss_core_register_handler(nss_ctx, NSS_PORTID_INTERFACE, nss_portid_handler, NULL); + + nss_portid_stats_dentry_create(); + + sema_init(&pid.sem, 1); + init_completion(&pid.complete); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_portid_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_portid_log.c new file mode 100644 index 000000000..700e1181b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_portid_log.c @@ -0,0 +1,129 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_portid_log.c + * NSS PORTID logger file. + */ + +#include "nss_core.h" + +/* + * nss_portid_log_message_types_str + * NSS PORTID message strings + */ +static int8_t *nss_portid_log_message_types_str[NSS_PORTID_MAX_MSG_TYPE] __maybe_unused = { + "PORTID Configure", + "PORTID Unconfigure", + "PORTID Stats", +}; + +/* + * nss_portid_log_configure_msg() + * Log NSS PORTID Configure. + */ +static void nss_portid_log_configure_msg(struct nss_portid_msg *npm) +{ + struct nss_portid_configure_msg *npcm __maybe_unused = &npm->msg.configure; + nss_trace("%px: NSS PORTID Configure message \n" + "PORTID Interface Number: %d\n" + "PORTID Interface ID: %d\n" + "PORTID GMAC ID: %d\n", + npcm, npcm->port_if_num, + npcm->port_id, npcm->gmac_id); +} + +/* + * nss_portid_log_unconfigure_msg() + * Log NSS PORTID Unconfigure. + */ +static void nss_portid_log_unconfigure_msg(struct nss_portid_msg *npm) +{ + struct nss_portid_unconfigure_msg *npum __maybe_unused = &npm->msg.unconfigure; + nss_trace("%px: NSS PORTID Configure message \n" + "PORTID Interface Number: %d\n" + "PORTID Interface ID: %d\n", + npum, npum->port_if_num, + npum->port_id); +} + +/* + * nss_portid_log_verbose() + * Log message contents. + */ +static void nss_portid_log_verbose(struct nss_portid_msg *npm) +{ + switch (npm->cm.type) { + case NSS_PORTID_CONFIGURE_MSG: + nss_portid_log_configure_msg(npm); + break; + + case NSS_PORTID_UNCONFIGURE_MSG: + nss_portid_log_unconfigure_msg(npm); + break; + + case NSS_PORTID_STATS_SYNC_MSG: + /* + * No log for valid stats message. 
+ */ + break; + + default: + nss_trace("%px: Invalid message type\n", npm); + break; + } +} + +/* + * nss_portid_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_portid_log_tx_msg(struct nss_portid_msg *npm) +{ + if (npm->cm.type >= NSS_PORTID_MAX_MSG_TYPE) { + nss_warning("%px: Invalid message type\n", npm); + return; + } + + nss_info("%px: type[%d]:%s\n", npm, npm->cm.type, nss_portid_log_message_types_str[npm->cm.type]); + nss_portid_log_verbose(npm); +} + +/* + * nss_portid_log_rx_msg() + * Log messages received from FW. + */ +void nss_portid_log_rx_msg(struct nss_portid_msg *npm) +{ + if (npm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", npm); + return; + } + + if (npm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (npm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", npm, npm->cm.type, + nss_portid_log_message_types_str[npm->cm.type], + npm->cm.response, nss_cmn_response_str[npm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + npm, npm->cm.type, nss_portid_log_message_types_str[npm->cm.type], + npm->cm.response, nss_cmn_response_str[npm->cm.response]); + +verbose: + nss_portid_log_verbose(npm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_portid_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_portid_log.h new file mode 100644 index 000000000..54d904fe9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_portid_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_PORTID_LOG_H +#define __NSS_PORTID_LOG_H + +/* + * nss_portid.h + * NSS PORTID header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_portid_log_tx_msg + * Logs a portid message that is sent to the NSS firmware. + */ +void nss_portid_log_tx_msg(struct nss_portid_msg *ntm); + +/* + * nss_portid_log_rx_msg + * Logs a portid message that is received from the NSS firmware. + */ +void nss_portid_log_rx_msg(struct nss_portid_msg *ntm); + +#endif /* __NSS_PORTID_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_portid_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_portid_stats.c new file mode 100644 index 000000000..8b6086dba --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_portid_stats.c @@ -0,0 +1,153 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_portid_stats.h" + +extern spinlock_t nss_portid_spinlock; +extern struct nss_portid_handle nss_portid_hdl[]; + +/* + * nss_portid_stats_str + * PortID statistics strings. + */ +struct nss_stats_info nss_portid_stats_str[NSS_PORTID_STATS_MAX] = { + {"rx_invalid_header" , NSS_STATS_TYPE_EXCEPTION} +}; + +uint64_t nss_portid_stats[NSS_PORTID_STATS_MAX]; + +/* + * nss_portid_stats_read() + * Read PortID stats. + */ +static ssize_t nss_portid_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + /* + * Max output lines = #stats + few output lines for banner printing + + * Number of Extra outputlines for future reference to add new stats. 
+ */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_PORTID_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "portid", NSS_STATS_SINGLE_CORE); + size_wr += nss_stats_fill_common_stats(NSS_PORTID_INTERFACE, NSS_STATS_SINGLE_INSTANCE, lbuf, size_wr, size_al, "portid"); + + /* + * PortID node stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_PORTID_STATS_MAX); i++) { + stats_shadow[i] = nss_portid_stats[i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + + size_wr += nss_stats_print("portid", NULL, NSS_STATS_SINGLE_INSTANCE + , nss_portid_stats_str + , stats_shadow + , NSS_PORTID_STATS_MAX + , lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_portid_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(portid) + +/* + * nss_portid_stats_dentry_create() + * Create portid node statistics debug entry. + */ +void nss_portid_stats_dentry_create(void) +{ + nss_stats_create_dentry("portid", &nss_portid_stats_ops); +} + +/* + * nss_portid_stats_sync() + * Update portid node stats. 
+ */ +void nss_portid_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_portid_stats_sync_msg *npsm) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct nss_portid_handle *hdl; + int j; + + if (npsm->port_id == NSS_PORTID_MAX_SWITCH_PORT) { + /* + * Update PORTID base node stats. + */ + spin_lock_bh(&nss_top->stats_lock); + nss_top->stats_node[NSS_PORTID_INTERFACE][NSS_STATS_NODE_RX_PKTS] += npsm->node_stats.rx_packets; + nss_top->stats_node[NSS_PORTID_INTERFACE][NSS_STATS_NODE_RX_BYTES] += npsm->node_stats.rx_bytes; + nss_top->stats_node[NSS_PORTID_INTERFACE][NSS_STATS_NODE_TX_PKTS] += npsm->node_stats.tx_packets; + nss_top->stats_node[NSS_PORTID_INTERFACE][NSS_STATS_NODE_TX_BYTES] += npsm->node_stats.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_top->stats_node[NSS_PORTID_INTERFACE][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + j] += npsm->node_stats.rx_dropped[j]; + } + + nss_portid_stats[NSS_PORTID_STATS_RX_INVALID_HEADER] += npsm->rx_invalid_header; + spin_unlock_bh(&nss_top->stats_lock); + return; + } + + if (npsm->port_id >= NSS_PORTID_MAX_SWITCH_PORT) { + nss_warning("port_id %d exceeds NSS_PORTID_MAX_SWITCH_PORT\n", npsm->port_id); + return; + } + + /* + * Update PORTID interface stats. 
+ */ + spin_lock_bh(&nss_portid_spinlock); + hdl = &nss_portid_hdl[npsm->port_id]; + if (hdl->if_num == 0) { + nss_warning("%px: nss_portid recv'd stats with unconfigured port %d", nss_ctx, npsm->port_id); + spin_unlock_bh(&nss_portid_spinlock); + return; + } + hdl->stats.rx_packets += npsm->node_stats.rx_packets; + hdl->stats.rx_bytes += npsm->node_stats.rx_bytes; + hdl->stats.rx_dropped += nss_cmn_rx_dropped_sum(&npsm->node_stats); + hdl->stats.tx_packets += npsm->node_stats.tx_packets; + hdl->stats.tx_bytes += npsm->node_stats.tx_bytes; + spin_unlock_bh(&nss_portid_spinlock); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_portid_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_portid_stats.h new file mode 100644 index 000000000..b1a1ee5b7 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_portid_stats.h @@ -0,0 +1,39 @@ +/* + ****************************************************************************** + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * **************************************************************************** + */ + +#ifndef __NSS_PORTID_STATS_H +#define __NSS_PORTID_STATS_H + +/* + * PortID statistics + */ +enum nss_portid_stats_types { + NSS_PORTID_STATS_RX_INVALID_HEADER, + NSS_PORTID_STATS_MAX, +}; + +struct nss_portid_handle { + uint32_t if_num; /**< Interface number */ + struct rtnl_link_stats64 stats; /**< statistics counters */ +}; + +/* + * PortID statistics APIs + */ +extern void nss_portid_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_portid_stats_sync_msg *npsm); +extern void nss_portid_stats_dentry_create(void); + +#endif /* __NSS_PORTID_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe.c b/feeds/ipq807x/qca-nss-drv/src/nss_ppe.c new file mode 100644 index 000000000..46ce217b6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe.c @@ -0,0 +1,374 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2018, 2020-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_ppe.h" +#include "nss_ppe_stats.h" +#include "nss_ppe_strings.h" + +DEFINE_SPINLOCK(nss_ppe_stats_lock); + +struct nss_ppe_stats_debug nss_ppe_debug_stats; +struct nss_ppe_pvt ppe_pvt; + +/* + * nss_ppe_verify_ifnum() + * Verify PPE interface number. + */ +static inline bool nss_ppe_verify_ifnum(int if_num) +{ + return nss_is_dynamic_interface(if_num) || (if_num == NSS_PPE_INTERFACE); +} + +/* + * nss_ppe_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_ppe_callback(void *app_data, struct nss_ppe_msg *npm) +{ + nss_ppe_msg_callback_t callback = (nss_ppe_msg_callback_t)ppe_pvt.cb; + void *data = ppe_pvt.app_data; + + ppe_pvt.response = NSS_TX_SUCCESS; + ppe_pvt.cb = NULL; + ppe_pvt.app_data = NULL; + + if (npm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("ppe error response %d\n", npm->cm.response); + ppe_pvt.response = npm->cm.response; + } + + if (callback) { + callback(data, npm); + } + complete(&ppe_pvt.complete); +} + +/* + * nss_ppe_tx_msg() + * Transmit a ppe message to NSSFW + */ +nss_tx_status_t nss_ppe_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_ppe_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace messages. + */ + nss_ppe_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (ncm->type >= NSS_PPE_MSG_MAX) { + nss_warning("%px: message type out of range: %d\n", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + if (!nss_ppe_verify_ifnum(ncm->interface)) { + nss_warning("%px: invalid interface %d\n", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_ppe_tx_msg_sync() + * Transmit a ppe message to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_ppe_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_ppe_msg *npm) +{ + nss_tx_status_t status; + int ret = 0; + + down(&ppe_pvt.sem); + ppe_pvt.cb = (void *)npm->cm.cb; + ppe_pvt.app_data = (void *)npm->cm.app_data; + + npm->cm.cb = (nss_ptr_t)nss_ppe_callback; + npm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_ppe_tx_msg(nss_ctx, npm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: ppe_tx_msg failed\n", nss_ctx); + up(&ppe_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&ppe_pvt.complete, msecs_to_jiffies(NSS_PPE_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: ppe msg tx failed due to timeout\n", nss_ctx); + ppe_pvt.response = NSS_TX_FAILURE; + } + + status = ppe_pvt.response; + up(&ppe_pvt.sem); + return status; +} + +/* + * nss_ppe_get_context() + * Get NSS context instance for ppe + */ +struct nss_ctx_instance *nss_ppe_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.ppe_handler_id]; +} + +/* + * nss_ppe_msg_init() + * Initialize nss_ppe_msg. 
+ */ +void nss_ppe_msg_init(struct nss_ppe_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} + +/* + * nss_ppe_tx_ipsec_config_msg + * API to send inline IPsec port configure message to NSS FW + */ +nss_tx_status_t nss_ppe_tx_ipsec_config_msg(uint32_t nss_ifnum, uint32_t vsi_num, uint16_t mtu, + __attribute__((unused))uint16_t mru) +{ + struct nss_ctx_instance *nss_ctx = nss_ppe_get_context(); + struct nss_ppe_msg npm = {0}; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (vsi_num >= NSS_PPE_VSI_NUM_MAX) { + nss_warning("Invalid vsi number:%u\n", vsi_num); + return NSS_TX_FAILURE; + } + + nss_ppe_msg_init(&npm, NSS_PPE_INTERFACE, NSS_PPE_MSG_IPSEC_PORT_CONFIG, + sizeof(struct nss_ppe_ipsec_port_config_msg), NULL, NULL); + + npm.msg.ipsec_config.nss_ifnum = nss_ifnum; + npm.msg.ipsec_config.vsi_num = vsi_num; + npm.msg.ipsec_config.mtu = mtu; + + return nss_ppe_tx_msg_sync(nss_ctx, &npm); +} + +/* + * nss_ppe_tx_ipsec_mtu_msg + * API to send IPsec port MTU change message to NSS FW + */ +nss_tx_status_t nss_ppe_tx_ipsec_mtu_msg(uint32_t nss_ifnum, uint16_t mtu, __attribute__((unused))uint16_t mru) +{ + struct nss_ctx_instance *nss_ctx = nss_ppe_get_context(); + struct nss_ppe_msg npm = {0}; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + nss_ppe_msg_init(&npm, NSS_PPE_INTERFACE, NSS_PPE_MSG_IPSEC_PORT_MTU_CHANGE, + sizeof(struct nss_ppe_ipsec_port_mtu_msg), NULL, NULL); + + npm.msg.ipsec_mtu.nss_ifnum = nss_ifnum; + npm.msg.ipsec_mtu.mtu = mtu; + + return nss_ppe_tx_msg_sync(nss_ctx, &npm); +} + +/* + * nss_ppe_tx_ipsec_add_intf_msg + * API to attach NSS interface to IPsec port + */ +nss_tx_status_t nss_ppe_tx_ipsec_add_intf_msg(uint32_t nss_ifnum) +{ + struct nss_ctx_instance *nss_ctx = nss_ppe_get_context(); + struct nss_ppe_msg npm = {0}; + + if (!nss_ctx) { + 
nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + nss_ppe_msg_init(&npm, NSS_PPE_INTERFACE, NSS_PPE_MSG_IPSEC_ADD_INTF, + sizeof(struct nss_ppe_ipsec_add_intf_msg), NULL, NULL); + + npm.msg.ipsec_addif.nss_ifnum = nss_ifnum; + + return nss_ppe_tx_msg_sync(nss_ctx, &npm); +} + +/* + * nss_ppe_tx_ipsec_del_intf_msg + * API to detach NSS interface to IPsec port + */ +nss_tx_status_t nss_ppe_tx_ipsec_del_intf_msg(uint32_t nss_ifnum) +{ + struct nss_ctx_instance *nss_ctx = nss_ppe_get_context(); + struct nss_ppe_msg npm = {0}; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + nss_ppe_msg_init(&npm, NSS_PPE_INTERFACE, NSS_PPE_MSG_IPSEC_DEL_INTF, + sizeof(struct nss_ppe_ipsec_del_intf_msg), NULL, NULL); + + npm.msg.ipsec_delif.nss_ifnum = nss_ifnum; + + return nss_ppe_tx_msg_sync(nss_ctx, &npm); +} + +/* + * nss_ppe_handler() + * Handle NSS -> HLOS messages for ppe + */ +static void nss_ppe_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_ppe_msg *msg = (struct nss_ppe_msg *)ncm; + void *ctx; + + nss_ppe_msg_callback_t cb; + + nss_trace("nss_ctx: %px ppe msg: %px\n", nss_ctx, msg); + BUG_ON(!nss_ppe_verify_ifnum(ncm->interface)); + + /* + * Trace messages. + */ + nss_ppe_log_rx_msg(msg); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_PPE_MSG_MAX) { + nss_warning("%px: received invalid message %d for PPE interface\n", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_ppe_msg)) { + nss_warning("%px: Length of message is greater than required: %d\n", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + switch (msg->cm.type) { + case NSS_PPE_MSG_SYNC_STATS: + /* + * session debug stats embeded in session stats msg + */ + nss_ppe_stats_sync(nss_ctx, &msg->msg.stats, ncm->interface); + nss_ppe_stats_notify(nss_ctx, ncm->interface); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_ppe_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + cb(ctx, msg); +} + +/* + * nss_ppe_register_handler() + * debugfs stats msg handler received on static ppe interface + * + * TODO: Export API so that others can also read PPE stats. + */ +void nss_ppe_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_ppe_get_context(); + + nss_core_register_handler(nss_ctx, NSS_PPE_INTERFACE, nss_ppe_handler, NULL); + + if (nss_ppe_debug_stats.valid) { + nss_ppe_stats_dentry_create(); + nss_ppe_strings_dentry_create(); + } +} + +/* + * nss_ppe_free() + * Uninitialize PPE base + */ +void nss_ppe_free(void) +{ + /* + * Check if PPE base is already uninitialized. + */ + if (!ppe_pvt.ppe_base) { + return; + } + + /* + * Unmap PPE base address + */ + iounmap(ppe_pvt.ppe_base); + ppe_pvt.ppe_base = NULL; + + spin_lock_bh(&nss_ppe_stats_lock); + nss_ppe_debug_stats.valid = false; + nss_ppe_debug_stats.if_num = 0; + nss_ppe_debug_stats.if_index = 0; + spin_unlock_bh(&nss_ppe_stats_lock); +} + +/* + * nss_ppe_init() + * Initialize PPE base + */ +void nss_ppe_init(void) +{ + /* + * Check if PPE base is already initialized. 
+ */ + if (ppe_pvt.ppe_base) { + return; + } + + /* + * Get the PPE base address + */ + ppe_pvt.ppe_base = ioremap_nocache(PPE_BASE_ADDR, PPE_REG_SIZE); + if (!ppe_pvt.ppe_base) { + nss_warning("DRV can't get PPE base address\n"); + return; + } + + spin_lock_bh(&nss_ppe_stats_lock); + nss_ppe_debug_stats.valid = true; + nss_ppe_debug_stats.if_num = 0; + nss_ppe_debug_stats.if_index = 0; + spin_unlock_bh(&nss_ppe_stats_lock); + + sema_init(&ppe_pvt.sem, 1); + init_completion(&ppe_pvt.complete); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe.h b/feeds/ipq807x/qca-nss-drv/src/nss_ppe.h new file mode 100644 index 000000000..d71021145 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe.h @@ -0,0 +1,423 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_ppe.h + * NSS PPE header file + */ + +#include +#include "nss_tx_rx_common.h" + +#define PPE_BASE_ADDR 0x3a000000 +#define PPE_REG_SIZE 0x1000000 + +#define PPE_L3_DBG_WR_OFFSET 0x200c04 +#define PPE_L3_DBG_RD_OFFSET 0x200c0c +#define PPE_L3_DBG0_OFFSET 0x10001 +#define PPE_L3_DBG1_OFFSET 0x10002 +#define PPE_L3_DBG2_OFFSET 0x10003 +#define PPE_L3_DBG3_OFFSET 0x10004 +#define PPE_L3_DBG4_OFFSET 0x10005 +#define PPE_L3_DBG_PORT_OFFSET 0x11e80 + +#define PPE_PKT_CODE_WR_OFFSET 0x100080 +#define PPE_PKT_CODE_RD_OFFSET 0x100084 +#define PPE_PKT_CODE_DROP0_OFFSET 0xf000000 +#define PPE_PKT_CODE_DROP1_OFFSET 0x10000000 +#define PPE_PKT_CODE_CPU_OFFSET 0x40000000 + +#define PPE_PKT_CODE_DROP0_GET(x) (((x) & 0xe0000000) >> 29) +#define PPE_PKT_CODE_DROP1_GET(x) (((x) & 0x7) << 3) +#define PPE_PKT_CODE_DROP_GET(d0, d1) (PPE_PKT_CODE_DROP0_GET(d0) | PPE_PKT_CODE_DROP1_GET(d1)) + +#define PPE_PKT_CODE_CPU_GET(x) (((x) >> 3) & 0xff) + +#define PPE_IPE_PC_REG 0x100000 + +/* + * NSS_SYS_REG_DROP_CPU_CNT_TBL + * Address map and access APIs for DROP_CPU_CNT table. 
+ */ +#define PPE_DROP_CPU_CNT_TBL_OFFSET 0x60000 +#define PPE_DROP_CPU_CNT_TBL_ENTRY_SIZE 0x10 +#define PPE_DROP_CPU_CNT_TBL_BASE_OFFSET (PPE_IPE_PC_REG + PPE_DROP_CPU_CNT_TBL_OFFSET) +#define PPE_CPU_CODE_MAX_NUM 256 + +/* + * CPU code offset + */ +#define PPE_CPU_CODE_OFFSET(n) (PPE_DROP_CPU_CNT_TBL_BASE_OFFSET + ((n) * PPE_DROP_CPU_CNT_TBL_ENTRY_SIZE)) + +/* + * DROP code offset + */ +#define PPE_DROP_CODE_IDX(code, src_port) (PPE_CPU_CODE_MAX_NUM + (8 * (code)) + (src_port)) +#define PPE_DROP_CODE_OFFSET(code, src_port) (PPE_DROP_CPU_CNT_TBL_BASE_OFFSET + ((PPE_DROP_CODE_IDX(code, src_port)) * PPE_DROP_CPU_CNT_TBL_ENTRY_SIZE)) + +#define NSS_PPE_TX_TIMEOUT 1000 /* 1 Second */ + +/* + * Maximum number of VSI + */ +#define NSS_PPE_VSI_NUM_MAX 32 + +/* + * ppe nss debug stats lock + */ +extern spinlock_t nss_ppe_stats_lock; + +/* + * Private data structure + */ +struct nss_ppe_pvt { + void * __iomem ppe_base; + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; +}; + +/* + * Data structure to store to PPE private context + */ +extern struct nss_ppe_pvt ppe_pvt; + +/** + * nss_ppe_message_types + * Message types for Packet Processing Engine (PPE) requests and responses. + * + * Note: PPE messages are added as short term approach, expect all + * messages below to be deprecated for more integrated approach. + */ +enum nss_ppe_message_types { + NSS_PPE_MSG_SYNC_STATS, + NSS_PPE_MSG_IPSEC_PORT_CONFIG, + NSS_PPE_MSG_IPSEC_PORT_MTU_CHANGE, + NSS_PPE_MSG_IPSEC_ADD_INTF, + NSS_PPE_MSG_IPSEC_DEL_INTF, + NSS_PPE_MSG_MAX, +}; + +/** + * nss_ppe_msg_error_type + * PPE error types. 
+ */ +enum nss_ppe_msg_error_type { + PPE_MSG_ERROR_OK, + PPE_MSG_ERROR_UNKNOWN_TYPE, + PPE_MSG_ERROR_PORT_CREATION_FAIL, + PPE_MSG_ERROR_INVALID_PORT_VSI, + PPE_MSG_ERROR_INVALID_L3_IF, + PPE_MSG_ERROR_IPSEC_PORT_CONFIG, + PPE_MSG_ERROR_IPSEC_INTF_TABLE_FULL, + PPE_MSG_ERROR_IPSEC_INTF_ATTACHED, + PPE_MSG_ERROR_IPSEC_INTF_UNATTACHED, + PPE_ERROR_MAX +}; + +/** + * nss_ppe_stats_sc + * Message structure for per service code stats. + */ +struct nss_ppe_stats_sc { + uint32_t nss_ppe_sc_cb_unregister; /* Per service-code counter for callback not registered */ + uint32_t nss_ppe_sc_cb_success; /* Per service-code coutner for successful callback */ + uint32_t nss_ppe_sc_cb_failure; /* Per service-code counter for failure callback */ +}; + +/** + * nss_ppe_stats + * Message structure for ppe general stats + */ +struct nss_ppe_stats { + uint32_t nss_ppe_v4_l3_flows; /**< Number of IPv4 routed flows. */ + uint32_t nss_ppe_v4_l2_flows; /**< Number of IPv4 bridge flows. */ + uint32_t nss_ppe_v4_create_req; /**< Number of IPv4 create requests. */ + uint32_t nss_ppe_v4_create_fail; /**< Number of IPv4 create failures. */ + uint32_t nss_ppe_v4_destroy_req; /**< Number of IPv4 delete requests. */ + uint32_t nss_ppe_v4_destroy_fail; /**< Number of IPv4 delete failures. */ + uint32_t nss_ppe_v4_mc_create_req; /**< Number of IPv4 MC create requests. */ + uint32_t nss_ppe_v4_mc_create_fail; /**< Number of IPv4 MC create failure. */ + uint32_t nss_ppe_v4_mc_update_req; /**< Number of IPv4 MC update requests. */ + uint32_t nss_ppe_v4_mc_update_fail; /**< Number of IPv4 MC update failure. */ + uint32_t nss_ppe_v4_mc_destroy_req; /**< Number of IPv4 MC delete requests. */ + uint32_t nss_ppe_v4_mc_destroy_fail; /**< Number of IPv4 MC delete failure. */ + uint32_t nss_ppe_v4_unknown_interface; /**< Number of IPv4 create failures */ + + uint32_t nss_ppe_v6_l3_flows; /**< Number of IPv6 routed flows. */ + uint32_t nss_ppe_v6_l2_flows; /**< Number of IPv6 bridge flows. 
*/ + uint32_t nss_ppe_v6_create_req; /**< Number of IPv6 create requests. */ + uint32_t nss_ppe_v6_create_fail; /**< Number of IPv6 create failures. */ + uint32_t nss_ppe_v6_destroy_req; /**< Number of IPv6 delete requests. */ + uint32_t nss_ppe_v6_destroy_fail; /**< Number of IPv6 delete failures. */ + uint32_t nss_ppe_v6_mc_create_req; /**< Number of IPv6 MC create requests. */ + uint32_t nss_ppe_v6_mc_create_fail; /**< Number of IPv6 MC create failure. */ + uint32_t nss_ppe_v6_mc_update_req; /**< Number of IPv6 MC update requests. */ + uint32_t nss_ppe_v6_mc_update_fail; /**< Number of IPv6 MC update failure. */ + uint32_t nss_ppe_v6_mc_destroy_req; /**< Number of IPv6 MC delete requests. */ + uint32_t nss_ppe_v6_mc_destroy_fail; /**< Number of IPv6 MC delete failure. */ + uint32_t nss_ppe_v6_unknown_interface; /**< Number of IPv6 create failures */ + + uint32_t nss_ppe_fail_vp_full; + /**< Request failed because the virtual port table is full */ + uint32_t nss_ppe_fail_nh_full; + /**< Request failed because the next hop table is full. */ + uint32_t nss_ppe_fail_flow_full; + /**< Request failed because the flow table is full. */ + uint32_t nss_ppe_fail_host_full; + /**< Request failed because the host table is full. */ + uint32_t nss_ppe_fail_pubip_full; + /**< Request failed because the public IP table is full. */ + uint32_t nss_ppe_fail_port_setup; + /**< Request failed because the PPE port is not setup. */ + uint32_t nss_ppe_fail_rw_fifo_full; + /**< Request failed because the read/write FIFO is full. */ + uint32_t nss_ppe_fail_flow_command; + /**< Request failed because the PPE flow command failed. */ + uint32_t nss_ppe_fail_unknown_proto; + /**< Request failed because of an unknown protocol. */ + uint32_t nss_ppe_fail_ppe_unresponsive; + /**< Request failed because the PPE is not responding. */ + uint32_t nss_ppe_ce_opaque_invalid; + /**< Request failed because of invalid opaque in connection entry. 
*/ + uint32_t nss_ppe_fail_fqg_full; + /**< Request failed because the flow QoS group is full. */ +}; + + +/** + * nss_ppe_sync_stats_msg + * Message information for PPE synchronization statistics. + */ +struct nss_ppe_sync_stats_msg { + struct nss_ppe_stats stats; /**< General stats */ + struct nss_ppe_stats_sc sc_stats[NSS_PPE_SC_MAX]; + /**< Per service-code stats */ +}; + +/** + * nss_ppe_ipsec_port_config_msg + * Message structure for inline IPsec port configuration. + */ +struct nss_ppe_ipsec_port_config_msg { + uint32_t nss_ifnum; /**< NSS interface number corresponding to inline IPsec port. */ + uint16_t mtu; /**< MTU value for inline IPsec port. */ + uint8_t vsi_num; /**< Default port VSI for inline IPsec port. */ +}; + +/** + * nss_ppe_ipsec_port_mtu_msg + * Message structure for inline IPsec port MTU change. + */ +struct nss_ppe_ipsec_port_mtu_msg { + uint32_t nss_ifnum; /**< NSS interface number corresponding to inline IPsec port. */ + uint16_t mtu; /**< MTU value for inline IPsec port. */ +}; + +/** + * nss_ppe_ipsec_add_intf_msg + * Message structure for adding dynamic IPsec/DTLS interface to inline IPsec port. + */ +struct nss_ppe_ipsec_add_intf_msg { + uint32_t nss_ifnum; /**< Dynamic IPsec/DTLS interface number. */ +}; + +/** + * nss_ppe_ipsec_del_intf_msg + * Message structure for deleting dynamic IPsec/DTLS interface to inline IPsec port. + */ +struct nss_ppe_ipsec_del_intf_msg { + uint32_t nss_ifnum; /**< Dynamic IPsec/DTLS interface number. */ +}; + +/** + * nss_ppe_msg + * Data for sending and receiving PPE host-to-NSS messages. + */ +struct nss_ppe_msg { + struct nss_cmn_msg cm; /**< Common message header. */ + + /** + * Payload of a PPE host-to-NSS message. + */ + union { + struct nss_ppe_sync_stats_msg stats; + /**< Synchronization statistics. */ + struct nss_ppe_ipsec_port_config_msg ipsec_config; + /**< PPE inline IPsec port configuration message. 
*/ + struct nss_ppe_ipsec_port_mtu_msg ipsec_mtu; + /**< Inline IPsec port MTU change message. */ + struct nss_ppe_ipsec_add_intf_msg ipsec_addif; + /**< Inline IPsec NSS interface attach message. */ + struct nss_ppe_ipsec_del_intf_msg ipsec_delif; + /**< Inline IPsec NSS interface detach message. */ + } msg; /**< Message payload. */ +}; + +/** + * Callback function for receiving PPE messages. + * + * @datatypes + * nss_ppe_msg + * + * @param[in] app_data Pointer to the application context of the message. + * @param[in] msg Pointer to the message data. + */ +typedef void (*nss_ppe_msg_callback_t)(void *app_data, struct nss_ppe_msg *msg); + +/** + * nss_ppe_tx_msg + * Sends PPE messages to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_ppe_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_ppe_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_ppe_msg *msg); + +/** + * nss_ppe_tx_msg_sync + * Sends PPE messages synchronously to the NSS. + * + * @datatypes + * nss_ctx_instance \n + * nss_ppe_msg + * + * @param[in] nss_ctx Pointer to the NSS context. + * @param[in,out] msg Pointer to the message data. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_ppe_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_ppe_msg *msg); + +/** + * nss_ppe_msg_init + * Initializes a PPE message. + * + * @datatypes + * nss_ppe_msg + * + * @param[in,out] ncm Pointer to the message. + * @param[in] if_num Interface number + * @param[in] type Type of message. + * @param[in] len Size of the payload. + * @param[in] cb Callback function for the message. + * @param[in] app_data Pointer to the application context of the message. + * + * @return + * None. 
+ */ +void nss_ppe_msg_init(struct nss_ppe_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data); + +/** + * nss_ppe_get_context + * Gets the PPE context used in nss_ppe_tx. + * + * @return + * Pointer to the NSS core context. + */ +struct nss_ctx_instance *nss_ppe_get_context(void); + +/** + * nss_ppe_tx_ipsec_config_msg + * Sends the PPE a message to configure inline IPsec port. + * + * @param[in] if_num Static IPsec interface number. + * @param[in] vsi_num Default VSI number associated with inline IPsec port. + * @param[in] mtu Default MTU of static inline IPsec port. + * @param[in] mru Default MRU of static inline IPsec port. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_ppe_tx_ipsec_config_msg(uint32_t nss_ifnum, uint32_t vsi_num, uint16_t mtu, uint16_t mru); + +/** + * nss_ppe_tx_ipsec_mtu_msg + * Sends the PPE a message to configure MTU value on IPsec port. + * + * @param[in] nss_ifnum Static IPsec interface number. + * @param[in] mtu MTU of static IPsec interface. + * @param[in] mru MRU of static IPsec interface. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_ppe_tx_ipsec_mtu_msg(uint32_t nss_ifnum, uint16_t mtu, uint16_t mru); + +/** + * nss_ppe_tx_ipsec_add_intf_msg + * Sends the PPE a message to attach a dynamic interface number to IPsec port. + * + * @param[in] if_num Dynamic IPsec/DTLS interface number. + * + * @return + * Status of the Tx operation. + */ +nss_tx_status_t nss_ppe_tx_ipsec_add_intf_msg(uint32_t nss_ifnum); + +/** + * nss_ppe_tx_ipsec_del_intf_msg + * Sends the PPE a message to detach a dynamic interface number to IPsec port. + * + * @param[in] if_num Dynamic IPsec/DTLS interface number. + * + * @return + * Status of the Tx operation. 
+ */ +nss_tx_status_t nss_ppe_tx_ipsec_del_intf_msg(uint32_t nss_ifnum); + +/* + * nss_ppe_reg_read() + */ +static inline void nss_ppe_reg_read(u32 reg, u32 *val) +{ + *val = readl((ppe_pvt.ppe_base + reg)); +} + +/* + * nss_ppe_reg_write() + */ +static inline void nss_ppe_reg_write(u32 reg, u32 val) +{ + writel(val, (ppe_pvt.ppe_base + reg)); +} + +/* + * nss_ppe_log.h + * NSS PPE Log Header File + */ + +/* + * nss_ppe_log_tx_msg + * Logs a ppe message that is sent to the NSS firmware. + */ +void nss_ppe_log_tx_msg(struct nss_ppe_msg *npm); + +/* + * nss_ppe_log_rx_msg + * Logs a ppe message that is received from the NSS firmware. + */ +void nss_ppe_log_rx_msg(struct nss_ppe_msg *npm); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_log.c new file mode 100644 index 000000000..a6517322b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_log.c @@ -0,0 +1,189 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ppe_log.c + * NSS PPE logger file. 
+ */ + +#include "nss_core.h" +#include "nss_ppe.h" + +/* + * nss_ppe_log_message_types_str + * PPE message strings + */ +static int8_t *nss_ppe_log_message_types_str[NSS_PPE_MSG_MAX] __maybe_unused = { + "PPE Stats", + "PPE IPSEC Port Config", + "PPE IPSEC Port MTU Change", + "PPE IPSEC Add Interface", + "PPE IPSEC Del Interface", +}; + +/* + * nss_ppe_log_error_response_types_str + * Strings for error types for PPE messages + */ +static int8_t *nss_ppe_log_error_response_types_str[PPE_ERROR_MAX] __maybe_unused = { + "PPE No Error", + "PPE Uknown Type", + "PPE Port Creation Failure", + "PPE Invalid Port VSI", + "PPE Invalid L3 Interface", + "PPE IPSEC Port Config Error", + "PPE IPSEC Interface Table Full", + "PPE IPSEC Interface Attached", + "PPE IPSEC Interface Unattached", +}; + +/* + * nss_ppe_log_port_config_msg() + * Log NSS PPE port config message. + */ +static void nss_ppe_log_port_config_msg(struct nss_ppe_msg *npm) +{ + struct nss_ppe_ipsec_port_config_msg *npcm __maybe_unused = &npm->msg.ipsec_config; + nss_trace("%px: NSS PPE Port Configure Message:\n" + "PPE NSS Interface Number: %d\n" + "PPE MTU: %d\n" + "PPE VSI Number: %d\n", + npcm, npcm->nss_ifnum, + npcm->mtu, npcm->vsi_num); +} + +/* + * nss_ppe_log_port_mtu_msg() + * Log NSS PPE port mtu message. + */ +static void nss_ppe_log_port_mtu_msg(struct nss_ppe_msg *npm) +{ + struct nss_ppe_ipsec_port_mtu_msg *npmm __maybe_unused = &npm->msg.ipsec_mtu; + nss_trace("%px: NSS PPE Port Configure Message:\n" + "PPE NSS Interface Number: %d\n" + "PPE MTU: %d\n", + npmm, npmm->nss_ifnum, + npmm->mtu); +} + +/* + * nss_ppe_log_add_intf_msg() + * Log NSS PPE IPSEC Add Interface Message. 
+ */ +static void nss_ppe_log_add_intf_msg(struct nss_ppe_msg *npm) +{ + struct nss_ppe_ipsec_add_intf_msg *npam __maybe_unused = &npm->msg.ipsec_addif; + nss_trace("%px: NSS PPE IPSEC add Interface Message:\n" + "PPE NSS Interface Number: %d\n", + npam, npam->nss_ifnum); +} + +/* + * nss_ppe_log_del_intf_msg() + * Log NSS PPE IPSEC Delete Interface Message. + */ +static void nss_ppe_log_del_intf_msg(struct nss_ppe_msg *npm) +{ + struct nss_ppe_ipsec_del_intf_msg *npdm __maybe_unused = &npm->msg.ipsec_delif; + nss_trace("%px: NSS PPE IPSEC Delete Interface Message:\n" + "PPE NSS Interface Number: %d\n", + npdm, npdm->nss_ifnum); +} + +/* + * nss_ppe_log_verbose() + * Log message contents. + */ +static void nss_ppe_log_verbose(struct nss_ppe_msg *npm) +{ + switch (npm->cm.type) { + case NSS_PPE_MSG_IPSEC_PORT_CONFIG: + nss_ppe_log_port_config_msg(npm); + break; + + case NSS_PPE_MSG_IPSEC_PORT_MTU_CHANGE: + nss_ppe_log_port_mtu_msg(npm); + break; + + case NSS_PPE_MSG_IPSEC_ADD_INTF: + nss_ppe_log_add_intf_msg(npm); + break; + + case NSS_PPE_MSG_IPSEC_DEL_INTF: + nss_ppe_log_del_intf_msg(npm); + break; + + case NSS_PPE_MSG_SYNC_STATS: + /* + * No log for valid stats message. + */ + break; + + default: + nss_warning("%px: Invalid message type\n", npm); + break; + } +} + +/* + * nss_ppe_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_ppe_log_tx_msg(struct nss_ppe_msg *npm) +{ + if (npm->cm.type >= NSS_PPE_MSG_MAX) { + nss_warning("%px: Invalid message type\n", npm); + return; + } + + nss_info("%px: type[%d]:%s\n", npm, npm->cm.type, nss_ppe_log_message_types_str[npm->cm.type]); + nss_ppe_log_verbose(npm); +} + +/* + * nss_ppe_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_ppe_log_rx_msg(struct nss_ppe_msg *npm) +{ + if (npm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", npm); + return; + } + + if (npm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (npm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", npm, npm->cm.type, + nss_ppe_log_message_types_str[npm->cm.type], + npm->cm.response, nss_cmn_response_str[npm->cm.response]); + goto verbose; + } + + if (npm->cm.error >= PPE_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + npm, npm->cm.type, nss_ppe_log_message_types_str[npm->cm.type], + npm->cm.response, nss_cmn_response_str[npm->cm.response], + npm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + npm, npm->cm.type, nss_ppe_log_message_types_str[npm->cm.type], + npm->cm.response, nss_cmn_response_str[npm->cm.response], + npm->cm.error, nss_ppe_log_error_response_types_str[npm->cm.error]); + +verbose: + nss_ppe_log_verbose(npm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_stats.c new file mode 100644 index 000000000..e544856ed --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_stats.c @@ -0,0 +1,925 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_ppe.h" +#include "nss_ppe_stats.h" +#include "nss_ppe_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_ppe_stats_notifier); + +static uint8_t ppe_cc_nonexception[NSS_PPE_STATS_CPU_CODE_NONEXCEPTION_MAX] = { + NSS_PPE_STATS_CPU_CODE_EXP_FAKE_L2_PROT_ERR, + NSS_PPE_STATS_CPU_CODE_EXP_FAKE_MAC_HEADER_ERR, + NSS_PPE_STATS_CPU_CODE_EXP_BITMAP_MAX, + NSS_PPE_STATS_CPU_CODE_L2_EXP_MRU_FAIL, + NSS_PPE_STATS_CPU_CODE_L2_EXP_MTU_FAIL, + NSS_PPE_STATS_CPU_CODE_L3_EXP_IP_PREFIX_BC, + NSS_PPE_STATS_CPU_CODE_L3_EXP_MTU_FAIL, + NSS_PPE_STATS_CPU_CODE_L3_EXP_MRU_FAIL, + NSS_PPE_STATS_CPU_CODE_L3_EXP_ICMP_RDT, + NSS_PPE_STATS_CPU_CODE_L3_EXP_IP_RT_TTL1_TO_ME, + NSS_PPE_STATS_CPU_CODE_L3_EXP_IP_RT_TTL_ZERO, + NSS_PPE_STATS_CPU_CODE_L3_FLOW_SERVICE_CODE_LOOP, + NSS_PPE_STATS_CPU_CODE_L3_FLOW_DE_ACCELERATE, + NSS_PPE_STATS_CPU_CODE_L3_EXP_FLOW_SRC_IF_CHK_FAIL, + NSS_PPE_STATS_CPU_CODE_L3_FLOW_SYNC_TOGGLE_MISMATCH, + NSS_PPE_STATS_CPU_CODE_L3_EXP_MTU_DF_FAIL, + NSS_PPE_STATS_CPU_CODE_L3_EXP_PPPOE_MULTICAST, + NSS_PPE_STATS_CPU_CODE_MGMT_OFFSET, + NSS_PPE_STATS_CPU_CODE_MGMT_EAPOL, + NSS_PPE_STATS_CPU_CODE_MGMT_PPPOE_DIS, + NSS_PPE_STATS_CPU_CODE_MGMT_IGMP, + NSS_PPE_STATS_CPU_CODE_MGMT_ARP_REQ, + NSS_PPE_STATS_CPU_CODE_MGMT_ARP_REP, + NSS_PPE_STATS_CPU_CODE_MGMT_DHCPv4, + NSS_PPE_STATS_CPU_CODE_MGMT_MLD, + NSS_PPE_STATS_CPU_CODE_MGMT_NS, + NSS_PPE_STATS_CPU_CODE_MGMT_NA, + NSS_PPE_STATS_CPU_CODE_MGMT_DHCPv6, + NSS_PPE_STATS_CPU_CODE_PTP_OFFSET, + NSS_PPE_STATS_CPU_CODE_PTP_SYNC, + 
NSS_PPE_STATS_CPU_CODE_PTP_FOLLOW_UP, + NSS_PPE_STATS_CPU_CODE_PTP_DELAY_REQ, + NSS_PPE_STATS_CPU_CODE_PTP_DELAY_RESP, + NSS_PPE_STATS_CPU_CODE_PTP_PDELAY_REQ, + NSS_PPE_STATS_CPU_CODE_PTP_PDELAY_RESP, + NSS_PPE_STATS_CPU_CODE_PTP_PDELAY_RESP_FOLLOW_UP, + NSS_PPE_STATS_CPU_CODE_PTP_ANNOUNCE, + NSS_PPE_STATS_CPU_CODE_PTP_MANAGEMENT, + NSS_PPE_STATS_CPU_CODE_PTP_SIGNALING, + NSS_PPE_STATS_CPU_CODE_PTP_PKT_RSV_MSG, + NSS_PPE_STATS_CPU_CODE_IPV4_SG_UNKNOWN, + NSS_PPE_STATS_CPU_CODE_IPV6_SG_UNKNOWN, + NSS_PPE_STATS_CPU_CODE_ARP_SG_UNKNOWN, + NSS_PPE_STATS_CPU_CODE_ND_SG_UNKNOWN, + NSS_PPE_STATS_CPU_CODE_IPV4_SG_VIO, + NSS_PPE_STATS_CPU_CODE_IPV6_SG_VIO, + NSS_PPE_STATS_CPU_CODE_ARP_SG_VIO, + NSS_PPE_STATS_CPU_CODE_ND_SG_VIO, + NSS_PPE_STATS_CPU_CODE_L3_ROUTING_IP_TO_ME, + NSS_PPE_STATS_CPU_CODE_L3_FLOW_SNAT_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_FLOW_DNAT_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_FLOW_RT_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_FLOW_BR_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_MC_BRIDGE_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_PREHEAD_RT_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_PREHEAD_SNAPT_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_PREHEAD_DNAPT_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_PREHEAD_SNAT_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_PREHEAD_DNAT_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_NO_ROUTE_PREHEAD_NAT_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_NO_ROUTE_PREHEAD_NAT_ERROR, + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_NO_ROUTE_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_NO_ROUTE_NH_INVALID_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_NO_ROUTE_PREHEAD_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_BRIDGE_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_FLOW_ACTION, + NSS_PPE_STATS_CPU_CODE_L3_FLOW_MISS_ACTION, + NSS_PPE_STATS_CPU_CODE_L2_NEW_MAC_ADDRESS, + NSS_PPE_STATS_CPU_CODE_L2_HASH_COLLISION, + NSS_PPE_STATS_CPU_CODE_L2_STATION_MOVE, + NSS_PPE_STATS_CPU_CODE_L2_LEARN_LIMIT, + NSS_PPE_STATS_CPU_CODE_L2_SA_LOOKUP_ACTION, + NSS_PPE_STATS_CPU_CODE_L2_DA_LOOKUP_ACTION, + 
NSS_PPE_STATS_CPU_CODE_APP_CTRL_ACTION, + NSS_PPE_STATS_CPU_CODE_IN_VLAN_FILTER_ACTION, + NSS_PPE_STATS_CPU_CODE_IN_VLAN_XLT_MISS, + NSS_PPE_STATS_CPU_CODE_EG_VLAN_FILTER_DROP, + NSS_PPE_STATS_CPU_CODE_ACL_PRE_ACTION, + NSS_PPE_STATS_CPU_CODE_ACL_POST_ACTION, + NSS_PPE_STATS_CPU_CODE_SERVICE_CODE_ACTION, +}; + +/* + * nss_ppe_stats_str_sc_type + * PPE service-code stats type + */ +static int8_t *nss_ppe_stats_str_sc_type[NSS_PPE_SC_MAX] = { + "SC_NONE ", + "SC_BYPASS_ALL ", + "SC_ADV_QOS_BRIDGED", + "SC_BR_QOS ", + "SC_BNC_0 ", + "SC_BNC_CMPL_0 ", + "SC_ADV_QOS_ROUTED ", + "SC_IPSEC_PPE2EIP ", + "SC_IPSEC_EIP2PPE ", + "SC_PTP ", + "SC_VLAN_FILTER ", + "SC_L3_EXCEPT ", +}; + +/* + * nss_ppe_stats_sync + * PPE connection sync statistics from NSS + */ +void nss_ppe_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_ppe_sync_stats_msg *stats_msg, uint16_t if_num) +{ + uint32_t sc; + spin_lock_bh(&nss_ppe_stats_lock); + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_L3_FLOWS] = stats_msg->stats.nss_ppe_v4_l3_flows; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_L2_FLOWS] = stats_msg->stats.nss_ppe_v4_l2_flows; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_CREATE_REQ] += stats_msg->stats.nss_ppe_v4_create_req; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_CREATE_FAIL] += stats_msg->stats.nss_ppe_v4_create_fail; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_DESTROY_REQ] += stats_msg->stats.nss_ppe_v4_destroy_req; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_DESTROY_FAIL] += stats_msg->stats.nss_ppe_v4_destroy_fail; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_MC_CREATE_REQ] += stats_msg->stats.nss_ppe_v4_mc_create_req; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_MC_CREATE_FAIL] += stats_msg->stats.nss_ppe_v4_mc_create_fail; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_MC_UPDATE_REQ] += stats_msg->stats.nss_ppe_v4_mc_update_req; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_MC_UPDATE_FAIL] += 
stats_msg->stats.nss_ppe_v4_mc_update_fail; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_MC_DESTROY_REQ] += stats_msg->stats.nss_ppe_v4_mc_destroy_req; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_MC_DESTROY_FAIL] += stats_msg->stats.nss_ppe_v4_mc_destroy_fail; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V4_UNKNOWN_INTERFACE] += stats_msg->stats.nss_ppe_v4_unknown_interface; + + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_L3_FLOWS] = stats_msg->stats.nss_ppe_v6_l3_flows; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_L2_FLOWS] = stats_msg->stats.nss_ppe_v6_l2_flows; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_CREATE_REQ] += stats_msg->stats.nss_ppe_v6_create_req; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_CREATE_FAIL] += stats_msg->stats.nss_ppe_v6_create_fail; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_DESTROY_REQ] += stats_msg->stats.nss_ppe_v6_destroy_req; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_DESTROY_FAIL] += stats_msg->stats.nss_ppe_v6_destroy_fail; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_MC_CREATE_REQ] += stats_msg->stats.nss_ppe_v6_mc_create_req; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_MC_CREATE_FAIL] += stats_msg->stats.nss_ppe_v6_mc_create_fail; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_MC_UPDATE_REQ] += stats_msg->stats.nss_ppe_v6_mc_update_req; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_MC_UPDATE_FAIL] += stats_msg->stats.nss_ppe_v6_mc_update_fail; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_MC_DESTROY_REQ] += stats_msg->stats.nss_ppe_v6_mc_destroy_req; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_MC_DESTROY_FAIL] += stats_msg->stats.nss_ppe_v6_mc_destroy_fail; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_V6_UNKNOWN_INTERFACE] += stats_msg->stats.nss_ppe_v6_unknown_interface; + + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_FAIL_VP_FULL] += stats_msg->stats.nss_ppe_fail_vp_full; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_FAIL_NH_FULL] += 
stats_msg->stats.nss_ppe_fail_nh_full; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_FAIL_FLOW_FULL] += stats_msg->stats.nss_ppe_fail_flow_full; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_FAIL_HOST_FULL] += stats_msg->stats.nss_ppe_fail_host_full; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_FAIL_PUBIP_FULL] += stats_msg->stats.nss_ppe_fail_pubip_full; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_FAIL_PORT_SETUP] += stats_msg->stats.nss_ppe_fail_port_setup; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_FAIL_RW_FIFO_FULL] += stats_msg->stats.nss_ppe_fail_rw_fifo_full; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_FAIL_FLOW_COMMAND] += stats_msg->stats.nss_ppe_fail_flow_command; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_FAIL_UNKNOWN_PROTO] += stats_msg->stats.nss_ppe_fail_unknown_proto; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_FAIL_PPE_UNRESPONSIVE] += stats_msg->stats.nss_ppe_fail_ppe_unresponsive; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_CE_OPAQUE_INVALID] += stats_msg->stats.nss_ppe_ce_opaque_invalid; + nss_ppe_debug_stats.conn_stats[NSS_PPE_STATS_FAIL_FQG_FULL] += stats_msg->stats.nss_ppe_fail_fqg_full; + + /* + * Update service-code stats. + */ + for (sc = 0; sc < NSS_PPE_SC_MAX; sc++) { + nss_ppe_debug_stats.sc_stats[sc].nss_ppe_sc_cb_unregister += stats_msg->sc_stats[sc].nss_ppe_sc_cb_unregister; + nss_ppe_debug_stats.sc_stats[sc].nss_ppe_sc_cb_success += stats_msg->sc_stats[sc].nss_ppe_sc_cb_success; + nss_ppe_debug_stats.sc_stats[sc].nss_ppe_sc_cb_failure += stats_msg->sc_stats[sc].nss_ppe_sc_cb_failure; + } + + spin_unlock_bh(&nss_ppe_stats_lock); +} + +/* + * nss_ppe_stats_conn_get() + * Get PPE connection statistics. 
+ */ +static void nss_ppe_stats_conn_get(uint64_t *stats) +{ + if (!stats) { + nss_warning("No memory to copy ppe connection stats"); + return; + } + + /* + * Get flow stats + */ + spin_lock_bh(&nss_ppe_stats_lock); + memcpy(stats, nss_ppe_debug_stats.conn_stats, (sizeof(uint64_t) * NSS_PPE_STATS_CONN_MAX)); + spin_unlock_bh(&nss_ppe_stats_lock); +} + +/* + * nss_ppe_stats_sc_get() + * Get PPE service-code statistics. + */ +static void nss_ppe_stats_sc_get(struct nss_ppe_sc_stats_debug *sc_stats) +{ + if (!sc_stats) { + nss_warning("No memory to copy ppe service code stats"); + return; + } + + /* + * Get flow stats + */ + spin_lock_bh(&nss_ppe_stats_lock); + memcpy(sc_stats, nss_ppe_debug_stats.sc_stats, (sizeof(struct nss_ppe_sc_stats_debug) * NSS_PPE_SC_MAX)); + spin_unlock_bh(&nss_ppe_stats_lock); +} + +/* + * nss_ppe_stats_l3_get() + * Get PPE L3 debug statistics. + */ +static void nss_ppe_stats_l3_get(uint32_t *stats) +{ + if (!stats) { + nss_warning("No memory to copy ppe l3 dbg stats\n"); + return; + } + + spin_lock_bh(&nss_ppe_stats_lock); + nss_ppe_reg_write(PPE_L3_DBG_WR_OFFSET, PPE_L3_DBG0_OFFSET); + nss_ppe_reg_read(PPE_L3_DBG_RD_OFFSET, &stats[NSS_PPE_STATS_L3_DBG_0]); + + nss_ppe_reg_write(PPE_L3_DBG_WR_OFFSET, PPE_L3_DBG1_OFFSET); + nss_ppe_reg_read(PPE_L3_DBG_RD_OFFSET, &stats[NSS_PPE_STATS_L3_DBG_1]); + + nss_ppe_reg_write(PPE_L3_DBG_WR_OFFSET, PPE_L3_DBG2_OFFSET); + nss_ppe_reg_read(PPE_L3_DBG_RD_OFFSET, &stats[NSS_PPE_STATS_L3_DBG_2]); + + nss_ppe_reg_write(PPE_L3_DBG_WR_OFFSET, PPE_L3_DBG3_OFFSET); + nss_ppe_reg_read(PPE_L3_DBG_RD_OFFSET, &stats[NSS_PPE_STATS_L3_DBG_3]); + + nss_ppe_reg_write(PPE_L3_DBG_WR_OFFSET, PPE_L3_DBG4_OFFSET); + nss_ppe_reg_read(PPE_L3_DBG_RD_OFFSET, &stats[NSS_PPE_STATS_L3_DBG_4]); + + nss_ppe_reg_write(PPE_L3_DBG_WR_OFFSET, PPE_L3_DBG_PORT_OFFSET); + nss_ppe_reg_read(PPE_L3_DBG_RD_OFFSET, &stats[NSS_PPE_STATS_L3_DBG_PORT]); + spin_unlock_bh(&nss_ppe_stats_lock); +} + +/* + * nss_ppe_stats_code_get() + * Get PPE CPU and 
DROP code for last packet processed. + */ +static void nss_ppe_stats_code_get(uint32_t *stats) +{ + uint32_t drop_0, drop_1, cpu_code; + + nss_trace("%s(%d) Start\n", __func__, __LINE__); + if (!stats) { + nss_warning("No memory to copy ppe code\n"); + return; + } + + spin_lock_bh(&nss_ppe_stats_lock); + nss_ppe_reg_write(PPE_PKT_CODE_WR_OFFSET, PPE_PKT_CODE_DROP0_OFFSET); + nss_ppe_reg_read(PPE_PKT_CODE_RD_OFFSET, &drop_0); + + nss_ppe_reg_write(PPE_PKT_CODE_WR_OFFSET, PPE_PKT_CODE_DROP1_OFFSET); + nss_ppe_reg_read(PPE_PKT_CODE_RD_OFFSET, &drop_1); + + stats[NSS_PPE_STATS_CODE_DROP] = PPE_PKT_CODE_DROP_GET(drop_0, drop_1); + + nss_ppe_reg_write(PPE_PKT_CODE_WR_OFFSET, PPE_PKT_CODE_CPU_OFFSET); + nss_ppe_reg_read(PPE_PKT_CODE_RD_OFFSET, &cpu_code); + + stats[NSS_PPE_STATS_CODE_CPU] = PPE_PKT_CODE_CPU_GET(cpu_code); + + spin_unlock_bh(&nss_ppe_stats_lock); +} + +/* + * nss_ppe_port_drop_code_get() + * Get ppe per port drop code. + */ +static void nss_ppe_port_drop_code_get(uint32_t *stats, uint8_t port_id) +{ + uint8_t i; + nss_trace("%s(%d) Start\n", __func__, __LINE__); + if (!stats) { + nss_warning("No memory to copy ppe code\n"); + return; + } + + if (port_id > NSS_PPE_NUM_PHY_PORTS_MAX) { + nss_warning("Port id is out of range\n"); + return; + } + + spin_lock_bh(&nss_ppe_stats_lock); + + for (i = 0; i < NSS_PPE_STATS_DROP_CODE_MAX; i++) { + nss_ppe_reg_read(PPE_DROP_CODE_OFFSET(i, port_id), &stats[i]); + } + + spin_unlock_bh(&nss_ppe_stats_lock); +} + +/* + * nss_ppe_cpu_code_exception_get() + * Get ppe cpu code specific for flow exceptions. 
+ */ +static void nss_ppe_cpu_code_exception_get(uint32_t *stats) +{ + uint8_t i; + nss_trace("%s(%d) Start\n", __func__, __LINE__); + if (!stats) { + nss_warning("No memory to copy ppe code\n"); + return; + } + + spin_lock_bh(&nss_ppe_stats_lock); + + for (i = 0; i < NSS_PPE_STATS_CPU_CODE_EXCEPTION_MAX ; i++) { + nss_ppe_reg_read(PPE_CPU_CODE_OFFSET(i), &stats[i]); + } + + spin_unlock_bh(&nss_ppe_stats_lock); +} + +/* + * nss_ppe_cpu_code_nonexception_get() + * Get ppe cpu code specific for flow exceptions. + */ +static void nss_ppe_cpu_code_nonexception_get(uint32_t *stats) +{ + uint8_t i; + nss_trace("%s(%d) Start\n", __func__, __LINE__); + if (!stats) { + nss_warning("No memory to copy ppe code\n"); + return; + } + + spin_lock_bh(&nss_ppe_stats_lock); + + for (i = 0; i < NSS_PPE_STATS_CPU_CODE_NONEXCEPTION_MAX; i++) { + nss_ppe_reg_read(PPE_CPU_CODE_OFFSET(ppe_cc_nonexception[i]), &stats[i]); + } + + spin_unlock_bh(&nss_ppe_stats_lock); +} + +/* + * nss_ppe_conn_stats_read() + * Read ppe connection statistics + */ +static ssize_t nss_ppe_conn_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int i; + char *lbuf = NULL; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + uint64_t ppe_stats[NSS_PPE_STATS_CONN_MAX]; + uint32_t max_output_lines = NSS_PPE_STATS_CONN_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + + lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_PPE_STATS_CONN_MAX * 8, GFP_KERNEL); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + memset(ppe_stats, 0, sizeof(uint64_t) * NSS_PPE_STATS_CONN_MAX); + + /* + * Get all stats + */ + nss_ppe_stats_conn_get(ppe_stats); + + /* + * flow stats + */ + spin_lock_bh(&nss_ppe_stats_lock); 
+ for (i = 0; i < NSS_PPE_STATS_CONN_MAX; i++) { + stats_shadow[i] = ppe_stats[i]; + } + + spin_unlock_bh(&nss_ppe_stats_lock); + size_wr += nss_stats_print("ppe", "ppe flow counters", NSS_STATS_SINGLE_INSTANCE, nss_ppe_stats_str_conn, stats_shadow, + NSS_PPE_STATS_CONN_MAX, lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + + kfree(lbuf); + kfree(stats_shadow); + return bytes_read; +} + +/* + * nss_ppe_sc_stats_read() + * Read ppe service code statistics + */ +static ssize_t nss_ppe_sc_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int i; + char *lbuf = NULL; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct nss_ppe_sc_stats_debug sc_stats[NSS_PPE_SC_MAX]; + uint32_t max_output_lines = (NSS_PPE_SC_MAX * NSS_PPE_STATS_SERVICE_CODE_MAX) + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + + lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + memset(sc_stats, 0, sizeof(sc_stats)); + + /* + * Get stats + */ + nss_ppe_stats_sc_get(sc_stats); + + /* + * service code stats + */ + + for (i = 0; i < NSS_PPE_SC_MAX; i++) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "ppe service code type: %s\n", + nss_ppe_stats_str_sc_type[i]); + size_wr += nss_stats_print("ppe", "ppe service code counters", NSS_STATS_SINGLE_INSTANCE, + nss_ppe_stats_str_sc, &sc_stats[i].nss_ppe_sc_cb_unregister, + NSS_PPE_STATS_SERVICE_CODE_MAX, lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + return bytes_read; +} + +/* + * nss_ppe_l3_stats_read() + * Read PPE L3 debug statistics + */ +static ssize_t nss_ppe_l3_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int i; + char *lbuf = NULL; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t 
*stats_shadow; + uint32_t ppe_stats[NSS_PPE_STATS_L3_MAX]; + uint32_t max_output_lines = NSS_PPE_STATS_L3_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + + lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_PPE_STATS_L3_MAX * 8, GFP_KERNEL); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + memset(ppe_stats, 0, sizeof(uint32_t) * NSS_PPE_STATS_L3_MAX); + + /* + * Get all stats + */ + nss_ppe_stats_l3_get(ppe_stats); + + /* + * flow stats + */ + spin_lock_bh(&nss_ppe_stats_lock); + for (i = 0; i < NSS_PPE_STATS_L3_MAX; i++) { + stats_shadow[i] = ppe_stats[i]; + } + + spin_unlock_bh(&nss_ppe_stats_lock); + size_wr += nss_stats_print("ppe", "ppe l3 debug stats", NSS_STATS_SINGLE_INSTANCE, nss_ppe_stats_str_l3, + stats_shadow, NSS_PPE_STATS_L3_MAX, lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + return bytes_read; +} + +/* + * nss_ppe_code_stats_read() + * Read ppe CPU & DROP code + */ +static ssize_t nss_ppe_code_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int i; + char *lbuf = NULL; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + uint32_t ppe_stats[NSS_PPE_STATS_CODE_MAX]; + uint32_t max_output_lines = NSS_PPE_STATS_CODE_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + + lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_PPE_STATS_CODE_MAX * 8, GFP_KERNEL); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + 
kfree(lbuf); + return 0; + } + + memset(ppe_stats, 0, sizeof(uint32_t) * NSS_PPE_STATS_CODE_MAX); + + /* + * Get all stats + */ + nss_ppe_stats_code_get(ppe_stats); + + /* + * flow stats + */ + spin_lock_bh(&nss_ppe_stats_lock); + for (i = 0; i < NSS_PPE_STATS_CODE_MAX; i++) { + stats_shadow[i] = ppe_stats[i]; + } + + spin_unlock_bh(&nss_ppe_stats_lock); + size_wr += nss_stats_print("ppe", "ppe session stats", NSS_STATS_SINGLE_INSTANCE, nss_ppe_stats_str_code, stats_shadow, + NSS_PPE_STATS_CODE_MAX, lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + + kfree(lbuf); + kfree(stats_shadow); + return bytes_read; +} + +/* + * nss_ppe_port_dc_stats_read() + * Read PPE per port drop code stats + */ +static ssize_t nss_ppe_port_dc_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * max output lines = #stats + few blank lines for future reference to add new stats. + */ + uint32_t max_output_lines = NSS_PPE_STATS_DROP_CODE_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + struct nss_stats_data *data = fp->private_data; + uint32_t *ppe_stats; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + ppe_stats = kzalloc(sizeof(uint32_t) * NSS_PPE_STATS_DROP_CODE_MAX, GFP_KERNEL); + if (unlikely(ppe_stats == NULL)) { + kfree(lbuf); + nss_warning("Could not allocate memory for ppe stats buffer"); + return 0; + } + + stats_shadow = kzalloc((NSS_PPE_STATS_DROP_CODE_MAX) * 8, GFP_KERNEL); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + kfree(ppe_stats); + return 0; + } + + /* + * Get drop code counters for specific port + */ + nss_ppe_port_drop_code_get(ppe_stats, data->edma_id); 
+ + /* + * Drop code stats + */ + spin_lock_bh(&nss_ppe_stats_lock); + for (i = 0; i < NSS_PPE_STATS_DROP_CODE_MAX; i++) { + stats_shadow[i] = ppe_stats[i]; + } + + spin_unlock_bh(&nss_ppe_stats_lock); + size_wr += nss_stats_print("ppe", "ppe drop code stats", NSS_STATS_SINGLE_INSTANCE, nss_ppe_stats_str_dc, + stats_shadow, NSS_PPE_STATS_DROP_CODE_MAX, lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(ppe_stats); + kfree(lbuf); + kfree(stats_shadow); + return bytes_read; +} + +/* + * nss_ppe_exception_cc_stats_read() + * Read PPE CPU code stats specific to flow exceptions + */ +static ssize_t nss_ppe_exception_cc_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * max output lines = #stats + few blank lines for future reference to add new stats. + */ + uint32_t max_output_lines = NSS_PPE_STATS_CPU_CODE_EXCEPTION_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + uint32_t *ppe_stats; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + ppe_stats = kzalloc(sizeof(uint32_t) * NSS_PPE_STATS_CPU_CODE_EXCEPTION_MAX, GFP_KERNEL); + if (unlikely(ppe_stats == NULL)) { + kfree(lbuf); + nss_warning("Could not allocate memory for ppe stats buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_PPE_STATS_CPU_CODE_EXCEPTION_MAX * 8, GFP_KERNEL); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + kfree(ppe_stats); + return 0; + } + + /* + * Get CPU code counters for flow specific exceptions + */ + nss_ppe_cpu_code_exception_get(ppe_stats); + + /* + * CPU code stats + */ + spin_lock_bh(&nss_ppe_stats_lock); + for (i = 0; i < NSS_PPE_STATS_CPU_CODE_EXCEPTION_MAX; i++) { 
+ stats_shadow[i] = ppe_stats[i]; + } + + spin_unlock_bh(&nss_ppe_stats_lock); + size_wr += nss_stats_print("ppe", "ppe cpu code flow-exception stats", NSS_STATS_SINGLE_INSTANCE, + nss_ppe_stats_str_cc, stats_shadow, NSS_PPE_STATS_CPU_CODE_EXCEPTION_MAX, + lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(ppe_stats); + kfree(lbuf); + kfree(stats_shadow); + return bytes_read; +} + +/* + * nss_ppe_nonexception_cc_stats_read() + * Read PPE CPU code stats for other than flow exceptions + */ +static ssize_t nss_ppe_nonexception_cc_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * max output lines = #stats + few blank lines for future reference to add new stats. + */ + uint32_t max_output_lines = NSS_PPE_STATS_CPU_CODE_NONEXCEPTION_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + uint32_t *ppe_stats; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + ppe_stats = kzalloc(sizeof(uint32_t) * NSS_PPE_STATS_CPU_CODE_NONEXCEPTION_MAX, GFP_KERNEL); + if (unlikely(ppe_stats == NULL)) { + kfree(lbuf); + nss_warning("Could not allocate memory for ppe stats buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_PPE_STATS_CPU_CODE_NONEXCEPTION_MAX * 8, GFP_KERNEL); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + kfree(ppe_stats); + return 0; + } + + /* + * Get CPU code counters for non flow exceptions + */ + nss_ppe_cpu_code_nonexception_get(ppe_stats); + + /* + * CPU code stats + */ + + spin_lock_bh(&nss_ppe_stats_lock); + for (i = 0; i < NSS_PPE_STATS_CPU_CODE_NONEXCEPTION_MAX; i++) { + stats_shadow[i] = ppe_stats[i]; + } + + 
spin_unlock_bh(&nss_ppe_stats_lock); + size_wr += nss_stats_print("ppe", "ppe cpu code non-flow exception stats", NSS_STATS_SINGLE_INSTANCE, + &nss_ppe_stats_str_cc[NSS_PPE_STATS_CPU_CODE_NONEXCEPTION_START], + stats_shadow, NSS_PPE_STATS_CPU_CODE_NONEXCEPTION_MAX, lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(ppe_stats); + kfree(stats_shadow); + kfree(lbuf); + + return bytes_read; +} + +/* + * nss_ppe_conn_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_conn) + +/* + * nss_ppe_l3_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_l3) + +/* + * nss_ppe_code_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_code) + +/* + * nss_ppe_port_dc_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_port_dc) +/* + * nss_ppe_exception_cc_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_exception_cc) + +/* + * nss_ppe_nonexception_cc_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_nonexception_cc) + +/* + * nss_ppe_sc_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_sc) + +/* + * nss_ppe_stats_dentry_create() + * Create PPE statistics debug entry. 
+ */ +void nss_ppe_stats_dentry_create(void) +{ + int i; + struct dentry *ppe_dentry = NULL; + struct dentry *ppe_code_d = NULL; + struct dentry *ppe_drop_d = NULL; + struct dentry *ppe_cpu_d = NULL; + char file_name[10]; + + ppe_dentry = debugfs_create_dir("ppe", nss_top_main.stats_dentry); + if (!ppe_dentry) { + nss_warning("Failed to create qca-nss-drv/stats/ppe directory"); + return; + } + + if (!debugfs_create_file("connection", 0400, ppe_dentry, &nss_top_main, &nss_ppe_conn_stats_ops)) { + nss_warning("Failed to create qca-nss-drv/stats/ppe/connection file"); + debugfs_remove_recursive(ppe_dentry); + return; + } + + if (!debugfs_create_file("sc_stats", 0400, ppe_dentry, &nss_top_main, &nss_ppe_sc_stats_ops)) { + nss_warning("Failed to create qca-nss-drv/stats/ppe/sc_stats file"); + debugfs_remove_recursive(ppe_dentry); + return; + } + + if (!debugfs_create_file("l3", 0400, ppe_dentry, &nss_top_main, &nss_ppe_l3_stats_ops)) { + nss_warning("Failed to create qca-nss-drv/stats/ppe/l3 file"); + debugfs_remove_recursive(ppe_dentry); + return; + } + + if (!debugfs_create_file("ppe_code", 0400, ppe_dentry, &nss_top_main, &nss_ppe_code_stats_ops)) { + nss_warning("Failed to create qca-nss-drv/stats/ppe/ppe_code file"); + debugfs_remove_recursive(ppe_dentry); + return; + } + + /* + * ppe exception and drop code stats + */ + ppe_code_d = debugfs_create_dir("code", ppe_dentry); + if (!ppe_code_d) { + nss_warning("Failed to create qca-nss-drv/stats/ppe/code directory"); + return; + } + + ppe_cpu_d = debugfs_create_dir("cpu", ppe_code_d); + if (!ppe_cpu_d) { + nss_warning("Failed to create qca-nss-drv/stats/ppe/code/cpu directory"); + return; + } + + if (!debugfs_create_file("exception", 0400, ppe_cpu_d, &nss_top_main, &nss_ppe_exception_cc_stats_ops)) { + nss_warning("Failed to create qca-nss-drv/stats/ppe/code/exception file"); + debugfs_remove_recursive(ppe_cpu_d); + return; + } + + if (!debugfs_create_file("non-exception", 0400, ppe_cpu_d, &nss_top_main, 
&nss_ppe_nonexception_cc_stats_ops)) { + nss_warning("Failed to create qca-nss-drv/stats/ppe/code/non-exception file"); + debugfs_remove_recursive(ppe_cpu_d); + return; + } + + ppe_drop_d = debugfs_create_dir("drop", ppe_code_d); + if (!ppe_drop_d) { + nss_warning("Failed to create qca-nss-drv/stats/ppe/code/drop directory"); + return; + } + + for (i = 0; i < NSS_PPE_NUM_PHY_PORTS_MAX; i++) { + if (i > 0) { + memset(file_name, 0, sizeof(file_name)); + snprintf(file_name, sizeof(file_name), "%d", i); + } + + if (!debugfs_create_file((i == 0) ? "cpu" : file_name, 0400, ppe_drop_d, + (void *)(nss_ptr_t)i, &nss_ppe_port_dc_stats_ops)) { + nss_warning("Failed to create qca-nss-drv/stats/ppe/code/drop/%d file", i); + debugfs_remove_recursive(ppe_drop_d); + return; + } + } +} + +/* + * nss_ppe_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_ppe_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_ppe_stats_notification ppe_stats; + + spin_lock_bh(&nss_ppe_stats_lock); + ppe_stats.core_id = nss_ctx->id; + ppe_stats.if_num = if_num; + memcpy(ppe_stats.ppe_stats_conn, nss_ppe_debug_stats.conn_stats, sizeof(ppe_stats.ppe_stats_conn)); + memcpy(ppe_stats.ppe_stats_sc, nss_ppe_debug_stats.sc_stats, sizeof(ppe_stats.ppe_stats_sc)); + spin_unlock_bh(&nss_ppe_stats_lock); + + atomic_notifier_call_chain(&nss_ppe_stats_notifier, NSS_STATS_EVENT_NOTIFY, &ppe_stats); +} + +/* + * nss_ppe_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_ppe_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_ppe_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ppe_stats_unregister_notifier); + +/* + * nss_ppe_stats_register_notifier() + * Registers statistics notifier. 
+ */ +int nss_ppe_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_ppe_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_ppe_stats_register_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_stats.h new file mode 100644 index 000000000..bd2cecb8d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_stats.h @@ -0,0 +1,447 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2018, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ppe_stats.h + * NSS PPE statistics header file. 
+ */ + +#ifndef __NSS_PPE_STATS_H +#define __NSS_PPE_STATS_H + +#include + +/* + * NSS PPE connection statistics + */ +enum nss_ppe_stats_conn { + NSS_PPE_STATS_V4_L3_FLOWS, /* No of v4 routed flows */ + NSS_PPE_STATS_V4_L2_FLOWS, /* No of v4 bridge flows */ + NSS_PPE_STATS_V4_CREATE_REQ, /* No of v4 create requests */ + NSS_PPE_STATS_V4_CREATE_FAIL, /* No of v4 create failure */ + NSS_PPE_STATS_V4_DESTROY_REQ, /* No of v4 delete requests */ + NSS_PPE_STATS_V4_DESTROY_FAIL, /* No of v4 delete failure */ + NSS_PPE_STATS_V4_MC_CREATE_REQ, /* No of v4 MC create requests */ + NSS_PPE_STATS_V4_MC_CREATE_FAIL, /* No of v4 MC create failure */ + NSS_PPE_STATS_V4_MC_UPDATE_REQ, /* No of v4 MC update requests */ + NSS_PPE_STATS_V4_MC_UPDATE_FAIL, /* No of v4 MC update failure */ + NSS_PPE_STATS_V4_MC_DESTROY_REQ, /* No of v4 MC delete requests */ + NSS_PPE_STATS_V4_MC_DESTROY_FAIL, /* No of v4 MC delete failure */ + NSS_PPE_STATS_V4_UNKNOWN_INTERFACE, /* No of v4 create failure due to invalid if */ + + NSS_PPE_STATS_V6_L3_FLOWS, /* No of v6 routed flows */ + NSS_PPE_STATS_V6_L2_FLOWS, /* No of v6 bridge flows */ + NSS_PPE_STATS_V6_CREATE_REQ, /* No of v6 create requests */ + NSS_PPE_STATS_V6_CREATE_FAIL, /* No of v6 create failure */ + NSS_PPE_STATS_V6_DESTROY_REQ, /* No of v6 delete requests */ + NSS_PPE_STATS_V6_DESTROY_FAIL, /* No of v6 delete failure */ + NSS_PPE_STATS_V6_MC_CREATE_REQ, /* No of v6 MC create requests */ + NSS_PPE_STATS_V6_MC_CREATE_FAIL, /* No of v6 MC create failure */ + NSS_PPE_STATS_V6_MC_UPDATE_REQ, /* No of v6 MC update requests */ + NSS_PPE_STATS_V6_MC_UPDATE_FAIL, /* No of v6 MC update failure */ + NSS_PPE_STATS_V6_MC_DESTROY_REQ, /* No of v6 MC delete requests */ + NSS_PPE_STATS_V6_MC_DESTROY_FAIL, /* No of v6 MC delete failure */ + NSS_PPE_STATS_V6_UNKNOWN_INTERFACE, /* No of v6 create failure due to invalid if */ + + NSS_PPE_STATS_FAIL_VP_FULL, /* Create req fail due to VP table full */ + NSS_PPE_STATS_FAIL_NH_FULL, /* Create req fail due to 
nexthop table full */ + NSS_PPE_STATS_FAIL_FLOW_FULL, /* Create req fail due to flow table full */ + NSS_PPE_STATS_FAIL_HOST_FULL, /* Create req fail due to host table full */ + NSS_PPE_STATS_FAIL_PUBIP_FULL, /* Create req fail due to pub-ip table full */ + NSS_PPE_STATS_FAIL_PORT_SETUP, /* Create req fail due to PPE port not setup */ + NSS_PPE_STATS_FAIL_RW_FIFO_FULL, /* Create req fail due to rw fifo full */ + NSS_PPE_STATS_FAIL_FLOW_COMMAND, /* Create req fail due to PPE flow command failure */ + NSS_PPE_STATS_FAIL_UNKNOWN_PROTO, /* Create req fail due to unknown protocol */ + NSS_PPE_STATS_FAIL_PPE_UNRESPONSIVE, /* Create req fail due to PPE not responding */ + NSS_PPE_STATS_CE_OPAQUE_INVALID, /* Create req fail due to invalid opaque in CE */ + NSS_PPE_STATS_FAIL_FQG_FULL, /* Create req fail due to flow qos group full */ + NSS_PPE_STATS_CONN_MAX +}; + +/* + * NSS PPE SC statistics + */ +enum nss_ppe_stats_service_code { + NSS_PPE_STATS_SERVICE_CODE_CB_UNREGISTER, + NSS_PPE_STATS_SERVICE_CODE_PROCESS_OK, + NSS_PPE_STATS_SERVICE_CODE_PROCESS_FAIL, + NSS_PPE_STATS_SERVICE_CODE_MAX +}; + +/* + * NSS PPE L3 statistics + */ +enum nss_ppe_stats_l3 { + NSS_PPE_STATS_L3_DBG_0, /* PPE L3 debug register 0 */ + NSS_PPE_STATS_L3_DBG_1, /* PPE L3 debug register 1 */ + NSS_PPE_STATS_L3_DBG_2, /* PPE L3 debug register 2 */ + NSS_PPE_STATS_L3_DBG_3, /* PPE L3 debug register 3 */ + NSS_PPE_STATS_L3_DBG_4, /* PPE L3 debug register 4 */ + NSS_PPE_STATS_L3_DBG_PORT, /* PPE L3 debug register Port */ + NSS_PPE_STATS_L3_MAX +}; + +/* + * NSS PPE_code statistics + */ +enum nss_ppe_stats_code { + NSS_PPE_STATS_CODE_CPU, /* PPE CPU code for last packet processed */ + NSS_PPE_STATS_CODE_DROP, /* PPE DROP code for last packet processed */ + NSS_PPE_STATS_CODE_MAX +}; + +/* + * PPE drop codes + */ +enum nss_ppe_stats_dc { + NSS_PPE_STATS_DROP_CODE_UNKNOWN, /* PPE drop code unknown */ + NSS_PPE_STATS_DROP_CODE_EXP_UNKNOWN_L2_PROT, /* PPE drop code exp unknown l2 prot */ + 
NSS_PPE_STATS_DROP_CODE_EXP_PPPOE_WRONG_VER_TYPE, /* PPE drop code exp pppoe wrong ver type */ + NSS_PPE_STATS_DROP_CODE_EXP_PPPOE_WRONG_CODE, /* PPE drop code exp pppoe wrong code */ + NSS_PPE_STATS_DROP_CODE_EXP_PPPOE_UNSUPPORTED_PPP_PROT, /* PPE drop code exp pppoe unsupported ppp prot */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_WRONG_VER, /* PPE drop code exp ipv4 wrong ver */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_SMALL_IHL, /* PPE drop code exp ipv4 small ihl */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_WITH_OPTION, /* PPE drop code exp ipv4 with option */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_HDR_INCOMPLETE, /* PPE drop code exp ipv4 hdr incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_BAD_TOTAL_LEN, /* PPE drop code exp ipv4 bad total len */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_DATA_INCOMPLETE, /* PPE drop code exp ipv4 data incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_FRAG, /* PPE drop code exp ipv4 frag */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_PING_OF_DEATH, /* PPE drop code exp ipv4 ping of death */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_SNALL_TTL, /* PPE drop code exp ipv4 snall ttl */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_UNK_IP_PROT, /* PPE drop code exp ipv4 unk ip prot */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_CHECKSUM_ERR, /* PPE drop code exp ipv4 checksum err */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_INV_SIP, /* PPE drop code exp ipv4 inv sip */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_INV_DIP, /* PPE drop code exp ipv4 inv dip */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_LAND_ATTACK, /* PPE drop code exp ipv4 land attack */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_AH_HDR_INCOMPLETE, /* PPE drop code exp ipv4 ah hdr incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_AH_HDR_CROSS_BORDER, /* PPE drop code exp ipv4 ah hdr cross border */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV4_ESP_HDR_INCOMPLETE, /* PPE drop code exp ipv4 esp hdr incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_WRONG_VER, /* PPE drop code exp ipv6 wrong ver */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_HDR_INCOMPLETE, /* PPE drop code exp ipv6 hdr 
incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_BAD_PAYLOAD_LEN, /* PPE drop code exp ipv6 bad payload len */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_DATA_INCOMPLETE, /* PPE drop code exp ipv6 data incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_WITH_EXT_HDR, /* PPE drop code exp ipv6 with ext hdr */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_SMALL_HOP_LIMIT, /* PPE drop code exp ipv6 small hop limit */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_INV_SIP, /* PPE drop code exp ipv6 inv sip */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_INV_DIP, /* PPE drop code exp ipv6 inv dip */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_LAND_ATTACK, /* PPE drop code exp ipv6 land attack */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_FRAG, /* PPE drop code exp ipv6 frag */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_PING_OF_DEATH, /* PPE drop code exp ipv6 ping of death */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_WITH_MORE_EXT_HDR, /* PPE drop code exp ipv6 with more ext hdr */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_UNK_LAST_NEXT_HDR, /* PPE drop code exp ipv6 unk last next hdr */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_MOBILITY_HDR_INCOMPLETE, /* PPE drop code exp ipv6 mobility hdr incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_MOBILITY_HDR_CROSS_BORDER, /* PPE drop code exp ipv6 mobility hdr cross border */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_AH_HDR_INCOMPLETE, /* PPE drop code exp ipv6 ah hdr incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_AH_HDR_CROSS_BORDER, /* PPE drop code exp ipv6 ah hdr cross border */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_ESP_HDR_INCOMPLETE, /* PPE drop code exp ipv6 esp hdr incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_ESP_HDR_CROSS_BORDER, /* PPE drop code exp ipv6 esp hdr cross border */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_OTHER_EXT_HDR_INCOMPLETE, /* PPE drop code exp ipv6 other ext hdr incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_IPV6_OTHER_EXT_HDR_CROSS_BORDER, /* PPE drop code exp ipv6 other ext hdr cross border */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_HDR_INCOMPLETE, /* PPE drop code exp tcp hdr incomplete */ + 
NSS_PPE_STATS_DROP_CODE_EXP_TCP_HDR_CROSS_BORDER, /* PPE drop code exp tcp hdr cross border */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_SMAE_SP_DP, /* PPE drop code exp tcp smae sp dp */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_SMALL_DATA_OFFSET, /* PPE drop code exp tcp small data offset */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_FLAGS_0, /* PPE drop code exp tcp flags 0 */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_FLAGS_1, /* PPE drop code exp tcp flags 1 */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_FLAGS_2, /* PPE drop code exp tcp flags 2 */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_FLAGS_3, /* PPE drop code exp tcp flags 3 */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_FLAGS_4, /* PPE drop code exp tcp flags 4 */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_FLAGS_5, /* PPE drop code exp tcp flags 5 */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_FLAGS_6, /* PPE drop code exp tcp flags 6 */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_FLAGS_7, /* PPE drop code exp tcp flags 7 */ + NSS_PPE_STATS_DROP_CODE_EXP_TCP_CHECKSUM_ERR, /* PPE drop code exp tcp checksum err */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_HDR_INCOMPLETE, /* PPE drop code exp udp hdr incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_HDR_CROSS_BORDER, /* PPE drop code exp udp hdr cross border */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_SMAE_SP_DP, /* PPE drop code exp udp smae sp dp */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_BAD_LEN, /* PPE drop code exp udp bad len */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_DATA_INCOMPLETE, /* PPE drop code exp udp data incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_CHECKSUM_ERR, /* PPE drop code exp udp checksum err */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_LITE_HDR_INCOMPLETE, /* PPE drop code exp udp lite hdr incomplete */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_LITE_HDR_CROSS_BORDER, /* PPE drop code exp udp lite hdr cross border */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_LITE_SMAE_SP_DP, /* PPE drop code exp udp lite smae sp dp */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_LITE_CSM_COV_1_TO_7, /* PPE drop code exp udp lite csm cov 1 to 7 */ + 
NSS_PPE_STATS_DROP_CODE_EXP_UDP_LITE_CSM_COV_TOO_LONG, /* PPE drop code exp udp lite csm cov too long */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_LITE_CSM_COV_CROSS_BORDER, /* PPE drop code exp udp lite csm cov cross border */ + NSS_PPE_STATS_DROP_CODE_EXP_UDP_LITE_CHECKSUM_ERR, /* PPE drop code exp udp lite checksum err */ + NSS_PPE_STATS_DROP_CODE_L3_MC_BRIDGE_ACTION, /* PPE drop code l3 mc bridge action */ + NSS_PPE_STATS_DROP_CODE_L3_NO_ROUTE_PREHEAD_NAT_ACTION, /* PPE drop code l3 no route prehead nat action */ + NSS_PPE_STATS_DROP_CODE_L3_NO_ROUTE_PREHEAD_NAT_ERROR, /* PPE drop code l3 no route prehead nat error */ + NSS_PPE_STATS_DROP_CODE_L3_ROUTE_ACTION, /* PPE drop code l3 route action */ + NSS_PPE_STATS_DROP_CODE_L3_NO_ROUTE_ACTION, /* PPE drop code l3 no route action */ + NSS_PPE_STATS_DROP_CODE_L3_NO_ROUTE_NH_INVALID_ACTION, /* PPE drop code l3 no route nh invalid action */ + NSS_PPE_STATS_DROP_CODE_L3_NO_ROUTE_PREHEAD_ACTION, /* PPE drop code l3 no route prehead action */ + NSS_PPE_STATS_DROP_CODE_L3_BRIDGE_ACTION, /* PPE drop code l3 bridge action */ + NSS_PPE_STATS_DROP_CODE_L3_FLOW_ACTION, /* PPE drop code l3 flow action */ + NSS_PPE_STATS_DROP_CODE_L3_FLOW_MISS_ACTION, /* PPE drop code l3 flow miss action */ + NSS_PPE_STATS_DROP_CODE_L2_EXP_MRU_FAIL, /* PPE drop code l2 exp mru fail */ + NSS_PPE_STATS_DROP_CODE_L2_EXP_MTU_FAIL, /* PPE drop code l2 exp mtu fail */ + NSS_PPE_STATS_DROP_CODE_L3_EXP_IP_PREFIX_BC, /* PPE drop code l3 exp ip prefix bc */ + NSS_PPE_STATS_DROP_CODE_L3_EXP_MTU_FAIL, /* PPE drop code l3 exp mtu fail */ + NSS_PPE_STATS_DROP_CODE_L3_EXP_MRU_FAIL, /* PPE drop code l3 exp mru fail */ + NSS_PPE_STATS_DROP_CODE_L3_EXP_ICMP_RDT, /* PPE drop code l3 exp icmp rdt */ + NSS_PPE_STATS_DROP_CODE_FAKE_MAC_HEADER_ERR, /* PPE drop code fake mac header err */ + NSS_PPE_STATS_DROP_CODE_L3_EXP_IP_RT_TTL_ZERO, /* PPE drop code l3 exp ip rt ttl zero */ + NSS_PPE_STATS_DROP_CODE_L3_FLOW_SERVICE_CODE_LOOP, /* PPE drop code l3 flow service code loop */ + 
NSS_PPE_STATS_DROP_CODE_L3_FLOW_DE_ACCELEARTE, /* PPE drop code l3 flow de accelearte */ + NSS_PPE_STATS_DROP_CODE_L3_EXP_FLOW_SRC_IF_CHK_FAIL, /* PPE drop code l3 exp flow src if chk fail */ + NSS_PPE_STATS_DROP_CODE_L3_FLOW_SYNC_TOGGLE_MISMATCH, /* PPE drop code l3 flow sync toggle mismatch */ + NSS_PPE_STATS_DROP_CODE_L3_EXP_MTU_DF_FAIL, /* PPE drop code l3 exp mtu df fail */ + NSS_PPE_STATS_DROP_CODE_L3_EXP_PPPOE_MULTICAST, /* PPE drop code l3 exp pppoe multicast */ + NSS_PPE_STATS_DROP_CODE_IPV4_SG_UNKNOWN, /* PPE drop code ipv4 sg unknown */ + NSS_PPE_STATS_DROP_CODE_IPV6_SG_UNKNOWN, /* PPE drop code ipv6 sg unknown */ + NSS_PPE_STATS_DROP_CODE_ARP_SG_UNKNOWN, /* PPE drop code arp sg unknown */ + NSS_PPE_STATS_DROP_CODE_ND_SG_UNKNOWN, /* PPE drop code nd sg unknown */ + NSS_PPE_STATS_DROP_CODE_IPV4_SG_VIO, /* PPE drop code ipv4 sg vio */ + NSS_PPE_STATS_DROP_CODE_IPV6_SG_VIO, /* PPE drop code ipv6 sg vio */ + NSS_PPE_STATS_DROP_CODE_ARP_SG_VIO, /* PPE drop code arp sg vio */ + NSS_PPE_STATS_DROP_CODE_ND_SG_VIO, /* PPE drop code nd sg vio */ + NSS_PPE_STATS_DROP_CODE_L2_NEW_MAC_ADDRESS, /* PPE drop code l2 new mac address */ + NSS_PPE_STATS_DROP_CODE_L2_HASH_COLLISION, /* PPE drop code l2 hash collision */ + NSS_PPE_STATS_DROP_CODE_L2_STATION_MOVE, /* PPE drop code l2 station move */ + NSS_PPE_STATS_DROP_CODE_L2_LEARN_LIMIT, /* PPE drop code l2 learn limit */ + NSS_PPE_STATS_DROP_CODE_L2_SA_LOOKUP_ACTION, /* PPE drop code l2 sa lookup action */ + NSS_PPE_STATS_DROP_CODE_L2_DA_LOOKUP_ACTION, /* PPE drop code l2 da lookup action */ + NSS_PPE_STATS_DROP_CODE_APP_CTRL_ACTION, /* PPE drop code app ctrl action */ + NSS_PPE_STATS_DROP_CODE_IN_VLAN_FILTER_ACTION, /* PPE drop code in vlan filter action */ + NSS_PPE_STATS_DROP_CODE_IN_VLAN_XLT_MISS, /* PPE drop code in vlan xlt miss */ + NSS_PPE_STATS_DROP_CODE_EG_VLAN_FILTER_DROP, /* PPE drop code eg vlan filter drop */ + NSS_PPE_STATS_DROP_CODE_ACL_PRE_ACTION, /* PPE drop code acl pre action */ + 
NSS_PPE_STATS_DROP_CODE_ACL_POST_ACTION, /* PPE drop code acl post action */ + NSS_PPE_STATS_DROP_CODE_MC_BC_SA, /* PPE drop code mc bc sa */ + NSS_PPE_STATS_DROP_CODE_NO_DESTINATION, /* PPE drop code no destination */ + NSS_PPE_STATS_DROP_CODE_STG_IN_FILTER, /* PPE drop code stg in filter */ + NSS_PPE_STATS_DROP_CODE_STG_EG_FILTER, /* PPE drop code stg eg filter */ + NSS_PPE_STATS_DROP_CODE_SOURCE_FILTER_FAIL, /* PPE drop code source filter fail */ + NSS_PPE_STATS_DROP_CODE_TRUNK_SEL_FAIL, /* PPE drop code trunk sel fail */ + NSS_PPE_STATS_DROP_CODE_TX_EN_FAIL, /* PPE drop code tx en fail */ + NSS_PPE_STATS_DROP_CODE_VLAN_TAG_FMT, /* PPE drop code vlan tag fmt */ + NSS_PPE_STATS_DROP_CODE_CRC_ERR, /* PPE drop code crc err */ + NSS_PPE_STATS_DROP_CODE_PAUSE_FRAME, /* PPE drop code pause frame */ + NSS_PPE_STATS_DROP_CODE_PROMISC, /* PPE drop code promisc */ + NSS_PPE_STATS_DROP_CODE_ISOLATION, /* PPE drop code isolation */ + NSS_PPE_STATS_DROP_CODE_MGMT_APP, /* PPE drop code mgmt app */ + NSS_PPE_STATS_DROP_CODE_FAKE_L2_PROT_ERR, /* PPE drop code fake l2 prot err */ + NSS_PPE_STATS_DROP_CODE_POLICER, /* PPE drop code policer */ + NSS_PPE_STATS_DROP_CODE_MAX /* PPE drop code max */ +}; + +/* + * PPE CPU codes + */ +#define NSS_PPE_STATS_CPU_CODE_MAX 150 +#define NSS_PPE_STATS_CPU_CODE_EXCEPTION_MAX 69 +#define NSS_PPE_STATS_CPU_CODE_NONEXCEPTION_START 69 +#define NSS_PPE_STATS_CPU_CODE_NONEXCEPTION_MAX (NSS_PPE_STATS_CPU_CODE_MAX - NSS_PPE_STATS_CPU_CODE_NONEXCEPTION_START) + +enum nss_ppe_stats_cc { + NSS_PPE_STATS_CPU_CODE_FORWARDING = 0, /* PPE cpu code forwarding */ + NSS_PPE_STATS_CPU_CODE_EXP_UNKNOWN_L2_PROT = 1, /* PPE cpu code exp unknown l2 prot */ + NSS_PPE_STATS_CPU_CODE_EXP_PPPOE_WRONG_VER_TYPE = 2, /* PPE cpu code exp pppoe wrong ver type */ + NSS_PPE_STATS_CPU_CODE_EXP_PPPOE_WRONG_CODE = 3, /* PPE cpu code exp pppoe wrong code */ + NSS_PPE_STATS_CPU_CODE_EXP_PPPOE_UNSUPPORTED_PPP_PROT = 4, /* PPE cpu code exp pppoe unsupported ppp prot */ + 
NSS_PPE_STATS_CPU_CODE_EXP_IPV4_WRONG_VER = 5, /* PPE cpu code exp ipv4 wrong ver */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_SMALL_IHL = 6, /* PPE cpu code exp ipv4 small ihl */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_WITH_OPTION = 7, /* PPE cpu code exp ipv4 with option */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_HDR_INCOMPLETE = 8, /* PPE cpu code exp ipv4 hdr incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_BAD_TOTAL_LEN = 9, /* PPE cpu code exp ipv4 bad total len */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_DATA_INCOMPLETE = 10, /* PPE cpu code exp ipv4 data incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_FRAG = 11, /* PPE cpu code exp ipv4 frag */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_PING_OF_DEATH = 12, /* PPE cpu code exp ipv4 ping of death */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_SNALL_TTL = 13, /* PPE cpu code exp ipv4 snall ttl */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_UNK_IP_PROT = 14, /* PPE cpu code exp ipv4 unk ip prot */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_CHECKSUM_ERR = 15, /* PPE cpu code exp ipv4 checksum err */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_INV_SIP = 16, /* PPE cpu code exp ipv4 inv sip */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_INV_DIP = 17, /* PPE cpu code exp ipv4 inv dip */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_LAND_ATTACK = 18, /* PPE cpu code exp ipv4 land attack */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_AH_HDR_INCOMPLETE = 19, /* PPE cpu code exp ipv4 ah hdr incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_AH_HDR_CROSS_BORDER = 20, /* PPE cpu code exp ipv4 ah hdr cross border */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV4_ESP_HDR_INCOMPLETE = 21, /* PPE cpu code exp ipv4 esp hdr incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_WRONG_VER = 22, /* PPE cpu code exp ipv6 wrong ver */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_HDR_INCOMPLETE = 23, /* PPE cpu code exp ipv6 hdr incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_BAD_PAYLOAD_LEN = 24, /* PPE cpu code exp ipv6 bad payload len */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_DATA_INCOMPLETE = 25, /* PPE cpu code exp ipv6 data incomplete */ + 
NSS_PPE_STATS_CPU_CODE_EXP_IPV6_WITH_EXT_HDR = 26, /* PPE cpu code exp ipv6 with ext hdr */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_SMALL_HOP_LIMIT = 27, /* PPE cpu code exp ipv6 small hop limit */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_INV_SIP = 28, /* PPE cpu code exp ipv6 inv sip */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_INV_DIP = 29, /* PPE cpu code exp ipv6 inv dip */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_LAND_ATTACK = 30, /* PPE cpu code exp ipv6 land attack */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_FRAG = 31, /* PPE cpu code exp ipv6 frag */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_PING_OF_DEATH = 32, /* PPE cpu code exp ipv6 ping of death */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_WITH_MORE_EXT_HDR = 33, /* PPE cpu code exp ipv6 with more ext hdr */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_UNK_LAST_NEXT_HDR = 34, /* PPE cpu code exp ipv6 unk last next hdr */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_MOBILITY_HDR_INCOMPLETE = 35, /* PPE cpu code exp ipv6 mobility hdr incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_MOBILITY_HDR_CROSS_BORDER = 36, /* PPE cpu code exp ipv6 mobility hdr cross border */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_AH_HDR_INCOMPLETE = 37, /* PPE cpu code exp ipv6 ah hdr incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_AH_HDR_CROSS_BORDER = 38, /* PPE cpu code exp ipv6 ah hdr cross border */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_ESP_HDR_INCOMPLETE = 39, /* PPE cpu code exp ipv6 esp hdr incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_ESP_HDR_CROSS_BORDER = 40, /* PPE cpu code exp ipv6 esp hdr cross border */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_OTHER_EXT_HDR_INCOMPLETE = 41, /* PPE cpu code exp ipv6 other ext hdr incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_IPV6_OTHER_EXT_HDR_CROSS_BORDER = 42, /* PPE cpu code exp ipv6 other ext hdr cross border */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_HDR_INCOMPLETE = 43, /* PPE cpu code exp tcp hdr incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_HDR_CROSS_BORDER = 44, /* PPE cpu code exp tcp hdr cross border */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_SMAE_SP_DP = 45, /* PPE cpu code 
exp tcp smae sp dp */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_SMALL_DATA_OFFSET = 46, /* PPE cpu code exp tcp small data offset */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_FLAGS_0 = 47, /* PPE cpu code exp tcp flags 0 */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_FLAGS_1 = 48, /* PPE cpu code exp tcp flags 1 */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_FLAGS_2 = 49, /* PPE cpu code exp tcp flags 2 */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_FLAGS_3 = 50, /* PPE cpu code exp tcp flags 3 */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_FLAGS_4 = 51, /* PPE cpu code exp tcp flags 4 */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_FLAGS_5 = 52, /* PPE cpu code exp tcp flags 5 */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_FLAGS_6 = 53, /* PPE cpu code exp tcp flags 6 */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_FLAGS_7 = 54, /* PPE cpu code exp tcp flags 7 */ + NSS_PPE_STATS_CPU_CODE_EXP_TCP_CHECKSUM_ERR = 55, /* PPE cpu code exp tcp checksum err */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_HDR_INCOMPLETE = 56, /* PPE cpu code exp udp hdr incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_HDR_CROSS_BORDER = 57, /* PPE cpu code exp udp hdr cross border */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_SMAE_SP_DP = 58, /* PPE cpu code exp udp smae sp dp */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_BAD_LEN = 59, /* PPE cpu code exp udp bad len */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_DATA_INCOMPLETE = 60, /* PPE cpu code exp udp data incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_CHECKSUM_ERR = 61, /* PPE cpu code exp udp checksum err */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_LITE_HDR_INCOMPLETE = 62, /* PPE cpu code exp udp lite hdr incomplete */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_LITE_HDR_CROSS_BORDER = 63, /* PPE cpu code exp udp lite hdr cross border */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_LITE_SMAE_SP_DP = 64, /* PPE cpu code exp udp lite smae sp dp */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_LITE_CSM_COV_1_TO_7 = 65, /* PPE cpu code exp udp lite csm cov 1 to 7 */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_LITE_CSM_COV_TOO_LONG = 66, /* PPE cpu code exp udp lite csm cov too long */ + 
NSS_PPE_STATS_CPU_CODE_EXP_UDP_LITE_CSM_COV_CROSS_BORDER = 67, /* PPE cpu code exp udp lite csm cov cross border */ + NSS_PPE_STATS_CPU_CODE_EXP_UDP_LITE_CHECKSUM_ERR = 68, /* PPE cpu code exp udp lite checksum err */ + NSS_PPE_STATS_CPU_CODE_EXP_FAKE_L2_PROT_ERR = 69, /* PPE cpu code exp fake l2 prot err */ + NSS_PPE_STATS_CPU_CODE_EXP_FAKE_MAC_HEADER_ERR = 70, /* PPE cpu code exp fake mac header err */ + NSS_PPE_STATS_CPU_CODE_EXP_BITMAP_MAX = 78, /* PPE cpu code exp bitmap max */ + NSS_PPE_STATS_CPU_CODE_L2_EXP_MRU_FAIL = 79, /* PPE cpu code l2 exp mru fail */ + NSS_PPE_STATS_CPU_CODE_L2_EXP_MTU_FAIL = 80, /* PPE cpu code l2 exp mtu fail */ + NSS_PPE_STATS_CPU_CODE_L3_EXP_IP_PREFIX_BC = 81, /* PPE cpu code l3 exp ip prefix bc */ + NSS_PPE_STATS_CPU_CODE_L3_EXP_MTU_FAIL = 82, /* PPE cpu code l3 exp mtu fail */ + NSS_PPE_STATS_CPU_CODE_L3_EXP_MRU_FAIL = 83, /* PPE cpu code l3 exp mru fail */ + NSS_PPE_STATS_CPU_CODE_L3_EXP_ICMP_RDT = 84, /* PPE cpu code l3 exp icmp rdt */ + NSS_PPE_STATS_CPU_CODE_L3_EXP_IP_RT_TTL1_TO_ME = 85, /* PPE cpu code l3 exp ip rt ttl1 to me */ + NSS_PPE_STATS_CPU_CODE_L3_EXP_IP_RT_TTL_ZERO = 86, /* PPE cpu code l3 exp ip rt ttl zero */ + NSS_PPE_STATS_CPU_CODE_L3_FLOW_SERVICE_CODE_LOOP = 87, /* PPE cpu code l3 flow service code loop */ + NSS_PPE_STATS_CPU_CODE_L3_FLOW_DE_ACCELERATE = 88, /* PPE cpu code l3 flow de accelerate */ + NSS_PPE_STATS_CPU_CODE_L3_EXP_FLOW_SRC_IF_CHK_FAIL = 89, /* PPE cpu code l3 exp flow src if chk fail */ + NSS_PPE_STATS_CPU_CODE_L3_FLOW_SYNC_TOGGLE_MISMATCH = 90, /* PPE cpu code l3 flow sync toggle mismatch */ + NSS_PPE_STATS_CPU_CODE_L3_EXP_MTU_DF_FAIL = 91, /* PPE cpu code l3 exp mtu df fail */ + NSS_PPE_STATS_CPU_CODE_L3_EXP_PPPOE_MULTICAST = 92, /* PPE cpu code l3 exp pppoe multicast */ + NSS_PPE_STATS_CPU_CODE_MGMT_OFFSET = 96, /* PPE cpu code mgmt offset */ + NSS_PPE_STATS_CPU_CODE_MGMT_EAPOL = 97, /* PPE cpu code mgmt eapol */ + NSS_PPE_STATS_CPU_CODE_MGMT_PPPOE_DIS = 98, /* PPE cpu code mgmt pppoe dis */ 
+ NSS_PPE_STATS_CPU_CODE_MGMT_IGMP = 99, /* PPE cpu code mgmt igmp */ + NSS_PPE_STATS_CPU_CODE_MGMT_ARP_REQ = 100, /* PPE cpu code mgmt arp req */ + NSS_PPE_STATS_CPU_CODE_MGMT_ARP_REP = 101, /* PPE cpu code mgmt arp rep */ + NSS_PPE_STATS_CPU_CODE_MGMT_DHCPv4 = 102, /* PPE cpu code mgmt dhcpv4 */ + NSS_PPE_STATS_CPU_CODE_MGMT_MLD = 107, /* PPE cpu code mgmt mld */ + NSS_PPE_STATS_CPU_CODE_MGMT_NS = 108, /* PPE cpu code mgmt ns */ + NSS_PPE_STATS_CPU_CODE_MGMT_NA = 109, /* PPE cpu code mgmt na */ + NSS_PPE_STATS_CPU_CODE_MGMT_DHCPv6 = 110, /* PPE cpu code mgmt dhcpv6 */ + NSS_PPE_STATS_CPU_CODE_PTP_OFFSET = 112, /* PPE cpu code ptp offset */ + NSS_PPE_STATS_CPU_CODE_PTP_SYNC = 113, /* PPE cpu code ptp sync */ + NSS_PPE_STATS_CPU_CODE_PTP_FOLLOW_UP = 114, /* PPE cpu code ptp follow up */ + NSS_PPE_STATS_CPU_CODE_PTP_DELAY_REQ = 115, /* PPE cpu code ptp delay req */ + NSS_PPE_STATS_CPU_CODE_PTP_DELAY_RESP = 116, /* PPE cpu code ptp delay resp */ + NSS_PPE_STATS_CPU_CODE_PTP_PDELAY_REQ = 117, /* PPE cpu code ptp pdelay req */ + NSS_PPE_STATS_CPU_CODE_PTP_PDELAY_RESP = 118, /* PPE cpu code ptp pdelay resp */ + NSS_PPE_STATS_CPU_CODE_PTP_PDELAY_RESP_FOLLOW_UP = 119, /* PPE cpu code ptp pdelay resp follow up */ + NSS_PPE_STATS_CPU_CODE_PTP_ANNOUNCE = 120, /* PPE cpu code ptp announce */ + NSS_PPE_STATS_CPU_CODE_PTP_MANAGEMENT = 121, /* PPE cpu code ptp management */ + NSS_PPE_STATS_CPU_CODE_PTP_SIGNALING = 122, /* PPE cpu code ptp signaling */ + NSS_PPE_STATS_CPU_CODE_PTP_PKT_RSV_MSG = 127, /* PPE cpu code ptp pkt rsv msg */ + NSS_PPE_STATS_CPU_CODE_IPV4_SG_UNKNOWN = 136, /* PPE cpu code ipv4 sg unknown */ + NSS_PPE_STATS_CPU_CODE_IPV6_SG_UNKNOWN = 137, /* PPE cpu code ipv6 sg unknown */ + NSS_PPE_STATS_CPU_CODE_ARP_SG_UNKNOWN = 138, /* PPE cpu code arp sg unknown */ + NSS_PPE_STATS_CPU_CODE_ND_SG_UNKNOWN = 139, /* PPE cpu code nd sg unknown */ + NSS_PPE_STATS_CPU_CODE_IPV4_SG_VIO = 140, /* PPE cpu code ipv4 sg vio */ + NSS_PPE_STATS_CPU_CODE_IPV6_SG_VIO = 141, /* PPE 
cpu code ipv6 sg vio */ + NSS_PPE_STATS_CPU_CODE_ARP_SG_VIO = 142, /* PPE cpu code arp sg vio */ + NSS_PPE_STATS_CPU_CODE_ND_SG_VIO = 143, /* PPE cpu code nd sg vio */ + NSS_PPE_STATS_CPU_CODE_L3_ROUTING_IP_TO_ME = 148, /* PPE cpu code l3 routing ip to me */ + NSS_PPE_STATS_CPU_CODE_L3_FLOW_SNAT_ACTION = 149, /* PPE cpu code l3 flow snat action */ + NSS_PPE_STATS_CPU_CODE_L3_FLOW_DNAT_ACTION = 150, /* PPE cpu code l3 flow dnat action */ + NSS_PPE_STATS_CPU_CODE_L3_FLOW_RT_ACTION = 151, /* PPE cpu code l3 flow rt action */ + NSS_PPE_STATS_CPU_CODE_L3_FLOW_BR_ACTION = 152, /* PPE cpu code l3 flow br action */ + NSS_PPE_STATS_CPU_CODE_L3_MC_BRIDGE_ACTION = 153, /* PPE cpu code l3 mc bridge action */ + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_PREHEAD_RT_ACTION = 154, /* PPE cpu code l3 route prehead rt action */ + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_PREHEAD_SNAPT_ACTION = 155, /* PPE cpu code l3 route prehead snapt action */ + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_PREHEAD_DNAPT_ACTION = 156, /* PPE cpu code l3 route prehead dnapt action */ + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_PREHEAD_SNAT_ACTION = 157, /* PPE cpu code l3 route prehead snat action */ + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_PREHEAD_DNAT_ACTION = 158, /* PPE cpu code l3 route prehead dnat action */ + NSS_PPE_STATS_CPU_CODE_L3_NO_ROUTE_PREHEAD_NAT_ACTION = 159, /* PPE cpu code l3 no route prehead nat action */ + NSS_PPE_STATS_CPU_CODE_L3_NO_ROUTE_PREHEAD_NAT_ERROR = 160, /* PPE cpu code l3 no route prehead nat error */ + NSS_PPE_STATS_CPU_CODE_L3_ROUTE_ACTION = 161, /* PPE cpu code l3 route action */ + NSS_PPE_STATS_CPU_CODE_L3_NO_ROUTE_ACTION = 162, /* PPE cpu code l3 no route action */ + NSS_PPE_STATS_CPU_CODE_L3_NO_ROUTE_NH_INVALID_ACTION = 163, /* PPE cpu code l3 no route nh invalid action */ + NSS_PPE_STATS_CPU_CODE_L3_NO_ROUTE_PREHEAD_ACTION = 164, /* PPE cpu code l3 no route prehead action */ + NSS_PPE_STATS_CPU_CODE_L3_BRIDGE_ACTION = 165, /* PPE cpu code l3 bridge action */ + NSS_PPE_STATS_CPU_CODE_L3_FLOW_ACTION = 166, /* 
PPE cpu code l3 flow action */ + NSS_PPE_STATS_CPU_CODE_L3_FLOW_MISS_ACTION = 167, /* PPE cpu code l3 flow miss action */ + NSS_PPE_STATS_CPU_CODE_L2_NEW_MAC_ADDRESS = 168, /* PPE cpu code l2 new mac address */ + NSS_PPE_STATS_CPU_CODE_L2_HASH_COLLISION = 169, /* PPE cpu code l2 hash collision */ + NSS_PPE_STATS_CPU_CODE_L2_STATION_MOVE = 170, /* PPE cpu code l2 station move */ + NSS_PPE_STATS_CPU_CODE_L2_LEARN_LIMIT = 171, /* PPE cpu code l2 learn limit */ + NSS_PPE_STATS_CPU_CODE_L2_SA_LOOKUP_ACTION = 172, /* PPE cpu code l2 sa lookup action */ + NSS_PPE_STATS_CPU_CODE_L2_DA_LOOKUP_ACTION = 173, /* PPE cpu code l2 da lookup action */ + NSS_PPE_STATS_CPU_CODE_APP_CTRL_ACTION = 174, /* PPE cpu code app ctrl action */ + NSS_PPE_STATS_CPU_CODE_IN_VLAN_FILTER_ACTION = 175, /* PPE cpu code in vlan filter action */ + NSS_PPE_STATS_CPU_CODE_IN_VLAN_XLT_MISS = 176, /* PPE cpu code in vlan xlt miss */ + NSS_PPE_STATS_CPU_CODE_EG_VLAN_FILTER_DROP = 177, /* PPE cpu code eg vlan filter drop */ + NSS_PPE_STATS_CPU_CODE_ACL_PRE_ACTION = 178, /* PPE cpu code acl pre action */ + NSS_PPE_STATS_CPU_CODE_ACL_POST_ACTION = 179, /* PPE cpu code acl post action */ + NSS_PPE_STATS_CPU_CODE_SERVICE_CODE_ACTION = 180, /* PPE cpu code service code action */ +}; + +/* + * nss_ppe_sc_stats_debug + */ +struct nss_ppe_sc_stats_debug { + uint64_t nss_ppe_sc_cb_unregister; /* Per service-code counter for callback not registered */ + uint64_t nss_ppe_sc_cb_success; /* Per service-code counter for successful callback */ + uint64_t nss_ppe_sc_cb_failure; /* Per service-code counter for failure callback */ +}; + +/* + * NSS PPE statistics + */ +struct nss_ppe_stats_debug { + uint64_t conn_stats[NSS_PPE_STATS_CONN_MAX]; + uint32_t l3_stats[NSS_PPE_STATS_L3_MAX]; + uint32_t code_stats[NSS_PPE_STATS_CODE_MAX]; + struct nss_ppe_sc_stats_debug sc_stats[NSS_PPE_SC_MAX]; + int32_t if_index; + uint32_t if_num; /* nss interface number */ + bool valid; +}; + +/* + * Data structures to store NSS PPE debug 
statistics + */ +extern struct nss_ppe_stats_debug nss_ppe_debug_stats; + +/** + * nss_ppe_stats_notification + * PPE transmission statistics structure. + */ +struct nss_ppe_stats_notification { + struct nss_ppe_sc_stats_debug ppe_stats_sc[NSS_PPE_SC_MAX]; /* PPE service code stats. */ + uint64_t ppe_stats_conn[NSS_PPE_STATS_CONN_MAX]; /* PPE connection statistics. */ + uint32_t core_id; /* Core ID. */ + uint32_t if_num; /* Interface number. */ +}; + +/* + * NSS PPE statistics APIs + */ +extern void nss_ppe_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_ppe_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_ppe_sync_stats_msg *stats_msg, uint16_t if_num); +extern void nss_ppe_stats_dentry_create(void); + +#endif /* __NSS_PPE_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_strings.c new file mode 100644 index 000000000..294996449 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_strings.c @@ -0,0 +1,532 @@ +/* + *************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ *************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_ppe.h" +#include "nss_strings.h" +#include "nss_ppe_strings.h" + +/* + * nss_ppe_stats_str_conn + * PPE statistics strings for nss flow stats + */ +struct nss_stats_info nss_ppe_stats_str_conn[NSS_PPE_STATS_CONN_MAX] = { + {"v4 routed flows", NSS_STATS_TYPE_SPECIAL}, + {"v4 bridge flows", NSS_STATS_TYPE_SPECIAL}, + {"v4 conn create req", NSS_STATS_TYPE_SPECIAL}, + {"v4 conn create fail", NSS_STATS_TYPE_SPECIAL}, + {"v4 conn destroy req", NSS_STATS_TYPE_SPECIAL}, + {"v4 conn destroy fail", NSS_STATS_TYPE_SPECIAL}, + {"v4 conn MC create req", NSS_STATS_TYPE_SPECIAL}, + {"v4 conn MC create fail", NSS_STATS_TYPE_SPECIAL}, + {"v4 conn MC update req", NSS_STATS_TYPE_SPECIAL}, + {"v4 conn MC update fail", NSS_STATS_TYPE_SPECIAL}, + {"v4 conn MC delete req", NSS_STATS_TYPE_SPECIAL}, + {"v4 conn MC delete fail", NSS_STATS_TYPE_SPECIAL}, + {"v4 conn unknown if", NSS_STATS_TYPE_SPECIAL}, + {"v6 routed flows", NSS_STATS_TYPE_SPECIAL}, + {"v6 bridge flows", NSS_STATS_TYPE_SPECIAL}, + {"v6 conn create req", NSS_STATS_TYPE_SPECIAL}, + {"v6 conn create fail", NSS_STATS_TYPE_SPECIAL}, + {"v6 conn destroy req", NSS_STATS_TYPE_SPECIAL}, + {"v6 conn destroy fail", NSS_STATS_TYPE_SPECIAL}, + {"v6 conn MC create req", NSS_STATS_TYPE_SPECIAL}, + {"v6 conn MC create fail", NSS_STATS_TYPE_SPECIAL}, + {"v6 conn MC update req", NSS_STATS_TYPE_SPECIAL}, + {"v6 conn MC update fail", NSS_STATS_TYPE_SPECIAL}, + {"v6 conn MC delete req", NSS_STATS_TYPE_SPECIAL}, + {"v6 conn MC delete fail", NSS_STATS_TYPE_SPECIAL}, + {"v6 conn unknown if", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - vp full", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - nexthop full", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - flow full", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - host full", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - pub-ip full", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - port not 
setup", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - rw fifo full", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - flow cmd failure", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - unknown proto", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - ppe not responding", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - CE opaque invalid", NSS_STATS_TYPE_SPECIAL}, + {"conn fail - fqg full", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_ppe_stats_str_conn_strings_read() + * Read ppe NSS flow statistics names + */ +static ssize_t nss_ppe_stats_str_conn_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ppe_stats_str_conn, NSS_PPE_STATS_CONN_MAX); +} + +/* + * nss_ppe_stats_str_conn_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ppe_stats_str_conn); + +/* + * nss_ppe_stats_str_l3 + * PPE statistics strings for nss debug stats + */ +struct nss_stats_info nss_ppe_stats_str_l3[NSS_PPE_STATS_L3_MAX] = { + {"L3 dbg reg 0", NSS_STATS_TYPE_SPECIAL}, + {"L3 dbg reg 1", NSS_STATS_TYPE_SPECIAL}, + {"L3 dbg reg 2", NSS_STATS_TYPE_SPECIAL}, + {"L3 dbg reg 3", NSS_STATS_TYPE_SPECIAL}, + {"L3 dbg reg 4", NSS_STATS_TYPE_SPECIAL}, + {"L3 dbg reg port", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_ppe_stats_str_l3_strings_read() + * Read ppe NSS debug statistics names + */ +static ssize_t nss_ppe_stats_str_l3_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ppe_stats_str_l3, NSS_PPE_STATS_L3_MAX); +} + +/* + * nss_ppe_stats_str_l3_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ppe_stats_str_l3); + +/* + * nss_ppe_stats_str_code + * PPE statistics strings for nss debug stats + */ +struct nss_stats_info nss_ppe_stats_str_code[NSS_PPE_STATS_CODE_MAX] = { + {"CPU_CODE", NSS_STATS_TYPE_SPECIAL}, + {"DROP_CODE", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_ppe_stats_str_code_strings_read() + * Read ppe NSS debug statistics names + */ +static ssize_t 
nss_ppe_stats_str_code_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ppe_stats_str_code, NSS_PPE_STATS_CODE_MAX); +} + +/* + * nss_ppe_stats_str_code_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ppe_stats_str_code); + +/* + * nss_ppe_stats_str_dc + * PPE statistics strings for drop code + */ +struct nss_stats_info nss_ppe_stats_str_dc[NSS_PPE_STATS_DROP_CODE_MAX] = { + {"DROP_CODE_NONE", NSS_STATS_TYPE_SPECIAL}, + {"DROP_CODE_EXP_UNKNOWN_L2_PORT", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_PPPOE_WRONG_VER_TYPE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_PPPOE_WRONG_CODE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_PPPOE_UNSUPPORTED_PPP_PROT", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_WRONG_VER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_SMALL_IHL", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_WITH_OPTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_HDR_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_BAD_TOTAL_LEN", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_DATA_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_FRAG", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_PING_OF_DEATH", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_SNALL_TTL", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_UNK_IP_PROT", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_CHECKSUM_ERR", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_INV_SIP", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_INV_DIP", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_LAND_ATTACK", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_AH_HDR_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_AH_HDR_CROSS_BORDER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV4_ESP_HDR_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_WRONG_VER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_HDR_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_BAD_PAYLOAD_LEN", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_DATA_INCOMPLETE", 
NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_WITH_EXT_HDR", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_SMALL_HOP_LIMIT", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_INV_SIP", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_INV_DIP", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_LAND_ATTACK", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_FRAG", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_PING_OF_DEATH", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_WITH_MORE_EXT_HDR", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_UNK_LAST_NEXT_HDR", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_MOBILITY_HDR_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_MOBILITY_HDR_CROSS_BORDER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_AH_HDR_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_AH_HDR_CROSS_BORDER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_ESP_HDR_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_ESP_HDR_CROSS_BORDER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_OTHER_EXT_HDR_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_IPV6_OTHER_EXT_HDR_CROSS_BORDER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_HDR_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_HDR_CROSS_BORDER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_SMAE_SP_DP", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_SMALL_DATA_OFFSET", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_FLAGS_0", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_FLAGS_1", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_FLAGS_2", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_FLAGS_3", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_FLAGS_4", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_FLAGS_5", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_FLAGS_6", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_FLAGS_7", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_TCP_CHECKSUM_ERR", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_HDR_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_HDR_CROSS_BORDER", NSS_STATS_TYPE_DROP}, + 
{"DROP_CODE_EXP_UDP_SMAE_SP_DP", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_BAD_LEN", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_DATA_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_CHECKSUM_ERR", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_LITE_HDR_INCOMPLETE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_LITE_HDR_CROSS_BORDER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_LITE_SMAE_SP_DP", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_LITE_CSM_COV_1_TO_7", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_LITE_CSM_COV_TOO_LONG", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_LITE_CSM_COV_CROSS_BORDER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EXP_UDP_LITE_CHECKSUM_ERR", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_MC_BRIDGE_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_NO_ROUTE_PREHEAD_NAT_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_NO_ROUTE_PREHEAD_NAT_ERROR", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_ROUTE_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_NO_ROUTE_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_NO_ROUTE_NH_INVALID_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_NO_ROUTE_PREHEAD_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_BRIDGE_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_FLOW_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_FLOW_MISS_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L2_EXP_MRU_FAIL", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L2_EXP_MTU_FAIL", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_EXP_IP_PREFIX_BC", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_EXP_MTU_FAIL", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_EXP_MRU_FAIL", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_EXP_ICMP_RDT", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_FAKE_MAC_HEADER_ERR", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_EXP_IP_RT_TTL_ZERO", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_FLOW_SERVICE_CODE_LOOP", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_FLOW_DE_ACCELEARTE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_EXP_FLOW_SRC_IF_CHK_FAIL", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_FLOW_SYNC_TOGGLE_MISMATCH", 
NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_EXP_MTU_DF_FAIL", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L3_EXP_PPPOE_MULTICAST", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_IPV4_SG_UNKNOWN", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_IPV6_SG_UNKNOWN", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_ARP_SG_UNKNOWN", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_ND_SG_UNKNOWN", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_IPV4_SG_VIO", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_IPV6_SG_VIO", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_ARP_SG_VIO", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_ND_SG_VIO", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L2_NEW_MAC_ADDRESS", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L2_HASH_COLLISION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L2_STATION_MOVE", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L2_LEARN_LIMIT", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L2_SA_LOOKUP_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_L2_DA_LOOKUP_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_APP_CTRL_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_IN_VLAN_FILTER_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_IN_VLAN_XLT_MISS", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_EG_VLAN_FILTER_DROP", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_ACL_PRE_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_ACL_POST_ACTION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_MC_BC_SA", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_NO_DESTINATION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_STG_IN_FILTER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_STG_EG_FILTER", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_SOURCE_FILTER_FAIL", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_TRUNK_SEL_FAIL", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_TX_EN_FAIL", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_VLAN_TAG_FMT", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_CRC_ERR", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_PAUSE_FRAME", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_PROMISC", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_ISOLATION", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_MGMT_APP", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_FAKE_L2_PROT_ERR", NSS_STATS_TYPE_DROP}, + {"DROP_CODE_POLICER", NSS_STATS_TYPE_DROP} +}; + +/* + * 
nss_ppe_stats_str_dc_strings_read() + * Read ppe drop code statistics names + */ +static ssize_t nss_ppe_stats_str_dc_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ppe_stats_str_dc, NSS_PPE_STATS_DROP_CODE_MAX); +} + +/* + * nss_ppe_stats_str_dc_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ppe_stats_str_dc); + +/* + * nss_ppe_stats_str_sc + * PPE statistics strings for service-code stats + */ +struct nss_stats_info nss_ppe_stats_str_sc[NSS_PPE_STATS_SERVICE_CODE_MAX] = { + {"cb_unregister", NSS_STATS_TYPE_SPECIAL}, + {"process_ok", NSS_STATS_TYPE_SPECIAL}, + {"process_fail", NSS_STATS_TYPE_ERROR} +}; + +/* + * nss_ppe_stats_str_sc_strings_read() + * Read ppe service code statistics names + */ +static ssize_t nss_ppe_stats_str_sc_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ppe_stats_str_sc, NSS_PPE_STATS_SERVICE_CODE_MAX); +} + +/* + * nss_ppe_stats_str_sc_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ppe_stats_str_sc); + +/* + * nss_ppe_stats_str_cc + * PPE statistics strings for cpu code + */ +struct nss_stats_info nss_ppe_stats_str_cc[NSS_PPE_STATS_CPU_CODE_MAX] = { + {"CPU_CODE_FORWARDING", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_UNKNOWN_L2_PROT", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_PPPOE_WRONG_VER_TYPE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_WRONG_CODE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_PPPOE_UNSUPPORTED_PPP_PROT", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_WRONG_VER", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_SMALL_IHL", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_WITH_OPTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_HDR_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV4_BAD_TOTAL_LEN", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_DATA_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_IPV4_FRAG", NSS_STATS_TYPE_EXCEPTION}, + 
{"CPU_CODE_EXP_IPV4_PING_OF_DEATH", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_SNALL_TTL", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV4_UNK_IP_PROT", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_CHECKSUM_ERR", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_INV_SIP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_INV_DIP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_LAND_ATTACK", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV4_AH_HDR_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV4_AH_CROSS_BORDER", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV4_ESP_HDR_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_WRONG_VER", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_HDR_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_BAD_PAYLOAD_LEN", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_DATA_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_WITH_EXT_HDR", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_SMALL_HOP_LIMIT", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_INV_SIP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_INV_DIP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_LAND_ATTACK", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_IPV6_FRAG", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_PING_OF_DEATH", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_WITH_EXT_HDR", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_UNK_NEXT_HDR", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_MOBILITY_HDR_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_MOBILITY_CROSS_BORDER", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_AH_HDR_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_AH_CROSS_BORDER", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_ESP_HDR_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_ESP_CROSS_BORDER", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_OTHER_HDR_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_IPV6_OTHER_EXT_CROSS_BORDER", NSS_STATS_TYPE_EXCEPTION}, + 
{"CPU_CODE_EXP_HDR_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_TCP_HDR_CROSS_BORDER", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_TCP_SMAE_SP_DP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_TCP_SMALL_DATA_OFFSET", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_FLAGS_0", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_FLAGS_1", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_FLAGS_2", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_FLAGS_3", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_FLAGS_4", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_FLAGS_5", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_FLAGS_6", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_FLAGS_7", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_CHECKSUM_ERR", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_HDR_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_UDP_HDR_CROSS_BORDER", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_UDP_SMAE_SP_DP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_BAD_LEN", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_DATA_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_CHECKSUM_ERR", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_UDP_LITE_HDR_INCOMPLETE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_UDP_LITE_CROSS_BORDER", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_UDP_LITE_SP_DP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_UDP_LITE_CSM_COV_TO_7", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_UDP_LITE_CSM_TOO_LONG", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_UDP_LITE_CSM_CROSS_BORDER", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_UDP_LITE_CHECKSUM_ERR", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_FAKE_L2_PROT_ERR", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EXP_FAKE_MAC_HEADER_ERR", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_BITMAP_MAX", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L2_MRU_FAIL", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L2_MTU_FAIL", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_EXP_IP_PREFIX_BC", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_MTU_FAIL", NSS_STATS_TYPE_EXCEPTION}, + 
{"CPU_CODE_L3_MRU_FAIL", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_ICMP_RDT", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_EXP_IP_RT_TO_ME", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_EXP_IP_TTL_ZERO", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_FLOW_SERVICE_CODE_LOOP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_DE_ACCELERATE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_EXP_FLOW_SRC_CHK_FAIL", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_FLOW_SYNC_TOGGLE_MISMATCH", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_EXP_MTU_DF_FAIL", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_PPPOE_MULTICAST", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_MGMT_OFFSET", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_MGMT_EAPOL", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_PPPOE_DIS", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_MGMT_IGMP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_ARP_REQ", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_ARP_REP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_MGMT_DHCPv4", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_MGMT_MLD", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_MGMT_NS", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_MGMT_NA", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_MGMT_DHCPv6", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_PTP_OFFSET", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_PTP_SYNC", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_FOLLOW_UP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_DELAY_REQ", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_DELAY_RESP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_PDELAY_REQ", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_PDELAY_RESP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_PTP_PDELAY_RESP_FOLLOW_UP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_PTP_ANNOUNCE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_PTP_MANAGEMENT", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_PTP_SIGNALING", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_PTP_RSV_MSG", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_SG_UNKNOWN", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_SG_UNKNOWN", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_SG_UNKNOWN", 
NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_SG_UNKNOWN", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_SG_VIO", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_SG_VIO", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_SG_VIO", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_SG_VIO", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_ROUTING_IP_TO_ME", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_SNAT_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_DNAT_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_RT_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_BR_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_BRIDGE_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_ROUTE_PREHEAD_RT_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_ROUTE_PREHEAD_SNAPT_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_ROUTE_PREHEAD_DNAPT_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_ROUTE_PREHEAD_SNAT_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_ROUTE_PREHEAD_DNAT_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_NO_ROUTE_NAT_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_NO_ROUTE_NAT_ERROR", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_ROUTE_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_ROUTE_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_NO_ROUTE_INVALID_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_NO_ROUTE_PREHEAD_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_BRIDGE_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_FLOW_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L3_MISS_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L2_MAC_ADDRESS", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_HASH_COLLISION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_STATION_MOVE", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_LEARN_LIMIT", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L2_LOOKUP_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_L2_LOOKUP_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_CTRL_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_IN_FILTER_ACTION", NSS_STATS_TYPE_EXCEPTION}, + 
{"CPU_CODE_IN_XLT_MISS", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_EG_FILTER_DROP", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_PRE_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_POST_ACTION", NSS_STATS_TYPE_EXCEPTION}, + {"CPU_CODE_CODE_ACTION", NSS_STATS_TYPE_EXCEPTION} +}; + +/* + * nss_ppe_stats_str_cc_strings_read() + * Read ppe cpu code statistics names + */ +static ssize_t nss_ppe_stats_str_cc_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_ppe_stats_str_cc, NSS_PPE_STATS_CPU_CODE_MAX); +} + +/* + * nss_ppe_stats_str_cc_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(ppe_stats_str_cc); + +/* + * nss_ppe_strings_dentry_create() + * Create ppe statistics strings debug entry. + */ +void nss_ppe_strings_dentry_create(void) +{ + struct dentry *ppe_d = NULL; + + if (!nss_top_main.strings_dentry) { + nss_warning("qca-nss-drv/strings is not present"); + return; + } + + ppe_d = debugfs_create_dir("ppe", nss_top_main.strings_dentry); + if (!ppe_d) { + nss_warning("Failed to create qca-nss-drv/strings/ppe directory"); + return; + } + + if (!debugfs_create_file("stats_str_conn", 0400, ppe_d, &nss_top_main, &nss_ppe_stats_str_conn_strings_ops)) { + nss_warning("Failed to create qca-nss-drv/strings/ppe/stats_str_conn file"); + debugfs_remove_recursive(ppe_d); + return; + } + + if (!debugfs_create_file("stats_str_sc", 0400, ppe_d, &nss_top_main, &nss_ppe_stats_str_sc_strings_ops)) { + nss_warning("Failed to create qca-nss-drv/strings/ppe/stats_str_sc file"); + debugfs_remove_recursive(ppe_d); + return; + } + + if (!debugfs_create_file("stats_str_l3", 0400, ppe_d, &nss_top_main, &nss_ppe_stats_str_l3_strings_ops)) { + nss_warning("Failed to create qca-nss-drv/strings/ppe/stats_str_l3 file"); + debugfs_remove_recursive(ppe_d); + return; + } + + if (!debugfs_create_file("stats_str_code", 0400, ppe_d, &nss_top_main, &nss_ppe_stats_str_code_strings_ops)) { + nss_warning("Failed to create 
qca-nss-drv/strings/ppe/stats_str_code file"); + debugfs_remove_recursive(ppe_d); + return; + } + + if (!debugfs_create_file("stats_str_dc", 0400, ppe_d, &nss_top_main, &nss_ppe_stats_str_dc_strings_ops)) { + nss_warning("Failed to create qca-nss-drv/strings/ppe/stats_str_dc file"); + debugfs_remove_recursive(ppe_d); + return; + } + + if (!debugfs_create_file("stats_str_cc", 0400, ppe_d, &nss_top_main, &nss_ppe_stats_str_cc_strings_ops)) { + nss_warning("Failed to create qca-nss-drv/strings/ppe/stats_str_cc file"); + debugfs_remove_recursive(ppe_d); + return; + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_strings.h new file mode 100644 index 000000000..e8fe77ec2 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_strings.h @@ -0,0 +1,32 @@ +/* + *************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ *************************************************************************** + */ + +#ifndef __NSS_PPE_STRINGS_H +#define __NSS_PPE_STRINGS_H + +#include "nss_ppe_stats.h" + +extern struct nss_stats_info nss_ppe_stats_str_conn[NSS_PPE_STATS_CONN_MAX]; +extern struct nss_stats_info nss_ppe_stats_str_sc[NSS_PPE_STATS_SERVICE_CODE_MAX]; +extern struct nss_stats_info nss_ppe_stats_str_l3[NSS_PPE_STATS_L3_MAX]; +extern struct nss_stats_info nss_ppe_stats_str_code[NSS_PPE_STATS_CODE_MAX]; +extern struct nss_stats_info nss_ppe_stats_str_dc[NSS_PPE_STATS_DROP_CODE_MAX]; +extern struct nss_stats_info nss_ppe_stats_str_cc[NSS_PPE_STATS_CPU_CODE_MAX]; +extern void nss_ppe_strings_dentry_create(void); + +#endif /* __NSS_PPE_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp.c b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp.c new file mode 100644 index 000000000..a5411cec9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp.c @@ -0,0 +1,864 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * Header file for qca-ssdk APIs + */ +#include + +#include "nss_ppe_vp.h" +#include "nss_ppe_vp_stats.h" + +#define NSS_PPE_VP_TX_TIMEOUT 1000 /* 1 Second */ + +static struct nss_vp_mapping *vp_map[NSS_MAX_DYNAMIC_INTERFACES] = {NULL}; +unsigned char nss_ppe_vp_cmd[NSS_PPE_VP_MAX_CMD_STR] __read_mostly; + +/* + * Private data structure + */ +static struct nss_ppe_vp_pvt { + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; + nss_ppe_port_t ppe_port_num; +} ppe_vp_pvt; + +DEFINE_SPINLOCK(nss_ppe_vp_stats_lock); +DEFINE_SPINLOCK(nss_ppe_vp_map_lock); + +struct nss_ppe_vp_stats_debug nss_ppe_vp_debug_stats; +static struct dentry *nss_ppe_vp_dentry; + +/* + * nss_ppe_vp_get_map_index() + * Get the index of the NSS-VP number mapping array. + */ +static inline int32_t nss_ppe_vp_get_map_index(nss_if_num_t if_num) +{ + return (if_num - NSS_DYNAMIC_IF_START); +} + +/* + * nss_ppe_vp_verify_ifnum() + * Verify PPE VP interface number. + */ +static inline bool nss_ppe_vp_verify_ifnum(int if_num) +{ + return (if_num == NSS_PPE_VP_INTERFACE); +} + +/* + * nss_ppe_vp_map_dealloc() + * Deallocate memory for the NSS interface number and PPE VP number mapping. + */ +static inline void nss_ppe_vp_map_dealloc(struct nss_vp_mapping *map) +{ + vfree(map); +} + +/* + * nss_ppe_vp_map_alloc() + * Allocate memory for the NSS interface number and PPE VP number mapping. + */ +static inline struct nss_vp_mapping *nss_ppe_vp_map_alloc(void) +{ + struct nss_vp_mapping *nss_vp_info = vzalloc(sizeof(struct nss_vp_mapping)); + if (!nss_vp_info) { + nss_warning("No memory for allocating NSS-VP mapping instance"); + } + + return nss_vp_info; +} + +/* + * nss_ppe_vp_proc_help() + * Print usage information for ppe_vp configure sysctl. 
+ */ +static void nss_ppe_vp_proc_help(void) +{ + nss_info_always("== for dynamic interface types read following file =="); + nss_info_always("/sys/kernel/debug/qca-nss-drv/stats/dynamic_if/type_names"); + nss_info_always("NSS PPE VP create: echo > /proc/sys/nss/ppe_vp/create"); + nss_info_always("NSS PPE VP destroy: echo > /proc/sys/nss/ppe_vp/destroy"); +} + +/* + * nss_ppe_vp_del_map() + * Delete mapping between NSS interface number and VP number. + */ +static bool nss_ppe_vp_del_map(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num) +{ + int32_t idx; + nss_ppe_port_t ppe_port_num; + struct nss_vp_mapping *nss_vp_info; + uint16_t vp_index; + + nss_assert((if_num >= NSS_DYNAMIC_IF_START) && (if_num < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))); + + idx = nss_ppe_vp_get_map_index(if_num); + if ((idx < 0) || (idx >= NSS_MAX_DYNAMIC_INTERFACES)) { + nss_warning("%px: Invalid index. Cannot delete the PPE VP mapping. idx:%u", nss_ctx, idx); + return false; + } + + spin_lock_bh(&nss_ppe_vp_map_lock); + nss_vp_info = vp_map[idx]; + if (!nss_vp_info) { + spin_unlock_bh(&nss_ppe_vp_map_lock); + nss_warning("%px: Could not find the vp num in the mapping. NSS if num:%u", nss_ctx, if_num); + return false; + } + + ppe_port_num = nss_vp_info->ppe_port_num; + + nss_ppe_vp_map_dealloc(nss_vp_info); + vp_map[idx] = NULL; + spin_unlock_bh(&nss_ppe_vp_map_lock); + + /* + * Clear the PPE VP stats once PPE VP is deleted + */ + vp_index = ppe_port_num - NSS_PPE_VP_START; + spin_lock_bh(&nss_ppe_vp_stats_lock); + memset(&nss_ppe_vp_debug_stats.vp_stats[vp_index], 0, sizeof(struct nss_ppe_vp_statistics_debug)); + spin_unlock_bh(&nss_ppe_vp_stats_lock); + + nss_info("%px: Deleted NSS interface number and PPE VP number mapping successfully: NSS if num:%u at index:%u", nss_ctx, if_num, idx); + + return true; +} + +/* + * nss_ppe_vp_add_map() + * Add mapping between NSS interface number and VP number. 
+ */ +static bool nss_ppe_vp_add_map(struct nss_ctx_instance *nss_ctx ,nss_if_num_t if_num, struct nss_vp_mapping *nss_vp_info) +{ + uint32_t idx; + nss_ppe_port_t ppe_port_num; + + nss_assert((if_num >= NSS_DYNAMIC_IF_START) && (if_num < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))); + + if (!nss_vp_info) { + nss_warning("%px: Received invalid argument.", nss_ctx); + return false; + } + + idx = nss_ppe_vp_get_map_index(if_num); + if ((idx < 0) || (idx >= NSS_MAX_DYNAMIC_INTERFACES)) { + nss_warning("%px: Invalid index. Cannot add the PPE VP mapping. idx:%u", nss_ctx, idx); + return false; + } + + spin_lock_bh(&nss_ppe_vp_map_lock); + if (vp_map[idx]) { + spin_unlock_bh(&nss_ppe_vp_map_lock); + nss_warning("%px: Mapping exists already. NSS if num:%d index:%u, VP num:%u", nss_ctx, if_num, idx, vp_map[idx]->ppe_port_num); + return false; + } + + vp_map[idx] = nss_vp_info; + ppe_port_num = vp_map[idx]->ppe_port_num; + spin_unlock_bh(&nss_ppe_vp_map_lock); + + nss_info("%px: Mapping added successfully. NSS if num:%d index:%u, VP num:%u", nss_ctx, if_num, idx, ppe_port_num); + + return true; +} + +/* + * nss_ppe_vp_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_ppe_vp_callback(void *app_data, struct nss_ppe_vp_msg *npvm) +{ + if (npvm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("ppe_vp error response %d", npvm->cm.response); + ppe_vp_pvt.response = NSS_TX_FAILURE; + complete(&ppe_vp_pvt.complete); + return; + } + + if (npvm->cm.type == NSS_IF_PPE_PORT_CREATE) { + ppe_vp_pvt.ppe_port_num = npvm->msg.if_msg.ppe_port_create.ppe_port_num; + nss_trace("PPE VP callback success VP num: %u", npvm->msg.if_msg.ppe_port_create.ppe_port_num); + } + ppe_vp_pvt.response = NSS_TX_SUCCESS; + + complete(&ppe_vp_pvt.complete); +} + +/* + * nss_ppe_vp_parse_vp_cmd() + * Parse PPE VP create and destroy message and return the NSS interface number. 
+ * Command usage: + * echo /proc/sys/nss/ppe_vp/create> + * echo ath0 6 > /proc/sys/nss/ppe_vp/create + * Since ath0 has only one type i.e. ath0 is NSS_DYNAMIC_INTERFACE_TYPE_VAP, the above command can be rewritten as + * echo ath0 > /proc/sys/nss/ppe_vp/create => Here 6 can be ignored. + */ +static nss_if_num_t nss_ppe_vp_parse_vp_cmd(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int32_t if_num; + struct net_device *dev; + uint32_t dynamic_if_type = (uint32_t)NSS_DYNAMIC_INTERFACE_TYPE_NONE; + struct nss_ctx_instance *nss_ctx = nss_ppe_vp_get_context(); + char *pos; + char cmd_buf[NSS_PPE_VP_MAX_CMD_STR] = {0}, dev_name[NSS_PPE_VP_MAX_CMD_STR] = {0}; + size_t count = *lenp; + int ret = proc_dostring(ctl, write, buffer, lenp, ppos); + + if (!write) { + nss_ppe_vp_proc_help(); + return ret; + } + + if (!nss_ctx) { + nss_warning("%px: NSS Context not found.", nss_ctx); + return -ENODEV; + } + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (count >= NSS_PPE_VP_MAX_CMD_STR) { + nss_ppe_vp_proc_help(); + nss_warning("%px: Input string too big", nss_ctx); + return -E2BIG; + } + + if (copy_from_user(cmd_buf, buffer, count)) { + nss_warning("%px: Cannot copy user's entry to kernel memory", nss_ctx); + return -EFAULT; + } + + if ((pos = strrchr(cmd_buf, '\n')) != NULL) { + *pos = '\0'; + } + + if (sscanf(cmd_buf, "%s %u", dev_name, &dynamic_if_type) < 0) { + nss_warning("%px: PPE VP command parse failed", nss_ctx); + return -EFAULT; + } + + dev = dev_get_by_name(&init_net, dev_name); + if (!dev) { + nss_warning("%px: Cannot find the net device", nss_ctx); + return -ENODEV; + } + + nss_info("%px: Dynamic interface type: %u", nss_ctx, dynamic_if_type); + if ((dynamic_if_type < NSS_DYNAMIC_INTERFACE_TYPE_NONE) || (dynamic_if_type >= NSS_DYNAMIC_INTERFACE_TYPE_MAX)) { + nss_warning("%px: Invalid dynamic interface type: %d", nss_ctx, dynamic_if_type); + dev_put(dev); + return -EFAULT; + } + + if_num = 
nss_cmn_get_interface_number_by_dev_and_type(dev, dynamic_if_type); + if (if_num < 0) { + nss_warning("%px: Invalid interface number:%s", nss_ctx, dev_name); + dev_put(dev); + return -EFAULT; + } + + nss_info("%px: PPE VP create/destroy for, nss_if_num:%d dev_name:%s dynamic_if_type:%u", nss_ctx, if_num, dev_name, dynamic_if_type); + dev_put(dev); + + return if_num; +} + +/* + * nss_ppe_vp_tx_msg() + * Transmit a ppe_vp message to NSS FW + */ +nss_tx_status_t nss_ppe_vp_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_ppe_vp_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + nss_if_num_t if_num = ncm->interface; + + /* + * Trace messages. + */ + nss_ppe_vp_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (!((ncm->type == NSS_IF_PPE_PORT_CREATE) || (ncm->type == NSS_IF_PPE_PORT_DESTROY))) { + nss_warning("%px: Invalid message type: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + if (!(if_num >= NSS_DYNAMIC_IF_START && (if_num < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES)))) { + nss_warning("%px: invalid interface %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_ppe_vp_tx_msg_sync() + * Transmit a ppe_vp message to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_ppe_vp_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_ppe_vp_msg *npvm) +{ + nss_tx_status_t status; + int ret = 0; + + down(&ppe_vp_pvt.sem); + + status = nss_ppe_vp_tx_msg(nss_ctx, npvm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: ppe_tx_msg failed", nss_ctx); + up(&ppe_vp_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&ppe_vp_pvt.complete, msecs_to_jiffies(NSS_PPE_VP_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: ppe_vp msg tx failed due to timeout", nss_ctx); + ppe_vp_pvt.response = NSS_TX_FAILURE; + } + + status = ppe_vp_pvt.response; + up(&ppe_vp_pvt.sem); + + return status; +} + +/* + * nss_ppe_vp_get_context() + * Get NSS context instance for ppe_vp + */ +struct nss_ctx_instance *nss_ppe_vp_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.ppe_handler_id]; +} +EXPORT_SYMBOL(nss_ppe_vp_get_context); + +/* + * nss_ppe_vp_get_ppe_port_by_nssif() + * Get vp number for a given NSS interface number. 
+ */ +nss_ppe_port_t nss_ppe_vp_get_ppe_port_by_nssif(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num) +{ + uint32_t idx; + nss_ppe_port_t ppe_port_num; + + if (!((if_num >= NSS_DYNAMIC_IF_START) && (if_num < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES)))) { + nss_warning("%px: NSS invalid nss if num: %u", nss_ctx, if_num); + return -1; + } + + idx = nss_ppe_vp_get_map_index(if_num); + if (idx < 0 || idx >= NSS_MAX_DYNAMIC_INTERFACES) { + nss_warning("%px: NSS invalid index: %d nss if num: %u",nss_ctx, idx, if_num); + return -1; + } + + spin_lock_bh(&nss_ppe_vp_map_lock); + if (!vp_map[idx]) { + spin_unlock_bh(&nss_ppe_vp_map_lock); + nss_warning("%px: NSS interface and VP mapping is not present for nss if num: %u",nss_ctx, if_num); + return -1; + } + ppe_port_num = vp_map[idx]->ppe_port_num; + spin_unlock_bh(&nss_ppe_vp_map_lock); + + nss_info("%px: VP num %d nss_if: %d",nss_ctx, ppe_port_num, if_num); + + return ppe_port_num; +} +EXPORT_SYMBOL(nss_ppe_vp_get_ppe_port_by_nssif); + +/* + * nss_ppe_vp_destroy() + * Destroy PPE virtual port for the given nss interface number. + */ +nss_tx_status_t nss_ppe_vp_destroy(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num) +{ + nss_tx_status_t status; + struct nss_ppe_vp_msg *npvm; + uint32_t idx; + int32_t vsi_id_valid = false; + int32_t vsi_id; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + idx = nss_ppe_vp_get_map_index(if_num); + if (idx < 0 || idx >= NSS_MAX_DYNAMIC_INTERFACES) { + nss_warning("%px: Cannot destroy PPE VP. Invalid index: %d. nss_if_num: %u", nss_ctx, idx, if_num); + return -1; + } + + spin_lock_bh(&nss_ppe_vp_map_lock); + if (vp_map[idx]) { + vsi_id = vp_map[idx]->vsi_id; + vsi_id_valid = vp_map[idx]->vsi_id_valid; + } + spin_unlock_bh(&nss_ppe_vp_map_lock); + + if (vsi_id_valid) { + /* + * Send the dettach VSI message to the Firmware. + */ + if (nss_if_vsi_unassign(nss_ctx, if_num, vsi_id)) { + nss_warning("%px: PPE VP destroy failed. 
Failed to detach VSI to PPE VP interface %d vsi:%d", nss_ctx, if_num, vsi_id); + return NSS_TX_FAILURE; + } + + if (ppe_vsi_free(NSS_PPE_VP_SWITCH_ID, vsi_id)) { + nss_warning("%px: PPE VP destroy failed. Failed to free PPE VSI. nss_if:%d vsi:%d", nss_ctx, if_num, vsi_id); + return NSS_TX_FAILURE; + } + + nss_info("%px: PPE VP VSI detached successfully. VSI ID freed successfully. NSS if num:%u, VSI ID:%u", nss_ctx, if_num, vsi_id); + } + + npvm = kzalloc(sizeof(struct nss_ppe_vp_msg), GFP_KERNEL); + if (!npvm) { + nss_warning("%px: Unable to allocate memeory of PPE VP message", nss_ctx); + return NSS_TX_FAILURE; + } + + nss_trace("%px: PPE_VP will be destroyed for an interface: %d", nss_ctx, if_num); + + /* + * Destroy PPE VP for a dynamic interface. + */ + nss_cmn_msg_init(&npvm->cm, if_num, NSS_IF_PPE_PORT_DESTROY, 0, nss_ppe_vp_callback, NULL); + + status = nss_ppe_vp_tx_msg_sync(nss_ctx, npvm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Unable to send PPE VP destroy message", nss_ctx); + kfree(npvm); + return NSS_TX_FAILURE; + } + + kfree(npvm); + + /* + * Delete mapping between the NSS interface number and the VP number. + */ + if (!nss_ppe_vp_del_map(nss_ctx, if_num)) { + nss_warning("%px: Failed to delete the mapping for nss_if:%d", nss_ctx, if_num); + return NSS_TX_FAILURE; + } + + return status; +} +EXPORT_SYMBOL(nss_ppe_vp_destroy); + +/* + * nss_ppe_vp_create() + * Create PPE virtual port for the given nss interface number. + */ +nss_tx_status_t nss_ppe_vp_create(struct nss_ctx_instance *nss_ctx, nss_if_num_t if_num) +{ + uint32_t vsi_id; + nss_tx_status_t status; + struct nss_ppe_vp_msg *npvm; + struct nss_vp_mapping *nss_vp_info; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + /* + * TODO: No need to create VSI for tunnel interfaces. Only for VAP interfaces VSI is needed. + * Allocate the VSI for the dynamic interface on which VP will be created. 
+ */ + if (ppe_vsi_alloc(NSS_PPE_VP_SWITCH_ID, &vsi_id)) { + nss_warning("%px, Failed to alloc VSI ID, PPE VP create failed. nss_if:%u", nss_ctx, if_num); + return NSS_TX_FAILURE; + } + + npvm = kzalloc(sizeof(struct nss_ppe_vp_msg), GFP_KERNEL); + if (!npvm) { + nss_warning("%px: Unable to allocate memeory of PPE VP message", nss_ctx); + goto free_vsi; + } + + nss_trace("%px: PPE_VP will be created for an interface: %d", nss_ctx, if_num); + + /* + * Create PPE VP for a dynamic interface. + */ + nss_cmn_msg_init(&npvm->cm, if_num, NSS_IF_PPE_PORT_CREATE, + sizeof(struct nss_if_ppe_port_create), nss_ppe_vp_callback, NULL); + + status = nss_ppe_vp_tx_msg_sync(nss_ctx, npvm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Unable to send ppe_vp create message", nss_ctx); + goto free_alloc; + } + + /* + * Send the attach VSI message to the Firmware. + */ + if (nss_if_vsi_assign(nss_ctx, if_num, vsi_id) != NSS_TX_SUCCESS) { + nss_warning("%px: Failed to attach VSI to PPE VP interface. nss_if:%u vsi:%u", nss_ctx, if_num, vsi_id); + goto destroy_vp; + } + + nss_vp_info = nss_ppe_vp_map_alloc(); + if (!nss_vp_info) { + nss_warning("%px: No memory for allocating NSS-VP mapping instance", nss_ctx); + goto detach_vsi; + } + + nss_vp_info->vsi_id = vsi_id; + nss_vp_info->vsi_id_valid = true; + nss_vp_info->if_num = if_num; + nss_vp_info->ppe_port_num = ppe_vp_pvt.ppe_port_num; + + nss_info("%px: PPE VP allocated VSI ID:%u NSS interface number:%u VP no from Firmware:%u", nss_ctx, vsi_id, if_num, nss_vp_info->ppe_port_num); + + /* + * Add mapping between the NSS interface number and the VP number. 
+ */ + if (!nss_ppe_vp_add_map(nss_ctx, if_num, nss_vp_info)) { + nss_warning("%px: Failed to add mapping for NSS interface number: %d", nss_ctx, if_num); + goto free_nss_vp_info; + } + + kfree(npvm); + + return status; + +free_nss_vp_info: + nss_ppe_vp_map_dealloc(nss_vp_info); + +detach_vsi: + nss_trace("%px: Detaching VSI ID :%u NSS Interface no:%u", nss_ctx, vsi_id, if_num); + if (nss_if_vsi_unassign(nss_ctx, if_num, vsi_id)) { + nss_warning("%px: Failed to free PPE VP VSI. nss_if:%u vsi:%u", nss_ctx, if_num, vsi_id); + } + +destroy_vp: + nss_trace("%px: Destroy Vp for NSS Interface num:%u VP num:%u", nss_ctx, if_num, npvm->msg.if_msg.ppe_port_create.ppe_port_num); + if (nss_ppe_vp_destroy(nss_ctx, if_num)) { + nss_warning("%px: PPE VP destroy failed, nss_if:%u", nss_ctx, if_num); + } + +free_alloc: + kfree(npvm); + +free_vsi: + nss_trace("%px: Free VSI ID :%u NSS Interface no:%u", nss_ctx, vsi_id, if_num); + if (ppe_vsi_free(NSS_PPE_VP_SWITCH_ID, vsi_id)) { + nss_warning("%px: Failed to free PPE VP VSI. NSS if num:%u vsi:%u", nss_ctx, if_num, vsi_id); + } + + return NSS_TX_FAILURE; +} +EXPORT_SYMBOL(nss_ppe_vp_create); + +/* + * nss_ppe_vp_destroy_notify() + * Get PPE VP destroy notification from NSS + */ +static void nss_ppe_vp_destroy_notify(struct nss_ctx_instance *nss_ctx, struct nss_ppe_vp_destroy_notify_msg *destroy_notify) +{ + nss_if_num_t nss_if_num; + uint32_t i; + int32_t vsi_id; + bool vsi_id_valid = false; + nss_ppe_port_t ppe_port_num = destroy_notify->ppe_port_num; + + /* + * Find NSS interface number corresponding to the VP num. 
+ */ + spin_lock_bh(&nss_ppe_vp_map_lock); + for (i = 0; i < NSS_MAX_DYNAMIC_INTERFACES; i++) { + if (vp_map[i] && (ppe_port_num == vp_map[i]->ppe_port_num)) { + nss_if_num = vp_map[i]->if_num; + vsi_id = vp_map[i]->vsi_id; + vsi_id_valid = vp_map[i]->vsi_id_valid; + break; + } + } + spin_unlock_bh(&nss_ppe_vp_map_lock); + + if (i == NSS_MAX_DYNAMIC_INTERFACES) { + nss_warning("%px: Could not find the NSS interface number mapping for VP number: %u\n", nss_ctx, ppe_port_num); + return; + } + + /* + * Delete the nss_if_num to VP num mapping and reset the stats entry for this VP. + */ + if (!nss_ppe_vp_del_map(nss_ctx, nss_if_num)) { + nss_warning("%px: Failed to delete the mapping for nss_if: %d\n", nss_ctx, nss_if_num); + return; + } + + if (vsi_id_valid && ppe_vsi_free(NSS_PPE_VP_SWITCH_ID, vsi_id)) { + nss_warning("%px: Failed to free PPE VSI. nss_if: %d vsi: %d\n", nss_ctx, nss_if_num, vsi_id); + } +} + +/* + * nss_ppe_vp_handler() + * Handle NSS -> HLOS messages for ppe + */ +static void nss_ppe_vp_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_ppe_vp_msg *msg = (struct nss_ppe_vp_msg *)ncm; + nss_ppe_vp_msg_callback_t cb; + void *ctx; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + nss_trace("%px ppe_vp msg: %px\n", nss_ctx, msg); + BUG_ON(!nss_ppe_vp_verify_ifnum(ncm->interface)); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_PPE_VP_MSG_MAX) { + nss_warning("%px: received invalid message %d for PPE_VP interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_ppe_vp_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Trace messages. 
+ */ + nss_ppe_vp_log_rx_msg(msg); + + switch (msg->cm.type) { + case NSS_PPE_VP_MSG_SYNC_STATS: + /* + * Per VP stats msg + */ + nss_ppe_vp_stats_sync(nss_ctx, &msg->msg.stats, ncm->interface); + break; + + case NSS_PPE_VP_MSG_DESTROY_NOTIFY: + /* + * VP destroy notification + */ + nss_ppe_vp_destroy_notify(nss_ctx, &msg->msg.destroy_notify); + break; + } + + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->nss_rx_interface_handlers[ncm->interface].app_data; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * Callback + */ + cb = (nss_ppe_vp_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + cb(ctx, msg); +} + +/* + * nss_ppe_vp_destroy_handler() + * PPE VP destroy handler. + */ +static int nss_ppe_vp_destroy_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_ctx_instance *nss_ctx = nss_ppe_vp_get_context(); + int32_t if_num; + nss_tx_status_t nss_tx_status; + + if (!nss_ctx) { + nss_warning("%px: NSS Context not found.", nss_ctx); + return -ENODEV; + } + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if_num = nss_ppe_vp_parse_vp_cmd(ctl, write, buffer, lenp, ppos); + if (if_num < 0) { + nss_warning("%px: Invalid interface number: %d", nss_ctx, if_num); + return -EFAULT; + } + + if (nss_ppe_vp_get_ppe_port_by_nssif(nss_ctx, if_num) < 0) { + nss_warning("%px: VP is not present for interface: %d", nss_ctx, if_num); + return -EEXIST; + } + + nss_tx_status = nss_ppe_vp_destroy(nss_ctx, if_num); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: Sending message failed, cannot destroy PPE_VP node nss_if: %u", nss_ctx, if_num); + return -EBUSY; + } + + return 0; +} + +/* + * nss_ppe_vp_create_handler() + * PPE VP create handler. 
+ */ +static int nss_ppe_vp_create_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int32_t if_num; + struct nss_ctx_instance *nss_ctx = nss_ppe_vp_get_context(); + nss_tx_status_t nss_tx_status; + + if (!nss_ctx) { + nss_warning("%px: NSS Context not found.", nss_ctx); + return -ENODEV; + } + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if_num = nss_ppe_vp_parse_vp_cmd(ctl, write, buffer, lenp, ppos); + if (if_num < 0) { + nss_warning("%px: Invalid interface number: %d", nss_ctx, if_num); + return -EFAULT; + } + + nss_info("%px: NSS interface number: %d", nss_ctx, if_num); + + if (nss_ppe_vp_get_ppe_port_by_nssif(nss_ctx, if_num) > 0) { + nss_warning("%px: VP is already present for nss_if_num: %d", nss_ctx, if_num); + return -EEXIST; + } + + nss_tx_status = nss_ppe_vp_create(nss_ctx, if_num); + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: Sending message failed, cannot create PPE VP node for nss_if_num: %u", nss_ctx, if_num); + return -EBUSY; + } + + return 0; +} + +static struct ctl_table nss_ppe_vp_table[] = { + { + .procname = "create", + .data = &nss_ppe_vp_cmd, + .maxlen = sizeof(nss_ppe_vp_cmd), + .mode = 0644, + .proc_handler = &nss_ppe_vp_create_handler, + }, + { + .procname = "destroy", + .data = &nss_ppe_vp_cmd, + .maxlen = sizeof(nss_ppe_vp_cmd), + .mode = 0644, + .proc_handler = &nss_ppe_vp_destroy_handler, + }, + { } +}; + +static struct ctl_table nss_ppe_vp_dir[] = { + { + .procname = "ppe_vp", + .mode = 0555, + .child = nss_ppe_vp_table, + }, + { } +}; + +static struct ctl_table nss_ppe_vp_root_dir[] = { + { + .procname = "nss", + .mode = 0555, + .child = nss_ppe_vp_dir, + }, + { } +}; + +static struct ctl_table_header *nss_ppe_vp_procfs_header; + +/* + * nss_ppe_vp_procfs_register() + * Register sysctl specific to ppe_vp + */ +void nss_ppe_vp_procfs_register(void) +{ + /* + * Register sysctl table. 
+ */ + nss_ppe_vp_procfs_header = register_sysctl_table(nss_ppe_vp_root_dir); +} + +/* + * nss_ppe_vp_procfs_unregister() + * Unregister sysctl specific for ppe_vp + */ +void nss_ppe_vp_procfs_unregister(void) +{ + /* + * Unregister sysctl table. + */ + if (nss_ppe_vp_procfs_header) { + unregister_sysctl_table(nss_ppe_vp_procfs_header); + } +} + +/* + * nss_ppe_vp_register_handler() + * + */ +void nss_ppe_vp_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_ppe_vp_get_context(); + + nss_ppe_vp_dentry = nss_ppe_vp_stats_dentry_create(); + if (nss_ppe_vp_dentry == NULL) { + nss_warning("%px: Not able to create debugfs entry", nss_ctx); + return; + } + + nss_core_register_handler(nss_ctx, NSS_PPE_VP_INTERFACE, nss_ppe_vp_handler, NULL); + nss_ppe_vp_procfs_register(); + + sema_init(&ppe_vp_pvt.sem, 1); + init_completion(&ppe_vp_pvt.complete); +} + +/* + * nss_ppe_vp_unregister_handler() + * + */ +void nss_ppe_vp_unregister_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_ppe_vp_get_context(); + + debugfs_remove_recursive(nss_ppe_vp_dentry); + nss_ppe_vp_procfs_unregister(); + nss_core_unregister_handler(nss_ctx, NSS_PPE_VP_INTERFACE); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp.h b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp.h new file mode 100644 index 000000000..ee656497f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp.h @@ -0,0 +1,130 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ppe_vp.h + * NSS PPE virtual port header file + */ + +#include +#include "nss_tx_rx_common.h" + +/* + * Maximum number of virtual port supported by PPE hardware + */ +#define NSS_PPE_VP_MAX_NUM 192 +#define NSS_PPE_VP_START 64 +#define NSS_PPE_VP_NODE_STATS_MAX 32 +#define NSS_PPE_VP_SWITCH_ID 0 +#define NSS_PPE_VP_MAX_CMD_STR 200 + +/* + * ppe_vp nss debug stats lock + */ +extern spinlock_t nss_ppe_vp_stats_lock; + +/* + * nss_ppe_vp_msg_error_type + * ppe_vp message errors + */ +enum nss_ppe_vp_msg_error_type { + NSS_PPE_VP_MSG_ERROR_TYPE_UNKNOWN, /* Unknown message error */ + PPE_VP_MSG_ERROR_TYPE_INVALID_DI, /* Invalid dynamic interface type */ + NSS_PPE_VP_MSG_ERROR_TYPE_MAX /* Maximum error type */ +}; + +/* + * nss_ppe_vp_message_types + * Message types for Packet Processing Engine (PPE) requests and responses. + */ +enum nss_ppe_vp_message_types { + NSS_PPE_VP_MSG_SYNC_STATS, + NSS_PPE_VP_MSG_DESTROY_NOTIFY, + NSS_PPE_VP_MSG_MAX, +}; + +/* + * nss_ppe_vp_statistics + * Message structure for ppe_vp statistics + */ +struct nss_ppe_vp_statistics { + uint32_t nss_if; /* NSS interface number corresponding to VP */ + nss_ppe_port_t ppe_port_num; /* VP number */ + uint32_t rx_drop; /* Rx drops due to VP node inactive */ + uint32_t tx_drop; /* Tx drops due to VP node inactive */ + uint32_t packet_big_err; /* Number of packets not sent to PPE because packet was too large */ + struct nss_cmn_node_stats stats; /* Common node statistics */ +}; + +/* + * nss_ppe_vp_sync_stats_msg + * Message information for ppe_vp synchronization statistics. 
+ */ +struct nss_ppe_vp_sync_stats_msg { + uint16_t count; /* Number of VP node stats with the sync message */ + uint32_t rx_dropped[NSS_MAX_NUM_PRI]; /* Rx packet dropped due to queue full */ + struct nss_ppe_vp_statistics vp_stats[NSS_PPE_VP_NODE_STATS_MAX]; + /* Per service-code stats */ +}; + +/* + * nss_ppe_vp_destroy_notify_msg + * Message received as part of destroy notification from Firmware to Host. + */ +struct nss_ppe_vp_destroy_notify_msg { + nss_ppe_port_t ppe_port_num; /* VP number */ +}; + +/* + * nss_ppe_vp_msg + * Message for receiving ppe_vp NSS to host messages. + */ +struct nss_ppe_vp_msg { + struct nss_cmn_msg cm; /* Common message header. */ + + /* + * Payload. + */ + union { + union nss_if_msgs if_msg; + /* NSS interface base messages. */ + struct nss_ppe_vp_sync_stats_msg stats; + /* Synchronization statistics. */ + struct nss_ppe_vp_destroy_notify_msg destroy_notify; + /* Information for the VP destroyed in Firmware. */ + } msg; /* Message payload. */ +}; + +/* + * nss_vp_mapping + * Structure to maintain the one-to-one mapping between the NSS interface number and VP number. + */ +struct nss_vp_mapping { + nss_if_num_t if_num; /* NSS interface number. */ + nss_ppe_port_t ppe_port_num; /* PPE port number corresponding to the NSS interface number. */ + uint32_t vsi_id; /* VSI ID allocated for NSS interface */ + bool vsi_id_valid; /* Set to true if vsi_id field has a valid VSI else set to false. */ +}; + +typedef void (*nss_ppe_vp_msg_callback_t)(void *app_data, struct nss_ppe_vp_msg *msg); + +/* + * Logging APIs. 
+ */ +void nss_ppe_vp_log_tx_msg(struct nss_ppe_vp_msg *npvm); +void nss_ppe_vp_log_rx_msg(struct nss_ppe_vp_msg *npvm); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_log.c new file mode 100644 index 000000000..8c853dfcb --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_log.c @@ -0,0 +1,135 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ppe_vp_log.c + * NSS PPE logger file. + */ + +#include "nss_core.h" +#include "nss_ppe_vp.h" + +/* + * nss_ppe_vp_log_error_response_types_str + * Strings for error types for PPE-VP messages + */ +static int8_t *nss_ppe_vp_log_error_response_types_str[NSS_PPE_VP_MSG_ERROR_TYPE_MAX] __maybe_unused = { + "PPE VP Unknown message type", + "PPE VP Invalid dynamic interface type", +}; + +/* + * nss_ppe_vp_log_destroy_notify_msg() + * Log NSS PPE VP destroy notification message. 
+ */ +static void nss_ppe_vp_log_destroy_notify_msg(struct nss_ppe_vp_msg *npvm) +{ + struct nss_ppe_vp_destroy_notify_msg *npdnm __maybe_unused = &npvm->msg.destroy_notify; + + nss_trace("%px: NSS PPE VP destroy notification message:\n" + "VP number: %u\n", + npdnm, npdnm->ppe_port_num); +} + +/* + * nss_ppe_vp_log_verbose() + * Log message contents. + */ +static void nss_ppe_vp_log_verbose(struct nss_ppe_vp_msg *npvm) +{ + switch (npvm->cm.type) { + + case NSS_IF_PPE_PORT_CREATE: + nss_info("%px: PPE interface create message type:%d\n", npvm, npvm->cm.type); + break; + + case NSS_IF_PPE_PORT_DESTROY: + nss_info("%px: PPE interface destroy message type:%d\n", npvm, npvm->cm.type); + break; + + case NSS_IF_VSI_ASSIGN: + nss_info("%px: PPE interface VSI assign message type:%d\n", npvm, npvm->cm.type); + break; + + case NSS_IF_VSI_UNASSIGN: + nss_info("%px: PPE interface VSI unassign message type:%d\n", npvm, npvm->cm.type); + break; + + case NSS_PPE_VP_MSG_DESTROY_NOTIFY: + nss_ppe_vp_log_destroy_notify_msg(npvm); + break; + + case NSS_PPE_VP_MSG_SYNC_STATS: + /* + * No log for valid stats message. + */ + break; + + default: + nss_warning("%px: Invalid message type\n", npvm); + break; + } +} + +/* + * nss_ppe_vp_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_ppe_vp_log_tx_msg(struct nss_ppe_vp_msg *npvm) +{ + + if (!((npvm->cm.type == NSS_IF_PPE_PORT_CREATE) || (npvm->cm.type == NSS_IF_PPE_PORT_DESTROY))) { + nss_warning("%px: Invalid message type\n", npvm); + return; + } + + nss_info("%px: type:%d\n", npvm, npvm->cm.type); + nss_ppe_vp_log_verbose(npvm); +} + +/* + * nss_ppe_vp_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_ppe_vp_log_rx_msg(struct nss_ppe_vp_msg *npvm) +{ + if (npvm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", npvm); + return; + } + + if (npvm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (npvm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type: %d, response[%d]: %s\n", npvm, npvm->cm.type, + npvm->cm.response, nss_cmn_response_str[npvm->cm.response]); + goto verbose; + } + + if (npvm->cm.error >= NSS_PPE_VP_MSG_ERROR_TYPE_MAX) { + nss_warning("%px: msg failure - type: %d, response[%d]: %s, error[%d]:Invalid error\n", + npvm, npvm->cm.type, npvm->cm.response, nss_cmn_response_str[npvm->cm.response], + npvm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type: %d, response[%d]: %s, error[%d]: %s\n", + npvm, npvm->cm.type, npvm->cm.response, nss_cmn_response_str[npvm->cm.response], + npvm->cm.error, nss_ppe_vp_log_error_response_types_str[npvm->cm.error]); + +verbose: + nss_ppe_vp_log_verbose(npvm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_stats.c new file mode 100644 index 000000000..57c79953f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_stats.c @@ -0,0 +1,229 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_ppe_vp.h" +#include "nss_ppe_vp_stats.h" + +/* + * nss_ppe_vp_stats_cntrs + * PPE VP stats counters displayed using debugfs + */ +enum nss_ppe_vp_stats_cntrs { + NSS_PPE_VP_STATS_VP_NUM, + NSS_PPE_VP_STATS_NSS_IF, + NSS_PPE_VP_STATS_RX_PKTS, + NSS_PPE_VP_STATS_RX_BYTES, + NSS_PPE_VP_STATS_TX_PKTS, + NSS_PPE_VP_STATS_TX_BYTES, + NSS_PPE_VP_STATS_RX_INACTIVE, + NSS_PPE_VP_STATS_TX_INACTIVE, + NSS_PPE_VP_STATS_PACKET_BIG, + NSS_PPE_VP_STATS_TX_Q_0_DROP, + NSS_PPE_VP_STATS_TX_Q_1_DROP, + NSS_PPE_VP_STATS_TX_Q_2_DROP, + NSS_PPE_VP_STATS_TX_Q_3_DROP, + NSS_PPE_VP_STATS_MAX +}; + +/* + * nss_ppe_vp_stats_rx_cntrs + * PPE VP RX stats counters displayed using debugfs + */ +enum nss_ppe_vp_stats_rx_cntrs { + NSS_PPE_VP_STATS_RX_Q_0_DROP, + NSS_PPE_VP_STATS_RX_Q_1_DROP, + NSS_PPE_VP_STATS_RX_Q_2_DROP, + NSS_PPE_VP_STATS_RX_Q_3_DROP, + NSS_PPE_VP_STATS_RX_MAX +}; + +/* + * nss_ppe_vp_rx_stats_str + * PPE VP Rx statistics strings + */ +struct nss_stats_info nss_ppe_vp_stats_rx_str[NSS_PPE_VP_STATS_RX_MAX] = { + {"rx_queue_0_drop" , NSS_STATS_TYPE_DROP}, + {"rx_queue_1_drop" , NSS_STATS_TYPE_DROP}, + {"rx_queue_2_drop" , NSS_STATS_TYPE_DROP}, + {"rx_queue_3_drop" , NSS_STATS_TYPE_DROP}, +}; + +/* + * nss_ppe_vp_stats_str + * PPE VP statistics strings + */ +struct nss_stats_info nss_ppe_vp_stats_str[NSS_PPE_VP_STATS_MAX] = { + {"ppe_port_num" , NSS_STATS_TYPE_SPECIAL}, + {"nss_if" , NSS_STATS_TYPE_SPECIAL}, + {"rx_packets" , NSS_STATS_TYPE_COMMON}, + {"rx_bytes" , NSS_STATS_TYPE_COMMON}, + {"tx_packets" , NSS_STATS_TYPE_COMMON}, + 
{"tx_bytes" , NSS_STATS_TYPE_COMMON}, + {"rx_inactive" , NSS_STATS_TYPE_DROP}, + {"tx_inactive" , NSS_STATS_TYPE_DROP}, + {"packet_large_err" , NSS_STATS_TYPE_EXCEPTION}, + {"tx_queue_0_drop" , NSS_STATS_TYPE_DROP}, + {"tx_queue_1_drop" , NSS_STATS_TYPE_DROP}, + {"tx_queue_2_drop" , NSS_STATS_TYPE_DROP}, + {"tx_queue_3_drop" , NSS_STATS_TYPE_DROP}, +}; + +/* + * nss_ppe_vp_stats_sync + * PPE VP sync statistics from NSS + */ +void nss_ppe_vp_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_ppe_vp_sync_stats_msg *stats_msg, uint16_t if_num) +{ + uint16_t count = stats_msg->count; + uint16_t vp_index, i; + + spin_lock_bh(&nss_ppe_vp_stats_lock); + + /* + * Update general rx dropped stats. + */ + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + nss_ppe_vp_debug_stats.rx_dropped[i] += stats_msg->rx_dropped[i]; + } + + /* + * Update per VP tx and rx stats. + */ + while (count) { + count--; + + /* + * Update stats in global array + */ + vp_index = stats_msg->vp_stats[count].ppe_port_num - NSS_PPE_VP_START; + nss_ppe_vp_debug_stats.vp_stats[vp_index].ppe_port_num = stats_msg->vp_stats[count].ppe_port_num; + nss_ppe_vp_debug_stats.vp_stats[vp_index].nss_if = stats_msg->vp_stats[count].nss_if; + nss_ppe_vp_debug_stats.vp_stats[vp_index].rx_packets += stats_msg->vp_stats[count].stats.rx_packets; + nss_ppe_vp_debug_stats.vp_stats[vp_index].rx_bytes += stats_msg->vp_stats[count].stats.rx_bytes; + nss_ppe_vp_debug_stats.vp_stats[vp_index].tx_packets += stats_msg->vp_stats[count].stats.tx_packets; + nss_ppe_vp_debug_stats.vp_stats[vp_index].tx_bytes += stats_msg->vp_stats[count].stats.tx_bytes; + nss_ppe_vp_debug_stats.vp_stats[vp_index].rx_inactive_drop += stats_msg->vp_stats[count].rx_drop; + nss_ppe_vp_debug_stats.vp_stats[vp_index].tx_inactive_drop += stats_msg->vp_stats[count].tx_drop; + nss_ppe_vp_debug_stats.vp_stats[vp_index].packet_big_err += stats_msg->vp_stats[count].packet_big_err; + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + 
nss_ppe_vp_debug_stats.vp_stats[vp_index].tx_dropped[i] += stats_msg->vp_stats[count].stats.rx_dropped[i]; + } + + nss_trace("sync count:%d ppe_port_num %d rx_packets %d tx_packets %d\n", + count, stats_msg->vp_stats[count].ppe_port_num, + stats_msg->vp_stats[count].stats.rx_packets, + stats_msg->vp_stats[count].stats.tx_packets); + } + spin_unlock_bh(&nss_ppe_vp_stats_lock); +} + +/* + * nss_ppe_vp_stats_read() + * Read ppe vp statistics + */ +static ssize_t nss_ppe_vp_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int i; + char *lbuf = NULL; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct nss_ppe_vp_stats_debug *ppe_vp_stats; + uint32_t max_output_lines = ((NSS_PPE_VP_STATS_RX_MAX + NSS_PPE_VP_STATS_MAX) * NSS_PPE_VP_MAX_NUM) + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t stats_sz = sizeof(struct nss_ppe_vp_stats_debug); + + ppe_vp_stats = kzalloc(stats_sz, GFP_KERNEL); + if (!ppe_vp_stats) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + + lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + kfree(ppe_vp_stats); + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + /* + * Get vp stats + */ + spin_lock_bh(&nss_ppe_vp_stats_lock); + memcpy(ppe_vp_stats, &nss_ppe_vp_debug_stats, stats_sz); + spin_unlock_bh(&nss_ppe_vp_stats_lock); + + /* + * VP stats + */ + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "ppe_vp", NSS_STATS_SINGLE_CORE); + + /* + * Print Rx dropped. 
+ */ + size_wr += nss_stats_print("ppe_vp", "ppe_vp rx dropped:" + , NSS_STATS_SINGLE_INSTANCE + , nss_ppe_vp_stats_rx_str + , ppe_vp_stats->rx_dropped + , NSS_PPE_VP_STATS_RX_MAX + , lbuf, size_wr, size_al); + + /* + * Print individual VP stats + */ + for (i = 0; i < NSS_PPE_VP_MAX_NUM; i++) { + if (!ppe_vp_stats->vp_stats[i].nss_if) { + continue; + } + + size_wr += nss_stats_print("ppe_vp", "ppe_vp stats" + , NSS_STATS_SINGLE_INSTANCE + , nss_ppe_vp_stats_str + , (uint64_t *) &ppe_vp_stats->vp_stats[i] + , NSS_PPE_VP_STATS_MAX + , lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(ppe_vp_stats); + kfree(lbuf); + return bytes_read; +} + +/* + * nss_ppe_vp_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(ppe_vp) + +/* + * nss_ppe_vp_stats_dentry_create() + * Create PPE statistics debug entry. + */ +struct dentry *nss_ppe_vp_stats_dentry_create(void) +{ + struct dentry *ppe_vp_d = debugfs_create_file("ppe_vp", 0400, nss_top_main.stats_dentry, + &nss_top_main, &nss_ppe_vp_stats_ops); + if (unlikely(ppe_vp_d == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/ppe_vp file"); + return NULL; + } + + return ppe_vp_d; +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_stats.h new file mode 100644 index 000000000..b435da5ba --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_ppe_vp_stats.h @@ -0,0 +1,63 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_ppe_vp_stats.h + * NSS PPE-VP statistics header file. + */ + +#ifndef __NSS_PPE_VP_STATS_H +#define __NSS_PPE_VP_STATS_H + +/* + * NSS PPE-VP statistics + */ +struct nss_ppe_vp_statistics_debug { + uint64_t ppe_port_num; /* VP number */ + uint64_t nss_if; /* NSS interface number corresponding to VP */ + uint64_t rx_packets; /* Number of packets received. */ + uint64_t rx_bytes; /* Number of bytes received. */ + uint64_t tx_packets; /* Number of packets transmitted. */ + uint64_t tx_bytes; /* Number of bytes transmitted. */ + uint64_t rx_inactive_drop; /* Number of packets dropped from PPE to VP due to VP inactive */ + uint64_t tx_inactive_drop; /* Number of packets dropped from VP to PPE due to VP inactive */ + uint64_t packet_big_err; /* Number of packets not sent to PPE because packet was too large */ + uint64_t tx_dropped[NSS_MAX_NUM_PRI]; /* Tx packets dropped on due to queue full. */ +}; + +/* + * NSS PPE-VP statistics + */ +struct nss_ppe_vp_stats_debug { + uint64_t rx_dropped[NSS_MAX_NUM_PRI]; /* Packets dropped on receive due to queue full. */ + struct nss_ppe_vp_statistics_debug vp_stats[NSS_PPE_VP_MAX_NUM]; + /* Per VP Tx and Rx stats. 
*/ +}; + +/* + * Data structures to store NSS PPE_VP debug statistics + */ +extern struct nss_ppe_vp_stats_debug nss_ppe_vp_debug_stats; + +/* + * NSS PPE-VP statistics APIs + */ +extern void nss_ppe_vp_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_ppe_vp_sync_stats_msg *stats_msg, uint16_t if_num); +extern struct dentry *nss_ppe_vp_stats_dentry_create(void); + +#endif /* __NSS_PPE_VP_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pppoe.c b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe.c new file mode 100644 index 000000000..df613f76a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe.c @@ -0,0 +1,435 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_pppoe.c + * NSS PPPoE APIs + */ + +#include "nss_tx_rx_common.h" +#include "nss_pppoe_stats.h" +#include "nss_pppoe_log.h" +#include "nss_pppoe_strings.h" + +#define NSS_PPPOE_TX_TIMEOUT 3000 /* 3 Seconds */ + +int nss_pppoe_br_accel_mode __read_mostly = NSS_PPPOE_BR_ACCEL_MODE_EN_5T; + +/* + * Private data structure + */ +static struct nss_pppoe_pvt { + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; +} pppoe_pvt; + +/* + * nss_pppoe_br_help() + * Usage information for pppoe bridge accel mode + */ +static inline void nss_pppoe_br_help(int mode) +{ + printk("Incorrect pppoe bridge accel mode: %d\n", mode); + printk("Supported modes\n"); + printk("%d: pppoe bridge acceleration disable\n", NSS_PPPOE_BR_ACCEL_MODE_DIS); + printk("%d: pppoe bridge acceleration enable with 5-tuple\n", NSS_PPPOE_BR_ACCEL_MODE_EN_5T); + printk("%d: pppoe bridge acceleration enable with 3-tuple\n", NSS_PPPOE_BR_ACCEL_MODE_EN_3T); +} + +/* + * nss_pppoe_get_context() + */ +struct nss_ctx_instance *nss_pppoe_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.pppoe_handler_id]; +} +EXPORT_SYMBOL(nss_pppoe_get_context); + +/* + * nss_pppoe_tx_msg() + * Transmit a PPPoE message to NSS firmware + */ +static nss_tx_status_t nss_pppoe_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_pppoe_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + enum nss_dynamic_interface_type type; + + /* + * Trace Messages + */ + nss_pppoe_log_tx_msg(msg); + + /* + * Sanity check the message + */ + type = nss_dynamic_interface_get_type(nss_pppoe_get_context(), ncm->interface); + if ((ncm->interface != NSS_PPPOE_INTERFACE) && (type != NSS_DYNAMIC_INTERFACE_TYPE_PPPOE)) { + nss_warning("%px: tx request for not PPPoE interface: %d type: %d\n", + nss_ctx, ncm->interface, type); + return NSS_TX_FAILURE; + } + + if (ncm->type >= 
NSS_PPPOE_MSG_MAX) { + nss_warning("%px: message type out of range: %d\n", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_pppoe_sync_msg_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_pppoe_sync_msg_callback(void *app_data, struct nss_pppoe_msg *npm) +{ + nss_pppoe_msg_callback_t callback = (nss_pppoe_msg_callback_t)pppoe_pvt.cb; + void *data = pppoe_pvt.app_data; + + pppoe_pvt.cb = NULL; + pppoe_pvt.app_data = NULL; + + pppoe_pvt.response = NSS_TX_SUCCESS; + if (npm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("pppoe Error response %d\n", npm->cm.response); + pppoe_pvt.response = NSS_TX_FAILURE; + } + + if (callback) { + callback(data, npm); + } + + complete(&pppoe_pvt.complete); +} + +/* + * nss_pppoe_handler() + * Handle NSS -> HLOS messages for PPPoE + */ +static void nss_pppoe_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_pppoe_msg *npm = (struct nss_pppoe_msg *)ncm; + void *ctx; + nss_pppoe_msg_callback_t cb; + + BUG_ON(!(nss_is_dynamic_interface(ncm->interface) || ncm->interface == NSS_PPPOE_INTERFACE)); + + /* + * Trace Messages + */ + nss_pppoe_log_rx_msg(npm); + + /* + * Sanity check the message type + */ + if (ncm->type >= NSS_PPPOE_MSG_MAX) { + nss_warning("%px: message type out of range: %d\n", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_pppoe_msg)) { + nss_warning("%px: message length is invalid: %d\n", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Handling PPPoE messages coming from NSS fw. 
+ */ + switch (npm->cm.type) { + case NSS_PPPOE_MSG_SYNC_STATS: + /* + * Update PPPoE debug statistics and send statistics notifications to the registered modules + */ + nss_pppoe_stats_sync(nss_ctx, &npm->msg.sync_stats, ncm->interface); + nss_pppoe_stats_notify(nss_ctx, ncm->interface); + break; + default: + nss_warning("%px: Received response %d for type %d, interface %d\n", + nss_ctx, ncm->response, ncm->type, ncm->interface); + } + + /* + * Update the callback and app_data for NOTIFY messages, pppoe sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->pppoe_msg_callback; + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].app_data; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_pppoe_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + cb(ctx, npm); +} + +/* + * nss_pppoe_br_accel_mode_handler() + * Enable/disable pppoe bridge acceleration in NSS + */ +int nss_pppoe_br_accel_mode_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_ctx_instance *nss_ctx = nss_pppoe_get_context(); + struct nss_pppoe_msg npm; + struct nss_pppoe_br_accel_cfg_msg *npbacm; + nss_tx_status_t status; + int ret; + enum nss_pppoe_br_accel_modes current_value, new_val; + + /* + * Take snap shot of current value + */ + current_value = nss_pppoe_br_accel_mode; + + /* + * Write the variable with user input + */ + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + if (ret || (!write)) { + return ret; + } + + new_val = nss_pppoe_br_accel_mode; + if ((new_val < NSS_PPPOE_BR_ACCEL_MODE_DIS) || (new_val >= NSS_PPPOE_BR_ACCEL_MODE_MAX)) { + nss_warning("%px: value out of range: %d\n", nss_ctx, new_val); + nss_pppoe_br_accel_mode = current_value; + nss_pppoe_br_help(new_val); 
+ return -EINVAL; + } + + memset(&npm, 0, sizeof(struct nss_pppoe_msg)); + nss_pppoe_msg_init(&npm, NSS_PPPOE_INTERFACE, NSS_PPPOE_MSG_BR_ACCEL_CFG, + sizeof(struct nss_pppoe_br_accel_cfg_msg), NULL, NULL); + + npbacm = &npm.msg.br_accel; + npbacm->br_accel_cfg = new_val; + + status = nss_pppoe_tx_msg_sync(nss_ctx, &npm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Send acceleration mode message failed\n", nss_ctx); + nss_pppoe_br_accel_mode = current_value; + return -EIO; + } + + return 0; +} + +/* + * nss_pppoe_get_br_accel_mode() + * Gets PPPoE bridge acceleration mode + */ +enum nss_pppoe_br_accel_modes nss_pppoe_get_br_accel_mode(void) +{ + return nss_pppoe_br_accel_mode; +} +EXPORT_SYMBOL(nss_pppoe_get_br_accel_mode); + +/* + * nss_pppoe_tx_msg_sync() + */ +nss_tx_status_t nss_pppoe_tx_msg_sync(struct nss_ctx_instance *nss_ctx, + struct nss_pppoe_msg *msg) +{ + nss_tx_status_t status; + int ret = 0; + + down(&pppoe_pvt.sem); + pppoe_pvt.cb = (void *)msg->cm.cb; + pppoe_pvt.app_data = (void *)msg->cm.app_data; + + msg->cm.cb = (nss_ptr_t)nss_pppoe_sync_msg_callback; + msg->cm.app_data = (nss_ptr_t)NULL; + + status = nss_pppoe_tx_msg(nss_ctx, msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_pppoe_tx_msg failed\n", nss_ctx); + up(&pppoe_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&pppoe_pvt.complete, msecs_to_jiffies(NSS_PPPOE_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: PPPoE msg tx failed due to timeout\n", nss_ctx); + pppoe_pvt.response = NSS_TX_FAILURE; + } + + status = pppoe_pvt.response; + up(&pppoe_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_pppoe_tx_msg_sync); + +/* + * nss_register_pppoe_session_if() + */ +struct nss_ctx_instance *nss_register_pppoe_session_if(uint32_t if_num, + nss_pppoe_msg_callback_t notification_callback, + struct net_device *netdev, uint32_t features, void *app_ctx) +{ + struct nss_ctx_instance *nss_ctx = nss_pppoe_get_context(); + + nss_assert(nss_ctx); + 
nss_assert(nss_is_dynamic_interface(if_num)); + + if (!nss_pppoe_stats_pppoe_session_init(if_num, netdev)) { + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, NULL, NULL, app_ctx, netdev, features); + + nss_top_main.pppoe_msg_callback = notification_callback; + + nss_core_register_handler(nss_ctx, if_num, nss_pppoe_handler, NULL); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_register_pppoe_session_if); + +/* + * nss_unregister_pppoe_session_if() + */ +void nss_unregister_pppoe_session_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_pppoe_get_context(); + + nss_assert(nss_ctx); + nss_assert(nss_is_dynamic_interface(if_num)); + + nss_pppoe_stats_pppoe_session_deinit(if_num); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_top_main.pppoe_msg_callback = NULL; + + nss_core_unregister_handler(nss_ctx, if_num); + +} +EXPORT_SYMBOL(nss_unregister_pppoe_session_if); + +static struct ctl_table nss_pppoe_table[] = { + { + .procname = "br_accel_mode", + .data = &nss_pppoe_br_accel_mode, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_pppoe_br_accel_mode_handler, + }, + { } +}; + +static struct ctl_table nss_pppoe_dir[] = { + { + .procname = "pppoe", + .mode = 0555, + .child = nss_pppoe_table, + }, + { } +}; + +static struct ctl_table nss_pppoe_root_dir[] = { + { + .procname = "nss", + .mode = 0555, + .child = nss_pppoe_dir, + }, + { } +}; + +static struct ctl_table nss_pppoe_root[] = { + { + .procname = "dev", + .mode = 0555, + .child = nss_pppoe_root_dir, + }, + { } +}; + +static struct ctl_table_header *nss_pppoe_header; + +/* + * nss_pppoe_register_sysctl() + * Register sysctl specific to pppoe + */ +void nss_pppoe_register_sysctl(void) +{ + /* + * Register sysctl table. + */ + nss_pppoe_header = register_sysctl_table(nss_pppoe_root); +} + +/* + * nss_pppoe_unregister_sysctl() + * Unregister sysctl specific to pppoe + */ +void nss_pppoe_unregister_sysctl(void) +{ + /* + * Unregister sysctl table. 
+ */ + if (nss_pppoe_header) { + unregister_sysctl_table(nss_pppoe_header); + } +} + +/* + * nss_pppoe_register_handler() + */ +void nss_pppoe_register_handler(void) +{ + nss_info("nss_pppoe_register_handler\n"); + nss_core_register_handler(nss_pppoe_get_context(), NSS_PPPOE_INTERFACE, nss_pppoe_handler, NULL); + + sema_init(&pppoe_pvt.sem, 1); + init_completion(&pppoe_pvt.complete); + + nss_pppoe_stats_dentry_create(); + nss_pppoe_strings_dentry_create(); +} + +/* + * nss_pppoe_msg_init() + */ +void nss_pppoe_msg_init(struct nss_pppoe_msg *npm, uint16_t if_num, uint32_t type, uint32_t len, + void *cb, void *app_data) +{ + nss_cmn_msg_init(&npm->cm, if_num, type, len, (void *)cb, app_data); + +} +EXPORT_SYMBOL(nss_pppoe_msg_init); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_log.c new file mode 100644 index 000000000..7ab8b1902 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_log.c @@ -0,0 +1,133 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_pppoe_log.c + * NSS PPPOE logger file. + */ + +#include "nss_core.h" + +/* + * nss_pppoe_log_message_types_str + * NSS PPPOE message strings + */ +static int8_t *nss_pppoe_log_message_types_str[NSS_PPPOE_MSG_MAX] __maybe_unused = { + "PPPOE Session Create", + "PPPOE Session Destroy", + "PPPOE Stats", +}; + +/* + * nss_pppoe_log_session_create_msg() + * Log NSS Session Create. + */ +static void nss_pppoe_log_session_create_msg(struct nss_pppoe_msg *npm) +{ + struct nss_pppoe_create_msg *npcm __maybe_unused = &npm->msg.create; + nss_trace("%px: NSS PPPOE Session Create message \n" + "PPPOE Base Interface Number: %d\n" + "PPPOE MTU: %d\n" + "PPPOE Server MAC: %pM\n" + "PPPOE Local MAC: %pM\n" + "PPPOE Session ID: %d\n", + npcm, npcm->base_if_num, + npcm->mtu, npcm->server_mac, + npcm->local_mac, npcm->session_id); +} + +/* + * nss_pppoe_log_session_destroy_msg() + * Log NSS Session Destroy. + */ +static void nss_pppoe_log_session_destroy_msg(struct nss_pppoe_msg *npm) +{ + struct nss_pppoe_destroy_msg *npdm __maybe_unused = &npm->msg.destroy; + nss_trace("%px: NSS PPPOE Session Destroy message \n" + "PPPOE Session ID: %d\n" + "PPPOE Server MAC: %pM\n" + "PPPOE Local MAC: %pM\n", + npdm, npdm->session_id, + npdm->server_mac, npdm->local_mac); +} + +/* + * nss_pppoe_log_verbose() + * Log message contents. + */ +static void nss_pppoe_log_verbose(struct nss_pppoe_msg *npm) +{ + switch (npm->cm.type) { + case NSS_PPPOE_MSG_SESSION_CREATE: + nss_pppoe_log_session_create_msg(npm); + break; + + case NSS_PPPOE_MSG_SESSION_DESTROY: + nss_pppoe_log_session_destroy_msg(npm); + break; + + case NSS_PPPOE_MSG_SYNC_STATS: + /* + * No log for valid stats message. + */ + break; + + default: + nss_trace("%px: Invalid message type\n", npm); + break; + } +} + +/* + * nss_pppoe_log_tx_msg() + * Log messages transmitted to FW. 
+ */ +void nss_pppoe_log_tx_msg(struct nss_pppoe_msg *npm) +{ + if (npm->cm.type >= NSS_PPPOE_MSG_MAX) { + nss_warning("%px: Invalid message type\n", npm); + return; + } + + nss_info("%px: type[%d]:%s\n", npm, npm->cm.type, nss_pppoe_log_message_types_str[npm->cm.type]); + nss_pppoe_log_verbose(npm); +} + +/* + * nss_pppoe_log_rx_msg() + * Log messages received from FW. + */ +void nss_pppoe_log_rx_msg(struct nss_pppoe_msg *npm) +{ + if (npm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", npm); + return; + } + + if (npm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (npm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", npm, npm->cm.type, + nss_pppoe_log_message_types_str[npm->cm.type], + npm->cm.response, nss_cmn_response_str[npm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + npm, npm->cm.type, nss_pppoe_log_message_types_str[npm->cm.type], + npm->cm.response, nss_cmn_response_str[npm->cm.response]); + +verbose: + nss_pppoe_log_verbose(npm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_log.h new file mode 100644 index 000000000..4636b08d8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_PPPOE_LOG_H +#define __NSS_PPPOE_LOG_H + +/* + * nss_pppoe.h + * NSS PPPOE header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_pppoe_log_tx_msg + * Logs a pppoe message that is sent to the NSS firmware. + */ +void nss_pppoe_log_tx_msg(struct nss_pppoe_msg *nim); + +/* + * nss_pppoe_log_rx_msg + * Logs a pppoe message that is received from the NSS firmware. + */ +void nss_pppoe_log_rx_msg(struct nss_pppoe_msg *nim); + +#endif /* __NSS_PPPOE_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_stats.c new file mode 100644 index 000000000..75b24c8ed --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_stats.c @@ -0,0 +1,265 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include +#include "nss_pppoe_stats.h" +#include "nss_pppoe_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_pppoe_stats_notifier); + +/* + * Lock used for PPPoE statistics + */ +static DEFINE_SPINLOCK(nss_pppoe_stats_lock); + +/* + * PPPoE session stats structure for debug interface + */ +struct nss_pppoe_stats_session_stats { + uint64_t stats[NSS_PPPOE_STATS_SESSION_MAX]; + /* stats for the session */ + int32_t if_index; /* net device index for the session */ + uint32_t if_num; /* nss interface number */ + bool valid; /* dynamic interface valid flag */ +}; + +/* + * PPPoE interface stats structure for base node and sessions + */ +struct nss_pppoe_stats { + uint64_t base_stats[NSS_PPPOE_STATS_BASE_MAX]; + /* Base node stats */ + struct nss_pppoe_stats_session_stats session_stats[NSS_MAX_PPPOE_DYNAMIC_INTERFACES]; + /* Per session stats */ +}; + +/* + * Global PPPoE stats decleration. 
+ */ +static struct nss_pppoe_stats pppoe_stats; + +/* + * nss_pppoe_stats_read() + * Read pppoe statistics + */ +static ssize_t nss_pppoe_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + + uint32_t max_output_lines = 2 /* header & footer for session stats */ + + NSS_MAX_PPPOE_DYNAMIC_INTERFACES * (NSS_PPPOE_STATS_SESSION_MAX + 2) /*session stats */ + + 2 + NSS_PPPOE_STATS_BASE_MAX + 2; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct net_device *dev; + int id; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + /* + * Base node stats + */ + size_wr += nss_stats_print("pppoe", "pppoe base node stats start" + , NSS_STATS_SINGLE_INSTANCE + , nss_pppoe_strings_base_stats + , pppoe_stats.base_stats + , NSS_PPPOE_STATS_BASE_MAX + , lbuf, size_wr, size_al); + + /* + * Session stats + */ + for (id = 0; id < NSS_MAX_PPPOE_DYNAMIC_INTERFACES; id++) { + if (!pppoe_stats.session_stats[id].valid) { + continue; + } + + dev = dev_get_by_index(&init_net, pppoe_stats.session_stats[id].if_index); + if (unlikely(!dev)) { + continue; + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id, + pppoe_stats.session_stats[id].if_num, dev->name); + dev_put(dev); + + size_wr += nss_stats_print("pppoe", "pppoe session node stats" + , id + , nss_pppoe_strings_session_stats + , pppoe_stats.session_stats[id].stats + , NSS_PPPOE_STATS_SESSION_MAX + , lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(lbuf); + return bytes_read; +} + +/* + * nss_pppoe_stats_pppoe_session_init() + * Initialize the session statistics. 
+ */ +bool nss_pppoe_stats_pppoe_session_init(uint32_t if_num, struct net_device *dev) +{ + int i; + + spin_lock_bh(&nss_pppoe_stats_lock); + for (i = 0; i < NSS_MAX_PPPOE_DYNAMIC_INTERFACES; i++) { + if (!pppoe_stats.session_stats[i].valid) { + pppoe_stats.session_stats[i].valid = true; + pppoe_stats.session_stats[i].if_num = if_num; + pppoe_stats.session_stats[i].if_index = dev->ifindex; + spin_unlock_bh(&nss_pppoe_stats_lock); + return true; + } + } + spin_unlock_bh(&nss_pppoe_stats_lock); + + return false; +} + +/* + * nss_pppoe_stats_pppoe_session_deinit() + * De-initialize the session's stats. + */ +void nss_pppoe_stats_pppoe_session_deinit(uint32_t if_num) +{ + int i; + + spin_lock_bh(&nss_pppoe_stats_lock); + for (i = 0; i < NSS_MAX_PPPOE_DYNAMIC_INTERFACES; i++) { + if (pppoe_stats.session_stats[i].if_num == if_num) { + memset(&pppoe_stats.session_stats[i], 0, sizeof(pppoe_stats.session_stats[i])); + } + } + spin_unlock_bh(&nss_pppoe_stats_lock); +} + +/* + * nss_pppoe_stats_sync + * Per session debug stats for pppoe + */ +void nss_pppoe_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_pppoe_sync_stats_msg *stats_msg, uint16_t if_num) +{ + int i; + spin_lock_bh(&nss_pppoe_stats_lock); + for (i = 0; i < NSS_MAX_PPPOE_DYNAMIC_INTERFACES; i++) { + if (pppoe_stats.session_stats[i].if_num == if_num) { + int j; + + /* + * Sync PPPoE session stats. 
+ */ + pppoe_stats.session_stats[i].stats[NSS_PPPOE_STATS_SESSION_RX_PACKETS] += stats_msg->session_stats.node.rx_packets; + pppoe_stats.session_stats[i].stats[NSS_PPPOE_STATS_SESSION_RX_BYTES] += stats_msg->session_stats.node.rx_bytes; + pppoe_stats.session_stats[i].stats[NSS_PPPOE_STATS_SESSION_TX_PACKETS] += stats_msg->session_stats.node.tx_packets; + pppoe_stats.session_stats[i].stats[NSS_PPPOE_STATS_SESSION_TX_BYTES] += stats_msg->session_stats.node.tx_bytes; + pppoe_stats.session_stats[i].stats[NSS_PPPOE_STATS_SESSION_WRONG_VERSION_OR_TYPE] += stats_msg->session_stats.exception[NSS_PPPOE_SESSION_EXCEPTION_EVENT_WRONG_VERSION_OR_TYPE]; + pppoe_stats.session_stats[i].stats[NSS_PPPOE_STATS_SESSION_WRONG_CODE] += stats_msg->session_stats.exception[NSS_PPPOE_SESSION_EXCEPTION_EVENT_WRONG_CODE]; + pppoe_stats.session_stats[i].stats[NSS_PPPOE_STATS_SESSION_UNSUPPORTED_PPP_PROTOCOL] += stats_msg->session_stats.exception[NSS_PPPOE_SESSION_EXCEPTION_EVENT_UNSUPPORTED_PPP_PROTOCOL]; + + /* + * Sync PPPoE base node stats coming with this session's stats. + */ + pppoe_stats.base_stats[NSS_PPPOE_STATS_BASE_RX_PACKETS] += stats_msg->base_stats.node.rx_packets; + pppoe_stats.base_stats[NSS_PPPOE_STATS_BASE_RX_BYTES] += stats_msg->base_stats.node.rx_bytes; + pppoe_stats.base_stats[NSS_PPPOE_STATS_BASE_TX_PACKETS] += stats_msg->base_stats.node.tx_packets; + pppoe_stats.base_stats[NSS_PPPOE_STATS_BASE_TX_BYTES] += stats_msg->base_stats.node.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + pppoe_stats.base_stats[NSS_PPPOE_STATS_BASE_RX_QUEUE_0_DROPPED + j] += stats_msg->base_stats.node.rx_dropped[j]; + } + + /* + * Sync PPPoE base exception stats coming with this session's stats. 
+ */ + pppoe_stats.base_stats[NSS_PPPOE_STATS_BASE_SHORT_PPPOE_HDR_LENGTH] += stats_msg->base_stats.exception[NSS_PPPOE_BASE_EXCEPTION_EVENT_SHORT_PPPOE_HDR_LENGTH]; + pppoe_stats.base_stats[NSS_PPPOE_STATS_BASE_SHORT_PACKET_LENGTH] += stats_msg->base_stats.exception[NSS_PPPOE_BASE_EXCEPTION_EVENT_SHORT_PACKET_LENGTH]; + pppoe_stats.base_stats[NSS_PPPOE_STATS_BASE_WRONG_VERSION_OR_TYPE] += stats_msg->base_stats.exception[NSS_PPPOE_BASE_EXCEPTION_EVENT_WRONG_VERSION_OR_TYPE]; + pppoe_stats.base_stats[NSS_PPPOE_STATS_BASE_WRONG_CODE] += stats_msg->base_stats.exception[NSS_PPPOE_BASE_EXCEPTION_EVENT_WRONG_CODE]; + pppoe_stats.base_stats[NSS_PPPOE_STATS_BASE_UNSUPPORTED_PPP_PROTOCOL] += stats_msg->base_stats.exception[NSS_PPPOE_BASE_EXCEPTION_EVENT_UNSUPPORTED_PPP_PROTOCOL]; + pppoe_stats.base_stats[NSS_PPPOE_STATS_BASE_DISABLED_BRIDGE_PACKET] += stats_msg->base_stats.exception[NSS_PPPOE_BASE_EXCEPTION_EVENT_DISABLED_BRIDGE_PACKET]; + break; + } + } + spin_unlock_bh(&nss_pppoe_stats_lock); +} + +/* + * nss_pppoe_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(pppoe); + +/* + * nss_pppoe_stats_dentry_create() + * Create PPPoE node statistics debug entry. + */ +void nss_pppoe_stats_dentry_create(void) +{ + nss_stats_create_dentry("pppoe", &nss_pppoe_stats_ops); +} + +/* + * nss_pppoe_stats_notify() + * Sends notifications to the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. 
+ */ +void nss_pppoe_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_pppoe_stats_notification nss_pppoe_stats; + int id; + + for (id = 0; id < NSS_MAX_PPPOE_DYNAMIC_INTERFACES; id++) { + if (pppoe_stats.session_stats[id].if_num == if_num) { + memcpy(&nss_pppoe_stats.session_stats, &pppoe_stats.session_stats[id].stats, sizeof(nss_pppoe_stats.session_stats)); + } + } + memcpy(&nss_pppoe_stats.base_stats, &pppoe_stats.base_stats, sizeof(nss_pppoe_stats.base_stats)); + nss_pppoe_stats.core_id = nss_ctx->id; + nss_pppoe_stats.if_num = if_num; + atomic_notifier_call_chain(&nss_pppoe_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&nss_pppoe_stats); +} + +/* + * nss_pppoe_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_pppoe_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_pppoe_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_pppoe_stats_register_notifier); + +/* + * nss_pppoe_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_pppoe_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_pppoe_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_pppoe_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_stats.h new file mode 100644 index 000000000..41c726ed1 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_stats.h @@ -0,0 +1,28 @@ +/* + ****************************************************************************** + * Copyright (c) 2017,2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_PPPOE_STATS_H +#define __NSS_PPPOE_STATS_H + +/* + * PPPoE statistics APIs + */ +extern void nss_pppoe_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_pppoe_stats_dentry_create(void); +extern void nss_pppoe_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_pppoe_sync_stats_msg *stats_msg, uint16_t if_num); +extern bool nss_pppoe_stats_pppoe_session_init(uint32_t if_num, struct net_device *dev); +extern void nss_pppoe_stats_pppoe_session_deinit(uint32_t if_num); +#endif /* __NSS_PPPOE_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_strings.c new file mode 100644 index 000000000..953945bc6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_strings.c @@ -0,0 +1,121 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" + +/* + * nss_pppoe_strings_session_stats + * PPPoE session stats strings. + */ +struct nss_stats_info nss_pppoe_strings_session_stats[NSS_PPPOE_STATS_SESSION_MAX] = { + {"rx_pkts" , NSS_STATS_TYPE_COMMON}, + {"rx_byts" , NSS_STATS_TYPE_COMMON}, + {"tx_pkts" , NSS_STATS_TYPE_COMMON}, + {"tx_byts" , NSS_STATS_TYPE_COMMON}, + {"wrong_version_or_type" , NSS_STATS_TYPE_EXCEPTION}, + {"wrong_code" , NSS_STATS_TYPE_EXCEPTION}, + {"unsupported_ppp_protocol" , NSS_STATS_TYPE_EXCEPTION} +}; + +/* + * nss_pppoe_strings_base_stats + * PPPoE base node stats strings. + */ +struct nss_stats_info nss_pppoe_strings_base_stats[NSS_PPPOE_STATS_BASE_MAX] = { + {"rx_packets" , NSS_STATS_TYPE_COMMON}, + {"rx_bytes" , NSS_STATS_TYPE_COMMON}, + {"tx_packets" , NSS_STATS_TYPE_COMMON}, + {"tx_bytes" , NSS_STATS_TYPE_COMMON}, + {"rx_dropped[0]" , NSS_STATS_TYPE_DROP}, + {"rx_dropped[1]" , NSS_STATS_TYPE_DROP}, + {"rx_dropped[2]" , NSS_STATS_TYPE_DROP}, + {"rx_dropped[3]" , NSS_STATS_TYPE_DROP}, + {"short_pppoe_hdr_length" , NSS_STATS_TYPE_EXCEPTION}, + {"short_packet_length" , NSS_STATS_TYPE_EXCEPTION}, + {"wrong_version_or_type" , NSS_STATS_TYPE_EXCEPTION}, + {"wrong_code" , NSS_STATS_TYPE_EXCEPTION}, + {"unsupported_ppp_protocol" , NSS_STATS_TYPE_EXCEPTION}, + {"disabled_bridge_packet" , NSS_STATS_TYPE_EXCEPTION} +}; + +/* + * nss_pppoe_isession_stats_strings_read() + * Read PPPoE session statistics names. 
+ */ +static ssize_t nss_pppoe_session_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_pppoe_strings_session_stats, NSS_PPPOE_STATS_SESSION_MAX); +} + +/* + * nss_pppoe_base_stats_strings_read() + * Read PPPoE base statistics names. + */ +static ssize_t nss_pppoe_base_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_pppoe_strings_base_stats, NSS_PPPOE_STATS_BASE_MAX); +} + +/* + * nss_pppoe_session_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(pppoe_session_stats); + +/* + * nss_pppoe_base_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(pppoe_base_stats); + +/* + * nss_pppoe_strings_dentry_create() + * Create PPPoE statistics strings debug entry. + */ +void nss_pppoe_strings_dentry_create(void) +{ + struct dentry *pppoe_d = NULL; + struct dentry *pppoe_session_stats_d = NULL; + struct dentry *pppoe_base_stats_d = NULL; + + if (!nss_top_main.strings_dentry) { + nss_warning("qca-nss-drv/strings is not present"); + return; + } + + pppoe_d = debugfs_create_dir("pppoe", nss_top_main.strings_dentry); + if (!pppoe_d) { + nss_warning("Failed to create qca-nss-drv/strings/pppoe directory"); + return; + } + + pppoe_session_stats_d = debugfs_create_file("session_stats_str", 0400, pppoe_d, &nss_top_main, &nss_pppoe_session_stats_strings_ops); + if (!pppoe_session_stats_d) { + nss_warning("Failed to create qca-nss-drv/stats/pppoe/session_stats_str file"); + debugfs_remove_recursive(pppoe_d); + return; + } + + pppoe_base_stats_d = debugfs_create_file("base_stats_str", 0400, pppoe_d, &nss_top_main, &nss_pppoe_base_stats_strings_ops); + if (!pppoe_base_stats_d) { + nss_warning("Failed to create qca-nss-drv/stats/pppoe/base_stats_str file"); + debugfs_remove_recursive(pppoe_d); + return; + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_strings.h 
b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_strings.h new file mode 100644 index 000000000..8cf9393a1 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pppoe_strings.h @@ -0,0 +1,26 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_PPPOE_STRINGS_H +#define __NSS_PPPOE_STRINGS_H + +extern struct nss_stats_info nss_pppoe_strings_session_stats[NSS_PPPOE_STATS_SESSION_MAX]; +extern struct nss_stats_info nss_pppoe_strings_base_stats[NSS_PPPOE_STATS_BASE_MAX]; +extern void nss_pppoe_strings_dentry_create(void); + +#endif /* __NSS_PPPOE_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pptp.c b/feeds/ipq807x/qca-nss-drv/src/nss_pptp.c new file mode 100644 index 000000000..73cf43460 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pptp.c @@ -0,0 +1,472 @@ +/* + ************************************************************************** + * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include +#include "nss_tx_rx_common.h" +#include "nss_pptp_stats.h" +#include "nss_pptp_log.h" +#include "nss_pptp_strings.h" + +#define NSS_PPTP_TX_TIMEOUT 3000 /* 3 Seconds */ + +/* + * Data structures to store pptp nss debug stats + */ +static DEFINE_SPINLOCK(nss_pptp_session_debug_stats_lock); +static struct nss_pptp_stats_session_debug nss_pptp_session_debug_stats[NSS_MAX_PPTP_DYNAMIC_INTERFACES]; + +/* + * Private data structure + */ +static struct nss_pptp_pvt { + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; +} pptp_pvt; + +/* + * nss_pptp_session_debug_stats_sync + * Per session debug stats for pptp + */ +void nss_pptp_session_debug_stats_sync(struct nss_ctx_instance *nss_ctx, + struct nss_pptp_sync_session_stats_msg *stats_msg, uint16_t if_num) +{ + int i, j, if_type; + + if_type = nss_dynamic_interface_get_type(nss_pptp_get_context(), if_num); + spin_lock_bh(&nss_pptp_session_debug_stats_lock); + for (i = 0; i < NSS_MAX_PPTP_DYNAMIC_INTERFACES; i++) { + if (nss_pptp_session_debug_stats[i].if_num == if_num) { + break; + } + } + + if (i == NSS_MAX_PPTP_DYNAMIC_INTERFACES) { + 
spin_unlock_bh(&nss_pptp_session_debug_stats_lock); + return; + } + + if (if_type == NSS_DYNAMIC_INTERFACE_TYPE_PPTP_OUTER) { + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_DECAP_RX_PACKETS] += + stats_msg->node_stats.rx_packets; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_DECAP_RX_BYTES] += + stats_msg->node_stats.rx_bytes; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_DECAP_TX_PACKETS] += + stats_msg->node_stats.tx_packets; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_DECAP_TX_BYTES] += + stats_msg->node_stats.tx_bytes; + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_DECAP_RX_QUEUE_0_DROP + j] += + stats_msg->node_stats.rx_dropped[j]; + } + } else { + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_ENCAP_RX_PACKETS] += + stats_msg->node_stats.rx_packets; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_ENCAP_RX_BYTES] += + stats_msg->node_stats.rx_bytes; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_ENCAP_TX_PACKETS] += + stats_msg->node_stats.tx_packets; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_ENCAP_TX_BYTES] += + stats_msg->node_stats.tx_bytes; + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_ENCAP_RX_QUEUE_0_DROP + j] += + stats_msg->node_stats.rx_dropped[j]; + } + } + + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_SESSION_ENCAP_HEADROOM_ERR] += + stats_msg->exception_events[PPTP_EXCEPTION_EVENT_ENCAP_HEADROOM_ERR]; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_SESSION_ENCAP_SMALL_SIZE] += + stats_msg->exception_events[PPTP_EXCEPTION_EVENT_ENCAP_SMALL_SIZE]; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_SESSION_ENCAP_PNODE_ENQUEUE_FAIL] += + stats_msg->exception_events[PPTP_EXCEPTION_EVENT_ENCAP_PNODE_ENQUEUE_FAIL]; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_SESSION_DECAP_NO_SEQ_NOR_ACK] += + 
stats_msg->exception_events[PPTP_EXCEPTION_EVENT_DECAP_NO_SEQ_NOR_ACK]; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_SESSION_DECAP_INVAL_GRE_FLAGS] += + stats_msg->exception_events[PPTP_EXCEPTION_EVENT_DECAP_INVAL_GRE_FLAGS]; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_SESSION_DECAP_INVAL_GRE_PROTO] += + stats_msg->exception_events[PPTP_EXCEPTION_EVENT_DECAP_INVAL_GRE_PROTO]; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_SESSION_DECAP_WRONG_SEQ] += + stats_msg->exception_events[PPTP_EXCEPTION_EVENT_DECAP_WRONG_SEQ]; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_SESSION_DECAP_INVAL_PPP_HDR] += + stats_msg->exception_events[PPTP_EXCEPTION_EVENT_DECAP_INVAL_PPP_HDR]; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_SESSION_DECAP_PPP_LCP] += + stats_msg->exception_events[PPTP_EXCEPTION_EVENT_DECAP_PPP_LCP]; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_SESSION_DECAP_UNSUPPORTED_PPP_PROTO] += + stats_msg->exception_events[PPTP_EXCEPTION_EVENT_DECAP_UNSUPPORTED_PPP_PROTO]; + nss_pptp_session_debug_stats[i].stats[NSS_PPTP_STATS_SESSION_DECAP_PNODE_ENQUEUE_FAIL] += + stats_msg->exception_events[PPTP_EXCEPTION_EVENT_DECAP_PNODE_ENQUEUE_FAIL]; + + spin_unlock_bh(&nss_pptp_session_debug_stats_lock); +} + +/* + * nss_pptp_global_session_stats_get() + * Get session pptp statitics. + */ +void nss_pptp_session_debug_stats_get(void *stats_mem) +{ + struct nss_pptp_stats_session_debug *stats = (struct nss_pptp_stats_session_debug *)stats_mem; + int i; + + if (!stats) { + nss_warning("No memory to copy pptp session stats"); + return; + } + + spin_lock_bh(&nss_pptp_session_debug_stats_lock); + for (i = 0; i < NSS_MAX_PPTP_DYNAMIC_INTERFACES; i++) { + if (nss_pptp_session_debug_stats[i].valid) { + memcpy(stats, &nss_pptp_session_debug_stats[i], sizeof(struct nss_pptp_stats_session_debug)); + stats++; + } + } + spin_unlock_bh(&nss_pptp_session_debug_stats_lock); +} + +/* + * nss_pptp_verify_if_num() + * Verify if_num passed to us. 
+ */ +static bool nss_pptp_verify_if_num(uint32_t if_num) +{ + uint32_t if_type; + + if (nss_is_dynamic_interface(if_num) == false) { + return false; + } + + if_type = nss_dynamic_interface_get_type(nss_pptp_get_context(), if_num); + switch(if_type) { + case NSS_DYNAMIC_INTERFACE_TYPE_PPTP_INNER: + case NSS_DYNAMIC_INTERFACE_TYPE_PPTP_OUTER: + case NSS_DYNAMIC_INTERFACE_TYPE_PPTP_HOST_INNER: + return true; + } + + return false; +} + +/* + * nss_pptp_handler() + * Handle NSS -> HLOS messages for pptp tunnel + */ +static void nss_pptp_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_pptp_msg *ntm = (struct nss_pptp_msg *)ncm; + void *ctx; + + nss_pptp_msg_callback_t cb; + + BUG_ON(!nss_pptp_verify_if_num(ncm->interface)); + + /* + * Trace Messages + */ + nss_pptp_log_rx_msg(ntm); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_PPTP_MSG_MAX) { + nss_warning("%px: received invalid message %d for PPTP interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_pptp_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + switch (ntm->cm.type) { + + case NSS_PPTP_MSG_SYNC_STATS: + /* + * Update session debug stats in stats msg and send statistics notifications to the registered modules. + */ + nss_pptp_session_debug_stats_sync(nss_ctx, &ntm->msg.stats, ncm->interface); + nss_pptp_stats_notify(nss_ctx, ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages, pptp sends all notify messages + * to the same callback/app_data. 
+ */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->pptp_msg_callback; + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].app_data; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_pptp_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + /* + * call pptp tunnel callback + */ + if (!cb) { + nss_warning("%px: Event received for pptp tunnel interface %d before registration", nss_ctx, ncm->interface); + return; + } + + cb(ctx, ntm); +} + +/* + * nss_pptp_tx_msg() + * Transmit a pptp message to NSS firmware + */ +static nss_tx_status_t nss_pptp_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_pptp_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace Messages + */ + nss_pptp_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (!nss_is_dynamic_interface(ncm->interface)) { + nss_warning("%px: tx request for non dynamic interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_PPTP_MSG_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_pptp_sync_msg_callback() + * Callback to handle the completion of NSS->HLOS messages. 
+ */ +static void nss_pptp_sync_msg_callback(void *app_data, struct nss_pptp_msg *nim) +{ + nss_pptp_msg_callback_t callback = (nss_pptp_msg_callback_t)pptp_pvt.cb; + void *data = pptp_pvt.app_data; + + pptp_pvt.cb = NULL; + pptp_pvt.app_data = NULL; + + if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("pptp Error response %d\n", nim->cm.response); + + pptp_pvt.response = NSS_TX_FAILURE; + if (callback) { + callback(data, nim); + } + + complete(&pptp_pvt.complete); + return; + } + + pptp_pvt.response = NSS_TX_SUCCESS; + if (callback) { + callback(data, nim); + } + + complete(&pptp_pvt.complete); +} + +/* + * nss_pptp_tx_msg() + * Transmit a pptp message to NSS firmware synchronously. + */ +nss_tx_status_t nss_pptp_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_pptp_msg *msg) +{ + + nss_tx_status_t status; + int ret = 0; + + down(&pptp_pvt.sem); + pptp_pvt.cb = (void *)msg->cm.cb; + pptp_pvt.app_data = (void *)msg->cm.app_data; + + msg->cm.cb = (nss_ptr_t)nss_pptp_sync_msg_callback; + msg->cm.app_data = (nss_ptr_t)NULL; + + status = nss_pptp_tx_msg(nss_ctx, msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: pptp_tx_msg failed\n", nss_ctx); + up(&pptp_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&pptp_pvt.complete, msecs_to_jiffies(NSS_PPTP_TX_TIMEOUT)); + + if (!ret) { + nss_warning("%px: PPTP msg tx failed due to timeout\n", nss_ctx); + pptp_pvt.response = NSS_TX_FAILURE; + } + + status = pptp_pvt.response; + up(&pptp_pvt.sem); + return status; +} + +/* + * nss_pptp_tx_buf() + * Send packet to pptp interface owned by NSS + */ +nss_tx_status_t nss_pptp_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, struct sk_buff *skb) +{ + nss_trace("%px: pptp If Tx packet, id:%d, data=%px", nss_ctx, if_num, skb->data); + + return nss_core_send_packet(nss_ctx, skb, if_num, H2N_BIT_FLAG_BUFFER_REUSABLE); +} + +/* + * nss_register_pptp_if() + */ +struct nss_ctx_instance *nss_register_pptp_if(uint32_t if_num, + 
uint32_t type, + nss_pptp_callback_t pptp_data_callback, + nss_pptp_msg_callback_t notification_callback, + struct net_device *netdev, + uint32_t features, + void *app_ctx) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.pptp_handler_id]; + int i = 0; + + nss_assert(nss_ctx); + nss_assert(nss_pptp_verify_if_num(if_num)); + + nss_ctx->subsys_dp_register[if_num].type = type; + + nss_core_register_subsys_dp(nss_ctx, if_num, pptp_data_callback, NULL, app_ctx, netdev, features); + + nss_top_main.pptp_msg_callback = notification_callback; + + nss_core_register_handler(nss_ctx, if_num, nss_pptp_handler, NULL); + + spin_lock_bh(&nss_pptp_session_debug_stats_lock); + for (i = 0; i < NSS_MAX_PPTP_DYNAMIC_INTERFACES; i++) { + if (!nss_pptp_session_debug_stats[i].valid) { + nss_pptp_session_debug_stats[i].valid = true; + nss_pptp_session_debug_stats[i].if_num = if_num; + nss_pptp_session_debug_stats[i].if_index = netdev->ifindex; + break; + } + } + spin_unlock_bh(&nss_pptp_session_debug_stats_lock); + + return nss_ctx; +} + +/* + * nss_unregister_pptp_if() + */ +void nss_unregister_pptp_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.pptp_handler_id]; + int i; + int j; + + nss_assert(nss_ctx); + nss_assert(nss_is_dynamic_interface(if_num)); + + spin_lock_bh(&nss_pptp_session_debug_stats_lock); + for (i = 0; i < NSS_MAX_PPTP_DYNAMIC_INTERFACES; i++) { + if (nss_pptp_session_debug_stats[i].valid == true && + nss_pptp_session_debug_stats[i].if_num == if_num) { + nss_pptp_session_debug_stats[i].valid = false; + nss_pptp_session_debug_stats[i].if_num = 0; + nss_pptp_session_debug_stats[i].if_index = 0; + for (j = 0; j < NSS_PPTP_STATS_SESSION_MAX; j++) + nss_pptp_session_debug_stats[i].stats[j] = 0; + break; + } + } + spin_unlock_bh(&nss_pptp_session_debug_stats_lock); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_top_main.pptp_msg_callback = NULL; + 
+ nss_core_unregister_handler(nss_ctx, if_num); +} + +/* + * nss_get_pptp_context() + */ +struct nss_ctx_instance *nss_pptp_get_context() +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.pptp_handler_id]; +} + +/* + * nss_pptp_msg_init() + * Initialize nss_pptp msg. + */ +void nss_pptp_msg_init(struct nss_pptp_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} + +/* nss_pptp_register_handler() + * debugfs stats msg handler received on static pptp interface + */ +void nss_pptp_register_handler(void) +{ + int i; + + nss_info("nss_pptp_register_handler"); + nss_core_register_handler(nss_pptp_get_context(), NSS_PPTP_INTERFACE, nss_pptp_handler, NULL); + + spin_lock_bh(&nss_pptp_session_debug_stats_lock); + for (i = 0; i < NSS_MAX_PPTP_DYNAMIC_INTERFACES; i++) { + nss_pptp_session_debug_stats[i].valid = false; + nss_pptp_session_debug_stats[i].if_num = 0; + nss_pptp_session_debug_stats[i].if_index = 0; + } + spin_unlock_bh(&nss_pptp_session_debug_stats_lock); + + sema_init(&pptp_pvt.sem, 1); + init_completion(&pptp_pvt.complete); + + nss_pptp_stats_dentry_create(); + nss_pptp_strings_dentry_create(); +} + +EXPORT_SYMBOL(nss_pptp_get_context); +EXPORT_SYMBOL(nss_pptp_tx_msg_sync); +EXPORT_SYMBOL(nss_pptp_tx_buf); +EXPORT_SYMBOL(nss_unregister_pptp_if); +EXPORT_SYMBOL(nss_pptp_msg_init); +EXPORT_SYMBOL(nss_register_pptp_if); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pptp_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_log.c new file mode 100644 index 000000000..136a3c863 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_log.c @@ -0,0 +1,129 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_pptp_log.c + * NSS PPTP logger file. + */ + +#include "nss_core.h" + +/* + * nss_pptp_log_message_types_str + * NSS PPTP message strings + */ +static int8_t *nss_pptp_log_message_types_str[NSS_PPTP_MSG_MAX] __maybe_unused = { + "PPTP Session Configure", + "PPTP Session Deconfigure", + "PPTP Stats", +}; + +/* + * nss_pptp_log_configure_msg() + * Log NSS PPTP Session Configure. + */ +static void nss_pptp_log_configure_msg(struct nss_pptp_msg *npm) +{ + struct nss_pptp_session_configure_msg *npcm __maybe_unused = &npm->msg.session_configure_msg; + nss_trace("%px: NSS PPTP Session Configure message\n" + "PPTP Source Call ID: %x\n" + "PPTP Destination Call ID: %x\n" + "PPTP Source IP: %pI4\n" + "PPTP Destination IP: %pI4\n", + npcm, npcm->src_call_id, + npcm->dst_call_id, &npcm->sip, + &npcm->dip); +} + +/* + * nss_pptp_log_deconfigure_msg() + * Log NSS PPTP Session Deconfigure. 
+ */ +static void nss_pptp_log_deconfigure_msg(struct nss_pptp_msg *npm) +{ + struct nss_pptp_session_deconfigure_msg *npdm __maybe_unused = &npm->msg.session_deconfigure_msg; + nss_trace("%px: NSS PPTP Session Configure message \n" + "PPTP Source Call ID: %x\n", + npdm, npdm->src_call_id); +} + +/* + * nss_pptp_log_verbose() + * Log message contents. + */ +static void nss_pptp_log_verbose(struct nss_pptp_msg *npm) +{ + switch (npm->cm.type) { + case NSS_PPTP_MSG_SESSION_CONFIGURE: + nss_pptp_log_configure_msg(npm); + break; + + case NSS_PPTP_MSG_SESSION_DECONFIGURE: + nss_pptp_log_deconfigure_msg(npm); + break; + + case NSS_PPTP_MSG_SYNC_STATS: + /* + * No log for valid stats message. + */ + break; + + default: + nss_trace("%px: Invalid message type\n", npm); + break; + } +} + +/* + * nss_pptp_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_pptp_log_tx_msg(struct nss_pptp_msg *npm) +{ + if (npm->cm.type >= NSS_PPTP_MSG_MAX) { + nss_warning("%px: Invalid message type\n", npm); + return; + } + + nss_info("%px: type[%d]:%s\n", npm, npm->cm.type, nss_pptp_log_message_types_str[npm->cm.type]); + nss_pptp_log_verbose(npm); +} + +/* + * nss_pptp_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_pptp_log_rx_msg(struct nss_pptp_msg *npm) +{ + if (npm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", npm); + return; + } + + if (npm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (npm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", npm, npm->cm.type, + nss_pptp_log_message_types_str[npm->cm.type], + npm->cm.response, nss_cmn_response_str[npm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + npm, npm->cm.type, nss_pptp_log_message_types_str[npm->cm.type], + npm->cm.response, nss_cmn_response_str[npm->cm.response]); + +verbose: + nss_pptp_log_verbose(npm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pptp_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_log.h new file mode 100644 index 000000000..bb800d5ed --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef __NSS_PPTP_LOG_H +#define __NSS_PPTP_LOG_H + +/* + * nss_pptp.h + * NSS PPTP header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_pptp_log_tx_msg + * Logs a pptp message that is sent to the NSS firmware. + */ +void nss_pptp_log_tx_msg(struct nss_pptp_msg *ntm); + +/* + * nss_pptp_log_rx_msg + * Logs a pptp message that is received from the NSS firmware. + */ +void nss_pptp_log_rx_msg(struct nss_pptp_msg *ntm); + +#endif /* __NSS_PPTP_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pptp_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_stats.c new file mode 100644 index 000000000..afbe00ee0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_stats.c @@ -0,0 +1,154 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_pptp_stats.h" +#include "nss_pptp_strings.h" + +/* + * Declare atomic notifier data structure for statistics. 
+ */ +ATOMIC_NOTIFIER_HEAD(nss_pptp_stats_notifier); + +struct nss_pptp_stats_session_debug pptp_session_stats[NSS_MAX_PPTP_DYNAMIC_INTERFACES]; + +/* + * nss_pptp_stats_read() + * Read pptp statistics + */ +static ssize_t nss_pptp_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + + uint32_t max_output_lines = 2 /* header & footer for session stats */ + + NSS_MAX_PPTP_DYNAMIC_INTERFACES * (NSS_PPTP_STATS_SESSION_MAX + 2) /*session stats */ + + 2; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines ; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct net_device *dev; + int id, i; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + memset(&pptp_session_stats, 0, sizeof(struct nss_pptp_stats_session_debug) * NSS_MAX_PPTP_DYNAMIC_INTERFACES); + + /* + * Get all stats + */ + nss_pptp_session_debug_stats_get((void *)&pptp_session_stats); + + /* + * Session stats + */ + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npptp session stats start:\n\n"); + for (id = 0; id < NSS_MAX_PPTP_DYNAMIC_INTERFACES; id++) { + + if (!pptp_session_stats[id].valid) { + break; + } + + dev = dev_get_by_index(&init_net, pptp_session_stats[id].if_index); + if (likely(dev)) { + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. nss interface id=%d, netdevice=%s\n", id, + pptp_session_stats[id].if_num, dev->name); + dev_put(dev); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "%d. 
nss interface id=%d\n", id, + pptp_session_stats[id].if_num); + } + + for (i = 0; i < NSS_PPTP_STATS_SESSION_MAX; i++) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "\t%s = %llu\n", nss_pptp_strings_session_debug_stats[i].stats_name, + pptp_session_stats[id].stats[i]); + } + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\npptp session stats end\n"); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(lbuf); + return bytes_read; +} + +/* + * nss_pptp_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(pptp); + +/* + * nss_pptp_stats_dentry_create() + * Create PPTP node statistics debug entry. + */ +void nss_pptp_stats_dentry_create(void) +{ + nss_stats_create_dentry("pptp", &nss_pptp_stats_ops); +} + +/* + * nss_pptp_stats_notify() + * Sends notifications to the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_pptp_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_pptp_stats_notification pptp_stats; + int id; + + memset(&pptp_session_stats, 0, sizeof(pptp_session_stats)); + + /* + * Get all stats + */ + nss_pptp_session_debug_stats_get((void *)&pptp_session_stats); + + for (id = 0; id < NSS_MAX_PPTP_DYNAMIC_INTERFACES; id++) { + if (pptp_session_stats[id].if_num == if_num) { + memcpy(&pptp_stats.stats, &pptp_session_stats[id].stats, sizeof(pptp_stats.stats)); + } + } + pptp_stats.if_type = nss_dynamic_interface_get_type(nss_ctx, if_num); + pptp_stats.core_id = nss_ctx->id; + pptp_stats.if_num = if_num; + atomic_notifier_call_chain(&nss_pptp_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&pptp_stats); +} + +/* + * nss_pptp_stats_register_notifier() + * Registers statistics notifier. 
+ */ +int nss_pptp_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_pptp_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_pptp_stats_register_notifier); + +/* + * nss_pptp_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_pptp_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_pptp_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_pptp_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pptp_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_stats.h new file mode 100644 index 000000000..11c016617 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_stats.h @@ -0,0 +1,36 @@ +/* + ****************************************************************************** + * Copyright (c) 2016-2017,2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ * **************************************************************************** + */ + +#ifndef __NSS_PPTP_STATS_H +#define __NSS_PPTP_STATS_H + +/* + * NSS PPTP node statistics session + */ +struct nss_pptp_stats_session_debug { + uint64_t stats[NSS_PPTP_STATS_SESSION_MAX]; + int32_t if_index; + uint32_t if_num; /* nss interface number */ + bool valid; +}; + +/* + * NSS PPTP statistics APIs + */ +extern void nss_pptp_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_pptp_stats_dentry_create(void); + +#endif /* __NSS_PPTP_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pptp_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_strings.c new file mode 100644 index 000000000..966ec07f3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_strings.c @@ -0,0 +1,79 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" + +/* + * nss_pptp_strings_session_debug_stats + * PPTP statistics strings for NSS session statistics. 
+ */ +struct nss_stats_info nss_pptp_strings_session_debug_stats[NSS_PPTP_STATS_SESSION_MAX] = { + {"ENCAP_RX_PACKETS", NSS_STATS_TYPE_COMMON}, + {"ENCAP_RX_BYTES", NSS_STATS_TYPE_COMMON}, + {"ENCAP_TX_PACKETS", NSS_STATS_TYPE_COMMON}, + {"ENCAP_TX_BYTES", NSS_STATS_TYPE_COMMON}, + {"ENCAP_RX_QUEUE_0_DROP", NSS_STATS_TYPE_DROP}, + {"ENCAP_RX_QUEUE_1_DROP", NSS_STATS_TYPE_DROP}, + {"ENCAP_RX_QUEUE_2_DROP", NSS_STATS_TYPE_DROP}, + {"ENCAP_RX_QUEUE_3_DROP", NSS_STATS_TYPE_DROP}, + {"DECAP_RX_PACKETS", NSS_STATS_TYPE_COMMON}, + {"DECAP_RX_BYTES", NSS_STATS_TYPE_COMMON}, + {"DECAP_TX_PACKETS", NSS_STATS_TYPE_COMMON}, + {"DECAP_TX_BYTES", NSS_STATS_TYPE_COMMON}, + {"DECAP_RX_QUEUE_0_DROP", NSS_STATS_TYPE_DROP}, + {"DECAP_RX_QUEUE_1_DROP", NSS_STATS_TYPE_DROP}, + {"DECAP_RX_QUEUE_2_DROP", NSS_STATS_TYPE_DROP}, + {"DECAP_RX_QUEUE_3_DROP", NSS_STATS_TYPE_DROP}, + {"ENCAP_HEADROOM_ERR", NSS_STATS_TYPE_ERROR}, + {"ENCAP_SMALL_SIZE", NSS_STATS_TYPE_SPECIAL}, + {"ENCAP_PNODE_ENQUEUE_FAIL", NSS_STATS_TYPE_ERROR}, + {"DECAP_NO_SEQ_NOR_ACK", NSS_STATS_TYPE_ERROR}, + {"DECAP_INVAL_GRE_FLAGS", NSS_STATS_TYPE_ERROR}, + {"DECAP_INVAL_GRE_PROTO", NSS_STATS_TYPE_ERROR}, + {"DECAP_WRONG_SEQ", NSS_STATS_TYPE_ERROR}, + {"DECAP_INVAL_PPP_HDR", NSS_STATS_TYPE_ERROR}, + {"DECAP_PPP_LCP", NSS_STATS_TYPE_SPECIAL}, + {"DECAP_UNSUPPORTED_PPP_PROTO", NSS_STATS_TYPE_ERROR}, + {"DECAP_PNODE_ENQUEUE_FAIL", NSS_STATS_TYPE_ERROR} +}; + +/* + * nss_pptp_strings_read() + * Read PPTP node statistics names. + */ +static ssize_t nss_pptp_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_pptp_strings_session_debug_stats, NSS_PPTP_STATS_SESSION_MAX); +} + +/* + * nss_pptp_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(pptp); + +/* + * nss_pptp_strings_dentry_create() + * Create PPTP statistics strings debug entry. 
+ */ +void nss_pptp_strings_dentry_create(void) +{ + nss_strings_create_dentry("pptp", &nss_pptp_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pptp_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_strings.h new file mode 100644 index 000000000..788a387c6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pptp_strings.h @@ -0,0 +1,25 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_PPTP_STRINGS_H +#define __NSS_PPTP_STRINGS_H + +extern struct nss_stats_info nss_pptp_strings_session_debug_stats[NSS_PPTP_STATS_SESSION_MAX]; +extern void nss_pptp_strings_dentry_create(void); + +#endif /* __NSS_PPTP_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_profiler.c b/feeds/ipq807x/qca-nss-drv/src/nss_profiler.c new file mode 100755 index 000000000..5717ac365 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_profiler.c @@ -0,0 +1,254 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_profiler.c + * NSS profiler APIs + */ + +#include "nss_tx_rx_common.h" + +/* + * nss_profiler_rx_msg_handler() + * Handle profiler information. + */ +static void nss_profiler_rx_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *app) +{ + struct nss_profiler_msg *pm = (struct nss_profiler_msg*)ncm; + void *ctx = nss_ctx->nss_top->profiler_ctx[nss_ctx->id]; + nss_profiler_callback_t cb = nss_ctx->nss_top->profiler_callback[nss_ctx->id]; + + if (ncm->type >= NSS_PROFILER_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return; + } + + if (ncm->type <= NSS_PROFILER_FLOWCTRL_MSG) { + if (ncm->len > sizeof(pm->payload.pcmdp)) { + nss_warning("%px: reply for cmd %d size is wrong %d : %d\n", nss_ctx, ncm->type, ncm->len, ncm->interface); + return; + } + } else if (ncm->type <= NSS_PROFILER_DEBUG_REPLY_MSG) { + if (ncm->len > sizeof(pm->payload.pdm)) { + nss_warning("%px: reply for debug %d is too big %d\n", nss_ctx, ncm->type, ncm->len); + return; + } + } else if (ncm->type <= NSS_PROFILER_COUNTERS_MSG) { + if (ncm->len < (sizeof(pm->payload.pcmdp) - (PROFILE_MAX_APP_COUNTERS - 
pm->payload.pcmdp.num_counters) * sizeof(pm->payload.pcmdp.counters[0])) || ncm->len > sizeof(pm->payload.pcmdp)) { + nss_warning("%px: %d params data is too big %d : %d\n", nss_ctx, ncm->type, ncm->len, ncm->interface); + return; + } + } + + /* + * status per request callback + */ + if (ncm->response != NSS_CMN_RESPONSE_NOTIFY && ncm->cb) { + nss_info("%px: reply CB %px for %d %d\n", nss_ctx, (void *)ncm->cb, ncm->type, ncm->response); + cb = (nss_profiler_callback_t)ncm->cb; + } + + /* + * sample related callback + */ + if (!cb || !ctx) { + nss_warning("%px: Event received for profiler interface before registration", nss_ctx); + return; + } + + cb(ctx, (struct nss_profiler_msg *)ncm); +} + +/* + * nss_tx_profiler_if_buf() + * NSS profiler Tx API + */ +nss_tx_status_t nss_profiler_if_tx_buf(void *ctx, void *buf, uint32_t len, + void *cb, void *app_data) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx; + struct nss_profiler_msg *npm; + struct nss_profiler_data_msg *pdm = (struct nss_profiler_data_msg *)buf; + nss_tx_status_t ret; + + nss_trace("%px: Profiler If Tx, buf=%px", nss_ctx, buf); + + if (sizeof(npm->payload) < len) { + nss_warning("%px: (%u)Bad message length(%u)", nss_ctx, NSS_PROFILER_INTERFACE, len); + return NSS_TX_FAILURE_TOO_LARGE; + } + + if (NSS_NBUF_PAYLOAD_SIZE < (len + sizeof(npm->cm))) { + nss_warning("%px: (%u)Message length(%u) is larger than payload size (%u)", + nss_ctx, NSS_PROFILER_INTERFACE, (uint32_t)(len + sizeof(npm->cm)), NSS_NBUF_PAYLOAD_SIZE); + return NSS_TX_FAILURE_TOO_LARGE; + } + + npm = kzalloc(sizeof(*npm), GFP_KERNEL); + if (!npm) { + nss_warning("%px: Failed to allocate memory for message\n", nss_ctx); + return NSS_TX_FAILURE; + } + + memcpy(&npm->payload, pdm, len); + nss_profiler_msg_init(npm, NSS_PROFILER_INTERFACE, pdm->hd_magic & 0xFF, len, + cb, app_data); + + ret = nss_core_send_cmd(nss_ctx, npm, sizeof(npm->cm) + len, NSS_NBUF_PAYLOAD_SIZE); + kfree(npm); + return ret; +} 
+EXPORT_SYMBOL(nss_profiler_if_tx_buf); + +/* + * nss_profiler_alloc_dma() + * Allocate a DMA for profiler. + */ +void *nss_profiler_alloc_dma(struct nss_ctx_instance *nss_ctx, struct nss_profile_sdma_producer **dma_p) +{ + int size; + void *kaddr; + struct nss_profile_sdma_producer *dma; + struct nss_profile_sdma_ctrl *ctrl = (struct nss_profile_sdma_ctrl *)nss_ctx->meminfo_ctx.sdma_ctrl; + if (!ctrl) + return NULL; + + dma = ctrl->producer; + *dma_p = dma; + size = dma->num_bufs * dma->buf_size; + kaddr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); + + if (kaddr) { + dma->desc_ring = dma_map_single(nss_ctx->dev, kaddr, size, DMA_FROM_DEVICE); + NSS_CORE_DSB(); + } + ctrl->consumer[0].ring.kp = kaddr; + return kaddr; +} +EXPORT_SYMBOL(nss_profiler_alloc_dma); + +/* + * nss_profiler_release_dma() + * Free profiler DMA. + */ +void nss_profiler_release_dma(struct nss_ctx_instance *nss_ctx) +{ + struct nss_profile_sdma_ctrl *ctrl; + if (!nss_ctx) + return; + + ctrl = nss_ctx->meminfo_ctx.sdma_ctrl; + + if (ctrl && ctrl->consumer[0].ring.kp) { + kfree(ctrl->consumer[0].ring.kp); + ctrl->consumer[0].ring.kp = NULL; + } +} +EXPORT_SYMBOL(nss_profiler_release_dma); + +/* + * nss_profile_dma_register_cb + * Register a handler for profile DMA. + */ +bool nss_profile_dma_register_cb(struct nss_ctx_instance *nss_ctx, int id, + void (*cb)(void*), void *arg) +{ + struct nss_profile_sdma_ctrl *ctrl = (struct nss_profile_sdma_ctrl *)nss_ctx->meminfo_ctx.sdma_ctrl; + nss_info("%px dma_register_cb %d: %px %px\n", ctrl, id, cb, arg); + if (!ctrl) + return false; + + ctrl->consumer[id].dispatch.fp = cb; + ctrl->consumer[id].arg.kp = arg; + return true; +} +EXPORT_SYMBOL(nss_profile_dma_register_cb); + +/* + * nss_profile_dma_deregister_cb + * Deregister callback for profile DMA. 
+ */ +bool nss_profile_dma_deregister_cb(struct nss_ctx_instance *nss_ctx, int id) +{ + struct nss_profile_sdma_ctrl *ctrl = (struct nss_profile_sdma_ctrl *)nss_ctx->meminfo_ctx.sdma_ctrl; + if (!ctrl) + return false; + + ctrl->consumer[id].dispatch.fp = NULL; + return true; +} +EXPORT_SYMBOL(nss_profile_dma_deregister_cb); + +/* + * nss_profile_dma_get_ctrl + * Wrapper to get profile DMA control. + */ +struct nss_profile_sdma_ctrl *nss_profile_dma_get_ctrl(struct nss_ctx_instance *nss_ctx) +{ + struct nss_profile_sdma_ctrl *ctrl = nss_ctx->meminfo_ctx.sdma_ctrl; + if (!ctrl) { + return ctrl; + } + + dmac_inv_range(ctrl, &ctrl->cidx); + dsb(sy); + return ctrl; +} +EXPORT_SYMBOL(nss_profile_dma_get_ctrl); + +/* + * nss_profiler_notify_register() + */ +void *nss_profiler_notify_register(nss_core_id_t core_id, nss_profiler_callback_t profiler_callback, void *ctx) +{ + nss_assert(core_id < NSS_CORE_MAX); + + if (NSS_CORE_STATUS_SUCCESS != + nss_core_register_handler(&nss_top_main.nss[core_id], NSS_PROFILER_INTERFACE, nss_profiler_rx_msg_handler, NULL)) { + nss_warning("Message handler FAILED to be registered for profiler"); + return NULL; + } + + nss_top_main.profiler_ctx[core_id] = ctx; + nss_top_main.profiler_callback[core_id] = profiler_callback; + + return (void *)&nss_top_main.nss[core_id]; +} +EXPORT_SYMBOL(nss_profiler_notify_register); + +/* + * nss_profiler_notify_unregister() + */ +void nss_profiler_notify_unregister(nss_core_id_t core_id) +{ + nss_assert(core_id < NSS_CORE_MAX); + + nss_core_unregister_handler(&nss_top_main.nss[core_id], NSS_PROFILER_INTERFACE); + nss_top_main.profiler_callback[core_id] = NULL; + nss_top_main.profiler_ctx[core_id] = NULL; +} +EXPORT_SYMBOL(nss_profiler_notify_unregister); + +/* + * nss_profiler_msg_init() + * Initialize profiler message. 
+ */ +void nss_profiler_msg_init(struct nss_profiler_msg *npm, uint16_t if_num, uint32_t type, uint32_t len, + nss_profiler_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&npm->cm, if_num, type, len, (void *)cb, app_data); +} +EXPORT_SYMBOL(nss_profiler_msg_init); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_project.c b/feeds/ipq807x/qca-nss-drv/src/nss_project.c new file mode 100644 index 000000000..07402fb76 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_project.c @@ -0,0 +1,338 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * @file nss_project.h + * NSS project APIs. + */ +#include "nss_tx_rx_common.h" + +static int nss_project_wt_stats_enable; + +/* + * nss_project_free_wt_stats() + * Frees a number of allocated worker thread statistics. 
+ */ +static void nss_project_free_wt_stats(struct nss_worker_thread_stats *wt_stats, int num_alloc) +{ + int i; + + if (!wt_stats) { + return; + } + + for (i = 0; i < num_alloc; i++) { + kfree(wt_stats[i].irq_stats); + } + kfree(wt_stats); +} + +/* + * nss_project_alloc_wt_stats() + * Allocates worker thread stats for a given number of threads and IRQs. + */ +static struct nss_worker_thread_stats *nss_project_alloc_wt_stats(uint32_t thread_count, uint32_t irq_count) +{ + struct nss_worker_thread_stats *wt_stats; + int i; + + wt_stats = kzalloc(thread_count * sizeof(struct nss_worker_thread_stats), GFP_ATOMIC); + if (unlikely(!wt_stats)) { + return NULL; + } + + for (i = 0; i < thread_count; i++) { + wt_stats[i].irq_stats = + kzalloc(irq_count * sizeof(struct nss_project_irq_stats), GFP_ATOMIC); + if (unlikely(!wt_stats[i].irq_stats)) { + nss_project_free_wt_stats(wt_stats, i); + return NULL; + } + } + + return wt_stats; +} + +/* + * nss_project_wt_stats_enable_callback() + * Callback function for wt stats enable messages + */ +static void nss_project_wt_stats_enable_callback(void *app_data, struct nss_project_msg *msg) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)app_data; + struct nss_project_msg_wt_stats_enable *stats_enable = &msg->msg.wt_stats_enable; + struct nss_worker_thread_stats *stats_temp; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + if (msg->cm.response != NSS_CMN_RESPONSE_ACK) { + return; + } + + nss_info("%px: Received response ACK for worker thread stats enable msg.\n", nss_ctx); + + /* + * If statistics have already been allocated, nothing else to do. 
+ */ + if (nss_ctx->wt_stats) { + return; + } + + stats_temp = nss_project_alloc_wt_stats(stats_enable->worker_thread_count, + stats_enable->irq_count); + if (unlikely(!stats_temp)) { + nss_warning("%px: Unable to allocate worker thread statistics.\n", nss_ctx); + return; + } + + spin_lock_bh(&nss_ctx->nss_top->stats_lock); + nss_ctx->wt_stats = stats_temp; + nss_ctx->worker_thread_count = stats_enable->worker_thread_count; + nss_ctx->irq_count = stats_enable->irq_count; + spin_unlock_bh(&nss_ctx->nss_top->stats_lock); +} + +/* + * nss_project_wt_stats_send_enable() + * Sends message to firmware to enable or disable worker_thread statistics collection. + */ +static nss_tx_status_t nss_project_wt_stats_send_enable(struct nss_ctx_instance *nss_ctx, bool enable) +{ + struct nss_project_msg *npm; + struct nss_cmn_msg *ncm; + nss_tx_status_t ret; + + npm = kzalloc(sizeof(*npm), GFP_KERNEL); + if (!npm) { + nss_warning("%px: Failed to allocate buffer for message\n", nss_ctx); + return NSS_TX_FAILURE; + } + + /* + * Populate the message + */ + ncm = &npm->cm; + nss_cmn_msg_init(ncm, NSS_PROJECT_INTERFACE, + NSS_PROJECT_MSG_WT_STATS_ENABLE, + sizeof(struct nss_project_msg_wt_stats_enable), + (void *)nss_project_wt_stats_enable_callback, + (void *)nss_ctx); + npm->msg.wt_stats_enable.enable = enable; + + ret = nss_core_send_cmd(nss_ctx, npm, sizeof(*npm), NSS_NBUF_PAYLOAD_SIZE); + kfree(npm); + return ret; +} + +/* + * nss_project_wt_stats_update() + * Updates stored statistics with the data found in the notify. 
+ */ +static void nss_project_wt_stats_update(struct nss_ctx_instance *nss_ctx, + struct nss_project_msg_wt_stats_notify *stats_notify) +{ + struct nss_worker_thread_stats *wt_stats; + int i; + + if (unlikely(!nss_ctx->wt_stats)) { + nss_warning("%px: Worker thread statistics not yet allocated.\n", nss_ctx); + return; + } + + if (unlikely(stats_notify->threadno >= nss_ctx->worker_thread_count)) { + nss_warning("%px: Invalid WT number %d\n", nss_ctx, stats_notify->threadno); + return; + } + + if (unlikely(stats_notify->stats_written > NSS_PROJECT_IRQS_PER_MESSAGE)) { + nss_warning("%px: Invalid worker thread stats written count %d\n", + nss_ctx, stats_notify->stats_written); + return; + } + + wt_stats = &(nss_ctx->wt_stats[stats_notify->threadno]); + + if (unlikely(!wt_stats->irq_stats)) { + nss_warning("%px: Worker thread statistics not allocated for thread %d\n", + nss_ctx, stats_notify->threadno); + return; + } + + spin_lock_bh(&nss_ctx->nss_top->stats_lock); + for (i = 0; i < stats_notify->stats_written; ++i) { + int irq = stats_notify->stats[i].irq; + if (unlikely(irq >= nss_ctx->irq_count)) { + nss_warning("%px: Invalid IRQ number %d\n", nss_ctx, irq); + continue; + } + + wt_stats->irq_stats[irq] = stats_notify->stats[i]; + } + spin_unlock_bh(&nss_ctx->nss_top->stats_lock); +} + +/* + * nss_project_msg_handler() + * Handles metadata messages on the project interface. 
+ */ +static void nss_project_msg_handler(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_project_msg *npm = (struct nss_project_msg *)ncm; + nss_project_msg_callback_t cb; + + /* + * Sanity checks on message + */ + if (npm->cm.type >= NSS_PROJECT_MSG_MAX) { + nss_warning("%px: message type out of range: %d\n", nss_ctx, npm->cm.type); + return; + } + + if (nss_cmn_get_msg_len(&(npm->cm)) > sizeof(struct nss_project_msg)) { + nss_warning("%px: message length is invalid: %d\n", nss_ctx, nss_cmn_get_msg_len(&(npm->cm))); + return; + } + + switch (npm->cm.type) { + case NSS_PROJECT_MSG_WT_STATS_NOTIFY: + nss_project_wt_stats_update(nss_ctx, &(npm->msg.wt_stats_notify)); + return; + } + + nss_core_log_msg_failures(nss_ctx, ncm); + + if (!ncm->cb) { + return; + } + + cb = (nss_project_msg_callback_t)ncm->cb; + cb((void *)nss_ctx, npm); +} + +/* + * nss_project_wt_stats_handler() + * Sysctl handler for wt_stats. + * + * Uses proc_dointvec to process data. For a write operation, also sends worker + * thread stats enable messages containing the new value to each NSS core. + */ +static int nss_project_wt_stats_handler(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + int i; + + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + /* + * In case of error, stop now. + */ + if (ret) { + return ret; + } + + /* + * No additional behavior necessary for a read operation. + */ + if (!write) { + return ret; + } + + /* + * If a value was written, send a message containing that value to each + * NSS core. + */ + for (i = 0; i < nss_top_main.num_nss; ++i) { + nss_project_wt_stats_send_enable(&(nss_top_main.nss[i]), + nss_project_wt_stats_enable); + } + return ret; + +} + +/* + * Tree of ctl_tables used to put the wt_stats proc node in the correct place in + * the file system. 
Allows the command $ echo 1 > proc/sys/dev/nss/project/wt_stats + * to enable worker thread statistics (echoing 0 into the same target will disable). + */ +static struct ctl_table nss_project_table[] = { + { + .procname = "wt_stats", + .data = &nss_project_wt_stats_enable, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_project_wt_stats_handler, + }, + { } +}; + +static struct ctl_table nss_project_dir[] = { + { + .procname = "project", + .mode = 0555, + .child = nss_project_table, + }, + { } +}; + +static struct ctl_table nss_project_root_dir[] = { + { + .procname = "nss", + .mode = 0555, + .child = nss_project_dir, + }, + { } +}; + +static struct ctl_table nss_project_root[] = { + { + .procname = "dev", + .mode = 0555, + .child = nss_project_root_dir, + }, + { } +}; + +static struct ctl_table_header *nss_project_header; + +/* + * nss_project_register_sysctl() + * Registers any sysctl handlers for the project. + */ +void nss_project_register_sysctl(void) +{ + nss_project_header = register_sysctl_table(nss_project_root); +} + +/* + * nss_project_unregister_sysctl() + * De-registers any sysctl handlers for the project. + */ +void nss_project_unregister_sysctl(void) +{ + if (nss_project_header) { + unregister_sysctl_table(nss_project_header); + } +} + +/* + * nss_project_register_handler() + * Registers the handler for NSS->HLOS messages + */ +void nss_project_register_handler(struct nss_ctx_instance *nss_ctx) +{ + nss_core_register_handler(nss_ctx, NSS_PROJECT_INTERFACE, nss_project_msg_handler, NULL); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan.c b/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan.c new file mode 100644 index 000000000..abe78eeb7 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan.c @@ -0,0 +1,446 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + + /* + * nss_pvxlan.c + * NSS PVXLAN driver interface APIs + */ +#include "nss_core.h" +#include "nss_pvxlan.h" +#include "nss_cmn.h" +#include "nss_tx_rx_common.h" +#include "nss_pvxlan_stats.h" +#include "nss_pvxlan_log.h" + +#define NSS_PVXLAN_TX_TIMEOUT 3000 + +/* + * Spinlock for protecting tunnel operations colliding with a tunnel destroy + */ +DEFINE_SPINLOCK(nss_pvxlan_spinlock); + +/* + * Private data structure + */ +static struct nss_pvxlan_pvt { + struct semaphore sem; /* Semaphore structure. */ + struct completion complete; /* Completion structure. */ + int response; /* Response from FW. */ + void *cb; /* Original cb for msgs. */ + void *app_data; /* Original app_data for msgs. */ +} pvxlan_pvt; + +/* + * Per PVxLAN tunnel/interface number instance. + */ +struct nss_pvxlan_handle { + atomic_t refcnt; /* Reference count on the tunnel */ + uint32_t if_num; /* Interface number */ + uint32_t tunnel_status; /* 0=disable, 1=enabled */ + nss_pvxlan_msg_callback_t msg_callback; /* Msg callback */ + void *app_data; /* App data (argument) */ +}; + +/* + * Array of pointer for NSS PvLAN handles. 
Each handle has per-tunnel + * stats based on the if_num which is an index. + */ +static struct nss_pvxlan_handle *nss_pvxlan_hdl[NSS_MAX_DYNAMIC_INTERFACES]; + +/* + * nss_pvxlan_verify_if_num() + * Verify if_num passed to us. + */ +static bool nss_pvxlan_verify_if_num(uint32_t if_num) +{ + uint32_t type = nss_dynamic_interface_get_type(nss_pvxlan_get_ctx(), if_num); + + return ((type == NSS_DYNAMIC_INTERFACE_TYPE_PVXLAN_HOST_INNER) || + (type == NSS_DYNAMIC_INTERFACE_TYPE_PVXLAN_OUTER)); +} + +/* + * nss_pvxlan_hdl_instance_free() + * Free PVxLAN tunnel handle instance. + */ +static bool nss_pvxlan_hdl_instance_free(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_pvxlan_handle *h; + + spin_lock_bh(&nss_pvxlan_spinlock); + h = nss_pvxlan_hdl[if_num - NSS_DYNAMIC_IF_START]; + if (!h) { + spin_unlock_bh(&nss_pvxlan_spinlock); + nss_warning("%px: Instance does not exist: %d", nss_ctx, if_num); + return false; + } + + if (h->if_num != if_num) { + spin_unlock_bh(&nss_pvxlan_spinlock); + nss_warning("%px: Not correct if_num: %d", nss_ctx, if_num); + return false; + } + + nss_pvxlan_hdl[if_num - NSS_DYNAMIC_IF_START] = NULL; + spin_unlock_bh(&nss_pvxlan_spinlock); + kfree(h); + return true; +} + +/* + * nss_pvxlan_hdl_instance_alloc() + * Allocate PVxLAN tunnel instance. 
+ */ +static bool nss_pvxlan_hdl_instance_alloc(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + nss_pvxlan_msg_callback_t notify_cb, void *app_data) +{ + struct nss_pvxlan_handle *h; + + /* + * Allocate a handle + */ + h = kzalloc(sizeof(struct nss_pvxlan_handle), GFP_ATOMIC); + if (!h) { + nss_warning("%px: no memory for allocating PVxLAN handle instance for interface : %d", nss_ctx, if_num); + return false; + } + h->if_num = if_num; + + spin_lock_bh(&nss_pvxlan_spinlock); + if (nss_pvxlan_hdl[if_num - NSS_DYNAMIC_IF_START] != NULL) { + spin_unlock_bh(&nss_pvxlan_spinlock); + kfree(h); + nss_warning("%px: The handle has been taken by another thread :%d", nss_ctx, if_num); + return false; + } + + h->msg_callback = notify_cb; + h->app_data = app_data; + nss_pvxlan_hdl[if_num - NSS_DYNAMIC_IF_START] = h; + spin_unlock_bh(&nss_pvxlan_spinlock); + + return true; +} + +/* + * nss_pvxlan_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_pvxlan_callback(void *app_data, struct nss_pvxlan_msg *nvxm) +{ + nss_pvxlan_msg_callback_t callback = (nss_pvxlan_msg_callback_t)pvxlan_pvt.cb; + void *data = pvxlan_pvt.app_data; + + pvxlan_pvt.response = NSS_TX_SUCCESS; + pvxlan_pvt.cb = NULL; + pvxlan_pvt.app_data = NULL; + + if (nvxm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("Pvxlan Error response %d\n", nvxm->cm.response); + pvxlan_pvt.response = nvxm->cm.response; + } + + if (callback) { + callback(data, nvxm); + } + complete(&pvxlan_pvt.complete); +} + +/* + * nss_pvxlan_handler() + * Handle NSS -> HLOS messages for PVxLAN. + */ +static void nss_pvxlan_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_pvxlan_msg *nvxm = (struct nss_pvxlan_msg *)ncm; + nss_pvxlan_msg_callback_t cb; + struct nss_pvxlan_handle * h; + + BUG_ON(!nss_pvxlan_verify_if_num(ncm->interface)); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_PVXLAN_MSG_TYPE_MAX) { + nss_warning("%px: received invalid message %d for PVXLAN interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_pvxlan_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Trace messages. + */ + nss_core_log_msg_failures(nss_ctx, ncm); + nss_pvxlan_log_rx_msg(nvxm); + + switch (nvxm->cm.type) { + case NSS_PVXLAN_MSG_TYPE_SYNC_STATS: + nss_pvxlan_stats_sync(nss_ctx, &nvxm->msg.stats, ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + uint32_t if_num = ncm->interface - NSS_DYNAMIC_IF_START; + spin_lock_bh(&nss_pvxlan_spinlock); + h = nss_pvxlan_hdl[if_num]; + if (h) { + ncm->cb = (nss_ptr_t)h->msg_callback; + ncm->app_data = (nss_ptr_t)h->app_data; + } + spin_unlock_bh(&nss_pvxlan_spinlock); + + } + + cb = (nss_pvxlan_msg_callback_t)ncm->cb; + + /* + * Do we have a callback + */ + if (!cb) { + nss_trace("%px: cb is null for interface %d", nss_ctx, ncm->interface); + return; + } + + cb((void *)ncm->app_data, nvxm); +} + +/* + * nss_pvxlan_tx_msg() + * Transmit a PVXLAN message to NSS FW. Don't call this from softirq/interrupts. + */ +nss_tx_status_t nss_pvxlan_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_pvxlan_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + if (!nss_pvxlan_verify_if_num(msg->cm.interface)) { + return NSS_TX_FAILURE_BAD_PARAM; + } + + if (ncm->type >= NSS_PVXLAN_MSG_TYPE_MAX) { + return NSS_TX_FAILURE_BAD_PARAM; + } + + /* + * Trace messages. + */ + nss_pvxlan_log_tx_msg(msg); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_pvxlan_tx_msg); + +/* + * nss_pvxlan_tx_msg_sync() + * Transmit a pvxlan message to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_pvxlan_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_pvxlan_msg *nvxm) +{ + nss_tx_status_t status; + int ret; + + down(&pvxlan_pvt.sem); + nvxm->cm.cb = (nss_ptr_t)nss_pvxlan_callback; + nvxm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_pvxlan_tx_msg(nss_ctx, nvxm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: pvxlan_tx_msg failed\n", nss_ctx); + up(&pvxlan_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&pvxlan_pvt.complete, msecs_to_jiffies(NSS_PVXLAN_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: pvxlan tx sync failed due to timeout\n", nss_ctx); + pvxlan_pvt.response = NSS_TX_FAILURE; + } + + status = pvxlan_pvt.response; + up(&pvxlan_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_pvxlan_tx_msg_sync); + +/* + * nss_pvxlan_tx_buf() + * Transmit data buffer (skb) to a NSS interface number + */ +nss_tx_status_t nss_pvxlan_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *buf, uint32_t if_num) +{ + BUG_ON(!nss_pvxlan_verify_if_num(if_num)); + + return nss_core_send_packet(nss_ctx, buf, if_num, H2N_BIT_FLAG_VIRTUAL_BUFFER | H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_pvxlan_tx_buf); + +/* + *********************************** + * Register/Unregister/Miscellaneous APIs + *********************************** + */ + +/* + * nss_pvxlan_unregister() + * Unregister a data packet notifier with NSS FW. 
+ */ +bool nss_pvxlan_unregister(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx; + int32_t i; + + nss_ctx = nss_pvxlan_get_ctx(); + if (!nss_pvxlan_verify_if_num(if_num)) { + nss_warning("%px: data unregister received for invalid interface %d", nss_ctx, if_num); + return false; + } + + spin_lock_bh(&nss_pvxlan_tunnel_stats_debug_lock); + for (i = 0; i < NSS_PVXLAN_MAX_INTERFACES; i++) { + if (nss_pvxlan_tunnel_debug_stats[i].if_num != if_num) { + continue; + } + + memset(&nss_pvxlan_tunnel_debug_stats[i], 0, + sizeof(struct nss_pvxlan_tunnel_stats_debug)); + break; + } + spin_unlock_bh(&nss_pvxlan_tunnel_stats_debug_lock); + + nss_core_unregister_handler(nss_ctx, if_num); + nss_core_unregister_subsys_dp(nss_ctx, if_num); + nss_pvxlan_hdl_instance_free(nss_ctx, if_num); + return true; +} +EXPORT_SYMBOL(nss_pvxlan_unregister); + +/* + * nss_pvxlan_register() + * Registers a data packet notifier with NSS FW. + */ +struct nss_ctx_instance *nss_pvxlan_register(uint32_t if_num, + nss_pvxlan_buf_callback_t data_cb, + nss_pvxlan_msg_callback_t notify_cb, + struct net_device *netdev, + uint32_t features) +{ + struct nss_ctx_instance *nss_ctx; + int core_status; + int32_t i; + + nss_ctx = nss_pvxlan_get_ctx(); + if (!nss_pvxlan_verify_if_num(if_num)) { + nss_warning("%px: data register received for invalid interface %d", nss_ctx, if_num); + return NULL; + } + + core_status = nss_core_register_handler(nss_ctx, if_num, nss_pvxlan_msg_handler, NULL); + if (core_status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: nss core register handler failed for if_num:%d with error :%d", nss_ctx, if_num, core_status); + return NULL; + } + + if (!nss_pvxlan_hdl_instance_alloc(nss_ctx, if_num, notify_cb, (void *)netdev)) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: couldn't allocate handle instance for if_num:%d", nss_ctx, if_num); + return NULL; + } + + spin_lock_bh(&nss_pvxlan_tunnel_stats_debug_lock); + for (i = 0; i < NSS_PVXLAN_MAX_INTERFACES; i++) { 
+ if (nss_pvxlan_tunnel_debug_stats[i].valid) { + continue; + } + + nss_pvxlan_tunnel_debug_stats[i].valid = true; + nss_pvxlan_tunnel_debug_stats[i].if_num = if_num; + nss_pvxlan_tunnel_debug_stats[i].if_index = netdev->ifindex; + break; + } + spin_unlock_bh(&nss_pvxlan_tunnel_stats_debug_lock); + + if (i == NSS_PVXLAN_MAX_INTERFACES) { + nss_warning("%px: No available debug stats instance :%d", nss_ctx, if_num); + nss_pvxlan_hdl_instance_free(nss_ctx, if_num); + nss_core_unregister_handler(nss_ctx, if_num); + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, data_cb, NULL, NULL, netdev, features); + return nss_ctx; +} +EXPORT_SYMBOL(nss_pvxlan_register); + +/* + * nss_pvxlan_ifnum_with_core_id() + * Append core id to pvxlan interface num. + */ +int nss_pvxlan_ifnum_with_core_id(int if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_pvxlan_get_ctx(); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_is_dynamic_interface(if_num)) { + nss_warning("%px: Invalid if_num: %d, must be a dynamic interface\n", nss_ctx, if_num); + return 0; + } + return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_pvxlan_ifnum_with_core_id); + +/* + * nss_pvxlan_msg_init() + * Initialize pvxlan message. + */ +void nss_pvxlan_msg_init(struct nss_pvxlan_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, + nss_pvxlan_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, (void*)cb, app_data); +} +EXPORT_SYMBOL(nss_pvxlan_msg_init); + +/* + * nss_pvxlan_get_ctx() + * Return a Pvxlan NSS context. + */ +struct nss_ctx_instance *nss_pvxlan_get_ctx() +{ + struct nss_ctx_instance *nss_ctx; + + nss_ctx = &nss_top_main.nss[nss_top_main.pvxlan_handler_id]; + return nss_ctx; +} +EXPORT_SYMBOL(nss_pvxlan_get_ctx); + +/* + * nss_pvxlan_init() + * Initializes Pvxlan. Gets called from nss_init.c. 
+ */ +void nss_pvxlan_init() +{ + nss_pvxlan_stats_dentry_create(); + sema_init(&pvxlan_pvt.sem, 1); + init_completion(&pvxlan_pvt.complete); + + memset(&nss_pvxlan_hdl, 0, sizeof(nss_pvxlan_hdl)); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_log.c new file mode 100644 index 000000000..af516ab54 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_log.c @@ -0,0 +1,244 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_pvxlan_log.c + * NSS PVXLAN logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_pvxlan_log_message_types_str + * PVXLAN message strings + */ +static int8_t *nss_pvxlan_log_message_types_str[NSS_PVXLAN_MSG_TYPE_MAX] __maybe_unused = { + "PVxLAN Sync Stats", + "PVxLAN Tunnel Configure Rule", + "PVxLAN Tunnel Unconfigure Rule", + "PVxLAN Enable Tunnel", + "PVxLAN Disable Tunnel", + "PVxLAN Add MAC rule", + "PVxLAN Delete MAC rule" +}; + +/* + * nss_pvxlan_log_error_response_types_str + * Strings for error types for PVXLAN messages + */ +static int8_t *nss_pvxlan_log_error_response_types_str[NSS_PVXLAN_ERROR_MAX] __maybe_unused = { + "PVXLAN Invalid L3 Protocool", + "PVXLAN Invalid UDP Protocol", + "PVXLAN Tunnel Disabled", + "PVXLAN Tunnel Enabled", + "PVXLAN Tunnel Not Configured", + "PVXLAN Invalid IP Node", + "PVXLAN Invalid Flag", + "PVXLAN MAC Table Full", + "PVXLAN MAC Exists", + "PVXLAN MAC Does Not Exist" +}; + +/* + * nss_pvxlan_log_rule_msg() + * Log NSS PVXLAN rule message. + */ +static void nss_pvxlan_log_rule_msg(struct nss_pvxlan_rule_msg *npvrm) +{ + nss_trace("%px: NSS PVXLAN Rule message \n" + "Encap Rule Src IP: %px\n" + "Encap Rule Src Port: %d\n" + "Encap Rule Dst Ip: %px\n" + "Encap Rule Dst Port: %d\n" + "RPS: %d\n" + "Flags: %x\n" + "Tunnel ID: %d\n", + npvrm, + &npvrm->encap.src.ip, + npvrm->encap.src_port, + &npvrm->encap.dest.ip, + npvrm->encap.dest_port, + npvrm->rps, npvrm->flags, + npvrm->tunnel_id); +} + +/* + * nss_pvxlan_mac_rule_msg() + * Log NSS PVxLAN MAC rule message. + */ +static void nss_pvxlan_log_mac_msg(struct nss_pvxlan_mac_msg *npvcm) +{ + nss_trace("%px: NSS PVXLAN MAC message \n" + "PVxLAN Mac Addr: %x : %x : %x" + "PVxLAN Flags: %u\n" + "PVxLAN VNet ID: %u\n" + "PVxLAN Tunnel ID: %d\n" + "PVxLAN Policy ID: %d", + npvcm, + npvcm->mac_addr[0], npvcm->mac_addr[1], + npvcm->mac_addr[2], npvcm->flags, + npvcm->vnet_id, npvcm->tunnel_id, + npvcm->policy_id); +} + +/* + * nss_pvxlan_log_rule_cfg_msg() + * Log NSS PVxLAN rule configure message. 
+ */ +static void nss_pvxlan_log_rule_cfg_msg(struct nss_pvxlan_msg *npvm) +{ + struct nss_pvxlan_rule_msg *npvrm __maybe_unused = &npvm->msg.rule_cfg; + nss_pvxlan_log_rule_msg(npvrm); +} + +/* + * nss_pvxlan_log_rule_uncfg_msg() + * Log NSS PVxLAN rule unconfigure message. + */ +static void nss_pvxlan_log_rule_uncfg_msg(struct nss_pvxlan_msg *npvm) +{ + struct nss_pvxlan_rule_msg *npvrm __maybe_unused = &npvm->msg.rule_uncfg; + nss_pvxlan_log_rule_msg(npvrm); +} + +/* + * nss_pvxlan_log_enable_msg() + * Log NSS PVxLAN rule enable message. + */ +static void nss_pvxlan_log_enable_msg(struct nss_pvxlan_msg *npvm) +{ + struct nss_pvxlan_tunnel_state_msg *npvrm __maybe_unused = &npvm->msg.enable; + nss_trace("%px: NSS PVXLAN Tunnel state message: Enable \n", npvrm); +} + +/* + * nss_pvxlan_log_disable_msg() + * Log NSS PVxLAN rule disable message. + */ +static void nss_pvxlan_log_disable_msg(struct nss_pvxlan_msg *npvm) +{ + nss_trace("%px: NSS PVXLAN Tunnel state message: Disable \n", npvm); +} + +/* + * nss_pvxlan_log_mac_add_msg() + * Log NSS PVXLAN mac rule add message. + */ +static void nss_pvxlan_log_mac_add_msg(struct nss_pvxlan_msg *npvm) +{ + struct nss_pvxlan_mac_msg *npvcm __maybe_unused = &npvm->msg.mac_add; + nss_pvxlan_log_mac_msg(npvcm); +} + +/* + * nss_pvxlan_log_mac_del_msg() + * Log NSS PVXLAN mac rule del message. + */ +static void nss_pvxlan_log_mac_del_msg(struct nss_pvxlan_msg *npvm) +{ + struct nss_pvxlan_mac_msg *npvcm __maybe_unused = &npvm->msg.mac_del; + nss_pvxlan_log_mac_msg(npvcm); +} + +/* + * nss_pvxlan_log_verbose() + * Log message contents. 
+ */ +static void nss_pvxlan_log_verbose(struct nss_pvxlan_msg *npvm) +{ + switch (npvm->cm.type) { + case NSS_PVXLAN_MSG_TYPE_TUNNEL_CREATE_RULE: + nss_pvxlan_log_rule_cfg_msg(npvm); + break; + + case NSS_PVXLAN_MSG_TYPE_TUNNEL_DESTROY_RULE: + nss_pvxlan_log_rule_uncfg_msg(npvm); + break; + + case NSS_PVXLAN_MSG_TYPE_TUNNEL_ENABLE: + nss_pvxlan_log_enable_msg(npvm); + break; + + case NSS_PVXLAN_MSG_TYPE_TUNNEL_DISABLE: + nss_pvxlan_log_disable_msg(npvm); + break; + + case NSS_PVXLAN_MSG_TYPE_MAC_ADD: + nss_pvxlan_log_mac_add_msg(npvm); + break; + + case NSS_PVXLAN_MSG_TYPE_MAC_DEL: + nss_pvxlan_log_mac_del_msg(npvm); + break; + + case NSS_PVXLAN_MSG_TYPE_SYNC_STATS: + break; + + default: + nss_trace("%px: Invalid message type\n", npvm); + break; + } +} + +/* + * nss_pvxlan_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_pvxlan_log_tx_msg(struct nss_pvxlan_msg *npvm) +{ + if (npvm->cm.type >= NSS_PVXLAN_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", npvm); + return; + } + + nss_info("%px: type[%d]:%s\n", npvm, npvm->cm.type, nss_pvxlan_log_message_types_str[npvm->cm.type]); + nss_pvxlan_log_verbose(npvm); +} + +/* + * nss_pvxlan_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_pvxlan_log_rx_msg(struct nss_pvxlan_msg *npvm) +{ + if (npvm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", npvm); + return; + } + + if (npvm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (npvm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", npvm, npvm->cm.type, + nss_pvxlan_log_message_types_str[npvm->cm.type], + npvm->cm.response, nss_cmn_response_str[npvm->cm.response]); + goto verbose; + } + + if (npvm->cm.error >= NSS_PVXLAN_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + npvm, npvm->cm.type, nss_pvxlan_log_message_types_str[npvm->cm.type], + npvm->cm.response, nss_cmn_response_str[npvm->cm.response], + npvm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + npvm, npvm->cm.type, nss_pvxlan_log_message_types_str[npvm->cm.type], + npvm->cm.response, nss_cmn_response_str[npvm->cm.response], + npvm->cm.error, nss_pvxlan_log_error_response_types_str[npvm->cm.error]); + +verbose: + nss_pvxlan_log_verbose(npvm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_log.h new file mode 100644 index 000000000..cdc0dd772 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_PVXLAN_LOG_H__ +#define __NSS_PVXLAN_LOG_H__ + +/* + * nss_pvxlan_log.h + * NSS PVXLAN Log Header File. + */ + +/* + * nss_pvxlan_log_tx_msg + * Logs a PVxLAN message that is sent to the NSS firmware. + */ +void nss_pvxlan_log_tx_msg(struct nss_pvxlan_msg *ncm); + +/* + * nss_pvxlan_log_rx_msg + * Logs a PVxLAN message that is received from the NSS firmware. + */ +void nss_pvxlan_log_rx_msg(struct nss_pvxlan_msg *ncm); + +#endif /* __NSS_PVXLAN_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_stats.c new file mode 100644 index 000000000..59f861604 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_stats.c @@ -0,0 +1,213 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_pvxlan_stats.h" + +DEFINE_SPINLOCK(nss_pvxlan_tunnel_stats_debug_lock); +struct nss_pvxlan_tunnel_stats_debug nss_pvxlan_tunnel_debug_stats[NSS_PVXLAN_MAX_INTERFACES]; + +/* + * nss_pvxlan_tunnel_stats_debug_str + * PVxLAN statistics strings for nss tunnel stats + */ +static int8_t *nss_pvxlan_tunnel_stats_debug_str[NSS_PVXLAN_MAX_INTERFACES] = { + "rx_pkts", + "rx_bytes", + "tx_pkts", + "tx_bytes", + "rx_queue_0_dropped", + "rx_queue_1_dropped", + "rx_queue_2_dropped", + "rx_queue_3_dropped", + "MAC DB look up failed", + "UDP ENCAP look up failed", + "dropped packet malformed", + "dropped next node queue is full", + "dropped headroom insufficient", + "dropped version mismatch", + "dropped zero sized packet", + "dropped pbuf alloc failed", + "dropped linearization failed" +}; + +/* + * nss_pvxlan_tunnel_stats_debug_get() + * Get PVxLAN Tunnel statitics. 
+ */ +static void nss_pvxlan_tunnel_stats_debug_get(struct nss_pvxlan_tunnel_stats_debug *stats) +{ + uint32_t i; + + if (!stats) { + nss_warning("No memory to copy pvxlan tunnel stats"); + return; + } + + spin_lock_bh(&nss_pvxlan_tunnel_stats_debug_lock); + for (i = 0; i < NSS_PVXLAN_MAX_INTERFACES; i++) { + if (nss_pvxlan_tunnel_debug_stats[i].valid) { + memcpy(stats, &nss_pvxlan_tunnel_debug_stats[i], + sizeof(struct nss_pvxlan_tunnel_stats_debug)); + stats++; + } + } + spin_unlock_bh(&nss_pvxlan_tunnel_stats_debug_lock); +} + +/* + * nss_pvxlan_stats_read() + * Read PVxLAN Tunnel statistics + */ +static ssize_t nss_pvxlan_stats_read(struct file *fp, char __user *ubuf, + size_t sz, loff_t *ppos) +{ + uint32_t max_output_lines = 2 + (NSS_PVXLAN_MAX_INTERFACES + * (NSS_PVXLAN_TUNNEL_STATS_MAX + 2)) + 2; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct net_device *dev; + uint32_t id, i; + struct nss_pvxlan_tunnel_stats_debug *pvxlan_tunnel_stats = NULL; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + pvxlan_tunnel_stats = kzalloc((sizeof(struct nss_pvxlan_tunnel_stats_debug) + * NSS_PVXLAN_MAX_INTERFACES), GFP_KERNEL); + if (unlikely(!pvxlan_tunnel_stats)) { + nss_warning("Could not allocate memory for populating PVxLAN stats"); + kfree(lbuf); + return 0; + } + + /* + * Get all stats + */ + nss_pvxlan_tunnel_stats_debug_get(pvxlan_tunnel_stats); + + /* + * Tunnel stats + */ + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "\n PVxLAN Tunnel stats start:\n\n"); + + for (id = 0; id < NSS_PVXLAN_MAX_INTERFACES; id++) { + if (!pvxlan_tunnel_stats[id].valid) + break; + + dev = dev_get_by_index(&init_net, pvxlan_tunnel_stats[id].if_index); + if (likely(dev)) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "%d. 
nss interface id=%d, netdevice=%s\n", + id, pvxlan_tunnel_stats[id].if_num, + dev->name); + dev_put(dev); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "%d. nss interface id=%d\n", id, + pvxlan_tunnel_stats[id].if_num); + } + + for (i = 0; i < NSS_PVXLAN_TUNNEL_STATS_MAX; i++) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "\t%s = %llu\n", + nss_pvxlan_tunnel_stats_debug_str[i], + pvxlan_tunnel_stats[id].stats[i]); + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "\n PVxLAN Tunnel stats end\n"); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(pvxlan_tunnel_stats); + kfree(lbuf); + return bytes_read; +} + +/* + * nss_pvxlan_stats_sync() + * Sync function for pvxlan statistics + */ +void nss_pvxlan_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_pvxlan_stats_msg *stats_msg, uint32_t if_num) +{ + uint32_t i; + struct nss_pvxlan_tunnel_stats_debug *s = NULL; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + spin_lock_bh(&nss_pvxlan_tunnel_stats_debug_lock); + for (i = 0; i < NSS_PVXLAN_MAX_INTERFACES; i++) { + if (nss_pvxlan_tunnel_debug_stats[i].if_num == if_num) { + s = &nss_pvxlan_tunnel_debug_stats[i]; + break; + } + } + + if (!s) { + spin_unlock_bh(&nss_pvxlan_tunnel_stats_debug_lock); + nss_warning("%px: Tunnel not found: %u", nss_ctx, if_num); + return; + } + + s->stats[NSS_PVXLAN_TUNNEL_STATS_RX_PKTS] += stats_msg->node_stats.rx_packets; + s->stats[NSS_PVXLAN_TUNNEL_STATS_RX_BYTES] += stats_msg->node_stats.rx_bytes; + s->stats[NSS_PVXLAN_TUNNEL_STATS_TX_PKTS] += stats_msg->node_stats.tx_packets; + s->stats[NSS_PVXLAN_TUNNEL_STATS_TX_BYTES] += stats_msg->node_stats.tx_bytes; + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + s->stats[NSS_PVXLAN_TUNNEL_STATS_RX_QUEUE_0_DROPPED + i] += stats_msg->node_stats.rx_dropped[i]; + } + s->stats[NSS_PVXLAN_TUNNEL_STATS_MAC_DB_LOOKUP_FAILED] += + 
stats_msg->mac_db_lookup_failed; + s->stats[NSS_PVXLAN_TUNNEL_STATS_UDP_ENCAP_LOOKUP_FAILED] += + stats_msg->udp_encap_lookup_failed; + s->stats[NSS_PVXLAN_TUNNEL_STATS_DROP_MALFORMED] += + stats_msg->dropped_malformed; + s->stats[NSS_PVXLAN_TUNNEL_STATS_DROP_NEXT_NODE_QUEUE_FULL] += + stats_msg->dropped_next_node_queue_full; + s->stats[NSS_PVXLAN_TUNNEL_STATS_DROP_HEADROOM_INSUFFICIENT] += + stats_msg->dropped_hroom; + s->stats[NSS_PVXLAN_TUNNEL_STATS_DROP_VERSION_MISMATCH] += + stats_msg->dropped_ver_mis; + s->stats[NSS_PVXLAN_TUNNEL_STATS_DROP_ZERO_SIZED_PACKET] += + stats_msg->dropped_zero_sized_packet; + s->stats[NSS_PVXLAN_TUNNEL_STATS_DROP_PBUF_ALLOC_FAILED] += + stats_msg->dropped_pbuf_alloc_failed; + s->stats[NSS_PVXLAN_TUNNEL_STATS_DROP_LINEAR_FAILED] += + stats_msg->dropped_linear_failed; + spin_unlock_bh(&nss_pvxlan_tunnel_stats_debug_lock); +} + +/* + * nss_pvxlan_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(pvxlan) + +/* + * nss_pvxlan_stats_dentry_create() + * Create gre tunnel statistics debug entry. + */ +void nss_pvxlan_stats_dentry_create(void) +{ + nss_stats_create_dentry("pvxlan", &nss_pvxlan_stats_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_stats.h new file mode 100644 index 000000000..874bf785e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_pvxlan_stats.h @@ -0,0 +1,66 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_PVXLAN_STATS_H +#define __NSS_PVXLAN_STATS_H + +/* + * pvxlan statistic counters + */ +enum nss_pvxlan_tunnel_stats { + NSS_PVXLAN_TUNNEL_STATS_RX_PKTS, + NSS_PVXLAN_TUNNEL_STATS_RX_BYTES, + NSS_PVXLAN_TUNNEL_STATS_TX_PKTS, + NSS_PVXLAN_TUNNEL_STATS_TX_BYTES, + NSS_PVXLAN_TUNNEL_STATS_RX_QUEUE_0_DROPPED, + NSS_PVXLAN_TUNNEL_STATS_RX_QUEUE_1_DROPPED, + NSS_PVXLAN_TUNNEL_STATS_RX_QUEUE_2_DROPPED, + NSS_PVXLAN_TUNNEL_STATS_RX_QUEUE_3_DROPPED, + NSS_PVXLAN_TUNNEL_STATS_MAC_DB_LOOKUP_FAILED, + NSS_PVXLAN_TUNNEL_STATS_UDP_ENCAP_LOOKUP_FAILED, + NSS_PVXLAN_TUNNEL_STATS_DROP_MALFORMED, + NSS_PVXLAN_TUNNEL_STATS_DROP_NEXT_NODE_QUEUE_FULL, + NSS_PVXLAN_TUNNEL_STATS_DROP_HEADROOM_INSUFFICIENT, + NSS_PVXLAN_TUNNEL_STATS_DROP_VERSION_MISMATCH, + NSS_PVXLAN_TUNNEL_STATS_DROP_ZERO_SIZED_PACKET, + NSS_PVXLAN_TUNNEL_STATS_DROP_PBUF_ALLOC_FAILED, + NSS_PVXLAN_TUNNEL_STATS_DROP_LINEAR_FAILED, + NSS_PVXLAN_TUNNEL_STATS_MAX, +}; + +/* + * PVxLAN session debug statistics + */ +struct nss_pvxlan_tunnel_stats_debug { + uint64_t stats[NSS_PVXLAN_TUNNEL_STATS_MAX]; + int32_t if_index; + uint32_t if_num; /* nss interface number */ + bool valid; +}; + +/* + * Data structures to store PVxLAN nss debug stats + */ +extern spinlock_t nss_pvxlan_tunnel_stats_debug_lock; +extern struct nss_pvxlan_tunnel_stats_debug nss_pvxlan_tunnel_debug_stats[NSS_PVXLAN_MAX_INTERFACES]; + +/* + * PVxLAN statistics APIs + */ +extern void nss_pvxlan_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_pvxlan_stats_msg *stats_msg, uint32_t if_num); +extern void 
nss_pvxlan_stats_dentry_create(void); + +#endif /* __NSS_PVXLAN_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qrfs.c b/feeds/ipq807x/qca-nss-drv/src/nss_qrfs.c new file mode 100644 index 000000000..cfbff597e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_qrfs.c @@ -0,0 +1,472 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_qrfs_stats.h" +#include "nss_qrfs_log.h" + +/* + * Notify data structure + */ +struct nss_qrfs_notify_data { + nss_qrfs_msg_callback_t qrfs_callback; + void *app_data; +}; + +static struct nss_qrfs_notify_data nss_qrfs_notify[NSS_CORE_MAX]; + +/* + * nss_qrfs_verify_if_num() + * Verify if_num passed to us. 
+ */ +static bool nss_qrfs_verify_if_num(uint32_t if_num) +{ + return if_num == NSS_QRFS_INTERFACE; +} + +/* + * nss_qrfs_msg_handler() + * Handle NSS -> HLOS messages for QRFS + */ +static void nss_qrfs_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *app_data) +{ + struct nss_qrfs_msg *nqm = (struct nss_qrfs_msg *)ncm; + nss_qrfs_msg_callback_t cb; + + /* + * Trace messages. + */ + nss_qrfs_log_rx_msg(nqm); + + if (!nss_qrfs_verify_if_num(ncm->interface)) { + nss_warning("%px: invalid interface %d for QRFS\n", nss_ctx, ncm->interface); + return; + } + + /* + * Is this a valid request/response? + */ + if (ncm->type >= NSS_QRFS_MSG_MAX) { + nss_warning("%px: invalid message %d for QRFS\n", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_qrfs_msg)) { + nss_warning("%px: message length is greater than required: %d\n", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + switch (ncm->type) { + case NSS_QRFS_MSG_STATS_SYNC: + /* + * Update QRFS statistics. + */ + nss_qrfs_stats_sync(nss_ctx, &nqm->msg.stats_sync); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_qrfs_notify[nss_ctx->id].qrfs_callback; + ncm->app_data = (nss_ptr_t)nss_qrfs_notify[nss_ctx->id].app_data; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_qrfs_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, nqm); +} + +/* + * nss_qrfs_get_ctx() + */ +static struct nss_ctx_instance *nss_qrfs_get_ctx(int core_id) +{ + return &nss_top_main.nss[core_id]; +} + +/* + * nss_qrfs_get_flow_keys() + * Get 5 tuple information from flow keys and set in flow rule message. 
+ */ +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 18, 21)) +static bool nss_qrfs_get_flow_keys(struct nss_ctx_instance *nss_ctx, struct sk_buff *skb, + struct nss_qrfs_flow_rule_msg *nqfrm) +{ + struct flow_keys keys; + uint16_t protocol = skb->protocol; + bool res; + struct ipv6hdr *ip6hdr; + + res = skb_flow_dissect(skb, &keys); + if (!res) { + nss_warning("%px: failed to get flow keys\n", nss_ctx); + return res; + } + + nqfrm->protocol = keys.ip_proto; + nqfrm->src_port = keys.port16[0]; + nqfrm->dst_port = keys.port16[1]; + + if (protocol == htons(ETH_P_IP)) { + nqfrm->ip_version = 4; + nqfrm->src_addr[0] = keys.src; + nqfrm->dst_addr[0] = keys.dst; + return true; + } + + nqfrm->ip_version = 6; + ip6hdr = (struct ipv6hdr *)skb_network_header(skb); + if (!ip6hdr) { + nss_warning("%px: failed to get IPv6 address\n", nss_ctx); + return false; + } + + memcpy(nqfrm->src_addr, &ip6hdr->saddr, sizeof(struct in6_addr)); + memcpy(nqfrm->dst_addr, &ip6hdr->daddr, sizeof(struct in6_addr)); + + return true; +} +#else +static bool nss_qrfs_get_flow_keys(struct nss_ctx_instance *nss_ctx, struct sk_buff *skb, + struct nss_qrfs_flow_rule_msg *nqfrm) +{ + struct flow_keys keys; + bool res; + + res = skb_flow_dissect_flow_keys(skb, &keys, 0); + if (!res) { + nss_warning("%px: failed to get flow keys\n", nss_ctx); + return res; + } + + nqfrm->protocol = (uint16_t)keys.basic.ip_proto; + nqfrm->src_port = keys.ports.src; + nqfrm->dst_port = keys.ports.dst; + + if (keys.basic.n_proto == htons(ETH_P_IP)) { + nqfrm->ip_version = 4; + nqfrm->src_addr[0] = keys.addrs.v4addrs.src; + nqfrm->dst_addr[0] = keys.addrs.v4addrs.dst; + return true; + } + + nqfrm->ip_version = 6; + memcpy(nqfrm->src_addr, &keys.addrs.v6addrs.src, sizeof(struct in6_addr)); + memcpy(nqfrm->dst_addr, &keys.addrs.v6addrs.dst, sizeof(struct in6_addr)); + + return true; +} +#endif + +/* + * nss_qrfs_flow_add_msg_callback() + * Callback function for receiving flow add response messages. 
+ */ +static void nss_qrfs_flow_add_msg_callback(void *app_data, struct nss_qrfs_msg *nqm) +{ + struct nss_ctx_instance *nss_ctx __maybe_unused = (struct nss_ctx_instance *)app_data; + struct nss_qrfs_flow_rule_msg *nqfrm; + + if (nqm->cm.type != NSS_QRFS_MSG_FLOW_ADD) { + nss_warning("%px: invalid flow response message %d\n", nss_ctx, nqm->cm.type); + return; + } + + nqfrm = &nqm->msg.flow_add; + + if ((nqfrm->ip_version != 4) && (nqfrm->ip_version != 6)) { + nss_warning("%px: invalid IP version %d\n", nss_ctx, nqfrm->ip_version); + return; + } + + if (nqm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: flow add configuration error: %d for NSS core %d\n", + nss_ctx, nqm->cm.error, nss_ctx->id); + } +} + +/* + * nss_qrfs_flow_delete_msg_callback() + * Callback function for receiving flow delete response messages. + */ +static void nss_qrfs_flow_delete_msg_callback(void *app_data, struct nss_qrfs_msg *nqm) +{ + struct nss_ctx_instance *nss_ctx __maybe_unused = (struct nss_ctx_instance *)app_data; + struct nss_qrfs_flow_rule_msg *nqfrm; + + if (nqm->cm.type != NSS_QRFS_MSG_FLOW_DELETE) { + nss_warning("%px: invalid flow response message %d\n", nss_ctx, nqm->cm.type); + return; + } + + nqfrm = &nqm->msg.flow_delete; + + if ((nqfrm->ip_version != 4) && (nqfrm->ip_version != 6)) { + nss_warning("%px: invalid IP version %d\n", nss_ctx, nqfrm->ip_version); + return; + } + + if (nqm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: flow delete configuration error: %d for NSS core %d\n", + nss_ctx, nqm->cm.error, nss_ctx->id); + } +} + +/* + * nss_qrfs_msg_init() + * Initialize the common header of QRFS message + */ +static void nss_qrfs_msg_init(struct nss_qrfs_msg *nqm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&nqm->cm, if_num, type, len, cb, app_data); +} + +/* + * nss_qrfs_tx_msg() + * Transmit a QRFS message to NSS firmware + */ +static nss_tx_status_t nss_qrfs_tx_msg(struct nss_ctx_instance 
*nss_ctx, struct nss_qrfs_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace messages. + */ + nss_qrfs_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (!nss_qrfs_verify_if_num(ncm->interface)) { + nss_warning("%px: interface is not QRFS interface: %d\n", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_QRFS_MSG_MAX) { + nss_warning("%px: message type is out of range: %d\n", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_qrfs_add_flow_rule() + * Set a QRFS flow rule add message and transmit the message to NSS core. + */ +static nss_tx_status_t nss_qrfs_add_flow_rule(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + struct sk_buff *skb, uint32_t cpu, bool need_cb) +{ + struct nss_qrfs_msg nqm; + struct nss_qrfs_flow_rule_msg *nqfrm; + nss_tx_status_t status; + nss_qrfs_msg_callback_t cb = NULL; + void *app_data = NULL; + bool res; + + memset(&nqm, 0, sizeof(struct nss_qrfs_msg)); + + if (need_cb) { + cb = nss_qrfs_flow_add_msg_callback; + app_data = (void *)nss_ctx; + } + + /* + * Initialize common header of QRFS flow rule add message. + */ + nss_qrfs_msg_init(&nqm, NSS_QRFS_INTERFACE, NSS_QRFS_MSG_FLOW_ADD, + sizeof(struct nss_qrfs_flow_rule_msg), cb, app_data); + + /* + * Set flow rule of QRFS flow rule add message + */ + nqfrm = &nqm.msg.flow_add; + res = nss_qrfs_get_flow_keys(nss_ctx, skb, nqfrm); + if (!res) { + return NSS_TX_FAILURE; + } + + nqfrm->cpu = (uint16_t)cpu; + nqfrm->if_num = if_num; + + /* + * Send QRFS flow rule add message to NSS core + */ + status = nss_qrfs_tx_msg(nss_ctx, &nqm); + if (status == NSS_TX_SUCCESS) { + return status; + } + + return NSS_TX_FAILURE; +} + +/* + * nss_qrfs_delete_flow_rule() + * Set a QRFS delete flow rule message and transmit the message to all NSS core. 
+ */ +static nss_tx_status_t nss_qrfs_delete_flow_rule(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + struct sk_buff *skb, uint32_t cpu, bool need_cb) +{ + struct nss_qrfs_msg nqm; + struct nss_qrfs_flow_rule_msg *nqfrm; + nss_tx_status_t status; + nss_qrfs_msg_callback_t cb = NULL; + void *app_data = NULL; + bool res; + + memset(&nqm, 0, sizeof(struct nss_qrfs_msg)); + + if (need_cb) { + cb = nss_qrfs_flow_delete_msg_callback; + app_data = (void *)nss_ctx; + } + + /* + * Initialize common header of QRFS flow rule delete message. + */ + nss_qrfs_msg_init(&nqm, NSS_QRFS_INTERFACE, NSS_QRFS_MSG_FLOW_DELETE, + sizeof(struct nss_qrfs_flow_rule_msg), cb, app_data); + + /* + * Set flow rule of QRFS flow rule delete message + */ + nqfrm = &nqm.msg.flow_delete; + res = nss_qrfs_get_flow_keys(nss_ctx, skb, nqfrm); + if (!res) { + return NSS_TX_FAILURE; + } + + nqfrm->cpu = (uint16_t)cpu; + nqfrm->if_num = if_num; + + /* + * Send QRFS flow rule delete message to NSS core + */ + status = nss_qrfs_tx_msg(nss_ctx, &nqm); + if (status == NSS_TX_SUCCESS) { + return status; + } + + return NSS_TX_FAILURE; +} + +/* + * nss_qrfs_set_flow_rule() + * Set a QRFS flow rule message and transmit the message to all NSS cores. + */ +nss_tx_status_t nss_qrfs_set_flow_rule(struct sk_buff *skb, uint32_t cpu, uint32_t action) +{ + struct nss_ctx_instance *nss_ctx; + nss_tx_status_t status; + int i; + + for (i = 0; i < NSS_CORE_MAX; i++) { + nss_ctx = nss_qrfs_get_ctx(i); + + /* + * Set QRFS flow rule message and transmit the message to NSS core. + * + * TODO: Remove if_num parameter from add_flow_rule() and + * delete_flow_rule(), since it is unused in firmware. 
+ */ + if (action == NSS_QRFS_MSG_FLOW_ADD) { + status = nss_qrfs_add_flow_rule(nss_ctx, 0, skb, cpu, true); + } else { + status = nss_qrfs_delete_flow_rule(nss_ctx, 0, skb, cpu, true); + } + + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: failed to send flow rule to NSS core %d\n", nss_ctx, i); + return NSS_TX_FAILURE; + } + } + + return NSS_TX_SUCCESS; +} +EXPORT_SYMBOL(nss_qrfs_set_flow_rule); + +/* + * nss_qrfs_register_handler() + */ +void nss_qrfs_register_handler(struct nss_ctx_instance *nss_ctx) +{ + nss_core_register_handler(nss_ctx, NSS_QRFS_INTERFACE, nss_qrfs_msg_handler, NULL); + + if (nss_ctx->id == NSS_CORE_0) { + nss_qrfs_stats_dentry_create(); + } +} +EXPORT_SYMBOL(nss_qrfs_register_handler); + +/* + * nss_qrfs_notify_register() + * Register to receive QRFS notify messages. + */ +struct nss_ctx_instance *nss_qrfs_notify_register(int core, nss_qrfs_msg_callback_t cb, void *app_data) +{ + if (core >= NSS_CORE_MAX) { + nss_warning("Input core number %d is wrong\n", core); + return NULL; + } + + nss_qrfs_notify[core].qrfs_callback = cb; + nss_qrfs_notify[core].app_data = app_data; + + return (struct nss_ctx_instance *)&nss_top_main.nss[core]; +} + +/* + * nss_qrfs_notify_unregister() + * Unregister to receive QRFS notify messages. 
+ */ +void nss_qrfs_notify_unregister(int core) +{ + if (core >= NSS_CORE_MAX) { + nss_warning("Input core number %d is wrong\n", core); + return; + } + + nss_qrfs_notify[core].qrfs_callback = NULL; + nss_qrfs_notify[core].app_data = NULL; +} + +/* + * nss_qrfs_init() + */ +void nss_qrfs_init(void) +{ + int core; + + for (core = 0; core < NSS_CORE_MAX; core++) { + nss_qrfs_notify_register(core, NULL, NULL); + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_log.c new file mode 100644 index 000000000..d481e2866 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_log.c @@ -0,0 +1,174 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_qrfs_log.c + * NSS QRFS logger file. 
+ */ + +#include "nss_core.h" +#define NSS_QRFS_LOG_IPV4 4 +#define NSS_QRFS_LOG_IPV6 6 + +/* + * nss_qrfs_log_message_types_str + * QRFS message strings + */ +static int8_t *nss_qrfs_log_message_types_str[NSS_QRFS_MSG_MAX] __maybe_unused = { + "QRFS Flow Add Message", + "QRFS Flow Delete Message", + "QRFS MAC Add Message", + "QRFS MAC Delete Message", + "QRFS Stats Sync", +}; + +/* + * nss_qrfs_log_error_response_types_str + * Strings for error types for QRFS messages + */ +static int8_t *nss_qrfs_log_error_response_types_str[NSS_QRFS_ERROR_MAX] __maybe_unused = { + "QRFS Invalid Message Type", + "QRFS Invalid Message Size", + "QRFS Invalid IP Version", + "QRFS V4 Flow Table Full", + "QRFS V6 Flow Table Full", + "QRFS MAC Table Full", +}; + +/* + * nss_qrfs_log_flow_rule_msg() + * Log NSS QRFS Flow Rule Message. + */ +static void nss_qrfs_log_flow_rule_msg(struct nss_qrfs_flow_rule_msg *nqfm) +{ + nss_trace("%px: NSS QRFS Flow Rule Message:\n" + "QRFS Source Port: %d\n" + "QRFS Destination Port: %d\n" + "QRFS IP Version: %d\n" + "QRFS Protcol: %d\n" + "QRFS CPU ID: %d\n" + "QRFS Physical Interface Number: %d\n", + nqfm, nqfm->src_port, + nqfm->dst_port, nqfm->ip_version, + nqfm->protocol, nqfm->cpu, + nqfm->if_num); + + /* + * Continuation of log. Different identifiers based on ip_version + */ + if (nqfm->ip_version == NSS_QRFS_LOG_IPV6) { + nss_trace("QRFS Source Address: %pI6\n" + "QRFS Destination Address: %pI6\n", + nqfm->src_addr, nqfm->dst_addr); + } else if (nqfm->ip_version == NSS_QRFS_LOG_IPV4) { + nss_trace("QRFS Source Address: %pI4\n" + "QRFS Destination Address: %pI4\n", + nqfm->src_addr, nqfm->dst_addr); + } +} + +/* + * nss_qrfs_log_mac_rule_msg() + * Log NSS QRFS MAC Rule Message. 
+ */ +static void nss_qrfs_log_mac_rule_msg(struct nss_qrfs_mac_rule_msg *nqmm) +{ + nss_trace("%px: NSS QRFS MAC Rule Message:\n" + "QRFS MAC: %pM\n" + "QRFS CPU ID: %d\n" + "QRFS Physical Interface Number: %d\n", + nqmm, nqmm->mac, + nqmm->cpu, nqmm->if_num); +} + +/* + * nss_qrfs_log_verbose() + * Log message contents. + */ +static void nss_qrfs_log_verbose(struct nss_qrfs_msg *nqm) +{ + switch (nqm->cm.type) { + case NSS_QRFS_MSG_FLOW_ADD: + case NSS_QRFS_MSG_FLOW_DELETE: + nss_qrfs_log_flow_rule_msg(&nqm->msg.flow_add); + break; + + case NSS_QRFS_MSG_MAC_ADD: + case NSS_QRFS_MSG_MAC_DELETE: + nss_qrfs_log_mac_rule_msg(&nqm->msg.mac_add); + break; + + case NSS_QRFS_MSG_STATS_SYNC: + /* + * No log for valid stats message. + */ + break; + + default: + nss_warning("%px: Invalid message type\n", nqm); + break; + } +} + +/* + * nss_qrfs_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_qrfs_log_tx_msg(struct nss_qrfs_msg *nqm) +{ + if (nqm->cm.type >= NSS_QRFS_MSG_MAX) { + nss_warning("%px: Invalid message type\n", nqm); + return; + } + + nss_info("%px: type[%d]:%s\n", nqm, nqm->cm.type, nss_qrfs_log_message_types_str[nqm->cm.type]); + nss_qrfs_log_verbose(nqm); +} + +/* + * nss_qrfs_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_qrfs_log_rx_msg(struct nss_qrfs_msg *nqm) +{ + if (nqm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nqm); + return; + } + + if (nqm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nqm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nqm, nqm->cm.type, + nss_qrfs_log_message_types_str[nqm->cm.type], + nqm->cm.response, nss_cmn_response_str[nqm->cm.response]); + goto verbose; + } + + if (nqm->cm.error >= NSS_QRFS_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nqm, nqm->cm.type, nss_qrfs_log_message_types_str[nqm->cm.type], + nqm->cm.response, nss_cmn_response_str[nqm->cm.response], + nqm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nqm, nqm->cm.type, nss_qrfs_log_message_types_str[nqm->cm.type], + nqm->cm.response, nss_cmn_response_str[nqm->cm.response], + nqm->cm.error, nss_qrfs_log_error_response_types_str[nqm->cm.error]); + +verbose: + nss_qrfs_log_verbose(nqm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_log.h new file mode 100644 index 000000000..de9832e66 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_QRFS_LOG_H__ +#define __NSS_QRFS_LOG_H__ + +/* + * nss_qrfs_log.h + * NSS QRFS Log Header File + */ + +/* + * nss_qrfs_log_tx_msg + * Logs a qrfs message that is sent to the NSS firmware. + */ +void nss_qrfs_log_tx_msg(struct nss_qrfs_msg *nqm); + +/* + * nss_qrfs_log_rx_msg + * Logs a qrfs message that is received from the NSS firmware. + */ +void nss_qrfs_log_rx_msg(struct nss_qrfs_msg *nqm); + +#endif /* __NSS_QRFS_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_stats.c new file mode 100644 index 000000000..694908307 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_stats.c @@ -0,0 +1,148 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_qrfs_stats.h" + +/* + * Spinlock to protect QRFS statistics update/read + */ +DEFINE_SPINLOCK(nss_qrfs_stats_lock); + +/* + * nss_qrfs_stats_str + * QRFS stats strings + */ +struct nss_stats_info nss_qrfs_stats_str[NSS_QRFS_STATS_MAX] = { + {"rx_pkts" , NSS_STATS_TYPE_COMMON}, + {"rx_byts" , NSS_STATS_TYPE_COMMON}, + {"tx_pkts" , NSS_STATS_TYPE_COMMON}, + {"tx_byts" , NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops" , NSS_STATS_TYPE_DROP}, + {"invalid_offset" , NSS_STATS_TYPE_EXCEPTION}, + {"unknown_protocol" , NSS_STATS_TYPE_EXCEPTION}, + {"ipv4_flow_rule_hits" , NSS_STATS_TYPE_SPECIAL}, + {"ipv6_flow_rule_hits" , NSS_STATS_TYPE_SPECIAL} +}; + +uint64_t nss_qrfs_stats[NSS_MAX_CORES][NSS_QRFS_STATS_MAX]; + +/* + * nss_qrfs_stats_read() + * Read QRFS statistics. + */ +static ssize_t nss_qrfs_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i, core; + + /* + * Max output lines = #stats + few blank lines for banner printing + + * Number of Extra outputlines for future reference to add new stats. 
+ */ + uint32_t max_output_lines = (NSS_QRFS_STATS_MAX + 3) * NSS_MAX_CORES + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_QRFS_STATS_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "qrfs", NSS_STATS_SINGLE_CORE); + /* + * QRFS statistics + */ + for (core = 0; core < nss_top_main.num_nss; core++) { + spin_lock_bh(&nss_qrfs_stats_lock); + for (i = 0; i < NSS_QRFS_STATS_MAX; i++) { + stats_shadow[i] = nss_qrfs_stats[core][i]; + } + spin_unlock_bh(&nss_qrfs_stats_lock); + + size_wr += nss_stats_print("qrfs", NULL, NSS_STATS_SINGLE_INSTANCE + , nss_qrfs_stats_str + , stats_shadow + , NSS_QRFS_STATS_MAX + , lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_qrfs_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(qrfs) + +/* + * nss_qrfs_stats_dentry_create() + * Create QRFS statistics debug entry. + */ +void nss_qrfs_stats_dentry_create(void) +{ + nss_stats_create_dentry("qrfs", &nss_qrfs_stats_ops); +} + +/* + * nss_qrfs_stats_sync() + * Handle the syncing of NSS QRFS statistics. 
+ */ +void nss_qrfs_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_qrfs_stats_sync_msg *nqssm) +{ + int id = nss_ctx->id; + int j; + + spin_lock_bh(&nss_qrfs_stats_lock); + + /* + * Common node stats + */ + nss_qrfs_stats[id][NSS_STATS_NODE_RX_PKTS] += nqssm->node_stats.rx_packets; + nss_qrfs_stats[id][NSS_STATS_NODE_RX_BYTES] += nqssm->node_stats.rx_bytes; + nss_qrfs_stats[id][NSS_STATS_NODE_TX_PKTS] += nqssm->node_stats.tx_packets; + nss_qrfs_stats[id][NSS_STATS_NODE_TX_BYTES] += nqssm->node_stats.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_qrfs_stats[id][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + j] += nqssm->node_stats.rx_dropped[j]; + } + + /* + * QRFS statistics + */ + nss_qrfs_stats[id][NSS_QRFS_STATS_INVALID_OFFSET] += nqssm->invalid_offset; + nss_qrfs_stats[id][NSS_QRFS_STATS_UNKNOWN_PROTO] += nqssm->unknown_protocol; + nss_qrfs_stats[id][NSS_QRFS_STATS_IPV4_FLOW_HITS] += nqssm->ipv4_flow_rule_hits; + nss_qrfs_stats[id][NSS_QRFS_STATS_IPV6_FLOW_HITS] += nqssm->ipv6_flow_rule_hits; + + spin_unlock_bh(&nss_qrfs_stats_lock); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_stats.h new file mode 100644 index 000000000..7992270da --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_qrfs_stats.h @@ -0,0 +1,38 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_QRFS_STATS_H +#define __NSS_QRFS_STATS_H + +/* + * QRFS node statistics + */ +enum nss_qrfs_stats_types { + NSS_QRFS_STATS_INVALID_OFFSET = NSS_STATS_NODE_MAX, + /* Number of packets with invalid L3, L4 offset */ + NSS_QRFS_STATS_UNKNOWN_PROTO, /* Number of packets with protocol other than TCP, UDP */ + NSS_QRFS_STATS_IPV4_FLOW_HITS, /* Number of IPv4 flow rule hits */ + NSS_QRFS_STATS_IPV6_FLOW_HITS, /* Number of IPv6 flow rule hits */ + NSS_QRFS_STATS_MAX, +}; + +/* + * QRFS statistics APIs + */ +extern void nss_qrfs_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_qrfs_stats_sync_msg *nqssm); +extern void nss_qrfs_stats_dentry_create(void); + +#endif /* __NSS_QRFS_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qvpn.c b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn.c new file mode 100644 index 000000000..b6068d947 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn.c @@ -0,0 +1,344 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_qvpn_stats.h" +#include "nss_qvpn_strings.h" +#include "nss_qvpn_log.h" + +#define NSS_QVPN_TX_TIMEOUT 1000 /* 1 Second */ +#define NSS_QVPN_INTERFACE_MAX_LONG BITS_TO_LONGS(NSS_MAX_NET_INTERFACES) /**< QVPN interface mapping bits. */ + +/* + * Private data structure + */ +static struct nss_qvpn_pvt { + struct semaphore sem; + struct completion complete; + unsigned long if_map[NSS_QVPN_INTERFACE_MAX_LONG]; + enum nss_qvpn_error_type resp; +} qvpn_pvt; + +/* + * nss_qvpn_verify_if_num() + * Verify if_num passed to us. + */ +static bool nss_qvpn_verify_if_num(uint32_t if_num) +{ + enum nss_dynamic_interface_type if_type; + + if_type = nss_dynamic_interface_get_type(nss_qvpn_get_context(), if_num); + if ((if_type != NSS_DYNAMIC_INTERFACE_TYPE_QVPN_INNER) && + (if_type != NSS_DYNAMIC_INTERFACE_TYPE_QVPN_OUTER)) { + nss_warning("%px: if_num = %u interface type returned is %d\n", nss_qvpn_get_context(), if_num, if_type); + return false; + } + + return true; +} + +/* + * nss_qvpn_handler() + * Handle NSS to HLOS messages for QVPN + */ +static void nss_qvpn_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *app_data) +{ + nss_qvpn_msg_callback_t cb; + + nss_assert(nss_qvpn_verify_if_num(ncm->interface)); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_QVPN_MSG_TYPE_MAX) { + nss_warning("%px: received invalid message %d for qvpn interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_qvpn_msg)) { + nss_warning("%px: length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Log failures + */ + + nss_core_log_msg_failures(nss_ctx, ncm); + /* + * Trace messages. + */ + nss_qvpn_log_rx_msg((struct nss_qvpn_msg *)ncm); + + if (ncm->type == NSS_QVPN_MSG_TYPE_SYNC_STATS) { + nss_qvpn_stats_tunnel_sync(nss_ctx, ncm); + nss_qvpn_stats_notify(nss_ctx, ncm->interface); + } + + /* + * Update the callback and app_data for NOTIFY messages, qvpn sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->nss_rx_interface_handlers[ncm->interface].app_data; + } + + /* + * load, test & call + */ + cb = (nss_qvpn_msg_callback_t)ncm->cb; + if (unlikely(!cb)) { + nss_trace("%px: rx handler unregistered for i/f: %u\n", nss_ctx, ncm->interface); + return; + } + + cb((void *)ncm->app_data, ncm); +} + +/* + * nss_qvpn_callback() + * Callback to handle the completion of NSS to HLOS messages. + */ +static void nss_qvpn_callback(void *app_data, struct nss_qvpn_msg *nvm) +{ + enum nss_qvpn_error_type *resp = (enum nss_qvpn_error_type *)app_data; + + *resp = (nvm->cm.response == NSS_CMN_RESPONSE_ACK) ? NSS_QVPN_ERROR_TYPE_NONE : nvm->cm.error; + + /* + * Write memory barrier + */ + smp_wmb(); + + complete(&qvpn_pvt.complete); +} + +/* + * nss_qvpn_ifmap_get() + * Return QVPN active interfaces map. + */ +unsigned long *nss_qvpn_ifmap_get(void) +{ + return qvpn_pvt.if_map; +} + +/* + * nss_qvpn_get_context() + * Return NSS QVPN context. 
+ */ +struct nss_ctx_instance *nss_qvpn_get_context(void) +{ + return &nss_top_main.nss[nss_top_main.qvpn_handler_id]; +} +EXPORT_SYMBOL(nss_qvpn_get_context); + +/* + * nss_qvpn_tx_msg() + * Transmit a QVPN message to NSS firmware + */ +nss_tx_status_t nss_qvpn_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_qvpn_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Sanity check the message + */ + if (!nss_qvpn_verify_if_num(ncm->interface)) { + nss_warning("%px: tx request for interface that is not a qvpn: %u\n", nss_ctx, ncm->interface); + return NSS_TX_FAILURE_BAD_PARAM; + } + + if (ncm->type >= NSS_QVPN_MSG_TYPE_MAX) { + nss_warning("%px: message type out of range: %d\n", nss_ctx, ncm->type); + return NSS_TX_FAILURE_BAD_PARAM; + } + + /* + * Trace messages. + */ + nss_qvpn_log_tx_msg(msg); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_qvpn_tx_msg); + +/* + * nss_qvpn_tx_msg_sync() + * Transmit a QVPN message to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_qvpn_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_qvpn_msg *nvm, + uint32_t if_num, enum nss_qvpn_msg_type type, uint16_t len, enum nss_qvpn_error_type *resp) +{ + struct nss_qvpn_msg nqm; + nss_tx_status_t status; + int ret = 0; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (len > sizeof(nqm.msg)) { + nss_warning("%px: Incorrect message length=%u for type %d and if_num=%u\n", nss_ctx, len, type, if_num); + return NSS_TX_FAILURE_TOO_LARGE; + } + + if (!resp) { + nss_warning("%px: Invalid input, resp=NULL\n", nss_ctx); + return NSS_TX_FAILURE_BAD_PARAM; + } + + nss_qvpn_msg_init(&nqm, if_num, type, len, nss_qvpn_callback, &qvpn_pvt.resp); + memcpy(&nqm.msg, &nvm->msg, len); + + down(&qvpn_pvt.sem); + + status = nss_qvpn_tx_msg(nss_ctx, &nqm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: qvpn_tx_msg failed\n", nss_ctx); + goto done; + } + + ret = wait_for_completion_timeout(&qvpn_pvt.complete, msecs_to_jiffies(NSS_QVPN_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: qvpn msg tx failed due to timeout\n", nss_ctx); + status = NSS_TX_FAILURE_SYNC_TIMEOUT; + goto done; + } + + /* + * Read memory barrier + */ + smp_rmb(); + + *resp = qvpn_pvt.resp; + if (*resp != NSS_QVPN_ERROR_TYPE_NONE) + status = NSS_TX_FAILURE; +done: + up(&qvpn_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_qvpn_tx_msg_sync); + +/* + * nss_qvpn_tx_buf() + * Send packet to QVPN interface owned by NSS + */ +nss_tx_status_t nss_qvpn_tx_buf(struct nss_ctx_instance *nss_ctx, uint32_t if_num, struct sk_buff *skb) +{ + if (!nss_qvpn_verify_if_num(if_num)) { + nss_warning("%px: tx request for interface that is not a qvpn: %u\n", nss_ctx, if_num); + return NSS_TX_FAILURE_BAD_PARAM; + } + + return nss_core_send_packet(nss_ctx, skb, if_num, H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_qvpn_tx_buf); + +/* + * nss_qvpn_msg_init() + * Initialize nss_qvpn_msg. 
+ */
+void nss_qvpn_msg_init(struct nss_qvpn_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data)
+{
+	nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data);
+}
+EXPORT_SYMBOL(nss_qvpn_msg_init);
+
+/*
+ * nss_qvpn_register_if()
+ *	Register QVPN interface.
+ */
+struct nss_ctx_instance *nss_qvpn_register_if(uint32_t if_num, nss_qvpn_callback_t qvpn_data_callback,
+					nss_qvpn_msg_callback_t qvpn_event_callback,
+					struct net_device *netdev, uint32_t features, void *app_ctx)
+{
+	struct nss_ctx_instance *nss_ctx = nss_qvpn_get_context();
+	uint32_t status;
+
+	nss_assert(nss_ctx);
+	nss_assert(nss_qvpn_verify_if_num(if_num));
+
+	nss_core_register_subsys_dp(nss_ctx, if_num, qvpn_data_callback, NULL, app_ctx, netdev, features);
+	nss_core_register_handler(nss_ctx, if_num, nss_qvpn_handler, app_ctx);
+	status = nss_core_register_msg_handler(nss_ctx, if_num, qvpn_event_callback);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		nss_warning("%px: Not able to register handler for interface %d with NSS core\n", nss_ctx, if_num);
+		return NULL;
+	}
+
+	set_bit(if_num, qvpn_pvt.if_map);
+	return nss_ctx;
+}
+EXPORT_SYMBOL(nss_qvpn_register_if);
+
+/*
+ * nss_qvpn_unregister_if()
+ *	Unregister QVPN interface.
+ */
+void nss_qvpn_unregister_if(uint32_t if_num)
+{
+	struct nss_ctx_instance *nss_ctx = nss_qvpn_get_context();
+	uint32_t status;
+
+	nss_assert(nss_qvpn_verify_if_num(if_num));
+
+	clear_bit(if_num, qvpn_pvt.if_map);
+
+	nss_core_unregister_subsys_dp(nss_ctx, if_num);
+	status = nss_core_unregister_msg_handler(nss_ctx, if_num);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		nss_warning("%px: Failed to unregister handler for IPsec NSS I/F:%u\n", nss_ctx, if_num);
+		return;
+	}
+
+	status = nss_core_unregister_handler(nss_ctx, if_num);
+	if (status != NSS_CORE_STATUS_SUCCESS) {
+		nss_warning("%px: Failed to unregister handler for IPsec NSS I/F:%u\n", nss_ctx, if_num);
+		return;
+	}
+}
+EXPORT_SYMBOL(nss_qvpn_unregister_if);
+
+/*
+ * nss_qvpn_ifnum_with_core_id()
+ *	Append core id to QVPN interface number
+ */
+int nss_qvpn_ifnum_with_core_id(int if_num)
+{
+	struct nss_ctx_instance *nss_ctx = nss_qvpn_get_context();
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+	if (nss_qvpn_verify_if_num(if_num) == false) {
+		nss_info("%px: if_num: %u is not QVPN interface\n", nss_ctx, if_num);
+		return 0;
+	}
+	return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num);
+}
+EXPORT_SYMBOL(nss_qvpn_ifnum_with_core_id);
+
+/*
+ * nss_qvpn_register_handler()
+ *	Initialize QVPN driver and register handler.
+ */
+void nss_qvpn_register_handler(void)
+{
+	nss_info("nss_qvpn_register_handler\n");
+	sema_init(&qvpn_pvt.sem, 1);
+	init_completion(&qvpn_pvt.complete);
+	nss_qvpn_stats_dentry_create();
+	nss_qvpn_strings_dentry_create();
+}
diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_log.c
new file mode 100644
index 000000000..d71c79ee1
--- /dev/null
+++ b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_log.c
@@ -0,0 +1,262 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_qvpn_log.c + * NSS qvpn logger file. + */ + +#include "nss_core.h" + +/* + * nss_qvpn_log_message_types_str + * qvpn message strings + */ +static int8_t *nss_qvpn_log_message_types_str[NSS_QVPN_MSG_TYPE_MAX] __maybe_unused = { + "QVPN tunnel config", + "QVPN tunnel deconfig", + "QVPN crypto key add", + "QVPN crypto key delete", + "QVPN crypto crypto key activate", + "QVPN crypto key Deactivate", + "QVPN statistics synchronization" +}; + +/* + * nss_qvpn_log_error_response_types_str + * Strings for error types for qvpn messages + */ +static int8_t *nss_qvpn_log_error_response_types_str[NSS_QVPN_ERROR_TYPE_MAX] __maybe_unused = { + "QVPN No error", + "QVPN Unknown message", + "QVPN Tunnel already configured", + "QVPN Invalid interface", + "QVPN Invalid sibling interface number", + "QVPN Invalid IV size", + "QVPN Invalid HMAC size", + "QVPN Invalid crypto block size", + "QVPN Invalid session idx size", + "QVPN Supported processing command count invalid", + "QVPN L4 protocol encapsulation is not supported", + "QVPN Invalid sibling interface type", + "QVPN Total number of commands is invalid", + "QVPN Entry not found", + "QVPN Entry not active", + "QVPN 
Entry already active", + "QVPN Invalid crypto index", + "QVPN Key info allocation failure", + "QVPN Invalid command profile", + "QVPN VPN with tail not supported" +}; + +/* + * nss_qvpn_tun_config_msg() + * Log NSS QVPN configuration message. + */ +static void nss_qvpn_log_tun_config_msg(struct nss_qvpn_msg *ncm) +{ + struct nss_qvpn_tunnel_config_msg *nqtcm __maybe_unused = &ncm->msg.tunnel_config; + nss_trace("%px: NSS QVPN tunnel config message \n" + "Sibling interface: %d" + "Total number of commands: %d" + "Commands: %px" + "Source IP: %x:%x:%x:%x\n" + "Source Port: %d\n" + "Destination IP: %x:%x:%x:%x\n" + "Destination Port: %d\n" + "Header Flags: %x\n" + "Sequence number size: %d\n" + "Sequence number offset: %d\n" + "Anti-replay algorithm: %d\n" + "Session ID size: %d\n" + "Session ID offset: %x\n" + "VPN header head size: %d\n" + "VPN header head offset: %d\n" + "VPN header tail size: %d\n" + "VPN header head: %px\n" + "VPN header tail: %px\n", + nqtcm, + nqtcm->sibling_if, + nqtcm->total_cmds, + nqtcm->cmd, + nqtcm->hdr_cfg.src_ip[0], nqtcm->hdr_cfg.src_ip[1], nqtcm->hdr_cfg.src_ip[2], nqtcm->hdr_cfg.src_ip[3], + nqtcm->hdr_cfg.src_port, + nqtcm->hdr_cfg.dst_ip[0], nqtcm->hdr_cfg.dst_ip[1], nqtcm->hdr_cfg.dst_ip[2], nqtcm->hdr_cfg.dst_ip[3], + nqtcm->hdr_cfg.dst_port, + nqtcm->hdr_cfg.hdr_flags, + nqtcm->hdr_cfg.seqnum_size, + nqtcm->hdr_cfg.seqnum_offset, + nqtcm->hdr_cfg.anti_replay_alg, + nqtcm->hdr_cfg.session_id_size, + nqtcm->hdr_cfg.session_id_offset, + nqtcm->hdr_cfg.vpn_hdr_head_size, + nqtcm->hdr_cfg.vpn_hdr_head_offset, + nqtcm->hdr_cfg.vpn_hdr_tail_size, + nqtcm->hdr_cfg.vpn_hdr_head, + nqtcm->hdr_cfg.vpn_hdr_tail); +} + +/* + * nss_qvpn_log_tun_deconfig_msg() + * Log NSS qvpn tunnel deconfigure message. + */ +static void nss_qvpn_log_tun_deconfig_msg(struct nss_qvpn_msg *ncm) +{ + nss_trace("%px: NSS QVPN deconfigure message \n", ncm); +} + +/* + * nss_qvpn_log_crypto_key_add_msg() + * Log NSS QVPN crypto key add message. 
+ */ +static void nss_qvpn_log_crypto_key_add_msg(struct nss_qvpn_msg *ncm) +{ + struct nss_qvpn_crypto_key_add_msg *nqckam __maybe_unused = &ncm->msg.key_add; + nss_trace("%px: NSS QVPN crypto key add message \n" + "Crypto index: %d\n" + "Crypto session ID: %px", + nqckam, + nqckam->crypto_idx, + nqckam->session_id); +} + +/* + * nss_qvpn_log_crypto_key_activate_msg() + * Log NSS QVPN crypto key activate message. + */ +static void nss_qvpn_log_crypto_key_activate_msg(struct nss_qvpn_msg *ncm) +{ + struct nss_qvpn_crypto_key_activate_msg *nqckam __maybe_unused = &ncm->msg.key_activate; + nss_trace("%px: NSS QVPN crypto key activate message \n" + "Crypto index: %d\n" + "Crypto VPN header head: %px", + nqckam, + nqckam->crypto_idx, + nqckam->vpn_hdr_head); +} + +/* + * nss_qvpn_log_crypto_key_del_msg() + * Log NSS QVPN crypto key delete message. + */ +static void nss_qvpn_log_crypto_key_del_msg(struct nss_qvpn_msg *ncm) +{ + struct nss_qvpn_crypto_key_del_msg *nqckdm __maybe_unused = &ncm->msg.key_del; + nss_trace("%px: NSS QVPN crypto key delete message \n" + "Crypto index: %d\n", + nqckdm, + nqckdm->crypto_idx); +} + +/* + * nss_qvpn_log_crypto_key_deactivate_msg() + * Log NSS QVPN crypto key deactivate message. + */ +static void nss_qvpn_log_crypto_key_deactivate_msg(struct nss_qvpn_msg *ncm) +{ + struct nss_qvpn_crypto_key_del_msg *nqckdm __maybe_unused = &ncm->msg.key_del; + nss_trace("%px: NSS QVPN crypto key deactivate message \n" + "Crypto index: %d\n", + nqckdm, + nqckdm->crypto_idx); +} + +/* + * nss_qvpn_log_verbose() + * Log message contents. 
+ */ +static void nss_qvpn_log_verbose(struct nss_qvpn_msg *ncm) +{ + switch (ncm->cm.type) { + case NSS_QVPN_MSG_TYPE_TUNNEL_CONFIGURE: + nss_qvpn_log_tun_config_msg(ncm); + break; + + case NSS_QVPN_MSG_TYPE_TUNNEL_DECONFIGURE: + nss_qvpn_log_tun_deconfig_msg(ncm); + break; + + case NSS_QVPN_MSG_TYPE_CRYPTO_KEY_ADD: + nss_qvpn_log_crypto_key_add_msg(ncm); + break; + + case NSS_QVPN_MSG_TYPE_CRYPTO_KEY_ACTIVATE: + nss_qvpn_log_crypto_key_activate_msg(ncm); + break; + + case NSS_QVPN_MSG_TYPE_CRYPTO_KEY_DEL: + nss_qvpn_log_crypto_key_del_msg(ncm); + break; + + case NSS_QVPN_MSG_TYPE_CRYPTO_KEY_DEACTIVATE: + nss_qvpn_log_crypto_key_deactivate_msg(ncm); + break; + + default: + nss_trace("%px: Invalid message type\n", ncm); + break; + } +} + +/* + * nss_qvpn_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_qvpn_log_tx_msg(struct nss_qvpn_msg *ncm) +{ + if (ncm->cm.type >= NSS_QVPN_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", ncm); + return; + } + + nss_info("%px: type[%d]:%s\n", ncm, ncm->cm.type, nss_qvpn_log_message_types_str[ncm->cm.type]); + nss_qvpn_log_verbose(ncm); +} + +/* + * nss_qvpn_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_qvpn_log_rx_msg(struct nss_qvpn_msg *ncm) +{ + if (ncm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ncm); + return; + } + + if (ncm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ncm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ncm, ncm->cm.type, + nss_qvpn_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response]); + goto verbose; + } + + if (ncm->cm.error >= NSS_QVPN_ERROR_TYPE_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ncm, ncm->cm.type, nss_qvpn_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ncm, ncm->cm.type, nss_qvpn_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error, nss_qvpn_log_error_response_types_str[ncm->cm.error]); + +verbose: + nss_qvpn_log_verbose(ncm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_log.h new file mode 100644 index 000000000..e1dee898b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_QVPN_LOG_H__ +#define __NSS_QVPN_LOG_H__ + +/* + * nss_qvpn_log.h + * NSS QVPN Log Header File. + */ + +/* + * nss_qvpn_log_tx_msg + * Logs QVPN message that is sent to the NSS firmware. + */ +void nss_qvpn_log_tx_msg(struct nss_qvpn_msg *ncm); + +/* + * nss_qvpn_log_rx_msg + * Logs QVPN message that is received from the NSS firmware. + */ +void nss_qvpn_log_rx_msg(struct nss_qvpn_msg *ncm); + +#endif /* __NSS_QVPN_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_stats.c new file mode 100644 index 000000000..4a9bdc116 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_stats.c @@ -0,0 +1,203 @@ +/* + ************************************************************************** + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_core.h" +#include +#include "nss_qvpn_stats.h" +#include "nss_qvpn_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_qvpn_stats_notifier); + +/* + * Spinlock to protect qvpn statistics update/read + */ +DEFINE_SPINLOCK(nss_qvpn_stats_lock); + +uint64_t nss_qvpn_stats[NSS_MAX_NET_INTERFACES][NSS_STATS_NODE_MAX]; /* to store the qvpn statistics */ + +/* + * nss_qvpn_stats_iface_type() + * Return a string for each interface type. + */ +static const char *nss_qvpn_stats_iface_type(enum nss_dynamic_interface_type type) +{ + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_QVPN_INNER: + return "qvpn_inner"; + + case NSS_DYNAMIC_INTERFACE_TYPE_QVPN_OUTER: + return "qvpn_outer"; + + default: + return "invalid_interface"; + } +} + +/* + * nss_qvpn_stats_read() + * Read qvpn node statiistics. + */ +static ssize_t nss_qvpn_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats + + * few blank lines for banner printing + Number of Extra outputlines + * for future reference to add new stats + */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + struct nss_ctx_instance *nss_ctx = nss_qvpn_get_context(); + enum nss_dynamic_interface_type type; + unsigned long *ifmap; + uint64_t *stats_shadow; + ssize_t bytes_read = 0; + size_t size_wr = 0; + uint32_t if_num; + int32_t i; + int count; + char *lbuf; + + ifmap = nss_qvpn_ifmap_get(); + count = bitmap_weight(ifmap, NSS_MAX_NET_INTERFACES); + if (count) { + size_al = size_al * count; + } + + lbuf = vzalloc(size_al); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return -ENOMEM; + } + + stats_shadow = vzalloc(NSS_STATS_NODE_MAX * 8); + if (unlikely(!stats_shadow)) { + 
nss_warning("Could not allocate memory for local shadow buffer"); + vfree(lbuf); + return -ENOMEM; + } + + /* + * Common node stats for each QVPN dynamic interface. + */ + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "qvpn stats", NSS_STATS_SINGLE_CORE); + for_each_set_bit(if_num, ifmap, NSS_MAX_NET_INTERFACES) { + + type = nss_dynamic_interface_get_type(nss_ctx, if_num); + if ((type != NSS_DYNAMIC_INTERFACE_TYPE_QVPN_INNER) && + (type != NSS_DYNAMIC_INTERFACE_TYPE_QVPN_OUTER)) { + continue; + } + + spin_lock_bh(&nss_qvpn_stats_lock); + for (i = 0; i < NSS_STATS_NODE_MAX; i++) { + stats_shadow[i] = nss_qvpn_stats[if_num][i]; + } + spin_unlock_bh(&nss_qvpn_stats_lock); + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n%s if_num:%03u\n", + nss_qvpn_stats_iface_type(type), if_num); + size_wr += nss_stats_print("qvpn", NULL, NSS_STATS_SINGLE_INSTANCE, nss_qvpn_strings_stats, + stats_shadow, NSS_STATS_NODE_MAX, lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + vfree(lbuf); + vfree(stats_shadow); + return bytes_read; +} + +/* + * nss_qvpn_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(qvpn) + +/* + * nss_qvpn_stats_tunnel_sync + * Update qvpn interface statistics. 
+ */ +void nss_qvpn_stats_tunnel_sync(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm) +{ + struct nss_qvpn_msg *ndcm = (struct nss_qvpn_msg *)ncm; + struct nss_qvpn_stats_sync_msg *msg_stats = &ndcm->msg.stats; + + spin_lock_bh(&nss_qvpn_stats_lock); + + /* + * Update common node stats + */ + nss_qvpn_stats[ncm->interface][NSS_STATS_NODE_RX_PKTS] += msg_stats->node_stats.rx_packets; + nss_qvpn_stats[ncm->interface][NSS_STATS_NODE_RX_BYTES] += msg_stats->node_stats.rx_bytes; + nss_qvpn_stats[ncm->interface][NSS_STATS_NODE_RX_QUEUE_0_DROPPED] += msg_stats->node_stats.rx_dropped[0]; + nss_qvpn_stats[ncm->interface][NSS_STATS_NODE_RX_QUEUE_1_DROPPED] += msg_stats->node_stats.rx_dropped[1]; + nss_qvpn_stats[ncm->interface][NSS_STATS_NODE_RX_QUEUE_2_DROPPED] += msg_stats->node_stats.rx_dropped[2]; + nss_qvpn_stats[ncm->interface][NSS_STATS_NODE_RX_QUEUE_3_DROPPED] += msg_stats->node_stats.rx_dropped[3]; + + nss_qvpn_stats[ncm->interface][NSS_STATS_NODE_TX_PKTS] += msg_stats->node_stats.tx_packets; + nss_qvpn_stats[ncm->interface][NSS_STATS_NODE_TX_BYTES] += msg_stats->node_stats.tx_bytes; + + spin_unlock_bh(&nss_qvpn_stats_lock); +} + +/* + * nss_qvpn_stats_notify() + * Sends notifications to all the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_qvpn_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_qvpn_stats_notification qvpn_stats; + + spin_lock_bh(&nss_qvpn_stats_lock); + qvpn_stats.core_id = nss_ctx->id; + qvpn_stats.if_num = if_num; + memcpy(qvpn_stats.stats_ctx, nss_qvpn_stats[if_num], sizeof(qvpn_stats.stats_ctx)); + spin_unlock_bh(&nss_qvpn_stats_lock); + + atomic_notifier_call_chain(&nss_qvpn_stats_notifier, NSS_STATS_EVENT_NOTIFY, &qvpn_stats); +} + +/* + * nss_qvpn_stats_unregister_notifier() + * Deregisters statistics notifier. 
+ */ +int nss_qvpn_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_qvpn_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_qvpn_stats_unregister_notifier); + +/* + * nss_qvpn_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_qvpn_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_qvpn_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_qvpn_stats_register_notifier); + +/* + * nss_qvpn_stats_dentry_create() + * Create QVPN statistics debug entry. + */ +void nss_qvpn_stats_dentry_create(void) +{ + nss_stats_create_dentry("qvpn", &nss_qvpn_stats_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_stats.h new file mode 100644 index 000000000..74bbe11de --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_stats.h @@ -0,0 +1,24 @@ +/* + ************************************************************************** + * Copyright (c) 2019, 2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#ifndef _NSS_QVPN_STATS_H_ +#define _NSS_QVPN_STATS_H_ + +extern void nss_qvpn_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_qvpn_stats_tunnel_sync(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm); +extern void nss_qvpn_stats_dentry_create(void); + +#endif /* _NSS_QVPN_STATS_H_ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_strings.c new file mode 100644 index 000000000..2af34aad4 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_strings.c @@ -0,0 +1,60 @@ +/* + ***************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ***************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_qvpn_strings.h" + +/* + * nss_qvpn_strings_stats + * qvpn statistics strings. 
+ */ +struct nss_stats_info nss_qvpn_strings_stats[NSS_STATS_NODE_MAX] = { + {"rx_pkts", NSS_STATS_TYPE_COMMON}, + {"rx_byts", NSS_STATS_TYPE_COMMON}, + {"tx_pkts", NSS_STATS_TYPE_COMMON}, + {"tx_byts", NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops", NSS_STATS_TYPE_DROP} +}; + +/* + * nss_qvpn_strings_read() + * Read qvpn statistics names + */ +static ssize_t nss_qvpn_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_qvpn_strings_stats, NSS_STATS_NODE_MAX); +} + +/* + * nss_qvpn_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(qvpn); + +/* + * nss_qvpn_strings_dentry_create() + * Create qvpn statistics strings debug entry. + */ +void nss_qvpn_strings_dentry_create(void) +{ + nss_strings_create_dentry("qvpn", &nss_qvpn_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_strings.h new file mode 100644 index 000000000..4b874d803 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_qvpn_strings.h @@ -0,0 +1,27 @@ +/* + **************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + **************************************************************************** + */ + +#ifndef __NSS_QVPN_STRINGS_H +#define __NSS_QVPN_STRINGS_H + +#include "nss_qvpn_stats.h" + +extern struct nss_stats_info nss_qvpn_strings_stats[NSS_STATS_NODE_MAX]; +extern void nss_qvpn_strings_dentry_create(void); + +#endif /* __NSS_QVPN_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx.c b/feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx.c new file mode 100644 index 000000000..c77ef3f3e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx.c @@ -0,0 +1,781 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_rmnet_rx.c + * NSS rmnet receive handler APIs + */ + +#include "nss_tx_rx_common.h" +#include "nss_rmnet_rx_stats.h" +#include + +#define NSS_RMNET_RX_TX_TIMEOUT 3000 /* 3 Seconds */ +#define NSS_RMNET_RX_GET_INDEX(if_num) (if_num - NSS_DYNAMIC_IF_START) + +/* + * Spinlock to protect the global data structure rmnet handle. + */ +DEFINE_SPINLOCK(nss_rmnet_rx_lock); + +extern int nss_ctl_redirect; + +/* + * Data structure that holds theinterface context. + */ +struct nss_rmnet_rx_handle *rmnet_rx_handle[NSS_MAX_DYNAMIC_INTERFACES]; + +/* + * nss_rmnet_rx_verify_if_num() + * Verify if_num passed to us. + */ +bool nss_rmnet_rx_verify_if_num(uint32_t if_num) +{ + enum nss_dynamic_interface_type type = nss_dynamic_interface_get_type(nss_rmnet_rx_get_context(), if_num); + + return type == NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_N2H + || type == NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_H2N; +} + +/* + * nss_rmnet_rx_msg_handler() + * Handle msg responses from the FW on interfaces + */ +static void nss_rmnet_rx_msg_handler(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, + void *app_data) +{ + struct nss_rmnet_rx_msg *nvim = (struct nss_rmnet_rx_msg *)ncm; + int32_t if_num; + + nss_rmnet_rx_msg_callback_t cb; + struct nss_rmnet_rx_handle *handle = NULL; + + /* + * Sanity check the message type + */ + if (ncm->type > NSS_RMNET_RX_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return; + } + + /* + * Messages value that are within the base class are handled by the base class. 
+ */ + if (ncm->type < NSS_IF_MAX_MSG_TYPES) { + return nss_if_msg_handler(nss_ctx, ncm, app_data); + } + + if (!nss_rmnet_rx_verify_if_num(ncm->interface)) { + nss_warning("%px: response for another interface: %d", nss_ctx, ncm->interface); + return; + } + + if_num = NSS_RMNET_RX_GET_INDEX(ncm->interface); + + spin_lock_bh(&nss_rmnet_rx_lock); + if (!rmnet_rx_handle[if_num]) { + spin_unlock_bh(&nss_rmnet_rx_lock); + nss_warning("%px: rmnet_rx handle is NULL\n", nss_ctx); + return; + } + + handle = rmnet_rx_handle[if_num]; + spin_unlock_bh(&nss_rmnet_rx_lock); + + switch (nvim->cm.type) { + case NSS_RMNET_RX_STATS_SYNC_MSG: + nss_rmnet_rx_stats_sync(handle, &nvim->msg.stats, ncm->interface); + break; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Update the callback and app_data for NOTIFY messages, IPv4 sends all notify messages + * to the same callback/app_data. + * + * TODO: RMNet driver does not provide a registration for a notifier callback. + * Since dynamic interface are allocated on both cores and since the array for + * registered callback is not core specific, we call the Wi-Fi callback + * inappropriately. Disable the callback locally until we have per-core + * callback registrations. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)NULL; + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].ndev; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * Callback + */ + cb = (nss_rmnet_rx_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, ncm); +} + +/* + * nss_rmnet_rx_callback + * Callback to handle the completion of NSS ->HLOS messages. 
+ */ +static void nss_rmnet_rx_callback(void *app_data, struct nss_cmn_msg *ncm) +{ + struct nss_rmnet_rx_handle *handle = (struct nss_rmnet_rx_handle *)app_data; + struct nss_rmnet_rx_pvt *nvip = handle->pvt; + + if (ncm->response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: rmnet_rx Error response %d\n", handle->nss_ctx, ncm->response); + nvip->response = NSS_TX_FAILURE; + complete(&nvip->complete); + return; + } + + nvip->response = NSS_TX_SUCCESS; + complete(&nvip->complete); +} + +/* + * nss_rmnet_rx_tx_msg_sync + * Send a message from HLOS to NSS synchronously. + */ +static nss_tx_status_t nss_rmnet_rx_tx_msg_sync(struct nss_rmnet_rx_handle *handle, + struct nss_rmnet_rx_msg *nvim) +{ + nss_tx_status_t status; + int ret = 0; + struct nss_rmnet_rx_pvt *nwip = handle->pvt; + struct nss_ctx_instance *nss_ctx = handle->nss_ctx; + + down(&nwip->sem); + + status = nss_rmnet_rx_tx_msg(nss_ctx, nvim); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_rmnet_rx_msg failed\n", nss_ctx); + up(&nwip->sem); + return status; + } + + ret = wait_for_completion_timeout(&nwip->complete, + msecs_to_jiffies(NSS_RMNET_RX_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: rmnet_rx tx failed due to timeout\n", nss_ctx); + nwip->response = NSS_TX_FAILURE; + } + + status = nwip->response; + up(&nwip->sem); + + return status; +} + +/* + * nss_rmnet_rx_msg_init() + * Initialize virt specific message structure. + */ +static void nss_rmnet_rx_msg_init(struct nss_rmnet_rx_msg *nvim, + uint16_t if_num, + uint32_t type, + uint32_t len, + nss_rmnet_rx_msg_callback_t cb, + struct nss_rmnet_rx_handle *app_data) +{ + nss_cmn_msg_init(&nvim->cm, if_num, type, len, (void *)cb, (void *)app_data); +} + +/* + * nss_rmnet_rx_handle_destroy_sync() + * Destroy the virt handle either due to request from user or due to error, synchronously. 
+ */ +static int nss_rmnet_rx_handle_destroy_sync(struct nss_rmnet_rx_handle *handle) +{ + nss_tx_status_t status; + int32_t if_num_n2h = handle->if_num_n2h; + int32_t if_num_h2n = handle->if_num_h2n; + int32_t index_n2h; + int32_t index_h2n; + + if (!nss_rmnet_rx_verify_if_num(if_num_n2h) || !nss_rmnet_rx_verify_if_num(if_num_h2n)) { + nss_warning("%px: bad interface numbers %d %d\n", handle->nss_ctx, if_num_n2h, if_num_h2n); + return NSS_TX_FAILURE_BAD_PARAM; + } + + index_n2h = NSS_RMNET_RX_GET_INDEX(if_num_n2h); + index_h2n = NSS_RMNET_RX_GET_INDEX(if_num_h2n); + + status = nss_dynamic_interface_dealloc_node(if_num_n2h, NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_N2H); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Dynamic interface destroy failed status %d\n", handle->nss_ctx, status); + return status; + } + + status = nss_dynamic_interface_dealloc_node(if_num_h2n, NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_H2N); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Dynamic interface destroy failed status %d\n", handle->nss_ctx, status); + return status; + } + + spin_lock_bh(&nss_rmnet_rx_lock); + rmnet_rx_handle[index_n2h] = NULL; + rmnet_rx_handle[index_h2n] = NULL; + spin_unlock_bh(&nss_rmnet_rx_lock); + + kfree(handle->pvt); + kfree(handle); + + return status; +} + +/* + * nss_rmnet_rx_handle_create_sync() + * Initialize virt handle which holds the if_num and stats per interface. 
+ */ +static struct nss_rmnet_rx_handle *nss_rmnet_rx_handle_create_sync(struct nss_ctx_instance *nss_ctx, int32_t if_num_n2h, int32_t if_num_h2n, int32_t *cmd_rsp) +{ + int32_t index_n2h; + int32_t index_h2n; + struct nss_rmnet_rx_handle *handle; + + if (!nss_rmnet_rx_verify_if_num(if_num_n2h) || !nss_rmnet_rx_verify_if_num(if_num_h2n)) { + nss_warning("%px: bad interface numbers %d %d\n", nss_ctx, if_num_n2h, if_num_h2n); + return NULL; + } + + index_n2h = NSS_RMNET_RX_GET_INDEX(if_num_n2h); + index_h2n = NSS_RMNET_RX_GET_INDEX(if_num_h2n); + + handle = (struct nss_rmnet_rx_handle *)kzalloc(sizeof(struct nss_rmnet_rx_handle), + GFP_KERNEL); + if (!handle) { + nss_warning("%px: handle memory alloc failed\n", nss_ctx); + *cmd_rsp = NSS_RMNET_RX_ALLOC_FAILURE; + goto error1; + } + + handle->nss_ctx = nss_ctx; + handle->if_num_n2h = if_num_n2h; + handle->if_num_h2n = if_num_h2n; + handle->pvt = (struct nss_rmnet_rx_pvt *)kzalloc(sizeof(struct nss_rmnet_rx_pvt), + GFP_KERNEL); + if (!handle->pvt) { + nss_warning("%px: failure allocating memory for nss_rmnet_rx_pvt\n", nss_ctx); + *cmd_rsp = NSS_RMNET_RX_ALLOC_FAILURE; + goto error2; + } + + handle->stats_n2h = (uint64_t *)kzalloc(sizeof(uint64_t) * NSS_RMNET_RX_STATS_MAX, + GFP_KERNEL); + if (!handle->stats_n2h) { + nss_warning("%px: failure allocating memory for N2H stats\n", nss_ctx); + *cmd_rsp = NSS_RMNET_RX_ALLOC_FAILURE; + goto error3; + } + + handle->stats_h2n = (uint64_t *)kzalloc(sizeof(uint64_t) * NSS_RMNET_RX_STATS_MAX, + GFP_KERNEL); + if (!handle->stats_h2n) { + nss_warning("%px: failure allocating memory for H2N stats\n", nss_ctx); + *cmd_rsp = NSS_RMNET_RX_ALLOC_FAILURE; + goto error4; + } + + handle->cb = NULL; + handle->app_data = NULL; + + spin_lock_bh(&nss_rmnet_rx_lock); + rmnet_rx_handle[index_n2h] = handle; + rmnet_rx_handle[index_h2n] = handle; + spin_unlock_bh(&nss_rmnet_rx_lock); + + *cmd_rsp = NSS_RMNET_RX_SUCCESS; + + return handle; + +error4: + kfree(handle->stats_n2h); +error3: + 
kfree(handle->pvt); +error2: + kfree(handle); +error1: + return NULL; +} + +/* + * nss_rmnet_rx_register_handler_sync() + * register msg handler for interface and initialize semaphore and completion. + */ +static uint32_t nss_rmnet_rx_register_handler_sync(struct nss_ctx_instance *nss_ctx, struct nss_rmnet_rx_handle *handle) +{ + uint32_t ret; + struct nss_rmnet_rx_pvt *nvip = NULL; + int32_t if_num_n2h = handle->if_num_n2h; + int32_t if_num_h2n = handle->if_num_h2n; + + ret = nss_core_register_handler(nss_ctx, if_num_n2h, nss_rmnet_rx_msg_handler, NULL); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Failed to register message handler for redir_n2h interface %d\n", nss_ctx, if_num_n2h); + return NSS_RMNET_RX_REG_FAILURE; + } + + ret = nss_core_register_handler(nss_ctx, if_num_h2n, nss_rmnet_rx_msg_handler, NULL); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num_n2h); + nss_warning("%px: Failed to register message handler for redir_h2n interface %d\n", nss_ctx, if_num_h2n); + return NSS_RMNET_RX_REG_FAILURE; + } + + nvip = handle->pvt; + if (!nvip->sem_init_done) { + sema_init(&nvip->sem, 1); + init_completion(&nvip->complete); + nvip->sem_init_done = 1; + } + + nss_rmnet_rx_stats_dentry_create(); + return NSS_RMNET_RX_SUCCESS; +} + +/* + * nss_rmnet_rx_destroy_sync() + * Destroy the virt interface associated with the interface number, synchronously. 
+ */ +nss_tx_status_t nss_rmnet_rx_destroy_sync(struct nss_rmnet_rx_handle *handle) +{ + nss_tx_status_t status; + struct net_device *dev; + int32_t if_num_n2h; + int32_t if_num_h2n; + struct nss_ctx_instance *nss_ctx; + uint32_t ret; + + if (!handle) { + nss_warning("handle is NULL\n"); + return NSS_TX_FAILURE_BAD_PARAM; + } + + if_num_n2h = handle->if_num_n2h; + if_num_h2n = handle->if_num_h2n; + nss_ctx = handle->nss_ctx; + + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: Interface could not be destroyed as core not ready\n", nss_ctx); + return NSS_TX_FAILURE_NOT_READY; + } + + spin_lock_bh(&nss_top_main.lock); + if (!nss_ctx->subsys_dp_register[if_num_n2h].ndev || !nss_ctx->subsys_dp_register[if_num_h2n].ndev) { + spin_unlock_bh(&nss_top_main.lock); + nss_warning("%px: Unregister virt interface %d %d: no context\n", nss_ctx, if_num_n2h, if_num_h2n); + return NSS_TX_FAILURE_BAD_PARAM; + } + + dev = nss_ctx->subsys_dp_register[if_num_n2h].ndev; + nss_assert(dev == nss_ctx->subsys_dp_register[if_num_h2n].ndev); + nss_core_unregister_subsys_dp(nss_ctx, if_num_n2h); + nss_core_unregister_subsys_dp(nss_ctx, if_num_h2n); + spin_unlock_bh(&nss_top_main.lock); + dev_put(dev); + + status = nss_rmnet_rx_handle_destroy_sync(handle); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: handle destroy failed for if_num_n2h %d and if_num_h2n %d\n", nss_ctx, if_num_n2h, if_num_h2n); + return NSS_TX_FAILURE; + } + + ret = nss_core_unregister_handler(nss_ctx, if_num_n2h); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for redir_n2h interface %d with NSS core\n", nss_ctx, if_num_n2h); + return NSS_TX_FAILURE_BAD_PARAM; + } + + ret = nss_core_unregister_handler(nss_ctx, if_num_h2n); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for redir_h2n interface %d with NSS core\n", nss_ctx, if_num_h2n); + return NSS_TX_FAILURE_BAD_PARAM; + } + + return status; +} 
+EXPORT_SYMBOL(nss_rmnet_rx_destroy_sync); + +/* + * nss_rmnet_rx_create_sync_nexthop() + * Create redir_n2h and redir_h2n interfaces, synchronously and associate it with same netdev. + */ +struct nss_rmnet_rx_handle *nss_rmnet_rx_create_sync_nexthop(struct net_device *netdev, uint32_t nexthop_n2h, uint32_t nexthop_h2n) +{ + struct nss_ctx_instance *nss_ctx = nss_rmnet_rx_get_context(); + struct nss_rmnet_rx_msg nvim; + struct nss_rmnet_rx_config_msg *nvcm; + uint32_t ret; + struct nss_rmnet_rx_handle *handle = NULL; + int32_t if_num_n2h, if_num_h2n; + + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: Interface could not be created as core not ready\n", nss_ctx); + return NULL; + } + + if_num_n2h = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_N2H); + if (if_num_n2h < 0) { + nss_warning("%px: failure allocating redir_n2h\n", nss_ctx); + return NULL; + } + + if_num_h2n = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_H2N); + if (if_num_h2n < 0) { + nss_warning("%px: failure allocating redir_h2n\n", nss_ctx); + nss_dynamic_interface_dealloc_node(if_num_n2h, NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_N2H); + return NULL; + } + + handle = nss_rmnet_rx_handle_create_sync(nss_ctx, if_num_n2h, if_num_h2n, &ret); + if (!handle) { + nss_warning("%px: rmnet_rx handle creation failed ret %d\n", nss_ctx, ret); + nss_dynamic_interface_dealloc_node(if_num_n2h, NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_N2H); + nss_dynamic_interface_dealloc_node(if_num_h2n, NSS_DYNAMIC_INTERFACE_TYPE_RMNET_RX_H2N); + return NULL; + } + + /* + * Initializes the semaphore and also sets the msg handler for if_num. 
+ */ + ret = nss_rmnet_rx_register_handler_sync(nss_ctx, handle); + if (ret != NSS_RMNET_RX_SUCCESS) { + nss_warning("%px: Registration handler failed reason: %d\n", nss_ctx, ret); + goto error1; + } + + nss_rmnet_rx_msg_init(&nvim, handle->if_num_n2h, NSS_RMNET_RX_TX_CONFIG_MSG, + sizeof(struct nss_rmnet_rx_config_msg), nss_rmnet_rx_callback, handle); + + nvcm = &nvim.msg.if_config; + nvcm->flags = 0; + nvcm->sibling = if_num_h2n; + nvcm->nexthop = nexthop_n2h; + nvcm->no_channel = 0; + + ret = nss_rmnet_rx_tx_msg_sync(handle, &nvim); + if (ret != NSS_TX_SUCCESS) { + nss_warning("%px: nss_rmnet_rx_tx_msg_sync failed %u\n", nss_ctx, ret); + goto error2; + } + + nvim.cm.interface = if_num_h2n; + nvcm->sibling = if_num_n2h; + nvcm->nexthop = nexthop_h2n; + nvcm->no_channel = NSS_RMNET_RX_CHANNEL_MAX; + + ret = nss_rmnet_rx_tx_msg_sync(handle, &nvim); + if (ret != NSS_TX_SUCCESS) { + nss_warning("%px: nss_rmnet_rx_tx_msg_sync failed %u\n", nss_ctx, ret); + goto error2; + } + + nss_core_register_subsys_dp(nss_ctx, (uint32_t)if_num_n2h, NULL, NULL, NULL, netdev, 0); + nss_core_register_subsys_dp(nss_ctx, (uint32_t)if_num_h2n, NULL, NULL, NULL, netdev, 0); + + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num_n2h, NSS_RMNET_RX_DP_N2H); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num_h2n, NSS_RMNET_RX_DP_H2N); + + /* + * Hold a reference to the net_device + */ + dev_hold(netdev); + + /* + * The context returned is the handle interface # which contains all the info related to + * the interface if_num. + */ + + return handle; + +error2: + nss_core_unregister_handler(nss_ctx, if_num_n2h); + nss_core_unregister_handler(nss_ctx, if_num_h2n); + +error1: + nss_rmnet_rx_handle_destroy_sync(handle); + return NULL; +} +EXPORT_SYMBOL(nss_rmnet_rx_create_sync_nexthop); + +/* + * nss_rmnet_rx_create() + * Create rmnet_n2h and rmnet_h2n interfaces with generic next hops and associate it with same netdev. 
+ * + * When rmnet and eth_rx is running at the same core, we directly send packets to eth_rx node. + * When they are running at different cores, the packets needs to arrive eth_rx through C2C. + */ +struct nss_rmnet_rx_handle *nss_rmnet_rx_create(struct net_device *netdev) +{ + uint32_t nexthop_n2h = NSS_N2H_INTERFACE; + uint32_t nexthop_h2n = NSS_C2C_TX_INTERFACE; + + if (nss_top_main.rmnet_rx_handler_id == 0) { + nexthop_h2n = NSS_ETH_RX_INTERFACE; + } + + + return nss_rmnet_rx_create_sync_nexthop(netdev, nexthop_n2h, nexthop_h2n); +} +EXPORT_SYMBOL(nss_rmnet_rx_create); + +/* + * nss_rmnet_rx_tx_buf() + * HLOS interface has received a packet which we redirect to the NSS, if appropriate to do so. + */ +nss_tx_status_t nss_rmnet_rx_tx_buf(struct nss_rmnet_rx_handle *handle, + struct sk_buff *skb) +{ + int32_t if_num = handle->if_num_h2n; + struct nss_ctx_instance *nss_ctx = handle->nss_ctx; + int cpu = 0; + + if (unlikely(nss_ctl_redirect == 0)) { + return NSS_TX_FAILURE_NOT_ENABLED; + } + + if (unlikely(skb->vlan_tci)) { + return NSS_TX_FAILURE_NOT_SUPPORTED; + } + + if (!nss_rmnet_rx_verify_if_num(if_num)) { + nss_warning("%px: bad interface number %d\n", nss_ctx, if_num); + return NSS_TX_FAILURE_BAD_PARAM; + } + + nss_trace("%px: RmnetRx packet, if_num:%d, skb:%px", nss_ctx, if_num, skb); + + /* + * Sanity check the SKB to ensure that it's suitable for us + */ + if (unlikely(skb->len <= ETH_HLEN)) { + nss_warning("%px: Rmnet Rx packet: %px too short", nss_ctx, skb); + return NSS_TX_FAILURE_TOO_SHORT; + } + + /* + * set skb queue mapping + */ + cpu = get_cpu(); + put_cpu(); + skb_set_queue_mapping(skb, cpu); + + return nss_core_send_packet(nss_ctx, skb, if_num, H2N_BIT_FLAG_VIRTUAL_BUFFER); +} +EXPORT_SYMBOL(nss_rmnet_rx_tx_buf); + +/* + * nss_rmnet_rx_tx_msg() + */ +nss_tx_status_t nss_rmnet_rx_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_rmnet_rx_msg *nvim) +{ + struct nss_cmn_msg *ncm = &nvim->cm; + + /* + * Sanity check the message + */ + if 
(!nss_rmnet_rx_verify_if_num(ncm->interface)) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_RMNET_RX_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, nvim, sizeof(*nvim), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_rmnet_rx_tx_msg); + +/* + * nss_rmnet_rx_xmit_callback_unregister() + * Unregister interface xmit callback. + */ +void nss_rmnet_rx_xmit_callback_unregister(struct nss_rmnet_rx_handle *handle) +{ + struct nss_ctx_instance *nss_ctx; + struct nss_subsystem_dataplane_register *reg; + + if (!handle) { + nss_warning("handle is NULL\n"); + return; + } + + nss_ctx = handle->nss_ctx; + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_rmnet_rx_verify_if_num(handle->if_num_n2h)) { + nss_warning("if_num is invalid\n"); + return; + } + + reg = &nss_ctx->subsys_dp_register[handle->if_num_n2h]; + reg->xmit_cb = NULL; +} +EXPORT_SYMBOL(nss_rmnet_rx_xmit_callback_unregister); + +/* + * nss_rmnet_rx_xmit_callback_register() + * Register interface xmit callback. 
+ */ +void nss_rmnet_rx_xmit_callback_register(struct nss_rmnet_rx_handle *handle, + nss_rmnet_rx_xmit_callback_t cb) +{ + struct nss_ctx_instance *nss_ctx; + struct nss_subsystem_dataplane_register *reg; + + if (!handle) { + nss_warning("handle is NULL\n"); + return; + } + + nss_ctx = handle->nss_ctx; + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_rmnet_rx_verify_if_num(handle->if_num_n2h)) { + nss_warning("if_num is invalid\n"); + return; + } + + reg = &nss_ctx->subsys_dp_register[handle->if_num_n2h]; + reg->xmit_cb = cb; +} +EXPORT_SYMBOL(nss_rmnet_rx_xmit_callback_register); + +/* + * nss_rmnet_rx_unregister() + */ +void nss_rmnet_rx_unregister(struct nss_rmnet_rx_handle *handle) +{ + struct nss_ctx_instance *nss_ctx; + int32_t if_num; + + if (!handle) { + nss_warning("handle is NULL\n"); + return; + } + + nss_ctx = handle->nss_ctx; + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_rmnet_rx_verify_if_num(handle->if_num_n2h)) { + nss_warning("if_num is invalid\n"); + return; + } + + if_num = handle->if_num_n2h; + + nss_core_unregister_subsys_dp(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_rmnet_rx_unregister); + +/* + * nss_rmnet_rx_register() + */ +void nss_rmnet_rx_register(struct nss_rmnet_rx_handle *handle, + nss_rmnet_rx_data_callback_t data_callback, + struct net_device *netdev) +{ + struct nss_ctx_instance *nss_ctx; + int32_t if_num; + + if (!handle) { + nss_warning("handle is NULL\n"); + return; + } + + nss_ctx = handle->nss_ctx; + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_rmnet_rx_verify_if_num(handle->if_num_n2h)) { + nss_warning("if_num is invalid\n"); + return; + } + + if_num = handle->if_num_n2h; + + nss_core_register_subsys_dp(nss_ctx, if_num, data_callback, NULL, NULL, netdev, (uint32_t)netdev->features); +} +EXPORT_SYMBOL(nss_rmnet_rx_register); + +/* + * nss_rmnet_rx_get_ifnum_with_core_id() + * Append core id to rmnet interface number + */ +int32_t nss_rmnet_rx_get_ifnum_with_core_id(int32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = 
nss_rmnet_rx_get_context(); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + if (nss_rmnet_rx_verify_if_num(if_num) == false) { + nss_info("%px: if_num: %u is not RMNET interface\n", nss_ctx, if_num); + return -1; + } + return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_rmnet_rx_get_ifnum_with_core_id); + +/* + * nss_rmnet_rx_get_ifnum() + * Return rmnet interface number with core ID + */ +int32_t nss_rmnet_rx_get_ifnum(struct net_device *dev) +{ + int32_t ifnum = nss_cmn_get_interface_number_by_dev(dev); + return nss_rmnet_rx_get_ifnum_with_core_id(ifnum); +} +EXPORT_SYMBOL(nss_rmnet_rx_get_ifnum); + +/* + * nss_rmnet_rx_get_interface_num() + * Get interface number for an interface + */ +int32_t nss_rmnet_rx_get_interface_num(struct nss_rmnet_rx_handle *handle) +{ + if (!handle) { + nss_warning("rmnet_rx handle is NULL\n"); + return -1; + } + + /* + * Return if_num_n2h whose datapath type is 0. + */ + return handle->if_num_n2h; +} +EXPORT_SYMBOL(nss_rmnet_rx_get_interface_num); + +/* + * nss_rmnet_rx_get_context() + */ +struct nss_ctx_instance *nss_rmnet_rx_get_context(void) +{ + return &nss_top_main.nss[nss_top_main.rmnet_rx_handler_id]; +} +EXPORT_SYMBOL(nss_rmnet_rx_get_context); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx_stats.c new file mode 100644 index 000000000..efbcaffc0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx_stats.c @@ -0,0 +1,209 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_rmnet_rx_stats.h" + +/* + * Data structure that holds the virtual interface context. + */ +extern struct nss_rmnet_rx_handle *rmnet_rx_handle[]; + +/* + * Spinlock to protect the global data structure virt_handle. + */ +extern spinlock_t nss_rmnet_rx_lock; + +/* + * nss_rmnet_rx_stats_str + * rmnet_rx interface stats strings + */ +struct nss_stats_info nss_rmnet_rx_stats_str[NSS_RMNET_RX_STATS_MAX] = { + {"rx_packets" , NSS_STATS_TYPE_COMMON}, + {"rx_bytes" , NSS_STATS_TYPE_COMMON}, + {"tx_packets" , NSS_STATS_TYPE_COMMON}, + {"tx_bytes" , NSS_STATS_TYPE_COMMON}, + {"rx_queue_0_dropped" , NSS_STATS_TYPE_DROP}, + {"rx_queue_1_dropped" , NSS_STATS_TYPE_DROP}, + {"rx_queue_2_dropped" , NSS_STATS_TYPE_DROP}, + {"rx_queue_3_dropped" , NSS_STATS_TYPE_DROP}, + {"enqueue failed" , NSS_STATS_TYPE_DROP}, + {"no available channel" , NSS_STATS_TYPE_SPECIAL}, + {"linear pbuf count" , NSS_STATS_TYPE_SPECIAL}, + {"no pbuf to linear" , NSS_STATS_TYPE_SPECIAL}, + {"no enough room" , NSS_STATS_TYPE_SPECIAL}, + {"channel[0]" , NSS_STATS_TYPE_SPECIAL}, + {"channel[1]" , NSS_STATS_TYPE_SPECIAL}, + {"channel[2]" , NSS_STATS_TYPE_SPECIAL}, + {"channel[3]" , NSS_STATS_TYPE_SPECIAL}, + {"channel[4]" , NSS_STATS_TYPE_SPECIAL}, + {"channel[5]" , NSS_STATS_TYPE_SPECIAL}, + {"channel[6]" , NSS_STATS_TYPE_SPECIAL}, + {"channel[7]" , 
NSS_STATS_TYPE_SPECIAL}, + {"channel[8]" , NSS_STATS_TYPE_SPECIAL}, + {"channel[9]" , NSS_STATS_TYPE_SPECIAL}, + {"channel[10]" , NSS_STATS_TYPE_SPECIAL}, + {"channel[11]" , NSS_STATS_TYPE_SPECIAL}, + {"DMA full" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_rmnet_rx_stats_get() + * Get rmnet_rx interface stats by interface number. + */ +static bool nss_rmnet_rx_stats_get(struct nss_ctx_instance *nss_ctx, uint32_t if_num, uint64_t *stats, bool is_base) +{ + int i; + uint32_t if_num_curr = if_num; + uint64_t *stats_local; + + if_num = if_num - NSS_DYNAMIC_IF_START; + + spin_lock_bh(&nss_rmnet_rx_lock); + if (!rmnet_rx_handle[if_num]) { + spin_unlock_bh(&nss_rmnet_rx_lock); + return false; + } + + if (if_num_curr == rmnet_rx_handle[if_num]->if_num_n2h) { + stats_local = rmnet_rx_handle[if_num]->stats_n2h; + } else { + stats_local = rmnet_rx_handle[if_num]->stats_h2n; + } + + for (i = 0; i < NSS_RMNET_RX_STATS_MAX; i++) { + stats[i] = stats_local[i]; + } + spin_unlock_bh(&nss_rmnet_rx_lock); + + return true; +} + +/* + * nss_rmnet_rx_stats_read() + * Read rmnet_rx statistics + */ +static ssize_t nss_rmnet_rx_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + struct nss_stats_data *data = fp->private_data; + struct nss_ctx_instance *nss_ctx = nss_rmnet_rx_get_context(); + int32_t if_num = NSS_DYNAMIC_IF_START; + int32_t max_if_num = if_num + NSS_MAX_DYNAMIC_INTERFACES; + uint32_t max_output_lines = ((NSS_RMNET_RX_STATS_MAX + 3) * NSS_MAX_DYNAMIC_INTERFACES) + + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("%px: Could not allocate memory for local statistics buffer", data); + return 0; + } + + stats_shadow = kzalloc(NSS_RMNET_RX_STATS_MAX * sizeof(uint64_t), GFP_KERNEL); + if (unlikely(!stats_shadow)) { + nss_warning("%px: Could not allocate 
memory for local shadow buffer", data); + kfree(lbuf); + return 0; + } + + if (data) { + if_num = data->if_num; + } + + if (if_num > max_if_num) { + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "rmnet_rx", NSS_STATS_SINGLE_CORE); + + /* + * Interface statistics for all interfaces. + */ + for (; if_num < max_if_num; if_num++) { + + if (!nss_rmnet_rx_stats_get(nss_ctx, if_num, stats_shadow, false)) { + continue; + } + + size_wr += nss_stats_print("rmnet_rx", "interface", if_num, + nss_rmnet_rx_stats_str, stats_shadow, NSS_RMNET_RX_STATS_MAX, + lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_rmnet_rx_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(rmnet_rx) + +/* + * nss_rmnet_rx_stats_dentry_create() + * Create rmnet_rx statistics debug entry. + */ +void nss_rmnet_rx_stats_dentry_create(void) +{ + nss_stats_create_dentry("rmnet_rx", &nss_rmnet_rx_stats_ops); +} + +/* + * nss_rmnet_rx_stats_sync() + * Sync stats from the NSS FW + */ +void nss_rmnet_rx_stats_sync(struct nss_rmnet_rx_handle *handle, + struct nss_rmnet_rx_stats *nwis, uint32_t if_num) +{ + int i; + uint64_t *stats; + spin_lock_bh(&nss_rmnet_rx_lock); + if (if_num == handle->if_num_n2h) { + stats = handle->stats_n2h; + } else { + stats = handle->stats_h2n; + } + + stats[NSS_RMNET_RX_STATS_RX_PKTS] += nwis->node_stats.rx_packets; + stats[NSS_RMNET_RX_STATS_RX_BYTES] += nwis->node_stats.rx_bytes; + stats[NSS_RMNET_RX_STATS_TX_PKTS] += nwis->node_stats.tx_packets; + stats[NSS_RMNET_RX_STATS_TX_BYTES] += nwis->node_stats.tx_bytes; + + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + stats[NSS_RMNET_RX_STATS_QUEUE_0_DROPPED + i] += nwis->node_stats.rx_dropped[i]; + } + + stats[NSS_RMNET_RX_STATS_ENQUEUE_FAILED] += nwis->enqueue_failed; + stats[NSS_RMNET_RX_STATS_NO_AVAIL_CHANNEL] += nwis->no_avail_channel; + stats[NSS_RMNET_RX_STATS_NUM_LINEAR_PBUF] += 
nwis->num_linear_pbuf; + stats[NSS_RMNET_RX_STATS_NO_PBUF_TO_LINEAR] += nwis->no_pbuf_to_linear; + stats[NSS_RMNET_RX_STATS_NO_ENOUGH_ROOM] += nwis->no_enough_room; + + for (i = 0; i < NSS_RMNET_RX_CHANNEL_MAX; i++) { + stats[NSS_RMNET_RX_STATS_USING_CHANNEL0 + i] += nwis->using_channel[i]; + } + + stats[NSS_RMNET_RX_STATS_DMA_FAILED] += nwis->dma_failed; + spin_unlock_bh(&nss_rmnet_rx_lock); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx_stats.h new file mode 100644 index 000000000..638593a6c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_rmnet_rx_stats.h @@ -0,0 +1,61 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_RMNET_RX_STATS_H +#define __NSS_RMNET_RX_STATS_H + +/* + * rmnet_rx interface statistics types. 
+ */ +enum nss_rmnet_rx_stats_types { + NSS_RMNET_RX_STATS_RX_PKTS, + NSS_RMNET_RX_STATS_RX_BYTES, + NSS_RMNET_RX_STATS_TX_PKTS, + NSS_RMNET_RX_STATS_TX_BYTES, + NSS_RMNET_RX_STATS_QUEUE_0_DROPPED, + NSS_RMNET_RX_STATS_QUEUE_1_DROPPED, + NSS_RMNET_RX_STATS_QUEUE_2_DROPPED, + NSS_RMNET_RX_STATS_QUEUE_3_DROPPED, + NSS_RMNET_RX_STATS_ENQUEUE_FAILED, + NSS_RMNET_RX_STATS_NO_AVAIL_CHANNEL, + NSS_RMNET_RX_STATS_NUM_LINEAR_PBUF, + NSS_RMNET_RX_STATS_NO_PBUF_TO_LINEAR, + NSS_RMNET_RX_STATS_NO_ENOUGH_ROOM, + NSS_RMNET_RX_STATS_USING_CHANNEL0, + NSS_RMNET_RX_STATS_USING_CHANNEL1, + NSS_RMNET_RX_STATS_USING_CHANNEL2, + NSS_RMNET_RX_STATS_USING_CHANNEL3, + NSS_RMNET_RX_STATS_USING_CHANNEL4, + NSS_RMNET_RX_STATS_USING_CHANNEL5, + NSS_RMNET_RX_STATS_USING_CHANNEL6, + NSS_RMNET_RX_STATS_USING_CHANNEL7, + NSS_RMNET_RX_STATS_USING_CHANNEL8, + NSS_RMNET_RX_STATS_USING_CHANNEL9, + NSS_RMNET_RX_STATS_USING_CHANNEL10, + NSS_RMNET_RX_STATS_USING_CHANNEL11, + NSS_RMNET_RX_STATS_DMA_FAILED, + NSS_RMNET_RX_STATS_MAX, +}; + +/* + * Virtual interface statistics APIs + */ +extern void nss_rmnet_rx_stats_sync(struct nss_rmnet_rx_handle *handle, struct nss_rmnet_rx_stats *nwis, uint32_t if_num); +extern void nss_rmnet_rx_stats_dentry_create(void); + +#endif /* __NSS_RMNET_RX_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_rps.c b/feeds/ipq807x/qca-nss-drv/src/nss_rps.c new file mode 100644 index 000000000..c2a603f2d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_rps.c @@ -0,0 +1,644 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2017, 2019-2021 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_rps.c + * NSS RPS based APIs + */ + +#include "nss_tx_rx_common.h" + +#define NSS_RPS_MAX_CORE_HASH_BITMAP ((1 << (NSS_HOST_CORES)) - 1) + /**< Maximum value that when all cores are available. */ +#define NSS_RPS_PRI_MAP_PARAM_FIELD_COUNT 2 + +int nss_rps_config __read_mostly; +int nss_rps_hash_bitmap = NSS_RPS_MAX_CORE_HASH_BITMAP; +int nss_rps_pri_map[NSS_MAX_NUM_PRI]; + +/* + * It is used to parse priority and core from the input. + */ +struct nss_rps_pri_map_parse_data { + uint8_t pri; /**< Priority Index. */ + int8_t core; /**< Host core-id. */ +}; + +/* + * Private data structure. + */ +struct nss_rps_pvt { + struct semaphore sem; /* Semaphore structure. */ + struct completion complete; /* Completion structure. */ + int response; /* Response from FW. */ + void *cb; /* Original cb for sync msgs. */ + void *app_data; /* Original app_data for sync msgs. */ +}; + +static struct nss_rps_pvt nss_rps_cfg_pvt; + +/* + * nss_rps_pri_map_usage() + * Help function shows the usage of the command. + */ +static inline void nss_rps_pri_map_usage(void) +{ + nss_info_always("\nUsage:\n"); + nss_info_always("echo > /proc/sys/dev/nss/rps/pri_map\n\n"); + nss_info_always("priority[0 to %u] core[-1 to %u]:\n\n", + NSS_MAX_NUM_PRI - 1, + NSS_HOST_CORES - 1); +} + +/* + * nss_rps_pri_map_print() + * Sysctl handler for printing rps/pri mapping. 
+ */ +static int nss_rps_pri_map_print(struct ctl_table *ctl, void __user *buffer, + size_t *lenp, loff_t *ppos, int *pri_map) +{ + char *r_buf; + int i, len; + size_t cp_bytes = 0; + + /* + * (2 * 4) + 12 bytes for the buffer size is sufficient to write + * the table including the spaces and new line characters. + */ + r_buf = kzalloc(((4 * NSS_MAX_NUM_PRI) + 12) * sizeof(char), + GFP_KERNEL); + if (!r_buf) { + nss_warning("Failed to alloc buffer to print pri map\n"); + return -EFAULT; + } + + /* + * Write the core values that corresponds to each priorities. + */ + len = scnprintf(r_buf + cp_bytes, 8, "Cores: "); + cp_bytes += len; + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + len = scnprintf(r_buf + cp_bytes, 4, "%d ", pri_map[i]); + if (!len) { + nss_warning("failed to read from buffer %d\n", pri_map[i]); + kfree(r_buf); + return -EFAULT; + } + cp_bytes += len; + } + + /* + * Add new line character at the end. + */ + len = scnprintf(r_buf + cp_bytes, 4, "\n"); + cp_bytes += len; + + cp_bytes = simple_read_from_buffer(buffer, *lenp, ppos, r_buf, cp_bytes); + *lenp = cp_bytes; + kfree(r_buf); + return 0; +} + +/* + * nss_rps_pri_map_parse() + * Sysctl handler for rps/pri mappings. + */ +static int nss_rps_pri_map_parse(struct ctl_table *ctl, void __user *buffer, + size_t *lenp, loff_t *ppos, struct nss_rps_pri_map_parse_data *out) +{ + size_t cp_bytes = 0; + char w_buf[5]; + loff_t w_offset = 0; + char *str; + unsigned int pri; + int core, res; + + /* + * Buffer length cannot be different than 4 or 5. + */ + if (*lenp < 4 || *lenp > 5) { + nss_warning("Buffer is not correct. 
Invalid lenght: %d\n", (int)*lenp); + return -EINVAL; + } + + /* + * It's a write operation + */ + cp_bytes = simple_write_to_buffer(w_buf, *lenp, &w_offset, buffer, 5); + if (cp_bytes != *lenp) { + nss_warning("failed to write to buffer\n"); + return -EFAULT; + } + + str = w_buf; + res = sscanf(str, "%u %d", &pri, &core); + if (res != NSS_RPS_PRI_MAP_PARAM_FIELD_COUNT) { + nss_warning("failed to read the buffer\n"); + return -EFAULT; + } + /* + * pri value cannot be higher than NSS_MAX_NUM_PRI. + */ + if (pri >= NSS_MAX_NUM_PRI) { + nss_warning("invalid pri value: %d\n", pri); + return -EINVAL; + } + + /* + * Host core must be less than NSS_HOST_CORE. + */ + if (core >= NSS_HOST_CORES || core < NSS_N2H_RPS_PRI_DEFAULT) { + nss_warning("invalid priority value: %d\n", core); + return -EINVAL; + } + + nss_info("priority: %d core: %d\n", pri, core); + + out->pri = pri; + out->core = core; + return 0; +} + +/* + * nss_rps_cfg_callback() + * Callback function for rps configuration. + */ +static void nss_rps_cfg_callback(void *app_data, struct nss_n2h_msg *nnm) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)app_data; + if (nnm->cm.response != NSS_CMN_RESPONSE_ACK) { + + /* + * Error, hence we are not updating the nss_rps + * Send a FAILURE to restore the current value + * to its previous state. + */ + nss_rps_cfg_pvt.response = NSS_FAILURE; + complete(&nss_rps_cfg_pvt.complete); + nss_warning("%px: RPS configuration failed : %d\n", nss_ctx, + nnm->cm.error); + return; + } + + nss_info("%px: RPS configuration succeeded: %d\n", nss_ctx, + nnm->cm.error); + nss_ctx->rps_en = nnm->msg.rps_cfg.enable; + nss_rps_cfg_pvt.response = NSS_SUCCESS; + complete(&nss_rps_cfg_pvt.complete); +} + +/* + * nss_rps_pri_map_cfg_callback() + * Callback function for rps pri map configuration. 
+ */ +static void nss_rps_pri_map_cfg_callback(void *app_data, struct nss_n2h_msg *nnm) +{ + if (nnm->cm.response != NSS_CMN_RESPONSE_ACK) { + + /* + * Error, hence we are not updating the nss_pri_map + * Send a failure to restore the current value + * to its previous state. + */ + nss_rps_cfg_pvt.response = NSS_FAILURE; + complete(&nss_rps_cfg_pvt.complete); + nss_warning("%px: RPS pri_map configuration failed : %d\n", + app_data, nnm->cm.error); + return; + } + + nss_info("%px: RPS pri_map configuration succeeded: %d\n", + app_data, nnm->cm.error); + + nss_rps_cfg_pvt.response = NSS_SUCCESS; + complete(&nss_rps_cfg_pvt.complete); +} + +/* + * nss_rps_cfg() + * Send Message to NSS to enable RPS. + */ +static nss_tx_status_t nss_rps_cfg(struct nss_ctx_instance *nss_ctx, int enable_rps) +{ + struct nss_n2h_msg nnm; + nss_tx_status_t nss_tx_status; + int ret; + + down(&nss_rps_cfg_pvt.sem); + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, NSS_TX_METADATA_TYPE_N2H_RPS_CFG, + sizeof(struct nss_n2h_rps), + nss_rps_cfg_callback, + (void *)nss_ctx); + + nnm.msg.rps_cfg.enable = enable_rps; + + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_tx error setting rps\n", nss_ctx); + + up(&nss_rps_cfg_pvt.sem); + return NSS_FAILURE; + } + + /* + * Blocking call, wait till we get ACK for this msg. + */ + ret = wait_for_completion_timeout(&nss_rps_cfg_pvt.complete, msecs_to_jiffies(NSS_CONN_CFG_TIMEOUT)); + if (ret == 0) { + nss_warning("%px: Waiting for ack timed out\n", nss_ctx); + up(&nss_rps_cfg_pvt.sem); + return NSS_FAILURE; + } + + /* + * ACK/NACK received from NSS FW + * If NACK: Handler function will restore nss_rps_config + * to previous state. + */ + if (NSS_FAILURE == nss_rps_cfg_pvt.response) { + up(&nss_rps_cfg_pvt.sem); + return NSS_FAILURE; + } + + up(&nss_rps_cfg_pvt.sem); + return NSS_SUCCESS; +} + +/* + * nss_rps_ipv4_hash_bitmap_cfg() + * Send Message to NSS to configure hash_bitmap. 
+ */ +static nss_tx_status_t nss_rps_ipv4_hash_bitmap_cfg(struct nss_ctx_instance *nss_ctx, int hash_bitmap) +{ + struct nss_ipv4_msg nim; + nss_tx_status_t nss_tx_status; + + down(&nss_rps_cfg_pvt.sem); + nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, NSS_IPV4_TX_RPS_HASH_BITMAP_CFG_MSG, + sizeof(struct nss_ipv4_rps_hash_bitmap_cfg_msg), + NULL, NULL); + + nim.msg.rps_hash_bitmap.hash_bitmap = hash_bitmap; + + nss_tx_status = nss_ipv4_tx_sync(nss_ctx, &nim); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_tx error setting rps\n", nss_ctx); + + up(&nss_rps_cfg_pvt.sem); + return NSS_FAILURE; + } + + up(&nss_rps_cfg_pvt.sem); + return NSS_SUCCESS; +} + +#ifdef NSS_DRV_IPV6_ENABLE +/* + * nss_rps_ipv6_hash_bitmap_cfg() + * Send Message to NSS to configure hash_bitmap. + */ +static nss_tx_status_t nss_rps_ipv6_hash_bitmap_cfg(struct nss_ctx_instance *nss_ctx, int hash_bitmap) +{ + struct nss_ipv6_msg nim; + nss_tx_status_t nss_tx_status; + + down(&nss_rps_cfg_pvt.sem); + nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_RPS_HASH_BITMAP_CFG_MSG, + sizeof(struct nss_ipv4_rps_hash_bitmap_cfg_msg), + NULL, NULL); + + nim.msg.rps_hash_bitmap.hash_bitmap = hash_bitmap; + + nss_tx_status = nss_ipv6_tx_sync(nss_ctx, &nim); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_tx error setting rps\n", nss_ctx); + + up(&nss_rps_cfg_pvt.sem); + return NSS_FAILURE; + } + + up(&nss_rps_cfg_pvt.sem); + return NSS_SUCCESS; +} +#endif + +/* + * nss_rps_pri_map_cfg() + * Send Message to NSS to configure pri_map. 
+ */ +static nss_tx_status_t nss_rps_pri_map_cfg(struct nss_ctx_instance *nss_ctx, int *pri_map) +{ + struct nss_n2h_msg nnm; + struct nss_n2h_rps_pri_map *rps_pri_map; + nss_tx_status_t nss_tx_status; + int ret, i; + + down(&nss_rps_cfg_pvt.sem); + nss_n2h_msg_init(&nnm, NSS_N2H_INTERFACE, NSS_TX_METADATA_TYPE_N2H_RPS_PRI_MAP_CFG, + sizeof(struct nss_n2h_rps_pri_map), + nss_rps_pri_map_cfg_callback, + (void *)nss_ctx); + + rps_pri_map = &nnm.msg.rps_pri_map; + + /* + * Fill entries at pri_map. + */ + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + rps_pri_map->pri_map[i] = pri_map[i]; + } + + nss_tx_status = nss_n2h_tx_msg(nss_ctx, &nnm); + + if (nss_tx_status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_tx error setting rps\n", nss_ctx); + + up(&nss_rps_cfg_pvt.sem); + return NSS_FAILURE; + } + + /* + * Blocking call, wait till we get ACK for this msg. + */ + ret = wait_for_completion_timeout(&nss_rps_cfg_pvt.complete, msecs_to_jiffies(NSS_CONN_CFG_TIMEOUT)); + if (ret == 0) { + nss_warning("%px: Waiting for ack timed out\n", nss_ctx); + up(&nss_rps_cfg_pvt.sem); + return NSS_FAILURE; + } + + /* + * ACK/NACK received from NSS FW + * If NACK: Handler function will restore nss_rps_config + * to previous state. + */ + if (NSS_FAILURE == nss_rps_cfg_pvt.response) { + up(&nss_rps_cfg_pvt.sem); + return NSS_FAILURE; + } + + up(&nss_rps_cfg_pvt.sem); + return NSS_SUCCESS; +} + +/* + * nss_rps_cfg_handler() + * Enable NSS RPS. 
+ */ +static int nss_rps_cfg_handler(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx; + int ret, ret_rps, current_state, i; + current_state = nss_rps_config; + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + if (ret != NSS_SUCCESS) { + return ret; + } + + if (!write) { + return ret; + } + + if (nss_rps_config == 0) { + nss_info_always("Runtime disabling of NSS RPS not supported\n"); + return ret; + } + + if (nss_rps_config != 1) { + nss_info_always("Invalid input value. Valid values are 0 and 1\n"); + return ret; + } + + for (i = 0; i < nss_top_main.num_nss; i++) { + nss_ctx = &nss_top->nss[i]; + nss_info("Enabling NSS RPS\n"); + ret_rps = nss_rps_cfg(nss_ctx, 1); + + /* + * In here, we also need to revert the state of the previously enabled cores. + * However, runtime disabling is currently not supported since queues are not + * flushed in NSS FW. + * TODO: Flush queues in NSS FW. 
+ */ + if (ret_rps != NSS_SUCCESS) { + nss_warning("%px: rps enabling failed\n", nss_ctx); + nss_rps_config = current_state; + return ret_rps; + } + } + return NSS_SUCCESS; +} + +/* + * nss_rps_hash_bitmap_cfg_handler() + * Configure NSS rps_hash_bitmap + */ +static int nss_rps_hash_bitmap_cfg_handler(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[0]; + int ret, ret_ipv4, current_state; + + current_state = nss_rps_hash_bitmap; + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + + if (ret != NSS_SUCCESS) { + nss_rps_hash_bitmap = current_state; + return ret; + } + + if (!write) { + return ret; + } + + if (nss_rps_hash_bitmap <= (NSS_RPS_MAX_CORE_HASH_BITMAP)) { + nss_info("Configuring NSS RPS hash_bitmap\n"); + ret_ipv4 = nss_rps_ipv4_hash_bitmap_cfg(nss_ctx, nss_rps_hash_bitmap); + + if (ret_ipv4 != NSS_SUCCESS) { + nss_warning("%px: ipv4 hash_bitmap config message failed\n", nss_ctx); + nss_rps_hash_bitmap = current_state; + return ret_ipv4; + } + +#ifdef NSS_DRV_IPV6_ENABLE + { + int ret_ipv6; + ret_ipv6 = nss_rps_ipv6_hash_bitmap_cfg(nss_ctx, nss_rps_hash_bitmap); + + if (ret_ipv6 != NSS_SUCCESS) { + nss_warning("%px: ipv6 hash_bitmap config message failed\n", nss_ctx); + nss_rps_hash_bitmap = current_state; + if (nss_rps_ipv4_hash_bitmap_cfg(nss_ctx, nss_rps_hash_bitmap != NSS_SUCCESS)) { + nss_warning("%px: ipv4 and ipv6 have different hash_bitmaps.\n", nss_ctx); + } + return ret_ipv6; + } + } +#endif + return 0; + } + + nss_info_always("Invalid input value. 
Valid values are less than %d\n", (NSS_RPS_MAX_CORE_HASH_BITMAP)); + return ret; +} + +/* nss_rps_pri_map_cfg_handler() + * Configure NSS rps_pri_map + */ +static int nss_rps_pri_map_cfg_handler(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx = &nss_top->nss[0]; + + int ret, ret_pri_map; + struct nss_rps_pri_map_parse_data out, current_state; + if (!write) { + return nss_rps_pri_map_print(ctl, buffer, lenp, ppos, nss_rps_pri_map); + } + + ret = nss_rps_pri_map_parse(ctl, buffer, lenp, ppos, &out); + + if (ret != NSS_SUCCESS) { + nss_rps_pri_map_usage(); + return ret; + } + + nss_info("Configuring NSS RPS Priority Map\n"); + current_state.pri = out.pri; + current_state.core = nss_rps_pri_map[out.pri]; + nss_rps_pri_map[out.pri] = out.core; + ret_pri_map = nss_rps_pri_map_cfg(nss_ctx, nss_rps_pri_map); + if (ret_pri_map != NSS_SUCCESS) { + nss_rps_pri_map[current_state.pri] = current_state.core; + nss_warning("%px: pri_map config message failed\n", nss_ctx); + } + + return ret_pri_map; +} + +static struct ctl_table nss_rps_table[] = { + { + .procname = "enable", + .data = &nss_rps_config, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_rps_cfg_handler, + }, + { + .procname = "hash_bitmap", + .data = &nss_rps_hash_bitmap, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_rps_hash_bitmap_cfg_handler, + }, + { + .procname = "pri_map", + .data = &nss_rps_pri_map[NSS_MAX_NUM_PRI], + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_rps_pri_map_cfg_handler, + }, + { } +}; + +static struct ctl_table nss_rps_dir[] = { + { + .procname = "rps", + .mode = 0555, + .child = nss_rps_table, + }, + { } +}; + +static struct ctl_table nss_rps_root_dir[] = { + { + .procname = "nss", + .mode = 0555, + .child = nss_rps_dir, + }, + { } +}; + +static struct ctl_table nss_rps_root[] = { + { + .procname = "dev", + .mode = 
0555, + .child = nss_rps_root_dir, + }, + { } +}; + +static struct ctl_table_header *nss_rps_header; + +/* + * nss_rps_pri_map_init_handler() + * Initialize pri_map for priority based rps selection. + */ +void nss_rps_pri_map_init_handler(void) +{ + int i; + + /* + Initialize the mapping table with the default values. + */ + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + nss_rps_pri_map[i] = NSS_N2H_RPS_PRI_DEFAULT; + } + +} + +/* + * nss_rps_register_sysctl() + */ +void nss_rps_register_sysctl(void) +{ + + /* + * rps sema init. + */ + sema_init(&nss_rps_cfg_pvt.sem, 1); + init_completion(&nss_rps_cfg_pvt.complete); + + nss_rps_pri_map_init_handler(); + + /* + * Register sysctl table. + */ + nss_rps_header = register_sysctl_table(nss_rps_root); +} + +/* + * nss_rps_unregister_sysctl() + * Unregister sysctl specific to rps + */ +void nss_rps_unregister_sysctl(void) +{ + /* + * Unregister sysctl table. + */ + if (nss_rps_header) { + unregister_sysctl_table(nss_rps_header); + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_shaper.c b/feeds/ipq807x/qca-nss-drv/src/nss_shaper.c new file mode 100644 index 000000000..2726b8ba5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_shaper.c @@ -0,0 +1,367 @@ +/* + ************************************************************************** + * Copyright (c) 2014, 2016-2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" + +/* + * nss_shaper_register_shaping() + * Register to obtain an NSS context for basic shaping operations + */ +void *nss_shaper_register_shaping(void) +{ + if (nss_top_main.shaping_handler_id == (uint8_t)-1) { + nss_warning("%px: SHAPING IS NOT ENABLED", __func__); + return NULL; + } + return (void *)&nss_top_main.nss[nss_top_main.shaping_handler_id]; +} + +/* + * nss_shaper_unregister_shaping() + * Unregister an NSS shaping context + */ +void nss_shaper_unregister_shaping(void *nss_ctx) +{ +} + +/* + * nss_shaper_register_shaper_bounce_interface() + * Register for performing shaper bounce operations for interface shaper + */ +void *nss_shaper_register_shaper_bounce_interface(uint32_t if_num, nss_shaper_bounced_callback_t cb, void *app_data, struct module *owner) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_shaper_bounce_registrant *reg; + + nss_info("Shaper bounce interface register: %u, cb: %px, app_data: %px, owner: %px", + if_num, cb, app_data, owner); + + /* + * Must be valid interface number + */ + if (if_num >= NSS_MAX_NET_INTERFACES) { + nss_warning("Invalid if_num: %u", if_num); + BUG_ON(false); + } + + /* + * Shaping enabled? + */ + if (nss_top_main.shaping_handler_id == (uint8_t)-1) { + nss_warning("%px: SHAPING IS NOT ENABLED", __func__); + return NULL; + } + + /* + * Can we hold the module? 
+ */ + if (!try_module_get(owner)) { + nss_warning("%px: Unable to hold owner", __func__); + return NULL; + } + + spin_lock_bh(&nss_top->lock); + + /* + * Must not have existing registrant + */ + reg = &nss_top->bounce_interface_registrants[if_num]; + if (reg->registered) { + spin_unlock_bh(&nss_top->stats_lock); + module_put(owner); + nss_warning("Already registered: %u", if_num); + BUG_ON(false); + } + + /* + * Register + */ + reg->bounced_callback = cb; + reg->app_data = app_data; + reg->owner = owner; + reg->registered = true; + spin_unlock_bh(&nss_top->lock); + + return (void *)&nss_top->nss[nss_top->shaping_handler_id]; +} + +/* + * nss_shaper_unregister_shaper_bounce_interface() + * Unregister for shaper bounce operations for interface shaper + */ +void nss_shaper_unregister_shaper_bounce_interface(uint32_t if_num) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_shaper_bounce_registrant *reg; + struct module *owner; + + nss_info("Shaper bounce interface unregister: %u", if_num); + + /* + * Must be valid interface number + */ + if (if_num >= NSS_MAX_NET_INTERFACES) { + nss_warning("Invalid if_num: %u", if_num); + BUG_ON(false); + } + + spin_lock_bh(&nss_top->lock); + + /* + * Must have existing registrant + */ + reg = &nss_top->bounce_interface_registrants[if_num]; + if (!reg->registered) { + spin_unlock_bh(&nss_top->stats_lock); + nss_warning("Already unregistered: %u", if_num); + BUG_ON(false); + } + + /* + * Unegister + */ + owner = reg->owner; + reg->owner = NULL; + reg->registered = false; + spin_unlock_bh(&nss_top->lock); + + module_put(owner); +} + +/* + * nss_shaper_register_shaper_bounce_bridge() + * Register for performing shaper bounce operations for bridge shaper + */ +void *nss_shaper_register_shaper_bounce_bridge(uint32_t if_num, nss_shaper_bounced_callback_t cb, void *app_data, struct module *owner) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_ctx_instance *nss_ctx; + struct 
nss_shaper_bounce_registrant *reg; + + nss_info("Shaper bounce bridge register: %u, cb: %px, app_data: %px, owner: %px", + if_num, cb, app_data, owner); + + /* + * Must be valid interface number + */ + if (if_num >= NSS_MAX_NET_INTERFACES) { + nss_warning("Invalid if_num: %u", if_num); + BUG_ON(false); + } + + /* + * Shaping enabled? + */ + if (nss_top_main.shaping_handler_id == (uint8_t)-1) { + nss_warning("%px: SHAPING IS NOT ENABLED", __func__); + return NULL; + } + + /* + * Can we hold the module? + */ + if (!try_module_get(owner)) { + nss_warning("%px: Unable to hold owner", __func__); + return NULL; + } + + spin_lock_bh(&nss_top->lock); + + /* + * Must not have existing registrant + */ + reg = &nss_top->bounce_bridge_registrants[if_num]; + if (reg->registered) { + spin_unlock_bh(&nss_top->stats_lock); + module_put(owner); + nss_warning("Already registered: %u", if_num); + BUG_ON(false); + } + + /* + * Register + */ + reg->bounced_callback = cb; + reg->app_data = app_data; + reg->owner = owner; + reg->registered = true; + spin_unlock_bh(&nss_top->lock); + + nss_ctx = &nss_top->nss[nss_top->shaping_handler_id]; + return (void *)nss_ctx; +} + +/* + * nss_shaper_unregister_shaper_bounce_bridge() + * Unregister for shaper bounce operations for bridge shaper + */ +void nss_shaper_unregister_shaper_bounce_bridge(uint32_t if_num) +{ + struct nss_top_instance *nss_top = &nss_top_main; + struct nss_shaper_bounce_registrant *reg; + struct module *owner; + + nss_info("Shaper bounce bridge unregister: %u", if_num); + + /* + * Must be valid interface number + */ + if (if_num >= NSS_MAX_NET_INTERFACES) { + nss_warning("Invalid if_num: %u", if_num); + BUG_ON(false); + } + + spin_lock_bh(&nss_top->lock); + + /* + * Must have existing registrant + */ + reg = &nss_top->bounce_bridge_registrants[if_num]; + if (!reg->registered) { + spin_unlock_bh(&nss_top->stats_lock); + nss_warning("Already unregistered: %u", if_num); + BUG_ON(false); + } + + /* + * Wait until any bounce 
callback that is active is finished + */ + while (reg->callback_active) { + spin_unlock_bh(&nss_top->stats_lock); + yield(); + spin_lock_bh(&nss_top->stats_lock); + } + + /* + * Unegister + */ + owner = reg->owner; + reg->owner = NULL; + reg->registered = false; + spin_unlock_bh(&nss_top->lock); + + module_put(owner); +} + +/* + * nss_shaper_bounce_interface_packet() + * Bounce a packet to the NSS for interface shaping. + * + * You must have registered for interface bounce shaping to call this. + */ +nss_tx_status_t nss_shaper_bounce_interface_packet(void *ctx, uint32_t if_num, struct sk_buff *skb) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct nss_shaper_bounce_registrant *reg; + int32_t status; + + /* + * Must be valid interface number + */ + if (if_num >= NSS_MAX_NET_INTERFACES) { + nss_warning("Invalid if_num: %u", if_num); + BUG_ON(false); + } + + /* + * Must have existing registrant + */ + spin_lock_bh(&nss_top->lock); + reg = &nss_top->bounce_interface_registrants[if_num]; + if (!reg->registered) { + spin_unlock_bh(&nss_top->stats_lock); + nss_warning("unregistered: %u", if_num); + return NSS_TX_FAILURE; + } + spin_unlock_bh(&nss_top->lock); + + status = nss_core_send_buffer(nss_ctx, if_num, skb, NSS_IF_H2N_DATA_QUEUE, + H2N_BUFFER_SHAPER_BOUNCE_INTERFACE, 0); + if (status != NSS_CORE_STATUS_SUCCESS) { + return NSS_TX_FAILURE; + } + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE); + + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_PACKET]); + return NSS_TX_SUCCESS; +} + +/* + * nss_shaper_bounce_bridge_packet() + * Bounce a packet to the NSS for bridge shaping. + * + * You must have registered for bridge bounce shaping to call this. 
+ */ +nss_tx_status_t nss_shaper_bounce_bridge_packet(void *ctx, uint32_t if_num, struct sk_buff *skb) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)ctx; + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct nss_shaper_bounce_registrant *reg; + int32_t status; + + /* + * Must be valid interface number + */ + if (if_num >= NSS_MAX_NET_INTERFACES) { + nss_warning("Invalid if_num: %u", if_num); + BUG_ON(false); + } + + /* + * Must have existing registrant + */ + spin_lock_bh(&nss_top->lock); + reg = &nss_top->bounce_bridge_registrants[if_num]; + if (!reg->registered) { + spin_unlock_bh(&nss_top->stats_lock); + nss_warning("unregistered: %u", if_num); + return NSS_TX_FAILURE; + } + spin_unlock_bh(&nss_top->lock); + + nss_info("%s: Bridge bounce skb: %px, if_num: %u, ctx: %px", __func__, skb, if_num, nss_ctx); + status = nss_core_send_buffer(nss_ctx, if_num, skb, NSS_IF_H2N_DATA_QUEUE, + H2N_BUFFER_SHAPER_BOUNCE_BRIDGE, 0); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_info("%s: Bridge bounce core send rejected", __func__); + return NSS_TX_FAILURE; + } + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE); + + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_PACKET]); + return NSS_TX_SUCCESS; +} + +/* + * nss_shaper_get_device() + * Gets the original device from probe. 
+ */ +struct device *nss_shaper_get_dev(void) +{ + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[nss_top_main.shaping_handler_id]; + return nss_ctx->dev; +} + +EXPORT_SYMBOL(nss_shaper_bounce_bridge_packet); +EXPORT_SYMBOL(nss_shaper_bounce_interface_packet); +EXPORT_SYMBOL(nss_shaper_unregister_shaper_bounce_interface); +EXPORT_SYMBOL(nss_shaper_register_shaper_bounce_interface); +EXPORT_SYMBOL(nss_shaper_unregister_shaper_bounce_bridge); +EXPORT_SYMBOL(nss_shaper_register_shaper_bounce_bridge); +EXPORT_SYMBOL(nss_shaper_register_shaping); +EXPORT_SYMBOL(nss_shaper_unregister_shaping); +EXPORT_SYMBOL(nss_shaper_get_dev); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_sjack.c b/feeds/ipq807x/qca-nss-drv/src/nss_sjack.c new file mode 100644 index 000000000..086eedfea --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_sjack.c @@ -0,0 +1,189 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_sjack_stats.h" +#include "nss_sjack_log.h" + +/* + * nss_sjack_handler() + * Handle NSS -> HLOS messages for sjack + */ +static void nss_sjack_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, + __attribute__((unused))void *app_data) +{ + void *ctx; + nss_sjack_msg_callback_t cb; + struct nss_sjack_msg *nsm = (struct nss_sjack_msg *)ncm; + + BUG_ON(ncm->interface != NSS_SJACK_INTERFACE); + + /* + * Trace Messages + */ + nss_sjack_log_rx_msg(nsm); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_SJACK_MAX_MSG_TYPE) { + nss_warning("%px: received invalid message %d for sjack interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_sjack_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Update the callback and app_data for NOTIFY messages, sjack sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + switch (ncm->type) { + case NSS_SJACK_STATS_SYNC_MSG: + /* + * Update sjack statistics on node sync. 
+ */ + nss_sjack_stats_node_sync(nss_ctx, &nsm->msg.stats_sync); + break; + } + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_sjack_msg_callback_t)ncm->cb; + ctx = nss_ctx->subsys_dp_register[ncm->interface].ndev; + + cb(ctx, ncm); +} + +/* + * nss_sjack_tx_msg() + * Transmit a sjack message to NSSFW + */ +nss_tx_status_t nss_sjack_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_sjack_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace Messages + */ + nss_sjack_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (ncm->interface != NSS_SJACK_INTERFACE) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_SJACK_MAX_MSG_TYPE) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + * nss_sjack_register_if() + */ +struct nss_ctx_instance *nss_sjack_register_if(uint32_t if_num, struct net_device *netdev, + nss_sjack_msg_callback_t event_callback) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.sjack_handler_id]; + uint32_t status; + + nss_assert(nss_ctx); + nss_assert(if_num == NSS_SJACK_INTERFACE); + + nss_core_register_subsys_dp(nss_ctx, if_num, NULL, NULL, NULL, netdev, 0); + + status = nss_core_register_msg_handler(nss_ctx, NSS_SJACK_INTERFACE, event_callback); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to register handler for interface %d with NSS core\n", nss_ctx, if_num); + return NULL; + } + + return nss_ctx; +} + +/* + * nss_sjack_unregister_if() + */ +void nss_sjack_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.sjack_handler_id]; + uint32_t status; + + nss_assert(nss_ctx); + 
nss_assert(if_num == NSS_SJACK_INTERFACE); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + status = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for interface %d with NSS core\n", nss_ctx, if_num); + return; + } + + return; +} + +/* + * nss_sjack_get_context() + * get NSS context instance for sjack + */ +struct nss_ctx_instance *nss_sjack_get_context(void) +{ + return &nss_top_main.nss[nss_top_main.sjack_handler_id]; +} +EXPORT_SYMBOL(nss_sjack_get_context); + +/* + * nss_sjack_register_handler() + * Registering handler for sending msg to sjack node on NSS. + */ +void nss_sjack_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_sjack_get_context(); + + nss_core_register_handler(nss_ctx, NSS_SJACK_INTERFACE, nss_sjack_handler, NULL); + + nss_sjack_stats_dentry_create(); +} + +EXPORT_SYMBOL(nss_sjack_register_if); +EXPORT_SYMBOL(nss_sjack_unregister_if); +EXPORT_SYMBOL(nss_sjack_tx_msg); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_sjack_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_sjack_log.c new file mode 100644 index 000000000..c585b7483 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_sjack_log.c @@ -0,0 +1,133 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_sjack_log.c + * NSS SJACK logger file. + */ + +#include "nss_core.h" + +/* + * nss_sjack_log_message_types_str + * NSS SJACK message strings + */ +static int8_t *nss_sjack_log_message_types_str[NSS_SJACK_MAX_MSG_TYPE] __maybe_unused = { + "SJACK Configure", + "SJACK Unconfigure", + "SJACK Stats", +}; + +/* + * nss_sjack_log_configure_msg() + * Log NSS SJACK Configure. + */ +static void nss_sjack_log_configure_msg(struct nss_sjack_msg *nsm) +{ + struct nss_sjack_configure_msg *nscm __maybe_unused = &nsm->msg.configure; + nss_trace("%px: NSS SJACK Configure message \n" + "SJACK Ingress Interface Number: %d\n" + "SJACK Engress Interface Number: %d\n" + "SJACK Tunnel ID: %d\n" + "SJACK DSCP Value: %d\n" + "SJACK GRE Priority: %d\n" + "SJACK GRE Flags: %d\n" + "SJACK IPSEC SA Pattern Flag: %d\n", + nscm, nscm->ingress_if_num, + nscm->egress_if_num, nscm->tunnel_id, + nscm->ip_dscp, nscm->gre_prio, + nscm->gre_flags, nscm->use_ipsec_sa_pattern); +} + +/* + * nss_sjack_log_unconfigure_msg() + * Log NSS SJACK Unconfigure. + */ +static void nss_sjack_log_unconfigure_msg(struct nss_sjack_msg *nsm) +{ + struct nss_sjack_unconfigure_msg *nsum __maybe_unused = &nsm->msg.unconfigure; + nss_trace("%px: NSS SJACK UnConfigure message \n" + "SJACK Ingress Interface Number: %d\n", + nsum, nsum->ingress_if_num); +} + +/* + * nss_sjack_log_verbose() + * Log message contents. 
+ */ +static void nss_sjack_log_verbose(struct nss_sjack_msg *nsm) +{ + switch (nsm->cm.type) { + case NSS_SJACK_CONFIGURE_MSG: + nss_sjack_log_configure_msg(nsm); + break; + + case NSS_SJACK_UNCONFIGURE_MSG: + nss_sjack_log_unconfigure_msg(nsm); + break; + + case NSS_SJACK_STATS_SYNC_MSG: + /* + * No log for valid stats message. + */ + break; + + default: + nss_trace("%px: Invalid message type\n", nsm); + break; + } +} + +/* + * nss_sjack_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_sjack_log_tx_msg(struct nss_sjack_msg *nsm) +{ + if (nsm->cm.type >= NSS_SJACK_MAX_MSG_TYPE) { + nss_warning("%px: Invalid message type\n", nsm); + return; + } + + nss_info("%px: type[%d]:%s\n", nsm, nsm->cm.type, nss_sjack_log_message_types_str[nsm->cm.type]); + nss_sjack_log_verbose(nsm); +} + +/* + * nss_sjack_log_rx_msg() + * Log messages received from FW. + */ +void nss_sjack_log_rx_msg(struct nss_sjack_msg *nsm) +{ + if (nsm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nsm); + return; + } + + if (nsm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nsm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nsm, nsm->cm.type, + nss_sjack_log_message_types_str[nsm->cm.type], + nsm->cm.response, nss_cmn_response_str[nsm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + nsm, nsm->cm.type, nss_sjack_log_message_types_str[nsm->cm.type], + nsm->cm.response, nss_cmn_response_str[nsm->cm.response]); + +verbose: + nss_sjack_log_verbose(nsm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_sjack_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_sjack_log.h new file mode 100644 index 000000000..56435a444 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_sjack_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_SJACK_LOG_H +#define __NSS_SJACK_LOG_H + +/* + * nss_sjack.h + * NSS SJACK header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_sjack_log_tx_msg + * Logs a sjack message that is sent to the NSS firmware. + */ +void nss_sjack_log_tx_msg(struct nss_sjack_msg *nsm); + +/* + * nss_sjack_log_rx_msg + * Logs a sjack message that is received from the NSS firmware. + */ +void nss_sjack_log_rx_msg(struct nss_sjack_msg *nsm); + +#endif /* __NSS_SJACK_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_sjack_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_sjack_stats.c new file mode 100644 index 000000000..139a34941 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_sjack_stats.c @@ -0,0 +1,94 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2019 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_sjack_stats.h" + +/* + * nss_sjack_stats_read() + * Read SJACK stats + */ +static ssize_t nss_sjack_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * max output lines = #stats + start tag line + end tag line + three blank lines + */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + 5; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "sjack", NSS_STATS_SINGLE_CORE); + size_wr += nss_stats_fill_common_stats(NSS_SJACK_INTERFACE, NSS_STATS_SINGLE_INSTANCE, lbuf, size_wr, size_al, "sjack"); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_sjack_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(sjack) + +/* + * nss_sjack_stats_dentry_create() + * Create SJACK node statistics debug entry. 
+ */ +void nss_sjack_stats_dentry_create(void) +{ + nss_stats_create_dentry("sjack", &nss_sjack_stats_ops); +} + +/* + * nss_sjack_stats_node_sync() + * Update sjack node stats. + */ +void nss_sjack_stats_node_sync(struct nss_ctx_instance *nss_ctx, struct nss_sjack_stats_sync_msg *nins) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + int j; + + /* + * Update SJACK node stats. + */ + spin_lock_bh(&nss_top->stats_lock); + nss_top->stats_node[NSS_SJACK_INTERFACE][NSS_SJACK_STATS_RX_PKTS] += nins->node_stats.rx_packets; + nss_top->stats_node[NSS_SJACK_INTERFACE][NSS_SJACK_STATS_RX_BYTES] += nins->node_stats.rx_bytes; + nss_top->stats_node[NSS_SJACK_INTERFACE][NSS_SJACK_STATS_TX_PKTS] += nins->node_stats.tx_packets; + nss_top->stats_node[NSS_SJACK_INTERFACE][NSS_SJACK_STATS_TX_BYTES] += nins->node_stats.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_top->stats_node[NSS_SJACK_INTERFACE][NSS_SJACK_STATS_RX_QUEUE_0_DROPPED + j] += nins->node_stats.rx_dropped[j]; + } + + spin_unlock_bh(&nss_top->stats_lock); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_sjack_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_sjack_stats.h new file mode 100644 index 000000000..cbcd60453 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_sjack_stats.h @@ -0,0 +1,45 @@ +/* + ****************************************************************************** + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_SJACK_STATS_H +#define __NSS_SJACK_STATS_H + +/* + * SJACK statistics + */ +enum nss_sjack_stats_types { + NSS_SJACK_STATS_RX_PKTS, /* sjack node RX packets */ + NSS_SJACK_STATS_RX_BYTES, /* sjack node RX bytes */ + NSS_SJACK_STATS_TX_PKTS, /* sjack node TX packets */ + NSS_SJACK_STATS_TX_BYTES, /* sjack node TX bytes */ + NSS_SJACK_STATS_RX_QUEUE_0_DROPPED, + /* sjack node RX Queue 0 dropped */ + NSS_SJACK_STATS_RX_QUEUE_1_DROPPED, + /* sjack node RX Queue 1 dropped */ + NSS_SJACK_STATS_RX_QUEUE_2_DROPPED, + /* sjack node RX Queue 2 dropped */ + NSS_SJACK_STATS_RX_QUEUE_3_DROPPED, + /* sjack node RX Queue 3 dropped */ + NSS_SJACK_STATS_MAX, +}; + +/* + * SJACK statistics APIs + */ +extern void nss_sjack_stats_node_sync(struct nss_ctx_instance *nss_ctx, struct nss_sjack_stats_sync_msg *nins); +extern void nss_sjack_stats_dentry_create(void); + +#endif /* __NSS_SJACK_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_stats.c new file mode 100644 index 000000000..9605c372b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_stats.c @@ -0,0 +1,481 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_drv_stats.h" + +/* + * Maximum banner length: + */ +#define NSS_STATS_BANNER_MAX_LENGTH 80 + +/* + * Maximum number of digits a stats value can have: + */ +#define NSS_STATS_DIGITS_MAX 16 + +/* + * Spaces to print core details inside banner + */ +#define NSS_STATS_BANNER_SPACES 12 + +/* + * Max characters for a node name. + */ +#define NSS_STATS_NODE_NAME_MAX 24 + +int nonzero_stats_print = 0; + +/* + * nss_stats_spacing() + * Framework to maintain consistent spacing between stats value and stats type. + */ +static size_t nss_stats_spacing(uint64_t stats_val, char *lbuf, size_t size_wr, size_t size_al) +{ + int i; + int digit_counter = (stats_val == 0 ? 1 : 0); + while (stats_val != 0) { + /* + * TODO: need to check for (nss_ptr_t) + */ + stats_val = (nss_ptr_t)stats_val / 10; + digit_counter++; + } + + for (i = 0; i < NSS_STATS_DIGITS_MAX - digit_counter; i++) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, " "); + } + + return size_wr; +} + +/* + * nss_stats_nonzero_handler() + * Handler to take nonzero stats print configuration. 
+ */ +static int nss_stats_nonzero_handler(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int ret; + ret = proc_dointvec(ctl, write, buffer, lenp, ppos); + return ret; +} + +static struct ctl_table nss_stats_table[] = { + { + .procname = "non_zero_stats", + .data = &nonzero_stats_print, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = &nss_stats_nonzero_handler, + }, + { } +}; + +static struct ctl_table nss_stats_dir[] = { + { + .procname = "stats", + .mode = 0555, + .child = nss_stats_table, + }, + { } +}; + +static struct ctl_table nss_stats_root_dir[] = { + { + .procname = "nss", + .mode = 0555, + .child = nss_stats_dir, + }, + { } +}; + +static struct ctl_table nss_stats_root[] = { + { + .procname = "dev", + .mode = 0555, + .child = nss_stats_root_dir, + }, + { } +}; +static struct ctl_table_header *nss_stats_header; + +/* + * nss_stats_register_sysctl() + * Register a sysctl table for stats. + */ +void nss_stats_register_sysctl(void) +{ + /* + * Register sysctl table. + */ + nss_stats_header = register_sysctl_table(nss_stats_root); +} + +/* + * nss_stats_open() + * Opens stats file. + */ +int nss_stats_open(struct inode *inode, struct file *filp) +{ + struct nss_stats_data *data = NULL; + + data = kzalloc(sizeof(struct nss_stats_data), GFP_KERNEL); + if (!data) { + return -ENOMEM; + } + + memset(data, 0, sizeof (struct nss_stats_data)); + data->if_num = NSS_DYNAMIC_IF_START; + data->index = 0; + data->edma_id = (nss_ptr_t)inode->i_private; + data->nss_ctx = (struct nss_ctx_instance *)(inode->i_private); + filp->private_data = data; + + return 0; +} + +/* + * nss_stats_release() + * Releases stats file. + */ +int nss_stats_release(struct inode *inode, struct file *filp) +{ + struct nss_stats_data *data = filp->private_data; + + if (data) { + kfree(data); + } + + return 0; +} + +/* + * nss_stats_clean() + * Cleanup NSS statistics files. 
+ */ +void nss_stats_clean(void) +{ + /* + * Remove debugfs tree + */ + if (likely(nss_top_main.top_dentry != NULL)) { + debugfs_remove_recursive(nss_top_main.top_dentry); + nss_top_main.top_dentry = NULL; + } +} + +/* + * nss_stats_reset_common_stats() + * Reset common node statistics. + */ +void nss_stats_reset_common_stats(uint32_t if_num) +{ + if (unlikely(if_num >= NSS_MAX_NET_INTERFACES)) { + return; + } + + spin_lock_bh(&nss_top_main.stats_lock); + memset(nss_top_main.stats_node[if_num], 0, NSS_STATS_NODE_MAX * sizeof(uint64_t)); + spin_unlock_bh(&nss_top_main.stats_lock); +} + +/* + * nss_stats_fill_common_stats() + * Fill common node statistics. + */ +size_t nss_stats_fill_common_stats(uint32_t if_num, int instance, char *lbuf, size_t size_wr, size_t size_al, char *node) +{ + uint64_t stats_val[NSS_STATS_NODE_MAX]; + int i; + size_t orig_size_wr = size_wr; + + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; i < NSS_STATS_NODE_MAX; i++) { + stats_val[i] = nss_top_main.stats_node[if_num][i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print(node, NULL, instance, nss_strings_stats_node, stats_val, NSS_STATS_NODE_MAX, lbuf, size_wr, size_al); + return size_wr - orig_size_wr; +} + +/* + * nss_stats_banner() + * Printing banner for node. 
+ */ +size_t nss_stats_banner(char *lbuf, size_t size_wr, size_t size_al, char *node, int core) +{ + uint16_t banner_char_length, i; + size_t orig_size_wr = size_wr; + char node_upr[NSS_STATS_NODE_NAME_MAX + 1]; + + if (strlen(node) > NSS_STATS_NODE_NAME_MAX) { + nss_warning("Node name %s larger than %d characters\n", node, NSS_STATS_NODE_NAME_MAX); + return 0; + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + for (i = 0; i < NSS_STATS_BANNER_MAX_LENGTH ; i++) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "_"); + } + if (core > NSS_STATS_SINGLE_CORE) { + banner_char_length = (uint16_t)((NSS_STATS_BANNER_MAX_LENGTH - (strlen(node) + NSS_STATS_BANNER_SPACES)) / 2); + } else { + banner_char_length = (uint16_t)((NSS_STATS_BANNER_MAX_LENGTH - (strlen(node) + 2)) / 2); + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n\n"); + for (i = 0; i < banner_char_length; i++) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "<"); + } + + strlcpy(node_upr, node, NSS_STATS_NODE_NAME_MAX); + for (i = 0; node_upr[i] != '\0' && i < NSS_STATS_NODE_NAME_MAX; i++) { + node_upr[i] = toupper(node_upr[i]); + } + + /* + * TODO: Enhance so that both core0 and core1 print the same way for a + * node that has presence in both cores. i.e. Core0 should have [CORE 0] + * and not just Core1. 
+ */ + if (core > 1) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, " %s [CORE %d] ", node_upr, core); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, " %s ", node_upr); + } + for (i = 0; i < banner_char_length; i++) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, ">"); + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + for (i = 0; i < NSS_STATS_BANNER_MAX_LENGTH; i++) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "_"); + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n\n"); + return size_wr - orig_size_wr; +} + +/* + * nss_stats_print() + * Helper API to print stats. + */ +size_t nss_stats_print(char *node, char *stat_details, int instance, struct nss_stats_info *stats_info, + uint64_t *stats_val, uint16_t max, char *lbuf, size_t size_wr, size_t size_al) +{ + uint16_t i, j; + uint16_t maxlen = 0; + char stats_string[NSS_STATS_MAX_STR_LENGTH]; + size_t orig_size_wr = size_wr; + char node_lwr[NSS_STATS_NODE_NAME_MAX + 1]; + + if (strlen(node) > NSS_STATS_NODE_NAME_MAX) { + nss_warning("Node name %s (%u chars) is longer than max chars of %d\n", + node, (uint32_t)strlen(node), NSS_STATS_NODE_NAME_MAX); + return 0; + } + + /* + * Calculating the maximum of the array for indentation purposes. + */ + for (i = 0; i < max; i++){ + if (strlen(stats_info[i].stats_name) > maxlen) { + maxlen = strlen(stats_info[i].stats_name); + } + } + + if (stat_details != NULL) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n#%s\n\n", stat_details); + } + + for (i = 0; i < max; i++){ + if (nonzero_stats_print == 1 && stats_val[i] == 0) { + continue; + } + + strlcpy(stats_string, stats_info[i].stats_name, NSS_STATS_MAX_STR_LENGTH); + + /* + * Converting uppercase to lower case. 
+ */ + for (j = 0; stats_string[j] != '\0' && j < NSS_STATS_MAX_STR_LENGTH; j++) { + stats_string[j] = tolower(stats_string[j]); + } + + strlcpy(node_lwr, node, NSS_STATS_NODE_NAME_MAX); + for (j = 0; node_lwr[j] != '\0' && j < NSS_STATS_NODE_NAME_MAX; j++) { + node_lwr[j] = tolower(node_lwr[j]); + } + + /* + * Space before %s is needed to avoid printing stat name from start of the line. + */ + if (instance < 0) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\t%s_%s", node_lwr, stats_string); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\t%s[%d]_%s", node_lwr, instance, stats_string); + } + + for (j = 0; j < (1 + maxlen - strlen(stats_string)); j++){ + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, " "); + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "= %llu", stats_val[i]); + size_wr = nss_stats_spacing(stats_val[i], lbuf, size_wr, size_al); + + /* + * Switch case will take care of the indentation and spacing details. + */ + switch (stats_info[i].stats_type) { + case NSS_STATS_TYPE_COMMON: + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "common\n"); + break; + + case NSS_STATS_TYPE_SPECIAL: + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "special\n"); + break; + + case NSS_STATS_TYPE_DROP: + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "drop\n"); + break; + + case NSS_STATS_TYPE_ERROR: + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "error\n"); + break; + + case NSS_STATS_TYPE_EXCEPTION: + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "exception\n"); + break; + + default: + nss_warning("unknown statistics type"); + break; + } + } + + return size_wr - orig_size_wr; +} + +/* + * nss_stats_create_dentry() + * Create statistics debug entry for subsystem. 
+ */ +void nss_stats_create_dentry(char *name, const struct file_operations *ops) +{ + if (!debugfs_create_file(name, 0400, nss_top_main.stats_dentry, &nss_top_main, ops)) { + nss_warning("Failed to create debug entry for subsystem %s\n", name); + } +} + +/* + * TODO: Move the rest of the code to (nss_wt_stats.c, nss_gmac_stats.c) accordingly. + */ + +/* + * gmac_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(gmac); + +/* + * wt_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(wt); + +/* + * nss_stats_init() + * Enable NSS statistics. + */ +void nss_stats_init(void) +{ + struct dentry *core_dentry = NULL; + struct dentry *wt_dentry = NULL; + char file_name[10]; + int i; + + /* + * NSS driver entry + */ + nss_top_main.top_dentry = debugfs_create_dir("qca-nss-drv", NULL); + if (unlikely(nss_top_main.top_dentry == NULL)) { + nss_warning("Failed to create qca-nss-drv directory in debugfs"); + + /* + * Non availability of debugfs directory is not a catastrophy. + * We can still go ahead with other initialization. + */ + return; + } + + nss_top_main.stats_dentry = debugfs_create_dir("stats", nss_top_main.top_dentry); + if (unlikely(nss_top_main.stats_dentry == NULL)) { + nss_warning("Failed to create qca-nss-drv directory in debugfs"); + + /* + * Non availability of debugfs directory is not a catastrophy. + * We can still go ahead with rest of initialization. + */ + return; + } + + /* + * Create files to obtain statistics. 
+ */ + + /* + * drv_stats + */ + nss_drv_stats_dentry_create(); + + /* + * gmac_stats + */ + nss_stats_create_dentry("gmac", &nss_gmac_stats_ops); + + /* + * Per-project stats + */ + nss_top_main.project_dentry = debugfs_create_dir("project", + nss_top_main.stats_dentry); + if (unlikely(nss_top_main.project_dentry == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/project directory in debugfs"); + return; + } + + for (i = 0; i < nss_top_main.num_nss; ++i) { + memset(file_name, 0, sizeof(file_name)); + scnprintf(file_name, sizeof(file_name), "core%d", i); + core_dentry = debugfs_create_dir(file_name, + nss_top_main.project_dentry); + if (unlikely(core_dentry == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/project/core%d directory in debugfs", i); + return; + } + + wt_dentry = debugfs_create_file("worker_threads", + 0400, + core_dentry, + &(nss_top_main.nss[i]), + &nss_wt_stats_ops); + if (unlikely(wt_dentry == NULL)) { + nss_warning("Failed to create qca-nss-drv/stats/project/core%d/worker_threads file in debugfs", i); + return; + } + } + + nss_log_init(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_stats.h new file mode 100644 index 000000000..385c71aad --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_stats.h @@ -0,0 +1,76 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_stats.h + * printing stats header file + */ + +#ifndef __NSS_STATS_PRINT_H +#define __NSS_STATS_PRINT_H +#include +#include +#include +#include + +/* + * Defines to be used by single instance/core packages. +*/ +#define NSS_STATS_SINGLE_CORE -1 +#define NSS_STATS_SINGLE_INSTANCE -1 + +/* + * Number of Extra outputlines for future reference to add new stats + start tag line + end tag line + three blank lines + */ +#define NSS_STATS_EXTRA_OUTPUT_LINES 35 + +#define NSS_STATS_DECLARE_FILE_OPERATIONS(name) \ +static const struct file_operations nss_##name##_stats_ops = { \ + .open = nss_stats_open, \ + .read = nss_##name##_stats_read, \ + .llseek = generic_file_llseek, \ + .release = nss_stats_release, \ +}; + +/* + * Private data for every file descriptor + */ +struct nss_stats_data { + uint32_t if_num; /**< Interface number for stats */ + uint32_t index; /**< Index for GRE_REDIR stats */ + uint32_t edma_id; /**< EDMA port ID or ring ID */ + struct nss_ctx_instance *nss_ctx; + /**< The core for project stats */ +}; + +/* + * Structure definition carrying stats info. 
+ */ +struct nss_stats_info { + char stats_name[NSS_STATS_MAX_STR_LENGTH]; /* stat name */ + enum nss_stats_types stats_type; /* enum that tags stat type */ +}; + +extern void nss_stats_register_sysctl(void); +void nss_stats_init(void); +extern int nss_stats_release(struct inode *inode, struct file *filp); +extern int nss_stats_open(struct inode *inode, struct file *filp); +void nss_stats_create_dentry(char *name, const struct file_operations *ops); +extern void nss_stats_reset_common_stats(uint32_t if_num); +extern size_t nss_stats_fill_common_stats(uint32_t if_num, int instance, char *lbuf, size_t size_wr, size_t size_al, char *node); +extern size_t nss_stats_banner(char *lbuf , size_t size_wr, size_t size_al, char *node, int core); +extern size_t nss_stats_print(char *node, char *stat_details, int instance, struct nss_stats_info *stats_info, uint64_t *stats_val, uint16_t max, char *lbuf, size_t size_wr, size_t size_al); +#endif /* __NSS_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_strings.c new file mode 100644 index 000000000..432b1546b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_strings.c @@ -0,0 +1,148 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_strings.c + * NSS driver strings APIs. + */ + +#include "nss_strings.h" +#include "nss_core.h" +#include "nss_drv_strings.h" + +/* + * common stats + */ +struct nss_stats_info nss_strings_stats_node[NSS_STATS_NODE_MAX] = { + {"rx_pkts" , NSS_STATS_TYPE_COMMON}, + {"rx_byts" , NSS_STATS_TYPE_COMMON}, + {"tx_pkts" , NSS_STATS_TYPE_COMMON}, + {"tx_byts" , NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops" , NSS_STATS_TYPE_DROP} +}; + +/* + * nss_strings_print() + * Helper API to print stats names + */ +size_t nss_strings_print(char __user *ubuf, size_t sz, loff_t *ppos, struct nss_stats_info *stats_info, uint16_t max) +{ + int32_t i; + size_t size_al = (NSS_STATS_MAX_STR_LENGTH + 12) * max; + size_t size_wr = 0; + ssize_t bytes_read = 0; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (!lbuf) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + for (i = 0; i < max; i++) { + /* + * Print what we have but don't exceed the buffer. + */ + if (size_wr >= size_al) { + nss_info_always("Buffer overflowed.\n"); + break; + } + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "\t%d , %s\n", stats_info[i].stats_type, stats_info[i].stats_name); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + + return bytes_read; +} + +/* + * nss_strings_create_dentry() + * Create strings debug entry for subsystem. 
+ */ +void nss_strings_create_dentry(char *name, const struct file_operations *ops) +{ + if (!nss_top_main.strings_dentry || !debugfs_create_file(name, 0400, nss_top_main.strings_dentry, &nss_top_main, ops)) { + nss_warning("Failed to create debug entry for subsystem %s\n", name); + } +} + +/* + * nss_strings_open() + */ +int nss_strings_open(struct inode *inode, struct file *filp) +{ + struct nss_strings_data *data = NULL; + + data = kzalloc(sizeof(struct nss_strings_data), GFP_KERNEL); + if (!data) { + return -ENOMEM; + } + data->if_num = NSS_DYNAMIC_IF_START; + data->nss_ctx = (struct nss_ctx_instance *)(inode->i_private); + filp->private_data = data; + + return 0; +} + +/* + * nss_strings_release() + */ +int nss_strings_release(struct inode *inode, struct file *filp) +{ + struct nss_strings_data *data = filp->private_data; + + if (data) { + kfree(data); + } + + return 0; +} + +/* + * nss_common_node_stats_strings_read() + * Read common node statistics names. + */ +static ssize_t nss_common_node_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_strings_stats_node, NSS_STATS_NODE_MAX); +} + +/* + * nss_common_node_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(common_node_stats); + +/* + * nss_strings_init() + * Enable NSS statistics + */ +void nss_strings_init(void) +{ + nss_top_main.strings_dentry = debugfs_create_dir("strings", nss_top_main.top_dentry); + if (unlikely(nss_top_main.strings_dentry == NULL)) { + nss_warning("Failed to create strings directory in debugfs/qca-nss-drv"); + return; + } + + /* + * Common node statistics + */ + nss_strings_create_dentry("common_node_stats", &nss_common_node_stats_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_strings.h new file mode 100644 index 000000000..32bf46601 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_strings.h @@ -0,0 +1,52 @@ +/* + 
************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_strings.h + * NSS driver strings header file. 
+ */ + +#ifndef __NSS_STRINGS_H +#define __NSS_STRINGS_H + +#include +#include "nss_stats.h" + +#define NSS_STRINGS_DECLARE_FILE_OPERATIONS(name) \ +static const struct file_operations nss_##name##_strings_ops = { \ + .open = nss_strings_open, \ + .read = nss_##name##_strings_read, \ + .llseek = generic_file_llseek, \ + .release = nss_strings_release, \ +} + +/* + * Private data for every file descriptor + */ +struct nss_strings_data { + uint32_t if_num; /**< Interface number for stats */ + struct nss_ctx_instance *nss_ctx; /**< The core for project stats */ +}; + +extern struct nss_stats_info nss_strings_stats_node[NSS_STATS_NODE_MAX]; +void nss_strings_init(void); +int nss_strings_release(struct inode *inode, struct file *filp); +int nss_strings_open(struct inode *inode, struct file *filp); +void nss_strings_create_dentry(char *name, const struct file_operations *ops); +size_t nss_strings_fill_common_stats(char __user *ubuf, size_t sz, loff_t *ppos); +size_t nss_strings_print(char __user *ubuf, size_t sz, loff_t *ppos, struct nss_stats_info *stats_info, uint16_t max); + +#endif /* __NSS_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tls.c b/feeds/ipq807x/qca-nss-drv/src/nss_tls.c new file mode 100644 index 000000000..ff5336333 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tls.c @@ -0,0 +1,475 @@ +/* + ************************************************************************** + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_tls_log.h" +#include "nss_tls_stats.h" +#include "nss_tls_strings.h" + +#define NSS_TLS_INTERFACE_MAX_LONG BITS_TO_LONGS(NSS_MAX_NET_INTERFACES) +#define NSS_TLS_TX_TIMEOUT 3000 /* 3 Seconds */ + +/* + * Private data structure for handling synchronous messaging. + */ +static struct nss_tls_pvt { + struct semaphore sem; + struct completion complete; + struct nss_tls_msg ntcm; + unsigned long if_map[NSS_TLS_INTERFACE_MAX_LONG]; +} tls_pvt; + +/* + * nss_tls_verify_ifnum() + * Verify if the interface number is a TLS interface. + */ +static bool nss_tls_verify_ifnum(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + enum nss_dynamic_interface_type type = nss_dynamic_interface_get_type(nss_ctx, if_num); + + if (type == NSS_DYNAMIC_INTERFACE_TYPE_TLS_INNER) + return true; + + if (type == NSS_DYNAMIC_INTERFACE_TYPE_TLS_OUTER) + return true; + + if (if_num == NSS_TLS_INTERFACE) + return true; + + return false; +} + +/* + * nss_tls_handler() + * Handle NSS -> HLOS messages for tls tunnel + */ +static void nss_tls_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *data) +{ + nss_tls_msg_callback_t cb; + void *app_data; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + nss_trace("%px: handle event for interface num :%u", nss_ctx, ncm->interface); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_TLS_MSG_MAX) { + nss_warning("%px:Bad message type(%d) for TLS interface %d", nss_ctx, ncm->type, ncm->interface); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_tls_msg)) { + nss_warning("%px:Bad message length(%d)", nss_ctx, ncm->len); + return; + } + + if (ncm->type == NSS_TLS_MSG_TYPE_CTX_SYNC) { + nss_tls_stats_sync(nss_ctx, ncm); + nss_tls_stats_notify(nss_ctx, ncm->interface); + } + + /* + * Update the callback and app_data for NOTIFY messages + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->nss_rx_interface_handlers[ncm->interface].app_data; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Trace messages. + */ + nss_tls_log_rx_msg((struct nss_tls_msg *)ncm); + + /* + * Callback + */ + cb = (nss_tls_msg_callback_t)ncm->cb; + app_data = (void *)ncm->app_data; + + /* + * Call TLS session callback + */ + if (!cb) { + nss_warning("%px: No callback for tls session interface %d", nss_ctx, ncm->interface); + return; + } + + nss_trace("%px: calling tlsmgr event handler(%u)", nss_ctx, ncm->interface); + cb(app_data, ncm); +} + +/* + * nss_tls_sync_resp() + * Callback to handle the completion of HLOS-->NSS messages. + */ +static void nss_tls_sync_resp(void *app_data, struct nss_cmn_msg *ncm) +{ + struct nss_tls_msg *pvt_msg = app_data; + struct nss_tls_msg *resp_msg = container_of(ncm, struct nss_tls_msg, cm); + + /* + * Copy response message to pvt message + */ + memcpy(pvt_msg, resp_msg, sizeof(*resp_msg)); + + /* + * Write memory barrier + */ + smp_wmb(); + + complete(&tls_pvt.complete); +} + +/* + * nss_tls_ifmap_get() + * Return TLS active interfaces map. 
+ */ +unsigned long *nss_tls_ifmap_get(void) +{ + return tls_pvt.if_map; +} + +/* + * nss_tls_tx_buf() + * Transmit buffer over TLS interface + */ +nss_tx_status_t nss_tls_tx_buf(struct sk_buff *skb, uint32_t if_num, struct nss_ctx_instance *nss_ctx) +{ + int32_t status; + + if (!nss_tls_verify_ifnum(nss_ctx, if_num)) + return NSS_TX_FAILURE; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: tx_data packet dropped as core not ready", nss_ctx); + return NSS_TX_FAILURE_NOT_READY; + } + + status = nss_core_send_buffer(nss_ctx, if_num, skb, NSS_IF_H2N_DATA_QUEUE, H2N_BUFFER_PACKET, 0); + switch (status) { + case NSS_CORE_STATUS_SUCCESS: + break; + + case NSS_CORE_STATUS_FAILURE_QUEUE: /* queue full condition */ + nss_warning("%px: H2N queue full for tx_buf", nss_ctx); + return NSS_TX_FAILURE_QUEUE; + + default: + nss_warning("%px: general failure for tx_buf", nss_ctx); + return NSS_TX_FAILURE; + } + + /* + * Kick the NSS awake so it can process our new entry. + */ + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE); + + return NSS_TX_SUCCESS; +} +EXPORT_SYMBOL(nss_tls_tx_buf); + +/* + * nss_tls_tx_msg() + * Transmit a TLS message to NSS firmware + */ +nss_tx_status_t nss_tls_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_tls_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + if (ncm->type >= NSS_TLS_MSG_MAX) { + nss_warning("%px: tls message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + if (!nss_tls_verify_ifnum(nss_ctx, ncm->interface)) { + nss_warning("%px: tls message interface is bad: %u", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + /* + * Trace messages. + */ + nss_tls_log_tx_msg(msg); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_tls_tx_msg); + +/* + * nss_tls_tx_msg_sync() + * Transmit a TLS message to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_tls_tx_msg_sync(struct nss_ctx_instance *nss_ctx, uint32_t if_num, + enum nss_tls_msg_type type, uint16_t len, + struct nss_tls_msg *ntcm) +{ + struct nss_tls_msg *local_ntcm = &tls_pvt.ntcm; + nss_tx_status_t status; + int ret = 0; + + /* + * Length of the message should be the based on type + */ + if (len > sizeof(struct nss_tls_msg)) { + nss_warning("%px: Invalid message length(%u), type (%d), I/F(%u)\n", nss_ctx, len, type, if_num); + return NSS_TX_FAILURE; + } + + down(&tls_pvt.sem); + + /* + * We need to copy the message content into the actual message + * to be sent to NSS + */ + memset(local_ntcm, 0, sizeof(*local_ntcm)); + + nss_tls_msg_init(local_ntcm, if_num, type, len, nss_tls_sync_resp, local_ntcm); + memcpy(&local_ntcm->msg, &ntcm->msg, len); + + status = nss_tls_tx_msg(nss_ctx, local_ntcm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Failed to send message\n", nss_ctx); + goto done; + } + + ret = wait_for_completion_timeout(&tls_pvt.complete, msecs_to_jiffies(NSS_TLS_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: Failed to receive response, timeout(%d)\n", nss_ctx, ret); + status = NSS_TX_FAILURE_NOT_READY; + goto done; + } + + /* + * Read memory barrier + */ + smp_rmb(); + + if (local_ntcm->cm.response != NSS_CMN_RESPONSE_ACK) { + status = NSS_TX_FAILURE; + ntcm->cm.response = local_ntcm->cm.response; + ntcm->cm.error = local_ntcm->cm.error; + goto done; + } + + /* + * Copy the message received + */ + memcpy(&ntcm->msg, &local_ntcm->msg, len); + +done: + up(&tls_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_tls_tx_msg_sync); + +/* + * nss_tls_notify_register() + * Register a handler for notification from NSS firmware. 
+ */ +struct nss_ctx_instance *nss_tls_notify_register(uint32_t if_num, nss_tls_msg_callback_t ev_cb, void *app_data) +{ + struct nss_ctx_instance *nss_ctx = nss_tls_get_context(); + uint32_t ret; + + BUG_ON(!nss_ctx); + + ret = nss_core_register_handler(nss_ctx, if_num, nss_tls_handler, app_data); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to register event handler for interface(%u)", nss_ctx, if_num); + return NULL; + } + + ret = nss_core_register_msg_handler(nss_ctx, if_num, ev_cb); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: unable to register event handler for interface(%u)", nss_ctx, if_num); + return NULL; + } + + return nss_ctx; +} +EXPORT_SYMBOL(nss_tls_notify_register); + +/* + * nss_tls_notify_unregister() + * Unregister notification callback handler. + */ +void nss_tls_notify_unregister(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_tls_get_context(); + uint32_t ret; + + BUG_ON(!nss_ctx); + + ret = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to un register event handler for interface(%u)", nss_ctx, if_num); + return; + } + + ret = nss_core_unregister_handler(nss_ctx, if_num); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to un register event handler for interface(%u)", nss_ctx, if_num); + return; + } + + return; +} +EXPORT_SYMBOL(nss_tls_notify_unregister); + +/* + * nss_tls_register_if() + * Register data and event callback handlers for dynamic interface. 
+ */ +struct nss_ctx_instance *nss_tls_register_if(uint32_t if_num, + nss_tls_data_callback_t data_cb, + nss_tls_msg_callback_t ev_cb, + struct net_device *netdev, + uint32_t features, + uint32_t type, + void *app_data) +{ + struct nss_ctx_instance *nss_ctx = nss_tls_get_context(); + uint32_t ret; + + if (!nss_tls_verify_ifnum(nss_ctx, if_num)) { + nss_warning("%px: TLS Interface is not dynamic:%u", nss_ctx, if_num); + return NULL; + } + + if (nss_ctx->subsys_dp_register[if_num].ndev) { + nss_warning("%px: Cannot find free slot for TLS NSS I/F:%u", nss_ctx, if_num); + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, data_cb, NULL, app_data, netdev, features); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, type); + + ret = nss_core_register_handler(nss_ctx, if_num, nss_tls_handler, app_data); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to register event handler for interface(%u)", nss_ctx, if_num); + return NULL; + } + + ret = nss_core_register_msg_handler(nss_ctx, if_num, ev_cb); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: unable to register event handler for interface(%u)", nss_ctx, if_num); + return NULL; + } + + /* + * Atomically set the bitmap for the interface number + */ + set_bit(if_num, tls_pvt.if_map); + return nss_ctx; +} +EXPORT_SYMBOL(nss_tls_register_if); + +/* + * nss_tls_unregister_if() + * Unregister data and event callback handlers for the interface. 
+ */ +void nss_tls_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_tls_get_context(); + uint32_t ret; + + if (!nss_ctx->subsys_dp_register[if_num].ndev) { + nss_warning("%px: Cannot find registered netdev for TLS NSS I/F:%u", nss_ctx, if_num); + return; + } + + /* + * Atomically clear the bitmap for the interface number + */ + clear_bit(if_num, tls_pvt.if_map); + + ret = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to un register event handler for interface(%u)", nss_ctx, if_num); + return; + } + + nss_core_unregister_handler(nss_ctx, if_num); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_tls_unregister_if); + +/* + * nss_tls_get_context() + * Return TLS NSS context. + */ +struct nss_ctx_instance *nss_tls_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.tls_handler_id]; +} +EXPORT_SYMBOL(nss_tls_get_context); + +/* + * nss_tls_get_device() + * Gets the original device from probe. + */ +struct device *nss_tls_get_dev(struct nss_ctx_instance *nss_ctx) +{ + return nss_ctx->dev; +} +EXPORT_SYMBOL(nss_tls_get_dev); + +/* + * nss_tls_msg_init() + * Initialize nss_tls msg to be sent asynchronously. + */ +void nss_tls_msg_init(struct nss_tls_msg *ncm, uint32_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_tls_msg_init); + +/* + * nss_tls_msg_sync_init() + * Initialize nss_tls_msg to be sent synchronously. + */ +void nss_tls_msg_sync_init(struct nss_tls_msg *ncm, uint32_t if_num, uint32_t type, uint32_t len) +{ + nss_cmn_msg_sync_init(&ncm->cm, if_num, type, len); +} +EXPORT_SYMBOL(nss_tls_msg_sync_init); + +/* + * nss_tls_register_handler() + * TLS initialization. 
+ */ +void nss_tls_register_handler(void) +{ + sema_init(&tls_pvt.sem, 1); + init_completion(&tls_pvt.complete); + nss_tls_stats_dentry_create(); + nss_tls_strings_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tls_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_tls_log.c new file mode 100644 index 000000000..11afe45ed --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tls_log.c @@ -0,0 +1,167 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE + ************************************************************************** + */ + +/* + * nss_tls_log.c + * NSS TLS logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_tls_log_message_types_str + * TLS message strings + */ +static int8_t *nss_tls_log_message_types_str[NSS_TLS_MSG_MAX] __maybe_unused = { + "TLS Node Configure", + "TLS Context Configure", + "TLS Context Deconfigure", + "TLS Cipher Update", + "TLS Context Sync", + "TLS Node Sync", +}; + +/* + * nss_tls_log_error_response_types_str + * Strings for error types for TLS messages + */ +static int8_t *nss_tls_log_error_response_types_str[NSS_TLS_ERROR_MAX] __maybe_unused = { + "TLS no_error", + "TLS unknown message", + "TLS fail node already config", + "TLS fail inner ctx", + "TLS fail outer ctx", + "TLS fail req pool", + "TLS invalid block len", + "TLS invalid hash len", + "TLS invalid version", + "TLS invalid context words", + "TLS fail alloc hwctx", + "TLS fail copy ctx", + "TLS Invalid algorithm", + "TLS fail nomem" +}; + +/* + * nss_tls_node_config_msg() + * Log TLS node configure message. + */ +static void nss_tls_node_config_msg(struct nss_tls_msg *ntm) +{ + nss_trace("%px: NSS TLS Node Configure Message:\n" + "TLS Interface: %d\n", ntm, ntm->cm.interface); +} + +/* + * nss_tls_ctx_config_msg() + * Log TLS session configure message. + */ +static void nss_tls_ctx_config_msg(struct nss_tls_msg *ntm) +{ + struct nss_tls_ctx_config *ntccm __maybe_unused = &ntm->msg.ctx_cfg; + nss_trace("%px: NSS TLS Context Configure Message:\n" + "TLS Except if_num: %d\n", + ntccm, ntccm->except_ifnum); +} + +/* + * nss_tls_cipher_upddate_msg() + * Log TLS Cipher Update message. + */ +static void nss_tls_cipher_update_msg(struct nss_tls_msg *ntm) +{ + struct nss_tls_cipher_update *ntcum __maybe_unused = &ntm->msg.cipher_update; + nss_trace("%px: NSS TLS Cipher Update message\n" + "TLS crypto index: %d\n", + ntcum, ntcum->crypto_idx); +} + +/* + * nss_tls_log_verbose() + * Log message contents. 
+ */ +static void nss_tls_log_verbose(struct nss_tls_msg *ntm) +{ + switch (ntm->cm.type) { + case NSS_TLS_MSG_TYPE_NODE_CONFIG: + nss_tls_node_config_msg(ntm); + break; + + case NSS_TLS_MSG_TYPE_CIPHER_UPDATE: + nss_tls_cipher_update_msg(ntm); + break; + + case NSS_TLS_MSG_TYPE_CTX_CONFIG: + nss_tls_ctx_config_msg(ntm); + break; + + default: + nss_warning("%px: Invalid message type\n", ntm); + break; + } +} + +/* + * nss_tls_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_tls_log_tx_msg(struct nss_tls_msg *ntm) +{ + if (ntm->cm.type >= NSS_TLS_MSG_MAX) { + nss_warning("%px: Invalid message type\n", ntm); + return; + } + + nss_info("%px: type[%d]:%s\n", ntm, ntm->cm.type, nss_tls_log_message_types_str[ntm->cm.type]); + nss_tls_log_verbose(ntm); +} + +/* + * nss_tls_log_rx_msg() + * Log messages received from FW. + */ +void nss_tls_log_rx_msg(struct nss_tls_msg *ntm) +{ + if (ntm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ntm); + return; + } + + if (ntm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ntm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ntm, ntm->cm.type, + nss_tls_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response]); + goto verbose; + } + + if (ntm->cm.error >= NSS_TLS_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ntm, ntm->cm.type, nss_tls_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response], + ntm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ntm, ntm->cm.type, nss_tls_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response], + ntm->cm.error, nss_tls_log_error_response_types_str[ntm->cm.error]); + +verbose: + nss_tls_log_verbose(ntm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tls_log.h 
b/feeds/ipq807x/qca-nss-drv/src/nss_tls_log.h new file mode 100644 index 000000000..37846c2b6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tls_log.h @@ -0,0 +1,39 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE + ************************************************************************** + */ + +#ifndef __NSS_TLS_LOG_H +#define __NSS_TLS_LOG_H + +/* + * nss_tls_log.h + * NSS TLS Log Header File + */ + +/* + * nss_tls_log_tx_msg + * Logs a TLS message that is sent to the NSS firmware. + */ +void nss_tls_log_tx_msg(struct nss_tls_msg *ndm); + +/* + * nss_tls_log_rx_msg + * Logs a TLS message that is received from the NSS firmware. + */ +void nss_tls_log_rx_msg(struct nss_tls_msg *ndm); + +#endif /* __NSS_TLS_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tls_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_tls_stats.c new file mode 100644 index 000000000..58082708e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tls_stats.c @@ -0,0 +1,206 @@ +/* + ****************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ****************************************************************************** + */ + +#include "nss_core.h" +#include "nss_tls.h" +#include "nss_tls_stats.h" +#include "nss_tls_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_tls_stats_notifier); + +/* + * Spinlock to protect tls statistics update/read + */ +DEFINE_SPINLOCK(nss_tls_stats_lock); + +uint64_t nss_tls_stats[NSS_MAX_NET_INTERFACES][NSS_TLS_STATS_MAX]; + +/* + * nss_tls_stats_iface_type() + * Return a string for each interface type. + */ +static const char *nss_tls_stats_iface_type(enum nss_dynamic_interface_type type) +{ + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_TLS_INNER: + return "tls_inner"; + + case NSS_DYNAMIC_INTERFACE_TYPE_TLS_OUTER: + return "tls_outer"; + + default: + return "invalid_interface"; + } +} + +/* + * nss_tls_stats_read() + * Read tls node statiistics. 
+ */ +static ssize_t nss_tls_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats + + * few blank lines for banner printing + Number of Extra outputlines + * for future reference to add new stats + */ + uint32_t max_output_lines = NSS_TLS_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + struct nss_ctx_instance *nss_ctx = nss_tls_get_context(); + enum nss_dynamic_interface_type type; + unsigned long *ifmap; + uint64_t *stats_shadow; + ssize_t bytes_read = 0; + size_t size_wr = 0; + uint32_t if_num; + int32_t i; + int count; + char *lbuf; + + ifmap = nss_tls_ifmap_get(); + count = bitmap_weight(ifmap, NSS_MAX_NET_INTERFACES); + if (count) { + size_al = size_al * count; + } + + lbuf = vzalloc(size_al); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return -ENOMEM; + } + + stats_shadow = vzalloc(NSS_TLS_STATS_MAX * 8); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + vfree(lbuf); + return -ENOMEM; + } + + /* + * Common node stats for each TLS dynamic interface. 
+ */ + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "tls stats", NSS_STATS_SINGLE_CORE); + for_each_set_bit(if_num, ifmap, NSS_MAX_NET_INTERFACES) { + + type = nss_dynamic_interface_get_type(nss_ctx, if_num); + if ((type != NSS_DYNAMIC_INTERFACE_TYPE_TLS_INNER) && + (type != NSS_DYNAMIC_INTERFACE_TYPE_TLS_OUTER)) { + continue; + } + + spin_lock_bh(&nss_tls_stats_lock); + for (i = 0; i < NSS_TLS_STATS_MAX; i++) { + stats_shadow[i] = nss_tls_stats[if_num][i]; + } + spin_unlock_bh(&nss_tls_stats_lock); + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n%s if_num:%03u\n", + nss_tls_stats_iface_type(type), if_num); + size_wr += nss_stats_print("tls", NULL, NSS_STATS_SINGLE_INSTANCE, nss_tls_strings_stats, + stats_shadow, NSS_TLS_STATS_MAX, lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + vfree(lbuf); + vfree(stats_shadow); + return bytes_read; +} + +/* + * nss_tls_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(tls); + +/* + * nss_tls_stats_dentry_create() + * Create tls statistics debug entry. + */ +void nss_tls_stats_dentry_create(void) +{ + nss_stats_create_dentry("tls", &nss_tls_stats_ops); +} + +/* + * nss_tls_stats_sync() + * Update tls node statistics. + */ +void nss_tls_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm) +{ + struct nss_tls_msg *ndcm = (struct nss_tls_msg *)ncm; + struct nss_tls_ctx_stats *ndccs = &ndcm->msg.stats; + uint64_t *ctx_stats; + uint32_t *msg_stats; + int i; + + spin_lock_bh(&nss_tls_stats_lock); + + /* + * Update common node stats, + * Note: TLS only supports a single queue for RX + */ + msg_stats = (uint32_t *)ndccs; + ctx_stats = nss_tls_stats[ncm->interface]; + + for (i = 0; i < NSS_TLS_STATS_MAX; i++, ctx_stats++, msg_stats++) { + *ctx_stats += *msg_stats; + } + + spin_unlock_bh(&nss_tls_stats_lock); +} + +/* + * nss_tls_stats_notify() + * Sends notifications to all the registered modules. 
+ * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_tls_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_tls_stats_notification tls_stats; + + spin_lock_bh(&nss_tls_stats_lock); + tls_stats.core_id = nss_ctx->id; + tls_stats.if_num = if_num; + memcpy(tls_stats.stats_ctx, nss_tls_stats[if_num], sizeof(tls_stats.stats_ctx)); + spin_unlock_bh(&nss_tls_stats_lock); + + atomic_notifier_call_chain(&nss_tls_stats_notifier, NSS_STATS_EVENT_NOTIFY, &tls_stats); +} + +/* + * nss_tls_stats_unregister_notifier() + * Deregisters statistics notifier. + */ +int nss_tls_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_tls_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_tls_stats_unregister_notifier); + +/* + * nss_tls_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_tls_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_tls_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_tls_stats_register_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tls_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_tls_stats.h new file mode 100644 index 000000000..3883f6297 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tls_stats.h @@ -0,0 +1,28 @@ +/* + ****************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ****************************************************************************** + */ + +#ifndef __NSS_TLS_STATS_H +#define __NSS_TLS_STATS_H + +#include + +extern void nss_tls_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_tls_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm); +extern void nss_tls_stats_dentry_create(void); + +#endif /* __NSS_TLS_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tls_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_tls_strings.c new file mode 100644 index 000000000..8c4854865 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tls_strings.c @@ -0,0 +1,88 @@ +/* + ****************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ****************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_strings.h" +#include "nss_tls_strings.h" + +/* + * nss_tls_strings_stats + * tls statistics strings. + */ +struct nss_stats_info nss_tls_strings_stats[NSS_TLS_STATS_MAX] = { + {"rx_pkts", NSS_STATS_TYPE_COMMON}, + {"rx_byts", NSS_STATS_TYPE_COMMON}, + {"tx_pkts", NSS_STATS_TYPE_COMMON}, + {"tx_byts", NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops", NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops", NSS_STATS_TYPE_DROP}, + {"single_rec", NSS_STATS_TYPE_SPECIAL}, + {"multi_rec", NSS_STATS_TYPE_SPECIAL}, + {"tx_inval_reqs", NSS_STATS_TYPE_SPECIAL}, + {"rx_ccs_rec", NSS_STATS_TYPE_SPECIAL}, + {"fail_ccs", NSS_STATS_TYPE_ERROR}, + {"eth_node_deactive", NSS_STATS_TYPE_SPECIAL}, + {"crypto_alloc_success", NSS_STATS_TYPE_SPECIAL}, + {"crypto_free_req", NSS_STATS_TYPE_SPECIAL}, + {"crypto_free_success", NSS_STATS_TYPE_SPECIAL}, + {"fail_crypto_alloc", NSS_STATS_TYPE_EXCEPTION}, + {"fail_crypto_lookup", NSS_STATS_TYPE_EXCEPTION}, + {"fail_req_alloc", NSS_STATS_TYPE_ERROR}, + {"fail_pbuf_stats", NSS_STATS_TYPE_ERROR}, + {"fail_ctx_active", NSS_STATS_TYPE_ERROR}, + {"hw_len_error", NSS_STATS_TYPE_ERROR}, + {"hw_token_error", NSS_STATS_TYPE_ERROR}, + {"hw_bypass_error", NSS_STATS_TYPE_ERROR}, + {"hw_crypto_error", NSS_STATS_TYPE_ERROR}, + {"hw_hash_error", NSS_STATS_TYPE_ERROR}, + {"hw_config_error", NSS_STATS_TYPE_ERROR}, + {"hw_algo_error", NSS_STATS_TYPE_ERROR}, + {"hw_hash_ovf_error", NSS_STATS_TYPE_ERROR}, + {"hw_auth_error", NSS_STATS_TYPE_ERROR}, + {"hw_pad_verify_error", NSS_STATS_TYPE_ERROR}, + {"hw_timeout_error", NSS_STATS_TYPE_ERROR}, + {"no_desc_in", NSS_STATS_TYPE_EXCEPTION}, + {"no_desc_out", NSS_STATS_TYPE_EXCEPTION}, + {"no_reqs", NSS_STATS_TYPE_EXCEPTION} +}; + +/* + * nss_tls_strings_read() + * Read tls statistics names + */ 
+static ssize_t nss_tls_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_tls_strings_stats, NSS_TLS_STATS_MAX); +} + +/* + * nss_tls_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(tls); + +/* + * nss_tls_strings_dentry_create() + * Create tls statistics strings debug entry. + */ +void nss_tls_strings_dentry_create(void) +{ + nss_strings_create_dentry("tls", &nss_tls_strings_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tls_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_tls_strings.h new file mode 100644 index 000000000..1509722b0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tls_strings.h @@ -0,0 +1,27 @@ +/* + ****************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ****************************************************************************** + */ + +#ifndef __NSS_TLS_STRINGS_H +#define __NSS_TLS_STRINGS_H + +#include "nss_tls_stats.h" + +extern struct nss_stats_info nss_tls_strings_stats[NSS_TLS_STATS_MAX]; +extern void nss_tls_strings_dentry_create(void); + +#endif /* __NSS_TLS_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx.c b/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx.c new file mode 100644 index 000000000..2ba6ee64d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx.c @@ -0,0 +1,299 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2018, 2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_trustsec_tx_stats.h" +#include "nss_trustsec_tx_log.h" + +#define NSS_TRUSTSEC_TX_TIMEOUT 3000 /* 3 Seconds */ + +/* + * Private data structure for trustsec_tx interface + */ +static struct nss_trustsec_tx_pvt { + struct semaphore sem; + struct completion complete; + int response; +} ttx; + +/* + * nss_trustsec_tx_handler() + * Handle NSS -> HLOS messages for trustsec_tx + */ +static void nss_trustsec_tx_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, + __attribute__((unused))void *app_data) +{ + nss_trustsec_tx_msg_callback_t cb; + struct nss_trustsec_tx_msg *npm = (struct nss_trustsec_tx_msg *)ncm; + + BUG_ON(ncm->interface != NSS_TRUSTSEC_TX_INTERFACE); + + /* + * Trace messages. + */ + nss_trustsec_tx_log_rx_msg(npm); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_TRUSTSEC_TX_MSG_MAX) { + nss_warning("%px: received invalid message %d for trustsec_tx interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_trustsec_tx_msg)) { + nss_warning("%px: message size incorrect: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + switch (ncm->type) { + case NSS_TRUSTSEC_TX_MSG_STATS_SYNC: + /* + * Update trustsec_tx statistics. + */ + nss_trustsec_tx_stats_sync(nss_ctx, &npm->msg.stats_sync); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages, trustsec_tx sends all notify messages + * to the same callback/app_data. 
+ */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].ndev; + } + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_trustsec_tx_msg_callback_t)ncm->cb; + + cb((void *)ncm->app_data, npm); +} + +/* + * nss_trustsec_tx_msg() + * Transmit a trustsec_tx message to NSSFW + */ +nss_tx_status_t nss_trustsec_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_trustsec_tx_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace messages. + */ + nss_trustsec_tx_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (ncm->interface != NSS_TRUSTSEC_TX_INTERFACE) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_TRUSTSEC_TX_MSG_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_trustsec_tx_msg); + +/* + * nss_trustsec_tx_callback + * Callback to handle the completion of NSS ->HLOS messages. + */ +static void nss_trustsec_tx_callback(void *app_data, struct nss_trustsec_tx_msg *npm) +{ + if (npm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("trustsec_tx error response %d\n", npm->cm.response); + ttx.response = NSS_TX_FAILURE; + complete(&ttx.complete); + return; + } + + ttx.response = NSS_TX_SUCCESS; + complete(&ttx.complete); +} + +/* + * nss_trustsec_tx_msg_sync() + * Send a message to trustsec_tx interface & wait for the response. 
+ */ +nss_tx_status_t nss_trustsec_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_trustsec_tx_msg *msg) +{ + nss_tx_status_t status; + int ret = 0; + + down(&ttx.sem); + + msg->cm.cb = (nss_ptr_t)nss_trustsec_tx_callback; + msg->cm.app_data = (nss_ptr_t)NULL; + + status = nss_trustsec_tx_msg(nss_ctx, msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_trustsec_tx_msg failed\n", nss_ctx); + up(&ttx.sem); + return status; + } + + ret = wait_for_completion_timeout(&ttx.complete, msecs_to_jiffies(NSS_TRUSTSEC_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: trustsec_tx tx failed due to timeout\n", nss_ctx); + ttx.response = NSS_TX_FAILURE; + } + + status = ttx.response; + up(&ttx.sem); + + return status; +} +EXPORT_SYMBOL(nss_trustsec_tx_msg_sync); + +/* + * nss_trustsec_tx_get_ctx() + * Return a TrustSec TX NSS context. + */ +struct nss_ctx_instance *nss_trustsec_tx_get_ctx() +{ + return &nss_top_main.nss[nss_top_main.trustsec_tx_handler_id]; +} +EXPORT_SYMBOL(nss_trustsec_tx_get_ctx); + +/* + * nss_trustsec_tx_msg_init() + * Initialize trustsec_tx message. 
+ */ +void nss_trustsec_tx_msg_init(struct nss_trustsec_tx_msg *npm, uint16_t if_num, uint32_t type, uint32_t len, + nss_trustsec_tx_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&npm->cm, if_num, type, len, (void *)cb, app_data); +} +EXPORT_SYMBOL(nss_trustsec_tx_msg_init); + +/* + * nss_trustsec_tx_update_nexthop() + */ +nss_tx_status_t nss_trustsec_tx_update_nexthop(uint32_t src, uint32_t dest, uint16_t sgt) +{ + struct nss_ctx_instance *ctx = nss_trustsec_tx_get_ctx(); + struct nss_trustsec_tx_msg ttx_msg = {{0}}; + struct nss_trustsec_tx_update_nexthop_msg *ttxunh; + nss_tx_status_t status; + + ttxunh = &ttx_msg.msg.upd_nexthop; + ttxunh->src = src; + ttxunh->dest = dest; + ttxunh->sgt = sgt; + + nss_trustsec_tx_msg_init(&ttx_msg, NSS_TRUSTSEC_TX_INTERFACE, NSS_TRUSTSEC_TX_MSG_UPDATE_NEXTHOP, + sizeof(*ttxunh), NULL, NULL); + + BUG_ON(in_atomic()); + status = nss_trustsec_tx_msg_sync(ctx, &ttx_msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: configure trustsec_tx failed: %d\n", ctx, status); + } + + return status; +} +EXPORT_SYMBOL(nss_trustsec_tx_update_nexthop); + +/* + * nss_trustsec_tx_configure_sgt() + */ +nss_tx_status_t nss_trustsec_tx_configure_sgt(uint32_t src, uint32_t dest, uint16_t sgt) +{ + struct nss_ctx_instance *ctx = nss_trustsec_tx_get_ctx(); + struct nss_trustsec_tx_msg ttx_msg = {{0}}; + struct nss_trustsec_tx_configure_msg *ttxcfg; + nss_tx_status_t status; + + ttxcfg = &ttx_msg.msg.configure; + ttxcfg->src = src; + ttxcfg->dest = dest; + ttxcfg->sgt = sgt; + + nss_trustsec_tx_msg_init(&ttx_msg, NSS_TRUSTSEC_TX_INTERFACE, NSS_TRUSTSEC_TX_MSG_CONFIGURE, + sizeof(*ttxcfg), NULL, NULL); + + BUG_ON(in_atomic()); + status = nss_trustsec_tx_msg_sync(ctx, &ttx_msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: configure trustsec_tx failed: %d\n", ctx, status); + } + + return status; +} +EXPORT_SYMBOL(nss_trustsec_tx_configure_sgt); + +/* + * nss_trustsec_tx_unconfigure() + */ +nss_tx_status_t 
nss_trustsec_tx_unconfigure_sgt(uint32_t src, uint16_t sgt) +{ + struct nss_ctx_instance *ctx = nss_trustsec_tx_get_ctx(); + struct nss_trustsec_tx_msg ttx_msg = {{0}}; + struct nss_trustsec_tx_unconfigure_msg *ttxucfg; + nss_tx_status_t status; + + ttxucfg = &ttx_msg.msg.unconfigure; + ttxucfg->src = src; + ttxucfg->sgt = sgt; + + nss_trustsec_tx_msg_init(&ttx_msg, NSS_TRUSTSEC_TX_INTERFACE, NSS_TRUSTSEC_TX_MSG_UNCONFIGURE, + sizeof(*ttxucfg), NULL, NULL); + + BUG_ON(in_atomic()); + status = nss_trustsec_tx_msg_sync(ctx, &ttx_msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: unconfigure trustsec_tx failed: %d\n", ctx, status); + } + + return status; +} +EXPORT_SYMBOL(nss_trustsec_tx_unconfigure_sgt); + +/* + * nss_trustsec_tx_register_handler() + * Registering handler for sending msg to trustsec_tx node on NSS. + */ +void nss_trustsec_tx_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_trustsec_tx_get_ctx(); + + nss_core_register_handler(nss_ctx, NSS_TRUSTSEC_TX_INTERFACE, nss_trustsec_tx_handler, NULL); + + nss_trustsec_tx_stats_dentry_create(); + + sema_init(&ttx.sem, 1); + init_completion(&ttx.complete); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_log.c new file mode 100644 index 000000000..3ed5b5149 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_log.c @@ -0,0 +1,170 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_trustsec_tx_log.c + * NSS TRUSTSEC_TX logger file. + */ + +#include "nss_core.h" + +/* + * nss_trustsec_tx_log_message_types_str + * TRUSTSEC_TX message strings + */ +static int8_t *nss_trustsec_tx_log_message_types_str[NSS_TRUSTSEC_TX_MSG_MAX] __maybe_unused = { + "TRUSTSEC_TX Configure Message", + "TRUSTSEC_TX Unconfigure Message", + "TRUSTSEC_TX Stats Sync", + "TRUSTSEC_TX Update next Hop", +}; + +/* + * nss_trustsec_tx_log_error_response_types_str + * Strings for error types for TRUSTSEC_TX messages + */ +static int8_t *nss_trustsec_tx_log_error_response_types_str[NSS_TRUSTSEC_TX_ERR_UNKNOWN] __maybe_unused = { + "TRUSTSEC_TX Invalid Source Interface", + "TRUSTSEC_TX Reconfigure Source Interface" + "TRUSTSEC_TX Destination Interface Not Found", + "TRUSTSEC_TX Not Configured", + "TRUSTSEC_TX SGT Mismatch", + "TRUSTSEC_TX Unknown Error", +}; + +/* + * nss_trustsec_tx_log_configure_msg() + * Log NSS TRUSTSEC_TX configure message. + */ +static void nss_trustsec_tx_log_configure_msg(struct nss_trustsec_tx_msg *ntm) +{ + struct nss_trustsec_tx_configure_msg *ntcm __maybe_unused = &ntm->msg.configure; + nss_trace("%px: NSS TRUSTSEC_TX Configure Message:\n" + "TRUSTSEC_TX Source: %d\n" + "TRUSTSEC_TX Destination: %d\n" + "TRUSTSEC_TX Security Group Tag: %d\n", + ntcm, ntcm->src, + ntcm->dest, ntcm->sgt); +} + +/* + * nss_trustsec_tx_log_unconfigure_msg() + * Log NSS TRUSTSEC_TX unconfigure message. 
+ */ +static void nss_trustsec_tx_log_unconfigure_msg(struct nss_trustsec_tx_msg *ntm) +{ + struct nss_trustsec_tx_unconfigure_msg *ntcm __maybe_unused = &ntm->msg.unconfigure; + nss_trace("%px: NSS TRUSTSEC_TX Unconfigure Message:\n" + "TRUSTSEC_TX Source: %d\n" + "TRUSTSEC_TX Security Group Tag: %d\n", + ntcm, ntcm->src, ntcm->sgt); +} + +/* + * nss_trustsec_tx_log_update_nexthop_msg() + * Log NSS TRUSTSEC_TX update nexthop message. + */ +static void nss_trustsec_tx_log_update_nexthop_msg(struct nss_trustsec_tx_msg *ntm) +{ + struct nss_trustsec_tx_update_nexthop_msg *ntunm __maybe_unused = &ntm->msg.upd_nexthop; + nss_trace("%px: NSS TRUSTSEC_TX Update Next Hop Message:\n" + "TRUSTSEC_TX Source: %d\n" + "TRUSTSEC_TX Destination: %d\n" + "TRUSTSEC_TX Security Group Tag: %d\n", + ntunm, ntunm->src, + ntunm->dest, ntunm->sgt); +} + +/* + * nss_trustsec_tx_log_verbose() + * Log message contents. + */ +static void nss_trustsec_tx_log_verbose(struct nss_trustsec_tx_msg *ntm) +{ + switch (ntm->cm.type) { + case NSS_TRUSTSEC_TX_MSG_CONFIGURE: + nss_trustsec_tx_log_configure_msg(ntm); + break; + + case NSS_TRUSTSEC_TX_MSG_UNCONFIGURE: + nss_trustsec_tx_log_unconfigure_msg(ntm); + break; + + case NSS_TRUSTSEC_TX_MSG_UPDATE_NEXTHOP: + nss_trustsec_tx_log_update_nexthop_msg(ntm); + break; + + case NSS_TRUSTSEC_TX_MSG_STATS_SYNC: + /* + * No log for valid stats message. + */ + break; + + default: + nss_warning("%px: Invalid message type\n", ntm); + break; + } +} + +/* + * nss_trustsec_tx_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_trustsec_tx_log_tx_msg(struct nss_trustsec_tx_msg *ntm) +{ + if (ntm->cm.type >= NSS_TRUSTSEC_TX_MSG_MAX) { + nss_warning("%px: Invalid message type\n", ntm); + return; + } + + nss_info("%px: type[%d]:%s\n", ntm, ntm->cm.type, nss_trustsec_tx_log_message_types_str[ntm->cm.type]); + nss_trustsec_tx_log_verbose(ntm); +} + +/* + * nss_trustsec_tx_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_trustsec_tx_log_rx_msg(struct nss_trustsec_tx_msg *ntm) +{ + if (ntm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ntm); + return; + } + + if (ntm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ntm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ntm, ntm->cm.type, + nss_trustsec_tx_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response]); + goto verbose; + } + + if (ntm->cm.error >= NSS_TRUSTSEC_TX_ERR_UNKNOWN) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ntm, ntm->cm.type, nss_trustsec_tx_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response], + ntm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ntm, ntm->cm.type, nss_trustsec_tx_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response], + ntm->cm.error, nss_trustsec_tx_log_error_response_types_str[ntm->cm.error]); + +verbose: + nss_trustsec_tx_log_verbose(ntm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_log.h new file mode 100644 index 000000000..58633c942 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_TRUSTSEC_TX_LOG_H__ +#define __NSS_TRUSTSEC_TX_LOG_H__ + +/* + * nss_trustsec_tx_log.h + * NSS TRUSTSEC_TX Log Header File + */ + +/* + * nss_trustsec_tx_log_tx_msg + * Logs a trustsec_tx message that is sent to the NSS firmware. + */ +void nss_trustsec_tx_log_tx_msg(struct nss_trustsec_tx_msg *ncm); + +/* + * nss_trustsec_tx_log_rx_msg + * Logs a trustsec_tx message that is received from the NSS firmware. + */ +void nss_trustsec_tx_log_rx_msg(struct nss_trustsec_tx_msg *ncm); + +#endif /* __NSS_TRUSTSEC_TX_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_stats.c new file mode 100644 index 000000000..5302321ae --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_stats.c @@ -0,0 +1,145 @@ +/* + ************************************************************************** + * Copyright (c) 2017, 2019 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_trustsec_tx_stats.h" + +/* + * nss_trustsec_tx_stats_str + * Trustsec TX statistics strings. + */ + +struct nss_stats_info nss_trustsec_tx_stats_str[NSS_TRUSTSEC_TX_STATS_MAX] = { + {"INVALID_SRC" , NSS_STATS_TYPE_ERROR}, + {"UNCONFIGURED_SRC" , NSS_STATS_TYPE_ERROR}, + {"HEADROOM_NOT_ENOUGH" , NSS_STATS_TYPE_ERROR} +}; + +/* + * trustsec_tx_stats + * Trustsec TX statistics. + */ +uint64_t trustsec_tx_stats[NSS_TRUSTSEC_TX_STATS_MAX]; + +/* + * Trustsec TX statistics APIs + */ + +/* + * nss_trustsec_tx_stats_sync() + * Update trustsec_tx node statistics. 
+ */ +void nss_trustsec_tx_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_trustsec_tx_stats_sync_msg *ntsm) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + int j; + + spin_lock_bh(&nss_top->stats_lock); + + /* + * Update common node stats + */ + nss_top->stats_node[NSS_TRUSTSEC_TX_INTERFACE][NSS_STATS_NODE_RX_PKTS] += ntsm->node_stats.rx_packets; + nss_top->stats_node[NSS_TRUSTSEC_TX_INTERFACE][NSS_STATS_NODE_RX_BYTES] += ntsm->node_stats.rx_bytes; + nss_top->stats_node[NSS_TRUSTSEC_TX_INTERFACE][NSS_STATS_NODE_TX_PKTS] += ntsm->node_stats.tx_packets; + nss_top->stats_node[NSS_TRUSTSEC_TX_INTERFACE][NSS_STATS_NODE_TX_BYTES] += ntsm->node_stats.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_top->stats_node[NSS_TRUSTSEC_TX_INTERFACE][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + j] += ntsm->node_stats.rx_dropped[j]; + } + + /* + * Update trustsec node stats + */ + trustsec_tx_stats[NSS_TRUSTSEC_TX_STATS_INVALID_SRC] += ntsm->invalid_src; + trustsec_tx_stats[NSS_TRUSTSEC_TX_STATS_UNCONFIGURED_SRC] += ntsm->unconfigured_src; + trustsec_tx_stats[NSS_TRUSTSEC_TX_STATS_HEADROOM_NOT_ENOUGH] += ntsm->headroom_not_enough; + + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_trustsec_tx_stats_read() + * Read trustsec_tx statiistics. + */ +static ssize_t nss_trustsec_tx_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i; + + /* + * Max output lines = #stats + few blank lines for banner printing + + * Number of Extra outputlines for future reference to add new stats. 
+ */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_TRUSTSEC_TX_STATS_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_STATS_NODE_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "trustsec_tx", NSS_STATS_SINGLE_CORE); + + /* + * Common node stats + */ + size_wr += nss_stats_fill_common_stats(NSS_TRUSTSEC_TX_INTERFACE, NSS_STATS_SINGLE_INSTANCE, lbuf, size_wr, size_al, "trustsec_tx"); + + /* + * TrustSec TX node stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_TRUSTSEC_TX_STATS_MAX); i++) { + stats_shadow[i] = trustsec_tx_stats[i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("trustsec_tx", NULL, NSS_STATS_SINGLE_INSTANCE + , nss_trustsec_tx_stats_str + , stats_shadow + , NSS_TRUSTSEC_TX_STATS_MAX + , lbuf, size_wr, size_al); + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_trustsec_tx_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(trustsec_tx) + +/* + * nss_trustsec_tx_stats_dentry_create() + * Create trustsec_tx statistics debug entry. 
+ */ +void nss_trustsec_tx_stats_dentry_create(void) +{ + nss_stats_create_dentry("trustsec_tx", &nss_trustsec_tx_stats_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_stats.h new file mode 100644 index 000000000..11a4d8f56 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_trustsec_tx_stats.h @@ -0,0 +1,44 @@ +/* + ************************************************************************** + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_trustsec_tx_stats.h + * NSS TRUSTSEC TX statistics header file. 
+ */ + +#ifndef __NSS_TRUSTSEC_TX_STATS_H +#define __NSS_TRUSTSEC_TX_STATS_H + +/* + * Trustsec TX statistics + */ +enum nss_trustsec_tx_stats { + NSS_TRUSTSEC_TX_STATS_INVALID_SRC, + /* Number of packets with invalid src if */ + NSS_TRUSTSEC_TX_STATS_UNCONFIGURED_SRC, + /* Number of packets with unconfigured src if */ + NSS_TRUSTSEC_TX_STATS_HEADROOM_NOT_ENOUGH, + /* Number of packets with not enough headroom */ + NSS_TRUSTSEC_TX_STATS_MAX +}; + +/* + * Trustsec TX statistics APIs + */ +extern void nss_trustsec_tx_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_trustsec_tx_stats_sync_msg *ntsm); +extern void nss_trustsec_tx_stats_dentry_create(void); + +#endif /* __NSS_TRUSTSEC_TX_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tstamp.c b/feeds/ipq807x/qca-nss-drv/src/nss_tstamp.c new file mode 100644 index 000000000..1984afb07 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tstamp.c @@ -0,0 +1,423 @@ +/* + ************************************************************************** + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_tstamp.c + * NSS Tstamp APIs + */ + +#include +#include +#include +#include +#include +#include +#include "nss_tx_rx_common.h" +#include "nss_tstamp.h" +#include "nss_tstamp_stats.h" + +#define NSS_TSTAMP_HEADER_SIZE max(sizeof(struct nss_tstamp_h2n_pre_hdr), sizeof(struct nss_tstamp_n2h_pre_hdr)) + +/* + * Notify data structure + */ +struct nss_tstamp_notify_data { + nss_tstamp_msg_callback_t tstamp_callback; + void *app_data; +}; + +static struct nss_tstamp_notify_data nss_tstamp_notify = { + .tstamp_callback = NULL, + .app_data = NULL, +}; + +static struct net_device_stats *nss_tstamp_ndev_stats(struct net_device *ndev); + +/* + * dummy netdevice ops + */ +static const struct net_device_ops nss_tstamp_ndev_ops = { + .ndo_get_stats = nss_tstamp_ndev_stats, +}; + +/* + * nss_tstamp_ndev_setup() + * Dummy setup for net_device handler + */ +static void nss_tstamp_ndev_setup(struct net_device *ndev) +{ + return; +} + +/* + * nss_tstamp_ndev_stats() + * Return net device stats + */ +static struct net_device_stats *nss_tstamp_ndev_stats(struct net_device *ndev) +{ + return &ndev->stats; +} + +/* + * nss_tstamp_verify_if_num() + * Verify if_num passed to us. + */ +static bool nss_tstamp_verify_if_num(uint32_t if_num) +{ + return (if_num == NSS_TSTAMP_TX_INTERFACE) || (if_num == NSS_TSTAMP_RX_INTERFACE); +} + +/* + * nss_tstamp_interface_handler() + * Handle NSS -> HLOS messages for TSTAMP Statistics + */ +static void nss_tstamp_interface_handler(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_tstamp_msg *ntm = (struct nss_tstamp_msg *)ncm; + nss_tstamp_msg_callback_t cb; + + if (!nss_tstamp_verify_if_num(ncm->interface)) { + nss_warning("%px: invalid interface %d for tstamp_tx", nss_ctx, ncm->interface); + return; + } + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_TSTAMP_MSG_TYPE_MAX) { + nss_warning("%px: received invalid message %d for tstamp", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_tstamp_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + switch (ntm->cm.type) { + case NSS_TSTAMP_MSG_TYPE_SYNC_STATS: + nss_tstamp_stats_sync(nss_ctx, &ntm->msg.stats, ncm->interface); + break; + default: + nss_warning("%px: Unknown message type %d", + nss_ctx, ncm->type); + return; + } + + /* + * Update the callback and app_data for NOTIFY messages + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_tstamp_notify.tstamp_callback; + ncm->app_data = (nss_ptr_t)nss_tstamp_notify.app_data; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_tstamp_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, ntm); +} + +/* + * nss_tstamp_copy_data() + * Copy timestamps from received nss frame into skb + */ +static void nss_tstamp_copy_data(struct nss_tstamp_n2h_pre_hdr *ntm, struct sk_buff *skb) +{ + struct skb_shared_hwtstamps *tstamp; + + tstamp = skb_hwtstamps(skb); + tstamp->hwtstamp = ktime_set(ntm->ts_data_hi, ntm->ts_data_lo); +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 16, 0)) + tstamp->syststamp = ktime_set(ntm->ts_data_hi, ntm->ts_data_lo); +#endif +} + +/* + * nss_tstamp_get_dev() + * Get the net_device associated with the packet. 
+ */ +static struct net_device *nss_tstamp_get_dev(struct sk_buff *skb) +{ + struct dst_entry *dst; + struct net_device *dev; + struct rtable *rt; + struct flowi6 fl6; + uint32_t ip_addr; + + /* + * It seems like the data came over IPsec, hence indicate + * it to the Linux over this interface + */ + skb_reset_network_header(skb); + skb_reset_mac_header(skb); + + skb->pkt_type = PACKET_HOST; + + switch (ip_hdr(skb)->version) { + case IPVERSION: + ip_addr = ip_hdr(skb)->saddr; + + rt = ip_route_output(&init_net, ip_addr, 0, 0, 0); + if (IS_ERR(rt)) { + return NULL; + } + + dst = (struct dst_entry *)rt; + skb->protocol = cpu_to_be16(ETH_P_IP); + break; + + case 6: + memset(&fl6, 0, sizeof(fl6)); + memcpy(&fl6.daddr, &ipv6_hdr(skb)->saddr, sizeof(fl6.daddr)); + + dst = ip6_route_output(&init_net, NULL, &fl6); + if (IS_ERR(dst)) { + return NULL; + } + + skb->protocol = cpu_to_be16(ETH_P_IPV6); + break; + + default: + nss_warning("%px:could not get dev for the skb\n", skb); + return NULL; + } + + dev = dst->dev; + dev_hold(dev); + + dst_release(dst); + return dev; +} + +/* + * nss_tstamp_buf_receive() + * Receive nss exception packets. + */ +static void nss_tstamp_buf_receive(struct net_device *ndev, struct sk_buff *skb, struct napi_struct *napi) +{ + struct nss_tstamp_n2h_pre_hdr *n2h_hdr = (struct nss_tstamp_n2h_pre_hdr *)skb->data; + struct nss_ctx_instance *nss_ctx; + struct net_device *dev; + uint32_t tstamp_sz; + + BUG_ON(!n2h_hdr); + + tstamp_sz = n2h_hdr->ts_hdr_sz; + if (tstamp_sz > (NSS_TSTAMP_HEADER_SIZE)) { + goto free; + } + + nss_ctx = &nss_top_main.nss[nss_top_main.tstamp_handler_id]; + BUG_ON(!nss_ctx); + + skb_pull_inline(skb, tstamp_sz); + + /* + * copy the time stamp and convert into ktime_t + */ + nss_tstamp_copy_data(n2h_hdr, skb); + if (unlikely(n2h_hdr->ts_tx)) { + /* + * We are in TX Path + */ + skb_tstamp_tx(skb, skb_hwtstamps(skb)); + + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; + goto free; + } + + /* + * We are in RX path. 
+ */ + dev = nss_cmn_get_interface_dev(nss_ctx, n2h_hdr->ts_ifnum); + if (!dev) { + ndev->stats.rx_dropped++; + goto free; + } + + /* + * Hold the dev until we finish + */ + dev_hold(dev); + + switch(dev->type) { + case NSS_IPSEC_ARPHRD_IPSEC: + /* + * Release the prev dev reference + */ + dev_put(dev); + + /* + * find the actual IPsec tunnel device + */ + dev = nss_tstamp_get_dev(skb); + break; + + default: + /* + * This is a plain non-encrypted data packet. + */ + skb->protocol = eth_type_trans(skb, dev); + break; + } + + skb->skb_iif = dev->ifindex; + skb->dev = dev; + + ndev->stats.rx_packets++; + ndev->stats.rx_bytes += skb->len; + + netif_receive_skb(skb); + + /* + * release the device as we are done + */ + dev_put(dev); + return; +free: + dev_kfree_skb_any(skb); + return; +} + +/* + * nss_tstamp_tx_buf() + * Send data packet for tstamp processing + */ +nss_tx_status_t nss_tstamp_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *skb, uint32_t if_num) +{ + struct nss_tstamp_h2n_pre_hdr *h2n_hdr; + int extra_head; + int extra_tail = 0; + char *align_data; + uint32_t hdr_sz; + + nss_trace("%px: Tstamp If Tx packet, id:%d, data=%px", nss_ctx, NSS_TSTAMP_RX_INTERFACE, skb->data); + + /* + * header size + alignment size + */ + hdr_sz = NSS_TSTAMP_HEADER_SIZE; + extra_head = hdr_sz - skb_headroom(skb); + + /* + * Expand the head for h2n_hdr + */ + if (extra_head > 0) { + /* + * Try to accommodate using available tailroom. 
+ */ + if (skb->end - skb->tail >= extra_head) + extra_tail = -extra_head; + if (pskb_expand_head(skb, extra_head, extra_tail, GFP_KERNEL)) { + nss_trace("%px: expand head room failed", nss_ctx); + return NSS_TX_FAILURE; + } + } + + align_data = PTR_ALIGN((skb->data - hdr_sz), sizeof(uint32_t)); + hdr_sz = (nss_ptr_t)skb->data - (nss_ptr_t)align_data; + + h2n_hdr = (struct nss_tstamp_h2n_pre_hdr *)skb_push(skb, hdr_sz); + h2n_hdr->ts_ifnum = if_num; + h2n_hdr->ts_tx_hdr_sz = hdr_sz; + + return nss_core_send_packet(nss_ctx, skb, NSS_TSTAMP_RX_INTERFACE, H2N_BIT_FLAG_VIRTUAL_BUFFER | H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_tstamp_tx_buf); + +/* + * nss_tstamp_register_netdev() + * register dummy netdevice for tstamp interface + */ +struct net_device *nss_tstamp_register_netdev(void) +{ + struct net_device *ndev; + uint32_t err = 0; + +#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 16, 0)) + ndev = alloc_netdev(sizeof(struct netdev_priv_instance), "qca-nss-tstamp", nss_tstamp_ndev_setup); +#else + ndev = alloc_netdev(sizeof(struct netdev_priv_instance), "qca-nss-tstamp", NET_NAME_ENUM, nss_tstamp_ndev_setup); +#endif + if (!ndev) { + nss_warning("Tstamp: Could not allocate tstamp net_device "); + return NULL; + } + + ndev->netdev_ops = &nss_tstamp_ndev_ops; + + err = register_netdev(ndev); + if (err) { + nss_warning("Tstamp: Could not register tstamp net_device "); + free_netdev(ndev); + return NULL; + } + + return ndev; +} + +/* + * nss_tstamp_notify_register() + * Register to receive tstamp notify messages. 
+ */ +struct nss_ctx_instance *nss_tstamp_notify_register(nss_tstamp_msg_callback_t cb, void *app_data) +{ + struct nss_ctx_instance *nss_ctx; + + nss_ctx = &nss_top_main.nss[nss_top_main.tstamp_handler_id]; + + nss_tstamp_notify.tstamp_callback = cb; + nss_tstamp_notify.app_data = app_data; + + return nss_ctx; +} +EXPORT_SYMBOL(nss_tstamp_notify_register); + +/* + * nss_tstamp_register_handler() + */ +void nss_tstamp_register_handler(struct net_device *ndev) +{ + uint32_t features = 0; + struct nss_ctx_instance *nss_ctx; + + nss_ctx = &nss_top_main.nss[nss_top_main.tstamp_handler_id]; + + nss_core_register_subsys_dp(nss_ctx, NSS_TSTAMP_TX_INTERFACE, nss_tstamp_buf_receive, NULL, NULL, ndev, features); + + nss_core_register_handler(nss_ctx, NSS_TSTAMP_TX_INTERFACE, nss_tstamp_interface_handler, NULL); + + nss_core_register_handler(nss_ctx, NSS_TSTAMP_RX_INTERFACE, nss_tstamp_interface_handler, NULL); + + nss_tstamp_stats_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tstamp_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_tstamp_stats.c new file mode 100644 index 000000000..6285ad84b --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tstamp_stats.c @@ -0,0 +1,165 @@ +/* + ************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include "nss_tstamp_stats.h" + +/* + * Spinlock to protect TSTAMP statistics update/read + */ +DEFINE_SPINLOCK(nss_tstamp_stats_lock); + +/* + * nss_tstamp_stats_str + * TSTAMP stats strings + */ +struct nss_stats_info nss_tstamp_stats_str[NSS_TSTAMP_STATS_MAX] = { + {"rx_packets" , NSS_STATS_TYPE_COMMON}, + {"rx_bytes" , NSS_STATS_TYPE_COMMON}, + {"tx_packets" , NSS_STATS_TYPE_COMMON}, + {"tx_bytes" , NSS_STATS_TYPE_COMMON}, + {"rx_queue_0_dropped" , NSS_STATS_TYPE_DROP}, + {"rx_queue_1_dropped" , NSS_STATS_TYPE_DROP}, + {"rx_queue_2_dropped" , NSS_STATS_TYPE_DROP}, + {"rx_queue_3_dropped" , NSS_STATS_TYPE_DROP}, + {"boomeranged" , NSS_STATS_TYPE_SPECIAL}, + {"dropped_fail_enqueue" , NSS_STATS_TYPE_DROP}, + {"dropped_fail_alloc" , NSS_STATS_TYPE_DROP}, + {"dropped_fail_copy" , NSS_STATS_TYPE_DROP}, + {"dropped_no_interface" , NSS_STATS_TYPE_DROP}, + {"dropped_no_headroom" , NSS_STATS_TYPE_DROP} +}; + +/* + * nss_tstamp_stats + * tstamp statistics + */ +uint64_t nss_tstamp_stats[2][NSS_TSTAMP_STATS_MAX]; + +/* + * nss_tstamp_stats_read() + * Read tstamp statistics + */ +static ssize_t nss_tstamp_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + int32_t i, num; + + /* + * Max output lines = (#stats + tx or rx tag + two blank lines) * 2(TX and RX) + + * start tag line + end tag line + three blank lines + */ + uint32_t max_output_lines = (NSS_TSTAMP_STATS_MAX + 3) * 2 + 5; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; 
+ uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return -ENOMEM; + } + + stats_shadow = kzalloc(NSS_TSTAMP_STATS_MAX * sizeof(uint64_t), GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return -ENOMEM; + } + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "tstamp", NSS_STATS_SINGLE_CORE); + /* + * TSTAMP statistics + */ + for (num = 0; num < 2; num++) { + if (num == 0) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ntstamp TX stats:\n\n"); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\ntstamp RX stats:\n\n"); + } + + spin_lock_bh(&nss_tstamp_stats_lock); + for (i = 0; i < NSS_TSTAMP_STATS_MAX; i++) { + stats_shadow[i] = nss_tstamp_stats[num][i]; + } + spin_unlock_bh(&nss_tstamp_stats_lock); + size_wr += nss_stats_print("tstamp", NULL, NSS_STATS_SINGLE_INSTANCE + , nss_tstamp_stats_str + , stats_shadow + , NSS_TSTAMP_STATS_MAX + , lbuf, size_wr, size_al); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_tstamp_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(tstamp) + +/* + * nss_tstamp_stats_dentry_create() + * Create tstamp statistics debug entry. + */ +void nss_tstamp_stats_dentry_create(void) +{ + nss_stats_create_dentry("tstamp", &nss_tstamp_stats_ops); +} + +/* + * nss_tstamp_stats_sync() + * Handle the syncing of NSS TSTAMP statistics. 
+ */ +void nss_tstamp_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_tstamp_stats_msg *nts, uint32_t interface) +{ + int id, j; + + if (interface == NSS_TSTAMP_TX_INTERFACE) { + id = 0; + } else { + id = 1; + } + + spin_lock_bh(&nss_tstamp_stats_lock); + + /* + * Common node stats + */ + nss_tstamp_stats[id][NSS_STATS_NODE_RX_PKTS] += nts->node_stats.rx_packets; + nss_tstamp_stats[id][NSS_STATS_NODE_RX_BYTES] += nts->node_stats.rx_bytes; + nss_tstamp_stats[id][NSS_STATS_NODE_TX_PKTS] += nts->node_stats.tx_packets; + nss_tstamp_stats[id][NSS_STATS_NODE_TX_BYTES] += nts->node_stats.tx_bytes; + + for (j = 0; j < NSS_MAX_NUM_PRI; j++) { + nss_tstamp_stats[id][NSS_STATS_NODE_RX_QUEUE_0_DROPPED + j] += nts->node_stats.rx_dropped[j]; + } + + /* + * TSTAMP statistics + */ + nss_tstamp_stats[id][NSS_TSTAMP_STATS_BOOMERANGED] += nts->boomeranged; + nss_tstamp_stats[id][NSS_TSTAMP_STATS_DROPPED_FAIL_ENQUEUE] += nts->dropped_fail_enqueue; + nss_tstamp_stats[id][NSS_TSTAMP_STATS_DROPPED_FAIL_ALLOC] += nts->dropped_fail_alloc; + nss_tstamp_stats[id][NSS_TSTAMP_STATS_DROPPED_FAIL_COPY] += nts->dropped_fail_copy; + nss_tstamp_stats[id][NSS_TSTAMP_STATS_DROPPED_NO_INTERFACE] += nts->dropped_no_interface; + nss_tstamp_stats[id][NSS_TSTAMP_STATS_DROPPED_NO_HEADROOM] += nts->dropped_no_headroom; + spin_unlock_bh(&nss_tstamp_stats_lock); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tstamp_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_tstamp_stats.h new file mode 100644 index 000000000..d488ae7d7 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tstamp_stats.h @@ -0,0 +1,48 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ * ****************************************************************************
+ */
+
+#ifndef __NSS_TSTAMP_STATS_H
+#define __NSS_TSTAMP_STATS_H
+
+#include <nss_tstamp.h>
+
+/**
+ * TSTAMP node statistics
+ */
+enum nss_tstamp_stats_types {
+
+ NSS_TSTAMP_STATS_BOOMERANGED = NSS_STATS_NODE_MAX,
+ /**< Number of boomeranged packets. */
+ NSS_TSTAMP_STATS_DROPPED_FAIL_ENQUEUE,
+ /**< Number of failed enqueue drops. */
+ NSS_TSTAMP_STATS_DROPPED_FAIL_ALLOC,
+ /**< Number of failed allocation drops. */
+ NSS_TSTAMP_STATS_DROPPED_FAIL_COPY,
+ /**< Number of failed copy drops. */
+ NSS_TSTAMP_STATS_DROPPED_NO_INTERFACE,
+ /**< Number of failed no interface drops. */
+ NSS_TSTAMP_STATS_DROPPED_NO_HEADROOM,
+ /**< Number of failed no headroom drops. */
+ NSS_TSTAMP_STATS_MAX,
+};
+
+/*
+ * TSTAMP statistics APIs
+ */
+extern void nss_tstamp_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_tstamp_stats_msg *nts, uint32_t interface);
+extern void nss_tstamp_stats_dentry_create(void);
+
+#endif /* __NSS_TSTAMP_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tun6rd.c b/feeds/ipq807x/qca-nss-drv/src/nss_tun6rd.c new file mode 100644 index 000000000..af1a4ac2f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tun6rd.c @@ -0,0 +1,183 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_tun6rd_log.h" + +/* + * nss_tun6rd_handler() + * Handle NSS -> HLOS messages for 6rd tunnel + */ +static void nss_tun6rd_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_tun6rd_msg *ntm = (struct nss_tun6rd_msg *)ncm; + void *ctx; + + nss_tun6rd_msg_callback_t cb; + + BUG_ON(!nss_is_dynamic_interface(ncm->interface)); + + /* + * Trace Messages + */ + nss_tun6rd_log_rx_msg(ntm); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_TUN6RD_MAX) { + nss_warning("%px: received invalid message %d for Tun6RD interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_tun6rd_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Update the callback and app_data for NOTIFY messages, tun6rd sends all notify messages + * to the same callback/app_data. 
+ */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->tun6rd_msg_callback; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_tun6rd_msg_callback_t)ncm->cb; + ctx = nss_ctx->subsys_dp_register[ncm->interface].ndev; + + /* + * call 6rd tunnel callback + */ + if (!ctx) { + nss_warning("%px: Event received for 6rd tunnel interface %d before registration", nss_ctx, ncm->interface); + return; + } + + cb(ctx, ntm); +} + +/* + * nss_tun6rd_tx() + * Transmit a tun6rd message to NSSFW + */ +nss_tx_status_t nss_tun6rd_tx(struct nss_ctx_instance *nss_ctx, struct nss_tun6rd_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace Messages + */ + nss_tun6rd_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (!nss_is_dynamic_interface(ncm->interface)) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_TUN6RD_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + *********************************** + * Register/Unregister/Miscellaneous APIs + *********************************** + */ + +/* + * nss_register_tun6rd_if() + */ +struct nss_ctx_instance *nss_register_tun6rd_if(uint32_t if_num, uint32_t type, nss_tun6rd_callback_t tun6rd_callback, + nss_tun6rd_msg_callback_t event_callback, struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.tun6rd_handler_id]; + + nss_assert(nss_ctx); + nss_assert((if_num >= NSS_DYNAMIC_IF_START) && (if_num < NSS_SPECIAL_IF_START)); + + nss_core_register_subsys_dp(nss_ctx, if_num, tun6rd_callback, NULL, NULL, netdev, features); + 
nss_ctx->subsys_dp_register[if_num].type = type; + + nss_top_main.tun6rd_msg_callback = event_callback; + + nss_core_register_handler(nss_ctx, if_num, nss_tun6rd_handler, NULL); + + return nss_ctx; +} + +/* + * nss_tun6rd_get_context() + */ +struct nss_ctx_instance *nss_tun6rd_get_context() +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.tun6rd_handler_id]; +} + +/* + * nss_unregister_tun6rd_if() + */ +void nss_unregister_tun6rd_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.tun6rd_handler_id]; + + nss_assert(nss_ctx); + nss_assert(nss_is_dynamic_interface(if_num)); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + nss_ctx->subsys_dp_register[if_num].type = 0; + + nss_top_main.tun6rd_msg_callback = NULL; + + nss_core_unregister_handler(nss_ctx, if_num); +} + +/* + * nss_tun6rd_msg_init() + * Initialize nss_tun6rd msg. + */ +void nss_tun6rd_msg_init(struct nss_tun6rd_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} + +EXPORT_SYMBOL(nss_tun6rd_get_context); +EXPORT_SYMBOL(nss_tun6rd_tx); +EXPORT_SYMBOL(nss_register_tun6rd_if); +EXPORT_SYMBOL(nss_unregister_tun6rd_if); +EXPORT_SYMBOL(nss_tun6rd_msg_init); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tun6rd_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_tun6rd_log.c new file mode 100644 index 000000000..121d70f82 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tun6rd_log.c @@ -0,0 +1,132 @@ +/* + ************************************************************************** + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_tun6rd_log.c + * NSS TUN6RD logger file. + */ + +#include "nss_core.h" + +/* + * nss_tun6rd_log_message_types_str + * NSS TUN6RD message strings + */ +static int8_t *nss_tun6rd_log_message_types_str[NSS_TUN6RD_MAX] __maybe_unused = { + "TUN6RD Attach PNODE", + "TUN6RD Stats", + "TUN6RD Update Peer", +}; + +/* + * nss_tun6rd_log_attach_pnode_msg() + * Log NSS TUN6RD Attach PNODE + */ +static void nss_tun6rd_log_attach_pnode_msg(struct nss_tun6rd_msg *ntm) +{ + struct nss_tun6rd_attach_tunnel_msg *ntam __maybe_unused = &ntm->msg.tunnel; + nss_trace("%px: NSS TUN6RD Attach Tunnel message \n" + "TUN6RD Source Address: %pI4\n" + "TUN6RD Destination Address: %pI4\n" + "TUN6RD Type of Service: %d\n" + "TUN6RD Time To Live: %d\n" + "TUN6RD Sibling Interface Number: %d\n", + ntam, &ntam->saddr, + &ntam->daddr, ntam->tos, + ntam->ttl, ntam->sibling_if_num); +} + +/* + * nss_tun6rd_log_set_peer_msg() + * Log NSS TUN6RD Set Peer Message + */ +static void nss_tun6rd_log_set_peer_msg(struct nss_tun6rd_msg *ntm) +{ + struct nss_tun6rd_set_peer_msg *ntspm __maybe_unused = &ntm->msg.peer; + nss_trace("%px: NSS TUN6RD Set Peer message \n" + "TUN6RD IPv6 Address: %pI6\n" + "TUN6RD Destination: %pI4\n", + ntspm, ntspm->ipv6_address, + &ntspm->dest); +} + +/* + * nss_tun6rd_log_verbose() + * Log message contents. 
+ */ +static void nss_tun6rd_log_verbose(struct nss_tun6rd_msg *ntm) +{ + switch (ntm->cm.type) { + case NSS_TUN6RD_ATTACH_PNODE: + nss_tun6rd_log_attach_pnode_msg(ntm); + break; + + case NSS_TUN6RD_ADD_UPDATE_PEER: + nss_tun6rd_log_set_peer_msg(ntm); + break; + + case NSS_TUN6RD_RX_STATS_SYNC: + /* + * No log for valid stats message. + */ + break; + + default: + nss_trace("%px: Invalid message type\n", ntm); + break; + } +} + +/* + * nss_tun6rd_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_tun6rd_log_tx_msg(struct nss_tun6rd_msg *ntm) +{ + if (ntm->cm.type >= NSS_TUN6RD_MAX) { + nss_warning("%px: Invalid message type\n", ntm); + return; + } + + nss_info("%px: type[%d]:%s\n", ntm, ntm->cm.type, nss_tun6rd_log_message_types_str[ntm->cm.type]); + nss_tun6rd_log_verbose(ntm); +} + +/* + * nss_tun6rd_log_rx_msg() + * Log messages received from FW. + */ +void nss_tun6rd_log_rx_msg(struct nss_tun6rd_msg *ntm) +{ + if (ntm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ntm); + return; + } + + if (ntm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ntm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ntm, ntm->cm.type, + nss_tun6rd_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + ntm, ntm->cm.type, nss_tun6rd_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response]); + +verbose: + nss_tun6rd_log_verbose(ntm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tun6rd_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_tun6rd_log.h new file mode 100644 index 000000000..c7c3b3a90 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tun6rd_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_TUN6RD_LOG_H +#define __NSS_TUN6RD_LOG_H + +/* + * nss_tun6rd.h + * NSS TUN6RD header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_tun6rd_log_tx_msg + * Logs a tun6rd message that is sent to the NSS firmware. + */ +void nss_tun6rd_log_tx_msg(struct nss_tun6rd_msg *ntm); + +/* + * nss_tun6rd_log_rx_msg + * Logs a tun6rd message that is received from the NSS firmware. + */ +void nss_tun6rd_log_rx_msg(struct nss_tun6rd_msg *ntm); + +#endif /* __NSS_TUN6RD_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6.c b/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6.c new file mode 100644 index 000000000..1801e861f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6.c @@ -0,0 +1,291 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_tunipip6_log.h" +#include "nss_tunipip6_stats.h" + +#define NSS_TUNIPIP6_TX_TIMEOUT 3000 + +/* + * Data structure used to handle sync message. + */ +static struct nss_tunipip6_pvt { + struct semaphore sem; /* Semaphore structure. */ + struct completion complete; /* Completion structure. */ + int response; /* Response from FW. */ + void *cb; /* Original cb for msgs. */ + void *app_data; /* Original app_data for msgs. 
*/ +} tunipip6_pvt; + +/* + * nss_tunipip6_verify_if_num + * Verify the interface is a valid interface + */ +static bool nss_tunipip6_verify_if_num(uint32_t if_num) +{ + enum nss_dynamic_interface_type type; + + type = nss_dynamic_interface_get_type(nss_tunipip6_get_context(), if_num); + + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_TUNIPIP6_INNER: + case NSS_DYNAMIC_INTERFACE_TYPE_TUNIPIP6_OUTER: + return true; + default: + return false; + } +} + +/* + * nss_tunipip6_handler() + * Handle NSS -> HLOS messages for ipip6 tunnel + */ +static void nss_tunipip6_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_tunipip6_msg *ntm = (struct nss_tunipip6_msg *)ncm; + void *ctx; + nss_tunipip6_msg_callback_t cb; + + BUG_ON(!nss_tunipip6_verify_if_num(ncm->interface)); + + /* + * Trace Messages + */ + nss_tunipip6_log_rx_msg(ntm); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_TUNIPIP6_MAX) { + nss_warning("%px: received invalid message %d for DS-Lite interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_tunipip6_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + switch (ntm->cm.type) { + case NSS_TUNIPIP6_STATS_SYNC: + /* + * Sync common node stats. + */ + nss_tunipip6_stats_sync(nss_ctx, ntm); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages, tunipip6 sends all notify messages + * to the same callback/app_data. 
+ */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->tunipip6_msg_callback; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_tunipip6_msg_callback_t)ncm->cb; + ctx = nss_ctx->subsys_dp_register[ncm->interface].ndev; + + /* + * call ipip6 tunnel callback + */ + if (!ctx) { + nss_warning("%px: Event received for DS-Lite tunnel interface %d before registration", nss_ctx, ncm->interface); + return; + } + + cb(ctx, ntm); +} + +/* + * nss_tunipip6_tx() + * Transmit a tunipip6 message to NSSFW + */ +nss_tx_status_t nss_tunipip6_tx(struct nss_ctx_instance *nss_ctx, struct nss_tunipip6_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace Messages + */ + nss_tunipip6_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (!nss_tunipip6_verify_if_num(ncm->interface)) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_TUNIPIP6_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_tunipip6_tx); + +/* + * nss_tunipip6_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_tunipip6_callback(void *app_data, struct nss_tunipip6_msg *nclm) +{ + tunipip6_pvt.response = NSS_TX_SUCCESS; + tunipip6_pvt.cb = NULL; + tunipip6_pvt.app_data = NULL; + + if (nclm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: tunipip6 Error response %d Error: %d\n", app_data, nclm->cm.response, nclm->cm.error); + tunipip6_pvt.response = nclm->cm.response; + } + + /* + * Write memory barrier. 
+ */ + smp_wmb(); + complete(&tunipip6_pvt.complete); +} + +/* + * nss_tunipip6_tx_sync() + * Transmit a tunipip6 message to NSSFW synchronously. + */ +nss_tx_status_t nss_tunipip6_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_tunipip6_msg *msg) +{ + nss_tx_status_t status; + int ret; + + down(&tunipip6_pvt.sem); + msg->cm.cb = (nss_ptr_t)nss_tunipip6_callback; + msg->cm.app_data = (nss_ptr_t)NULL; + + status = nss_tunipip6_tx(nss_ctx, msg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: tunipip6_tx_msg failed\n", nss_ctx); + up(&tunipip6_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&tunipip6_pvt.complete, msecs_to_jiffies(NSS_TUNIPIP6_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: tunipip6 tx sync failed due to timeout\n", nss_ctx); + tunipip6_pvt.response = NSS_TX_FAILURE; + } + + status = tunipip6_pvt.response; + up(&tunipip6_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_tunipip6_tx_sync); + +/* + * ********************************** + * Register/Unregister/Miscellaneous APIs + * ********************************** + */ + +/* + * nss_register_tunipip6_if() + */ +struct nss_ctx_instance *nss_register_tunipip6_if(uint32_t if_num, + uint32_t dynamic_interface_type, + nss_tunipip6_callback_t tunipip6_callback, + nss_tunipip6_msg_callback_t event_callback, + struct net_device *netdev, + uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.tunipip6_handler_id]; + + nss_assert(nss_ctx); + nss_assert(nss_tunipip6_verify_if_num(if_num)); + + nss_ctx->subsys_dp_register[if_num].type = dynamic_interface_type; + nss_top_main.tunipip6_msg_callback = event_callback; + nss_core_register_subsys_dp(nss_ctx, if_num, tunipip6_callback, NULL, NULL, netdev, features); + nss_core_register_handler(nss_ctx, if_num, nss_tunipip6_handler, NULL); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_register_tunipip6_if); + +/* + * nss_unregister_tunipip6_if() + */ +void 
nss_unregister_tunipip6_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.tunipip6_handler_id]; + + nss_assert(nss_ctx); + nss_assert(nss_tunipip6_verify_if_num(if_num)); + + nss_stats_reset_common_stats(if_num); + nss_core_unregister_handler(nss_ctx, if_num); + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_top_main.tunipip6_msg_callback = NULL; +} +EXPORT_SYMBOL(nss_unregister_tunipip6_if); + +/* + * nss_tunipip6_get_context() + */ +struct nss_ctx_instance *nss_tunipip6_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.tunipip6_handler_id]; +} +EXPORT_SYMBOL(nss_tunipip6_get_context); + +/* + * nss_tunipip6_register_handler() + */ +void nss_tunipip6_register_handler() +{ + struct nss_ctx_instance *nss_ctx = nss_tunipip6_get_context(); + + nss_core_register_handler(nss_ctx, NSS_TUNIPIP6_INTERFACE, nss_tunipip6_handler, NULL); + nss_tunipip6_stats_dentry_create(); + sema_init(&tunipip6_pvt.sem, 1); + init_completion(&tunipip6_pvt.complete); +} + +/* + * nss_tunipip6_msg_init() + * Initialize nss_tunipip6 msg. + */ +void nss_tunipip6_msg_init(struct nss_tunipip6_msg *ntm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ntm->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_tunipip6_msg_init); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_log.c new file mode 100644 index 000000000..1565ed87c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_log.c @@ -0,0 +1,189 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_tunipip6_log.c + * NSS TUNIPIP6 logger file. + */ + +#include "nss_core.h" + +/* + * nss_tunipip6_log_message_types_str + * NSS TUNIPIP6 message strings + */ +static int8_t *nss_tunipip6_log_message_types_str[NSS_TUNIPIP6_MAX] __maybe_unused = { + "TUNIPIP6 Encap Interface Create", + "TUNIPIP6 Decap Interface Create", + "TUNIPIP6 Stats", + "TUNIPIP6 FMR add", + "TUNIPIP6 FMR delete", + "TUNIPIP6 FMR flush", + "TUNIPIP6 BMR add", + "TUNIPIP6 BMR delete", +}; + +/* + * nss_tunipip6_log_error_types_str + * Strings for error types for TUNIPIP6 messages + */ +static char *nss_tunipip6_log_error_types_str[NSS_TUNIPIP6_ERROR_MAX] __maybe_unused = { + "TUNIPIP6 maximum tunnel reached", + "TUNIPIP6 tunnel already exists", + "TUNIPIP6 configuration parameters are incorrect", + "TUNIPIP6 FMR already exists ", + "TUNIPIP6 no FMR configured", + "TUNIPIP6 FMR table is full", + "TUNIPIP6 invalid FMR", + "TUNIPIP6 BMR already exists", + "TUNIPIP6 no BMR configured", + "TUNIPIP6 memory allocation for FMR failed", + "TUNIPIP6 unknown error", +}; + +/* + * nss_tunipip6_log_map_rule() + * Log NSS TUNIPIP6 map rule. 
+ */ +static void nss_tunipip6_log_map_rule(struct nss_tunipip6_msg *ntm) +{ + struct nss_tunipip6_map_rule *nmr __maybe_unused = &ntm->msg.map_rule; + nss_trace("%px: NSS TUNIPIP6 Interface Create message \n" + "TUNIPIP6 Map Rule IPv6 prefix: %pI6\n" + "TUNIPIP6 Map Rule IPv6 prefix length: %d\n" + "TUNIPIP6 Map Rule IPv4 prefix: %pI4\n" + "TUNIPIP6 Map Rule IPv4 prefix length: %d\n" + "TUNIPIP6 Map Rule IPv6 suffix: %pI6\n" + "TUNIPIP6 Map Rule IPv6 suffix length: %d\n" + "TUNIPIP6 Map Rule EA length: %d\n" + "TUNIPIP6 Map Rule PSID offset: %d\n", + nmr, nmr->ip6_prefix, + nmr->ip6_prefix_len,&nmr->ip4_prefix, + nmr->ip4_prefix_len, nmr->ip6_suffix, + nmr->ip6_suffix_len, nmr->ea_len, + nmr->psid_offset); +} + +/* + * nss_tunipip6_log_if_create_msg() + * Log NSS TUNIPIP6 Interface Create + */ +static void nss_tunipip6_log_if_create_msg(struct nss_tunipip6_msg *ntm) +{ + struct nss_tunipip6_create_msg *ntcm __maybe_unused = &ntm->msg.tunipip6_create; + nss_trace("%px: NSS TUNIPIP6 Interface Create message \n" + "TUNIPIP6 Source Address: %pI6\n" + "TUNIPIP6 Destination Address: %pI6\n" + "TUNIPIP6 Flow Label: %d\n" + "TUNIPIP6 Flags: %d\n" + "TUNIPIP6 Hop Limit: %d\n" + "TUNIPIP6 Draft03 Specification: %d\n" + "TUNIPIP6 TTL inherit: %u\n" + "TUNIPIP6 TOS inherit: %u\n" + "TUNIPIP6 Frag ID Update: %u\n" + "TUNIPIP6 Max FMR: %u\n", + ntcm, ntcm->saddr, + ntcm->daddr, ntcm->flowlabel, + ntcm->flags, ntcm->hop_limit, + ntcm->draft03, + ntcm->ttl_inherit, + ntcm->tos_inherit, + ntcm->frag_id_update, + ntcm->fmr_max); +} + +/* + * nss_tunipip6_log_verbose() + * Log message contents. + */ +static void nss_tunipip6_log_verbose(struct nss_tunipip6_msg *ntm) +{ + switch (ntm->cm.type) { + case NSS_TUNIPIP6_TX_ENCAP_IF_CREATE: + case NSS_TUNIPIP6_TX_DECAP_IF_CREATE: + nss_tunipip6_log_if_create_msg(ntm); + break; + + case NSS_TUNIPIP6_STATS_SYNC: + /* + * No log for valid stats message. 
+ */ + break; + + case NSS_TUNIPIP6_BMR_RULE_ADD: + case NSS_TUNIPIP6_BMR_RULE_DEL: + case NSS_TUNIPIP6_FMR_RULE_ADD: + case NSS_TUNIPIP6_FMR_RULE_DEL: + nss_tunipip6_log_map_rule(ntm); + break; + case NSS_TUNIPIP6_FMR_RULE_FLUSH: + nss_trace("%px: FMR rule flush.\n", ntm); + break; + default: + nss_trace("%px: Invalid message type\n", ntm); + break; + } +} + +/* + * nss_tunipip6_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_tunipip6_log_tx_msg(struct nss_tunipip6_msg *ntm) +{ + if (ntm->cm.type >= NSS_TUNIPIP6_MAX) { + nss_warning("%px: Invalid message type\n", ntm); + return; + } + + nss_info("%px: type[%d]:%s\n", ntm, ntm->cm.type, nss_tunipip6_log_message_types_str[ntm->cm.type]); + nss_tunipip6_log_verbose(ntm); +} + +/* + * nss_tunipip6_log_rx_msg() + * Log messages received from FW. + */ +void nss_tunipip6_log_rx_msg(struct nss_tunipip6_msg *ntm) +{ + if (ntm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ntm); + return; + } + + if (ntm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ntm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ntm, ntm->cm.type, + nss_tunipip6_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response]); + goto verbose; + } + + if (ntm->cm.error >= NSS_TUNIPIP6_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ntm, ntm->cm.type, nss_tunipip6_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response], + ntm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ntm, ntm->cm.type, nss_tunipip6_log_message_types_str[ntm->cm.type], + ntm->cm.response, nss_cmn_response_str[ntm->cm.response], + ntm->cm.error, nss_tunipip6_log_error_types_str[ntm->cm.error]); + +verbose: + nss_tunipip6_log_verbose(ntm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_log.h 
b/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_log.h new file mode 100644 index 000000000..2ebccee1f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_log.h @@ -0,0 +1,41 @@ +/* + ************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_TUNIPIP6_LOG_H +#define __NSS_TUNIPIP6_LOG_H + +/* + * nss_tunipip6.h + * NSS TUNIPIP6 header file. + */ + +/* + * Logger APIs + */ + +/* + * nss_tunipip6_log_tx_msg + * Logs a tunipip6 message that is sent to the NSS firmware. + */ +void nss_tunipip6_log_tx_msg(struct nss_tunipip6_msg *ntm); + +/* + * nss_tunipip6_log_rx_msg + * Logs a tunipip6 message that is received from the NSS firmware. 
+ */ +void nss_tunipip6_log_rx_msg(struct nss_tunipip6_msg *ntm); + +#endif /* __NSS_TUNIPIP6_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_stats.c new file mode 100644 index 000000000..76834d3ef --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_stats.c @@ -0,0 +1,124 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_tunipip6.h" +#include "nss_stats.h" +#include "nss_tunipip6_stats.h" + +#define NSS_TUNIPIP6_STATS_MAX_LINES (NSS_STATS_NODE_MAX + 32) + /**< Maximum number of lines for tunipip6 statistics dump. */ +#define NSS_TUNIPIP6_STATS_SIZE_PER_IF (NSS_STATS_MAX_STR_LENGTH * NSS_TUNIPIP6_STATS_MAX_LINES) + /**< Total number of statistics per tunipip6 interface. 
*/ + +/* + * nss_tunipip6_stats_read() + * Read tunipip6 common node statistics + */ +static ssize_t nss_tunipip6_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + struct nss_ctx_instance *nss_ctx = nss_tunipip6_get_context(); + enum nss_dynamic_interface_type type; + ssize_t bytes_read = 0; + size_t len = 0, size; + uint32_t if_num; + char *buf; + + /* + * Allocate memory for NSS_TUNIPIP6_TUNNEL_MAX tunnels and one + * static interface. + */ + size = NSS_TUNIPIP6_STATS_SIZE_PER_IF * (NSS_TUNIPIP6_TUNNEL_MAX << 1) + 1; + buf = vzalloc(size); + if (!buf) { + nss_warning("tunipip6: Could not allocate memory for local statistics buffer\n"); + return 0; + } + + len += nss_stats_banner(buf, len, size, "tunipip6", NSS_STATS_SINGLE_CORE); + + len += scnprintf(buf + len, size - len, "\nBase node if_num:%03u", NSS_TUNIPIP6_INTERFACE); + len += scnprintf(buf + len, size - len, "\n-------------------\n"); + len += nss_stats_fill_common_stats(NSS_TUNIPIP6_INTERFACE, NSS_STATS_SINGLE_INSTANCE, buf, len, size - len, "tunipip6"); + + /* + * Common node stats for each tunipip6 dynamic interface. + */ + for (if_num = NSS_DYNAMIC_IF_START; if_num < NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES; if_num++) { + type = nss_dynamic_interface_get_type(nss_ctx, if_num); + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_TUNIPIP6_INNER: + len += scnprintf(buf + len, size - len, "\nInner if_num:%03u", if_num); + break; + + case NSS_DYNAMIC_INTERFACE_TYPE_TUNIPIP6_OUTER: + len += scnprintf(buf + len, size - len, "\nOuter if_num:%03u", if_num); + break; + + default: + continue; + } + + len += scnprintf(buf + len, size - len, "\n-------------------\n"); + len += nss_stats_fill_common_stats(if_num, NSS_STATS_SINGLE_INSTANCE, buf, len, size - len, "tunipip6"); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, buf, len); + vfree(buf); + return bytes_read; +} + +/* + * nss_tunipip6_stats_sync() + * Update tunipip6 common node statistics. 
+ */ +void nss_tunipip6_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_tunipip6_msg *ntm) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct nss_tunipip6_stats_sync_msg *msg_stats = &ntm->msg.stats; + uint64_t i, *dest; + uint32_t *src; + + spin_lock_bh(&nss_top->stats_lock); + + /* + * Update common node stats + */ + dest = nss_top->stats_node[ntm->cm.interface]; + src = &msg_stats->node_stats.rx_packets; + for (i = NSS_STATS_NODE_RX_PKTS; i <= NSS_STATS_NODE_RX_QUEUE_3_DROPPED; i++) { + *dest++ = *src++; + } + + spin_unlock_bh(&nss_top->stats_lock); + +} + +/* + * nss_tunipip6_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(tunipip6) + +/* + * nss_tunipip6_stats_dentry_create() + * Create tunipip6 statistics debug entry. + */ +void nss_tunipip6_stats_dentry_create(void) +{ + nss_stats_create_dentry("tunipip6", &nss_tunipip6_stats_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_stats.h new file mode 100644 index 000000000..0f1748fc3 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tunipip6_stats.h @@ -0,0 +1,34 @@ +/* + ****************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_TUNIPIP6_STATS_H +#define __NSS_TUNIPIP6_STATS_H + +/* + * nss_tunipip6_stats_dentry_create() + * Creates tunipip6 interface statistics debug entry. + */ +void nss_tunipip6_stats_dentry_create(void); + +/* + * nss_tunipip6_stats_sync() + * Update tunipip6 common node statistics. + */ +void nss_tunipip6_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_tunipip6_msg *ntm); + +#endif /* __NSS_TUNIPIP6_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tx_msg_sync.c b/feeds/ipq807x/qca-nss-drv/src/nss_tx_msg_sync.c new file mode 100644 index 000000000..9eb0c70a9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tx_msg_sync.c @@ -0,0 +1,197 @@ +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/* + * nss_tx_msg_sync.c + * NSS Tx msg sync core APIs + */ + +#include "nss_tx_rx_common.h" + +/* + * nss_tx_msg_sync_callback() + * Internal callback used to handle the message response. + */ +static void nss_tx_msg_sync_callback(void *app_data, struct nss_cmn_msg *ncm) +{ + uint32_t resp_offset; + + /* + * Per-message sync data was used as app_data. + * Retrieve the address of the original message from it. + */ + struct nss_tx_msg_sync_cmn_data *sync_data = (struct nss_tx_msg_sync_cmn_data *)app_data; + struct nss_cmn_msg *original_msg = (struct nss_cmn_msg *)sync_data->original_msg; + + /* + * Set TX status. And Copy back ncm->error and ncm->response if it is NACK. + */ + sync_data->status = NSS_TX_SUCCESS; + if (ncm->response != NSS_CMN_RESPONSE_ACK) { + nss_warning("Tx msg sync error response %d\n", ncm->response); + sync_data->status = NSS_TX_FAILURE_SYNC_FW_ERR; + original_msg->error = ncm->error; + original_msg->response = ncm->response; + } + + /* + * ncm is the return message containing message response. + * It is different from the original message caller built. + * Because the return message is only visible in this callback context, + * we copy back message response by specifying offset and length to + * the return message. So the caller can use response in their context + * once wake up instead of calling a passed-in user callback here. + */ + resp_offset = sync_data->resp_offset + sizeof(struct nss_cmn_msg); + + if (sync_data->copy_len > 0) + memcpy((uint8_t *)((nss_ptr_t)original_msg + resp_offset), + (uint8_t *)((nss_ptr_t)ncm + resp_offset), + sync_data->copy_len); + + /* + * Wake up the caller + */ + complete(&sync_data->complete); +} + +/* + * nss_tx_msg_sync_internal() + * Internal call for sending messages to FW synchronously. 
+ */ +static nss_tx_status_t nss_tx_msg_sync_internal(struct nss_ctx_instance *nss_ctx, + nss_tx_msg_sync_subsys_async_t tx_msg_async, + nss_tx_msg_sync_subsys_async_with_size_t tx_msg_async_with_size, + uint32_t msg_buf_size, + struct nss_tx_msg_sync_cmn_data *sync_data, + struct nss_cmn_msg *ncm, + uint32_t timeout) +{ + nss_tx_status_t status; + int ret; + + /* + * Per-msg sync data is used as app_data. + * A generic callback is used to handle the return message. + */ + ncm->cb = (nss_ptr_t)nss_tx_msg_sync_callback; + ncm->app_data = (nss_ptr_t)sync_data; + + BUG_ON(!tx_msg_async && !tx_msg_async_with_size); + + /* + * Per-subsystem asynchronous call to send down the message. + */ + if (tx_msg_async) + status = tx_msg_async(nss_ctx, ncm); + else + status = tx_msg_async_with_size(nss_ctx, ncm, msg_buf_size); + + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Tx msg async failed\n", nss_ctx); + return status; + } + + /* + * Sleep. Wake up either by notification or timeout. + */ + ret = wait_for_completion_timeout(&sync_data->complete, msecs_to_jiffies(timeout)); + if (!ret) { + nss_warning("%px: Tx msg sync timeout\n", nss_ctx); + return NSS_TX_FAILURE_SYNC_TIMEOUT; + } + + /* + * Wake up. Message response has been received within timeout. + */ + return sync_data->status; +} + +/* + * nss_tx_msg_sync() + * Send messages to FW synchronously with default message buffer size. + * + * tx_msg_async specifies the per-subsystem asynchronous call. + * timeout specifies the maximum sleep time for the completion. + * ncm is the original message the caller built. + * Since the caller cannot access the return message containing message response, + * we copy back message response from return message. + * resp_offset and copy_len specify the part of return message it'll copy. 
+ */ +nss_tx_status_t nss_tx_msg_sync(struct nss_ctx_instance *nss_ctx, + nss_tx_msg_sync_subsys_async_t tx_msg_async, + uint32_t timeout, struct nss_cmn_msg *ncm, + uint32_t resp_offset, uint32_t copy_len) +{ + struct nss_tx_msg_sync_cmn_data sync_data; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + /* + * Check Tx msg async API + */ + if (!unlikely(tx_msg_async)) { + nss_warning("%px: missing Tx msg async API\n", nss_ctx); + return NSS_TX_FAILURE_SYNC_BAD_PARAM; + } + + /* + * Initialize the per-message sync data. + */ + init_completion(&sync_data.complete); + sync_data.status = NSS_TX_FAILURE; + sync_data.original_msg = (void *)ncm; + sync_data.resp_offset = resp_offset; + sync_data.copy_len = copy_len; + + return nss_tx_msg_sync_internal(nss_ctx, tx_msg_async, NULL, 0, &sync_data, ncm, timeout); +} +EXPORT_SYMBOL(nss_tx_msg_sync); + +/* + * nss_tx_msg_sync_with_size() + * Send messages to FW synchronously with specified message buffer size. + */ +nss_tx_status_t nss_tx_msg_sync_with_size(struct nss_ctx_instance *nss_ctx, + nss_tx_msg_sync_subsys_async_with_size_t tx_msg_async_with_size, + uint32_t msg_buf_size, uint32_t timeout, + struct nss_cmn_msg *ncm, uint32_t resp_offset, uint32_t copy_len) +{ + struct nss_tx_msg_sync_cmn_data sync_data; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + /* + * Check Tx msg async API + */ + if (!unlikely(tx_msg_async_with_size)) { + nss_warning("%px: missing Tx msg async API\n", nss_ctx); + return NSS_TX_FAILURE_SYNC_BAD_PARAM; + } + + /* + * Initialize the per-message sync data. 
+ */ + init_completion(&sync_data.complete); + sync_data.status = NSS_TX_FAILURE; + sync_data.original_msg = (void *)ncm; + sync_data.resp_offset = resp_offset; + sync_data.copy_len = copy_len; + + return nss_tx_msg_sync_internal(nss_ctx, NULL, tx_msg_async_with_size, + msg_buf_size, &sync_data, ncm, timeout); +} +EXPORT_SYMBOL(nss_tx_msg_sync_with_size); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tx_msg_sync.h b/feeds/ipq807x/qca-nss-drv/src/nss_tx_msg_sync.h new file mode 100644 index 000000000..248287e2e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tx_msg_sync.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* + * nss_tx_msg_sync.h + * NSS Tx msg sync header file + */ + +#ifndef __NSS_TX_MSG_SYNC_H +#define __NSS_TX_MSG_SYNC_H + +#include + +/* + * Amount time in msec the synchronous message should wait for response + * from NSS before the timeout happens. + */ +#define NSS_TX_MSG_SYNC_DEFAULT_TIMEOUT_MSEC (5000) + +/* + * Per-message sync data + * Used as message app_data. 
+ */ +struct nss_tx_msg_sync_cmn_data { + struct completion complete; /* Completion structure */ + nss_tx_status_t status; /* Tx status */ + void *original_msg; /* Address of the caller-build message */ + uint32_t resp_offset; /* Response offset in message payload */ + uint32_t copy_len; /* Length in bytes copied from the return message */ +}; + +/* + * nss_tx_msg_sync_subsys_async_t() + * Tx msg asynchronous API of each subsystem. + */ +typedef nss_tx_status_t (*nss_tx_msg_sync_subsys_async_t)(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm); + +/* + * nss_tx_msg_sync_subsys_async_with_size_t() + * Tx msg asynchronous API of each subsystem with message buffer size specified. + */ +typedef nss_tx_status_t (*nss_tx_msg_sync_subsys_async_with_size_t)(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, uint32_t size); + +/* + * nss_tx_msg_sync() + * Core function to send message to FW synchronously. + * + * tx_msg_async specifies the per-subsystem asynchronous call. + * timeout specifies the maximum sleep time for the completion. + * ncm is the original message the caller built. + * Since the caller cannot access the return message containing message response, + * we copy back message response from the return message. + * resp_offset and copy_len specify the part of return message it'll copy. + */ +nss_tx_status_t nss_tx_msg_sync(struct nss_ctx_instance *nss_ctx, + nss_tx_msg_sync_subsys_async_t tx_msg_async, + uint32_t timeout, struct nss_cmn_msg *ncm, + uint32_t resp_offset, uint32_t copy_len); + +/* + * nss_tx_msg_sync_with_size() + * Send messages to FW synchronously with specified message buffer size. 
+ */ +nss_tx_status_t nss_tx_msg_sync_with_size(struct nss_ctx_instance *nss_ctx, + nss_tx_msg_sync_subsys_async_with_size_t tx_msg_async_with_size, + uint32_t msg_buf_size, uint32_t timeout, + struct nss_cmn_msg *ncm, uint32_t resp_offset, uint32_t copy_len); + +#endif /* __NSS_TX_MSG_SYNC_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_tx_rx_common.h b/feeds/ipq807x/qca-nss-drv/src/nss_tx_rx_common.h new file mode 100644 index 000000000..6e148c450 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_tx_rx_common.h @@ -0,0 +1,114 @@ +/* + ************************************************************************** + * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_tx_rx_common.h + * NSS APIs common header file + */ + +#ifndef __NSS_TX_RX_COMMON_H +#define __NSS_TX_RX_COMMON_H + +#include +#include +#include +#include +#include "nss_tx_msg_sync.h" + +/* + * Global definitions + */ +#define NSS_HLOS_MESSAGE_VERSION 1 /* Update when the common message structure changed */ + +#if (NSS_DEBUG_LEVEL > 0) +#define NSS_VERIFY_CTX_MAGIC(x) nss_verify_ctx_magic(x) +#define NSS_VERIFY_INIT_DONE(x) nss_verify_init_done(x) + +/* + * nss_verify_ctx_magic() + */ +static inline void nss_verify_ctx_magic(struct nss_ctx_instance *nss_ctx) +{ + nss_assert(nss_ctx->magic == NSS_CTX_MAGIC); +} + +static inline void nss_verify_init_done(struct nss_ctx_instance *nss_ctx) +{ + nss_assert(nss_ctx->state == NSS_CORE_STATE_INITIALIZED); +} + +#else +#define NSS_VERIFY_CTX_MAGIC(x) +#define NSS_VERIFY_INIT_DONE(x) +#endif + +/* + * CB handlers for variour interfaces + */ +void nss_phys_if_register_handler(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_c2c_tx_register_handler(struct nss_ctx_instance *nss_ctx); +extern void nss_c2c_rx_register_handler(struct nss_ctx_instance *nss_ctx); +extern void nss_crypto_register_handler(void); +extern void nss_crypto_cmn_register_handler(void); +extern void nss_ipsec_register_handler(void); +extern void nss_ipsec_cmn_register_handler(void); +extern void nss_ipv4_register_handler(void); +extern void nss_ipv4_reasm_register_handler(void); +extern void nss_ipv6_register_handler(void); +extern void nss_ipv6_reasm_register_handler(void); +extern void nss_n2h_register_handler(struct nss_ctx_instance *nss_ctx); +extern void nss_tunipip6_register_handler(void); +extern void nss_pppoe_register_handler(void); +extern void nss_freq_register_handler(void); +extern void nss_eth_rx_register_handler(struct nss_ctx_instance *nss_ctx); +extern void nss_edma_register_handler(void); +extern void 
nss_lag_register_handler(void); +extern void nss_dynamic_interface_register_handler(struct nss_ctx_instance *nss_ctx); +extern void nss_gre_redir_register_handler(void); +extern void nss_gre_redir_lag_us_register_handler(void); +extern void nss_gre_redir_lag_ds_register_handler(void); +extern void nss_lso_rx_register_handler(struct nss_ctx_instance *nss_ctx); +extern void nss_sjack_register_handler(void); +extern void nss_wifi_register_handler(void); +extern struct net_device *nss_tstamp_register_netdev(void); +extern void nss_tstamp_register_handler(struct net_device *ndev); +extern void nss_portid_register_handler(void); +extern void nss_oam_register_handler(void); +extern void nss_dtls_register_handler(void); +extern void nss_dtls_cmn_register_handler(void); +extern void nss_tls_register_handler(void); +extern void nss_gre_tunnel_register_handler(void); +extern void nss_trustsec_tx_register_handler(void); +extern void nss_wifili_register_handler(void); +extern void nss_ppe_register_handler(void); +extern void nss_gre_redir_mark_register_handler(void); +extern void nss_ppe_vp_register_handler(void); +extern void nss_wifi_mac_db_register_handler(void); +extern void nss_wifi_ext_vdev_register_handler(void); +extern void nss_wifili_thread_scheme_db_init(uint8_t core_id); +extern void nss_wifi_mesh_init(void); + +/* + * nss_if_msg_handler() + * External reference for internal base class handler for interface messages. + * + * This is not registered with nss_core.c as it is really a base class feature + * of the phys_if and virt_if handlers. 
+ */ +extern void nss_if_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, + __attribute__((unused))void *app_data); + +#endif /* __NSS_TX_RX_COMMON_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_udp_st.c b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st.c new file mode 100755 index 000000000..eeab9ad03 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st.c @@ -0,0 +1,233 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_udp_st.c + * NSS UDP_ST APIs + */ + +#include "nss_core.h" +#include "nss_udp_st_stats.h" +#include "nss_udp_st_strings.h" +#include "nss_udp_st_log.h" + +#define NSS_UDP_ST_TX_MSG_TIMEOUT 1000 /* 1 sec timeout for udp_st messages */ + +/* + * Private data structure for udp_st configuration + */ +struct nss_udp_st_pvt { + struct semaphore sem; /* Semaphore structure */ + struct completion complete; /* completion structure */ + int response; /* Response from FW */ + void *cb; /* Original cb for sync msgs */ + void *app_data; /* Original app_data for sync msgs */ +} nss_udp_st_pvt; + +/* + * nss_udp_st_msg_handler() + * Handle NSS -> HLOS messages for UDP_ST node + */ +static void nss_udp_st_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_udp_st_msg *num = (struct nss_udp_st_msg *)ncm; + nss_udp_st_msg_callback_t cb; + + /* + * Is this a valid message type? + */ + if (num->cm.type >= NSS_UDP_ST_MAX_MSG_TYPES) { + nss_warning("%px: received invalid message %d for udp_st interface", nss_ctx, num->cm.type); + return; + } + + /* + * Log messages. + */ + nss_udp_st_log_rx_msg(num); + + switch (num->cm.type) { + case NSS_UDP_ST_STATS_SYNC_MSG: + /* + * Update driver statistics and send stats notifications to the registered modules. + */ + nss_udp_st_stats_sync(nss_ctx, &num->msg.stats); + break; + + case NSS_UDP_ST_RESET_STATS_MSG: + /* + * This is a response to the statistics reset message. + */ + nss_udp_st_stats_reset(NSS_UDP_ST_INTERFACE); + break; + default: + if (ncm->response != NSS_CMN_RESPONSE_ACK) { + /* + * Check response. + */ + nss_info("%px: Received response %d for type %d, interface %d", + nss_ctx, ncm->response, ncm->type, ncm->interface); + } + } + + /* + * Return for NOTIFY messages because there is no notifier functions. 
+ */ + if (num->cm.response == NSS_CMN_RESPONSE_NOTIFY) { + return; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * Callback + */ + cb = (nss_udp_st_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, num); +} + +/* + * nss_udp_st_tx_sync_callback() + * Callback to handle the completion of synchronous tx messages. + */ +static void nss_udp_st_tx_sync_callback(void *app_data, struct nss_udp_st_msg *num) +{ + nss_udp_st_msg_callback_t callback = (nss_udp_st_msg_callback_t)nss_udp_st_pvt.cb; + void *data = nss_udp_st_pvt.app_data; + + nss_udp_st_pvt.cb = NULL; + nss_udp_st_pvt.app_data = NULL; + + if (num->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("udp_st error response %d\n", num->cm.response); + nss_udp_st_pvt.response = NSS_TX_FAILURE; + } else { + nss_udp_st_pvt.response = NSS_TX_SUCCESS; + } + + if (callback) { + callback(data, num); + } + + complete(&nss_udp_st_pvt.complete); +} + +/* + * nss_udp_st_tx() + * Transmit a udp_st message to the FW. + */ +nss_tx_status_t nss_udp_st_tx(struct nss_ctx_instance *nss_ctx, struct nss_udp_st_msg *num) +{ + struct nss_cmn_msg *ncm = &num->cm; + + /* + * Sanity check the message + */ + if (ncm->interface != NSS_UDP_ST_INTERFACE) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_UDP_ST_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Trace messages. + */ + nss_udp_st_log_tx_msg(num); + + return nss_core_send_cmd(nss_ctx, num, sizeof(*num), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_udp_st_tx); + +/* + * nss_udp_st_tx_sync() + * Transmit a synchronous udp_st message to the FW. 
+ */ +nss_tx_status_t nss_udp_st_tx_sync(struct nss_ctx_instance *nss_ctx, struct nss_udp_st_msg *num) +{ + nss_tx_status_t status; + int ret = 0; + + down(&nss_udp_st_pvt.sem); + nss_udp_st_pvt.cb = (void *)num->cm.cb; + nss_udp_st_pvt.app_data = (void *)num->cm.app_data; + + num->cm.cb = (nss_ptr_t)nss_udp_st_tx_sync_callback; + num->cm.app_data = (nss_ptr_t)NULL; + + status = nss_udp_st_tx(nss_ctx, num); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: nss udp_st msg tx failed\n", nss_ctx); + up(&nss_udp_st_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&nss_udp_st_pvt.complete, msecs_to_jiffies(NSS_UDP_ST_TX_MSG_TIMEOUT)); + if (!ret) { + nss_warning("%px: udp_st tx sync failed due to timeout\n", nss_ctx); + nss_udp_st_pvt.response = NSS_TX_FAILURE; + } + + status = nss_udp_st_pvt.response; + up(&nss_udp_st_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_udp_st_tx_sync); + +/* + * nss_udp_st_msg_init() + * Initialize udp_st message. + */ +void nss_udp_st_msg_init(struct nss_udp_st_msg *num, uint16_t if_num, uint32_t type, uint32_t len, + nss_udp_st_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&num->cm, if_num, type, len, (void *)cb, app_data); +} +EXPORT_SYMBOL(nss_udp_st_msg_init); + +/* + * nss_udp_st_register_handler() + */ +void nss_udp_st_register_handler(struct nss_ctx_instance *nss_ctx) +{ + nss_core_register_handler(nss_ctx, NSS_UDP_ST_INTERFACE, nss_udp_st_msg_handler, NULL); + + nss_udp_st_stats_dentry_create(); + nss_udp_st_strings_dentry_create(); + + sema_init(&nss_udp_st_pvt.sem, 1); + init_completion(&nss_udp_st_pvt.complete); +} + +/* + * nss_udp_st_get_mgr() + * + */ +struct nss_ctx_instance *nss_udp_st_get_mgr(void) +{ + return (void *)&nss_top_main.nss[nss_top_main.udp_st_handler_id]; +} +EXPORT_SYMBOL(nss_udp_st_get_mgr); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_log.c new file mode 100644 index 000000000..bd4e07b20 --- /dev/null +++ 
b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_log.c @@ -0,0 +1,254 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_udp_st_log.c + * NSS UDP Speedtest logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_udp_st_log_message_types_str + * udp_st message strings + */ +static int8_t *nss_udp_st_log_message_types_str[NSS_UDP_ST_MAX_MSG_TYPES] __maybe_unused = { + "UDP_ST Start Msg", + "UDP_ST Stop Msg", + "UDP_ST Configure Rule Msg", + "UDP_ST Unconfigure Rule Msg", + "UDP_ST Stats Sync Msg", + "UDP_ST TX Create Msg", + "UDP_ST TX Destroy Msg", + "UDP_ST Reset Stats Msg", +}; + +/* + * nss_udp_st_log_error_response_types_str + * Strings for error types for udp_st messages + */ +static int8_t *nss_udp_st_log_error_response_types_str[NSS_UDP_ST_ERROR_MAX] __maybe_unused = { + "UDP_ST No Error", + "UDP_ST Incorrect Rate", + "UDP_ST Incorrect Buffer Size", + "UDP_ST Memory Failure", + "UDP_ST Incorrect State", + "UDP_ST Incorrect Flags", + "UDP_ST Entry Exist", + "UDP_ST Entry Add Failed", + "UDP_ST Entry Not Exist", + "UDP_ST Wrong Start Msg Type", + "UDP_ST Wrong Stop Msg Type", + "UDP_ST Too Many Users", + "UDP_ST Unknown Msg Type", + "UDP_ST Pbuf Alloc Failure", + "UDP_ST Pbuf Size Failure", + "UDP_ST Drop Queue", + "UDP_ST Timer call missed", +}; + +/* + * nss_udp_st_log_tx_create_destroy_msg() + * Log NSS udp_st Tx create/destroy message. + */ +static void nss_udp_st_log_tx_create_destroy_msg(struct nss_udp_st_msg *num, uint8_t *msg_type) +{ + struct nss_udp_st_tx_create *create __maybe_unused = &num->msg.create; + nss_trace("%px: NSS udp_st message: %s\n" + "Rate: %u\n" + "Buffer Size: %u\n" + "DSCP: %u\n", + create, + msg_type, + create->rate, + create->buffer_size, + create->dscp); +} + +/* + * nss_udp_st_log_uncfg_rule_msg() + * Log NSS udp_st unconfig rule message. 
+ */ +static void nss_udp_st_log_uncfg_rule_msg(struct nss_udp_st_msg *num) +{ + struct nss_udp_st_cfg *uncfg __maybe_unused = &num->msg.uncfg; + nss_trace("%px: NSS udp_st message: Unconfig\n" + "IP version: %u\n", + uncfg, + uncfg->ip_version); + + if (uncfg->ip_version == NSS_UDP_ST_FLAG_IPV4) { + nss_trace("Src IP: %pI4\n" + "Dest IP: %pI4\n", + &(uncfg->src_ip.ip.ipv4), + &(uncfg->dest_ip.ip.ipv4)); + } else { + nss_trace("Src IP: %pI6\n" + "Dest IP: %pI6\n", + &(uncfg->src_ip.ip.ipv6), + &(uncfg->dest_ip.ip.ipv6)); + } + + nss_trace("Src Port: %u\n Dest Port: %u\n Type: %u\n", + uncfg->src_port, uncfg->dest_port, uncfg->type); +} + +/* + * nss_udp_st_log_cfg_rule_msg() + * Log NSS udp_st config rule message. + */ +static void nss_udp_st_log_cfg_rule_msg(struct nss_udp_st_msg *num) +{ + struct nss_udp_st_cfg *cfg __maybe_unused = &num->msg.cfg; + nss_trace("%px: NSS udp_st message: Config\n" + "IP version: %u\n", + cfg, + cfg->ip_version); + + if (cfg->ip_version == NSS_UDP_ST_FLAG_IPV4) { + nss_trace("Src IP: %pI4\n" + "Dest IP: %pI4\n", + &(cfg->src_ip.ip.ipv4), + &(cfg->dest_ip.ip.ipv4)); + } else { + nss_trace("Src IP: %pI6\n" + "Dest IP: %pI6\n", + &(cfg->src_ip.ip.ipv6), + &(cfg->dest_ip.ip.ipv6)); + } + + nss_trace("Src Port: %u\n Dest Port: %u\n Type: %u\n", + cfg->src_port, cfg->dest_port, cfg->type); +} + +/* + * nss_udp_st_log_stop_msg() + * Log NSS udp_st stop message. + */ +static void nss_udp_st_log_stop_msg(struct nss_udp_st_msg *num) +{ + struct nss_udp_st_stop *stop __maybe_unused = &num->msg.stop; + nss_trace("%px: NSS udp_st message: Stop\n" + "Type: %u\n", + stop, + stop->type); +} + +/* + * nss_udp_st_log_start_msg() + * Log NSS udp_st start message. + */ +static void nss_udp_st_log_start_msg(struct nss_udp_st_msg *num) +{ + struct nss_udp_st_start *start __maybe_unused = &num->msg.start; + nss_trace("%px: NSS udp_st message: Start\n" + "Type: %u\n", + start, + start->type); +} + +/* + * nss_udp_st_log_verbose() + * Log message contents. 
+ */ +static void nss_udp_st_log_verbose(struct nss_udp_st_msg *num) +{ + switch (num->cm.type) { + case NSS_UDP_ST_START_MSG: + nss_udp_st_log_start_msg(num); + break; + + case NSS_UDP_ST_STOP_MSG: + nss_udp_st_log_stop_msg(num); + break; + + case NSS_UDP_ST_CFG_RULE_MSG: + nss_udp_st_log_cfg_rule_msg(num); + break; + + case NSS_UDP_ST_UNCFG_RULE_MSG: + nss_udp_st_log_uncfg_rule_msg(num); + break; + + case NSS_UDP_ST_TX_CREATE_MSG: + nss_udp_st_log_tx_create_destroy_msg(num, "Create"); + break; + + case NSS_UDP_ST_TX_DESTROY_MSG: + nss_udp_st_log_tx_create_destroy_msg(num, "Destroy"); + break; + + case NSS_UDP_ST_RESET_STATS_MSG: + case NSS_UDP_ST_STATS_SYNC_MSG: + break; + + default: + nss_trace("%px: Invalid message type\n", num); + break; + } +} + +/* + * nss_udp_st_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_udp_st_log_tx_msg(struct nss_udp_st_msg *num) +{ + if (num->cm.type >= NSS_UDP_ST_MAX_MSG_TYPES) { + nss_warning("%px: Invalid message type\n", num); + return; + } + + nss_info("%px: type[%d]:%s\n", num, num->cm.type, nss_udp_st_log_message_types_str[num->cm.type]); + nss_udp_st_log_verbose(num); +} + +/* + * nss_udp_st_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_udp_st_log_rx_msg(struct nss_udp_st_msg *num) +{ + if (num->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", num); + return; + } + + if (num->cm.response == NSS_CMN_RESPONSE_NOTIFY || (num->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", num, num->cm.type, + nss_udp_st_log_message_types_str[num->cm.type], + num->cm.response, nss_cmn_response_str[num->cm.response]); + goto verbose; + } + + if (num->cm.error >= NSS_UDP_ST_ERROR_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + num, num->cm.type, nss_udp_st_log_message_types_str[num->cm.type], + num->cm.response, nss_cmn_response_str[num->cm.response], + num->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + num, num->cm.type, nss_udp_st_log_message_types_str[num->cm.type], + num->cm.response, nss_cmn_response_str[num->cm.response], + num->cm.error, nss_udp_st_log_error_response_types_str[num->cm.error]); + +verbose: + nss_udp_st_log_verbose(num); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_log.h new file mode 100644 index 000000000..fa2a551fb --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_log.h @@ -0,0 +1,39 @@ +/* + ****************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_UDP_ST_LOG_H__ +#define __NSS_UDP_ST_LOG_H__ + +/* + * nss_udp_st_log.h + * NSS UDP Speedtest Log Header File. + */ + +/* + * nss_udp_st_log_tx_msg + * Logs a udp_st message that is sent to the NSS firmware. + */ +void nss_udp_st_log_tx_msg(struct nss_udp_st_msg *num); + +/* + * nss_udp_st_log_rx_msg + * Logs a udp_st message that is received from the NSS firmware. + */ +void nss_udp_st_log_rx_msg(struct nss_udp_st_msg *num); + +#endif /* __NSS_UDP_ST_LOG_H__*/ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_stats.c new file mode 100755 index 000000000..0fee47b2e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_stats.c @@ -0,0 +1,178 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_udp_st_stats.h" +#include "nss_udp_st_strings.h" + +uint32_t nss_udp_st_errors[NSS_UDP_ST_ERROR_MAX]; +uint32_t nss_udp_st_stats_time[NSS_UDP_ST_TEST_MAX][NSS_UDP_ST_STATS_TIME_MAX]; + +/* + * nss_udp_st_stats_read() + * Read UDP_ST stats. + */ +static ssize_t nss_udp_st_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + /* + * Max output lines = #stats * NSS_MAX_CORES + + * few blank lines for banner printing + Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_output_lines = NSS_STATS_NODE_MAX + NSS_UDP_ST_ERROR_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + uint32_t i; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + /* + * Note: The assumption here is that we do not have more than 64 stats. 
+ */ + stats_shadow = kzalloc(64 * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "udp_st", NSS_STATS_SINGLE_CORE); + + size_wr += nss_stats_fill_common_stats(NSS_UDP_ST_INTERFACE, NSS_STATS_SINGLE_INSTANCE, lbuf, size_wr, size_al, "udp_st"); + + /* + * Error stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_UDP_ST_ERROR_MAX); i++) { + stats_shadow[i] = nss_udp_st_errors[i]; + } + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("udp_st", "udp_st error stats" + , NSS_STATS_SINGLE_INSTANCE + , nss_udp_st_strings_error_stats + , stats_shadow + , NSS_UDP_ST_ERROR_MAX + , lbuf, size_wr, size_al); + + /* + * Rx time stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_UDP_ST_STATS_TIME_MAX); i++) { + stats_shadow[i] = nss_udp_st_stats_time[NSS_UDP_ST_TEST_RX][i]; + } + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("udp_st", "udp_st Rx time stats (ms)" + , NSS_STATS_SINGLE_INSTANCE + , nss_udp_st_strings_rx_time_stats + , stats_shadow + , NSS_UDP_ST_STATS_TIME_MAX + , lbuf, size_wr, size_al); + + /* + * Tx time stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_UDP_ST_STATS_TIME_MAX); i++) { + stats_shadow[i] = nss_udp_st_stats_time[NSS_UDP_ST_TEST_TX][i]; + } + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("udp_st", "udp_st Tx time stats (ms)" + , NSS_STATS_SINGLE_INSTANCE + , nss_udp_st_strings_tx_time_stats + , stats_shadow + , NSS_UDP_ST_STATS_TIME_MAX + , lbuf, size_wr, size_al); + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_udp_st_stats_ops. 
+ */ +NSS_STATS_DECLARE_FILE_OPERATIONS(udp_st); + +/* + * nss_udp_st_stats_dentry_create() + * Create udp_st statistics debug entry. + */ +void nss_udp_st_stats_dentry_create(void) +{ + nss_stats_create_dentry("udp_st", &nss_udp_st_stats_ops); +} + +/* + * nss_udp_st_stats_reset() + * Reset the udp_st statistics. + */ +void nss_udp_st_stats_reset(uint32_t if_num) +{ + uint32_t i; + + /* + * Reset common node stats. + */ + nss_stats_reset_common_stats(if_num); + + /* + * Reset error stats. + */ + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; i < NSS_UDP_ST_ERROR_MAX; i++) { + nss_udp_st_errors[i] = 0; + } + spin_unlock_bh(&nss_top_main.stats_lock); +} + +/* + * nss_udp_st_stats_sync() + * Handle the syncing of UDP_ST node statistics. + */ +void nss_udp_st_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_udp_st_stats *nus) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + uint32_t i, j; + + spin_lock_bh(&nss_top->stats_lock); + + nss_top->stats_node[NSS_UDP_ST_INTERFACE][NSS_STATS_NODE_RX_PKTS] += nus->nstats.node_stats.rx_packets; + nss_top->stats_node[NSS_UDP_ST_INTERFACE][NSS_STATS_NODE_RX_BYTES] += nus->nstats.node_stats.rx_bytes; + nss_top->stats_node[NSS_UDP_ST_INTERFACE][NSS_STATS_NODE_TX_PKTS] += nus->nstats.node_stats.tx_packets; + nss_top->stats_node[NSS_UDP_ST_INTERFACE][NSS_STATS_NODE_TX_BYTES] += nus->nstats.node_stats.tx_bytes; + + for (i = 0; i < NSS_UDP_ST_ERROR_MAX; i++) { + nss_udp_st_errors[i] += nus->nstats.errors[i]; + } + + for (i = 0; i < NSS_UDP_ST_TEST_MAX; i++) { + for (j = 0; j < NSS_UDP_ST_STATS_TIME_MAX; j++) { + nss_udp_st_stats_time[i][j] = nus->time_stats[i][j]; + } + } + spin_unlock_bh(&nss_top->stats_lock); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_stats.h new file mode 100755 index 000000000..86b387da8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_stats.h @@ -0,0 +1,36 @@ +/* + 
************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_UDP_ST_STATS_H +#define __NSS_UDP_ST_STATS_H + +#include + +/* + * nss_udp_st_stats.h + * NSS driver UDP_ST statistics header file. + */ + +/* + * udp_st statistics APIs + */ +extern void nss_udp_st_stats_reset(uint32_t if_num); +extern void nss_udp_st_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_udp_st_stats *nus); +extern void nss_udp_st_stats_dentry_create(void); + +#endif /* __NSS_UDP_ST_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_strings.c new file mode 100644 index 000000000..3b67b13b9 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_strings.c @@ -0,0 +1,151 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. 
+ * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" + +/* + * nss_udp_st_strings_error_stats + * Statistics strings for udp_st errors. + */ +struct nss_stats_info nss_udp_st_strings_error_stats[NSS_UDP_ST_ERROR_MAX] = { + {"error_none" , NSS_STATS_TYPE_SPECIAL}, + {"incorrect_rate" , NSS_STATS_TYPE_DROP}, + {"incorrect_buffer_size" , NSS_STATS_TYPE_DROP}, + {"memory_failure" , NSS_STATS_TYPE_DROP}, + {"incorrect_state" , NSS_STATS_TYPE_DROP}, + {"incorrect_flags" , NSS_STATS_TYPE_DROP}, + {"entry_exist" , NSS_STATS_TYPE_DROP}, + {"entry_add_failed" , NSS_STATS_TYPE_DROP}, + {"entry_not_exist" , NSS_STATS_TYPE_DROP}, + {"wrong_start_msg_type" , NSS_STATS_TYPE_DROP}, + {"wrong_stop_msg_type" , NSS_STATS_TYPE_DROP}, + {"too_many_users" , NSS_STATS_TYPE_DROP}, + {"unknown_msg_type" , NSS_STATS_TYPE_DROP}, + {"pb_alloc_failure" , NSS_STATS_TYPE_DROP}, + {"pb_size_failure" , NSS_STATS_TYPE_DROP}, + {"drop_queue_failure" , NSS_STATS_TYPE_DROP}, + {"timer call is missed" , NSS_STATS_TYPE_SPECIAL}, +}; + +/* + * nss_udp_st_strings_rx_time_stats + * Statistics strings for Rx udp_st time. 
+ */ +struct nss_stats_info nss_udp_st_strings_rx_time_stats[NSS_UDP_ST_STATS_TIME_MAX] = { + {"rx_start_time" , NSS_STATS_TYPE_SPECIAL}, + {"rx_current_time" , NSS_STATS_TYPE_SPECIAL}, + {"rx_elapsed_time" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_udp_st_strings_tx_time_stats + * Statistics strings for Tx udp_st time. + */ +struct nss_stats_info nss_udp_st_strings_tx_time_stats[NSS_UDP_ST_STATS_TIME_MAX] = { + {"tx_start_time" , NSS_STATS_TYPE_SPECIAL}, + {"tx_current_time" , NSS_STATS_TYPE_SPECIAL}, + {"tx_elapsed_time" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_udp_st_error_stats_strings_read() + * Read udp_st error statistics names. + */ +static ssize_t nss_udp_st_error_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_udp_st_strings_error_stats, NSS_UDP_ST_ERROR_MAX); +} + +/* + * nss_udp_st_rx_time_stats_strings_read() + * Read Rx udp_st time statistics names. + */ +static ssize_t nss_udp_st_rx_time_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_udp_st_strings_rx_time_stats, NSS_UDP_ST_STATS_TIME_MAX); +} + +/* + * nss_udp_st_tx_time_stats_strings_read() + * Read Tx udp_st time statistics names. + */ +static ssize_t nss_udp_st_tx_time_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_udp_st_strings_tx_time_stats, NSS_UDP_ST_STATS_TIME_MAX); +} + +/* + * nss_udp_st_error_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(udp_st_error_stats); + +/* + * nss_udp_st_rx_time_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(udp_st_rx_time_stats); + +/* + * nss_udp_st_tx_time_stats_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(udp_st_tx_time_stats); + +/* + * nss_udp_st_strings_dentry_create() + * Create udp_st statistics strings debug entry. 
+ */ +void nss_udp_st_strings_dentry_create(void) +{ + struct dentry *dir_d; + struct dentry *file_d; + + if (!nss_top_main.strings_dentry) { + nss_warning("qca-nss-drv/strings is not present"); + return; + } + + dir_d = debugfs_create_dir("udp_st", nss_top_main.strings_dentry); + if (!dir_d) { + nss_warning("Failed to create qca-nss-drv/strings/udp_st directory"); + return; + } + + file_d = debugfs_create_file("error_stats_str", 0400, dir_d, &nss_top_main, &nss_udp_st_error_stats_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/stats/udp_st/error_stats_str file"); + goto fail; + } + + file_d = debugfs_create_file("rx_time_stats_str", 0400, dir_d, &nss_top_main, &nss_udp_st_rx_time_stats_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/stats/udp_st/rx_time_stats_str file"); + goto fail; + } + + file_d = debugfs_create_file("tx_time_stats_str", 0400, dir_d, &nss_top_main, &nss_udp_st_tx_time_stats_strings_ops); + if (!file_d) { + nss_warning("Failed to create qca-nss-drv/stats/udp_st/tx_time_stats_str file"); + goto fail; + } + + return; +fail: + debugfs_remove_recursive(dir_d); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_strings.h new file mode 100644 index 000000000..6f5b513cf --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_udp_st_strings.h @@ -0,0 +1,28 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_UDP_ST_STRINGS_H +#define __NSS_UDP_ST_STRINGS_H + +extern struct nss_stats_info nss_udp_st_strings_error_stats[NSS_UDP_ST_ERROR_MAX]; +extern struct nss_stats_info nss_udp_st_strings_rx_time_stats[NSS_UDP_ST_STATS_TIME_MAX]; +extern struct nss_stats_info nss_udp_st_strings_tx_time_stats[NSS_UDP_ST_STATS_TIME_MAX]; + +extern void nss_udp_st_strings_dentry_create(void); + +#endif /* __NSS_UDP_ST_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_unaligned.c b/feeds/ipq807x/qca-nss-drv/src/nss_unaligned.c new file mode 100644 index 000000000..099abdb0a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_unaligned.c @@ -0,0 +1,91 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_unaligned.c + * NSS unaligned APIs + */ + +#include "nss_tx_rx_common.h" +#include "nss_unaligned_stats.h" +#include "nss_unaligned_log.h" + +/* + * nss_unaligned_update_stats() + * Updates the statistics in the nss_ctx. + */ +static void nss_unaligned_update_stats(struct nss_ctx_instance *nss_ctx, + struct nss_unaligned_stats_msg *usm) +{ + uint32_t start_index = NSS_UNALIGNED_OPS_PER_MSG * usm->current_iteration; + uint32_t i; + spin_lock_bh(&nss_top_main.stats_lock); + nss_ctx->unaligned_stats.trap_count = usm->trap_count; + for (i = 0; i < NSS_UNALIGNED_OPS_PER_MSG; i++) { + uint32_t index = i + start_index; + if (unlikely(index >= NSS_UNALIGNED_EMULATED_OPS)) { + break; + } + nss_ctx->unaligned_stats.ops[index] = usm->ops[i]; + } + spin_unlock_bh(&nss_top_main.stats_lock); +} + +/* + * nss_unaligned_msg_handler() + * Handles metadata messages on the unaligned interface. + */ +static void nss_unaligned_msg_handler(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_unaligned_msg *um = (struct nss_unaligned_msg *)ncm; + + /* + * Sanity checks on message + */ + if (um->cm.type >= NSS_UNALIGNED_MSG_MAX) { + nss_warning("%px: message type out of range: %d\n", nss_ctx, um->cm.type); + return; + } + + if (nss_cmn_get_msg_len(&(um->cm)) > sizeof(struct nss_unaligned_msg)) { + nss_warning("%px: message length is invalid: %d\n", nss_ctx, nss_cmn_get_msg_len(&(um->cm))); + return; + } + + nss_unaligned_log_rx_msg(um); + + switch (um->cm.type) { + case NSS_UNALIGNED_MSG_STATS: + nss_unaligned_update_stats(nss_ctx, &um->msg.stats_msg); + return; + } + + nss_core_log_msg_failures(nss_ctx, ncm); +} + +/* + * nss_unaligned_register_handler() + * Registers message handler on the NSS unaligned interface and stats dentry. 
+ */ +void nss_unaligned_register_handler(struct nss_ctx_instance *nss_ctx) +{ + nss_core_register_handler(nss_ctx, NSS_UNALIGNED_INTERFACE, nss_unaligned_msg_handler, NULL); + + if (nss_ctx->id == NSS_CORE_0) { + nss_unaligned_stats_dentry_create(); + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_log.c new file mode 100644 index 000000000..079e2d76c --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_log.c @@ -0,0 +1,75 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_unaligned_log.c + * NSS unaligned logger file. + */ + +#include "nss_core.h" + +/* + * nss_unaligned_log_message_types_str + * NSS unaligned message strings + */ +static int8_t *nss_unaligned_log_message_types_str[NSS_UNALIGNED_MSG_MAX] __maybe_unused = { + "Unaligned Stats Message", +}; + +/* + * nss_unaligned_log_verbose() + * Log message contents. 
+ */ +static void nss_unaligned_log_verbose(struct nss_unaligned_msg *um) +{ + switch (um->cm.type) { + case NSS_UNALIGNED_MSG_STATS: + /* + * No log for valid stats message. + */ + break; + + default: + nss_trace("%px: Invalid message type\n", um); + break; + } +} + +/* + * nss_unaligned_log_rx_msg() + * Log messages received from FW. + */ +void nss_unaligned_log_rx_msg(struct nss_unaligned_msg *um) +{ + if (um->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", um); + return; + } + + if (um->cm.response == NSS_CMN_RESPONSE_NOTIFY || (um->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", um, um->cm.type, + nss_unaligned_log_message_types_str[um->cm.type], + um->cm.response, nss_cmn_response_str[um->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + um, um->cm.type, nss_unaligned_log_message_types_str[um->cm.type], + um->cm.response, nss_cmn_response_str[um->cm.response]); + +verbose: + nss_unaligned_log_verbose(um); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_log.h new file mode 100644 index 000000000..98ec707de --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_log.h @@ -0,0 +1,31 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_UNALIGNED_LOG_H__ +#define __NSS_UNALIGNED_LOG_H__ + +/* + * nss_unaligned_log.h + * NSS Unaligned Log Header File. + */ + +/* + * nss_unaligned_log_rx_msg + * Logs an unaligned trap handler message that is received from the NSS firmware. + */ +void nss_unaligned_log_rx_msg(struct nss_unaligned_msg *um); + +#endif /* __NSS_UNALIGNED_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_stats.c new file mode 100644 index 000000000..af0fd74be --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_stats.c @@ -0,0 +1,88 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_unaligned_stats.h" + +/* + * nss_unaligned_stats_read() + * Read unaligned stats + */ +static ssize_t nss_unaligned_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + uint32_t max_output_lines = NSS_MAX_CORES * NSS_UNALIGNED_OPS_PER_MSG; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + struct nss_unaligned_stats *stats_shadow; + uint32_t i, j; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(!lbuf)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_MAX_CORES * sizeof(struct nss_unaligned_stats), GFP_KERNEL); + if (unlikely(!stats_shadow)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; i < NSS_MAX_CORES; i++) { + stats_shadow[i] = nss_top_main.nss[i].unaligned_stats; + } + spin_unlock_bh(&nss_top_main.stats_lock); + + for (i = 0; i < NSS_MAX_CORES; i++) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "core: %u, total unaligned traps: %llu\n", + i, stats_shadow[i].trap_count); + for (j = 0; j < NSS_UNALIGNED_OPS_PER_MSG; j++) { + struct nss_unaligned_stats_op op = stats_shadow[i].ops[j]; + if (op.count == 0) { + break; + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "op: %2x, ext: %2x, count:%10llu, min: %10u, avg: %10u, max: %10u\n", + op.opcode_primary, op.opcode_extension, op.count, op.ticks_min, + op.ticks_avg, op.ticks_max); + } + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + + return bytes_read; +} + +/* + * nss_unaligned_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(unaligned) + +/* + * nss_unaligned_stats_dentry_create() + * Create unaligned 
statistics debug entry. + */ +void nss_unaligned_stats_dentry_create(void) +{ + nss_stats_create_dentry("unaligned", &nss_unaligned_stats_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_stats.h new file mode 100644 index 000000000..761cda634 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_unaligned_stats.h @@ -0,0 +1,22 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_UNALIGNED_STATS_H +#define __NSS_UNALIGNED_STATS_H + +extern void nss_unaligned_stats_dentry_create(void); + +#endif diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_virt_if.c b/feeds/ipq807x/qca-nss-drv/src/nss_virt_if.c new file mode 100644 index 000000000..b530517cf --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_virt_if.c @@ -0,0 +1,736 @@ +/* + ************************************************************************** + * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_virt_if.c + * NSS virtual/redirect handler APIs + */ + +#include "nss_tx_rx_common.h" +#include "nss_virt_if_stats.h" +#include + +#define NSS_VIRT_IF_TX_TIMEOUT 3000 /* 3 Seconds */ +#define NSS_VIRT_IF_GET_INDEX(if_num) (if_num-NSS_DYNAMIC_IF_START) + +extern int nss_ctl_redirect; + +/* + * Data structure that holds the virtual interface context. + */ +struct nss_virt_if_handle *nss_virt_if_handle_t[NSS_MAX_DYNAMIC_INTERFACES]; + +/* + * Spinlock to protect the global data structure virt_handle. + */ +DEFINE_SPINLOCK(nss_virt_if_lock); + +/* + * nss_virt_if_get_context() + */ +struct nss_ctx_instance *nss_virt_if_get_context(void) +{ + return &nss_top_main.nss[nss_top_main.virt_if_handler_id]; +} + +/* + * nss_virt_if_verify_if_num() + * Verify if_num passed to us. 
+ */ +bool nss_virt_if_verify_if_num(uint32_t if_num) +{ + enum nss_dynamic_interface_type type = nss_dynamic_interface_get_type(nss_virt_if_get_context(), if_num); + + return type == NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_N2H + || type == NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_H2N; +} +EXPORT_SYMBOL(nss_virt_if_verify_if_num); + +/* + * nss_virt_if_msg_handler() + * Handle msg responses from the FW on virtual interfaces + */ +static void nss_virt_if_msg_handler(struct nss_ctx_instance *nss_ctx, + struct nss_cmn_msg *ncm, + void *app_data) +{ + struct nss_virt_if_msg *nvim = (struct nss_virt_if_msg *)ncm; + int32_t if_num; + + nss_virt_if_msg_callback_t cb; + struct nss_virt_if_handle *handle = NULL; + + /* + * Sanity check the message type + */ + if (ncm->type > NSS_VIRT_IF_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return; + } + + /* + * Messages value that are within the base class are handled by the base class. + */ + if (ncm->type < NSS_IF_MAX_MSG_TYPES) { + return nss_if_msg_handler(nss_ctx, ncm, app_data); + } + + if (!nss_virt_if_verify_if_num(ncm->interface)) { + nss_warning("%px: response for another interface: %d", nss_ctx, ncm->interface); + return; + } + + if_num = NSS_VIRT_IF_GET_INDEX(ncm->interface); + + spin_lock_bh(&nss_virt_if_lock); + if (!nss_virt_if_handle_t[if_num]) { + spin_unlock_bh(&nss_virt_if_lock); + nss_warning("%px: virt_if handle is NULL\n", nss_ctx); + return; + } + + handle = nss_virt_if_handle_t[if_num]; + spin_unlock_bh(&nss_virt_if_lock); + + switch (nvim->cm.type) { + case NSS_VIRT_IF_STATS_SYNC_MSG: + nss_virt_if_stats_sync(handle, &nvim->msg.stats); + break; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Update the callback and app_data for NOTIFY messages, IPv4 sends all notify messages + * to the same callback/app_data. 
+ */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].ndev; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + /* + * Callback + */ + cb = (nss_virt_if_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, ncm); +} + +/* + * nss_virt_if_callback + * Callback to handle the completion of NSS ->HLOS messages. + */ +static void nss_virt_if_callback(void *app_data, struct nss_cmn_msg *ncm) +{ + struct nss_virt_if_handle *handle = (struct nss_virt_if_handle *)app_data; + struct nss_virt_if_pvt *nvip = handle->pvt; + + if (ncm->response != NSS_CMN_RESPONSE_ACK) { + nss_warning("%px: virt_if Error response %d\n", handle->nss_ctx, ncm->response); + nvip->response = NSS_TX_FAILURE; + complete(&nvip->complete); + return; + } + + nvip->response = NSS_TX_SUCCESS; + complete(&nvip->complete); +} + +/* + * nss_virt_if_tx_msg_sync + * Send a message from HLOS to NSS synchronously. + */ +static nss_tx_status_t nss_virt_if_tx_msg_sync(struct nss_virt_if_handle *handle, + struct nss_virt_if_msg *nvim) +{ + nss_tx_status_t status; + int ret = 0; + struct nss_virt_if_pvt *nwip = handle->pvt; + struct nss_ctx_instance *nss_ctx = handle->nss_ctx; + + down(&nwip->sem); + + status = nss_virt_if_tx_msg(nss_ctx, nvim); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: nss_virt_if_msg failed\n", nss_ctx); + up(&nwip->sem); + return status; + } + + ret = wait_for_completion_timeout(&nwip->complete, + msecs_to_jiffies(NSS_VIRT_IF_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: virt_if tx failed due to timeout\n", nss_ctx); + nwip->response = NSS_TX_FAILURE; + } + + status = nwip->response; + up(&nwip->sem); + + return status; +} + +/* + * nss_virt_if_msg_init() + * Initialize virt specific message structure. 
+ */ +static void nss_virt_if_msg_init(struct nss_virt_if_msg *nvim, + uint16_t if_num, + uint32_t type, + uint32_t len, + nss_virt_if_msg_callback_t cb, + struct nss_virt_if_handle *app_data) +{ + nss_cmn_msg_init(&nvim->cm, if_num, type, len, (void *)cb, (void *)app_data); +} + +/* + * nss_virt_if_handle_destroy_sync() + * Destroy the virt handle either due to request from user or due to error, synchronously. + */ +static int nss_virt_if_handle_destroy_sync(struct nss_virt_if_handle *handle) +{ + nss_tx_status_t status; + int32_t if_num_n2h = handle->if_num_n2h; + int32_t if_num_h2n = handle->if_num_h2n; + int32_t index_n2h; + int32_t index_h2n; + + if (!nss_virt_if_verify_if_num(if_num_n2h) || !nss_virt_if_verify_if_num(if_num_h2n)) { + nss_warning("%px: bad interface numbers %d %d\n", handle->nss_ctx, if_num_n2h, if_num_h2n); + return NSS_TX_FAILURE_BAD_PARAM; + } + + index_n2h = NSS_VIRT_IF_GET_INDEX(if_num_n2h); + index_h2n = NSS_VIRT_IF_GET_INDEX(if_num_h2n); + + status = nss_dynamic_interface_dealloc_node(if_num_n2h, NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_N2H); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Dynamic interface destroy failed status %d\n", handle->nss_ctx, status); + return status; + } + + status = nss_dynamic_interface_dealloc_node(if_num_h2n, NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_H2N); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Dynamic interface destroy failed status %d\n", handle->nss_ctx, status); + return status; + } + + spin_lock_bh(&nss_virt_if_lock); + nss_virt_if_handle_t[index_n2h] = NULL; + nss_virt_if_handle_t[index_h2n] = NULL; + spin_unlock_bh(&nss_virt_if_lock); + + kfree(handle->pvt); + kfree(handle); + + return status; +} + +/* + * nss_virt_if_handle_create_sync() + * Initialize virt handle which holds the if_num and stats per interface. 
+ */ +static struct nss_virt_if_handle *nss_virt_if_handle_create_sync(struct nss_ctx_instance *nss_ctx, int32_t if_num_n2h, int32_t if_num_h2n, int32_t *cmd_rsp) +{ + int32_t index_n2h; + int32_t index_h2n; + struct nss_virt_if_handle *handle; + + if (!nss_virt_if_verify_if_num(if_num_n2h) || !nss_virt_if_verify_if_num(if_num_h2n)) { + nss_warning("%px: bad interface numbers %d %d\n", nss_ctx, if_num_n2h, if_num_h2n); + return NULL; + } + + index_n2h = NSS_VIRT_IF_GET_INDEX(if_num_n2h); + index_h2n = NSS_VIRT_IF_GET_INDEX(if_num_h2n); + + handle = (struct nss_virt_if_handle *)kzalloc(sizeof(struct nss_virt_if_handle), + GFP_KERNEL); + if (!handle) { + nss_warning("%px: handle memory alloc failed\n", nss_ctx); + *cmd_rsp = NSS_VIRT_IF_ALLOC_FAILURE; + goto error1; + } + + handle->nss_ctx = nss_ctx; + handle->if_num_n2h = if_num_n2h; + handle->if_num_h2n = if_num_h2n; + handle->pvt = (struct nss_virt_if_pvt *)kzalloc(sizeof(struct nss_virt_if_pvt), + GFP_KERNEL); + if (!handle->pvt) { + nss_warning("%px: failure allocating memory for nss_virt_if_pvt\n", nss_ctx); + *cmd_rsp = NSS_VIRT_IF_ALLOC_FAILURE; + goto error2; + } + + handle->cb = NULL; + handle->app_data = NULL; + + spin_lock_bh(&nss_virt_if_lock); + nss_virt_if_handle_t[index_n2h] = handle; + nss_virt_if_handle_t[index_h2n] = handle; + spin_unlock_bh(&nss_virt_if_lock); + + *cmd_rsp = NSS_VIRT_IF_SUCCESS; + + return handle; + +error2: + kfree(handle); +error1: + return NULL; +} + +/* + * nss_virt_if_register_handler_sync() + * register msg handler for virtual interface and initialize semaphore and completion. 
+ */ +static uint32_t nss_virt_if_register_handler_sync(struct nss_ctx_instance *nss_ctx, struct nss_virt_if_handle *handle) +{ + uint32_t ret; + struct nss_virt_if_pvt *nvip = NULL; + int32_t if_num_n2h = handle->if_num_n2h; + int32_t if_num_h2n = handle->if_num_h2n; + + ret = nss_core_register_handler(nss_ctx, if_num_n2h, nss_virt_if_msg_handler, NULL); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Failed to register message handler for redir_n2h interface %d\n", nss_ctx, if_num_n2h); + return NSS_VIRT_IF_CORE_FAILURE; + } + + ret = nss_core_register_handler(nss_ctx, if_num_h2n, nss_virt_if_msg_handler, NULL); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num_n2h); + nss_warning("%px: Failed to register message handler for redir_h2n interface %d\n", nss_ctx, if_num_h2n); + return NSS_VIRT_IF_CORE_FAILURE; + } + + nvip = handle->pvt; + if (!nvip->sem_init_done) { + sema_init(&nvip->sem, 1); + init_completion(&nvip->complete); + nvip->sem_init_done = 1; + } + + nss_virt_if_stats_dentry_create(); + return NSS_VIRT_IF_SUCCESS; +} + +/* + * nss_virt_if_create_sync_nexthop() + * Create redir_n2h and redir_h2n interfaces, synchronously and associate it with same netdev. 
+ */ +struct nss_virt_if_handle *nss_virt_if_create_sync_nexthop(struct net_device *netdev, uint32_t nexthop_n2h, uint32_t nexthop_h2n) +{ + struct nss_ctx_instance *nss_ctx = nss_virt_if_get_context(); + struct nss_virt_if_msg nvim; + struct nss_virt_if_config_msg *nvcm; + uint32_t ret; + struct nss_virt_if_handle *handle = NULL; + int32_t if_num_n2h, if_num_h2n; + + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: Interface could not be created as core not ready\n", nss_ctx); + return NULL; + } + + if_num_n2h = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_N2H); + if (if_num_n2h < 0) { + nss_warning("%px: failure allocating redir_n2h\n", nss_ctx); + return NULL; + } + + if_num_h2n = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_H2N); + if (if_num_h2n < 0) { + nss_warning("%px: failure allocating redir_h2n\n", nss_ctx); + nss_dynamic_interface_dealloc_node(if_num_n2h, NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_N2H); + return NULL; + } + + handle = nss_virt_if_handle_create_sync(nss_ctx, if_num_n2h, if_num_h2n, &ret); + if (!handle) { + nss_warning("%px: virt_if handle creation failed ret %d\n", nss_ctx, ret); + nss_dynamic_interface_dealloc_node(if_num_n2h, NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_N2H); + nss_dynamic_interface_dealloc_node(if_num_h2n, NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_H2N); + return NULL; + } + + /* + * Initializes the semaphore and also sets the msg handler for if_num. 
+ */ + ret = nss_virt_if_register_handler_sync(nss_ctx, handle); + if (ret != NSS_VIRT_IF_SUCCESS) { + nss_warning("%px: Registration handler failed reason: %d\n", nss_ctx, ret); + goto error1; + } + + nss_virt_if_msg_init(&nvim, handle->if_num_n2h, NSS_VIRT_IF_TX_CONFIG_MSG, + sizeof(struct nss_virt_if_config_msg), nss_virt_if_callback, handle); + + nvcm = &nvim.msg.if_config; + nvcm->flags = 0; + nvcm->sibling = if_num_h2n; + nvcm->nexthop = nexthop_n2h; + memcpy(nvcm->mac_addr, netdev->dev_addr, ETH_ALEN); + + ret = nss_virt_if_tx_msg_sync(handle, &nvim); + if (ret != NSS_TX_SUCCESS) { + nss_warning("%px: nss_virt_if_tx_msg_sync failed %u\n", nss_ctx, ret); + goto error2; + } + + nvim.cm.interface = if_num_h2n; + nvcm->sibling = if_num_n2h; + nvcm->nexthop = nexthop_h2n; + + ret = nss_virt_if_tx_msg_sync(handle, &nvim); + if (ret != NSS_TX_SUCCESS) { + nss_warning("%px: nss_virt_if_tx_msg_sync failed %u\n", nss_ctx, ret); + goto error2; + } + + nss_core_register_subsys_dp(nss_ctx, handle->if_num_n2h, NULL, NULL, NULL, netdev, 0); + nss_core_register_subsys_dp(nss_ctx, handle->if_num_h2n, NULL, NULL, NULL, netdev, 0); + + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num_n2h, NSS_VIRT_IF_DP_REDIR_N2H); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num_h2n, NSS_VIRT_IF_DP_REDIR_H2N); + + /* + * Hold a reference to the net_device + */ + dev_hold(netdev); + + /* + * The context returned is the handle interface # which contains all the info related to + * the interface if_num. + */ + + return handle; + +error2: + nss_core_unregister_handler(nss_ctx, if_num_n2h); + nss_core_unregister_handler(nss_ctx, if_num_h2n); + +error1: + nss_virt_if_handle_destroy_sync(handle); + return NULL; +} +EXPORT_SYMBOL(nss_virt_if_create_sync_nexthop); + +/* + * nss_virt_if_create_sync() + * Create redir_n2h and redir_h2n interfaces, synchronously and associate it with same netdev. + * It uses the default nexthop interfaces. 
+ * + * + */ +struct nss_virt_if_handle *nss_virt_if_create_sync(struct net_device *netdev) +{ + /* + * NSS_N2H_INTERFACE is the nexthop of the dynamic interface which is created for handling the + * n2h traffic. + * NSS_ETH_RX_INTERFACE is the nexthop of the dynamic interface which is created for handling the + * h2n traffic. + */ + return nss_virt_if_create_sync_nexthop(netdev, NSS_N2H_INTERFACE, NSS_ETH_RX_INTERFACE); +} +EXPORT_SYMBOL(nss_virt_if_create_sync); + +/* + * nss_virt_if_destroy_sync() + * Destroy the virt interface associated with the interface number, synchronously. + */ +nss_tx_status_t nss_virt_if_destroy_sync(struct nss_virt_if_handle *handle) +{ + nss_tx_status_t status; + struct net_device *dev; + int32_t if_num_n2h; + int32_t if_num_h2n; + struct nss_ctx_instance *nss_ctx; + uint32_t ret; + + if (!handle) { + nss_warning("handle is NULL\n"); + return NSS_TX_FAILURE_BAD_PARAM; + } + + if_num_n2h = handle->if_num_n2h; + if_num_h2n = handle->if_num_h2n; + nss_ctx = handle->nss_ctx; + + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: Interface could not be destroyed as core not ready\n", nss_ctx); + return NSS_TX_FAILURE_NOT_READY; + } + + spin_lock_bh(&nss_top_main.lock); + if (!nss_ctx->subsys_dp_register[if_num_n2h].ndev || !nss_ctx->subsys_dp_register[if_num_h2n].ndev) { + spin_unlock_bh(&nss_top_main.lock); + nss_warning("%px: Unregister virt interface %d %d: no context\n", nss_ctx, if_num_n2h, if_num_h2n); + return NSS_TX_FAILURE_BAD_PARAM; + } + + dev = nss_ctx->subsys_dp_register[if_num_n2h].ndev; + nss_assert(dev == nss_ctx->subsys_dp_register[if_num_h2n].ndev); + nss_core_unregister_subsys_dp(nss_ctx, if_num_n2h); + nss_core_unregister_subsys_dp(nss_ctx, if_num_h2n); + spin_unlock_bh(&nss_top_main.lock); + dev_put(dev); + + status = nss_virt_if_handle_destroy_sync(handle); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: handle destroy failed for if_num_n2h %d and if_num_h2n %d\n", nss_ctx, 
if_num_n2h, if_num_h2n); + return NSS_TX_FAILURE; + } + + ret = nss_core_unregister_handler(nss_ctx, if_num_n2h); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for redir_n2h interface %d with NSS core\n", nss_ctx, if_num_n2h); + return NSS_TX_FAILURE_BAD_PARAM; + } + + ret = nss_core_unregister_handler(nss_ctx, if_num_h2n); + if (ret != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Not able to unregister handler for redir_h2n interface %d with NSS core\n", nss_ctx, if_num_h2n); + return NSS_TX_FAILURE_BAD_PARAM; + } + + return status; +} +EXPORT_SYMBOL(nss_virt_if_destroy_sync); + +/* + * nss_virt_if_tx_buf() + * HLOS interface has received a packet which we redirect to the NSS, if appropriate to do so. + */ +nss_tx_status_t nss_virt_if_tx_buf(struct nss_virt_if_handle *handle, + struct sk_buff *skb) +{ + int32_t if_num = handle->if_num_h2n; + struct nss_ctx_instance *nss_ctx = handle->nss_ctx; + int cpu = 0; + + if (unlikely(nss_ctl_redirect == 0)) { + return NSS_TX_FAILURE_NOT_ENABLED; + } + + if (unlikely(skb->vlan_tci)) { + return NSS_TX_FAILURE_NOT_SUPPORTED; + } + + if (!nss_virt_if_verify_if_num(if_num)) { + nss_warning("%px: bad interface number %d\n", nss_ctx, if_num); + return NSS_TX_FAILURE_BAD_PARAM; + } + + nss_trace("%px: Virtual Rx packet, if_num:%d, skb:%px", nss_ctx, if_num, skb); + + /* + * Sanity check the SKB to ensure that it's suitable for us + */ + if (unlikely(skb->len <= ETH_HLEN)) { + nss_warning("%px: Virtual Rx packet: %px too short", nss_ctx, skb); + return NSS_TX_FAILURE_TOO_SHORT; + } + + /* + * set skb queue mapping + */ + cpu = get_cpu(); + put_cpu(); + skb_set_queue_mapping(skb, cpu); + + return nss_core_send_packet(nss_ctx, skb, if_num, H2N_BIT_FLAG_VIRTUAL_BUFFER | + H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_virt_if_tx_buf); + +/* + * nss_virt_if_tx_msg() + */ +nss_tx_status_t nss_virt_if_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_virt_if_msg *nvim) +{ + struct 
nss_cmn_msg *ncm = &nvim->cm; + + /* + * Sanity check the message + */ + if (!nss_virt_if_verify_if_num(ncm->interface)) { + nss_warning("%px: tx request for another interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type > NSS_VIRT_IF_MAX_MSG_TYPES) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, nvim, sizeof(*nvim), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_virt_if_tx_msg); + +/* + * nss_virt_if_xmit_callback_register() + * Register virtual interface xmit callback. + */ +void nss_virt_if_xmit_callback_register(struct nss_virt_if_handle *handle, + nss_virt_if_xmit_callback_t cb) +{ + struct nss_ctx_instance *nss_ctx; + struct nss_subsystem_dataplane_register *reg; + + if (!handle) { + nss_warning("handle is NULL\n"); + return; + } + + nss_ctx = handle->nss_ctx; + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_virt_if_verify_if_num(handle->if_num_n2h)) { + nss_warning("if_num is invalid\n"); + return; + } + + reg = &nss_ctx->subsys_dp_register[handle->if_num_n2h]; + reg->xmit_cb = cb; +} +EXPORT_SYMBOL(nss_virt_if_xmit_callback_register); + +/* + * nss_virt_if_xmit_callback_unregister() + * Unregister virtual interface xmit callback. 
+ */ +void nss_virt_if_xmit_callback_unregister(struct nss_virt_if_handle *handle) +{ + struct nss_ctx_instance *nss_ctx; + struct nss_subsystem_dataplane_register *reg; + + if (!handle) { + nss_warning("handle is NULL\n"); + return; + } + + nss_ctx = handle->nss_ctx; + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_virt_if_verify_if_num(handle->if_num_n2h)) { + nss_warning("if_num is invalid\n"); + return; + } + + reg = &nss_ctx->subsys_dp_register[handle->if_num_n2h]; + reg->xmit_cb = NULL; +} +EXPORT_SYMBOL(nss_virt_if_xmit_callback_unregister); + +/* + * nss_virt_if_register() + */ +void nss_virt_if_register(struct nss_virt_if_handle *handle, + nss_virt_if_data_callback_t data_callback, + struct net_device *netdev) +{ + struct nss_ctx_instance *nss_ctx; + int32_t if_num; + uint32_t status; + + if (!handle) { + nss_warning("handle is NULL\n"); + return; + } + + nss_ctx = handle->nss_ctx; + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_virt_if_verify_if_num(handle->if_num_n2h)) { + nss_warning("if_num is invalid\n"); + return; + } + + if_num = handle->if_num_n2h; + + nss_core_register_subsys_dp(nss_ctx, if_num, data_callback, NULL, NULL, netdev, (uint32_t)netdev->features); + status = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to unregister event handler for interface(%u)", nss_ctx, if_num); + return; + } +} +EXPORT_SYMBOL(nss_virt_if_register); + +/* + * nss_virt_if_unregister() + */ +void nss_virt_if_unregister(struct nss_virt_if_handle *handle) +{ + struct nss_ctx_instance *nss_ctx; + int32_t if_num; + uint32_t status; + + if (!handle) { + nss_warning("handle is NULL\n"); + return; + } + + nss_ctx = handle->nss_ctx; + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_virt_if_verify_if_num(handle->if_num_n2h)) { + nss_warning("if_num is invalid\n"); + return; + } + + if_num = handle->if_num_n2h; + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + status = 
nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to unregister event handler for interface(%u)", nss_ctx, if_num); + return; + } +} +EXPORT_SYMBOL(nss_virt_if_unregister); + +/* + * nss_virt_if_get_interface_num() + * Get interface number for a virtual interface + */ +int32_t nss_virt_if_get_interface_num(struct nss_virt_if_handle *handle) +{ + if (!handle) { + nss_warning("virt_if handle is NULL\n"); + return -1; + } + + /* + * Return if_num_n2h whose datapath type is 0. + */ + return handle->if_num_n2h; +} +EXPORT_SYMBOL(nss_virt_if_get_interface_num); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_virt_if_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_virt_if_stats.c new file mode 100644 index 000000000..d43b72c3d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_virt_if_stats.c @@ -0,0 +1,339 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_virt_if_stats.h" + +/* + * Data structure that holds the virtual interface context. 
+ */ +extern struct nss_virt_if_handle *nss_virt_if_handle_t[]; + +/* + * Spinlock to protect the global data structure virt_handle. + */ +extern spinlock_t nss_virt_if_lock; + +/* + * nss_virt_if_base_node_stats_str + * virt_if base node stats strings + */ +static int8_t *nss_virt_if_base_node_stats_str[NSS_VIRT_IF_BASE_NODE_STATS_MAX] = { + "active_interfaces", + "ocm_alloc_failed", + "ddr_alloc_failed", +}; + +/* + * nss_virt_if_interface_stats_str + * virt_if interface stats strings + */ +static int8_t *nss_virt_if_interface_stats_str[NSS_VIRT_IF_INTERFACE_STATS_MAX] = { + "rx_packets", + "rx_bytes", + "rx_dropped", + "tx_packets", + "tx_bytes", + "tx_enqueue_failed", + "shaper_enqueue_failed", + "ocm_alloc_failed", +}; + +/* + * nss_virt_if_base_node_stats_fill_row() + * Fill one row of virt_if base node stats. + */ +static int32_t nss_virt_if_base_node_stats_fill_row(char *line, int len, int start, struct nss_virt_if_base_node_stats *stats) +{ + uint64_t tcnt = 0; + switch (start) { + case NSS_VIRT_IF_BASE_NODE_STATS_ACTIVE_INTERFACES: + tcnt = stats->active_interfaces; + break; + + case NSS_VIRT_IF_BASE_NODE_STATS_OCM_ALLOC_FAILED: + tcnt = stats->ocm_alloc_failed; + break; + + case NSS_VIRT_IF_BASE_NODE_STATS_DDR_ALLOC_FAILED: + tcnt = stats->ddr_alloc_failed; + break; + + default: + return 0; + } + + return scnprintf(line, len, "%s = %llu\n", nss_virt_if_base_node_stats_str[start], tcnt); +} + +/* + * nss_virt_if_interface_stats_fill_row() + * Fill one row of virt_if interface stats. 
+ */ +static int32_t nss_virt_if_interface_stats_fill_row(char *line, int len, int start, struct nss_virt_if_interface_stats *stats) +{ + uint64_t tcnt = 0; + switch (start) { + case NSS_VIRT_IF_INTERFACE_STATS_RX_PACKETS: + tcnt = stats->node_stats.rx_packets; + break; + + case NSS_VIRT_IF_INTERFACE_STATS_RX_BYTES: + tcnt = stats->node_stats.rx_bytes; + break; + + case NSS_VIRT_IF_INTERFACE_STATS_RX_DROPPED: + tcnt = nss_cmn_rx_dropped_sum(&stats->node_stats); + break; + + case NSS_VIRT_IF_INTERFACE_STATS_TX_PACKETS: + tcnt = stats->node_stats.tx_packets; + break; + + case NSS_VIRT_IF_INTERFACE_STATS_TX_BYTES: + tcnt = stats->node_stats.tx_bytes; + break; + + case NSS_VIRT_IF_INTERFACE_STATS_TX_ENQUEUE_FAILED: + tcnt = stats->tx_enqueue_failed; + break; + + case NSS_VIRT_IF_INTERFACE_STATS_SHAPER_ENQUEUE_FAILED: + tcnt = stats->shaper_enqueue_failed; + break; + + case NSS_VIRT_IF_INTERFACE_STATS_OCM_ALLOC_FAILED: + tcnt = stats->ocm_alloc_failed; + break; + + default: + return 0; + } + + return scnprintf(line, len, "%s = %llu\n", nss_virt_if_interface_stats_str[start], tcnt); +} + +/* + * nss_virt_if_stats_get() + * Get virt_if base node stats or interface stats by interface number. + */ +bool nss_virt_if_stats_get(struct nss_ctx_instance *nss_ctx, uint32_t if_num, void *stats, bool is_base) +{ + if (nss_virt_if_verify_if_num(if_num) == false) { + return false; + } + + /* + * Statistics for redir_h2n and redir_n2h are collected on redir_h2n in NSS. + */ + if (nss_dynamic_interface_get_type(nss_ctx, if_num) != NSS_DYNAMIC_INTERFACE_TYPE_GENERIC_REDIR_H2N) + return false; + + if_num = if_num - NSS_DYNAMIC_IF_START; + spin_lock_bh(&nss_virt_if_lock); + if (!nss_virt_if_handle_t[if_num]) { + spin_unlock_bh(&nss_virt_if_lock); + return false; + } + + /* + * Check if it is base node statistics or interface statistics. 
+ */ + if (is_base) { + memcpy((struct nss_virt_if_base_node_stats *)stats, + &nss_virt_if_handle_t[if_num]->stats.base_stats, + sizeof(struct nss_virt_if_base_node_stats)); + } else { + memcpy((struct nss_virt_if_interface_stats *)stats, + &nss_virt_if_handle_t[if_num]->stats.if_stats, + sizeof(struct nss_virt_if_interface_stats)); + } + + spin_unlock_bh(&nss_virt_if_lock); + return true; +} + +/* + * nss_virt_if_stats_read() + * Read virt_if statistics + */ +static ssize_t nss_virt_if_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + struct nss_stats_data *data = fp->private_data; + struct nss_ctx_instance *nss_ctx = nss_virt_if_get_context(); + int32_t if_num = NSS_DYNAMIC_IF_START; + int32_t max_if_num = if_num + NSS_MAX_DYNAMIC_INTERFACES; + size_t bytes = 0; + ssize_t bytes_read = 0; + char line[80]; + int start, end; + int32_t if_num_valid = NSS_DYNAMIC_IF_START - 1; + struct nss_virt_if_base_node_stats base_node_stats_local; + struct nss_virt_if_interface_stats interface_stats_local; + + if (data) { + if_num = data->if_num; + } + + if (if_num > max_if_num) { + return 0; + } + + /* + * Interface statistics for all virtual interface pairs. 
+ */ + for (; if_num < max_if_num; if_num++) { + + if (!nss_virt_if_stats_get(nss_ctx, if_num, &interface_stats_local, false)) + continue; + + bytes = scnprintf(line, sizeof(line), "if_num %d stats start:\n\n", if_num); + if ((bytes_read + bytes) > sz) + break; + + if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) + return -EFAULT; + + bytes_read += bytes; + + start = NSS_VIRT_IF_INTERFACE_STATS_RX_PACKETS; + end = NSS_VIRT_IF_INTERFACE_STATS_MAX; + while (bytes_read < sz && start < end) { + bytes = nss_virt_if_interface_stats_fill_row(line, sizeof(line), start, &interface_stats_local); + if (!bytes) + break; + + if ((bytes_read + bytes) > sz) + break; + + if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) + return -EFAULT; + + bytes_read += bytes; + start++; + } + + /* + * Save one valid interface number for base node statistics. + */ + if_num_valid = if_num; + + bytes = scnprintf(line, sizeof(line), "if_num %d stats end:\n\n", if_num); + if (bytes_read > (sz - bytes)) + break; + + if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) + return -EFAULT; + + bytes_read += bytes; + } + + /* + * Base node statistics. 
+ */ + if (!nss_virt_if_stats_get(nss_ctx, if_num_valid, &base_node_stats_local, true)) + goto done; + + bytes = scnprintf(line, sizeof(line), "base node stats begin (shown on if_num %d):\n\n", if_num_valid); + if ((bytes_read + bytes) > sz) + goto done; + + if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) + return -EFAULT; + + bytes_read += bytes; + + start = NSS_VIRT_IF_BASE_NODE_STATS_ACTIVE_INTERFACES; + end = NSS_VIRT_IF_BASE_NODE_STATS_MAX; + while (bytes_read < sz && start < end) { + bytes = nss_virt_if_base_node_stats_fill_row(line, sizeof(line), start, &base_node_stats_local); + if (!bytes) + break; + + if ((bytes_read + bytes) > sz) + break; + + if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) + return -EFAULT; + + bytes_read += bytes; + start++; + } + + bytes = scnprintf(line, sizeof(line), "base node stats end.\n\n"); + if ((bytes_read + bytes) > sz) + goto done; + + if (copy_to_user(ubuf + bytes_read, line, bytes) != 0) + return -EFAULT; + + bytes_read += bytes; + +done: + if (bytes_read > 0) { + *ppos = bytes_read; + } + + if (data) { + data->if_num = if_num; + } + + return bytes_read; +} + +/* + * nss_virt_if_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(virt_if) + +/* + * nss_virt_if_stats_dentry_create() + * Create virt_if statistics debug entry. 
+ */ +void nss_virt_if_stats_dentry_create(void) +{ + nss_stats_create_dentry("virt_if", &nss_virt_if_stats_ops); +} + +/* + * nss_virt_if_stats_sync() + * Sync stats from the NSS FW + */ +void nss_virt_if_stats_sync(struct nss_virt_if_handle *handle, + struct nss_virt_if_stats *nwis) +{ + struct nss_virt_if_stats *stats = &handle->stats; + int i; + + spin_lock_bh(&nss_virt_if_lock); + stats->if_stats.node_stats.rx_packets += nwis->if_stats.node_stats.rx_packets; + stats->if_stats.node_stats.rx_bytes += nwis->if_stats.node_stats.rx_bytes; + stats->if_stats.node_stats.tx_packets += nwis->if_stats.node_stats.tx_packets; + stats->if_stats.node_stats.tx_bytes += nwis->if_stats.node_stats.tx_bytes; + + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + stats->if_stats.node_stats.rx_dropped[i] += nwis->if_stats.node_stats.rx_dropped[i]; + } + + stats->if_stats.tx_enqueue_failed += nwis->if_stats.tx_enqueue_failed; + stats->if_stats.shaper_enqueue_failed += nwis->if_stats.shaper_enqueue_failed; + stats->if_stats.ocm_alloc_failed += nwis->if_stats.ocm_alloc_failed; + + stats->base_stats.active_interfaces = nwis->base_stats.active_interfaces; + stats->base_stats.ocm_alloc_failed = nwis->base_stats.ocm_alloc_failed; + stats->base_stats.ddr_alloc_failed = nwis->base_stats.ddr_alloc_failed; + spin_unlock_bh(&nss_virt_if_lock); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_virt_if_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_virt_if_stats.h new file mode 100644 index 000000000..0c26fac92 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_virt_if_stats.h @@ -0,0 +1,51 @@ +/* + ****************************************************************************** + * Copyright (c) 2017,2019 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_VIRT_IF_STATS_H +#define __NSS_VIRT_IF_STATS_H + +/* + * virt_if base node statistics types. + */ +enum nss_virt_if_base_node_stats_types { + NSS_VIRT_IF_BASE_NODE_STATS_ACTIVE_INTERFACES, /* Number of active virtual interfaces */ + NSS_VIRT_IF_BASE_NODE_STATS_OCM_ALLOC_FAILED, /* Number of interface allocation failure on OCM */ + NSS_VIRT_IF_BASE_NODE_STATS_DDR_ALLOC_FAILED, /* Number of interface allocation failure on DDR */ + NSS_VIRT_IF_BASE_NODE_STATS_MAX, +}; + +/* + * virt_if interface statistics types. 
+ */ +enum nss_virt_if_interface_stats_types { + NSS_VIRT_IF_INTERFACE_STATS_RX_PACKETS, /* Rx packets */ + NSS_VIRT_IF_INTERFACE_STATS_RX_BYTES, /* Rx bytes */ + NSS_VIRT_IF_INTERFACE_STATS_RX_DROPPED, /* Rx drop count */ + NSS_VIRT_IF_INTERFACE_STATS_TX_PACKETS, /* Tx packets */ + NSS_VIRT_IF_INTERFACE_STATS_TX_BYTES, /* Tx bytes */ + NSS_VIRT_IF_INTERFACE_STATS_TX_ENQUEUE_FAILED, /* Number of Tx enqueue failure */ + NSS_VIRT_IF_INTERFACE_STATS_SHAPER_ENQUEUE_FAILED, /* Number of shaper enqueue failure */ + NSS_VIRT_IF_INTERFACE_STATS_OCM_ALLOC_FAILED, /* Number of interface allocation failure on OCM */ + NSS_VIRT_IF_INTERFACE_STATS_MAX, +}; + +/* + * Virtual interface statistics APIs + */ +extern void nss_virt_if_stats_sync(struct nss_virt_if_handle *handle, struct nss_virt_if_stats *nwis); +extern void nss_virt_if_stats_dentry_create(void); + +#endif /* __NSS_VIRT_IF_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_vlan.c b/feeds/ipq807x/qca-nss-drv/src/nss_vlan.c new file mode 100644 index 000000000..23b5c0ba5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_vlan.c @@ -0,0 +1,411 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_vlan_log.h" + +#define NSS_VLAN_TX_TIMEOUT 1000 /* 1 Second */ + +/* + * Private data structure + */ +static struct nss_vlan_pvt { + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; +} vlan_pvt; + +/* + * nss_vlan_get_context() + */ +struct nss_ctx_instance *nss_vlan_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.vlan_handler_id]; +} +EXPORT_SYMBOL(nss_vlan_get_context); + +/* + * nss_vlan_verify_if_num() + * Verify if_num passed to us. + */ +static bool nss_vlan_verify_if_num(uint32_t if_num) +{ + if (!nss_is_dynamic_interface(if_num)) { + return false; + } + + if (nss_dynamic_interface_get_type(nss_vlan_get_context(), if_num) != NSS_DYNAMIC_INTERFACE_TYPE_VLAN) { + return false; + } + + return true; +} + +/* + * nss_vlan_handler() + * Handle NSS -> HLOS messages for vlan + */ +static void nss_vlan_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *app_data) +{ + struct nss_vlan_msg *nvm = (struct nss_vlan_msg *)ncm; + nss_vlan_msg_callback_t cb; + + nss_assert(nss_vlan_verify_if_num(ncm->interface)); + + /* + * Trace messages. + */ + nss_vlan_log_rx_msg(nvm); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_VLAN_MSG_TYPE_MAX) { + nss_warning("%px: received invalid message %d for vlan interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_vlan_msg)) { + nss_warning("%px: length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Update the callback and app_data for NOTIFY messages, vlan sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->vlan_callback; + ncm->app_data = (nss_ptr_t)app_data; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + return; + } + + /* + * callback + */ + cb = (nss_vlan_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, nvm); +} + +/* + * nss_vlan_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_vlan_callback(void *app_data, struct nss_vlan_msg *nvm) +{ + nss_vlan_msg_callback_t callback = (nss_vlan_msg_callback_t)vlan_pvt.cb; + void *data = vlan_pvt.app_data; + + vlan_pvt.response = NSS_TX_SUCCESS; + vlan_pvt.cb = NULL; + vlan_pvt.app_data = NULL; + + if (nvm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("vlan error response %d\n", nvm->cm.response); + vlan_pvt.response = nvm->cm.response; + } + + if (callback) { + callback(data, nvm); + } + complete(&vlan_pvt.complete); +} + +/* + * nss_vlan_tx_msg() + * Transmit a vlan message to NSSFW + */ +nss_tx_status_t nss_vlan_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_vlan_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace messages. 
+ */ + nss_vlan_log_tx_msg(msg); + + /* + * Sanity check the message + */ + if (!nss_vlan_verify_if_num(ncm->interface)) { + nss_warning("%px: tx request for interface that is not a vlan: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_VLAN_MSG_TYPE_MAX) { + nss_warning("%px: message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_vlan_tx_msg); + +/* + * nss_vlan_tx_msg_sync() + * Transmit a vlan message to NSS firmware synchronously. + */ +nss_tx_status_t nss_vlan_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_vlan_msg *nvm) +{ + nss_tx_status_t status; + int ret = 0; + + down(&vlan_pvt.sem); + vlan_pvt.cb = (void *)nvm->cm.cb; + vlan_pvt.app_data = (void *)nvm->cm.app_data; + + nvm->cm.cb = (nss_ptr_t)nss_vlan_callback; + nvm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_vlan_tx_msg(nss_ctx, nvm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: vlan_tx_msg failed\n", nss_ctx); + up(&vlan_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&vlan_pvt.complete, msecs_to_jiffies(NSS_VLAN_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: vlan msg tx failed due to timeout\n", nss_ctx); + vlan_pvt.response = NSS_TX_FAILURE; + } + + status = vlan_pvt.response; + up(&vlan_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_vlan_tx_msg_sync); + +/* + * nss_vlan_msg_init() + * Initialize nss_vlan_msg. 
+ */ +void nss_vlan_msg_init(struct nss_vlan_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_vlan_msg_init); + +/* + * nss_vlan_tx_change_mtu_msg + * API to send change mtu message to NSS FW + */ +nss_tx_status_t nss_vlan_tx_set_mtu_msg(uint32_t vlan_if_num, uint32_t mtu) +{ + struct nss_ctx_instance *nss_ctx = nss_vlan_get_context(); + struct nss_vlan_msg nvm; + struct nss_if_mtu_change *nimc; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_vlan_verify_if_num(vlan_if_num) == false) { + nss_warning("%px: received invalid interface %d", nss_ctx, vlan_if_num); + return NSS_TX_FAILURE; + } + + nss_vlan_msg_init(&nvm, vlan_if_num, NSS_IF_MTU_CHANGE, + sizeof(struct nss_if_mtu_change), NULL, NULL); + + nimc = &nvm.msg.if_msg.mtu_change; + nimc->min_buf_size = (uint16_t)mtu; + + return nss_vlan_tx_msg_sync(nss_ctx, &nvm); +} +EXPORT_SYMBOL(nss_vlan_tx_set_mtu_msg); + +/* + * nss_vlan_tx_set_mac_addr_msg + * API to send change mac addr message to NSS FW + */ +nss_tx_status_t nss_vlan_tx_set_mac_addr_msg(uint32_t vlan_if_num, uint8_t *addr) +{ + struct nss_ctx_instance *nss_ctx = nss_vlan_get_context(); + struct nss_vlan_msg nvm; + struct nss_if_mac_address_set *nmas; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_vlan_verify_if_num(vlan_if_num) == false) { + nss_warning("%px: received invalid interface %d", nss_ctx, vlan_if_num); + return NSS_TX_FAILURE; + } + + nss_vlan_msg_init(&nvm, vlan_if_num, NSS_IF_MAC_ADDR_SET, + sizeof(struct nss_if_mac_address_set), NULL, NULL); + + nmas = &nvm.msg.if_msg.mac_address_set; + memcpy(nmas->mac_addr, addr, ETH_ALEN); + return nss_vlan_tx_msg_sync(nss_ctx, &nvm); +} +EXPORT_SYMBOL(nss_vlan_tx_set_mac_addr_msg); + +/* + * nss_vlan_tx_vsi_attach_msg + * API to send VSI attach message to NSS FW + */ 
+nss_tx_status_t nss_vlan_tx_vsi_attach_msg(uint32_t vlan_if_num, uint32_t vsi) +{ + struct nss_ctx_instance *nss_ctx = nss_vlan_get_context(); + struct nss_vlan_msg nvm; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_vlan_verify_if_num(vlan_if_num) == false) { + nss_warning("%px: received invalid interface %d\n", nss_ctx, vlan_if_num); + return NSS_TX_FAILURE; + } + + nvm.msg.if_msg.vsi_assign.vsi = vsi; + nss_vlan_msg_init(&nvm, vlan_if_num, NSS_IF_VSI_ASSIGN, + sizeof(struct nss_if_vsi_assign), NULL, NULL); + + return nss_vlan_tx_msg_sync(nss_ctx, &nvm); +} +EXPORT_SYMBOL(nss_vlan_tx_vsi_attach_msg); + +/* + * nss_vlan_tx_vsi_detach_msg + * API to send VSI detach message to NSS FW + */ +nss_tx_status_t nss_vlan_tx_vsi_detach_msg(uint32_t vlan_if_num, uint32_t vsi) +{ + struct nss_ctx_instance *nss_ctx = nss_vlan_get_context(); + struct nss_vlan_msg nvm; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_vlan_verify_if_num(vlan_if_num) == false) { + nss_warning("%px: received invalid interface %d\n", nss_ctx, vlan_if_num); + return NSS_TX_FAILURE; + } + + nvm.msg.if_msg.vsi_unassign.vsi = vsi; + nss_vlan_msg_init(&nvm, vlan_if_num, NSS_IF_VSI_UNASSIGN, + sizeof(struct nss_if_vsi_unassign), NULL, NULL); + + return nss_vlan_tx_msg_sync(nss_ctx, &nvm); +} +EXPORT_SYMBOL(nss_vlan_tx_vsi_detach_msg); + +/* + * nss_vlan_tx_add_tag_msg + * API to send vlan add tag message to NSS FW + */ +nss_tx_status_t nss_vlan_tx_add_tag_msg(uint32_t vlan_if_num, uint32_t vlan_tag, uint32_t next_hop, uint32_t physical_dev) +{ + struct nss_ctx_instance *nss_ctx = nss_vlan_get_context(); + struct nss_vlan_msg nvm; + + if (!nss_ctx) { + nss_warning("Can't get nss context\n"); + return NSS_TX_FAILURE; + } + + if (nss_vlan_verify_if_num(vlan_if_num) == false) { + nss_warning("%px: received invalid interface %d\n", nss_ctx, vlan_if_num); + return NSS_TX_FAILURE; + } + + 
nvm.msg.add_tag.next_hop = next_hop; + nvm.msg.add_tag.if_num = physical_dev; + nvm.msg.add_tag.vlan_tag = vlan_tag; + nss_vlan_msg_init(&nvm, vlan_if_num, NSS_VLAN_MSG_ADD_TAG, + sizeof(struct nss_vlan_msg_add_tag), NULL, NULL); + + return nss_vlan_tx_msg_sync(nss_ctx, &nvm); +} +EXPORT_SYMBOL(nss_vlan_tx_add_tag_msg); + +/** + * @brief Register to send/receive vlan messages to NSS + * + * @param if_num NSS interface number + * @param vlan_data_callback Callback for vlan data + * @param netdev netdevice associated with the vlan interface + * @param features denotes the skb types supported by this interface + * + * @return nss_ctx_instance* NSS context + */ +struct nss_ctx_instance *nss_register_vlan_if(uint32_t if_num, nss_vlan_callback_t vlan_data_callback, + struct net_device *netdev, uint32_t features, void *app_ctx) +{ + struct nss_ctx_instance *nss_ctx = nss_vlan_get_context(); + + nss_assert(nss_vlan_verify_if_num(if_num)); + + nss_core_register_subsys_dp(nss_ctx, if_num, vlan_data_callback, NULL, app_ctx, netdev, features); + + nss_core_register_handler(nss_ctx, if_num, nss_vlan_handler, app_ctx); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_register_vlan_if); + +/* + * nss_unregister_vlan_if() + */ +void nss_unregister_vlan_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_vlan_get_context(); + + nss_assert(nss_vlan_verify_if_num(if_num)); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_core_unregister_handler(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_unregister_vlan_if); + +/* + * nss_vlan_register_handler() + * debugfs stats msg handler received on static vlan interface + */ +void nss_vlan_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_vlan_get_context(); + + nss_info("nss_vlan_register_handler\n"); + nss_core_register_handler(nss_ctx, NSS_VLAN_INTERFACE, nss_vlan_handler, NULL); + + sema_init(&vlan_pvt.sem, 1); + init_completion(&vlan_pvt.complete); +} +EXPORT_SYMBOL(nss_vlan_register_handler); diff --git 
a/feeds/ipq807x/qca-nss-drv/src/nss_vlan_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_vlan_log.c new file mode 100644 index 000000000..b9e946a74 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_vlan_log.c @@ -0,0 +1,120 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_vlan_log.c + * NSS VLAN logger file. + */ + +#include "nss_core.h" + +/* + * nss_vlan_log_message_types_str + * VLAN message strings + */ +static int8_t *nss_vlan_log_message_types_str[NSS_VLAN_MSG_TYPE_MAX] __maybe_unused = { + "VLAN ADD TAG", +}; + +/* + * nss_vlan_log_error_response_types_str + * Strings for error types for VLAN messages + */ +static int8_t *nss_vlan_log_error_response_types_str[NSS_VLAN_ERROR_TYPE_MAX] __maybe_unused = { + "VLAN Unknown Message", +}; + +/* + * nss_vlan_log_add_tag_msg() + * Log NSS VLAN Add Tag message. 
+ */ +static void nss_vlan_log_add_tag_msg(struct nss_vlan_msg *nvm) +{ + struct nss_vlan_msg_add_tag *nvtm __maybe_unused = &nvm->msg.add_tag; + nss_trace("%px: NSS VLAN Add Tag Message:\n" + "VLAN Tag: %d\n" + "VLAN Next Hop: %d\n" + "VLAN Interface Number: %d\n", + nvtm, nvtm->vlan_tag, + nvtm->next_hop, nvtm->if_num); +} + +/* + * nss_vlan_log_verbose() + * Log message contents. + */ +static void nss_vlan_log_verbose(struct nss_vlan_msg *nvm) +{ + switch (nvm->cm.type) { + case NSS_VLAN_MSG_ADD_TAG: + nss_vlan_log_add_tag_msg(nvm); + break; + + default: + nss_warning("%px: Invalid message type\n", nvm); + break; + } +} + +/* + * nss_vlan_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_vlan_log_tx_msg(struct nss_vlan_msg *nvm) +{ + if (nvm->cm.type >= NSS_VLAN_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", nvm); + return; + } + + nss_info("%px: type[%d]:%s\n", nvm, nvm->cm.type, nss_vlan_log_message_types_str[nvm->cm.type]); + nss_vlan_log_verbose(nvm); +} + +/* + * nss_vlan_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_vlan_log_rx_msg(struct nss_vlan_msg *nvm) +{ + if (nvm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nvm); + return; + } + + if (nvm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nvm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nvm, nvm->cm.type, + nss_vlan_log_message_types_str[nvm->cm.type], + nvm->cm.response, nss_cmn_response_str[nvm->cm.response]); + goto verbose; + } + + if (nvm->cm.error >= NSS_VLAN_ERROR_TYPE_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nvm, nvm->cm.type, nss_vlan_log_message_types_str[nvm->cm.type], + nvm->cm.response, nss_cmn_response_str[nvm->cm.response], + nvm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nvm, nvm->cm.type, nss_vlan_log_message_types_str[nvm->cm.type], + nvm->cm.response, nss_cmn_response_str[nvm->cm.response], + nvm->cm.error, nss_vlan_log_error_response_types_str[nvm->cm.error]); + +verbose: + nss_vlan_log_verbose(nvm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_vlan_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_vlan_log.h new file mode 100644 index 000000000..21b365d15 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_vlan_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_VLAN_LOG_H__ +#define __NSS_VLAN_LOG_H__ + +/* + * nss_vlan_log.h + * NSS VLAN Log Header File + */ + +/* + * nss_vlan_log_tx_msg + * Logs a vlan message that is sent to the NSS firmware. + */ +void nss_vlan_log_tx_msg(struct nss_vlan_msg *ncm); + +/* + * nss_vlan_log_rx_msg + * Logs a vlan message that is received from the NSS firmware. + */ +void nss_vlan_log_rx_msg(struct nss_vlan_msg *ncm); + +#endif /* __NSS_VLAN_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_vxlan.c b/feeds/ipq807x/qca-nss-drv/src/nss_vxlan.c new file mode 100644 index 000000000..bde299663 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_vxlan.c @@ -0,0 +1,326 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +/* + * nss_vxlan.c + * NSS VxLAN driver interface APIs + */ +#include "nss_core.h" +#include "nss_vxlan.h" +#include "nss_cmn.h" +#include "nss_tx_rx_common.h" +#include "nss_vxlan_log.h" +#include "nss_vxlan_stats.h" + +#define NSS_VXLAN_TX_TIMEOUT 3000 + +/* + * Private data structure + */ +static struct { + struct semaphore sem; /* Semaphore structure. */ + struct completion complete; /* Completion structure. */ + int response; /* Response from FW. */ + void *cb; /* Original cb for msgs. */ + void *app_data; /* Original app_data for msgs. */ +} nss_vxlan_pvt; + +/* + * nss_vxlan_verify_if_num() + * Verify if_num passed to us. + */ +static bool nss_vxlan_verify_if_num(uint32_t if_num) +{ + uint32_t type; + + if (if_num == NSS_VXLAN_INTERFACE) { + return true; + } + + type = nss_dynamic_interface_get_type(nss_vxlan_get_ctx(), if_num); + + return ((type == NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_INNER) || + (type == NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_OUTER)); +} + +/* + * nss_vxlan_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_vxlan_callback(void *app_data, struct nss_cmn_msg *msg) +{ + nss_vxlan_msg_callback_t callback = (nss_vxlan_msg_callback_t)nss_vxlan_pvt.cb; + void *data = nss_vxlan_pvt.app_data; + + nss_vxlan_pvt.response = NSS_TX_SUCCESS; + nss_vxlan_pvt.cb = NULL; + nss_vxlan_pvt.app_data = NULL; + + if (msg->response != NSS_CMN_RESPONSE_ACK) { + nss_warning("Vxlan Error response %d\n", msg->response); + nss_vxlan_pvt.response = NSS_TX_FAILURE; + } + + if (callback) { + callback(data, msg); + } + complete(&nss_vxlan_pvt.complete); +} + +/* + * nss_vxlan_handler() + * Handle NSS -> HLOS messages for vxlan. 
+ */ +static void nss_vxlan_msg_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_vxlan_msg *nvm = (struct nss_vxlan_msg *)ncm; + nss_vxlan_msg_callback_t cb; + + BUG_ON(!nss_vxlan_verify_if_num(ncm->interface)); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_VXLAN_MSG_TYPE_MAX) { + nss_warning("%px: received invalid message %d for vxlan interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_vxlan_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Log messages. + */ + nss_core_log_msg_failures(nss_ctx, ncm); + nss_vxlan_log_rx_msg(nvm); + + switch (nvm->cm.type) { + case NSS_VXLAN_MSG_TYPE_STATS_SYNC: + /* + * Update common node statistics + */ + nss_vxlan_stats_sync(nss_ctx, nvm); + } + + /* + * Update the callback for NOTIFY messages + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + } + + cb = (nss_vxlan_msg_callback_t)ncm->cb; + + /* + * Do we have a callback? + */ + if (!cb) { + nss_trace("%px: cb is null for interface %d\n", nss_ctx, ncm->interface); + return; + } + + cb((void *)nss_ctx->subsys_dp_register[ncm->interface].ndev, ncm); +} + +/* + * nss_vxlan_tx_msg() + * Transmit a vxlan message to NSS FW. Don't call this from softirq/interrupts. + */ +nss_tx_status_t nss_vxlan_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_vxlan_msg *nvm) +{ + struct nss_cmn_msg *ncm = &nvm->cm; + + if (!nss_vxlan_verify_if_num(ncm->interface)) { + nss_warning("%px: wrong interface number %u\n", nss_ctx, nvm->cm.interface); + return NSS_TX_FAILURE_BAD_PARAM; + } + + if (ncm->type >= NSS_VXLAN_MSG_TYPE_MAX) { + nss_warning("%px: wrong message type %u\n", nss_ctx, ncm->type); + return NSS_TX_FAILURE_BAD_PARAM; + } + + /* + * Trace messages. 
+ */ + nss_vxlan_log_tx_msg(nvm); + + return nss_core_send_cmd(nss_ctx, nvm, sizeof(*nvm), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_vxlan_tx_msg); + +/* + * nss_vxlan_tx_msg_sync() + * Transmit a vxlan message to NSS firmware synchronously. + */ +nss_tx_status_t nss_vxlan_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_vxlan_msg *nvm) +{ + nss_tx_status_t status; + int ret; + + down(&nss_vxlan_pvt.sem); + nss_vxlan_pvt.cb = (void *)nvm->cm.cb; + nss_vxlan_pvt.app_data = (void *)nvm->cm.app_data; + + nvm->cm.cb = (nss_ptr_t)nss_vxlan_callback; + nvm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_vxlan_tx_msg(nss_ctx, nvm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: vxlan_tx_msg failed\n", nss_ctx); + up(&nss_vxlan_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&nss_vxlan_pvt.complete, msecs_to_jiffies(NSS_VXLAN_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: vxlan tx sync failed due to timeout\n", nss_ctx); + nss_vxlan_pvt.response = NSS_TX_FAILURE; + } + + status = nss_vxlan_pvt.response; + up(&nss_vxlan_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_vxlan_tx_msg_sync); + +/* + * nss_vxlan_msg_init() + * Initialize VxLAN message. + */ +void nss_vxlan_msg_init(struct nss_vxlan_msg *nvm, uint16_t if_num, uint32_t type, uint32_t len, + nss_vxlan_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&nvm->cm, if_num, type, len, (void*)cb, app_data); +} +EXPORT_SYMBOL(nss_vxlan_msg_init); + +/* + * nss_vxlan_unregister_if() + * Unregister a data packet notifier with NSS FW. 
+ */ +bool nss_vxlan_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx; + + nss_ctx = nss_vxlan_get_ctx(); + if (!nss_vxlan_verify_if_num(if_num)) { + nss_warning("%px: data unregister received for invalid interface %d", nss_ctx, if_num); + return false; + } + + nss_core_unregister_handler(nss_ctx, if_num); + nss_core_unregister_subsys_dp(nss_ctx, if_num); + return true; +} +EXPORT_SYMBOL(nss_vxlan_unregister_if); + +/* + * nss_vxlan_register_if() + * Registers a data packet notifier with NSS FW. + */ +struct nss_ctx_instance *nss_vxlan_register_if(uint32_t if_num, + uint32_t type, + nss_vxlan_buf_callback_t data_cb, + nss_vxlan_msg_callback_t notify_cb, + struct net_device *netdev, + uint32_t features) +{ + struct nss_ctx_instance *nss_ctx; + int core_status; + + nss_ctx = nss_vxlan_get_ctx(); + if (!nss_vxlan_verify_if_num(if_num)) { + nss_warning("%px: data register received for invalid interface %d", nss_ctx, if_num); + return NULL; + } + + core_status = nss_core_register_handler(nss_ctx, if_num, nss_vxlan_msg_handler, NULL); + if (core_status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: nss core register handler failed for if_num:%d with error :%d", nss_ctx, if_num, core_status); + return NULL; + } + + core_status = nss_core_register_msg_handler(nss_ctx, if_num, notify_cb); + if (core_status != NSS_CORE_STATUS_SUCCESS) { + nss_core_unregister_handler(nss_ctx, if_num); + nss_warning("%px: nss core register handler failed for if_num:%d with error :%d", nss_ctx, if_num, core_status); + return NULL; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, data_cb, NULL, NULL, netdev, features); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, type); + return nss_ctx; +} +EXPORT_SYMBOL(nss_vxlan_register_if); + +/* + * nss_vxlan_ifnum_with_core_id() + * Append core id to vxlan interface num. 
+ */ +int nss_vxlan_ifnum_with_core_id(int if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_vxlan_get_ctx(); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (!nss_vxlan_verify_if_num(if_num)) { + nss_warning("%px: Invalid if_num: %d, must be a dynamic interface\n", nss_ctx, if_num); + return 0; + } + return NSS_INTERFACE_NUM_APPEND_COREID(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_vxlan_ifnum_with_core_id); + +/* + * nss_vxlan_get_ctx() + * Return a VxLAN NSS context. + */ +struct nss_ctx_instance *nss_vxlan_get_ctx() +{ + struct nss_ctx_instance *nss_ctx; + + nss_ctx = &nss_top_main.nss[nss_top_main.vxlan_handler_id]; + return nss_ctx; +} +EXPORT_SYMBOL(nss_vxlan_get_ctx); + +/* + * nss_vxlan_init() + * Initializes Vxlan. Gets called from nss_init.c. + */ +void nss_vxlan_init() +{ + uint32_t core_status; + struct nss_ctx_instance *nss_ctx = nss_vxlan_get_ctx(); + if (!nss_ctx) { + nss_warning("%px: VxLAN is not registered", nss_ctx); + return; + } + + nss_vxlan_stats_dentry_create(); + sema_init(&nss_vxlan_pvt.sem, 1); + init_completion(&nss_vxlan_pvt.complete); + core_status = nss_core_register_handler(nss_ctx, NSS_VXLAN_INTERFACE, nss_vxlan_msg_handler, NULL); + + if (core_status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: nss core register handler failed for if_num:%d with error :%d", nss_ctx, NSS_VXLAN_INTERFACE, core_status); + } + +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_log.c new file mode 100644 index 000000000..7bbfc5e87 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_log.c @@ -0,0 +1,257 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_vxlan_log.c + * NSS VXLAN logger file. + */ + +#include "nss_core.h" + +/* + * nss_vxlan_log_message_types_str + * VXLAN message strings + */ +static int8_t *nss_vxlan_log_message_types_str[NSS_VXLAN_MSG_TYPE_MAX] __maybe_unused = { + "VxLAN Sync Stats", + "VxLAN Tunnel Configure Rule", + "VxLAN Tunnel Unconfigure Rule", + "VxLAN Enable Tunnel", + "VxLAN Disable Tunnel", + "VxLAN Add MAC rule", + "VxLAN Delete MAC rule", + "VxLAN MAC DB Stats" +}; + +/* + * nss_vxlan_log_error_response_types_str + * Strings for error types for VXLAN messages + */ +static int8_t *nss_vxlan_log_error_response_types_str[NSS_VXLAN_ERROR_TYPE_MAX] __maybe_unused = { + "VxLAN Unknown Error", + "VXLAN Decap Register fail", + "VXLAN Dest IP mismatch", + "VXLAN Invalid VNI", + "VXLAN Invalid L3 Proto", + "VXLAN Invalid UDP Proto", + "VXLAN Invalid Src Port", + "VXLAN MAC Bad entry", + "VXLAN MAC Entry exists", + "VXLAN MAC Entry does not exist", + "VXLAN MAC Entry unhashed", + "VXLAN MAC Entry alloc failed", + "VXLAN MAC Entry delete failed", + "VXLAN MAC Table full", + "VXLAN Sibling Node does not exist", + "VXLAN Tunnel Configured", + "VXLAN Tunnel Unconfigured", + "VXLAN Tunnel addition failed", + "VXLAN Tunnel Disabled", + "VXLAN Tunnel Enabled", + "VXLAN Tunnel Entry exists" +}; + +/* + * nss_vxlan_log_rule_msg() + * Log NSS VXLAN rule message. 
+ */ +static void nss_vxlan_log_rule_msg(struct nss_vxlan_rule_msg *nvrm) +{ + nss_trace("%px: NSS VXLAN Rule message \n" + "VxLAN Tunnel Flags: %x\n" + "VNET ID: %u\n" + "Flowlabel: %u\n" + "TOS: %u\n" + "TTL: %u\n" + "source port min: %u max: %u" + "destination port: %u", + nvrm, + nvrm->tunnel_flags, + nvrm->vni, + nvrm->flow_label, + nvrm->tos, + nvrm->ttl, + nvrm->src_port_min, + nvrm->src_port_max, + nvrm->dest_port); +} + +/* + * nss_vxlan_mac_rule_msg() + * Log NSS Vxlan MAC rule message. + */ +static void nss_vxlan_log_mac_msg(struct nss_vxlan_mac_msg *nvmm) +{ + nss_trace("%px: NSS VXLAN MAC message \n" + "Encap Rule Src IP: %px\n" + "Encap Rule Dst Ip: %px\n" + "Vxlan VNet ID: %u\n" + "Vxlan Mac Addr: %pM", + nvmm, + &nvmm->encap.src_ip, + &nvmm->encap.dest_ip, + nvmm->vni, + nvmm->mac_addr); +} + +/* + * nss_vxlan_log_rule_create_msg() + * Log NSS Vxlan rule create message. + */ +static void nss_vxlan_log_rule_create_msg(struct nss_vxlan_msg *nvm) +{ + struct nss_vxlan_rule_msg *nvrm __maybe_unused = &nvm->msg.vxlan_create; + nss_vxlan_log_rule_msg(nvrm); +} + +/* + * nss_vxlan_log_rule_destroy_msg() + * Log NSS Vxlan rule destroy message. + */ +static void nss_vxlan_log_rule_destroy_msg(struct nss_vxlan_msg *nvm) +{ + struct nss_vxlan_rule_msg *nvrm __maybe_unused = &nvm->msg.vxlan_destroy; + nss_vxlan_log_rule_msg(nvrm); +} + +/* + * nss_vxlan_log_enable_msg() + * Log NSS Vxlan rule enable message. + */ +static void nss_vxlan_log_enable_msg(struct nss_vxlan_msg *nvm) +{ + nss_trace("%px: NSS VXLAN Tunnel state message: Enable \n", nvm); +} + +/* + * nss_vxlan_log_disable_msg() + * Log NSS Vxlan rule disable message. + */ +static void nss_vxlan_log_disable_msg(struct nss_vxlan_msg *nvm) +{ + nss_trace("%px: NSS VXLAN Tunnel state message: Disable \n", nvm); +} + +/* + * nss_vxlan_log_mac_add_msg() + * Log NSS VXLAN mac rule add message. 
+ */ +static void nss_vxlan_log_mac_add_msg(struct nss_vxlan_msg *nvm) +{ + struct nss_vxlan_mac_msg *nvmm __maybe_unused = &nvm->msg.mac_add; + nss_vxlan_log_mac_msg(nvmm); +} + +/* + * nss_vxlan_log_mac_del_msg() + * Log NSS VXLAN mac rule del message. + */ +static void nss_vxlan_log_mac_del_msg(struct nss_vxlan_msg *nvm) +{ + struct nss_vxlan_mac_msg *nvmm __maybe_unused = &nvm->msg.mac_del; + nss_vxlan_log_mac_msg(nvmm); +} + +/* + * nss_vxlan_log_verbose() + * Log message contents. + */ +static void nss_vxlan_log_verbose(struct nss_vxlan_msg *nvm) +{ + switch (nvm->cm.type) { + case NSS_VXLAN_MSG_TYPE_TUN_CONFIGURE: + nss_vxlan_log_rule_create_msg(nvm); + break; + + case NSS_VXLAN_MSG_TYPE_TUN_UNCONFIGURE: + nss_vxlan_log_rule_destroy_msg(nvm); + break; + + case NSS_VXLAN_MSG_TYPE_TUN_ENABLE: + nss_vxlan_log_enable_msg(nvm); + break; + + case NSS_VXLAN_MSG_TYPE_TUN_DISABLE: + nss_vxlan_log_disable_msg(nvm); + break; + + case NSS_VXLAN_MSG_TYPE_MAC_ADD: + nss_vxlan_log_mac_add_msg(nvm); + break; + + case NSS_VXLAN_MSG_TYPE_MAC_DEL: + nss_vxlan_log_mac_del_msg(nvm); + break; + + case NSS_VXLAN_MSG_TYPE_STATS_SYNC: + case NSS_VXLAN_MSG_TYPE_MACDB_STATS: + break; + + default: + nss_trace("%px: Invalid message type\n", nvm); + break; + } +} + +/* + * nss_vxlan_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_vxlan_log_tx_msg(struct nss_vxlan_msg *nvm) +{ + if (nvm->cm.type >= NSS_VXLAN_MSG_TYPE_MAX) { + nss_warning("%px: Invalid message type\n", nvm); + return; + } + + nss_info("%px: type[%d]:%s\n", nvm, nvm->cm.type, nss_vxlan_log_message_types_str[nvm->cm.type]); + nss_vxlan_log_verbose(nvm); +} + +/* + * nss_vxlan_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_vxlan_log_rx_msg(struct nss_vxlan_msg *nvm) +{ + if (nvm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nvm); + return; + } + + if (nvm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nvm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nvm, nvm->cm.type, + nss_vxlan_log_message_types_str[nvm->cm.type], + nvm->cm.response, nss_cmn_response_str[nvm->cm.response]); + goto verbose; + } + + if (nvm->cm.error >= NSS_VXLAN_ERROR_TYPE_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nvm, nvm->cm.type, nss_vxlan_log_message_types_str[nvm->cm.type], + nvm->cm.response, nss_cmn_response_str[nvm->cm.response], + nvm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nvm, nvm->cm.type, nss_vxlan_log_message_types_str[nvm->cm.type], + nvm->cm.response, nss_cmn_response_str[nvm->cm.response], + nvm->cm.error, nss_vxlan_log_error_response_types_str[nvm->cm.error]); + +verbose: + nss_vxlan_log_verbose(nvm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_log.h new file mode 100644 index 000000000..2db12be9f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_VXLAN_LOG_H__ +#define __NSS_VXLAN_LOG_H__ + +/* + * nss_vxlan_log.h + * NSS VXLAN Log Header File. + */ + +/* + * nss_vxlan_log_tx_msg + * Logs a Vxlan message that is sent to the NSS firmware. + */ +void nss_vxlan_log_tx_msg(struct nss_vxlan_msg *nvm); + +/* + * nss_vxlan_log_rx_msg + * Logs a Vxlan message that is received from the NSS firmware. + */ +void nss_vxlan_log_rx_msg(struct nss_vxlan_msg *nvm); + +#endif /* __NSS_VXLAN_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_stats.c new file mode 100644 index 000000000..0559d0a2a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_stats.c @@ -0,0 +1,122 @@ +/* + ************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_stats.h" +#include "nss_vxlan_stats.h" +#include + +#define NSS_VXLAN_STATS_MAX_LINES (NSS_STATS_NODE_MAX + 32) + /**< Maximum number of lines for VXLAN statistics dump. */ +#define NSS_VXLAN_STATS_SIZE_PER_IF (NSS_STATS_MAX_STR_LENGTH * NSS_VXLAN_STATS_MAX_LINES) + /**< Total number of statistics per VXLAN interface. */ + +/* + * nss_vxlan_stats_read() + * Read vxlan node statiistics. + */ +static ssize_t nss_vxlan_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + struct nss_ctx_instance *nss_ctx = nss_vxlan_get_ctx(); + enum nss_dynamic_interface_type type; + ssize_t bytes_read = 0; + size_t len = 0, size; + uint32_t if_num; + char *buf; + + size = NSS_VXLAN_STATS_SIZE_PER_IF; + buf = kzalloc(size, GFP_KERNEL); + if (!buf) { + nss_warning("Could not allocate memory for local statistics buffer\n"); + return 0; + } + + /* + * Common node stats for each VxLAN dynamic interface. 
+ */ + for (if_num = 0; if_num < NSS_MAX_NET_INTERFACES; if_num++) { + if (if_num == NSS_VXLAN_INTERFACE) { + len += scnprintf(buf + len, size - len, "\nBase node if_num:%03u", if_num); + len += scnprintf(buf + len, size - len, "\n-------------------\n"); + len += nss_stats_fill_common_stats(if_num, NSS_STATS_SINGLE_INSTANCE, buf, len, size - len, "vxlan"); + continue; + } + + type = nss_dynamic_interface_get_type(nss_ctx, if_num); + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_INNER: + len += scnprintf(buf + len, size - len, "\nInner if_num:%03u", if_num); + break; + + case NSS_DYNAMIC_INTERFACE_TYPE_VXLAN_OUTER: + len += scnprintf(buf + len, size - len, "\nOuter if_num:%03u", if_num); + break; + + default: + continue; + } + + len += scnprintf(buf + len, size - len, "\n-------------------\n"); + len += nss_stats_fill_common_stats(if_num, NSS_STATS_SINGLE_INSTANCE, buf, len, size - len, "vxlan"); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, buf, len); + kfree(buf); + return bytes_read; +} + +/* + * nss_vxlan_stats_sync() + * Update vxlan common node statistics. 
+ */ +void nss_vxlan_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_vxlan_msg *nvm) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct nss_vxlan_stats_msg *msg_stats = &nvm->msg.stats; + uint64_t *if_stats; + + spin_lock_bh(&nss_top->stats_lock); + + /* + * Update common node stats + */ + if_stats = nss_top->stats_node[nvm->cm.interface]; + if_stats[NSS_STATS_NODE_RX_PKTS] += msg_stats->node_stats.rx_packets; + if_stats[NSS_STATS_NODE_RX_BYTES] += msg_stats->node_stats.rx_bytes; + if_stats[NSS_STATS_NODE_RX_QUEUE_0_DROPPED] += msg_stats->node_stats.rx_dropped[0]; + if_stats[NSS_STATS_NODE_RX_QUEUE_1_DROPPED] += msg_stats->node_stats.rx_dropped[1]; + if_stats[NSS_STATS_NODE_RX_QUEUE_2_DROPPED] += msg_stats->node_stats.rx_dropped[2]; + if_stats[NSS_STATS_NODE_RX_QUEUE_3_DROPPED] += msg_stats->node_stats.rx_dropped[3]; + + if_stats[NSS_STATS_NODE_TX_PKTS] += msg_stats->node_stats.tx_packets; + if_stats[NSS_STATS_NODE_TX_BYTES] += msg_stats->node_stats.tx_bytes; + + spin_unlock_bh(&nss_top->stats_lock); +} + +/* + * nss_vxlan_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(vxlan) + +/* + * nss_vxlan_stats_dentry_create() + * Create vxlan statistics debug entry. + */ +void nss_vxlan_stats_dentry_create(void) +{ + nss_stats_create_dentry("vxlan", &nss_vxlan_stats_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_stats.h new file mode 100644 index 000000000..b4748f416 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_vxlan_stats.h @@ -0,0 +1,32 @@ +/* + ************************************************************************** + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef _NSS_VXLAN_STATS_H_ +#define _NSS_VXLAN_STATS_H_ + +/* + * nss_vxlan_stats_dentry_create + * Creates vxlan interface statistics debug entry. + */ +void nss_vxlan_stats_dentry_create(void); + +/* + * nss_vxlan_stats_sync + * Update vxlan common node statistics. + */ +void nss_vxlan_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_vxlan_msg *nvm); + +#endif /* _NSS_VXLAN_STATS_H_ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi.c new file mode 100644 index 000000000..559d947c8 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi.c @@ -0,0 +1,198 @@ +/* + ************************************************************************** + * Copyright (c) 2015-2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_wifi_stats.h" +#include "nss_wifi_log.h" + +/* + * nss_wifi_get_context() + * Get NSS context of Wifi. + */ +struct nss_ctx_instance *nss_wifi_get_context() +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; +} + +/* + * nss_wifi_handler() + * Handle NSS -> HLOS messages for wifi + */ +static void nss_wifi_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_wifi_msg *ntm = (struct nss_wifi_msg *)ncm; + void *ctx; + nss_wifi_msg_callback_t cb; + + nss_info("%px: NSS ->HLOS message for wifi\n", nss_ctx); + + BUG_ON(((ncm->interface < NSS_WIFI_INTERFACE0) || (ncm->interface > NSS_WIFI_INTERFACE2))); + + /* + * Trace messages. + */ + nss_wifi_log_rx_msg(ntm); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_WIFI_MAX_MSG) { + nss_warning("%px: received invalid message %d for wifi interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_wifi_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Snoop messages for local driver and handle + */ + switch (ntm->cm.type) { + case NSS_WIFI_STATS_MSG: + /* + * To create the old API gmac statistics, we use the new extended GMAC stats. 
+ */ + nss_wifi_stats_sync(nss_ctx, &ntm->msg.statsmsg, ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages, wifi sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->wifi_msg_callback; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + nss_info("%px: cb null for wifi interface %d", nss_ctx, ncm->interface); + return; + } + + /* + * Get callback & context + */ + cb = (nss_wifi_msg_callback_t)ncm->cb; + ctx = nss_ctx->subsys_dp_register[ncm->interface].ndev; + + /* + * call wifi msg callback + */ + if (!ctx) { + nss_warning("%px: Event received for wifi interface %d before registration", nss_ctx, ncm->interface); + return; + } + + cb(ctx, ntm); +} + +/* + * nss_wifi_tx_msg + * Transmit a wifi message to NSS FW + */ +nss_tx_status_t nss_wifi_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_wifi_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace messages. 
+ */ + nss_wifi_log_tx_msg(msg); + + if (ncm->type > NSS_WIFI_MAX_MSG) { + nss_warning("%px: wifi message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} + +/* + **************************************** + * Register/Unregister/Miscellaneous APIs + **************************************** + */ + +/* + * nss_register_wifi_if() + * Register Wifi with nss driver + */ +struct nss_ctx_instance *nss_register_wifi_if(uint32_t if_num, nss_wifi_callback_t wifi_callback, + nss_wifi_callback_t wifi_ext_callback, + nss_wifi_msg_callback_t event_callback, struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; + + nss_assert(nss_ctx); + nss_assert((if_num >= NSS_MAX_VIRTUAL_INTERFACES) && (if_num < NSS_MAX_NET_INTERFACES)); + + nss_info("%px: nss_register_wifi_if if_num %d wifictx %px", nss_ctx, if_num, netdev); + + nss_core_register_subsys_dp(nss_ctx, if_num, wifi_callback, wifi_ext_callback, NULL, netdev, features); + + nss_top_main.wifi_msg_callback = event_callback; + + return nss_ctx; +} + +/* + * nss_unregister_wifi_if() + * Unregister wifi with nss driver + */ +void nss_unregister_wifi_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; + + nss_assert(nss_ctx); + nss_assert((if_num >= NSS_MAX_VIRTUAL_INTERFACES) && (if_num < NSS_MAX_NET_INTERFACES)); + + nss_ctx->nss_top->wifi_msg_callback = NULL; + nss_core_unregister_subsys_dp(nss_ctx, if_num); +} + +/* + * nss_wifi_register_handler() + * Register handle for notfication messages received on wifi interface + */ +void nss_wifi_register_handler(void ) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; + + nss_assert(nss_ctx); + + 
nss_info("nss_wifi_register_handler"); + + nss_core_register_handler(nss_ctx, NSS_WIFI_INTERFACE0, nss_wifi_handler, NULL); + nss_core_register_handler(nss_ctx, NSS_WIFI_INTERFACE1, nss_wifi_handler, NULL); + nss_core_register_handler(nss_ctx, NSS_WIFI_INTERFACE2, nss_wifi_handler, NULL); + + nss_wifi_stats_dentry_create(); +} + +EXPORT_SYMBOL(nss_wifi_get_context); +EXPORT_SYMBOL(nss_wifi_tx_msg); +EXPORT_SYMBOL(nss_register_wifi_if); +EXPORT_SYMBOL(nss_unregister_wifi_if); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev.c new file mode 100644 index 000000000..bb2723906 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev.c @@ -0,0 +1,338 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_wifi_ext_vdev_stats.h" +#include "nss_wifi_ext_vdev_log.h" + +#define NSS_WIFI_EXT_VDEV_TX_TIMEOUT 3000 /* 3 seconds */ + +/* + * Private data structure + */ +static struct nss_wifi_ext_vdev_pvt { + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; +} wifi_ext_vdev_pvt; + +/* + * nss_wifi_ext_vdev_verify_if_num() + * Verify if_num passed to us. + */ +static bool nss_wifi_ext_vdev_verify_if_num(uint32_t if_num) +{ + uint32_t type = nss_dynamic_interface_get_type(nss_wifi_ext_vdev_get_ctx(), if_num); + + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_WIFI_EXT_VDEV_WDS: + case NSS_DYNAMIC_INTERFACE_TYPE_WIFI_EXT_VDEV_VLAN: + return true; + default: + return false; + } +} + +/* + * nss_wifi_ext_vdev_handler() + * Handle NSS -> HLOS messages for wifi_ext_vdev + */ +static void nss_wifi_ext_vdev_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *app_data) +{ + struct nss_wifi_ext_vdev_msg *nwevm = (struct nss_wifi_ext_vdev_msg *)ncm; + void *ctx; + + nss_wifi_ext_vdev_msg_callback_t cb; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + BUG_ON(!nss_wifi_ext_vdev_verify_if_num(ncm->interface)); + + /* + * Trace Messages + */ + nss_wifi_ext_vdev_log_rx_msg(nwevm); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_WIFI_EXT_VDEV_MSG_MAX) { + nss_warning("%px: received invalid message %d for WiFi extended VAP interface %d", nss_ctx, ncm->type, ncm->interface); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_wifi_ext_vdev_msg)) { + nss_warning("%px: wifi_ext_vdev message length is invalid: %d", nss_ctx, ncm->len); + return; + } + + /* + * Check messages + */ + switch (nwevm->cm.type) { + case NSS_WIFI_EXT_VDEV_MSG_STATS_SYNC: + nss_wifi_ext_vdev_stats_sync(nss_ctx, &nwevm->msg.stats, ncm->interface); + break; + } + + /* + * Update the callback and app_data for NOTIFY messages + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].app_data; + } + + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * callback + */ + cb = (nss_wifi_ext_vdev_msg_callback_t)ncm->cb; + ctx = (void *)ncm->app_data; + + /* + * call the callback + */ + if (!cb) { + return; + } + + cb(ctx, ncm); +} + +/* + * nss_wifi_ext_vdev_msg_init() + * Initialize wifi message. 
+ */ +void nss_wifi_ext_vdev_msg_init(struct nss_wifi_ext_vdev_msg *nim, uint32_t if_num, + uint32_t type, uint32_t len, + nss_wifi_ext_vdev_msg_callback_t cb, void *app_data) +{ + nss_cmn_msg_init(&nim->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_wifi_ext_vdev_msg_init); + +/* + * nss_wifi_ext_vdev_tx_msg() + * Transmit a wifi vdev message to NSSFW + */ +nss_tx_status_t nss_wifi_ext_vdev_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_wifi_ext_vdev_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace Messages + */ + nss_wifi_ext_vdev_log_tx_msg(msg); + + if (ncm->type >= NSS_WIFI_EXT_VDEV_MSG_MAX) { + nss_warning("%px: wifi vdev message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + BUG_ON(!nss_wifi_ext_vdev_verify_if_num(ncm->interface)); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_wifi_ext_vdev_tx_msg); + +/* + * nss_wifi_ext_vdev_callback() + * Callback to handle the completion of NSS->HLOS messages. + */ +static void nss_wifi_ext_vdev_callback(void *app_data, struct nss_cmn_msg *ncm) +{ + nss_wifi_ext_vdev_msg_callback_t callback = (nss_wifi_ext_vdev_msg_callback_t)wifi_ext_vdev_pvt.cb; + void *data = wifi_ext_vdev_pvt.app_data; + + wifi_ext_vdev_pvt.response = NSS_TX_SUCCESS; + wifi_ext_vdev_pvt.cb = NULL; + wifi_ext_vdev_pvt.app_data = NULL; + + if (ncm->response != NSS_CMN_RESPONSE_ACK) { + nss_warning("WiFi extension vap Error response %d\n", ncm->response); + wifi_ext_vdev_pvt.response = NSS_TX_FAILURE; + } + + if (callback) { + callback(data, ncm); + } + complete(&wifi_ext_vdev_pvt.complete); +} + +/* + * nss_wifi_ext_vdev_tx_msg() + * Transmit a WiFi extended virtual interface to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_wifi_ext_vdev_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_wifi_ext_vdev_msg *nwevm) +{ + nss_tx_status_t status; + int ret = 0; + + down(&wifi_ext_vdev_pvt.sem); + wifi_ext_vdev_pvt.cb = (void *)nwevm->cm.cb; + wifi_ext_vdev_pvt.app_data = (void *)nwevm->cm.app_data; + + nwevm->cm.cb = (nss_ptr_t)nss_wifi_ext_vdev_callback; + nwevm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_wifi_ext_vdev_tx_msg(nss_ctx, nwevm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: wifi_ext_vdev_tx_msg failed\n", nss_ctx); + up(&wifi_ext_vdev_pvt.sem); + return status; + } + + /* + * Wait for the acknowledgement + */ + ret = wait_for_completion_timeout(&wifi_ext_vdev_pvt.complete, msecs_to_jiffies(NSS_WIFI_EXT_VDEV_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: WiFi extended vap msg tx failed due to timeout\n", nss_ctx); + wifi_ext_vdev_pvt.response = NSS_TX_FAILURE; + } + + status = wifi_ext_vdev_pvt.response; + up(&wifi_ext_vdev_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_wifi_ext_vdev_tx_msg_sync); + +/* + * nss_wifi_ext_vdev_tx_buf + * Send data packet for vap processing + */ +nss_tx_status_t nss_wifi_ext_vdev_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *skb, uint32_t if_num) +{ + BUG_ON(!nss_wifi_ext_vdev_verify_if_num(if_num)); + + return nss_core_send_packet(nss_ctx, skb, if_num, H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_wifi_ext_vdev_tx_buf); + +/* + * nss_wifi_ext_vdev_set_next_hop() + * Set the WiFI extended vap next hop. 
+ */ +nss_tx_status_t nss_wifi_ext_vdev_set_next_hop(struct nss_ctx_instance *ctx, int if_num, int next_hop) +{ + struct nss_wifi_ext_vdev_msg *nwevm = kzalloc(sizeof(struct nss_wifi_ext_vdev_msg), GFP_KERNEL); + struct nss_wifi_ext_vdev_set_next_hop_msg *nhm = NULL; + nss_tx_status_t status; + + if (!nwevm) { + nss_warning("%px: Unable to allocate next hop message", ctx); + return NSS_TX_FAILURE; + } + + nhm = &nwevm->msg.wnhm; + + nhm->if_num = next_hop; + nss_wifi_ext_vdev_msg_init(nwevm, if_num, NSS_WIFI_EXT_VDEV_SET_NEXT_HOP, + sizeof(struct nss_wifi_ext_vdev_set_next_hop_msg), NULL, NULL); + + status = nss_wifi_ext_vdev_tx_msg(ctx, nwevm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Unable to send next hop message", ctx); + } + + kfree(nwevm); + return status; +} +EXPORT_SYMBOL(nss_wifi_ext_vdev_set_next_hop); + +/* + * nss_get_wifi_ext_vdev_ext_context() + * Return the core ctx which the feature is on + */ +struct nss_ctx_instance *nss_wifi_ext_vdev_get_ctx(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; +} +EXPORT_SYMBOL(nss_wifi_ext_vdev_get_ctx); + +/* + * nss_wifi_ext_vdev_register_if() + */ +struct nss_ctx_instance *nss_wifi_ext_vdev_register_if(uint32_t if_num, + nss_wifi_ext_vdev_data_callback_t data_callback, + nss_wifi_ext_vdev_ext_data_callback_t ext_callback, + nss_wifi_ext_vdev_msg_callback_t event_callback, + struct net_device *netdev, + uint32_t features, void *app_data) +{ + struct nss_ctx_instance *nss_ctx = nss_wifi_ext_vdev_get_ctx(); + + BUG_ON(!nss_wifi_ext_vdev_verify_if_num(if_num)); + + nss_core_register_subsys_dp(nss_ctx, if_num, data_callback, ext_callback, app_data, netdev, features); + + nss_core_register_msg_handler(nss_ctx, if_num, event_callback); + + nss_core_register_handler(nss_ctx, if_num, nss_wifi_ext_vdev_handler, app_data); + + nss_wifi_ext_vdev_stats_register(if_num, netdev); + + return nss_ctx; +} +EXPORT_SYMBOL(nss_wifi_ext_vdev_register_if); + +/* + * 
nss_wifi_ext_vdev_unregister_if() + */ +bool nss_wifi_ext_vdev_unregister_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_wifi_ext_vdev_get_ctx(); + struct net_device *netdev; + + BUG_ON(!nss_wifi_ext_vdev_verify_if_num(if_num)); + + nss_assert(nss_ctx); + + netdev = nss_cmn_get_interface_dev(nss_ctx, if_num); + if (!netdev) { + nss_warning("%px: Unable to find net device for the interface %d\n", nss_ctx, if_num); + return false; + } + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + nss_core_unregister_msg_handler(nss_ctx, if_num); + + nss_core_unregister_handler(nss_ctx, if_num); + + nss_wifi_ext_vdev_stats_unregister(if_num, netdev); + return true; +} +EXPORT_SYMBOL(nss_wifi_ext_vdev_unregister_if); + +/* + * nss_wifi_ext_vdev_register_handler() + * Register debugfs handler received on base interface + */ +void nss_wifi_ext_vdev_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = nss_wifi_ext_vdev_get_ctx(); + + nss_info("nss_wifi_ext_vdev_handler"); + sema_init(&wifi_ext_vdev_pvt.sem, 1); + init_completion(&wifi_ext_vdev_pvt.complete); + nss_core_register_handler(nss_ctx, NSS_WIFI_EXT_VDEV_INTERFACE, nss_wifi_ext_vdev_handler, NULL); + nss_wifi_ext_vdev_stats_dentry_create(); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_log.c new file mode 100644 index 000000000..7fd188860 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_log.c @@ -0,0 +1,220 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_wifi_ext_vdev_log.c + * NSS WiFi extended VAP logger file. + */ + +#include "nss_core.h" + +#define NSS_WIFI_EXT_VDEV_LOG_MESSAGE_TYPE_INDEX(type) ((type) - NSS_IF_MAX_MSG_TYPES) + +/* + * nss_wifi_ext_vdev_log_message_types_str + * NSS WiFi extended VAP message strings + */ +static int8_t *nss_wifi_ext_vdev_log_message_types_str[NSS_WIFI_EXT_VDEV_MSG_MAX] __maybe_unused = { + "WiFi Common I/F Message", + "WiFi Extendev VAP configure", + "WiFi Extendev VAP configure wds", + "WiFi Extendev VAP configure next hop", + "WiFi Extendev VAP stats", + "WiFi Extended VAP configure VLAN" +}; + +/* + * nss_wifi_ext_vdev_log_configure_msg() + * Log NSS WiFi extended vap configure message. + */ +static void nss_wifi_ext_vdev_log_configure_if_msg(struct nss_wifi_ext_vdev_msg *nwevm) +{ + struct nss_wifi_ext_vdev_configure_if_msg *cmsg __maybe_unused = &nwevm->msg.cmsg; + nss_trace("%px: WiFi extended VAP configure message \n" + "Mac address: %pM\n" + "Radio interface num: %d\n" + "Parent VAP interface num: %d\n", + cmsg, cmsg->mac_addr, cmsg->radio_ifnum, + cmsg->pvap_ifnum); + +} + +/* + * nss_wifi_ext_vdev_log_wds_msg() + * Log NSS WiFi extended vap wds message. 
+ */
+static void nss_wifi_ext_vdev_log_wds_msg(struct nss_wifi_ext_vdev_msg *nwevm)
+{
+	struct nss_wifi_ext_vdev_wds_msg *wmsg __maybe_unused = &nwevm->msg.wmsg;
+	nss_trace("%px: NSS WiFi extended VAP wds message: \n"
+		"WDS sta ID: %d\n"
+		"WDS sta macaddr: %pM\n",
+		wmsg, wmsg->wds_peer_id,
+		wmsg->mac_addr);
+}
+
+/*
+ * nss_wifi_ext_vdev_set_nxt_hop_msg()
+ *	Log the set next hop message.
+ */
+static void nss_wifi_ext_vdev_set_nxt_hop_msg(struct nss_wifi_ext_vdev_msg *nwevm)
+{
+	struct nss_wifi_ext_vdev_set_next_hop_msg *wnhm __maybe_unused = &nwevm->msg.wnhm;
+	nss_trace("%px: NSS WiFi extended vap set next hop message: \n"
+		"Next hop if num: %d\n",
+		wnhm, wnhm->if_num);
+
+}
+
+/*
+ * nss_wifi_ext_vdev_linkup_msg()
+ *	Log NSS linkup message.
+ */
+static void nss_wifi_ext_vdev_linkup_msg(struct nss_wifi_ext_vdev_msg *nwevm)
+{
+	union nss_if_msgs *if_msg __maybe_unused = &nwevm->msg.if_msg;
+	nss_trace("%px: NSS WiFi ext linkup message\n", if_msg);
+}
+
+/*
+ * nss_wifi_ext_vdev_linkdown_msg()
+ *	Log NSS linkdown message.
+ */
+static void nss_wifi_ext_vdev_linkdown_msg(struct nss_wifi_ext_vdev_msg *nwevm)
+{
+	union nss_if_msgs *if_msg __maybe_unused = &nwevm->msg.if_msg;
+	nss_trace("%px: NSS WiFi ext linkdown message\n", if_msg);
+}
+
+/*
+ * nss_wifi_ext_vdev_macaddr_set_msg()
+ *	Log the set/change MAC address message.
+ */
+static void nss_wifi_ext_vdev_macaddr_set_msg(struct nss_wifi_ext_vdev_msg *nwevm)
+{
+	union nss_if_msgs *if_msg = &nwevm->msg.if_msg;
+	struct nss_if_mac_address_set *nimas __maybe_unused = &if_msg->mac_address_set;
+	nss_trace("%px: NSS WiFi ext change mac addr: \n"
+		"mac addr %pM\n",
+		nimas, nimas->mac_addr);
+}
+
+/*
+ * nss_wifi_ext_vdev_log_vlan_msg()
+ *	Log the VLAN configure message.
+ */ +static void nss_wifi_ext_vdev_log_vlan_msg(struct nss_wifi_ext_vdev_msg *nwevm) +{ + struct nss_wifi_ext_vdev_vlan_msg *vmsg __maybe_unused = &nwevm->msg.vmsg; + nss_trace("%px: NSS WiFi extended VAP vlan message: \n" + "vlan ID %hu\n", + vmsg, vmsg->vlan_id); +} + +/* + * nss_wifi_ext_vdev_log_verbose() + * Log message contents. + */ +static void nss_wifi_ext_vdev_log_verbose(struct nss_wifi_ext_vdev_msg *nwevm) +{ + switch (nwevm->cm.type) { + case NSS_WIFI_EXT_VDEV_MSG_CONFIGURE_IF: + nss_wifi_ext_vdev_log_configure_if_msg(nwevm); + break; + + case NSS_WIFI_EXT_VDEV_MSG_CONFIGURE_WDS : + nss_wifi_ext_vdev_log_wds_msg(nwevm); + break; + + case NSS_WIFI_EXT_VDEV_SET_NEXT_HOP: + nss_wifi_ext_vdev_set_nxt_hop_msg(nwevm); + break; + + case NSS_WIFI_EXT_VDEV_MSG_STATS_SYNC: + break; + + case NSS_IF_OPEN: + nss_wifi_ext_vdev_linkup_msg(nwevm); + break; + + case NSS_IF_CLOSE: + nss_wifi_ext_vdev_linkdown_msg(nwevm); + break; + + case NSS_IF_MAC_ADDR_SET: + nss_wifi_ext_vdev_macaddr_set_msg(nwevm); + break; + + case NSS_WIFI_EXT_VDEV_MSG_CONFIGURE_VLAN: + nss_wifi_ext_vdev_log_vlan_msg(nwevm); + break; + + default: + nss_trace("%px: Invalid message type\n", nwevm); + break; + } +} + +/* + * nss_wifi_ext_vdev_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_wifi_ext_vdev_log_tx_msg(struct nss_wifi_ext_vdev_msg *nwevm) +{ + uint32_t type_idx = 0; + if (nwevm->cm.type >= NSS_WIFI_EXT_VDEV_MSG_MAX) { + nss_warning("%px: Invalid message type\n", nwevm); + return; + } + + type_idx = (nwevm->cm.type > NSS_IF_MAX_MSG_TYPES) ? + (NSS_WIFI_EXT_VDEV_LOG_MESSAGE_TYPE_INDEX(nwevm->cm.type)) : 0; + + nss_info("%px: type[%d]:%s\n", nwevm, nwevm->cm.type, nss_wifi_ext_vdev_log_message_types_str[type_idx]); + nss_wifi_ext_vdev_log_verbose(nwevm); +} + +/* + * nss_wifi_ext_vdev_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_wifi_ext_vdev_log_rx_msg(struct nss_wifi_ext_vdev_msg *nwevm) +{ + uint32_t type_idx = 0; + if (nwevm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nwevm); + return; + } + + type_idx = (nwevm->cm.type > NSS_IF_MAX_MSG_TYPES) ? + (NSS_WIFI_EXT_VDEV_LOG_MESSAGE_TYPE_INDEX(nwevm->cm.type)) : 0; + + if (nwevm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nwevm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nwevm, nwevm->cm.type, + nss_wifi_ext_vdev_log_message_types_str[type_idx], + nwevm->cm.response, nss_cmn_response_str[nwevm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + nwevm, nwevm->cm.type, nss_wifi_ext_vdev_log_message_types_str[type_idx], + nwevm->cm.response, nss_cmn_response_str[nwevm->cm.response]); + +verbose: + nss_wifi_ext_vdev_log_verbose(nwevm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_log.h new file mode 100644 index 000000000..a5c851055 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_log.h @@ -0,0 +1,34 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_WIFI_EXT_VDEV_LOG_H +#define __NSS_WIFI_EXT_VDEV_LOG_H + +/* + * nss_wifi_ext_vdev_log_tx_msg + * Logs a wifi_ext_vdev message that is sent to the NSS firmware. + */ +void nss_wifi_ext_vdev_log_tx_msg(struct nss_wifi_ext_vdev_msg *nwevm); + +/* + * nss_wifi_ext_vdev_log_rx_msg + * Logs a wifi_ext_vdev message that is received from the NSS firmware. + */ +void nss_wifi_ext_vdev_log_rx_msg(struct nss_wifi_ext_vdev_msg *nwevm); + +#endif /* __NSS_WIFI_EXT_VDEV_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_stats.c new file mode 100644 index 000000000..493ca84b0 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_stats.c @@ -0,0 +1,234 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+#include "nss_tx_rx_common.h"
+#include "nss_wifi_ext_vdev_stats.h"
+
+DEFINE_SPINLOCK(nss_wifi_ext_vdev_debug_lock);
+struct nss_wifi_ext_vdev_debug nss_wifi_ext_vdev_debug_stats[NSS_WIFI_EXT_VDEV_MAX];
+
+/*
+ * nss_wifi_ext_vdev_debug_str
+ *	WiFi extended VAP statistics strings.
+ */
+struct nss_stats_info nss_wifi_ext_vdev_debug_str[NSS_WIFI_EXT_VDEV_STATS_MAX] = {
+	{"node_rx_pkts"		, NSS_STATS_TYPE_COMMON},
+	{"node_rx_bytes"	, NSS_STATS_TYPE_COMMON},
+	{"node_tx_pkts"		, NSS_STATS_TYPE_COMMON},
+	{"node_tx_bytes"	, NSS_STATS_TYPE_COMMON},
+	{"node_rx_dropped"	, NSS_STATS_TYPE_DROP},
+	{"mc_count"		, NSS_STATS_TYPE_SPECIAL},
+	{"uc_count"		, NSS_STATS_TYPE_SPECIAL},
+	{"nxt_hop_drop"		, NSS_STATS_TYPE_DROP},
+};
+
+/*
+ * WiFi extended vdev statistics APIs
+ */
+
+/*
+ * nss_wifi_ext_vdev_stats_register()
+ *	Register debug statistics for WiFi extended VAP.
+ */
+void nss_wifi_ext_vdev_stats_register(uint32_t if_num, struct net_device *netdev)
+{
+	int i;
+
+	spin_lock_bh(&nss_wifi_ext_vdev_debug_lock);
+	for (i = 0; i < NSS_WIFI_EXT_VDEV_MAX; i++) {
+		if (!nss_wifi_ext_vdev_debug_stats[i].valid) {
+			nss_wifi_ext_vdev_debug_stats[i].valid = true;
+			nss_wifi_ext_vdev_debug_stats[i].if_num = if_num;
+			nss_wifi_ext_vdev_debug_stats[i].if_index = netdev->ifindex;
+			break;
+		}
+	}
+
+	spin_unlock_bh(&nss_wifi_ext_vdev_debug_lock);
+}
+
+/*
+ * nss_wifi_ext_vdev_stats_unregister()
+ *	Unregister debug statistics for WiFi extended VAP.
+ */
+void nss_wifi_ext_vdev_stats_unregister(uint32_t if_num, struct net_device *netdev)
+{
+	int i;
+
+	spin_lock_bh(&nss_wifi_ext_vdev_debug_lock);
+	for (i = 0; i < NSS_WIFI_EXT_VDEV_MAX; i++) {
+		if (nss_wifi_ext_vdev_debug_stats[i].if_num == if_num) {
+			memset(&nss_wifi_ext_vdev_debug_stats[i], 0,
+				sizeof(struct nss_wifi_ext_vdev_debug));
+			break;
+		}
+	}
+	spin_unlock_bh(&nss_wifi_ext_vdev_debug_lock);
+}
+
+/*
+ * nss_wifi_ext_vdev_stats_sync()
+ *	Sync function for WiFi extended VAP statistics.
+ */
+void nss_wifi_ext_vdev_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_wifi_ext_vdev_stats *stats_msg,
+					uint16_t if_num)
+{
+	int i;
+	struct nss_wifi_ext_vdev_debug *s = NULL;
+
+	NSS_VERIFY_CTX_MAGIC(nss_ctx);
+
+	spin_lock_bh(&nss_wifi_ext_vdev_debug_lock);
+	for (i = 0; i < NSS_WIFI_EXT_VDEV_MAX; i++) {
+		if (nss_wifi_ext_vdev_debug_stats[i].if_num == if_num) {
+			s = &nss_wifi_ext_vdev_debug_stats[i];
+			break;
+		}
+	}
+
+	if (!s) {
+		spin_unlock_bh(&nss_wifi_ext_vdev_debug_lock);
+		nss_warning("%px: Interface:%u not found", nss_ctx, if_num);
+		return;
+	}
+
+	s->stats[NSS_WIFI_EXT_VDEV_STATS_NODE_RX_PKTS ] += stats_msg->node_stats.rx_packets;
+	s->stats[NSS_WIFI_EXT_VDEV_STATS_NODE_RX_BYTES] += stats_msg->node_stats.rx_bytes;
+	s->stats[NSS_WIFI_EXT_VDEV_STATS_NODE_TX_PKTS] += stats_msg->node_stats.tx_packets;
+	s->stats[NSS_WIFI_EXT_VDEV_STATS_NODE_TX_BYTES] += stats_msg->node_stats.tx_bytes;
+	for (i = 0; i < NSS_MAX_NUM_PRI; i++) {
+		s->stats[NSS_WIFI_EXT_VDEV_STATS_NODE_TOTAL_DROPPED] += stats_msg->node_stats.rx_dropped[i];
+	}
+	s->stats[NSS_WIFI_EXT_VDEV_STATS_MULTICAST_COUNT] += stats_msg->mc_count;
+	s->stats[NSS_WIFI_EXT_VDEV_STATS_UNICAST_COUNT] += stats_msg->node_stats.rx_packets - stats_msg->mc_count;
+	s->stats[NSS_WIFI_EXT_VDEV_STATS_NEXT_HOP_DROP_COUNT] += stats_msg->nxt_hop_drp;
+	spin_unlock_bh(&nss_wifi_ext_vdev_debug_lock);
+}
+
+/*
+ * nss_wifi_ext_vdev_debug_get()
+ *	Get WiFi extended VAP debug statistics.
+ */ +static void nss_wifi_ext_vdev_debug_get(struct nss_wifi_ext_vdev_debug *stats) +{ + int i; + + if (!stats) { + nss_warning("No memory to copy WiFi extended VAP stats"); + return; + } + + spin_lock_bh(&nss_wifi_ext_vdev_debug_lock); + for (i = 0; i < NSS_WIFI_EXT_VDEV_MAX; i++) { + if (nss_wifi_ext_vdev_debug_stats[i].valid) { + memcpy(stats, &nss_wifi_ext_vdev_debug_stats[i], + sizeof(struct nss_wifi_ext_vdev_debug)); + stats++; + } + } + spin_unlock_bh(&nss_wifi_ext_vdev_debug_lock); +} + +/* + * nss_wifi_ext_vdev_read() + * Read WiFi extended VAP statistics + */ +static ssize_t nss_wifi_ext_vdev_stats_read(struct file *fp, char __user *ubuf, + size_t sz, loff_t *ppos) +{ + uint32_t max_output_lines = 2 /* header and footer of the interface stats*/ + + (NSS_WIFI_EXT_VDEV_STATS_MAX * (NSS_WIFI_EXT_VDEV_MAX + 2)) /* Interface stats */ + + 2; + + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + size_t bytes_read = 0; + struct net_device *dev; + int id; + struct nss_wifi_ext_vdev_debug *wifi_ext_vdev_stats = NULL; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + wifi_ext_vdev_stats = kzalloc((sizeof(struct nss_wifi_ext_vdev_debug) * NSS_WIFI_EXT_VDEV_MAX), GFP_KERNEL); + if (unlikely(wifi_ext_vdev_stats == NULL)) { + nss_warning("Could not allocate memory for populating stats"); + kfree(lbuf); + return 0; + } + + /* + * Get all stats + */ + nss_wifi_ext_vdev_debug_get(wifi_ext_vdev_stats); + + /* + * WiFi extended vap stats. + */ + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "WiFi extended VAP stats", NSS_STATS_SINGLE_CORE); + + for (id = 0; id < NSS_WIFI_EXT_VDEV_MAX; id++) { + if (!wifi_ext_vdev_stats[id].valid) { + continue; + } + + dev = dev_get_by_index(&init_net, wifi_ext_vdev_stats[id].if_index); + if (likely(dev)) { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "%d. 
nss interface id=%d, netdevice=%s\n", + id, wifi_ext_vdev_stats[id].if_num, + dev->name); + dev_put(dev); + } else { + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, + "%d. nss interface id=%d\n", id, + wifi_ext_vdev_stats[id].if_num); + } + + size_wr += nss_stats_print("vdev", "debug", id + , nss_wifi_ext_vdev_debug_str + , wifi_ext_vdev_stats[id].stats + , NSS_WIFI_EXT_VDEV_STATS_MAX + , lbuf, size_wr, size_al); + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + + kfree(wifi_ext_vdev_stats); + kfree(lbuf); + return bytes_read; +} + +/* + * nss_wifi_ext_vdev_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(wifi_ext_vdev); + +/* + * nss_wifi_ext_vdev_dentry_create() + * Create wifi extension vap statistics debug entry. + */ +void nss_wifi_ext_vdev_stats_dentry_create(void) +{ + nss_stats_create_dentry("wifi_ext_vdev", &nss_wifi_ext_vdev_stats_ops); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_stats.h new file mode 100644 index 000000000..589e2c09e --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_ext_vdev_stats.h @@ -0,0 +1,60 @@ +/* + ****************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_WIFI_EXT_VDEV_STATS_H +#define __NSS_WIFI_EXT_VDEV_STATS_H + +/* + * WiFi extendev vap debug statistic counters. + */ +enum nss_wifi_ext_vdev_stats_types { + NSS_WIFI_EXT_VDEV_STATS_NODE_RX_PKTS, + NSS_WIFI_EXT_VDEV_STATS_NODE_RX_BYTES, + NSS_WIFI_EXT_VDEV_STATS_NODE_TX_PKTS, + NSS_WIFI_EXT_VDEV_STATS_NODE_TX_BYTES, + NSS_WIFI_EXT_VDEV_STATS_NODE_TOTAL_DROPPED, + NSS_WIFI_EXT_VDEV_STATS_MULTICAST_COUNT, + NSS_WIFI_EXT_VDEV_STATS_UNICAST_COUNT, + NSS_WIFI_EXT_VDEV_STATS_NEXT_HOP_DROP_COUNT, + NSS_WIFI_EXT_VDEV_STATS_MAX, +}; + +/* + * WiFi extendev vap debug statistics. + */ +struct nss_wifi_ext_vdev_debug { + uint64_t stats[NSS_WIFI_EXT_VDEV_STATS_MAX]; + int32_t if_index; /**< Netdevice's ifindex. */ + uint32_t if_num; /**< NSS interface number. */ + bool valid; /**< Is node valid ? */ +}; + +/* + * Data structures to store WiFi extended VAP debug stats. 
+ */ +extern struct nss_wifi_ext_vdev_debug nss_wifi_ext_vdev_debug_stats[NSS_WIFI_EXT_VDEV_MAX]; + +/* + * WiFi extendev vap statistics APIs + */ +extern void nss_wifi_ext_vdev_stats_register(uint32_t if_num, struct net_device *netdev); +extern void nss_wifi_ext_vdev_stats_unregister(uint32_t if_num, struct net_device *netdev); +extern void nss_wifi_ext_vdev_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_wifi_ext_vdev_stats *stats_msg, uint16_t if_num); +extern void nss_wifi_ext_vdev_stats_dentry_create(void); + +#endif /* __NSS_WIFI_EXT_VDEV_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_log.c new file mode 100644 index 000000000..4d6b4c52d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_log.c @@ -0,0 +1,806 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_wifi_log.c + * NSS WIFI logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_wifi_log_message_types_str + * WIFI message strings + */ +static int8_t *nss_wifi_log_message_types_str[NSS_WIFI_MAX_MSG] __maybe_unused = { + "WIFI INIT MSG", + "WIFI POST RECV MSG", + "WIFI HTT INIT MSG", + "WIFI TX INIT MSG", + "WIFI RAW SEND MSG", + "WIFI MGMT SEND MSG", + "WIFI WDS PEER ADD MSG", + "WIFI WDS PEER DEL MSG", + "WIFI STOP MSG", + "WIFI RESET MSG", + "WIFI STATS MSG", + "WIFI PEER FREELIST APPEND MSG", + "WIFI RX REORDER ARRAY FREELIST APPEND MSG", + "WIFI SEND PEER MEMORY REQUEST MSG", + "WIFI SEND RRA MEMORY REQUEST MSG", + "WIFI FW STATS MSG", + "WIFI MONITOR FILTER SET MSG", + "WIFI PEER BS STATE MSG", + "WIFI MSDU TTL SET MSG", + "WIFI RX VOW EXTSTATS SET MSG", + "WIFI PKTLOG CFG MSG", + "WIFI ENABLE PERPKT TXSTATS MSG", + "WIFI IGMP MLD TOS OVERRIDE MSG", + "WIFI OL STATS CFG MSG", + "WIFI OL STATS MSG", + "WIFI TX QUEUE CFG MSG", + "WIFI TX MIN THRESHOLD CFG MSG", + "WIFI DBDC PROCESS ENABLE MSG", + "WIFI PRIMARY RADIO SET MSG", + "WIFI FORCE CLIENT MCAST TRAFFIC SET MSG", + "WIFI STORE OTHER PDEV STAVAP MSG", + "WIFI STA KICKOUT MSG", + "WIFI WNM PEER RX ACTIVITY MSG", + "WIFI PEER STATS MSG", + "WIFI WDS VENDOR MSG", + "WIFI TX CAPTURE SET MSG", + "WIFI ALWAYS PRIMARY SET MSG", + "WIFI FLUSH HTT CMD MSG", + "WIFI CMD MSG", + "WIFI ENABLE OL STATSV2 MSG", + "WIFI OL PEER TIME MSG", +}; + +/* + * nss_wifi_log_error_response_types_str + * Strings for error types for WIFI messages + */ +static int8_t *nss_wifi_log_error_response_types_str[NSS_WIFI_EMSG_MAX] __maybe_unused = { + "WIFI NO ERROR", + "WIFI UNKNOWN MSG", + "WIFI MGMT DLEN", + "WIFI MGMT SEND", + "WIFI CE INIT FAIL", + "WIFI PDEV INIT FAIL", + "WIFI HTT INIT FAIL", + "WIFI PEER ADD", + "WIFI WIFI START FAIL", + "WIFI STATE NOT RESET", + "WIFI STATE NOT INIT DONE", + "WIFI STATE NULL CE HANDLE", + "WIFI STATE NOT CE READY", + "WIFI STATE NOT HTT READY", + "WIFI FW STATS DLEN", + "WIFI FW STATS SEND", + "WIFI STATE TX INIT FAILED", + "WIFI 
IGMP MLD TOS OVERRIDE CFG", + "WIFI PDEV INVALID", + "WIFI OTHER PDEV STAVAP INVALID", + "WIFI HTT SEND FAIL", + "WIFI CE RING INIT", + "WIFI NOTIFY CB", + "WIFI PEERID INVALID", + "WIFI PEER INVALID", + "WIFI UNKNOWN CMD" +}; + +/* + * nss_wifi_log_init_msg() + * Log NSS WIFI Init message. + */ +static void nss_wifi_log_init_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_init_msg *nwim __maybe_unused = &ncm->msg.initmsg; + + nss_trace("%px: NSS WIFI Init Message:\n" + "WIFI Radio ID: %d\n" + "WIFI PCI Memory Address: %x\n" + "WIFI Target Type: %d\n" + "WIFI MU MIMO Enhancement Enable Flag: %d\n" + "WIFI Transmit Copy Engine Source Ring:\n" + "\tNumber of Entries: %d\n" + "\tNumber of Entries Mask: %x\n" + "\tInitial Software Index: %d\n" + "\tInitial Write Index: %d\n" + "\tInitial Hardware Index: %d\n" + "\tPhysical Address: %x\n" + "\tVirtual Address: %x\n" + "WIFI Transmit Copy Engine Dest Ring:\n" + "\tNumber of Entries: %d\n" + "\tNumber of Entries Mask: %x\n" + "\tInitial Software Index: %d\n" + "\tInitial Write Index: %d\n" + "\tInitial Hardware Index: %d\n" + "\tPhysical Address: %x\n" + "\tVirtual Address: %x\n" + "WIFI Transmit Control Address of PCIe Bar: %x\n" + "WIFI Receive Copy Engine Source Ring:\n" + "\tNumber of Entries: %d\n" + "\tNumber of Entries Mask: %x\n" + "\tInitial Software Index: %d\n" + "\tInitial Write Index: %d\n" + "\tInitial Hardware Index: %d\n" + "\tPhysical Address: %x\n" + "\tVirtual Address: %x\n" + "WIFI Receive Copy Engine Dest Ring:\n" + "\tNumber of Entries: %d\n" + "\tNumber of Entries Mask: %x\n" + "\tInitial Software Index: %d\n" + "\tInitial Write Index: %d\n" + "\tInitial Hardware Index: %d\n" + "\tPhysical Address: %x\n" + "\tVirtual Address: %x\n" + "WIFI Receive Control Address of PCIe Bar: %x\n" + "WIFI Bypass Network Process: %d", + nwim, nwim->radio_id, + nwim->pci_mem, nwim->target_type, + nwim->mu_mimo_enhancement_en, nwim->ce_tx_state.src_ring.nentries, + nwim->ce_tx_state.src_ring.nentries_mask, 
nwim->ce_tx_state.src_ring.sw_index, + nwim->ce_tx_state.src_ring.write_index, nwim->ce_tx_state.src_ring.hw_index, + nwim->ce_tx_state.src_ring.base_addr_CE_space, nwim->ce_tx_state.src_ring.base_addr_owner_space, + nwim->ce_tx_state.dest_ring.nentries, nwim->ce_tx_state.dest_ring.nentries_mask, + nwim->ce_tx_state.dest_ring.sw_index, nwim->ce_tx_state.dest_ring.write_index, + nwim->ce_tx_state.dest_ring.hw_index, nwim->ce_tx_state.dest_ring.base_addr_CE_space, + nwim->ce_tx_state.dest_ring.base_addr_owner_space, nwim->ce_tx_state.ctrl_addr, + nwim->ce_rx_state.src_ring.nentries, nwim->ce_rx_state.src_ring.nentries_mask, + nwim->ce_rx_state.src_ring.sw_index, nwim->ce_rx_state.src_ring.write_index, + nwim->ce_rx_state.src_ring.hw_index, nwim->ce_rx_state.src_ring.base_addr_CE_space, + nwim->ce_rx_state.src_ring.base_addr_owner_space, nwim->ce_rx_state.dest_ring.nentries, + nwim->ce_rx_state.dest_ring.nentries_mask, nwim->ce_rx_state.dest_ring.sw_index, + nwim->ce_rx_state.dest_ring.write_index, nwim->ce_rx_state.dest_ring.hw_index, + nwim->ce_rx_state.dest_ring.base_addr_CE_space, nwim->ce_rx_state.dest_ring.base_addr_owner_space, + nwim->ce_rx_state.ctrl_addr, nwim->bypass_nw_process); +} + +/* + * nss_wifi_log_stop_msg() + * Log NSS WIFI Init message. + */ +static void nss_wifi_log_stop_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_stop_msg *nwsm __maybe_unused = &ncm->msg.stopmsg; + nss_trace("%px: NSS WIFI Init Message:\n" + "WIFI Radio ID: %d\n", + nwsm, nwsm->radio_id); +} + +/* + * nss_wifi_log_reset_msg() + * Log NSS WIFI Init message. + */ +static void nss_wifi_log_reset_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_reset_msg *nwrm __maybe_unused = &ncm->msg.resetmsg; + nss_trace("%px: NSS WIFI Init Message:\n" + "WIFI Radio ID: %d\n", + nwrm, nwrm->radio_id); +} + +/* + * nss_wifi_log_htt_init_msg() + * Log NSS WIFI HTT Init message. 
+ */ +static void nss_wifi_log_htt_init_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_htt_init_msg *nwim __maybe_unused = &ncm->msg.httinitmsg; + nss_trace("%px: NSS WIFI HTT Init Message:\n" + "WIFI Radio ID: %d\n" + "WIFI Ring Size: %d\n" + "WIFI Fill Level: %d\n" + "WIFI MAC Hardware Ring Phy Address: %x\n" + "WIFI MAC Hardware Ring Virtual Address: %x\n" + "WIFI Hardware Ring Index Phy Address: %x\n" + "WIFI Hardware Ring Index Virtual Address: %x\n", + nwim, nwim->radio_id, + nwim->ringsize, nwim->fill_level, + nwim->paddrs_ringptr, nwim->paddrs_ringpaddr, + nwim->alloc_idx_paddr, nwim->alloc_idx_vaddr); +} + +/* + * nss_wifi_log_tx_init_msg() + * Log NSS TX HTT Init message. + */ +static void nss_wifi_log_tx_init_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_tx_init_msg *nwim __maybe_unused = &ncm->msg.pdevtxinitmsg; + nss_trace("%px: NSS WIFI HTT Init Message:\n" + "WIFI Radio ID: %d\n" + "WIFI Number of Descriptor Pools Allocated: %d\n" + "WIFI TX Descriptor Array: %x\n" + "WIFI MAC extenstion descriptor Address: %x\n" + "WIFI WLAN MAC extenstion descriptor size: %d\n" + "WIFI HTT Tx descriptor memory start virtual address: %x\n" + "WIFI HTT Tx descriptor memory base virtual address: %x\n" + "WIFI HTT Tx descriptor memory offset: %x\n" + "WIFI Firmware shared TID map: %x\n", + nwim, nwim->radio_id, + nwim->desc_pool_size, nwim->tx_desc_array, + nwim->wlanextdesc_addr, nwim->wlanextdesc_size, + nwim->htt_tx_desc_base_vaddr, nwim->htt_tx_desc_base_paddr, + nwim->htt_tx_desc_offset, nwim->pmap_addr); +} + +/* + * nss_wifi_log_rawsend_msg() + * Log NSS WIFI RAW Send message. 
+ */ +static void nss_wifi_log_rawsend_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_rawsend_msg *nwrm __maybe_unused = &ncm->msg.rawmsg; + nss_trace("%px: NSS WIFI RAW Send Message:\n" + "WIFI Radio ID: %d\n" + "WIFI Size of Raw Data: %d\n" + "WIFI Raw Data: %px", + nwrm, nwrm->radio_id, + nwrm->len, nwrm->array); +} + +/* + * nss_wifi_log_mgmtsend_msg() + * Log NSS WIFI Management Send message. + */ +static void nss_wifi_log_mgmtsend_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_mgmtsend_msg *nwmm __maybe_unused = &ncm->msg.mgmtmsg; + nss_trace("%px: NSS WIFI Management Send Message:\n" + "WIFI Descriptor ID: %d\n" + "WIFI Size of Management Data: %d\n" + "WIFI Management Data: %px", + nwmm, nwmm->desc_id, + nwmm->len, nwmm->array); +} + +/* + * nss_wifi_log_wds_peer_msg() + * Log NSS WIFI WDS Peer message. + */ +static void nss_wifi_log_wds_peer_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_wds_peer_msg *nwmm __maybe_unused = &ncm->msg.pdevwdspeermsg; + nss_trace("%px: NSS WIFI WDS Peer Message:\n" + "WIFI Dest MAC: %pM\n" + "WIFI Peer MAC: %pM\n", + nwmm, nwmm->dest_mac, + nwmm->peer_mac); +} + +/* + * nss_wifi_log_peer_freelist_append_msg() + * Log NSS WIFI Create/Append Freelist message + */ +static void nss_wifi_log_peer_freelist_append_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_peer_freelist_append_msg *nwpm __maybe_unused = &ncm->msg.peer_freelist_append; + nss_trace("%px: NSS WIFI Create/Append Freelist Message:\n" + "WIFI Starting Address of Peer Freelist Pool: %x\n" + "WIFI Length of freelist pool: %d\n" + "WIFI Number of Peers supported in freelist pool: %d\n", + nwpm, nwpm->addr, + nwpm->length, nwpm->num_peers); +} + +/* + * nss_wifi_log_rx_reorder_array_freelist_append_msg() + * Log NSS WIFI RX Reorder Array Freelist message + */ +static void nss_wifi_log_rx_reorder_array_freelist_append_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_rx_reorder_array_freelist_append_msg *nwpm __maybe_unused = 
&ncm->msg.rx_reorder_array_freelist_append; + nss_trace("%px: NSS WIFI RX Reorder Array Freelist Message:\n" + "WIFI Starting Address of TIDQ Freelist Pool: %x\n" + "WIFI Length of TIDQ freelist pool: %d\n" + "WIFI Number of Rx reorder array entries supported in freelist pool: %d\n", + nwpm, nwpm->addr, + nwpm->length, nwpm->num_rra); +} + +/* + * nss_wifi_log_set_filter_msg() + * Log NSS WIFI Set Filter message + */ +static void nss_wifi_log_set_filter_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_monitor_set_filter_msg *nwfm __maybe_unused = &ncm->msg.monitor_filter_msg; + nss_trace("%px: NSS WIFI Set Filter Message:\n" + "WIFI Filter Type: %dn", + nwfm, nwfm->filter_type); +} + +/* + * nss_wifi_log_peer_activity_msg() + * Log NSS WIFI Get Active Peer for Radio message + */ +static void nss_wifi_log_peer_activity_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_bs_peer_activity *nwpm __maybe_unused = &ncm->msg.peer_activity; + nss_trace("%px: NSS WIFI Get Active Peer Message:\n" + "WIFI Number of Entries in Peer ID Array: %d\n" + "WIFI PEER ID: %d\n", + nwpm, nwpm->nentries, + nwpm->peer_id[0]); +} + +/* + * nss_wifi_rx_vow_extstats_set_msg() + * Log NSS WIFI VoW Extended Statistics Set Message. + */ +static void nss_wifi_log_rx_vow_extstats_set_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_rx_vow_extstats_set_msg *nwpm __maybe_unused = &ncm->msg.vow_extstats_msg; + nss_trace("%px: NSS WIFI VoW Extended Statistics Set Message:\n" + "WIFI VoW Extended Statistics Enable:: %d\n", + nwpm, nwpm->vow_extstats_en); +} + +/* + * nss_wifi_log_pktlog_cfg_msg() + * Log NSS WIFI Packet Log Configuration Message. 
+ */ +static void nss_wifi_log_pktlog_cfg_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_pktlog_cfg_msg *nwpm __maybe_unused = &ncm->msg.pcm_msg; + nss_trace("%px: NSS WIFI Packet Log Configuration Message:\n" + "WIFI Packet Log Enable: %d\n" + "WIFI PAcket Log buffer Size: %d\n" + "WIFI Size of packet log header: %d\n" + "WIFI Offset for the MSDU ID: %d\n", + nwpm, nwpm->enable, + nwpm->bufsize, nwpm->hdrsize, + nwpm->msdu_id_offset); +} + +/* + * nss_wifi_log_enable_perpkt_txstats_msg() + * Log NSS WIFI Enable TX Stats Message. + */ +static void nss_wifi_log_enable_perpkt_txstats_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_enable_perpkt_txstats_msg *nwpm __maybe_unused = &ncm->msg.ept_msg; + nss_trace("%px: NSS WIFI Enable TX Stats Message:\n" + "WIFI TX Stats Enable Flag: %d\n", + nwpm, nwpm->perpkt_txstats_flag); +} + +/* + * nss_wifi_log_override_tos_msg() + * Log NSS WIFI Override TOS Message. + */ +static void nss_wifi_log_override_tos_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_igmp_mld_override_tos_msg *nwpm __maybe_unused = &ncm->msg.wigmpmldtm_msg; + nss_trace("%px: NSS WIFI Override TOS Message:\n" + "WIFI enable TID override Flag: %d\n" + "WIFI Value of TID to be overriden: %d\n", + nwpm, nwpm->igmp_mld_ovride_tid_en, + nwpm->igmp_mld_ovride_tid_val); +} + +/* + * nss_wifi_log_ol_stats_cfg_msg() + * Log NSS WIFI Offload Stats Config Message. + */ +static void nss_wifi_log_ol_stats_cfg_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_ol_stats_cfg_msg *nwpm __maybe_unused = &ncm->msg.scm_msg; + nss_trace("%px: NSS WIFI Enable/Disable Offload Stats Message:\n" + "WIFI enable/disable offload stats config: %d\n", + nwpm, nwpm->stats_cfg); +} + +/* + * nss_wifi_log_tx_queue_cfg_msg() + * Log NSS WIFI TX Queue Configuration message. 
+ */ +static void nss_wifi_log_tx_queue_cfg_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_tx_queue_cfg_msg *nwpm __maybe_unused = &ncm->msg.wtxqcm; + nss_trace("%px: NSS WIFI TX Queue Config Message:\n" + "WIFI TX Queue Size: %d\n" + "WIFI TX Queue Range: %d\n", + nwpm, nwpm->size, nwpm->range); +} + +/* + * nss_wifi_log_tx_min_threshold_cfg() + * Log NSS WIFI TX Queue Min Threshold Configuration message. + */ +static void nss_wifi_log_tx_min_threshold_cfg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_tx_min_threshold_cfg_msg *nwpm __maybe_unused = &ncm->msg.wtx_min_threshold_cm; + nss_trace("%px: NSS WIFI TX Queue Min Threshold Config Message:\n" + "WIFI TX Queue Min Threshold Value: %d\n", + nwpm, nwpm->min_threshold); +} + +/* + * nss_wifi_log_dbdc_process_enable_msg() + * Log NSS WIFI DBDC repeater process configuration. + */ +static void nss_wifi_log_dbdc_process_enable_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_dbdc_process_enable_msg *nwpm __maybe_unused = &ncm->msg.dbdcpe_msg; + nss_trace("%px: NSS WIFI DBDC repeater process configuration:\n" + "WIFI DBDC Process Enable Flag: %d\n", + nwpm, nwpm->dbdc_process_enable); +} + +/* + * nss_wifi_log_primary_radio_set_msg() + * Log NSS WIFI Primary Radio Set message. 
+ */ +static void nss_wifi_log_primary_radio_set_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_primary_radio_set_msg *nwpm __maybe_unused = &ncm->msg.wprs_msg; + nss_trace("%px: NSS WIFI Primary Radio Set Message:\n" + "WIFI Current Radio as Primary Radio Enable/Disable Flag: %d\n", + nwpm, nwpm->flag); +} + +/* + * nss_wifi_log_force_client_mcast_traffic_set_msg() + * Log NSS WIFI Force Multicat Traffic for Radio + */ +static void nss_wifi_log_force_client_mcast_traffic_set_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_force_client_mcast_traffic_set_msg *nwpm __maybe_unused = &ncm->msg.wfcmts_msg; + nss_trace("%px: NSS WIFI Force Multicat Traffic for Radio Message:\n" + "WIFI Radio Multicast Traffic Flag: %d\n", + nwpm, nwpm->flag); +} + +/* + * nss_wifi_log_store_other_pdev_stavap_msg() + * Log NSS WIFI Store Other Radio Station VAP Message. + */ +static void nss_wifi_log_store_other_pdev_stavap_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_store_other_pdev_stavap_msg *nwpm __maybe_unused = &ncm->msg.wsops_msg; + nss_trace("%px: NSS WIFI Store Other Radio Station VAP Message:\n" + "WIFI Station VAP Interface Number: %d\n", + nwpm, nwpm->stavap_ifnum); +} + +/* + * nss_wifi_log_sta_kickout_msg() + * Log NSS WIFI Station Kickout Message. + */ +static void nss_wifi_log_sta_kickout_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_sta_kickout_msg *nwpm __maybe_unused = &ncm->msg.sta_kickout_msg; + nss_trace("%px: NSS WIFI Station Kickout Message:\n" + "WIFI PEER ID: %d\n", + nwpm, nwpm->peer_id); +} + +/* + * nss_wifi_log_wnm_peer_rx_activity() + * Log NSS WIFI RX Active State Information of Peer. 
+ */ +static void nss_wifi_log_wnm_peer_rx_activity(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_wnm_peer_rx_activity_msg *nwpm __maybe_unused = &ncm->msg.wprm; + nss_trace("%px: NSS WIFI RX Active State Information of Peer:\n" + "WIFI Peer ID: %px\n" + "WIFI Number of Entries: %d\n", + nwpm, nwpm->peer_id, nwpm->nentries); +} + +/* + * nss_wifi_log_wds_extn_peer_cfg_msg() + * Log NSS WIFI WDS Extension Enabled Configuraion Message. + */ +static void nss_wifi_log_wds_extn_peer_cfg_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_wds_extn_peer_cfg_msg *nwpm __maybe_unused = &ncm->msg.wpeercfg; + nss_trace("%px: NSS WIFI Extension Enabled Configuraion Message:\n" + "WIFI Peer MAC Address: %pM\n" + "WIFI WDS Flags: %d\n" + "WIFI Peer ID: %d\n", + nwpm, nwpm->mac_addr, nwpm->wds_flags, + nwpm->peer_id); +} + +/* + * nss_wifi_log_tx_capture_msg() + * Log NSS WIFI Enable TX Capture Message. + */ +static void nss_wifi_log_tx_capture_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_tx_capture_msg *nwpm __maybe_unused = &ncm->msg.tx_capture_msg; + nss_trace("%px: NSS WIFI Enable TX Capture Message:\n" + "WIFI TX Capture Enable Flag: %d\n", + nwpm, nwpm->tx_capture_enable); +} + +/* + * nss_wifi_log_always_primary_set_msg() + * Log NSS WIFI Always Set Current Radio Primary Message. + */ +static void nss_wifi_log_always_primary_set_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_always_primary_set_msg *nwpm __maybe_unused = &ncm->msg.waps_msg; + nss_trace("%px: NSS WIFI Always Set Current Radio Primary Message:\n" + "WIFI Always Set Flag: %d\n", + nwpm, nwpm->flag); +} + +/* + * nss_wifi_log_cmd_msg() + * Log NSS WIFI PDEV Command Message. 
+ */ +static void nss_wifi_log_cmd_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_cmd_msg *nwpm __maybe_unused = &ncm->msg.wcmdm; + nss_trace("%px: NSS WIFI PDEV Command Message:\n" + "WIFI Type of Command: %d\n" + "WIFI Value of Command: %d\n", + nwpm, nwpm->cmd, nwpm->value); +} + +/* + * nss_wifi_log_enable_ol_statsv2_msg() + * Log NSS WIFI Enable Version 2 of TX/RX Stats + */ +static void nss_wifi_log_enable_ol_statsv2_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_enable_ol_statsv2 *nwpm __maybe_unused = &ncm->msg.wesh_msg; + nss_trace("%px: NSS WIFI Enable Version 2 of TX/RX Stats:\n" + "WIFI Enable Version 2 Stats: %d\n", + nwpm, nwpm->enable_ol_statsv2); +} + +/* + * nss_wifi_log_enable_ol_peer_time_msg() + * Log NSS WIFI Enable Per Peer Stats to Host + */ +static void nss_wifi_log_enable_ol_peer_time_msg(struct nss_wifi_msg *ncm) +{ + struct nss_wifi_ol_peer_time_msg *nwpm __maybe_unused = &ncm->msg.wopt_msg; + int32_t i; + + nss_trace("%px: NSS WIFI Enable Per PEer Stats to Host:\n" + "WIFI Number of Peers: %d\n" + "WIFI Peed ID: %d\n", + nwpm, nwpm->npeers, + nwpm->tstats[0].peer_id); + /* + * Continuation of the log. + */ + nss_trace("WIFI TX Timestamp:\n"); + nss_trace("\tSum of sojourn for each packet:"); + for (i = 0; i < NSS_WIFI_TX_NUM_TOS_TIDS; i++) { + nss_trace("\t\t%d = %x", i, nwpm->tstats[0].sum[i].sum_tx); + } + nss_trace("\tNumber of MSDU per peer per TID:"); + for (i = 0; i < NSS_WIFI_TX_NUM_TOS_TIDS; i++) { + nss_trace("\t\t%d = %x", i, nwpm->tstats[0].sum[i].sum_msdus); + } + nss_trace("WIFI Exponential Weighted Average:"); + for (i = 0; i < NSS_WIFI_TX_NUM_TOS_TIDS; i++) { + nss_trace("\t%d = %d", i, nwpm->tstats[0].avg[i]); + } +} + +/* + * nss_wifi_log_verbose() + * Log message contents. 
+ */ +static void nss_wifi_log_verbose(struct nss_wifi_msg *ncm) +{ + switch (ncm->cm.type) { + case NSS_WIFI_INIT_MSG: + nss_wifi_log_init_msg(ncm); + break; + + case NSS_WIFI_HTT_INIT_MSG: + nss_wifi_log_htt_init_msg(ncm); + break; + + case NSS_WIFI_TX_INIT_MSG: + nss_wifi_log_tx_init_msg(ncm); + break; + + case NSS_WIFI_RAW_SEND_MSG: + nss_wifi_log_rawsend_msg(ncm); + break; + + case NSS_WIFI_MGMT_SEND_MSG: + nss_wifi_log_mgmtsend_msg(ncm); + break; + + case NSS_WIFI_WDS_PEER_ADD_MSG: + nss_wifi_log_wds_peer_msg(ncm); + break; + + case NSS_WIFI_WDS_PEER_DEL_MSG: + nss_wifi_log_wds_peer_msg(ncm); + break; + + case NSS_WIFI_STOP_MSG: + nss_wifi_log_stop_msg(ncm); + break; + + case NSS_WIFI_RESET_MSG: + nss_wifi_log_reset_msg(ncm); + break; + + case NSS_WIFI_PEER_FREELIST_APPEND_MSG: + nss_wifi_log_peer_freelist_append_msg(ncm); + break; + + case NSS_WIFI_RX_REORDER_ARRAY_FREELIST_APPEND_MSG: + nss_wifi_log_rx_reorder_array_freelist_append_msg(ncm); + break; + + case NSS_WIFI_MONITOR_FILTER_SET_MSG: + nss_wifi_log_set_filter_msg(ncm); + break; + + case NSS_WIFI_PEER_BS_STATE_MSG: + nss_wifi_log_peer_activity_msg(ncm); + break; + + case NSS_WIFI_RX_VOW_EXTSTATS_SET_MSG: + nss_wifi_log_rx_vow_extstats_set_msg(ncm); + break; + + case NSS_WIFI_PKTLOG_CFG_MSG: + nss_wifi_log_pktlog_cfg_msg(ncm); + break; + + case NSS_WIFI_ENABLE_PERPKT_TXSTATS_MSG: + nss_wifi_log_enable_perpkt_txstats_msg(ncm); + break; + + case NSS_WIFI_IGMP_MLD_TOS_OVERRIDE_MSG: + nss_wifi_log_override_tos_msg(ncm); + break; + + case NSS_WIFI_OL_STATS_CFG_MSG: + nss_wifi_log_ol_stats_cfg_msg(ncm); + break; + + case NSS_WIFI_TX_QUEUE_CFG_MSG: + nss_wifi_log_tx_queue_cfg_msg(ncm); + break; + + case NSS_WIFI_TX_MIN_THRESHOLD_CFG_MSG: + nss_wifi_log_tx_min_threshold_cfg(ncm); + break; + + case NSS_WIFI_DBDC_PROCESS_ENABLE_MSG: + nss_wifi_log_dbdc_process_enable_msg(ncm); + break; + + case NSS_WIFI_PRIMARY_RADIO_SET_MSG: + nss_wifi_log_primary_radio_set_msg(ncm); + break; + + case 
NSS_WIFI_FORCE_CLIENT_MCAST_TRAFFIC_SET_MSG: + nss_wifi_log_force_client_mcast_traffic_set_msg(ncm); + break; + + case NSS_WIFI_STORE_OTHER_PDEV_STAVAP_MSG: + nss_wifi_log_store_other_pdev_stavap_msg(ncm); + break; + + case NSS_WIFI_STA_KICKOUT_MSG: + nss_wifi_log_sta_kickout_msg(ncm); + break; + + case NSS_WIFI_WNM_PEER_RX_ACTIVITY_MSG: + nss_wifi_log_wnm_peer_rx_activity(ncm); + break; + + case NSS_WIFI_WDS_VENDOR_MSG: + nss_wifi_log_wds_extn_peer_cfg_msg(ncm); + break; + + case NSS_WIFI_TX_CAPTURE_SET_MSG: + nss_wifi_log_tx_capture_msg(ncm); + break; + + case NSS_WIFI_ALWAYS_PRIMARY_SET_MSG: + nss_wifi_log_always_primary_set_msg(ncm); + break; + + case NSS_WIFI_CMD_MSG: + nss_wifi_log_cmd_msg(ncm); + break; + + case NSS_WIFI_ENABLE_OL_STATSV2_MSG: + nss_wifi_log_enable_ol_statsv2_msg(ncm); + break; + + case NSS_WIFI_OL_PEER_TIME_MSG: + nss_wifi_log_enable_ol_peer_time_msg(ncm); + break; + + case NSS_WIFI_FLUSH_HTT_CMD_MSG: + case NSS_WIFI_OL_STATS_MSG: + case NSS_WIFI_MSDU_TTL_SET_MSG: + case NSS_WIFI_PEER_STATS_MSG: + case NSS_WIFI_FW_STATS_MSG: + case NSS_WIFI_SEND_RRA_MEMORY_REQUEST_MSG: + case NSS_WIFI_STATS_MSG: + case NSS_WIFI_POST_RECV_MSG: + case NSS_WIFI_SEND_PEER_MEMORY_REQUEST_MSG: + /* + * No log for these valid messages. + */ + break; + + default: + nss_warning("%px: Invalid message type\n", ncm); + break; + } +} + +/* + * nss_wifi_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_wifi_log_tx_msg(struct nss_wifi_msg *ncm) +{ + if (ncm->cm.type >= NSS_WIFI_MAX_MSG) { + nss_warning("%px: Invalid message type\n", ncm); + return; + } + + nss_info("%px: type[%d]:%s\n", ncm, ncm->cm.type, nss_wifi_log_message_types_str[ncm->cm.type]); + nss_wifi_log_verbose(ncm); +} + +/* + * nss_wifi_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_wifi_log_rx_msg(struct nss_wifi_msg *ncm) +{ + if (ncm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", ncm); + return; + } + + if (ncm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (ncm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", ncm, ncm->cm.type, + nss_wifi_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response]); + goto verbose; + } + + if (ncm->cm.error >= NSS_WIFI_EMSG_MAX) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + ncm, ncm->cm.type, nss_wifi_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + ncm, ncm->cm.type, nss_wifi_log_message_types_str[ncm->cm.type], + ncm->cm.response, nss_cmn_response_str[ncm->cm.response], + ncm->cm.error, nss_wifi_log_error_response_types_str[ncm->cm.error]); + +verbose: + nss_wifi_log_verbose(ncm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_log.h new file mode 100644 index 000000000..c47a62b3a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_WIFI_LOG_H__ +#define __NSS_WIFI_LOG_H__ + +/* + * nss_WIFI_log.h + * NSS WIFI Log Header File + */ + +/* + * nss_WIFI_log_tx_msg + * Logs a WIFI message that is sent to the NSS firmware. + */ +void nss_wifi_log_tx_msg(struct nss_wifi_msg *ncm); + +/* + * nss_WIFI_log_rx_msg + * Logs a WIFI message that is received from the NSS firmware. + */ +void nss_wifi_log_rx_msg(struct nss_wifi_msg *ncm); + +#endif /* __NSS_WIFI_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mac_db.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mac_db.c new file mode 100644 index 000000000..5fb825a1a --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mac_db.c @@ -0,0 +1,215 @@ +/* + ************************************************************************** + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_wifi_mac_db_if.h" + +/* + * Compile time assertion. + */ +#define NSS_WIFI_MAC_DB_COMPILE_TIME_ASSERT(assertion_name, predicate) \ + typedef char assertion_name[(predicate) ? 1 : -1] + +#define NSS_WIFI_MAC_DB_TX_TIMEOUT 1000 /* Millisecond to jiffies*/ + +/* + * Validate the Wi-Fi MAC database message size not exceeding buffer size. + */ +NSS_WIFI_MAC_DB_COMPILE_TIME_ASSERT(NSS_WIFI_MAC_DB_MAX_BUF_MSG, + (sizeof(struct nss_wifi_mac_db_msg) < NSS_NBUF_PAYLOAD_SIZE)); + +/* + * nss_wifi_mac_db_get_context() + */ +struct nss_ctx_instance *nss_wifi_mac_db_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wmdb_handler_id]; +} +EXPORT_SYMBOL(nss_wifi_mac_db_get_context); + +/* + * nss_wifi_mac_db_pvt + * Private data structure + */ +static struct nss_wifi_mac_db_pvt { + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; +} wifi_mac_db_pvt; + +/* + * nss_wifi_mac_db_handler() + * Handle NSS -> HLOS messages for wifi_mac_db + */ +static void nss_wifi_mac_db_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_wifi_mac_db_msg *ntm = (struct nss_wifi_mac_db_msg *)ncm; + void *ctx; + nss_wifi_mac_db_msg_callback_t cb; + + nss_info("%px: NSS->HLOS message for wifi_mac_db\n", nss_ctx); + + /* + * The interface number shall be wifi_mac_db soc interface or wifi_mac_db radio interface + */ + BUG_ON((ncm->interface != NSS_WIFI_MAC_DB_INTERFACE)); + + /* + * Is this a valid 
request/response packet? + */ + if (ncm->type >= NSS_WIFI_MAC_DB_MAX_MSG) { + nss_warning("%px: Received invalid message %d for wifi_mac_db interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_wifi_mac_db_msg)) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Update the callback and app_data for notify messages, wifi_mac_db sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->wifi_mac_db_msg_callback; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + nss_info("%px: cb null for wifi_mac_db interface %d", nss_ctx, ncm->interface); + return; + } + + /* + * Get callback & context + */ + cb = (nss_wifi_mac_db_msg_callback_t)ncm->cb; + ctx = nss_ctx->subsys_dp_register[ncm->interface].ndev; + + /* + * call wifi_mac_db msg callback + */ + if (!ctx) { + nss_warning("%px: Event received for wifi_mac_db interface %d before registration", nss_ctx, ncm->interface); + return; + } + + cb(ctx, ntm); +} + +/* + * nss_wifi_mac_db_tx_msg + * Transmit a wifi_mac_db message to NSS FW + * + * NOTE: The caller is expected to handle synchronous wait for message + * response if needed. 
+ */ +nss_tx_status_t nss_wifi_mac_db_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_wifi_mac_db_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + if (ncm->type >= NSS_WIFI_MAC_DB_MAX_MSG) { + nss_warning("%px: wifi_mac_db message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * The interface number shall be one of the wifi_mac_db soc interfaces + */ + if ((ncm->interface != NSS_WIFI_MAC_DB_INTERFACE)) { + nss_warning("%px: tx request for interface that is not a wifi_mac_db: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_wifi_mac_db_tx_msg); + +/* + **************************************** + * Register/Unregister/Miscellaneous APIs + **************************************** + */ + +/* + * nss_register_wifi_mac_db_if() + * Register wifi_mac_db with nss driver + */ +struct nss_ctx_instance *nss_register_wifi_mac_db_if(uint32_t if_num, nss_wifi_mac_db_callback_t wifi_mac_db_callback, + nss_wifi_mac_db_callback_t wifi_mac_db_ext_callback, + nss_wifi_mac_db_msg_callback_t event_callback, struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wmdb_handler_id]; + + /* + * The interface number shall be wifi_mac_db interface + */ + nss_assert(if_num == NSS_WIFI_MAC_DB_INTERFACE); + + nss_info("%px: nss_register_wifi_mac_db_if if_num:%d wifi_mac_db_dev:%px", nss_ctx, if_num, netdev); + + nss_core_register_subsys_dp(nss_ctx, if_num, wifi_mac_db_callback, NULL, NULL, netdev, features); + + nss_top_main.wifi_mac_db_msg_callback = event_callback; + + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wmdb_handler_id]; +} +EXPORT_SYMBOL(nss_register_wifi_mac_db_if); + +/* + * nss_unregister_wifi_mac_db_if() + * Unregister wifi_mac_db with nss driver + */ +void nss_unregister_wifi_mac_db_if(uint32_t if_num) +{ + struct 
nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wmdb_handler_id]; + + /* + * The interface number shall be wifi_mac_db interface + */ + nss_assert(if_num == NSS_WIFI_MAC_DB_INTERFACE); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_unregister_wifi_mac_db_if); + +/* + * nss_wifi_mac_db_register_handler() + * Register handle for notfication messages received on wifi mac db + */ +void nss_wifi_mac_db_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = + (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wmdb_handler_id]; + + nss_info("wifi_mac_db_register_handler"); + nss_core_register_handler(nss_ctx, NSS_WIFI_MAC_DB_INTERFACE, nss_wifi_mac_db_handler, NULL); + + sema_init(&wifi_mac_db_pvt.sem, 1); + init_completion(&wifi_mac_db_pvt.complete); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh.c new file mode 100644 index 000000000..d4a6c0a1d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh.c @@ -0,0 +1,242 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_core.h" +#include "nss_cmn.h" +#include "nss_wifi_mesh.h" +#include "nss_wifi_mesh_log.h" +#include "nss_wifi_mesh_strings.h" + +/* + * nss_wifi_mesh_verify_if_num() + * Verify interface number. + */ +bool nss_wifi_mesh_verify_if_num(nss_if_num_t if_num) +{ + enum nss_dynamic_interface_type if_type = nss_dynamic_interface_get_type(nss_wifi_mesh_get_context(), if_num); + + return ((if_type == NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_INNER) || + (if_type == NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_OUTER)); +} +EXPORT_SYMBOL(nss_wifi_mesh_verify_if_num); + +/* nss_wifi_mesh_handler() + * Handles Wi-Fi mesh messages from NSS to HLOS. + */ +static void nss_wifi_mesh_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, void *app_data) +{ + nss_wifi_mesh_msg_callback_t cb; + struct nss_wifi_mesh_msg *nwmm = (struct nss_wifi_mesh_msg *)ncm; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + nss_assert(nss_is_dynamic_interface(ncm->interface)); + nss_assert(nss_wifi_mesh_verify_if_num(ncm->interface)); + + /* + * Is this a valid request/response packet? + */ + if (ncm->type >= NSS_WIFI_MESH_MSG_MAX) { + nss_warning("%px: Received invalid message %d for wifi_mesh interface\n", nss_ctx, ncm->type); + return; + } + + + /* + * For variable array the size of the common length will be greater the nss_wifi_mesh_msg + * length. Add conditional checking for messages where length check will fail. 
+ */ + if ((nss_cmn_get_msg_len(ncm) > sizeof(struct nss_wifi_mesh_msg)) && + (ncm->type != NSS_WIFI_MESH_MSG_PATH_TABLE_DUMP) && + (ncm->type != NSS_WIFI_MESH_MSG_PROXY_PATH_TABLE_DUMP)) { + nss_warning("%px: Length of message is greater than expected, type: %d, len: %d", + nss_ctx, ncm->type, ncm->len); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Trace Messages + */ + nss_wifi_mesh_log_rx_msg(nwmm); + + /* + * Update the stats and send statistics notifications to the registered modules. + */ + if (nwmm->cm.type == NSS_WIFI_MESH_MSG_STATS_SYNC) { + nss_wifi_mesh_update_stats(ncm->interface, &nwmm->msg.stats_sync_msg); + nss_wifi_mesh_stats_notify(ncm->interface, nss_ctx->id); + } + + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)app_data; + } + + if (!ncm->cb) { + return; + } + + cb = (nss_wifi_mesh_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, ncm); +} + +/* + * nss_wifi_mesh_msg_init() + * Initiliaze a Wi-Fi mesh message. + */ +void nss_wifi_mesh_msg_init(struct nss_wifi_mesh_msg *nwm, nss_if_num_t if_num, uint32_t type, uint32_t len, + nss_wifi_mesh_msg_callback_t cb, void *app_data) +{ + nss_assert(nss_wifi_mesh_verify_if_num(if_num)); + nss_cmn_msg_init(&nwm->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_wifi_mesh_msg_init); + +/* + * nss_wifi_mesh_tx_buf + * Send data packet for vap processing asynchronously. + */ +nss_tx_status_t nss_wifi_mesh_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, nss_if_num_t if_num) +{ + nss_assert(nss_is_dynamic_interface(if_num)); + return nss_core_send_packet(nss_ctx, os_buf, if_num, H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_wifi_mesh_tx_buf); + +/* + * nss_wifi_mesh_tx_msg + * Transmit a Wi-Fi mesh message to the NSS firmware asynchronously. 
+ * + * NOTE: The caller is expected to handle synchronous waiting for message + * response if needed. + */ +nss_tx_status_t nss_wifi_mesh_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_wifi_mesh_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (ncm->type >= NSS_WIFI_MESH_MSG_MAX) { + nss_warning("%px: wifi_mesh message type out of range: %d\n", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * Log messages. + */ + nss_wifi_mesh_log_tx_msg(msg); + + /* + * The interface number shall be one of the Wi-Fi mesh socket interfaces. + */ + nss_assert(nss_is_dynamic_interface(ncm->interface)); + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_wifi_mesh_tx_msg); + +/* + **************************************** + * Register/Unregister/Miscellaneous APIs + **************************************** + */ + +/* + * nss_wifi_mesh_get_context() + * Return the core ctx which the feature is on. + */ +struct nss_ctx_instance *nss_wifi_mesh_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; +} +EXPORT_SYMBOL(nss_wifi_mesh_get_context); + +/* + * nss_unregister_wifi_mesh_if() + * Unregister Wi-Fi mesh from the NSS driver. + */ +void nss_unregister_wifi_mesh_if(nss_if_num_t if_num) +{ + struct nss_ctx_instance *nss_ctx = nss_wifi_mesh_get_context(); + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + nss_core_unregister_msg_handler(nss_ctx, if_num); + nss_core_unregister_handler(nss_ctx, if_num); + nss_wifi_mesh_stats_handle_free(if_num); +} +EXPORT_SYMBOL(nss_unregister_wifi_mesh_if); + +/* + * nss_register_wifi_mesh_if() + * Register wifi_mesh with nss driver. 
+ */ +uint32_t nss_register_wifi_mesh_if(nss_if_num_t if_num, + nss_wifi_mesh_data_callback_t mesh_data_callback, + nss_wifi_mesh_ext_data_callback_t mesh_ext_data_callback, + nss_wifi_mesh_msg_callback_t mesh_event_callback, + uint32_t dp_type, struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = nss_wifi_mesh_get_context(); + uint32_t status; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + nss_assert(netdev); + nss_assert(nss_wifi_mesh_verify_if_num(if_num)); + + if (!nss_wifi_mesh_stats_handle_alloc(if_num, netdev->ifindex)) { + nss_warning("%px: couldn't allocate stats handle for device name: %s, if_num: 0x%x\n", nss_ctx, netdev->name, if_num); + return NSS_CORE_STATUS_FAILURE; + } + + nss_core_register_handler(nss_ctx, if_num, nss_wifi_mesh_handler, netdev); + + status = nss_core_register_msg_handler(nss_ctx, if_num, mesh_event_callback); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to register event handler for interface(%u)\n", nss_ctx, if_num); + nss_core_unregister_handler(nss_ctx, if_num); + nss_wifi_mesh_stats_handle_free(if_num); + return status; + } + + nss_core_register_subsys_dp(nss_ctx, if_num, mesh_data_callback, mesh_ext_data_callback, NULL, netdev, features); + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, dp_type); + return NSS_CORE_STATUS_SUCCESS; +} +EXPORT_SYMBOL(nss_register_wifi_mesh_if); + +/* + * nss_wifi_mesh_init() + * Initialize the mesh stats dentries. 
+ */ +void nss_wifi_mesh_init(void) +{ + if (!nss_wifi_mesh_strings_dentry_create()) { + nss_warning("Unable to create dentry for Wi-Fi mesh strings\n"); + } + + if (!nss_wifi_mesh_stats_dentry_create()) { + nss_warning("Unable to create dentry for Wi-Fi mesh stats\n"); + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_log.c new file mode 100644 index 000000000..bf9ddc658 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_log.c @@ -0,0 +1,368 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_wifi_mesh_log.c + * NSS WiFi Mesh logger file. + */ + +#include "nss_core.h" +#include "nss_wifi_mesh.h" + +#define NSS_WIFI_MESH_LOG_MESSAGE_TYPE_INDEX(type) ((type) - NSS_IF_MAX_MSG_TYPES) + +/* + * nss_wifi_mesh_log_message_types_str + * NSS Wi-Fi mesh message strings. 
+ */ +static uint8_t *nss_wifi_mesh_log_message_types_str[NSS_WIFI_MESH_LOG_MESSAGE_TYPE_INDEX(NSS_WIFI_MESH_MSG_MAX)] __maybe_unused = { + "WiFi Mesh configure", + "WiFi Mesh configure Mpath Add", + "WiFi Mesh configure Mpath Delete", + "WiFi Mesh configure Mpath Update", + "WiFi Mesh configure Proxy Learn", + "WiFi Mesh configure Proxy Add", + "WiFi Mesh configure Proxy Update", + "WiFi Mesh configure Proxy Delete", + "WiFi Mesh configure Mpath Not Found", + "WiFi Mesh configure Refresh" + "WiFi Mesh configure Mpath Table Dump", + "WiFi Mesh configure Proxy Path Table Dump", + "WiFi Mesh configure Assoc Link Vap", + "WiFi Mesh configure Exception Message", + "WiFi Mesh configure Stats Sync" +}; + +/* + * nss_wifi_mesh_log_configure_msg() + * Log a NSS Wi-Fi mesh interface configure message. + */ +static void nss_wifi_mesh_log_configure_if_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_config_msg *cmsg __maybe_unused = &nwmm->msg.mesh_config; + nss_trace("%px: WiFi Mesh configure message\n" + "Local Mac address: %pM\n" + "TTL: %d\n" + "Mesh Path Refresh Time: %d\n" + "Mpp Learning Mode: %d\n" + "Block Mesh Forwarding: %d\n" + "Configs Flags: 0x%x\n", + cmsg, cmsg->local_mac_addr, cmsg->ttl, + cmsg->mesh_path_refresh_time, + cmsg->mpp_learning_mode, + cmsg->block_mesh_forwarding, + cmsg->config_flags); +} + +/* + * nss_wifi_mesh_log_mpath_add_msg() + * Log a NSS Wi-Fi mesh mpath add message. 
+ */ +static void nss_wifi_mesh_log_mpath_add_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_mpath_add_msg *mamsg __maybe_unused = &nwmm->msg.mpath_add; + nss_trace("%px: NSS WiFi Mesh Mpath add message:\n" + "Dest Mac address: %pM\n" + "Next Hop Mac address: %pM\n" + "Metric: %d\n" + "Expiry Time: %d\n" + "Hop Count: %d\n" + "Flags: 0x%x\n" + "Link Vap id: %d\n" + "Is Mesh Gate: %d\n", + mamsg, mamsg->dest_mac_addr, mamsg->next_hop_mac_addr, + mamsg->metric, mamsg->expiry_time, mamsg->hop_count, + mamsg->path_flags, mamsg->link_vap_id, mamsg->is_mesh_gate); +} + +/* + * nss_wifi_mesh_log_mpath_delete_msg() + * Log a NSS Wi-Fi mesh mpath delete message. + */ +static void nss_wifi_mesh_log_mpath_delete_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_mpath_del_msg *mdmsg __maybe_unused = &nwmm->msg.mpath_del; + nss_trace("%px: NSS WiFi Mesh Mpath delete message:\n" + "Dest Mac Address: %pM\n" + "Link Vap id: %d\n" + "Next Hop Mac address: %pM\n", + mdmsg, mdmsg->mesh_dest_mac_addr, mdmsg->link_vap_id, mdmsg->next_hop_mac_addr); +} + +/* + * nss_wifi_mesh_log_mpath_update_msg() + * Log a NSS Wi-Fi mesh mpath update message. + */ +static void nss_wifi_mesh_log_mpath_update_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_mpath_update_msg *mumsg __maybe_unused = &nwmm->msg.mpath_update; + nss_trace("%px: NSS WiFi Mesh Mpath update message:\n" + "Dest Mac address: %pM\n" + "Next Hop Mac address: %pM\n" + "Metric: %d\n" + "Expiry Time: %d\n" + "Hop Count: %d\n" + "Flags: 0x%x\n" + "Link Vap id: %d\n" + "Is Mesh Gate: %d\n" + "Update Flags: %d\n", + mumsg, mumsg->dest_mac_addr, mumsg->next_hop_mac_addr, + mumsg->metric, mumsg->expiry_time, mumsg->hop_count, + mumsg->path_flags, mumsg->link_vap_id, mumsg->is_mesh_gate, + mumsg->update_flags); +} + +/* + * nss_wifi_mesh_log_proxy_path_learn_msg() + * Log a NSS Wi-Fi mesh proxy path learn message. 
+ */ +static void nss_wifi_mesh_log_proxy_path_learn_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_proxy_path_learn_msg *pplm __maybe_unused = &nwmm->msg.proxy_learn_msg; + nss_trace("%px: NSS WiFi Mesh Proxy Path Learn message:\n" + "Mesh Dest Mac address: %pM\n" + "Destination Mac address: %pM\n" + "flags: 0x%x\n", + pplm, pplm->mesh_dest_mac, pplm->dest_mac_addr, + pplm->path_flags); +} + +/* + * nss_wifi_mesh_log_proxy_path_add_msg() + * Log a NSS Wi-Fi Mesh proxy path add message. + */ +static void nss_wifi_mesh_log_proxy_path_add_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_proxy_path_add_msg *ppam __maybe_unused = &nwmm->msg.proxy_add_msg; + nss_trace("%px: NSS WiFi Mesh Proxy Path Add message:\n" + "Mesh Dest Mac address: %pM\n" + "Destination Mac address: %pM\n" + "flags: 0x%x\n", + ppam, ppam->mesh_dest_mac, ppam->dest_mac_addr, + ppam->path_flags); +} + +/* + * nss_wifi_mesh_log_proxy_path_delete_msg() + * Log a NSS Wi-Fi proxy path delete message. + */ +static void nss_wifi_mesh_log_proxy_path_delete_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_proxy_path_del_msg *ppdm __maybe_unused = &nwmm->msg.proxy_del_msg; + nss_trace("%px: NSS WiFi Mesh Proxy Path Delete message:\n" + "Mesh Dest Mac address: %pM\n" + "Destination Mac address: %pM\n", + ppdm, ppdm->mesh_dest_mac_addr, ppdm->dest_mac_addr); +} + +/* + * nss_wifi_mesh_log_proxy_path_update_msg() + * Log a NSS Wi-Fi mesh proxy path update message. 
+ */ +static void nss_wifi_mesh_log_proxy_path_update_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_proxy_path_update_msg *ppum __maybe_unused = &nwmm->msg.proxy_update_msg; + nss_trace("%px: NSS WiFi Mesh Proxy Path Update message:\n" + "Mesh Dest Mac address: %pM\n" + "Destination Mac address: %pM\n" + "flags: 0x%x\n" + "Bitmap: %d\n", + ppum, ppum->mesh_dest_mac, ppum->dest_mac_addr, + ppum->path_flags, ppum->bitmap); +} + +/* + * nss_wifi_mesh_log_mpath_not_found_msg() + * Log a NSS Wi-Fi mesh mpath not found message. + */ +static void nss_wifi_mesh_log_mpath_not_found_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_mpath_not_found_msg *mnfm __maybe_unused = &nwmm->msg.mpath_not_found_msg; + nss_trace("%px: NSS WiFi Mesh Mpath not found message:\n" + "Destination Mac address: %pM\n" + "Transmitter Mac address: %pM\n" + "Link Vap Id: %d\n" + "Is Mesh Forwarding Path: %d\n", + mnfm, mnfm->dest_mac_addr, mnfm->transmitter_mac_addr, + mnfm->link_vap_id, mnfm->is_mesh_forward_path); +} + +/* + * nss_wifi_mesh_log_mpath_refresh_msg() + * Log a NSS Wi-Fi mesh mpath refresh message. + */ +static void nss_wifi_mesh_log_mpath_refresh_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_path_refresh_msg *mprm __maybe_unused = &nwmm->msg.path_refresh_msg; + nss_trace("%px: NSS WiFi Mesh Mpath refresh message:\n" + "Destination Mac address: %pM\n" + "Next Hop Mac address: %pM\n" + "Flags: 0x%x\n" + "Link Vap Id: %d\n", + mprm, mprm->dest_mac_addr, mprm->next_hop_mac_addr, + mprm->path_flags, mprm->link_vap_id); +} + +/* + * nss_wifi_mesh_log_mpath_expiry_msg() + * Log a NSS Wi-Fi mesh mpath expiry message. 
+ */ +static void nss_wifi_mesh_log_mpath_expiry_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_path_expiry_msg *mpem __maybe_unused = &nwmm->msg.path_expiry_msg; + nss_trace("%px: NSS WiFi Mesh Mpath expiry message:\n" + "Destination Mac address: %pM\n" + "Next Hop Mac address: %pM\n" + "Flags: 0x%x\n" + "Link Vap Id: %d\n", + mpem, mpem->mesh_dest_mac_addr, mpem->next_hop_mac_addr, + mpem->path_flags, mpem->link_vap_id); +} + +/* + * nss_wifi_mesh_log_exception_flag_msg() + * Log a NSS Wi-Fi mesh exception flag message. + */ +static void nss_wifi_mesh_log_exception_flag_msg(struct nss_wifi_mesh_msg *nwmm) +{ + struct nss_wifi_mesh_exception_flag_msg *efm __maybe_unused = &nwmm->msg.exception_msg; + nss_trace("%px: NSS WiFi Mesh Exception Flag message:\n" + "Destination Mac address: %pM\n", + efm, efm->dest_mac_addr); +} + +/* + * nss_wifi_mesh_log_verbose() + * Log message contents. + */ +static void nss_wifi_mesh_log_verbose(struct nss_wifi_mesh_msg *nwmm) +{ + switch (nwmm->cm.type) { + case NSS_WIFI_MESH_MSG_INTERFACE_CONFIGURE: + nss_wifi_mesh_log_configure_if_msg(nwmm); + break; + + case NSS_WIFI_MESH_MSG_MPATH_ADD: + nss_wifi_mesh_log_mpath_add_msg(nwmm); + break; + + case NSS_WIFI_MESH_MSG_MPATH_DELETE: + nss_wifi_mesh_log_mpath_delete_msg(nwmm); + break; + + case NSS_WIFI_MESH_MSG_MPATH_UPDATE: + nss_wifi_mesh_log_mpath_update_msg(nwmm); + break; + + case NSS_WIFI_MESH_MSG_PROXY_PATH_LEARN: + nss_wifi_mesh_log_proxy_path_learn_msg(nwmm); + break; + + case NSS_WIFI_MESH_MSG_PROXY_PATH_ADD: + nss_wifi_mesh_log_proxy_path_add_msg(nwmm); + break; + + case NSS_WIFI_MESH_MSG_PROXY_PATH_DELETE: + nss_wifi_mesh_log_proxy_path_delete_msg(nwmm); + break; + + case NSS_WIFI_MESH_MSG_PROXY_PATH_UPDATE: + nss_wifi_mesh_log_proxy_path_update_msg(nwmm); + break; + + case NSS_WIFI_MESH_MSG_PATH_NOT_FOUND: + nss_wifi_mesh_log_mpath_not_found_msg(nwmm); + break; + + case NSS_WIFI_MESH_MSG_PATH_REFRESH: + nss_wifi_mesh_log_mpath_refresh_msg(nwmm); + break; + + 
case NSS_WIFI_MESH_MSG_PATH_EXPIRY: + nss_wifi_mesh_log_mpath_expiry_msg(nwmm); + break; + + case NSS_WIFI_MESH_MSG_PATH_TABLE_DUMP: + break; + + case NSS_WIFI_MESH_MSG_PROXY_PATH_TABLE_DUMP: + break; + + case NSS_WIFI_MESH_MSG_STATS_SYNC: + break; + + case NSS_WIFI_MESH_MSG_EXCEPTION_FLAG: + nss_wifi_mesh_log_exception_flag_msg(nwmm); + break; + + default: + nss_trace("%px: Invalid message, type: %d\n", nwmm, nwmm->cm.type); + break; + } +} + +/* + * nss_wifi_mesh_log_tx_msg() + * Log messages transmitted to firmware. + */ +void nss_wifi_mesh_log_tx_msg(struct nss_wifi_mesh_msg *nwmm) +{ + uint32_t index; + if ((nwmm->cm.type >= NSS_WIFI_MESH_MSG_MAX) || (nwmm->cm.type <= NSS_IF_MAX_MSG_TYPES)) { + nss_warning("%px: Invalid message, type: %d\n", nwmm, nwmm->cm.type); + return; + } + + index = NSS_WIFI_MESH_LOG_MESSAGE_TYPE_INDEX(nwmm->cm.type); + + nss_info("%px: type[%d]:%s\n", nwmm, nwmm->cm.type, nss_wifi_mesh_log_message_types_str[index - 1]); + nss_wifi_mesh_log_verbose(nwmm); +} + +/* + * nss_wifi_mesh_log_rx_msg() + * Log messages received from firmware. 
+ */ +void nss_wifi_mesh_log_rx_msg(struct nss_wifi_mesh_msg *nwmm) +{ + uint32_t index; + if (nwmm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response, message type: %d\n", nwmm, nwmm->cm.type); + return; + } + + if (nwmm->cm.type <= NSS_IF_MAX_MSG_TYPES) { + return; + } + + index = NSS_WIFI_MESH_LOG_MESSAGE_TYPE_INDEX(nwmm->cm.type); + + if (nwmm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nwmm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nwmm, nwmm->cm.type, + nss_wifi_mesh_log_message_types_str[index - 1], + nwmm->cm.response, nss_cmn_response_str[nwmm->cm.response]); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s\n", + nwmm, nwmm->cm.type, nss_wifi_mesh_log_message_types_str[index - 1], + nwmm->cm.response, nss_cmn_response_str[nwmm->cm.response]); + +verbose: + nss_wifi_mesh_log_verbose(nwmm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_log.h new file mode 100644 index 000000000..a6c54368f --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_log.h @@ -0,0 +1,34 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_WIFI_MESH_LOG_H +#define __NSS_WIFI_MESH_LOG_H + +/* + * nss_wifi_mesh_log_tx_msg + * Logs a Wi-Fi mesh message that was sent to the NSS firmware. + */ +void nss_wifi_mesh_log_tx_msg(struct nss_wifi_mesh_msg *nwmm); + +/* + * nss_wifi_mesh_log_rx_msg + * Logs a Wi-Fi mesh message that was received from the NSS firmware. + */ +void nss_wifi_mesh_log_rx_msg(struct nss_wifi_mesh_msg *nwmm); + +#endif /* __NSS_WIFI_MESH_LOG_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_stats.c new file mode 100644 index 000000000..4cfa96a67 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_stats.c @@ -0,0 +1,662 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_tx_rx_common.h" +#include "nss_wifi_mesh.h" +#include "nss_wifi_mesh_stats.h" +#include "nss_wifi_mesh_strings.h" + +#define NSS_WIFI_MESH_OUTER_STATS 0 +#define NSS_WIFI_MESH_INNER_STATS 1 +#define NSS_WIFI_MESH_PATH_STATS 2 +#define NSS_WIFI_MESH_PROXY_PATH_STATS 3 +#define NSS_WIFI_MESH_EXCEPTION_STATS 4 + +/* + * Wi-Fi mesh stats dentry file size. + */ +#define NSS_WIFI_MESH_DENTRY_FILE_SIZE 19 + +/* + * Spinlock for protecting tunnel operations colliding with a tunnel destroy + */ +static DEFINE_SPINLOCK(nss_wifi_mesh_stats_lock); + +/* + * Declare atomic notifier data structure for statistics. + */ +static ATOMIC_NOTIFIER_HEAD(nss_wifi_mesh_stats_notifier); + +/* + * Declare an array of Wi-Fi mesh stats handle. + */ +struct nss_wifi_mesh_stats_handle *nss_wifi_mesh_stats_hdl[NSS_WIFI_MESH_MAX_DYNAMIC_INTERFACE]; + +/* + * nss_wifi_mesh_max_statistics() + * Wi-Fi mesh maximum statistics. 
+ */ +static uint32_t nss_wifi_mesh_max_statistics(void) +{ + uint32_t max1; + uint32_t exception_stats_max = NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_MAX; + uint32_t encap_stats_max = NSS_WIFI_MESH_ENCAP_STATS_TYPE_MAX; + uint32_t decap_stats_max = NSS_WIFI_MESH_DECAP_STATS_TYPE_MAX; + uint32_t path_stats_max = NSS_WIFI_MESH_PATH_STATS_TYPE_MAX; + uint32_t proxy_path_stats_max = NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_MAX; + + max1 = max(max(encap_stats_max, decap_stats_max), max(path_stats_max, proxy_path_stats_max)); + + return (max(max1, exception_stats_max)); +} + +/* + * nss_wifi_mesh_stats_handle_alloc() + * Allocate Wi-Fi mesh tunnel instance + */ +bool nss_wifi_mesh_stats_handle_alloc(nss_if_num_t if_num, int32_t ifindex) +{ + struct nss_wifi_mesh_stats_handle *h; + uint32_t idx; + + /* + * Allocate a handle + */ + h = kzalloc(sizeof(struct nss_wifi_mesh_stats_handle), GFP_ATOMIC); + if (!h) { + nss_warning("Failed to allocate memory for Wi-Fi mesh instance for interface : 0x%x\n", if_num); + return false; + } + + spin_lock(&nss_wifi_mesh_stats_lock); + for (idx = 0; idx < NSS_WIFI_MESH_MAX_DYNAMIC_INTERFACE; idx++) { + if (nss_wifi_mesh_stats_hdl[idx] && nss_wifi_mesh_stats_hdl[idx]->if_num == if_num) { + spin_unlock(&nss_wifi_mesh_stats_lock); + nss_warning("Already a handle present for this interface number: 0x%x\n", if_num); + kfree(h); + return false; + } + } + + for (idx = 0; idx < NSS_WIFI_MESH_MAX_DYNAMIC_INTERFACE; idx++) { + if (nss_wifi_mesh_stats_hdl[idx]) { + continue; + } + + h->if_num = if_num; + h->mesh_idx = idx; + h->ifindex = ifindex; + nss_wifi_mesh_stats_hdl[idx] = h; + spin_unlock(&nss_wifi_mesh_stats_lock); + return true; + } + spin_unlock(&nss_wifi_mesh_stats_lock); + nss_warning("No free index available for handle with ifnum: 0x%x\n", if_num); + kfree(h); + return false; +} + +/* + * nss_wifi_mesh_stats_handle_free() + * Free Wi-Fi mesh tunnel handle instance. 
+ */ +bool nss_wifi_mesh_stats_handle_free(nss_if_num_t if_num) +{ + struct nss_wifi_mesh_stats_handle *h; + + spin_lock(&nss_wifi_mesh_stats_lock); + h = nss_wifi_mesh_get_stats_handle(if_num); + if (!h) { + spin_unlock(&nss_wifi_mesh_stats_lock); + nss_warning("Unable to free Wi-Fi mesh stats handle instance for interface number: 0x%x\n", if_num); + return false; + } + + nss_wifi_mesh_stats_hdl[h->mesh_idx] = NULL; + spin_unlock(&nss_wifi_mesh_stats_lock); + kfree(h); + return true; +} + +/** + * nss_wifi_mesh_get_stats_handle() + * Get Wi-Fi mesh stats handle from interface number. + */ +struct nss_wifi_mesh_stats_handle *nss_wifi_mesh_get_stats_handle(nss_if_num_t if_num) +{ + uint32_t idx; + + assert_spin_locked(&nss_wifi_mesh_stats_lock); + + for (idx = 0; idx < NSS_WIFI_MESH_MAX_DYNAMIC_INTERFACE; idx++) { + if (nss_wifi_mesh_stats_hdl[idx]) { + if (nss_wifi_mesh_stats_hdl[idx]->if_num == if_num) { + struct nss_wifi_mesh_stats_handle *h = nss_wifi_mesh_stats_hdl[idx]; + return h; + } + } + } + return NULL; +} + +/* + * nss_wifi_mesh_get_stats() + * API for getting stats from a Wi-Fi mesh interface stats + */ +static bool nss_wifi_mesh_get_stats(nss_if_num_t if_num, struct nss_wifi_mesh_hdl_stats_sync_msg *stats) +{ + struct nss_wifi_mesh_stats_handle *h; + + if (!nss_wifi_mesh_verify_if_num(if_num)) { + return false; + } + + spin_lock(&nss_wifi_mesh_stats_lock); + h = nss_wifi_mesh_get_stats_handle(if_num); + if (!h) { + spin_unlock(&nss_wifi_mesh_stats_lock); + nss_warning("Invalid Wi-Fi mesh stats handle for interface number: %d\n", if_num); + return false; + } + + memcpy(stats, &h->stats, sizeof(*stats)); + spin_unlock(&nss_wifi_mesh_stats_lock); + return true; +} + +/* + * nss_wifi_mesh_get_valid_interface_count() + * Get count of valid Wi-Fi mesh interfaces up. 
+ */ +static uint32_t nss_wifi_mesh_get_valid_interface_count(uint16_t type, uint32_t if_num, uint32_t max_if_num) +{ + uint32_t interface_count = 0; + enum nss_dynamic_interface_type dtype; + + for (; if_num <= max_if_num; if_num++) { + if (!nss_is_dynamic_interface(if_num)) { + continue; + } + + dtype = nss_dynamic_interface_get_type(nss_wifi_mesh_get_context(), if_num); + + if ((type == NSS_WIFI_MESH_OUTER_STATS) && (dtype != NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_OUTER)) { + continue; + } + + if ((type == NSS_WIFI_MESH_INNER_STATS) && (dtype != NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_INNER)) { + continue; + } + + if ((type == NSS_WIFI_MESH_PATH_STATS) && (dtype != NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_INNER)) { + continue; + } + + if ((type == NSS_WIFI_MESH_PROXY_PATH_STATS) && (dtype != NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_INNER)) { + continue; + } + interface_count++; + } + return interface_count; +} + +/** + * nss_wifi_mesh_stats_read() + * Read Wi-Fi Mesh stats. + */ +static ssize_t nss_wifi_mesh_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos, uint16_t type) +{ + uint32_t max_output_lines, max_stats; + size_t size_al, size_wr = 0; + ssize_t bytes_read = 0; + struct nss_stats_data *data = fp->private_data; + int ifindex; + uint32_t if_num = NSS_DYNAMIC_IF_START; + uint32_t interface_count = 0; + uint32_t max_if_num = NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES; + struct nss_wifi_mesh_hdl_stats_sync_msg *stats; + struct net_device *ndev; + struct nss_wifi_mesh_stats_handle *handle; + char *lbuf; + enum nss_dynamic_interface_type dtype; + + if (data) { + if_num = data->if_num; + } + + /* + * If we are done accommodating all the Wi-Fi mesh interfaces. + */ + if (if_num > max_if_num) { + return 0; + } + + /* + * Get number of Wi-Fi mesh interfaces up. 
+ */ + interface_count = nss_wifi_mesh_get_valid_interface_count(type, if_num, max_if_num); + if (!interface_count) { + nss_warning("%px: Invalid number of valid interface for if_num: 0x%x\n", data, if_num); + return 0; + } + + /* + * max output lines = #stats + Number of Extra outputlines for future reference to add new stats + + * Maximum node stats + Maximum of all the stats + three blank lines. + */ + max_stats = nss_wifi_mesh_max_statistics(); + max_output_lines = max_stats + NSS_STATS_NODE_MAX + NSS_STATS_EXTRA_OUTPUT_LINES; + size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines * interface_count; + + lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer\n"); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "wifi_mesh", NSS_STATS_SINGLE_CORE); + + stats = kzalloc(sizeof(struct nss_wifi_mesh_hdl_stats_sync_msg), GFP_KERNEL); + if (!stats) { + nss_warning("%px: Failed to allocate stats memory for if_num: 0x%x\n", data, if_num); + kfree(lbuf); + return 0; + } + + for (; if_num <= max_if_num; if_num++) { + bool ret; + + if (!nss_is_dynamic_interface(if_num)) { + continue; + } + + dtype = nss_dynamic_interface_get_type(nss_wifi_mesh_get_context(), if_num); + + if ((type == NSS_WIFI_MESH_OUTER_STATS) && (dtype != NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_OUTER)) { + continue; + } + + if ((type == NSS_WIFI_MESH_INNER_STATS) && (dtype != NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_INNER)) { + continue; + } + + if ((type == NSS_WIFI_MESH_PATH_STATS) && (dtype != NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_INNER)) { + continue; + } + + if ((type == NSS_WIFI_MESH_PROXY_PATH_STATS) && (dtype != NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_INNER)) { + continue; + } + + /* + * If Wi-Fi mesh stats handle does not exists, then ret will be false. 
+ */ + ret = nss_wifi_mesh_get_stats(if_num, stats); + if (!ret) { + continue; + } + + spin_lock(&nss_wifi_mesh_stats_lock); + handle = nss_wifi_mesh_get_stats_handle(if_num); + if (!handle) { + spin_unlock(&nss_wifi_mesh_stats_lock); + nss_warning("Invalid Wi-Fi mesh stats handle, if_num: %d\n", if_num); + continue; + } + ifindex = handle->ifindex; + spin_unlock(&nss_wifi_mesh_stats_lock); + + ndev = dev_get_by_index(&init_net, ifindex); + if (!ndev) { + continue; + } + + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n%s if_num:%03u\n", + ndev->name, if_num); + dev_put(ndev); + + /* + * Read encap stats, path stats, proxy path stats from inner node and decap stats from outer node. + */ + switch (type) { + case NSS_WIFI_MESH_INNER_STATS: + size_wr += nss_stats_print("wifi_mesh", "encap stats", NSS_STATS_SINGLE_INSTANCE + , nss_wifi_mesh_strings_encap_stats + , stats->encap_stats + , NSS_WIFI_MESH_ENCAP_STATS_TYPE_MAX + , lbuf, size_wr, size_al); + break; + + case NSS_WIFI_MESH_PATH_STATS: + size_wr += nss_stats_print("wifi_mesh", "path stats", NSS_STATS_SINGLE_INSTANCE + , nss_wifi_mesh_strings_path_stats + , stats->path_stats + , NSS_WIFI_MESH_PATH_STATS_TYPE_MAX + , lbuf, size_wr, size_al); + break; + + case NSS_WIFI_MESH_PROXY_PATH_STATS: + size_wr += nss_stats_print("wifi_mesh", "proxy path stats", NSS_STATS_SINGLE_INSTANCE + , nss_wifi_mesh_strings_proxy_path_stats + , stats->proxy_path_stats + , NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_MAX + , lbuf, size_wr, size_al); + break; + + case NSS_WIFI_MESH_OUTER_STATS: + size_wr += nss_stats_print("wifi_mesh", "decap stats", NSS_STATS_SINGLE_INSTANCE + , nss_wifi_mesh_strings_decap_stats + , stats->decap_stats + , NSS_WIFI_MESH_DECAP_STATS_TYPE_MAX + , lbuf, size_wr, size_al); + break; + + case NSS_WIFI_MESH_EXCEPTION_STATS: + size_wr += nss_stats_print("wifi_mesh", "exception stats", NSS_STATS_SINGLE_INSTANCE + , nss_wifi_mesh_strings_exception_stats + , stats->except_stats + , 
NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_MAX + , lbuf, size_wr, size_al); + break; + + default: + nss_warning("%px: Invalid stats type: %d\n", stats, type); + nss_assert(0); + kfree(stats); + kfree(lbuf); + return 0; + } + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, size_wr); + kfree(stats); + kfree(lbuf); + return bytes_read; +} + +/** + * nss_wifi_mesh_decap_stats_read() + * Read Wi-Fi Mesh decap stats. + */ +static ssize_t nss_wifi_mesh_decap_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_wifi_mesh_stats_read(fp, ubuf, sz, ppos, NSS_WIFI_MESH_OUTER_STATS); +} + +/** + * nss_wifi_mesh_encap_stats_read() + * Read Wi-Fi Mesh encap stats + */ +static ssize_t nss_wifi_mesh_encap_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_wifi_mesh_stats_read(fp, ubuf, sz, ppos, NSS_WIFI_MESH_INNER_STATS); +} + +/** + * nss_wifi_mesh_path_stats_read() + * Read Wi-Fi Mesh path stats + */ +static ssize_t nss_wifi_mesh_path_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_wifi_mesh_stats_read(fp, ubuf, sz, ppos, NSS_WIFI_MESH_PATH_STATS); +} + +/** + * nss_wifi_mesh_proxy_path_stats_read() + * Read Wi-Fi Mesh proxy path stats + */ +static ssize_t nss_wifi_mesh_proxy_path_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_wifi_mesh_stats_read(fp, ubuf, sz, ppos, NSS_WIFI_MESH_PROXY_PATH_STATS); +} + +/** + * nss_wifi_mesh_exception_stats_read() + * Read Wi-Fi Mesh exception stats + */ +static ssize_t nss_wifi_mesh_exception_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_wifi_mesh_stats_read(fp, ubuf, sz, ppos, NSS_WIFI_MESH_EXCEPTION_STATS); +} + +/* + * nss_wifi_mesh_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(wifi_mesh_encap); +NSS_STATS_DECLARE_FILE_OPERATIONS(wifi_mesh_decap); +NSS_STATS_DECLARE_FILE_OPERATIONS(wifi_mesh_path); 
+NSS_STATS_DECLARE_FILE_OPERATIONS(wifi_mesh_proxy_path); +NSS_STATS_DECLARE_FILE_OPERATIONS(wifi_mesh_exception); + +/* + * nss_wifi_mesh_get_interface_type() + * Function to get the type of dynamic interface. + */ +static enum nss_dynamic_interface_type nss_wifi_mesh_get_interface_type(nss_if_num_t if_num) +{ + struct nss_ctx_instance *nss_ctx = &nss_top_main.nss[nss_top_main.wifi_handler_id]; + NSS_VERIFY_CTX_MAGIC(nss_ctx); + return nss_dynamic_interface_get_type(nss_ctx, if_num); +} + +/* + * nss_wifi_mesh_update_stats() + * Update stats for Wi-Fi mesh interface. + */ +void nss_wifi_mesh_update_stats(nss_if_num_t if_num, struct nss_wifi_mesh_stats_sync_msg *mstats) +{ + struct nss_wifi_mesh_stats_handle *handle; + struct nss_wifi_mesh_hdl_stats_sync_msg *stats; + enum nss_dynamic_interface_type type; + uint64_t *dst; + uint32_t *src; + int i; + + spin_lock(&nss_wifi_mesh_stats_lock); + handle = nss_wifi_mesh_get_stats_handle(if_num); + if (!handle) { + spin_unlock(&nss_wifi_mesh_stats_lock); + nss_warning("Invalid Wi-Fi mesh stats handle, if_num: %d\n", if_num); + return; + } + + type = nss_wifi_mesh_get_interface_type(handle->if_num); + stats = &handle->stats; + + switch (type) { + case NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_INNER: + /* + * Update pnode Rx stats. + */ + stats->encap_stats[NSS_WIFI_MESH_ENCAP_STATS_TYPE_PNODE_RX_PACKETS] += mstats->pnode_stats.rx_packets; + stats->encap_stats[NSS_WIFI_MESH_ENCAP_STATS_TYPE_PNODE_RX_BYTES] += mstats->pnode_stats.rx_bytes; + stats->encap_stats[NSS_WIFI_MESH_ENCAP_STATS_TYPE_PNODE_RX_DROPPED] += nss_cmn_rx_dropped_sum(&mstats->pnode_stats); + + /* + * Update pnode Tx stats. + */ + stats->encap_stats[NSS_WIFI_MESH_ENCAP_STATS_TYPE_PNODE_TX_PACKETS] += mstats->pnode_stats.tx_packets; + stats->encap_stats[NSS_WIFI_MESH_ENCAP_STATS_TYPE_PNODE_TX_BYTES] += mstats->pnode_stats.tx_bytes; + + /* + * Update encap stats. 
+ */ + dst = &stats->encap_stats[NSS_WIFI_MESH_ENCAP_STATS_TYPE_EXPIRY_NOTIFY_SENT]; + src = &mstats->mesh_encap_stats.expiry_notify_sent; + for (i = NSS_WIFI_MESH_ENCAP_STATS_TYPE_EXPIRY_NOTIFY_SENT; i < NSS_WIFI_MESH_ENCAP_STATS_TYPE_MAX; i++) { + *dst++ += *src++; + } + + /* + * Update mesh path stats. + */ + dst = &stats->path_stats[NSS_WIFI_MESH_PATH_STATS_TYPE_ALLOC_FAILURES]; + src = &mstats->mesh_path_stats.alloc_failures; + for (i = NSS_WIFI_MESH_PATH_STATS_TYPE_ALLOC_FAILURES; i < NSS_WIFI_MESH_PATH_STATS_TYPE_MAX; i++) { + *dst++ += *src++; + } + + /* + * Update mesh proxy path stats. + */ + dst = &stats->proxy_path_stats[NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_ALLOC_FAILURES]; + src = &mstats->mesh_proxy_path_stats.alloc_failures; + for (i = NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_ALLOC_FAILURES; i < NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_MAX; i++) { + *dst++ += *src++; + } + + /* + * Update exception stats. + */ + dst = &stats->except_stats[NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_PACKETS_SUCCESS]; + src = &mstats->mesh_except_stats.packets_success; + for (i = NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_PACKETS_SUCCESS; i < NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_MAX; i++) { + *dst++ += *src++; + } + spin_unlock(&nss_wifi_mesh_stats_lock); + break; + + case NSS_DYNAMIC_INTERFACE_TYPE_WIFI_MESH_OUTER: + /* + * Update pnode Rx stats. + */ + stats->decap_stats[NSS_WIFI_MESH_DECAP_STATS_TYPE_PNODE_RX_PACKETS] += mstats->pnode_stats.rx_packets; + stats->decap_stats[NSS_WIFI_MESH_DECAP_STATS_TYPE_PNODE_RX_BYTES] += mstats->pnode_stats.rx_bytes; + stats->decap_stats[NSS_WIFI_MESH_DECAP_STATS_TYPE_PNODE_RX_DROPPED] += nss_cmn_rx_dropped_sum(&mstats->pnode_stats); + + /* + * Update pnode Tx stats. + */ + stats->decap_stats[NSS_WIFI_MESH_DECAP_STATS_TYPE_PNODE_TX_PACKETS] += mstats->pnode_stats.tx_packets; + stats->decap_stats[NSS_WIFI_MESH_DECAP_STATS_TYPE_PNODE_TX_BYTES] += mstats->pnode_stats.tx_bytes; + + /* + * Update decap stats. 
+ */ + dst = &stats->decap_stats[NSS_WIFI_MESH_DECAP_STATS_TYPE_PATH_REFRESH_SENT]; + src = &mstats->mesh_decap_stats.path_refresh_sent; + for (i = NSS_WIFI_MESH_DECAP_STATS_TYPE_PATH_REFRESH_SENT; i < NSS_WIFI_MESH_DECAP_STATS_TYPE_MAX; i++) { + *dst++ += *src++; + } + spin_unlock(&nss_wifi_mesh_stats_lock); + break; + + default: + spin_unlock(&nss_wifi_mesh_stats_lock); + nss_warning("%px: Received invalid dynamic interface type: %d\n", handle, type); + nss_assert(0); + } +} + +/* + * nss_wifi_mesh_stats_notify() + * Sends notifications to the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_wifi_mesh_stats_notify(nss_if_num_t if_num, uint32_t core_id) +{ + struct nss_wifi_mesh_stats_notification wifi_mesh_stats; + + if (!nss_wifi_mesh_get_stats(if_num, &wifi_mesh_stats.stats)) { + nss_warning("No handle is present with ifnum: 0x%x\n", if_num); + return; + } + + wifi_mesh_stats.core_id = core_id; + wifi_mesh_stats.if_num = if_num; + atomic_notifier_call_chain(&nss_wifi_mesh_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)&wifi_mesh_stats); +} + +/* + * nss_wifi_mesh_stats_dentry_create() + * Create Wi-Fi Mesh statistics debug entry + */ +struct dentry *nss_wifi_mesh_stats_dentry_create(void) +{ + struct dentry *stats_dentry_dir; + struct dentry *stats_file; + char dir_name[NSS_WIFI_MESH_DENTRY_FILE_SIZE] = {0}; + + if (!nss_top_main.stats_dentry) { + nss_warning("qca-nss-drv/stats is not present\n"); + return NULL; + } + + snprintf(dir_name, sizeof(dir_name), "wifi_mesh"); + + stats_dentry_dir = debugfs_create_dir(dir_name, nss_top_main.stats_dentry); + if (!stats_dentry_dir) { + nss_warning("Failed to create qca-nss-drv/stats/wifi_mesh directory\n"); + return NULL; + } + + stats_file = debugfs_create_file("encap_stats", 0400, stats_dentry_dir, &nss_top_main, &nss_wifi_mesh_encap_stats_ops); + if (!stats_file) { + nss_warning("Failed to create qca-nss-drv/stats/wifi_mesh/encap_stats file\n"); + goto fail; + } + + 
stats_file = debugfs_create_file("decap_stats", 0400, stats_dentry_dir, &nss_top_main, &nss_wifi_mesh_decap_stats_ops); + if (!stats_file) { + nss_warning("Failed to create qca-nss-drv/stats/wifi_mesh/decap_stats file\n"); + goto fail; + } + + stats_file = debugfs_create_file("path_stats", 0400, stats_dentry_dir, &nss_top_main, &nss_wifi_mesh_path_stats_ops); + if (!stats_file) { + nss_warning("Failed to create qca-nss-drv/stats/wifi_mesh/path_stats file\n"); + goto fail; + } + + stats_file = debugfs_create_file("proxy_path_stats", 0400, stats_dentry_dir, &nss_top_main, &nss_wifi_mesh_proxy_path_stats_ops); + if (!stats_file) { + nss_warning("Failed to create qca-nss-drv/stats/wifi_mesh/proxy_path_stats file\n"); + goto fail; + } + stats_file = debugfs_create_file("exception_stats", 0400, stats_dentry_dir, &nss_top_main, &nss_wifi_mesh_exception_stats_ops); + if (!stats_file) { + nss_warning("Failed to create qca-nss-drv/stats/wifi_mesh/exception_stats file\n"); + goto fail; + } + return stats_dentry_dir; +fail: + debugfs_remove_recursive(stats_dentry_dir); + return NULL; +} + +/** + * nss_wifi_mesh_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_wifi_mesh_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_wifi_mesh_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_wifi_mesh_stats_register_notifier); + +/** + * nss_wifi_mesh_stats_unregister_notifier() + * Deregisters statistics notifier. 
+ */ +int nss_wifi_mesh_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_wifi_mesh_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_wifi_mesh_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_stats.h new file mode 100644 index 000000000..0e3a11850 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_stats.h @@ -0,0 +1,42 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_WIFI_MESH_STATS_H__ +#define __NSS_WIFI_MESH_STATS_H__ + +/** + * Array of pointer for NSS Wi-Fi mesh handles. + * Each handle has per-tunnel statistics based on the interface number which is an index. + */ +struct nss_wifi_mesh_stats_handle { + nss_if_num_t if_num; /**< Interface number. */ + uint32_t ifindex; /**< Netdev index. */ + uint32_t mesh_idx; /**< Mesh index. */ + struct nss_wifi_mesh_hdl_stats_sync_msg stats; /**< Stats per-interface number. 
*/ +}; + +/* + * Wi-Fi Mesh statistics APIs + */ +extern void nss_wifi_mesh_update_stats(nss_if_num_t if_num, struct nss_wifi_mesh_stats_sync_msg *mstats); +extern void nss_wifi_mesh_stats_notify(nss_if_num_t if_num, uint32_t core_id); +extern struct dentry *nss_wifi_mesh_stats_dentry_create(void); +extern struct nss_wifi_mesh_stats_handle *nss_wifi_mesh_get_stats_handle(nss_if_num_t if_num); +extern bool nss_wifi_mesh_stats_handle_alloc(nss_if_num_t if_num, int32_t ifindex); +extern bool nss_wifi_mesh_stats_handle_free(nss_if_num_t if_num); +#endif /* __NSS_WIFI_MESH_STATS_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_strings.c new file mode 100644 index 000000000..25f647323 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_strings.c @@ -0,0 +1,276 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_wifi_mesh_stats.h" +#include "nss_strings.h" +#include "nss_wifi_mesh_strings.h" + +/* + * nss_wifi_mesh_strings_encap_stats + * Wi-Fi mesh encap statistics string. + */ +struct nss_stats_info nss_wifi_mesh_strings_encap_stats[NSS_WIFI_MESH_ENCAP_STATS_TYPE_MAX] = { + {"rx_packets", NSS_STATS_TYPE_COMMON}, + {"rx_bytes", NSS_STATS_TYPE_COMMON}, + {"tx_packets", NSS_STATS_TYPE_COMMON}, + {"tx_bytes", NSS_STATS_TYPE_COMMON}, + {"rx_dropped", NSS_STATS_TYPE_COMMON}, + {"expiry_notify_sent", NSS_STATS_TYPE_SPECIAL}, + {"mc_count", NSS_STATS_TYPE_SPECIAL}, + {"mp_not_found", NSS_STATS_TYPE_SPECIAL}, + {"mp_active", NSS_STATS_TYPE_SPECIAL}, + {"mpp_not_found", NSS_STATS_TYPE_SPECIAL}, + {"mpp_found", NSS_STATS_TYPE_SPECIAL}, + {"encap_hdr_fail", NSS_STATS_TYPE_SPECIAL}, + {"mp_del_notify_fail", NSS_STATS_TYPE_SPECIAL}, + {"link_enqueue", NSS_STATS_TYPE_SPECIAL}, + {"link_enq_fail", NSS_STATS_TYPE_SPECIAL}, + {"ra_lup_fail", NSS_STATS_TYPE_SPECIAL}, + {"dummy_add_count", NSS_STATS_TYPE_SPECIAL}, + {"encap_mp_add_notify_fail", NSS_STATS_TYPE_SPECIAL}, + {"dummy_add_fail", NSS_STATS_TYPE_SPECIAL}, + {"dummy_lup_fail", NSS_STATS_TYPE_SPECIAL}, + {"send_to_host_failed", NSS_STATS_TYPE_SPECIAL}, + {"sent_to_host", NSS_STATS_TYPE_SPECIAL}, + {"expiry_notify_fail", NSS_STATS_TYPE_SPECIAL}, + {"no_headroom", NSS_STATS_TYPE_SPECIAL}, + {"path_refresh_sent", NSS_STATS_TYPE_SPECIAL}, + {"linearise_failed", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_wifi_mesh_encap_strings_read() + * Read Wi-Fi mesh encap statistics names. 
+ */ +static ssize_t nss_wifi_mesh_encap_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifi_mesh_strings_encap_stats, NSS_WIFI_MESH_ENCAP_STATS_TYPE_MAX); +} + +/* + * nss_wifi_mesh_strings_path_stats + * Wi-Fi mesh path statistics string. + */ +struct nss_stats_info nss_wifi_mesh_strings_path_stats[NSS_WIFI_MESH_PATH_STATS_TYPE_MAX] = { + {"alloc_failures", NSS_STATS_TYPE_SPECIAL}, + {"error_max_radio_count", NSS_STATS_TYPE_SPECIAL}, + {"invalid_interface_failures", NSS_STATS_TYPE_SPECIAL}, + {"add_success", NSS_STATS_TYPE_SPECIAL}, + {"table_full_errors", NSS_STATS_TYPE_SPECIAL}, + {"insert_failures", NSS_STATS_TYPE_SPECIAL}, + {"not_found", NSS_STATS_TYPE_SPECIAL}, + {"delete_success", NSS_STATS_TYPE_SPECIAL}, + {"update_success", NSS_STATS_TYPE_SPECIAL}, + {"mesh_path_expired", NSS_STATS_TYPE_SPECIAL}, + {"mesh_path_refresh_needed", NSS_STATS_TYPE_SPECIAL}, + {"add_requests", NSS_STATS_TYPE_SPECIAL}, + {"del_requests", NSS_STATS_TYPE_SPECIAL}, + {"update_requests", NSS_STATS_TYPE_SPECIAL}, + {"next_hop_updations", NSS_STATS_TYPE_SPECIAL}, + {"hop_count_updations", NSS_STATS_TYPE_SPECIAL}, + {"flag_updations", NSS_STATS_TYPE_SPECIAL}, + {"metric_updations", NSS_STATS_TYPE_SPECIAL}, + {"block_mesh_fwd_updations", NSS_STATS_TYPE_SPECIAL}, + {"delete_failures", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_wifi_mesh_path_strings_read() + * Read Wi-Fi mesh path statistics names. + */ +static ssize_t nss_wifi_mesh_path_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifi_mesh_strings_path_stats, NSS_WIFI_MESH_PATH_STATS_TYPE_MAX); +} + +/* + * nss_wifi_mesh_strings_proxy_path_stats + * Wi-Fi mesh proxy path statistics string. 
+ */ +struct nss_stats_info nss_wifi_mesh_strings_proxy_path_stats[NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_MAX] = { + {"alloc_failures", NSS_STATS_TYPE_SPECIAL}, + {"entry_exist_failures", NSS_STATS_TYPE_SPECIAL}, + {"add_success", NSS_STATS_TYPE_SPECIAL}, + {"table_full_errors", NSS_STATS_TYPE_SPECIAL}, + {"insert_failures", NSS_STATS_TYPE_SPECIAL}, + {"not_found", NSS_STATS_TYPE_SPECIAL}, + {"unhashed_errors", NSS_STATS_TYPE_SPECIAL}, + {"delete_failures", NSS_STATS_TYPE_SPECIAL}, + {"delete_success", NSS_STATS_TYPE_SPECIAL}, + {"update_success", NSS_STATS_TYPE_SPECIAL}, + {"lookup_success", NSS_STATS_TYPE_SPECIAL}, + {"add_requests", NSS_STATS_TYPE_SPECIAL}, + {"del_requests", NSS_STATS_TYPE_SPECIAL}, + {"update_requests", NSS_STATS_TYPE_SPECIAL}, + {"mda_updations", NSS_STATS_TYPE_SPECIAL}, + {"flag_updations", NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_wifi_mesh_proxy_path_strings_read() + * Read Wi-Fi mesh proxy path statistics names. + */ +static ssize_t nss_wifi_mesh_proxy_path_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifi_mesh_strings_proxy_path_stats, NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_MAX); +} + +/* + * nss_wifi_mesh_strings_decap_stats + * Wi-Fi mesh decap statistics string. 
+ */ +struct nss_stats_info nss_wifi_mesh_strings_decap_stats[NSS_WIFI_MESH_DECAP_STATS_TYPE_MAX] = { + {"rx_packets", NSS_STATS_TYPE_COMMON}, + {"rx_bytes", NSS_STATS_TYPE_COMMON}, + {"tx_packets", NSS_STATS_TYPE_COMMON}, + {"tx_bytes", NSS_STATS_TYPE_COMMON}, + {"rx_dropped", NSS_STATS_TYPE_COMMON}, + {"path_refresh_sent", NSS_STATS_TYPE_SPECIAL}, + {"reserved", NSS_STATS_TYPE_SPECIAL}, + {"mc_drop", NSS_STATS_TYPE_DROP}, + {"ttl_0", NSS_STATS_TYPE_SPECIAL}, + {"mpp_lup_fail", NSS_STATS_TYPE_SPECIAL}, + {"decap_hdr_fail", NSS_STATS_TYPE_SPECIAL}, + {"rx_fwd_fail", NSS_STATS_TYPE_SPECIAL}, + {"rx_fwd_success", NSS_STATS_TYPE_SPECIAL}, + {"mp_fwd_lookup_fail", NSS_STATS_TYPE_SPECIAL}, + {"mp_fwd_inactive", NSS_STATS_TYPE_SPECIAL}, + {"nxt_mnode_fwd_success", NSS_STATS_TYPE_SPECIAL}, + {"nxt_mnode_fwd_fail", NSS_STATS_TYPE_SPECIAL}, + {"mpp_add_fail", NSS_STATS_TYPE_SPECIAL}, + {"mpp_add_event2host_fail", NSS_STATS_TYPE_SPECIAL}, + {"mpp_upate_fail", NSS_STATS_TYPE_SPECIAL}, + {"mpp_update_even2host_fail", NSS_STATS_TYPE_SPECIAL}, + {"mpp_learn2host_fail", NSS_STATS_TYPE_SPECIAL}, + {"block_mesh_fwd_packets", NSS_STATS_TYPE_SPECIAL}, + {"no_headroom", NSS_STATS_TYPE_SPECIAL}, + {"linearise_failed", NSS_STATS_TYPE_SPECIAL}, + {"mpp_learn_event_rl_dropped", NSS_STATS_TYPE_DROP}, + {"mp_missging_event_rl_dropped", NSS_STATS_TYPE_DROP} +}; + +/* + * nss_wifi_mesh_decap_strings_read() + * Read Wi-Fi mesh decap statistics names. + */ +static ssize_t nss_wifi_mesh_decap_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifi_mesh_strings_decap_stats, NSS_WIFI_MESH_DECAP_STATS_TYPE_MAX); +} + +/* + * nss_wifi_mesh_strings_exception_stats + * Wi-Fi mesh exception statistics string. 
+ */ +struct nss_stats_info nss_wifi_mesh_strings_exception_stats[NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_MAX] = { + {"packets_success", NSS_STATS_TYPE_SPECIAL}, + {"packets_failure", NSS_STATS_TYPE_DROP} +}; + +/* + * nss_wifi_mesh_exception_strings_read() + * Read Wi-Fi mesh exception statistics names. + */ +static ssize_t nss_wifi_mesh_exception_stats_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifi_mesh_strings_exception_stats, NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_MAX); +} + +/* + * nss_wifi_mesh_decap_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifi_mesh_decap_stats); + +/* + * nss_wifi_mesh_encap_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifi_mesh_encap_stats); + +/* + * nss_wifi_mesh_path_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifi_mesh_path_stats); + +/* + * nss_wifi_mesh_proxy_path_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifi_mesh_proxy_path_stats); + +/* + * nss_wifi_mesh_exception_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifi_mesh_exception_stats); + +/* + * nss_wifi_mesh_strings_dentry_create() + * Create Wi-Fi mesh statistics strings debug entry. 
+ */ +struct dentry *nss_wifi_mesh_strings_dentry_create(void) +{ + struct dentry *str_dentry_dir; + struct dentry *str_file; + + if (!nss_top_main.strings_dentry) { + nss_warning("qca-nss-drv/strings is not present\n"); + return NULL; + } + + str_dentry_dir = debugfs_create_dir("wifi_mesh", nss_top_main.strings_dentry); + if (!str_dentry_dir) { + nss_warning("Failed to create qca-nss-drv/string/wifi_mesh directory\n"); + return NULL; + } + + str_file = debugfs_create_file("encap_stats", 0400, str_dentry_dir, &nss_top_main, &nss_wifi_mesh_encap_stats_strings_ops); + if (!str_file) { + nss_warning("Failed to create qca-nss-drv/string/wifi_mesh/encap_stats file\n"); + goto fail; + } + + str_file = debugfs_create_file("decap_stats", 0400, str_dentry_dir, &nss_top_main, &nss_wifi_mesh_decap_stats_strings_ops); + if (!str_file) { + nss_warning("Failed to create qca-nss-drv/string/wifi_mesh/decap_stats file\n"); + goto fail; + } + + str_file = debugfs_create_file("path_stats", 0400, str_dentry_dir, &nss_top_main, &nss_wifi_mesh_path_stats_strings_ops); + if (!str_file) { + nss_warning("Failed to create qca-nss-drv/string/wifi_mesh/path_stats file\n"); + goto fail; + } + + str_file = debugfs_create_file("proxy_path_stats", 0400, str_dentry_dir, &nss_top_main, &nss_wifi_mesh_proxy_path_stats_strings_ops); + if (!str_file) { + nss_warning("Failed to create qca-nss-drv/string/wifi_mesh/proxy_path_stats file\n"); + goto fail; + } + + str_file = debugfs_create_file("exception_stats", 0400, str_dentry_dir, &nss_top_main, &nss_wifi_mesh_exception_stats_strings_ops); + if (!str_file) { + nss_warning("Failed to create qca-nss-drv/string/wifi_mesh/exception_stats file\n"); + goto fail; + } + + return str_dentry_dir; +fail: + debugfs_remove_recursive(str_dentry_dir); + return NULL; +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_strings.h new file mode 100644 index 000000000..e858cbd05 --- /dev/null +++ 
b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_mesh_strings.h @@ -0,0 +1,32 @@ +/* + ************************************************************************** + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_WIFI_MESH_STRINGS_H +#define __NSS_WIFI_MESH_STRINGS_H + +#include "nss_wifi_mesh_stats.h" +#include "nss_strings.h" + +extern struct nss_stats_info nss_wifi_mesh_strings_encap_stats[NSS_WIFI_MESH_ENCAP_STATS_TYPE_MAX]; +extern struct nss_stats_info nss_wifi_mesh_strings_decap_stats[NSS_WIFI_MESH_DECAP_STATS_TYPE_MAX]; +extern struct nss_stats_info nss_wifi_mesh_strings_path_stats[NSS_WIFI_MESH_PATH_STATS_TYPE_MAX]; +extern struct nss_stats_info nss_wifi_mesh_strings_proxy_path_stats[NSS_WIFI_MESH_PROXY_PATH_STATS_TYPE_MAX]; +extern struct nss_stats_info nss_wifi_mesh_strings_exception_stats[NSS_WIFI_MESH_EXCEPTION_STATS_TYPE_MAX]; +extern struct dentry *nss_wifi_mesh_strings_dentry_create(void); + +#endif /* __NSS_WIFI_MESH_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_stats.c new file mode 100644 index 000000000..7a31b25b9 --- /dev/null +++ 
b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_stats.c @@ -0,0 +1,213 @@ +/* + ************************************************************************** + * Copyright (c) 2016-2017, 2019-2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_core.h" +#include "nss_wifi.h" +#include "nss_wifi_stats.h" + +/* + * nss_wifi_stats_str + * Wifi statistics strings. 
+ */ +struct nss_stats_info nss_wifi_stats_str[NSS_WIFI_STATS_MAX] = { + {"rx_pkts" , NSS_STATS_TYPE_COMMON}, + {"rx_queue[0]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[1]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[2]_drops" , NSS_STATS_TYPE_DROP}, + {"rx_queue[3]_drops" , NSS_STATS_TYPE_DROP}, + {"tx_pkts" , NSS_STATS_TYPE_COMMON}, + {"tx_drops" , NSS_STATS_TYPE_DROP}, + {"tx_transmit_completed" , NSS_STATS_TYPE_SPECIAL}, + {"tx_mgmt_received" , NSS_STATS_TYPE_SPECIAL}, + {"tx_mgmt_transmitted" , NSS_STATS_TYPE_SPECIAL}, + {"tx_mgmt_drops" , NSS_STATS_TYPE_DROP}, + {"tx_mgmt_completed" , NSS_STATS_TYPE_SPECIAL}, + {"tx_inv_peer_enq_cnt" , NSS_STATS_TYPE_SPECIAL}, + {"rx_inv_peer_rcv_cnt" , NSS_STATS_TYPE_SPECIAL}, + {"rx_pn_check_failed" , NSS_STATS_TYPE_DROP}, + {"rx_pkts_deliverd" , NSS_STATS_TYPE_SPECIAL}, + {"rx_bytes_delivered" , NSS_STATS_TYPE_SPECIAL}, + {"tx_bytes_completed" , NSS_STATS_TYPE_SPECIAL}, + {"rx_deliver_unaligned_drop_cnt" , NSS_STATS_TYPE_DROP}, + {"tidq_enqueue_cnt_0" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_enqueue_cnt_1" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_enqueue_cnt_2" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_enqueue_cnt_3" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_enqueue_cnt_4" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_enqueue_cnt_5" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_enqueue_cnt_6" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_enqueue_cnt_7" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_cnt_0" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_cnt_1" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_cnt_2" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_cnt_3" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_cnt_4" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_cnt_5" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_cnt_6" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_cnt_7" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_enqueue_fail_cnt_0" , NSS_STATS_TYPE_DROP}, + {"tidq_enqueue_fail_cnt_1" , NSS_STATS_TYPE_DROP}, + {"tidq_enqueue_fail_cnt_2" , NSS_STATS_TYPE_DROP}, + {"tidq_enqueue_fail_cnt_3" , 
NSS_STATS_TYPE_DROP}, + {"tidq_enqueue_fail_cnt_4" , NSS_STATS_TYPE_DROP}, + {"tidq_enqueue_fail_cnt_5" , NSS_STATS_TYPE_DROP}, + {"tidq_enqueue_fail_cnt_6" , NSS_STATS_TYPE_DROP}, + {"tidq_enqueue_fail_cnt_7" , NSS_STATS_TYPE_DROP}, + {"tidq_ttl_expire_cnt_0" , NSS_STATS_TYPE_DROP}, + {"tidq_ttl_expire_cnt_1" , NSS_STATS_TYPE_DROP}, + {"tidq_ttl_expire_cnt_2" , NSS_STATS_TYPE_DROP}, + {"tidq_ttl_expire_cnt_3" , NSS_STATS_TYPE_DROP}, + {"tidq_ttl_expire_cnt_4" , NSS_STATS_TYPE_DROP}, + {"tidq_ttl_expire_cnt_5" , NSS_STATS_TYPE_DROP}, + {"tidq_ttl_expire_cnt_6" , NSS_STATS_TYPE_DROP}, + {"tidq_ttl_expire_cnt_7" , NSS_STATS_TYPE_DROP}, + {"tidq_dequeue_req_cnt_0" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_req_cnt_1" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_req_cnt_2" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_req_cnt_3" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_req_cnt_4" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_req_cnt_5" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_req_cnt_6" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_dequeue_req_cnt_7" , NSS_STATS_TYPE_SPECIAL}, + {"total_tidq_depth" , NSS_STATS_TYPE_SPECIAL}, + {"rx_htt_fetch_cnt" , NSS_STATS_TYPE_SPECIAL}, + {"total_tidq_bypass_cnt" , NSS_STATS_TYPE_SPECIAL}, + {"global_q_full_cnt" , NSS_STATS_TYPE_SPECIAL}, + {"tidq_full_cnt" , NSS_STATS_TYPE_SPECIAL} +}; + +uint64_t nss_wifi_stats[NSS_MAX_WIFI_RADIO_INTERFACES][NSS_WIFI_STATS_MAX]; /* WIFI statistics */ + +/* + * nss_wifi_stats_read() + * Read wifi statistics. + */ +static ssize_t nss_wifi_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + uint32_t i, id; + + /* + * Max output lines = #stats * NSS_MAX_CORES + + * Few output lines for banner printing + Number of Extra outputlines for future reference to add new stats. 
+ */ + uint32_t max_output_lines = NSS_WIFI_STATS_MAX * NSS_MAX_WIFI_RADIO_INTERFACES + NSS_STATS_EXTRA_OUTPUT_LINES; + size_t size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + size_t size_wr = 0; + ssize_t bytes_read = 0; + uint64_t *stats_shadow; + + char *lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + stats_shadow = kzalloc(NSS_WIFI_STATS_MAX * 8, GFP_KERNEL); + if (unlikely(stats_shadow == NULL)) { + nss_warning("Could not allocate memory for local shadow buffer"); + kfree(lbuf); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "wifi", NSS_STATS_SINGLE_CORE); + + for (id = 0; id < NSS_MAX_WIFI_RADIO_INTERFACES; id++) { + spin_lock_bh(&nss_top_main.stats_lock); + for (i = 0; (i < NSS_WIFI_STATS_MAX); i++) { + stats_shadow[i] = nss_wifi_stats[id][i]; + } + + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("wifi", NULL, id, nss_wifi_stats_str, stats_shadow, NSS_WIFI_STATS_MAX, lbuf, size_wr, size_al); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + kfree(stats_shadow); + return bytes_read; +} + +/* + * nss_wifi_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(wifi) + +/* + * nss_wifi_stats_dentry_create() + * Create wifi statistics debug entry. + */ +void nss_wifi_stats_dentry_create(void) +{ + nss_stats_create_dentry("wifi", &nss_wifi_stats_ops); +} + +/* + * nss_wifi_stats_sync() + * Handle the syncing of WIFI stats. 
+ */ +void nss_wifi_stats_sync(struct nss_ctx_instance *nss_ctx, + struct nss_wifi_stats_sync_msg *stats, uint16_t interface) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + uint32_t radio_id = interface - NSS_WIFI_INTERFACE0; + uint8_t i = 0; + + if (radio_id >= NSS_MAX_WIFI_RADIO_INTERFACES) { + nss_warning("%px: invalid interface: %d", nss_ctx, interface); + return; + } + + spin_lock_bh(&nss_top->stats_lock); + + /* + * Tx/Rx stats + */ + nss_wifi_stats[radio_id][NSS_WIFI_STATS_RX_PKTS] += stats->node_stats.rx_packets; + for (i = 0; i < NSS_MAX_NUM_PRI; i++) { + nss_wifi_stats[radio_id][NSS_WIFI_STATS_RX_QUEUE_0_DROPPED + i] += stats->node_stats.rx_dropped[i]; + } + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TX_PKTS] += stats->node_stats.tx_packets; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TX_DROPPED] += stats->tx_transmit_dropped; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TX_COMPLETED] += stats->tx_transmit_completions; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_MGMT_RCV_CNT] += stats->tx_mgmt_rcv_cnt; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_MGMT_TX_PKTS] += stats->tx_mgmt_pkts; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_MGMT_TX_DROPPED] += stats->tx_mgmt_dropped; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_MGMT_TX_COMPLETIONS] += stats->tx_mgmt_completions; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TX_INV_PEER_ENQUEUE_CNT] += stats->tx_inv_peer_enq_cnt; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_RX_INV_PEER_RCV_CNT] += stats->rx_inv_peer_rcv_cnt; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_RX_PN_CHECK_FAILED] += stats->rx_pn_check_failed; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_RX_DELIVERED] += stats->rx_pkts_deliverd; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_RX_BYTES_DELIVERED] += stats->rx_bytes_deliverd; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TX_BYTES_COMPLETED] += stats->tx_bytes_transmit_completions; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_RX_DELIVER_UNALIGNED_DROP_CNT] += stats->rx_deliver_unaligned_drop_cnt; + + for (i = 0; i < 
NSS_WIFI_TX_NUM_TOS_TIDS; i++) { + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TIDQ_ENQUEUE_CNT + i] += stats->tidq_enqueue_cnt[i]; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TIDQ_DEQUEUE_CNT + i] += stats->tidq_dequeue_cnt[i]; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TIDQ_ENQUEUE_FAIL_CNT + i] += stats->tidq_enqueue_fail_cnt[i]; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TIDQ_TTL_EXPIRE_CNT + i] += stats->tidq_ttl_expire_cnt[i]; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TIDQ_DEQUEUE_REQ_CNT + i] += stats->tidq_dequeue_req_cnt[i]; + } + + nss_wifi_stats[radio_id][NSS_WIFI_STATS_RX_HTT_FETCH_CNT] += stats->rx_htt_fetch_cnt; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TOTAL_TIDQ_DEPTH] = stats->total_tidq_depth; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TOTAL_TIDQ_BYPASS_CNT] += stats->total_tidq_bypass_cnt; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_GLOBAL_Q_FULL_CNT] += stats->global_q_full_cnt; + nss_wifi_stats[radio_id][NSS_WIFI_STATS_TIDQ_FULL_CNT] += stats->tidq_full_cnt; + + spin_unlock_bh(&nss_top->stats_lock); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_stats.h new file mode 100644 index 000000000..782777777 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_stats.h @@ -0,0 +1,62 @@ +/* + ****************************************************************************** + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_WIFI_STATS_H +#define __NSS_WIFI_STATS_H + +/* + * wifi statistics + */ +enum nss_wifi_stats_types { + NSS_WIFI_STATS_RX_PKTS, + NSS_WIFI_STATS_RX_QUEUE_0_DROPPED, + NSS_WIFI_STATS_RX_QUEUE_1_DROPPED, + NSS_WIFI_STATS_RX_QUEUE_2_DROPPED, + NSS_WIFI_STATS_RX_QUEUE_3_DROPPED, + NSS_WIFI_STATS_TX_PKTS, + NSS_WIFI_STATS_TX_DROPPED, + NSS_WIFI_STATS_TX_COMPLETED, + NSS_WIFI_STATS_MGMT_RCV_CNT, + NSS_WIFI_STATS_MGMT_TX_PKTS, + NSS_WIFI_STATS_MGMT_TX_DROPPED, + NSS_WIFI_STATS_MGMT_TX_COMPLETIONS, + NSS_WIFI_STATS_TX_INV_PEER_ENQUEUE_CNT, + NSS_WIFI_STATS_RX_INV_PEER_RCV_CNT, + NSS_WIFI_STATS_RX_PN_CHECK_FAILED, + NSS_WIFI_STATS_RX_DELIVERED, + NSS_WIFI_STATS_RX_BYTES_DELIVERED, + NSS_WIFI_STATS_TX_BYTES_COMPLETED, + NSS_WIFI_STATS_RX_DELIVER_UNALIGNED_DROP_CNT, + NSS_WIFI_STATS_TIDQ_ENQUEUE_CNT, + NSS_WIFI_STATS_TIDQ_DEQUEUE_CNT = NSS_WIFI_STATS_TIDQ_ENQUEUE_CNT + 8, + NSS_WIFI_STATS_TIDQ_ENQUEUE_FAIL_CNT = NSS_WIFI_STATS_TIDQ_DEQUEUE_CNT + 8, + NSS_WIFI_STATS_TIDQ_TTL_EXPIRE_CNT = NSS_WIFI_STATS_TIDQ_ENQUEUE_FAIL_CNT + 8, + NSS_WIFI_STATS_TIDQ_DEQUEUE_REQ_CNT = NSS_WIFI_STATS_TIDQ_TTL_EXPIRE_CNT + 8, + NSS_WIFI_STATS_TOTAL_TIDQ_DEPTH = NSS_WIFI_STATS_TIDQ_DEQUEUE_REQ_CNT + 8, + NSS_WIFI_STATS_RX_HTT_FETCH_CNT, + NSS_WIFI_STATS_TOTAL_TIDQ_BYPASS_CNT, + NSS_WIFI_STATS_GLOBAL_Q_FULL_CNT, + NSS_WIFI_STATS_TIDQ_FULL_CNT, + NSS_WIFI_STATS_MAX, +}; + +/* + * wifi statistics APIs + */ +extern void nss_wifi_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_wifi_stats_sync_msg *stats, uint16_t interface); +extern void 
nss_wifi_stats_dentry_create(void); + +#endif /* __NSS_WIFI_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifi_vdev.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_vdev.c new file mode 100644 index 000000000..8f020b2a6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifi_vdev.c @@ -0,0 +1,379 @@ +/* + ************************************************************************** + * Copyright (c) 2015-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#include "nss_tx_rx_common.h" + +/* + * nss_wifi_vdev_handler() + * Handle NSS -> HLOS messages for wifi_vdev + */ +static void nss_wifi_vdev_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + nss_wifi_vdev_msg_callback_t cb; + + nss_info("%px: NSS->HLOS message for wifi vdev on interface:%d", nss_ctx, ncm->interface); + + BUG_ON(((ncm->interface < NSS_DYNAMIC_IF_START) || (ncm->interface >= (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES)))); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_WIFI_VDEV_MAX_MSG) { + nss_warning("%px: received invalid message %d for wifi vdev interface", nss_ctx, ncm->type); + return; + } + + if (nss_cmn_get_msg_len(ncm) > sizeof(struct nss_wifi_vdev_msg)) { + nss_warning("%px: Length of message %d is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm), (int)sizeof(struct nss_wifi_vdev_msg)); + return; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * callback + */ + if (!nss_ctx->subsys_dp_register[ncm->interface].ndev) { + nss_warning("%px: Event received wifi vdev interface %d before registration", nss_ctx, ncm->interface); + return; + + } + + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_core_get_msg_handler(nss_ctx, ncm->interface); + ncm->app_data = (nss_ptr_t)nss_ctx->subsys_dp_register[ncm->interface].ndev; + } + + /* + * Do we have a callback? + */ + if (!ncm->cb) { + return; + } + + cb = (nss_wifi_vdev_msg_callback_t)ncm->cb; + cb((void *)ncm->app_data, ncm); +} + +/* + * nss_wifi_vdev_msg_init() + * Initialize wifi message. 
+ */ +void nss_wifi_vdev_msg_init(struct nss_wifi_vdev_msg *nim, uint32_t if_num, uint32_t type, uint32_t len, + nss_wifi_vdev_msg_callback_t *cb, void *app_data) +{ + nss_cmn_msg_init(&nim->cm, if_num, type, len, (void *)cb, app_data); +} +EXPORT_SYMBOL(nss_wifi_vdev_msg_init); + +/* + * nss_wifi_vdev_base_tx_msg() + * Transmit a wifi vdev base message to NSSFW + */ +nss_tx_status_t nss_wifi_vdev_base_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_wifi_vdev_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + nss_trace("%px: Sending wifi vdev message on interface :%d", nss_ctx, ncm->interface); + + /* + * Sanity checks on the message + */ + + /* + * The interface number shall be wifi vdev base vap + */ + if (ncm->interface != NSS_VAP_INTERFACE) { + nss_warning("%px: wifi vdev base tx request not on wifi vdev vap: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_WIFI_VDEV_MAX_MSG) { + nss_warning("%px: wifi vdev base message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_wifi_vdev_base_tx_msg); + +/* + * nss_wifi_vdev_tx_msg() + * Transmit a wifi vdev message to NSSFW + */ +nss_tx_status_t nss_wifi_vdev_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_wifi_vdev_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + nss_trace("%px: Sending wifi vdev message on interface :%d", nss_ctx, ncm->interface); + + /* + * Sanity checks on the message + */ + + /* + * Interface shall be of dynamic interface type + */ + if ((ncm->interface < NSS_DYNAMIC_IF_START) || (ncm->interface >= (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))) { + nss_warning("%px: wifi vdev tx request for invalid interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_WIFI_VDEV_MAX_MSG) { + nss_warning("%px: wifi vdev message type out of range: %d", nss_ctx, ncm->type); + return 
NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_wifi_vdev_tx_msg); + +/* + * nss_wifi_vdev_tx_msg_ext() + * Send special data packet with metadata for vap processing + */ +nss_tx_status_t nss_wifi_vdev_tx_msg_ext(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf) +{ + struct nss_wifi_vdev_msg *nm; + struct nss_cmn_msg *ncm; + nss_tx_status_t status; + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: wifi vdev message dropped as core not ready", nss_ctx); + return NSS_TX_FAILURE_NOT_READY; + } + + nm = (struct nss_wifi_vdev_msg *) os_buf->data; + ncm = &nm->cm; + + nss_trace("%px: Sending wifi vdev message on interface :%d", nss_ctx, ncm->interface); + + /* + * Interface shall be of dynamic interface type + */ + if ((ncm->interface < NSS_DYNAMIC_IF_START) || (ncm->interface >= (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))) { + nss_warning("%px: wifi vdev tx request for invalid interface: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + if (ncm->type >= NSS_WIFI_VDEV_MAX_MSG) { + nss_warning("%px: wifi vdev message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + status = nss_core_send_buffer(nss_ctx, 0, os_buf, NSS_IF_H2N_CMD_QUEUE, H2N_BUFFER_CTRL, H2N_BIT_FLAG_BUFFER_REUSABLE); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: Unable to enqueue 'wifi vdev message'", nss_ctx); + return NSS_TX_FAILURE; + } + + nss_hal_send_interrupt(nss_ctx, NSS_H2N_INTR_DATA_COMMAND_QUEUE); + + NSS_PKT_STATS_INC(&nss_ctx->nss_top->stats_drv[NSS_DRV_STATS_TX_CMD_REQ]); + +return status; +} +EXPORT_SYMBOL(nss_wifi_vdev_tx_msg_ext); + +/* + * nss_wifi_vdev_tx_buf + * Send data packet for vap processing + */ +nss_tx_status_t nss_wifi_vdev_tx_buf(struct nss_ctx_instance *nss_ctx, struct sk_buff *os_buf, uint32_t if_num) +{ + BUG_ON(((if_num < NSS_DYNAMIC_IF_START) || (if_num 
>= (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES)))); + + return nss_core_send_packet(nss_ctx, os_buf, if_num, H2N_BIT_FLAG_BUFFER_REUSABLE); +} +EXPORT_SYMBOL(nss_wifi_vdev_tx_buf); + +/* + * nss_wifi_vdev_set_next_hop() + */ +nss_tx_status_t nss_wifi_vdev_set_next_hop(struct nss_ctx_instance *ctx, int if_num, int next_hop) +{ + nss_tx_status_t status; + struct nss_wifi_vdev_msg *wifivdevmsg = kzalloc(sizeof(struct nss_wifi_vdev_msg), GFP_KERNEL); + struct nss_wifi_vdev_set_next_hop_msg *next_hop_msg = NULL; + + if (!wifivdevmsg) { + nss_warning("%px: Unable to allocate next hop message", ctx); + return NSS_TX_FAILURE; + } + + next_hop_msg = &wifivdevmsg->msg.next_hop; + + next_hop_msg->ifnumber = next_hop; + nss_wifi_vdev_msg_init(wifivdevmsg, if_num, NSS_WIFI_VDEV_SET_NEXT_HOP, sizeof(struct nss_wifi_vdev_set_next_hop_msg), NULL, NULL); + + status = nss_wifi_vdev_tx_msg(ctx, wifivdevmsg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Unable to send next hop message", ctx); + } + + kfree(wifivdevmsg); + return status; +} +EXPORT_SYMBOL(nss_wifi_vdev_set_next_hop); + +/* + * nss_wifi_vdev_base_set_next_hop() + */ +nss_tx_status_t nss_wifi_vdev_base_set_next_hop(struct nss_ctx_instance *ctx, int next_hop) +{ + nss_tx_status_t status; + struct nss_wifi_vdev_msg *wifivdevmsg = kzalloc(sizeof(struct nss_wifi_vdev_msg), GFP_KERNEL); + struct nss_wifi_vdev_set_next_hop_msg *next_hop_msg = NULL; + + if (!wifivdevmsg) { + nss_warning("%px: Unable to allocate next hop message", ctx); + return NSS_TX_FAILURE; + } + + next_hop_msg = &wifivdevmsg->msg.next_hop; + + next_hop_msg->ifnumber = next_hop; + nss_wifi_vdev_msg_init(wifivdevmsg, NSS_VAP_INTERFACE, NSS_WIFI_VDEV_SET_NEXT_HOP, sizeof(struct nss_wifi_vdev_set_next_hop_msg), NULL, NULL); + + status = nss_wifi_vdev_base_tx_msg(ctx, wifivdevmsg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Unable to send next hop message", ctx); + } + + kfree(wifivdevmsg); + return status; +} 
+EXPORT_SYMBOL(nss_wifi_vdev_base_set_next_hop); + +/* + * nss_wifi_vdev_set_peer_next_hop() + */ +nss_tx_status_t nss_wifi_vdev_set_peer_next_hop(struct nss_ctx_instance *ctx, uint32_t nss_if, uint8_t *addr, uint32_t next_hop_if) +{ + nss_tx_status_t status; + struct nss_wifi_vdev_msg *wifivdevmsg = kzalloc(sizeof(struct nss_wifi_vdev_msg), GFP_KERNEL); + struct nss_wifi_vdev_set_peer_next_hop_msg *peer_next_hop_msg = NULL; + + if (!wifivdevmsg) { + nss_warning("%px: Unable to allocate next hop message", ctx); + return NSS_TX_FAILURE; + } + + peer_next_hop_msg = &wifivdevmsg->msg.vdev_set_peer_next_hp; + memcpy(peer_next_hop_msg->peer_mac_addr, addr, ETH_ALEN); + + peer_next_hop_msg->if_num = next_hop_if; + nss_wifi_vdev_msg_init(wifivdevmsg, nss_if, NSS_WIFI_VDEV_SET_PEER_NEXT_HOP, + sizeof(struct nss_wifi_vdev_set_peer_next_hop_msg), NULL, NULL); + + status = nss_wifi_vdev_tx_msg(ctx, wifivdevmsg); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: Unable to send peer next hop message", ctx); + } + + kfree(wifivdevmsg); + return status; +} +EXPORT_SYMBOL(nss_wifi_vdev_set_peer_next_hop); + +/* + * nss_wifi_vdev_set_dp_type() + * Set the vap datapath type of the packet. 
+ */ +bool nss_wifi_vdev_set_dp_type(struct nss_ctx_instance *nss_ctx, struct net_device *netdev, + uint32_t if_num, enum nss_wifi_vdev_dp_type dp_type) +{ + + NSS_VERIFY_CTX_MAGIC(nss_ctx); + + nss_assert((if_num >= NSS_DYNAMIC_IF_START) && (if_num < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))); + + if (unlikely(nss_ctx->state != NSS_CORE_STATE_INITIALIZED)) { + nss_warning("%px: Vap interface dp type could not be set as core is not initialized\n", nss_ctx); + return false; + } + + /* + * set the subsystem dp type for the Wi-Fi vdev + */ + nss_core_set_subsys_dp_type(nss_ctx, netdev, if_num, dp_type); + + return true; +} +EXPORT_SYMBOL(nss_wifi_vdev_set_dp_type); + +/* + *********************************** + * Register/Unregister/Miscellaneous APIs + *********************************** + */ + +/* + * nss_register_wifi_vdev_if() + */ +uint32_t nss_register_wifi_vdev_if(struct nss_ctx_instance *nss_ctx, + int32_t if_num, + nss_wifi_vdev_callback_t vdev_data_callback, + nss_wifi_vdev_ext_data_callback_t vdev_ext_data_callback, + nss_wifi_vdev_msg_callback_t vdev_event_callback, + struct net_device *netdev, + uint32_t features) +{ + uint32_t status; + + nss_assert((if_num >= NSS_DYNAMIC_IF_START) && (if_num < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))); + + nss_core_register_subsys_dp(nss_ctx, if_num, vdev_data_callback, vdev_ext_data_callback, NULL, netdev, features); + + status = nss_core_register_msg_handler(nss_ctx, if_num, vdev_event_callback); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to register event handler for interface(%u)", nss_ctx, if_num); + return status; + } + + nss_core_register_handler(nss_ctx, if_num, nss_wifi_vdev_handler, NULL); + + return NSS_CORE_STATUS_SUCCESS; +} +EXPORT_SYMBOL(nss_register_wifi_vdev_if); + +/* + * nss_unregister_wifi_vdev_if() + */ +void nss_unregister_wifi_vdev_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance 
*)&nss_top_main.nss[nss_top_main.wifi_handler_id]; + uint32_t status; + + nss_assert(nss_ctx); + nss_assert((if_num >= NSS_DYNAMIC_IF_START) && (if_num < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES))); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + + status = nss_core_unregister_msg_handler(nss_ctx, if_num); + if (status != NSS_CORE_STATUS_SUCCESS) { + nss_warning("%px: unable to unregister event handler for interface(%u)", nss_ctx, if_num); + return; + } + + nss_core_unregister_handler(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_unregister_wifi_vdev_if); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifili.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifili.c new file mode 100644 index 000000000..c1904e465 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifili.c @@ -0,0 +1,670 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_tx_rx_common.h" +#include "nss_wifili_stats.h" +#include "nss_wifili_log.h" +#include "nss_wifili_strings.h" + +#define NSS_WIFILI_TX_TIMEOUT 1000 /* Millisecond to jiffies*/ +#define NSS_WIFILI_INVALID_SCHEME_ID -1 +#define NSS_WIFILI_THREAD_SCHEME_ENTRY_MAX 4 /* Maximum number of thread scheme entries. */ +#define NSS_WIFILI_EXTERNAL_INTERFACE_MAX 2 /* Maximum external I/F supported */ + +/* + * NSS external interface number table + */ +nss_if_num_t nss_wifili_external_tbl[NSS_WIFILI_EXTERNAL_INTERFACE_MAX] = + {NSS_WIFILI_EXTERNAL_INTERFACE0, NSS_WIFILI_EXTERNAL_INTERFACE1}; + +/* + * nss_wifili_thread_scheme_entry + * Details of thread scheme. + */ +struct nss_wifili_thread_scheme_entry { + int32_t radio_ifnum; /* Radio interface number. */ + uint32_t radio_priority; /* Priority of radio. */ + uint32_t scheme_priority; /* Priority of scheme. */ + uint8_t scheme_index; /* Scheme index allocated to radio. */ + bool allocated; /* Flag to check if scheme is allocated. */ +}; + +/* + * nss_wifili_thread_scheme_db + * Wifili thread scheme database. + */ +struct nss_wifili_thread_scheme_db { + spinlock_t lock; /* Lock to protect from simultaneous access. */ + uint32_t radio_count; /* Radio counter. */ + struct nss_wifili_thread_scheme_entry nwtse[NSS_WIFILI_THREAD_SCHEME_ENTRY_MAX]; + /* Metadata for each of scheme. 
*/ +}; + +/* + * nss_wifili_external_if_state_tbl + * External interface state table + */ +struct nss_wifili_external_if_state_tbl { + nss_if_num_t ifnum; + bool in_use; +}; + +/* + * nss_wifili_external_if_info + * Wifili external interface info + */ +struct nss_wifili_external_if_info { + spinlock_t lock; + struct nss_wifili_external_if_state_tbl state_tbl[NSS_WIFILI_EXTERNAL_INTERFACE_MAX]; +} nss_wifi_eif_info; + +/* + * nss_wifili_pvt + * Private data structure + */ +static struct nss_wifili_pvt { + struct semaphore sem; + struct completion complete; + int response; + void *cb; + void *app_data; +} wifili_pvt; + +/* + * Scheme to radio mapping database + */ +static struct nss_wifili_thread_scheme_db ts_db[NSS_MAX_CORES]; + +/* + * nss_wifili_handler() + * Handle NSS -> HLOS messages for wifi + */ +static void nss_wifili_handler(struct nss_ctx_instance *nss_ctx, struct nss_cmn_msg *ncm, __attribute__((unused))void *app_data) +{ + struct nss_wifili_msg *ntm = (struct nss_wifili_msg *)ncm; + void *ctx; + nss_wifili_msg_callback_t cb; + + nss_info("%px: NSS->HLOS message for wifili\n", nss_ctx); + + /* + * The interface number shall be wifili soc interface or wifili radio interface + */ + BUG_ON((nss_is_dynamic_interface(ncm->interface)) + || ((ncm->interface != NSS_WIFILI_INTERNAL_INTERFACE) + && (ncm->interface != NSS_WIFILI_EXTERNAL_INTERFACE0) + && (ncm->interface != NSS_WIFILI_EXTERNAL_INTERFACE1))); + + /* + * Trace messages. + */ + nss_wifili_log_rx_msg(ntm); + + /* + * Is this a valid request/response packet? 
+ */ + if (ncm->type >= NSS_WIFILI_MAX_MSG) { + nss_warning("%px: Received invalid message %d for wifili interface", nss_ctx, ncm->type); + return; + } + + if ((nss_cmn_get_msg_len(ncm) > sizeof(struct nss_wifili_msg)) && + ntm->cm.type != NSS_WIFILI_PEER_EXT_STATS_MSG) { + nss_warning("%px: Length of message is greater than required: %d", nss_ctx, nss_cmn_get_msg_len(ncm)); + return; + } + + /* + * Snoop messages for local driver and handle + */ + switch (ntm->cm.type) { + case NSS_WIFILI_STATS_MSG: + /* + * Update WIFI driver statistics and send statistics notifications to the registered modules + */ + nss_wifili_stats_sync(nss_ctx, &ntm->msg.wlsoc_stats, ncm->interface); + nss_wifili_stats_notify(nss_ctx, ncm->interface); + break; + } + + /* + * Update the callback and app_data for notify messages, wifili sends all notify messages + * to the same callback/app_data. + */ + if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) { + ncm->cb = (nss_ptr_t)nss_ctx->nss_top->wifili_msg_callback; + } + + /* + * Log failures + */ + nss_core_log_msg_failures(nss_ctx, ncm); + + /* + * Do we have a call back + */ + if (!ncm->cb) { + nss_info("%px: cb null for wifili interface %d", nss_ctx, ncm->interface); + return; + } + + /* + * Get callback & context + */ + cb = (nss_wifili_msg_callback_t)ncm->cb; + ctx = nss_ctx->subsys_dp_register[ncm->interface].ndev; + + /* + * call wifili msg callback + */ + if (!ctx) { + nss_warning("%px: Event received for wifili interface %d before registration", nss_ctx, ncm->interface); + return; + } + + cb(ctx, ntm); +} + +/* + * nss_wifili_callback() + * Callback to handle the completion of NSS->HLOS messages. 
+ */ +static void nss_wifili_callback(void *app_data, struct nss_wifili_msg *nvm) +{ + nss_wifili_msg_callback_t callback = (nss_wifili_msg_callback_t)wifili_pvt.cb; + void *data = wifili_pvt.app_data; + + wifili_pvt.response = NSS_TX_SUCCESS; + wifili_pvt.cb = NULL; + wifili_pvt.app_data = NULL; + + if (nvm->cm.response != NSS_CMN_RESPONSE_ACK) { + nss_warning("wifili error response %d\n", nvm->cm.response); + wifili_pvt.response = nvm->cm.response; + } + + if (callback) { + callback(data, nvm); + } + complete(&wifili_pvt.complete); +} + +/* + * nss_wifili_tx_msg + * Transmit a wifili message to NSS FW + * + * NOTE: The caller is expected to handle synchronous wait for message + * response if needed. + */ +nss_tx_status_t nss_wifili_tx_msg(struct nss_ctx_instance *nss_ctx, struct nss_wifili_msg *msg) +{ + struct nss_cmn_msg *ncm = &msg->cm; + + /* + * Trace messages. + */ + nss_wifili_log_tx_msg(msg); + + if (ncm->type >= NSS_WIFILI_MAX_MSG) { + nss_warning("%px: wifili message type out of range: %d", nss_ctx, ncm->type); + return NSS_TX_FAILURE; + } + + /* + * The interface number shall be one of the wifili soc interfaces + */ + if ((ncm->interface != NSS_WIFILI_INTERNAL_INTERFACE) + && (ncm->interface != NSS_WIFILI_EXTERNAL_INTERFACE0) + && (ncm->interface != NSS_WIFILI_EXTERNAL_INTERFACE1)) { + nss_warning("%px: tx request for interface that is not a wifili: %d", nss_ctx, ncm->interface); + return NSS_TX_FAILURE; + } + + return nss_core_send_cmd(nss_ctx, msg, sizeof(*msg), NSS_NBUF_PAYLOAD_SIZE); +} +EXPORT_SYMBOL(nss_wifili_tx_msg); + +/* + * nss_wifili_tx_msg_sync() + * Transmit a wifili message to NSS firmware synchronously. 
+ */ +nss_tx_status_t nss_wifili_tx_msg_sync(struct nss_ctx_instance *nss_ctx, struct nss_wifili_msg *nvm) +{ + nss_tx_status_t status; + int ret = 0; + + down(&wifili_pvt.sem); + wifili_pvt.cb = (void *)nvm->cm.cb; + wifili_pvt.app_data = (void *)nvm->cm.app_data; + + nvm->cm.cb = (nss_ptr_t)nss_wifili_callback; + nvm->cm.app_data = (nss_ptr_t)NULL; + + status = nss_wifili_tx_msg(nss_ctx, nvm); + if (status != NSS_TX_SUCCESS) { + nss_warning("%px: wifili_tx_msg failed\n", nss_ctx); + up(&wifili_pvt.sem); + return status; + } + + ret = wait_for_completion_timeout(&wifili_pvt.complete, msecs_to_jiffies(NSS_WIFILI_TX_TIMEOUT)); + if (!ret) { + nss_warning("%px: wifili msg tx failed due to timeout\n", nss_ctx); + wifili_pvt.response = NSS_TX_FAILURE; + } + + status = wifili_pvt.response; + up(&wifili_pvt.sem); + return status; +} +EXPORT_SYMBOL(nss_wifili_tx_msg_sync); + +/* + * nss_wifili_get_context() + */ +struct nss_ctx_instance *nss_wifili_get_context(void) +{ + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; +} +EXPORT_SYMBOL(nss_wifili_get_context); + +/* + * nss_wifili_release_external_if() + * Release the external interface. 
+ */ +void nss_wifili_release_external_if(nss_if_num_t ifnum) +{ + uint32_t idx; + + spin_lock_bh(&nss_wifi_eif_info.lock); + for (idx = 0; idx < NSS_WIFILI_EXTERNAL_INTERFACE_MAX; idx++) { + if (nss_wifi_eif_info.state_tbl[idx].ifnum != ifnum) { + continue; + } + + if (!nss_wifi_eif_info.state_tbl[idx].in_use) { + spin_unlock_bh(&nss_wifi_eif_info.lock); + nss_warning("%px: I/F num:%d is not in use\n", &nss_wifi_eif_info, ifnum); + return; + } + + nss_wifi_eif_info.state_tbl[idx].in_use = false; + break; + } + + spin_unlock_bh(&nss_wifi_eif_info.lock); + + if (idx == NSS_WIFILI_EXTERNAL_INTERFACE_MAX) { + nss_warning("%px: Trying to release invalid ifnum:%d\n", &nss_wifi_eif_info, ifnum); + } +} +EXPORT_SYMBOL(nss_wifili_release_external_if); + +/* + * nss_get_available_wifili_external_if() + * Check and return the available external interface + */ +nss_if_num_t nss_get_available_wifili_external_if(void) +{ + nss_if_num_t ifnum = -1; + uint32_t idx; + + /* + * Check if the external interface is registered. + * Return the interface number if not registered. + */ + spin_lock_bh(&nss_wifi_eif_info.lock); + for (idx = 0; idx < NSS_WIFILI_EXTERNAL_INTERFACE_MAX; idx++) { + if (nss_wifi_eif_info.state_tbl[idx].in_use) { + continue; + } + + nss_wifi_eif_info.state_tbl[idx].in_use = true; + ifnum = nss_wifi_eif_info.state_tbl[idx].ifnum; + break; + } + + spin_unlock_bh(&nss_wifi_eif_info.lock); + + BUG_ON(idx == NSS_WIFILI_EXTERNAL_INTERFACE_MAX); + return ifnum; +} +EXPORT_SYMBOL(nss_get_available_wifili_external_if); + +/* + * nss_wifili_get_radio_num() + * Get NSS wifili radio count. + * + * Wi-Fi host driver needs to know the current radio count + * to extract the radio priority from ini file. 
+ */ +uint32_t nss_wifili_get_radio_num(struct nss_ctx_instance *nss_ctx) +{ + uint8_t core_id; + uint32_t radio_count; + + nss_assert(nss_ctx); + nss_assert(nss_ctx->id < nss_top_main.num_nss); + + core_id = nss_ctx->id; + + spin_lock_bh(&ts_db[core_id].lock); + radio_count = ts_db[core_id].radio_count; + spin_unlock_bh(&ts_db[core_id].lock); + + return radio_count; +} +EXPORT_SYMBOL(nss_wifili_get_radio_num); + +/* + * nss_wifili_thread_scheme_alloc() + * Allocate NSS worker thread scheme index. + * + * API does search on scheme database and returns scheme index based on + * priority of radio and free entry available. + * Wi-Fi driver fetches radio priority from ini file and calls this API + * to get the scheme index based on radio priority. + * + */ +uint8_t nss_wifili_thread_scheme_alloc(struct nss_ctx_instance *nss_ctx, + int32_t radio_ifnum, + enum nss_wifili_thread_scheme_priority radio_priority) +{ + uint8_t i; + uint8_t scheme_idx; + uint8_t core_id; + uint8_t next_avail_entry_idx = NSS_WIFILI_THREAD_SCHEME_ENTRY_MAX; + + nss_assert(nss_ctx); + nss_assert(nss_ctx->id < nss_top_main.num_nss); + + core_id = nss_ctx->id; + + /* + * Iterate through scheme database and allocate + * scheme_id matching the priority requested. 
+ */ + spin_lock_bh(&ts_db[core_id].lock); + for (i = 0; i < NSS_WIFILI_THREAD_SCHEME_ENTRY_MAX; i++) { + if (ts_db[core_id].nwtse[i].allocated) { + continue; + } + + if (radio_priority == + ts_db[core_id].nwtse[i].scheme_priority) { + ts_db[core_id].nwtse[i].radio_ifnum = radio_ifnum; + ts_db[core_id].nwtse[i].radio_priority = radio_priority; + ts_db[core_id].nwtse[i].allocated = true; + ts_db[core_id].radio_count++; + scheme_idx = ts_db[core_id].nwtse[i].scheme_index; + spin_unlock_bh(&ts_db[core_id].lock); + + nss_info("%px: Allocated scheme index:%d radio_ifnum:%d", + nss_ctx, + scheme_idx, + radio_ifnum); + + return scheme_idx; + } + + next_avail_entry_idx = i; + } + + /* + * When radio priority does not match any of scheme entry priority + * and database has unallocated entries, provide available unallocated entry. + * This prevents any catastrophic failure during attach of Wi-Fi radio. + */ + if (next_avail_entry_idx != NSS_WIFILI_THREAD_SCHEME_ENTRY_MAX) { + + ts_db[core_id].nwtse[next_avail_entry_idx].radio_ifnum = radio_ifnum; + ts_db[core_id].nwtse[next_avail_entry_idx].radio_priority = radio_priority; + ts_db[core_id].nwtse[next_avail_entry_idx].allocated = true; + ts_db[core_id].radio_count++; + scheme_idx = ts_db[core_id].nwtse[next_avail_entry_idx].scheme_index; + spin_unlock_bh(&ts_db[core_id].lock); + + nss_info("%px: Priority did not match for radio_ifnum:%d, allocated a next available scheme:%d", + nss_ctx, + radio_ifnum, + scheme_idx); + + return scheme_idx; + } + spin_unlock_bh(&ts_db[core_id].lock); + + nss_warning("%px: Could not find scheme - radio_ifnum:%d radio_map:%d\n", + nss_ctx, + radio_ifnum, + radio_priority); + + return NSS_WIFILI_INVALID_SCHEME_ID; +} +EXPORT_SYMBOL(nss_wifili_thread_scheme_alloc); + +/* + * nss_wifili_thread_scheme_dealloc() + * Reset thread scheme metadata. 
+ */ +void nss_wifili_thread_scheme_dealloc(struct nss_ctx_instance *nss_ctx, + int32_t radio_ifnum) +{ + uint32_t id; + uint8_t core_id; + + nss_assert(nss_ctx); + nss_assert(nss_ctx->id < nss_top_main.num_nss); + + core_id = nss_ctx->id; + + /* + * Radio count cannot be zero here. + */ + nss_assert(ts_db[core_id].radio_count); + + spin_lock_bh(&ts_db[core_id].lock); + for (id = 0; id < NSS_WIFILI_THREAD_SCHEME_ENTRY_MAX; id++) { + if (ts_db[core_id].nwtse[id].radio_ifnum != radio_ifnum) { + continue; + } + + ts_db[core_id].nwtse[id].radio_priority = 0; + ts_db[core_id].nwtse[id].allocated = false; + ts_db[core_id].nwtse[id].radio_ifnum = 0; + ts_db[core_id].radio_count--; + break; + } + spin_unlock_bh(&ts_db[core_id].lock); + + if (id == NSS_WIFILI_THREAD_SCHEME_ENTRY_MAX) { + nss_warning("%px: Could not find scheme database with radio_ifnum:%d", + nss_ctx, + radio_ifnum); + } +} +EXPORT_SYMBOL(nss_wifili_thread_scheme_dealloc); + +/* + * nss_wifili_thread_scheme_db_init() + * Initialize thread scheme database. 
+ */ +void nss_wifili_thread_scheme_db_init(uint8_t core_id) +{ + uint32_t id; + + spin_lock_init(&ts_db[core_id].lock); + + /* + * Iterate through scheme database and assign + * scheme_id and priority for each entry + */ + ts_db[core_id].radio_count = 0; + for (id = 0; id < NSS_WIFILI_THREAD_SCHEME_ENTRY_MAX; id++) { + ts_db[core_id].nwtse[id].radio_priority = 0; + ts_db[core_id].nwtse[id].radio_ifnum = 0; + ts_db[core_id].nwtse[id].allocated = false; + + switch (id) { + case 0: + ts_db[core_id].nwtse[id].scheme_priority = NSS_WIFILI_HIGH_PRIORITY_SCHEME; + ts_db[core_id].nwtse[id].scheme_index = NSS_WIFILI_THREAD_SCHEME_ID_0; + break; + case 1: + ts_db[core_id].nwtse[id].scheme_priority = NSS_WIFILI_LOW_PRIORITY_SCHEME; + ts_db[core_id].nwtse[id].scheme_index = NSS_WIFILI_THREAD_SCHEME_ID_1; + break; + case 2: + case 3: + ts_db[core_id].nwtse[id].scheme_priority = NSS_WIFILI_HIGH_PRIORITY_SCHEME; + ts_db[core_id].nwtse[id].scheme_index = NSS_WIFILI_THREAD_SCHEME_ID_2; + break; + default: + nss_warning("Invalid scheme index:%d", id); + } + } +} + +/* + * nss_wifili_msg_init() + * Initialize nss_wifili_msg. 
+ */ +void nss_wifili_msg_init(struct nss_wifili_msg *ncm, uint16_t if_num, uint32_t type, uint32_t len, void *cb, void *app_data) +{ + nss_cmn_msg_init(&ncm->cm, if_num, type, len, cb, app_data); +} +EXPORT_SYMBOL(nss_wifili_msg_init); + +/* + **************************************** + * Register/Unregister/Miscellaneous APIs + **************************************** + */ + +/* + * nss_register_wifili_if() + * Register wifili with nss driver + */ +struct nss_ctx_instance *nss_register_wifili_if(uint32_t if_num, nss_wifili_callback_t wifili_callback, + nss_wifili_callback_t wifili_ext_callback, + nss_wifili_msg_callback_t event_callback, struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; + + /* + * The interface number shall be wifili soc interface + */ + nss_assert((if_num == NSS_WIFILI_INTERNAL_INTERFACE) + || (if_num == NSS_WIFILI_EXTERNAL_INTERFACE0) + || (if_num == NSS_WIFILI_EXTERNAL_INTERFACE1)); + + nss_info("nss_register_wifili_if if_num %d wifictx %px", if_num, netdev); + + nss_core_register_subsys_dp(nss_ctx, if_num, wifili_callback, wifili_ext_callback, NULL, netdev, features); + + nss_top_main.wifili_msg_callback = event_callback; + + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; +} +EXPORT_SYMBOL(nss_register_wifili_if); + +/* + * nss_unregister_wifili_if() + * Unregister wifili with nss driver + */ +void nss_unregister_wifili_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; + + /* + * The interface number shall be wifili soc interface + */ + nss_assert((if_num == NSS_WIFILI_INTERNAL_INTERFACE) + || (if_num == NSS_WIFILI_EXTERNAL_INTERFACE0) + || (if_num == NSS_WIFILI_EXTERNAL_INTERFACE1)); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); + nss_wifili_release_external_if(if_num); +} 
+EXPORT_SYMBOL(nss_unregister_wifili_if); + +/* + * nss_register_wifili_radio_if() + * Register wifili radio with nss driver + */ +struct nss_ctx_instance *nss_register_wifili_radio_if(uint32_t if_num, nss_wifili_callback_t wifili_callback, + nss_wifili_callback_t wifili_ext_callback, + nss_wifili_msg_callback_t event_callback, struct net_device *netdev, uint32_t features) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; + + /* + * The interface number shall be wifili radio dynamic interface + */ + nss_assert(nss_is_dynamic_interface(if_num)); + nss_info("nss_register_wifili_if if_num %d wifictx %px", if_num, netdev); + + nss_core_register_subsys_dp(nss_ctx, if_num, wifili_callback, wifili_ext_callback, NULL, netdev, features); + + return (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; +} +EXPORT_SYMBOL(nss_register_wifili_radio_if); + +/* + * nss_unregister_wifili_radio_if() + * Unregister wifili radio with nss driver + */ +void nss_unregister_wifili_radio_if(uint32_t if_num) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; + + /* + * The interface number shall be wifili radio dynamic interface + */ + nss_assert(nss_is_dynamic_interface(if_num)); + + nss_core_unregister_subsys_dp(nss_ctx, if_num); +} +EXPORT_SYMBOL(nss_unregister_wifili_radio_if); + +/* + * nss_wifili_register_handler() + * Register handle for notfication messages received on wifi interface + */ +void nss_wifili_register_handler(void) +{ + struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *)&nss_top_main.nss[nss_top_main.wifi_handler_id]; + uint32_t idx; + + nss_info("nss_wifili_register_handler"); + nss_core_register_handler(nss_ctx, NSS_WIFILI_INTERNAL_INTERFACE, nss_wifili_handler, NULL); + nss_core_register_handler(nss_ctx, NSS_WIFILI_EXTERNAL_INTERFACE0, nss_wifili_handler, NULL); + nss_core_register_handler(nss_ctx, 
NSS_WIFILI_EXTERNAL_INTERFACE1, nss_wifili_handler, NULL); + + nss_wifili_stats_dentry_create(); + nss_wifili_strings_dentry_create(); + + sema_init(&wifili_pvt.sem, 1); + init_completion(&wifili_pvt.complete); + + /* + * Initialize the external interfaces info. + */ + spin_lock_init(&nss_wifi_eif_info.lock); + for (idx = 0; idx < NSS_WIFILI_EXTERNAL_INTERFACE_MAX; idx++) { + nss_wifi_eif_info.state_tbl[idx].ifnum = nss_wifili_external_tbl[idx]; + nss_wifi_eif_info.state_tbl[idx].in_use = false; + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifili_log.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_log.c new file mode 100644 index 000000000..7c679dfca --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_log.c @@ -0,0 +1,553 @@ +/* + ************************************************************************** + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_wifili_log.c + * NSS WIFILI logger file. 
+ */ + +#include "nss_core.h" + +/* + * nss_wifili_log_message_types_str + * WIFILI message strings + */ +static int8_t *nss_wifili_log_message_types_str[NSS_WIFILI_MAX_MSG] __maybe_unused = { + "WIFILI INIT MSG", + "WIFILI SOC RESET MSG", + "WIFILI PDEV INIT MSG", + "WIFILI PDEV DEINIT MSG", + "WIFILI START MSG", + "WIFILI STOP MSG", + "WIFILI PEER CREATE MSG", + "WIFILI PEER DELETE MSG", + "WIFILI SEND PEER MEMORY REQUEST MSG", + "WIFILI PEER FREELIST APPEND MSG", + "WIFILI STATS MSG", + "WIFILI WDS VENDOR MSG", + "WIFILI PEER STATS MSG", + "WIFILI WDS PEER ADD MSG", + "WIFILI WDS PEER DEL MSG", + "WIFILI WDS PEER MAP MSG", + "WIFILI WDS ACTIVE INFO MSG", + "WIFILI STATS CFG MSG", + "WIFILI TID REOQ SETUP MSG", + "WIFILI RADIO CMD MSG", + "WIFILI LINK DESC INFO MSG", + "WIFILI PEER SECURITY TYPE MSG", + "WIFILI PEER NAWDS ENABLE MSG", + "WIFILI RADIO BUF CFG", + "WIFILI DBDC REPEATER SET MSG", + "WIFILI DBDC REPEATER AST FLUSH MSG" +}; + +/* + * nss_wifili_log_error_response_types_str + * Strings for error types for WIFILI messages + */ +static int8_t *nss_wifili_log_error_response_types_str[NSS_WIFILI_EMSG_UNKNOWN] __maybe_unused = { + "WIFILI NO ERROR", + "WIFILI INIT FAIL IMPROPER STATE", + "WIFILI RINGS INIT FAIL", + "WIFILI PDEV INIT IMPROPER STATE FAIL", + "WIFILI PDEV INIT INVALID RADIOID FAIL", + "WIFILI PDEV TX IRQ ALLOC FAIL", + "WIFILI PDEV RESET INVALID RADIOID FAIL", + "WIFILI PDEV RESET PDEV NULL FAIL", + "WIFILI PDEV RESET IMPROPER STATE FAIL", + "WIFILI START IMPROPER STATE FAIL", + "WIFILI PEER CREATE FAIL", + "WIFILI PEER DELETE FAIL", + "WIFILI HASHMEM INIT FAIL", + "WIFILI PEER FREELIST APPEND FAIL", + "WIFILI PEER CREATE INVALID VDEVID FAIL", + "WIFILI PEER CREATE INVALID PEER ID FAIL", + "WIFILI PEER CREATE VDEV NULL FAIL", + "WIFILI PEER CREATE PDEV NULL FAIL", + "WIFILI PEER CREATE ALLOC FAIL", + "WIFILI PEER DELETE VAPID INVALID FAIL", + "WIFILI PEER DELETE INVALID PEERID FAIL", + "WIFILI PEER DELETE VDEV NULL FAIL", + "WIFILI PEER DELETE 
PDEV NULL FAIL", + "WIFILI PEER DELETE PEER NULL FAIL", + "WIFILI PEER DELETE PEER CORRUPTED FAIL", + "WIFILI PEER DUPLICATE AST INDEX PEER ID FAIL", + "WIFILI GROUP0 TIMER ALLOC FAIL", + "WIFILI INSUFFICIENT WT FAIL", + "WIFILI INVALID NUM TCL RING FAIL", + "WIFILI INVALID NUM REO DST RING FAIL", + "WIFILI HAL SRNG SOC ALLOC FAIL", + "WIFILI HAL SRNG INVALID RING INFO FAIL", + "WIFILI HAL SRNG TCL ALLOC FAIL", + "WIFILI HAL SRNG TXCOMP ALLOC FAIL", + "WIFILI HAL SRNG REODST ALLOC FAIL", + "WIFILI HAL SRNG REOREINJECT ALLOC FAIL", + "WIFILI HAL SRNG RXRELEASE ALLOC FAIL", + "WIFILI HAL SRNG RXEXCP ALLOC FAIL", + "WIFILI HAL TX MEMALLOC FAIL", + "WIFILI HAL TX INVLID POOL NUM FAIL", + "WIFILI HAL TX INVALID PAGE NUM FAIL", + "WIFILI HAL TX DESC MEM ALLOC FAIL", + "WIFILI HAL RX MEMALLOC FAIL", + "WIFILI PDEV RXDMA RING ALLOC FAIL", + "WIFILI NAWDSEN PEERID INVALID", + "WIFILI NAWDSEN PEER NULL", + "WIFILI NAWDSEN PEER CORRUPTED", + "WIFILI WDS PEER CFG FAIL", + "WIFILI RESET NO STOP", + "WIFILI HAL SRNG INVALID RING BASE FAIL", + "WIFILI PDEV RX INIT FAIL", + "WIFILI EMESG AST ADD FAIL", + "WIFILI EMESG AST REMOVE FAIL", + "WIFILI EMESG WDS ADD FAIL", + "WIFILI EMESG WDS REMOVE FAIL", + "WIFILI EMESG WDS MAP FAIL", + "WIFILI WDS INVALID PEERID FAIL", + "WIFILI WDS DUPLICATE AST INDEX PEER ID FAIL", + "WIFILI INVALID RADIO CMD", + "WIFILI INVALID RADIO IFNUM", + "WIFILI PEER SECURITY PEER NULL FAIL", + "WIFILI PEER SECURITY PEER CORRUPTED FAIL", + "WIFILI RADIO INVALID BUF CFG", +}; + +/* + * nss_wifili_log_wifili_hal_srng() + * Log NSS WIFILI HAL SRNG Information + */ +static void nss_wifili_log_wifili_hal_srng(struct nss_wifili_hal_srng_info *ring) +{ + int32_t i; + nss_trace("\tRing ID: %d\n" + "\tMAC ID: %d\n" + "\tRing base physical address: %x\n" + "\tNumber of entries: %d\n" + "\tFlags: %x\n" + "\tDirection: %d\n" + "\tEntry size: %d\n" + "\tLow Threshold: %d\n", + ring->ring_id, ring->mac_id, + ring->ring_base_paddr, ring->num_entries, + ring->flags, 
ring->ring_dir, + ring->entry_size, ring->low_threshold); + nss_trace("Ring Base Addresses:"); + for (i = 0; i < NSS_WIFILI_MAX_SRNG_REG_GROUPS_MSG; i++) { + nss_trace("\t%x", ring->hwreg_base[i]); + } +} + +/* + * nss_wifili_log_init_msg() + * Log NSS WIFILI Init message. + */ +static void nss_wifili_log_init_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_init_msg *nwim __maybe_unused = &nwm->msg.init; + int32_t i; + nss_trace("%px: NSS WIFILI Init Message:\n" + "WIFILI HAL Source Ring Base Address: %x\n" + "WIFILI HAL Source Ring Shadow Read Pointer Address: %x\n" + "WIFILI HAL Source Ring Shadow Write Pointer Address: %x\n" + "WIFILI Number of Transmit Classifier data rings: %d\n" + "WIFILI Number of reorder rings: %d\n" + "WIFILI Flags for SoC initialization: %d\n" + "WIFILI Tx descriptor initialization number of software descriptors: %d" + "WIFILI Tx descriptor initialization number of software extended descriptors: %d" + "WIFILI Tx descriptor initialization number of descriptor pools: %d" + "WIFILI Tx descriptor initialization number of memory addresses: %d" + "WIFILI Tx descriptor initialization extended descriptor page number: %d" + "WIFILI Tx descriptor initialization number of software secriptors for second radio: %d" + "WIFILI Tx descriptor initialization number of software extended descriptors for second radio: %d", + nwim, nwim->hssm.dev_base_addr, + nwim->hssm.shadow_rdptr_mem_addr, nwim->hssm.shadow_wrptr_mem_addr, + nwim->num_tcl_data_rings, nwim->num_reo_dest_rings, + nwim->flags, nwim->wtdim.num_tx_desc, + nwim->wtdim.num_tx_desc_ext, nwim->wtdim.num_pool, + nwim->wtdim.num_memaddr, nwim->wtdim.ext_desc_page_num, + nwim->wtdim.num_tx_desc_2, nwim->wtdim.num_tx_desc_ext_2); + /* + * Continuation of the log. 
+ */ + nss_trace("WIFILI Tx descriptor initialization memory start address and size:"); + for (i = 0; i < NSS_WIFILI_MAX_NUMBER_OF_PAGE_MSG; i++) { + nss_trace("\tPage[%d]: Addr: %x Size: %d", i, nwim->wtdim.memory_addr[i], nwim->wtdim.memory_size[i]); + } + nss_trace("WIFILI Transmit Classifier data ring Information:"); + for (i = 0; i < NSS_WIFILI_MAX_TCL_DATA_RINGS_MSG; i++) { + nss_wifili_log_wifili_hal_srng(&nwim->tcl_ring_info[i]); + } + nss_trace("WIFILI TX Completion Ring configuration information:"); + for (i = 0; i < NSS_WIFILI_MAX_TCL_DATA_RINGS_MSG; i++) { + nss_wifili_log_wifili_hal_srng(&nwim->tx_comp_ring[i]); + } + nss_trace("WIFILI Reorder destination ring configuration information:"); + for (i = 0; i < NSS_WIFILI_MAX_REO_DATA_RINGS_MSG; i++) { + nss_wifili_log_wifili_hal_srng(&nwim->reo_dest_ring[i]); + } + nss_trace("WIFILI Reorder exception ring configuration information:"); + nss_wifili_log_wifili_hal_srng(&nwim->reo_exception_ring); + nss_trace("WIFILI Reinject ring configuration information:"); + nss_wifili_log_wifili_hal_srng(&nwim->reo_reinject_ring); +} + +/* + * nss_wifili_log_pdev_init_msg() + * Log NSS WIFILI PDEV Init message. + */ +static void nss_wifili_log_pdev_init_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_pdev_init_msg *nwim __maybe_unused = &nwm->msg.pdevmsg; + nss_trace("%px: NSS WIFILI PDEV Init Message:\n" + "WIFILI Radio ID: %x\n" + "WIFILI MAC Hardware Mode: %d\n" + "WIFILI Lower MAC ID: %x\n", + nwim, nwim->radio_id, + nwim->hwmode, nwim->lmac_id); + /* + * Continuation of the log. + */ + nss_trace("WIFILI Media Access Point ring information:"); + nss_wifili_log_wifili_hal_srng(&nwim->rxdma_ring); +} + +/* + * nss_wifili_log_pdev_init_msg() + * Log NSS WIFILI PDEV Deinit message. 
+ */ +static void nss_wifili_log_pdev_deinit_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_pdev_deinit_msg *nwim __maybe_unused = &nwm->msg.pdevdeinit; + nss_trace("%px: NSS WIFILI PDEV Deinit Message:\n" + "WIFILI Interface Number: %d\n", + nwim, nwim->ifnum); +} + +/* + * nss_wifili_log_peer_msg() + * Log NSS WIFILI Peer message. + */ +static void nss_wifili_log_peer_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_peer_msg *nwim __maybe_unused = &nwm->msg.peermsg; + nss_trace("%px: NSS WIFILI Peer Message:\n" + "WIFILI Peer MAC Address: %pM\n" + "WIFILI VAP ID: %d\n" + "WIFILI Peed ID: %d\n" + "WIFILI Hardware address search table index: %d\n" + "WIFILI NAWDS enabled for peer: %d\n" + "WIFILI peer memory adderss for NSS: %x\n", + nwim, nwim->peer_mac_addr, + nwim->vdev_id, nwim->peer_id, + nwim->hw_ast_idx, nwim->is_nawds, + nwim->nss_peer_mem); +} + +/* + * nss_wifili_log_peer_freelist_append_msg() + * Log NSS WIFILI Peer memory request message. + */ +static void nss_wifili_log_peer_freelist_append_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_peer_freelist_append_msg *nwim __maybe_unused = &nwm->msg.peer_freelist_append; + nss_trace("%px: NSS WIFILI Peer Memory Request Message:\n" + "WIFILI Starting Address of Freelist: %x\n" + "WIFILI Length: %d\n" + "WIFILI Maximum number of peer entries supported in pool: %d\n", + nwim, nwim->addr, + nwim->length, nwim->num_peers); +} + +/* + * nss_wifili_log_wds_peer_msg() + * Log NSS WIFILI WDS Peer message. + */ +static void nss_wifili_log_wds_peer_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_wds_peer_msg *nwim __maybe_unused = &nwm->msg.wdspeermsg; + nss_trace("%px: NSS WIFILI WDS Peer Message:\n" + "WIFILI Destination MAC: %pM\n" + "WIFILI Peer MAC: %pM\n", + nwim, nwim->dest_mac, nwim->peer_mac); +} + +/* + * nss_wifili_log_wds_active_info_msg() + * Log NSS WIFILI WDS Active Info message. 
+ */ +static void nss_wifili_log_wds_active_info_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_wds_active_info_msg *nwim __maybe_unused = &nwm->msg.wdsinfomsg; + nss_trace("%px: NSS WIFILI WDS Active Info Message:\n" + "WIFILI Number OF Entries: %d\n" + "WIFILI Hardware AST Index: %d\n", + nwim, nwim->nentries, nwim->info[0].ast_idx); +} + +/* + * nss_wifili_log_stats_cfg_msg() + * Log NSS WIFILI Stats Configuration Message. + */ +static void nss_wifili_log_stats_cfg_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_stats_cfg_msg *nwim __maybe_unused = &nwm->msg.scm; + nss_trace("%px: NSS WIFILI Stats Config Message:\n" + "WIFILI Enable/Disable Config: %d\n", + nwim, nwim->cfg); +} + +/* + * nss_wifili_log_reo_tidq_msg() + * Log NSS WIFILI REO TIDQ Setup Message. + */ +static void nss_wifili_log_reo_tidq_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_reo_tidq_msg *nwim __maybe_unused = &nwm->msg.reotidqmsg; + nss_trace("%px: NSS WIFILI reo tidq setup Message:\n" + "WIFILI Traffic Identification Value: %d\n" + "WIFILI Peer ID: %d\n", + nwim, nwim->tid, nwim->peer_id); +} + +/* + * nss_wifili_log_radio_cfg_msg() + * Log NSS WIFILI Radio Command Message. + */ +static void nss_wifili_log_radio_cfg_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_radio_cfg_msg *nwim __maybe_unused = &nwm->msg.radiocfgmsg; + nss_trace("%px: NSS WIFILI Radio Command Message:\n" + "WIFILI Radio Interface Number %d\n", + nwim, nwim->radio_if_num); +} + +/* + * nss_wifili_log_wds_extn_peer_cfg_msg() + * Log NSS WIFILI WDS vendor extension configuration message. 
+ */ +static void nss_wifili_log_wds_extn_peer_cfg_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_wds_extn_peer_cfg_msg *nwim __maybe_unused = &nwm->msg.wpeercfg; + nss_trace("%px: NSS WIFILI WDS vendor extension configuration message:\n" + "WIFILI Peer MAC Addr: %pM\n" + "WIFILI WDS Flags: %d\n" + "WIFILI Peer ID: %d\n", + nwim, nwim->peer_mac_addr, + nwim->wds_flags, nwim->peer_id); +} + +/* + * nss_wifili_log_soc_linkdesc_buf_info_msg() + * Log NSS WIFILI Link descriptor buffer address information. + */ +static void nss_wifili_log_soc_linkdesc_buf_info_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_soc_linkdesc_buf_info_msg *nwim __maybe_unused = &nwm->msg.linkdescinfomsg; + nss_trace("%px: NSS WIFILI Link descriptor buffer address information:\n" + "WIFILI Link Descriptor Low Address: %x\n" + "WIFILI Link Descriptor High Address: %x\n", + nwim, nwim->buffer_addr_low, + nwim->buffer_addr_high); +} + +/* + * nss_wifili_log_peer_security_msg() + * Log NSS WIFILI Peer Security Message. + */ +static void nss_wifili_log_peer_security_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_peer_security_type_msg *nwim __maybe_unused = &nwm->msg.securitymsg; + int32_t i; + nss_trace("%px: NSS WIFILI Peer Security Message:\n" + "WIFILI Peer ID: %d\n" + "WIFILI Packet Type: %d\n" + "WIFILI Security Type: %d\n", + nwim, nwim->peer_id, + nwim->pkt_type, nwim->security_type); + /* + * Continuation of the log. + */ + nss_trace("WIFILI MIC KEY:"); + for (i = 0; i < NSS_WIFILI_MIC_KEY_LEN; i++) { + nss_trace("\t%x", nwim->mic_key[i]); + } +} + +/* + * nss_wifili_log_peer_nawds_enable_msg() + * Log NSS WIFILI NAWDS enable for peer. 
+ */ +static void nss_wifili_log_peer_nawds_enable_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_peer_nawds_enable_msg *nwim __maybe_unused = &nwm->msg.nawdsmsg; + nss_trace("%px: NSS WIFILI NAWDS enable for peer:\n" + "WIFILI Peer ID: %d\n" + "WIFILI Enable NAWDS: %d\n", + nwim, nwim->peer_id, nwim->is_nawds); +} + +/* + * nss_wifili_log_dbdc_repeater_set_msg() + * Log NSS WIFILI DBDC Repeaster Enable Message + */ +static void nss_wifili_log_dbdc_repeater_set_msg(struct nss_wifili_msg *nwm) +{ + struct nss_wifili_dbdc_repeater_set_msg *nwim __maybe_unused = &nwm->msg.dbdcrptrmsg; + nss_trace("%px: NSS WIFILI DBDC Repeater Enable Message:\n" + "WIFILI DBDC Enable Flag: %d\n", + nwim, nwim->is_dbdc_en); +} + +/* + * nss_wifili_log_verbose() + * Log message contents. + */ +static void nss_wifili_log_verbose(struct nss_wifili_msg *nwm) +{ + switch (nwm->cm.type) { + case NSS_WIFILI_INIT_MSG: + nss_wifili_log_init_msg(nwm); + break; + + case NSS_WIFILI_SOC_RESET_MSG: + break; + + case NSS_WIFILI_PDEV_INIT_MSG: + nss_wifili_log_pdev_init_msg(nwm); + break; + + case NSS_WIFILI_PDEV_DEINIT_MSG: + nss_wifili_log_pdev_deinit_msg(nwm); + break; + + case NSS_WIFILI_PEER_CREATE_MSG: + case NSS_WIFILI_PEER_DELETE_MSG: + nss_wifili_log_peer_msg(nwm); + break; + + case NSS_WIFILI_PEER_FREELIST_APPEND_MSG: + nss_wifili_log_peer_freelist_append_msg(nwm); + break; + + case NSS_WIFILI_WDS_VENDOR_MSG: + nss_wifili_log_wds_extn_peer_cfg_msg(nwm); + break; + + case NSS_WIFILI_WDS_PEER_ADD_MSG: + case NSS_WIFILI_WDS_PEER_DEL_MSG: + case NSS_WIFILI_WDS_PEER_MAP_MSG: + nss_wifili_log_wds_peer_msg(nwm); + break; + + case NSS_WIFILI_WDS_ACTIVE_INFO_MSG: + nss_wifili_log_wds_active_info_msg(nwm); + break; + + case NSS_WIFILI_STATS_CFG_MSG: + nss_wifili_log_stats_cfg_msg(nwm); + break; + + case NSS_WIFILI_TID_REOQ_SETUP_MSG: + nss_wifili_log_reo_tidq_msg(nwm); + break; + + case NSS_WIFILI_RADIO_CMD_MSG: + nss_wifili_log_radio_cfg_msg(nwm); + break; + + case 
NSS_WIFILI_LINK_DESC_INFO_MSG: + nss_wifili_log_soc_linkdesc_buf_info_msg(nwm); + break; + + case NSS_WIFILI_PEER_SECURITY_TYPE_MSG: + nss_wifili_log_peer_security_msg(nwm); + break; + + case NSS_WIFILI_PEER_NAWDS_ENABLE_MSG: + nss_wifili_log_peer_nawds_enable_msg(nwm); + break; + + case NSS_WIFILI_DBDC_REPEATER_SET_MSG: + nss_wifili_log_dbdc_repeater_set_msg(nwm); + break; + + case NSS_WIFILI_SOJOURN_STATS_MSG: + case NSS_DBDC_REPEATER_AST_FLUSH_MSG: + case NSS_WIFILI_SEND_PEER_MEMORY_REQUEST_MSG: + case NSS_WIFILI_PEER_STATS_MSG: + case NSS_WIFILI_RADIO_BUF_CFG: + case NSS_WIFILI_STATS_MSG: + case NSS_WIFILI_START_MSG: + case NSS_WIFILI_STOP_MSG: + /* + * No log for these valid messages. + */ + break; + + default: + nss_warning("%px: Invalid message type\n", nwm); + break; + } +} + +/* + * nss_wifili_log_tx_msg() + * Log messages transmitted to FW. + */ +void nss_wifili_log_tx_msg(struct nss_wifili_msg *nwm) +{ + if (nwm->cm.type >= NSS_WIFILI_MAX_MSG) { + nss_warning("%px: Invalid message type\n", nwm); + return; + } + + nss_info("%px: type[%d]:%s\n", nwm, nwm->cm.type, nss_wifili_log_message_types_str[nwm->cm.type]); + nss_wifili_log_verbose(nwm); +} + +/* + * nss_wifili_log_rx_msg() + * Log messages received from FW. 
+ */ +void nss_wifili_log_rx_msg(struct nss_wifili_msg *nwm) +{ + if (nwm->cm.response >= NSS_CMN_RESPONSE_LAST) { + nss_warning("%px: Invalid response\n", nwm); + return; + } + + if (nwm->cm.response == NSS_CMN_RESPONSE_NOTIFY || (nwm->cm.response == NSS_CMN_RESPONSE_ACK)) { + nss_info("%px: type[%d]:%s, response[%d]:%s\n", nwm, nwm->cm.type, + nss_wifili_log_message_types_str[nwm->cm.type], + nwm->cm.response, nss_cmn_response_str[nwm->cm.response]); + goto verbose; + } + + if (nwm->cm.error >= NSS_WIFILI_EMSG_UNKNOWN) { + nss_warning("%px: msg failure - type[%d]:%s, response[%d]:%s, error[%d]:Invalid error\n", + nwm, nwm->cm.type, nss_wifili_log_message_types_str[nwm->cm.type], + nwm->cm.response, nss_cmn_response_str[nwm->cm.response], + nwm->cm.error); + goto verbose; + } + + nss_info("%px: msg nack - type[%d]:%s, response[%d]:%s, error[%d]:%s\n", + nwm, nwm->cm.type, nss_wifili_log_message_types_str[nwm->cm.type], + nwm->cm.response, nss_cmn_response_str[nwm->cm.response], + nwm->cm.error, nss_wifili_log_error_response_types_str[nwm->cm.error]); + +verbose: + nss_wifili_log_verbose(nwm); +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifili_log.h b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_log.h new file mode 100644 index 000000000..a381ab6cf --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_log.h @@ -0,0 +1,37 @@ +/* + ****************************************************************************** + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * **************************************************************************** + */ + +#ifndef __NSS_WIFILI_LOG_H__ +#define __NSS_WIFILI_LOG_H__ + +/* + * nss_WIFILI_log.h + * NSS WIFILI Log Header File + */ + +/* + * nss_WIFILI_log_tx_msg + * Logs a WIFILI message that is sent to the NSS firmware. + */ +void nss_wifili_log_tx_msg(struct nss_wifili_msg *ncm); + +/* + * nss_WIFILI_log_rx_msg + * Logs a WIFILI message that is received from the NSS firmware. + */ +void nss_wifili_log_rx_msg(struct nss_wifili_msg *ncm); + +#endif /* __NSS_WIFILI_LOG_H__ */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifili_stats.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_stats.c new file mode 100644 index 000000000..6f983bcf5 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_stats.c @@ -0,0 +1,512 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_wifili_stats.c + * NSS wifili statistics APIs + */ + +#include "nss_tx_rx_common.h" +#include "nss_core.h" +#include "nss_wifili_if.h" +#include "nss_wifili_stats.h" +#include "nss_wifili_strings.h" + +/* + * Declare atomic notifier data structure for statistics. + */ +ATOMIC_NOTIFIER_HEAD(nss_wifili_stats_notifier); + +/* + * Statistics structures + * The structure will hold the statistics for 3 SOCs. + */ +struct nss_wifili_soc_stats soc_stats[NSS_WIFILI_MAX_SOC_NUM]; + +/* + * nss_wifili_stats_read() + * Read wifili statistics + */ +static ssize_t nss_wifili_stats_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + uint32_t i; + + /* + * max output lines = ((#stats + eight blank lines) * #WIFILI #STATS) + start/end tag + 3 blank + * + Number of Extra outputlines for future reference to add new stats + */ + uint32_t max_pdev = 0; + uint32_t max_output_lines; + size_t size_al = 0; + size_t size_wr = 0; + ssize_t bytes_read = 0; + char *lbuf = NULL; + uint32_t soc_idx; + struct nss_wifili_stats *stats_wifili = NULL; + + /* + * Max number of pdev depends on type of soc (Internal/Attached). + */ + for (soc_idx = 0; soc_idx < NSS_WIFILI_MAX_SOC_NUM; soc_idx++) { + max_pdev += soc_stats[soc_idx].soc_maxpdev; + } + + /* + * Max pdev cannot be null. 
+ */ + if (unlikely(max_pdev == 0)) { + nss_warning("Cannot have max pdev zero "); + return 0; + } + + max_output_lines = (((NSS_WIFILI_STATS_MAX + 9) * max_pdev) + + NSS_WIFILI_STATS_WBM_MAX + NSS_STATS_EXTRA_OUTPUT_LINES); + + size_al = NSS_STATS_MAX_STR_LENGTH * max_output_lines; + + lbuf = kzalloc(size_al, GFP_KERNEL); + if (unlikely(lbuf == NULL)) { + nss_warning("Could not allocate memory for local statistics buffer"); + return 0; + } + + size_wr += nss_stats_banner(lbuf, size_wr, size_al, "wifili", NSS_STATS_SINGLE_CORE); + + for (soc_idx = 0; soc_idx < NSS_WIFILI_MAX_SOC_NUM; soc_idx++) { + stats_wifili = &(soc_stats[soc_idx].stats_wifili); + for (i = 0; i < soc_stats[soc_idx].soc_maxpdev; i++) { + + spin_lock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("wifili", "txrx", i + , nss_wifili_strings_stats_txrx + , stats_wifili->stats_txrx[i] + , NSS_WIFILI_STATS_TXRX_MAX + , lbuf, size_wr, size_al); + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += scnprintf(lbuf + size_wr + , size_al - size_wr, "\n"); + + /* + * Filling TCL ring stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("wifili", "tcl ring", i + , nss_wifili_strings_stats_tcl + , stats_wifili->stats_tcl_ring[i] + , NSS_WIFILI_STATS_TCL_MAX + , lbuf, size_wr, size_al); + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += scnprintf(lbuf + size_wr + , size_al - size_wr, "\n"); + + /* + * Filling TCL comp stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("wifili", "tcl comp", i + , nss_wifili_strings_stats_tx_comp + , stats_wifili->stats_tx_comp[i] + , NSS_WIFILI_STATS_TX_DESC_FREE_MAX + , lbuf, size_wr, size_al); + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += scnprintf(lbuf + size_wr + , size_al - size_wr, "\n"); + + /* + * Filling reo ring stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("wifili", "reo ring", i + , nss_wifili_strings_stats_reo + , stats_wifili->stats_reo[i] 
+ , NSS_WIFILI_STATS_REO_MAX + , lbuf, size_wr, size_al); + + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += scnprintf(lbuf + size_wr + , size_al - size_wr, "\n"); + + /* + * Filling TX SW Pool + */ + spin_lock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("wifili", "tx sw pool", i + , nss_wifili_strings_stats_txsw_pool + , stats_wifili->stats_tx_desc[i] + , NSS_WIFILI_STATS_TX_DESC_MAX + , lbuf, size_wr, size_al); + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += scnprintf(lbuf + size_wr + , size_al - size_wr, "\n"); + + /* + * Filling TX EXt SW Pool + */ + spin_lock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("wifili", "tx ext sw pool", i + , nss_wifili_strings_stats_ext_txsw_pool + , stats_wifili->stats_ext_tx_desc[i] + , NSS_WIFILI_STATS_EXT_TX_DESC_MAX + , lbuf, size_wr, size_al); + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += scnprintf(lbuf + size_wr + , size_al - size_wr, "\n"); + + /* + * Filling rxdma pool stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("wifili", "rxdma pool", i + , nss_wifili_strings_stats_rxdma_pool + , stats_wifili->stats_rx_desc[i] + , NSS_WIFILI_STATS_RX_DESC_MAX + , lbuf, size_wr, size_al); + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += scnprintf(lbuf + size_wr + , size_al - size_wr, "\n"); + + /* + * Filling rxdma ring stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("wifili", "rxdma ring", i + , nss_wifili_strings_stats_rxdma_ring + , stats_wifili->stats_rxdma[i] + , NSS_WIFILI_STATS_RXDMA_DESC_MAX + , lbuf, size_wr, size_al); + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += scnprintf(lbuf + size_wr + , size_al - size_wr, "\n"); + } + + /* + * Filling wbm ring stats + */ + spin_lock_bh(&nss_top_main.stats_lock); + size_wr += nss_stats_print("wifili", "wbm ring" + , NSS_STATS_SINGLE_INSTANCE + , nss_wifili_strings_stats_wbm + , stats_wifili->stats_wbm + , NSS_WIFILI_STATS_WBM_MAX + , lbuf, 
size_wr, size_al); + spin_unlock_bh(&nss_top_main.stats_lock); + size_wr += scnprintf(lbuf + size_wr, size_al - size_wr, "\n"); + } + + bytes_read = simple_read_from_buffer(ubuf, sz, ppos, lbuf, strlen(lbuf)); + kfree(lbuf); + + return bytes_read; +} + +/* + * wifili_stats_ops + */ +NSS_STATS_DECLARE_FILE_OPERATIONS(wifili); + +/* + * nss_wifili_stats_dentry_create() + * Create wifili statistics debug entry. + */ +void nss_wifili_stats_dentry_create(void) +{ + nss_stats_create_dentry("wifili", &nss_wifili_stats_ops); +} + +/* + * nss_wifili_stats_sync() + * Handle the syncing of WIFI stats. + */ +void nss_wifili_stats_sync(struct nss_ctx_instance *nss_ctx, + struct nss_wifili_stats_sync_msg *wlsoc_stats, uint16_t interface) +{ + struct nss_top_instance *nss_top = nss_ctx->nss_top; + struct nss_wifili_soc_stats *nwss = NULL; + struct nss_wifili_stats *stats = NULL; + struct nss_wifili_device_stats *devstats = &wlsoc_stats->stats; + uint32_t index; + + /* + * Max number of pdev depends on type of soc (Internal/Attached). + */ + switch (interface) { + case NSS_WIFILI_INTERNAL_INTERFACE: + nwss = &soc_stats[0]; + nwss->soc_maxpdev = NSS_WIFILI_MAX_PDEV_NUM_MSG; + break; + + case NSS_WIFILI_EXTERNAL_INTERFACE0: + nwss = &soc_stats[1]; + nwss->soc_maxpdev = NSS_WIFILI_SOC_ATTACHED_MAX_PDEV_NUM; + break; + + case NSS_WIFILI_EXTERNAL_INTERFACE1: + nwss = &soc_stats[2]; + nwss->soc_maxpdev = NSS_WIFILI_SOC_ATTACHED_MAX_PDEV_NUM; + break; + + default: + nss_warning("%px: Invalid wifili interface\n", nss_ctx); + return; + } + + /* + * Wifili statistics structure. 
+ */ + stats = &(nwss->stats_wifili); + + spin_lock_bh(&nss_top->stats_lock); + + for (index = 0; index < nwss->soc_maxpdev; index++) { + /* + * Rx stats + */ + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_MSDU_ERROR] += + devstats->rx_data_stats[index].rx_msdu_err; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_INV_PEER_RCV] += + (devstats->rx_data_stats[index].rx_inv_peer + + devstats->rx_data_stats[index].rx_scatter_inv_peer); + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_WDS_SRCPORT_EXCEPTION] += + devstats->rx_data_stats[index].rx_wds_learn_send; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_WDS_SRCPORT_EXCEPTION_FAIL] += + devstats->rx_data_stats[index].rx_wds_learn_send_fail; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_DELIVERD] += + devstats->rx_data_stats[index].rx_deliver_cnt; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_DELIVER_DROPPED] += + devstats->rx_data_stats[index].rx_deliver_cnt_fail; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_INTRA_BSS_UCAST] += + devstats->rx_data_stats[index].rx_intra_bss_ucast_send; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_INTRA_BSS_UCAST_FAIL] += + devstats->rx_data_stats[index].rx_intra_bss_ucast_send_fail; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_INTRA_BSS_MCAST] += + devstats->rx_data_stats[index].rx_intra_bss_mcast_send; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_INTRA_BSS_MCAST_FAIL] += + devstats->rx_data_stats[index].rx_intra_bss_mcast_send_fail; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_SG_RCV_SEND] += + devstats->rx_data_stats[index].rx_sg_recv_send; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_SG_RCV_FAIL] += + devstats->rx_data_stats[index].rx_sg_recv_fail; + stats->stats_txrx[index][NSS_STATS_WIFILI_RX_MCAST_ECHO] += + devstats->rx_data_stats[index].rx_me_pkts; + stats->stats_txrx[index][NSS_STATS_WIFILI_RX_INV_TID] += + devstats->rx_data_stats[index].rx_inv_tid; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_FRAG_INV_SC] += + devstats->rx_data_stats[index].rx_frag_inv_sc; + 
stats->stats_txrx[index][NSS_WIFILI_STATS_RX_FRAG_INV_FC] += + devstats->rx_data_stats[index].rx_frag_inv_fc; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_FRAG_NON_FRAG] += + devstats->rx_data_stats[index].rx_non_frag_err; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_FRAG_RETRY] += + devstats->rx_data_stats[index].rx_repeat_fragno; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_FRAG_OOO] += + devstats->rx_data_stats[index].rx_ooo_frag; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_FRAG_OOO_SEQ] += + devstats->rx_data_stats[index].rx_ooo_frag_seq; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_FRAG_ALL_FRAG_RCV] += + devstats->rx_data_stats[index].rx_all_frag_rcv; + stats->stats_txrx[index][NSS_WIFILI_STATS_RX_FRAG_DELIVER] += + devstats->rx_data_stats[index].rx_frag_deliver; + + /* + * Tx stats + */ + stats->stats_txrx[index][NSS_WIFILI_STATS_TX_ENQUEUE] += + devstats->tx_data_stats[index].tx_enqueue_cnt; + stats->stats_txrx[index][NSS_WIFILI_STATS_TX_ENQUEUE_DROP] += + devstats->tx_data_stats[index].tx_enqueue_dropped; + stats->stats_txrx[index][NSS_WIFILI_STATS_TX_DEQUEUE] += + devstats->tx_data_stats[index].tx_dequeue_cnt; + stats->stats_txrx[index][NSS_WIFILI_STATS_TX_HW_ENQUEUE_FAIL] += + devstats->tx_data_stats[index].tx_send_fail_cnt; + stats->stats_txrx[index][NSS_WIFILI_STATS_TX_SENT_COUNT] += + devstats->tx_data_stats[index].tx_processed_pkt; + } + + /* + * update the tcl ring stats + */ + for (index = 0; index < NSS_WIFILI_MAX_TCL_DATA_RINGS_MSG; index++) { + stats->stats_tcl_ring[index][NSS_WIFILI_STATS_TCL_NO_HW_DESC] += + devstats->tcl_stats[index].tcl_no_hw_desc; + stats->stats_tcl_ring[index][NSS_WIFILI_STATS_TCL_RING_FULL] += + devstats->tcl_stats[index].tcl_ring_full; + stats->stats_tcl_ring[index][NSS_WIFILI_STATS_TCL_RING_SENT] += + devstats->tcl_stats[index].tcl_ring_sent; + } + + /* + * update the tcl comp stats + */ + for (index = 0; index < NSS_WIFILI_MAX_TCL_DATA_RINGS_MSG; index++) { + 
stats->stats_tx_comp[index][NSS_WIFILI_STATS_TX_DESC_FREE_INV_BUFSRC] += + devstats->txcomp_stats[index].invalid_bufsrc; + stats->stats_tx_comp[index][NSS_WIFILI_STATS_TX_DESC_FREE_INV_COOKIE] += + devstats->txcomp_stats[index].invalid_cookie; + stats->stats_tx_comp[index][NSS_WIFILI_STATS_TX_DESC_FREE_HW_RING_EMPTY] += + devstats->txcomp_stats[index].hw_ring_empty; + stats->stats_tx_comp[index][NSS_WIFILI_STATS_TX_DESC_FREE_REAPED] += + devstats->txcomp_stats[index].ring_reaped; + } + + /* + * update reo ring stats + */ + for (index = 0; index < NSS_WIFILI_MAX_REO_DATA_RINGS_MSG; index++) { + stats->stats_reo[index][NSS_WIFILI_STATS_REO_ERROR] += + devstats->rxreo_stats[index].ring_error; + stats->stats_reo[index][NSS_WIFILI_STATS_REO_REAPED] += + devstats->rxreo_stats[index].ring_reaped; + stats->stats_reo[index][NSS_WIFILI_STATS_REO_INV_COOKIE] += + devstats->rxreo_stats[index].invalid_cookie; + stats->stats_reo[index][NSS_WIFILI_STATS_REO_FRAG_RCV] += + devstats->rxreo_stats[index].defrag_reaped; + } + + /* + * update tx sw pool + */ + for (index = 0; index < NSS_WIFILI_MAX_TXDESC_POOLS_MSG; index++) { + stats->stats_tx_desc[index][NSS_WIFILI_STATS_TX_DESC_IN_USE] = + devstats->tx_sw_pool_stats[index].desc_alloc; + stats->stats_tx_desc[index][NSS_WIFILI_STATS_TX_DESC_ALLOC_FAIL] += + devstats->tx_sw_pool_stats[index].desc_alloc_fail; + stats->stats_tx_desc[index][NSS_WIFILI_STATS_TX_DESC_ALREADY_ALLOCATED] += + devstats->tx_sw_pool_stats[index].desc_already_allocated; + stats->stats_tx_desc[index][NSS_WIFILI_STATS_TX_DESC_INVALID_FREE] += + devstats->tx_sw_pool_stats[index].desc_invalid_free; + stats->stats_tx_desc[index][NSS_WIFILI_STATS_TX_DESC_FREE_SRC_FW] += + devstats->tx_sw_pool_stats[index].tx_rel_src_fw; + stats->stats_tx_desc[index][NSS_WIFILI_STATS_TX_DESC_FREE_COMPLETION] += + devstats->tx_sw_pool_stats[index].tx_rel_tx_desc; + stats->stats_tx_desc[index][NSS_WIFILI_STATS_TX_DESC_NO_PB] += + devstats->tx_sw_pool_stats[index].tx_rel_no_pb; + 
stats->stats_tx_desc[index][NSS_WIFILI_STATS_TX_QUEUELIMIT_DROP] += + devstats->tx_sw_pool_stats[index].tx_queue_limit_drop; + } + + /* + * update ext tx desc pool stats + */ + for (index = 0; index < NSS_WIFILI_MAX_TX_EXT_DESC_POOLS_MSG; index++) { + stats->stats_ext_tx_desc[index][NSS_WIFILI_STATS_EXT_TX_DESC_IN_USE] = + devstats->tx_ext_sw_pool_stats[index].desc_alloc; + stats->stats_ext_tx_desc[index][NSS_WIFILI_STATS_EXT_TX_DESC_ALLOC_FAIL] += + devstats->tx_ext_sw_pool_stats[index].desc_alloc_fail; + stats->stats_ext_tx_desc[index][NSS_WIFILI_STATS_EXT_TX_DESC_ALREADY_ALLOCATED] += + devstats->tx_ext_sw_pool_stats[index].desc_already_allocated; + stats->stats_ext_tx_desc[index][NSS_WIFILI_STATS_EXT_TX_DESC_INVALID_FREE] += + devstats->tx_ext_sw_pool_stats[index].desc_invalid_free; + } + + /* + * update rx desc pool stats + */ + for (index = 0; index < nwss->soc_maxpdev; index++) { + stats->stats_rx_desc[index][NSS_WIFILI_STATS_RX_DESC_NO_PB] += + devstats->rx_sw_pool_stats[index].rx_no_pb; + stats->stats_rx_desc[index][NSS_WIFILI_STATS_RX_DESC_ALLOC_FAIL] += + devstats->rx_sw_pool_stats[index].desc_alloc_fail; + stats->stats_rx_desc[index][NSS_WIFILI_STATS_RX_DESC_IN_USE] = + devstats->rx_sw_pool_stats[index].desc_alloc; + } + + /* + * update rx dma ring stats + */ + for (index = 0; index < nwss->soc_maxpdev; index++) { + stats->stats_rxdma[index][NSS_WIFILI_STATS_RXDMA_DESC_UNAVAILABLE] += + devstats->rxdma_stats[index].rx_hw_desc_unavailable; + stats->stats_rxdma[index][NSS_WIFILI_STATS_RXDMA_BUF_REPLENISHED] += + devstats->rxdma_stats[index].rx_buf_replenished; + } + + /* + * update wbm ring stats + */ + stats->stats_wbm[NSS_WIFILI_STATS_WBM_IE_LOCAL_ALLOC_FAIL] += devstats->rxwbm_stats.invalid_buf_mgr; + stats->stats_wbm[NSS_WIFILI_STATS_WBM_SRC_DMA] += devstats->rxwbm_stats.err_src_rxdma; + stats->stats_wbm[NSS_WIFILI_STATS_WBM_SRC_DMA_CODE_INV] += devstats->rxwbm_stats.err_src_rxdma_code_inv; + stats->stats_wbm[NSS_WIFILI_STATS_WBM_SRC_REO] += 
devstats->rxwbm_stats.err_src_reo; + stats->stats_wbm[NSS_WIFILI_STATS_WBM_SRC_REO_CODE_NULLQ] += devstats->rxwbm_stats.err_src_reo_code_nullq; + stats->stats_wbm[NSS_WIFILI_STATS_WBM_SRC_REO_CODE_INV] += devstats->rxwbm_stats.err_src_reo_code_inv; + stats->stats_wbm[NSS_WIFILI_STATS_WBM_SRC_INV] += devstats->rxwbm_stats.err_src_invalid; + spin_unlock_bh(&nss_top->stats_lock); + return; +} + +/* + * nss_wifili_stats_notify() + * Sends notifications to the registered modules. + * + * Leverage NSS-FW statistics timing to update Netlink. + */ +void nss_wifili_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num) +{ + struct nss_wifili_stats_notification *wifili_stats; + uint32_t index = 0; + + wifili_stats = kzalloc(sizeof(struct nss_wifili_stats_notification), GFP_ATOMIC); + if (!wifili_stats) { + nss_warning("%px: Failed to allocate memory for wifili stats\n", nss_ctx); + return; + } + + wifili_stats->core_id = nss_ctx->id; + switch (if_num) { + case NSS_WIFILI_INTERNAL_INTERFACE: + index = 0; + break; + + case NSS_WIFILI_EXTERNAL_INTERFACE0: + index = 1; + break; + + case NSS_WIFILI_EXTERNAL_INTERFACE1: + index = 2; + break; + + default: + nss_warning("%px: Invalid wifili interface\n", nss_ctx); + goto done; + } + wifili_stats->if_num = if_num; + memcpy(&wifili_stats->stats, &soc_stats[index].stats_wifili, sizeof(wifili_stats->stats)); + atomic_notifier_call_chain(&nss_wifili_stats_notifier, NSS_STATS_EVENT_NOTIFY, (void *)wifili_stats); + +done: + kfree(wifili_stats); + return; +} + +/* + * nss_wifili_stats_register_notifier() + * Registers statistics notifier. + */ +int nss_wifili_stats_register_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&nss_wifili_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_wifili_stats_register_notifier); + +/* + * nss_wifili_stats_unregister_notifier() + * Deregisters statistics notifier. 
+ */ +int nss_wifili_stats_unregister_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nss_wifili_stats_notifier, nb); +} +EXPORT_SYMBOL(nss_wifili_stats_unregister_notifier); diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifili_stats.h b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_stats.h new file mode 100644 index 000000000..9c073ce8d --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_stats.h @@ -0,0 +1,35 @@ +/* + ************************************************************************** + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +/* + * nss_wifili_stats.h + * NSS wifili statistics header file. 
+ */ + +#ifndef __NSS_WIFILI_STATS_H +#define __NSS_WIFILI_STATS_H + +#include "nss_core.h" +#include "nss_wifili_if.h" + +/* + * NSS wifili statistics APIs + */ +extern void nss_wifili_stats_notify(struct nss_ctx_instance *nss_ctx, uint32_t if_num); +extern void nss_wifili_stats_sync(struct nss_ctx_instance *nss_ctx, struct nss_wifili_stats_sync_msg *wlsoc_stats, uint16_t interface); +extern void nss_wifili_stats_dentry_create(void); + +#endif /* __NSS_WIFILI_STATS_H */ diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifili_strings.c b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_strings.c new file mode 100644 index 000000000..3828221bd --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_strings.c @@ -0,0 +1,366 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ ************************************************************************** + */ + +#include "nss_stats.h" +#include "nss_core.h" +#include +#include "nss_strings.h" +#include "nss_wifili_strings.h" + +/* + * nss_wifili_strings_stats_txrx + * wifili txrx statistics + */ +struct nss_stats_info nss_wifili_strings_stats_txrx[NSS_WIFILI_STATS_TXRX_MAX] = { + {"rx_msdu_error" , NSS_STATS_TYPE_ERROR}, + {"rx_inv_peer_rcv" , NSS_STATS_TYPE_SPECIAL}, + {"rx_wds_srcport_exception" , NSS_STATS_TYPE_EXCEPTION}, + {"rx_wds_srcport_exception_fail" , NSS_STATS_TYPE_DROP}, + {"rx_deliverd" , NSS_STATS_TYPE_SPECIAL}, + {"rx_deliver_drops" , NSS_STATS_TYPE_DROP}, + {"rx_intra_bss_ucast" , NSS_STATS_TYPE_SPECIAL}, + {"rx_intra_bss_ucast_fail" , NSS_STATS_TYPE_DROP}, + {"rx_intra_bss_mcast" , NSS_STATS_TYPE_SPECIAL}, + {"rx_intra_bss_mcast_fail" , NSS_STATS_TYPE_DROP}, + {"rx_sg_rcv_send" , NSS_STATS_TYPE_SPECIAL}, + {"rx_sg_rcv_fail" , NSS_STATS_TYPE_DROP}, + {"rx_mcast_echo" , NSS_STATS_TYPE_SPECIAL}, + {"rx_inv_tid" , NSS_STATS_TYPE_SPECIAL}, + {"stats_rx_frag_inv_sc" , NSS_STATS_TYPE_SPECIAL}, + {"stats_rx_frag_inv_fc" , NSS_STATS_TYPE_SPECIAL}, + {"stats_rx_frag_non_frag" , NSS_STATS_TYPE_SPECIAL}, + {"stats_rx_frag_retry" , NSS_STATS_TYPE_SPECIAL}, + {"stats_rx_frag_ooo" , NSS_STATS_TYPE_SPECIAL}, + {"stats_rx_frag_ooo_seq" , NSS_STATS_TYPE_SPECIAL}, + {"stats_rx_frag_all_frag_rcv" , NSS_STATS_TYPE_SPECIAL}, + {"stats_rx_frag_deliver" , NSS_STATS_TYPE_SPECIAL}, + {"tx_enqueue" , NSS_STATS_TYPE_SPECIAL}, + {"tx_enqueue_drop" , NSS_STATS_TYPE_DROP}, + {"tx_dequeue" , NSS_STATS_TYPE_SPECIAL}, + {"tx_hw_enqueue_fail" , NSS_STATS_TYPE_DROP}, + {"tx_sent_count" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_wifili_strings_stats_tcl + * wifili tcl stats + */ +struct nss_stats_info nss_wifili_strings_stats_tcl[NSS_WIFILI_STATS_TCL_MAX] = { + {"tcl_no_hw_desc" , NSS_STATS_TYPE_SPECIAL}, + {"tcl_ring_full" , NSS_STATS_TYPE_SPECIAL}, + {"tcl_ring_sent" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + 
* nss_wifili_strings_stats_tx_comp + * wifili tx comp stats + */ +struct nss_stats_info nss_wifili_strings_stats_tx_comp[NSS_WIFILI_STATS_TX_DESC_FREE_MAX] = { + {"tx_desc_free_inv_bufsrc" , NSS_STATS_TYPE_ERROR}, + {"tx_desc_free_inv_cookie" , NSS_STATS_TYPE_SPECIAL}, + {"tx_desc_free_hw_ring_empty" , NSS_STATS_TYPE_SPECIAL}, + {"tx_desc_free_reaped" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_wifili_strings_stats_reo + * wifili tx reo stats + */ +struct nss_stats_info nss_wifili_strings_stats_reo[NSS_WIFILI_STATS_REO_MAX] = { + {"reo_error" , NSS_STATS_TYPE_ERROR}, + {"reo_reaped" , NSS_STATS_TYPE_SPECIAL}, + {"reo_inv_cookie" , NSS_STATS_TYPE_SPECIAL}, + {"stats_reo_frag_rcv" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_wifili_strings_stats_txsw_pool + * wifili tx desc stats + */ +struct nss_stats_info nss_wifili_strings_stats_txsw_pool[NSS_WIFILI_STATS_TX_DESC_MAX] = { + {"tx_desc_in_use" , NSS_STATS_TYPE_SPECIAL}, + {"tx_desc_alloc_fail" , NSS_STATS_TYPE_SPECIAL}, + {"tx_desc_already_allocated" , NSS_STATS_TYPE_SPECIAL}, + {"tx_desc_invalid_free" , NSS_STATS_TYPE_SPECIAL}, + {"tx_desc_free_src_fw" , NSS_STATS_TYPE_SPECIAL}, + {"tx_desc_free_completion" , NSS_STATS_TYPE_SPECIAL}, + {"tx_desc_no_pb" , NSS_STATS_TYPE_SPECIAL}, + {"tx_desc_queuelimit_drop" , NSS_STATS_TYPE_DROP} +}; + +/* + * nss_wifili_strings_stats_ext_txsw_pool + * wifili tx ext desc stats + */ +struct nss_stats_info nss_wifili_strings_stats_ext_txsw_pool[NSS_WIFILI_STATS_EXT_TX_DESC_MAX] = { + {"ext_tx_desc_in_use" , NSS_STATS_TYPE_SPECIAL}, + {"ext_tx_desc_alloc_fail" , NSS_STATS_TYPE_SPECIAL}, + {"ext_tx_desc_already_allocated" , NSS_STATS_TYPE_SPECIAL}, + {"ext_tx_desc_invalid_free" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_wifili_strings_stats_rxdma_pool + * wifili rx desc stats + */ +struct nss_stats_info nss_wifili_strings_stats_rxdma_pool[NSS_WIFILI_STATS_RX_DESC_MAX] = { + {"rx_desc_no_pb" , NSS_STATS_TYPE_SPECIAL}, + {"rx_desc_alloc_fail" , NSS_STATS_TYPE_SPECIAL}, + 
{"rx_desc_in_use" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_wifili_strings_stats_rxdma_ring + * wifili rx dma ring stats + */ +struct nss_stats_info nss_wifili_strings_stats_rxdma_ring[NSS_WIFILI_STATS_RXDMA_DESC_MAX] = { + {"rxdma_hw_desc_unavailable" , NSS_STATS_TYPE_SPECIAL}, + {"rxdma_buf_replenished" , NSS_STATS_TYPE_SPECIAL} +}; + +/* + * nss_wifili_strings_stats_wbm + * wifili wbm ring stats + */ +struct nss_stats_info nss_wifili_strings_stats_wbm[NSS_WIFILI_STATS_WBM_MAX] = { + {"wbm_ie_local_alloc_fail" , NSS_STATS_TYPE_ERROR}, + {"wbm_src_dma" , NSS_STATS_TYPE_SPECIAL}, + {"wbm_src_dma_code_inv" , NSS_STATS_TYPE_SPECIAL}, + {"wbm_src_reo" , NSS_STATS_TYPE_SPECIAL}, + {"wbm_src_reo_code_nullq" , NSS_STATS_TYPE_SPECIAL}, + {"wbm_src_reo_code_inv" , NSS_STATS_TYPE_ERROR}, + {"wbm_src_inv" , NSS_STATS_TYPE_ERROR} +}; + +/* + * nss_wifili_txrx_strings_read() + * Read wifili Tx Rx statistics names. + */ +static ssize_t nss_wifili_txrx_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifili_strings_stats_txrx, NSS_WIFILI_STATS_TXRX_MAX); +} + +/* + * nss_wifili_tcl_ring_strings_read() + * Read wifili TCL ring statistics names. + */ +static ssize_t nss_wifili_tcl_ring_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifili_strings_stats_tcl, NSS_WIFILI_STATS_TCL_MAX); +} + +/* + * nss_wifili_tcl_comp_strings_read() + * Read wifili TCL comp statistics names. + */ +static ssize_t nss_wifili_tcl_comp_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifili_strings_stats_tx_comp, NSS_WIFILI_STATS_TX_DESC_FREE_MAX); +} + +/* + * nss_wifili_reo_ring_strings_read() + * Read wifili reorder ring statistics names. 
+ */ +static ssize_t nss_wifili_reo_ring_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifili_strings_stats_reo, NSS_WIFILI_STATS_REO_MAX); +} + +/* + * nss_wifili_tx_sw_strings_read() + * Read wifili Tx sw statistics names. + */ +static ssize_t nss_wifili_tx_sw_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifili_strings_stats_txsw_pool, NSS_WIFILI_STATS_TX_DESC_MAX); +} + +/* + * nss_wifili_tx_ext_sw_strings_read() + * Read wifili Tx ext sw statistics names. + */ +static ssize_t nss_wifili_tx_ext_sw_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifili_strings_stats_ext_txsw_pool, NSS_WIFILI_STATS_EXT_TX_DESC_MAX); +} + +/* + * nss_wifili_rx_dma_pool_strings_read() + * Read wifili Rx DMA pool statistics names. + */ +static ssize_t nss_wifili_rx_dma_pool_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifili_strings_stats_rxdma_pool, NSS_WIFILI_STATS_RX_DESC_MAX); +} + +/* + * nss_wifili_rx_dma_ring_strings_read() + * Read wifili Rx DMA ring statistics names. + */ +static ssize_t nss_wifili_rx_dma_ring_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifili_strings_stats_rxdma_ring, NSS_WIFILI_STATS_RXDMA_DESC_MAX); +} + +/* + * nss_wifili_wbm_ring_strings_read() + * Read wifili WBM ring statistics names. 
+ */ +static ssize_t nss_wifili_wbm_ring_strings_read(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos) +{ + return nss_strings_print(ubuf, sz, ppos, nss_wifili_strings_stats_wbm, NSS_WIFILI_STATS_WBM_MAX); +} + +/* + * nss_wifili_txrx_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifili_txrx); + +/* + * nss_wifili_tcl_ring_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifili_tcl_ring); + +/* + * nss_wifili_tcl_comp_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifili_tcl_comp); + +/* + * nss_wifili_reo_ring_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifili_reo_ring); + +/* + * nss_wifili_tx_sw_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifili_tx_sw); + +/* + * nss_wifili_tx_ext_sw_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifili_tx_ext_sw); + +/* + * nss_wifili_rx_dma_pool_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifili_rx_dma_pool); + +/* + * nss_wifili_rx_dma_ring_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifili_rx_dma_ring); + +/* + * nss_wifili_wbm_ring_strings_ops + */ +NSS_STRINGS_DECLARE_FILE_OPERATIONS(wifili_wbm_ring); + +/* + * nss_wifili_strings_dentry_create() + * Create wifili statistics strings debug entry. 
+ */ +void nss_wifili_strings_dentry_create(void) +{ + struct dentry *wifili_d = NULL; + struct dentry *wifili_txrx_d = NULL; + struct dentry *wifili_tcl_ring_d = NULL; + struct dentry *wifili_tcl_comp_d = NULL; + struct dentry *wifili_reo_ring_d = NULL; + struct dentry *wifili_tx_sw_d = NULL; + struct dentry *wifili_tx_ext_sw_d = NULL; + struct dentry *wifili_rx_dma_pool_d = NULL; + struct dentry *wifili_rx_dma_ring_d = NULL; + struct dentry *wifili_wbm_ring_d = NULL; + + if (!nss_top_main.strings_dentry) { + nss_warning("qca-nss-drv/strings is not present"); + return; + } + + wifili_d = debugfs_create_dir("wifili", nss_top_main.strings_dentry); + if (!wifili_d) { + nss_warning("Failed to create qca-nss-drv/strings/wifili directory"); + return; + } + + wifili_txrx_d = debugfs_create_file("txrx_str", 0400, wifili_d, &nss_top_main, &nss_wifili_txrx_strings_ops); + if (!wifili_txrx_d) { + nss_warning("Failed to create qca-nss-drv/strings/wifili/txrx_str file"); + debugfs_remove_recursive(wifili_d); + return; + } + + wifili_tcl_ring_d = debugfs_create_file("tcl_ring_str", 0400, wifili_d, &nss_top_main, &nss_wifili_tcl_ring_strings_ops); + if (!wifili_tcl_ring_d) { + nss_warning("Failed to create qca-nss-drv/strings/wifili/tcl_ring_str file"); + debugfs_remove_recursive(wifili_d); + return; + } + + wifili_tcl_comp_d = debugfs_create_file("tcl_comp_str", 0400, wifili_d, &nss_top_main, &nss_wifili_tcl_comp_strings_ops); + if (!wifili_tcl_comp_d) { + nss_warning("Failed to create qca-nss-drv/strings/wifili/tcl_comp_str file"); + debugfs_remove_recursive(wifili_d); + return; + } + + wifili_reo_ring_d = debugfs_create_file("reo_ring_str", 0400, wifili_d, &nss_top_main, &nss_wifili_reo_ring_strings_ops); + if (!wifili_reo_ring_d) { + nss_warning("Failed to create qca-nss-drv/strings/wifili/reo_ring_str file"); + debugfs_remove_recursive(wifili_d); + return; + } + + wifili_tx_sw_d = debugfs_create_file("tx_sw_str", 0400, wifili_d, &nss_top_main, 
&nss_wifili_tx_sw_strings_ops); + if (!wifili_tx_sw_d) { + nss_warning("Failed to create qca-nss-drv/strings/wifili/tx_sw_str file"); + debugfs_remove_recursive(wifili_d); + return; + } + + wifili_tx_ext_sw_d = debugfs_create_file("tx_ext_sw_str", 0400, wifili_d, &nss_top_main, &nss_wifili_tx_ext_sw_strings_ops); + if (!wifili_tx_ext_sw_d) { + nss_warning("Failed to create qca-nss-drv/strings/wifili/tx_ext_sw_str file"); + debugfs_remove_recursive(wifili_d); + return; + } + + wifili_rx_dma_pool_d = debugfs_create_file("rx_dma_pool_str", 0400, wifili_d, &nss_top_main, &nss_wifili_rx_dma_pool_strings_ops); + if (!wifili_rx_dma_pool_d) { + nss_warning("Failed to create qca-nss-drv/strings/wifili/rx_dma_pool_str file"); + debugfs_remove_recursive(wifili_d); + return; + } + + wifili_rx_dma_ring_d = debugfs_create_file("rx_dma_ring_str", 0400, wifili_d, &nss_top_main, &nss_wifili_rx_dma_ring_strings_ops); + if (!wifili_rx_dma_ring_d) { + nss_warning("Failed to create qca-nss-drv/strings/wifili/rx_dma_ring_str file"); + debugfs_remove_recursive(wifili_d); + return; + } + + wifili_wbm_ring_d = debugfs_create_file("wbm_ring_str", 0400, wifili_d, &nss_top_main, &nss_wifili_wbm_ring_strings_ops); + if (!wifili_wbm_ring_d) { + nss_warning("Failed to create qca-nss-drv/strings/wifili/wbm_ring_str file"); + debugfs_remove_recursive(wifili_d); + return; + } +} diff --git a/feeds/ipq807x/qca-nss-drv/src/nss_wifili_strings.h b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_strings.h new file mode 100644 index 000000000..263d172f6 --- /dev/null +++ b/feeds/ipq807x/qca-nss-drv/src/nss_wifili_strings.h @@ -0,0 +1,44 @@ +/* + ************************************************************************** + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + ************************************************************************** + */ + +#ifndef __NSS_WIFILI_STRINGS_H +#define __NSS_WIFILI_STRINGS_H + +/* + * Maximum string length: + * This should be equal to maximum string size of any stats + * inclusive of stats value + */ +#define NSS_WIFILI_STATS_MAX (NSS_WIFILI_STATS_TXRX_MAX + NSS_WIFILI_STATS_TCL_MAX + \ + NSS_WIFILI_STATS_TX_DESC_FREE_MAX + NSS_WIFILI_STATS_REO_MAX + \ + NSS_WIFILI_STATS_TX_DESC_MAX + NSS_WIFILI_STATS_EXT_TX_DESC_MAX + \ + NSS_WIFILI_STATS_RX_DESC_MAX + NSS_WIFILI_STATS_RXDMA_DESC_MAX) + +extern struct nss_stats_info nss_wifili_strings_stats_txrx[NSS_WIFILI_STATS_TXRX_MAX]; +extern struct nss_stats_info nss_wifili_strings_stats_tcl[NSS_WIFILI_STATS_TCL_MAX]; +extern struct nss_stats_info nss_wifili_strings_stats_tx_comp[NSS_WIFILI_STATS_TX_DESC_FREE_MAX]; +extern struct nss_stats_info nss_wifili_strings_stats_reo[NSS_WIFILI_STATS_REO_MAX]; +extern struct nss_stats_info nss_wifili_strings_stats_txsw_pool[NSS_WIFILI_STATS_TX_DESC_MAX]; +extern struct nss_stats_info nss_wifili_strings_stats_ext_txsw_pool[NSS_WIFILI_STATS_EXT_TX_DESC_MAX]; +extern struct nss_stats_info nss_wifili_strings_stats_rxdma_pool[NSS_WIFILI_STATS_RX_DESC_MAX]; +extern struct nss_stats_info nss_wifili_strings_stats_rxdma_ring[NSS_WIFILI_STATS_RXDMA_DESC_MAX]; +extern struct nss_stats_info nss_wifili_strings_stats_wbm[NSS_WIFILI_STATS_WBM_MAX]; + +extern void nss_wifili_strings_dentry_create(void); + +#endif /* 
__NSS_WIFILI_STRINGS_H */ diff --git a/feeds/ipq807x/qca-nss-fw/files/IPQ5018/qca-nss0.bin b/feeds/ipq807x/qca-nss-fw/files/IPQ5018/qca-nss0.bin index 15ec4e259c84b4fe492b09aedd0628d6ab5d72c4..52c8468a3cf7726786a521d32e19e84c23c96258 100755 GIT binary patch delta 194191 zcmb@v349bq7C&CEr+bc&gk%T_36RVrlRyZ!+;Sw`;ZhV26map*E54zb$`GA=l}V`XHwl= zRj*#XdRM)wuJ(U3pyI3k@j{3_7IFAM&NH}rZbN)>e7<==zHk|dTqgQ->AQnJq0EHa zMSXm|phYRR`mB08NvI&4cF9hnqCFIVaEg;-^u}plhA=i&_QM-=^N)`E_abKs6@eWog4;_4S0il_3Mm>RwgtiAk0a zQT_aE8nROzn|POW?O^q-#MOhNb^F~`S(ojWNH}N6MWN7v=<{A3p8N28`;IO1t&d1j z$>1#~lXj6ZWw5_)jJn*KqLmGyA&FCjZJ#7B^DcEe$U(w7Mommv(Ir&`a*^SZUq1Tu zDzuR(3RQnf-?%K{|5EBv;eAS$tZ7=}tHxfIPeXT93(pJkCz|V3V+!q1?cKWA!xaU6 z4Ft&jlq}rqW#N?Y6Znzbs8zmd%+ShaD)N?P-99o(?_aI<&&Y|tCdFBw;}TSguHQ34 zou4tP-+gZiG791Gxe3Ge+|f+eck-H8$>VijkVP2VqYp54eIUll83dUz-RJj zeZdjd%ec-)bD0(~b+hEJF(MtlSWphg{W9{lze83*t$_`ay35{2dRS7=*;l9TMozrj zPN9$#Oh`)b+IVmh;=P;Hb(!O6tNK}HinQLK9?wj-7AwN~y1`$w!$?+gwa6UOS|k=ZqykYW6&eZ+>qSO#L9 zEjx;hV2ENxUFL8MtCVc}T7Zyn5!qc50UhfGz@X*mf1;?k5Ba-`vnaI@aF_#mrwKDE~dgk(9 zWj@B4dftLxQC@;%01=53I_@kM&M^jDc1uT)(At335e-m+iiJ$)tb-e2WCb0zPdNS^QK z@p$%1=89ic)UM|7Z^#7&q|A!(|y1ENksVVs6#Ek7Kuc z-^BA=s9)h+BR}7+4(t7|__?UUsw&I%nX#x%m2wA5|G2CU%k4jG266%r)R{<&dKC2~ zV7R064qpTAtMI&CuUUY z^=w{B^5+1@#K^&DP)Q6u9!nc`x2frU$4_gywC&I-C{@-5tQ8o@3LOg(JP$+jg;7`= z+O}199HGk?p-)h+V}swesjv4m(=wBY4M-8h7K4$5aM9yQ1!d5>g4`?qzf0-yZh(Z8<8V~F({m8Kj9O9f4SSgd`B%4!;!Uq_0- zN2t@FRu1YyFRFhVG?Y%LO@j*Qgw}iwrE2}>k=CL`V;txn4l10TPq>mdfcf2Rx!p+{5D zBT^Hu>O$YC*;i%Id+MaCaxFwto}98`=i;5(t+&wtb@Nq&d&*?nClbnJk+2rO3jCgv z#qum#;65dv(wfIn3hh_phNNgSvdorvnfm6zVC0qL(Q>y#?KC(SR2g&~rTUJ^<#IVP z9eTHMeT6Km-El;T@z%KIHlhW<7UakyfAu1{leIerq6DN!MY;f_NI|*~r09xtK1k68 zX)#EVh|~pAbVgbPQp6#3f)t&Qx5Um z5KS3A>z$EjAF8(vy#ZEu5eU~1I#i?ec9M;nv_dEKQq9A9+?Edj^725!`=GKQ$!f$9 zz9yugSni-Q;0On}WxLA;0FBqrgguRaW~?4tnGa%6SD**_V?}qVa5x zpa!r^X8FE#q?QlMo1YDjqQPyXMo43p%S@Rrna1Ddue${;hXRt-kSG_*@G^P;eP%YX zJe%H>-*mJ`7*2qqrlk_a$#H!pS0KR(H~e=fmscbx1(<%3sm2da8<-Dm!JP>Ogw;S& 
zA)J_sw)Sn+i(>-`KecbWQY9Hx{+U@>r}?K-x1Rk`t3PV7WAWVLam3@ zb;DEaC!t78E=5Os*A4!^D5j;=euISyX%`i=2_+Zd}PjZPSO^rv7>4 z)yf9own05Ha$*b^^mB9>3PT+}s=u^6UR^lKPVwqvqq=vsp-mI;IWv;=yNQ@SAE+OU z%1HfXk8HR2>#jxVQEJtwlwalk^;&=3MC1|XtZUFmx${_moe8yhym+kC zk!t$rKC1s>H=kIZNxR{Zezh3$N?{1j?KyYlEkkc%MeS(6)Qff|1rl64B)SQ@K`K$m zm@6vYCjVYm*snmmB`EI&JD{LTs;u2`f2z0H!vtT^noE;_tcl8ML}_mfn4l7#i=kYXo$LT$Q=3PZWZ9_18+%26OKt;{RGihoj{O1%D;cfj>dG-G_BSBb ze}Jm_Gtx?^s6Qdy1o8hPr}bj(tJ>Lplrg3^jM`C}tl7j(jO z=!7%~Qd^4p;kdN1n{W5mH9%eo8Xd%;P9>c{50C&{i)Xs>7EoFQS&ng4V#={eCj2_# zC!E@AJz&M$-Ks!o{!!SfZ-^+QOqZbfUFWZ4CT9gy0D`V|p+d(3&aTcY z1xBL41IUjKMk)s5;grg{j^#Z`o+}!zAIjg=p?o)#FHKR$j!#qU$eORtEa;w^iYvAj zxt06ZWmWE5SK^h(SRvMKRm%%f2IhhmSuUBnJm{~Rfl|ijMZgYekdUU4*zfczq}X>_ zmDj{t8UAWvtE($IFgMd{=+Oxj$ia~Q z@f@rWt_4VRYiQj5QwW<7l!}?m8*(YvGZT4zida9ySWW29tKew^`pO!jJMLdc-VdMw z$)w&f2{qKmCgu57qJ@8zTc~8vq*Y)Y}Y-Nv?-g-RCRu4|@-t`+?^+fIfK_|bupUeltXR~D z+(hVo*1!gO@JOZ*>fx!GU7kS$Ps_F76PL_Y1U0Mn<_tJTafR7qPon7sEfS_a2G4nA zg}Ygob08fndHbq#c;X0IW_5@IByIZ0iZUqKW zR1Cv&m}!ser~?EM75CzveduLKMJ{=qM{Q&PM4sbGNQ;rWj%E?$*xC!%t9!tNd?4sH zb;GnlS@6NRUT;jfdaspjDAR@OMaoP@&q!TYm=co z{+DO{b?ji3{^+mcF205_;k+g$2@o@5W+Eg1Fc{>h+fLuPGwFyshfamybs(_LPWpMc zs3S!F&?a;?9CXPdB^C@&pgf(6x{Y|A|5GMSi=F1h$f-|F&-O9A6KLbNU>e|oeTYf@ zPT<1Vukr*R*Ft0kf3K$$d^U0~ge`I{f=}=>XF&EM%?Dl!5tF{ajEw=D^`TC!34T$XV{oG81Mso|U1aX8?%#LUDE)VNf&2>AMk@b8mc&;y-O8 zD<~%t0|`r6RAmaDLxGSWk1!j8!a~oJg{cIL^SuD1HcONtwa~+ zT?8WM;q%TyV=QdzjO(yY5J-q+c0ebh%k-Uv^GTI;4d~fPpU|;nY@aYbvFedx zaMAmcJt_&-mk>jd_mc6YRWByLxaxcK z(jO~Yl0GtF9RLnDs`UZVc;@* zehBmEWhg`gl;shtsXY zJ%0d@S1pw-qKELxP~O8`7{plNT?mX>mctYhbPuS&@=DN>l0)+HVE921ZumIL>OGPT z8)|UP}TUQ#WVUMieQf&Kq)$U%v&~~$tAglKx@om zECk%rTtMagj^$fGveuOn{q0h?EYp|EGL5+&vttjqK}kaFV-l_a2^|*jzmR;vTzjsE z>1al!O3+dy32(#8a7<9RCkoDnpftd=Fuigix56$w{!ab}bsj&SO>Zv|^iLqw@%{tc zH$Ze6R~d+rWdzsfZ>pP&=jTC^G6<<8ltPDPy5zYSyTf{U$94?ZBr7v{$DScTs8{AN zKxK~V-Z0Izu^A*g^0ABxl_M}l(-?=W`cS2W1*{;K~TyjBHUWP5HxF%ET+a_xoHw~+=bTA zZc$>UWB{|74K1vaQg6TtoJGtf5uCFd3}&n~g@Kf+J;%AagqVx>4b_$AloeDe67W2fi 
zn4m9&MTAyZ2>scDTY?Ywv6f+|V_XJ#yE}&D)=Y|bk!E?GYCQGTLA^1S^?`H5icLV8!Ra-Mic*nxYYEX(s~ zP?fa|c8z&%BPeFFo29q?2%jW^*<|v>LM583%~p6lh5plsVGI)14}6&9S$63RZ(MG)DjsO7a!mRL zDRWJ5;hlY*^Ta?UIC0{{dA~0*T8+6%AwP@9aD)s>FwlEvipUj(7?imJL$k|)y48wR zs5I(c!i@Sh_hDzl27i& zWGcd6Sk4Kz4T^hvnR@x={40>q7ret##O6@yDy=kP-uM+FGZ|D?odb2mjs#2gY)C1j z(?@fX0|~{@Blj()GgTwPQP`wCLGXbZmo#)C# zGe@Wo+|=KvqUwbUq1`DU$A!gkzSw0c3Cc}qwgkhVFyj>j5-uZZ%;da?sIfr6Bmq{C zeI>Gp0EV&}&ST%A)*?q1ns7krop0WDs2Dzitr<%kRq8&hKD?4CxVn7WRqq#CbR3IY)HXfDY^>8elbL zSEFo$P2)b^jptNK>ULg>s1EqdFJJAS$8if*rBmS-FabHX zEFq=R{W2qEDmBqNcqacvc63Hr4m|7!1HGr7y{U^2%a1i>%&r(oTcYrUN?R3XDd9XV zheC%xyW9rE(bNDKgEzrR0s$uzP&ET(Mwe-|v$xKARLla6r9+YMn#^c3oDh@&+{>0Y z)V&H07su3vuz60v?CR{wME)xVzzExGhN4#{i;CB9&wv>~e8fxRfqCK=gYqZ_B|kPF zd_y`L&WM|_G8SD-@^v|^v674{i3yVM8(4{to>f4QU zVX;h^7`z-fcs4i*VQfW31Y0-@4!RwHqC$8#n353)g(z4USGba+UQ9_WvUoN@{XBN0qQX|TF$_G_p#?95L&^AGjJPvE+tA<2`s!91^785!Ff76PYko^cns(! zkhWuwxG`=d6UgUa3Tg%i{;&Gn<{)%r?s#Av3W=aav8+4{pdA~z8hLClJHvCq)~JLp zQGwZ$#V^_zi{tIXH`O)4^Wnx!a5@;--Ri+xGkn`35Sd=_YY?gn-La2RHiCPi5Kci? 
zy$#1X>|&hqMoDoP63gYdOZPASQ5G2FsZfGk@05Fw+-H7e-6?Pxm;X_xHFGBgPeE8< zN0q5&;j0&|1`=DiprEP9?0KGjqp+9A4emuGgWU?~7dLfpg9N2cFSOurY&!6GOskgo5kZHF*Z z^#oC71q&z!>Ur?Ip%lA|NKCy}t(uo%nbU?+7@B_SiFq0JKHvN6s-UM(I9cSDS^J?b zdL9_eDPk^zwaT|mDcZnoWX|OtvOYW9Cef)j1KmDx6DSC5z(G^Q;r*hF>?`8M)C1g( zqc^vMFh>mpgKOoW_Er1+b*x6WqeX$1wY>w)BYbRz$Tfw&V3EO&f>)5{U}Wws##H4& z!oNVm1HdO6iu!J0t6@B{zzU(#^U9CoqBt~ojpA^16vy+x@u6Yiyl8nnPUAcQg?3U7 zIh!zd)BEDnJ9G2oC?ISI3yu4-0R#=15jQ1Fel1`JXt?fJd2MvtDI`u}R~Nb<8oj?` z#71ZLZGuuZ%obuk?m2iAmW+CtyULS9o|Ex>jaR8^TDKhn!GpUx@A8SVn^;V;XeM}p zG5vf{7!kyxtX@PY8_{bHvkL67;JFe5jsjhP*a?s2wWtehg1bz+d`p>%D2qAj9a*VU zfX-cvGSm_;7~0S+VZO^dPFgO?MdlAnB#^sqv-eX>n5+vkE3)c;x?o7SRQn`v#^zIX+Axu%l2MABQ zNWI3HIx|6(?VFGOt-J)Ai3M{rc%RE$M%j*3I|Fq-0MA%m@Wu=d%Do-Ar^x*qm>G3b z<6I_R`^8@1-6P!h8IQ@C!z^99~k#ku&hJl`wRN%rE{9XPq* zB+W~{LML`ZeD*1!(*25Xp(N&9%}Z+KOJKTMl-aprr$a^!QKDMsTYa}nW!19hC&lFF z$b_gimHwm*LG#`HVa>7^Rc!7Zatr!!HzD{3=5B_mSlD@??{}Sl=UZ%A?Bl$rkT)bJ zVC{@4-PI-$hyz?(d|68c=7?!hSxv}Q&4fpfk48? znV>=fa@}yx4sZTGk^*N0Q#+g+LwX8DHPgrVx<8ZhkmYHlI#&rc&R-YyH+Xcho zii&r@+6j8deONiH_H?IMIHt~6HLGLX88__0T^91E!3_!rNHY;2wd(=W`qSY6DHHsI z&?{lksey#8P$Jyllh9l_E-a=-Q4r1E@z@SpufFR}8T<7e9G^2_7|to8^tj33guN|2 zWeSBZeHIE`{;U}SW`I6p!CXIbQynAD6yxydPb<}!qBLKVPFRVWlCnIuSX)cK*nZxm z&PhJ(*z+ae>5Y`z;;~6GCHWYx_vAL;q4$s>ssai36XN3L@)qrF1HAxl;OQoIfK=O# zJLsv1C({cmmf0HWckk#Thc0c55Q}xye=0Fa5iMZ}Q-T@&B#-ECD36NqzX4ykG8A$N zBaGz+M9xUq4I=QN+H+xp@28clntn8y(C*xim@7D#utzfvP1-Hx;(W_620hvD#)$z@(%l;dD9&y7Yx|AyNo z^^2mO3hTL2^@HDa_niUF!%ijHF+vi0f%f?`yKlgIH%lsj=_g*HZ1)v z-o^nzgO%rwe+1VriAMlET3}kBQ=RM7od|1qX3Lx4iNv9dD-!z|ayoE(!Z^sMAtFYP zwTF4}RJ#HC3R5|76BQRRa() zz^Sn5er3IUdeu?mi6!m2s(u+7EP5Ori)Vl|tEy=4q605tpKN&& z-pk+mM&L#mU?>=19qO~LeH7z)_%s$du*V`xEnm<@O)Bn|-hw!$2}T@2VcI$HXTR!3 zjCxo{vix;zZK2()Mi^*J=#^@&*oK`1Ff28tZR(~aIV11KG>9oWU}M z2V!y=#VHQ>YzIEA&@fxom3O5W9|q=Nt{3h~_cg_0wH2Js2In#i&08qQwv(SRwb-)1 z68FlzC3yZFHf_;z8??`NvT?qh$aO|$H;>&pcKZnKo(<20vlYLe;5jwPlIyZ;Kv2+* zzBsWsOs*^$y$!r7`S)FRn-R!$UWC!StOzF@Drd7?jEMtp5O5$0m=kN8;XIl{Ihe&g 
z1G5{-KjlS#gS!#xE@Xq^7xq1^Hu@O%9JaPWJ!V1f;lfoULsXquz=a2?FStGmZ#)qS zs(jvftF&Q))sSnjHudmjd!!PCs_l|>Q`9ntrliO<7K%LvyVxt)MXh3g zDE1*NvaUXCa3<1RfU#paX|4l57b+N2CtHQ6Je-YtBbHdIv)K5Q)JC=LvD0pt)t6s#_u3Q08Z zDCR`(1Z45f9s~#UrSpJQ`1dglnU+k)=fQLx2am~O0=(ZEqo+~!@#3BGnn&4AY`g}3)cv#D zb~&=Hjb3ARFE$7vZxD7-ELq?b%7A^+_SSmTLm=I%mdeRtr<5;((Hd{*w%xs!U$-EqEO zwnL>Cj6I6Z$p*mSI%h!&uFr57rr-Urd@26x$?W8`JoZ~upE8~^TkD^%sUQOzW&}ElaXmJTo+{K zGNw6`^k$u;iT8Au45aqICpp*9fqH160|md+DL6r=AT}AOCHHjqeF-zukyeaClwSR> z(CZY_%Vz|=V>`$CIF=QqTgMit^b}~|pQL$oy5paKHQU`nZqINYc4A@Ztf-2n}Ryk{zDm7BS0{ zl|UX=egx&!lw|WX#b$f3w;}ih)PBHSCS%RU69;6fV*N9!<`rNC^*GeLke%?#1j-3C zeDc;xI8zP`;*xv2$2YpMjN68p3AE5BLVfn$$9-qPp9dlCRC>wOg6qk+W@q0SJHMqV zDc2)Q2JF&45BJ~)V1Gf*P5mAlh0e(`cDprU8%w6^M|m*#)>Of(P*8d$PCww|d-Yepj1K`C!_&O(0SYX2nh*vRE38a>@s1gu#=bf0qnihWAyitkb8>=2`eeM=)6Vx@2%f_Mpr+=*36 zJJyhyx!A3F2C|TeAT)aeoDe00o%1z8YGr(QESB01Lne$oND^V`0y`K9^Sn9b$x-|LF;036L z4+Q4lZX5j->n`VH#bfpw$@Gpsr{2q2m;s#ypmUf@j)S{@4rQUYbl)b`ln$g&`ur{rTDz;~2HMwOeOND~1x5tYa$)Dh5{%su?J(7m|l z@;N}Qz20BxHi@pez!)9~LGj*(^6d6Q6Iz9;QB8~`?A zXXTerjf=IQeXjCL1I7+O9>l66M>C}=F`NPv7ND>n*S~w?7pegx#N3<`=4PX=8qP91 zmoO(V2-ZN2VC6)x^4Z@~2v7u3rNG*0HOLno$W!&1pLmb)em-wbAd;Mu)(kk`D(4ajxy9wS2fZ_^>4oWrPsbw`ctMRXPnOOdQgh04q{c()tn$@#zgk^>zf^K=%e2+$ z63tXg9=-~HUwZgz;|TKC`$Out4<{R)eA%Y{_;9*)8eg^|=n5+qdE95sBdGH3n&~u9 zZCjH<{nW0H+=}bvk95cNx<|f3ZqcLT@Z{ac_(|2HH{m*UEng4Cn~*cr57zz(*RIE| zxl-oH#;)pjk6qVupx$!0xf49?fX(5!hl1YqQ)fT^>Mu$_`9lqI2}v)J_{37cEPn!R zZF%X5hp-1hwXb7Dm#kw%*Q{eikE~-<+Sa{_+{*Qj;&1L#jBd$OVRTc}7oW<<{XzZy zho`op^t%-c@g#MF-iZxM{|gO%Rf(6O>cLbx8e6J17%(l406emhs}`z^Q;~W#<5cvy@eLs!yF4P{y z-)j6FsLp+kpWOSLFM7Qt@%cQ)dCUu(G53XA{|hkhnGPcuo~88LQvJeH#x41Od5xrY zp`LtmqoosGuC|YqN|vc>-nyS3w!ifjCvUwy8OfH5Z>JggcKxR0ExrH#jl|E4A8a&n z`a+;slDExr)@#ns%w5&g?VSd%Xcu%mwGz?2o!*|%HbOWLFT~m*pQ2%R?J;J$f<``} zqfCNd2;=UF{<+ZB6xtms&OtLz0=R|2$s`~iObR$=EwyJOKOq2T8sh{k; zF^AEm*+h)!$DKZ&9*d@SVi+-y6>;o!fc)2_s;hR7QpCiyoRS z4X;2epL8F4GpfVCN*1%B?Oi-8yNhNP&di-zFf-RfwtCiSHbhu6;IH!%F!K;^0dGw;KM}hytr3@SEYC- 
zox1XirCo%`46^qz29OrFArzS6RJ-r(nSGrk1>I=;-pQ4<6XBr@M48M25y0jTeVKMl zN`Czq)wQ>~9gAf_u6Yw`+>9CvP=o#Cy=c>3E&^w@LGjeZ+!C4ltNu@V$6b>W!1fR& zxVJrr#;P@+^h)RUcOZno&=lA&hWG$n`gvG{A2nG``*)@fbhvg`ehX(D~ zc~^%Z<1fp&QGEr9ug&+ubiAo-WJLq~yI|)A`|A(=ysPrg@%7m*$ zUX+sWtDlD+6}fjIhr)X!2TEES|AI1mDNTNFlUvV?~-XLMB_#b;yf3q z;^IK#S(%YHxK7IwFR{9IDq~V?dT06>q+nn4*$E0>ngpgsTz({Yze=uh?8N3wA-b4? z{8}{e>+vpuppw7ZthVh>^_`bTVDdE-4T*|=I`(5z%IZx(zyv*LhC{>DV&KgfqNmZo zd6@VRQyPy=EB$6=16aIyYg14GSkQP=SL{7*>PsSrO1u&BS9T~ytTcU?!qJk>iXjR`T2RF>;FCg3! zw7B-d$>Bz;WrSU4gz!#GtSI$Gu#>1~f~AlZ>>_g%yGFSqat0t~u}?zi%NR7`eUvC- z9T9Owkc=Y4$bJZo;!LNq<^v%bL1XnGl11mcRL4e zO?nJ<0(xyas~{9Au$?W;7s?N7tPEU9RC4#Aftz|O9xo*7Tkys85x%GzQspou=37m! zZ{*J6TOF>G;X+WC=|f^9+2=ZlC|f{Tg)Iq~LR+0+E(69G=2L(3jvMEOy%0riunvt~ z9U<`w9hgFH9oRlOobJb#pD(?3L;|=Y|6S(mNSV4w`XAXRde`EGSbt3t3Ojk6WwW)1 zI9zSEOt2<_GG=UTS&N2+#Q~CwdUG2>PYV>9#MhvP!(~Kt!Gu{LuM8eTv@FaEjBGoQ z?E$KE2Ex3Tf+a;b(Sqw^g?E})jjp=IQRd#ROo}iTuUxRVwG0f#y^ev%2~;NM6ufA~ zr*b*#MQR|y0SXByt$%}G49ja10o14f}f~^4rFMHpE8?u^|%=Lu=?QB zf*5ocsEkEu=Ey|#z60rcIiCzrGV%^V*Hy&|`$;!q?5-;LGQ zAh19(Vrzid!{{KZs`D5iKM-U7f2B(r)0() zj<0nL3N1jRXx56gglg9erBEph@3?hh6tO)#cRmJ(8)gnT4IRg^0FF2DsB@#y`e&gZ z^MJR&zdV*0!O|F|TFJF$|b-Lna zvlAt?u9PF>-i$_@FyY(Weh9H80~9bvx1oRu3OJ&FdHa)(*=s@U0W&maJ?(x zy^$Bu81G3O76HrvHUSx~XoRC<0kPpj9N?WgAX9J{7{>^v<+ZY__&F#r4e3O5gn=sH zi)CQWGY~{Znj6aiY(|)s{SIuw*wQ|VKFic_c6fx z7O=7+NJOQ^HwYD@zzZV14U1J+XZ#Zt-2f65sm}m}%UhOLSyLeyv8cl~leN2op&(cV zR?5n95in!z(vllU`Uzq?KEhi(BvGjWVg))Q%mAs%(8%8bJSsjsUd`xT)Inq9!eeHj zNhtpY%C=yJueR288_6>hc3s&+<%C;1U(Rv4 zw z`Mnd=Sw~-%PV`YvA5F8Apo7kKqFGxK>#ry2l?_{pkKISHj3Hy^%d4zaot=}^*5d=E z8~&~KZ+JjDy-|I=;qEI(GY-g8a~kI-hiMU#xE52J=YoZO)t4JHuK+XV>9d)xTaGkt zqiJsvw`?H0KBH6U4cs#^_1$LBfKKc&XQDDe|MqlcEl+5kK)UBN$b-pd8LoL~*1xZw zJUNOAw8Y*x#b|^&rfCtM%IMHvdhT#4W5vMUl&%e`G+*BF@cG&%xJ)U zAz%IQ^famA6V-U8fBGBsem!?8An(QWZx>W^?;<-P3H~9d&8n8twEPsMhcXt>e*okz zTl{Ctl#>pqdoYwI7Jq`c8}EPIj`;e=MzeDVru+!uqNOGI>gO%j$v5Zg!`eg5KRZGi zpRYb}HeDK(ufA}0RAwHaOa_#8OzpWwFWk?-{g1##!hKf0dhzV2xk-534L$27PJEgU 
z#CWb%k0M-!SvocX=2YaF8TUs}<}$!?K9@5@%0%mh##PqVGNo`TjjaCRoI`qcochVR z^yK&!R$34xG&CXstNw^TT3XchbA9B^7hXAp>2_DO;QT`|kM7xaFd|}~pT9+V>x|mx zLb|;9jNaKK&AHr|pf0;GB>BwP%33oJ;MmtLeJ;04p0-E*@WP$8FOYQ^(GD}3`bz$4 z#V)*g&Bb^_X>Wj6o!|6Oo!>i>x1u0h?v605d(;=cUu}#{DXU-Gu4Z05DcvwujlVQX z+Pqtxb}5Gjsmm{Qm)5qb>n|lskM>qKUmBmb8E}rl03UbT=n>F4CPH`r*e%lK8_CK- z%K0p$95MYiXUm;GNCs)m81=5!WXU&1eWdknY5o}X$JUK$gRZsJ-_e#yjD05d{Pad8 zmTFTkb#vQfsduY-x-DI4&Z(>&q$aixkP^nI>wTjjHV*X z4y+!}QC|pU_KDsafZ!^i#Nny}NZl7nb}JX-d>MK7=cva+^NoCaTh24HXgtk%W&>pm z(Bivtx z(j2*FC?_M=42dHzdejBsu6hT!>ToxkhBwc8OM`-2U&d-7ZN)OfeCdj!ZO+o}Qs^QL z(u$09b&MJ9u)#Ut&|Wvv`p$!{EvwH0+_iXqe^#ZoZI{wj&A-jO6~6BZ$ues9wu8Kr z@rYKwPRi0A?nDCzPlvr1t{~=G@Wm0=^F`u%3cw01!}7~6_EfUkX^(diz&2!Q$2-y8 zz+i4HW$B|6fJu8aQ+p(qGJ9~(r=gce(X;6M{TO;zni*CQTMx(5z%Uas0T?1zw`FRX z@${C?oKZV%1O~mcwtg3nIn-9CmE0t0$F*Hu=woZnH-5-3v>0B-?_GrBj+(U>k|=tn zr1p6d^`UFDAChRPlsrPavnvggT1nf~l`^F%f7Nz%r5tJf4DEDRnjcdL<7b2ZSkbQC zl1zWMvaCHE!c+hUQj|G_SU+QHW(xI@xWcSdx=}h*r)^H9+x3cUkK7?GK8@0@+%F4h zgVSg*jt?wKqYT;9tm~6*YJRgt`*RvimzsuZ7t`pj_zNV*3Yo%wpus`%6PXY zYOslz)6je2ERb_80>d zCY;T!t86@7N6syo^d3nsRcY}K8ZE8F%hYmmXXoK;*N>9qIws$P&J5JB=#!GW0zC0z zqz~QG8HXGXi?L0DvjG3Ar=BX5tG(->E;*geVZY$uBN4vCBS*l)oG2e=fv=&jn%*!dZl0v7*huDbnfhzs@Ei3u5trHwLXc zbZ{`w*s1jV8`{g+l-zY7R4H>YW6TOc>rq*I5elK3b|jm6Np59pOb!{y0aqkLcSX{V zCar@1jpKDB527*h68+?t*lzhl7ic4UQTI4_&|j{0BzQYx@}8i%dXb~s(_-zHI5k>= z$FUdo&V}=}lU8A8t9ImX#+0t?gz#H(25fjMi!JlAvqCBz%+l(6(PQic_53X-C?)I! 
zb!I1MxT%8^#D@z+9UYusq&qrztrU(?*wH!9*D%vC80sCTk~MoS9UXq@Bi$?Um_a9; zXq?T*WCrl|7Iu2;XDRY2$Ho7I?m@+<|0`S`?dm>sOj`SpmYGMZrG*b^oAV%;Z69f$ z=TW-!{YTpAJW7_%e54usQn5V_Ea$~U!ijw!P6*~GyyfsGG{}C>dL!6Gt4^jan!hjg zz7YdyT?pGl7}BeuA3lXfBG-K7Gxq{zfR1dA=QQSxT$cy%Zq#QdVj|wr9RvkXfp|af z%E_fn53_oXYm~C6T}$srZ*^Gm(^~pb>CnOO>1?rq7CU^ogV0Iepe^?8*vB(rN5?mK zMv)iCrU74UtqaW23Q|p79Hk(p4RjLDzQ&a(#?`#2o`Y)abJnAjP8ITIQ$FFMD$7-hUes(qyyqfcNDKi zgAMXz|Ga;PXE9Ej!fWv4nyi=arC(3cuN_+Xddaa>8AxwSx4j{ub!SjHU?eLmo#+H) zCNCU04;b+ljvj(4`4ivoS85{nMf!b>a)q^kVXYDvoLM zB&08+A@;9?(}NW<5$vts*Q{jiu>(XlIKR?5@5U)fx!tsXT}7MdTFo^C5%4J(yI&qE z-(5RAgy#N#KUse04iI@|+)91&0!jyuV9FVl>#unLe)F99wxMKGsVVEqOiH_L82$e= zm0GEUvH8DFrL^V4DUT|(N+k0Seixp*WV*gk?5@wTi<;e)&lH|j@#OsxeGtQO4Slu+ z5&9g0z&kWwMc#f~>xK*S+jE;MYbW4YP4pSKzIn6Oe*~?zB)z}w;BNO(`KUIiMCzh_ zI)cn;TRzx!P>(cWH2I+1Y*$G79qKAawbLWQE=2N37|e^`ZM}0O?xog#+QzGK=>0eE zYlBDA1J;=nnIj|*?>;^T`TpR2?O&s*SbFq*EoThPp<~*EV<aoR2^GWAUecGNozvjm08D3Hl#b5=Z96`YYf#sVtT( z?(lh)2J9Mt70sHkb+{6#)K^rzg48%eRJ?=~XSG!P9;q@98-0 zj?<5h%yq?4Av$&p$V@GX+Aph71D|5 zObybh=*&LE9$65kyR+%_Lgn>)`Nk@T&zrmt5)o6xNW@w{(${8ViRVac17cmd#LH&W zBKhwje<~1bg^DeV)$S^wYvp6ui>d7@K>d2{L;;N}90hC*@b9^ zeVl$UuzJHyJBZ|X_ z8Viky+UpZ2J^dI0o|#zITA1#yP0%}nBasvFg#`@!zIN^PX_R9P;w_bEM|z_p7mjM_ z6Ddt80#+BbF%v0e(t*EkJ2)L46cd_{+2#1@w4!WmRgELbGh8QdHumVV*YFzljLH}( z-W>YpBvHXjbbP?#Wo^(*8W48@C)FIo;nIw4oK`)Nu2u%1p@5b+i6$fs$K38U4Dn~^ zGQT*)Z1wCcO3T3f!4>OygDtC*k5rtFV-y`E;at#69H|(qRZgN_G+W==qdDu$Ns}am z{AH}G8eP1Ht4U4_;D{4Uf;fa|fCf;o25|$G)tE+#z)VQ(@&A(A7)Wg=s?bJGCLAQL zEuBmmiH$OE->Q@=bx#j_!?af>Q%|MQEy*Xes>zg<@r!(Ullz!_%$*4V!RaloX3a2# z9vl!GoTf~>_dWM}UV!Q=;9K)NZXu4PX#ItBS};ff?ae8akxp1imY)wKApBUf)4kI% z1HtE$?pTVSYV@3QYA2`Aee@>FYpUV2d^&sr_=jP^wLL>-AMa%2=JJYc`Z?f?X(D<| zSn*;PkMjk@X_%`Lhgxltd*hdjUmSk1_%*sWVJqM!InlXE&cnTMz6yuLj|jck^GEO? z;OMX!Zbz7KoYDIjAI|J>M07)})qu`Sm1C{nqceq&^YchqFPud>1srn*>3D~zXhu4D zw5Vu8S^)V!fppSDQPF@je??fIOoSd`eUexjOH+@9kD_J0z?_P>A`a+${a$O5_EsS! 
zrxI3l7{Oye>Sr}T*1u^rg>*w_L<1tHiZeA7t*U_RCcHxko;#+^okrtkgjK=MM|_Wk zDqw)@wc|K%{g5P`lIZM}!!=!jgdxx`Q3>NCzDL6nstLopDR2I??a+LUrjtFRL7pCY zrTnIdxTpMW+d*Dp&bu#f)pc6Ibm}rqL-`pHVM zv5lJ#1vgVXmMBK|@YhKeT^bM9K?sb*Rz|q{FGN%n!Km*d?4{X4zjeI z_4cLsU<>kjBM83}6@KNt0Uz3E_KM}_yjU`|UIN1gFabKxsT+Y5@p0rlgn;)Ho`t`W zlS#eYXRxy$QYC0sP~cp+Q_z`C+3C&RpWPR4bp&dmXny{z!EjmBl07?COx;QRX@f?5 z+Ta8}Z4ghBFNn2o5?Yh#3~MEEWC)(rUbzOl z)`kpD3xDUJ0Rq65;SET8R2p$~d53Quv|v*m?<~huY5~R_%cSgqM89=_xtRW~gFhmD z2!hGp2Y>5;y^qx}*zA2g0s&?3qZ|Us-p3jUD0?6H*1=fa{orpMu=|mS(|p!Kur`7Z zp3rSbJR}o#yLjWldG4@B^$HFrc?&y0Q z2Z#@zi_rN!%|3^EB{B{9y#0;Q&nswi=TP@FY^f{clLZ^0(lX1hS{GkFWSs~IyMeUz zb11n#?<0zL8L?p^{9OfZInyOm?=*ky1u+I8n+o2^oJU;jwTaA{(vS2lnKdjJz zBeJ6KSWhy%SN;dQ`{es#bby@!aIUuI23k0DDd7HA+biT1deKu@Od5tVXCMds{g2GY zu3Fcq*>A*w+T87Em-(v;l;qb}baPRZzuqtkuj@|6AtJ*BzmkG=HVg`rjKAd&t+&GJ zuTB0RD0pO#j>2}J5Qj3nFD;2v94=yQ57B{VhgiC5x4mRG`#`v5I3$F>wQ!b)7O(B| zaA(0P_QH@K*M<1!Z!Ua~mD;sni!K7*6tL-tT_em!$DTb3TAe_7{$4_qwfbPyEc)Gq z);4_8!+jHVvvc2mS^L%$e{BtF-wC~TOnd1j${lwsBA8HSV7a@aEaxcDzv$6`SFwKn z#KYi8VgAFB!{EOx@Lz+b+ze}TJ81HnR(LZU$hV5Ey5F4HGblR}Wv^|+TSOQ-<6}eC@y8*e z%oY7eM3ljrpaJ@!e01oRY6Z7Ycg=YVJ#FcZV@W=Tv9xL%Lgo}2rFFfP2F$+vKP=uc zXtSwpr(p4#VeurIjM(UB&}z-t&|-ka+lYGnT3{w7(sOp;G(}9N^YB@uyR~O-r7wI> zcCc1^p!@mDJiCyxwc3I7QD{`y4&7P>kUj#9`!A&B(5N3FWoyNU;+8?Y7_0c;on2w9R)lAmFjg_JRuWAC&STkHNwY}%_F7CC8#HWP?x$UM8&*}` z(JE)tRbAuIQTF0!S%a6!_65!h&(;S0)zk%ZfBWq;!15T|IBZvZOxrLCM=375owE6p zJ^zLYihQz1Yq}MSG$XFVmx_+b+U47+hwqo>W=X`{+=1rU+}wtg&CRVy7lIgbk+QkD z328BiaRX8ph;cnqHaD}8IzfzUkn*>A_)Tmk23wnH6ty;CV#IWf>2vSK6=8F8V+V8d zzrNVhLpyQ@jRescS_9l${%nssu_qf{L=qp+(n2yHpY8ks z_05s^-q+0(tK}C_H^mNQ2WYny(KdvadKOcj{e~ES?RjVxHVjM%9<*ipA>SrqpJS3% zQcPKcX1w)l`|~ZT?YavubK&ho@k2g7g~kRJrv|c;1kg}BfaQU!i zSwwkBcSCoUqEZywi#T{&wT;GBf<-hjIb*lKws)IBcd@5~nRq+E`2T1hE~5TY{9m+_ zi)gU)=Qp+XFQpvq|6%Vt+@dH8 z4VXp$n-ZEt+cn=7RFNiz_8VXcp4HGejUSAcI{>{b{`uz*qK1_6% zb54@~aCBzYNoqe_yd1%2GDE8#oyo*N&CN#=k^Z!HxagVr;?dla;|RxLFlk@nXadHb 
z{QCsrQaC&&N?ZZ^-wR)!Q80%r#PkhJ)o%q?L^`5_NKT*+ydkndM_KK{-5Do>zXrn?e;V)HgfxHEJ32EzJC z8#{xWb0;m%;1~G|)k7dEKZf^YcO!f(&>6fL--n$**)xyTbrRuXh8pT>$8~gpZxDPl z6)ma|z3pv9U=PIjEEYc?V*IRT73&268FuZ;k9rZlx?L~MZ%2RzFpk~~Vit|x$9A_G zR@#T1fai)|ygPyLCWKkc`4VLnUtD+*%5&}t@ESEL0LUIy;42H)p#X!6D^(C)i?9w6 zK!72_xNZczvJ8Yb=Gavj{WMB+w5$ceV3wuZ+;|E<&S53}RmKTM2Wt}9kdj7=x*--! z06y>w$DYapB2J-^qeW*vVqZ5JXPXljQzs!K{1z4GmK?*moP%rvYV!x91uX53Qv91@ zpo~#z)|(96RV1f^y|aiX4X?JSVBmIGkO~KS_hep_Sf}CXW-5!bywAi8?+TAt5+#UZbDJ ziI(Aau%t(Wi$r7h${H)S-r97W$K7+05=h)F?yLZVsn8il86Er~cnyP#1@;9A8WuHOh zID^VQ?*R2bz8cc4*fXqzNOBxu*3@$NQ-q5VmhSKhgo_YXkhTWD58$`RbSLH^Vh<;} z!>bYAiSQ+0o-4~mcq_to{9c9M1^E5her4un44l#Mg^NP;V+GHyIu{y>;qHwOYrsRT z+jVxu*5P|V8C))E7nTFX3f&PdgYYZDc)Hy10CB~j2hq|QT&~b8DI=pVC|N}ZL8{ws zLW%0<43;40Zv0y~gHMC<7)>vX5sfoh1%CmcYej}5H%RM?uh8|#DL%mm^P z=8ATEbxKK+g&Vv&#o_PVVFnPh(KX)b2(LhxJH5A};iJ)I@r3Zo$l+Xf#sb>%mZ&42 zEuqtIiHOYJ?reJfU`o@xiZPSQLYVux&><_zWIRi0 zndDs`!sFdxE{F9bQw&9f_rhXFc%D}!JAs*9DXK7nTkgQugR$+17l?b<7&`lscVh@Y zk1%(>CF)~V)e|J`$zO`C`)PZoD$amZZ(>BAik+x<3mV#*f_pKD*GU32-^|T@x#ukD zclBzF!=2oX)j?xBK^9lEYmO+8ucyINXz&xSc1|L^I1>@vLtl4BCXM6hzzbfL^U+yD zggbaar388Uy7TC$L!L;4d8D4kdx1fF8dtDj1$ozrus^~EZu}gXhbC{4DM_$m9vLjW=Jh z%tD$KzxmnYY4~l&@3Z(l6pPYn?1HNyKSPpbgJCqrAuO3+7={BU*q<31*oGN+bo?rQ zv@KWjH~8x-*L-cht`GJH8ZM0pPAywfug!Nf5Tl`w8uV8%gjaX1>f?M4q*v>z_!61)pya)cwyWvb8_#V-cGF z(Nxh-n$BYV;Wy6tz2lr6Nv7?`@^5DfDMDVj=DKFhTTH9_*e1=0*XI<$Z^0*xqp0cPEx82wr;CV}=O511 z?@-hQI`?DH?*ZC-0nL_}0PnlgS|V*H(m0;epFrAlq_OxqZcRbyP_J@NA}!-^zW)sI ztCV?%>GVu=Dx)~x|6Q1YnkEAVJc=y0PNwH-T02Wbgq^|LRGtkm!)Dv>1!u<6CqIft z`QklMUl=a0Dk!Z_A)Bx+o?S&<-xu)_{Sf6c$GqZz&aH@<6!&n$uQcm@aX_vuqJgtT z8xczLW<$K4S46+e2B2mY(FsJ$BO}Q)N3;naG=>4?Bf@bE#2ZZ`_JOzFq(O7UhbB9^ z!QCC^26~HCBtHO-5{szK2ck*iMnx{*@ooB}ZUC{`hXG_k8Uv_0y4llppqOR#3#A`E z5Oy((Hb=?&Vpx8=xiHPFJ^PR=uMg^pfTiDDa15SbHC@^MCc}rKxl}Jxn-5_y^Eah@ zD2|CO)NwwJqkf{7=8MeMALCh=X(+}`wntGyn4gm6a65$OBg}7&aacw81B55(dwKIm zVy3K+wDuzi{o@bQiC7sWo}@oN5^d$`6%@Y!yZew8G-v^+X7CC+-B89FnF_KFl7Ax^ 
zN#=zjAjE*`qEL0Lo4Bt~$A#ikS&&B;7m7~u)0z}ZAoAF>{Jtax!I27mzDRV&$?cUz z7}?r9idZaCYtPLqD7|8b$qeuij!sHzci3@t)0)uY#UeU38_Ap$honfh`hvm3DvZe~ ze)+8xYrx`gRfz`dJU;N96)^ zAyvQqd$RsUV_M6w{9!AEQ6z*eFDTuJ-t9w+F=&xR6AimVgRM-Pz6|;uK#VmNdyyUm zVkzI3zhb4BCFJU56u(;Z6%qOAt3^#i$hC9-rl>=dw^qC@+wP&*FVKtlJ=FILK&s{^ z`Ke!s144dZNNv6nx8;EpYO)SPd^If(n}1dK)>8F(tzusPK9t@k7>p?;Ag7}^S}EB{4Da# z-7Fd_q6Tmp;^u-}5I+A3({|IK@6f~K-87lMukEIV--%(7N$xU1D02>L9M2^#6NobH zc2nf{Vng`b82liwH1>IF+)Y=%7i}W{fl83ud=ljsd*!K%JhygHrytPP)m`}me-QCn z$f(o*##Mh>{G+&On6tQ`bP1h%Mb;$8Z0KtJY2!vwKjQYHM=X_(%snji^<8v(BMfI# z7E}BtF-!K}Nt-r_XH0|6y7*%|H#={#m?C}>>tt#&?fD5fC{Ous-uiY5h5szpMjqPX z!a*Gj<0e2i9AIFgXoX=E?x52@V;Ea^&^>(pe?nenT%RwZ$jyNNx*gPaGtxiZk)OI* z7}a_gw!1QLWn)hjoZ-wKR`fEx>lAo@l9>2@cd#a_DcWJ;`)CK5wqS~Wy`6e)LA~R5 zK}wJg@d*!QisD7E zi5{%N78+XO|M0iUAa zowE0cn8vtl!q&@Qt#?tp#|rw}EubtfE9hL6ZnPEQO>R01&TIh&A6h-5d{+CVL?8v`&A_4z=2B{#rCHRF+d*!ZC-JOKdfk0bQ-5m5j}ThU{CS;f_^aOt(~f{0ugBV{Z!yf6=h3&tqG7wQvANSLZR`yG3g&PP-Q2r3 zX6&gAxh39?#)pOv&G2xc=@fEQgf)VFkKO#xn2Jp$@idgE26M@1m_rJ;nWSQ+jKiYV z3);h+661gYBn}0mVQOU&7E#T#3u7@?GyuqQ0kY4uXsw#*3yePoAY;&305S$$6hOu% zSW&CsnU^*m6?Mc=Dn2S!W_AXP^9-OD1~Bol0SpNr@^AoV=m+2S%HI?Dhd-7-H9YlU z{!rv+8zK)t?BVtd7VU!&&u$-3Y2j%PySEbc8BtK+hPqLt8)2j$Ni_`6#(5~EOLsX zdHHIGY-rP99XH(YMT(EW%U=KT)$RjSY1mZjzS?QP3DHo?MAfwDgy@}_4y84l0ye|J zj}wvyn*wHmMis^Bh(+8CP{uoWLxHft6T_ee=@7!#8u_N@I+Q5!!vjt9z)Iv4 zS#Tg|6d{;2pJURzhOjRTC%V)0lVX<$rl(KAIR7J%PxsfAxgX-&1>`js->!9)xpVMs zlpZ9y!r<;bOv7F*FSGTU;-lL;q;^bwXz#!e=^AksViQ@#t}Z!)*<669>t|@$hUsd= zY!aBR1_s3lBh)E4P)mbc$@4XsuAdV1UJ5Eq8lQyMNuh+VYjqf*qm!76(dbDKezWFZ zX7@D7EuU(}Eo-!3#6@nmY8=-F0F+xkNBn2-;)8NKY})h@(_@<2qUk@SqRY$bH_|jc zbo%RDsj^73Xdy0h!RhwtF?w7sJaxL_ihS)ewdv!hLud>t^dIl(m&rWc(7{fQo0)5E!<4YN^I#jAPTjDQ1D=YEx^>0Uj!bCD>6m zJ#j{KuNGx%1HDdDamg;|oiid$^r2g4gbkJtFPs(4LS#sQDF~Hx9n6&o)5kRLtf*;e z4FqXGd?F(Xh;NCn1mc_X_XsQqsqn0b_VY&4)dmQ{?tD&Vb zcOz~{VP=>N80%-4AAD?_6rjxgmhb?}e*)uhPr*~;6EyO?s8jpI_x}yi#p4${>QEk% z<^J#Km-8ZgNDxMXgHIEzIZT*RbNk`dTt)yRf|0 
zBJEv}fB`mlP<<9jNJF15Rc2Z$mlr8E19d(RX(#I(Z0dtx>EP+>J;?Kqx0XF2Tdy&d zd$w3{_Qi`|0^e?!FS!m-v1O~t0N2;8m8{|hA=^x>KVz4>d7(0WMX1bPiN96&Q-Wo- z%zoA+v$vXLI{tJ04sr#aM@6^-a+A*ehWXx;+k$1^3G_!%VhT(6i#C6#>U`uH&NT%% z23!Qp)8O{wI@0onNs){8&}O!$&Cfyz^`&ult(SxgCB4kpZ2b_>B^4^VcXp+;amEIYi+1j9PiMDJb^jh|A) zRl7y!V&+gYST5!m)}E9jDKP@TNx|B4#%t}t!7A1sKTAqKrMHv$McPD7-Oh5^Hq$pG zu8JBFCh1vi@I25yWo8As!K=+5g5s}=t+nh0aZ;>+sEv z#<(?*gQ7P;?(gWm8ec3me1C0* zd#RajhY0()Em?YUOm9TRSe^Hm0zK`4@5Ovs&*ZXGB~a=IO--j zW?-)I3S>d&=J4FKgHl+rh$h1alBxq zgr>aCeehneQtT-)dYspSHPb#+OBoN;0a&y8&2qdK_nw0}6pt~WUw|X9ZlHsy?;V~c zVKeB?HPJC_9YYO5Pg_gd^tLu1aj>H@*WYD!_#vAeHl(cU0G*_6H$=0(62|L(z(Let z*JXefAgL@|sT7Fw2=;G7>gfqv3dV1Dd>OxNBg6Pj;x9yrPLej=5K*GFzCQMO0_&rW zEzhRnZ!O*WT8l0qM_|70jlMjWPy{S~Tj8h^%)H3ru53d9m>>8vj-Qux7wv;!6v z%?2{S;&SkosGUhL{PPsLj6$wPd>%Gp>uA#(Igbu7ZyzLF>&h!aIih&2D}6AD%pqG% zZ7>d4KEa|KYX|teia+iDzOFR+*Sg|g>2f6|;M#~snOQgUAzHT5yxXFFJGSRmtOqrI zbRBZAjzJb8I6I8w_3YoGuMP6X&o<|co9%}_8o^~3k?n8X<6Z`a=NayD4A*-Re;SMU z!Qj@p_3f!E7+ToYkN`hvWI0JQF3~}bjM*GkBaE| z9^dJ55zwE&g21*QKXp6m!U>A&V7YbZVBI_Q7&eoDV7j`?V2CNNe{ z$Jk3dVEO17gb_)qd%>LmNtpq|)0?Mx7q6<59#-ul5GD?r(Uvg*s1G}#Tx0U z`f>$b{X@hy`*juz-Z7YHO5r=;5~+sN*`7SdFyNe&!P?UpFt6Ox^=bm!7#~%$i{||; zYKR-O`fu^H@S#f|8yirqe?-8@1AMp$0Z75bz~spGOb*}w2LY(MTL9XP{xd-{fj|IC z<8f8tRX64e@@`jCa)V^t0)}3LZv820mjDVaz>2S6x0?)Be2Kqc#TV%KUm{a(dO!#M z5$yxNfr+aJXnquS7sAIa*s?Ms|%I>OEMfUV7)hEmSk#bz7+7c`qaNybpD4 z!HWO3pf#qsq#Khcw-$6dw}?>NPi#(Mg%?fz(|bSn6=(T3aSu& z2b}mkx8yJ=mF+8r%!Ga!50&!oBZwQ!VeCq-L(1~eU~Cj-9MA#boTYa;r^s$fOS zDvAPTIWFEEcb~qk5OtdDz{nNlh9exyD4RDmMb*KG(@mXhzr$R$N;g-n@df==AtF0& z@G?uS+N>z+A2Ulm1+)3>ZnM-lEVXQc*O9+{@N6kPUny#2_CQl?KBi#K`LG$LpUV*S zHnM@j9~z?mj_%aOa?4%f-^w9r8M^Mj4N{K2*EFX}#G7h+H;9)o!`9IO#5eFm zJm=&5Y=UAySRWJy!LleL=&uD9`K+a`j^;)~p^z~VtcOi2P!yw*1sONZr!}5IfiYmwmbMP40LYP5OfnQOwLH0BidZlsv z-(S)ugN*gt2^pM;@+@63$cFW<01ynmI{+g~sTUB|aS5XAPR)$6VUH58ENsbi#4Ae& zH|E%-S0!NVsp>v{|6%_f-kMn7cFpk}EjG#z+VJgMr2_gZ`1ND=@G7pk+y$aEXrhr; zEWj22#8*O{dWXM1C^K&fs;ng=yAIK%PM12;W75fXP5%ICyx#nxFR0gG&538>*{2a^ 
zF59>07M-B-+b)YvS**}nU_u^HXs49%^0f;3OUkHZeh8N9@ixxfi}E}qo_=hCH^2=F zCc0e+zvKZ$i0i&T6JMdbjBayfmWBI1a}njX3Yw$Jh|I4suK#7A*&p#-egn#Tj4Bh1 z2^p)gpm+>KxgSZeot)(cbr<~RzRWT&DDK;#G_}0WW9_kQu1nV~|?S zt0}gdWIM`dzRE4xhA)qUo7{@{K8SlbF4VR8vAQ>YksG8}&e!pHKBrQ;pM*lkD<7LV zp2h~nnWBkeyyDUsY5KGh&furOH9YmR4cup5v*RjHEAs*nVCEJc>*??o2w6kZBIWEQ5F&KZ;JC1A;vj7b9GXFgMO(si$!kX-{A? z$ICAJKZu*o`W`G*Sa*2&?Fn|7rO=1z!jvg-eaB-O$tJ;ND9h!JqkJ8dQ}8eko?|JJ z*afRl6(~2Ng#+q>JJ9@>Am2^5<(S@Q5ai3=14Xz$!rZ@O$mc}alK^t5U4#89UZZzN zy6Gg@KnBDR{ zBjK)(!%Vjo$iwaF;YBjj!Ws6samI~up+DT2^iH8rxp#Pxv~c>D?lO8s==e>qin-&z zAq4yEZ3zRi7+qr?`!F|4Wb~O7v!r9(U!kTA;cOHblb5}VT z`8=kS7ht{UL5>;mJZ6u9>1m|-r|8em`%DNn zpqTYoo*4*lcIV-7><0K2yYqO)H5K8$7^^o|?+5dKLOhGB7DYGvkJ4_TB0HAGz`Z4Z$PP(rLoWK~V#Rq;d`JvTiU1@5C z6;rodf_!U(`Q+~J^Sg$~CL*NX^?zHkd`?qMa+{p=Hg&Bbo5;hYYb*?v=7=A2JO(aT9Y2B9@g&;|k{f!xv=^DBbE+Zn3A`h3DhK`T+%9DUR z2iH(+O_?CKuAzZ7G4!9--n-?4M01>d1K01~`x9U8DuW zX4Oj9YZOseZiuw6cJ-tUda~DRM6J=v8>{I?T^TFYYznU@6Cy@sc|iEPBMc&w2ZW8* zP;xzaKnAR)ghWm`EV!)3R5X)4=owxOjBWs+={!N!|S z+X6Q~=q91oMsk{@sdTXs=0xTlift^TWM%jKzKta;_0&srw22&5d*akb)|wIMdW73r z^Blc_+hp}xpt{R;=tTjMe5{~sx#tz($;N$+8$qgAsAERiGJh zob_pBn@wLKn_TXoopG3XQ|Qk)Sj~>3ux9dOxgF%uOs2|Nt0*E~Mzo&-6mJ2;<}K@G zsE~N6iUOhUfHW||lLiq7*717UK7ftWTd$%~@fctCRrF!J+$670p=X-QCgB5@^9JbZ zL)%ZjBnl(NInCvah?RQU?~coEaf$C0L_zsFHIL$3$Wd~@a$42`Q?p+l{n|pt#tzJN z(NRAPid|Y>fn<;ata2aqI|*1F3iBwkB}jJcDr(;n6K?1fN^U7<$YptSwI%x3I*)=9 zWVpPPOR)*^Lur~qn-b)ph)tkT{gx!)d;)OhIb`ZyHb^4rswgRu2%m6!irqFg6JtZqMu9*%J>AGV&+B4Op^g3^2pAO@A~LxbLC zo}oy4+|2Xf^8B4`fI|7yWcsVEY%e=p$WM4e-d1}y1UR^RzoYBSo0vS-o_BGHC(>b; ziB_^9y?9z;*N$r^6_h%Wj|Dt#ea$2q)LBTyLq2_5Z-4sV}CrcSbx_<^49ge~vgrIgVL z*t)%xmUNQKAkvo8bA zyhI;m?u$6R!vT<}=?kaI*^09d?MD^jOZpQH;&G0!ZnAUSw2`^mOq@r}fvJ=jk%x1r z6*!Yxso6%X(vnB4cArVP45w&&H@PT;A7u!HVpCe2!IN-yGmb`fmtQB0M0B+=u1h`4 z8vTH=sjw#H-?5mKygK;-gFhi3W6?N~p6LM$pGO>n^$So7=VfY1K{?KI=Pg>&0~4kF z$Mj1NfHU#q{1ZLEAmrjD)U~%vXtN5(C9GIxO6Ns(<`UPe?1D-fp`jU2|17Mm()HS$ zCjfaem(beYsBF@b{GGiahX~x3g$^X5`2;2#m&tGp@WdzAT;|FWTG3a|k)?|%;VJn` 
z_#5b4b>QFAZWGk`*R~7f;R~)3Y1ZN|BA^tMjRAikvLl&CmZOMdAc-8Er|GwKP1YyqZc= zWlTG~`y;`NC1!1D8tLc?}NE$W~vgk^jvQX%_O!k@V|O8Q=8HcXCT`RI8tr_r~G! zQGh8CCmDQv`y5^h*qst9Vo_1*hyr>x&9Ceg=MpFE66t|6}jNx*WA&B1)r(46ZeVUpJ?00e@sTvALCzW}Yl><#VWdnH zvGn;!X{niE-~-JzfXQyarmBix|4<7*qjMwC@))WZDG!Psw0{%|PNWN?C8g;8S>YfyUHEs zczx@un5@1z<$Nn+x^a3eKQhW;t8YsA9{Usn;^Mg-zPiTknB47zc)^XYt_i$SoJb*K zu#|PDmSg0Tz{1Ee@;%EvZV+?nKaklOYOSFQdK!iKHw!)esrFdeOW<|Lv9ep{YNNF* z0Vy7DJ^aFZG8LR@iBb^nl?XJy9)z6uSe^mMlZ$ws(pK2Vn}I%yO%*5MIKj-%y<-Ub zQif{#>_c$j=?Oy*ba}{sw^d#ALPabQaRU#C62{5o*xwMR*jgB1StSfpFUYtl<}6linr{u%Cf5R zM@^K2ggZb8L{j-Ad1&NGj3E~NK7jF_w_9{iCDuw@)K6|8?!sjivva4&`0cRq@LLYt z#@d1WY-(YJtj*IuO+2E``Sy9`t6W-e<^;APHQ0et0QH+J_hqg?oMPQ) z*pFBK+3b(o;Wn1)@%s%2-0?Oo<2QD{kkO3qKO8XZ23h!Fg0_6l<(gqA()4%X(dPxb zvPwK}#P0)BvZWQ2{;z!R?aCsWB1`?bqdb^2h<^^S*}7?T+adc$aUrg_9xms=m8*6T zRxaF0PkhZ?lsZ*LKFk-W=R2-#ad+C2&u15{nJW7yMDU|b%zBtve6#}1bV5cZV9{kgJ>Dl=rfL64`FnX+aON3-zE?pzMA%Vcyq9B%0dSqMwhuMxFiS>sN6YX?$|j&N2smm6w^eLo^|tvn6vb-Ou#I-bTRDE5b_QW@ zT(wW3C9`BgkY_wrjORl;otPzGHjmN9#7)E40x&jyOYIFFso~lo#!^ zXEp>_tDUe@=qh*FsplLBc-`#zZ_SZ?MdT-`cu4?oDH(B0lkgB24bNcw7|_fVi`bY< zr$0dEm+bjLb1@gBEtXEt0|6ZwOr{TIY=6F+rD1rDFwf6yVRp1bI|X*+i94akodU9P za^-gYJrZBeAB({;fp~a3`)N9`g=T#S72EqZN|-O#%8j=C)AMDHkTa7hd4Y@y9|EUW z%&Tf+2`z#w?^;4!tHzHDWQSIjT2`5W1IQ?}IHuW{sR;}kfG*3;4(6i*F34{5KwD?6 zxSK#pD}6$#!$KJv#QweZ1-pEEg>0ok3*}q>z0p~A>UC=ion8p7+ zlbW*vOfJwl;YJKKA$W=B{5!DdPII~>H_&@wc4fht>$vyZDE|t{TN&Ayq?z9!HKt3IJK23UFhL|HE0dQfLR%H3P4om^%Eeu(`ff$(qo^ zgI(V$yC(H;itAgIs;PQ-i0fN5G*iN_t_mKEa(ydD zyDE6_rt4cd##R2oSk0g}FwXU@9Pj#8P0)-S|KKgxw`!v1qlYsmxqeinyS|l^UEiuH zny;S0;rdogb$u%{T;Hlp%}-Ca8uV|4&GoHJc73Y`8~i!F%5Gq)$f`^+)ZlPMsv&^C zl|v0WF^zTUt% z*B5Bk!$7l{6>1zd*D8 z1)B9Q&>X|*Kyxkr0?p0%3pB^-;h_e7pp51QddA_d1fW^(G0?1M1ezP^>7xye@%2VC zhyRV{uia>dQM6_WuBnuTR+wRcq9{Ju&fq7oqq2bm2zIFgZqU~xWOx|)2lyO`K#E|;rcO>ZXs zJWlx7FJ!D1>)_Rt^Kg0dXwl*620-GY;IUyE4L((=KUz6LmFT(-Z^4v$B*(qhU1NLO9H9! 
zM%gres`Bq{S7J*NV~f>vBv`NN>kotM1sFH~T}e3`W#1m7@t~)Icl6j{k`vuxp<%kw zB`A16!+kn|x+K6Z@w;zh1|JR{D+}OcDXQ==o(YjP3>FnC0hc3>+@F1o0S~0rcUn2K z@lR5z|0XEdBVN^4Qp1J;1*P}(&4zQ6tW`6$dQ7=BU^^D@s6u~uc~PupZI8BxZf=tG zYT$=UIs=aXfMCf9^?#CGIt=?ND#AgZ=Z*B^~TEy;sHS=M9~8RO7j(`_J`*S;^^ z;usu&I>>xPv&{eGuWGdWC)vS|U-?$k+G>*UEu1qQ*c=evHp}>^2n}l> z8=(~|);bWewDQ`iX5^Xf>WlSbjdGDsrua~Kb2Z!#Wl>L-J%w&=mfeh_3?hA`fu8sU z`rH(aUjIe5!v1~UFY+rm3Z$e09Aiy3qV+IVB4+^t!Z!7>aJnA$MLH{!v#78DHWMtU z^FDbfDYTaD?t-x0G#EGJ5YNoNutmX`4{Q=cU$!52SDM4N)oQm@=F`mLaoYY z-#$gzzpolLj`WRFhU!i)8K>M$G*kHW23qHoZ89ZB!`GH%^{5afJ;}F0d5o>p=x1+jcopq);sDg@AF9t=nky+g%})%zh?Mr zsf9rA&;mz!Ev#Mfm!w&_4{#!>*t%une_iwpAQc~LWvLIk@1xj``LwW}!ruw}l^}cu ze<$&G+$T$^+@mPx_MJ;Ujl6q&E~}pKULRrIkG})>>w$((!;$bA{I$jJSo}4_Un6Y* zVUN*9+8b!2?6KNt zdqeF_dz3cD-bfp3Z>){8Cu-yEO|%L2rrKNfIBlZ6nKs4VQghf7v~+uOEh|VXpKNad z7vT-WRC_BeXrvHBQac-#0ja5}oeVq|gsr1tXsX$mmTECN?d!Ee_OA@P?VB}c%6g)$ zvbKn$Z@0=iTJgTD5<2v{Pgs3utfqTuulUZl)iqso4HgJFa{hsEV|@kO!|G=!Prb<* za>^Am^mmg2&}wzgNWHen`XK=Z&{c0tts>!$p14i6tM6%m$&t58dU>l&U1eOA!DSC_ zll5wPGCj($PUA#%yRBC-Vk`gIBV)> zTF&gxyQhxI|7Dj95RsZDYvXb{_#5xJrD8mg4Kag%tN2ZqcgybDWW(gC)S^(n+&IPP z+t@I`2Y<%K_BWQGp~YzMV>GY`W}!YIa)1$khLBYHx)6%#6xxeVyPJwj$?;Np*7S`K(`C%POmjOy<}Eot=R9{Ifg<`*8i=nfhaNk8n7 zwVHbzt0rOA4aLI5N-e%cl>{xyQ+1zmG_Yl9Y4$wu{4@;)uylWqq`)+-fo-V9$sDzO zuYB4+<$u&YLK~r@lIu=&*FD5tci*~w^}2`Xbq~?{Y5nN6eW-f~>Q2=-nWL)q$u4zg zzQ{@~i|#WutR7BJSc?EUwb;%W==J^59DD|<%zG?ASkD-iAqPB#hf?l-*(m%iy=-jy zB#i3@M8BPIA+%4S)B9!n8tEzi+T@gK+6~w|vnL_UNNRcjx{aC!8hb!?4Nu&YTf*Kr zcx8wNJx;<(akP;39FXxUXTE=p4@ZceZZG{YqS2Koyl4G32g4Q%Sii}1pEYJ+&d7kl&c z-WbPDK3?^OJbcqAEJ96Zjf-M7Ky~oZJ{1}Us2Zov9iX0mz8-h@FVSgBf{MS zoPYry9T5XW!O$-*+p9h~e+uqU4 zwi#M3_`4OeLa}}aI{Efrd9gFE$DSUuzzkXR8$!*FLPLxR)JLhUn1<4TqmtTrv&lk; z%&hd~?HaSp!6N_)t?*&19LM1*ry^LMuSrSAFg1celx24KCjeO`>0||y6@@a6$@ZC` z?$}D`fmK{kHQO5qc0R}p%MZ^ey}=^%ccgg%)9{Fx+68w1V1EKypYdNR;wh-Ra4;vr zFl8-%3SLiO?r$%jni?h85;g?qZVKwfBQh~c+A#oNJLEP6lV_Xu<#0X{7NnN>hUhqw zE1#o*B{JSG&zPq#p#?OrL`GNpAs#FJeOwkE(S-_1WY~Z%9lE^8lD-nEjL-7z3P!A? 
zX9Z?s&)UVPOj@K_F^};Ah6$6a7XV~$JsvP;Ak3g=(XQQ-+W!oY+un%bYlS#fg+MzMbID;qVQ-%jkMjFiE4JwuO9G%-{ql z<3S09yh`+nX{%wiv=SwgW(RFj$}`F1aVBbC-&z>!psp>*H^md_4P^YR%% zNi_`y3b_b*;V3G+t1q5iF2ZObCqEmpLQZ~$mRyqkWy&*jU*uDltXq8pD6U)v)O=LibaW)S{9~|I#L=_m*u*_bOhIB!c_!o) zCaAZ|rA0anw5D9P7lY|sIqdfaliw|wn2-uG+sPaPGUX{qk=sD`J-35Bj_JPLiFk=F z`S#<-2$e$E4;;-f(5PE-ELqpUk(rf(f<;dhUWIw80(D0WE(n;#6^GgHZ-@EPA87a%uGRfv zxQT+v_KWb)s=!ObRc+#kGD9kq1$9}PQr!j=({Kz*o zVN1alQ<=-ED-n_s6D8oY6y!XIboP9jgVFfo zH!oR6od=X@S?I{Zd2DuO6bcm2+~9{O+b+c{`~YqxAglZxyf-!;qelI~xR+3D#HO}H zZ!=1iL+fHp$J*)wrq z1A1H*gjde(z7MF1x!PPutl#?%1;XkSa50DGaKwg8akLuc0yo92^{thuPblHL{XG~W zrh*z~ZbwNhvO=_Q&Zzp&VSW{ssUjvsq%+_rj==RLDBj=0p(F#Ik(r7nE3vcZ$8!V( zo7ele3?-R*CSYRnka?2&8~rmFUJv>fl>UNvkbS0(O{ROM^=vQtW?(V}!RofCLka{G zV4Goc4V5iU#SL|oxjL5(1pz6Ls0#9AWDiEgf1s`zpn?ZgXK;7jEVzDwG;U)t{ycLC zbfU~X#hX(kzJ!9z<;-X(6vrEaXE!*40rAl`xQYN2IGt|<{Rwzpg5LTfZpn@DXGa8> zClE(E0Esh^pUIOu+!Yd!XY}>7dJa}Ua6G3!$!VE7XhdPXfO-x#I|jj;-UkUukZ9RR z_|ipO2Zjoi(Ttwjsh6}F+mTBoIWu^~SK4gvhp(6vCCqx|r;SX$PrDmplrmt2@f+!CUrXOcnK!rOa)82E)N(76Z% zG@|?h9e?9@XbO{f=+rwH-3Z{Ad1DXc)90Ns?JVY4Z?-mL6f#6ncP8v(Ab5kQ8hwlt zc~s07QaHowe@BT1sZd6xb<=`DTv1Rx$PwK^V8A5^O$koIUvHg%x=@7o47gF$(Fk=! 
z!$gub2Rwje5NXz8SN#^GB|^5yf=&7NRhegsv+?6wP=7MO8w`ntM;!w}#R$s(3!|I% z;5O{rvb94vJ->~PUBc-8vKeYICS38%XuA ztmo>3k^6JI2SuoAe+aT8Rkdl@33v*Cm7_KiD31c2#oBIb2k0wReNv|OqN}RfM84K5 zKU`A}2-6VQ6kNo%iuYhD8hWw^{bf*}kq3KHPotU;cJ(Khlb)+7P}^bxacADhpKVmf z;h2_$ulg)3%%AsF-}dnv5+ed&RdmuG4*MALergeWrT^QYX36r1Q?HMM=r;Z3CE%6uIo)Y;b#+ugYWIRtjol?={rVKu$%z zMjP|bo75Z`#Dxrye%VDv-Msv{4VHe7)_P)m*Ix&|knH!&_=;<=}{ zH*jy@ITq8a<#S3IALsMcw^y`R#`~g<1YD70Ef41(ig0``HL0y8NRdy2YO7JO0<)HJ zeohO^r8%|LCVjbWhK(=EvQsBc=Qs`Fn;;if!$qUB4@ZMO@d){n`NKHmx8{?KP-FYQ z>`t%sSo(AB_(quB)v;QPcn~9MO&ig@F~TjM%hLNb6ZM?NbvhoKK4yeZET_p4YHCFL zAhu3}BkD@5elU7t5>^1OwLWw<zJ6PmKjxw63Ez1gm+Yj`~!X@nXRlh8{B^^^WUm zM+j}Mqt=)0z-{ZO18bkh#W_!wGuUD4)^n)%eR{sGT3>9ZG5CZq(_{O57gNHm0LrVY zexEtgM=@D|2?@SA3gSJp{bq=p4+-)01ooT|qf=xSpp|_;M)Zc%(A4K6$K?Q#?trLn z1^*a^{H(@iH$(y+gA`bhD%KA*VH@Vd!kMSZEKf(13QDIS9U#+L0NHt#O ztv75inL`mX5UzpWTqA6=X<~h~N1$+5HyVkR`n4N6S6@w(uO(5q zS$&x~vRQ2lm;xX4xfu}K(3$p_)mZtyfo_}CoUSTLLX)c>2PpXMLPu<2MdyN2hT?c| z88&5QYvC-!!Kx&EA7nY!@UY?FozA#atd=C+)>wv-kY!b{=?1p5x~Tx~hy_mh6fj5? z#HpoM83v@nlPb{p9BqkGYiIsB?N0;(l%&>ANMqpz%=*U2$X2CW0PES+vdgntxwgo+ zwdi!K=40})-O%0vtP_I~Z&6IK;;usle^;!xw5|7}#n5WDa;t-nZdiRJ%X#8>zOq! z1u?#bKP#%|w@un%!7sy??_2+add@+nI-dftpp3x80UMmF`YDUPq`Vl_l6i;s6@_C^X`t50{0rF|ph|9vwFO{_u%O5- zdVu1!qXzx-l~h&7#E3P6Y#X4NrTmz2hPP1pWP7;HY>zfc-Z*#JIYPc;4M) z)m2PPeg3H`5TJ>Jg}?5_zRzVImxz;(`MMT}iKN@W3Gxd0oVO zOKXbX%rHjW#Hkhb)OLVFOzLQ0T1eK@ZfU*ot*Y`bta4T}3FGs=eS8M^^^rJdQu`>2 zf~Y7~t@GlDs)9p2Ru61=oD9FOQI>~MRb@nZA5p87)~9v{Lmz2D@y5y`n_lh_Ot}oh zH4o+JiAE|e@#YU`r0x~+SVxL&qW&rjbhilvq(vR`qnfICaU)O*)V0lg#?K0GKYAlh zwZ!{DHc<+PYr`ElRM!pkV{Crl7rvSm`x-XPSD?`M^_6*QyEv10)Fhvx$v=ck_! 
zpwIMuGnM2d16zlYEncmOJ8kLlYV)uk{QeJPUjiRRvHU+h-Lsp7BqW;~NZ8$6jd=+#p~U5_Y*k1VNBiPzgS7eIAISJ|zn(5;-(NFdhgsquz9%7!F4{a58?VS{)!GM%FIJUk-xFK+>^<}!|FLZkNJffAv^a=Xasid zXqlBP{Q6KNjMZMRx?zs7;2M(;8)0rDBa1e9Cy4P&@lLVDVt5e+YFl9-&jn1GNQ((U zydlJm4Jz{fHXK3FwC_pIZo|Wxk|IXg)-}w@fm@=z6N06!TaBwbGOJprLD~#xh?{j)a*dweF_NY^_`GQT*%fP|_Azcj< z7MZUm739k1ZF&2aOAC+z7>j^(ML4)cDPC$cJ6H&XGN^*?6Yp4#=tpEK+^T-^Z*Wn1}52P|L4$(WA354Hf?JMvaR#55`zal-?ZE}!Yhqe2fF zFg|wtAF5>lsPusRtRrt5(+8T4nyEVW^1W$9q?mV(?Ppqf9~pWN595V~LYaII9|P;b z;(PcgHMO(+?jAn2#kepEuEZ&k>}LehoCIr;AM}(cg5S!5C`jG^U@t<5XeS<>n2yr# zo3I25y;Qu@A3IEfm(`eY%(2=3Lh)nMG2oz`x8zfu_#|wbt?R_QpmUu%L+wUJ)XrFc zmKfyx&fKaF8!xwX=7WP`fR<>?wkhDIKJsQ~KEBB_h9W#9y9|MXX%L=*lQ8oEglS#) zd38W1Ir3iK!#Pz``dTrskwaJkS7YdOjobHSUYt9`?0b*!*O|~2!l0MXuyqHWoq-?v zB-r71m_{3J$+0zHs`K^9?BpF+)o(=JSlzoxLP9}>L0Cq0sPq8u<*Ry2gh1yY#D^(naXM`Y% zt0Oc(Hx z0|K^n=Uw%i6Ww{XdVb9=YZi}V*3vcPNkMlV7O0gTrP&7bO$Ykkz!W>9MCN4kIUIo_ z@|$e_ZHtG+!}=_y2V}$hbqu;4x?V8^z9eFgO&!aQBM3WPR`uY`nolx%-qLDwG6cyk z?C~erNfSe}+odwBCvS;^39@_grP?Tbc}+K=L7o}p*`9oz`e2Ei--}OGH!yj+7vMfu zyesKGo`Zv0cCGG>rAT55hW3P!fp#x|Z!dy=6;@q`Wg$e$EtqaQ)RtCJ5*|d}^ySg& ztL^3OzC1~kw)eUSv-K4PUIsJG53y5h)p>9gGQZKu8F~-T~`}2ez22k1%3~m<2 zMnmxYSp6P0Z&-j6U|k@7ZvqWI8o7nv`N9(@PK1D9_X-m9&$gEV19%fLBxp#9{AenJ zpLGELUVS@B#tr0)0#Xc0wFcp++?6j5n68$8N1(xVk zFHH|Ke=n^%=zUMm=si8BGL!L+T5j|bp4KG zuP(yEm13F{67Nk+pA7sF-DVWYizpv@k7CAJWLB|aGmDv6zqFQ#qj^G9jN-N;kvi?o z4M)xxrI;Z%lhRLqI+3?<9<1+P!9v#S-#iAEBtsR7&4ywr*egmH#u2}CB(f;2DSO4g|du&KqDc%0^pIog~O>GvQ{P}jDS84p6L zX>!nmyp>vEm5Uw(ozBXX&*2(bfGUZ;NURf`QpGu$^1BCllM$y|6Ahm!&hygn4wNAp zruDLJYHC2Iqkn~w5cRy$pv?IO?}y+$wXUCh#S! 
zo6MZZ!;qBhzKJ|H3`7#I6A6ZJQ3C=AjU}Se2}BV@Ad#|eB9H2aaI>#yJc$+#qd#W* zOku`YQ{*B&pJbS{$hR)h>0*(>XfqYj&~?51V<^8)T#`d=JWaZ6Ji$M&;1|Ju&o)hx z|F&^U#BE`K`bp|b&d;zVLWNQ6x8-RYw1qpext%Ab(YkXvrm+|Bj@GTD&m^$h#ja^} z+Tv-o+FXM>WIV(4YO9ko?ch2Crj;>x;0n-cq7_3qt;)5JaI12VPeGnsA4bHmGqvfpz5e8G%+@Z$EDUy zUnOd%uhzd=hFOi{yY|TWM_vdkC^R_QSuu$>jT(=ZF_D=N*Nap%wc<0NIOp_j$#Z$9_&H)u@k4;VUskMD(Q2#z^`gGg@{ zuHWKw#omW?#XH5`n|0Bz_c3xmlj2wJhG{DgFu~juhVfTchq$o~eEq`)jq z$5F~5i~XaW7p_ByLr&0=D*?n!AZ1}Sw-8{A!WRboF2X$t$%pWp25+wS_e}iufTgf6 zaIZKPh~v0gAY5r8C1Y$)3QEDZd-$G(WU`^)VRX;;Z==(s$&*ugYmHWH=`vy(Zx!dr z{|!p+Ni7M=F0{*Ih+c9(!b&UZz%5xVWzIDIH8;c=c6FT2Ews)YGXoxrwq#_Tq{8=q zso0FdyX9&;Mq2q-X21iQD`RK!Ns+OJ*hS=8{}h@e%%O7&L0&(n$;~tQ;MkS=CQ`-m z14vD0JNWO9fK_Tx-XPg5UZ6I~ms4i(-mS$o*mSUjB}3jXSa2y^NxO*q>j039hmAy| z;!xa&1cnqRvCv|9auy$`2Bpi4*?ddRy5IDa8tYwcMu(lvrwC&v@2fhDVKg8t@T1D{PPsnKaD6R-hmy3x(E7jnoOO;zxJP9 zQ+9&PkbR`~5R|^^X2|=)yQ#%#a?V3g7too_4?*o7E-N15tpd?{`Z^jMTYDh`=kjWJ zi?7Y)9o6|YGIbu*XOSj{;c9&xekIauJd=)@srZQ2YBy3ntbILIZkfkZwR8CWtXw;v zN6C}(uqL^_Ox~Wyt)ZRJ{mWo|17;H<{ugA+`Mj;QMGZ|X21dk4Rp){reMN}q-cOZF z=EEPpe2IKxK0oU}Q=3^rdP9nQ=3(A3b`n~1qE;HY->xv$7>G$?jrAho^4h~ZEMgRj ztV9vBx8Vd#hp95A2yW=tuF8=`z)9y+xuA%*v9v_dv#5#sm5O^CK0A*EFa1u!?{Oq6 z!sV$VzENE_P|kPo9(4WC!6yt_H!!PuV7-;~Lg+AA$+5&fnL~CatPLr^m)3{WZ+Ba) zBw=uhTup*O==fBWUJD`CD@;+YYD71fHp#pNd_de|bYQNnKzOc`#n&`WkT_J;$1COG z1yJo{<(UP1V3S${J94eGN(IZk9YOKn2{n|sPEp#(0SkHa_^C!e`!tw?3XJ`Xw(4`a z%Z+YeE8Mo$NC~DGHY4Q9wF`N4CKf7GLMhq+N6sq<+-qbwn_I3K?O)(e(Y<^&_$8C< z8Y;JzKQvn&TL>z#`^mctd6T%*F`m39cMY!UIatC`el`~zGUFnVpRGu)8*OOf*9OlQ z0!V4@sD;aE#r&yIzoj*s44ZT%5h9{|UVw}&;mMu+Ce_R}%+)spl2O#={Y}~_{nn)k zlrI}T->Pj*q<~ubyx9Mb&lhS7TavQs`<#CN)8~I{{a6X_*blX)pvyKm%;pA6!k!76 zKrwxPqp!M#(ESf+sJcN7s$1yqsSZH@0S$GoA+)~g5@GD54*m_o?-qe>9}YmiCf?lH z08eDaWlExh+=Yo)z6VBUw@<;9s_3ZqCXr1-k9G$9t7d)xN8}OCo5Lz<9(u^&F!A+} z>Y+O5J0BM2qYOD!8kQk2<-FmzAps33fF8#S%zD=x}KixG@dXOM>%^I-wQGd-d9`4?sC60Cr3 zc9QFs@L|E`&J@m|I!=Q^@$AYH9v2_seO85MuY+o7&8gUWV+!npzkc}ZD_bq)u_{uM 
z4_*pAdt-*&ycFxrtr?!m-hh-@6;oQ(5{qs{8D9J6IO*8h<#okBY#@T z`>TgK$&6*-)2*;KF2maOu}*TuGAtV3=;WzuHtHhJLOdkZ;!bk+G9DMa-TN*V@8)-s z7nkuS*kdIP%Xyq(dI4iII?2@KsQVq5L6`GX(_|Bg@_58YGd2~mv&{y{9m{!}$Wc^} zZID)t=VnZD0+ZRWPVzS1S*N2fB<2-cTXDT*Y1L{WaKv&fHE@h91G zIdU~m4*FP8A~5T@LAACr`N(SCK{eau-qoPRP`kXl8euKfiL%Wje4)4X9gpBurA=OW zgfCF4vnKC%6ed<;wX$aVD+^ z3N^LD)$NxKR~GE=QE1JcV5B4meK7>?Kzh#0Gn&)$45PAvxx3MGziBlkK>t1T?G+1a zgRo-?Mo-6YCB_}1*V%YlO(Ae(Bpd~%C!-=!-YS_YYNc1y4i&+FUVZ?MtRtY+;h90@ zXASbhCwS9P@TiYpFV!Wp&(6uSPk@{fP(S7`2Zz@2IqblTb13Ya=@a*D^quti2O~7Hs{qpJNH_l{jG<}h=U^PYCBDoFJ>35QoT0k9X*3kwA(;GHE0 z#$S9>>Gv;8O4zVR#^E9>yC6d)Z|W>WaRGE(5El^Q&e(-OgmVKe1XniTUx804UWM!( z9101=t!%;qsdECwjspNJ6v$ZL8`~o2J?4}0vuOTxgIZqk6vdvC>A=ir&&Et_7UobM z)@W-I^_`J+r4yr%b|H-0Q(3&j4z)ZTmC$qAFGn(l7EUfy7_kM2!aFJ}TWsNRh5oQQ zE9@*bd-%h%6{Hv(O%%@n*3-681uob<#>F(huZ6Oc-zDGkOi}b_ zTcB3vwaxp>vz2)o!iu5oq~*#tw?Me^+4Arf-o$wpm?8|tf=YLF{WE9m3G|#6FjfHk zJ6v9?bnNX0_To-|1k8m4bKl34CVsA10&C07ac{P3w=k8I`daVJ?038H;26L#7wySM--c#C-6CgffWAALqI1eye=$ah}f(OaG00B7093ZbXb|-f!}a zjl4J8C~t1$FE<98bFeuyj~blUuF2Oo@pkg$Had`MKrXmhRw8*;!B?Pb^Pu%wRg z6p6XD`M--)LfVa%+qd&Bk)2S(Irwa8cr{>^;uL?_EU$0pNtv_YxPBhLX`dbWf{DHp zQJyeN<>OJF-tR{fBtX+;zvs~XASu^9$E^{s|E=Zo04`O2_8bD$u5XsNsmO+ZC=xC+ z|AnR6vCVSIzqmDKUc(}=A0T~HW#_XT*@9m{vHl<6^%<-<2p$1=5&+LQJR=whoi_YF z5B{8f4#E(>sUHMb!#+@%`!~y^=Yj57&1KH>aMd)nlpQ}oE2Ah``aBnbL!eCM*2gpZ zBt*)u{|pPJPoL+L)z3m@#tS@By?>DG{Q_SXTZ4qN`$3459d|DX7Wxqa7zCR>u zGDZ0bzRj;u)5%wO-$-Pn^6bRIWabA~DV4~y%5JanBzs5wT>^UW}$ z^uE46`@s6)z;4kVjU1#je@;PQNGQ!Oc<+Gxw%R=+aYAFFS-ZYv^=xjQ!GzYSju3)U zX1~hAVzU}etC@q2;?xMZI}+Gyfol0mvSPu5K2zR&mG?3E>wWU<$KO|M(=J=&;Mbr% zUbo0Cuknm~=2PDs43vn^(BIS%h-3BsCY2%yzddFX#Z4nX<@SK`kk!KK{9R4LGKu0s zC`Lq~36a)?jStq81F8~?5V=%bm~gwAT-DPsBhx|C3+&tV6QF7GbzxZ^&(0Oc`=2kK z=hw4_Z0->-LWLSitL_DY=3=!#n#n_$UCF>a_t@N|G!xSM)`3}C*jE}_(~rBvIm-}Cg7myhiQdq;zj_7bgEr$2MhP$&Lt) zJpAPws0!j~I<-1I2wp|Non|su$FuXr7XnoW_19cyVbnZnTQ+!YvY?q!GIhd38t{Ol zH=KfF2By6N)bQ76xEfO7j6SU+>KK&^bUe2feZd~U5PWO{Uh^_5g1 
zg;Kg8s{v?|#cl+UnnBnN2H&@^!&dgHwd(!10(e*nD^KM(Dei>gPk_sY$NaJ0 ztU*zhd-iEz8?(my<)P!{X;6dDs^+6+Of!@ueL5)jb2GIU@5tmx249k^y@uak0AH8k z1E6N9+%fE?BVp^0!S~g3le~2*Z8?DEDEo@Nbv;TqS_f38CBj2L` z_u)OVP`HoaH|?d2=9HU{IeO!{g{!7RfRx0j5%2KNp+e^0i+9cP5)kc&afVOjQ{dB# zrKiPyPMm|5w;$a2Se{@;&@$N_t@swvTr3DhUTdKZjaBZOj`M=)TN26y zI5o~eTlD=}e1CG3laAFkqf(|rm97sRBEHnpicrse`S1o<>#h%VyiQpyQGr=66YqEr z<@NJPe|NffsLGN2d~h5h##v#Mann9G@@ddMXGCW=CrVVxC;IAif?QKkX=O>Rh=3MV zi^-fKZH|_l)XcD@MnnD(DIbQwt}UjX^YZCvcr37`6k3ruyt^AllOt4Sf1Re2y9O*# z&Xmgg4(cE1zJBmHtH)XyNMytmvAtO(K2pR)FyfV2Ru`~MX4)! zZ|4zaKKcz!kRZ4ipsT93JDgmsybBy%M2s&Z{{_S3X+d%n258r-u1a!%MPnrZvFk9Q zta%a$DEfSH1^}V?n;Hzj9KHubK~oH2#HG^7=NH8OI$#Q8{&XG)Q(SZw)0h()X%lo6 z8bpvI{;<6V#4;6o*KHsyI^d0nF%k_B0nwf}DsKgPs}Ozk?KG|vW)SL`TCB0xMq)^S zu~z}?-^)Li=b`$kSmK@HyJNOnr>S8Zh`oGJYHaUz>+uA%bXZ+Gaos zM!?8T%wW6A*t%Sts{pP|GoxFyQ1_9AQm+`OH<02elB zyd4@hK(JMo2zGS2BD=rG{hia93k(qthQP)w*f*#m5+TP?8I4xQyY-d#cq<#xpfa=G z6A}RmyN~AuE%1Y)}UM@k>)1mjsQ|^cbgv}>WIQU1EY<*JJG3g8ujv89I-NU z90a`)AXG7t(Hd{%Ls2=6OgmJLNy!w8VIjdRFq)uYw+3e6paKoQzk!kBT{KK1u41Ig z$4K8WybFiQvL}Cn7q-;l2GhHhbx1j3<#jVoyn&MXITCTWv>4159JQz7@ z$+9sK@2C-mMn<9$yBAZ90HpAekqv4Jb%ue|s4S5g$t(rmF!DC}h~pr_s7X|inD`1} zf<%NaDj>f|b{zoY==wf=9AvBcqNGqi3DfueM-9Gr|HJqH`LFLM3nv|F?5p5)RM2pq zU#ky^)iDY^{|d8W5OY=bitCj@v+oT3ol;)W;QOZu0EmW5bsp7&Hu2?dD6R5|rtcw+ z_SSWG55sfN0Qul0e;+}Omt^MqJZ$Vopt<3AM<+P!)s$74kbA0^sb6i-&OEe3v-uz( z0*NKuyX(3iA<{SiooO(xAL2KOu!H!0Kd$f9$<7~QZho__tO5oK-LnI27apLcH3xE; zb};uTjSLqwDr*|FGm`Lu5u2mi5tS6hF-Ifqz+Shl!VG)9aLpOPl&**AyOIXq^(8#u z8|?UAwNgPJaQ#qmHLWnicx#3M(&G;+)XZ2y(DLEJcsX_#0PGK1H)HWbKBZaM#!_*& z`B5UBafk^!+<;Qj2rw`$DO$v^f%;&?J7!^ozbLV5HBbmoGytgi-^wfA^3zeCrdh=g zs><)#<4w#lNjs%$lS+9nRFDEOTG@mc)8fbn_C6sU>M6zj0O%e;O713g^_6tA&PT+g z>}1%mI*$a1z_*=&mBF}DAaO77#YWfo2q&JU;+Y1ST(KED&NOJI4VM$EfQ&4xZ)jOU6J97}x(&|8wFq6knFCD~qj&=v*?G)c zjSP)4XoRh(`+FeJ0JtB=0Qm-(=?>3AjW!bw0Y!Jil@2*eK$+G~FR*8D{PALs@09h6aUNP+@=b}6)Tk|7uw1317G1R%EFEvsl3kV_uc^7?waM9wHP6gAr> zLu-+|L@v>_8AhiBtQ1Ms_r?T7G)D_66|^-sa_*ujC{Mj5I&Px#%{eHX@I-ibaQaqg 
z92mD@{Atzn6!?hJ<-q(ZAK1fV`)z*g&()d{bGaD}>}gwC)fcm%A38x|^qgQ04u;|A z+)pS^3fH&L_+IuyrI;T`e>f{Y*u&GDt8pL*N!)wUH?pdel%v%uQdfRCNt@I%koxSk zuym_CQ5hcT4!IXFeg6aU_W;IDbrX-;QHEw}!sWM6JxG^3oGaLuFj$T;mR3>v07bb_ zJPO}cLL!ox^(zEQUQkLO)ET4>v`C1d89X3ZaS*)q3U`n;s>JZ82w_Sz-=GA!YDh_3 z1BI_Pn)f3eTZ2t?GOS6|FfEaw5IZh_2hQip!Joq2@*I-SlU>1QWqJlYK(=dYi^|;w zKah>2Mjtq({AV2VYyciMod#FSxr2lchcGo$km zcGwZDKw`5Zo7N&Pfs8)gbdl}Aa-Kf2l_|=OJD3lEVF&KtymR8OAA3doO&lpl6#25m zE|jK0B=y5k1wDFJZudQ6-bb5cx$n_c?}v}bYkPTA3Vrw^9(jTJ(*gy}Q)vKGk<9#r zH%+ACAA4(<1w5GbQ}LAYuS(j9Y4}b3Eta!C;bBfH+TGiJ4qlB1Xj^eTkb9p2vwqSm zpovt7kPH|QMt~%VmJee5ZD$j7oqrwDAa(#91mpcky#Ea4o1l(IPzM8lv!Xc~DPs!w z{X3GCULAQXXz97ydgn0@J$qzZeZBbr5%FZ5!KT$j+ol`)qC&;K6g#Gtz&11s`uc1E z#?H8j27CmaQ@^e9dwlneP*y=cfEH=YY&BozIy9=E*@`vAzCdHPg&LKc83;ngN=WhN zisLmo{WBh==iDjrm(P91+c>{O-Cfa%{28Z_AGs&>=b65c(`!mlfX-G79F?Od!ga z<2}ZN{%u@n3XrGDc~5xZ6TjeQiYZ6*I07vxrX1rPicJT$FL(<()*bpL162<@S@*{# zgFtVjB?XmNQ^Rn8Dzu3e@sAAgj}$=e3no894I`LbH(6nQuGkcmL4-O5mE$y_=CbrV z7AP-$!NbFkqrCv^?@*y2lR1lHT9RLdsL?Y00B;t2<%GxhsK7MY=%gHZ0J~`d3sylHv2{v7<>iM+MyHvw0R6NLMA{SF*C?;Us&B_<|%m zurLzeoI#+An-uL@E>7d#Ru`>R)Ag<}9HMJ&g!YC0JW|`QKabK57;K|Z(BDw5zX&jV zj;mr*g{{638V-lvfQ?14>XL<*DBNuSSy=2pXpc=rVi>RT{4=H-QlQfm<*3ICBghN0 z;2`hZ$R81trGce2+`dzm9pn)zUK~5f)18}i4+_ZHK^A9n=Klh9FU!klhsMC^j%V!^ zr6S84M9>D}t~J?y?`klUlVMmevIUPILWHfvg&91F8L{IPV@lk=-(Yb;;?jl_kLRYe z<|C-biSW}Kop`ttbMY=9-UHuD2F?BgOyDmX=!g_@81ePWsu}=AjnSY|10LfuY!8@! 
zG2kFFXgCx76IE zI)JHUv%|buLlB z0>lTazQ=b_gZYET+6>~#3VgJtUSBZHnn#>Z25l0fII=-JiaG5K44fHi2KDh1oNdqw zwxcyzbC3$MZHPw-NVGvXS8N`9bNfq%qaBpSefy!mMsQjftOvd8<>9Y*WTX)U7Y|)5 z!G2M^1Cc`3{c8`%b$HkEXNVc{aMG^*Hh)z5oB*+D0Kf0AP=yWN`fkWWU-9MvzABFC zRm5ffxAVR9H7w1Zt7PxCYHoUNtO>^pMEG_$}QPyq!8}g1l9MNTg1(@M}Jc zB-#XBqWwK|eDvQ!$A{{n;{*G71sa8pBh}|I67F*wEAD-uGhZxv=f70Q`teoDs@?V5 zz9PYqiFbd*sINN0$F%fTP>Txuit0rgt+SUv7mZrt}TDWs2%HX5C&3$th z!8MDh4^lIJmZ`@O3%5KQ6E_U^2wGA|`$S+~ z#gJr~a}05FGm_;axFR_E?PI)E@GJ0L_sc8SXnXAoS@Oy;Y|AQ?xIxTr|9>H>jMpe3ZtPXq^CdV`)EBKrXj#k3q{re0Cm-bSAE z-2eGTzV|KP>u>tOGZoq3^+)`{*kkg;?|8SE$Kf<4`92F*%(RYL%*^!rUK#p5!dgmx zkQv`2m~F^0ne#o*>D3Cwj)K;)NegGqAJsk)pWrYdRz4R>L{(tb+^;Z!Q$PS|F68|m z-ISIvYWYuqbn-D*6_uwG+csuh4w0>Y;C?JXcKU%2NHtRXvn_}!P*@_C5FwyXg2Nxr zEx0D*O23Z>$TxoAos+0RUt>Sz!ZV4+s8L^ol#L+qm<&G7<8Y{}%s7sS&r=a{(QypH zGpOS@@?XrC-yY}9Grxvgi&l2c0A~(HKAuDyR(r6MmJe8kX0sJHWsxQ@wh=eT+0WwS z!mdB^Mq)F5OY=3j^=&`$ny%4E+(nMs+c?4caX?6Z&m>q}I3(p+^o-(95A-1sg19bx zN>N2gg-MV+a1+S2CwOdd4y1XWA+W2b5HhD3%{W)mU!FR_A5- z5!AXtkg&9%Bh@Zpu-tJH8wWm3lh^TH-4!B}PVpqu2$M%Y-K{gD!iJjUgi}0U-L+Eg zJB5J6-^R!uKk+3=kA)*15`vai9eqHElk>?ZhWy3w3PsOktTg!1nhnA7;!k{-iUR_= zo#q=Okn&fxZ|9*io`mws$|2y3TZPNBr}>OQX@69gVt>8`n+X@!Y(`b)0E7_+ zE2}6JgVfDjzpVDmFMGi%1PD<+1S>`$3Eq@kj0yufKb707d77c?jQPeJVe;#0-UC@b zBF`m+@jD?|I;;4cG0Y5XSR8I3KO6Nr4w;pXCL`Q?TW!!T~8`8Yq} zt25X@^j&Xx`wU;vt`v_+0VnrN-+VI;53$!M=<}8ta2_^RR<#7M)flr1rDJ(7o>I;V z9Y(qHEMKLL0TAa9Df2*>9C?n1UE{E+ELMit+L!I3-eGY$5?`!Or*A(td$QsXgwK3oh`^QA_)gxyN3UinSE+zf!nJ zQwnqBfeU5W?`Srh;QYvb3S$xW62Gk-w*=50Ivx`QD^8kg&e6kUR{= zexxOf*1)15x$xk>9I6t1iJMh5Sa!a|+lXfxBWR=b$#7YHnVU1B@QXrS5Jie3Jgn>( z%neIH?MJbJh?s<;gZjeLLFY~luaj-Apx@;!s-+l7`NL)2V(6CuhZ>=DQNBi&H*5r~f^OrlV*x6nep zImMB}vfBS3=UwIT0Xsnmfmr&vyzo4v!_z|zXe^u5@YL9qe`z)zeN)&m zR?5ZKc(}l1v_L*}jrZ}ZNhVUoxu_pqMWEjKKpjuf!5%y);w}&ho8+drWuH zB&5)CPys8@b8VVjcLP)PLiz6-yyHCzG?V)J`G-`C;_xe)foXWAvA(|OkYCXp+&_f- z1-PG&`-gE~jQa(+Uucx(o4g%X$Rlp@RGLrY~?yUi01vTRo3f3zx{8mk8NQ-XC3 
zvg}u*Y{TP(~&5)yXbx#v_oYb=;U>i>^n_I|Ks+Tl2f6oPAEfb{lqKnl+PY!m*phg>hLXWsG7perFu;MsiqCVWscG&tKB(cv?|NcOCP>s7+yy{~ zcd>7?w_I=+X@l;Qd++k@xTy6A8SX7}>anM$z$N$9^B>ijW1o0Y>{e$6$$TaTt389{ zdrUkY@+|O3vvfQ<-4yKR$)zz%6@68N+kU8u&gw$}@|G$x)T{n7iHo@K6Hd=bPJzld z2Lpkpcl_{KRyZc~X$WUkzw?(1xLBdS?=Mv$I;f9)DLV`CID1(h5~79Zh516heHlB= z12r*3{nlSj&_ok;N}ybzp>(f6xkD3M)MxzVV1pQ@hQLl~5OX4?`jdKu?L|P^`9=`C zxSyv>;w+=6RridR-TcH3b>J9z-A_E%h62<4Ko4htSDN6i!qiS1^4I1uHWzEk7f{Vr zFh0GbD7h1(1sE>j<#r^*6;UI&F5HA33vGcI zO#$n$5|RyO-4k*NUJy+$qars8#4Jeg_hNeQi8xF^fLspb05BnF1A75TZ?dcl7R}ir z8QWMq#_pH@ZY(BQighIxW)Muxw6l?hxGkn;8x1v#F@t1gh=>X-J(r;8pZ~J{M1>q3 zBJO2n@`(`fK<9Vr>-7wX?Tzi7gd=#k5d4ZGZQxTRL3Y(pcAR$ZkRp*KvaoZ479WhI z{35NP;t|$U{yS7ORe$YdX$mu2#Z;6Ht zNG-e}9wi}FESLyY`>EN&{X1|7&NEGuZ<|C@_JBNO5^Y&8$-=}`_1%MVMwl@3OGrAg zAxxy1W_O{~nei+bl$j5x*0z?1!$c%6)5_$zFp=qm-SFr+;EoEZQ2qo=4k|1iD##-T zlM?sBSh~=o)6%98dNx> zO%BRaR_@zEaW@5IgipnGMaL)MXeep3sB;z}L)7#*RMtHVRG z+S3w*Fq4ANNlUsvbL{)I>_iqs0BzgKNysTC?>;4yLdqTo#qfqegTB&?IgZE*7dDMg zjuxxf^YU7>NMltpHb!h?cKLA(1i}P)JVvCcyB#v1iI_8Cg1%*nJ4nI9oD07-)|i;6 zRHyap@&kHm??j9*mC zm9H|c+O^dzmNT_xzqDE*)<4-Vk6Fcn=KUM@cj7deO++^kQFBKVv`nQoM-ejT=6xj~8PSw~{`EA%797PHqtNDQwJZ1atHxSe%gEl%|Zz99fbe zPK1pCxzk$w7R*}tSgUmfbIwc@56GOCjFG*nFk~OpgMh)3VOaXmZ(2dZU#LGLBTWtT z3mR@Ko^M9H2HjH`S|r~l;{~=KMhNuej(j&!yoo8hOco@GCmLTsAeC>$(E$$|C5w(5 z2j6Ru`&)SgB?(T1PF{>qstB0~FBDLZ}xGh(#do+8rtbUgnuMZ~Jw ze0e2BSWW3o$m6bHDxg%m$ZkGgT2nrbpH1XB#F_F2(jH+p+saf}k!)alq76UL%Jqp5i4YeXiRwW?^HLDq%qcoduO!gg<`2 zFDx3abZ_uuW0nM#4qQC0muU&4tdMKdL_AJ|HZ$cd1xaZ9@E3x=Xb4jk`Gh_3-pN{W z33)BZm7*f9Bo@t1L-Ta>?i^?x#`WBk5To={3bRZJg$c|J6+Xd&Q@gQMZ!shjNhw31 zvc5Y=PWAF$0{Cz+(2i6?9@0qwxuil`cvMh`GMXaM7&UrEj!72=a_Nc&wqvL;9J9fiD$p) zTAKT!DrBz=QL6r0C(mYxHsW!-lP9bE$i^mQX{N|*wjtZZQm{ZEe>G4=oV^^Vv;cLg zyqPJS>c8vc{5Aq7Io9pg)z$*ZZW_X*yA2MJJPKC%zKw_pUmvu-WGC|k&lBvSh%WQ! 
zwdS@WFro~v5YgdiiV?dG8^dty+pm(@ZADbeW(X}II~K7urZn9g8wEJ&83!gdHK_HP zv~)z%w8>YNwiP1L9|e8rI*W=bfmz~tj^~sy4+N{6m#5l_FsCPtxfv^;lOX;Jg7gn_ zV2_$%dGO2GanM&TG(mit2U?$uXZrpHz;uDT^rJKwEpPsYtv#e0ic7`h^H4wqcB~b9 zgTTJ!bo>T`$HtR*=hE%6-X|2t4UJ%{sPV|FaL6kmr+4HDsGlnBfdjf(w2!ZSAt52)Cl{&XH!Va!L!DAMq5~_7oo!lUtEY7 zHqzwkAp#H++V=%+uB=8In&NWeL^8#l#9cUjOmUlWrJ>4dFjTQ<$c%me#Pn99)j12~ z==+^uOmG&^?|(q^w%~0z2+M=!r9gEC`wJ1j z;fPF{1^H!*hp+*hga}`!dw_)K8IjjzSb2H^F8UUoL=d+J{&-`&^Bz3EkZZxHn3=`H z3r#$t!%K5M5ke{dm&mT21R_wRy^~1ucj{wxsd$NeqLXkAi>>tvlH6N@hUgYhPLnjz z3;1>YUJ&Oc3|AL?Mv#%7{v;r~0$t7n0rq-9`|v^mAmohBA`_H#^;B;svt3DBdo2;y}wso^^ATd^D#O^;s!8#3WB@g&0x^H+{Eb8(nW-ga6qyV zv7E(@$?6h?QD$RGKazr+`Ymv(L5e*Q6Hfx$3Wd@JR_lgPexl@yz}95KPG~)O1_bd7BB!54MM*e= zp%iGk2;L_m{3uUj|ADI^jbSndah}C~7ErCb8XQ4@^u==<>Y>a{&*1w~QCh6vdquGo zKaDys_XT1|q7{?I28p;3*AjhY?8$aTQ0ow*w;jBb0RaTrXUe3?p%J*y&~RCDFBXmc z)tGtFZ9v z;xY-WrtGrp+D){Ya6iEFXi@PF+PriUq9g!BcM(cQD;>!2z{oC@3ZlZ~E5xym2u%R4BI6TAsFPKP(AliX3uAz=1 zpoe&B8l(oSPUUk|V|kJ4S6+MvVy zMn*l?*o8zv7B6XvYH5t*uj(s(tuvJ93-+F_ap*+96-7&A&Sb-`z7A@cb5{RQQ$}^=vdSPP+2!%TQ;@#-o$0N~uRPX+Xxhr=fv*CyAWW}~XX~+_B zs_i$Klm)w2+%-8POGFNL0m#dL42R(#h66*9iJ|C(p~zM-95}ld$;a1YNa8cGnZJ`k z9}atv`HvxyrF}$_yqhIbExWL|4Fb|>emZ)GMm_`z{%1Ah|rl?4?m4R+P6hf=dxaaH8x=3|&WpWK2M;4Z3|1$s4uHs^w7&7jXBAixY7 zEe8FWL8mUzFSJ(2GW|JotdfO2M8_Zvq{>063KEJ7`R^X0xz-0jq{@mOPz%R+VPt@x zq~*c8o``^2#J-LNfESU{V}Ap2z7N4R|Cbca0cjDP|0zT-{ZAp<-B+A>lF^)d@l{}DvT?!+Msg#DXBb6iEa%elNKyD(j_ z^NS1~f`_{1z>q)#!%7FXeSAdJ&_ndlGrABaJECHu64E!rftgF~@n#wI0aVpaq{1=J$L{W;Z zRJ;pZ5P0w4dKENZiCj}hkTPEmyibH{Kcn`mGVeaDI+OR5R_)S1wER&>v1JJrQ<7RZ zN|3ZNW0iRsdT|l_DWvCE;qL&gZ(??CCqTk|?2*;#dlsgFR~R>%+hM&RQ67_|DL z2b?(PcerNCGb5k$XdD$f3V+vy29_Y1rGZjOxgjsULR5=6$H!_rnXz248z87O9d%Zq zG65n#-a7?oois-bMgqfKPOICrK55Q$W4f~v01|87{(blHuC-1&cL$AQEdGg1Xe?TQ zOx{9}HId1O7&{+TJcD~;ub!f3F$H0JsG@iARUnsu$)7e#*f4%sZaSk56DHQ7R{LU+ zs@Dabn*Pa>zT&hK#tdlF%BrpqvUJ6PO-EE*smXEl49XmZl1>2&s@UR%qsqiI&wT~6 zPZLwY%@55DoB8vnXdB8u0_g;lPCyw4D8C_y0K^{vq2WK! 
zhQ>T-^EBYW8tzv!FV=AXE%Rax_bKY106Ek@0jf~{0#J*;qK!M=2C4{kJd`^^0n`ox z3S+v_y*F0c`-w^EzR(3Xg5Aj)P1ffgqyHgvVH6#?6NzLxqQ7XTJ~mKx?l1P@l%=}< zB1-LPl#v5OlDYx83kQf-)fbJsE)Ec9S?@=|$-W>gBgWZ=$e)L?jM)o6AJ(@O)4cC- z{+k~5_LEgn3h<1A%qb{?p8sT(9gy?S`9mHuhBz#`VbqAQW-}}g{lE|IKzEG`nNMSj>Q* z8nNB=a>m1IoE&|>m=(Tnn#af647Qey7ATYGm^A-KSY+M(aK25FnL|WJwf8hRWr%2k zpUa1cXJn5NqK7Ik$Q2_*n+`iD^n_#>!b8cLL{t1Z2*w-wbIfOWP8K+_ebUr<23=pf zR9+b&_Ntf8$sHr%4DQxUUK}ZsM7w5-ageFUR2ZnA-NCQl+%50)<%#0_&1a9x&~F>X z%I%}XEI%Z913#EPkTIjh)+ST$-Cjk;PI6V!Zk4OpJ>zPmCm1|DTJ*?Fm`d>)bcUW0 zO1?+$_zgOW5yJEdnDXhJzP`p-kO1B+v&X=v(*CZTF$R5o^t{|LMr0tR;i)kqIp{UG zJ?MA9zr9$hrK?fR?d1` zxD6_Ud@OfkQT&SbihS)sF(%FgHJ_I1M*BHo2EO949-;Yxrp|l1OY1n1s_kly-J5dg zI1#44^sCGtCt5|6@c;T^cX#>fI6%I9io7yTbm=@rzy`^g8hx=?;DqiRv~GqF7LeWx zaW68n+)vb>fN4+x-9oT}-Y5&sOA#;0N#o&i>2Ov)GG5$L%~Rx(38I_x?-171c}ylP z@S#7t9g2#m+qV$G(6HO7XoPGBJ27KHSJd}DCgN20^)z_CF&%+pOhORl8LEy^{Zoc_ z!$kDA_C{joh^N7DGz@>0L|FySI-B5*A%3ORrP!j|u$vrO1P%TrL>k|ic11dE!at^} zi>D$Y)`pe+BDdQ_7<)m!YZE=yX&=bjHqqPq9DG|k@%^o|QE}^b$cw+o9J|Qq-2vbc zVN#9pI@p~3sz*&AJi#v5v%rX64@RtEt2?o4`eo=FWYawKi$sumDvbPon{$Mpm?8AT zcEXucT5?2d4ZT~_MUKo7R`W1&xdEg`pgb!STauIqv;!riZG549Q6guhif{udF`Iso zJ95P6MmXmg*2Kwm@en}~lf=d*$69&J5eeu?YM#fqNc$lsw~~7&iHxLAC&T#+s6s%* zAxg)CPhQ)7yr#~I7U;Jp258b`8JUaOA*P4ymMh{?=J<3F@SdOiJx_EDbs?CGefo*X z4@$Kk6jDE#oiCzNzdJ>76-X(G0XhUTEw!A8mT5Ss_s^-NRY;3_%=ngEk}s?+7h>~7 zeLXK<-i)+7WrM+CV1cc(+AG_$P0G+LQ%ZJ%W2ygQxLsA#jE!tqbc<|QL0TIefvvOK zKPq^6M--qqH_8Z->%Uz$5fUXVk)Z`53ect$h~~~@Od&M8CSrC?^df*Tn+VekVb=if z3iZN=!F>cmNx>Yh!VOQOFcwRUjxor8$hd=$NC)Q; z1S*hc6Sm@iosO4B3Pjigevk(mn+cU)@pp%WDkm!hK=A`s4ER`c2=-5)kRNyeR{WlW zMJOP}PX~+Y@{^q=i>5>J;7j|Xnt$jGRk8g4sDjdU&;b~<^=CN_zo1l{^8LxeX1bn^ z2_5DUrSPggg>1edQwtI3_3$;BUno-5fxG16g(8VR0{{7c3gJQ?c0+zwDB_0ozd`De z!hWNwfr#%7-!G!^{}fzAVyy?>(^P?-`hN)>>4o=-VsU(luo2&PRKCLv*?)>yfCIa? 
z$%@E!EeY;2B zl%LHI_jkmi;mBhHmFmX^x~f;Adbonun`mnfWPJh_C$x}ofto0SF%iFMPnpsJNy=u5 z814twnL1On-}UrN;bzc5cTJcrmN51G++9D-5y6Z-Ew4Q!!bjApjJ8aSf5)Y7vp9gT z0^j!fXCZn%hjL^?nIz29)2tvDbdQj?$6Ia|xl;u9`vMQO^mJlvb{jMRs$DNVVw>Inp6!SpMwNT{RkApe|82D6E0H*j%1* zi0)awVp%9ck4e7{801-dT%rR!hSCDje1xVXN^NL|gX<^Zsw(8Js`Cs{+HZ+wtsBNy ze>}kRG4#DZ{xy96F5b%_9k^M3wLm-_@&qDtT4KfPQu-cLlzs>0+=U{69g|Nj6!FoA zKou^;b@jk>jmpaHr>%nagzfrap}3@)&44)vAw)|85s{FRWm-C|5yFy#L~dS|sj(8! z3$Wqj&_{*eE7`Ys?@OyTyjoh-;%I4=b8czXd#{yN&3nDH>PAf2iAgxNw0}iul|TMU z@%M0QX;oxdY1Q<>rB$_+P~;Kz_Z^a1*!yj-+TVM7)wbECRr4_y4jQt(Y7iS_>2m+} zs;=nKTxi1maV=3;i}G2uDoY$wTJ=15_$`c*Z}{o-g6=Q@uE8-jiv1yZ|Kp-$SbIQ; zehZ4tu!3#AA~J^#qF?JZb`-y@U{+h50W^uZ7Dk;GnWa@uZgerA>OWyrjYuX5t-Xw^=7w_}#r*}o- zpqJj8M08Ka*cip_|Np3a@9?OKE`I#pyLV=jkc1?plfdq#5lSdhLkmSZ5l|5u?8Z$f zBGOcXX&_)j3z`AX`pro|3BqUwf!U!EyEVwq-E@U7g*w>}h^ZX}DxqivH75{Eg)PBo-N61OOs{kOI0J35))mg*H?Qoku z+dwSr7DgRNFZVy`!^g5A5@PA$c}dmncP*HYgo;SDb(x6Q>wr4^Ojlt-gx?`m+|Co+3)y53UMD( zyt%*lc+ivfsvmA8sTN*)6#E{W$J>kleS#+s>sR=|%jW$?7h{Sn<^J(+_baU`$CbLJ zK8ZKE#tPtQ$K^!OI#woxP(@^Zj9!Ru4E1|g4{h&+Q;#oJgL25)+p}PUU?(p}9bI*V z>ZN*VW1|g>_?4`dJ>#=AnYKi`+7$NNQ>@;DC-p*#9Zhzqvh{ERB zBalrT#BT(yQhtR{&$BV%AnjkG_X!>WK_5z^Hb7>my z^z6tckS!ePba0n%bLyC+*P>lZ^_K0qiYE=i{7&a1^4zoL=-H`jmvjtO$i7UQIv7>& z$+W_*kG)jzzFZ3kTv7H=n_|65og^ru@K*RBG9Ae*mz9~@>7`=$f)1-9xlDf~V1!`Y zvX?6co`x^eJJ#olSv>@n?WtI)*eC8)ylol0aTpsvEz_IVV{90wpFuZl+htjWIO$?4;={(ZG!hRI*Z!wjBdKJE04(`6`9qS8qAk3x`PggiO`B65N`L`CeTV zIIg}Q=tQv}6N=)bxDWKlQA_RzSdbs0RUhcB;=1dNNB1zg(;+7v-YS|J4}#hASn=_A z5M!LCXdi57@mPR@&9aIUaY}YIHbV@V4bGr$ z&4s4+x?_W$c$#$5ln?cYuw$9W0S-RTyn21+3B^5-ottcH~&4KdBrT2 zdlM?5Mjz?RVpB7*=|IUuM{12y*sh~=JNoV;{Uy;Fr{OG=`LQ0?uzhBtx($14Y+)*Q zpluuY$a?g^bt?HtPZTO@X-}t7l=glEVK8!yUJ#fJRJOvN=O18N zZy%!1m190ZXE>M6uhHuSoW!*F8iJ1Uhq-hJscZE* z9fx54n&8#ILAUTX8%sVy85SQjm2*w*XdK$>npGWH9s>SQ$rswHRvZ{hR!T?Hz_svp znYLF~U)d`VMHs$BuPdcLpp|MYmaQ>Tra~=bd#KXu8Ctwnk9ZKfi-UF;%BPf@)PizN z6;^#D!wY}kkx&nh#4)sy%6dDTdPm@YtZo6BW{!sIqp#ZAO~{JchSYU>(+F#zQusKk 
zYYXfn5(C?zFe}-9RpgYZ>#SmjvEgq8T+mR78X!Ez+# z6{bo^Gwh+e!&xT_y-q`=>_G+GS9BJgb1i|EJY`aM*~JNo8Dnd6bn(zz7px&2M7%@6 zGY$WswV%@TSXA$(Drh$#HL8=GvU5V`+>QwybK@KZdK@UN#ZPwqWn6jw>;`>>?=DO^ z16LHaQGY49Hp-`=`_(uZ%I#VbEqPs>4z$h37rUT*fzjR;`h27Qr2Zu|o6z3-q|uO1 z^bhI!S6VQspX%G-x4ZpQJr0*#eD|r|)31)Nz3KoqruI9Ox=9}u{SfXK;q`n6=6FBP z@2&XVyCaosg8bU0Bc0g|i-&+C(sm_ISSnZ1Q^8wu4Li zLPwOmp%1%jWnRk4V(np9^p|=M6{jd1U*RBCj_bx(`j?vR(G}pY_MQ4Z;NAox9%#Jp z(+_&y*PNpLTX5X1&M6vPs?XAvZlcSj`fhFJKH6A@O|A8Ps<&0QX_4ABqmS#)Qy2x! z-jceW(P!y83Q^%M94;*EZVF;8@@HrDy(ZgR@A9zh(hqos;dSHvfPUEXe(7tvb^*iE z_iK9bB8KJI$0Vz8{&8nH?WjWfqwi4qC9J~f@6f1A`g~u@*Y06CU1L}->wYHf!)>mv z-|1O;?0*}UeA@n_zDMiVjOJX^^Zqt0H(aql>EEcDw$SzGbzN%O`fAdC#<1)r*W{l; ziRwX`@(YG#IJx9Man?l}MD)dP7?wuF!_u&*v0gkZ8Y}8(=oA!(Q~wmhB0#kq_i=Tn z0^SQL369$u(@JziY1LljbAsICFsVO5UcR1&Bd@ea8J0M2|50)_Xef5|kQ%rS#ql+r z{ap`>ex@6anBzhPt7sVs!&0JN`Bu{X?JCDGEL1a$H7n>WEGuR44qO3IMr|HA-Lw+=a#jfMrq5 zipAvcZzX3vGROX(>r>gEI2d@kt1IO%y|s{!;Rx@nf=#1DxF+3bq#2q>ZRY+Rug=T? z3ZS;-jh-9enI3@Cf+y%xaz+D1{ckVD34-tR2->NMc7C4$t2{$-1GS$?L`PFm@$oI` zP)XRgN@tC&%Br`hlS!m&i@%~-CecaDt)TsQ22THq&5m*ED2!6{R}`&_dU7(-#(hPd zbdlB3s0+dnBbV#@+!#fKcEKoq_!gUtdGEk{KWi`B$Qch8c9)#ikcMC#Z;AwxLM+nS z?xy8Jq-eJw6bjL!^}nmv2pNNV`=Z{9kQ4s4rA58dcjF+gSpDE?NMP88W;X;6JOOxk zOIyiLY+XzF2~C9Ygcf28-fe*0UTp@PEIaw&-?w4|bu^1c!C9YiRR?p~J%d4igR;${ zZpUZmFjf?-9Bhb%5%$j#DC&Dp_B2(PFj5&3un&70#6bjU2~h&Bz1 zk6(Ao%Ra&fH~IMbh|W+)_Vy7E^^T4xsG3;~p#XwGK|FXc%C~}kF31it1Llh1Il^f{ zNaLsUJVQOtuAb*uI`1R?FNlXnWkOA&Z+%5nN9a#Ds+4hga~@FctMpbe+hIpl5zZ~U zpjN@lTzY|ir#qWRdUM(68@ z$d(BD#XH5xGnLoAsvdSoMVzfK7(EbqPC}%PcboxlE=yFQmQf86>a_TrZp;)B!5<^r zMg3w9-;ZX&GW7*L8Y+@vpTeGz;~gq?tNu8C-!PEvjzzYOf@h)RVI*-T~*78N#YKt^ zFw3t%IOKY~!5nc5;89$7G)Y7S!fI!l`Z8x`(@@d5Yw_Ao+i7Juc+)3m>1en}H!UqM zp@$A>QIr-TVnn+gC8z(OE+6O#G$TSZ)ZX1r8zaOU+T-QaDN-bAzip?iNO3f92gr&! 
z40Fa8Z&05oamN2D()gZ&xiDtTk3+bqUeN-6N90>qbPleIS%aDHS0!onHg^wfeN~PaEorm(@q9NsMTu**4G*#G@5NrhCZJ~+g#fS+F zd$92Y`;pra>vw}MX&Z`<*EFPfH`=Fc)FM{EyQQ2)#flUh6| z8za;@6jV>dXw%CnrJm^Hi6cRI^+ZNZUmX|Rhr6rt0Wdar@1$Eq3$+DJv4{@p9V)Si zu7NGFkha2CpK6O*SnUe5io-rK7AtS#TXZ>D4Dvg<#k*UlwT(nPl{6NC?hLnSazk-A z*qzy`W?}1N!z@6ojbzNXNsUBfzx=-yZsBX>#T70rLq(F>SiJ4m{cl;~U8sMnk56k1 z!lddrmCo%pMN)Aq(N7JcA6tpIhNohAX^6}2>RB4P!#vxL`8Kp;e6XcWL^0$Rwc;y8 zyl*X1WNUm|0LP`)Vk*L!-Dr(@@D7Ew5lN`9V;fOd4X4N3hzHOmu7l>Z5ocsQwC=p+ zdaErY(J!z-=VGT#31eK=@gzS(mUGxN~=ntU`%w36jtyX%+5-i zpQXxj$BO;LC}Bg|Pexi=Ra!NBIzEeorHbL88%N7hMNciS1zkUT&#)c9MF<+3knXcuuoZA4wViaLR>KnCKKmgDK~xKe~zs)F5YRfgxUcnLQPJqQ(2OSdsw(kBx$T zs@eNGl&jrEq*lKfWpx)BSamD9i@wQw)Z*h69%-E0ehKfqi4vZn^-hP^spErUIJVm> z9>lP{K>HrVAnJ7ELDBw$em#V)R#N{y;@K#czgfiPZ4k%G;XMy4oIa&PeZ;Hk6ZCjr z@vtxF;xFB3Q(v&iX>_BnsH=5Mp}>A(vKmOQ_Y?Ki=CrDxs2j`kw{;{FoHg4IQ1!vA z-^9I#*1i{=?X+9vWYBORB_d?xlLoAQr`V+J3qy8bNzY6Oa+bEA70<(OX)~1WpM(Idz_JUgt zz8!dFRXU4hk%&OS8{hSSiPy}=-&<&{zpY?zvs{>ON{nv3LGrPYmxahbugscbDP{|8;&sW zTpsNoEV^pH$n#EowbPlxW*F(XT_qU~T&*N`GAR2$-;xZKO>a8zf509v7iz z$fwJZO6y-oXyC$|K0eyAO?2jQakNz#$?tl{;+HOBJvhc($Ny6nodxo=qoCg%ZL4EPZM(g1|To#FRh+_0}w+Cn*3Deduk zGn`xZmap)Z&qoDNZW$Gn-&a9fPX#tqU`7R&8Wrpkp3u8!NW~CsLRqG4?8pTV-4{=! zRpHGT<^k1;x@K|laelI?j+g7l@s5=<+cwTU)&^6vId+K(sQU+b#-Dq!bP1K*5aDEB zsOyh#X>S*HL3HIBe36+UnC*c8)fYa(`a?j%m1TM>12jGH`->;sYxh_{GYwGCfQ5Q! 
ze$ATMP*bz!mul49+Nilg>_W|5YSat?xzYq2u~@`zDqbiN0L&et*n=nMT|8CP#8bvy zL_JjlQ5q2C<3UuMyD<-*+yKCETsh2`_-?T}H>dcx-TMytXB^%?05Cobn4#d#*E}%3 z=Y|pSLzN1Fw52uPb-cfWcg2%;0e(YNGWs?TzmH~eh+aj#jL|rT-=^8_B=zqfpti_#a7ThiIjJ_X%}!h%uJi@50XHu|0WP ztY*9Cq=|<82yy#xbE!kLY}*Jh@Q9tqCfKgq8=C;UhK?lENPopSTJSeU7=NpWZ*xu~ zMrwv=8$8it1Z1JbZ3JAEA$mq1dH>%V0r$U8e$QY*pXjl0f_T~c%WdI2_l&SSxF=In zUxFLq-Y})G3G4tz^dmVZ^^-YjklSmMk7IqKf0I+GSLW=_bo)$MfDpHGdAHzxPoxK( zqKTL&V7#N3oT622w&L-YGtRMF#5pSHOQ*1E?Uv9Pr$~?EK14$I&6As_U_~Zd8~Hl( zJsxXg1wE80tm+;bmnojd<9w!wFcr)zpuaLj^9Q(c-cK>Ack&ZjFy6JWsqmkW+-(^A zQ$X5Hq#469X(A2F6509tn0yfC_K-nFLS2R@yIR<9Niba4lHqz$0n1Pxf5-Hjf+sI= zioLzCfX>zxTE&a9FFWK&i01zIv( zL~C1v=!@ZExwdKp4IKe_<~e$6gs2-b3&AOE3^S-_2&P4gB>G~6h&DaBzJ%KUV2Yya zBZQB>2#pnu{V9}!Mv6#%7T)M!C3Y(_CR3l0B3hi?P;&aBu?Ltk5>h0>+l&<5^2-do znA9Eo#BB=y4u9(Z0e_!oyYcsS?R)rR%kY<8{2>spV#jGQMEIu>#Y9FE~YIH;KsyXvT4qqQM$P{M*NX-iBdMY@PO?RFZV*CMvfW=V^#ekx3CC7U*qUEQ^a^Byq4{+!u7i%^bjOISg!@xi*#Tuz!5QnVb zBF>ZS9V0C5IQOS{ipmjE_{r)I)%iBgz0Jb;Hx2&^{#Biq)hd*nTFb3SIByV`^?4|b zo>=di{Jen0;8$fJ6BxRKDD|w<}o&+KXy$n5GPfw(ML3R zDstdbsJy8V#=jj$Tc?Wj7Hf@JgM|=WhZn)Na9vvi=$z2!^8%S@#76-afXbdZ*@%y* z>nmcnI+_BeiHCeg1AjWMa~w>W)1V|trSsE7ohM$lxWndg+%PU=a`e!3=;-^eDmk5o zSjjyw`yj>~9pE9xmc}UD3%!r1VLi4?P@@}SI4OsmDLp>#^hszzW zt`5H&ai0m&@L2iye?kV`i!U4H$<~Q^x-avIdq!zhG<~t9yA>fdzuGpOuLU5(^u=w z{`Kc>TM$mpeObA7QB$u6#buXW#8wkLzc-YMOl*9_&s)B}(bJ(9AMwmfIx@=CAipkb z+iK9*9K~+YpN9n&E`7$8E*J}p2fUbB@i|R-&EGRE_yz>8D9Ov!XGZ_IR%}D@n;~>h zfyu^|F5b%sa3T58>yY3WDu?GX0-QoFnh;W-cDyNU>MQgej2JK;usV1!tqQ@!YBp0$ z5^F)C3N4%9i^4Kui;;{A>qL>97kU43g z2ZgPH0@e#F9A*+Itb$bMMPbH$DEttrO$L?)2Mg*L>#4(bmteRaCOl2e_X_`R1vBn0 zzpG#d3&z&cgrml+U`AMWD45+Gm)CT58nQU;x|O+~-G+Z?iS1T>N1#XL49vf4u;j`5 zA^^f0Eyg-eSvMgn>bdIbR!6y4dy&W$SqwIMuQD7QKgtZ4*(g`A8Skv!p|s~kTW!&Y zGQHtN*UNG(O1s1>J>7^M~9N$kfe2kVhzbTr5U`EcqpJ3L{2En9{{r^QU%~0&$ z6AXyfvyL&r*s+dXhrW`x13S$-lh|Z&TAt3s@23B+5KIaP1|mG;`hOu9`n*UytF4Qn z#JS?CHZg`m=0P!^yu!6)o*1NRx9ZZ(`CxK;{AfIhMgofYA){$IiJ>WNSC*Wfj%HE= 
z?Nuh6lXM@7snYqn@9Vc=-o)k&tOj^fD_V`Flm%jj_RRaP&lU(xi{?U{N#GJ|Zze7< z^I6y7MOYuTuFuh*?}$Fyy7#HayHFS9KkLeRmp{Dl9L-uRp3sIq=eoL>_3eMYN6|~6 zfOo!6gO;Mip(t9iRM@oQ<+ODv6j^am)Vdfoyu92sxLBmC!BhR9V25sH8b8sYS83BS zVd(`GQ5<`SIj-o;Xc_|S{onI>9?0pC4X=RV^ukW10QqvbB-DyC@Qj0siSs8Te*rcW zWfZ$yMC2E#?!$J72c6p(?BFdUtELt}jGu;Y@8X@S<|41*x7XEMQDg<*&lHC$S1LRu6qt)!54zbCT%elz5JRZT6V)c2uVT(F#$zYnxOMTg!Ob(3yzk9~q8&4--!j;OM*2ok;}y_Se!rYruMqR$RZ)tQ2v%d&D5n!E zMEypr?e#%H*7&lvRl@Ne%q;t#aC9q&tj5Z-5im5UKIN{sm13FZ$CcQ?rH|aA{U5?5 z0ME-Gih8llk-iJHJPldLTLYWfcipnoU1jdOKhB}SNraz@tIDfX@KLr7FD_HBBa zyv$U0-8?$&_b=XGl~A6ek@aO^A9`8RC!U)TXk|fd7qG=-HL!+!tp0tCrhH?v7F~-Br0Z)$sCG7tSu+!lGHsEb zVx*fuwz?XO_b}t#jCY(R#Q3M(7MZFA>KO$N0^Biwj3-nv`qtNrj~C)1`9xnOyZ&q( zzVQ4$g<~+KIA0s|WIg5 zn4-fjEuxeKyr669?71f2Iq%v5;aKz7rtDPUuGbeHYiTrWy@(QTW=VA><*pZv^50BU zvS+5KQ`oUzv(MI*-{-{h^2hEajCNKDUz+3YG3qm5=HBO|lm;b(v~mC+dmXBPxfGFh z9N2_rWTH*B>r(ZZZT9tD zXwSq6L`W#U$;tw#Yra;j`8iNGOy>qPheMbg8OF6~0EmWf5V5{(-YhttGZ-8%Z- zu9!Sn!POdT@Wq@D?rs<7sd*E<4pnUs4g7da<1nVL%%<>-AilqF&rd1*o*7DS4|l;+ zz^#ER$L(s{@xKHAJLPMwi;oWhFDt!;^B1T?f%4`Rq_yVxAEks~ytAt>Z#kP<>=ssTF^%fhJ6# z39eEBO{ky=<2Q(lo6Cp6*dCxPSdF~N9$g)S#l5oV7S?3bwogTJz87}>m>8&LPr|HX z@8C6>^q2^iA5j!yLKCd}a+P;3rmZ!(Bi<9lH?iTWmhMqh3c zQBgtWprktx?QURA3HJfrf!3_*4>^13N92pj$^20VLuvXWs!mWjxE>SZ<&NS4WkMSu2natI> zq1j+?bI=V1WTPUmBE#(kFf;21BM+ysOp4%HF@CGxeXrw{@p_;b{KiCcE|x3pbWfM=U2-(+s09SAzMqR$OdofLATeCa z?iSi@E~9(_hKbjvKd?3}G}fuKaHc#9)}@8`UVx!wjg)&P@Y*CRmKtnaW*6{G@Vz$^ zE?{_cb?IE;TdfJB= z$L>DiYVZCCRp9^lXt1-rwBZXlX6&VXc$hG===(2()yf^V+_GTEw2Gf0vv_GN8X>+- zQKpb1)Z|MM6N)&aj4qxISK}4@DE>O0p8iry5VIap)LB&drKn$jCYqo52%?|#7U6nS zw{Y1T7eFM7$5+Dv9i^$xSHfyw{Vl-59Tn)Pmp`x!(hV7Zh*3g*z^r1v6y6rcdog^> zhb({D)c%(cH_OXb-y-H`O}P$^`hOKe@8unQsLO_QY!3KB zU;5-L5!`ei664@0Z*%MutWLE7qc(Ww|KLo+DKMMb4H9G!Sb?V^`uQu-#9&Kt{}2fs z_hmxa3%y4iN;3#?C>aN*vmc;E;O4HPdzyVIgo=WM`uOeRn63Ld=ID_O zNGtv!+UFbO6t84YX{=89C%z^-4(SaXhxJYPorZl(R|Na&;+&#)L5FOB_kV%=m)rSA zS)1z}$s;uo5CKDK8c3JLF`O8nVI{)1Y2{sHR0;)}RTW+GB7{lphzB7$5W>SqC8A4G 
zC59rwzyTMsp-`F!55>9(_~2HdZ=x+FA}arX$*cfmFD>NW$UAeu~V78x+UR`_A4EW?73*i~b&$nPyvgyPYc&}trSgCk8!n;C37BW(G}^n$yWqm9^mv1( zw+#$TK#GOQpK3=3gl;o5QF^pQ&)k}uwf!oXV+GM9R^?-08p^@ z<9@uyJp9b!0pbcO;fjM$@!K9iq$f>9+8drU*tYKuQyrvpnO8jD{seQTbEV>uB!F$u z*EDblI2=?KxMyWk!(5NeJcEZk^kkXn>Ap#1AM9ZV%S0+IDHXNz-Jgvr*p|)|RW8f( z7#lTrKbKIIMYlAz)X2}j6S5GVj`>iH^l}I+D*TV&`1UpDc?rXk2HCI;Xs6nzJg*vl zP7l_ZUdc1Av?{%-uY?1Wum-&NR^} zRLu%qLb5RSB%_hHBl3>M-h(Ft>k&Bb^LQT%X=$7xf4bEO<5AYDMo2T%2;*3fFn$^r z&>RzVlVhUJzsG?Uj>jl44S9RW=?`Y%cI5l$N`VN*-yW-7d_7vxA(n{>JxUMZ@a*$4 zL1PGlVgVT?IOd52NK|Ew%3e;izLc*7DTPX8nzEplVmbU4GIl`5hij)nkez4_htAOE zcuQb*=`3Lzh13c8rD%u&V85r)kS|9&Y3&9P?#IT8F^_2K7$?obdjK0Uq+^_Jc8u4{ zuuYFGTI(N}?>pq4s;nPWW$euK5TpdJ^HLxF4eewc^gP}9H;&ZwH_~*qr;$&nJ@|DShKYrZfu;i?v4ugfra9FB2Ram2O z{&yU4D%dLq#JwrrOyH%y0MMex2DbFHFpK60LbOOaPo7?n!U&N&f)p zoKHo1>#6j{0g+y(CVm-3jL+{6h;enF70(*@vm;Q?Yd}2AtcL3t(&?pxBEiHzmL3%S z>(tDN+t3Ans6HqjjP%0KL%?Ilr`Q=TDkv4*jluY%=!QL(wjL7Eb!!5`nL{wpst2Io z4vBUx`EFTcYEld?iOH@EG>B7EjVDCg0eDxT`eARoR_I~mxus`|4w+-fK2C=EeS^h_ zwcpLB(UQYZwb%NUK07QDZ52C9&VGZLJ&Ywd*h##uFt6C%PJ~nJ9C#+Zf9!9picie$ zjyj$*S~jU8ji*p&;>p~S{rbFykl^!M&OXB5;m*KMW4gfUY7YePIIrkGbYw&y&`5AW3&zyQamSTp( z3f9T*6L(U`5z#f^JzU{sGtyMtAn7V2GX}!FFU76suJjE397{F2t zbm*!EP!lmIEH@oO9*kR|Ku9_!!^-)tlwM#baEaSZACUG8HWd5 zJC2Kus&$VyIzY zS9(*yIng*E3QCBnF+n-C;bRm;YwG(Xg{|}W81V%19x=N}=Qqg+^(NhJrPd|Od2!|M zO%k0LS_BwIRMR}Udl;X@3e z{ds*Io;-dXX49M>#eU6aHf3BBT}{Jl70|FBMVN-;KPA`14DCQ6_4o<4PM_9BHB8QJ6I6#|AF=N9DDeyYJ*S`DxnH=`N=kH7K-M5fBT^Bu6 zqUqN~=g0scJ1$fSF`Uw|a(PctQ9&b9sPAPgBkjLgde(^Q6hX~y2!D#aA%>)z4DUoR z!T;bM{@0{??%^gZY#-nqD9vWqKCd^00`GA46}XC}CA z61Bc55`9C5U`k+TokJsUiZ)@PLzJo*h8VsN1)l97bQ>qp+~Bxl`|@C+?O^(6&G|g2{moKVO1C8u3lY+#Z&!|RDo5Q5qX^oI^ z!8I$;kqo$md|PsZBXA#^lquc#_ayl0XjpH~L*`T`O+s*~;^Ul-w+g$|Dn`Iiw+3k) z7@pg3q5(bHh7=gz;~Z1zij=X41ZNml5R5Jf7i7en<%pUG-onC=|6OvC93D8X9u@mYe^dz&I6zIk2(BUCk)bO= z1^9nW=N4YoU!^NPvYq6QBW_WmugpVtVIjUvt9|9r*6DCvFNF_K*pRfUrvMJ$v7}6T zMdz&qUpe|W@SIlUgZck9b@G#~wW4+OqMuCEl5=Q@pNz(_wa@)z-H;4mBO^2;=fvyY 
z6L}0?xY0p;;xmg%kR1U@ypU7dS~939?lNe+3f#ezT@DZZ(!Kn?0`FW-Dv=i@&glEx zdPe8>X}6s8QZ1S6Iu{@p$a;T%%j;`gb`a>e+~7$#We=+{J51GjkLHI0iQ_+~FGA%| zbr3~`$v9{WJBG=+(c`7tKiV60zzW;23d?-X0oV92IY{%Ze*pec9#`?uIOzCBjdovX z1G8%Hr}i&p6Z_r2^>{a=21}$IoBzud_p09sP?cS_sY&3R%x-&uFD%~2BAol3X_YrC z=a7aUBe54!t8j$~!pc=%L2u53`xf9#`T_jrV+rdL{Uq!}KuBSy9jdje2kG%DETomZL9&?=vE!p)!IDx`xL=@j#k_PddG zE=J;_rad$$R?g9u?xD-Ea)-8JH?6G)dWqjdhwI51+SJ|jM15&XutKIY)*Z~Sdd6=| z6xcxO-c7e7*3h#0k~F_PlxmS1wXofE&4PJ6Z#M;6We@GI=``4i`EsZ)JvB^0{E6x!rmH?w(U%?bZP^lpj?M;vkS~cKS zB7=-pSS+%F(m1|woKZt-kPZK|ZU8 zK$=mw`{KtYvguF<(oUlyW>Dp5;Ay*b z&q9N-56(VmV4M~(Q{H2=*h6dg=+UXHX8IF#c)xM}ro8NfM{>TnJ?Id&db}pwgsp_X zb6dA})BY69@$Yw2cvG;C3%e<`sZ3PclB22Y+3gmpY~&n-sHLjf#QCt;cn$$-;Qk@} z!Gf`cXO_vky(Z2lMPug^Vl}=eqiS9w_}h|=#(~+e`OTySA+@c|{!N`z{||LezOT+P zo;s9B3n*M;? zrmdYqeOhAePom70vVGvUe{4BB6}8mnlGr7*lvlA`T-Zu}plvRvuC3*Znr%CM(OPDy zoB7h!ICULAV$D5_&BaRNg=VyoA<-ubN>1-URXmy53JysmhnLSI1+I;4fLR0xIMG(_ zgr#FyJ6TuD-r*`~Co@#-_gu2Imp!5`=YoAUFAXtV!7!>Laut;w$Lr9P_A(~+ztF;k zo%d)V6dkEa3(c7pu&-fS_%BSzQcz)=p2 zW>e>6VX~hQ&!2Wvf$csj7>RTqeH#d!^FO_gu5pE>|E)qD6}I^g6*j-G!i06SzoR@F zU7hD1blo%P+c4-2dFXDrN;=7KRc-I8>@1&D+f779M*d07eXv4BH?Lq&c#&mxHrn(3 zFupGXlG*BD>=tQc7Yyq_h)2dmoo$*#PfvbE~l9#@gG*q1=(+sgWMs+SyL zjqs#_PgJHsfME|bW<<=83b$@Kd)81IP3mvJp)Kj>xPoI;1L-gOP(^Ro@P3Gnu$R(BV1*VfU;wIdh)dN_{&@CfBMTxDtlQ;ab;0=(`UtTFQN^?4^QRr}ZI>i#upNxiqLD$SX%z zN*ka)C8{*272Y2*@PN={w=4DjJXCjR{}VDgjCZ?ZvHx^D16NNFxr&_yE{1wSJ`jF? 
z6OV8c)zsrjSb5%`d&Kk6v?#vN!dqzlER*nuP`5f zNGbp?GJ-5m%LeLY>ix8=i%5u>Ps{pX@6(?K5e|NV zRzEH4#mWxF#|P!Od4q;X^UK@2HIj@i1gC#m?u=shc*M9geCU6Ht1Yt;K0qEp+lK;* z9@Kjn>TOLUhoRon6KU2kOrE!B%`o{GL<(V-eg9U)Ie6N8t5{1T?b7APJ4la{aYKjv zQ`_{gZ9A$lA{6ik_psswCrKEK}%p80?7$+ zWZLrNw(r(EJAtlAdxgj>K0c6UkCyFf_0p$2Q>J&%Dn34uDo4xuF`SnjPcJ&%Zrr^4 zWK+$9nFTl@_=Jq2$@BOr+WM{>Cfk6Vl<;=1RI=cXxbRN z;NYoaWDgsE*Wg3Pc}(!5``3J~=SD?g#GmDT4rm z;AR69q0YdQ!L{GHcij95fc%P)Pg8N(a+R_2K~@e0)$(r^`T6L18uGDf3XxF^KN-bG zsYo&>QN#q9(DlRtjIolG7l%d}m`dZlL0&B0SE14*q_Me!?Fl>ZZ7$Lpa2iOB)5;D| z-UQhryeE3ic^|}+>tSK$!T~xtK@RTpRXg{&lao(#TgiFnu*%yE;52nB@=jY;a+U$! zh`r(%Gz5TwKuRM#cj3L(0Scca*XnPNg_6QD zNp>gyY`Ix`Xe{l>mZ^0%bN|6gd|+u~_Ij#qF*R+gMroC8T`7~LpJwtK!{PU$=#d=R zR$KhMYgP^ziQ0iShj;DNXu4S-J82WgP>U(pKV^)e$y4NVt=$-^Hx-gq z${1IlsZjB%p^Icb8mp9cEUR!u{7{iT$3O;uAJLbfm=EcWzA#f;CxVB^#zAzp5+7Np-hA#_b%b=*G4m zSA&G+c>)<`N9$|_;<{C+LeAp%{b#c;7j?<6L*LGl1A+%Z%`jExL-vNBcStX4l4q_< z&bMVGV#MXXEiQ+Q-mIoVj?!YXZ}K&5Z+~o`eZ(ea<@cE`>V!VivZ2Px@2K zb0W0m=V<gt^uZqKl>W<`LCGwm;{GHkBMVzAFRfv5Vmt=iKf@2Cg z?mi70Z|E`#aHoShTSg?X>gU^A7FDaI8Ikz7uiVHH;DXg1{5%I`SzSt-EuGp26|{1; z%u0O=>H@aHMq()bfjPmflb=stGlMM%bqju*F_q2WGhRz9TV?YFnzHL*OhfyAfrTx^5B=Kb3Da|OJ3^FYg3xyqAK%MQTs`sQcDnYij1IqC zRC2c3xRPSGsK7Vot+TgNy~VPj7(Be>^ie}OYF{i{Mt`(}*)7!ZKpV~k9XiQqgVyAX zKdoIXBcgycj&(GzsXNw$YCogDi>XzXMQ0Xci!<_Jy0ch@!z0tcTr9O*BI5@yLi>DY zCbMNet->$_0A56e%H^@yg+>es0Bp0my!iNTeBYA^++=EM9V6e~+(6_zj(mJIp^+E5 zh9%e})_;U{EP*C8CyQDv#newLpp2z*sHs7wCnjUzFrP^JWvPtOzRIGIV%gdqP+@E_ z2)_-zUhJOQjqlUK86hVRh8ZCz>lvolk>U4E-2WO-0o34~*i4|zpeKk4xg19FJX8~2 zwHohv*o@2G&fXvkRYe1TFs!&CMf=0U$>%u{6FdaqX~yhj^`jD!ju08KG`?7lr)I5G zUp6MYnNtm8GDpT}gemKdjA1h(VynqvutX^zL=MB2Q6L!Hok#QuYhVP zDwCG1#QON7ldi3V{B_Y;avDxPnA>?5OVuS8`O36+vGd4vQpyJy>|;*q@c|~)Zl`PV z2jJA&b$?p@p-j@j)r+X|L+R3-^3BwSL^w;Ap&aJv`xzcA%elyKKwZgP3p;MQK0!H~bMf7_@gh&&Tg8;T4EqE~6LM z$UaZ=oGb&EehczH{zjIR%^ZII&|E(6{!JB7iSZ0L;%1r4xnuk&ik+6W0b z4CPoR<1qsltY}wCOE$_;ZSzWtkDmZzY5;1kV=6v=z#}EMO-JxLNL!r$B6d6%obDYD 
zJ3j`O!ZGdoaZ{x4NryM?&6HrDF0}j;*;Rdn2A&mlgWF(kyH{q53UEW3Q1GX6VT9q3 z>yZTR#jLqN&CiL5R=gnAj9H_PgY&Ux@ot6s?vGj16%K<1l>4Pjim92^9sS^5G#wUb z!XtzcQ}~&`P$g!p$rp#Dl9j@`Fm?Gn5WB?V^y0S6xZ~fj%I#Au!TSv7mq-qGP&3@k zKkyFnQRvntY3X5|U2^s$@c2!ps?OTt#-on$`~-O9c_lsa^og0f>3E%{7F=Z`AoNeFu?mM}g(z=LzUgjbtya@jH)7~tNW zsK!&MUGd~f75iyn!ytM$R~#JcF%Sc9my~6!pD8}n40#irCk1aRzCkB=(w{-yMqNm& z6=2UYDoX@+Ve5k#5NlvVvHe+3m1ovqz!JTO;+m!-!BDkaXtE#?5jOjhw%q)9?FC0 zKnQ${Pa-zyY$;lWqv*zW3?hGLg5cfsd*1XrHPT@jTI@~7PV#PkSP8uRdLB)ATMHxG z76?VXN~r4=*;T9l$~9w) zr@WwP?VfUV+AUK|+V)Z!vk#W!pG#fy_sJJc+Vm$V@DSvkLtCiRA$+Lb;>tP%6SMaF z7Mfa#)~;-E?WmN8RjtKlT69bf)~Y_Eo5y5o^Eh_HfL_Ak2_s!TIuR&{R~HPAQ_DaX z4$wv8Ax0e3oG_5;=k)Y(`7@N~h)#RQHTZ;Ftu`2Bpc3Ac#?zz@!&3nhWlseas7nP? ztzGO<8+7?bc5Y>V&^^7q9+T57xO;H8n+ltM23G>K%(Kag*8lOUI}+K;wCNl9WNN4j z1PrbJ0|3Sf_h?qt^E_F($vuA*WpX}WH}(itgE&-CfHC1b^EXkSQ(y=V1`;cSlb>*_ z=*Lf}B5u@&>z0wGs!!tS9rI6+&c9dT$xf9!yD{@Pk&lC4hdBK`q;vig@bYI(0)xG> zJJc3{iRUiAk3ttP*()0A4e8=38Jk?ZY0KH)L8B*8=r#c7isFC+UN*Qhywni#{KTRG zdl31!dYzUZs=oe@m7HcP*0817B?-&Ce2_I0OFsi&D`vy+>)IBuW=xwX6lyXr<#|D17 z)KHZ8BQADZ&_^`(uL%0azmHP#`NJO zK-_`KF3GVhn>Pc;bBoYGxi@MZ^!|5cb1O~1ER*6&J&S7z#@pe9I8XyrmdWEB@R@v1 z#x~CnaKptU#+G)ke9l3h{+)b&o`SC+G~6dZb0l@SBKyXSVZsNq8)-}cO&!fo?2ixJ z@(d=&_;u~LBFCGIsSmfkPTzxPg)E}U-^*G0j)$Qo8t}c0r1&3XgOJk;x12qX#=Cpw zz=?&fp+A7>gqiRG$5d6Qjf}}N4DTv!h(t(*Q`clmR_Oc$wSL?}YW)*zfL9mL7eB%7 zlfHl&UYBDMcQ4p-b_?otqTV`;JYCA?=WSQY z4T+%T@v!|B+@g|S(1(pgfBu3#WDWH8;r)N254rUBK4g>Y%s=7v7WUEnzxTn6jGjJp z_#J)dN8^4+ANI_r`M;wNbLLa@A955Tt0iBT576d6WQXJlsDe*kc>D1@-nkz<(%ycI zLRwYP?eF3m&oKrA@i>g9NqyVs38Z(pqcNN9g6Wfio_AVc3|1%=hnoH9jX!0dW^b-_ z&%BXX>3hMh!2(1nvepz$xj`0BEo-sN7mr5#^Y0#_dVk4`w)T$qacM-j*$qK09yK&a z&41(4`5rEfJVo|X7F=-TM;HE*DXF3BfG;p?D2%|cM~J%&dySW2kIH2$*J3Zj1|1p< zdkyuu1%|zXy9=emDt;n7&^qHC3>yK--^Fj{oy>Sw;JwiJ21TGZ9gF%pq;nqr&J27T zY)*LOdKvgqq;r1OLomTG!(I+tc^J6aNXW{ktG8rg;=60yjGL7bf1m?gaeZ{TCgUzz zL#=PiGui~A@H=43o734*KAa}qk&#-?2%2+84)Pt||8EN1YS~P?l};_HWtKK7ofcKg zH`Jtg3FcR|g!Fl9%{Far`n<#D9@^A&SF~z|75x4FG+r}b)2{cY3=_Uz>F=6hGXJV- 
zWqn{7c~9RebcZkGUhm_b;0eL)qx|w|6TW zaUTLJ8_%5+{=VLrn->kuU?ltfH4DzW2Lzjo(={e$TJ*dq<7mzt%QqwBUNw zNdo;q99ZlqO%6@h9Cj0Yv7xm_APJ}z*b$xcY2b%w|BWE?Ait&&mZ~>05u&Qvbvei! zqV`|3>YjQg3kW|AOtMgS{QqO`O`xMHws7H7r%!iPLK2dYfdmq|I|C5H5QH#8m~z;GEE#3LaS>kz~nsc@hl6~tR--$1Zava>m+r2x_BfTG9j!XYI9m=y&t zYKTd6SB6-9!l15nbMkSp?r*@zaoqu*CtFb&Jb9J$Nm`*hme(u25exLTjTIyImn~N* zgv`2>u+;?rbmk6|{)c>WnO3tgts_*oAetF2=TwZP@6^K@#ubBIW!4RXS$8Wpl)_lq zCYX0(HDyLHuj6|av9x{MmF<&X1@rn1%a8=%7g(6JM47ADzyvKo1-l_&S-|l`pb*HG zOod#s5MGb{ByAzQ)nnvVLw=#u+y?Ao)RYS|`U7f%6l%d?i~?#{sK9@Lz#}PN1E`&| z5i^wd*<}!MdAk8wE_>JJtzsz1DOI^1qmTar4*`+%-mDKST#o9fiSOlywRzKk7xkhD zO64y4t2_{ju1t4#1Ys} z5rBppVArccR9Av^xm)M~O9J%v4F1mI?_9Bxeo;H0ZtSxcadf?c^0Wal)KkOVw~lBd z4uYqotJJC1h0g;-Un8%6Q7dF_NseMA*lKq`h7b#-QhRU@y7dRxz4prJ;*`mO$w@$89bV?4Xdb0NGA^N~ku^U&-$z1AJ>4;g9;fL8;* zF%=ou4N41H)*qRW-Rglck*B%yF2g-@8z2F!AxguXz!n$^OlKj9X22U4w!xD*d!ccq z=mNaNP3~@<#c%yO0HmM7Z<BgLQH@?1FM1G z%-LOsz7W4H?)0_zty{OE%#V7Ro_d*l{dW)j_qX^>fKJ4O6K@y~1SxO>`UUc8y6gH3 zzX7~LG7RQ}4~RyPl>vl8;rs-`6&+)~anmzNIMDREU zqvpI_n?aQQPC&Kxjxsk^rbhBdaOQhAl6PwC`7jx1;dJG4hbxCyw-+YcQB#JhtxPe) zTKGUDZwfVRK$*E#ccNb;Q5c^Fj06}t_rSocLkEa^+%7U$L;2$(vYCpqhvdsSfFl^` zN~Xf-X50yKVLd*bJtcjue0Lk1l}eNZHE{k|6Pt;WH6S@Kfd)YFx`)z}Cnranl<~!6 z%S`!9)X4V69i3JgX5;PsQU3z!gx+^}2_b8_~k8*l( zGGbf9U?9$5TrOwZc$ECDRD_d{wA^XqzM<&ehkExsnisV zU_f885!LG>{gyJfzPu5|+p%ETGMe{vYgb_PsW+U9hI9Bi{6=VBjIN5&T^Nngecd=D zYu;_wg2WM~EW+~-Y;=1ojg?Ibf8ui(R=5IfH{CfuP~;G*E9AQ5s*9Lgmq=7kWSQsnDEwaYC(Cem8jx4N0RwMXUJR4Zdg zK3&5->`v9Mbm;rZ(+29=2{QvvJGCdQlYhkU$HFhV8{6g1Bw(nqyW?e_SRNO60;Fpy zs4#KqIam@so69+|e5m?yH(43Wn>QXc6h|wp{q4B_Z9E9k;3)tA?|bo1v|u3KpFsMb zAVNE2_xik5K*o>v?Jj1a-x2b)`utJzE(|A=6P`EMk{9dq*4p{o?&Fo?FHOi^4YJ&Mo0iVMP<mupzcK-I8u2*>%CXx*gPSo<9%;nG)q&&W z%|?8k`WdQ<=dZ&cu6JWTJY-B)&|F87PJOQF2vnh^PmvoMqsRwxKokB#HJ35DrwQ-n zO|24vxu$6M089w6nCd;zI&G6?+yBDxKI0`Dc^l*R62g z`XecSZz~QXo@#z<-P-bKzeHd>DI1yr+C~%P)V4T3^aWJoZ_8b6dA~?y;<_?B^l>}k zJwSB5S)~mCS*roG8eRkE+9A*ms7V{0novY)(#)B9Q+)3=%v_`SDUxU{K*~pd{tfAQL3H!tJQPln$BE!64rI*Lp{GS-H{Ju 
z_Uppm^Y-@3t1vlFXn9rgW*0Eb2Tl3mU3n!-B6B&#Fxdu1y_24s2 z-)i5=(4M?y#NU0r6Xp>3I-bTaRN`W6A?NnwKI)@QFKR?t(AUivbmt272UQTOKc{3e5v#R4UCl{GGc z?#*L$lM+g!Pv}$1+xvF@tG)R;R-5Yc*qivfGo6s&F3LM;^`T7qDss6_>4NiDJqk`< z5K7?=9}B#P!}6Vv@!=-+xZKpAcdfgw^@_s`t4{?JRU>%?h@bJ>zs-unx+7%yaENZZ z58zQDv1N7VJL<!j=2}Ec z&qJMU!#8Bh3lZL~fxL+pNWg+CzBe-Sm*xjP1)3U88KllHf;~L-h6>RbSeE9;J;V2! z4Ex`77YD)fexCOU-~G7}+^N!Kf#8X@7F?N&?ZtP19Q!=)kvPyV2lzNfvpI)rW@j>< zBk@eta&C{(>^L`&TEy-gq1$wkIp?fwe{EKkJpDXx>s8R7#M~n^eJ%MB=92D9`9W{J z-%uW|t-gi9UZVx)=MLp>v+Uj^_H!$?$s1t|R>>p)FxgM3Fgv%HZ4}=X5k~09NCBhs z!1d+OhV6D_x{+m^A;`w}DU9D!`2Kl(OQ&E38#FEKJh$Z~orH;TIPNMoM?r-(!Y`+1 z#b6Z9ff!8Du@Y)r95NUh8bMf#yKtHFfbPUXBDWKWw@~RSF@OG+VWw%v$kE1}mm-fj zxZS)D5|?ZGD$xBeqn$k7q_tcp%id5MoAVOa9lnLk2tADR8Yf?49*HDF@i=oBw?mVt zUklqyt{ujsV4iby7}qWD?wjYJKVe2phVYtZmJH`*{aylyvto_F36XR`>;uL+8$sQ2 zT~_A0fCDHTauhZ-#^TJ?ERa7S!j`$Z;Gc}qFk(en<}6+CPZY?*HvX-#jtp?|TI`D6 zWzyQnUM^nF=Hxd_=07pNo-NlMCYu?;>259aFQ@Qbrtp=EFk!la+NNJgJs#ssiWtWo zd1Mq14|#J&K^diyu5t>VHhhdcztONsw944gyq|h&INj|NBIl3h(Q5pg^6SxjPW(iO zND{#ym0`{-;A+O=ox%=K9dzRI=&G(m?Iis#nS*3{8cz$j08(3T$~i-v--S|j;Uy#H?gy)n7jdhA{2BX3+X?#N6aaVhR- z(;ZxNd)LBoyoG9dB>(C-ZeeP`oAT~>5YLIjfkJHjNL1$AJ85Dz0!oG@U1bT1I5@7i$|@Z$>eJ z3kiiuQ4N54%kMLJ^aJYdSbf*8CZvTBG$tEZZPkUeYcfeUt~eBpj4#P%PQ^QjFNA1c z-ZL%ruO_4==?OJQNNZSELb>NQE4Av@m5TN`<+;y9-h*ZF7ueI%nZ;jfY*>66ouxZo$RY&)dS{w5V<;vQ!jLku{OpP-thq8G zn}?33t+^6gM2_pe?J2y9ab~@5lS22Bz--P4l8Ae+hn5FqQ4>>O8~mh0Lf$JJ*cF4*6GwQe)lMP3x^3!r7@u0vE2?4BHy&)do1=Z^!6slCVbi}Y_`av08|`FX z4$ow5WJE5X7asBfp*mw1H}=!6V>kuY($k<8{vXHrkE$7VS1KAdN zU?#8cO`B_lzM)F*mv|o!!lu2XBkUJ1{eK@}t$&NKm>^=H=wE))Y~Ef~ZH;Bt%lt?6 za(sTr%J<&nu_;YZZX@;CaR^@CMsyC!C;Kr9v_yQ@aqx9-O{x~D1KumRU*{>>mQ}DB zHt=^Y^juMPWsxBCdm1OOS%r8Kp9X%p$s!&ZZbm=IBy2t9V~Zzg-!5$FtwLqWB0eX0 z24L}wUie(2e}|;)L*7&NdJ6}UIuqsKxA;VLFdY2ef|@I=tGxRb_^sB`wwTun|9wS4 z*&*|x#*?vi4oo)8>1aPTH<0}nL)5akpkVnFXPr6-ZA^(3pnWPq+gL*70Y(`8BqnPo5T;S(bS@7z)`FZ0a?YB zYg_2|Z6nIycKQue+NbAhY!5wQj=oz@x3CgC^&z{<8C*z7{?(=^3HPKbxSWG7<%kJ1 
zl(uvw>g51f$$PE8NXPWaXutu!0mgX?4!a`^(v_%ID71wUMnqf2TX?0yN4_08K1z08 z#%uYHM&C(pP2nr6l+iPmEazaTP_W);UkEgjLcypgnBD$NhMQ)v83HjJiyd<7GM->^ z;KtCZWjw4URa;!usiaC7HDe3VdKAW!)Ek?S#^64zbEk;&eFo|SGTZsCR^I-Tw8uys zwd%^u<=lVJE2tw1`{t{7nn*L@eAPS>=%U^j8C@uH5&zj#G%o^@1m`?6G!%uwO8G9u ziLX$OAS8Xr{!FO8fVM0hN3IVYvTQkTTnkW6DMWZg&lwsY$$D?|U__5;`8EWYqveFR zc~4wm{=?gRP(W%jZt3(_GAMp7gHm>sjChC7GgFVsbchSz;l1F+@AWRX#XM?OFyr0c zrIQ#f7K2%&v=i?T0{6k8r?-6kUH-0Z6NKP}!g+BQr1w;i3RSz4*8ETDy8=Y{oLsm9 zGC6{m5P;}*1tryCz2z6kVf*v~tf4%-0%GosGGHatZe9Vh<4WE!xHr&3aTJ>9^HBHa zAIOC(d9AROXdew1k>Z=OnLB%#T)UDF_ZZt85YrqQ5|ZZ`SFazzS@5u*e03FX z;&%v(hK!Yy5Vi2jv2yb&9_RbPSPU10JQfo2$n`<6n(}{-i-2iJ|Ap`xNJBjE3YpLn zchBt%vS>2tJ!r?jaW9BpK}hro2C030mQJ99WX^lMnP0cT>&kF@i5?1(UBJ~!JkGWq z_(#oMt6Et?c_#Gy(s`P(%@$gJ=cU*Ce026pvyp@zbV0TiP*i|j8B37DS{F;9@NuLo zUJPyvd8OxQ2}2omR)3n=$$LxLqJQV{%^5om#|PB)4#s~`jG=0xpfjMRhHs#=Oe4yA z#Lp3;lgVRxMU|QZM`nU8QhI&B;Y0kSZ1Mqwm4372C_G2Ca>E*qy6h;n$+9WS1GBXn z%-?`IR1%C3WuK=f$j`UH38Lh8K~WUt;EY2Q1ORm$JO|M{${Yllpo1J8k`D(;zYlqe z`o|bK^+QnO$QXJ0L!Q{OyyN{HV($eLBMN20teHS!>GY&PT=VdW>V5g-M|==m{y+T) zD$^sf=p%IOizxU}aC=x^3=SbIG59nfo_VU24L|0?nO(m2F>e%2wjRyMj{v8W5S{BL zL86LVOIUAW)=5Vw%RlBxZV@A6B-4n9xTPzoDb7lY3ws4bZZ=%5L{_v1l5MinCp=Ux zY0Uj>$}@=Hi+X7&J7fAAjVUOzBHCaK+BG}6A<_#)Q0-!mzaVj;~-~2A4kfBPkHE&k&vF>!u-%jv^A+1 zzlQq9QTW)FLAF0pn3pK$W((zi4JUT`ob021!?zSB%Y#RB3Vs5+>@PSKTbIlCKIKi! z^zGaErJwTps`|`J^6o$Q%)ZJ@!tto0KU_A{YdAd9LOOvX4gI@9I0Ad5%k;$7r9Z(1 zdCa)l1CJemw{o<5(ZP=*$V~iG7kT4zUN81w?n+(ur)&CS3&UGQz-4AuiKCE1? 
znX=;-kTUMJlkdOfBPl%Up>F@V}ZRZAd^{9t|^_B;|0S^8SAPB-w`O<9Cd)EltaCqz zkjfJ9z8}PiVjU7+6;zWtXJUe$uqdmEC#_O)P4Z9|^88ygs-P?yGh%n`vk;ZZw;EKY zel_e++*C%d3s~Y))8XyrJXD;ip3LV_P1FJF%7`lT_cf>jx!+KG@uv#Pe9?Aws?gL= zev}WT+-uqS+w*zv2h6zc#U}lM{}*GEk}at8M?4kT`9uDQ`u<2c_(zbVBlYB#A0fzj zuZ_IJF0U&bp3e~@IJ1U@x z$^a>ig3S3w1o^&;Pc?Tn%MCVcEe{H7cd|hD!wDc1#eVKF%x@{rt>um4M?Jpc9_b;n zL*$*X_U9V3n(s3V*JrpnnVc2~7}emY*|F%`rrFg^5py*Jy{%@6=uwp8@^13!_}_?k zyNHagQyr!n62V(UwkPt}5zi?DF)Jc!8mEkKod6i83jH_i(W20;x|9yqqp_Y3sS-Uw zAnMZ-vCCudoJ5-$e%(R!E zCE2Fqhieg!)2)?XU@=2{V=2_=Apz2`h$#T*L{v^2Oq7A%G zOWq#JPOLZv<=JpOF1@l;Rr2<@FgSJ$mI@p^3A}_+18naAG}3>RVZZPzzEg;e*9vu3 zZHo-s$QPQDxl4Yrk&lk)qvq`QcBWctI7eAX(&mEM!@lHWnuc*Uqe?r#ERr2I@u&_X z@$)}Ha15;z6l6m{5YUcHP|&@0Ve;KgyldN1&??2T3CH?U^S)$oqc{U8&=2X%Jz8YW z3hm1>V!E)UU~TxqJZmoo<(Qug-wbA8HS#X&O9@`q@vg@#hR^aJ@C84DGfJwfplmOm zBkSwxG5sn4Y}-p&?`tb_>KE(aVRSLFN1QmKVAdIdH=($80WuIZ|m6Fo)8-v0{l3UNXzbA|6unjM+HAeS-WbXQ}L3#Ov2D)tmPm^$xm= zNOu*?UBw=`5C=MaDN#&&_xbL1H2QbvySct}zKi@9!N=+$ZovmgQY$w}>P95yu0fKT z(eEmQB(>Ckn^+h9sj-fFIWy~^KP~JLJc;}eNwNS+oh((8bM!Xj(jD4Mi1dV@TN~Y^ zC$k#qfv?8jUZrpo1}!pkD{t1m1Y=F|4KU8Tn+)mZij8GK0Pil5_EuRsV`=qne_j z%4nq5gs<0Z_o`cyBqk&|A@(e1agMx|Y&N7OHFtQ~}6&&JD`ZM1V-t)(`t*GQU z>?2>pk#-v>OjnrQrr^G>0atB=#qq>Y{HSBM%B-4Ir-Ly;%7O_fA15d5;5&k8xSIM> zK=!xe!&2!yA#d~zp$p)rkZBC60?Igx5)E4WfyEfnZBYsR{xt${CNdOEJdDm{O)!V*vHhJ!08xHhiavIKJ%!|AFT+ zJc($Mid%8+#NQ4O$u0OR(fcE*>B3uA8YmH+&#R7FQMXtVyL=j zz5kv}RU%#P{LL#tKA&g`_JLjl47`eIPVkXgrLbD-BNvzQj~^uj8j3DPIs0Q?wwqyl ztq^t`JpZ_vq6^T}57zVh>dS+$h&i7|S%lS}D z-NzSulkJ8nfCVB{`SyK0L~WWbEBC?L3h&Sg&8RMdD+4 zqm;^Ktr5Pq(CMWu`sA0|a?PWf4_O2SjzCx!@Jx!ZW#D#>QcSi^0M@%~43G!51RPZ$ zl&D|Zpl!(9jJ62@e$gO`asHwaa?q9rQwyD4Z;avh)UnBBg9v7mQAS>BZ>g1iKg-Ur zI|g#4oSKYR;1=*FaH_M*NWY~=vr+&8x`rOygrb(pQ^angN!o2`J=ltbyXgu@{*rFv zbf%vG@4J>!BwYsl-RQHiaVC9CjGPA$^b;_y$I!8&4L%$6e5#bJBV_soHA4QdiPs1y zSI|A22BG3E-GlvVlN@k_*Xcx{k#xs~b!31Au9}KshG|r~1GMTb(a2G2G?GYPRB%2x z-v3$1`tM-5!?5QaY>hdnUU4i`6~_c%%yo>Pty;UqFt9HLVt`c5Qom+_I>g?YN*r8fMxczdr8KxGx@X>Fg}RL&;_KZ7Wa 
z)zxM>cgJgIB{L&RnDZzf-k}>r=1b;*V}ckRx((2nWK72hjW{inbF7v|Oa=U2AOQ#0 zyg?&2l*E>uCxe2>+$67hGOvZ%B4dxiaD28La13T*v*p5LkUh@M|LPc|@9mdjzakkl zje%#}DD5`P9vx6Zyx{_NIA&w4-_WRxg8c(0hm!5VFdbV1{t@OgmS;$qXsmA1agku5 zkX8a{lL|3q2b5O>l)_Yc+LXe{^h7vr5@nKNgI4nfl%E0HCP?No8sR<7MfyD^e;Q^E zi}=NLT-yMv65=e!>4lU9D^bYe0BRZx@5ST1wO1XG0l1Yc2$Knwytd}a4a&xMz1@{x z!x|Z-SFU2la7H9+MVuDuVbeCz7nnv0)G@-9gQ-q@l8ADwo`yOokup6QQFu2nTVbQ`qSt7c*zw>XKw8HQX%QeD z4{jt_mK^7GvOfY)G)jJYwk_jlWCI8t(`m8C08rruj5@(k=Q4a_c6VwCei!4=+ze+F z6Ux(W*cbTt^{WMC4XQB97>pKpAB=Y!fPVzQQzx;)0)cSZjCx9)prvcVXi%r_bqIwv z10W9yQC_kS>9q{jj3g;D0DQ6THaA1tfdy5B_nq2|%&kB%hp%Hsrk0sGEofRM#p>RM z{O!n}u1(L(^2y4~4r-dMuSc?w@qbpSs6>Gv(1Tld44jEaLu0B$UD^hBhU_fEV@O(b@z& zsSM33^^fvAlS&}bj1x#SxhDYD(5kBNy{dck6Y%kH0D(Y+Fky|-j*w7Z_nq%`lKXU+ zjj^H)hL#@PG0D=G>dWnAv^7VYqffF!u!}yl99f}13C58g?*L^OAs3$HE!Ab3{Ou&K zYqM%5aJ*hxRG1163ZH1znX8A0Qei(7?G)NkYFXM(@#ZzgVJ}E_oxtKACto&eDEg2NnW_`tIOVX=)zdD|=kz z!+H@9L8AD&C>!fMikNZO&q1Zg=CmHP{Fw3#LQ`UzW1Yh^{rrwik-npyipm|H04l!V zYHcYl@dj$&RpXWEOKg80;s$so@p?IY%& zWE!zfjam1~nV0!@>Xt;=_6q7)n<&R#K^^~0l&@cbmg%KZdG!h(>P0;!np!NM|C3Kr zU5PU5Dvy=d|Abw@Q;E{@7ruEsQO5oSci7H}a_Cvp@ zKG%4nO!`3c&kh2tWFSy@H`Ly8dovujx+< zn}?^F$nqVx=Q21&Fi*`?WPR8h@P**ZgYVHl*ESTPOM<4Lj zH^5b$zin90Lbrbd=i7AY`>yts*RM#`4cQm5jkPFhn4Eu;`>-Op>?VKNYbj3bDT7r6 zE`RMSm>(wH;^Tv<^=HxgRyr9oOW@N$$|AYx7XJocW!&Zk9gCA~=%lG=L&Sy|g>Kfe z`)&*Z_AjDbW!SW5VE~Cj6S9F+=z<2v9TS_kTTZ>hw+tu(_rEn6OGlTaZ8aAslO!#g zjezWKWD1r>Iu6T@(H6y#W>F#AGeNdTZ86lzK>e@pdQif)MJ~L{A2DA;G6~nPU?E&< zviL?pSy3{~I^k7oh@iHi)~yhrrRyiAw*Vz^D_S@&@zu-sDqNf?9DW23Z5t$q8vD`W)50G5Je9z}eTJ36 zoY7hZo>9)N68=0fwtnu+w~9ivwN^(u&+vM9M{MgmvWH1@G~ogk`Lang9Iz5o3Kc=Ht(38*OQ&0fN~)pV}lH$r3hQn95*NZ@v|~f6GKh7WJO9% zL?qHBFm#+ExMQ3*RU+3!1V09Wp`+s-2;y|+5@g=PX`qtRnrIR_t+`=r<-YvKWOHuU z9@8F^P0gaWiGEBsi=hjFnLB1mkP=bz2_eYARKn~?%a1(+q9fxI{ zefL%`Pn%%%u`#Ai9^+yfu*F)@;*K09M4}0bD*3h$%k&?uyo62v@vIldnzBCj5@Tvn zy_>;*{pzDE?FE-l!m#?@VuBi;AeVcKaJ^){w-})Rs8K_74a}#Dp*6|Sy1F2ueh!$2Lm?|(v4)zhTYC2q4e1yFt-7Gi$ 
zE;N969w5^xL<_5xr85EUh2khrMVi4h81Y{HiHd@0=$s}aeMLS?lY4!|BY1fGiKp6( zCK(cBCQ#s4xGapKrLM&a`W#0)+UM-f25>@Ys70ZYpRhODoMx7UeJ`gs$5JiYVRWH-ezz zinFcRj;)Xg71=bdd9tAFYrP)E^6{i}GOV$Il&9_~C<{V+RF1f!3gGX2q|tMT{Q15&PNRUM&0&?lLM~N)goft5JO&2YD1wW z&_VWxwqAiFh3z+jjT(-Y=8-{VQ7hzvF#?%;_3&_S!a zGiPzo_jIx=(o~TAgzMB%8wM?oE@pM;!To&n4@kJGvT1r}Gt72uggXcj9IOwe8lCoZ zm^x3L-rkOg4=z;1W=w-Z#xJALSAFDRZEzRy-Byk;2h-)kTH+4W0&L5Gk>I^ixtZ!7 z6)Z>^=E3gajl!snMNOAe<+K1X)6}`fI$0GU8ms0Q*(6XzhQ#E0_CCWF&kAPqH-ys- z6e+gVFvcKDB0{hp0IwG&+u@33`@K9~8tacjPI@~~Sk-_%GP0Hk_Zn5hutK^d*+>yB zlWU1~=Ke^d%Uf}cZ!M9idgsWSwM29EPPS|kBvRC_gK&4BXsgEO$X!8Vh$-27oopH` zmgBCJEx{sE?U*A^2a8sCwTFn8th`Qk;SL)8v z)#Hv~vND{gyDCI_t80M5%_&>nH+tKHP;BP)anJuXP zDtLpUYX-k`1(un#=+5oRUmYqoGt=*0>+)X=6CaqWtHF(kJhf#~gowBwmghey!xMNl ze~zy(o1Q3N$MO4LE`LQ@4^Pk?9|>)|w~UJvM;`*Ac0JMge$bt2{wDwKlH1j8cL_2j z^Z1Gwm)x3(;`fj&5l%@FR&h(6I3qvYCT_w*hp)>IjTX`BLyaRM(f_6X>p_S5)EEB} zbf|9w@jn%GDEM=C|B0{jxPnxRJ=EjxVh<%X6sw?LKh{u4He61N6Dj5#vw`<@8l2Jz zfXYtlPu#gl6bYo9E}~|@A`L4m`!Tnuj|9hZU?Y(jynT4y{@rf8o_EPOr0kU?A)=P% z8cw!Q36>yp6SU&C4Q?dLTbyjM?%_KkVY{jnwbWY9-wjHTe%U}^kLkxd+X~9uk-U6l z;c3MEi*76)%Z3)4EIF?L<*yOYAg8zJ3393fPvjJ~Q$)##G#+>DwbbiZP6=X`-{uS|*adKPnIuF` zOYtF9N?p*?(8wCkI<<;h5 znyErvCnr538vA}yx1g-o9Xs1~L1ndN!V996tawEDn-($SzDeodLI_iHR8XgdX!d}a zmS=5`Z~6arZ8!d(THE(7cJ`4;iQ?8n%c@ta|Js^1F0Kx3p!(B1V;k0=CjF@+DXbR> zNy)ytX`kvy`etj9=$qxZkEHEnX=~BIN7o&U|e^hV}DY2iA#D`SrJ){JpE;|1pQa**QS3^q23sP+-2C9pC%fao$ zRMR#ELG#;*1a(2URNIT8Ug=e9tKadJ*&g^8igA3oy-2pi68~A1f4RNDr2-*S$sr8K z@e@qv*A!*!_8$;jy`vbSzBfhQ>?pnt+&0B<8uQ1tzV8GkaEe^lNyMosQ{?GRVufbu zrO#(jeqNIBQPmmA`K!B#BPMmqaQS)}$gc;8pYX-lf#PxP83=ads*%vuo*XFR#W#?=7lO5rbq9&r zrf2kK>SELK$}buuo>l!6CF>9K1T*oJA&%Sm%?FD_SsNA+4T?rA3P)9 zd`e7*52oMKqAMN)pB8VbJI3Z$J}sK4b?K80Pp>4&Bowz;&se*v6E{jdg=IOu1q;vb z^Q@T1@ZH|$guUOy>uV1y%3k7PO9+uy0ZYc{Y5Vl3)s*%=(njiO`}KQvh-?QPrxj9~ z6^8HnVG)qsl;f`K@x17v`W~0BKQHQ-21Cx0hg(Ty;qxMiU6gF7unHuSiLN2;$(iNl zE&B`=v5|dIM=9E(mHH{__AIJ$f~SFeeJEH(({*`uDEjzofQ)@X^fP^Pb*+57L=BU; 
z@Bz2t07Ba8=xBhT7~vKnPre|cBkR+up_8mTWSW8+U04tYD#0j^$&Yi0MAmm8ny34r zNnl`t28vEcPa{orTJYP94TfaDsxJE_LKEE?vk5OU8@$N&kU>Ivn%4A3!jTP@MTO@{ z8!Q}=;#5J|t`pEXLRP&C@r!oXWD9MW4+cr`r`Kf1VZzR?OXo1r$o2#pqecgDNBI+| zkmO8sXixxYKU91Gy$X`+hlvqd?2iVH%B11qqVMNFAQN2hFwz9c&T*_Bi6NxJCF-g; zbY{84E`+5WkSyw{hpx(r$>=TaWlk2Y!k_TZsUQ`EN6MUo=51(p+7I$(vKYm(Wpav` z#5Uv~NDTuyLAbrGEUUOi2^X z{o3NFW=ChY05e8hksqarF6!Ms<>@prFoM-vSLXHnFwkrWw{ckQ9=@(@V<4&TE3`iH znK1x%Vr@BX4EX<1wdL|LA~wKUNxm&KmR4=*2^*^`OU8&=;iEBb8?oJ!#BwuQ(rZLV zCRpX&F~Y94{!`k=ieW+W&$WkRkspn+RA#z$U71|)v<2$=v7%S!8Hh<$A6X|*e4+@dt52-g@QE~MOEt}YkA5doRZByZwvduVAk9{bgp~n=N zF;47L$_8Vx1%KsBx%16GZt92pipsu!Z zceRU5m?&zo9NB%MxDarVB9b|Ku>Jb8JKHOB53+i)ytdgYb0&$_>co-q$4R0o8<>B3 zl6a3jKLrz20?>%ZI1cFB_!zPaiq;y7pc{W3q4#Jun-&7fMT2%J&RLoR+Qk$*Py=FW zGRMoLe6W9XETxmS3w3Rmr=|!i>!0sERfIG3+9etLqG%rS>IkD}c96ef3zX-Oe*d$E z9Q&e}s4l%I`=xq?%iAxCPXcRRrrA`A_M-c=K5JZ--=&Me*`GX%wr+!T3|AP*?sQT@ za>yt}@#H0A?NOfRJst2=f6sX6ecD6s(;s@zy#Jo9H$LNjg8rWM(0len?~aE6az6Av z?4kGJ552qG?}{S5r&SvS5HS;-kv#)J5o&7OSEU?|Syze$6;p}U^@>zX3i7ZESNye5Fy@Gpnpw6bLjTY^D{)O-rQNtE0LpaaXh z7+Bv+%K#kxa>X$7Ai$9(|CuWWg%GF`m~P2U3alT7=us$&Vv#e&cJmW@u}d?>%P9mZ zf-m+ydtPG~tJI<@{|%wkGXh%)aQ4+8%%!euN}jGpfw&r7KtMUH2gQFRy5OjwF1SDu zRp&{~1j`i7c?uy`Dzulv?uAVCn-Hmf@~pi3k_b`Z+8i=V3^6~cLosERm_AU4;<{2q zP*|9yw}!AAmd?)TP08=5MoSL8rT^4wG+K>&pv71kgVQdPK7J-FXk!db?37Dmxk}!C z1AXM2*FUvj1ZwOB5%oVb65>b*Knj)KI&!;tdE(!<(5~( zRIR~q@L`S`@DzCZRpHYy^c)d8TgFKo?p2`D84LOa;##458$(U=&-<>4(7Wk|HY5im z!r1IP`QEGIQ4<=GI~QW}6!S#Gh`zL68WB!cQfI+|uA(Ql;rIMyzj>lg4ICoLi8ZYy zKuLQZe);7&s*hYcPb6i#P}~7&I!Vdj(5tG*OvL#9pjTyLKjTT_U-Ho)VFR&W3WU|O z5{>Z-f7B4GD$3L>`tEDJX~w?AvjQVR9&T;WC;Ht~f6~T5j$qRe*Jyf(`GhMIF@k59 z?anEdQ?4v?tTV$rMUI&->LqqKV2ETDWpWldbWH_^N}JB7I&6%c>tnP{@KO3S(9ZI@ za>IOKw--RsOyXSnX0iuH$5ab(%7h0sAC;EZ#1OVG-}##OhM7xKqOgmGy&>wbQknZD zTq^s%A@-=v&Pw|Nu|cakv-YsOxDx)~+Cp)by(lj&6ptr71xc_2!6z94EMsJfq$c(I z_=rgBCjg*@CzJ~9a4*UUZ;E=gJfA5JhlzA%_)Nb4rf3uK3QX+3L+41KvWM7loN0BN z1}Ga}l{elLPla`>V_=>g9*NYCoiN0sqDy4zBGJp#8>nGOrU0;hLnU1j0sd=|=oa?% 
zX#@Aj)-cvZo}>ymUHDbW0dI*=->I4*kNKriW&R|ONeEI4LGDY-5FIEM^H&#!Kf_Mx zJGVEYxRel1vNQ6?TVj-c$D3_RN;C%Cde7`d!$(jA`?td6k;%&xxp1*~j>Mr9oaccH z8sM2EAGrQ)%>VB7ZzC)tvPkWB6w+jmAeUH-f4Tk*X+24hYx~dFzt#D--D`8NdroO| z@Bhr*a4q2vy?2D4(JjKa>Rp4L=5KGJz)f*WMO`y6aY_o*$KmlC^c9NG7W9M^M}LAs zc${S0fGSPLO0qO}(=vFb*Ol#;3I70cf1UNUvG5hdll@9Qzf9C-J>}G8f-XvbcbRAe zU#OyGqRyiZAd3p3;@g|(6B#quGRSIh2Q>@Ay%A44@H`d>csLoyEu8agIPWRxoQ;J4 z77;}XXSyM!g>1T9*lYAO>Y>ny@=%Odf~hIsYj!%w#{=+;1R(##Q?S5veI#l(n2yG5 zl_xcO0~~?41C`k06`Xd+1cG?#A5mpgK-5CLsE^WXDc*a5 zFHhO1vJo2@L=o#lXBES*2mvcz#=%Cv5hVLZj8og>japUuZgW-o9&-y2vp+6S%!V!6 zrl3uIiQo=*UDmFrMKP1}SIy+ytkJkxXw{y&U$dv~(;NofT7Y=(rKgcjI7XeZ zry@o+-sv};SD8X*BE~wwb7EmtcICs z4FR>yt6@eJc&6Bp*r^SjXb?qG<2YEwnAV-cGNyPI+&DX^ig zK0KBu;`|$ccO(}ok2FW-?7?ztwhG6;bU9#^h{9vaDq*X&-{ME-cF1TBB-@!k@F-n= zx=M7-e)0tEK`f&wCX+e$S;#J_9M_l7zU&DHz|i%VbVISjX5xLQn$~G4=0@GrKpJK` zxzn(!&QgmPY_({gCvriOu>6;PxO!9qZ1-* zwDbhCw40s?@#vw0t=)8RHPIOeC8cA6B`A>g+F_7S6E;yu?H4R^=rR!!ysG<3$WK&8 z)*A%B3iu%Sd0&LO38FVVs1Z8d#5VPiufH!s5^L&G?iN|W7>SE+7!Kk|M3E++dU&WB zVPqhT;bwX8eGwVf0#G!>_au5E?q_UwB92PYRhbZJ4(bgyepqpiHoQ1ja~03jl8ax` zs?uT(?FK`|t~mS@lpO|y*|Q9B9qe`?ocZbl5v7ivCHHUKE%SS1hz0SWeSC*ibCry zl)M%X|0cW>h7<#0_^)HZ82|2Et&ed+NFu~IB4Hv~B#|{zCve!?&?_7E13UJEgvu*g zqsl+ASNx^5th}tnS6;<_aSi*$S?m|*uwPu#td({%Oz=)=@`G{azFwDH2-u9}RwlhZ z7L8hxZ99?8(@0j}Qb1(WvnTAH0p9lpFHB`QC1feBs5#})OzHK6O#N6SWS>tPIOeXl zFRk3X0h#!Gq0(C|JWKJ~)5{OhX>eWFA?&iB#h^NN5hfVtXLeC zniWGp=tz(D;$qb?BSBxRkU!dyJW#k5+p7H&8Q7 zz>v4n5cX$1*26}I2292Hf5zf8rfI3FrmR;qs!$0i^#2wOZ~A#tv}r;N`w6=rHLw$D zH(|M9^)hGN#SRL`jf@u%p0LifXjbnUHu;RXTI4m2yzAM7%Ipi}D9#(n2KBE=QAOk!DJZH|H zj7)?t(Vcan_cqSe#>Uy4(Z;DUn~gS3b5U#)+Bku>*f@h}SZ6q5as}hUf;+xB}jSV5o!;;{0qIMQJ|g@y;=?zxdnQjx#&+%Uc8aF z2zjp{k4BLYGa8}`&$|7f5eZtOb>yVc9EoxV0j0I!`OFux%-JjiNMp`;Sqn&?1LT_t z(qaZTsbQCmVJ0-JVeZZAa}Y_BwgjUAS_RaI39^wO?8+z6uX5y5`Pz7=2C1>=fNk)Q zurgW^gD{1+K@RVL9NsOSrBxNr)~*+Ko1@(*ep$O&JXgC_{EC6~e}i28#sD9FgW@)f zJ#!uvHURQs`((;0h^?3$_PK~_W7kJVExM+U4)TJO%od<;ZIQ0zsWxCuFFNN&rA9%T 
zPD|MYOMR`#BpZJrtk2obr+tczn<$5AfjRJ3bYWAx((5w7-j@cJ5Llmh~R8mus#`;Ri+|qn0LnMDy_(B z^U64{PX^Z)^5rVuB5SHS!^#?FT-6u0DI@Hf9}B-Gu(&O(E*5pfHN3(l~g682G;9sR|D1{_p!hRm^WXnT|cCGqx_w0mk;yFeKgPq88wh0kl}@l zyDmTGm)j@M59!`Ww;-J(y~-t+$n6~{knV+aq9jBxuDc-2%Iz6wK{`h|g>oh8bOZ7y zEw_81fiS>Zq}{}tY=yPJa=Qk0)2RqPNJPRd40A)LL1jGDXDlYFI%l*4(?Lt&Dkh83 z83#*2?rdZUiqLScqX)tF#u2-~^iBOI#OC=*u3s(c1gyu$w0TevSdIID!{ z1_GDJJ&ew|#MEA)egK<4#sG~({w(0%?2QUW6-BY@Gj{Vi7t(ScNW1=A3erYAkd}Sp zxiln>c_8ujb7PSERrH$W+I9W(jbJakWil^7z=O!X;64L10 z#GF;y1^`%uks)u-Q9)d@~0Y=0x9mAcm z+e|AeDkF+<~b`LqNB{Cp4<<< z_(0lN^PU-5meLuRV(`I1ET@F!WKaG5b6jhLP8|X^p2m}!sH&Q9!y=Reu~gpI7)(&$bWn#nubm^Tj}ZqG{w>zaEiP}pN*84z7q9(J-slJBEA;S z2H#Jzvt9*?@(3zRl&^ga=B833#)bH{etMXxsEiQoO4PF)Oi`|Tic;ZA1?g^S4^UHP zEA)4P8jb{=+Yau-bL^exM)oL>`mQ*?dFq{udg%xaa5IPtQ{b+62Tz;|7AmsCH#q2= zlvBPD9kNGqIEwRF9P2fOtTqy@UW^@DrHmDKU(LunJt zbO!wiyC>Ct7`5-e6U}mq(S11RK(tm0z74BUS+ETC+6?nj~fCdB!#bdA(XF$}~>!9T9_D!R7{4H4>XLPI0*{u%(IccPnB>DiWZ zc>alDq#>m7EimUbyeo(i@F+Y1!~Au`NA3ljSGb@AdSFm@NX=D&d#v%!jpJGlKOZ0OjhQL z$m|BFOCb#dJfbIOQM(;xV8be*v#UrVbL#dpNTWli_io!Mzj;;&-l-nzX9Z{&pHHeLD zf6W}N+picpgP+9{>dmSw;(0M%;m_%oHpE5(ce|D`90skmB z41=~}UqSeZc6vfqZV+SD z=`nKfFXC18X*i4g0wev)gJk$dQK^o6LaLj@%tk$*FvN?=RRv{lyNMKq%%tH=LLNQ! 
zn!VP^FE@#EYHGAxu^B4gU!vuAo5g40t)l_v5AZVwO`_ZQXdrN%-~2bD<+NX+nC^&t zBfOU368D=}u8tfe)gm!UJsc&+7Qqsz`ylyxk!aq$^&nW*Wm3d#h9IN3Wh07~BX0cy zxyFuF1ZSqnWV`MmD|@2ku#LIZn-tu7) z4eO??01PXY$wfMSw+U;3bYnznW^E}b>xy>iorH(Id|^`LU#SMkHABpryuC#TFa8ng zfmC3NZoXyPDqdDMACjMKg%#VYSLMO2aCn&X`&!xVtSL-V%*Bwv!v?ZaTxeQ+)c6-4 z%CW`hKvay}TP&Kl-|VxlY!eK_H30W4fS`qG0e3}ZvjHd>FzAcY4s1c%GUR>rRb)0tlSIN> zr7^Z4S}xx%hN-6p$h+HtYWshe_8nrTZ&i$;K!!f3+QCGbVr+D7gz5rdnbYfH@;Y9Bh&wxrMRXe@!A*gaa{Ns>-lgWa)eV7)kW z7n*;&ja;%zY*G6kki&M1bq!kU^ zYCRe)AD|N5x$2|rFW&6>6*8bL+N1l{9`5PFg*6yCV!UZ&A(E448f#u&REY}k5cBvq z5mXy*x2UrKwAj57l>toS8X^7G;AV$}4=#S}a7Z*#d-Ri|4hehkV9aA0o#Tev*d}e0 zP7Zd;Rfj}RwpyM#Bw97uuQRE3_-Y`$8~Q||aO!6u`h_oarPx_;+`phNW2VS{heh+c z6xN5l%v1oC409My&2J)I=KYf>*kKW5ei^S@K?g#~n$}02JuK=5Hv&NE)ee^WG|7k~ zuv$=Mn`Zt}@X~eS5N8QYPB^ArQ=d>XaVPcZxhxCiNKX)CetcXGl>88j4PSXSP}i6!o0P0kE3T?r(&z5W%B{>VVs!-M^GgOy@4te zlSY}#@DWk#-xpnSz(1~B%+!Ya41W%>al&Nhqe80QljXsq!j8DTcaCB;4Vff|90L=% zV3M494Eyz%Nz(5RVGkMtBErzU3bLia7`FznZ*HtB9e;>-)EgJ%T@*51+~YQv&bcgV ztMm5C!4>esv+R|xR)Bh!PLiuD#Ovy(lVr!^u%bLBpE)ibQP(~yUq222_Q<;m6$2&F1&yCF=UG59iL-i6nOsoa51Zy^U?5OG& zlq$9F-vUAvufhjbKy61)sqh#xjtly&cr6b?BVcF z<9_!6l?*(LWChfH3_uCIgaSl?vSbHMI+x^yVoA3lO6S{ zwp?{ygsOIb`Qv#JE*lKu{@G-;JqKlI4(J?9viU?XY-*~B*qcLXqGot$g=d zUuwAMIsml8;9d5&n1k|XT@Z^N#>ePOB7x4gb#+TDa!kdl$6e8K{3X%g$q))C?(d(e zIC8X_P!o~m4x12zkOg_rtJKsS=dCOpmwWMQ9KcO)LnAu75FbaT~oey zMZD+L6j(>>a93IKrwEmT(TCIAdo0=cqSe99Qb+#rX;sS zuDdGUi+_oT8F;vv^aN9?KdUjNJyF()Zz{udBr{A07v(1R+5kDQ95eASxb2t4up0w%p05;Kz zR)8^%yB3f&&c3{5U``+~hv*g!1md+KnoA1)X29ku3zPt+*$xLjaw2UQ{)XevWoAxA z?!GRMSZhFQm93q65zDG?+YM9f`r4FFV{2bHSWQE>xLNwMa77Q(S{xjA}&!7v@lV@#2AfxqH&2p4=kK}ero(Ik9AL38u?@JsN(w6q;SlFd3)6_Yiq9)hTeEQ6} ztfwtI>O!?UzIRsDA90q|f9EW&Kk8K1H#ke`4>?Qg4?C&)ublZ`?RA#d|KLR8pNER$ zTSFh$f8*5D|Jzwm|E04D2Zu%V|4KSlc*M=a8*X9!*G_8R9y;l6$KoSjMY!&S61M#U z8|X;PNO^CKz`PHFo68>;*Mb`j?xQT+5UWciK4uZmVivG4GP<3cdfoJG$i3Pv2vn|Jfj?fJnc9%sHj*3gz8cc25cj*sXoOOPr7S@c z<8*LhK>l#egB{H04Y_SDzu2aY(5*49g`9wuvD>b!sZ>;UZ!Mb}gZ+=#NY0LOr|Mn| 
zQBBH9kdiZLJk9^51nTy6#mX(QXFYX+Tim;bTz2y|Wv*pKpsiHCDKxQC=2<%v+ZaVP zGGO@FgPJ_`d$&XK-xT)y*_PFo=Xwdu+SVA==)G zH+Qy_YiGXL{w;Nuw2M1c%T0$4ZfCr3n1?fctVdlajw-sltSdzSITVV&6q4?`2Ex@b zza($Q^hBn4uVm7ZX#w37C9lDUVbChdu)IwV%kvuSXICnoH}$$5eO5>Q8ub|clAcE2 z@l`ilt`M1~69;rNO8ahs>gpp}50@U_#aAXu>{7U=P2cWR_@%O)901{f2iF>k0dK$n z#SwG#z7ys=Lio!*ey7iz8ROc89xW4Nx*?~|5 zVh)(so4Re2e}OrgY&`SeGgsQsCl?8;nUzo(8QOK4LuEfinW9_t=>1mkQ;0Xg$Ls6r z*5nb6!hb$$KXWQnB=L(WZWzTLK<_XU#yO?LYoO`YtR!dPESwM1t+0O-bdey2qhSe& z{)xk_!oT*z>)8V+Df`wr6EjGTpg2(5cAP;0^GF)Myqx>b%7gaMa@;Rk2BwDE2cMzU zssn6n1YK*!d3;Ol-{hq{)#`=ulP&v`6+ab)`#jr#uS-$5L}iH|?rkQMXrZ~%al0hX zWAu9L@GfT~A4&+o-wd(?-+x12nWe0$xnejp+9otu=)F zcG7NRnfm+<&7!TgAPiJj%j7Ml??5MX88kOptAd2s1)?J@P>1a!DYM76cXr@~)mv)- z@qLcPM^n@#GDK*Q5Cgu05n@6nbM3@cHPIX@-L>S$(v`HSqjSCZU-|Jrq7edCt3ZWr zfyT4Le*5Un>rgTkZ<+}qx7C^NH_Y@@@}IIQ_jhno$$!kky#Y=t`GqXpE2f4ldOUY@ zX3d{y2C@GppZ)*xix+v4jqWth)uwsay)k(h->f6cGK_o+SIcZhPUC*eY$mpVXA+7a zjkXWoU30GdZum+#crjeKSHWfYdw}=9@UC7W9`A#?iOGk2 zsvo5`X37(}1<8zJ$R&4ykwdBd{_G~UgwK2Y+Ffh2(ajd88w+H3rr_)&W-Oxq6OYL?PsQOxCJ2+2>2YAZJ0Y$^{t?5W!U%ZNbR7}C;;51Z;=V5SO zTHpC5$>nMnh&2*LuF*C*Hb;VrA?1NVS2n6Iu{m-4GlgU&f$IG9jqt&KJ8;6&P1m> zoqulI_O({Zl_OJME5-WaDhX!xO?KN@a(}4p%vu{AEoW`}yZ%+Do4(E6E}R&*@>%9y ztFOC9OkEp0WX3gjo8At4*UC3mG-~E`pL1VHZ(7Uhk;*%ZFyWX)KDXDM^~yB^)7Gv^o=u!6zb7HU3u$oX0~I-bEsEk5 z&gIkR)?Qb>F?4PDhLGGtdUWf~pwMHPn614@k^GkCrc96DOwUn|Pd{YMj|U^jnja4? 
z$jpxexvuGPoZHGdz#U&E)+47NaC%R3)p2g;!uy~^Q=t?Irg#*2mt}iP)|%1&(>!&Y zdva#Zobu<)^0dDQ&jj@u&6zt;hicKosQ(XsHD^8#MNmWzT4m9>QO%i;`<67U8@0kp z5GPr@;TX*wp;GUzc9xQ3qnIDx4%`+R^!VJ`#|T&0K%Sw-ocw#7i0cmJHgos0tFF?$ zZ~vLdG`Ha7A_^@n2u{UBN@47;iKOPY>e|UVAG?(AD3;(-p))r_mrc~7SROWB-UzcY z*KK`#s;tB*E$im|d{lo&7d}Xx4J-QsdElVQwD?!8Zu~U->G;eZ(Kbm2B}td zXEqAoMfg6mHQz0~a<8+-lWuGOVy4v9`1>#6jM!3q{+DrXY{iZroQ=$oYd9aUXvZOE z{f_ULT7TuN;(S0A)987e7g)(Tf#sYH&2HO zX`6fj!&K9mLKMeL3NH2cuVNarnZr(Fmaw6`o;@utB1hmWbPR@hUo~;Zjzd&i&db;H zZA0*0=T~-8S)yI2JkdThJkcR^OQK_FM50q@Wa9YHJl5&eEj6u^{mHa6X&*+(f6g5B zQDw|Fj6;M2Oh}(V0UlsN$_G)lIfqH$-`CQ3LR!8=I$=lR1PYg>)SgLas9C5a48f1t zZJCfda3JVNi)#|vW7{0h6;)HNL5EMk2db>zsrTnx7e>>QQ7NN(=35ss$2kvNJrqA? zzAM1`Z&jUMio2z3f2f!|_cOH-b7B})sG2Dc`_rmxAH3F^Rvl1fnO0@%xr2-@Uy-Ie zS&R8~%r{k(HTG66)4t77@wsCU<^;$7i%*1o3;a9h5Eox`iemj|R;Z4GXh#NOpb~se zrUkqo^xrk4u8;_4PxAh1=N` z`w4G2Tb*&w3d`ENqO>fTK1kM+t}}epKIgJ0(%o#NRq5%!F|>x7I5WGJnPIaEp|y;i z_)?zJ(6B1QiOekfmq=veS@sv&K8{LjrC#)87Iq|diZ_Bdo+m9~a+c0ahqN1@)*Q_oBW<1(RmfXF zOjAD5#Ej6u#519D62A|foA^U$P~zFpd5Pzep#`(U}`l-}@I0SeLJm!`5ZkN7YzR)_3x(_t@J?9)~%B)oEpyz0y*?yKLG+xgk z*~q4g)3(?{^c>CO_LryavSQk-lHQZ%w>4!F?2`|-H-o~5dnkl^y#%xOC-#QF&?4e9 zQ`)PEyGLhEy4c;!uKoMS(8%ltchK{u*Je3&zg)q+ThJHtPj}m=VW4kHW@88 z7JC#1E9Rqb@IV`Ucc`eRH&#(fW}+jO-T{A(V8mImmCnujWx{jgQS*357q>)b*JATV zN4En?qq?Kp@q$-BPy18oTh1AdmBGE`a7fkmxWqK>wz4M9_OkTHKPJ)I&n?0$5Dweb>)QO8O8g;8ylCZ=)+;iV&#?3DjXW*qg#32~rQ} zzt=ZPGczL9Q}K!jtKeC!)P-lI-^ROdu=ey$Vg7@}*)tZGsk@jMX5^T62d>5}V$U=7 zo4M_q1-QgZ-G{Z8mZrrmc1N?hS*WQQdwDRgwOsu(lcwD~YtnS9nfhd)m6>?F+vev1 zcIbk3F@4g0rhnBaS~HLHB=&~>RpmUNspkXzs~+W9X7B_0-P*tE7pCTTw_{gX(%lv1 z;r>-+1l=gB8mk#YW36ZAwom0fYFc(?#&9CRvaf7xN-v02*}LJhoNQgfB5X$$Uif~hvdZ%dJ~mQ{N@zR_6=A7$7d?;G~s zCQw=73AP+*#$(6WZ}+b{hm7nG`d6K%-&guqeGI)ajDs}Dc_Fi;v)ehhD{F~b;hDC7 zenE4%n=dM5f4}9~-_Gk4q-j7wQghI6HE|*K=j4P(?>glcAh&C@p9p>vcszQ4#GmHl zltLC;JMK05Q*`HTI zC!E&ax+2$Uu9Vu5Xpf`ZzjYb?88w)KGVSDNq4H!F zo73mk%9N7Clv4Cpyhmx8K0NE>>7LWg(>>hp1FxNKp6uz~)OPITKP?X1Xi)cs(rJZ*1vZuK0iM$lxX^u*@siOACu 
zXGvnCvvCaJo+Xo+4{i76Luf;*w7Q$qEul>}#j$BL>ECY6x+Pj*q2R=oy%{$xnIG~l zIiK9y9TIrqA7)E$_g1ccx!@%CmJZWT!%8}$TY-?!SP~aGE#n95H{xc=N$zQdzYx6$ zhdSvn=+h^ERi?&^$WWMI`f{Ay4Cv$jGSKD7dz1ROrFQ=7aaA&XqCgQph?_Zm-45Zk zJlhM*mcA}G#FKHMsk$Q6%Jk{yI)RQy%+LC{rGb{LVD)oU9~<8cWK` zPH^~)g2yv8lp5z;{4!DlOyFeql4fJ1tf19XVVzEKd)ddCkrB5~j-B@5;Ir0&l3#U2 z{{LHV?|WHl+eL75F*!S$c^9i9MHTAR>g`d#w-1g9A5CaQp{VTgV?F&MrBqK36w%XZ zhp+xG{rm#hB);ECZj#j+o-_UY>q2_Y4ZqLrK9$l*%$^S)!D!(1Y-$2!fZ9O}4xpq|-^>cH9g{8HIX74m&feba+UcgvztZsY(=? zn$z4?rt&n`NouT*Bx0e$L_AcK7#b>03=5Sc%0gWe<)PBVa4hXxu(U^DX^+Ixz7l=Ro$t&~Xrk|bk=}fE zn(LZnr@8G~i+ZKrC+fLOWEm#-$>#FY-Qt!#$xOuK<(DWXaTN-vt(kSYdrFtv;DLCd zl#5B3K3P><(S`cn>D+0)Io&<82@k#0slPvJPCLULdg=fYsdFmP(_V3wcMna31&=0F zNS8L)@u1gs0pbOD-)lCU;daau8L~4MItx9WVLm#`9dTB+rp-v*?;|tSarukYjmDu} zaWqOv8*7x3`>M_nEG~R+=-KYqcEJfbKV7k?4|^jG-x?g~_6lqdn>Pl!Z35kL%)){0 zb-mXUJ6+kLil$UH2%Qp^A!I+0OU|z2S9KKb%%7V}&T;!WhjP3eJLzV6t|~akZ5Qa9 zW9FRW4i0q5G2wIF&d#0bC|nVLuG`&tKK)kkUgfzi*AchQdGGP_+)M0yoq>CVm-MOvp7jD1i!b`Kx=&UhXqs6{EiHvZ=CO5VFxZd)dVhd*M)n@1@^Gna-rKH z&|$9m{zA8B;H!!6mHyHlY6spp#XNG6dsU#_T=V%w?$8bmbJFwUB68G9#t`IATk8|% zn42zkZ~95yv|@N-=O62~SZJXSGeK-u2fBbOJTd=~KO zmnX5~`4(#a6y$9|$j*OUziiTP>-i7Wx>CK^}chYhfNW9cr2JnSG14Gb-K;us+(+rYb3b9y zgo~U-2^3uQZ+MT9{|@pV){a8d#Gb#O^q>FtFJoWlp3wSto)B62nlCQEci>_Dn#-?q zJDOS7gmaIdlqD^{#V0{0^(V3T9n7&N4>Zpa&)wq-OKTG!;!nho;2lYelx=LDIb$_kT{C3iH3Zf_o1dTad8yDorQ2t zVtzUZa6uGJV zY~nP55P_Fx5hy`d%ZR{y`hqw=KQw=wTIHdCrdwo&-&)d@RSo%MOviZ=3Ke;?vpHcF z-@vxdyIFN%J7cY4X^DRk{MrF|yXg|+{ZzG1ZDOn6&>Nf$UPF)NT|B)fUC(Mv>x5Rd z#0T+3{CYO4jN6D;e=NL>7h!r$WmWIs?CP0+^d^+ z&T8h4YUa~h|JiTi4seLV^BD-&TBd5D4B@$UuIu$`II`aiyvA)cXzmxsIxtePR=Vp; zzb4FcF+uCrDH}~F6xLhW!oO9{D%@3=$$Y3A9~W^US+5R(+@zvxpdHmk4`m703t7d~ zp!3vr^|amPWGh{9HowYtowF{XPLc`NFusQdkgcY?yF;SlNtK-Mr&-~wNNgYt_;7V9 zlp43rt6K93S!L#$>T6h{T9lQwmR6MY$$r);zt+l{?Y*^fp%-Z%@@$V?;JlwM!Yg{C z`h*|Odc)f3glQ^)V<0!Zr6mmOHSsEVmBEH1eWW;POY3-=2F)NXG^}i|vlk7K(LCBb zSN+yHYdy`A^b4&DRgIH8D{Nk9-ngfD&JggPPvE=YB(ymaVxO2hd}3BRs}qwO&Pr$S 
ziStR~K|ixK&YHvn{_|32X#&MmJ(PDz^o>48WMpgM?JubtT|0atmc^LEX5me4?*i|c zp<0KFEQRE@r{a?nhog zpW#w-+%S<#1%e~z=LagVIT2MZTF4K!3|7Li!t8+si)Vkk=-@6}dXciml1H^v|+S7*ecHy|uZe_2{nqnaXg1enaIs z`PSGm;p@n|q4M1PKzv>@M+Z2r^biB{8!DqHw%V|Kp@)F8;~(c(@x^4E0;+)dzye?) zun6F=S8Pd+`dJv_budsD)V8r9;)5V}hNQLlc9f$kp)(p*Y+cT}owI8%C;!s;FIZtZ zYTKW3TEKlNt+Im^`ZBub8R*RhcN9?Yl;>T z9>}q#*f38nZV&Pt;8|1E$>v6)HH(v5?3Q9%pSm-tH(8{rn#z%@SdNu1=~52Ec!q~w z4#Rm?cUKN0&4DtPm7N?rJ~JGMSDG&6Zr{RWi*&g|k5jqnROLKpQjIWomAl35D#_>& z@@~Z@z_!YqhRQ+t4*8f!x!d}^s&aQ)V93)sPj_p;0c#p@wSzL94<6W4?NoEtL%Xv+ zS~;xLuKa@28+Jf8R*kObyK88A{Y0A7*2NOf*xr4wlr*h$4TJ}L^qRFcarN;($zVu znb6M$I(BybrIK*|)hJ`M%Z${meK2y3M`*#24{n;nC+= z!}&{2r@)r5`F<3h#+x~B{T*_k`L!w_;-kT1)1)RMn)N2lEPf-BtY3{E0B(0<-mYxA;68nO}X}>SR$o% zlkA(!)H~f)(UaMP+0O@xNqy2yaNz_7heE{MTmC?$O7G7&E$!X0od#Sxc4lovUtVF* zP#QDq(O{uDa;MudFt4fUFxp)a82rB3J=&dQ4>5P${Xa;vc)hv5f zO-BqvJ3gRlF3-L6JYhi9y*wwhr*zkVs`WfCGPmE&6ro=Ln;C=rOf&Uvx0v;W{#8$B zQtj$r^(&=#p?}plxyDWHUv&myea!|^p4Y2w<+d3|m{4&>pt1yQG+;p0Z3JJd0xuX) zwV3BG?OChr%Li1w#j}0wfT~Mj)tWY`Jb9iGPE| zq4vZIL;;&nmx=-w=;kP^LM!!ivN|78J51AR?Pe%9g*D!TIgGBoA&u`P$--(|XFqJE zE>fNotkkJCc~)>(XN=pr@BK9XdGDkfehr0WGPB|vy_-~(!Ar`3y}N3#s`|o!s&=ZrUx&vPz5Rfy`w7oyfzj?epo*@zc1bx-6v&y1?Bo~uNAt`* zZpXN!_1AO|Ie6*x!fmy?ARQR<@a8^3OseXp}!u?}S zsV2r6J>+9m{)2k1RozRO`umgEU!m^2FM(hSvDgEPwU)b!-;mG#h$;LH(lt<7UXm`R zjbBVB;fAMiErmnE4%*h5CVtXp!nIVk)|%EnF2_$BQrdm#w1MpWZc5{hu-mM(yGa|4 z4dRR#v>y@@I;qf_0zboVgu}H#k+%^!$H#vuJp5^*g6{Y!FOK(lvCWUOg8HtxH(hY8 zkE;{zs!Y1$(o8(gPq@xpFxD+jE>~Rm^)2PL^2=QePPVVK=|l+ubk7Hukt208bvbQs z-hhXm?O(H_cpqfNv$F7i2R~#~*uDrWrt2A9ds*>PJn?L^XROdE!66Pm9C@WUt;N<$phEEr_#`gExnS^iBwe+>k@N-63b7fM6Fj18E4z= z4^Rp{bJEe7YfJn_JD%j~6rYd`*V&rOa2_4PJnH#1>A3e3x5$s1Ph8yta~U(zi;3IG z$2T)~-A_je`B|+lFSL8GE-M)vesnGtTGEA#s+#m#bl|8uwzaajlBqn3{xpcK*3U&2$ZQle4>$8>i*wLZY4d##I-F{qVv39(B z!a15$Y&bHvMn;(ovK#WKD@irEC?Yv(zxP+l=ErekQK%E|5;=`aW1E?ClI|se@x#rN zNw>%@H18(evs-H}5`{(CBSvTAR=OSYC728_UIT7E+^}xkaPzZD_Mu0Z7b@N2YXiVyD)~SCR#7QEtjOQT?Zwa`DJE;J$<|k@7?69xX*!;8caZn%6Nu%;f;^K 
z*Szo@2T{BmSN2b!f};r72SJu;`2mN9AA7)UV~T$34$bY`yzjWd1e)jtZo#zF{9H46 zqB|x~__jGZ(d}s#PjcT6?0m~S@R0jt?)Ql8x%C}sO8Y+imqTlR06qYn`G{K@Xi zxub}qmQsEZCTqF+FiM_z>k&875&<%%& zgIYZGK6y=L*WD6!@{cehkwMTCgMeELYevw(8h5l@p(&X!cT?~y@U8;fNxBFYqmBMO zvDt}g>jAqhk-7J#^=b)ZmBJFl$XPh00wQ-N2g>1VOn$m~e&US-2?_u}Oz-@1u ze>{v=z(B5MdDwlR@clQR+uYcd_w+Z#k;004$XjK$eiVq9sz=#Sy4!4h)NRYH7T-PU zM(j6DhsWHa=40RmhabF?3&YI?kGZ#9IgIkz9L3sq}y@>dR&U{!TUWQyO8a z9^Q^{bViss$O**rgkJILew#J2< zgyU`*x*|lF@Wr@Z2(MF}mXF)ba3kDka6c5AHdEZv`~x^qsPAm^!@evwmrQZT2iCrB zR!niv>HOl|>HO4R+tFXu^@};g7{e$y^$pW|s@pmLb@I@{*QwYQLBc1zW^S757C8}B zC-QUHc+pl&m|0WV@_p`g^Tt%x8ZUg!oc1K!#3Noe4?OAiEIN~NR7Rd(MwI+ly*_*M zVmPHTmqX8!Zksljf|D+wJ!&NC>i_dzH{U<$UXnBX_~GNqTbG+lr@1G#D5o!7pk_ta zj-G8EndUwaSo^$bG2OkY?GxcAyfI9N-5*DoZ0qsy;LQEg-5%&XPkKI@&Zh&HJk7^T z>!!OU$%h!q-=Vx9{9Dq-n!=j(+7vrzB?Z=$7met}WtH=JdKgkGx z*LZW*47cs^yiObLqZssqnvnU++`sXn^a=bX%-*rvDSGdCO@pnG4gMjmSt4FKv7QB` zv4%yaTfD>WmUrkq&ht=U1TYd9?yxA|=FDRYL|W})L5rpEt+@HKw9RUl#_WfhYRoRK z2uUVlw~@3QGa$zAt)`*EDLmg>8lxo?`OGFWHNgMmFFX9y{}8T@Yzlpcj?{+%z63YI zX_CBq_;yG7!s5Va@3zrx=w7BGMIN`BU$vyE3P=1{hX7>#+_7RD9%5_oOy{doN&suS(~)i z(Q(JX<3i2~);G*uC7(3gDynOUo|7M8Q7eWCoTNOTw2>)kTo39jRd12=0^KO`;XMCkRE5RubeaPo0=}gy|9IVqSQp~E{Zz^ZH z+?!$kI1@LIBMZ!mnb-#<&$xM4>eeqS20yeT!$#@|)}hGV+e_D~RuRt;aI#uA(U9V% z8s^4`THm{ZXKfV>NU{Ikg*fDrUt*>{!%=-P7wPiW(e-R0!=>>`$BI``r2JRnG^7RZ zwkK3%9STS9Ur=0TXF->lNvIT;#ZfOEcECDv%h$@0cI%7d4}+KBRXK77*5E){NhCx< zJ;6(~*$Y5wVv0Q3*}!|(_z!_n9L3}`CTB4@jaf0ajAQjqiBnRdNq~40(LyUub^M>- zinXR*tnigk&$A`@Y_(fdijj+SA|mdHXC1Ai?9iyAMdl)?FLsK#1}Mg`QsjAC%H)?y zEGnY`5Eku*PG^mkPZCYn8n+y2G5u}FN zd$qP5*$-As#6=uALiA8)m+~l;;y`IE!03IE8Y*)wdA4Ihtaa*{X2KJJj(kPDS>#KC zr50rSNJOMMywy>n9nH;n5qERKCvU~3{@jt9v_p&8WG|1KpdC#~EH+Q#`VDSqmCu1U z-->N>ID2zbxK%Qb=$=VcrVCQ+{BK$7uB2C5?sks!BEGC&>#&lvXc3*`2We5hhQ+el zn)7LwsstYNns^?#p;=9F7dH2xlWb5{8 zHJJBjw#>xl9bt~lbicT$ou7|30U~robxrxl@A0D__;)v3_vxrvfArzTUeX=l>f#!; zMl(d+4FgSdT)4HleV&t>WV=urn~qdbfL69*Um_{{oE}hLsY7y3%MVL9blyAvt2dX%muDHn&5 
z+evhg1(8vR{5B^#y}PflB>w-Rddd{r#3N2baf?L-MD_ZT3{-G-w^_{+Po~@KBp!3=~>ar8RVw{DZD~?a_I*>ZjF5<`l*Z1&> z>Gg$#t8?il5%u^&J{b#zi4`=A^r7PJ@${jzgW?uCU#ZbZqvt{{D?}jrt<$kKUrADE zJebyz5l0g#eQ0SfN>~|qabmIH7G@aSj`tczO(FECwC+FJ{hbZs)EOUYWi55Lu?|=f60G9`)=|w${LTV*v|f&{DI~6z+11P08!- zDS<~io9kX@BE9uxGyZkAXTN{kp#%S6Tlu%M(}sDNj($FQT#13EVQ8Vlq8by2FdHuO zI-8BJvnSHxFXpdr;M3n`l3DVG+dKch&NA?Wb_=v@kQTb+FQ&zkg@0dYgQ%f7#sowtH6B)qjx*7p^#XbgpO2{^I4? zo1^T+YR%(H=gI`u-@4PxhPT~LE-PN!jym?hJIv9y-40Fq5mU0!=Vep$H+OK}U1 z>KD~bm@!IxTgzfaWY^X2b~e`UL@c(2Vuu?R?WbY1HKrMjh8bBxyJ@7?J4svE)jN3> z?iIa{Z3t}}r)W{C4x140WyJhJwSd|l8>CM$D|h#X|ARVP)4m=SQVwt zE>5Dz3&*ZdVY|s(t7$51SAk6j4ODLjr_5CdjwN2_U)A8)DX!MCVtXBjK~{p;Cc<5R z)?U5>#hr+#6uSJB&O_O)$&1((#V}R1bI?7ip+urfC-dREFg;VVHl56tcilEg3Srm4 zs{W&z36Z+gPOVj@ZDH@TxE$>%<2y)jJQ6)dHY<; zx}Eloai^2i+F&x-NxWvWE$aXrI+2VmwtM+bc0~@&qp)3_xN;Gqn~kG$V-UAwZ%&;o z(<-~sHM%70?Xr&PDKykg*mS| zortWYNa_0^qWk@L=@zvsH0~q9q>kW^U=56>|TZGcaca3=MZ@LqUn_GW2g)vuD3 zult?*6Be1p>6CfUI+WiQ{5I~G|AbEx-AreFk9T!Qbx!q{8?p$$gS=UW@lGP@o}Jch z)c3A0;ZBS|wZ7Y>&i5Fq_b8cYnjpL~*DYAzfvelm75Oc(Cy#nD=iyO*&PmY+zQ*Mx z)AMI>XI|5Az`LwU`F+IlA`DiN#Ybz`1`w!+xxTWGlmhgnz$7R!u^Z%86U~52yvt0 z%n#hAzp8gmpnbPdq{K;AnpCnPfl;&8x!kg5lElG3EM0`oCCDwnu^YyP?2qYKm_fE- z&RvORWEH0JE5E>UG4_Y<@yQa~pb{QFdQ2s#4YSpORDsMYEl+<+-0??et{uz!m(<6$ zdQ`|DNec>xNwWk+0>ToLCm$_5`AEyZ<;%zI+I2~iX$_faX)q~t{^%7;^6Ll>_v97pI61od6P5Q{P-vM_TjW!uAz%zDoTCk+w z{;53pGtqy#Hw0SFG0*?gJ;%g0=C(KOUvQ7>-kP0q`SZm%t`#e`tO&aAUi97%481yD z`k8I<$xyhO_mthPUE|Q{glfUU3r$9#N1ftc8v7!<&Rfc06kE5HdRm3_YMZ00^@ZHO z)HiI6JvnTT?Gx4-(7WKRzE-N5-@w?P5!R0|zTrdKY^N4oJf*pVJaJY!>OjR6+D=y7 zNIg@-r!$04uqIDqRQUsYpH@W?w=L~npcy@XM$IKIs(;r+LsSR&kowp^3NmQ*_xKI1x|nM@<7#JsX>M- z_rR66F)3G4z*6+5x{pRC%S_$F;(p{QJJ;i&cr4M>=<^76fL82eEIf&0fOp6@%bbm5 z+1PRqx3R5BTM-$CafvgVx9W_}cj&U-td8fSXWsev;e}N{~ z^NyC-^Ts>&gNYn_0&|-O5<&a7iI6=p5w;VDBRCwPd=o6kkSO3X8^mMv9Xelt9v}24 zZ*TGbEbok5-n8g7-h;fOlDzkqdDo?P48q>~i@b+;&tY-FL(Bpp_Do=yxfu#+r2O~v zs~LaX!jh>3CNNP7*admfQ}x|Q4IkpbuwKKs$1x*s2&T_bW&-)*W)UKw)yx+UvA@oH 
z=PWKKf9!can>R5%l>n=tOs4025#rQP$WupMc`nRiyVxyOhtP>3RJ58v?5{x{cc4ox zZi(+mMdqPxmSD<;&^`(;^qQkMS+NB{$rQAhcDCZTdXu?KEkv;vH8VmzQmfSnk%aw_ zRE<9Y{BLsemS3Sv85yoI=+YyTb8%rlCOGGL(!4XV6Us9})D_`*UcHNhaR(r_h#E+G zH3`(MHSJLe;?Rp( zoVyjyy~=PVHIPvW$;|%M?};b@JWs5(N<7D(Y)v@E*MwTaeKeTnMl)(v-x={4pdHJCvOQq8^=y*s`7tJqf^!_o4vcoTF6t3EZ7{qLU5bMtS3C;=Lcq4f^ zR`Y1SRdIKiFi+cdCSEerX4&kM;Ix41Hbe+#WsD+601G_hQaVzr~z3*3&f9`qVMC#%4&u$YrM`BTMC1{F`^c`LPA z$#}pQtbvTC=m&NeeohzEPJ>(}BWj+4iHYoSL>?f=Dt4?nRI%n%Ok3ya@T7+5V=6Nf zVLcHd^oB&&hqiiLVci2!V-o> zYMXmeBtxu9Zz?sCruikrXiaS=7MGfc8ab@_z!f#o!#`;vHH(@^{eD=%u%9%|15~&f zHLv8?u}YKGcO%Wk@t4U+nN>Q!gvB-mk-c^$y^VlNyuxXapR8`FNo?=7=LUV-n!?JO zl_g#weG!Fwa^o8(#qkJ>Q!%fMJ1Fm%Y$T8AVua3a3S~RKO)HISpfKH@=^-~gaHRXS z+EaqG+K+oNajlpt@ru(C0yMFyFB|F9^C3majVks&1>XT@o}fu}<5p>VyJB^c4AeHM zeVl%P?JE+^b6)f)^ty*#(EXuvpdd@rwQ%!A0uBqyFYBtXl zix;5!^Chs?DST+~>TYid4nyEWeh7d+N;^Vt<9G~4YGc%`i=o$Z$P zEh{_M`L-?JS4YomLd8oLea(9z3O2%YaT8NzF+@ijB4da0A^UN&*%qBWYxd@D{uwC^ z2s_}L_SGzG?*VwrxUJ7+wnDM`(7{KHk?KU5nySb9wd|E)Md?19^s1=@EJqkJaS5TFe7 zUQ8wnc{d-;aihVwJ=ZkMaj$IoOIunPeaUjE^ptJN=ej2a|FnFzSy>t2c9*4rytF4p zHz)VK#dEorrQLnYlVE*58pD+&3B98LDhWs z{$$@LS>$t4UMG+<&c`@I+$HY=dRAht#fhcLwVQ7V*jsK1a@8cO7%i=-e7pP^ofESP zDWr!r<2Ea@W+S<4R}Z=L?!n}tViXf;>cx5Lq~{2ziOM#_(_;lPY+FGycY#}e_R`i% z$1N#`rzi|9>}@NH(ZiO6GDK(#${w4@Npam<6>M0~tfv~2OS9j~=PJ#xkKMjb?7`SQ zCi1*fVEP>3!+vjl?B*4=TritMD%+iz6{t=+8XGuMmNcBbMvE%S9M89p-P;0>KWZ*n z=niVJB($xpI`j%+N;NF=>_WHusk-twL&rs^6&gv`AlFov7kZl%OG8V?MWBg#kNXAq z6tn)g`EH?mLhe$c7LwdVUT{j9kD7Xn(@V{bi#Vhldn$Gh%iJBJ)N|}s1o}2$q^YlV zPYe#5Ioq5a=SWU&TcuNXy0ki@NfYzb=5OIk$CeM_k z4aqfIJXTXWREQdDY)i9psoQbL?PN2WY`6tBepkqfs}6SncLQU9dw@|PkI18GB10J> zkEDsj!EBufZRZnu`DAm}GPlDi7s4+|nnqBCJAU{eA20VNwYJt7wG}WbG-@2)F0FMb zt1bkPIt2HW$@>)_;CtuBW2!IWtG+H$S+s~$T_;a{rBz*to>6r@^~)|ssrSyz#)}iS zi}>~W^^qLzK**@LT!?ki2cJ6|9-3R*!Ary0LbP3Re8m7y&HaghEf^V6bEDC*8q1{S z;I60Uz9>89ETJ2ga{}H+4co~rv%vI!KBxK5%UBhlO}Le9?Q+SSYo!=VR`)=oG+4{W z_2U}8p5<|;XE|~j?-?rgTr_%wOx{YioAQ?B^f|i+LE_i)P&zu-o#po|pFk1`SWn%H 
zh*8-iq#aEup|k?BP(T5@@~KMmLwTsL*L9Cu=@te?J#6}|bbA*sA=h8;3*hv3YY1Ie z9f&UpEk9l#qN^mg<)KgZjWoYq>Gp79e@1n22DX=%#}lcX_JPwLHs6y+dr=}k^Xl>< z`&_QsXm3T?dAM%R{jvD?&52&8c0d_xLizf2mpdi96xagGAJ_ zyTufzLj?Y0Km(y(5aaHCOB+xdOHdgK;gY9Jw@0Wy@l^64SG0k=fU33``bJp#< z*vwq*p4#JL0xzL$FQGn)thQBTvB*XYWis7I!L}(pzy$!&)oxxOHOUmMaZ3kZK-w#~ zlJPnUzK}dFBoCYOdCEii+VKlQnY=AOZ_CeH`AUmiImtY}#_gHoiT2SIX6_odb?Dq5 z3+$X0`4t{La@`SKEquCuHpw(u>t4}(Im3j^%Jq)o(-CR=C%F59j=N|8f*- zCDVOhcrb0G$S}Es=XqZ-OHh{`lhtL}**a#K7|vFmZDOCg?c7S1Y{QHrcbW;Gx+gci z(;F8UFJgC^xu0^a4cox$-Md@Gh$eGG%SiY`gf$dSTkoEl=@hL^-FnvvwMAhZWW&C# z@#OEw#JRPSy^deZsolu0xM;;!Ab0RJwvtn&F>~n#cVwstvTb7*=d?1bHn`1-M)Iy@ z^YdZ-N_ts8H37}j%5R>Q51OMJ+@7KEkLjAOgmN$c*h=$NLr!~h{)$kZ^9>X{U7S;p zkEnYMEy0k0?3!qv-{`ipSDN=Wy4~_tdVy{Iz_k<2zKw2?yNuMZx%#i>nd+RgT`{2< zyo#ClXYRQj9(wFQ3&MoQ-kbB8dzIUB4K8epA*Qh@J|S56Q=t1;2d-3cDMEL_qL=X{E-Ns5&}i326uT@zN^ZmcAV;4@1Q)&(L3|_WVIrRLuCYHGP%Id_&qZD#Lsw;pYh-R zL&gCz9zy!Y1qQ)i@*nUbc*wezEqa=+hb{ix?#r(d8cY zPTCiHEixVv*tKF#Z3Y*C8%c0Cw3z!NZuF5N+Y3*U(LDsdyPS)I`5iWYc5MkPE{fB9 zWUig*E!A^s*RGpgtD{tE{s(5xt<}-~FUq*}xvbEefHk9#u}3Xi!}G24pR2t9jy{00 zI}>7sm!7yktT+L?pUVMhAD>jful;~tSpmXd?c+-u;V<^_kw*Am`1lTu@B@5&K_fhS zZmvgB3j(IqZ_h*5Q7n`eg;b0TI*xak%A1*Ra>-U?B%s=?;x#yy}wN-TB zTr^%Oi{?En@r^>frs4nxnhiqT|n>H=9Kn^cx+xG+0^FYeyjx zQMpZ$G0~j4o1NY_lcKOGtCwA;s_sernRHTFi@o>4ia+q4ca!co;o^8JzA!xR zT8$PKI|1!iAi0lw~+VoPICMg&r2FvWU=BOJByd3Yw!UzkWy0E z$4ngashPMkf?7TR2iBt$M&UMX#TPlRzDEHt_$AU*67~XV#*p6v(os2QwUrK^LHI&& z%2#oYESz2Y+aw7_Am(p^2tZvRzBaUI>73ft<;C_|OgL$&Z$9%hR~u3PK4Hp0@z+Ac zUNXrLHkrIOfE%l(iB~(JRJ!0X6#O7ze#SDLWJ&gRc%*Cs{zaKE;SuowzIB~x~r zJoYEHh>%w zJDp~#J?LMdWPO=TeBnii5y9p%eama_Pl;gR;vdHvg_xwVU7iWOhwXmML*-m{sqVA`#L${JmFuW@Sz@$~#LWuwJiaH%;#V3UKu0N)@@UJ`yuV~jv3BCISOw;+oP)`$AL z&9*U#@4Q;46HOw+qO9;S6fy!_S8(c18A8>CFd^~C$Etx~Rd7tXW1q_bFQ0&`5RRu3 zo5*}28Aw5>b|(`-x>|%u2dhu+Rbq(6U!eN8C#z3>mRQ^<-96w{SYKvXZmSDberTAY z+7<5*)sBDas0bCMV)k*T@fwtxn35@K1k6z*e0m1y=$71|mY8Xq@dw7e<%J*1 zLqkv3m%X&fai0_TG0c|yTGbdrJgYJx|5-YziT}Kw=i_*mVXgR1L;6{G*MDBba|@o= 
zG7u|H$0c)W7yI$MZ=CHtEB=YzGbUm+Fa3Ct0#t%0n1iqJpC5Z-ZY?LFsvkHaYQq)l zF|bwB1#nDb8$0Gm0$&A&Q(t;gmQ&qUHS@Uq*|=rI4=~@>Ou_zThSL&?pe;^+Y>xlN z%_;iimAT$1HDZ$(wrN~*ddBS-h0=@CiTx&-!QZ%@FZ?w)d17g>P~%{JB>qkj8R1m1 z>Yim3rvaqR$FrH{AL|)GihPjpLtD%n-?(iq{E+-K$k;f2DxJ-Fv(K=zek$->8c*1w zo?^ky#46g0O-O(A-G;ul?e#rPi-T@yGSeGZ_#H%pmxONtv|frcHp9g@-MOOt_rSu8 zP({&clj#SGU|}`7^9)CEH;+4kxFPb&^Vq*LF`2rmIhuLzyAxA>Yz}X%O4@&cu$@$R zHwCUJ5802<_?ES0oVJ+Mw&Qts$l&l{6|g&TD|i+{IXjcCT$z_)cZ~5`2`SG@XJvZq^tZfwWB|qZBjSAY$*+UWdTR(#u(&^|*b|F`AIlAK9 z*C_1_Kkb;G;2B{T`2lzQBp|l!c^^Nb5q^b_AKD22sE@y)5q`Xnzp4>_l<-L?y{u8d z06)QnjqtCt2dSD3YJ`8p$Dh>*KhDSZZ-l?o$M*yOllH&a59rk>!3jRTv=P3OkB>CM zBXl029U9@U@$m)6@JVmVd$}Lb;#h!(AME3E8{z-$;{%QG5BhC?7_E~n6XSjSw~g?p zat6zzbbq6Odwc=h-3b4-pV78P_?bR_OC$UYAHTj4eyWfEB-tq7L!YI~8sRtiHC_l_ zDoW;CtJLh;0P(YHD*2dAoIQj$rFUfF<`TCVaphT?9Y0y$y%E!-Wgy+M?;(XG6vQfZ1Kfz;-@E`j4NsaJzep6L8!Z-C-Gwy4IZ{_31 zczF7MX5Hm@Kj4l=33~eljA(=(=;Mbr!Wa1X8^Bvxe-$ho>kOo>`7DiZb(TqAcVWHc zkFU*P4ItnRPeNvP>YK4ZG%b2YYt!Sfds3iij=AozTRNcKoY}Rg_>{bG)5oW*>d|aJ zvXxVFA4M$G=^woF5CBeS1gdpO-_k1~#Y{ahr&U-hpzq8!pB#3Fv*^pAgR{8A$wzw0~+E?GKy} zTmy^&rT{Mk9|6mNO~6jzFp&3R(_}RAV$-PgxfMOU^%(5s`5@2?{5!TCjRL)VNb%bk zX33|}$1me|Rs)2!wxg%CwW9}t#lR*Y0&WPu#~^v^Wwakbkt%t9*@wdgb~HA`jt;t- zxcp{d0q@POv7?=UN?;Q(14sc8;*Hp5N3W^3qc`xIfla)Z?1V6225>ORe{TWnff6D< zc4Z(M1I7U71I>VgLjuudz*~TGRUo<-+@UK1(E^?m;PfmgKL*LR&fc5Wy2`*{cBYB1i zMqdMRf$mQQqs@R-(}U6Xr-6Su7@Y+^0~7uCQ}}(Bc&RDrurdDcMgH&JCxX#yejkkL zecLnXI0Z9;(KT6l?R+vAz2V7VGWwbyAs0j+o~IKrQ{ZGW(Q}l-hJb&Ej7I^f$Ai%s zFAyKd1?px&FmMs@GVhyOInli>o#y@O;A=$Dr5Aawj@5 z3ljeG*esl$1(6{R-N%W>enmQ97LWq!fY_D%0#V|`fJ$JN;sbSnMOZ}PfORd;$*Xt) zVn8J@3rGQVfORzyfG7|HDuG!*3aA4th^PZ1KvV<)mi{QHQqR{B|0cpHC>hyj&K0HlCAz#>8fuxu62^pm=G}#K$L=FKqW8>Na-DX9T15V4#a>;U>1-9>Hupf;Xo9K0hPck zAO%>%2m_)(45$QV0V$wvST7=!5dnw-F`!Nv@H-1g>0JOJk#h6M@}}o>9<|zumaKQ8 zZ>@Er{eikQ#0NL=6Aq5onBB{pKABwb7~@5wzd^raps91B&jK$3uL17>9|4nrHQzYV zm;Xml+zed9b0bJLM*QSGlTg2j|4(4R|KAy#zpre1PBJeP?FF0x>_09PwfKFR-}iwT 
zK=yMUVVl4$2B!186IcVJfR}*;@Za!<3Clobt59?nkOGp+_-{RM2xwOjiuMNv0ha+c z03(1gz;s|1@EY(wumD&EYyx%y2LY>fDB28Y2b2JPfHQ#efgu3SQPEMrSYRR$Z55(Z zQ1FmbsO_ntXdS=j^SkEcP_%$&tEqJc+uUcYSqOo^`;DN}qBQQ=%rD_PYpCh}49Wjr zBLBY=cThHQ$*h;WtmoHOrVC5>zyC{(@M4JeY81bq(Yx|lyy93zlI=b@_L3n$g%0u~ z={av_8anyECH(gCpHJa87uee-?7<;^1(|pKI&IUC$8RT~4%};h4o7GGIZXTWa09Ra zxa=<^d?_4V>_a8Lg3<4Xqa%Pdz%roZgK%_zRXDm67&0#$y$E;_n7%$7eIHl@v;#%} zsHW)QhV-AOrtY`SKHLyETNpW#SXk?dG%y{dp~1A=(DWn|+0fLxo2~uX775akiIudX zim&%$@f5jrvj68)02l=n>>CjZWd^ zMc{p)jtGi$5O{e65rNBqUclj7h&whn`WEn71q1@Q_vJ>TKuq|V{~NqdydNQ%{nF?; z#ryvPi;4HYz$)T3hGb)e-q-(+P)EG~1!fWNzd-UQ0WTBre}TyV)41J(v=N4J0MA{M zcyAuj3=TIa1hjBN?i%qcNNApFh6q~3l#-hRzV=r54{ z$OU8;xsGf=W|46)V6Ri~2I#17k&EyaY#4ze#(R67@PiuFt9V@QN`69cFemtQO7QNq z;FU?iji|T5{Yvm0<_(N}M6On`tNQBs|LMIqIC*J84wYzFQRQ-p9{>JA;I{0@fD%Zvwl<9&mHzF6R)?Q%Y!&{xGtm8qyW^Jf%S>;1aHoNnWL zcaUHEg7WB`cKo4C5ZgzSvoB2k^vz^hydb02{$cU%Pm6W!(Vvm#_c1l9N7d-_(cI)% zX1qJ_G?B0d2CddstKAA&9cG8=G+&ZO=?-Dr-wt7j07D#-kb*R1!G>9Q2Sr$4lrOJn Yt*-Kc_zqmSN39^OYkK2sUQ<&21vE+%dH?_b delta 206869 zcmb@vd3+RA_6K@z^{wtsNJ7$N0TM!QNeBrD$gVfCeB ze$PGkR;A*=K<~e=i(!oIieN|fr$5VB&X1Kbi7~dt*X1y~mUFv~3-<>@zUJmL53t6b zjf|g3GB#QoONlhXJl+(M)0op}<~hvU#zulLkmkwRExewbWFF=D_Iu$= zcqOp;a1`6n!0eneF8STdjVbY z;x!^FVC0(koVZd^RsUGBSZKdZKST{5;@a%a+8_!QE0}_p% zj_)

e~~))Jn0rN=%>hn0*jvx}EXj0O#M_q>Sq`dO*?JjB6Ql&U0|s!iEO3{g_x0 zB{}ChE(qr4epaUQOA$OTFgC(psJzkV@6v~oGBNcQ=?qt%PVLVlm4BrsC2+}GYq7h9 z#lBl>>@aatF01INoJ?K7|Ll43W}Y;B4gkIr-(|zh4EN)A8oDz_u*uydZ=DwB5k-ur zL$n$^+xk8?Fy8LF#Fbb3_Loj_<%hmYdu8KEjKjnO0m&a5A6sH1Ma0IG=#=@!u{=e2 z$CxDjEGavTsfLAu8Mh5eLBi%PS$vJu}hn^iEIMcmM*s{Rz(U4$C? z8JUydo_nII{-Oh(#<-uZN8`H55yKlD5&XDA$DzIqYap<>?y$o`7ul@|V|em;Txj*l#aL=XVE`2l^$(Oop#y!U>pdg%}!8 zmiGH9<^mwdh%EN+w5(IPHFJn`_f=(S=D^`6@WclZHshS1lAq0q%4K5>LWDZEH2FBH{^yz;yzeBP2zoED5o2vSNBaPd3 zA^HgKEEek(A0o!E4o|9Hm#XaRZ{ypPVOiIuUqQ#r_PuzK&dlWhyGoD|)Snk_Ci|Iw zd0TmhvN|hOdcH$>GizLALdV#B0kGYpL%EbSuvY>!*;SxtK$ZzgdQG`0J1MacS0s#d zsDu|r^3_qia(1UOFMHfAPhHu3xE1D3S|Du@lI{RURQ3N18Gf&2hNYdGYr87r3Y3wL zc3nGkb}E;$Q~6aTVZhjgqeT8Sn?5Hv?7B`V9PouS;EIwyaGaEKMVU9ScT6GRG@mhc zC~&xTYKQXl!2Z(u%gR>+(@o1%L1_KVc24}2e%rc>tL->D9~X1KWb6|~zRnfz!5t0m zJWEdiIU0~2+iKm3XkoOM2@ZA{7go=Vr*y;quORqb9Ph`wd-#*u5&V5m?uXi*p6O3& zdnhwvV?9TfaG|_^UHYU`VpN||rK1x@mu}y;JzE;ZeFwR5-?(i$-fyJNsVh7nw*T7~ zyA+-_xuaPe9C@-Nu9)$O%UStq?yfsY)<25*wtC`5@+AJUGHlQ=&jTa5R(WL5c+Zj1 zT<`g4BsaL4M42ed&dN7J6Hu1ms#|dQ9*^S&$#Gn8d#gAOH=*bDITYr(p$OI&Qy9a` z7qN;rF_JwwgH^na^D@CIUd4G;JgfKz&P#`~icL7Lw6Y2>&SlxG;w79{jAs=u;Orhh zl~w!=KbEJkiVZl&6T{o>iEovcJ$_;t-x*0z5Oq-0+=IHgK;=1vN4zcnRfx~sgdz$^Y=e^0ikKBru z$33-^xkV`&ni4Szh{Zx*?5Ca|vv_jcocyh#DavOUBKzVgpJ`J5H8d&i6oh1TGTw~d z%w>DmrYXmVX1S~wRnm~YorK?8?0Rmt3;xAyZ~dJ>8SqI4O`OOR7-mkYksD#b<|p^AZB{aeXH6rPiCt~6 zp1(cITQ2~`AP%ctquwWSgQV(tg#Cm_Mr>tnR{l%p;X3NugT7B7RA$|pwdA5wHGD3g zrbLWL?llGU?M3J$wuwz=inC-Q?(J4?8j+kpv^6_|0;k^1z`8}5KcbiAgt$+|NWxjx_zj7nDZ)KwO1r+e$u(04Za zi(&4%X65v#EO}jyw?0wnGukqJIetaq`RyHARlnLN_pJ?UiR8Km0eEsL8F5g9?wg@8 zz=vT3_#s}J0~>2OG;_jZ?_-FQ+S$f}q`zXd+J1=MO z?eV@?`!4rDRz3mU zj^yq-R@@J&WV{doG9A}?j0Gv&7`bl>LVXz;yOn7-6uL5bVM!Fvv>Pebsm>-jG@Qpp z8#Hh@J|~T*ajgN7xmH)G<6fl0te;V^e*MJDH?oTDaQc5B7H!433Nfq(=S_%Vo9W!h z%Krt~l8wEFRFC>dm(w4mYl8?zaj!DqM%}0c?yd{6O9)Vi2y!Y;y~`r+j&1x+{$j z?6mNzA}%%+Ng`vuM%aslm|Y}`VPaVJ_K&t()sEtJ7&M3(iC}jb67J#96Ov@#2$*IZ 
zbJyxj!l|9E<=R3m7lVaUl;s*k*){_=aVIYuZ{TG$1`)xX0iWR9#45f49zGqU$u-M_ z#;!!(bbDXS^c+p12~I~YZ=*RS!)z5yG`DAP=2`d)@_;or#in`?2vwqx)jx|Mm;s)q zAlUUrjEXiZpWoEmrFFn<5KJ=dCTKh!VLa3aghccOhPjU_hAxGaIdS@ z*pUW%@WLKm7Z#y042AZru!kWyn_v%va5lppFy_^nFR=1#Xqxn&0fM>#*{kxazq5yF zVCK#IU9%^b~QBIFd?o~C% zTfY?DTdVf1wN&k0TUa7-Z3Qd;Mlt0i^|7K)i(T-a-QM~|fMO<}fyFU{jDe*>nKm|A zUWN7#Df7oBC%W*<2AHq{dqIJZo0ONv_D=mP9?ig`K!E#)cLC`MkRp?^du(qxA0QJI ze@^m%aG$JL3k!|#JE8s2XynIuy8=Vf^3Fi56!Q2or82k2bfR!pskeRrpzaEdjb=OM z;|casvOXF##shyEp3BcO-&JPZC*gb^j%O=@ott(TBHfD{TieP>9A(<5qU7HxDEvZR74CieBpJlMdP{`CXs<_ zkFSPXm8rRUS6IpxHJ}EELo2cc6EX$jg01q_A4Py=pze{t*nO>-<#R+hJ=a&df3@<# zc*Ze_B1-y4^)JcF&MKJ_GiAx&KvhO4g)EQ@*6snE8ClDZpfqqqV~~1Dd4XolZ2Vvq zyJ2BuBuFt-v%i*Z%=E=3gWNd?%M#ez76{aam6;2cel%9q|4zJ#_9bEQ{(uyx8l9Fa z=Wb3;4XbcHRQEdWhOs0A%NC>3e|+y~%86beZeVM-k*(bjZe%O=)>q<5|9lF{8iNib zy$q+*K-v_9Q*iwzo?QiRA@B8UuHk90N#?gI)5c}F)&cnfprBaY2tT4&eT>fV%_A73 z8C>E`nHdC7kt7(@@fKX`Ex6Wea4p?*9bD@zxKozuS{k^P$&MC))gfSzOQqS3&=-Si zVU)*xV~R2CTD`Nq?ZHWJH&{WHsLI|6T8Dts0mNxcnA1Uj_15Qhdd^Ih#${RnLBb_l zU6Qh-fTpCg5JWa&&xh^iBAcil>>t8A&*Mg>m*??yc{EC)eQ#4f$>* zZh|E`{50!PShA8kAtRzEFkHp7u1r}t!J?1G)frqV#tHFAVS`RT11@!(=7^zxY4(6b zQwagCA43Q(mRZFZD23@)w3W-{@GgX4{9c9V&2sztdch^Q8i-bvGMiVdR29k$7|Cim z1t$ASnd#*=y{sSb2-?7~6IrI)^Hd)m5n(0O#4%rA&-96!I-jm;&GAhU5Zmd7x0 ze;S&b4MWspHe<0L7un}FAEvpf8Gdg;L^~K+RnOixV)BN_02=N^-&-+o`p||UWw=8* zJt=AaUFku{HhCKWZg31U|%sD_eBJA+8eBp8LRj+uEAAB0nQBL zy{!SG2{2*=ua$6~g|ofE!mYqbv2AGzMug88zf)N_d9Y{YR5@li{K?RRz(bF21T%bA zj$y1#Hn2>d$MbRx%<#1>A&cR*&b6bDcobBN%fV943CuVdh62>AX3R!M#X7G)Ne? 
zp&s%*P|@#q@cRq=X3XM$MLl!BH+0RIG4z}HpQU>?pIC`=4!WM}XcJ6A!53C%6{tw& z-E3e{OWWEUMi+lN-^vFTHOXPU5f3T(=fpV>^?LqU*w0C=H6E2&z2xL^((_V;M6r1$ z(`)<^Jxify8Yl0?pQpM+AZNTmdh+Dm>5fanV!b$ZP)W0v%CNk=ec}p?hunTaEXCOk z2G2TNMovb;oE=)%F3v4cFs7Ou1Jf^SW6l=Bf#Ka8S^BuWNK?vx&`)kU3xeJYva6WQdC5jvLsi?NO+5^n+8dtoMLAYe`< z%3{GQVk(wEN*p1gGO0+z<*O46m8q3WSQRVhO!!fG-^*~8O(GAmh(DL{oGYM5wt98E zfxSU6atKEDi%dDsbDhWKk^uO}hlF55#s(K^yIA)KW^OO0zPRKC?F&mtao#A_7XPJA zCD00mhLyVkgKisGBe{{xp3(YZ_kgK0UBuA6t*hfAMtug7IHER|DIFhYKz<_aVO zOO0kGAJQOW3kUC>p|Dk?O|J`9J;ss4Y z!ky}buB1U(IBh@=B2?n1mS}hfI=fMV!7Cq3OLh%|P^}2oUAr=ZgfMgoL3WW#Ca}|U zYD3t%wxc$kMS`E;!8WJhFuG#u5fXTwm6KdasU)5hjI%u$bM3`I$rm9?S|bDsi;_Rn zl2pT1ImDr~ZuYen}j6!(IJT$^p1qPW7 zNTO>40geBa6ex_dN~6=rjgUr^K4%!K!l*Gl$B+qESo&)ke=VY_&5LC<5Di?xP?!|A0f6BeF@gCzRTA>WOE!EX5EWndyKO#io z1tHr{31&YjhUB-3>+?AulrQn_`l@=W)G(IrY=q8G^{h2w!RZ;);Nht#YeVea%i3Om zZv+sNa$?+VZp7IEc$NdW`xa0luthNEK4G@+6ffoPK(et*u;TrKxrz@6S{G!tuf+<7 zUm&g+@&%iG6ajn!0{(^ez)n`E=Zk^N7=^(BGrDK*!UA3do5`7f#!eWe7HhZe6A%)D ze;w9V;sYS)2_1$WZ=D+iY{aOh<-XXPL3mD+mA?q$B7hJ57+iC?v2GZ+B#wsW>v>te zmR~N_@N*6sn!s=c?xI9gXwuHO5ZtMa;(3}VRkM%+kBDAB<1R=Wjtw_PH_ z@Bj$thT;-KxSNP-;XUw*ofM^;EWCt9r$^i_Q7*+`iDNlLvJ6KVj+Ho;;#h%WmC&)` zv%Edh@17Bv7rD?qi#-zKcLgFjmD_{&=G^CBKF|z#V8K?bGI)f@B+ z^?6&dq7#ADkeHbf27R7>X72dhS<9_r=CUZ)vRT5uT$>pS3SOH!e!g^Vr~B zhGkL9X005*au)l58QA;W1d-=07t3c+2-2B!23_t>!?GKf&8leE^IhOooofyoG<@i+v5({4-zd4Uh(L&m`_oi_kfvFz#zzwOWHVLYwD3lZLq|H+<$oEiGb~ zfnpyhH^@%eAeU`7$hq=N9gy0xZL`=Q**|{h_*vcpw8dyi_W~L$6d~#M4cd{!2lG5z z6raiMHnGEswzV=AVr{cxVK9gllTkFQ3v6KJWURxW9KQq|jThrfSh-t3*V$WO2(k>Ms1@4(VHsEg zs3Yvb9+9dFO-Y6>!!_e8U+i>{M7~0yZ&|{!QZ1s;0o2=iaCa@uTggXh`RF|z4Z#*Z zBheRI0Qt^XTqJSIZSR7wNPO`k&I_JlXiUy-jGEI{)Sdah$25{y3QtIojda-uu%v+| zBsS~=2&SDI_bAWYX)O2>2;7j(F0pF{!*A-HC@BX8(n?&{$>*BL-aN>fF~Y-dmmIr80= z4kQ8y;$EnWirazWcI3Uy;yO4!jjC?FrY#y0oHBq0QxwuEq{T9 zg}}}yk-uChLU=>$WrcpR8Zs}0R92uk@zEEs!Dpgh0RTOWhxnh!LF?(fr@5VHR_mLq)CR9t+; z_#n4;0J#LC3F|HF!)Ch#Jqd0H$Am1@@LD_SewX) 
zK3rAb3P};%(|UI!*~Bl93c)d(Ms^1@NLG+CZYDU!C*%I~_8!Q4Ie9k5ACq+md>6d3NExT&5}MxsQAEczeH$$R<=s=s+= z?BUi?w5i}Rq#n7jN?)|C&3EQF`Z*zX%xwZ!WPZz<#F|mkEgYRjIp=8b`k5m3Nvv0&- zI{*ce6y^BLzNX3!Z+#7{7r;~tGV~=&hsB4>K~_)A6S^di@1RCMlo;HqTDrt_b70C-an}6>1o@?h3An88m;Hnd$o}&HcxCF7p}uF z0hEIHA)IO7$U8WvgXc#JI4`5p5?G)qQ5u}_DQ2uAjgpK5#h2yxZrh}q9%IcVT6P4X zg{e+>xkKW+CIp{_;h7CJC0h%QQzBq%MaqdiY#q`J-qxYv)9wu;qVlovhh~^wK#xK& zW%>~a7cjSEEN`E5U}qIOg(-{Ha$VnUe%b z|4v5G*eocVh-%0Q_DQ4iYA|>b3^rhFW!N-B5hlzGh*W7jeAjO@Vp!Yl^y0kqHr>UVT!mDum+WD(})iPd!U_~}pHdeRDMrvpZBhTZ%blOLvkDytW~Q-^^Cf-DzD7yJE{*FWI}H% z#`|KiZc$6~Tr#~XqF1?HiJN6dnV6*{L z;;#@HwgrMJgG9~&oN2;En`fwchq;S-%|qAU;raxqD2K<$@EA&gK>={Ih&h9By|*qa!i=*4VX{Bc+}{82D48q=L^%Z z?cj*m;V|>YfQd_(bF2VqBy9=?bMc#k_nFO@{)3e5Dch}Q!@uLVTO0A4<{hUfKqHiG zDiQqUl16O{a0~khm$)>)Q~ZL!yTNf1NfSl!Ic@Xd)O#?-op4YlU~x7;7X{eX)qFI%DlW<|OgE}Jxf#Z~A{z1_JBdiC^ z_C?#R0xejfVtCNtS}D0}g|k7c^HL#$)#{2Hv_nBcvKP}QD7OdlJ)>irXa+Ck;DsX| zXq{RFrLbN4^Ht|CYl>55$1s}l`cn9Dx^NAR!VJT!f~p&B@D&@tZ!++Ax7 z&AGn9v(6$e@n4k62a{Y);H(V06l*c_A&b+%g-}%TO_VVEDcB~6BzDvntH%aJR_lvR zz*(Knbz9R`iCK~fa82kkI@GQk)Nv6K;#Xb13e>d1c@{0GhsVkV{ez)*l(RYk4}iF6X;|j&P0%#q3#M|d*YfpP>gfql}BvG{!K2yPhft~ z?9%auC3-HM4E97p)C$T}Zm=-Df+?TdhDXpef0?66Zc;jINfE=)Yb&r_Rr=Wb-i0Ex z8W1C462U608C6)5T7_*p6Rg6LtUnKI45ICY9X4=Ca>N$IFp2~(qgF(Y$Pm|l;ojPI zrP7`>&j07C`Xi`BvSJ}9?f9jrQ)b1TI*sj!90)YP9~*a{g2QUyH{{Nxd7J8qaKc0* ze!~)F-rU|U8jCFeX^poyjZwz70Z{`=;=9MYY*Dt7z3M>0`1F>|hiSEj1UpAJ$3?_Z zxSE3dgzhVPhOHp!I1@gCi`T`wXc2*`mtlvK0s9aRa=KGEx6cOo8=0c?Z9~fR1Vwi)m#psOH_+sya#BKVrwvF#;_?xhLh8bov(g|ZYy@19_ z;UVP2$7u5+Jn=RZkZ#OZql5@%OO0wCxwpA$biFZ#wFRRCYOvXoQ_j|u(NIK^q&)H! zbja+O=-Fj~DnunJm7JNFZ7fcA!frz0rsOEq058|Q#1d)CG<>YS@lCv018kS^Yl}F-> zVsnPIDrNbWX+NTC3bV7&qRj<$c6YjF;vP->XrNm!uPR$nM zM6m@FKac*q#<0O?MOGby>CqLq^TiIAuLUtlM*jXAEu7zoitKR|a~M0XX2I*!$ZzD0 zORUgX?76{Mm6X>JmVmwO|EwN;{0|DF_*;M^MZ46nqZuR zM;}26!w7f|PoD>`=gxnq5rEpYmv-vfiyE|-iW;@&L*B2QyeUXnNIhg^h=qugF6OV) zRjM(7V_=Bw9_zY$+cSj@q8`_L{6?aGxW9m%dngzn=A-5e$BQ-_0waS$wp$$BynsQr|#L*wJ? 
zFaTmrptg!J{hGH@+`VamhCe>$x2Q=bPLBI`^wt%Y+7tZuH*IoZJ`8V#pu--$uw$b- z=#uOl-5oBnvK7rizo4Di;dmLRi*r4#UZ;V0?x#@gg%@m$-uUQ;xKDAE;#}8`TRXA6 z2+CXyYjPu|w#^^SR0?VHZ-ZRj>P$dPj1vOV@n}pM!=0}}$EA2qZsMXi3qL0H&QF@}dN}D}b(rit z5e|DYU(_#WShy|ii0c+PyNv=`YZMl|p>i5>|I*^xeE1X1k0+_yw8St^mGO z%*AABCpg{)>FkK|O%1P!A8=`CQT$X1(_*g{3glxsh9fUC^}{NH8P_*sbnw9+w<5YA zc~zI6ZE8(gGQ=a~t-|!efG}A$Yn(w-Aq%oOL8(LD*oV2MClMchCcx)2x=N8+L z3?s$O1EvZYZEaXYcOlU+NVs7AY{G;ka!o~OiM$dUP@NYL?l|X}+|UvQBzVIJMr~lb zgo*9ZnAkztv{YD}j4eGPu!cFU&PxL@!=ke`R+=e#VQb&HT$-Eb32bYkHD5ar(Z+U4MYLFLK?MqRXU4`V_ZKC( zT0uH8LX7vh7#G#r7gr30J9Py;vD@w#ycjp4yQqXlV2R$9Lik5gR_j8%Z?5 zS&4lBbU$Ifq~%X@cWsMOH9uKu6UtZf6Q^r2){!4s;goIYRVr+fb(puCj&D9pt($>e zhr*byuvPAf*S0QzI&Y|8`aTBcCd6OHjOyCZap<5;8Srqjt1DJsd_kL%e_pHtG(pfx zHF+Gh^g5PfR%pa3IWt6qUgBdE$L2PbBF?vbR#i{WDQr<2F8DK-NiT4mQ$k;9>7q-$ z{e*b6Mb+nURiP)K(8mztNU@al5<4Wol@40wx}XY$ga^TJnntxK(F>Aq!ZWzU22=w* zfLeuPrHIF@hghO6!ooVG3hS6EtZy1gSZTajDO`~3y042^hz25@7#|F$B^%ckw25xr zySZrFY*hCs7dB-47j|CU3Zb19*M)@E77`la>ngYlluqc$Ab2H=6$ZhT?vKcY+o>-o z-3?*>Mm6>ihXi^I0u9p^7Aj|#OR69zauTaOMqiLt_FnPe@&82dr?woohtifKarNZQL=A$zc#No-LY0gXBFr*!< z5Hus>Aa#%d&cMnvaWsmvy%*4C!y=!dnR9<;gw}a>sMfhxUc87*e6L)*h^C%e;5rNA zI`7c%%K@ZM*e5XtQwr7Ju^*!xUDpQFKkRUV+noc|oSiFaL!lEpi$dpx?9^kUtnPhmvd4UnW=4eM9FVkulh*ly-{0V^KM#0>Ej4B7 zrR(LQ$`rG?F$kAoKrR>WfrvJwAKF5pwkKkYkO=K=VA##zlCc-I;Yhdyz??55KO+C2 zjhEx1#$it#6S*i&{+;{~3(3q5uCoA z5lKv?+pof2P_>6mnP4|5^3SdkHJC**KN(F=aj}H8r*l2hhsO}NDc>PPdr=}GwU9c( zsv)%u$L@uokxAGj{hN^m4a~x43zL&82oZF`&`j;QH>!`{8!Y^7RkI2Tc!#ZLmCBQAL1(EA*A6U zgW2YZ2@SzqoEWiyk@FCHGt}1%_5BRBG`TQ#v>wAIQveXB&x6w)5ZNUd+f^9bFCk;A zMy6MQ`&ujx$KiYmjk|_J5;^IVL#L$D1yE@?EUt%2keoq9=T*`7#dX*)U120$t@Fqy zA?t(`2niWzD-5(3;z8Iz&vr4;U};dRCIn%8;_*f4UZSy^k zc08n-Fn${LuC-4Jx2MWKwWlEmJu8PF>*b;~VUibZJg4zn%d4L8BZ&^*qaldySwX!P zIwW15!R+uST$ANhgNv|!W1-)$D74soT#}DNSK`{M!Zr%x`Zd54{j^?awx8<{rJWT% zn7%;CnWEBJn0^9G{|B(pHNLhe{zv;vX%>}0K#Wk7+6H&x>@3_23)ZMsw1Al$+&c%u z`T`x0z+S-o>h}p2=Zm3h%CCATw=dQY!wBHy$R&7}T;iipE;xHuVR-ut5hS 
zEyJk00VA*{6$_w@ck5!%VPmGc>>1_$;^e|`Y&{E;rPxZ&dGshnoc|G95mUqQm4f^* z*a3A(dP7NH)f=T~WN~3uerKtO&NS4{dpp3blE0E1lV!v8BO$Nqs5Bu@~T83GY1 zuA`O2{eY*5?!2d0;s09vKU$fo(49vWSNL~z+%s83^G(mvgPG6%>34`}KnxN{>j}_B zb?vk3wNJk?)Wv4{4Ei|z%yg6~!s~GSYOXsF>y_ym@K0fPEsl#mdYs_}x_J%mj zmoa3iFRw)4{3>7qc&k!IWkJr1sMO|4m2w`d5O}LZ&Xk|$$U9g+xiSL5Rmzkqdn5M{ zSI7D=2RGE}Khv9ZHQe_jzAB=-%6ziof93jV|EsAMO(hiXX-Qc6Yzs~44WH6)FeD*B z=v5C08eb!HdIgKK92nnH@?PzmfSlt86CgP=RhsagA3gb6DSh?&95XFKo9OimZ|yd0 zr%AWt8k2dzKF{>L($ucw*wgM*;fMv$fHwx}bavFP?K-YRz1}lp6l9~DxzoU^eCnYV zIj&-v755r25a_COhGPzslJ%Nt?2?civ(O>QVFb=31_@&L4$n|qFrb=J7r`S8SlPr( zBi*k74E^RTU3u!Y!O~TxeDzwYiD)D7et-~$kCqtBXoeBXU?lp`^>*c^*OM^C#=EGX zoTN(7G;<5YO3n{M1bmB9L>NX@mR{!%=uCdvZ>0l)R=`Q0Ntt}4#`!YtnIcTkgp^6W zuOk@rh_C3>vgetJdfkM3Hc-Hg$mwSKw6qD+_qMYA^}eo!KoyZGSLz3AD*Ob~fM(|-u#GBV)Rdm#we;UOgudc+=f;3)0 zuWZ7oPSS=_RB7jsa=OU(sPY|f3|2bcOp4P(_GI{ki!$D%Y7u_oN3RSUn1a`mA9FXg#rJd{8Y5BBy>FDGZ(YY*mAJPpzw4cP zc~zP4_6XBIs0FyjE&|N>gY>Bi2U>RB(m%_TioB42tsVlBQJV;UtD+LTvbT1+=nH@-`!-5V$3RGsMUt)=sj zSIb<^Y2Nm0oJw5K-l29D~951cxtGxNoR4p~c z*Kl_Kv$ym>qtf!vyVHqUdefN*-O_E#lz4Qt9U~eHts)xTak$w%eU+C!8LfCfHkt`M z1p23oss9Z+l0e5O#!qgP?z&BR;FI*;1ij;bKt}Yf22ws@%k-*)4H%&`nPmnzG+rLl zBvLP$=^B-&n&g@(gl$is#6KT3Jc^Jb8=_%`vf0WYaOI;ngi>f0$3po0d5(cUtDl+}KFr z#fYs-eUV4g(m-EqM$8HXCmO~MWy$CLd!B)jAgZIeLy+Vf%t1?f`pbUN`;vT(0z38h zbf$BkGVIHY)Xjk2h6*kwb6D7D6h){kKgA{gj7|;z{8r`hFU>9jq1!&33Cw~1mjdLi zTdNM5uw@#xfZ9L8lmKQ0U^W5f2bgo3{Z}a+uQi%+nCSJ!E6l{~c|4L8Hx#vF^&X}Y z&CGcfh+bzvWWaVp{2TL?%}GWVE}BGC7r;IMr0qx#>FsSc>`ae9-N%eA*9_}(GG1lW z;I(73X#zqat_O1`ec7wH!vZ+^9>#HO*~j~fb9B50_p`y+Hz2gAJ^1B?$3U>zzF*W> zYplmr+^z%Yd_x1?vGT9M_YmAs?x{qKqm!sMa|{ubeAE&$LTo-K8wja%=Y{6+=VR(J6G22ZSWbMIoXbXl%CsAZ(7kf_~c^czXeaJ_7L)@xO;V2)G|Z zaP^_{Pb&O$z`Nqvy$v}X_+FjR8;~Z|>LYG8=&?O69#ScXd$R}&MC@)XM~QLS9GP#$ zZ#_7*AmZ9kpVmXD04svmw6}m>lts=?`5=mec;OOp z(t^DFQ4}ZIAp##nA}Pl|UvOPGENlaF4D>ofjFY$d@alFcXkp9@0lOQwC3%&(U-XGt z1A3PL?o+@ln5L}zB2nI!l+(CbdE<*@R}mf(v&#T56*hD~5Lkg|54ts#F&}k^t>JaY 
zQ8%Ek6A^qGI;J+O(B`GfM*iaEbiVO2RsD2)8#?yF{(Lksgpl3P4%zA?;H51M!Ma15 z+J3bpx$ri+P!SL!LQaT?yD*YPq+UV<@2&6&#@cvrA(Wv(E)z9llek6MtiH!*iQ*s_F(o2 zvJF}!;ctd}?9@p9Zy~eRsZ|G?J53npxcX@&yyok_=QdcS-6WOaSc+pAj^#L3;8=-c z6^?QoPvTgO<0%|#aICevOwv01SdXIuMMkTnrZ31Aul(+-#mfO(+{ z416L8R0^Qu0O|nHMF1%P+Cgd|f}jRMjibpYrh5S<3hf-W$Q5Yfv3(+QYMfH?!0 zf-W#6A(-WW2>|9YV9o;Oe!%PuF}A3EPO!6;06J12_!R)P0cb`SAR=ZJo)>}oqky>z zm~()c0+9$l zR|_(slMXnn{SJO-#GQCgFzC5QT#htO-}fZDal53rZo$1sm-h!+PdSE)1XBeky*A$E!H3*})_WcSXDUal~xgr+?G3-Y) zJ`W@UH)uZBvl{|8b|h)A3P#-g04|gxH7V&~O1egV+So(lc?Y6-2s&+;qWq^O)fEP1 z#aCn^>SGMwxlzp2>==j8k&6X#Za_7l?wYDY=I|2YIDOlJ7DK+CQgtw=9SIWNiK3aR z49%G^9#GO_0wXIxGtLlaL}K$)Hu)<0Fy&zzGi|lzxq6fS+Bnw$^&W$IF?9@$bu7nG zavI6)E=XLD;79MQQm#qkUs&CrfS7nVjtOQ!>ONMW1ey(a?gl9u{gC zN?eVX^JMH7R$oyAa?EWr%}^B)Nn3PJquPXeM;j4x9%+ML&B_{u5t7?Lp6-IN5*H2N zKn7REpxOvTO%N*~l3^2Fk&wC#L?A+oUzG?UlT{pK_@{s;3J!J}9pr~3un!$GOc79t zs@1@BDC?s2EW+W^Vvd8%FApNdVxU%q;TGb!1Nsko5vjq6{;rftF5X@W3E)*++GRTg>w_&0 z4P1}*Ov3f}Z>~qLngs_%O-Wa;ng-h-uUd__FwDd$@PIn1>C|e$E$#WrqWe(cI6BAW zT2DD{7HEgn1H5}rmPv!)Y`7&+b|)T$Uq%l<`xBl~dPGJ@l2`*#?;@jvB*^HxK??17 zcbN1{I3UrPfgCI|aQ*XoY_AyuoMBv(Rb0W@+%6aVn-DyLU`2?(0MaYBR2^(Xp@YLE z`*vwaM2$jY>co$Bq;K*FYENx1qb-e9VGn2m9)i^`tT9kfTZaUiGHoLanp~7)GGwQ` zO^(`&-BG3=+u4K*Rtn^0F{@LiHcaHMB$QU0>?Iq9*XO60E2$<$Mty;F(;cIuI^Q^kjR zx=!tdIn}x;R&>+Ri&`5oGe+$gJdWT_4=Fq7+4COnt<3X(Xqbrs#2hjd8#1N4rdAL9 z`X#CNUAs0PqSyL3il#02?$SjhgrQHge3g-^i0|A67i^Klfb$H<bpGrMRR?M4`ZLbE&cKgQsOc?P!*f#{Pr3=OitHgzF3=V{_BG6m z+ZPCKe^`WHSoo{_*V6f7ezq3@mOe60tJK`S9x&^~Sta3Mmg19zl5_Cj&F0H^L(BfH zOu9u;htb={RKQ9Bi{Is=KBfjV+T$tsgO@fRJ_MHkgZorNv%$;Wtf}&BeqFYBmh6_a z%JLtJCHr=z^G9<8+qwA=(D~1*-%dAf)x{~(>yr#GO-4~BWGy$sTE$)G80}K(y$MsZP{{m_xVEAT|^6{b3Grz>`p5Uw}JNa!Y2&4K@BS5GV zvgVIMXj((}-pBP(z+3@ZdQNa}gc4RZ_kusP_0Dt2Abkvid)%iHTP!RKbnV z)5uunL~wA%T~)sNPk*uQ=Z%L^r%O=y;boD5t(y;p+-u$8`=pPrC@qIm#k*HjZsQf> zk@1P1iB$*n=$x{t9q}}Q+99@VRUSVw&$t+muAoG!M=uA&fkitozalUK@dC4A+?Apj zk6IHS1R%|ay5d;9Rk`ozQti4VcjJZ*<=D{{sbhk&``BnHb*mEDkj@V%gBy}1Ji4_Z 
zQQ9~_DQFni`&QsNj`X6*fgO~Pab!s5lP6YhX}Fsk$T#UZ`KA?wT*|8Me_YZ?112Z~ zn-iss3ChIgzetf2l+T-=?tS2PW8;IU01@?O?BE`WM&!(Wvz3CA6Qz+I%DX31WjVX* z-~pxforqxH;oftFzTgiKn#6=iJ0z!|cbS5lv=r>`JLca;qF^5U5 zkvE{0)t(81@+BITp{M&vZ)~f6;Ix&GL0wHOQ{M|Fk6$mg^oRgMD>2$r_F$i_;ennA zER2<5_{aCISOvz?J!qo_sA@O12wjWK7yBt^+AhInj-R_BvL`5`5$ZpgO8@ig;tt&I zZnS`)4Y=*sO(QpBb$s16`_9raY5lhQoeCwYvEJ`^Wm4T-9^*nm@V z8>rG0V%G@ne~%aS3)K7EKNg!V)QSX3J&d({i_N~MTp!uLRcuwcGGm9}4sc_eL-{Z_ z&?2M;_n3(pPGIMC3*P1CvBFs%GU67z)+GvCTw+y`Lp)RRu-H(tK+J}fn+x-W2C2h~ zxm5>&Ji8GY9V++ET$DSg6A)G23m=dmrK10kc|gq+Hj5>|Psj7gZA!O zr;d7kwegV0eXC?umR}q+7^ka`TpAP%2V{zb zjT-MtSIk%5Rii{WPtv0EJsH)h5L{HET(yxi>7KlmRPfI%o@eXcg zFXmk7Kt-w$n_Ec#q+5*V^s$~Sk)_%|OA2=MK-a;VI!AT+NS&2ZMWevClq~!NB#Fr_ zY=I;9)Ho64UH-{12r>puVT-u6LxjxJwv6}mJhV?vs6P7394=ixs!VL(V%TxS z3;HoOW4k~Oy{}y_YWI97@gaP&=Qv)66n#&rV`ESHn>7)hObwqRZN1U+n1(-`c-`j+ zx`q3->?*!~gov>m-akO{{G#Eh6K7XYdJ-I`DaDDww@7}F4(@&~@5kK@m|&2#&ql%* zOo#W#@zacI(#dRt1YAf>AuFw(2LvAz^Cb0SpD0%03@+Xje4O;%VGrKONsJ*bsAicN zG0x!V@ZdYTcMzwj$)uY*zMw(j>ryI`WJ$y=QZ3{XuK^E@@QzN;yE1=O`uVd@?$+`c zj)tFLR=AzbIe3Tx|0T`+du=Qfm7A9%1%P*nbC3l7;fMoNHYENnmPfXa49{hnG4RUAoCI5wJh4+21xK=`WHG1_Q#jMEl{>O4WKkKnSDp@>^SpH=8wJx#G z08!+(zXT+432(5Yu^LUH@TE1ICl0y%^zK-d;5{o!=*1^yV`4q1panbH>ddikp7M6v z87u$WGx&1{UGjc>Z2={7m|QpZM3^SZ_od*U*7)dftmhwb{K$xnpQwqrQxBZ@(=+PV zS$%kE0O5y%%@aZ2TusC$G{iXW<6gR_;G2&YR{$~e5tf}wI@BEr%SJV;<+`R zr%Eq=;(0ipCrX}AJS*dQfoVPLQeA1nHwta=-qA8ww>VL%|8XR2Ntqt#;V~xges@FJ zhPlYNIMjXvV#l|LbKE{(eCK#r9Qc@MO?RH5G8meYlK+P0X8axxk$r?9QGxhLyY4f& z*;%aygV8dA!AlAJ-7XsiJ&z>v;-cZ0#TcV}Z6?fKb|GHvL|?S!gUq=vwr}oKV`cCQ zBY7lq?75+Dn9xex44k=rGT@oN7_v4(X3UnYeF$v`$6VkV103}H5jC5X_KzR3DJNn5kW-z zFq{xFZqF99zsGVGU&IJaEEbF;CuROT&$%Q%K$9hI@|b$^gj@eCMgWrAC#Wrl;&+&& zX3cQ8-H30m>vAvBhCr{vPgLRislO+wzhNx#?-G6K*C$=u?4Vs+KQ`)F#7 z$noN}D1twuf%_(!quLH=DR-rm!As;Ye<_u}DJavu$MMG51@Z`F0UCYIx`7Qc)z}Vr*1v}j`8rcP52Wx-yw1b>@H_wK-}_4Txcl&# z|JRKD|I;7+ir)h1mPVJVGj)hN7*T?M0?bpD%8i~Eqh!6ZeShC;l>$#kD*ykvR8T4h z3HYC-0#8|ou7}d^U{9(S@I9VfBhNBFdm=bVH`~9LP4?Y#88#R*`*$*x4=4fsNb=lT 
z%@g}?##;tIK#lug^XeFe#4HkG{|oNbh3`St@_U}$M!qy++lQME?Q}GV22b4W_-k-Q zsI>N;^U>x*ST7B3H$q~YhP@eoM%#a#@T@Z-9%0P<*o3k3-4mZgnsF`td#GoKmA@pd zsrLL}<&PWEP)dTgIuTK^=*oCuwda8}ULcLA_UuUGGZ4Fz(|NC6cW~oA+UKpo`)hn# z7H8X{@kekn`4G>vbpEIAXOU}Fi7GX8TkU5(pQQ5%34gdLHaU)qNAOPtY49!bf8d@Df6Ip6sFbpUe=0~jHTb83_@dyS3X*39|56Z} z5&TO*VkoP?HqJWzv-nFvo)sB9BViXT`YyaUG!2Z&7~u4nSq=2mWbnQc>R`t*xK%{+ zxvieae!PHx>si>3U(dhv9Q^{)b$=}Bv!d*osLOIpu+Ivxr~XwW`qMweS)3#7cPyXi z$8T^&(Jd8>K)4A0xdm!2IY#g({1uI8dp|zHjz9Z1hceq}!&OLPvWPOYgrtxMDV)IX zn<0hcI8TBU8gL#5DICRlBBXE_=UhnP5Y7`I1^PQhIgrASF8mk|F&w~oGQ?1aa{m<2 zNzTJrG(P42=A(w9;Gf!f6jl?NPeeo+zQl0Ck$RIFj)Py7c<@f^Xt$^^-0(hNCKm<& z+yW8#3!*a-iEsU=L}L4VohPL~AFVknGCfC!aFb4hcUzFldrJHBv3V471Nd`0S|0p! zJHvr*D*j5rP^9xy&}SB`HUq;(%1Q6Ru79^@Zo=Gaf198V8D@b;&f;^^zCx8d43?je zUrL{Y|0f>P=1a=1>Oa_g2=&7(Zs~J77RDGBKStsmaC-DD9?_Zu{rQ~hJ+)c<&ffH< z8S_P%c-g+S0)L_?NT@bXYxj)F=Bci;Snn{~^}Hz6doWsML^5dO@D4K!Y7ffaBrwWH z!hb1%G|n)2T7^GQv{tOmmh8UR9e8s*%xFI_`bP8b8GR_>p=*rNg2K;K$49gV$)2CG zdGw@ZpnD23Khh@sfgZf04(+6=+8SYx=4sF*B{8qVoCGX@wD|@!h5ftg-vNN8@NVT< z&xv6?Gx|a)yY?&?kAk2@&xV=tD{Z^Fb|=W}Ur7#Q9ixaBi87J*pNgM@3*mpu0{t ziuv*oO^*LAnnWU+bmMFOAM(D$KgR2e`?+_XnJkht84(ghCW#Os2%^-I*!SA1s4^2- zBw{TpL>5~^@l#qy)skqby{)#=s>)bv>?t93O6^PNs%?_@d!CtL>i2s;?;r5`eB#c1 z?so3E=bm%!z31LT=b}XmahqOhEb72X&ZiNgHuY*OeD${yrFxqzjYSyYh%-o!cR4Nh z>Ep&?IxVRuf~x)b!c6zT!0e*lO+=mGa&%w)(CW*EwFeRpg!fA17YpHstD7O?$tarN zL`)GIdF}`5oSwhL4m%UT^#*s^UtcuQfXLQ>03d#5sYY2OlyV)E6T7<GS|$SAS*kJ%k;?mBn`uc4?#W zga#$?nA^-0I(M=&g}6y}rqBsYf|-KfRK4i7Rmr#wRT|bn1i7i=KBnpo@@OX78~Mn|)dg3FN}ASCL^ORFQ`||xnv1QQ|EA!7x!C%f zr|K8YgsIL=^;O;v!3=!EF$0f$#xu}IL`{jx8bptqiK;z5LjJyJ*8s9f2HW}}AiuO1 z;yht@A$xZXCa6#j_F-_a57mWzu(vMTP3qGe1Tqu#wWKl4MbB#dpaot9&^Q~8*0o7~ zhmJKDjRWt17JkOuzKa8Xp5Hzh9-><<%yFvLLZpa9n$bd3Z!`k!zc}$)1J0pCcXd$msu>_OuXnDW;KV;LTpJ_=HD-tK6a5v7)_R53qKUC02Cu#5f(Zj{$uaD{AXJ zEuM5D7L&`r3oSe+aUE4%kWSm)?3_|my-DaY{;ydGwYtN+facc!Yi229rimN5w=v7Qemp= zrF1(g*css+I3v6ZXM|Nmmy1f?S0V=|wLZcJj9>%G-C1gCQdQ(pTQfw)E?*; 
zwpg43dJCQH9*uZe^cMGM>&qfA`=*);*Lv(p?j~6F<|KgR5VHUFQ21EovsBJwaqLNs z#)NZvDBO&>!X64)^_Yc8_bI}6FzMDIoC!huF~Yf+d<6(|G1t~0oP)WysuO^6A^@xQzih5(b-S_6|l;@_AI=eVZ8ag{S68rV^x7lfMAQQ-hk1UoA1j6r2bg&h%& z1oq+)rT}~G5heh8Z4o8|d;H|G1=wqaFbUXeiI5!?wm_IT5ZG&mAB??G@a@<{;dae^Azgb#wEExv3r(1w@LkxxEh{4m zmXc17d$+;-cw0+>N4^@EaLlZ}sIb3``Tka3`&Y63!h&8=D%e@kbyuuDrmI=F!${wE$I`8e#m;2Hz+||wP%OBc2IZH|!pyF;3Zu>g zS`n-Q!OpbqxhfNOZ->v73EVihA#b;*6_q8o0n})qgV}MFBs|%7Bfpd(V;#ZXA$X2U z4x=M&g-46WAxd{xPAulv$=K`RRXm^Ttc>|6S2ma{Bae2XrrsAt`%`2) zk?CzkChu21fMeFi$ipTfc*917&T9tlcc7i97s+ha*?V?==;@zd zS_@etk(He&Hp9b9UGN5EEEV(=Re@qVqGlNLOJCwvsa1PXv&McjU_ewc?@_%ugxydT zgi9C4RHH^+A+qPS7gZWP0gMV>L*soCexkK>OVK5^uVT1IBV-loA?|kMVXUiCaW7G| z?Hz|Ga5kKWUgHTW?KN5c#u$ylfw1=aGK&3Hex+nTNJ+)-QCbzXU#udrdVc9~RFRex zB5qQAoQQ1iC8lQ2VDuxZMHBZxB0@`v@s5XWCV~< zKo8=eQ{ANY@nVd4NL%Bfj9u*|RLC_uxj8%Xk#~M+5};^IU&P~#{D0?iRhgEjo!4+5BXiQ*aywzh}X57~w3izC#G#0joQRkOjwngmb|P_ae*% ztFt0x!Lb`*4p`wXge*69B6P})Lg3LZHzec+yxLa#mseZ(%LdF{0dr>-9Nw>qkW)A{ z6H173HH5Zy7S%l-0zzfEkI9FQbrvKFpNorX{^@p#6&nrJ$+xShThH>d>hyMd1agFzyu2*@w>*LB2yeF`l-^YYNgqLZ zT}7?z7iGqHhs;O-94s@;2w7&ZbAl8gZWuzA8AA~!197-8^t`Uxo6#ONwM&Z5}Cu?qBN7>hiOGhRiUb+;-R zL27l1e-*sRkA}P|#_4~Cd&K@11^8$RiNwZfDb;8pB>4EPB+6dy)l0mq-lYlsMAc;jM7aLZr$v|k&@6ZO z)*y8nAUer*S84tLfYJIY?HnKm1@Y;K<_E%YIueczXFe53_Mrv?MVD;#LVoEv#2cb{ zKh($P&iwm4zIk&T=74t~4}QWkb)m36y^8P-DHB09Ux*V7W+P|H2e1m_p z;-lsFRBOcpH462e&Vm^%7HlWbRW^g=>o(;V^Rux-dN;ElSlL}P$QfTo|M@q+G$5w_ z0#~Docm>~BFJeQ9D)1C`IvsNe7Nym|vVMVa@ZSk>WoXyz7`I^V^N*M>IU?RFLgO3S^;3@Fk_&*)wdAgZ7f-FFlg%q8bCj~ zF^XCBOUIJ53PkTSAYcMI))ffE(-3D3dI8GfroIj3`#LJ9E4H8lpzL`C?$S0K1sGji zse<@W#5Ifn19br~k99D{l^Ms~#1P@B_oP=|7i~>LK`@wP85YMl9lXSGC1Ws;33@l0 zzPKgok@a;^%dhx6(s1||08wTF6~9RjUKj0odp~+Gjv3!6py9W%aO{R`XK^ByNx8S7 z=VcBSO{D$;Z5u3xiqmAegKxEl2zS}_4~iHf+C^W%#8O~4i`SJj_*f?f2)56`U+;od zrvj7`HrQxt^(_*V_CN%ObrCtmC(ze@kD!EjSbZh%(UE@(t1p&-#nwmnXx5t|GP@u4 zUHNVxGg7`0&Tkvs!>lUH)IX!QZpg!kF&Ru*p@z^bM%*4|^teB2_X~;FEHS|f@y$7E zhmxdNarrqJ+7MrcxJC)~308u7vc*nEp@&S&A-G%v>~I5L;k)%=s0IX2j)7VB3YeF= 
zDLEy6=KUHw;BvkgBnI$Jlz9#L7ziHi`Dl>y{{{t>>--q$tfo76fPIpv^>E?Y>J?Bx zZ?(6!uR*jDh1hh>t^@Z^O;TZ(yb*@0gO8hK}>du#S%TF`0jFSGQ-P z6AkTpyL~&}A5;U&t~1p+M4o1bW?AJJ-Ry&i+)?jkGF>}qSc#Li@xY*yoH(BDh%-qt zYc;#3BR&jqrtuP}0M2@HFcQOE@^O5qBOgC@g?uz_gs7J7fsaR$bmDlDDvl*RpMXa( zd8DHPtzmGmb}kh%M{A8Z(`vq>VV)P&G{VGnX?){y43P4fxr}V2_p1a zPw^qzr9qZHp@_RmpX-Gu8e!CAkGywajEbk6oNEI!<_E|;>nJNTRCQLD_Uq4)wADs z$!0}!d(i$~Abb(NYupJeG6XD@VPX(?*>KCH49BlKs`dp4ItM8ofcEIb-#M27)4;Y$ z(;1X`rM$AoGqNZ}rl*dIwZ0{~uF}ZC3xwG$tc#XCT7t z3`f{b;rBu+Nx(^LCbDqP_~A8nYBIvnQ;xHZ_W;CsNG2j5d&%S3%srp(K(%%v2#VBp>?XK^85Z zf;cX!ff*zIIx-~S!$W*vZk&wY_wbt^PT|59#BXt$BhG96>xkzd&5YkL)-0Kg---Br z3BN~y5nsT9n`S492HEfGM}u0hM%0H}()8RpJ?aUw#HmoOGzR^p`lZI8&A7TZ1S0|M z#MacOY1T3q>@mUHKryZp-~i%XT=PtorbDiI5Z|Be_@JR2j2Rw!o>zkSSBSSpB|KcL z&GX4)05IRig{vrsq-ncrImEXi&NLZ`vb>IS{`(cjK74J4-_qhEokHJa(N`K5L!G&S z!Q(sNGY`ZWe?3z;ag>n4ucyOf1+QdzH=!}jNflRQowbylCPHPvTAH7R#b0g#?M{R8 z_{%vulO}eEt+a5o_|jPap?y{PYlVG(j3j#iZ z8QK(+p^YQ0DbjFGeulqu`pZabfizZmYZeq;;=N~QxmS=DU0OJQtoTvNex)>g0)X0f zx^Vskn0gu~0tc+01>qaM&Vq*_8-LADuUy!5PuhO4@X>w_&bPC)wTfgGew?ksG|lmV2c!9 zN`HCFP|=Jg&k!wT?^U#I1~vu0T}hW`fNrX+q6!}wVc_v@a+(^vB!ekG3q~~MFe(g1 zQxOjGzDD0?iBDz9N=nWa?PPW^t{wrxku1F4i zd1cY1JBgvN@8NAUoT!*Pslwb*4W+=DA}k^j$(+;>Ndaud2t(*k&?l?deZ4u1vUb6= zcEd-MFcU~p>Ajia6}jpR9iJ(N%cL_DJqrK~JVX6Cj5!eDxuIk(EVYgGi^V_QpuV2bqS5l6G1_jwSFnKQqZ&$whMR<-S<@C+C1RS?++Ns zxz5)M^jmn-FE_%xiwGq-7W4yz01nHrmmi*$v z&;???ki9>oZ3{&YVJv*Q5C;-|H?IAgqz;j3shA~i9jEn60mbFxbQp6-7E$5NrQ*1d zBahRL55yCBIh{5wLl;Ml$BP(1u{-Xn@YL2}U+C{~^gGf#^~dc~_uev!B~cL+wv6w6 zf^EYST`d5|x3_u%zz>d7pXC_Jf@8FMx#%L_UPguw!O{jEFO2yREG;#CI=~cfg|14ijIe!?a{0rsxP89o&d|!w(le*(mx5AM2%} zE7d{84B*f2QJ>AAxGgC(akFR_(0nxR$U_fuZ(_)}2f{L7bMc2XI=ES^H`WHad0riX z%;2&gzDHSGM1ov?h<@54vZAk|UQS=+NFN5Ec!d!a5S29#NazV9FmNnL#~h+XTg4#H z*ALkd)%HDlxK*rA*8POJ3#;am7p$5O{K?--o>?_-gbA{%Rr5M6{%@=1B|7`<;6)r( z>cvxpbuH!6TtPc)ilDTQlL|`5Yd435_!?>4Cv&+Jz44vRo1Lc3HtbPC<@=Xu^K_Ut z_Z8T!o2nDVPdUBhnQ8MFhQ-CSIdwi|;c~Wa?H~CxqL!#epMQqap6;+dm+($F4pS48 
z)7XCLWB>nO4Vix@@udh)t+3nMzq!*+?@oJWTM_8W#h$rX8!HG;-0xRF=nClRIOxg^ z`F=r~t9^5SjKwN^duHDZQ~loaY0;OkZ}wXBU-r#%^M49szry5`;s2rjJH$?T>08aP z*~7oL`3Cf3TPl5T!ri6g-xA*BT`QBqDf%18;2Dbw7kwjE2sw8bHP{IQ=)gskwG-R| z7k#3?6A7Xwz4x8?wAFG)`m$a^+Qypetq?0oJ;Zn?k8CA@BwatYCphT>u*26 zU3*{2+es64iLh3Wm7J1U`=RAy1NtvyOiFimU^~L#?kk&??t^a%djNWnvS&k zz7IpYdJgH)4I}*4clo8SX~&oLTiv4qMrAp-6i?fF{rbtq zfp!}$zCE&ue%me9G>X7aXWtS760>-r;Eg`>#Bo-`f;%}N`5B7JU=o?`lD{kR-+Li{N+wx_*C1erX)p ztfJ=|pTAJT5@2~&Vk1iM{S*N)#XvXkYLFS#Fe5)Ogm zQ;^SmzjhNYxbVS?cHb6T|C^IW>o+I433Z5?@hRqnV&1H;Yl+gkVN7*c#+xjg^eTX{z*hpWi^lS-+NY0f02+aK8P<<|xE zc+LG|{lz4RY+tT0gAp_&r*-(NAuh{LDt5 zue=#>5|oJ+>>ycvd0Sdc^${CTej3<}0drD-&FC2&=?2jbsDfu7gT`l~=+1so`!#P{ zTzVXQ=3t|{mO0aa4NQp{-3Ac(;5TpR$lNYQx#v^WuxGvMi*LdE<5d0@1x6$HY{d5s z@IZ@l2jbN^>2tzkEur+r0TG`)=c_s425=yQ-(I5u3lio|TwwsV`a6O8Nu3im&-< zn4jG@{hY)(ub^>!%vVjS{bR9vVxBr@(j16zH=Ac0&&(v!kb?`l0pPbDvj_6iz^qm zR3H+R8YbxELD8woEevKTtb@ZWQB>y#Tr6)w^L`Mhn(qA|8g&;iBKJWh@gunsZ{tdg zwE_vs6)W}Qzy`aiFiAJYf<{!(UUQ}u&|VBe3EFGIA*N_FSq_P?&I}X3NY)A6jaTCv z@_j4r2cw7k!u?p5zATgb5{n(3C@ilwz);KCr@c6GA47W%iDt5qpa#c9sP{wIw>Xpg zQ(c>A)@rp27CLBHu=fDPPH5@EgNa`Ks9<;D0;v`Q9{UXgpfB>tSP1@$3?BADA=Dk zdwm6~q!*ExJ0@4FDFrp@KUAze@PH}aHMe%e{Kb|Cn%ra5=P7wDl$;WF%@zvuCYRAnhx^!2Ydy35slV+}d?m{Qd z29w`07_RN1jmKc@Q{RPP!U5*T`+=be@P6pd9D}8ZHg!!nR&y)WAhn5@{$tu%2{dru3R0E5g#7v`Vo2r?tgD~^$OifRS zDDRs=&kOFQ)Dxn;_b;%Pbb|jGeSJbS_Pz*{Oy(Z^IiDV%5FNd1xDdtyia&|-ck@Y1 z-5|<6DW-d8NxL26WQe5Wl>4(<)q4$ovh?zW8Ei+2EXFxHUV2P4VNyz4h<^yUICjt> z(vnUm;pI|ABtfM4x`;G`CejQh?5KBpg86P2X>Y4K!k6$uy~^hAh#pAdej)tM46@6;nNgJIZ3!e0nY@ zpC#IyL|A%2qP4b5G!|&ti|fJs(WkyH<4L8PO^3%BLR#U$6ic$o56{6I!4}dhB=bI_ zs8(F&T!)}*rR0|^;@{AQDzVb{sX1TFV?Ak7^c>IG_2 zdQ!OiF84_vs2^zO_a;l7mKu8&=*=Qrl%G8&M9sqk0es{UpKek+(4JGcvXS&vQuy3N z%reMC6*4OTobBWN2C*y=S~)azf^76tpsn|V+yobNZw2|+lx1KSO$ORE_YRuoE}ODE z#HBfK%Igk!D50oxy~=Volbb^pCM9y4_?GWc&xG4g-Pp`1FL9KbGzoN~c-iJB&d+?e z;k%j4$(WYB5-Iq!=o882NzjvQEln*mT3QU^Lzge#CQ;+fz3th 
z(XP{CFT_!HiD+aL(8xW23X^tp?*-DL-6bL*^iNdDMDjaACX!zfGLbMj&VQG{o6h=9KC4fCR-dJ=MXL{5ye67}4ox)uhKUHh zQ#eJ_EhUHEnxn?oJ_iBBo#IhoptAk5pN&8vjX8%Kci({RItPhi=4?~^r)YY_hDzFb z9vi>0U;X!BPW7Lf=~YP&k={1)S-RV^!Om!tky(j897{s;?n;2b+y0F)xA`G@kZVJ> zrD5I~L=%SVtI?9PBDgl|S&byc%1+$mUKd80ziUaNLd-qK5J*o=l*(Dr^o(B}VLS4=}W zr#0&!&S$Zkunxd)lf}+Y+%kr$-8rB6KkAWZQq}<76IVlv~w$(F1MDDP_D`Zr*lnKV+}kM9)c>mDZ?R8sV1QKPxeCZBdNCe*eQ_!XqvX++Uz zBumZ8G@)P4Sv$jjMrLhXD!h!Hn%xz2_p)e!M`)^D5p@Dfk<9|0@ds9u3E&yOaR{FA z3(dJAvOOwrG@1vA9zz8eMVRTV%M40o1Okyxq;>_C zvt4Ex{P`)(yNV}wx@cI#RXOhqFq(Ge|C!Z-I%Mr9>dW=B$nzS^a1%cL-)6Xp`hT0@ z*8lvu8E#EB!)-#Vu89wTA^Z-^d4Vz%X%o&veC`9~qZ zQSdA-|2~axUveBG^8wZmdv!2&#CEF{;R6VKHrbuN9viq=Xn&{6DGmky;BQkL-*!;( z4N+vJENJsbN~(zYHSG_wMT z#|Sjc=C(;3!q}8KQwhv%_aR;abK7qax>iQdE7R&)inwN#)YM_J8*lY(i88Jhw(skl z7Pbwoz6%^Cwo^ImXZ7`SZNQ}`oWFgs>DJ;e9A7JTO(GjMEm^(Nw4 zr_c#pNiE?4`h((ciC0aNFs6!&jqC<(+@IUXy0eXJN8{gHkqyk*588tX6qE7`J$xN< zAmp|P58|!j)FNmwN)GhQRJRt`n1a!2apVlmXjV?#O4Dw`2)^+)I(Qp~vK!e@wx+!E zBelDO^W>-7DC>?0GU8n-<1j`s+rt9=U)b$ojA7G36T|w&~V21RppfeAk9dy8s zAZq}wrmf$B1t@T|u9)xP50;)F(=YJNIZSN4w(<)*4KRj&Kzupk+A!J4SF;*r3sx?p zSvQ49+e?^jjKOXA{vc7aLZxQIVF}V1L*z6V?SleN^V5a+&IJlkz-h1;V~ELeR(cf` zGVpzn&ciXk;tH*~BVO}(hy+C`%QRB?9T=n*W9XdqyBefE5Y*u=B+;Jjly(=$IU#87 zU5Mj%MNaWCz;Wdvlu}F}j022^@mpAZXVad$qGiBrEDcytu%HTuX-fzMmHQXub5C?J zCb*>W_a~oI@;wpZ!A7GzWk=Bm_psKy0V?61`~noRB61aR4TWe?N9T~G^C_1sY=(Bi zCCfI@AQxxn)=xoz&Z>UL?_c4(`xo#jMY*5(CUw3q{uBEeH(riiJou(p>W@-&xX<(r zs!(x`umhoh82bvL#3t!>4!^>+7x6)`49a%X)LHu;EZdLzL;5+59+`K<&A_vR4hZ7$ zn`z<_;yiT4kV_1i#$#Yq`L?4+oQJJ0-*UvYbj-dK*L1{3ehw=&zOllUFd@}J#eAgF z8Q-`Ne`k|U3kp`cHWmk4RM*N7AMDx~_*5^~bi}(MzSE_S9H6}C=h!UbMi^rL&=r|@ zn6}ZzM_6m#1U$ARymX$V;;`0kvdUQxV^UmkAVupOSV)v>dx+OVoRKsKZL-zk1NCv{ zWXGi8GHeXW)}iY#x%MG+g0UCh^X(I`za6uv%>RYTeI1p*?9!n#SbL>8fMOs^5Kl&& zf#~X(L_=sy85qmb&uD&`2+Cf7WQ{68`|ZG37zT#T9jN%tr7Dg;c2pIBCY@HHY)R>a z`z+utbt$7*rRfIZ@va~u&k08!w3C9Bs5D)`2UdR0L#Jl>EHUGwuu(S(yZFa!7y>AWVqpBN&>;t*}$ zn3QMMqibHsyXZ3-`UKXaS@`~|Hid<43)q=`It$xI{9Z~0PjG*&KRr$mb+Tt7Cu5iy 
z0UMEyaRAM6U$kyPoJWxDQFC!oojWko(NX3-eC2AeqYTGc*bQ`zFPA1xk8o*+%k;G8 z$+kC)LR}q&G(b>@Gq}4nn#7td$lPh;8HTvi{xigp*2K^xYcNI{!)j~d(gp9174N>e@C3ZMyOzR=A6z-^q3gRyz-qV2v4S=cAWPI6?N#khv;NysE8NgJeYdYc;7(i_}DE?O{YzrN5 zIXm%NI^jN6Z4E5-Hw@pt8!M97i* zSO!@jNAB`+2yz6ct0f7LBRA=T-^A<{}T1| zt7jBlYDLh6@I>L?A|%Kial5tJ-OTU6xp+n%0TvH$p`m{xTaN*>;%}JboF4StB*$*O za$8LqRCx1m7|jL0wb^cJ(g5_x=Z`7qK37s7B$K|GV#-D9;LZ+P)HM}$6PFIidkChI ztm9Q=()@Cf9NflHPbAWAfP|d&@B+8RX7Z^JLBY|;!(~oq7F`TRYj6|l<* z+Dy}sS61CzxU53_BxMJDz&p?K)z1sp3)x@DwbKcYf<_yM-~Bfd@7|-LR$a`gsEbC-SO{0+2|UYZs33lz27j1Xh|1FI%0qp)ab)|DY%F zUh>oIhp-JD0*E=4*LimlZ-uxYdc%99{JkTOEWS5$ zQp7tt;%tL@8u2j1q2Or$9>b;@*M=y?Tc!fZ#ojVp4;v!6*>02Clxq9Pmh#9{yG?3K znCZ@dxyCLZ87JpWWy914^5J?mN^MYW0ICi}vk{Itbp>X%U)gWGz9eVMTW^!WSGJRt zPpONq43#^27N+@17$I)HNx%Eav4KCl{oE2Y2%rWzEK%pt45R$Gb}6d6jZ;(}-$Ij+ zRyBWF2PQBr{N+co7U;uYCd;2TQ0r$53`Mkr7^#S1Nw1N5u$=HA{Nar-C87rqX#r^3Z85GnSaDl8eyLKlmCt$|9 zU|r#lL2{e$bHDL#mhpZe%vi%j%rBFQE)}8!;i&8=I^k-;nuxR)Em-S)Qh2jAhERSq zi2~}#*7C-U!v1yS6J3`RXo11I2hf?rGkNs?$<8M7-h#;~TFHfu;%vzsB=pzBqDxlf zVX4%Pte{ zWL|TT3FFt&-f#@z^aA=l902>jMKN_{J28wV)WyTs{R(JxU5r+b0@_(uu9l$%G&BP7 zngui^LOu-s0rtP#t?S@yf4Xq)_TX%#`82bh>>z*0r-Sumg4~u*4eF!pr}@;SzU(2r z1TCpAv%FUV9K~a=F{YZ;e<{_Dlu_a;b&r&FO!Kk)`~(s@4`Bf|{pTWF3o$#J!~II$ zOqv%d>$N=rO{e32Oo>#h?*SySnO8TQ08ban=^U7x@JW>(ufU7$%HkxEf6^Np=hfXs zcaf`DhtdO7A-<<-4KU{#Qi}$%eRdv9>-f?2*<M6R2p48u?l|QXw)tK-0P4jDKw}Bd@Ne;Knkh|5C^9G*2ED+$UDsCwe?8sgH){V*6=MMWuEe z#Cg1s=b$5x7xJ*PBi`NSG`4LnT3xudv1}kfX2+UHTtFGe)QNot7+r@0k$ghWd7G@E ztfq323|~z?F>-gnuK<{z8gkav0ChcDML)%Wa<8tU;AQ~++$w6_3=Dn3IGWZ>u8=cV z7X~z!D@5=stL#8m13>RXv2#|!uPHQNMaNs9aHCapm&1rvWQdhxTYEan_@c~6mohJ* zjLwnv64Hjbr1>H3pOuB%V&xzyFLxwED~R=Ect~+t3fr&X+)|%*p`op0nrG}vyJB*2 z3>|MJ#}>x5mPe&u$%TK&yovBBh94@m~(55)d@T3*w6A#qCwu1V^qrE;WC@miC$FHE(9B;jX zcE-yDZ_^6f7AGb z^YpNbjF#8VQ*c+=I^anvv*Wv&y`Zfz>4buWzW$KLc151lw8GV0WtIr;j?VEUxB_Qe@_tI*7LNe zrwsDhP4*#71Qf4S(8FHPy#DMd>&twi&|Y$y%%#HBy(HMdNh;_qYp9hvahAUCEyG(? 
z;Pe;_UP9fU)7I`_X=Fk{sa1Q1+k{g&1@5Txi+L`;hIDqu^i36E>(Q2i`p6!E)mIf= zf#1LzCNMeenPM0mH5kdxMdJU~RV`fHM}`P_KZUlxDx;$|yqjO@JUM13MQ4DhS8!s& z$8&x0BEzAi2s|jLz_PrEPDFYHE^T-EPZ8TzpnyvO5ebrfHIJj zj3d{X|ME0VhD`y?sx;76xrW(iQm!7~k_)aW2 zgOCrzf1(fj$+zTdDdf{1#S2nscz-!ozZlP^=TLEfSyOyK5BkfIq7(HSAYVtYae!MWg)OEUO?no=IhgW~3QZM_PBJ9h!kB3Q4>!>(ow5 z!TGbhU0eALjE-S$%~=2tw8_`e^hkQ`b=hd#+irY}$LY*4`JMHVjqT;m@po;Js~FAh zGs|ireVJidL?wg>$Ib3ZWrq{f4fq!A)QY$serufD39R}YZwFBq-Z@7P9|JsrM=uA< zS1<~_2g`R&vWZ>g8i9oXW32@?mR>_){>>}cq4adH>wo^P-a6gv?ds=zO}0WZ`I>bdtmiw(kS(4 zVhW7hU)Fa9$Sa&};jtWSbK&76^(FDg`?zQYv~vSnJ|N>znGmrQl`EDedIjL}1K{6R zR+gMb1@GXZ1JgY_YLl&L#ZY-rPEDeOH!zjI7+HAv4LL=4ZU-o+EU-1|46l2mn#h9_CxejkJ^P1;s`}tOpb^Vm# z)RatIU;6`n=A(4JW5MN4euN`SfX|HInQ#0*_IyWO@*PCJ^H|?hT^d)4TZR^SGCK{Q z6!TI2F}O8gJ3cPXO(bgHkLiy)zFX9+>yuEW4|~!xAJ-oORd`^2@?-fCz!1By(H_DF z$dixclsX^9KMI~yFtX^<|H?P|NDjT7ATN1xf4DDAKce9y!Dok!q#sAh{-Ms7_{(vz zM=}4Po&Z@ekk0Ra9HuUbGWc1(2U@;!>K+Hs&V08H6D(}{$NaRy&f}{?E98zc4!N@z z9p8QMojc0oz@44jj69R%UZVxH#dWbG9`2~^2whH+(R%GW`6bKhKKz*#a&{E-E$|?WU7UBr^@ZFE+gJg zlbwQASc)#$Kqc(CgM0t}Kla|&vJ^f^lgCBy1~bdau#C%~bgNi>oxO^z1kC=(SYW2h z>2gKgX^uSCv^-hbjY>x0B**s);3R@zDprRd)+g;c>!C5Sn;d8+<5<~6dh2QESXna= zg3_sD`ak77tnfjTqs4prBai3E*8>AZp^z&voM5n4)_TWg*szz95>qFc-mvzv0 zXhiAbWexdm3e6oa8@Pu97<{cu-;9?{nuqE`8?ne$aP0GLrTwUoJr40JI862|mU$N6 zgu4;0Gr}@UPqik!&^NY=z6SKRuUvD*?5;;UIz2U+DE+fJ z*EK*Ef`e~nbcMOUouiDjW+ zA$-G@IX|EKjfSur`4>+g*w6r<8f$VtP$n?WjNi-NmuJw`z3wJZnHS|wlJ#W72--9W z8f^Y>`e_o@mTn^`Vlp(mS;GqlO_n`G@S+qvmjJw!rk_=%B|!~?2S9#uDKE(+o(`kk zQ;_+C;e`*UU@pjO4QcmO5Rm_HdNNf;^ye#VD!RemIEoH1J363V1Uqtm+o^rK2qIF% zXCMj=#~|eVOv6kdOcYEY>;!Dv0ZPljzTMF`$T&@Il26|#+&xXs6!Oq8>X0cz18%Vc zJ?Nu>kf6m-=Is*1E;N>B$~MiL=yPrecyX`&`DwFh`Ijl(5x6|UI^cv>6mZ#Z2{4?4f zmZ$5r9+Z!<$1BF_8KGYVl-yh*9pV z#WBj-4~()FVwANCG0Iwn7-cQQC~FmBl(i6}tW}6n)G z5TiUmi@$9Td6cye7-cQQC=b*!Ot*&^Wi5_T)8 zYhj|k2ZtEt5gcNa2W#2C0-foTiA4LO!m!pj(C z>S%)%i^XBM0;0guDw@9iNH!a&SoW!wMC@rna<*h+9Ng`wja@f&PbVB#hl#}N67M4_ zi)+ACoPB8qAk8hlBTTD17hm9nAVIxk#d06RX7t?fO? 
zF^^0ur74?lCdC^Qq3Uz7+Ep}YG9KY{k6pq>s1cbso;$;NIK3R}&p#`5FntfTMbwJO zjtR>3Th zf!2Tn;JvBZ8rfLCss>*v?7l{Z`Go-ot~r{~j5TtE{zU!!(jdCFMn)N@EC23bl}ZZE zmjQTkakUS{^%U&a-Bf)EsQhm~I261?dYrUg-Kd)Io{FqV z%h$?=9!dJ78M@S#s-)bB*e|%cR=RsUtDhdPl~uX9hO7aFVyVP>4pT7eHxZZxtmCZV zV^Uy_mr_>l40IcowgK*HtCs7pP&^RMv2YRkLe2>^LnG_nsBS zl;ID`W}8%b805mOuhUw7(@im7fTG?Of9>!Ws&8U0 zQJa`g;jb8f$MAOqe>OEoxeSkZSF+)W(xmT*T@S_F&&`abnqnS`O5Sk0t*bb!DA$f& zOBm)R%){|F0)J;v>8nWZi@*9%23jKiGX7d&opcC)r}6hA{!ZfWDEW30R8k`~J*lZWCMiZuOKPm< z_^4&0lbWbvLR~R7skw^hEXCC1_WJ*L?=O2hxt*S;g|M{MXC#LhrX`yonZ8y}CT`Uq zO8j26CVfq>ZpDv4ZgUpoFPfWKV9;46=ofbG>bGc47rzDA5O(trLk#%) z&wxo~TjQR51;w&DCV*xeulamOCKjaTzhaX*m@9fW`D@S8S# zCOfI44WqND>@ztiGR4h3Qa{8Ee}>4(Z>~T?AELpJ(7E{obZ=pJT^+ z3cZV2$21z(-p2|8vXsb z?C1Gg@1jfW{nuPhi_Yrm(Bv;<^~Sh(qTxFZ^DYBIiTBH<;TSLu8#Kde4a=-+$x){z z7J#cxQ-`a=>ANo^c~4X8S~65l=1(KG%if-={#WHQ)EUnzf7?;{R7d4+)Otg!e5zLY zRP{~uO{)1NDxZqV-&Q%9KNWo`<7-*_@P?U5b3prb7!GE5hXRypvYdqY`$~rRo`mM~ z7poHHlZJNT1*JYGzJ=M|4Rn&(t zS-uY)40Dq1eT97!cLT+IEjt8!bTYq`9bdCt2m?h*P9>KkDwdh%J85*Ko2(YGA5&cXVSwlNDnb6xZ^7*bf=8fai8AUX&;!i zJ8?1?cOt*^qMNYAVL(`0;{m#UoPOVlfnmN~?>qazbomZy)Yuf7@||p;em*|Gl&o9b z{pp+Uq?gBMJ$XzXsgLYqaur#^{yiqg9OJ;IYK)|I0P~@9Uq6u97HjRR)Kwal^KTZ7 z%gM4!Mz&s#VNq;QA2Trx8fr2btUkoDTn(|1nhzGwvQ9B?2cdlNue{g^sO`l(6a0`B zzp3%HKF%X;lR4_tlr} z#?I~S|!0)sMjxoXDD$KxJGHYG*X(JIQu8e*gQB$MMlj2p-F{n9}qeSp(b zOg4(L^mSivy1EauM77F>qElg0tHhfDohW_}j^s*c-X58*_ncF$UyM3~QZiTvnX6=(E{a`#c{yJ^khX#kx<9)JmGbV#D;FY!gYYCzD1EmCPm?lJs zTi7ry+n!Tu!U0I4m)&i~P<1E`*e_H4hbYP^ks2a8VXB2-%7uW%(b4^~Mqn}y9Cz|*sec7KuM|^C%oXYphS6U5*NBEmwSW#Wa5>Pd3bEOle#!Xdba8@P-#k^Pd`X_dq zs!%}J2?RZuCLF+l=Vp5EfUK!;v(FBIn=Npo!v|z=Gp0T+da4c!1WlpR2A`iCvO<;N z2|Lzn>?EuE-m)jSX>`VLhH4&^Z-z{RzH=V>4rgLrhcEVGQ43wVy?z6J)5?SLs$WfPXcdRqhtD$Z+m8Kb)Lpfq_e0Uc~R(YHj)h#(wbp9B7kuH2l3{ zlYx|CgH{kckIW*kMU!~x%#TsB zu)I~y2NaqfO|Kr7)#QjHlzbSZ%-nqQVHsGn3T7?#y^6V(9zJ+f128&ETsbTkYMruo z2&_}h(ddjj%HP6qh#Y3wqqFL)QHB_mE37Zr!KWSRfb*ns^!lNt}}J%0@x7=oA{wrl+T5SkPFUBvsS)OOnc*s=5^-$(%&7r)30A 
zclR_R^JzI%tf6zKVPb9QN}eS$!o)f}-fl3KU=z^lKD&$yg5OdFkcrB9YI7Y@(n)AMzn!aDKBQmXWzD6f zU+A$aoMG^y`)8zA_2;$C0U!xw3&1k*jb1v7W!>|{BqYu(8=q_vOi&%qN|PL8pyac% zwU|T&XJtJEwzINXOd81SAae)kn8%=1?gQO-*$)miq0@dVzDs}<*M;ASRBSL+I5W;b z(WP=CSv}p{vjrqVnTr)TT>rm@*>N3*d00JK;OpA*yPB5r5CT=n zW1YmW6{vMNc<>wK>uxC-MP{T4T&;ra*o^Na0MNd>rcO2n_+V+A>*~_RG z013g~#v}EQ>`cWU+b2fFb|0p-X7~MAuiaI{#GTAAk3Ta*ZvBdef5DZ?TZT~Bw)Vhg z9v;Fo-b2N=(Ca&BygKg|TkfmPfkgwvKG=b84S{_sT&?7nOcYEAci+9$6+hT+V&^zV zu;HlHHwI}TmTW0PFsC1Y^q3TTf{EXN76)0l80(6XUjc2(_lir)2c@*GFlUzO88_9_@9M)q4cz5j$iqHC-b)J4 z#-4pXW^jnMyBw0Js`^R7baRZaDlB@e(v`)$@hANB;h{h-+t-%Oz)Ha52jeSligh74 zTqrjFu|02{g*5h?Jrkop2EepRsw+G&+W8pmckr$p+oTzQ{5i;Hz;b^n!Y69BVw{VK z$&fO1?6iv^Wat>m+$4|}0Mwj!0T?Hsc>Cd58P4}1uwZ^69}fgm&sflXm;DN;!+rl% z7%m!+KX1Pfi($(lvPBc?g~}19_|aW{j5QnQIaUz`_!YzQ~!_xph*Q z=*_);FC{{|v6Z858jq=me8D86;ix|1kC?a8Xuk-0%CIcV++qaa_O! zoEa8HM03wA!F|tc)k_;>6zGr=&=RD^*&vt6J9BP08+VGx}t_$@%^fT&NzwfC6dk!ZQ zg!*#SBR!IvEHW=zVnZJ$aP0@s7MBs*8v@}4Wjscq>89z6@4`zi-0_%@hBb3`8kDm} z=K|=~0cBho6Bq>1&VkeS{|a`MabZlXfE+uD&CP)j%0TC#UZ~%Tq!IFpkLY}J(m8$E zPNF!?y9hL?obhzsIS5~vuP6hBjYX?GR))1PxEoM#0rX}wHUUNbW|FERQKvfRRY-Iw zdjXw1^oK)D%z^-(-A|7KsvRyf9Ju=f-9R1Cu0*BbZ?OIwY!+O-(=b?IBKIo3XM!6! 
zJZk0=UTN`Ke{9uQ(nw1OaDQtdS70TAigeXx((lHMIoo;wJ3;-s-o`_Y^CQX$fVvG7 zW#8bIKu#i9k5JkM5tQXgE?41j{X$^yHtcK${wIJ~&%eC&C=b95Z%d#!TNPF@a^6z<&Jx#-dvDZhC5gV^F*SGH&*Y zS^_*z0G>Yp*xmsI$gJ`rT#%|4>9-l^QH^pcc*eSCwnCMHBr;e5?C`;xTMS+@2KBfq ziP}6!1YDU<0YXWkPvvHUUXmJQE@=alHxp=3+REVDit~KmGVQh&D@~_SaVZKf3)j{u z4Jva7q7mFduO9kJt}fFY*14E9^A8eY1M~q8t)3)zltCoP>?_Zg!M!!VpSwO(O%v||h5X;zv~Lf&iA*YC#iK|eY6B-$DI zm3;Fgysj(K<=K-E@7!qj6uhz_^1OITYob0VKRSgo!GrS9DLAQ)lcD9>LwYJUrrQ^oRz+C9c^p*bALq~vV%xU#$ zcy@P9mv5fdIwDib@zdIZ1~{l1QkDf`g$wKUgv_kax(Pg%TPn2f=>7W&4Wfap!Q+Ve z*kW|7Vg&-ql5Odg+TE(X30561m^%zMa|Rd-#|Zc0J4uJ5oFYG}(%M5udAdqFphfzJ zVdjw9q{(Av5D(a2rk>SO8h?k_JNS+2(|~FRXfDZhNk4%*xU`#3 z&RW0SL5&;lSM=YlIH!;N_#7Z?NtfndwT_KP<2`Q+KAWjRJx~s*3O?D}M*OPHL0Gul zeqOs%JtrGn&>pokgBwz;C00InK^r2zuzVpaFKAtc;7fZdw?W8;16Kidl6cp1Fv2*o zR!oM&Sb-s>EyOddA4va7z+udI?&R}DmJ=WjB)|4)pJSH2FKYdW>!la9(GmZ?W9!j0 zoDmerf@9^+7ctABlVsc_ZJ`*wyEE?Y z*6Hvq=?1)V&EFq>|J={+kUg$w)5Bl9qv$xFJ&#Z6yR5~?k}KL=t)NKu{X?6Os#e>e zX7HAW0GCr5P3eCl2O;=4O1n0=`qNNogE4U+;vNCuGM6maQ%xqBhf82wknYkX>%DIV`xmQ0)FDqoWWuW5aoZ9-d&eG5ao=g>ZZ zoCQ>^E!y_dHEpRD$x0?{x@Jh$mtWb|v057{tbAArwLE)fYxlk?SJ!II`bHM|uVi9C zGe=_4HwJBlA$pDq8U;X;>#*w1UQ1(p7lp%hwIo1nb^VyI1jm&7UJ~BmIl^&eSK8%3#V>o*78RxhA zi;X)((&)m#>zKd2{v`0cHI{aLY#UQ?n^4SSQ~M|5_mNR~#;7)<+mF#LC&mfH;P(eF z%O0x8YPL0!o^SA8xQVS00U4xU1;82_DL+$1SJPz3asB1*s%Qm16RU|~jc+2T@d8MK z97z1;cK{5Pi#5?wYXfepiLp(O)hTiUCu+kp4-G#i8wk--{akjyM+36FAz*oaj)a4b zbP4e<&k~DbwF3=tr9=yaN!63&x04x7Sf;t$oOfSoZ+$y9A z&4u{s1oK#g@>FH>^q{&5bj7tE%;jlabuF^cIuz_OCos^Yo<+wO;W;EAxn39DwZ@;z zeY$9;g?_&6rY@SPEqC;Q(1YazJCot@QHb9S@trNIu7$FjSqzA{R_+JQKk)LV5z`}K z)asKlG8As`-0Ow9oumIQ3j!n%6+7s%je8h)Fmi0C`xrR3`Mo8)&$pi zo$7$3(CS<$0<4Pb4?U^DwcG@k3t5M7eD;Rq`)(I>rM!q81`AsMJ@;DpbriS>vF!$q z(B-gpUx3Q+SN&>64dl&py1>l1f|4*`Z?!`um?MD^5=;^pgMu^Aib--wn257$cy5E_ zWyqNzEE=CGGm_M_43tY#is#BN!bH615(aY`W9N~_;rbOobm}Nzy&cb3oSI9DHx+4< zEqb4UO`6vtwKML@7*S8yJr{UpD87K!dSF#tf+ZHRfH5+Tih{{Sw4x|mN+#f)z&ntH zb)_LM{Hm8PR>|prfpQaPum?wui^s;7l#t_Jl;TD9R!4X-w68@di03qn= 
z*Ivaub^;4C5d{+u+YG7m96Gv+_tmwO=b?u|<14ZNSp(rJtgM_sygC7c{2q0$$e+W7 zy?H#&Vh9v?9CvnO(!l}@B&d%<%`fJRpXfkTdr{B>p@g&K%9Wy_GR=@w+_qK)G2=*-+#`; zK_?SHLK`aDxM{rKjQYbKGSIeIxM6_o-+61`=p zCb*g*GgniX2WjEU2lEHifQdjEIisOSjtH9GX$oj6Uu`J*#Qv!NnER9d(=Af~)om}q z$-#!YKhhMCj?UN^MbdK2P(de#=n6f=pH=`qYi3vhFk(5Uk&xO#Gd;qjy|IW^zmwe? zi}sBVhZ+k2Nw1;v%65qY(C1;fxUqOJxqpRSrRp3c?oO*?wxOFJmn-)LJEZytq}oJ$ z-D`n)fpONz!`?R!dmsHys2Nc*<^k{~s6Ce&E>}7Ab379IQuf4W{J!3MsD;dP!YJE0 zT12IMyA8928aLRajZIGS>NYS2dhjH<=2>zqIi7CjZgNI6fFCs-3tUya$X!>O7q0v8 zy*L)fdo$CtI%Cb?0mV5c6~h zg4OwqKqIh>`)n|oNQC_zHp*b10=q5GFoBtw)wz@A$rDXQm(&fDgi3k8t)jbyUCG^G zvANYK`YvcXzbD?XL%ZSBm;5304Klr%h>iApVYY=NK)`Tgr6kZu&TA$Hw3?4I*kM58 zE9j1Q;qPnw?Zsb-i6+gTC)oYw%=PvUTZVrQOfxUnK;3|LBf7SR%eKu$Bh%d$W$it3 zKy%SDc1D+?@oYgzTjua9n zwvfZSRa5Z;vNTRK6bsD@Wo4Wg1A9bzOL4z8p|gCpr5M|~pdmdek$+$PC6@vfbtM$4 zvKuP^<4K+71;v$(2q>F~Nt}t&)m9vL8l1~L9ExC-$j5v(+$(MiJ(Ig`Ixr5U?00gE zO-xi<$S-W7Cnk5z1}VG0wA-NvnQoC??ZTn;eL#-2i;)pwVE>_5Mafu256F+~;^Ahi zE!lWT4Gf1e885M8LZgNuv~`Fo?a@xMNxbOmfgXLZ16$r{(4T_U4BQz4cChCBIB%3K z_#UpVbBdr&g@{4C%?6>JhUvHr>)~34uGWq2an@N)Rkf|aukw#lS!jq6WQ~wV3u|lT zk$5qt-=FSzCb(TJ^@T&XfoJUi3FmnLBmD&BF2dBTKjQt5nKtO#&>dhkM_x=2k2VfN5i2IjF?<+k zqM>{;QKV=;bd(<_iq{M~{`e%(+xW62Nu+DarfnyaM4amQWFwOSV7hKQL`Q#AmlNQd z1@OI#&2hFzI$Dd_0&XHwwie&Fe#G<$l*G_^f~PoP-1-{9o0xj(2AYgOxg{Nnj>G#p zRc=leEn3cn&W~?-w9Ek~S@TT6QSwag2pg2G@<6gkMTR1^jd;|w1~o4lTd8xaB}Bg7 zMy%2fER$VR#1z=;<;y7;_xNSoDpJG*Ro%30NGeVqi4Ov`C*a6zM-ofLc^34msO2>{ z1%eS@fll$Tw)H4;1T~Ze?L~}svc3GQy-3oJw)a~AH{B*~(^guxb-Yc4sbP1uqidT| zuGGl55KElQ=pYjMMuX5s0k>&DO%qu1#v0#sUlz2A2~Z?(xz~e`9)sNOZ|(8>hm#KA zLilHA$9Av zHJwC+sy)+AzSvpxP#4JWJBx1WJlUX&=%Zb3Cr5V?4inwche~%B(Jy>LCXR}l$meEc=-A%L!abm>w2F@khc6JjjRBdyrJb61#ZHH@R zSa-1}>_7^wCCELIDu3!O`Zk+np0tE~tT46cI9jH_QHCZQYfG9CGHTc zd;oQ<$-DjdgqK1or*2A>k-bF|al&*$w(2eHO}@s}#%G|%VPxO-lKDa!*L?mDskRt^b7lTE2V}uC(B>@2)9<0BD2zHWV zb`5V3q;~fV45ku{hy2@*8lg@AYWPe5HDbHw1$s`Vi5+@nG8BWOelfL3AkF*)I_2G; 
z)Jr{x@Ru`afy3ecG0{A6iL6Q&3EDTwGO{l?T!c*RE81&Y+sJW!LA3u&kxTK}~=iO1xg!<@ZgJ?FNW(+OKWo>H%V@nkE|z6b;qeW#T}Q(fof>4D2$tKBBd! z87a*m5Jg`DnY>U}UA>lWohzRjBXNC7Kvf)(gUjgV7PTi2FhKla-6HOBrXGCNy{tX4b zK|K~^xERmsc>z}0XP9W5Oe&)i?MxjiyM|6EZqX~*37guZsQj`_pCcN|wZlYsY&Lp+ z9=%bHzY%^THv-@DU<@G~r2KRk&b?`^<;h{9?|_Nun-@Rl(mot&!qM$<-_3<5bJ24$Mz1NGPdyIhOb7Y%aKVf&5hFx<$QZpxh+&>8kjujqmXzy=2fFdi zTTn0~cbAoWmZrpi4Wh4%53OMG(@`D zj?c4bKyeLHN8+908mJC8J_o2n`3c5W>jmqfB@NmlE>q>nC@*ww2e=jV$D&gv#;ccs zn`HUTNYOH)HZRS}Y7phkaq`+o(R@GxdigtnUxTT35qc)JM27_T<&5XGo3I_ff@jw; z^-zH8rP>pvL-0Gly7ud7^4Affd3Y8E^#EplFCNQYcZ%D!Us}oecZ#m+3-X;iu|QYK zGk1#Sp*Xf%>tV3_<@*QWTx%O8X2xc0pluuiC+rkBTz?l2!mq}0lK{C_zB5WR*P1*n z_ly!l;j-EIF44-tYi8>%zfnLs_f9T7Gpu9RTFG&Di8gJ|w<6nCiph#^gOwgY`P+`U zjte1{ukV7RjW;Qc*GQ)7!zQ3yUe@h!=DBF;J4WO_UZHS{Slap zsIGoT+DGH?yM|Zbf-N+{m=ZGjqnM(m;mr3b#zHhJ?u9zt@a^E3+&)j@dypG$7uy#x zT@&5E%&7M^>X1uL!f%qr>w(`Bjjh8wTHI#h>20R`eY9vFH+j-k2)t#cBnZAnJmsf~ zcZFI|T7xB!*Gl%iTYM*?(>kXjxGb7w7xSrBa zzz-9UO-S6KeV!;skA=8^$;=-M8T=vn{#emI9KENmMrp;FWRmB`igLADmOd!DX>otZ zu!kT^_fM1^@#%OLCM8NX{+-08YN%ubPCH{5#BIk!x$q&;#?%VGUzF1x7Vr>w2*TSAgrh+^rcY-OGqwoQ`8zV=VbRI4{|a}DKX;-GkX^6{o4uMKCp;|LXs1`p)enn` zP^a0sgmOa%xpXyy$43hN2Z{iSwF_?@m;!f-;`Jv+(;N+Kw8@?c>FItEbc5bo);5ou*HT(nF?95G~{9SVCNLp&OcESz~dQS7tnA@d8?noEt4n zd&F3ZH{`SlBBlc()mcKxSqD%5H=s$XXRgR7Hd$PI@W+i*rNaVvn+c6OiaX7dA5Q?4 zbPtn1O%TmmT)N+%PNk0}uY4hK;nLJR7-00ctw&|SInhY2t*3{C^uVilj8od`n;OaC z6U8$P!XNdmHLo?arDS=tBIT8dqE+|siN2ZUnMOch0hFW8z;6nu{EebeC2}Kw)F(~S ze~w4>oQPZ17nl~LQeF*K=kNbk{cpX0oFTdmL95A_wG$?@nUNC_|KQY7%!qFcHdl9a z|CiCQxe;}mTM*%I4uifm8cwe6XoJlqLix!F{%wSxWrB3)L?RQEXz8f~C(5jt(HqC zi5}Xu5Lq}$G)p=j))v9^IPd!kjwWNp@H>-Wu|?{!dH(NyQQn*+MrvP#$dQkT(P48t z_?_y;g3lgT)auJ7P)>|HRS)-4K-reZLPa!>A?(4=`Iokj+L z8PkM19iGs)wU84gi=o=9z2&=;v5e>RmWL&yv(Ofe5A@6%(_7v# zMZ`tA1Mf2MZe(vcd5UPJ&FC$c;hlLN{ND!lmTyi$+jBzXl_{c)wE!s(sb*)6yg5+y z9K1(%oGLmtY0q|?BTVIZZiBq1W3Ow|TQ0;qM<(XNT|jYm($C&jaP+HiJC?konB{10 zX^go~6%J3h(MBu7O4)5S0~ zRQ8@BHml3z;TfV;1SD44roABPnT0ZJrs%4bjhB6Af*4E3%Ox{KM=jhYKbR>N1$yr| 
z3$Hegm(yp7g<9b_d3Kg)qp!#C<=R%E$ufPmh}XM~+j0UXmZr!YAzIzF={Cp`P`T7n z?!J}>Cu^^jODoZJU864D4*iD)Gd_dfs81J&qhOGl;@k;B{8I1whX$uJ0k2<&y0

    yQ(h%d~IcW1IvY$WUk+g6L#n$EQmlXQXXA5=TkM?!FTk_J&OKb)8zJPs8$cJ?z(Y zjmGmTcxfHxq1~QtRYAp1_bWP4JiO?{+AZ+E=}pXnrZ6nKt5R=+<<>^&R>JfwDjaI< zt_qJ%xhEd?L`B1Vro!@6H)q^7V*v5sxPI$7HSP4pZ=)YrT`CgUJWzxJc#3a z@11w{a!B%!DK)B^9GcdpbyGjin*iq{fb&Mc`Gfy&oCD=5L~R4*9s}nbDM!PR_WT>p znFp9?$qLT`pLT+X7L){WJ`98VE6!n-K+f?0h4WrcKU#46Yyjn+zv8?HUfg?cm$NA~ zc3fPq-okx1ph5-ip#T8xO>hod-R%d_h zi$R`>2RN(%-bfVl0z_fGRRJ>(|C1mQWCsORrGBgqy|6>l4C#e~4ARRnbpCgG3DSw7 zghe}KlCtb$Em4h?uPqXe@YYaKH$_7m(&60re25;SZEPXWFM=k$Q)H(lGOATbBU5V( z@{++moVg-h)PVVw zc)dQ@t^nJG2;@_pxCgSWM|7564dGR7=ZMyu9#ca+B*b~Tup+&`y$~x8SXIxW#nNa8tVcqj~wc}zeZv+>m z<2SDs?8WExr~0xnu0-g2aJlX@%KH3iLfT_3z6adArmE}+H2O9A}a~){#4<0xEw&LKs8H0zf z)uGPV&s)wwVrQIh1ym57@cYYAIEP@bRGbSe`$!5Po!5W|ezpMGzX?2hA32`M67YPQ zdbn}HQO=wi1Qu+Dm!t!JA8+NR+|}}4;P-=gr#RbMUd3-`xKd2zV-k$OBxaLAn>Jj3 z>qKHmZ`4Pa7>|v1B>bWvV4r{(-Kcq@ktfl07Vcw}P!pmw7Q7NtVFWnwOq3U>eFL?1 zfEONlxI|l3&tmN|sS{nzlpKe_5qDxVXN)$ejPSRi)oBCXSVkLX^fN{qXZ&qwho;J( z=i`#t?k)VA7&vHY64<-~e!|t3fKBObGomfwsZfsR6rpBhuFxxh+P6$^B{CMBWfD=g z3;{SSL$$y;C__f!W}{U&EXKpAP0@|ze5zJFFwhnRW7uxu)x2t(M^!qg)=^=ixNV5; zR-2*%E&-?qEtGbQwF8lrj z6Y2>y5p*{pWx{%+Y>dcxAeqc1caAp}*4}-uclLLcD zZs6}+2@D1?Ve2>$4yx`zI4}<8(EsmXX8k#s7L9BCqbb8^r~;^Gs+a1>ko#WJnn>?q zQ(IgMr4=WJCnmgY?uLX$2@reY0Z;B&ih3K?^J9JfOkaKiltw)m@kW{FqiaPcW*Ko5 z8SgbGnjLP00G1BSs03B1#g!<|qB$@8>5MWXGYlwBP*<8(22iZ14*ug(*3-9kV*aI)4D+#wAVcZ6bcH-nQ&D@C~Ts2RvH8wuW=J)M5+ z+-A<_Dfubs0yVaL7Mn*#N{0WHq8P89$kY*@lb}wFpC~=^aq_6hkT1>`%{-M@5(Gmm zz>xAkz~I369s(FHL#5(X3r{h%@8{S`8(}NG&wxpi+XR^GLR>!QP!)hlOU#KrE`-IJ z%*lTtdy;%@Sb^C9N(0$@0W_^yFKipTKzuAb6i?qa9|5j*>IVgmKoC8YbMtzzLj&(kLdRDpyB`gcQrYhIZ)_tTO2Y*3>Vc;@bo0D9`s!JmZ$wOBUy2KYjH%8M#zA8u$NO&lD&+$^lE^fW6{5xsXMY|4Wh1 z^8KaILr#BAo<)(S5p|0+RlH}=ilb5>!G7YSqTA5B@S2d}cnFyUjOQ}0a^M&VZ6|(L zVXaK7^v95K9voQRIj~ISv+}h^fsLcl^5mm%N?6de==ce${i3DqF`*+nMdD-P3GIt$ z`R-$)iPma}-1V4P9eW))*>?j;bkf)XBvAvvO?SZHq7%dAtj9%k>>c<@7N%pWa zyP(uBi{5(FEVo;6sS+Hpbh-0!(IvV?T7%gch`|M)bg1Q!^`S4u;Xy_%6FJ)aRJmfA zm>y|Gi^b@)6dkS^EX_}dj$s;}vDKVt+4l)JV_2bx%Y#xc9?2n|fE}YynwN{VjXpIA 
z1kZ6b!^dy!dB73{1}+!zR)`l(DE7druE075H&(f5xoCgi7*o?CML7x2o$pYj48nop zOgGc_Jy=w6J_Ap)r8=y)*9u26DC|I1UDRU}7@7A(_yD%lT*UR)@>a#}3yg9lMNl z3>6vK=E>-uZ9KwZ%KRrq!`QTXQ+=}m2i)icDIh`pSGZRE1T8e!X&1;YE5snH%b1iu z^~|nZr)dC^46J}YWN0&aY=vld`_0XmoLdETavg&gjD~C!_)WpljNg7cf#M~CSgtAB zLKuE*TX-=IIXggRu&~7`$xe}V@6=qgv zIt`AoZ3KL1FwS?Zw(_cQ&5byKTqovd!;y#ba~h<)7C;{7+Y0jy##FGMG3El7!4G+e zCX?4Ou?&tCk@@vWh8SD|9Mo=L$5|1_+;X*t*iaN2%7eha;qZneyQ>}s6=n|BL-@y` z{s>>-JRKOQjIvuAc><*|1%K&NApW{TNx_tZ5gR-ZIU)X2T22Hi*yAdIhrdboF?hBb zWj+MT@~||Kd=YsHCY#^k?;4EQHN~v#h760y;%6{&c<)A9CB88KnmuFd(G4>Hnw~7r zJ|(gur=iieD*WpQrM2k}@6DKEk!$a^ zH1z};%KlqJ-Ty~Jo&KYtsTS~g!y|(Oy8mzOwE7?I*aPjL|KNxwTNo1(oY{cCHPjdl zaaN5fC_q{Y&aD1_3@F<|PlRAQozRXAy2L$;9V%~1NyVbkv15?ky(|hM41VWuKVqtk zp})@>*gp#HwuA=Z(h}7h3#FJ<@y}fhVB3XiU5gv-6}XAqC-O>>lVJJl_#^^;LEJIV zC7c$|Cgz;E`U@CNF!IQMpM$D?+n7U0a-T2+Du*FllE8v8k@T$=;Vm{uCPo1jXK{w<8T{V!g`&KV`EUd*cM$RSY-i%1?ReIfvLUP(0*I?bJOYP0 z6dLcn9DDK`xb7+auWRc)OU=RP*+N+s4uAK5*Yqi@W3d_rT{TNY=zV zArHldd=itjg$Q4srrcfpQ05ukh6DN; z{g>PbfP?`uJY?^#;VHpXd=wp3fyYnDHLLYAbyb7z>(P6PPVgF44Y~-Xz@Rf75BIEF zl_8gG5KTP$?uI0Q1yzDU{q;R0sZ32}UsH00s-vwQXiE*C^;4W)QZm8)AY%{Fut*_@ z4!|b?r8`qhVX#e@a^{#w8jX=Jo21}CT?OynL|u%DSn0O*c%Tw1=%_$Cz>Ex(&BVJ- zPCp&JfJ#9^t*6PM8%4WLX8@u54U3ZoTP%#XQ*qf4m{5v2#kezRf1Ns)Rj}}yD*4t% zM5-A%U$Dqu#Hp(!~JUop*3GtumYik{Q*#YZQ8vw;v zF$whT02JT8x#dvzz<_Bo4-R-AIyXbeJG59=4?eEQVVgv#$D!o`zwy3(usiR@>S99* zJZ}^XiYVKBv-Z&KfyRc{Y3ws&CaNJWB|?n^f^|pX+(yD(Rj1rPSq^-Z{PM$FY#nrp z4GWu<$NFAfo*v-@@6Ogc)@h)iP6My}X9LzP|55)D-BG9h+CY6W?vtvYaX00pVdt^c zZv=&yVO!5O+aR4$GCP(fSK(wE`9>c))EOwcajo*l$z^OD?CyRrsai~6MgB&h@c>JB4VEUlUiWU5C+}B*R zS@ib!YE;z*sIjk%jeQ3W7%#<`lTDwXb%65Pb`*?GJo&zkjyT)Yf8zzjDq2q?%M4-}- zs9+pav&{zal91b|-b2Gc2Mg<9YRq6? 
z3co2az>vj%a=|kqqMZ#>m|~ieXhh&adZnR%Z-#0r$=GPZ^HeU8VdxZ`$9hDOky6BU zeTqZ`uBCzbtO3}9%;$M5KN4L5!o&HVoQw4plK=&al)b*FDJtdR(E{d(|J+O?i2bd&MP`EP_yqzooCZIv zIdN!rO$fFMT@8U%BLv$H6B~tqR_R_uig2K9KW{U^+F|xUOH!^si8CE@y^zsWW`wA> zqv6xop9;{azcD0NhO0O$I>?=|#~s9}HJBfUbx826!?TJb+o?OU5AAY+zgW;=a6qgt zu)IQmS@@~}ZrjGqSADmAr-dZ6>Abs&`hupeomc^l(-E}+BBH5%VlF3p(W9 z^%>Ho>i!+5BnKyY9e_ed!w()RmYSkNq~R^N&^{ULW2Y7TsVD5SvoXsrDF{8Icu5EF zg!Z1Ot_3KO5)Diax{!wBP?_|043%lU=nwjkw#|AYD5#a2#l*dVm6L({Gc4AUbAw1=d z$q-pFoXe1PDZ11%*K5c9DUBQ*v_*bLMT?TNf$6Z5xQO}eb`X0?SU2gc3A^A(&?KLASodkwdeb#UX#Hruh%egHbnfYEWh9|dkc z4$t;r0v3oMIP-X|iVds>RvZFMv4H{}<9bE*S`rVS+!g?u4c{LK ze2>NV5H!Z+EwH@F=UjnzP4KQy;Cn-SA0PN0h3}*huq(B*Rett7R_Xv1gpCPx0-cRR zr~6PQI3ezxDkfA#x+P=AdopGdPC{32KnsgH!vk$!#%~1|Oet8+WsutqNV}$)6&(B{ zFwSqmHhYC-z@bEe+rMiM8UvHblYL$gF&-TKpEuu@GY7rp0EBM1k!;2GJ>2*5r25FN zpJx7n8=SL=MR7HN8^|ilMjqn96dfS!Q*0?z@y1kRKdaZ)=)soL$-EaMB8kO-#MtZC z`0a!e{4w)m2Hqcq?`cmij9K#V3lLLZ#mpyQJGw~-fF?j62JO58)ENhHY_MEBwuwNa ziM-F@o_>?`1Jnb>8pt@eN$!$2(2ods$x*2QwZpgYp-<&?#cY>LsS;;BMXAdD4QLOX zC*>lm8d`6E1HX6yE+r?vC=!QkSpVm_(E=tzRqGR73XTp2F9^Y`%=lfUt8RfsDlpqJ zl&=Eeeh*pWlMufoMkZD%OJ5Wzo?2K1D2Bir8{(G8v)7o`>C1-JP+4|X6Wj-Fcy3V_P~5pyxX`7Djve7O`G|CMBaEX2=Y79R}I() zGSj+iVSM7*P>BG)Ybu`Mk!@wJU)ED%#5x}3s$;|uZj~mOGA55II)P3>HA*q};i`|FfwMr_JPhm%KBpLqLMh&Hz4q|li~(lo0g`58)|6^> z5d6I$RVpSbF?eT10&eC*+A!pI?t)2_H!M1SAKsS%0?h!e#i*x(5vVz{&5)YFA;1}a zzzXuywV!d;*4p-xm_H?GAqDVEHWm=7G=5L@#W?3#78?xOuRlHj_1_ODe?+G$+j}St z1o~~p5)<20oi0c@9+T>HnptK+I3SG^G{x$Fh07f;i6(JH*hLiAqbRo&9BU>HnzM9p zlXg?lp9?;@FNuzxarn-iYAYt>#8^JU@7|bD4}7*AR&*jCZIJr7kVq~;8oxI{X72)c z6euWMP{^Rf!*$OM_Z;{tN$NqA36flJF~VDxi*eAc2FIX5#Ah5HgB7^>`mGfRTjZZK z>c#caY;YYKR3OaBk^@_OX-k`U&1=RCl zeSdOldRjl~rg01=PjE-xy!8rq=W3xog-;9StiT_|S!?q10h~m;ord}!OU42F%?!Ej zRfH34l<&VPnn%`@gUg*)paNFaOXc#|tHKen9^0`OG|nygWf}S!LU<3C%QmlxxVVkz zfYv(>h_~tP2GymZssrL;WtPl(4anJuNc+a1Ubu9c(x5wqw}gpxxB&v8X4ZWJ-EsoO zsLugY*xJya?)bX;G$k0oC(W!D&q7-tLIS`aD0PZqh#|Zjs6(Ie6AQRS=*5qdX#D@y 
zKNuAz?f(${)OeZs4`KViM%Zos2V(si$nF0S(dp0`+zOy$2#BGUbmr+Q!a`Jpg&b!e z2%P)FcxS_06g)>rULb((vP^#+;Ts1r^)RJizXkz>;{J6b5R1+=a@FgiMPye@omxf( z8t%Kx<;Slhp19GZRr18^qC@`QgC+?qPQZwQDN8-s4G8s_+XOaoNUgknIe0> zf#gZU@FE7t@bYkkD`A?D0%~GS(fqs_YOY1dwX>#~shXcRo13olv$ln)#`qm!zF~Yu zn)zIDYPz$!K2}>(Si8dTSAY@@205`-S$+}1BFgoBQ?yl|ku%;DO>ZaTf5ZhB8_1IF z6{R#S;0x3itjcdN&{afa-+cr3w1v-1nth?Sc|GhKTB>eMg zM;j;PG3Rvi*;8`d+oFkl+o6Uu zI~eJWA~T_*jsj3p{Nm^ALb>T}k!2K;`a9w_#P@W0N3=2?^ga0E!6dE*#W}?c3V=ms zvKgsL4-*+SAU-X8m9N%Nt<0Ry6-H&Y9Vi%! z(urrbq1I^w$;jRnEyIF!7uBiT;ap(s9CsQNqErjnT)*fum(85dPJ`p;*js}zev#k5 zE8?{~M#!7*is;smc%HktrS#xHZ`3mo3BATy-)-51R;dZ4C`p7r=ud~1{vNFHd&kT9 z@4;cYr#$nXnE9XK*}-5l6{ZTq!R1zXwpY%YV2bvP{7dwiVjV;T!d_vAzboU#IEaM^ z&+bzv9Bn&5SAHwNAGFi64Ct`E&`cSlwd=IV!Lo$m^3?MM|lB+*N z$oH@Eoe#yZEN&L908=j%bV|--xU4&UUdU43h?jldaN$#;yinwC{}!uDug!T9+1UOG zDP=jNf{8fJhJTwD^ee>r<>FHDT<7!f;V^cq>OCN(rsL(fj}QVd4_$vGrpJBXv*^SQ zJl_ue%6*tct#iBOaKKG|MQhpYW4Nq5h@>bViym<;u)(vP&M05k-H`VU*Nl^^S@w=_ z`R2#sS?g0MGv&Od?x^R#z2n9Y^1f|iN9g1-KgX|<_D|scvErB<`-!-tX*1j{Nj7`c z@PSg?)!66wd#8N=6S(~xDwF#@fg^n0F{ynjCiLHiVh16OMnZk0;*yjv^)HP~1)qT9 z9}Pb`cRzxa>7PL$(;4G|=9FT!;*%KZ=3{wBS>CvJHm$iDE=xWY;p)@!`%lHNHfuNl zxWVrLwiBV1i^5Tb16haX6q4V=Cx2(wlOsP9J(AdCu(y*LcB41;8tjqzj_y4s-~CL) zX{fXBGsKIOM##8-0|$v{7&SYJq`f0rl|!HGOhu+M9xKvZQpy zOY=F0v4-R>XHCX!%g6sM>Y0`qzqju)HQe;M@bzwsY@h;6uopW$G6qHZlv$JKH?!N{7my zzQ7DjVKTB%bhLg6DQN_D4odcUuvvr;lcNg}mH2k5d>Zeye}u?gg(As1Ak0{$#f8Rd zRMhk^d9zStX-8JdtRh4+q`BnDBC#~d6HfmT%5L#)a+yc6D)maJi=1JqPk|6$5V^{i zUtg~9iqV>VrTob&);A8z_i3)JLY)CA1rP&o&utklmu(l*M>N0HT#}o9(7Utj$W)N; zdvS)U+QO##rqzem8yhX9w~UaqE^0{n!p-pCqEEF07)XMlD+5@uuPivaTc+<2DdyA} zODyH}d7lR#e6L_L{9$-!777}$z5Lo5n+?$^{?Qi z@<)~&xJ&eqwzaz6l^0yj%e{!SFy^4+ zPBu`u8wrZqny|7#sFzK_OWxKOwu+NE#lohYG|6X+MMu+O3t~;=tQ}fo&$Yie);y9` zq1QF8!<>nWl9G%70D)K+9W33HeNn$?CO+bg`)_&zCs|MeW`<`bklf!kFMtywPOi?0 zhH^Lo*rk&pvHq{+qy&GS$^-x>#slSI%u18hwCZ* zq@oO|%vh~oocf|3r>w$V8Y{8OuFy$F6VRvb(L=2b!4Ya&mveI7UJ)O*6DSA=3Y5TjZs;6O 
zU+rnLY*r%L#Mb{yxAB-8G9n=k#}@LauSmK|M2rbX!Mpm)B_&{RpC-$~63psDU0y0d z-rkX@&9EjvpkluUsOhPJh=E&x`n*~xCZI}W{x_nL4o+;9&wL{Wh0Me)$K^Ql$&3`?2BqFP(h#KLAy7Lw>MNj0|c0m%dv6wJ(>& zwcuy~Cq-yH&=y+X9tIM#j z;ucv&+5HW6-o{ehFDAAPp6L~s6nTW@d4BTrJgNv39&Ht@4?a(wmUmRHU8*&v0#bG5 z=pOmbe$lO0NX~RYJB zvs|ZXs``N@j~#@RGEkRo4v9F=Z%B}ogdjm?Fy@As?4rx9Kk*)KhMNR{Fi~;5OIM%3 zQ9Tc1Xt`Wilfi0QLk8Iiw=*AbF|>skBA)DL749-qT7no-AI~6U+wtEJSH?M=$^8^q zUo@PyNLh4IcE+T69vTb+nD@9p{9;!YB3{6=$`(VIF=(Mmj{%TA&-?`;9*OEuT^pl5 zsaQQwN#=!XK5M2;y%DlK`X)NlLbV|l+s--4hD*S3eVMQAzJIAp>(BBdm}dlrc!7;z~q}|2Kz4?0pqFK~M0p zp_zajKM7JD&l=%32h28!PPJ_Oh4Ii{-E3J;*bY!o5Ck z|9GoRJ0b>~|GY=FqkQF@h}v{j4GC?lw{6SAChyUVX@CRaW=uRtvL*fHr6Xc~WY9F= zt-s3mWTBjYRQ#a!m7~8GDWPzN@Yaq$7=y${-y^F>UwQp|F-T39eUFJg_*i-jL0o<1 z$zuqqu6j2bd(t&NkD=PeOWWvXh4bL-Jp(aZ(#2fnMw(9{Lv<-pSj7H_O*e*v@iTl+e`D&F|34izw zXT*`vS1^WiG(E@$XGQeA7J~=EI1YNUgS{rhi1|#O-wZ?!eGcZKP@{~7{C$dXDZx79 zy{=L+P5?{Dg3bnz(9eOcr-06;%Ef1)iuK4B&WbhaD4F_;m}sBcoyQNes$g$t!V99d zGY0A;is3LnQojF-h>l`T?}Ew%bs3UK?*k>tgTIJg@P%)1PTbw&19&o0(&%F8f-qx6 zeHOuK1UqxkkmucZv+#Htg0@*EO;5|EfguSCqu`Q!@tjxz%1izgN6?BMa^SCGvKlAf z{}qWsn{=1EeiaGw{I9}cho(!7fqqZRzO1i;2G(TPhv@0L_Zq;46=oeMJDeBI)!uT% zdC^I2C-cuEL*(Q8<@xi%CJ=Roi#tS$)zytj)8i4*>lfBCQ{>1C$lcefrch40AUb$X zD+LED0WKEOAam>BKN>QyYXIp4Y*zxaR?(+UU|WCN8buiz#L z>TV<`YB(;HC4~0?vMg7LU1LE_;c@mwZAKDy8qIm$teb(?voh8v5_G5v&dLEk@rQ-I zZVFklZ(E3ocZpt>P_q0yy5+jQ*0;cybkrfxk z(B3DZwbGEwUVMsu9~1lh=6HInxcQwvdAp%EErwZzgZsHUh%VfM8=jWCQck-B!`Wdx zCI%n2JnVTKRqAH!zaQ_oIuxZcw(@`WgW!GMjbHdp;RfNPXGK}p#+YduDxk5Gst_FLe^M08MzO7G!0J;#l+0lp&& z4SLKk_3!YWpI;iERr+>(vi>R5=d~aAM5T4%0gD|oT+WV|y@SZ-L^F~KI-8I%A-6N& zqI?P)N@C3kL<%bE26RA3jJJ&U^h1MH8H|kDq4x~{aFy{MPLCM*xccKfJ*XmVDa!_g zhRflX#bdZ9qVzKIkX6Z=%VLX~EnoN@>oQAz@jKEDO9}~ITz#f9xFfN1gt>Ww_6lY(-^7v7v+f^$CLDr!F_+5tl z?22&2W`*NuiKxSD;}a%YS6`vK(gclSN946DVv+dQjlyjU{t!=T+L3Rh`8qa;!f&=s zzApBuV)FGu*|QocWY=2OZd+6>9#^%aB~rTy?)p}V{P?C=*m89J(Vh%Fa|3bsFEq+c zg9oGp4E)t=h4Ri?(M8;4xl2A#D`Lf(x=&8jiZO{>z_t<~LA?gS=4?8po 
zIy+>XC+9wtweey7M8of~0x7p$fnG2Rr`9j9UhY)&yQM?ZUlO%f3+30EzOlajO3{g+ zkbf8trwF}U;K)kpi}4`TO))0qc96svMB{U1jl!vizsa?x(svTih8fQwmp~GJdal>X(X$?6VGH{JB>OGF#WhJ+4$TEC)8tYyG68SvFGZ(HSz^Vw1cK=Z`}9p03Ag^W3<*M0Z#-n$uST?SU_HgY>$kX42b5JanZH-u8qzyzOO&3wk4Y zmr0K{UHPLhaJF{N&~(owh}(A8Ddgj@tHI+D#cw97F+^k#f(qDk^*8}9W5qbZz`@*} z2CT`n%J7aC+H{6#G#vsW^;!zVY8Cr*Em%XkerC9eed$q@uN7%!4`4s9-E_R|~ z5-KvVhTZ^R$;Gx9-R-P~{;k@k>S}@;DmH6VcD7%$LnEatyG?de19gmYRI@8ylO2uQ zwNU*21fn)R`Q03%j#(OBFg*XEzSgDK6Er!^tjBu=XwIUcO6#C_TcX>k5Jj$GDb2>b#9CwyZmJAXveT?|_*216yb!FH;$9kBE%Q;9 zB9sb()+N^1aeKR43as1;}=_=3b(b%O>XvMSISH+6K%&QHk>7JU*@l|Ez9+c#PX z{S=vBC2A8DQR9)U;GaiDOk))YNQy{W3=Nhs0h?~i{oT+$$)pgyP3TV00d)fL^83{y zzR8n0f4bX{T&y7N+-CCW5Z$3I`dxk;qW5U}e4q-ft);<(d40^`H-lu$zQk4A388v{ z*6nw>D^%~Oe~0oiuONgPTe#OfubVvyYNG3}$}fiWJO)#JOom+@fQ4uaf64bjCKB5j zi~Tf+g;D2czzuO@c}5aWJr(*_c<%+5I0KGHJrkJ#-?JXe>Z0KQNXjc|I^67{Jr&?yB={ZBerLrdI3pKSCh=2p_ z-~`2XNQacr00X91TYY?rwTd-UR|X?J3+`ei3y0Pg${!{tbAcU_K^T(XEv*)G72?{9 z$I0*W0V-nsFI)ugZ(M!xxQgd=kIO+B?x-x&$id`E(PvNaDwZ>BkXxxUdajm7ah3Op<^3$#1A-DyX z*>;*DrVuVGkCPPa3XG-Yy8Q+=4>>JN_h@%tmgmFtcG{rJGOnK9F?At0CwD;$ke!v4 z08pvLuKF$n+2#4xMXXyq7Vm?cyPI_BW~6Mbr#BjtY19+$=BUTf-vhf~l?v?vrksr9 z2W4IXxF=x-ec^7#BL??Ch<3*I^2P65?tX(_k2-xs`}>Fr*TZAqLz!UT;n#~!AkBs$ zioJp7JJzRPgn+cy6e0(sEN(jn2M3Fu08Iy3vWit=Gn5Umj8h&5s};CGiS&~R0)Hfv z`T>}lF(yrCNDB1>M3;)Fd1yrP(~#;~83Gz}!#&d~;35pI2-p~uvuMNF;(Tr-#>6-+ zn8spFE_6H)KQgdH+9zed&LrPAy=`4@^ zSU9Z!OTf;^O#(AR^QFZ<**W1j$)bkePB>nq2N0|l5%^%shA4vAKeVlKR4 z=nS`y@Cm-znCq(CY7^$|Q76@zcmquQA{r|IKfaELCI=KM7tslUUa=piiNL^+xg4x$ zsqbkBBFss~hSdO}JdQaNpadozB}y=NTcA^x4d#*8vjj)hKLK#{FYz12n+O&9z0(1{Pe~0KqO%^N>M1LGW!QO!- z*9m0JrN|`_eAWb;KA&+v;cc(wOE6gMLyNEn)!l!d6ShO;=)EH z`Qdf>u)HHmZvyA$d!zJtq_K8K=_#S#7*fOe+!gYjDBUx5$|XO^P0F~c6D+(lggoEi zOz@nzq45${MSU``Bc8?L%)*G?#TnAyGeeY5>;B&V()p4G=v;L_p z=X<_9PoBH?-nlbp&YU@O+MMZg)rcalxA?_`Zz$2t)nayu7Kj~fb3d{tt(7am(buwOt%yO z!qW?fXXC6p?C=0L7qlQ;HV(ZGUG}1MA=s0HBs>2~d%|V3xdv!%dN5rIm+|?PV4W2CZAtHe-3atm-uVt-09*mE$^7Cx_z4z)aLIK;7%z}oo$#ipj3u%H6yV0VA&1dWL2e2a 
zN631;uaA+UGqA+pb3GS9PE1OSac0{*sLZ;xzggXxt7=%?*~ni&mPlDIzZ|QCpMBkp z)@S>C5z}i;&vyXF*}>VhGlZ9|t=FyOjH2RbivZ^=YDp zc*8e`yV8e|vTjOU9#>!>1xig6P{Z{b+Bs0S8oq^8@l)<)N(qrR(=~aS;-X}2aFJF~ zvT6QefXQ*i%t>OP6_T6qauk?bEkEWaWc?z?;#*#Kc(HsL zU1J&y$G84xc|ItE@vH*n_!|9GCRK@&U=xfzA$_hEQ=&hoSC*pycWCgf*OH(;OW zj`u=np8sG|-p3Gr{}8A5&p;CeW0GOP;SEyQKm$3p0`*xT!}7Ne@vx)s8tVNaQ==bZ zWV=y~D_ABCEM;Uh?Ab8CQ9z@72Ix0%P@WpM`fNs@1JLI{U!T$6IP|wU`r8hfBQgZ~ zY%X=7Oj2_V&&)=jvx2SOe$#?j`G9RLVE+}d-gY!`uYuNNbOV8u*i11#V0``wQgQO3 z{5ci>%q3w_+n)~9=~&1Oj)Cf~fqaf*a`B8v!^HXt&~vxXqsiYZL0XU(b#z?^TKwP4 zzszTrra)W=3&VG=5TI~_c8~TXhJI+~a1``b@-EzZtx;(KNWhBWo(VFrV#paKRt!6% z#fqWFDO9Ccl5-Xr&qdBs9498*1Y0mP0T_J^IST`6R=lj6-yLA(ViXya({T7PsAeqv z)(CA{ zZ@~P^ZVVI(w!Jyl?KqgzAazCfy z2E)bTAZWSnfs>noyIj5Ts1_)jnj zuvmH;R#aSvvu3Xp+o&sg#TQd_UW7UNK#2^H5wg!E& zo-r8pG68E2mpmQ>k^uuPEyML1olKB*Rc&bF*GMGFq?(nRU1dj5{92}!P9;d>4Z#w8 z1=KZpLW!Y2Vopjh!9@r%lG{-5AQb5&(Ib}+17%DPAM=pp1??g#;sY<)#cX#L$`jb) zAOH)C>Lxg3wdb`tw(E5dF?@AV4-be*nsPr!Ic6=jfINAlD)fjQl{f-}Aj|8ewPI>f zOda_|+u!9^poXiyK6L~IT`~q$ma?M8ywDD{n31o6XN;{kHvc5>n{%C};Izr+d_SHA zIMl(q8B<>cD9_s$&7i=^6Sjqa2P(V(Dy*PmiL%~r=P7 zGQsu*s^*nvHrN^x%^8Dq30Hq}yXn0;vTpxc^=jqU1Moc11*nnN8D3+Oo1=9w8JwYN zw9ab}^V(!EoaE-*dVsg}cHY*RSQvtx3qL#+K{Hd$CrKpBTX7y9>~dgh85=Fb2mdwB z^Qg!SNT7F)ygbHK`3J5rkC89VX$o}Zt!)1)*xUtf_; z<_5BkzM=`GHjrcC@h(7i9_l+puS1_UklB%gvc3M|1ZY0P5@27($8@?#+UW6yvZWYF z3mVEc`jMw;M?)Ep+w%?OtNH)YIiV99?%9ZM%CTcnunFO`2pzT?H7ZaD7ksce0pbzB z%fFjJB+zk%wG4S@n4&jDp zBd1s9%^*5{GlY7a=HWOF&eNsC93kCFwoHc*OFr4H%6s{#0oA3!``<_8eJOZ))h_ji5(@jFcT0L1Th!UrIJj}jhqcA2K- zYYYAk!h9cudBL8)0LZ&|7^s^70+5>+MB`ozdK>g&5Wk-nJ`CdbG2z1?e&?s@w;;^X zZ$Y?U_d)nq06zq#+F%J_6avLL&-@LLj{!&?ce646x5j=Km>euwMbF|uw??47l!>r$ zFu^eaGNCa6GV^6(3kR{4Lw_GbC11;weD-mrVtw;Y7f$9aM_rL$XO&Bwn~LyEy9ijS z%}Is5qBppeC&W`xP{|RI7v;m3*=r$$4XH^ri@rdIm9-qqkI$FBf)v`Z@R#)lhvH{k zhM#LF!|$?eumH&JUI~Y#p#ZRqf5Ruu?^wsV03@CV@|RL!GucpYe1q2FbA7^P`mvd8f+M+Y z&1Jpdk05af*zR>mtIyKF=CU)wNftMk4aE^EX)YfUqbaV1e6!(5wEELl4m$_{SM0Wh 
zTk*HRiV%;2C&*RtwtAbsZy^WQi4?BGDF813T+0f4B{?rZ?cgmxSXsK$(3Y}++E`bc z^D8ZADI@f%E3~?$Y#NovSsZTvVs>}B)Dn;fKSfRMk{#M_k}&9+gdU62%D-#9g-wH2 z@L?&^J-PTTo2PpgW8(?C3mGD&U>(zBP1G8C{VtiJ*SSPH?vlUiYsS!~R>NHjB{S~EAE`)e~nzGmL3)=U7J8HZ-Pz4Cft-wJ+f z+E_eeN?{bkY!5fYaG?UJi7XyDDjU8=2a8Hptc>4v=1Td|kV- z;_#g#`Z`@{BfIJizN8jyWe-z7INGPSg?Bdn5-o2FU&NQ4X-8XGCv_1zVAKgbSFaQ4 z6%Mb44914Rq)QaiPBx(Qb}~@b`Oj-7c}J?)PtJC-nSxT(A%m8*lXk1lu5Q2vcQ#UX zFll%ycBx&{m|gEwJEu`UB_qu?pziW_^kX~ua80ve8l4Q&=x`d?UM`GJZRoWgBx|0- z4YIs;O3u;qX#>(b$R??Mo;PoScelLp^(fL~4v^B_-ERhdd>CBQB%v;_f>$%5LZx6+C;!nOYC;c2~- zbDpylBEUlre}=+3$=Le6Va0A6kP`&?;}wYn_$Z;X%8%HUk(Oe^?#HAsoBDT>F#vZ| zCupnY+=FKE-;oMVoIsR$Tl=M|vIFJVKk+)47qP8Jf}D7{hK-&8PoqueGg#J_$0(dxX!{Uh_k4qXFxsrwzKRPe!L#! za#(1zQ9l@~!i7t8Pr7WcZ@Wmd(`5r)eopJsWvZzKVtP)bgFURgM1Q8sIs=wpVod@( zjw{EQoiLxsV@iqq)~j~Eg!jz-kuv4B5VJ*?kD6x7*^BTp|989`&AvpVGvpN055K=g z7c;OzWL?B5ub8B-e@3lwHSI@O$rD{$K393veFf#uq53u0^A>?;PK+R&y5YQey#N$pbvh;3eYgqtT76Aw^#ke2 z9x~3fMy{dfddOC8PY?N}fXe31>m{cPz3m9Mevb?h;wpvRD@LX14L&$QUtk>WbZbjRg+rnshh;f1l8PB%ma-6=u)h_Glk^AIm{n>Kb zdY^nvzrUP@_my4jSJA7x&^zw=cJ%ISrp-`F85@kT==HRtFQmWv<)q&)SJq%G@Vmwd zwDx|vBtC}Q!s?R%_Q*YFO!5&rVF@&(pN!VO_?Ra3lVfeSx>Tvhq6^$5?gmTS+{Id? 
z`peGg{$lAU!jA=t{MpB=s$XUYV1L%YtOwb*mkq(&3E!Sw6_hPAp^T2K4W4xg%^0(q z53}#x>-`vF+|9r0?^oeHW$)2r>D&JDjlifZoM{F1l3p1glSLVo43J4yo*3QGuP)$G z8oZCFsMLARZk*@KmAbDFkeBu3p$K-H4kET{h)+n%2%j;gCL&gYrM3({B@1j#&j)o| zGDt?ct%Ic0IY3At!W!Ii^5e8G9!LKrKP+91hOI&*5h8~e1KOwpIV@P~Ju+*i)L_2L z$|{uFs6shBYq}hfHA8wo+w!n{EQk@p5w*Ehvm}rZ<<1x^pZninqSKH5e-G3fLu78y zQXf!bZuo(ELasns?(JsP^p0*m^$*>2!Qt*cEf`br8 z1{8p`M$-A=&=I02Y=o?>uX=)-jF6A;(IsWA9(I*&>ESBt+Q?P*_BXDwWAD1kmSUI8 z`;%N{A!~6y1O8`5Zaj5xovUp5L08#e{8!-rH*9vAUE(Us>gy^SbIetiw8vF8x}~e^ zv%bY;fqjb0_KtUz^?(>~cwlkaed0db<_C+*Hlmvosx+||*J;=aTsq-~E_OfSDtiMA z=>w1>lO6x@>5a_sN6XQ<qG=m%=ct9~;gCo)E8SjPQEAvB}^O)i@9eb~%%8@WrwOdDZ zM&W7;r%t2fB%E%uew3_NLxg+1HY?WAhBc~oO)UDuFfn>`_oY>_j+S-WKs6hzc>*z+ z*~AioJRP@1dVu%rYhF;MKi{^5GjZz++LmybmQFdNWm0{%+;rtz7*RMc84koS@&%Y7 zjn+rfs?9%u1#wW^>8&bqp`%LD*P~@n*xs*sp|t1p;!i;iFVi2RvWjQ8@)5@~a8mgh6KNLJywAr`b9H@qidKH0ZJ88vx5I zlH>YW*_sKX>YP0BC^~@UfD?@g0DW#{cMNzp)23yl9`1m$lGY8n{X*Nvbwl|r zBaTZoqPDXYZi2yGPR=ur|D7OuVD+6u}%^4|$GfH}-rxBZW z+0^@RXmZXsKR_VI`y|wKDl3{E9Vg=gH$R3{KqyG3--7ve+V7^Cv}PPuoow1YPCjID zAK+}+Fc|}F5mc~8$LNlije2ssRRka)NAUA31B}#IM0&?9SZ^ z#S|a!&8N!$3Vny&K93)u`4i+lVcF1ukN{!74PsdksvSu-3h0^e1~{D1%5$~A^E26u zG7`lUioJ&Cl>NK~N9#_MojdT?j~WF1o#d1*+^Oa)Li})E0`cK{v+QCJYTcb>Lr?)U z%&@)Fte9$=wwr#NC>uw_U>^Pm&J?XDi0-5$2VS zJyh>m+0nukvvn(#Y>(xc6yr%_sHVvH( z4TYlQJS(}%`l#kCta7Zn4a?kV01sj>)j=`O?4=<&GF~6Om!|M_*j`$aBU{)80UY3< zbSuE+=8}Bn`}v-Gi~wCwPUJ#YjX+A6BCUyD@V~-JfD@oV^9=4TEG!V%OZU>?DR3TS z9;270$kbYld;b(bkKNm*$Y5bT0ANSsgCn?>VQD*#t3m2u*oV@a z6&|CMTv-?Hi0-*);JY%)%a!r=W1!%X*Z^j?Xhq*wZQR!j$q#r_;MYesob+z>Mop8gl6uKTW9~8fb9e3_#MsSpK z*_yrd7!~Kq+F_-JUg>o(aswl2cb>dowFWjjkS$+6jg2r1@@3cNcC$Ut7H!Kb$rQ2# zX5v(5iR{GF8CPxc7IUhzld?LYPC7mKcgXgn;K##muZ{(%WFoy+AUl{)34LE6i>Y9` zM0z0NTfH6p;wM;8G^gF13ku_+o=mIQ zBsb^Y4h%=XCi(NJF5v6?DB4@UEC6G<=4vlei9^ zoui-zBiw3~=6MT#k>Oh~*i8JuKS!)`ay|Sl zKrnvM0LT)iEnJf~la&5E#KKWqjMMM5Ts*gxgo`xIlTBeWVGEhFMT+OQC^37BSvnu_%BuOhWxSq6|0kYM-V&02v-ItW>$s41FsAUdINi%Ar0 zfz7)4))-~m62qM_+mH;vf?r|yj}xP`4;Ep{-d_6GL_7L$ 
zrfeFM6rvRkM!oHUJ(!o+?}o|Lo_?DtZE~Pb2jGB zYW!|b182*g`Hy2d_JB^*2-byeh*B61e59jN4)((6l-kU;JbQ>ePn+D$oRL{5Ge9JE zXNHeTGqBinQpxbK+d-wg__Y5aUB;pMyA7&oj%Y?Of>BiZoP0Jd7~iL(`&HOk&F$J#?RgoQPPCnDw9R#JdtcC~ zIr35IW;=CyXka=8J`an5=XtpW>|@qknFM#iTXSX48Y3YH?1iT8xK4l0m80V?-tFBO zu@y|A-1mDEexJFU@}Mi|$L^-r{sr&I>C) zbbY`|`txnMTyMCNmb@dc>PK7A?70ASPb+%+T{)@7ySE0S!F$pk_tagt24b}DHxI-J z%6d=M6OYpi@5zsJZI`>tS`0(t6VR-x55uFl_YcFv?&1$+fCvw{@Agqx*T?<&I*fw; zdrKtElJn&&Eg-BkD?(zNehp2b5cdNYrhhEmTRxWCP4+7|-;nJ$dwpZ^opHZc9`N0t zETy%dVr;sWQtQt!Ha*{`t)Iya`u*E!_UCxM{#6S30;~AiSE<1la)~9j)H^mOtBuVU z5+_+S+ve6c%HcBcpT;JJW^I-`_41}PV2jMVZET)*pWGr(2z}_w?$5VMv#vM$lQwO~ z*fdz~ZeI#=#G;e217ovonS1LFxlY&DEu$&BF*g5N#$(gqiB}9Ylog#!scJaN70A{B z$$WHr0R#qY`gGpVnOI(l@suB`g0CC&&WRW#%5sthPW03P->_Kv!cQAcT^$11AA*Jh zs;%ZcNuBq~sQ8bdAMHb$X`8Tn_^6~`hgi#_&5qO7DjK^NGi_COn!i`xMf!f(qsc2V zTDh3KO9oY0g()&4%o9|S`&kKZkWr;C$5_iFwNKAQYVqf)>T5ZTDrwSwIlmJtIs6@e zqh(4Et`cLAMWp~xNi0$di`RC@1FHatq2yv2S$A#2DW3>3|5ROa9*|G!)4IF&9FVP* z`L`LavL-X!HOgS$GJA1EsmEpeh1MfxT8$qy%s>FoCPyV*Ixgem^Klx&!26+UI5wGC zRq_i=ql&1`38+}_d1&kj_-k)$q_wz)EcUP~G)asGLK}GK&l9qac`lw6dZ^CVa(HW_ zE=W48NO65N41{}~EF+6}|269Z%$|qu!VYx9mN~S9KL1*7)j##n^po&KO(>^zCuNVn ztt%CS$PSyDsOubL0l!u70`wUhL?~54O}>$Dci#b#igjCdC!_%e9`F>tUY8=wxGE89 zJV#PUT; zE^7Ud(8^tWAx!E8KEAk4?~PGxvWr;A^m@M3eMnYN;@s@ z@BL~_LHQzV@Gj)=;$-kheBW3Wl!YRx5@XN>+Y>(v#52BD!qCXqAm4p^-*qIFoR(ex zUnV+DhyJ*aTN!R1KItsRi4A`ohxNZEI;GQRq%8x=JmN79)j~zQABxvxHv3M7ebg}( zf)-@jf|>S`xMu!)Hwt|h+>8xx~lXr(~1?u{5*s!qbm`z8*q1Vnz zyLvhpER^vl8)FD5{RnpW{C-vjHMk%6j>RKZ0eEa2m{gY<6_+)|v#%~_7-{dAop0oB zat>@&^rwGcgmBWt-TPblklxA;fU>{TGz9hCG~nz50HtF=*%ll&i38P~uu|CNzTCZ_ zXfUGihiLyL*;I5T@x3er@DRjq>-?>bLzjY(<~Iy+Hb)OmSQs@{A}q%wHY0vHI}^S^ zM7Sv&%+XqN9k3^4=ROTC&USOQrCKu!q%HGlYMP~@gBEdS$S3ye$maMdMSTZt#!a@% zGA8V8jN7@ObGdvbs})~=O1&@3dSQd1(EW^8n!_ds;rBIqflKqwF3$ew@MV6yNb4^{ z7_Z+~)u-4iG7>vH!#)aZNscSB@n4fZ@QYEl<;ttFo`D-WMCF z)~7lwCO^moz2r+8@`D_pCx1!nevotZGoMqZ3OJ(jzog+6@<>QDNQSFrqvY7{>7Hxy zo8b0%#h+`ah{0s}Np=o<6>L45xA*WS2F%nOzO6oHeAyKg;nAT62q#{BJ-^ 
z-T{LA@na-1L{VlgT-49LWfHac1tzntpVR1H5PCA5mi;0hwT%C4!^wJ?*lSJC^cN8n z`m0P(!CzpXHT9>!DC+U6Y~fF3llQ9}T0O|mEq{~Ug!vP2B@X-hUA7SGsqlAsx42Fl zewSTB+*rsy#oNFeNZoY99rA}f5NKY3l@I5qTo7tNjiI0Tm1kOOQpvPIst|91N}8gp z17Y66HZdG31>1tHbWLEeC!18G8UeR`+`?j%t+WZ2O=3w%^u;;yjrunQT0X;I za6o~Pc{zq$!K#mN(3N17glI-)>`6IYe2l#tsP#+qU>+zb4UGP?7*{#V!(UcW`w-Q{ z{2AUYy+LC`)O7I(T?_YQnZn!6f zs`es$;QgL*4EJq{LG zxWf{tPlAfokG7!U32LYqM5PJp-n4^AJyPlub9s%<$33ek?Dph)|GZd09TU|E(V12y zV%Vn9?nDe?5M4`D9RfIix9)t}9Z?569>g!y*sh+8V+of9N!B(v*95B;YzaCS2@=tMg=)=q}0Bd;+w~(5{X#v)tg>tND4p`p%9@uzlaURW2RqY$T zhUd*D!kLHKz5G@Y?@aK`2bMy>Uq~Wq;eRc$J7+Us2&2$42r6!-cREi_zV8} zT&4}u7-AhWQ;nMZHJ8KuwwN;J21FRDimb09b$$AK6yHD%*B#T{(;KLGq1)c0*BUCj zS~DGRFKm7y!d=!-Z5JVBgR7o2b#@C8&p|6zi-u5JXBLjcuyB)LDn=)qRX7)LLZ|@bv8G zwNl4>jyGJjlJ+!Jb@an4>1tDDH*;C*4T^52Ht1d7r6bMMAiev$)TFuU5!_)VyJs6W zVsc$ah0RrD0K6ol@sx_P;lr)?A}~<5y-ORKt0S#`Dt486QBt5-=N-8?`$}E)sPcUw@?#WZT>$#=jy9}{{GF+3*HJ0&PSwZ4~!AJ?EmBg zZl1A;%{U?&PD;;fkZFpUVKNJV0~HA$No~0r2zf zpyMF!fJO7P(oP>XJGRJ{*tX}dD?hDTWo#t#SN7b0sq7E#4l1*wvH(Arjre`1KT(1i zZCi26pEwAsn#172t*q>-z;ZNLZ z+?&J}11>2x8&~DMUN?&p<9&eG!B^}|--atMT$aMm2QId!I40Lux*flZ%B|2K)UG4dd%(05!vHZpFw&LAF z&vs;RXs6nS<@#KFc1)!p05*~=?N!hCEo=VX#kXM%jcyNNA=m4_tP9G&>A!^2SyHzS zDxv#s@TF&=h3<*ciW;Sja$)pl3^9z zrMUCCw5o$@ta25cc679ZYE_H9ckH!Ga_&+|&Ye{IZe`Ptxv10Ks&6p&BNj$$j6!$@ zJ@u@Np%3m>Rsg;C8MK?G0j;#haT;ynz!V-4xlbZ?>4@|V#P6U>=FD08Ub^Y zT+G@Z@yr;C$+@(#qsqyzQxn91A1+wc%0vf?fHefBcZV7eZ} zl_!zr=%8#)9OD8ZB*hNP(l&!^3RKWOh z7}vkzYbX}o>iFvMwHIG6Lb2jR0T4Zhdpqq+?gRr!ldK}+4C0i9>QDEECGb!Q& zOwK#4Dg{$vDSRC33Uay>nXsM}!C)pW2=z_VirT}Pq9+&Sz6j-jV-7T$ z)?HZ}y`s(82pftCNB8_2lcx|aO)Ja~0_f3%Oy+DwR;LRl2u=Qb;hCtIyWdBtGeKu^ z+0x*LQ$t^h{su;d|7TOuYU z)$Zdx6dV`*rn*=5Qp*vl`Z{&GSEcH-%02vEwF!9;X+fxn5%*F?UsaE`cTRN^{_5;=sOf_=i=s4Hhn=fyBF|B+=?Ess9>QQyS zDgV{2kZP6yI`Z z$}XTibaEjARM_|TAfi_JIX^2ohx|Ku9`+A#aec7dh8vaobJMe4b~lZB9NP2?7{Unh ztTzSGyOZ@mef&iCCy%S?LVtTAwSPk8>2oK#KYv0s5HJ*+cc_NqehPD{=T!Ja@FNPF z6A(ckI8}TIPd%QwHo;Sm%ABfx{=tuG4&KdNoS*aaUjd7`Npw(M>kNO!E4NtcS&Cb;v 
zR>=n0TE>jA@;E~hy>--2fN#cZHE*5760M8n0~!9g+5^+XB<6*53FFzY!b-EmLnLWJ>LF99YW*)?}RA7;Z3YO)Q8XMci?k1 zY_E(@j={2m?cHnx-}+f#{_RNh^69@?y}f*zv!Q1*pGGPN=F@R(^-lI$y?KXjHLJJO z9CrM}q74J~*66L1Sk##U4o(IKpCVZ`Ks)9}EQYKxtlP0H68+59?FjF=MxWsxI8;`FeYJA@Z4~K6scVF?>T|}^rV%RXul8@hQ{ykB^Usu%49Y3^ zuaxtSwrV%f=E5ePdTL{|DJL}MuBpojc)!8<@CIRattm3>iKP4b;|kc zV(h0J2NpUk**Vl`G}5j86OH`;KqB^kqLFGO680Aop*S2Pu6M4B&{}m#mvw2}7&TXa z{$+RN7&SoX^Xkxmv0!V7HR-@u)lk7JX?>dXr`3=qOJCV=k~bhZ8o^&IQoI-Xy2%%-ao)FXOow)>umkc{<5meZ6;s{`;%u?UlDe&hBrX>2w%~mGK$Jk)?RFr0_dFJpJ|~1z+U@3 zZ)bwz3zFnTaDZM=V+v4iH-8Be&{15IVAJCAX(-PJ^A*#y9GsWdFre5-wXbQ zeaa}R3qBz!0q?Hho~!0hj^VdI?pxE$r3E;#G6_}MokF}d)|;}Q@AG`WHROmGiS6U# zaL?1fV4=4Rl#e{N3qr}LX<(Hr=)H&POi>Ne=KBERKKkM9?dzZa8TYvxJu^kQ_1TN4 z!BjQ8MlVAvfcCVAmg8RkeJSa=K>NqkJXh7O7mS3ryk_zsr(=!%Fy2+R&AhEi0q%EY z!O9Og00u$Yn`VildATZ3zZcOBxoQbwB|O+NVZ(|54omH(sk#l>!W@VX*~-lJU9%1r z1^4MMh=bb)M!u4LPP~W&?sLzWri%3%T!|f9^|n+}lL8g3x=Vh=GKMtA3 zUjuvP%WgR2kK~-FC%NA!P$Nx4xFDxTcp|XmE3^lVDl=^MUz&@ zG}%}Sb)ffYq-Eh94ZKs3BcM9}=2Z-NDE4EULLd~by>O4bQc0s{syg9oN3dd*BcTU7 zqw*c6f5y=+IsboVaa0C^oH)3_QX141ptC`!2f$wnKvI#%;QK|cvNgWn4`)HMLt-z@F%CMsVHfhE z$d4XqH1x_VDmF0Li2>FMDzF7OfllLK2hP^^Qou{~*!?>OQC2!aXc4%1(1GVKfZ}!0 zB^oDVdZ;q<1atCRwgy4;G zssQIudU1~GSc8W$2}61BVmdNMb;_TLbJ^JRR=sOG5dPpb`)xHNkPrXc@n35Gwymqo z0p|Jj50JD`2Q-Y*b9mMU4nD>!EQmR7WTfa&?fxU&YmRhqh;EV(E9-V0D;D;>K%zZd zc2#SK9p9iKgzIZCe(``YfWtwM+yI*$&SA>k^ci8wu&=1eV#BW$2_s}MU3p&BQpQ&# z=BmhtQHJ;a#i2~~PVp2jHETs|3*-*6dQ;KH+aYd??=Rz?zp*(>%?t}`q`o?ua*lKC zWSFa(Wt1bV$7+CGA%*j*^pixYSqtBdJb3($N#h}dG;|ELD7_k)9&I&918NIho2w%7 znHnCrNeu|+YHl)$+(ryxAcnZxiD8@WI;evfOdtlXQUfsv5QFg>RK?9XvA1Gqu(lLi zv=27)S^pUKkH$e<^rqSjXRbs<(lcm(!J22;1Ppv2rz|6?B!Y+3t zfb%8l*iDqtEhATlDz2P|sb-OC#q@D=2WF0x{%mSdQBW%>gv)62#xLNS`+(PdFx@ol-6#4=x4>x8`@ZNvXVBm=U}t;inFR<#*+Vbj zV#3U#cNQp{&3*w`0$@CEeu6;cr?J|hup;0LuOoD2fl7!(-d9E!Plrp%#>Rnz36%Jf z8n58S#A$4cUs84J&PVg}A4JBO-YQzgb&WRnmeHm(HTZ4BvEyqx_>!_2SbqWVa7Pt7 z>gNwEO7PJumlL$`%a~QnmtxvtyD^53`H=NLc5#1Jis;l<%SizetO)jEv#YhMb*jy> 
znE6>#uA>F&xP+ni^A6rbX2)}ABYdhnjbEt3n(oEJBn120oqHAAZSBCQ9YOw=Gt;r_ z%r3e@WDNx?@Kr<~EmVySwsdZxO6j;a3wCLkp_1@5gOG$Tlc1_!&U6@i)^Ok~q=`bM z{XRXVE`A3(7b5G!A~_hKy-eK~A>QL<%37p4+w4s zic=TaH8-Q$=7j$R?w@buA3<;cW89b|N2(zp27cjmP!lWT*myv7L6}XC-bBVx<+t$q z5yGT)#D@?Ggzzvvv{+@-JBp!5F>t^i*@ZwsFCJXd1blF-$nj74SOoI_m%zav z*^ii=h9sogqf%!7{czPCSSx=Ynlci$1PWvjX1j5Dy@yI-a3u){JY+8 zlQ5vX?Rh~(yIPKlWh1QMjowMYq077*RLe&kMIz+Q?d88m`bK5IhO@2h8xC)S;c`$9J{#$-z^!DBs#?+=@@3H1~RZ2Y)UKLY}!24 z1Mbe8{7&FKKIBk3saOEvtt}knZ!#7`PErK}08qlJXnCdyqT?++f|(b0rwbOqDuk>s zl-?es2v1E9AYBe#!IShB#Qrw^24b7sTyyhOFW91VYX}RwTX6v9M*r|PrDhWYB7-hK z(){ty6|5y|WL=^9b?}cljxQ|T+6mt|K{!BZ08o0X>$jrP%Q0@gF{XD`sCN181&FDr zHdy>S8!PLhz(_XM9KWK$&nH4|nFP5-w?WZJQ-<6!7G-;(>{!SrfXYxZxa>2y9|w*) z9^d$Nj0yPGuVdV6=osVkb4A*Onf!t7oQOF*N%HUUnaFPslrtsyd&tjwf?{rahJamu z{`@pyJZHD_r+Se;X#8WDtCO>SwqWol|#PQDhZ6~QmQw5Db~@FL1}LM+6e><3{xrwrUCuEed`IbH@hC&*8# zO=%wDYgF>7itGasNkBN`gnB&rS&!xLHvDe%9RgEp8tfG#h138YnU33O9_)qN&*4*e z2Om7V7GOVbA%UHs{x6y$$n`)#V16m;;4Qvgf$Xbz2mLC?0tpn;+GryPqY1Ni(R-+a zr+RgyLYES|AoMRm+dCofYr^G6dea7&L!7tvJJic+P%2a%o=5C>W&XJnb~y}Aior9u zQk!$9p^Gt(WMHQP7#7@_oXEOVMrKwA)nkB~oO1$T707~{piT#4CCQg54i{_!b}BH0VYCeL+G2G5W|GZmysXmZmk z&oABa93o0-a7))+Wvsz;MP(PzH5Qot6*6Kl&5`~GlN?ikktilV&q(BTqDg)=^6#Fx z-F!83o0(k_qY&J|S^(z`St==#CK|2DIFm#Str7>4SLYM;C_I7?@vu z@9s0+Su3>3qZGqvY!t`$oJE~s%x4n4h(dRg4U@|E=m@jVmRJ!yz-Bo#a4WE7gI1j6 zTo{n#T!epb61z)C33dqY`Q1hQ-koI?=RsafN=eY-lQ8A1D1RXnM^9W`7y$8$$uIvO z=?}i+!<_btMHSZbc>QlVg0bkSxNAhAybF9i$$3`*i)cyCmIj6xJ2Ak*Cq4`W8yJ8x zjBq7pNdE#rj0Gk(hnDZE;18Id-^K?#T-bei1k`#PWfowV#*eUxg|#$b!c#F}giTHu z!G8S-0X7ygo#TzhIYlO3jS)TLEM@4TY4xQuRxELA{W|1X_R@!MPPA=mF0f`^6;urjJ&t-g?Pw zs$8l1THgacnAz@DyNwTxKqih;Lp}gJYiz`k6}+zcsY56!=$Svw5i$P4O!D`kqf9Ay zMpdt4z1f6vno*9Ly`h41%Pi{jhH9YJ-u#{l-ca)1>XldxiOx~M)Z(2p69SSkFPKV_ zF<;(2#fcu11QcV2giPh@9$m%7v$wdnHjp=l@OkNloRD-UrR?)HkUh%;2h(@3? 
zhL&Z!ZpIM(frFtEsPIiyyLNSuxG*zB;RDV6rfPTB%vl9xDAd@6H49T$Tg@Z~|IsRg z^i0N8z>mZm_gbOfMWcZ#emxp`8Dx?|ti$^Pj`yyP=LvM$0C#!MG}M23I>)Qx1~zyP%#2 zTYQV(!&Svs)=C)KhNw4*#3%&tWxu85kez=rBRZK^5A_}}%0b{qkjcOC+=P*D`5eDL zk;fThxQ<#rxPo!zdKq>u&r)$H`oM75uz2zP&n0x=E!8Es1Jc~vjb{RTvutmxXGJ(I zds{uAAD%~7--aV<0VTep9x%=Pm(R&$-DQp>*E=dGJO%AAqvU*kOi^(!(1v$Zm!N`q z#+F}5Zm#{5eGL1It?#P(`mgioo_E#Q)@P@C>@zxkrBhB8&!mF8pga78PCys7-;NsKnYggBV~V#a2TnvS!6rT7`yfhScm)a|%8F zi3-4;?^marTT<>3a}d2z2RXGWrki7Ri1)`oQCEU*YlIHDRja>N>)+_6i6vp8KC;7! z=gn=z2UK_?AeQ<+Z;n{_U}z}0UKK&^U7up3N|T53ndEd>1qI{~#9*RZmmrVudGmAd zj=O6HuDHA1o_B|Tq3%)P?*q9urx{4q@DY%FoyL09rtp8)EV%7?!3}qp5+D#cITAOk z*hG+`XM_~{5sP>~hE{VtEjdz+$KkH$XuxK*N7tSs`xe#3^kvNgs=HZ5>DbPdw*?O9 zZL`Vz6}-o;A86=Ts=XdJn_l}$MaM_X_MQrfEGN0Vox};6TY;H~G|YSWeZbB4!*Y2G z1;2j{?*$n8SE`3-O5L}r&dq`#&m~3TtZd$20JoukcN}+--FEDcXYT^}FE3Rf^1 z8;n5tWKSnNWv6!CbPuHhs;5A z6wq-&?5Q~Q{Pf`z?BU>nFUN=>d0KuMd*R?>3TFbdA)d2rrZ(qy=$1CqZpl2vRb_VIA9Fh8{VfZ&rvBB|B*?AZmPNXtQ z2$7K<4!gi8=rO%{VRPo15W$)(hUMy_C=VOjauF7YEUXTG9*1&a__t`inKBlhUYA^x zQ!vhC91hdy#Ua?Mx_t&V5uL73fsaqdw+zcI=vYgv#XD;36xcRgk7M7>Lbj85R4%P#ExM+ zkeqPz^xduVBeBGD8?*2Ty5$0np-63%}zT!|ND}KHR0V;M2A7 zF>2Q6%ejGV^EY#QK5F!P*I8?r=7YlwxVe_^0H+Rkj9LIYKih(545R-UG>jZGNQPPG zG+64nUY^%HoA{aToF-X?;NQZTC-IyPma0GWR;y{x4%JuBc!TQfM95RfY8t&0Ax|l9 z(EOchioW&@sN2zVau#^ciN*i;EYnP9>lW`S9n>(NbHC{pvDS4!{p98 zs3wWHBY4B%GXglx9EMapxaU1|lPUPHN(!r9fy86Lby&S<-i-#47xBnp)ubyop1Tp4 z0_F0o7L#)W8lXE|26v?nGc-!T&{21lalj4-+GK-Bk3C#qyianzOg)dNMA=j~T<)<{ za72x%zUez^H;NYUVl5qp>E{@#o=Pc4Rad>oDjIzhv16T9(cGgza4VhG9#su$v|oEO zK?!oCEh$q$zAfdPGA)KHNOn$@T4(|O=SZUgSwKC@R68@j98*QJ%TyjZ>M7rKooXCY zLt1yrgGz{q-Q5pk-!q`$BMY_3GbG!IFTlie8+4gAlq)Eod7WN6hS0Zn-k>AL5c(aL zN0rAQhAyYL(l7yak$}AoPX+C@$B$aER^QKTf_aV zm1h?pUyLJF5pB+fTYHXundWH-hay-=_!Dr(R5`lC*NY_ZmRZrMX<9E1JNRDv;M?s_ zCtUmzj3VKN$ymra!jR_?M>;&XY6IpHhx(XGEo(M(^D+@EY5s_GPCH%$wpH*Q4_p@q zHJ7w8KbB%=E+5^TuqvgGMSkv*iT$fh94l$_uN^`5FpQtXrH=WUe!s!)df6tk}p|6U2f( zOt}#9*gkjAMW39(EZgxR9Y3RnhgKDWQ* 
zf?d>u{9{M&dx65v?_fc|d(V7~DX#ubBQ$&z3k2mj7Oo92aUXiqGsuC!P$gR7H*e15 zptS;I^<^~k{xf#yrR{~)(U&k6KAFAvO-(PTj%kOnM}Y0F8r&8b&+u>n(+m}I7LM+? zawV*Xe?*HfsFvYJ02k9uJ*@SBz4YukI(`9yya&~kT)=KZyst;jH7Y{xJLl`{=_jBQ z_PMAM;wSae&a1-C!B zjhb9lJ@lP9H0dhXs-+LTc2&j87E?Byqz~$vB5B`MnAkqdp>MdjZ3eyehox|9v7tt5wUB|b4kiAiVuB%~?n75fXPakFdYv+U zQgsLMHf3jKu=>Dg8%JcI?cgj(_aZcf&#uvpwu6+B=MWn}IDR0>Sqmob5Lu4j%TXOd zSq+JFx@r2<7vu|#&e-XjpRgfLr{8{3$P-C%KdUxKFW3KP^+4DREcy z|FcRCjdbM}$bwu}QuZN%QD6G*XV704=-xwRXl^Zhz=fMp)Gw$ilJ5IO-Jj^pd_e^T z^J%|M`9c=BmMBU|ae4sTJavH<{ECom-g)iF43W;vT8Q==ihl?GopQg??Zqn6_xWPu z^QFGe7paAKACGzK2y)~G%V1-dH=~Ln_wN(YtL?qHwM`AQ0E3xH613Fz){ELbZh0-Ffj#w-p;$5wl0+D5JaL`ZyHn)RouCsxsWf2xLZF1{p> z-5gCn{;BF%c5U5wD!`E}1!d^KX5;^;rb9l%vy-TZ`Og+Kupaln;vNm${2Kz6#eRV% z;36y)*r5h+J_cSpIZFqp_URs!UfwduTAmKRXa%oi``X;?G`3jAw`adHP&_&v8Za65%u$L#I8od#V8V`#sfrd(I4BAtq_tDaq*sIp<^ z0L5IjapnVR(>Wx-LrmzC%wb{V_KW|GGaprrG9OW&<9!;c<|TmNRlaQ`3W=ih8w%SH z$$8^%Dt+nysC4cfmG1FXx};LQ5npf8hLb-4nV^}{Z{TL!jddU!&^d0nH&jAYN26z| z)JEMiaq&vCU9?&Jm3gy1dm_yh=6D#QRta;5kP7(CrlXFD{M~iBD$JL}N;;;S-_!#) z(R7n}l0I$|{b@1}7anp-a}x4t;gT58!&s}<8aITzh2in*CTuv#Vgxe-4vYia%;mN5 z-3jg=r5Vy)dx|rgx4~j_#%!*w@89gMG@FMCecv-QG|=25Zo@NRi_JaZeywNZG!3Fn z)se|m5@=3H{0B;C|J5x@n1YV@sf0(mIa2}_L#Bj=U;UL5n&G|riuzj219a^-T4^yi zfqJpuVzzdK1Y>eyF9`;-Y33AVa(tumPkV`A&K*RMgXcW>cF;MO|M#2Jq=tEb*iOY3 zbJPD)p+to}|4W4#cT_m*O$w=LJ`#U)oOjHnZ_GDi%wOcOt8`b^G)Kdw?v4sJKPuYw zK|#j;ap*hjLIEQyy@*la#giFOH{bgMcwdb7EMM`Wbd}x;!6>h%-67`Mb@)K!7w{JO z)#a0X#ga7&*LmDzmD^m)d`Z{GY@i>*{vY!GJ3gwS`vb@Cy?b}&CO}9b2?-&|rjdjY zX`uuJLKhJMX(DA8lF&N>fiw^iRGJ7!P%xrmd3;cLkOh?_R1MOR7KBhjup)vL1AO1- z?k)sGpU>~}`~35Leeo6V-nmoG%$zxM+ME${v?GNy&0ieT0w+3soaD?55dBnb z%XnvDEzwN%>Tr!|OLSH&*@TFouplwg7U50k06o*yG6Et7~ zMb#FG+QQ@1qqgWA_t+?xehx~GY>48A;&4C-l&NGTvpb`*r7|i<=HH@^Ym0C#aWrB- zi8Wf{D4H1}!kNH^h+w~_$6p111=<2>lSdaZ29c~K9;ea}F;6=&9#Nk}V{O?Bv?3JC zU@2`46;r&Xjw?LYA>du;Y-pJBth86q9gQ-+8y%TX9<|=Io6Ls>qNSM3GJ6xnPSBH1n zu{S*FH*<7PI?VgWRwF&0^Fs-sK!cz_qi`PST#uQKreEWF&@_jg=Xy+#P>%;BK2FMD 
zA%cYkN0X>uT`^v3-j;UP6+`NN))xF5h;Uo%)Tw|OHYQV8;(Kyla;Alg;i?wVmexgx zY1o+RMvB*~`MQgIeCt)35Gw-dY^2y+s}#!I)h6~aq-Ox>Y2CDf7TZKj-Q#Us&7``U zY3pt#49&dlRHJ~|b)RwPY3j}c6|D&hdu9x9rbUY>sutabHpPhb>Lq7JtQeuSccQ-r zv`{$zv8Y7r3B|sNK(B0s5sycGxVhOk1@9l3Te#?_u}R?YXW&Eh;HztO#&42Xh9O{oks5@q27;1(*7hYo_+K~lIV}{&Yc>F zhwrN5ENp0=DnN}I2&Xq|5EOf^KI0aRP8QcS8bz58i0f)TIU0(HpcW6hW_>eepfeWv zk67$=M$(Fg*j@(G9}UF>wTE+5BLJcPN(&l`2ZFx+wy5kfj{6?3b$se>lwd!Yg(GNx zV=+W~b0j4<5uw`6k<_h;h|+!;=^WogtW}|r0r|N_K`A2HYmL4pN!b>q52Z%UL8nWQWfSY}nRh0Ywh*D!N_jv)rtD#My46A?XmKMb zu_b`c8$q300_bTYXmm@_*;WcIQR#rl@{WjM*Ba_C#lA;>D+9wdW3P2wCW0{2p_Za$ zwKRS9@NN2gBXCgw1*VFKFwV<|Q7)vr3{mWa!3R?Qor%~zA2^_dAC)Sqg_dF^osRr>bMSo)2Kg0T_l-aX`BS z4e}m+PtxDjGdYt|TZy<(9u_|7R@D&2qYM4l%vPdv6o1#?SI>DY@U8!AKIi#XLQ_*X zBlDu1kyX-v%^0IT@A4fu3O*+Zome`DLLL;${6~Q8v|{`z5Rtmi7Q@22MU=lU>!u5s_ZET+HrwPHNUh#QFc!l%>@>T;{)5 z(fBr^ThO;h5bp$b@~`p4;UCiv;OJ1=-$n%Km-d-beF&*-#bR|TEp02hrl6ts;uYWrc+x{C|o`S1xbEDBH19Q=QkI#iy?javE zUJq$(`L}_kI)-o8(NH|60o6F|&|xZUCpri9N3S_=Up%=UW@ZNNqtG1LYfrkzQrZtbx% zubNLY+l$bYYfw@#)4`OLRX~LIlh%_B*t}Al9;3%$rY7Rsh|D;3$?&^2{K#*tFu1oD zO}!#ASc9UJnCc8?yAGn3T8ohYWijHAno;qACZt>?L~6kA*aw)H7;e);Sjz%B5UGY}fi0cm zy9jU1a^Ar)g+u7YheQkQ$>GlZ4}p}Z9qFfsLDwF1IDNX~UE64<#yvzEt@|*V+(UHM zQisvUJ;YkgZx{`E6ugxf=6v>1F`i@p{n1l&Q41)ompFi{oBeu=wm9Y6tG9SnYue0t zwKq0r?OnU`dLN9ic4nb7vA?*gCiCvX^TT7DgrVSF=67rn)fyL^ z$N|ft^mpt{z?1W=80zfyI2d@fJJszfgAspt=|E9W?L=D!iuAw6De!1GG!FZ4chnq8 zdji{I^=nq+L5_E$*b8(B`(}tM&yfKY8Vmd7NHr8;du!hl6vh+urI1!XArk4)Ek;NJ zd*5nVp*r(hTvmm)gLy1htlm4(U%ug>1;G?QNc8mU2VKP+Gcu5Q;>*7~jq3&{k2eO1 z+En?QG-&4_F~p1xRCr-CBqY}Cu-ts`J{p~Z`$h+f#mR9`bf8%EgLE7>bw>vRUvzj* z%m>-;qu&j&AQj3QEF!BTw!~I?fYY{$M|j`Z0$Q}57cr=tt%+?W_sNt6$5#?oWpvJWH6*OiefOp|7CpfgaK zA%g7?5y}7Ss6~866+=X3s?}6q#F-v`QB1#}G2rLm%Mzr-s>b5C#PXM*O&&)kc4|8Q z0nHmK5-o#XaEt8|H)!Ua0t8)Xk~VlS`PfAp|K%SrsZ>g?101F@OFy6?c5zkfo=v?S z;PTsM(>#ZGRtw8^`VSK;)F5_B;LG0FDWQdXOZa9N6%I$HV}qRC(jgLP`XJ}z3^7^L zetg1NF+!YCwX(&udz45`Hu_@CB5)k`L4Q7Xu+JU1@Y~N}+RA`Jt)$?@UOd_7@W$tq 
zJX(YXB|leKcFPQAx5rw7Z%m54cF>T~BGH=sMB(XUrrxw*v}hU{y_2akj0ezZ<52OT zc06LRLv*M?mq&}>5DZupd_|5v;EJZD+Rx~(U>&X;Na1535xvug+KdrFmMM$PL90ts z$B3AI=bwe0pqHXB6-Ssz6Nb8g7q>#?u;XO|G-5Igpt3cQz6UUVk%6(vfO_A|w>QTJ z`Pv|#ifzfv3*7`-(uVdWF;3eXAPTv|I;46!JWyW?12i8E?b({Un9dvPH3 z9tWxOc3`O2#)*3L(gZOGA*`U0xz7@*s&O+tnxdDzc02%Q3b%bIf*r8To0?7z1JByA ze+rDn-#tWtee0RPPq9wP;kZ`kps-R92I>bh&tedQMP^8-CbVsi5lN@tM}s3f;t2UG z8$=T2PY|_n+0e=fBK@Bnu&7-pYf=q}f`>J?Rxo1Ohk^CzmBFHV-G73#h(ma-*eG<` zI8peTk+{?g%NhhlY?>rG0ucvL=gDGD;G+I6r8P6EQ4o@OPk)V0PXDQmpA0BG&E|a$hf}PsxiA&_lvf~z^c_Isr(!DKA3*b_V#%x>;QV+h zh_=?Z27UXasHcO#AECf$!l|VUp#9TC`=IKmibt@&wSNvPD_A^;7~qVXE;eY*JZ?Wd zftb9?I3mR0;yZZ#JvMw-p2KhrV*8Ayo_^DG_30F`4$IlKB_8T;qNR)BV3cde->SI% zeER0yeo1Ugkx)<^G$|3^IG5N7MHsg4kd>eB+*8EE9$zb@O*2KeQ8r-tcHrS6j^d=^ zA|x21!0;Du!@Cu`^g`V63`^Z8EKpeY@(;~6Ro1#cdzSGx)#mzk{no2T7vWuTzvC9T zqtD~r;(otMgJ+3)+KcOG-Yn6IY77;%oZDsz=u(RSAseTFkxP|7^4U=*0qzu8jH46R zxvW9U3V!h+pBxs6-lW7F(c7AUeqN*3da03?H(h!3k?tRKHV5Q#7sfYN#05Q!>Z8nq zh><4K#e=fqP3o2l_Mrl{@?0@F(GO!-0V6EapeS?hlu zw{*niiR8vFzs32n6JXw$(YlC1fhX;uJNa}lcevOP_`q7f#c$EPJh09gD96kilS7`n zceYYto*3O?-VWRn3KA3#%nq<1B!`=Qp7?0NiFhz!oWH7H%|(bov$w(*ZOJA}ZSCE@ zG&5g33qjWJBWoAxK3n7lt9qnk_TM;YHa`OVy%Q{tSrARt|Arpj4wg3yo=@{d18k3X z7tl};(^m`?@a>Bwl>6@Uk*e2tfhd4hHRDeZKa9843)hz8C^syuhvy{09)f+|k51N9 zW5Te8=YG85O*d#!n-WJ&?m0cl@i{;0J)P3mEwi*HPh9A~dgbE3ZV@Pk|wb z^b36-?lwVFa2n2GOYBr&3h!DY%%@d-%e|5n`)O+wW{>3+Q+|OL|69NU7N8Uu%0Vp; ztw7$`^inH(@|;39c;#1YD>z}+g|vw^?3viD_(QpFhXJ}RE>pp9m-OwynnUm~%7I)) zTjtjr;v1u6H1gXV+14byM^#uyIlZZL9(a3OAG$eD%)||`>Jbq}%byWz?{Ng~IA0_t zxI$6>-7b1B*5>iH%9%NSC2zZ&;a;r$AIGjlJhYl@iHuy5&2=hvMPJOAiz&FnnY%C4 z03LfO`BArbZ8kW%*llPi>$sv!vhB-aIx9P8ED&9_aWi;QAarIiW*{yKPkJ150nAsw z1J5i@XKa~?CnE++z^{AKvA^6-ht*-VCmoY~I~}9o*$9@CV;TaKwSOMW)aFlU*z=;j zHsNFEy61(zMH}`ReY{wtYSEw5uZu;Y7O>m7X^B{C(NcHQz!yMR8u!ym~r^ zkVv*@fjeo@DiNo3*y-G~N=&n8eS1*y8t_Rr@PBLY;n5w=d22+du65l(YuBQ+<~y7f zYsFzzTl*1x{D$bK_1Q@NZ;I4LZ$ec$@)lH;?u$Eij*Y_*#HcUf#OpR78+$faK5Pb= 
z&*n(BKT*z`;u>`H_VVmo&S~q!yK21C97;{yWIiqGE<6>GAa-|9F?6XQ(L-0ewG)oF zMcb5@I=iTb=ZwOqkx)sEaG`d~EpSCZ%e+KwXgx6B6{Gb(bo6cUMC#m)SQRiXGyyO+ z#|+690bYFTH@g<4qRhExHBfxfjmE&Oh<8Ti?4IyyP`n# z`l?&uX||SqO_K(Q=FXn)iMhDd;uSjfZ|E(~K15OLfd#LOqu%RblbF+$O4o~SUco4Y zDocJgB?4SQipy)?7quG9e#ynZu!O_x5-W#xB8}-e({O0JO_liwPNPGSxIU%l6iugs z_eJ%vU)+UXc0WD1-v1?YqT#vwzOeh`J6*Gqg^rHvKs`PXp>=aVe6@^s?&`REqy?`3CU!ZRp$vG4a3i_c%gxo4*gI)&CLoyb4XL|0?}c>NOqlF3gC$etga$uST9J zZtw&bQCeqfa3DKZwDr}phE*G83eo&&qq#{qHj0PCzGRFCWjnx6P^m-w1UG)5(miwb z*Ceha+$bhm#5%0=sg*QjGYHf$qE(y4B7H>{=$$5RhHj$c77_2?fQrh_p~a4Fgij!6 z?iNs(z{geqZMIqfcQfn6iTUeG0o`>Iy3T6t<)_ z%c;`|$QW*hpVEvL}!=u5Lsp1$ndj=t=B@$SBqyy(2ST{P1I|6F!=U-~1X zyDx)wp)Yf3=`Qr8&Wp5Z7y5F38Kv%q{Muz1_oXqN*ezNo>_8pGRXS9JckU2}x%71C zW2BWY+j=xQ={Iv$H1${WX;FVMy8`APo@`89)39=eVF(s2H)pLD<#(_q(caHQe8XiQ zx)$9iFb(-InlLX?jI34JTW*k9+$H~dwQL1KP-|N{P@B)iuof@5?Pj-0Gs4B~0n-^f>fHA*UZB$OgaefxX}wLQKk!iL(K&2Ev-r5F zH1N@+(jU<15>V-CPzSP3rzSt)K^JJggG!?%4Zj(mnA)BWE;wP15PtuO5i--UlV3tQ z|K{&Zwf}^D3y&NR)jp2&O5|t72_pR5R?-6lv)&J$9)~vx$F$7$3 ze*jXIT3h}_{q~46+Kv~g*%zS2^V{=zv>$1lvHt;o0flJ7ZDJ1BfIdrvD_(A}8c8Z3mK zv{RdNFov(B!RJJMEqO9Mdrq|I`=`e7isGO(w&5=`-KIw2$?^x>t4pF0lEHI+@NILx zVDNM^>Gk*GA~Ya-sv7xab@s(WehZH1sP-7DIbQ~HsIrnoRM3eYKQHQdd8?5}ls}Z> zE|hZ~n!JXz6{&TP-pV^ttma;52$?z?8aqBpxZ4GO$q~AEUerq{#+@Jt+OQ;AjXZ=S zLN@S-OX{LBa7#;K@I*W@=#wnPF_dxvc6TTBy?~RU@6f^vBDm8k6lQ1i>#%O1z)}Sl z*YXlsjpY#!4c=S?C|AI0N~plR-MwHv#OENh@vypnh;&zw^gISlMbMcuV2rqL_0czk z=Qx~%=Se&p;CUQR4o<*Ls99f%q@1IoVa_qpH0QX;LA8nLQEIzi!CR{>inju^8tx{i z;oF{!Xtg=Eq63iD;~cu$8k?&MD^xzVpw`M5%ihal=-G>+ecT|f2y*CDOm8teXDhan zGH!In6ulVf8W8rq))anT1o?f6yqt$cDE#MNNKclFdMVA^W4aP-s%K1>xrS&7hV1s3 z^6+{3vxCwuhyb+#oy7RYa_}wwvdvxb5;U5)7fo#QFMa?u@KleblydPv4R2(%xH~Gz zuR;Xp4QyR_x+jeJO|j*cwPMLm$-)7^ql^lS2CT@Pif<>}->%|3<35ZF`VgcfZJI=W(7;1Gxa5r(_DOYUG1=6uXJqF=!KwTXp!SRQd%EXT)V-z5a# zXB_C}HdZ2r2|Drv{x0M1$GysopY<~nU0lY`0O~p9=k50=q*Ze_v=0?0?uB0%#<&Hv zz$`p*bcrM&4M&$?HB8PMMP=Jykcx8TDQZlmjnC&T!A5qFp-aLbaiy>L!v1j?VjH|% 
zmf|;m+f=0&+KhwxhAQvsjKlhRy!*HT`4@hBxqrWn-&XhUH}M;XiOglXHV-eVqmwyY zuj03Ge|QDIO}|<+y3)+k#muw9{N367y$rt@&<*H4bB;+E!7?|X&*NK7cU=qc8^9~9 z*8uGWO5-*^(3S72;g*Swmqa={sO?I1R(vB8RX^yk5AOz9 z_qi0QYEZa5K(_Bh6E%$beka<}&hJDL$lL1gL~xA?M!76ae>KypveG<&&u~`nG;u5t z^d22%KBBWYbPqp(C5}G%;*95X=sEc~=xj6?h&)QRqTT|QOu=ogViqny-1WUU(vr{0 zDUDSB=W#_&Wvr&J(WNro?^cXY^a?z7N!JBj` zrTMG%Q+I(%@|?en=R?@)j#aow6iaN(>8K!8>^s05Pj4fikya_fevy8I#8vP#v7;#Y zDvmq%MA`kB%r(1w4VcUg8(CP!=}h)mC(^=A_V`;1C3^2bd>?@CEtPtEO$ibCT2{%^ z8{dm7aH{OatvsG#1AUqWqF0QNV$U@q2RXh^f&)wKz_eTbXGbE3s#A_na{}IsO zX`pJ%jO(YQg>Ac#<)Q~PmO?nKbAHRAvP-jCs(Y1b;e_dz<7 ziZY7*Sv2<>_3<6&jk$oQ295g}n$l7Xr+F(I9sF6e(0{FTOHq6fm|}hrwY0YegSfnIHWI8w{eeMCg*S&fJ^q05>RwHrx`6P^nU`+@NqnMl8>euZGGLk8^HJ*4J38`RVMxi&edV z!d3Zf=$WQ>zHh4Xef|R4uF72R$AK(1j5ZwByJ<4i4K!b*8%57+vUd2Zo(iC^?1}OC z5ksoXXQ(}Zz}BG2o!X_Qw9X>iXvdpUg+&hZ_tflJJ7}S|j_>a^r5AJ=rM=$Nxk;DZ zw5sD2BV~+F?kMK&;<6e7+$p1|pOpQ?Aq?@z8?-^n9^qX_7nWfgDn5t<2u#cxR@eYh zWEF_k!#{qXAsaq$n~GRUETiBnhLay3sMZp#k?yZ2=vk~xS8UN)e8u!rF!fN3mn=|s z(k?Fn(#q>NmM_1SuZB8EM!;^1Vrx zi@oK{CrXuW17s)A<_m*{WY?iH|fq32S`h5YKPSb@*K$lWWSg-jjR^$}Nsd zdO;(1*nWrf<#^k=k>)2Ct$;C2E~X9ted4AWUIV7kTMr{WrxeBBjwjTO=J zs<-oJbGqVH>~BWp@h%;pK+Zp>*TS95{<3|5dB_HXZOVNxXyTn-_m{Rhr8rN^hZ3V5 z7IiU@iB4!cKJ8NmhlkPu1?8-L+<1c~2go^=f9wCI-vVSx-TRe}bd^rx z0SqQnlRC1NtggNM08OhU+lO*1JN2DOb3wzNW|;&n#-a7fhIF`=T%{dtNaF)#V>1#) zSS+jtY_Vp!l`YyHxi#3OK@E+tpB)Y30%wT?(e)U;+pm0;B@E>lZ3~nUK72bu9FT|Y z;VCQWa-fVey-_%gi*Ql?7Pr!w7$ghT+FYN{7-zk(9$( z*?xlx>&i3Q<4Lq8Tqat7tq+wrrTqzAl0+>W?QiT;igI5<`}M95r!P9GdxG>`8P-FQDAS`0u$vJ zVLGI-$LsuDUw!^fN=%fgUXS)*wfYF%T--j~vdRXyEoSSB5@o2q;!jK}LVGy3CCZo8 zyxFX4bX(;4e8o{L+fo4Vm5)+xb?mWn)I@~Ix3UP!_iz*dne)MQD5mGORrSku@$RwM z`3o_AMJT@uW!b8tfSX`SQ{eU`Wgjf&VlJlRN=V@S0iXOX<%P(ckDXdE5ANWsJD7VF z4ISWB^dlq=2TrkLbXY-mDgA`CBK?pxn_P~kQ5ZS?Ry>5F8ptSX%@>Nwe)FtKozfe~ zB+D!Dg;X?6YhZ1WSa|wReB|SoK6Isl{J^Rp$z?d*mMo)T2{o^k>qa;DttHtWkbmQc z6L~wnQaGh_`=hCVv&*it9WN)L>sh(9$$i)1L`X91Wv@vAIE^R@yC9P!kf#`pm%42G9isM 
ztY6`2Lt%XD(Hzc}Hp*-+duVkA@lA_Cw70p8(b5;w)#h?uQtCTJWzAS7!Q=_>TFvkd zVw-dtb+8EO(X{?jDJ|vmY7}j6Df?;9K0%?W za;OgTSR!q+8v)v&gHtBp6AuM{YJubcyq~h3YQT%qx8Q>(|awiZ|Hy3X#fEANWdvZ?lR zoU>VLDOByD#nk3OQ1urEQt5*-Jbcx3c3Hvgo~VHEn?3hnhs;p$B)B(_`4}G3)7vOvT9GoBvb zSw7EnuQC4<3|9bjZU!0MWDB(irFD}-)j{-HH~Cacc98uiztnteMcLNuSdP%hLMHpE zc0k~uZPe>w8Jx%ApYkz@F1hso*4GxUlDBcW^@8DG({5J8bGGF({ZxgS8 zZ;V$lf0xnQlhKWUbhf+9hVo|cBl5Y>Gp{q&lN>mApwp^<6X?kI>>>cjJO+?9f z_ZUo<^)4*q7T9af2qA5VT+{x_JkLfPmz9QeR;EOO>*0EC-OAio8+9P2W{#Kbo5+N4(B!nEao_2Gml5T%({Vv9zRruhsL5GtWxE4cA#R#;$dv= z&1=)Tp>kf}BY@d6dcB`^_3taPO_E(`z8$hslL@rKE+=c3!^qnq8*A^hqc#o+?v@-5 z84&vVnxe9=tY0;licPe4hRZ{ccjnb~Xput>(HhjH%MRHuba5D~uA{Re0V*p(dSFOe zWvJ?cFd8yU{;KVGn)(fw&uSy*()r=COxoOuzweDpjPtA|@B7`k;GP+1`w%{e zqN+d5WvjexW5iuCi65NH&UV-IT6vpwwPZTB9o9Ot&Xu*!ETOV2aO1anCIgVkLxI?C zY=LfS4&7P3F+VHM>CH@dENWXb9p(pq!s^}1OcyHdHUPX*ZH}iWCb9jNp)$jLTV(DQnS;4B0dS z9=TZxQrPX71Kpkgi7&xU$h`Z=luI zYtJsEA?kh34y37wG4~xJv9W_uZUC;0<9*u{)Y3C${A16difE|7p2yR|idzKV7zxbM z{umikY|OsGrvet%<-j5ft*|r|2P$U|D-M)GJ_eBOA1AV5#{-6|Gjw$%&UBV#$_4>^ zX?!u_DSI>mC`1t>WT4uCQbx#D>NuJ-LUsXzxn+cW%x_Evg4y~iS<#M*x(Z|SSgJcx zK4;|)l`+6nJW_U5n~~QjX^VZts_0rt&?FEudFr{UJSu`*K6 z_q;`plYN5fKgC1By2-dKj%eArW`236n)K{Ane5ZCW;>7&tJ)4^*ExoEkCTZt-W!AA zf=*wscMzRBhrp!1P)2jBw(?u+W zSmn*pC*q zDbUe^*|C5kv)HwH)n7%iC#dga`4~99cP7gSbp@47M#oZu>GEV574l_m7whvLfa3x2 z%+oPSm?DR((X?=itRL@-?%wZCY_?spxd(T475V=q)*+)&8x$&$^q6S*{GyAke;oGB|+ZDo)% zJX_9DlhPS3EDZArmTtKDeh1E0Za51^+zY2a{gDGW59iz;&hNE5aISoc+T;SxA9C-8 zGovb;x9Htmz*&%E!nr9=wo|nwfzI~%a{y>nz_o^=lUzJaPa;A@1A z{xKVkfedk+PiDCKDY+N;e8WWU4R-(Oc=CGWvsfNF!pz<4BZ|smFbh%X#|6`qF11|r z1YV=JF+y>;=!sbu*6`Gtc5gRfLeNxt@+lde9Qsh98I!cDk8Az&qT=>qz9}k0e%Ja3 z_rtK(t4X_`l5tw!InK*Z$%oW`3{F1sY5BhhPOi3H)+i{zj80B1CP6;>PX_5Y&O4y&KGjQC!z}0As z7Q!Eb=YfWTZR9k2fhEAER9Wse4Dw?cxa$+ko!z`lF9tJPyfVARw(qf0>w|1C_VY52 zf}Xe|vL`VUfbn4$OOqmaN$h9Z?Kbk#Q9uJ@VR2OGWx9@CKr#cF>&QOkKL|A%}PL7?W79+2+wmW{7?!Gxm^kJO16NIM&TpR zuGQ@yz?A>oFc3n_HaB{1SQP%^Ooz|{tQn8T$h%w?6<$T}RdnD|`cZKSjlX}%CGvad 
z57+Z80A+&hNmI>OhIBL05H&cIn#TcRxoOs>y$hrfQvd5B^H7n>7>JG8<8vB|OHgI~ zq?ujT+@>ZTdd-{CdvMu0QDytzobO^w(sF^dCXavPyq?3W=$`9~C00vK1BJn-GmlP> z+skX(HGkG~_L_x(Cdb#Je#gByeamC$F@~&!I`jm%B^W3UO#}OJ(*1kNdDjihOtH8+ z4?D7OrbjTif(B>(7r|f?8U`KA;&Zh54D^k&dLBP-ozrKVov`2n7f#pJ#r(w9{k-{I zsLz?{Qk{dRDHvcQ;7+nUfZLDb!5W^|o1w@5{s@iWM%u>!KIo6N7eH4`^|~oAEE{?< zh-?dG2#)W!S|~dOk1aAoRGUUjX0Mg3v65MTS}0RY8Q=giTPwT@PPh*h{BD6VZjp>Z z1q~L-SzcVhI4p(c^x+~|Pfenei=bFfr5lUn1n}SE7R%`9u?-8$E&;cpp<1dKa%Ibm zuk}>Suc^C*wL_gTj{dV)_66(fwM5oQA>`-9roy5&+G37%i2f-mrGM5kYn9a4`& zrd8~_AjTiJp3DF{2|-4gF;)89lQTF2UZm1jUqhv;N7gb-(fuM2m*oPLc^EXu zd7YuVu>lr27VKbA`n%S+o&aCk^r9SMjIYG)o%%2U8?`+tj@fI>BHqR0Ku(qSNPR(89MTaH9Gx(2};At3jKX{ zp#t6Iq3>AJT>)q<(`Zk(IED*%g*BZ~m|YD6&RuDN{Fu-sKY~P-0C_mg8!OO;sq_;x zWQ7c>S&duAV6%TwwBtBEwL;dbcie2=BeGqG1)sB!y9(7^#m{tTh3sOkCO#SLmeYO( zy}X~CcEyL~wD3;LF`FJ;2mij84hqy>6D=|w%;$*NAYRj-;C!HiGm6TtR097vF8*Em zj@8pWWbGiz|2ML>8}%?*V|~ZF48yInI`I?wj#td5MSaJ7>gpTjcdPok`4s9ZGhM1{ z@Vu?>s5D?xGW8uHCVlIS4)DfvN8bTw@If!hhCPp9o>;|!rjInrfJP18LAR76IlK~6 zu{bnGsNWup@-WfEf0Sy#!3=t-z81~D^-v9Zjafm$Y6BOTlHpDQEPNUz!1 zieoZ$Sq1iN3@u$HXQ|7m#>?`#wsVkuDReBnr>)|#0c~a3*N%`z1#rC>55QKTBax0p zAQXrbf?-x2tMFUVXI@@N$6uC>9y)?eFBgSMFJIL6p^}TR*e`?xd;s*U13X_d5xM)( zUv0c#k~AzNZ*w7OYNQ@%Qr*~E%92UQpG3W0kp}{K5F6FtYhVfv{f?-ukAcjkQp##M zv;j{=>Sa(r_c(f{M0HPzim~HX&SR~dp>3;WaNUC_67ArNZx5Qo9_46<-*ElI^8o#_ zTDEG$7v^#n#j)SaqTrK)EEm(;nf)y2|^ux7TL$i<-*vsA%gtqKNfRWR5yTq%H>hpZVe zyP1jE&D@xsVm2FvHH3v)G6&!))M%c-DZc{g3%@r8nE`%@-=9z7zOXvScSC)D|kOj*bM13ae-GO%Gs|~qpnl*TH5Ry zG-Ry|Le!uq*UGmZW}In{&P6+VV=4|>Sy{;laprO_D-ORL;@KXE4o5qBm@9%2M|BKD zUb|j;`4Kbi2&+sTM`*XCHnb+?YR<{NUkH#3kHKh=K*0{XongS5p@((X6l zUxxQ@-;mq&x>G?>C+^iFod@5Py)E!g@E$bxiDDS#Y`~I(dht!L5B6=~6}Ia`TQTdQ zsvL5~4~vHD=1pb>OdD^{z|<)_vRRDw@V0Y7&%4%l>8}q?ah)`yu#H;SvWA~?#GC5u zzEmrZ@BCB@TUHE;V&u)Gx;>qb7jQuLvqyt*tRT((&L&Hqm3}IGFtF!chw6dz`MAm^ zu_}YVD{zW`5P6t=(J+$jDsAA&?v6Ca;e62^U2AO(%;OhC_dPq>%_{eSKAra@75ejt zsl1QavN*nn2S7%Z9oU?=;Q}5N^p8=7?~&j}t|_jDSq}5$-=1#MZI10`KIZUvj2KvM 
z19gj%{gZLekGXRBinY2PniYroe&_li6o!Cy#lyQPiYEO_*6E0eG1WKQfa=I@Uaq5X zpHVFr8;sxFOdQ_Nr`t@1iko9LXWisi%eV|(nWTrb@{P{o0aT61tm;O)lt_oBR`*Ry zVMl=3->GVIH8LwfjmctJcH4zmjL94NuyjRtq|f1qm|?~#OtfO3$QE4J6-%`D0mCZ~ zDjUalV4L90nVyYqLIU(cGu%W1;48*al40r5svY#7htFjATyv6y?pC>!#O+3I9#uJK z>desm9li+Z_U~}S*=z?a9@;U`4nz+ni%3_>9fyGcISGB|CS5DNwC&!lvt!7&J;-o@zv$oofZ4-ayQsJ~pID6V*u2_a)QS1xQ)H0p(0Dn!9 zah?}E)QUIMB71#{Z=MAM=DGf+7=W_A!V;nsbA$_vpHloVKW$1euW^g2`SW#o7BDlt z@!Sra&xZFDba*&W4U-PWGs;9b#2PSVcp#cvj<+lMS26+7L5LyvZ|Ux_pyR62dcI{; zl*(>OTp$+2g%=Ym1I&WJfE{po*`9t$=UYY`W@E~dd1y@7x0#2F@@naNi4tO2%IXZ8 zP088|RA&_hFL$o&XQ-2G7fX&?E6HYe<)kz z`GJz*020DVAdqWij5-1!M`Mau3CpXIL03^?GfME-@b7!aW<+|v9u4k4CdQ={jt2H9 zN=ykZd0WCz@X$nKLU>rXOEHitdSZA`e;#gda@{Kn=F*?pp6Ajd!Obb=3@YwEkH0+@ z%w{NRzB`>O<%MpCw`bB#?mkail({=poje5^o>S<6)eZd9_{~eg=GBTJQq@P8M!Cl( z7azIOOOR=};+yb^=SfKhaO0`~QbCVwlo5S_*-M~eIvR~3k3yD;x$XQbcDDD&q93KD zZTGjrcIIl!)l}5*5g4o}%T-WN41h^icuEGMS;dY+e3*pwv}Gf>tW!_qm-Ce80}6)n zYd|sv*)(@Iy5je6GuENLGJvL3MULDmcnZk#F8rXUw6@=TSJcy;YfkLF`8qx z&PQW4M**g?x_%B@L|xK0$;Q>DK(Zf&RifCZP}@y%oHlAJt=}YTSDON>?|sx5V8pV{4|KnPdhvGTXkfN%rkDMK90`lE$F(_-F#_$}szir(hWnv?4tgp*R-m zxThfIJ#;40fqM%8LZrjIvw+Ac$7mz1q;Hl{mXX#X09u>2?U50Wu&Xcgwx(m5n`$|L zd6B?ECe%hoENXei8ta%3zSzLuTjubc11aD@eKBVGGp;nqgaKstGpd6JauGmA;9)=v z-op4dX34yyZhl6j`b>tjK7;BN`!m)^+&rS#T{FW~&(9cc9wc_TfInk?Q|u@WcW|CP z^VSyL#5@hD4%deQtOy{m`HvmvV>eC*kJZ6{Izp)GA?;FrfxSHndWTrYS!`A=Lr;82 zJKR8Y0i#zOz#J@JuI$&hu;bzeSyFAWf+Df(}5dsfK zl|a~t?y$ZbcrHH+lL_oVb`;$@?!1Q}c_;KIgdKps-DmIUDas05)&_pxyqw zX);yvt*v2GRMbngw5$py3>fci$j79T^*WyUV1P3KFqh#8V=ILXjNUEdV)h5<^w;S_ zpy{q2la+%Hi>fo9oCB0q)fe=fY)0+u>e<eU^2^wO2c>H*rc>Lo2`#9Y-ObhNA zrUw5!OnLViCv(!OjFYNn05D!4JWf1D8R!a6YAMJWPwELgc~Xzz$@4K7-+3Vn!;=-7 zm-N3I#xDQ!apcB1P6sRS<1mIpF@}t*Y<_x}8vSeuZsWP7>nq40$8j#Z4400cxP`TF z)24R6iQNXpIs$hXKQ252h70^K7O)nL_j*TZ-ex(hE6XKp{tH9iILA4id3fes*+0Jj z1as%g!sNjBk(bdh-NSftIR&Jj^97EFT=c;#Er$ARkzv{^i8Of&JcSk~(z-1;Px4G6 z?cO32wd_Q?x&?l~r@o|SAIoV$(B)$7n_#a-s^pP<#B$|>5$1S%|)anxcfJeJ-{pw3%S=G6qs+zM;o 
z(ga$wRfaa>_B4R3&L|cZZ~{BA)I9Ar`4WMXG1_0G9{^3;k4>Ri@b@2%(B-YNbCvcU zER=cLLuPx8wbt&YnkJBc5x}j7_abz0-Vu7XNH$Bmf+kJP-p%v+kN9oBbwTGXyd1vy zyoFma>Yw4V504aBhHp!{yQDy`aaAlFE^9^;=+M5n$J)^Yt_u5sj2b4XK6>GiVpw|d zsD(ShmhEzV+KC2d{uc|!cr*;uF)bWd8R(m5b@K^T;5D-oU{HNoHK?mM&3CJM!+Z+0 zlAo1Q{S8l#g@Ze3S~x1*7LLp4aSe2ZEgZw&LSxV~djWJ7*%?=%A~Y==DtWb~v&Aw# z(q5=2x=TX5E&A|*E%qEzvh0H5x5!{e?@%CgrMza^v z{>gejuj%;4HSoq2U>@+Hr0uf0`USPzE}!x0inAA){WLX`9+?jx?X%nE#6Sk(2>`K= z)q1M%AAk}6f--i<|DaUJPFd7ue}+wEFR1Nxx92cnV2c0CA#@C^Hg>?0GKe0(U}QL* zv0XJs#)bW@4pY!Bxu@3`83?!7%R%kGT zSYQU>wp7%~8u7Q$UR7fRP8#l(O{{Mrne7n41Up1h@*}?&m3@(+s5{;SSzwcf{Q&m# zeK0oI&DwO#FUwl2L&s5q(%o{VbqvZ>QS(>Zhi|U3`>6gZuMn4pzc+GK(eS^7SC5AO zICKL0TpIqicj^lJT-tpY7T&&CS1%BP@^cv#9LRYvYHZe2At#jM8SU6dJ3g1OHsSur z+YEP~PrUH3D(>{S23m!-sIp7^_ZTA>~n4+JrL5S9pT;iH{UDkHUkVed#{u*96ok6IKRI%XA8qmui+%-1Hp*#3q>d1DkOvq3LRv+}9GcaiPtK@0m)?9ZU zuB7*Wt0dO(hsJy<_c3cwJ1p4?dOO}zAtEwQ+U~AC7MJg=q{jQ@CYWSk?Tz3s)T**R;`` z19B#?C9*uGlC*=enFabYYIRVqG=CgFC~fACKM!K8`BRrea$Eq{yBi$gXVsOLx*~W5 zW7ubhsQr2bekwR{7yhM;>dIHj@f4v;Ef}1%k(B$TbYk^4 zI|?7oku>_K?B5c8I_z`}0V1nBU8ro}PsU;k_sxef-vZJUuUj1ndNe9Y@@0&$njVt* zm2~~6jEc+Z$>m+b2769MnG^|O6;U=4^{`X>#GcgtnCzDK!x&Sn&uV}+TJSdxT!JSr zX!a$7<_a3MRBIIa##D_R!&&8V+mng!U1gj2+3Z?mjX*fZNP zAkZLAb@$rSW9~iodQ)R2REoOOU7mgCN29am&_iDb9mcTE0Iw@%+nye$wy50dKJ|8PJxoD^N@b*W@-XF=%22O8H@2FA zB!42ulUg+GES0UTA0Vv~^HrCkPJuUGl}iIpfpJ}!ON&l{zm9$sVW!})`e`mT{tC`p zyDGQR;;-Z?FhYr^!Bg+grEaHXbG*(uEuV=@hqEs>)MAh^J{ruuT{#*D|Jg5&yn*&Hjr2bCx+rU#foQ3tpAk>bUzBC8y5?5Ml)sP2 z0&k^;Pl#9=yvZtY<=^T#q6rPU0tU15ZMQj&3qbjo z{a=9Vug1~4m*ro=yuJ6M{NIInd)FQ2jU6L8|Ccat1y|&2s)O49BndI{N`I1>*0aC4 zP_It>KywXh>f`3qqV{K^0CH-FQb@)0>A;eq54lYY=CS>^ugZ3TnHdF$ljjn39{UNX zhXr+v8l8~=diqdEI7@C?Ygree74zT%wYw(IHa%Fs@ZS4+P1SLVp7Q(=EXHW6rIx<&&zc_#KS(}OwB&L!fhQf-?_{}o()kj6KUK9GYySAhas_{Vy0-EHUV9C#jh3#NRCf;uKh)>w^+h{-RgYo z4+(#t=ekh8>vE>0<1bt3$aUGEh7wX#*7Zh|diY0##ipfkMsw=;r}WjXU8DU8UZFJc zPia_Q_+_he)}ONBozR8V>0ssmFKcS{4bPhTawDQ(7T=JkTnNAM=Kp3*(Hr5o+VsjC 
zQJa<4$`p(XZ;X@8r^S4l*mc8&U3_e#X6LHd_4O^;u13(XJJ@Aeo?^sX2Dx#{((31} z%cmQonijL!gfdPN6wI%{n*jOv;;c!bl(+c<+b+ZlAZhOuAU zcEwrfW2|;>0{{Oa85SuG2{3-BLZkn;=--D7e`n`FqqC~TPNYRaMyNh*qN^($hRd7w z1Q}D*e$*z|2$uaYmG+&w%kN}xeZ<085^S8q9S4(Z8zb;2t!*?{`;vc%@d=9T4>2Ca zTX?9^&nU*$xdB!<$__Q2w)8a{2xFy(mouS`@r351!1TvF--|L9;65^H7Gp&9IQ;F_(};}1OxqzwzSpoa_nB$ObEDj8uOsbq zGwpO(i`24^Z{Lnj5XS+o5>Mt zL>fqB{i&%4S=MeNbV9L4eE3#WaTLw7Ki#B@(6hUV>RB}1H-aw58kySgZ>e7#x)|X@ z^WuyimVj@zQq9e{aSvD9H1&E50AlP)m=i{blA!`2yXzS-;U=QrfHdrm1gW4#c7Hut z0d>b1=d<;UW@`5xXg=0)Lt|0e0`ZxJffA%?4uRiR(DHLo%W9@tmT80jAapi3d1`}0 z@R#giG-IrOU^64fcCjqK6g^Hb?BTzET~v1X5)0c7{qP7?JyKI$$|3_BX1u$P z`ZaA!FzVZOp)qdsF=;>B2NkjshP_G^KzbP!pG2=53z<2Z-|7MG{So_S%q3)UBz} zyY8V{g=JoYhk@+66mfyRg*eg5-aF-bDs5?ML}<@eqtd2ExHhL6U2kf{`T17xp*J4e zNPXHRn_8V3r5FLBUtt_~Vy9)*-fpyH)`*Uz1<{ZcBT9Rzg65NO{%t5b|wibWO`eG`)KG!e-w7*BQ~HvZeyj<6-C@1EYAc8h(KfPWeJk=D?5r zP#g0mA0yAPN=xwvw6d8Ifop5G@M9XCY-SwSe%?YGnj6WsWY}5Q4~}oB@rl&4W(Cd4 zos99yj?2#F@gG1pnj6vTaOEU2o{b6Alo=N?0OB_~ z!i0kV&!oXEj3(;iv=pVZ&K0z-g|XJ_S+I#GYAab!P_LFofcGL)#@kk{3L4+i2&%i{ zq6_XcHWFhnRjiFDd>>zPd|n>C*3$UV?<5CCbaYmao6k<_QF0`!;j|;w=&NZXD72OF z03xS!Yh|oe2UlS551|U)`7Z*trG`r!w_sVuIfm<7&+vPU`WquV^2LBWDUP|i9mCio z1k(-ifqix_^LGp@3tB8YrlYPE^w5Jwq}ts%`avU9)%usyyf#MTpis!a+@mN^xV`Yp zRiOHd4W3u-9c^AhtsIBp~|Acbxs#0p5%ZEQPOLq$9{N+1e#r)R>{#uXXu-^wbh&guwM}_T;fu>?idkAMJ z9EjwXrq~c}IkAIr)$2G0BC=~#RyM_UG>$i}>1QuL1%-qHH>q^Q_4*#F`&}8in&O+U z*Ytz77-@>{eDk*otNGj;l-$V(@;(5Xwjns5uP*M<$%ylx3B>yz^U5^lYt%RKhI1bB zSxh(xt3y0q>|f7I&jK9lyBfo+CzxZSz=w>-penYP6EI!OZs%VwM9)%?71?qhGWJ_f zn%R1FGoHz0P~rLb(D#>h^~x=+19mlY*}AJPS$`%9W3Y*2y3|+RX|Zk04C3g zGZ>rv*}03gT1*sw-$+_-<6#TxXIo_uW%f4e*X_z1peq2eIS`O}xTDOj1o+YV-bS5j z5bOA~diO>EWp4~F8Y~{6Rj1#38}0HkFc!cuQ|Y_dtg1Y_8QM(bDxjBbfTxYe6jv&; zsKxA+0^MtRW7pVi=N!~xoER#h}U?QV=y>ipo%{6@8ABzPu?{64CqXRKbsX~ zy_BAf$mTiLD91GMO?ocOn_1T7wC^z^qM7xiE5D*VnaijBri1I`4JXuu4Lq~EZHAB2 zS&8|107dmNqM{DKddh8and*Of)g+AKIuzSqzAQd@F{=JYqES zIo3^>>^8(qkJ7$3JwDLrAClzn!n(#dfXw|ks1}^ytV5_|pwZRR38>*x@|@m!+Ps;U 
zia)Z{qCQU;9Yg$)#<=$jaL;vQ+_f`zr4OGlf@|zoT&j&76`JZt=Pv6F1Jkr((E6ig z?Bjz83`6{k?WcI#j?x^bsm&npovv#t8?l$C3P*UpJd&eZPRoqJ*hk)WSkqA)#!%mu z$j8oV)99x`#-P7y2k%ibdScQ2pIl@)LX61$r;9AR-0vbw+u!wsb@#5GY#Z{=J^4Sl zjmt+U7#V2(5GSAVX3RVVx?iN2enxfUKY@lG&Yw9_6uMiYjs+J%*?{l zsA%A(OK=HK^5gTS-9L0KVr7PkOEI_FjoJt(a?EbTsabTxZq!#ZD8XUWc^GR3h7*fg zqZ88__EoTDF=0yMmOy}0@Qeb^vbynf21FysWH!iV$_%LvkTB$cz;QU`?U^aG*a5Ms z18r~^sru^rfCzyxt9w8k3=pYdbI7E62esA%WgvigXlGl+*ZTH~Z}fc?Kk6kFSMyR0g*DkvfXE_hvL zAYl!w=*{TBB#4NJ8bm4a%BHAz-S1U#111mx!yefc3@9Y*1Y{>9|NC~&K;-lL{^$Sm zJd^IO_0*|Tr_MQbs;cya7Cd!Q-riF@7{^3&sLoWWOmEjn9S(ks6meN7<&juuG`V7R2>|U_HE0h!HH-2qsP6B=D!?e~Yx{ z-9v$ofuLWq(zL3acJ`{A+w42Rk^VT%6IPmP>51w6QZd?xW&Ww1%T5(GZzIj-&CtmC zre1W5KLNCRR42_ns-xzym{0zPDEWA`M!HAAZv@|LB$Y^k&DxxuOR#C_wW@%80T&~< ziqYC_h(s^X!H%;$Tz8~wKo~LiD<;5wvS64!!T1p^qU-0c@~e19CmDB-sHc`!%5h~{ zT~RI%XTp^=|A?kzXUd#MaoyFAQmnSu!gkpS*o!nw79Y`4#WLA_wLRg{7XW1y*r`LKTNl29go5YKvUQ0b|HWUxta+bP;AOaz zxKVO1H0iCTt4ioS%$(&&N!&3tx#$dRXr%Kn34(_%n{gxt#03!dPsG6iL`&xr?vtr_ z+8{MV%ASg{NeP1x?na)Uq3+|^L2Sbd{asE)3Qe&J9gRZi495!35NuS&<`e?-$K~cM zOhd`?XqMipMPnd>MSgA-DLyqz6lRG8&lAm@I4cn)c9oq+z`}LFyi%)4Hx*a6 zIGn!)G%J_8#ka;c2Z`c)P8)%NvQ=<9`zfd6=tRnbJs*|4aT5r=@liL#q6Rsbj zr^Qn1$gTpoVQYhcjL8d3vemfh(Vmf7yv^VRe z#uF`6$;oz9?=Httcr0dUsR%i z9nyzEAWj8*Fo2APF~?%4H70^GwC&*aaPvwCJo-kAG7iOv?3$}0{ z87rhPGFAb`dQ6i)jMf{}X%8q2KtVGT388;?mZgL)%>Ak)p+*GqYfWYUF?xslqfDH; zef?3>!~68$ldz@qj_#dycc#mfi&#P*jF2c0ZHjmVCtCM{lT}ZxpkieSd0!RI=V2cx z;3of+#bfkT_4qV-VT^vSnmRMeb_ak{=SPB6tcd$&HIrN5dfOoji!H{bOAo{^a9 zON|TC=X>WUaMP?!mmR>$}e=F8KTC`FsrCaz6Wly#e^UBfUScaPWO zZY4^9qtLjDbg?*ku{=FqPrdb$b_vYIHfoP%8?{@rN!J9u&ON{e_R_5e8jqo1_PL26 z^f~8V57>S6x%Mv1wcT(_Q46hw$=#f=PEb~9B=aWd?bNnHj?2^=Q1z%Se~Wk2P!Cnf z&olM94GvW`5+r71YlQJiO+lZa z7vs;$;)#000Eb5|8u?nG&_LLO+h7o}(SM@xWKtw{GAs}Fzvi29QK7SuxI3)4A&7e! 
zaW&C%HaLrsM$yeT67c|9Lr%Aw98}!mMU=wL^h4D7u zOQ64LS`#n$kw=8$a%O;3(nIM59*{;ZNF!B{GYs9Ni(#9hMCK+3xP)B{-=v8Vn`~m_ zCRIFwp3_6qw5ZSvnj9mZurmH}euU;myQOqnAP30+%(Bgqp*cah-+`h|_k( z5)t1RMD1CxiUFWCA{17gA&Mc}KCL$mWP~wv9<3pEI9W^pBjoI?ELL4AhCWzWaVpqa zgw+qg9ACur-V@r_n2Pycm9rgU;STtqZUA@K1FWsnZ27kMwJAy;?B>|Hn}ZF5(>6*8 zE`qa~qYyMMy19dkA-=^Rvp~ieumK9SZy}6)RM8zFRkk0eJGC|^vggXU$$G6objazw z#g}{$)@&fKvdRcK`^Nb6yuR_bpvOw9ZV5Su)bQyTdj=3)=b|ADq>IYy6JG=I$Es?C zoDUklD8%cCuX|A!I^rF9_rx2BKV_Du`y7amLVUf8Q6dWQ_Po2}b;O?yROumS_b8to z2^>g}auH@JB+&AD$44Q-g{r8Kv$MmeA>NMo`WJC0IO1)2z2Y5+zXHm3wz2zch}RHL z?m|{pX5{Mg>})#wwIOQ`Kr5ioJzL&VgYVE^3f02hW(kRs{%VLB}Xr;fwWk}bX6do z7OQskWg;;z*9d`$Il(eUr`hK8z&GgKrTRcj-VGXW__&*uckf+TY62xfF)$HblHYsv zcC}h$Knl9)@0fJu=&kB}0x{A2v1m@+hKCloYZ zyW|$=vD{(*{Pj~x#ko88;bP!xc(RJBs;4(A!AYC>zCZgJ`>&V~_hoY*usTQmSQU2U z#n6#se0r;-$#y3%L?LnHG>_sbRLqtwzw%*0uR7q_a=%aiN39z{ZqaX*qO=3)bd>i$ zhk5_7#yOpvjxJ240GH}Z0lQ|)av`+j6$0~d1xcf@@dl>PohD0i1!ISUbBqM9+yXW? z9qbIJismmwe5%(HjeBBEJ5nwW%QMAe=m8%fpMp)fHq-%8d5$8lJqOLCsgs1Hm%+{?J%e z=*M7!MSiAdS(hrX8z1n!fpipJ-CiN?!n;|Xa@aqK+Xk3$Q>9y+gJFXWV50s&jFm_+ zZ_CE+$l%jRiw>9r04;?TPgNIvqoEU0pE!&`SZ)?B#4Op}j|IQZ((BjjMdt(eT>83< zaV2N_b!UKeu7|hBo@-V^@^5NLA-?0o4dGpQ=P@{J-EHxQGe>GR*o(JMRRjeqJ7I!8 zko{`*Lrk+XbF|sHsc6G1X6IU+cpk8Q0aj1?*h!rTbUK^Re!iLgkzrxA7w!k$2wf#*N)L>m6dF)g99hyX}vl5_?16b+^?zw92+ zo#`-m@!$zs$e)4uGKo4IMDu6yG{qs{GwEYFBY;-4MyU!d?vO8{rx1cWr=$Wg)Zf1B zPOs-Xrf_wG8f?lI#AlUXbxog8xNyRjJWX$j0LBCa#!g z>TPaLf09?&Rxsy?pWzp1>%J`P5(oSdBY4y9^0^oEfi>?$8zX*>P37JfQ2g-ivc`0H z2etB>Dtetb%)(T_J!r&K6wy3g4xFyfQ@8vqk4@Jb;3O8)=sokoO4FaO;C~psskKIo z-u_~1t$Oy0x*k_=yXDHliI>vc>&riQQE&TypCGBfr2qdpL9+LCS6EPk{N*Lx-Monl zZfB_Vw9|6#SCrr;N*KRqx=b|D(VrW0T6Hn;P*?Vtsn_nZdw}y0{rWp0_L193R2jaS zAS!-0$51(o(iB*AV-TibW`*h(5+I%wmwaO;95dE9<+n5SS+^b-WSKi0$aAu4#qtMU z*4qoUWT2ceOCP6>amfp_^jYeqfpXd_dI$B5f%1n}^iowHB;T8@Pis~-&~h@#!mRYK zFxzloMj;jtAdXM7JX>w`Wn_P|Lc?wzo!4YZexP^J-5tapAvL@A_5nO z&YtF!bzjHoYD>ghXUUoSn)H1AZTR5H_wx18>gI+rW)56b4nHVc&e7Yp-SZ&!zWG(C 
z!$NciN4Tva$B`lig6UG+>!x*^%tL!4oT0Fxd}9v$oPKu8D|7UgkI&;6$80elPXf-} z3!|1=jd%)AGVUoMqIK=mIG~kjh9I2z)`H%-$U`3S_=Z&_Jy9>e4*`~L+DaUM&#m)_;pHaaZ`b%nut@7TvaJM>pRz5dZf6VsB<`p*%5v@IKt3{(5 z&;XjvSow=fWz3ssK!Qs?`=;LZj+M5RC2Qd?f>3zRxE(w)w=0XvDV1 z{$1D594yJET~7|4uRGOW+skL?>kqfR)`8V=M7Vrhg#6GfP809(3AXc8}rjXyTitHOLGK+7wzu$3TK+LE7c?($dzOmX z^^mstwCulF&oI^~+!_M=mUP7kcwa8o)5I|O&0@V_^7GJsNIelgMIJR=HP1OsGx!xL-c^p58LCHr7Eg zN9Wf7bmR(W@h$Tow5K@ubyUdT-qR1s-ll!0&;~Dqna*9{#*A;?Gyb?we8dLolkVP=qp!zfL{o$sV6#UU+qq-14dJPAmek3h(clxyc)1jOXc9*i!H{ue^g9_A?Z8^n_gb zneMh7Sr>L9J@BKRtiBMEwLjO}s+H^Iy`N)Nx;9CU`&@rbEuJJRKS$3VlXbp;8SU`B zvi}!=VxOG&h5j$~)%7ywOMSR@eEov`@<1-EWcgp}daYT>t4dY^ajED{@_-rmp2_u& zzyW~ABi17~ne!B82Mkm;FWgNrbbluJ*k7kn;zMq`?;;c=?r*S*C^+OO7!fMmROy5A zRy~Vpz5=7yAt-8GYR%<6+untCxA$E;j~G#M{89HUu#_{bPY2Q};qd2BMs4L{^=g2& zqkTBDvS3Od5O_q~D?)fNfT1Y33(rGHOIGJZVHaUls0lh@*c~`t)6)EWQ`#dxq$M7o8E&EyP_uPY08HHu7MFP&=;nkeD|%~nmV5uB*Z#ldvt|GN|0SRG z{EK|{+}C;+_4zvT+pqO|@9ehDa<<^&R>avNSE!-%==8K@q9Ybo(y*|Ss=0xZ`i*R% zienT+o#HoaEQyi_ztLOEvES$kEgr1{=F>(E@~k?gvl_e&Pqvn#=`u)a!(iwwTl<%- z1i^2x9#_jO^7QZEMI;cnr)pUXsC;$DeKJ=jlS65(( zhznJU_~Ew2dnmZaf{@6u;5TYsFYjLkM=xw7A}lZ0#Bq{%zJ%)&&`t=9Q`QOG&%(jM>0ym#A`>`cXmM%QDHxI!(mUjlzfD5b7#VF zDovZCVX+Gbg!x1rB_Gq@4l-0QSIDh4405~p*V~YYOvkEjH5{58P;S@aR$JwVE&&ub ze2GtkwInY;_#HNM+@xJi|3Po_KOJEr7ng&E*Z-h5j*H<@@3FCd#REr180_Pu_)$;l zVpO|#?sHVtlXeeCp5E|%iU%>WXk?@@FU@-^4DB)68N5TCc)$7yhVc*d8gY z&S=lBl~;bm?ve3pWyVi+(e_N*t>(dk&$j>4@{Y&#jB}e6*tMt3XP&s84{Kzl2 zkPBC#!UhhxVU^yc?ufz_`z_8j%ujvAgWL;&d2gX?5Y*pmUQX@@<@&VA1#Q-|nsA5X zWI|r#xx2PG?O6-AGO+;4`{Ss5Zm@Q;>{p;C#(6+3i{@Lrnp|qIoLB&Z*iiX#fnIOS zGU)5$k=2Tw~EI1)R0fYtxp=6|41n z_BYDl4<=pRv9q;&HCpvSEzIZ>wy4Q+{UlrTE#weIXn)@773R&MdVL_2?z=K^jXvA{ zD?DEF;@8sBgu^3vS}e4+I!0< z3&Heqv*bH?+R~!ZWK|(911m4owOif?$fU*8s8oi&)z+-83Kq>_7(b)_)Ra+fk%|?d zBM_?6!ZjTM5S)@JQ#b4_i`ME5YP5=K6(EPmS|d-bg-3gD*`NrE>Ahu#BE4(Fe^d}x zijVUHrRm{lFT@^mU8;3dYdN<_Zz1-`f+F2nyVt6cuCR>|B^L8^iRTN#lpniJPYKil z%$Lpafg;0y$1gO3DdB*|Z6-YNfP3qC+QDyNocqz6!J4_{!RW>1C8fXHgJl(3aaonN 
zx$LSIDm!4`Pi8Mi9{`dgzPNSf1%L=W8Nf%~Hj3=*8V>x)pj5 zUQ5NYTTjt1?l@A#Lexl6EpMwHazRg#RKJ&c!`tdDB&JtYKZ(P@OJbg)mpNK1Ec?7`mHTW0j-+BbZ+~H5^58&)pJ{i_&~ifbu0YCVC1$ zfDzZGF5dah3Pnjn89;pK9L}4@ArRZo)l%X=Pg;EFCoT1ly%s3(1&J3>DUyARj%s=f;RQP8&O&S>$)}5^eDEW~Fmf-8WR`m3#RZ!U0Q!7M^G{h*6PdX>lGaOHae(0jAPSubHZoU!Et z!8xr3Zx>WYNQ@A7j-75K76T|q)A6^#P8^{MrVsFthdPba(9&xW5!UG=v?)M&V_?Mr z2bAgP7Pk6mqGDod2{6SpL?bbGG)3{vLYjYt!;fOc{p~5=*04A{dOC@@7bbOJl? z1A||oF6M{DBu31CasET>V*xFEAbDFo+y;pZ!m;EDsvYc<`kB_p!o*7jtDD5}N-k)~M(L=z$j*6^bA($Y4BI zE)gH_m%@roY#pjxWgcqy9TIRp`y8~%gcq!Y{(VTdH|wO9mU&S-iH4EBs<1U-a1Cmc zsXlr#65BH&ew7m#EDZ{;Mbk4OnsIWP2s)gUX%jDJ!FC*{&au~#yEf@Do!j6Zy-TQv z^$}<`?#(b9v%+vhC}u1;FyZ|K6!VR2vKgKSy)bpot~jRsl66$$Zih&x9|MR-0qRvq zliXyWC{8$KTDTqx7DN9K4QyqpAM7&BFTvi5*{3=1mi-nkqT>O!4LB!FM9O799m8qa zHPJ_?BVcyiNdy=OAkQJSiJT?&V>m!P1-w!5p451=4v8ZI0H-9j_bMt|tSv^HE&jzJ zwiTi{JCtdOYv%nD{Ubm(=Gtzw;{O!=p(?6ReBTjfE-=d_l~3&g8}ltf1ZRN{(6UIY zzOPzj&I0dO3;SB;ZP61R`~p#A{7b4u{TpGk5tdUe>{Tp7F9q&N8!6#Fxs3L(zX&Ud zs{#7#9(j0+{zTvkymGE$<X7F!*ort<+BXG8Cu1tRb6v8v<<2!)s? 
z_8B%GVeC8D5L=Y%hDvB#qh*H?y z1v%O@zF9!e2Za>6!mfxDG21O!UDO&2Z6Fny80LDS0S4b+u_G zpyRdoB4J^d3jZ~^GFBfK@F4yu(&kj0)M{g}b4(K855TYLp)rv$SrdJxpzp4L>>h;= zWrPf`;rmm3a}Fk1GJYApNrqQIlKV+jVM>hjWs@)!9>{VA%)zq^q{TtcLYH9RGob=b z#l}oV;_8GJT4@m%fvnD830xE~sDp?;%!)8q+kBP*zL#$^l(*YZO9bYskaw}kaPGRn zaRvcWCT8UwfR#wg*)=s$cr8%&Xw0`3a7WD%|B!66Lr-YKhK@v|DLZhCQ>LK&@F9>u z*o~Dp$q(}2Mm;G|>sl39f+9Xx1)V%xb1Z6($UfDZ9lp+ye9f?7puDM`D7Wo|zH%o%;msOLs-|Y?wR_WB0^>@P zVC-FpW0(h&eh=c!@HXMJH9h67so5ICauua7)d>8BegPB5iduP_%+>b4Kn6PkoJ)dR ztI2R!y_I7+M;UU$F1>YZ5aW-v71j)vW7X9zzuX0%igoz1L9?ZAe?<1Dy5ZRhgJd`e3hIt2ma|khwu%m!yQ<`|tY7Ryvi90}sCkn{jWGc=*+gW& zgKsvVA+>QB1R7Xcd421pnnmDl5IQXa!(%OrfO*Kl%-wpT_7#pulHD#i64mE3%6?CCd=b zuvEMg3IS;T$aKk#d-Pgr+YDaZ=(bK7cwW^5G*t6|dY+xYBM6HUZF6`8A-F}54w8-G zS?mn1NuVDgX-gfMTFM!Nm2ThZ<9VZDYDG*>=nys@f;y=3(zg=KR@1p*U%ufhx;4YNBA4sNBR}VD8F#b!A?i^C+!iGL!rF)6qT5d4ndQj z#e!kcrG|bM8v1N7*0ZLDt{jTJh74g3!=RW74SnGs{z@nv6+%?}B)9C->(*W-lAv&F zXs}HqhN)zKcR*Gmu72EImSRnIRR)dlUgV2LSQ!9WCfo0aQu6N~t*K%}<{5h`vY){q zVVffZU5d;)M6Y3_k$oc;ZCQxrCeTVsO~< zt#D|+jQ1<}U5#?1uHM;|0efC{m3>f_~9dcBSJM1_xzSMDH z+!4pcaYr4O#vOCC0wP%`Z82(2vUVKpXbPZ)qe3N%GB)t#tD||cPou1Ily$~2Ec>h@ zFZ)l&wCr=(MssqDqMX{o{2U#_v)i502EJwuA9@rsFlNPHW$Gj7eW4ujb9O%h=f&P~ z&To1`|2IHSH>>@(AdE!Ce&~K7qW0fl6s2LcpTt0CdlCbyJoS@2`ua9|FI981wmLv{Q9yb8JoQ^U0qz$W6E$>FrHnx#?83ayR_6Vm}eR z4Qoj&!c2wrlJ;0P6=UvbVw&;x#Q{)wslN!+y!3B?xCP;KY9|1!crE~X697eki2yPJ zfDi~aGSa|h0HhRn{2YMLZH93d!+=tT<049FW&j(TVS%xPH@E4lZ~&(wGjTWnyY}w> z1@<2Ph4!BQMfP6)Tt{zz9`;*XSOmV}e+BPnOKL&Ai$Wz7r6Tzi^+Q$xML3rfS*qvAVn@QG6J33l&fgb{zuiG6`6V#P!jl!`H#HMGMRC#35c z=%AzDuh>sDG721)0{a%aH^V!z649xt6<3$_$0MFP!RRof=k9BilO%>Cfc~YNAR|uvD=m$))_YJ8 z%y4L@i||hG{ik`1r<`_3Pwr^L>Z>MFA(N@|I~9xpk;P)ahPt;?K_=es;QN_Q1+(x@ z@js8xgPjWQmc@tkdg;_28Z*huoeG8_>Tzlk&=%kVveT6B>jYzG$+*LMLZCUKDMt#3 z&Sn%0>yv;tI^-INb54a(88V2NsmydK!EL`_@-la{c%L=Z0;&+HNJnw}5VwC_wgqb= zeQi`p6@lskE-SO|1RAK!z{0w%ns-klaSvoI^{Y+=Jy3}HeW!vhe16=ipa`&1_Sb;e zEe^TmFud}1Rmf9^^}4YbuDmLwn7YPSNN4UN)wr^0Dn0d=;q4=8erWUWh(C&NmSsa( 
zd!@HjPpHq=AGt1!B)gDgf3+mnx2im@WchDp`hq|aF-sLkl&$Ya18fLr8u17gI%Ur zfo5=<*(<8pGL+##e;tIc2OB245lCNHyYN}zJ*@FcOP6<#)-7|lc6=rWp(=Vo?k((v z1a$nNn92+kd|N-m#coY>f7ud5p>ABa#+O2Vr_fJe@D?g}!TH4W`iqIK;aOu}gH6+` zcG4L`0QAC`!q-$NI7pgDo%2>q+DOx)H>ze$^eUmcwR{-%nc}$<&Det{Yxo%@vm8?l zMyDe)_P#f1=_ZuJDNf2b>N9O^RklP5yyJ|K%3tFZdFM~bVVmHE(O+e zQO7pla}2ToaL3g$HEifPgIzt$PwBFH{_1K2+GrP#qi<#G5XWMU+FmBQ?eQ zPfcJuh4ALcSB~j<@HCJy$Mp_1mv^xgUQ*g*`QUMQ!M@Q&{&F1bldH4ze66eKEH;VZh;sS>s=5XI1Af7}JSW~U3Nqmh#Ts(!E_g}_yFTVM$ znuiN*Oi}VHzyedgrOg&5Q15PSL*jU(Lz638*Ku{txM?icuL2wU{sLvv(8!CEdM*&JCl*FIj7%9 zxwI89On?AW5#A`ek^g{MZd!;ICg#ODYp9Xd$bYt~3P<_;bXNDMp>Je^KXF!#Prm%8 zKBDg9fa<_Cp5_FOD+P-qPXXm}Vpp8! zad-N-Z_^*r4?Fn+i z1zl5*SIB2Cz(4X}gmr zT^vRNb2aNENncID6c$Ekb%q+W2mdmeq0Corp3IP@#Yog=PG(>MB(il+T9esfz`PUR z9QMDXj_Q*c@`>uRS+gf1wf$Dpn$B4ASd?r|XP72g!%~#Y*6o8`!<^30x|^pn+w?^x z%=;glDtgtHiXNT4O;kftpIfO`BvyVqY}AL?IiALd_U&-53#RDsY7xi6jlamO`h+>F){>^fb-G6L5jF?2Y_q)*p z?AwATXz<)%(zhjA;9|t}ligU9Cthw8`<Bz!9pst}^Yj0_)k1TwmHc|e! zLcg;HUQ9P;wMt&8(1+gXLMAdwX8f&^kh^?NWN-l*2(!gJ!!2Tca$a)@6=~U1Ht?*` za?Uj3!|+T6lRfNC|?MuC&EZp zFDr7TFz!aF(xDm+w2oF1+(#Wy9rA9~aBIV@pv2|5s(~}%_bba^(~SNio-=Siw1TrC z9bPxrk*Dkcu+uf^a2RcCMrO&}<)23x1BJT2jZ_U|m&4o^Qz?HljJoR3FXR=& zXsRyCTb@zF7%J3^HgeW2#>1-T3;Fvk#?ZR=d|{cN-6)YZy}=O5Y4Z<#E}x7xp8QMO zxK39q`Fh(*qpP)TeeM6NZQoV=?``|4wwzkixRNxq5+^3#)V3!oS1x}&#<*XoajlpC zcB@fCZA|ic8}N~QNH!a4Tja%XDlH6ul|H@Oz7VyRr^g!&tqzI}cMu!#bZz-?g3+}G z(VdDsO=`)73C4$}GDHVHmQj(od|)DaciwP;EjINGXWU9yk=DQ;;wKPUAUWeFux?Cai7HCcLSR?08miTG7MArB*8p2JY5VrF_#Jk8T)RrPI%YnQ$Wl8v* zX^Sy8M$&FU+E>GItL>`;QzSmfx{o2@OGwB9wrX4ba}d4`m1^NGA;X<*rWN8C)?#q; zx|x=|ZwA666?_{8=4*tptc}`6Ke0Oz-z0(Ta3d%{6^QWTW1^=^(?!2URT{G5Hq($c#b^fQh{ddD;34@MolTYzCIW zY*EV0(Xs$}44#?02FM&Gy$MEA&1bGts)(OtQfdGbR`GgN_6Ug;gKsU~UkelNV8q#= z7@UJ)gQFw}ntStLnAw6*GQ)&Y3M>|)roYmor96+XMvvKOoHwjIXZub1g~=7IC5on^ zQhFVO2J^rAAB9=O{baSm{=vdfF^M}|SaZ;j>6Wns=w?}e1;{Gnfof&+Qvx>%8ER!{ z4n&&19}3WE%RG)nKQ7+gSfT8O$&!5xTauqd`x5Ekbeiy@3-aKU`X(wLiuY-F4`8I4 zhdKeX|7Nl4%;$_7|dlc*wObg@N 
z6xpw?;S4;CS;Q2}&_VUWB~s}iS(g$o=zu}t8cMVh|G{vnTqa}bpN5|-HKc|7d-2Wp zz1m(sNxC<_*h)zg|`^(L?hZC7;npaEX2cBB; zMlq*c(!i*4d;w#Vr&4~{&}f-xzE4%eV!B9}>esj^s91G)1QO29<$JQu!?Q=iK*<|xGj#_mbiNFh(YPiX-oN@?up+QvRyb2BPu zRY_GKw@3V56kL_tGalzaM)87!o#uD1_^RCQ@iy=FD60M25vF;!MG5a-6wdj!6ACXz zB|qc01HYa4?Z$5pe!t-NYm@`PbDK8L&+d8^Q~LKdoT6lL5-uXK5$W8zuXq}1j^9?4 zY(piD5fRzmUuUUBJT~cFV@q%`isPme8biXSCydQ^!@^Z2=cgH|E)0cy?suAtC(jRs z?o~Vw?<(H8Q(?Z`k7TivuuX5`h*W{wKbw1$+;TZ!J$B`F8=^<2PmF@{1gx=?$#^H# zvy^}0o!De4&&i4DM!N9^Fp?jodgsVR=|&sZgj=n;pT;wi>sh=0H)?dRps`Mw|nzhM(yR@GmN=v!(6OBjnGDzCIh}_Lfq(&db8N`byRu0u~BEx zEns02u?I9msODLYI7QJXT-Jg}E)LgzNqece;(1YPiFEuH`ip*sQM{vQ$D2MTL(Dt= z5m#+pzBFaAe5#32f8Zua$m!TX@zKT`2aXg)DM9d&;yw_G7+)CO4X$NER5r1+MA9n5 z-@`Y35t`!wxL?DkUoZI3JOl)5Hr-F-sJzA9?3&JD(qY+({c~IYnwXBJnnHl$$UVfE(C8fL z?8YdD;XK_uQ^Up|&l(%E^BlHKpjizAh6l5>*=OzrmUm@n^`&RL>gZ6ZasxBLG$mkY zvx-xMxZFl5meTHrv1mHFtcpu$-t`Vhmh-I}pY}I1>fx%=%Y3YtwVE6MXtRUPxt@)< z3%bA&jNvvdY}dk`*I=m=Beu{{m3w!*lJg-LTYbl#Qm{U`pt;dGW4dYdA=P2A0-1qg zj~7qOg4mhEHKHMx7M94xLQ7V+M#;J@j0S-%7!TMvhRM!zfNdcB18`3f;U?SaLb&0# zR*o^V+uTnvXexa>$%%dhhs{X9EO8C~;e6tp*HSPN$ZjTEWE#%eWS9Dtl_ep>@$8X( zcrp$guZ8&c^Q%gJiUeRllwMwGcFxujkJ;T=P1rKxANCCIy>@nzJkY|ZqoE^f$!jf) z)cc7Sw>L=UYIYbqi)@OW#frHbZ``jdOqTTxrayMbI&EEZHqwoFHgaVQ8)yrC#)D_& z^p-~B8m}Td`}zG#^4pe1kL2d~-hl5%Kjc{D<{Ku^9k;Py9ih(d(-lD<|(t3iq8UU2~eRB_O0I0|dpr&%U?2Sj&Eg(WUP_vk3D`js_59Rk>PUIxk@}Z(3_3QN2<_c4=*FQFkqqHQE?6 zM4_D1#&}7*x4ceUqqk^Y4wqd1udYKD8ThYICGe{%^Sl54m5Ww!YfK1)M}Frs@S>IqKNINBvP2- z{lz@2ItNVS*hY)5Fo4kG+gBp`F(q%-V-lixIB++BxjySwUuL(5)o>D1`-{^X2Cg% zVV{WH;vG|WWDv4(tZI%D+!roB0{!?AH=xgw?K>FF0*@i73o`K&*d4i})XM%4!dUd^ zaM3SASYxpdgwv)l?Qd4vd&6OM5ylZa5~ugfVtkPm-vjYM+*QT=E?<$oE551J;S$*V zr&ajkvhv56p{tdlL%6^MhJCvuBNe=aQXl4zACO(YB zQQ`1Y3RSia2Ye$%CDYB=NPp z+zECWL%KQ}jf|^eF|H(2LJM)xm{G66v!Jyd!-Wv=2(ly#Px(K?QpAYua#m-fX^W*{ zFke8u95tK)vPTH<u-lA;07>bwL4EA z4eW4+n>7O2a8f`QqhqZ{@w$UVgslr>O_t;-&yiEQfF%=E&8La`6bjD`3FOLRn7#!dX()r9UM!ON~j&H z2REeAiegnKG(HCJO@^lq7yU5_?*kAvVBZE$84pyoKCozZLKv+|gQws~VFCz7@KPnL 
z-QngnK%pEcyHOY?_5sK<7^h0C`q@hBM_P9{ZGxO~yOCtCN%-VvW3;61j&SZhSuW8O z_C*ri2p|$ZI(C9w2}d$FAwnF7)Rk*+_(Exy5N_5g&)jY#1f1Kg${wm#*$E_R8m{;# z-qXYHrFgF$e*XjS4Z`mS@a_t~@2~d$3*KYG;k)rJmRPjD1Mg%@kkyPBurI??>R6)APtOmroiwQ0V5(*P@0RK zgz`|%Qvg3017|$tm#sqib*sqnIF17ysUWbsfHEGkpeQ+mrf?d`2;*)bG8y6b2k?F; z#vv)Q6y#5;WWjwAPHBzrcjLV=(sL+G`oOZYbiXE7cQx9ILGpZ8qgf9s7<;Zk0)^m4 zp>1{z3qC3uZe+(S(DD?XH>Qzw_7r@R^f*mT(B;s(jQ;BQ5%NEG8E))6+$R^#4dhr)nYFDeH_g7Ty8aF~d+|3xx6B)W0sr4scTeP!vxQ*W+ny_1R zAwoF5rMpqD?$NCg{B)eM0_dR)%8g`$-jf}=<0_(`m&nJu8!fY9CR3+&2o6kcqH#cg zrQX#Eq{8qLKtfG1qB;Js#0sZWjV!>fR-CWpAUYT;IcZTyI;m?UZkk5_(EhVz+c} z#dU*;Mhyr48>?yMhHzSxhO=xTe5ESR9dRD7mIjYV8q1)1Ln-W=NIpkIplBs|`ON>5U^XVxw0Kj>G%I_EOZsh|rw6~kWLlf)c zmU)Nl)65TeL{T(A^U=@KiJ8g*O&v8Iww}D3!Gh5t43-k=VDIo3 zuvmT5YSp*5x1VweU{weAnVQc`aUsoVdd(87ER}f5`PpvT%y75BYAh+FgCqiMF)DDPHx*X}Q^|HqGzQUhqe0d6{_6v}v>bu>chQMmXv}vaRWP45!GpCXbxc z+em7nqS)~0%GYQfh0e>xJG$o)_|uEQdBTRU1Q|T2#>i7WjsK|MEtc=~GTzYVBAz58 zA1_ujEX!PPW10Rqvw^Zm15WJE(m_NHnbF5+AQs8qeT?zye;3Jb`xqJOnThhZKE^bx zvoF{mc1*v%api?YEXCz3gQmGndh6I5$@Oe+DfDER(*y#{&UUPFUrU`juuUTg@pamylFXa31x zKV!13-GUW!tM=BB1MW9;?dOHoh-GhSGWUMN|JV?xb<)Ejuud+madsIT4&&Yex;!v! 
zM)oNTT}9lFIEJUfWsBiF`JqoZ>==X^hD|_NPaJUC-^hsXj5yNHp>SM#r+lryF;zXe zK*l^^^k|Sa!77g|E*UJ2^q2};s|V`v{R`!|2aE>s=Nnq4#rBZ!Y>1!#u3Y#4)_Xg# zInG;Q4cfOJt1ky0z^d;z3*@N>V96c&u6%9)mWy*2$khXkrtTHvt!WT`7iJyYTz6!_ ziv8<>G1hq)@H_}nZv?`~K7sJ!BZwo*{Dol{AFD8Uo5}14jg+Vn^+!w@UTe5~{XwHe z%xKUdR@-)RI`piR8y+;KsZ-vS{RbKkC(U)tHKl+c41I5aCWZ3GRD{VN2O3RquKKmX zMv|-;Xw;Rj^)qTn=O803FalCISE_74<%Zfy{+sCUAf(Ts>1fH?E4W=b*#MKxEyD#G0lDyDBdI<<=ME1C zt%8|{YAQN;F*1xdleAXHN6FUDs)_%Iq#K7TR~(8mstT4KUOdIL)vaT6o5+WSjMhHW zun!(jndA{WFsG#Y?C3q5k_DSvCPW&nRp4~2h7W44Skp8JTrqJ?4U6MnCgYUY!XZZN z6I^uQ9KmDW#=^)$gB1S9(`kveB*ex$5l)vy8o2gh@X?0Pfp|xKYv6z%ZOKzT^CN=4fASw&2#ste-0MO{=&|Rbbl+|G!3Y&2b9<(O}GQ8c)D3~D4 zWaof($a?X##BfJgs`oX7vvmbpDdNa36kuENjzw6Wndg8RjxZ0?zN$e;G#OXqluB;$ zb-`VRE;EV;OKI@jT%)a_v>L!*fV2#M2IXV$3eW)E$T5Y$)AumMBWoe_+oV+`mteG1 zATMw{6SxBG=v@rM5!VyKMcQo;dH4 zFalWAQE)FVQ0h}$Sc~x-M}a3aCwHW39#fIP`9oNj$?$R3bq`a-V1O+Fgkm!y^ zEAbiLGOG;ur78y2p6Vcve$4L@hj6u!b>|}UHbZZmfW}GPfIT=28`aH&B0ZUhwM5{D z-o#L5-@G5>3+7W?K$ecg8Ms6M66}tnC=3^Dn7}%(qE(u+`Ej!gmH2hAS+h^rgO@Si zhI>$+z9{OXbIx-C0@6hbK*58kNEdFK`*}AqB_r$rz3%$hL^pWgW8)777ncG&=-OZ5 z+_HZL+_piLHg0YJXz1M#nFZ7EA_b-^Mmi4C-6@v)lsvFT(*j)9<}s}#d=7ww;nqfE zV(`nPZif`m`Cyz~=P64Qx8%%@%7MRs8Zb&en5=YbPnfBKHGzG6WHNgKXI7$?QZ2*R z1nhy|-=o3tm5WHDCW`K0lO%TtehTFK{%26-yl z8NUJJntCk@I48SLG#aJ3VDsg&cXlOuJXKLRT*d{2xXOh-*y%3|rC2ULVN0Y_Nbe2k z>uOiH<+p+1O#$7s!jqS7-b|uNo!RPC?o680wzuW1Vec+rn4V<#%8OXxD9c-pF7LJ5H=@7d?q0 z4R{U>C<3Cl0tXv0;z$^f)a4i7Ick~SG>xA&q61@q*I5vH7oZ!+lKut5wYjH{`}Y76 zSypupRC=8Q33Cj8ok$i44ut8GJ4iAxHlX$3wvZc~=9OAzxBJLoI0uf!X`~p@#bRPw zK;+`15|zJ$MC34TbP2cxXryWfSrf!4UxGNW>o52N5xiWrXd7`@6_ynUY#L?&{P|3w z1>H)#kNiLIe-A9oW(T-kTD<1ed@h&fyMfyqe0Kvz@;*q|o4_ww_)}CN&=O9|kE1`v zf@m<=#;--IR#!U3>Zvwi_dDQ92-CP98j>HUDf#W@@b?G!UW;!>_0=xJwjJJMDr1}x}l ze2V}f*?7j&LUj#90jV5Y5&_G{^EM;{=d=CR`Q(yh6^RkO+c^P;N)KFFHOO@ zdmuCgqLuS1Bv3GcU}l;22@JWk`jz2mWk`DQEpBQZTXoc&f7QQ8P;Vh0`=1gN%PPQ- zWn|eAs7fkNWs!jf^Q(< zb68MBVl(Y=Fczj6K*oc)`$PS{%YY5oDZkrLI#IZ7J$s~jsszloeJ{`xe;lMJS^* 
z4GLla+)vR(XehjEJt$DQgvEzAbcmwP!830YD4XwnDim=pR_z-btNK2vgXQy#9&uYh zO!tF8Kf-re=H1$+sj7G!=_sSC-g~vpQ~TN1gC01<9jJKROUle{N-i$cf;a_w>xB9O z<}w1ttJ|BaDbPn1PdCktR+Xk%^8$wN6e0Dc`jp??)*j5y&7o@&JTcvZ<3|NT)vBJ%Hs z)-9-DoIE%W*VlNxurSt=@-?qx+aR303-_}_^W?yx#&GqWWckBTEJhT}lfMl$nzpSR$mjRtDWOR{OE(K4RbRFr~c#GqGf zU<)nZlA|-RzOekbEXp+AP~Vxi{IM*fsZbx7Cua;dTB^@ql-~|V0k_YSyM`NG)BpPx z%{>m^IrKyZ3TQMBmyg2j4LuTiNPEbig$6Cb#oa&GS$41 z25#^fWz-1pAZ8~AK&S>pR7l}FYXK1%8tUuxQqtNPJmR8XgGIf*Ab)-B!f%yEF_){LcQ<^))pILE{t^wU!-NM zjHFS9477x>P>v(Egq1*3$DzyC{K(2(f0KqbbpXeBHD}Gon0{CM!u-b#C!4n84m1WDz`gEIYb%Qj{`uJm%)m38SZ{yKc%2+ zY8nD^w(y?PC~Idx){eCP6sEa|o))|=5C(9m3Dj(>Ft&gy1vWLIP@dz+LQjH^Q5rl6 z3@MkC5N>|;DXp>Wlykz4_dLFzgIjzoxJgaqXFn^kWqw-sIPC>-iaJ$)(o;6Nx)tc} zE;Q@}8kP`%1Hfq%?7+Mh^SUwp&|w4`{5B6et71 z6DdQ`W8Cz@;#7`i6!#s9;{re=+c(TMrHL2^L^c6@%u7TsMH+^u+!VeL;V~$S?Z6ad z4mu5KIKpe86L4x&*XyH>5wQs5=si&C6fb9MQM0n`Q9*p4g^vmw&cTukc5{LM!qoyk}TY~^d=0PN7HV+mZ-nFao4}zNfS=MwqGXgy#2IErF~ENR)zuJs zaF^ZBZa(?-d-Z?+_kaI)pY`ejrsf=;-I78cB`{<(bXSDi^6Mp^l2Y{onVUHI6EZxM z1`i2nFUQ%HnhRvulHj_6Eym57_BI|34(i7OOJ}Uv&2$$WEcDp>0~_eym^A9;bcl8{ znMO0Yi_*w+@q4Pp*(m!Nlj#SNKWfK1ZAvw322y9G(SH7DXkNXPG&fpHJDlLxmudriV=A?&&$)xwaZc9a;75>iFJnk7{v zso!RChd!# z$wYpdE}uUTzv5>f_RO~U)0%_7m8Ori3*i~XbkUT->@^TGtv!Wi_cJA{=Km7 z&Kql9;Cd^?x@N4|t;2(X>y1N<#|JUW+`({08oh%l*3)CnP9aIeiS*9eLw;K3u6*e! 
z&Hk{))aNR`>>l{u_#T>M%r~gQNLLmOOE~mI7n5eM5{nkH7T8SiM~R|zn7X#lz6Wq0 zqvIv42OYq#-Kd^OCT6^LT6V}x`#{s!e&h0u3|&s)rxlDh_}U>0hjpR@-(EG_ z_2JMalPDHXC?ok4SPAlTf;^S@(YTYPnO5C%dg^(xtrAhX9v6`!_5;sXeEA70)+yFU zA@euxnx8AJVe7$oNE)lT$c}AEoOzhT<0a!=?Y%hUit0k>HQnsu&iCY7H;p$hG4jvMe}VUZ zeEJG&{&@4gYbSCnTn>)-TfR1m>?7j4j4$r;H{0J(V;m|MICr1SWl3ygB?J@_{xB_xg7W4tsYuClEnuHBqXxg%UCLVRm> zB7^R>pfg2lf`osx$*|31Um4HKGcVYbhn)*TbghX$-~zrL_GH@ea|42_!uexo^1Yrn@K zO0-7_JwL!jtQop&hBdI_SRyAW;*Zlbg|Nk&F*d#wr(gkJEfAm5jt5dC&U{UMTVZ^PY~8LYsrJIcD;0} z{2r>jq}thBTP3@p;;dr3g;*=!kYZ7d5ODSNrg49ucy6L`>d&c^~6Mygw=r7 z<@iKdnTNM9Z<0KYnQvV_)x5}R8fSW}+^OcT2aRtRY4(5XV^K4h=;wn%dj7eIlLCdj zUiD03DFeR9vDa9Q*PfdMpKPc{j=pMW1*wVjFC-Fg*-_MrCt6=mH3tq&@py9DdW@X2 zJno!xJg%IxJzR7}pvj;;=m0u;f)r#aye!R?@CEYCJMCm9bhHp{wET0eho_mHZ+jRq zk0Ry~#60HFazy_z;0f>~;P|kAw8vKRXjI7wF(r>gmHZgdqG+n4=+KGQ&S_?+KG%_6 zs9?V$Anu0a-ru1wvvZti(*VefX0&H?Aq`{u0MBeW=oaUDp}kPvYU>JaZ4Ql$-C!R5 z+7P?{$jJ?+NjOh&_m4Ds3liD>qS2dqukh*m2mh>WRB~-3jb`VQ8cokXV~u7N{>O3w z#DCRj_WUv0Xv!sguZRvbnsdc$`O0`rOlI8g*t!||8}>6Fl(pRx^!fI4qF# z*bS!WO?#Qjk6H6B5twazsEak0R^!`7x^?vo^U~CfQd(Jb<+Q?|e?_ZvOxsv^B{yYS z)n$ppQht@2VJGNDPn5cmgEH2z*@<1+Mq3>^YzDZ+FBDsMUG3>@SK^yynrW_qldMN) znmxPL;{VA5F7{JL(g3<^TsgI#jpu0vE_>~1qi5@Z5!NR&&5Mk@H)yZQrzd9GGB!?6 zJlA#3B&%PE+4EcpQxfO1+;shVrbg##zAaMae{YQ|;rM!;^>&GQIoEi7S;7$%yWwO} zE1iHS>X@>)mO9Vk zot2HB%X7{KYQlP}?JRTLO`BniPPozwGKkR`6zS_|gjrYIa_<3dAI5_$UC2aW3Ib$8 zZ{%cqHjpny&P|@K*bM|4MCkJaH(8Cd%qx1_gkyGYj*(4#gt;q8E6kEaI^{q{V0RL* z6gY0KRUur*lqGn5iZo3~zWCb)@Z)K3< zKb91`M82b4Z|_BUXK)i86{cIArYHL7wK`ko)n;)e3sm${k61aDdAauyyVs#N@;_qD zv$#Jb&-%_XA8VHf+d1}#f&41SOOSr{u`=_DSUqcRMaoRWvyB`(#Bi2RP)LzpGj4UG z6z9%gRyOX$l-^RysiP>9S^j!@<~-}t+2%v8z2mH%v)S`sJlkxO-W!c&6HbN$_0-zS zFeSs=IT^m)X1>_9A?m4ZJv!TxV-n|>7kWMm{g<%^EG?_IY-@n?ve?Jf4Oleks2?JC(7nC5f`P)61YK}p~Wp8hLR3F zBn~qjhpbi(LVw($4~nDz!J(@-I!mo~0R7_Vw>$JcarEp2-LyUQ#KC@{oQrUitl|Y` zpX+)MnTzx^`;^8tE2s}Lv11|Y2Ns^>OrBKkZj|r*BQAZ?-=z^rFWAQs))@Vw71q%O z=9R9`hFVv?&A!EZL#^Az{Ewm5lVZL$)Or!~;+KX-Pn0CF^3shh?TK#f&l`qF0$;Oh 
zim`K-#(e-%!FwFDcD`+L*f? zDFo!DsC=Kh+*Vn0)D^djZIw3{-h$gy@^Wi-xH-UlTMi9sIaS72%sq?Py7RA!HU-}i z6g^nY+&d(y`+~dhCrwtF{hU^?o#z8G%#?3MYB_5Y#@`<83KXAb*lMSjq0hxb6|ze1CO zI?sOIUbvg*u|{a@u2-ZqGVqn7jdiP|G5CTQNTVcfYnXd>B4*Log(#_{Km(0JkWANd z))|*CB>)n*8b4v&=Hn-Tyej+=IpaV65tX?T_qEVOuJCw}IU$6Hd}8=V2rjg9GMFO6P z+iUodY8O?k+E`Ry(HIiVi=EgCLp2&UB6P8n{J{oEzVw2jB-h5=-G=Pq+#zL!@*(Br zYUrDCzc=)pZ&?uI5Cd-Ck|kOWO(^b{;x6UI&A3ne77b7Zu>s5^E0cmzZu1?z664WVZYFeLOz8V|8OSiNesp zNN2r7!o2ZI!XnWcT;cjU!jOJz7<^)1LwNUK-y7It)CKl4ScxWBuk$PURvPtC8x_3C zTC~XQ+-*PY&lQkG)Xj(zXM>=+ntOsGauniTKHoa9$m|dj^{4RVmZBpW`j_w&5rs6; zSE5tAQX1;0o0c;!A!GZAhC64t7+Oa{6i+9T5|Kvo;@2xqD6MG^j2(a?k_Z>s^iVW*)1A_{kVY+5x}@-k zU^mRM5+;r8-~JXWVS9q@;EAQs3`fRGwhUG=DWM)mhJg$@@gk*EWPFW`Z%6?x#1{^` zqmh)tSU*|D5}%wFmJ%5tmOfU=W_;8tE40aD<+6Yn{|G+WDXCIu2TV2)u+Nt1Zx*_o zaF9UP5U4bnDNcCvsD`0*G?QX0%^))@Mr}B@8nCxW!UQ{D_P)ImX4XN*^TQeI7iFq9@CZMl|hhX+yRZ z?zi2eDJ>n7$m#c{?YD3|Zfi(TDd0yc7%5?wly+6x_K=;@iO~l;8{1%)p|^ObKNPh~ z_HK3T>#%=;eG6T+@FX(?t9JOm+gM@Sh5xUPy$1U~u}cdn{I5?Up&a}C=~=5eW3JUa zWgpK~^E-{tz$B{R#a*_7IP1YG?p_G}#noNnAotj@1*1|8}Rgk&Fn}e;&rDmt} zZ{J#NcUt#smDJf$*oNHd#~4wXNogW~U1S|z$}nd@t<~v0HeO_KD)OXK1dt;&yWPqP zyU&Q#WH&iF5%=%tpW@rjAbutzUuv=(HV`>FZ~uJ3kn7}19Id>8?vgXRgB!^imX+E% zJuycg@c2Y`>*M##ZWl=>CJDj!pX{+EH`L@jk{9qCg; z)A_+siT9U$o1$UGFH=e z7Cs4|WZYvs8~Fc0wj8y+!9PHkYsbH9q0UN#0=B?#v(!B)!qSJz=ph}??;`xOMDf^I zD9iABC6P$`#U2^rM0v~M6F&{PIv*Dgx%5&9 z(Mlg3#6>$?kIvdP13S0f+Dw%}npQQo!j2KwlOaIr%Ihc>sELl7R%zjSp(nGvFS++A z?qzf!!Fl30GIk|?s{(0y9?u*nr?aLm$}({9gXq}kdjgZTwaE^c&%w_Yk88#~kf z0X4)0KGlikS#h$)I`kx)PX5Ox-Xk3cSDXhbk*!pVc5-PR=FtBUN59#jAB>|9a_DNT;`2ZxrweB7^y zn6}vJY^`)J>}y!^SCFN%bv+wwCp+ZXEeR zaCGaQarB5=(8zk^a6-ETSz;|(VqLb%9DcJ5Jq$W2d50G{_G^7P#`(KMWa?2LpRr=6 zoflP#3w6Ffp%v4G|5-247^$);IWDS#@XGF+RwBD@X(ttlKu|BUb*Y*^Z<%E)a>uC_G$JAbW^Xf*n}Jg8gFf=H19m`n>V80t7DUw(}aI8kJA|JC2&w>o>v1hRhO`cORW4QZ0O-26VRXf0b&;nAw z@v1%`3;4kp5C+pgIan6rrwXWlXsUlJWMc0Fvcx=>gRZH%TIfLW91kCEFq@SLDVkKt+5Px9w>4kep|c$lKqZfa`^A) z#VBy_EVo+N3VoB^str)dZmZoC??s`8m2R~d1VNYg-9*`~s+PG`>3eQfx5};he|Q4< 
ze~Q|BV3xpF>bMPY%%-JL_b_JZa<`IaAEwA?!cV#5U!D)HaI3%ycSzk$Ktf2u)KH?@ z@gTIw2T`{^1f~$y1#Dj8R((M8VgdoeC&nlJ!`5Df)=Kmk7pgK`i7O+X9d4ixZ%FenER&;)$Q z@QuSA_<_b>lPHBf0zw);Y62*R2y;1zfF__p4;Nuh#Ix8zIf#HJpyB2N3iv@eGL-Ow zFenER&;+ze@B#(=APmYU86g$H(gd{0Fn};Z{9=M~AR7B5P_at@m`zh~15G?@m_DF@ zA860Q3$&@wfdYOI2IU|Ent(P9_h}EqfW;5Opd2&-Z90xX0Y30?j(HR_ z`2T{d{?~%7sWZISh5C9_Cg=~i-dSmw3ouuJGSJ$djN1-q)u0snZm=0dzyc5izeI1r zEr#%KJgOW-K&Y0V2Jj)x2 zWDIj?4UV}3_hMIqntR#^{ZE+xZ+M&aR&w|p&v9)Rw{pnRqVz6~c{(xD;H^F8IWAB9 zEQ@;M(bJ zaxkqSL4_LJR?yJ!K6)6EytpNcKa082DBV=4G@YP2GyFhGprk)_uhXF4pAa(MVb=& z>gT-=g$|`8sy6KtRS(ddnyAKLP6Nw8Mu$XoH<$(%fFj&$J0_|u%xa$7VDAA+zz*bO zU!A1-Iw-=d2F5i>sy}GypQHl*BsC30Kr_hlCxui}PLj$1T_E%=NK!+LA#^BuS+%MPU#I<@h4FNt;5!{?@SOsoe<3EpcslU!B-ueJSO8K%FR)uTRhD6@Rc=$2g4Mkm7Ha&fj5LqHS7kloNPA#&QvM+Fh6Cg2H*!{c&@_if%!OQ7Uo>c zgJK66zyOP%G*vk`1nNK-j0Cp<1&ZOh{<>?GJJFS(b6DJHns2HuubOKA8>Xs$(^Sh| zGgT_~%|Lq#;kZ}fo)3K?_SWbd_uSg;bnIscQ!&D;=Ud$@?*qEWdfM`yXT4>4(~`Mx ztMy+8>xY-U=gE?QVEGZtyDPyk_IQ=mW}dhI@`3Zc3-va)Geb)~en(Z4pP`nHs^xNC6AA${C zhTd{tuYSJV(Y{{!xX8~X@+0)#zd*n27wDItiXIyH3jshsWdP7m836QC1^~U!sRD+6 z1$6Z> (bit_offset)) & ((1 << (field_len)) - 1)) + +#define SW_REG_SET_BY_FIELD_U32(reg_value, field_value, bit_offset, field_len)\ + do { \ + (reg_value) = \ + (((reg_value) & SW_FIELD_MASK_NOT_U32((bit_offset),(field_len))) \ + | (((field_value) & SW_BIT_MASK_U32(field_len)) << (bit_offset)));\ + } while (0) + +#define SW_FIELD_GET_BY_REG_U32(reg_value, field_value, bit_offset, field_len)\ + do { \ + (field_value) = \ + (((reg_value) >> (bit_offset)) & SW_BIT_MASK_U32(field_len)); \ + } while (0) + +#define SW_SWAP_BITS_U8(x) \ + ((((x)&0x80)>>7) | (((x)&0x40)>>5) | (((x)&0x20)>>3) | (((x)&0x10)>>1) \ + |(((x)&0x1)<<7) | (((x)&0x2)<<5) | (((x)&0x4)<<3) |(((x)&0x8)<<1) ) + + +#define SW_OFFSET_U8_2_U16(byte_offset) ((byte_offset) >> 1) + +#define SW_OFFSET_U16_2_U8(word16_offset) ((word16_offset) << 1) + +#define SW_OFFSET_BIT_2_U8_ALIGN16(bit_offset) (((bit_offset) / 16) * 2) + +#define 
SW_SET_REG_BY_FIELD(reg, field, field_value, reg_value) \ + SW_REG_SET_BY_FIELD_U32(reg_value, field_value, reg##_##field##_BOFFSET, \ + reg##_##field##_BLEN) + +#define SW_GET_FIELD_BY_REG(reg, field, field_value, reg_value) \ + SW_FIELD_GET_BY_REG_U32(reg_value, field_value, reg##_##field##_BOFFSET, \ + reg##_##field##_BLEN) + + /* port bitmap functions */ +#define SW_IS_PBMP_MEMBER(pbm, port) ((pbm & (1 << port)) ? A_TRUE: A_FALSE) +#define SW_IS_PBMP_EQ(pbm0, pbm1) ((pbm0 == pbm1) ? A_TRUE: A_FALSE) + +#define SW_PBMP_AND(pbm0, pbm1) ((pbm0) &= (pbm1)) +#define SW_PBMP_OR(pbm0, pbm1) ((pbm0) |= (pbm1)) +#define SW_IS_PBMP_INCLUDE(pbm0, pbm1) \ + ((pbm1 == SW_PBMP_AND(pbm0, pbm1)) ? A_TRUE: A_FALSE) + +#define SW_PBMP_CLEAR(pbm) ((pbm) = 0) +#define SW_PBMP_ADD_PORT(pbm, port) ((pbm) |= (1U << (port))) +#define SW_PBMP_DEL_PORT(pbm,port) ((pbm) &= ~(1U << (port))) + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _SHARED_FUNC_H */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/common/sw.h b/feeds/ipq807x/qca-ssdk-shell/src/include/common/sw.h new file mode 100755 index 000000000..2793cd205 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/common/sw.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2014,2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#ifndef _SW_H_ +#define _SW_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "sw_config.h" +#include "aos_head.h" +#include "sw_error.h" +#include "shared_func.h" + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _SW_H_ */ + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/common/sw_config.h b/feeds/ipq807x/qca-ssdk-shell/src/include/common/sw_config.h new file mode 100755 index 000000000..7809571fc --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/common/sw_config.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#ifndef _SW_CONFIG_H +#define _SW_CONFIG_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#define SW_MAX_NR_DEV 3 +#define SW_MAX_NR_PORT 16 + +#ifdef HSL_STANDALONG +#define HSL_LOCAL +#else +#define HSL_LOCAL static +#endif + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/common/sw_error.h b/feeds/ipq807x/qca-ssdk-shell/src/include/common/sw_error.h new file mode 100755 index 000000000..e6cb2d20b --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/common/sw_error.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#ifndef _SW_ERROR_H +#define _SW_ERROR_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + typedef enum { + SW_OK = 0, /* Operation succeeded */ + SW_FAIL = -1, /* Operation failed */ + SW_BAD_VALUE = -2, /* Illegal value */ + SW_OUT_OF_RANGE = -3, /* Value is out of range */ + SW_BAD_PARAM = -4, /* Illegal parameter(s) */ + SW_BAD_PTR = -5, /* Illegal pointer value */ + SW_BAD_LEN = -6, /* Wrong length */ + SW_BAD_STATE = -7, /* Wrong state of state machine */ + SW_READ_ERROR = -8, /* Read operation failed */ + SW_WRITE_ERROR = -9, /* Write operation failed */ + SW_CREATE_ERROR = -10, /* Fail in creating an entry */ + SW_DELETE_ERROR = -11, /* Fail in deleteing an entry */ + SW_NOT_FOUND = -12, /* Entry not found */ + SW_NO_CHANGE = -13, /* The parameter(s) is the same */ + SW_NO_MORE = -14, /* No more entry found */ + SW_NO_SUCH = -15, /* No such entry */ + SW_ALREADY_EXIST = -16, /* Tried to create existing entry */ + SW_FULL = -17, /* Table is full */ + SW_EMPTY = -18, /* Table is empty */ + SW_NOT_SUPPORTED = -19, /* This request is not support */ + SW_NOT_IMPLEMENTED = -20, /* This request is not implemented */ + SW_NOT_INITIALIZED = -21, /* The item is not initialized */ + SW_BUSY = -22, /* Operation is still running */ + SW_TIMEOUT = -23, /* Operation Time Out */ + SW_DISABLE = -24, /* Operation is disabled */ + SW_NO_RESOURCE = -25, /* Resource not available (memory ...) */ + SW_INIT_ERROR = -26, /* Error occured while INIT process */ + SW_NOT_READY = -27, /* The other side is not ready yet */ + SW_OUT_OF_MEM = -28, /* Cpu memory allocation failed. */ + SW_ABORTED = -29 /* Operation has been aborted. 
*/ + } sw_error_t; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _SW_ERROR_H */ + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/common/util.h b/feeds/ipq807x/qca-ssdk-shell/src/include/common/util.h new file mode 100755 index 000000000..94b5ab65f --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/common/util.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#ifndef _UTIL_H_ +#define _UTIL_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#define LL_IN_ORDER 0x1 +#define LL_FIX_NDNR 0x2 + + typedef enum { + LL_CMP_EQUAL = 0, + LL_CMP_GREATER = 1, + LL_CMP_SMALLER = 2 + } + ll_cmp_rslt_t; + + typedef ll_cmp_rslt_t(*ll_nd_cmp) (void *src, void *dest); + + typedef void (*ll_nd_dump) (void *data); + + typedef struct _sll_node_t + { + struct _sll_node_t *next; + void *data; + } sll_node_t; + + typedef struct + { + sll_node_t *fst_nd; + a_uint32_t nd_nr; + a_uint32_t flag; + ll_nd_cmp nd_cmp; + ll_nd_dump nd_dump; + sll_node_t *free_nd; + } sll_head_t; + + sll_head_t *sll_creat(ll_nd_cmp cmp_func, ll_nd_dump dump_func, + a_uint32_t flag, a_uint32_t nd_nr); + + void sll_destroy(sll_head_t * sll); + + void sll_lock(sll_head_t * sll); + + void sll_unlock(sll_head_t * sll); + + void *sll_nd_find(const sll_head_t * sll, void *data, + a_uint32_t * iterator); + + void *sll_nd_next(const sll_head_t * sll, a_uint32_t * iterator); + + sw_error_t sll_nd_insert(sll_head_t * sll, void *data); + + sw_error_t sll_nd_delete(sll_head_t * sll, void *data); + + typedef struct + { + a_uint32_t id_ptr; + a_uint32_t id_nr; + a_uint32_t id_min; + a_uint32_t id_size; + void *id_pool; + } sid_pool_t; + + sid_pool_t *sid_pool_creat(a_uint32_t id_nr, a_uint32_t min_id); + + void sid_pool_destroy(sid_pool_t * pool); + + sw_error_t sid_pool_id_alloc(sid_pool_t * pool, a_uint32_t * id); + + sw_error_t sid_pool_id_free(sid_pool_t * pool, a_uint32_t id); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _UTIL_H_ */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal.h new file mode 100755 index 000000000..cdffadee4 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2014, 2017-2019, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +/*qca808x_start*/ +#ifndef _FAL_H +#define _FAL_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +#include "fal_port_ctrl.h" +/*qca808x_end*/ +#include "fal_misc.h" +#include "fal_vlan.h" +#include "fal_fdb.h" +#include "fal_portvlan.h" +#include "fal_qos.h" +#include "fal_stp.h" +#include "fal_rate.h" +#include "fal_mirror.h" +#include "fal_leaky.h" +#include "fal_igmp.h" +#include "fal_mib.h" +#include "fal_acl.h" +#include "fal_led.h" +/*qca808x_start*/ +#include "fal_reg_access.h" +#include "fal_init.h" +/*qca808x_end*/ +#include "fal_cosmap.h" +#include "fal_ip.h" +#include "fal_nat.h" +#include "fal_sec.h" +#include "fal_trunk.h" +#include "fal_interface_ctrl.h" +#include "fal_fdb.h" +#include "fal_multi.h" +#include "fal_vsi.h" +#include "fal_qm.h" +#include "fal_flow.h" +#include "fal_ctrlpkt.h" +#include "fal_servcode.h" +#include "fal_rss_hash.h" +#include "fal_pppoe.h" +#include "fal_shaper.h" +#include "fal_bm.h" +#include "fal_policer.h" +#include "fal_ptp.h" +#include "fal_sfp.h" +/*qca808x_start*/ +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_H */ +/*qca808x_end*/ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_acl.h 
b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_acl.h new file mode 100755 index 000000000..edb605e40 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_acl.h @@ -0,0 +1,619 @@ +/* + * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +/** + * @defgroup fal_acl FAL_ACL + * @{ + */ +#ifndef _FAL_ACL_H_ +#define _FAL_ACL_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + + + /** + @brief This enum defines the ACL rule type. + */ + typedef enum { + FAL_ACL_RULE_MAC = 0, /**< include MAC, udf fields*/ + FAL_ACL_RULE_IP4, /**< include MAC, IP4 and Tcp/Udp udf fields*/ + FAL_ACL_RULE_IP6, /**< include MAC, IP6 and Tcp/Udp udf fields*/ + FAL_ACL_RULE_UDF, /**< only include user defined fields*/ + FAL_ACL_RULE_BUTT, + } + fal_acl_rule_type_t; + + + /** + @brief This enum defines the ACL field operation type. 
+ */ + typedef enum + { + FAL_ACL_FIELD_MASK = 0, /**< match operation is mask*/ + FAL_ACL_FIELD_RANGE, /**< match operation is range*/ + FAL_ACL_FIELD_LE, /**< match operation is less and equal*/ + FAL_ACL_FIELD_GE, /**< match operation is great and equal*/ + FAL_ACL_FIELD_NE, /**<- match operation is not equal*/ + FAL_ACL_FIELD_OP_BUTT, + } fal_acl_field_op_t; + + + typedef enum + { + FAL_ACL_POLICY_ROUTE = 0, + FAL_ACL_POLICY_SNAT, + FAL_ACL_POLICY_DNAT, + FAL_ACL_POLICY_RESERVE, + } fal_policy_forward_t; + + typedef enum + { + FAL_ACL_COMBINED_NONE = 0, + FAL_ACL_COMBINED_START, + FAL_ACL_COMBINED_CONTINUE, + FAL_ACL_COMBINED_END, + } fal_combined_t; + + /** + @brief This enum defines the ACL field operation type. + */ + typedef enum + { + FAL_ACL_UDF_TYPE_L2 = 0, /**< */ + FAL_ACL_UDF_TYPE_L3, /**< */ + FAL_ACL_UDF_TYPE_L4, /**< */ + FAL_ACL_UDF_TYPE_L2_SNAP, /**< */ + FAL_ACL_UDF_TYPE_L3_PLUS, /**< */ + FAL_ACL_UDF_TYPE_BUTT, + } fal_acl_udf_type_t; + + /** + @brief This enum defines the ACL rule type. 
+ */ + typedef enum { + FAL_ACL_UDF_NON_IP = 0, + FAL_ACL_UDF_IP4, + FAL_ACL_UDF_IP6, + FAL_ACL_UDF_BUTT, + }fal_acl_udf_pkt_type_t; + + typedef enum { + FAL_ACL_DEST_PORT_BMP = 0, /*dest info is bitmap*/ + FAL_ACL_DEST_NEXTHOP, /*dest info is nexthop*/ + FAL_ACL_DEST_PORT_ID, /*dest info is port id*/ + }fal_acl_dest_type_t; + +#define FAL_ACL_DEST_OFFSET(type,value) (((type)<<24)|(value)) +#define FAL_ACL_DEST_TYPE(dest) (((dest)>>24)&0xff) +#define FAL_ACL_DEST_VALUE(dest) ((dest)&0xffffff) + +#define FAL_ACL_FIELD_MAC_DA 0 +#define FAL_ACL_FIELD_MAC_SA 1 +#define FAL_ACL_FIELD_MAC_ETHTYPE 2 +#define FAL_ACL_FIELD_MAC_TAGGED 3 +#define FAL_ACL_FIELD_MAC_UP 4 +#define FAL_ACL_FIELD_MAC_VID 5 +#define FAL_ACL_FIELD_IP4_SIP 6 +#define FAL_ACL_FIELD_IP4_DIP 7 +#define FAL_ACL_FIELD_IP6_LABEL 8 +#define FAL_ACL_FIELD_IP6_SIP 9 +#define FAL_ACL_FIELD_IP6_DIP 10 +#define FAL_ACL_FIELD_IP_PROTO 11 +#define FAL_ACL_FIELD_IP_DSCP 12 +#define FAL_ACL_FIELD_L4_SPORT 13 +#define FAL_ACL_FIELD_L4_DPORT 14 +#define FAL_ACL_FIELD_UDF 15 +#define FAL_ACL_FIELD_MAC_CFI 16 +#define FAL_ACL_FIELD_ICMP_TYPE 17 +#define FAL_ACL_FIELD_ICMP_CODE 18 +#define FAL_ACL_FIELD_TCP_FLAG 19 +#define FAL_ACL_FIELD_RIPV1 20 +#define FAL_ACL_FIELD_DHCPV4 21 +#define FAL_ACL_FIELD_DHCPV6 22 +#define FAL_ACL_FIELD_MAC_STAG_VID 23 +#define FAL_ACL_FIELD_MAC_STAG_PRI 24 +#define FAL_ACL_FIELD_MAC_STAG_DEI 25 +#define FAL_ACL_FIELD_MAC_STAGGED 26 +#define FAL_ACL_FIELD_MAC_CTAG_VID 27 +#define FAL_ACL_FIELD_MAC_CTAG_PRI 28 +#define FAL_ACL_FIELD_MAC_CTAG_CFI 29 +#define FAL_ACL_FIELD_MAC_CTAGGED 30 +#define FAL_ACL_FIELD_INVERSE_ALL 31 +/*new add for hawkeye*/ +#define FAL_ACL_FIELD_POST_ROURING_EN 32 +#define FAL_ACL_FIELD_RES_CHAIN 33 +#define FAL_ACL_FIELD_FAKE_MAC_HEADER 34 +#define FAL_ACL_FIELD_SNAP 35 +#define FAL_ACL_FIELD_ETHERNET 36 +#define FAL_ACL_FIELD_IPV6 37 +#define FAL_ACL_FIELD_IP 38 +#define FAL_ACL_FIELD_VSI 39 +#define FAL_ACL_FIELD_PPPOE_SESSIONID 40 +#define 
FAL_ACL_FIELD_L3_FRAGMENT 41 +#define FAL_ACL_FIELD_AH_HEADER 42 +#define FAL_ACL_FIELD_ESP_HEADER 43 +#define FAL_ACL_FIELD_MOBILITY_HEADER 44 +#define FAL_ACL_FIELD_FRAGMENT_HEADER 45 +#define FAL_ACL_FIELD_OTHER_EXT_HEADER 46 +#define FAL_ACL_FIELD_L3_TTL 47 +#define FAL_ACL_FIELD_IPV4_OPTION 48 +#define FAL_ACL_FIELD_FIRST_FRAGMENT 49 +#define FAL_ACL_FIELD_L3_LENGTH 50 +#define FAL_ACL_FIELD_VSI_VALID 51 +#define FAL_ACL_FIELD_IP_PKT_TYPE 52 + +#define FAL_ACL_FIELD_UDF0 53 +#define FAL_ACL_FIELD_UDF1 54 +#define FAL_ACL_FIELD_UDF2 55 +#define FAL_ACL_FIELD_UDF3 56 + +#define FAL_ACL_FIELD_NUM 57 + + +#define FAL_ACL_ACTION_PERMIT 0 +#define FAL_ACL_ACTION_DENY 1 +#define FAL_ACL_ACTION_REDPT 2 +#define FAL_ACL_ACTION_RDTCPU 3 +#define FAL_ACL_ACTION_CPYCPU 4 +#define FAL_ACL_ACTION_MIRROR 5 +#define FAL_ACL_ACTION_MODIFY_VLAN 6 +#define FAL_ACL_ACTION_NEST_VLAN 7 +#define FAL_ACL_ACTION_REMARK_UP 8 +#define FAL_ACL_ACTION_REMARK_QUEUE 9 +#define FAL_ACL_ACTION_REMARK_STAG_VID 10 +#define FAL_ACL_ACTION_REMARK_STAG_PRI 11 +#define FAL_ACL_ACTION_REMARK_STAG_DEI 12 +#define FAL_ACL_ACTION_REMARK_CTAG_VID 13 +#define FAL_ACL_ACTION_REMARK_CTAG_PRI 14 +#define FAL_ACL_ACTION_REMARK_CTAG_CFI 15 +#define FAL_ACL_ACTION_REMARK_LOOKUP_VID 16 +#define FAL_ACL_ACTION_REMARK_DSCP 17 +#define FAL_ACL_ACTION_POLICER_EN 18 +#define FAL_ACL_ACTION_WCMP_EN 19 +#define FAL_ACL_ACTION_ARP_EN 20 +#define FAL_ACL_ACTION_POLICY_FORWARD_EN 21 +#define FAL_ACL_ACTION_BYPASS_EGRESS_TRANS 22 +#define FAL_ACL_ACTION_MATCH_TRIGGER_INTR 23 +/*new add for hawkeye*/ +#define FAL_ACL_ACTION_ENQUEUE_PRI 25 +#define FAL_ACL_ACTION_INT_DP 26 +#define FAL_ACL_ACTION_SERVICE_CODE 27 +#define FAL_ACL_ACTION_CPU_CODE 28 +#define FAL_ACL_ACTION_SYN_TOGGLE 29 +#define FAL_ACL_ACTION_METADATA_EN 30 + + +enum{ + FAL_ACL_BYPASS_IN_VLAN_MISS = 0, + FAL_ACL_BYPASS_SOUCE_GUARD, + FAL_ACL_BYPASS_MRU_MTU_CHECK, + FAL_ACL_BYPASS_EG_VSI_MEMBER_CHECK = 8, + FAL_ACL_BYPASS_EG_VLAN_TRANSLATION, + 
FAL_ACL_BYPASS_EG_VLAN_TAG_CTRL = 10, + FAL_ACL_BYPASS_FDB_LEARNING, + FAL_ACL_BYPASS_FDB_REFRESH, + FAL_ACL_BYPASS_L2_SECURITY,/*new address, station move, learn limit, hash full*/ + FAL_ACL_BYPASS_MANAGEMENT_FWD, + FAL_ACL_BYPASS_L2_FWD = 15, + FAL_ACL_BYPASS_IN_STP_CHECK, + FAL_ACL_BYPASS_EG_STP_CHECK, + FAL_ACL_BYPASS_SOURCE_FILTER, + FAL_ACL_BYPASS_POLICYER, + FAL_ACL_BYPASS_L2_EDIT = 20,/*VLAN tag edit*/ + FAL_ACL_BYPASS_L3_EDIT,/*Edit MAC address, PPPoE, IP address, TTL, DSCP, L4 port*/ + FAL_ACL_BYPASS_POST_ACL_CHECK_ROUTING, + FAL_ACL_BYPASS_PORT_ISOLATION, +}; + + + /** + * @brief This type defines the action in Acl rule. + * @details Comments: + * It's a bit map type, we can access it through macro FAL_ACTION_FLG_SET, + * FAL_ACTION_FLG_CLR and FAL_ACTION_FLG_TST. + */ + typedef a_uint32_t fal_acl_action_map_t; + +#define FAL_ACTION_FLG_SET(flag, action) \ + (flag) |= (0x1UL << (action)) + +#define FAL_ACTION_FLG_CLR(flag, action) \ + (flag) &= (~(0x1UL << (action))) + +#define FAL_ACTION_FLG_TST(flag, action) \ + ((flag) & (0x1UL << (action))) ? 1 : 0 + + + /** + * @brief This type defines the field in Acl rule. + * @details Comments: + * It's a bit map type, we can access it through macro FAL_FIELD_FLG_SET, + * FAL_FIELD_FLG_CLR and FAL_FIELD_FLG_TST. + */ + typedef a_uint32_t fal_acl_field_map_t[2]; + +#define FAL_FIELD_FLG_SET(flag, field) \ + ((flag[(field) / 32]) |= (0x1UL << ((field) % 32))) + +#define FAL_FIELD_FLG_CLR(flag, field) \ + ((flag[(field) / 32]) &= (~(0x1UL << ((field) % 32)))) + +#define FAL_FIELD_FLG_TST(flag, field) \ + (((flag[(field) / 32]) & (0x1UL << ((field) % 32))) ? 1 : 0) + +#define FAL_ACL_UDF_MAX_LENGTH 16 + + /** + * @brief This structure defines the Acl rule. + * @details Fields description: + * + * + * vid_val - If vid_op equals FAL_ACL_FIELD_MASK it's vlan id field value. + * If vid_op equals FAL_ACL_FIELD_RANGE it's vlan id field low value. If + * vid_op equals other value it's the compared value. 
+ * + * vid_mask - If vid_op equals FAL_ACL_FIELD_MASK it's vlan id field mask. + * If vid_op equals FAL_ACL_FIELD_RANGE it's vlan id field high value. If vid_op + * equals other value it's meaningless. + * + * + * ip_dscp_val - It's eight bits field we can set any value between 0 - 255. + * ip_dscp_mask - It's eight bits field we can set any value between 0 - 255. + * + * + * src_l4port_val - If src_l4port_op equals FAL_ACL_FIELD_MASK it's layer four + * source port field value. If src_l4port_op equals FAL_ACL_FIELD_RANGE it's + * layer four source port field low value. If src_l4port_op equals other value + * it's the compared value. + * + * + * src_l4port_mask - If src_l4port_op equals FAL_ACL_FIELD_MASK it's layer four + * source port field mask. If src_l4port_op equals FAL_ACL_FIELD_RANGE it's + * layer four source port field high value. If src_l4port_op equals other value + * it's meaningless. + * + * + * dest_l4port_val - If dest_l4port_op equals FAL_ACL_FIELD_MASK it's layer four + * destination port field value. If dest_l4port_op equals FAL_ACL_FIELD_RANGE it's + * layer four source port field low value. If dest_l4port_op equals other value + * it's the compared value. + * + * + * dest_l4port_mask - If dest_l4port_op equals FAL_ACL_FIELD_MASK it's layer four + * source port field mask. If dest_l4port_op equals FAL_ACL_FIELD_RANGE it's + * layer four source port field high value. If dest_l4port_op equals other value + * it's meaningless. + * + * + * ports - If FAL_ACL_ACTION_REDPT bit is setted in action_flg it's redirect + * destination ports. + * + * + * dot1p - If FAL_ACL_ACTION_REMARK_DOT1P bit is setted in action_flg it's + * the expected dot1p value. + * + * + * queue - If FAL_ACL_ACTION_REMARK_QUEUE bit is setted in action_flg it's + * the expected queue value. + * + * + * vid - If FAL_ACL_ACTION_MODIFY_VLAN or FAL_ACL_ACTION_NEST_VLAN bit is + * setted in action_flg it's the expected vlan id value. 
+ */ + typedef struct + { + fal_acl_rule_type_t rule_type; + fal_acl_field_map_t field_flg; + + /* fields of mac rule */ + fal_mac_addr_t src_mac_val; + fal_mac_addr_t src_mac_mask; + fal_mac_addr_t dest_mac_val; + fal_mac_addr_t dest_mac_mask; + a_uint16_t ethtype_val; + a_uint16_t ethtype_mask; + a_uint16_t vid_val; + a_uint16_t vid_mask; + fal_acl_field_op_t vid_op; + a_uint8_t tagged_val; + a_uint8_t tagged_mask; + a_uint8_t up_val; + a_uint8_t up_mask; + a_uint8_t cfi_val; + a_uint8_t cfi_mask; + a_uint16_t resv0; + + /* fields of enhanced mac rule*/ + a_uint8_t stagged_val; /*for s17c : 0-untag, 1-tag, for hawkeye: 2-pritag, 3-utag+pritag, 4- untag+tag, 5-tag+pritag, 6-all*/ + a_uint8_t stagged_mask; + a_uint8_t ctagged_val; + a_uint8_t ctagged_mask; + a_uint16_t stag_vid_val; + a_uint16_t stag_vid_mask; + fal_acl_field_op_t stag_vid_op; + a_uint16_t ctag_vid_val; + a_uint16_t ctag_vid_mask; + fal_acl_field_op_t ctag_vid_op; + a_uint8_t stag_pri_val; + a_uint8_t stag_pri_mask; + a_uint8_t ctag_pri_val; + a_uint8_t ctag_pri_mask; + a_uint8_t stag_dei_val; + a_uint8_t stag_dei_mask; + a_uint8_t ctag_cfi_val; + a_uint8_t ctag_cfi_mask; + + + /* fields of ip4 rule */ + fal_ip4_addr_t src_ip4_val; + fal_ip4_addr_t src_ip4_mask; + fal_ip4_addr_t dest_ip4_val; + fal_ip4_addr_t dest_ip4_mask; + + /* fields of ip6 rule */ + a_uint32_t ip6_lable_val; + a_uint32_t ip6_lable_mask; + fal_ip6_addr_t src_ip6_val; + fal_ip6_addr_t src_ip6_mask; + fal_ip6_addr_t dest_ip6_val; + fal_ip6_addr_t dest_ip6_mask; + + /* fields of ip rule */ + a_uint8_t ip_proto_val; + a_uint8_t ip_proto_mask; + a_uint8_t ip_dscp_val; + a_uint8_t ip_dscp_mask; + + /* fields of layer four */ + a_uint16_t src_l4port_val; + a_uint16_t src_l4port_mask; + fal_acl_field_op_t src_l4port_op; + a_uint16_t dest_l4port_val; + a_uint16_t dest_l4port_mask; + fal_acl_field_op_t dest_l4port_op; + a_uint8_t icmp_type_val; + a_uint8_t icmp_type_mask; + a_uint8_t icmp_code_val; + a_uint8_t icmp_code_mask; + a_uint8_t 
tcp_flag_val; + a_uint8_t tcp_flag_mask; + a_uint8_t ripv1_val; + a_uint8_t ripv1_mask; + a_uint8_t dhcpv4_val; + a_uint8_t dhcpv4_mask; + a_uint8_t dhcpv6_val; + a_uint8_t dhcpv6_mask; + + /* user defined fields */ + fal_acl_udf_type_t udf_type; + a_uint8_t udf_offset; + a_uint8_t udf_len; + a_uint8_t udf_val[FAL_ACL_UDF_MAX_LENGTH]; + a_uint8_t udf_mask[FAL_ACL_UDF_MAX_LENGTH]; + + /* fields of action */ + fal_acl_action_map_t action_flg; + fal_pbmp_t ports; /*high 8bits, 00-port bitmap, 01-nexthop, 10-vp*/ + a_uint32_t match_cnt; + a_uint16_t vid; + a_uint8_t up; + a_uint8_t queue; + a_uint16_t stag_vid; + a_uint8_t stag_pri; + a_uint8_t stag_dei; + a_uint16_t ctag_vid; + a_uint8_t ctag_pri; + a_uint8_t ctag_cfi; + a_uint16_t policer_ptr; + a_uint16_t arp_ptr; + a_uint16_t wcmp_ptr; + a_uint8_t dscp; + a_uint8_t rsv; + fal_policy_forward_t policy_fwd; + fal_combined_t combined; + + /*new add match fields for hawkeye*/ + a_uint8_t pri; /*rule priority 0-7*/ + a_bool_t post_routing; + a_uint8_t acl_pool; + + a_bool_t is_ip_val; + a_uint8_t is_ip_mask; + + a_bool_t is_ipv6_val; + a_uint8_t is_ipv6_mask; + + a_bool_t is_fake_mac_header_val; + a_uint8_t is_fake_mac_header_mask; + + a_bool_t is_snap_val; + a_uint8_t is_snap_mask; + + a_bool_t is_ethernet_val; + a_uint8_t is_ethernet_mask; + + a_bool_t is_fragement_val; + a_uint8_t is_fragement_mask; + + a_bool_t is_ah_header_val; + a_uint8_t is_ah_header_mask; + + a_bool_t is_esp_header_val; + a_uint8_t is_esp_header_mask; + + a_bool_t is_mobility_header_val; + a_uint8_t is_mobility_header_mask; + + a_bool_t is_fragment_header_val; + a_uint8_t is_fragment_header_mask; + + a_bool_t is_other_header_val; + a_uint8_t is_other_header_mask; + + a_bool_t is_ipv4_option_val; + a_uint8_t is_ipv4_option_mask; + + a_bool_t is_first_frag_val; + a_uint8_t is_first_frag_mask; + + /*fields of VLAN rule*/ + a_bool_t vsi_valid; + a_uint8_t vsi_valid_mask; + a_uint8_t vsi; /*0-31*/ + a_uint8_t vsi_mask; /*0-31*/ + /*fields of L2 MISC 
rule*/ + a_uint16_t pppoe_sessionid; + a_uint16_t pppoe_sessionid_mask; + fal_acl_field_op_t icmp_type_code_op; + /*fields of IP MISC rule*/ + a_uint8_t l3_ttl; + a_uint8_t l3_ttl_mask; + fal_acl_field_op_t l3_length_op; + a_uint16_t l3_length; + a_uint16_t l3_length_mask; + a_uint16_t l3_pkt_type; + a_uint16_t l3_pkt_type_mask; + /*field of udf*/ + fal_acl_field_op_t udf0_op; + a_uint16_t udf0_val; + a_uint16_t udf0_mask; + fal_acl_field_op_t udf1_op; + a_uint16_t udf1_val; + a_uint16_t udf1_mask; + a_uint16_t udf2_val; + a_uint16_t udf2_mask; + a_uint16_t udf3_val; + a_uint16_t udf3_mask; + + /*new add acl action for hawkeye*/ + a_uint32_t bypass_bitmap; + a_uint8_t enqueue_pri; + a_uint8_t stag_fmt; + a_uint8_t ctag_fmt; + a_uint8_t int_dp; + a_uint8_t service_code; + a_uint8_t cpu_code; + a_uint64_t match_bytes; + /*new add acl action for IPQ60xx*/ + a_uint8_t dscp_mask; + a_uint8_t qos_res_prec; + } fal_acl_rule_t; + + + /** + @brief This enum defines the ACL will work on which derection traffic. + */ + typedef enum + { + FAL_ACL_DIREC_IN = 0, /**< Acl will work on ingressive traffic */ + FAL_ACL_DIREC_EG, /**< Acl will work on egressive traffic */ + FAL_ACL_DIREC_BOTH, /**< Acl will work on both ingressive and egressive traffic*/ + } fal_acl_direc_t; + + + /** + @brief This enum defines the ACL will work on which partiualr object. 
+ */ + typedef enum + { + FAL_ACL_BIND_PORT = 0, /**< Acl wil work on particular port and virtual port */ + FAL_ACL_BIND_PORTBITMAP = 1, /**< Acl wil work on port bitmap */ + FAL_ACL_BIND_SERVICE_CODE = 2, /**< Acl wil work on service code */ + FAL_ACL_BIND_L3_IF = 3, /**< Acl wil work on l3 interface */ + } fal_acl_bind_obj_t; + +enum +{ + /*acl*/ + FUNC_ACL_LIST_CREAT = 0, + FUNC_ACL_LIST_DESTROY, + FUNC_ACL_RULE_ADD, + FUNC_ACL_RULE_DELETE, + FUNC_ACL_RULE_QUERY, + FUNC_ACL_RULE_DUMP, + FUNC_ACL_LIST_BIND, + FUNC_ACL_LIST_UNBIND, + FUNC_ACL_LIST_DUMP, + FUNC_ACL_UDF_PROFILE_SET, + FUNC_ACL_UDF_PROFILE_GET, +}; + + +sw_error_t +fal_acl_list_creat(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t list_pri); + +sw_error_t +fal_acl_list_destroy(a_uint32_t dev_id, a_uint32_t list_id); + +sw_error_t +fal_acl_rule_add(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t rule_id, a_uint32_t rule_nr, fal_acl_rule_t * rule); + +sw_error_t +fal_acl_rule_delete(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t rule_id, a_uint32_t rule_nr); + +sw_error_t +fal_acl_rule_query(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t rule_id, fal_acl_rule_t * rule); + +sw_error_t +fal_acl_list_bind(a_uint32_t dev_id, a_uint32_t list_id, fal_acl_direc_t direc, fal_acl_bind_obj_t obj_t, a_uint32_t obj_idx); + +sw_error_t +fal_acl_list_unbind(a_uint32_t dev_id, a_uint32_t list_id, fal_acl_direc_t direc, fal_acl_bind_obj_t obj_t, a_uint32_t obj_idx); + +sw_error_t +fal_acl_status_set(a_uint32_t dev_id, a_bool_t enable); + +sw_error_t +fal_acl_status_get(a_uint32_t dev_id, a_bool_t * enable); + +sw_error_t +fal_acl_list_dump(a_uint32_t dev_id); + +sw_error_t +fal_acl_rule_dump(a_uint32_t dev_id); + +sw_error_t +fal_acl_port_udf_profile_set(a_uint32_t dev_id, fal_port_t port_id, fal_acl_udf_type_t udf_type, a_uint32_t offset, a_uint32_t length); +sw_error_t +fal_acl_port_udf_profile_get(a_uint32_t dev_id, fal_port_t port_id, fal_acl_udf_type_t udf_type, a_uint32_t * offset, a_uint32_t * 
length); + +sw_error_t +fal_acl_udf_profile_set(a_uint32_t dev_id, fal_acl_udf_pkt_type_t pkt_type,a_uint32_t udf_idx, fal_acl_udf_type_t udf_type, a_uint32_t offset); + +sw_error_t +fal_acl_udf_profile_get(a_uint32_t dev_id, fal_acl_udf_pkt_type_t pkt_type,a_uint32_t udf_idx, fal_acl_udf_type_t *udf_type, a_uint32_t *offset); + +sw_error_t +fal_acl_rule_active(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t rule_id, a_uint32_t rule_nr); +sw_error_t +fal_acl_rule_deactive(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t rule_id, a_uint32_t rule_nr); +sw_error_t +fal_acl_rule_src_filter_sts_set(a_uint32_t dev_id, a_uint32_t rule_id, a_bool_t enable); +sw_error_t +fal_acl_rule_src_filter_sts_get(a_uint32_t dev_id, a_uint32_t rule_id, a_bool_t* enable); + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_ACL_H_ */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_api.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_api.h new file mode 100755 index 000000000..2a8121db2 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_api.h @@ -0,0 +1,2115 @@ +/* + * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/*qca808x_start*/ +#ifndef _FAL_API_H_ +#define _FAL_API_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ +/*qca808x_end*/ +#ifdef IN_PORTCONTROL +/*qca808x_start*/ +#define PORTCONTROL_API \ + SW_API_DEF(SW_API_PT_DUPLEX_GET, fal_port_duplex_get), \ + SW_API_DEF(SW_API_PT_DUPLEX_SET, fal_port_duplex_set), \ + SW_API_DEF(SW_API_PT_SPEED_GET, fal_port_speed_get), \ + SW_API_DEF(SW_API_PT_SPEED_SET, fal_port_speed_set), \ + SW_API_DEF(SW_API_PT_AN_GET, fal_port_autoneg_status_get), \ + SW_API_DEF(SW_API_PT_AN_ENABLE, fal_port_autoneg_enable), \ + SW_API_DEF(SW_API_PT_AN_RESTART, fal_port_autoneg_restart), \ + SW_API_DEF(SW_API_PT_AN_ADV_GET, fal_port_autoneg_adv_get), \ + SW_API_DEF(SW_API_PT_AN_ADV_SET, fal_port_autoneg_adv_set), \ +/*qca808x_end*/\ + SW_API_DEF(SW_API_PT_HDR_SET, fal_port_hdr_status_set), \ + SW_API_DEF(SW_API_PT_HDR_GET, fal_port_hdr_status_get), \ + SW_API_DEF(SW_API_PT_FLOWCTRL_SET, fal_port_flowctrl_set), \ + SW_API_DEF(SW_API_PT_FLOWCTRL_GET, fal_port_flowctrl_get), \ + SW_API_DEF(SW_API_PT_FLOWCTRL_MODE_SET, fal_port_flowctrl_forcemode_set), \ + SW_API_DEF(SW_API_PT_FLOWCTRL_MODE_GET, fal_port_flowctrl_forcemode_get), \ + SW_API_DEF(SW_API_PT_POWERSAVE_SET, fal_port_powersave_set), \ + SW_API_DEF(SW_API_PT_POWERSAVE_GET, fal_port_powersave_get), \ +/*qca808x_start*/\ + SW_API_DEF(SW_API_PT_HIBERNATE_SET, fal_port_hibernate_set), \ + SW_API_DEF(SW_API_PT_HIBERNATE_GET, fal_port_hibernate_get), \ + SW_API_DEF(SW_API_PT_CDT, fal_port_cdt), \ +/*qca808x_end*/\ + SW_API_DEF(SW_API_PT_TXHDR_SET, fal_port_txhdr_mode_set), \ + SW_API_DEF(SW_API_PT_TXHDR_GET, fal_port_txhdr_mode_get), \ + SW_API_DEF(SW_API_PT_RXHDR_SET, fal_port_rxhdr_mode_set), \ + SW_API_DEF(SW_API_PT_RXHDR_GET, fal_port_rxhdr_mode_get), \ + SW_API_DEF(SW_API_HEADER_TYPE_SET, fal_header_type_set), \ + SW_API_DEF(SW_API_HEADER_TYPE_GET, fal_header_type_get), \ + SW_API_DEF(SW_API_TXMAC_STATUS_SET, fal_port_txmac_status_set), \ + 
SW_API_DEF(SW_API_TXMAC_STATUS_GET, fal_port_txmac_status_get), \ + SW_API_DEF(SW_API_RXMAC_STATUS_SET, fal_port_rxmac_status_set), \ + SW_API_DEF(SW_API_RXMAC_STATUS_GET, fal_port_rxmac_status_get), \ + SW_API_DEF(SW_API_TXFC_STATUS_SET, fal_port_txfc_status_set), \ + SW_API_DEF(SW_API_TXFC_STATUS_GET, fal_port_txfc_status_get), \ + SW_API_DEF(SW_API_RXFC_STATUS_SET, fal_port_rxfc_status_set), \ + SW_API_DEF(SW_API_RXFC_STATUS_GET, fal_port_rxfc_status_get), \ + SW_API_DEF(SW_API_BP_STATUS_SET, fal_port_bp_status_set), \ + SW_API_DEF(SW_API_BP_STATUS_GET, fal_port_bp_status_get), \ + SW_API_DEF(SW_API_PT_LINK_MODE_SET, fal_port_link_forcemode_set), \ + SW_API_DEF(SW_API_PT_LINK_MODE_GET, fal_port_link_forcemode_get), \ +/*qca808x_start*/\ + SW_API_DEF(SW_API_PT_LINK_STATUS_GET, fal_port_link_status_get), \ +/*qca808x_end*/\ + SW_API_DEF(SW_API_PT_MAC_LOOPBACK_SET, fal_port_mac_loopback_set), \ + SW_API_DEF(SW_API_PT_MAC_LOOPBACK_GET, fal_port_mac_loopback_get), \ + SW_API_DEF(SW_API_PTS_LINK_STATUS_GET, fal_ports_link_status_get), \ + SW_API_DEF(SW_API_PT_CONGESTION_DROP_SET, fal_port_congestion_drop_set), \ + SW_API_DEF(SW_API_PT_CONGESTION_DROP_GET, fal_port_congestion_drop_get), \ + SW_API_DEF(SW_API_PT_RING_FLOW_CTRL_THRES_SET, fal_ring_flow_ctrl_thres_set), \ + SW_API_DEF(SW_API_PT_RING_FLOW_CTRL_THRES_GET, fal_ring_flow_ctrl_thres_get), \ +/*qca808x_start*/\ + SW_API_DEF(SW_API_PT_8023AZ_SET, fal_port_8023az_set), \ + SW_API_DEF(SW_API_PT_8023AZ_GET, fal_port_8023az_get), \ + SW_API_DEF(SW_API_PT_MDIX_SET, fal_port_mdix_set), \ + SW_API_DEF(SW_API_PT_MDIX_GET, fal_port_mdix_get), \ + SW_API_DEF(SW_API_PT_MDIX_STATUS_GET, fal_port_mdix_status_get), \ +/*qca808x_end*/\ + SW_API_DEF(SW_API_PT_COMBO_PREFER_MEDIUM_SET, fal_port_combo_prefer_medium_set), \ + SW_API_DEF(SW_API_PT_COMBO_PREFER_MEDIUM_GET, fal_port_combo_prefer_medium_get), \ + SW_API_DEF(SW_API_PT_COMBO_MEDIUM_STATUS_GET, fal_port_combo_medium_status_get), \ + 
SW_API_DEF(SW_API_PT_COMBO_FIBER_MODE_SET, fal_port_combo_fiber_mode_set), \ + SW_API_DEF(SW_API_PT_COMBO_FIBER_MODE_GET, fal_port_combo_fiber_mode_get), \ +/*qca808x_start*/\ + SW_API_DEF(SW_API_PT_LOCAL_LOOPBACK_SET, fal_port_local_loopback_set), \ + SW_API_DEF(SW_API_PT_LOCAL_LOOPBACK_GET, fal_port_local_loopback_get), \ + SW_API_DEF(SW_API_PT_REMOTE_LOOPBACK_SET, fal_port_remote_loopback_set), \ + SW_API_DEF(SW_API_PT_REMOTE_LOOPBACK_GET, fal_port_remote_loopback_get), \ + SW_API_DEF(SW_API_PT_RESET, fal_port_reset), \ + SW_API_DEF(SW_API_PT_POWER_OFF, fal_port_power_off), \ + SW_API_DEF(SW_API_PT_POWER_ON, fal_port_power_on), \ + SW_API_DEF(SW_API_PT_MAGIC_FRAME_MAC_SET, fal_port_magic_frame_mac_set), \ + SW_API_DEF(SW_API_PT_MAGIC_FRAME_MAC_GET, fal_port_magic_frame_mac_get), \ + SW_API_DEF(SW_API_PT_PHY_ID_GET, fal_port_phy_id_get), \ + SW_API_DEF(SW_API_PT_WOL_STATUS_SET, fal_port_wol_status_set), \ + SW_API_DEF(SW_API_PT_WOL_STATUS_GET, fal_port_wol_status_get), \ +/*qca808x_end*/\ + SW_API_DEF(SW_API_PT_INTERFACE_MODE_APPLY, fal_port_interface_mode_apply), \ + SW_API_DEF(SW_API_PT_INTERFACE_MODE_SET, fal_port_interface_mode_set), \ + SW_API_DEF(SW_API_PT_INTERFACE_MODE_GET, fal_port_interface_mode_get), \ +/*qca808x_start*/\ + SW_API_DEF(SW_API_PT_INTERFACE_MODE_STATUS_GET, fal_port_interface_mode_status_get), \ + SW_API_DEF(SW_API_DEBUG_PHYCOUNTER_SET, fal_debug_phycounter_set), \ + SW_API_DEF(SW_API_DEBUG_PHYCOUNTER_GET, fal_debug_phycounter_get), \ + SW_API_DEF(SW_API_DEBUG_PHYCOUNTER_SHOW, fal_debug_phycounter_show),\ +/*qca808x_end*/\ + SW_API_DEF(SW_API_PT_MTU_SET, fal_port_mtu_set), \ + SW_API_DEF(SW_API_PT_MTU_GET, fal_port_mtu_get), \ + SW_API_DEF(SW_API_PT_MRU_SET, fal_port_mru_set), \ + SW_API_DEF(SW_API_PT_MRU_GET, fal_port_mru_get), \ + SW_API_DEF(SW_API_PT_SOURCE_FILTER_GET, fal_port_source_filter_status_get), \ + SW_API_DEF(SW_API_PT_SOURCE_FILTER_SET, fal_port_source_filter_enable), \ + SW_API_DEF(SW_API_PT_FRAME_MAX_SIZE_GET, 
fal_port_max_frame_size_get), \ + SW_API_DEF(SW_API_PT_FRAME_MAX_SIZE_SET, fal_port_max_frame_size_set), \ + SW_API_DEF(SW_API_PT_INTERFACE_3AZ_STATUS_SET, fal_port_interface_3az_status_set), \ + SW_API_DEF(SW_API_PT_INTERFACE_3AZ_STATUS_GET, fal_port_interface_3az_status_get), \ + SW_API_DEF(SW_API_PT_PROMISC_MODE_SET, fal_port_promisc_mode_set), \ + SW_API_DEF(SW_API_PT_PROMISC_MODE_GET, fal_port_promisc_mode_get), \ + SW_API_DEF(SW_API_PT_INTERFACE_EEE_CFG_SET, fal_port_interface_eee_cfg_set), \ + SW_API_DEF(SW_API_PT_INTERFACE_EEE_CFG_GET, fal_port_interface_eee_cfg_get), \ + SW_API_DEF(SW_API_PT_SOURCE_FILTER_CONFIG_GET, fal_port_source_filter_config_get),\ + SW_API_DEF(SW_API_PT_SOURCE_FILTER_CONFIG_SET, fal_port_source_filter_config_set), \ + SW_API_DEF(SW_API_PT_SWITCH_PORT_LOOPBACK_SET, fal_switch_port_loopback_set), \ + SW_API_DEF(SW_API_PT_SWITCH_PORT_LOOPBACK_GET, fal_switch_port_loopback_get), +/*qca808x_start*/\ +/*end of PORTCONTROL_API*/ +#define PORTCONTROL_API_PARAM \ + SW_API_DESC(SW_API_PT_DUPLEX_GET) \ + SW_API_DESC(SW_API_PT_DUPLEX_SET) \ + SW_API_DESC(SW_API_PT_SPEED_GET) \ + SW_API_DESC(SW_API_PT_SPEED_SET) \ + SW_API_DESC(SW_API_PT_AN_GET) \ + SW_API_DESC(SW_API_PT_AN_ENABLE) \ + SW_API_DESC(SW_API_PT_AN_RESTART) \ + SW_API_DESC(SW_API_PT_AN_ADV_GET) \ + SW_API_DESC(SW_API_PT_AN_ADV_SET) \ +/*qca808x_end*/\ + SW_API_DESC(SW_API_PT_HDR_SET) \ + SW_API_DESC(SW_API_PT_HDR_GET) \ + SW_API_DESC(SW_API_PT_FLOWCTRL_SET) \ + SW_API_DESC(SW_API_PT_FLOWCTRL_GET) \ + SW_API_DESC(SW_API_PT_FLOWCTRL_MODE_SET) \ + SW_API_DESC(SW_API_PT_FLOWCTRL_MODE_GET) \ + SW_API_DESC(SW_API_PT_POWERSAVE_SET) \ + SW_API_DESC(SW_API_PT_POWERSAVE_GET) \ +/*qca808x_start*/\ + SW_API_DESC(SW_API_PT_HIBERNATE_SET) \ + SW_API_DESC(SW_API_PT_HIBERNATE_GET) \ + SW_API_DESC(SW_API_PT_CDT) \ +/*qca808x_end*/\ + SW_API_DESC(SW_API_PT_TXHDR_SET) \ + SW_API_DESC(SW_API_PT_TXHDR_GET) \ + SW_API_DESC(SW_API_PT_RXHDR_SET) \ + SW_API_DESC(SW_API_PT_RXHDR_GET) \ + 
SW_API_DESC(SW_API_HEADER_TYPE_SET) \ + SW_API_DESC(SW_API_HEADER_TYPE_GET) \ + SW_API_DESC(SW_API_TXMAC_STATUS_SET) \ + SW_API_DESC(SW_API_TXMAC_STATUS_GET) \ + SW_API_DESC(SW_API_RXMAC_STATUS_SET) \ + SW_API_DESC(SW_API_RXMAC_STATUS_GET) \ + SW_API_DESC(SW_API_TXFC_STATUS_SET) \ + SW_API_DESC(SW_API_TXFC_STATUS_GET) \ + SW_API_DESC(SW_API_RXFC_STATUS_SET) \ + SW_API_DESC(SW_API_RXFC_STATUS_GET) \ + SW_API_DESC(SW_API_BP_STATUS_SET) \ + SW_API_DESC(SW_API_BP_STATUS_GET) \ + SW_API_DESC(SW_API_PT_LINK_MODE_SET) \ + SW_API_DESC(SW_API_PT_LINK_MODE_GET) \ +/*qca808x_start*/\ + SW_API_DESC(SW_API_PT_LINK_STATUS_GET) \ +/*qca808x_end*/\ + SW_API_DESC(SW_API_PT_MAC_LOOPBACK_SET) \ + SW_API_DESC(SW_API_PT_MAC_LOOPBACK_GET) \ + SW_API_DESC(SW_API_PTS_LINK_STATUS_GET) \ + SW_API_DESC(SW_API_PT_CONGESTION_DROP_SET) \ + SW_API_DESC(SW_API_PT_CONGESTION_DROP_GET) \ + SW_API_DESC(SW_API_PT_RING_FLOW_CTRL_THRES_SET) \ + SW_API_DESC(SW_API_PT_RING_FLOW_CTRL_THRES_GET) \ +/*qca808x_start*/\ + SW_API_DESC(SW_API_PT_8023AZ_SET) \ + SW_API_DESC(SW_API_PT_8023AZ_GET) \ + SW_API_DESC(SW_API_PT_MDIX_SET) \ + SW_API_DESC(SW_API_PT_MDIX_GET) \ + SW_API_DESC(SW_API_PT_MDIX_STATUS_GET) \ +/*qca808x_end*/\ + SW_API_DESC(SW_API_PT_COMBO_PREFER_MEDIUM_SET) \ + SW_API_DESC(SW_API_PT_COMBO_PREFER_MEDIUM_GET) \ + SW_API_DESC(SW_API_PT_COMBO_MEDIUM_STATUS_GET) \ + SW_API_DESC(SW_API_PT_COMBO_FIBER_MODE_SET) \ + SW_API_DESC(SW_API_PT_COMBO_FIBER_MODE_GET) \ +/*qca808x_start*/\ + SW_API_DESC(SW_API_PT_LOCAL_LOOPBACK_SET) \ + SW_API_DESC(SW_API_PT_LOCAL_LOOPBACK_GET) \ + SW_API_DESC(SW_API_PT_REMOTE_LOOPBACK_SET) \ + SW_API_DESC(SW_API_PT_REMOTE_LOOPBACK_GET) \ + SW_API_DESC(SW_API_PT_RESET) \ + SW_API_DESC(SW_API_PT_POWER_OFF) \ + SW_API_DESC(SW_API_PT_POWER_ON) \ + SW_API_DESC(SW_API_PT_MAGIC_FRAME_MAC_SET) \ + SW_API_DESC(SW_API_PT_MAGIC_FRAME_MAC_GET) \ + SW_API_DESC(SW_API_PT_PHY_ID_GET) \ + SW_API_DESC(SW_API_PT_WOL_STATUS_SET) \ + SW_API_DESC(SW_API_PT_WOL_STATUS_GET) \ +/*qca808x_end*/\ + 
SW_API_DESC(SW_API_PT_INTERFACE_MODE_SET) \ + SW_API_DESC(SW_API_PT_INTERFACE_MODE_GET) \ + SW_API_DESC(SW_API_PT_INTERFACE_MODE_APPLY) \ +/*qca808x_start*/\ + SW_API_DESC(SW_API_PT_INTERFACE_MODE_STATUS_GET) \ + SW_API_DESC(SW_API_DEBUG_PHYCOUNTER_SET) \ + SW_API_DESC(SW_API_DEBUG_PHYCOUNTER_GET) \ + SW_API_DESC(SW_API_DEBUG_PHYCOUNTER_SHOW)\ +/*qca808x_end*/\ + SW_API_DESC(SW_API_PT_MTU_SET) \ + SW_API_DESC(SW_API_PT_MTU_GET) \ + SW_API_DESC(SW_API_PT_MRU_SET) \ + SW_API_DESC(SW_API_PT_MRU_GET) \ + SW_API_DESC(SW_API_PT_SOURCE_FILTER_GET) \ + SW_API_DESC(SW_API_PT_SOURCE_FILTER_SET) \ + SW_API_DESC(SW_API_PT_FRAME_MAX_SIZE_GET) \ + SW_API_DESC(SW_API_PT_FRAME_MAX_SIZE_SET) \ + SW_API_DESC(SW_API_PT_INTERFACE_3AZ_STATUS_SET) \ + SW_API_DESC(SW_API_PT_INTERFACE_3AZ_STATUS_GET)\ + SW_API_DESC(SW_API_PT_PROMISC_MODE_SET) \ + SW_API_DESC(SW_API_PT_PROMISC_MODE_GET) \ + SW_API_DESC(SW_API_PT_INTERFACE_EEE_CFG_SET) \ + SW_API_DESC(SW_API_PT_INTERFACE_EEE_CFG_GET) \ + SW_API_DESC(SW_API_PT_SOURCE_FILTER_CONFIG_GET) \ + SW_API_DESC(SW_API_PT_SOURCE_FILTER_CONFIG_SET) \ + SW_API_DESC(SW_API_PT_SWITCH_PORT_LOOPBACK_SET) \ + SW_API_DESC(SW_API_PT_SWITCH_PORT_LOOPBACK_GET) +/*qca808x_start*/\ +/*end of PORTCONTROL_API_PARAM*/ +/*qca808x_end*/ +#else +#define PORTCONTROL_API +#define PORTCONTROL_API_PARAM +#endif + +#ifdef IN_VLAN +#define VLAN_API \ + SW_API_DEF(SW_API_VLAN_ADD, fal_vlan_create), \ + SW_API_DEF(SW_API_VLAN_DEL, fal_vlan_delete), \ + SW_API_DEF(SW_API_VLAN_MEM_UPDATE, fal_vlan_member_update), \ + SW_API_DEF(SW_API_VLAN_FIND, fal_vlan_find), \ + SW_API_DEF(SW_API_VLAN_NEXT, fal_vlan_next), \ + SW_API_DEF(SW_API_VLAN_APPEND, fal_vlan_entry_append), \ + SW_API_DEF(SW_API_VLAN_FLUSH, fal_vlan_flush), \ + SW_API_DEF(SW_API_VLAN_FID_SET, fal_vlan_fid_set), \ + SW_API_DEF(SW_API_VLAN_FID_GET, fal_vlan_fid_get), \ + SW_API_DEF(SW_API_VLAN_MEMBER_ADD, fal_vlan_member_add), \ + SW_API_DEF(SW_API_VLAN_MEMBER_DEL, fal_vlan_member_del), \ + 
SW_API_DEF(SW_API_VLAN_LEARN_STATE_SET, fal_vlan_learning_state_set), \ + SW_API_DEF(SW_API_VLAN_LEARN_STATE_GET, fal_vlan_learning_state_get), + +#define VLAN_API_PARAM \ + SW_API_DESC(SW_API_VLAN_ADD) \ + SW_API_DESC(SW_API_VLAN_DEL) \ + SW_API_DESC(SW_API_VLAN_MEM_UPDATE) \ + SW_API_DESC(SW_API_VLAN_FIND) \ + SW_API_DESC(SW_API_VLAN_NEXT) \ + SW_API_DESC(SW_API_VLAN_APPEND) \ + SW_API_DESC(SW_API_VLAN_FLUSH) \ + SW_API_DESC(SW_API_VLAN_FID_SET) \ + SW_API_DESC(SW_API_VLAN_FID_GET) \ + SW_API_DESC(SW_API_VLAN_MEMBER_ADD) \ + SW_API_DESC(SW_API_VLAN_MEMBER_DEL) \ + SW_API_DESC(SW_API_VLAN_LEARN_STATE_SET) \ + SW_API_DESC(SW_API_VLAN_LEARN_STATE_GET) +#else +#define VLAN_API +#define VLAN_API_PARAM +#endif + +#ifdef IN_PORTVLAN +#define PORTVLAN_API \ + SW_API_DEF(SW_API_PT_ING_MODE_GET, fal_port_1qmode_get), \ + SW_API_DEF(SW_API_PT_ING_MODE_SET, fal_port_1qmode_set), \ + SW_API_DEF(SW_API_PT_EG_MODE_GET, fal_port_egvlanmode_get), \ + SW_API_DEF(SW_API_PT_EG_MODE_SET, fal_port_egvlanmode_set), \ + SW_API_DEF(SW_API_PT_VLAN_MEM_ADD, fal_portvlan_member_add), \ + SW_API_DEF(SW_API_PT_VLAN_MEM_DEL, fal_portvlan_member_del), \ + SW_API_DEF(SW_API_PT_VLAN_MEM_UPDATE, fal_portvlan_member_update), \ + SW_API_DEF(SW_API_PT_VLAN_MEM_GET, fal_portvlan_member_get), \ + SW_API_DEF(SW_API_PT_DEF_VID_SET, fal_port_default_vid_set), \ + SW_API_DEF(SW_API_PT_DEF_VID_GET, fal_port_default_vid_get), \ + SW_API_DEF(SW_API_PT_FORCE_DEF_VID_SET, fal_port_force_default_vid_set), \ + SW_API_DEF(SW_API_PT_FORCE_DEF_VID_GET, fal_port_force_default_vid_get), \ + SW_API_DEF(SW_API_PT_FORCE_PORTVLAN_SET, fal_port_force_portvlan_set), \ + SW_API_DEF(SW_API_PT_FORCE_PORTVLAN_GET, fal_port_force_portvlan_get), \ + SW_API_DEF(SW_API_PT_NESTVLAN_SET, fal_port_nestvlan_set), \ + SW_API_DEF(SW_API_PT_NESTVLAN_GET, fal_port_nestvlan_get), \ + SW_API_DEF(SW_API_NESTVLAN_TPID_SET, fal_nestvlan_tpid_set), \ + SW_API_DEF(SW_API_NESTVLAN_TPID_GET, fal_nestvlan_tpid_get), \ + 
SW_API_DEF(SW_API_PT_IN_VLAN_MODE_SET, fal_port_invlan_mode_set), \ + SW_API_DEF(SW_API_PT_IN_VLAN_MODE_GET, fal_port_invlan_mode_get), \ + SW_API_DEF(SW_API_PT_TLS_SET, fal_port_tls_set), \ + SW_API_DEF(SW_API_PT_TLS_GET, fal_port_tls_get), \ + SW_API_DEF(SW_API_PT_PRI_PROPAGATION_SET, fal_port_pri_propagation_set), \ + SW_API_DEF(SW_API_PT_PRI_PROPAGATION_GET, fal_port_pri_propagation_get), \ + SW_API_DEF(SW_API_PT_DEF_SVID_SET, fal_port_default_svid_set), \ + SW_API_DEF(SW_API_PT_DEF_SVID_GET, fal_port_default_svid_get), \ + SW_API_DEF(SW_API_PT_DEF_CVID_SET, fal_port_default_cvid_set), \ + SW_API_DEF(SW_API_PT_DEF_CVID_GET, fal_port_default_cvid_get), \ + SW_API_DEF(SW_API_PT_VLAN_PROPAGATION_SET, fal_port_vlan_propagation_set), \ + SW_API_DEF(SW_API_PT_VLAN_PROPAGATION_GET, fal_port_vlan_propagation_get), \ + SW_API_DEF(SW_API_PT_VLAN_TRANS_ADD, fal_port_vlan_trans_add), \ + SW_API_DEF(SW_API_PT_VLAN_TRANS_DEL, fal_port_vlan_trans_del), \ + SW_API_DEF(SW_API_PT_VLAN_TRANS_GET, fal_port_vlan_trans_get), \ + SW_API_DEF(SW_API_QINQ_MODE_SET, fal_qinq_mode_set), \ + SW_API_DEF(SW_API_QINQ_MODE_GET, fal_qinq_mode_get), \ + SW_API_DEF(SW_API_PT_QINQ_ROLE_SET, fal_port_qinq_role_set), \ + SW_API_DEF(SW_API_PT_QINQ_ROLE_GET, fal_port_qinq_role_get), \ + SW_API_DEF(SW_API_PT_VLAN_TRANS_ITERATE, fal_port_vlan_trans_iterate), \ + SW_API_DEF(SW_API_PT_MAC_VLAN_XLT_SET, fal_port_mac_vlan_xlt_set), \ + SW_API_DEF(SW_API_PT_MAC_VLAN_XLT_GET, fal_port_mac_vlan_xlt_get), \ + SW_API_DEF(SW_API_NETISOLATE_SET, fal_netisolate_set), \ + SW_API_DEF(SW_API_NETISOLATE_GET, fal_netisolate_get), \ + SW_API_DEF(SW_API_EG_FLTR_BYPASS_EN_SET, fal_eg_trans_filter_bypass_en_set), \ + SW_API_DEF(SW_API_EG_FLTR_BYPASS_EN_GET, fal_eg_trans_filter_bypass_en_get), \ + SW_API_DEF(SW_API_PT_VRF_ID_SET, fal_port_vrf_id_set), \ + SW_API_DEF(SW_API_PT_VRF_ID_GET, fal_port_vrf_id_get), \ + SW_API_DEF(SW_API_GLOBAL_QINQ_MODE_SET, fal_global_qinq_mode_set), \ + SW_API_DEF(SW_API_GLOBAL_QINQ_MODE_GET, 
fal_global_qinq_mode_get), \ + SW_API_DEF(SW_API_PORT_QINQ_MODE_SET, fal_port_qinq_mode_set), \ + SW_API_DEF(SW_API_PORT_QINQ_MODE_GET, fal_port_qinq_mode_get), \ + SW_API_DEF(SW_API_TPID_SET, fal_ingress_tpid_set), \ + SW_API_DEF(SW_API_TPID_GET, fal_ingress_tpid_get), \ + SW_API_DEF(SW_API_EGRESS_TPID_SET, fal_egress_tpid_set), \ + SW_API_DEF(SW_API_EGRESS_TPID_GET, fal_egress_tpid_get), \ + SW_API_DEF(SW_API_PT_INGRESS_VLAN_FILTER_SET, fal_port_ingress_vlan_filter_set), \ + SW_API_DEF(SW_API_PT_INGRESS_VLAN_FILTER_GET, fal_port_ingress_vlan_filter_get), \ + SW_API_DEF(SW_API_PT_DEFAULT_VLANTAG_SET, fal_port_default_vlantag_set), \ + SW_API_DEF(SW_API_PT_DEFAULT_VLANTAG_GET, fal_port_default_vlantag_get), \ + SW_API_DEF(SW_API_PT_TAG_PROPAGATION_SET, fal_port_tag_propagation_set), \ + SW_API_DEF(SW_API_PT_TAG_PROPAGATION_GET, fal_port_tag_propagation_get), \ + SW_API_DEF(SW_API_PT_VLANTAG_EGMODE_SET, fal_port_vlantag_egmode_set), \ + SW_API_DEF(SW_API_PT_VLANTAG_EGMODE_GET, fal_port_vlantag_egmode_get), \ + SW_API_DEF(SW_API_PT_VLAN_XLT_MISS_CMD_SET, fal_port_vlan_xlt_miss_cmd_set), \ + SW_API_DEF(SW_API_PT_VLAN_XLT_MISS_CMD_GET, fal_port_vlan_xlt_miss_cmd_get), \ + SW_API_DEF(SW_API_PT_VSI_EGMODE_SET, fal_port_vsi_egmode_set), \ + SW_API_DEF(SW_API_PT_VSI_EGMODE_GET, fal_port_vsi_egmode_get), \ + SW_API_DEF(SW_API_PT_VLANTAG_VSI_EGMODE_EN_SET, fal_port_vlantag_vsi_egmode_enable), \ + SW_API_DEF(SW_API_PT_VLANTAG_VSI_EGMODE_EN_GET, fal_port_vlantag_vsi_egmode_status_get), \ + SW_API_DEF(SW_API_PT_VLAN_TRANS_ADV_ADD, fal_port_vlan_trans_adv_add), \ + SW_API_DEF(SW_API_PT_VLAN_TRANS_ADV_DEL, fal_port_vlan_trans_adv_del), \ + SW_API_DEF(SW_API_PT_VLAN_TRANS_ADV_GETFIRST, fal_port_vlan_trans_adv_getfirst), \ + SW_API_DEF(SW_API_PT_VLAN_TRANS_ADV_GETNEXT, fal_port_vlan_trans_adv_getnext), \ + SW_API_DEF(SW_API_PT_VLAN_COUNTER_GET, fal_port_vlan_counter_get), \ + SW_API_DEF(SW_API_PT_VLAN_COUNTER_CLEANUP, fal_port_vlan_counter_cleanup), + +#define PORTVLAN_API_PARAM \ 
+ SW_API_DESC(SW_API_PT_ING_MODE_GET) \ + SW_API_DESC(SW_API_PT_ING_MODE_SET) \ + SW_API_DESC(SW_API_PT_EG_MODE_GET) \ + SW_API_DESC(SW_API_PT_EG_MODE_SET) \ + SW_API_DESC(SW_API_PT_VLAN_MEM_ADD) \ + SW_API_DESC(SW_API_PT_VLAN_MEM_DEL) \ + SW_API_DESC(SW_API_PT_VLAN_MEM_UPDATE) \ + SW_API_DESC(SW_API_PT_VLAN_MEM_GET) \ + SW_API_DESC(SW_API_PT_DEF_VID_SET) \ + SW_API_DESC(SW_API_PT_DEF_VID_GET) \ + SW_API_DESC(SW_API_PT_FORCE_DEF_VID_SET) \ + SW_API_DESC(SW_API_PT_FORCE_DEF_VID_GET) \ + SW_API_DESC(SW_API_PT_FORCE_PORTVLAN_SET) \ + SW_API_DESC(SW_API_PT_FORCE_PORTVLAN_GET) \ + SW_API_DESC(SW_API_PT_NESTVLAN_SET) \ + SW_API_DESC(SW_API_PT_NESTVLAN_GET) \ + SW_API_DESC(SW_API_NESTVLAN_TPID_SET) \ + SW_API_DESC(SW_API_NESTVLAN_TPID_GET) \ + SW_API_DESC(SW_API_PT_IN_VLAN_MODE_SET) \ + SW_API_DESC(SW_API_PT_IN_VLAN_MODE_GET) \ + SW_API_DESC(SW_API_PT_TLS_SET) \ + SW_API_DESC(SW_API_PT_TLS_GET) \ + SW_API_DESC(SW_API_PT_PRI_PROPAGATION_SET) \ + SW_API_DESC(SW_API_PT_PRI_PROPAGATION_GET) \ + SW_API_DESC(SW_API_PT_DEF_SVID_SET) \ + SW_API_DESC(SW_API_PT_DEF_SVID_GET) \ + SW_API_DESC(SW_API_PT_DEF_CVID_SET) \ + SW_API_DESC(SW_API_PT_DEF_CVID_GET) \ + SW_API_DESC(SW_API_PT_VLAN_PROPAGATION_SET) \ + SW_API_DESC(SW_API_PT_VLAN_PROPAGATION_GET) \ + SW_API_DESC(SW_API_PT_VLAN_TRANS_ADD) \ + SW_API_DESC(SW_API_PT_VLAN_TRANS_DEL) \ + SW_API_DESC(SW_API_PT_VLAN_TRANS_GET) \ + SW_API_DESC(SW_API_QINQ_MODE_SET) \ + SW_API_DESC(SW_API_QINQ_MODE_GET) \ + SW_API_DESC(SW_API_PT_QINQ_ROLE_SET) \ + SW_API_DESC(SW_API_PT_QINQ_ROLE_GET) \ + SW_API_DESC(SW_API_PT_VLAN_TRANS_ITERATE) \ + SW_API_DESC(SW_API_PT_MAC_VLAN_XLT_SET) \ + SW_API_DESC(SW_API_PT_MAC_VLAN_XLT_GET) \ + SW_API_DESC(SW_API_NETISOLATE_SET) \ + SW_API_DESC(SW_API_NETISOLATE_GET) \ + SW_API_DESC(SW_API_EG_FLTR_BYPASS_EN_SET) \ + SW_API_DESC(SW_API_EG_FLTR_BYPASS_EN_GET) \ + SW_API_DESC(SW_API_PT_VRF_ID_SET) \ + SW_API_DESC(SW_API_PT_VRF_ID_GET) \ + SW_API_DESC(SW_API_GLOBAL_QINQ_MODE_SET) \ + 
SW_API_DESC(SW_API_GLOBAL_QINQ_MODE_GET) \ + SW_API_DESC(SW_API_PORT_QINQ_MODE_SET) \ + SW_API_DESC(SW_API_PORT_QINQ_MODE_GET) \ + SW_API_DESC(SW_API_TPID_SET) \ + SW_API_DESC(SW_API_TPID_GET) \ + SW_API_DESC(SW_API_EGRESS_TPID_SET) \ + SW_API_DESC(SW_API_EGRESS_TPID_GET) \ + SW_API_DESC(SW_API_PT_INGRESS_VLAN_FILTER_SET) \ + SW_API_DESC(SW_API_PT_INGRESS_VLAN_FILTER_GET) \ + SW_API_DESC(SW_API_PT_DEFAULT_VLANTAG_SET) \ + SW_API_DESC(SW_API_PT_DEFAULT_VLANTAG_GET) \ + SW_API_DESC(SW_API_PT_TAG_PROPAGATION_SET) \ + SW_API_DESC(SW_API_PT_TAG_PROPAGATION_GET) \ + SW_API_DESC(SW_API_PT_VLANTAG_EGMODE_SET) \ + SW_API_DESC(SW_API_PT_VLANTAG_EGMODE_GET) \ + SW_API_DESC(SW_API_PT_VLAN_XLT_MISS_CMD_SET) \ + SW_API_DESC(SW_API_PT_VLAN_XLT_MISS_CMD_GET) \ + SW_API_DESC(SW_API_PT_VSI_EGMODE_SET) \ + SW_API_DESC(SW_API_PT_VSI_EGMODE_GET) \ + SW_API_DESC(SW_API_PT_VLANTAG_VSI_EGMODE_EN_SET) \ + SW_API_DESC(SW_API_PT_VLANTAG_VSI_EGMODE_EN_GET) \ + SW_API_DESC(SW_API_PT_VLAN_TRANS_ADV_ADD) \ + SW_API_DESC(SW_API_PT_VLAN_TRANS_ADV_DEL) \ + SW_API_DESC(SW_API_PT_VLAN_TRANS_ADV_GETFIRST) \ + SW_API_DESC(SW_API_PT_VLAN_TRANS_ADV_GETNEXT) \ + SW_API_DESC(SW_API_PT_VLAN_COUNTER_GET) \ + SW_API_DESC(SW_API_PT_VLAN_COUNTER_CLEANUP) + + +#else +#define PORTVLAN_API +#define PORTVLAN_API_PARAM +#endif + +#ifdef IN_FDB +#define FDB_API \ + SW_API_DEF(SW_API_FDB_ADD, fal_fdb_entry_add), \ + SW_API_DEF(SW_API_FDB_DELALL, fal_fdb_entry_flush), \ + SW_API_DEF(SW_API_FDB_DELPORT,fal_fdb_entry_del_byport), \ + SW_API_DEF(SW_API_FDB_DELMAC, fal_fdb_entry_del_bymac), \ + SW_API_DEF(SW_API_FDB_FIRST, fal_fdb_entry_getfirst), \ + SW_API_DEF(SW_API_FDB_NEXT, fal_fdb_entry_getnext), \ + SW_API_DEF(SW_API_FDB_FIND, fal_fdb_entry_search), \ + SW_API_DEF(SW_API_FDB_PT_LEARN_SET, fal_fdb_port_learn_set), \ + SW_API_DEF(SW_API_FDB_PT_LEARN_GET, fal_fdb_port_learn_get), \ + SW_API_DEF(SW_API_FDB_PT_NEWADDR_LEARN_SET, fal_fdb_port_learning_ctrl_set), \ + SW_API_DEF(SW_API_FDB_PT_NEWADDR_LEARN_GET, 
fal_fdb_port_learning_ctrl_get), \ + SW_API_DEF(SW_API_FDB_PT_STAMOVE_SET, fal_fdb_port_stamove_ctrl_set), \ + SW_API_DEF(SW_API_FDB_PT_STAMOVE_GET, fal_fdb_port_stamove_ctrl_get), \ + SW_API_DEF(SW_API_FDB_AGE_CTRL_SET, fal_fdb_aging_ctrl_set), \ + SW_API_DEF(SW_API_FDB_AGE_CTRL_GET, fal_fdb_aging_ctrl_get), \ + SW_API_DEF(SW_API_FDB_LEARN_CTRL_SET, fal_fdb_learning_ctrl_set), \ + SW_API_DEF(SW_API_FDB_LEARN_CTRL_GET, fal_fdb_learning_ctrl_get), \ + SW_API_DEF(SW_API_FDB_VLAN_IVL_SVL_SET, fal_fdb_vlan_ivl_svl_set),\ + SW_API_DEF(SW_API_FDB_VLAN_IVL_SVL_GET, fal_fdb_vlan_ivl_svl_get),\ + SW_API_DEF(SW_API_FDB_AGE_TIME_SET, fal_fdb_aging_time_set), \ + SW_API_DEF(SW_API_FDB_AGE_TIME_GET, fal_fdb_aging_time_get), \ + SW_API_DEF(SW_API_FDB_ITERATE, fal_fdb_entry_getnext_byindex), \ + SW_API_DEF(SW_API_FDB_EXTEND_NEXT, fal_fdb_entry_extend_getnext), \ + SW_API_DEF(SW_API_FDB_EXTEND_FIRST, fal_fdb_entry_extend_getfirst), \ + SW_API_DEF(SW_API_FDB_TRANSFER, fal_fdb_entry_update_byport), \ + SW_API_DEF(SW_API_PT_FDB_LEARN_COUNTER_GET, fal_fdb_port_learned_mac_counter_get), \ + SW_API_DEF(SW_API_PT_FDB_LEARN_LIMIT_SET, fal_port_fdb_learn_limit_set), \ + SW_API_DEF(SW_API_PT_FDB_LEARN_LIMIT_GET, fal_port_fdb_learn_limit_get), \ + SW_API_DEF(SW_API_PT_FDB_LEARN_EXCEED_CMD_SET, fal_port_fdb_learn_exceed_cmd_set), \ + SW_API_DEF(SW_API_PT_FDB_LEARN_EXCEED_CMD_GET, fal_port_fdb_learn_exceed_cmd_get), \ + SW_API_DEF(SW_API_FDB_LEARN_LIMIT_SET, fal_fdb_learn_limit_set), \ + SW_API_DEF(SW_API_FDB_LEARN_LIMIT_GET, fal_fdb_learn_limit_get), \ + SW_API_DEF(SW_API_FDB_LEARN_EXCEED_CMD_SET, fal_fdb_learn_exceed_cmd_set), \ + SW_API_DEF(SW_API_FDB_LEARN_EXCEED_CMD_GET, fal_fdb_learn_exceed_cmd_get), \ + SW_API_DEF(SW_API_FDB_RESV_ADD, fal_fdb_resv_add), \ + SW_API_DEF(SW_API_FDB_RESV_DEL, fal_fdb_resv_del), \ + SW_API_DEF(SW_API_FDB_RESV_FIND, fal_fdb_resv_find), \ + SW_API_DEF(SW_API_FDB_RESV_ITERATE, fal_fdb_resv_iterate), \ + SW_API_DEF(SW_API_FDB_PT_LEARN_STATIC_SET, 
fal_fdb_port_learn_static_set), \ + SW_API_DEF(SW_API_FDB_PT_LEARN_STATIC_GET, fal_fdb_port_learn_static_get), \ + SW_API_DEF(SW_API_FDB_PORT_ADD, fal_fdb_port_add), \ + SW_API_DEF(SW_API_FDB_PORT_DEL, fal_fdb_port_del), \ + SW_API_DEF(SW_API_FDB_RFS_SET, fal_fdb_rfs_set), \ + SW_API_DEF(SW_API_FDB_RFS_DEL, fal_fdb_rfs_del), \ + SW_API_DEF(SW_API_FDB_PT_MACLIMIT_CTRL_SET, fal_fdb_port_maclimit_ctrl_set), \ + SW_API_DEF(SW_API_FDB_PT_MACLIMIT_CTRL_GET, fal_fdb_port_maclimit_ctrl_get), \ + SW_API_DEF(SW_API_FDB_DEL_BY_FID, fal_fdb_entry_del_byfid), + +#define FDB_API_PARAM \ + SW_API_DESC(SW_API_FDB_ADD) \ + SW_API_DESC(SW_API_FDB_DELALL) \ + SW_API_DESC(SW_API_FDB_DELPORT) \ + SW_API_DESC(SW_API_FDB_DELMAC) \ + SW_API_DESC(SW_API_FDB_FIRST) \ + SW_API_DESC(SW_API_FDB_NEXT) \ + SW_API_DESC(SW_API_FDB_FIND) \ + SW_API_DESC(SW_API_FDB_PT_LEARN_SET) \ + SW_API_DESC(SW_API_FDB_PT_LEARN_GET) \ + SW_API_DESC(SW_API_FDB_PT_NEWADDR_LEARN_SET) \ + SW_API_DESC(SW_API_FDB_PT_NEWADDR_LEARN_GET) \ + SW_API_DESC(SW_API_FDB_PT_STAMOVE_SET) \ + SW_API_DESC(SW_API_FDB_PT_STAMOVE_GET) \ + SW_API_DESC(SW_API_FDB_AGE_CTRL_SET) \ + SW_API_DESC(SW_API_FDB_AGE_CTRL_GET) \ + SW_API_DESC(SW_API_FDB_LEARN_CTRL_SET) \ + SW_API_DESC(SW_API_FDB_LEARN_CTRL_GET) \ + SW_API_DESC(SW_API_FDB_VLAN_IVL_SVL_SET) \ + SW_API_DESC(SW_API_FDB_VLAN_IVL_SVL_GET) \ + SW_API_DESC(SW_API_FDB_AGE_TIME_SET) \ + SW_API_DESC(SW_API_FDB_AGE_TIME_GET) \ + SW_API_DESC(SW_API_FDB_ITERATE) \ + SW_API_DESC(SW_API_FDB_EXTEND_NEXT) \ + SW_API_DESC(SW_API_FDB_EXTEND_FIRST) \ + SW_API_DESC(SW_API_FDB_TRANSFER) \ + SW_API_DESC(SW_API_PT_FDB_LEARN_COUNTER_GET) \ + SW_API_DESC(SW_API_PT_FDB_LEARN_LIMIT_SET) \ + SW_API_DESC(SW_API_PT_FDB_LEARN_LIMIT_GET) \ + SW_API_DESC(SW_API_PT_FDB_LEARN_EXCEED_CMD_SET) \ + SW_API_DESC(SW_API_PT_FDB_LEARN_EXCEED_CMD_GET) \ + SW_API_DESC(SW_API_FDB_LEARN_LIMIT_SET) \ + SW_API_DESC(SW_API_FDB_LEARN_LIMIT_GET) \ + SW_API_DESC(SW_API_FDB_LEARN_EXCEED_CMD_SET) \ + 
SW_API_DESC(SW_API_FDB_LEARN_EXCEED_CMD_GET) \ + SW_API_DESC(SW_API_FDB_RESV_ADD) \ + SW_API_DESC(SW_API_FDB_RESV_DEL) \ + SW_API_DESC(SW_API_FDB_RESV_FIND) \ + SW_API_DESC(SW_API_FDB_RESV_ITERATE) \ + SW_API_DESC(SW_API_FDB_PT_LEARN_STATIC_SET) \ + SW_API_DESC(SW_API_FDB_PT_LEARN_STATIC_GET) \ + SW_API_DESC(SW_API_FDB_PORT_ADD) \ + SW_API_DESC(SW_API_FDB_PORT_DEL) \ + SW_API_DESC(SW_API_FDB_RFS_SET) \ + SW_API_DESC(SW_API_FDB_RFS_DEL) \ + SW_API_DESC(SW_API_FDB_PT_MACLIMIT_CTRL_SET) \ + SW_API_DESC(SW_API_FDB_PT_MACLIMIT_CTRL_GET) \ + SW_API_DESC(SW_API_FDB_DEL_BY_FID) +#else +#define FDB_API +#define FDB_API_PARAM +#endif + +#ifdef IN_ACL +#define ACL_API \ + SW_API_DEF(SW_API_ACL_LIST_CREAT, fal_acl_list_creat), \ + SW_API_DEF(SW_API_ACL_LIST_DESTROY, fal_acl_list_destroy), \ + SW_API_DEF(SW_API_ACL_RULE_ADD, fal_acl_rule_add), \ + SW_API_DEF(SW_API_ACL_RULE_DELETE, fal_acl_rule_delete), \ + SW_API_DEF(SW_API_ACL_RULE_QUERY, fal_acl_rule_query), \ + SW_API_DEF(SW_API_ACL_LIST_BIND, fal_acl_list_bind), \ + SW_API_DEF(SW_API_ACL_LIST_UNBIND, fal_acl_list_unbind), \ + SW_API_DEF(SW_API_ACL_STATUS_SET, fal_acl_status_set), \ + SW_API_DEF(SW_API_ACL_STATUS_GET, fal_acl_status_get), \ + SW_API_DEF(SW_API_ACL_LIST_DUMP, fal_acl_list_dump), \ + SW_API_DEF(SW_API_ACL_RULE_DUMP, fal_acl_rule_dump), \ + SW_API_DEF(SW_API_ACL_PT_UDF_PROFILE_SET, fal_acl_port_udf_profile_set), \ + SW_API_DEF(SW_API_ACL_PT_UDF_PROFILE_GET, fal_acl_port_udf_profile_get), \ + SW_API_DEF(SW_API_ACL_RULE_ACTIVE, fal_acl_rule_active), \ + SW_API_DEF(SW_API_ACL_RULE_DEACTIVE, fal_acl_rule_deactive),\ + SW_API_DEF(SW_API_ACL_RULE_SRC_FILTER_STS_SET, fal_acl_rule_src_filter_sts_set),\ + SW_API_DEF(SW_API_ACL_RULE_SRC_FILTER_STS_GET, fal_acl_rule_src_filter_sts_get),\ + SW_API_DEF(SW_API_ACL_UDF_SET, fal_acl_udf_profile_set),\ + SW_API_DEF(SW_API_ACL_UDF_GET, fal_acl_udf_profile_get), + +#define ACL_API_PARAM \ + SW_API_DESC(SW_API_ACL_LIST_CREAT) \ + SW_API_DESC(SW_API_ACL_LIST_DESTROY) \ + 
SW_API_DESC(SW_API_ACL_RULE_ADD) \ + SW_API_DESC(SW_API_ACL_RULE_DELETE) \ + SW_API_DESC(SW_API_ACL_RULE_QUERY) \ + SW_API_DESC(SW_API_ACL_LIST_BIND) \ + SW_API_DESC(SW_API_ACL_LIST_UNBIND) \ + SW_API_DESC(SW_API_ACL_STATUS_SET) \ + SW_API_DESC(SW_API_ACL_STATUS_GET) \ + SW_API_DESC(SW_API_ACL_LIST_DUMP) \ + SW_API_DESC(SW_API_ACL_RULE_DUMP) \ + SW_API_DESC(SW_API_ACL_PT_UDF_PROFILE_SET) \ + SW_API_DESC(SW_API_ACL_PT_UDF_PROFILE_GET) \ + SW_API_DESC(SW_API_ACL_RULE_ACTIVE) \ + SW_API_DESC(SW_API_ACL_RULE_DEACTIVE) \ + SW_API_DESC(SW_API_ACL_RULE_SRC_FILTER_STS_SET)\ + SW_API_DESC(SW_API_ACL_RULE_SRC_FILTER_STS_GET)\ + SW_API_DESC(SW_API_ACL_UDF_SET) \ + SW_API_DESC(SW_API_ACL_UDF_GET) +#else +#define ACL_API +#define ACL_API_PARAM +#endif + +#ifdef IN_QOS +#define QOS_API \ + SW_API_DEF(SW_API_QOS_SCH_MODE_SET, fal_qos_sch_mode_set), \ + SW_API_DEF(SW_API_QOS_SCH_MODE_GET, fal_qos_sch_mode_get), \ + SW_API_DEF(SW_API_QOS_QU_TX_BUF_ST_SET, fal_qos_queue_tx_buf_status_set), \ + SW_API_DEF(SW_API_QOS_QU_TX_BUF_ST_GET, fal_qos_queue_tx_buf_status_get), \ + SW_API_DEF(SW_API_QOS_QU_TX_BUF_NR_SET, fal_qos_queue_tx_buf_nr_set), \ + SW_API_DEF(SW_API_QOS_QU_TX_BUF_NR_GET, fal_qos_queue_tx_buf_nr_get), \ + SW_API_DEF(SW_API_QOS_PT_TX_BUF_ST_SET, fal_qos_port_tx_buf_status_set), \ + SW_API_DEF(SW_API_QOS_PT_TX_BUF_ST_GET, fal_qos_port_tx_buf_status_get), \ + SW_API_DEF(SW_API_QOS_PT_RED_EN_SET, fal_qos_port_red_en_set), \ + SW_API_DEF(SW_API_QOS_PT_RED_EN_GET, fal_qos_port_red_en_get), \ + SW_API_DEF(SW_API_QOS_PT_TX_BUF_NR_SET, fal_qos_port_tx_buf_nr_set), \ + SW_API_DEF(SW_API_QOS_PT_TX_BUF_NR_GET, fal_qos_port_tx_buf_nr_get), \ + SW_API_DEF(SW_API_QOS_PT_RX_BUF_NR_SET, fal_qos_port_rx_buf_nr_set), \ + SW_API_DEF(SW_API_QOS_PT_RX_BUF_NR_GET, fal_qos_port_rx_buf_nr_get), \ + SW_API_DEF(SW_API_COSMAP_UP_QU_SET, fal_cosmap_up_queue_set), \ + SW_API_DEF(SW_API_COSMAP_UP_QU_GET, fal_cosmap_up_queue_get), \ + SW_API_DEF(SW_API_COSMAP_DSCP_QU_SET, fal_cosmap_dscp_queue_set), \ + 
SW_API_DEF(SW_API_COSMAP_DSCP_QU_GET, fal_cosmap_dscp_queue_get), \ + SW_API_DEF(SW_API_QOS_PT_MODE_SET, fal_qos_port_mode_set), \ + SW_API_DEF(SW_API_QOS_PT_MODE_GET, fal_qos_port_mode_get), \ + SW_API_DEF(SW_API_QOS_PT_MODE_PRI_SET, fal_qos_port_mode_pri_set), \ + SW_API_DEF(SW_API_QOS_PT_MODE_PRI_GET, fal_qos_port_mode_pri_get), \ + SW_API_DEF(SW_API_QOS_PORT_DEF_UP_SET, fal_qos_port_default_up_set), \ + SW_API_DEF(SW_API_QOS_PORT_DEF_UP_GET, fal_qos_port_default_up_get), \ + SW_API_DEF(SW_API_QOS_PORT_SCH_MODE_SET, fal_qos_port_sch_mode_set), \ + SW_API_DEF(SW_API_QOS_PORT_SCH_MODE_GET, fal_qos_port_sch_mode_get), \ + SW_API_DEF(SW_API_QOS_PT_DEF_SPRI_SET, fal_qos_port_default_spri_set), \ + SW_API_DEF(SW_API_QOS_PT_DEF_SPRI_GET, fal_qos_port_default_spri_get), \ + SW_API_DEF(SW_API_QOS_PT_DEF_CPRI_SET, fal_qos_port_default_cpri_set), \ + SW_API_DEF(SW_API_QOS_PT_DEF_CPRI_GET, fal_qos_port_default_cpri_get), \ + SW_API_DEF(SW_API_QOS_PT_FORCE_SPRI_ST_SET, fal_qos_port_force_spri_status_set), \ + SW_API_DEF(SW_API_QOS_PT_FORCE_SPRI_ST_GET, fal_qos_port_force_spri_status_get), \ + SW_API_DEF(SW_API_QOS_PT_FORCE_CPRI_ST_SET, fal_qos_port_force_cpri_status_set), \ + SW_API_DEF(SW_API_QOS_PT_FORCE_CPRI_ST_GET, fal_qos_port_force_cpri_status_get), \ + SW_API_DEF(SW_API_QOS_QUEUE_REMARK_SET, fal_qos_queue_remark_table_set), \ + SW_API_DEF(SW_API_QOS_QUEUE_REMARK_GET, fal_qos_queue_remark_table_get), \ + SW_API_DEF(SW_API_QOS_PORT_GROUP_GET, fal_qos_port_group_get), \ + SW_API_DEF(SW_API_QOS_PORT_GROUP_SET, fal_qos_port_group_set), \ + SW_API_DEF(SW_API_QOS_PORT_PRI_GET, fal_qos_port_pri_precedence_get), \ + SW_API_DEF(SW_API_QOS_PORT_PRI_SET, fal_qos_port_pri_precedence_set), \ + SW_API_DEF(SW_API_QOS_PORT_REMARK_GET, fal_qos_port_remark_get), \ + SW_API_DEF(SW_API_QOS_PORT_REMARK_SET, fal_qos_port_remark_set), \ + SW_API_DEF(SW_API_QOS_PCP_MAP_GET, fal_qos_cosmap_pcp_get), \ + SW_API_DEF(SW_API_QOS_PCP_MAP_SET, fal_qos_cosmap_pcp_set), \ + 
SW_API_DEF(SW_API_QOS_FLOW_MAP_GET, fal_qos_cosmap_flow_get), \ + SW_API_DEF(SW_API_QOS_FLOW_MAP_SET, fal_qos_cosmap_flow_set), \ + SW_API_DEF(SW_API_QOS_DSCP_MAP_GET, fal_qos_cosmap_dscp_get), \ + SW_API_DEF(SW_API_QOS_DSCP_MAP_SET, fal_qos_cosmap_dscp_set), \ + SW_API_DEF(SW_API_QOS_QUEUE_SCHEDULER_GET, fal_queue_scheduler_get), \ + SW_API_DEF(SW_API_QOS_QUEUE_SCHEDULER_SET, fal_queue_scheduler_set), \ + SW_API_DEF(SW_API_QOS_RING_QUEUE_MAP_GET, fal_edma_ring_queue_map_get), \ + SW_API_DEF(SW_API_QOS_RING_QUEUE_MAP_SET, fal_edma_ring_queue_map_set), \ + SW_API_DEF(SW_API_QOS_PORT_QUEUES_GET, fal_port_queues_get), \ + SW_API_DEF(SW_API_QOS_SCHEDULER_DEQUEU_CTRL_GET, fal_scheduler_dequeue_ctrl_get), \ + SW_API_DEF(SW_API_QOS_SCHEDULER_DEQUEU_CTRL_SET, fal_scheduler_dequeue_ctrl_set), \ + SW_API_DEF(SW_API_QOS_PORT_SCHEDULER_CFG_RESET, fal_port_scheduler_cfg_reset), \ + SW_API_DEF(SW_API_QOS_PORT_SCHEDULER_RESOURCE_GET, fal_port_scheduler_resource_get), + +#define QOS_API_PARAM \ + SW_API_DESC(SW_API_QOS_SCH_MODE_SET) \ + SW_API_DESC(SW_API_QOS_SCH_MODE_GET) \ + SW_API_DESC(SW_API_QOS_QU_TX_BUF_ST_SET) \ + SW_API_DESC(SW_API_QOS_QU_TX_BUF_ST_GET) \ + SW_API_DESC(SW_API_QOS_QU_TX_BUF_NR_SET) \ + SW_API_DESC(SW_API_QOS_QU_TX_BUF_NR_GET) \ + SW_API_DESC(SW_API_QOS_PT_TX_BUF_ST_SET) \ + SW_API_DESC(SW_API_QOS_PT_TX_BUF_ST_GET) \ + SW_API_DESC(SW_API_QOS_PT_RED_EN_SET)\ + SW_API_DESC(SW_API_QOS_PT_RED_EN_GET)\ + SW_API_DESC(SW_API_QOS_PT_TX_BUF_NR_SET) \ + SW_API_DESC(SW_API_QOS_PT_TX_BUF_NR_GET) \ + SW_API_DESC(SW_API_QOS_PT_RX_BUF_NR_SET) \ + SW_API_DESC(SW_API_QOS_PT_RX_BUF_NR_GET) \ + SW_API_DESC(SW_API_COSMAP_UP_QU_SET) \ + SW_API_DESC(SW_API_COSMAP_UP_QU_GET) \ + SW_API_DESC(SW_API_COSMAP_DSCP_QU_SET) \ + SW_API_DESC(SW_API_COSMAP_DSCP_QU_GET) \ + SW_API_DESC(SW_API_QOS_PT_MODE_SET) \ + SW_API_DESC(SW_API_QOS_PT_MODE_GET) \ + SW_API_DESC(SW_API_QOS_PT_MODE_PRI_SET) \ + SW_API_DESC(SW_API_QOS_PT_MODE_PRI_GET) \ + SW_API_DESC(SW_API_QOS_PORT_DEF_UP_SET) \ + 
SW_API_DESC(SW_API_QOS_PORT_DEF_UP_GET) \ + SW_API_DESC(SW_API_QOS_PORT_SCH_MODE_SET) \ + SW_API_DESC(SW_API_QOS_PORT_SCH_MODE_GET) \ + SW_API_DESC(SW_API_QOS_PT_DEF_SPRI_SET) \ + SW_API_DESC(SW_API_QOS_PT_DEF_SPRI_GET) \ + SW_API_DESC(SW_API_QOS_PT_DEF_CPRI_SET) \ + SW_API_DESC(SW_API_QOS_PT_DEF_CPRI_GET) \ + SW_API_DESC(SW_API_QOS_PT_FORCE_SPRI_ST_SET) \ + SW_API_DESC(SW_API_QOS_PT_FORCE_SPRI_ST_GET) \ + SW_API_DESC(SW_API_QOS_PT_FORCE_CPRI_ST_SET) \ + SW_API_DESC(SW_API_QOS_PT_FORCE_CPRI_ST_GET) \ + SW_API_DESC(SW_API_QOS_QUEUE_REMARK_SET) \ + SW_API_DESC(SW_API_QOS_QUEUE_REMARK_GET) \ + SW_API_DESC(SW_API_QOS_PORT_GROUP_GET) \ + SW_API_DESC(SW_API_QOS_PORT_GROUP_SET) \ + SW_API_DESC(SW_API_QOS_PORT_PRI_GET) \ + SW_API_DESC(SW_API_QOS_PORT_PRI_SET) \ + SW_API_DESC(SW_API_QOS_PORT_REMARK_GET) \ + SW_API_DESC(SW_API_QOS_PORT_REMARK_SET) \ + SW_API_DESC(SW_API_QOS_PCP_MAP_GET) \ + SW_API_DESC(SW_API_QOS_PCP_MAP_SET) \ + SW_API_DESC(SW_API_QOS_FLOW_MAP_GET) \ + SW_API_DESC(SW_API_QOS_FLOW_MAP_SET) \ + SW_API_DESC(SW_API_QOS_DSCP_MAP_GET) \ + SW_API_DESC(SW_API_QOS_DSCP_MAP_SET) \ + SW_API_DESC(SW_API_QOS_QUEUE_SCHEDULER_GET) \ + SW_API_DESC(SW_API_QOS_QUEUE_SCHEDULER_SET) \ + SW_API_DESC(SW_API_QOS_RING_QUEUE_MAP_GET) \ + SW_API_DESC(SW_API_QOS_RING_QUEUE_MAP_SET)\ + SW_API_DESC(SW_API_QOS_PORT_QUEUES_GET) \ + SW_API_DESC(SW_API_QOS_SCHEDULER_DEQUEU_CTRL_GET) \ + SW_API_DESC(SW_API_QOS_SCHEDULER_DEQUEU_CTRL_SET) \ + SW_API_DESC(SW_API_QOS_PORT_SCHEDULER_CFG_RESET) \ + SW_API_DESC(SW_API_QOS_PORT_SCHEDULER_RESOURCE_GET) +#else +#define QOS_API +#define QOS_API_PARAM +#endif + +#ifdef IN_IGMP +#define IGMP_API \ + SW_API_DEF(SW_API_PT_IGMPS_MODE_SET, fal_port_igmps_status_set), \ + SW_API_DEF(SW_API_PT_IGMPS_MODE_GET, fal_port_igmps_status_get), \ + SW_API_DEF(SW_API_IGMP_MLD_CMD_SET, fal_igmp_mld_cmd_set), \ + SW_API_DEF(SW_API_IGMP_MLD_CMD_GET, fal_igmp_mld_cmd_get), \ + SW_API_DEF(SW_API_IGMP_PT_JOIN_SET, fal_port_igmp_mld_join_set), \ + 
SW_API_DEF(SW_API_IGMP_PT_JOIN_GET, fal_port_igmp_mld_join_get), \ + SW_API_DEF(SW_API_IGMP_PT_LEAVE_SET, fal_port_igmp_mld_leave_set), \ + SW_API_DEF(SW_API_IGMP_PT_LEAVE_GET, fal_port_igmp_mld_leave_get), \ + SW_API_DEF(SW_API_IGMP_RP_SET, fal_igmp_mld_rp_set), \ + SW_API_DEF(SW_API_IGMP_RP_GET, fal_igmp_mld_rp_get), \ + SW_API_DEF(SW_API_IGMP_ENTRY_CREAT_SET, fal_igmp_mld_entry_creat_set), \ + SW_API_DEF(SW_API_IGMP_ENTRY_CREAT_GET, fal_igmp_mld_entry_creat_get), \ + SW_API_DEF(SW_API_IGMP_ENTRY_STATIC_SET, fal_igmp_mld_entry_static_set), \ + SW_API_DEF(SW_API_IGMP_ENTRY_STATIC_GET, fal_igmp_mld_entry_static_get), \ + SW_API_DEF(SW_API_IGMP_ENTRY_LEAKY_SET, fal_igmp_mld_entry_leaky_set), \ + SW_API_DEF(SW_API_IGMP_ENTRY_LEAKY_GET, fal_igmp_mld_entry_leaky_get), \ + SW_API_DEF(SW_API_IGMP_ENTRY_V3_SET, fal_igmp_mld_entry_v3_set), \ + SW_API_DEF(SW_API_IGMP_ENTRY_V3_GET, fal_igmp_mld_entry_v3_get), \ + SW_API_DEF(SW_API_IGMP_ENTRY_QUEUE_SET, fal_igmp_mld_entry_queue_set), \ + SW_API_DEF(SW_API_IGMP_ENTRY_QUEUE_GET, fal_igmp_mld_entry_queue_get), \ + SW_API_DEF(SW_API_PT_IGMP_LEARN_LIMIT_SET, fal_port_igmp_mld_learn_limit_set), \ + SW_API_DEF(SW_API_PT_IGMP_LEARN_LIMIT_GET, fal_port_igmp_mld_learn_limit_get), \ + SW_API_DEF(SW_API_PT_IGMP_LEARN_EXCEED_CMD_SET, fal_port_igmp_mld_learn_exceed_cmd_set), \ + SW_API_DEF(SW_API_PT_IGMP_LEARN_EXCEED_CMD_GET, fal_port_igmp_mld_learn_exceed_cmd_get), \ + SW_API_DEF(SW_API_IGMP_SG_ENTRY_SET, fal_igmp_sg_entry_set), \ + SW_API_DEF(SW_API_IGMP_SG_ENTRY_CLEAR, fal_igmp_sg_entry_clear), \ + SW_API_DEF(SW_API_IGMP_SG_ENTRY_SHOW, fal_igmp_sg_entry_show), \ + SW_API_DEF(SW_API_IGMP_SG_ENTRY_QUERY, fal_igmp_sg_entry_query), + +#define IGMP_API_PARAM \ + SW_API_DESC(SW_API_PT_IGMPS_MODE_SET) \ + SW_API_DESC(SW_API_PT_IGMPS_MODE_GET) \ + SW_API_DESC(SW_API_IGMP_MLD_CMD_SET) \ + SW_API_DESC(SW_API_IGMP_MLD_CMD_GET) \ + SW_API_DESC(SW_API_IGMP_PT_JOIN_SET) \ + SW_API_DESC(SW_API_IGMP_PT_JOIN_GET) \ + 
SW_API_DESC(SW_API_IGMP_PT_LEAVE_SET) \ + SW_API_DESC(SW_API_IGMP_PT_LEAVE_GET) \ + SW_API_DESC(SW_API_IGMP_RP_SET) \ + SW_API_DESC(SW_API_IGMP_RP_GET) \ + SW_API_DESC(SW_API_IGMP_ENTRY_CREAT_SET) \ + SW_API_DESC(SW_API_IGMP_ENTRY_CREAT_GET) \ + SW_API_DESC(SW_API_IGMP_ENTRY_STATIC_SET) \ + SW_API_DESC(SW_API_IGMP_ENTRY_STATIC_GET) \ + SW_API_DESC(SW_API_IGMP_ENTRY_LEAKY_SET) \ + SW_API_DESC(SW_API_IGMP_ENTRY_LEAKY_GET) \ + SW_API_DESC(SW_API_IGMP_ENTRY_V3_SET) \ + SW_API_DESC(SW_API_IGMP_ENTRY_V3_GET) \ + SW_API_DESC(SW_API_IGMP_ENTRY_QUEUE_SET) \ + SW_API_DESC(SW_API_IGMP_ENTRY_QUEUE_GET) \ + SW_API_DESC(SW_API_PT_IGMP_LEARN_LIMIT_SET) \ + SW_API_DESC(SW_API_PT_IGMP_LEARN_LIMIT_GET) \ + SW_API_DESC(SW_API_PT_IGMP_LEARN_EXCEED_CMD_SET) \ + SW_API_DESC(SW_API_PT_IGMP_LEARN_EXCEED_CMD_GET) \ + SW_API_DESC(SW_API_IGMP_SG_ENTRY_SET) \ + SW_API_DESC(SW_API_IGMP_SG_ENTRY_CLEAR) \ + SW_API_DESC(SW_API_IGMP_SG_ENTRY_SHOW) \ + SW_API_DESC(SW_API_IGMP_SG_ENTRY_QUERY) +#else +#define IGMP_API +#define IGMP_API_PARAM +#endif + +#ifdef IN_LEAKY +#define LEAKY_API \ + SW_API_DEF(SW_API_UC_LEAKY_MODE_SET, fal_uc_leaky_mode_set), \ + SW_API_DEF(SW_API_UC_LEAKY_MODE_GET, fal_uc_leaky_mode_get), \ + SW_API_DEF(SW_API_MC_LEAKY_MODE_SET, fal_mc_leaky_mode_set), \ + SW_API_DEF(SW_API_MC_LEAKY_MODE_GET, fal_mc_leaky_mode_get), \ + SW_API_DEF(SW_API_ARP_LEAKY_MODE_SET, fal_port_arp_leaky_set), \ + SW_API_DEF(SW_API_ARP_LEAKY_MODE_GET, fal_port_arp_leaky_get), \ + SW_API_DEF(SW_API_PT_UC_LEAKY_MODE_SET, fal_port_uc_leaky_set), \ + SW_API_DEF(SW_API_PT_UC_LEAKY_MODE_GET, fal_port_uc_leaky_get), \ + SW_API_DEF(SW_API_PT_MC_LEAKY_MODE_SET, fal_port_mc_leaky_set), \ + SW_API_DEF(SW_API_PT_MC_LEAKY_MODE_GET, fal_port_mc_leaky_get), + +#define LEAKY_API_PARAM \ + SW_API_DESC(SW_API_UC_LEAKY_MODE_SET) \ + SW_API_DESC(SW_API_UC_LEAKY_MODE_GET) \ + SW_API_DESC(SW_API_MC_LEAKY_MODE_SET) \ + SW_API_DESC(SW_API_MC_LEAKY_MODE_GET) \ + SW_API_DESC(SW_API_ARP_LEAKY_MODE_SET)\ + 
SW_API_DESC(SW_API_ARP_LEAKY_MODE_GET) \ + SW_API_DESC(SW_API_PT_UC_LEAKY_MODE_SET) \ + SW_API_DESC(SW_API_PT_UC_LEAKY_MODE_GET) \ + SW_API_DESC(SW_API_PT_MC_LEAKY_MODE_SET) \ + SW_API_DESC(SW_API_PT_MC_LEAKY_MODE_GET) +#else +#define LEAKY_API +#define LEAKY_API_PARAM +#endif + +#ifdef IN_MIRROR +#define MIRROR_API \ + SW_API_DEF(SW_API_MIRROR_ANALY_PT_SET, fal_mirr_analysis_port_set), \ + SW_API_DEF(SW_API_MIRROR_ANALY_PT_GET, fal_mirr_analysis_port_get), \ + SW_API_DEF(SW_API_MIRROR_IN_PT_SET, fal_mirr_port_in_set), \ + SW_API_DEF(SW_API_MIRROR_IN_PT_GET, fal_mirr_port_in_get), \ + SW_API_DEF(SW_API_MIRROR_EG_PT_SET, fal_mirr_port_eg_set), \ + SW_API_DEF(SW_API_MIRROR_EG_PT_GET, fal_mirr_port_eg_get), \ + SW_API_DEF(SW_API_MIRROR_ANALYSIS_CONFIG_SET, fal_mirr_analysis_config_set), \ + SW_API_DEF(SW_API_MIRROR_ANALYSIS_CONFIG_GET, fal_mirr_analysis_config_get), + +#define MIRROR_API_PARAM \ + SW_API_DESC(SW_API_MIRROR_ANALY_PT_SET) \ + SW_API_DESC(SW_API_MIRROR_ANALY_PT_GET) \ + SW_API_DESC(SW_API_MIRROR_IN_PT_SET) \ + SW_API_DESC(SW_API_MIRROR_IN_PT_GET) \ + SW_API_DESC(SW_API_MIRROR_EG_PT_SET) \ + SW_API_DESC(SW_API_MIRROR_EG_PT_GET) \ + SW_API_DESC(SW_API_MIRROR_ANALYSIS_CONFIG_SET) \ + SW_API_DESC(SW_API_MIRROR_ANALYSIS_CONFIG_GET) +#else +#define MIRROR_API +#define MIRROR_API_PARAM +#endif + +#ifdef IN_RATE +#define RATE_API \ + SW_API_DEF(SW_API_RATE_QU_EGRL_SET, fal_rate_queue_egrl_set), \ + SW_API_DEF(SW_API_RATE_QU_EGRL_GET, fal_rate_queue_egrl_get), \ + SW_API_DEF(SW_API_RATE_PT_EGRL_SET, fal_rate_port_egrl_set), \ + SW_API_DEF(SW_API_RATE_PT_EGRL_GET, fal_rate_port_egrl_get), \ + SW_API_DEF(SW_API_RATE_PT_INRL_SET, fal_rate_port_inrl_set), \ + SW_API_DEF(SW_API_RATE_PT_INRL_GET, fal_rate_port_inrl_get), \ + SW_API_DEF(SW_API_STORM_CTRL_FRAME_SET, fal_storm_ctrl_frame_set), \ + SW_API_DEF(SW_API_STORM_CTRL_FRAME_GET, fal_storm_ctrl_frame_get), \ + SW_API_DEF(SW_API_STORM_CTRL_RATE_SET, fal_storm_ctrl_rate_set), \ + 
SW_API_DEF(SW_API_STORM_CTRL_RATE_GET, fal_storm_ctrl_rate_get), \ + SW_API_DEF(SW_API_RATE_PORT_POLICER_SET, fal_rate_port_policer_set), \ + SW_API_DEF(SW_API_RATE_PORT_POLICER_GET, fal_rate_port_policer_get), \ + SW_API_DEF(SW_API_RATE_PORT_SHAPER_SET, fal_rate_port_shaper_set), \ + SW_API_DEF(SW_API_RATE_PORT_SHAPER_GET, fal_rate_port_shaper_get), \ + SW_API_DEF(SW_API_RATE_QUEUE_SHAPER_SET, fal_rate_queue_shaper_set), \ + SW_API_DEF(SW_API_RATE_QUEUE_SHAPER_GET, fal_rate_queue_shaper_get), \ + SW_API_DEF(SW_API_RATE_ACL_POLICER_SET, fal_rate_acl_policer_set), \ + SW_API_DEF(SW_API_RATE_ACL_POLICER_GET, fal_rate_acl_policer_get), \ + SW_API_DEF(SW_API_RATE_PT_ADDRATEBYTE_SET, fal_rate_port_add_rate_byte_set), \ + SW_API_DEF(SW_API_RATE_PT_ADDRATEBYTE_GET, fal_rate_port_add_rate_byte_get), \ + SW_API_DEF(SW_API_RATE_PT_GOL_FLOW_EN_SET, fal_rate_port_gol_flow_en_set), \ + SW_API_DEF(SW_API_RATE_PT_GOL_FLOW_EN_GET, fal_rate_port_gol_flow_en_get), + +#define RATE_API_PARAM \ + SW_API_DESC(SW_API_RATE_QU_EGRL_SET) \ + SW_API_DESC(SW_API_RATE_QU_EGRL_GET) \ + SW_API_DESC(SW_API_RATE_PT_EGRL_SET) \ + SW_API_DESC(SW_API_RATE_PT_EGRL_GET) \ + SW_API_DESC(SW_API_RATE_PT_INRL_SET) \ + SW_API_DESC(SW_API_RATE_PT_INRL_GET) \ + SW_API_DESC(SW_API_STORM_CTRL_FRAME_SET) \ + SW_API_DESC(SW_API_STORM_CTRL_FRAME_GET) \ + SW_API_DESC(SW_API_STORM_CTRL_RATE_SET) \ + SW_API_DESC(SW_API_STORM_CTRL_RATE_GET) \ + SW_API_DESC(SW_API_RATE_PORT_POLICER_SET) \ + SW_API_DESC(SW_API_RATE_PORT_POLICER_GET) \ + SW_API_DESC(SW_API_RATE_PORT_SHAPER_SET) \ + SW_API_DESC(SW_API_RATE_PORT_SHAPER_GET) \ + SW_API_DESC(SW_API_RATE_QUEUE_SHAPER_SET) \ + SW_API_DESC(SW_API_RATE_QUEUE_SHAPER_GET) \ + SW_API_DESC(SW_API_RATE_ACL_POLICER_SET) \ + SW_API_DESC(SW_API_RATE_ACL_POLICER_GET) \ + SW_API_DESC(SW_API_RATE_PT_ADDRATEBYTE_SET) \ + SW_API_DESC(SW_API_RATE_PT_ADDRATEBYTE_GET) \ + SW_API_DESC(SW_API_RATE_PT_GOL_FLOW_EN_SET) \ + SW_API_DESC(SW_API_RATE_PT_GOL_FLOW_EN_GET) +#else +#define RATE_API 
+#define RATE_API_PARAM +#endif + +#ifdef IN_STP +#define STP_API \ + SW_API_DEF(SW_API_STP_PT_STATE_SET, fal_stp_port_state_set), \ + SW_API_DEF(SW_API_STP_PT_STATE_GET, fal_stp_port_state_get), + +#define STP_API_PARAM \ + SW_API_DESC(SW_API_STP_PT_STATE_SET) \ + SW_API_DESC(SW_API_STP_PT_STATE_GET) +#else +#define STP_API +#define STP_API_PARAM +#endif + +#ifdef IN_MIB +#define MIB_API \ + SW_API_DEF(SW_API_PT_MIB_GET, fal_get_mib_info), \ + SW_API_DEF(SW_API_MIB_STATUS_SET, fal_mib_status_set), \ + SW_API_DEF(SW_API_MIB_STATUS_GET, fal_mib_status_get), \ + SW_API_DEF(SW_API_PT_MIB_FLUSH_COUNTERS, fal_mib_port_flush_counters), \ + SW_API_DEF(SW_API_MIB_CPU_KEEP_SET, fal_mib_cpukeep_set), \ + SW_API_DEF(SW_API_MIB_CPU_KEEP_GET, fal_mib_cpukeep_get),\ + SW_API_DEF(SW_API_PT_XGMIB_GET, fal_get_xgmib_info),\ + SW_API_DEF(SW_API_PT_MIB_COUNTER_GET, fal_mib_counter_get), +#define MIB_API_PARAM \ + SW_API_DESC(SW_API_PT_MIB_GET) \ + SW_API_DESC(SW_API_PT_XGMIB_GET) \ + SW_API_DESC(SW_API_MIB_STATUS_SET) \ + SW_API_DESC(SW_API_MIB_STATUS_GET) \ + SW_API_DESC(SW_API_PT_MIB_FLUSH_COUNTERS) \ + SW_API_DESC(SW_API_MIB_CPU_KEEP_SET) \ + SW_API_DESC(SW_API_MIB_CPU_KEEP_GET) \ + SW_API_DESC(SW_API_PT_MIB_COUNTER_GET) +#else +#define MIB_API +#define MIB_API_PARAM +#endif + +#ifdef IN_MISC +#define MISC_API \ + SW_API_DEF(SW_API_ARP_STATUS_SET, fal_arp_status_set), \ + SW_API_DEF(SW_API_ARP_STATUS_GET, fal_arp_status_get), \ + SW_API_DEF(SW_API_FRAME_MAX_SIZE_SET, fal_frame_max_size_set), \ + SW_API_DEF(SW_API_FRAME_MAX_SIZE_GET, fal_frame_max_size_get), \ + SW_API_DEF(SW_API_PT_UNK_SA_CMD_SET, fal_port_unk_sa_cmd_set), \ + SW_API_DEF(SW_API_PT_UNK_SA_CMD_GET, fal_port_unk_sa_cmd_get), \ + SW_API_DEF(SW_API_PT_UNK_UC_FILTER_SET, fal_port_unk_uc_filter_set), \ + SW_API_DEF(SW_API_PT_UNK_UC_FILTER_GET, fal_port_unk_uc_filter_get), \ + SW_API_DEF(SW_API_PT_UNK_MC_FILTER_SET, fal_port_unk_mc_filter_set), \ + SW_API_DEF(SW_API_PT_UNK_MC_FILTER_GET, fal_port_unk_mc_filter_get), \ + 
SW_API_DEF(SW_API_PT_BC_FILTER_SET, fal_port_bc_filter_set), \ + SW_API_DEF(SW_API_PT_BC_FILTER_GET, fal_port_bc_filter_get), \ + SW_API_DEF(SW_API_CPU_PORT_STATUS_SET, fal_cpu_port_status_set), \ + SW_API_DEF(SW_API_CPU_PORT_STATUS_GET, fal_cpu_port_status_get), \ + SW_API_DEF(SW_API_BC_TO_CPU_PORT_SET, fal_bc_to_cpu_port_set), \ + SW_API_DEF(SW_API_BC_TO_CPU_PORT_GET, fal_bc_to_cpu_port_get), \ + SW_API_DEF(SW_API_PT_DHCP_SET, fal_port_dhcp_set), \ + SW_API_DEF(SW_API_PT_DHCP_GET, fal_port_dhcp_get), \ + SW_API_DEF(SW_API_ARP_CMD_SET, fal_arp_cmd_set), \ + SW_API_DEF(SW_API_ARP_CMD_GET, fal_arp_cmd_get), \ + SW_API_DEF(SW_API_EAPOL_CMD_SET, fal_eapol_cmd_set), \ + SW_API_DEF(SW_API_EAPOL_CMD_GET, fal_eapol_cmd_get), \ + SW_API_DEF(SW_API_EAPOL_STATUS_SET, fal_eapol_status_set), \ + SW_API_DEF(SW_API_EAPOL_STATUS_GET, fal_eapol_status_get), \ + SW_API_DEF(SW_API_RIPV1_STATUS_SET, fal_ripv1_status_set), \ + SW_API_DEF(SW_API_RIPV1_STATUS_GET, fal_ripv1_status_get), \ + SW_API_DEF(SW_API_PT_ARP_REQ_STATUS_SET, fal_port_arp_req_status_set), \ + SW_API_DEF(SW_API_PT_ARP_REQ_STATUS_GET, fal_port_arp_req_status_get), \ + SW_API_DEF(SW_API_PT_ARP_ACK_STATUS_SET, fal_port_arp_ack_status_set), \ + SW_API_DEF(SW_API_PT_ARP_ACK_STATUS_GET, fal_port_arp_ack_status_get), \ + SW_API_DEF(SW_API_INTR_MASK_SET, fal_intr_mask_set), \ + SW_API_DEF(SW_API_INTR_MASK_GET, fal_intr_mask_get), \ + SW_API_DEF(SW_API_INTR_STATUS_GET, fal_intr_status_get), \ + SW_API_DEF(SW_API_INTR_STATUS_CLEAR, fal_intr_status_clear), \ + SW_API_DEF(SW_API_INTR_PORT_LINK_MASK_SET, fal_intr_port_link_mask_set), \ + SW_API_DEF(SW_API_INTR_PORT_LINK_MASK_GET, fal_intr_port_link_mask_get), \ + SW_API_DEF(SW_API_INTR_PORT_LINK_STATUS_GET, fal_intr_port_link_status_get), \ + SW_API_DEF(SW_API_INTR_MASK_MAC_LINKCHG_SET, fal_intr_mask_mac_linkchg_set), \ + SW_API_DEF(SW_API_INTR_MASK_MAC_LINKCHG_GET, fal_intr_mask_mac_linkchg_get), \ + SW_API_DEF(SW_API_INTR_STATUS_MAC_LINKCHG_GET, 
fal_intr_status_mac_linkchg_get), \ + SW_API_DEF(SW_API_INTR_STATUS_MAC_LINKCHG_CLEAR, fal_intr_status_mac_linkchg_clear), \ + SW_API_DEF(SW_API_CPU_VID_EN_SET, fal_cpu_vid_en_set), \ + SW_API_DEF(SW_API_CPU_VID_EN_GET, fal_cpu_vid_en_get), \ + SW_API_DEF(SW_API_GLOBAL_MACADDR_SET, fal_global_macaddr_set), \ + SW_API_DEF(SW_API_GLOBAL_MACADDR_GET, fal_global_macaddr_get), \ + SW_API_DEF(SW_API_LLDP_STATUS_SET, fal_lldp_status_set), \ + SW_API_DEF(SW_API_LLDP_STATUS_GET, fal_lldp_status_get), \ + SW_API_DEF(SW_API_FRAME_CRC_RESERVE_SET, fal_frame_crc_reserve_set), \ + SW_API_DEF(SW_API_FRAME_CRC_RESERVE_GET, fal_frame_crc_reserve_get), \ + SW_API_DEF(SW_API_DEBUG_PORT_COUNTER_ENABLE, fal_debug_port_counter_enable), \ + SW_API_DEF(SW_API_DEBUG_PORT_COUNTER_STATUS_GET, fal_debug_port_counter_status_get), + + + +#define MISC_API_PARAM \ + SW_API_DESC(SW_API_ARP_STATUS_SET) \ + SW_API_DESC(SW_API_ARP_STATUS_GET) \ + SW_API_DESC(SW_API_FRAME_MAX_SIZE_SET) \ + SW_API_DESC(SW_API_FRAME_MAX_SIZE_GET) \ + SW_API_DESC(SW_API_PT_UNK_SA_CMD_SET) \ + SW_API_DESC(SW_API_PT_UNK_SA_CMD_GET) \ + SW_API_DESC(SW_API_PT_UNK_UC_FILTER_SET) \ + SW_API_DESC(SW_API_PT_UNK_UC_FILTER_GET) \ + SW_API_DESC(SW_API_PT_UNK_MC_FILTER_SET) \ + SW_API_DESC(SW_API_PT_UNK_MC_FILTER_GET) \ + SW_API_DESC(SW_API_PT_BC_FILTER_SET) \ + SW_API_DESC(SW_API_PT_BC_FILTER_GET) \ + SW_API_DESC(SW_API_CPU_PORT_STATUS_SET) \ + SW_API_DESC(SW_API_CPU_PORT_STATUS_GET) \ + SW_API_DESC(SW_API_BC_TO_CPU_PORT_SET) \ + SW_API_DESC(SW_API_BC_TO_CPU_PORT_GET) \ + SW_API_DESC(SW_API_PT_DHCP_SET) \ + SW_API_DESC(SW_API_PT_DHCP_GET) \ + SW_API_DESC(SW_API_ARP_CMD_SET) \ + SW_API_DESC(SW_API_ARP_CMD_GET) \ + SW_API_DESC(SW_API_EAPOL_CMD_SET) \ + SW_API_DESC(SW_API_EAPOL_CMD_GET) \ + SW_API_DESC(SW_API_EAPOL_STATUS_SET) \ + SW_API_DESC(SW_API_EAPOL_STATUS_GET) \ + SW_API_DESC(SW_API_RIPV1_STATUS_SET) \ + SW_API_DESC(SW_API_RIPV1_STATUS_GET) \ + SW_API_DESC(SW_API_PT_ARP_REQ_STATUS_SET) \ + 
SW_API_DESC(SW_API_PT_ARP_REQ_STATUS_GET) \ + SW_API_DESC(SW_API_PT_ARP_ACK_STATUS_SET) \ + SW_API_DESC(SW_API_PT_ARP_ACK_STATUS_GET) \ + SW_API_DESC(SW_API_INTR_MASK_SET) \ + SW_API_DESC(SW_API_INTR_MASK_GET) \ + SW_API_DESC(SW_API_INTR_STATUS_GET) \ + SW_API_DESC(SW_API_INTR_STATUS_CLEAR) \ + SW_API_DESC(SW_API_INTR_PORT_LINK_MASK_SET) \ + SW_API_DESC(SW_API_INTR_PORT_LINK_MASK_GET) \ + SW_API_DESC(SW_API_INTR_PORT_LINK_STATUS_GET) \ + SW_API_DESC(SW_API_INTR_MASK_MAC_LINKCHG_SET) \ + SW_API_DESC(SW_API_INTR_MASK_MAC_LINKCHG_GET) \ + SW_API_DESC(SW_API_INTR_STATUS_MAC_LINKCHG_GET) \ + SW_API_DESC(SW_API_INTR_STATUS_MAC_LINKCHG_CLEAR) \ + SW_API_DESC(SW_API_CPU_VID_EN_SET) \ + SW_API_DESC(SW_API_CPU_VID_EN_GET) \ + SW_API_DESC(SW_API_GLOBAL_MACADDR_SET) \ + SW_API_DESC(SW_API_GLOBAL_MACADDR_GET) \ + SW_API_DESC(SW_API_LLDP_STATUS_SET) \ + SW_API_DESC(SW_API_LLDP_STATUS_GET) \ + SW_API_DESC(SW_API_FRAME_CRC_RESERVE_SET) \ + SW_API_DESC(SW_API_FRAME_CRC_RESERVE_GET) \ + SW_API_DESC(SW_API_DEBUG_PORT_COUNTER_ENABLE) \ + SW_API_DESC(SW_API_DEBUG_PORT_COUNTER_STATUS_GET) + + +#else +#define MISC_API +#define MISC_API_PARAM +#endif + +#ifdef IN_LED +#define LED_API \ + SW_API_DEF(SW_API_LED_PATTERN_SET, fal_led_ctrl_pattern_set), \ + SW_API_DEF(SW_API_LED_PATTERN_GET, fal_led_ctrl_pattern_get), + +#define LED_API_PARAM \ + SW_API_DESC(SW_API_LED_PATTERN_SET) \ + SW_API_DESC(SW_API_LED_PATTERN_GET) +#else +#define LED_API +#define LED_API_PARAM +#endif + +#ifdef IN_COSMAP +#define COSMAP_API \ + SW_API_DEF(SW_API_COSMAP_DSCP_TO_PRI_SET, fal_cosmap_dscp_to_pri_set), \ + SW_API_DEF(SW_API_COSMAP_DSCP_TO_PRI_GET, fal_cosmap_dscp_to_pri_get), \ + SW_API_DEF(SW_API_COSMAP_DSCP_TO_DP_SET, fal_cosmap_dscp_to_dp_set), \ + SW_API_DEF(SW_API_COSMAP_DSCP_TO_DP_GET, fal_cosmap_dscp_to_dp_get), \ + SW_API_DEF(SW_API_COSMAP_UP_TO_PRI_SET, fal_cosmap_up_to_pri_set), \ + SW_API_DEF(SW_API_COSMAP_UP_TO_PRI_GET, fal_cosmap_up_to_pri_get), \ + SW_API_DEF(SW_API_COSMAP_UP_TO_DP_SET, 
fal_cosmap_up_to_dp_set), \ + SW_API_DEF(SW_API_COSMAP_UP_TO_DP_GET, fal_cosmap_up_to_dp_get), \ + SW_API_DEF(SW_API_COSMAP_PRI_TO_QU_SET, fal_cosmap_pri_to_queue_set), \ + SW_API_DEF(SW_API_COSMAP_PRI_TO_QU_GET, fal_cosmap_pri_to_queue_get), \ + SW_API_DEF(SW_API_COSMAP_PRI_TO_EHQU_SET, fal_cosmap_pri_to_ehqueue_set), \ + SW_API_DEF(SW_API_COSMAP_PRI_TO_EHQU_GET, fal_cosmap_pri_to_ehqueue_get), \ + SW_API_DEF(SW_API_COSMAP_EG_REMARK_SET, fal_cosmap_egress_remark_set), \ + SW_API_DEF(SW_API_COSMAP_EG_REMARK_GET, fal_cosmap_egress_remark_get), \ + SW_API_DEF(SW_API_COSMAP_DSCP_TO_EHPRI_SET, fal_cosmap_dscp_to_ehpri_set), \ + SW_API_DEF(SW_API_COSMAP_DSCP_TO_EHPRI_GET, fal_cosmap_dscp_to_ehpri_get), \ + SW_API_DEF(SW_API_COSMAP_DSCP_TO_EHDP_SET, fal_cosmap_dscp_to_ehdp_set), \ + SW_API_DEF(SW_API_COSMAP_DSCP_TO_EHDP_GET, fal_cosmap_dscp_to_ehdp_get), \ + SW_API_DEF(SW_API_COSMAP_UP_TO_EHPRI_SET, fal_cosmap_up_to_ehpri_set), \ + SW_API_DEF(SW_API_COSMAP_UP_TO_EHPRI_GET, fal_cosmap_up_to_ehpri_get), \ + SW_API_DEF(SW_API_COSMAP_UP_TO_EHDP_SET, fal_cosmap_up_to_ehdp_set), \ + SW_API_DEF(SW_API_COSMAP_UP_TO_EHDP_GET, fal_cosmap_up_to_ehdp_get), + +#define COSMAP_API_PARAM \ + SW_API_DESC(SW_API_COSMAP_DSCP_TO_PRI_SET) \ + SW_API_DESC(SW_API_COSMAP_DSCP_TO_PRI_GET) \ + SW_API_DESC(SW_API_COSMAP_DSCP_TO_DP_SET) \ + SW_API_DESC(SW_API_COSMAP_DSCP_TO_DP_GET) \ + SW_API_DESC(SW_API_COSMAP_UP_TO_PRI_SET) \ + SW_API_DESC(SW_API_COSMAP_UP_TO_PRI_GET) \ + SW_API_DESC(SW_API_COSMAP_UP_TO_DP_SET) \ + SW_API_DESC(SW_API_COSMAP_UP_TO_DP_GET) \ + SW_API_DESC(SW_API_COSMAP_PRI_TO_QU_SET) \ + SW_API_DESC(SW_API_COSMAP_PRI_TO_QU_GET) \ + SW_API_DESC(SW_API_COSMAP_PRI_TO_EHQU_SET) \ + SW_API_DESC(SW_API_COSMAP_PRI_TO_EHQU_GET) \ + SW_API_DESC(SW_API_COSMAP_EG_REMARK_SET) \ + SW_API_DESC(SW_API_COSMAP_EG_REMARK_GET) \ + SW_API_DESC(SW_API_COSMAP_DSCP_TO_EHPRI_SET) \ + SW_API_DESC(SW_API_COSMAP_DSCP_TO_EHPRI_GET) \ + SW_API_DESC(SW_API_COSMAP_DSCP_TO_EHDP_SET) \ + 
SW_API_DESC(SW_API_COSMAP_DSCP_TO_EHDP_GET) \ + SW_API_DESC(SW_API_COSMAP_UP_TO_EHPRI_SET) \ + SW_API_DESC(SW_API_COSMAP_UP_TO_EHPRI_GET) \ + SW_API_DESC(SW_API_COSMAP_UP_TO_EHDP_SET) \ + SW_API_DESC(SW_API_COSMAP_UP_TO_EHDP_GET) +#else +#define COSMAP_API +#define COSMAP_API_PARAM +#endif + +#ifdef IN_SEC +#define SEC_API \ + SW_API_DEF(SW_API_SEC_NORM_SET, fal_sec_norm_item_set), \ + SW_API_DEF(SW_API_SEC_NORM_GET, fal_sec_norm_item_get), \ + SW_API_DEF(SW_API_SEC_MAC_SET, fal_sec_norm_item_set), \ + SW_API_DEF(SW_API_SEC_MAC_GET, fal_sec_norm_item_get), \ + SW_API_DEF(SW_API_SEC_IP_SET, fal_sec_norm_item_set), \ + SW_API_DEF(SW_API_SEC_IP_GET, fal_sec_norm_item_get), \ + SW_API_DEF(SW_API_SEC_IP4_SET, fal_sec_norm_item_set), \ + SW_API_DEF(SW_API_SEC_IP4_GET, fal_sec_norm_item_get), \ + SW_API_DEF(SW_API_SEC_IP6_SET, fal_sec_norm_item_set), \ + SW_API_DEF(SW_API_SEC_IP6_GET, fal_sec_norm_item_get), \ + SW_API_DEF(SW_API_SEC_TCP_SET, fal_sec_norm_item_set), \ + SW_API_DEF(SW_API_SEC_TCP_GET, fal_sec_norm_item_get), \ + SW_API_DEF(SW_API_SEC_UDP_SET, fal_sec_norm_item_set), \ + SW_API_DEF(SW_API_SEC_UDP_GET, fal_sec_norm_item_get), \ + SW_API_DEF(SW_API_SEC_ICMP4_SET, fal_sec_norm_item_set), \ + SW_API_DEF(SW_API_SEC_ICMP4_GET, fal_sec_norm_item_get), \ + SW_API_DEF(SW_API_SEC_ICMP6_SET, fal_sec_norm_item_set), \ + SW_API_DEF(SW_API_SEC_ICMP6_GET, fal_sec_norm_item_get), \ + SW_API_DEF(SW_API_SEC_L3_PARSER_CTRL_GET, fal_sec_l3_excep_parser_ctrl_get), \ + SW_API_DEF(SW_API_SEC_L3_PARSER_CTRL_SET, fal_sec_l3_excep_parser_ctrl_set), \ + SW_API_DEF(SW_API_SEC_L4_PARSER_CTRL_GET, fal_sec_l4_excep_parser_ctrl_get), \ + SW_API_DEF(SW_API_SEC_L4_PARSER_CTRL_SET, fal_sec_l4_excep_parser_ctrl_set), \ + SW_API_DEF(SW_API_SEC_EXP_CTRL_GET, fal_sec_l3_excep_ctrl_get), \ + SW_API_DEF(SW_API_SEC_EXP_CTRL_SET, fal_sec_l3_excep_ctrl_set), + +#define SEC_API_PARAM \ + SW_API_DESC(SW_API_SEC_NORM_SET) \ + SW_API_DESC(SW_API_SEC_NORM_GET) \ + SW_API_DESC(SW_API_SEC_MAC_SET) \ + 
SW_API_DESC(SW_API_SEC_MAC_GET) \ + SW_API_DESC(SW_API_SEC_IP_SET) \ + SW_API_DESC(SW_API_SEC_IP_GET) \ + SW_API_DESC(SW_API_SEC_IP4_SET) \ + SW_API_DESC(SW_API_SEC_IP4_GET) \ + SW_API_DESC(SW_API_SEC_IP6_SET) \ + SW_API_DESC(SW_API_SEC_IP6_GET) \ + SW_API_DESC(SW_API_SEC_TCP_SET) \ + SW_API_DESC(SW_API_SEC_TCP_GET) \ + SW_API_DESC(SW_API_SEC_UDP_SET) \ + SW_API_DESC(SW_API_SEC_UDP_GET) \ + SW_API_DESC(SW_API_SEC_ICMP4_SET) \ + SW_API_DESC(SW_API_SEC_ICMP4_GET) \ + SW_API_DESC(SW_API_SEC_ICMP6_SET) \ + SW_API_DESC(SW_API_SEC_ICMP6_GET) \ + SW_API_DESC(SW_API_SEC_L3_PARSER_CTRL_GET) \ + SW_API_DESC(SW_API_SEC_L3_PARSER_CTRL_SET) \ + SW_API_DESC(SW_API_SEC_L4_PARSER_CTRL_GET) \ + SW_API_DESC(SW_API_SEC_L4_PARSER_CTRL_SET) \ + SW_API_DESC(SW_API_SEC_EXP_CTRL_GET) \ + SW_API_DESC(SW_API_SEC_EXP_CTRL_SET) +#else +#define SEC_API +#define SEC_API_PARAM +#endif + +#ifdef IN_IP +#define IP_API \ + SW_API_DEF(SW_API_IP_HOST_ADD, fal_ip_host_add), \ + SW_API_DEF(SW_API_IP_HOST_DEL, fal_ip_host_del), \ + SW_API_DEF(SW_API_IP_HOST_GET, fal_ip_host_get), \ + SW_API_DEF(SW_API_IP_HOST_NEXT, fal_ip_host_next), \ + SW_API_DEF(SW_API_IP_HOST_COUNTER_BIND, fal_ip_host_counter_bind), \ + SW_API_DEF(SW_API_IP_HOST_PPPOE_BIND, fal_ip_host_pppoe_bind), \ + SW_API_DEF(SW_API_IP_PT_ARP_LEARN_SET, fal_ip_pt_arp_learn_set), \ + SW_API_DEF(SW_API_IP_PT_ARP_LEARN_GET, fal_ip_pt_arp_learn_get), \ + SW_API_DEF(SW_API_IP_ARP_LEARN_SET, fal_ip_arp_learn_set), \ + SW_API_DEF(SW_API_IP_ARP_LEARN_GET, fal_ip_arp_learn_get), \ + SW_API_DEF(SW_API_IP_SOURCE_GUARD_SET, fal_ip_source_guard_set), \ + SW_API_DEF(SW_API_IP_SOURCE_GUARD_GET, fal_ip_source_guard_get), \ + SW_API_DEF(SW_API_IP_ARP_GUARD_SET, fal_ip_arp_guard_set), \ + SW_API_DEF(SW_API_IP_ARP_GUARD_GET, fal_ip_arp_guard_get), \ + SW_API_DEF(SW_API_IP_ROUTE_STATUS_SET, fal_ip_route_status_set), \ + SW_API_DEF(SW_API_IP_ROUTE_STATUS_GET, fal_ip_route_status_get), \ + SW_API_DEF(SW_API_IP_INTF_ENTRY_ADD, fal_ip_intf_entry_add), \ + 
SW_API_DEF(SW_API_IP_INTF_ENTRY_DEL, fal_ip_intf_entry_del), \ + SW_API_DEF(SW_API_IP_INTF_ENTRY_NEXT, fal_ip_intf_entry_next), \ + SW_API_DEF(SW_API_IP_UNK_SOURCE_CMD_SET, fal_ip_unk_source_cmd_set), \ + SW_API_DEF(SW_API_IP_UNK_SOURCE_CMD_GET, fal_ip_unk_source_cmd_get), \ + SW_API_DEF(SW_API_ARP_UNK_SOURCE_CMD_SET, fal_arp_unk_source_cmd_set), \ + SW_API_DEF(SW_API_ARP_UNK_SOURCE_CMD_GET, fal_arp_unk_source_cmd_get), \ + SW_API_DEF(SW_API_IP_AGE_TIME_SET, fal_ip_age_time_set), \ + SW_API_DEF(SW_API_IP_AGE_TIME_GET, fal_ip_age_time_get), \ + SW_API_DEF(SW_API_WCMP_HASH_MODE_SET, fal_ip_wcmp_hash_mode_set), \ + SW_API_DEF(SW_API_WCMP_HASH_MODE_GET, fal_ip_wcmp_hash_mode_get), \ + SW_API_DEF(SW_API_IP_VRF_BASE_ADDR_SET, fal_ip_vrf_base_addr_set), \ + SW_API_DEF(SW_API_IP_VRF_BASE_ADDR_GET, fal_ip_vrf_base_addr_get), \ + SW_API_DEF(SW_API_IP_VRF_BASE_MASK_SET, fal_ip_vrf_base_mask_set), \ + SW_API_DEF(SW_API_IP_VRF_BASE_MASK_GET, fal_ip_vrf_base_mask_get), \ + SW_API_DEF(SW_API_IP_DEFAULT_ROUTE_SET, fal_ip_default_route_set), \ + SW_API_DEF(SW_API_IP_DEFAULT_ROUTE_GET, fal_ip_default_route_get), \ + SW_API_DEF(SW_API_IP_HOST_ROUTE_SET, fal_ip_host_route_set), \ + SW_API_DEF(SW_API_IP_HOST_ROUTE_GET, fal_ip_host_route_get), \ + SW_API_DEF(SW_API_IP_WCMP_ENTRY_SET, fal_ip_wcmp_entry_set), \ + SW_API_DEF(SW_API_IP_WCMP_ENTRY_GET, fal_ip_wcmp_entry_get), \ + SW_API_DEF(SW_API_IP_RFS_IP4_SET, fal_ip_rfs_ip4_rule_set), \ + SW_API_DEF(SW_API_IP_RFS_IP6_SET, fal_ip_rfs_ip6_rule_set), \ + SW_API_DEF(SW_API_IP_RFS_IP4_DEL, fal_ip_rfs_ip4_rule_del), \ + SW_API_DEF(SW_API_IP_RFS_IP6_DEL, fal_ip_rfs_ip6_rule_del), \ + SW_API_DEF(SW_API_IP_DEFAULT_FLOW_CMD_SET, fal_default_flow_cmd_set), \ + SW_API_DEF(SW_API_IP_DEFAULT_FLOW_CMD_GET, fal_default_flow_cmd_get), \ + SW_API_DEF(SW_API_IP_DEFAULT_RT_FLOW_CMD_SET, fal_default_rt_flow_cmd_set), \ + SW_API_DEF(SW_API_IP_DEFAULT_RT_FLOW_CMD_GET, fal_default_rt_flow_cmd_get), \ + SW_API_DEF(SW_API_IP_VIS_ARP_SG_CFG_GET, 
fal_ip_vsi_arp_sg_cfg_get), \ + SW_API_DEF(SW_API_IP_VIS_ARP_SG_CFG_SET, fal_ip_vsi_arp_sg_cfg_set), \ + SW_API_DEF(SW_API_IP_NETWORK_ROUTE_GET, fal_ip_network_route_get), \ + SW_API_DEF(SW_API_IP_NETWORK_ROUTE_ADD, fal_ip_network_route_add), \ + SW_API_DEF(SW_API_IP_INTF_GET, fal_ip_intf_get), \ + SW_API_DEF(SW_API_IP_INTF_SET, fal_ip_intf_set), \ + SW_API_DEF(SW_API_IP_VSI_INTF_GET, fal_ip_vsi_intf_get), \ + SW_API_DEF(SW_API_IP_VSI_INTF_SET, fal_ip_vsi_intf_set), \ + SW_API_DEF(SW_API_IP_NEXTHOP_GET, fal_ip_nexthop_get), \ + SW_API_DEF(SW_API_IP_NEXTHOP_SET, fal_ip_nexthop_set), \ + SW_API_DEF(SW_API_IP_VSI_SG_SET, fal_ip_vsi_sg_cfg_set), \ + SW_API_DEF(SW_API_IP_VSI_SG_GET, fal_ip_vsi_sg_cfg_get), \ + SW_API_DEF(SW_API_IP_PORT_SG_SET, fal_ip_port_sg_cfg_set), \ + SW_API_DEF(SW_API_IP_PORT_SG_GET, fal_ip_port_sg_cfg_get), \ + SW_API_DEF(SW_API_IP_PUB_IP_SET, fal_ip_pub_addr_set), \ + SW_API_DEF(SW_API_IP_PUB_IP_GET, fal_ip_pub_addr_get), \ + SW_API_DEF(SW_API_IP_NETWORK_ROUTE_DEL, fal_ip_network_route_del), \ + SW_API_DEF(SW_API_IP_PORT_INTF_GET, fal_ip_port_intf_get), \ + SW_API_DEF(SW_API_IP_PORT_INTF_SET, fal_ip_port_intf_set), \ + SW_API_DEF(SW_API_IP_PORT_MAC_GET, fal_ip_port_macaddr_get), \ + SW_API_DEF(SW_API_IP_PORT_MAC_SET, fal_ip_port_macaddr_set), \ + SW_API_DEF(SW_API_IP_ROUTE_MISS_GET, fal_ip_route_mismatch_action_get), \ + SW_API_DEF(SW_API_IP_ROUTE_MISS_SET, fal_ip_route_mismatch_action_set), \ + SW_API_DEF(SW_API_IP_PORT_ARP_SG_SET, fal_ip_port_arp_sg_cfg_set), \ + SW_API_DEF(SW_API_IP_PORT_ARP_SG_GET, fal_ip_port_arp_sg_cfg_get), \ + SW_API_DEF(SW_API_IP_VSI_MC_MODE_SET, fal_ip_vsi_mc_mode_set), \ + SW_API_DEF(SW_API_IP_VSI_MC_MODE_GET, fal_ip_vsi_mc_mode_get), \ + SW_API_DEF(SW_API_GLOBAL_CTRL_GET, fal_ip_global_ctrl_get), \ + SW_API_DEF(SW_API_GLOBAL_CTRL_SET, fal_ip_global_ctrl_set), + +#define IP_API_PARAM \ + SW_API_DESC(SW_API_IP_HOST_ADD) \ + SW_API_DESC(SW_API_IP_HOST_DEL) \ + SW_API_DESC(SW_API_IP_HOST_GET) \ + 
SW_API_DESC(SW_API_IP_HOST_NEXT) \ + SW_API_DESC(SW_API_IP_HOST_COUNTER_BIND) \ + SW_API_DESC(SW_API_IP_HOST_PPPOE_BIND) \ + SW_API_DESC(SW_API_IP_PT_ARP_LEARN_SET) \ + SW_API_DESC(SW_API_IP_PT_ARP_LEARN_GET) \ + SW_API_DESC(SW_API_IP_ARP_LEARN_SET) \ + SW_API_DESC(SW_API_IP_ARP_LEARN_GET) \ + SW_API_DESC(SW_API_IP_SOURCE_GUARD_SET) \ + SW_API_DESC(SW_API_IP_SOURCE_GUARD_GET) \ + SW_API_DESC(SW_API_IP_ARP_GUARD_SET) \ + SW_API_DESC(SW_API_IP_ARP_GUARD_GET) \ + SW_API_DESC(SW_API_IP_ROUTE_STATUS_SET) \ + SW_API_DESC(SW_API_IP_ROUTE_STATUS_GET) \ + SW_API_DESC(SW_API_IP_INTF_ENTRY_ADD) \ + SW_API_DESC(SW_API_IP_INTF_ENTRY_DEL) \ + SW_API_DESC(SW_API_IP_INTF_ENTRY_NEXT) \ + SW_API_DESC(SW_API_IP_UNK_SOURCE_CMD_SET) \ + SW_API_DESC(SW_API_IP_UNK_SOURCE_CMD_GET) \ + SW_API_DESC(SW_API_ARP_UNK_SOURCE_CMD_SET) \ + SW_API_DESC(SW_API_ARP_UNK_SOURCE_CMD_GET) \ + SW_API_DESC(SW_API_IP_AGE_TIME_SET) \ + SW_API_DESC(SW_API_IP_AGE_TIME_GET) \ + SW_API_DESC(SW_API_WCMP_HASH_MODE_SET) \ + SW_API_DESC(SW_API_WCMP_HASH_MODE_GET) \ + SW_API_DESC(SW_API_IP_VRF_BASE_ADDR_SET) \ + SW_API_DESC(SW_API_IP_VRF_BASE_ADDR_GET) \ + SW_API_DESC(SW_API_IP_VRF_BASE_MASK_SET) \ + SW_API_DESC(SW_API_IP_VRF_BASE_MASK_GET) \ + SW_API_DESC(SW_API_IP_DEFAULT_ROUTE_SET) \ + SW_API_DESC(SW_API_IP_DEFAULT_ROUTE_GET) \ + SW_API_DESC(SW_API_IP_HOST_ROUTE_SET) \ + SW_API_DESC(SW_API_IP_HOST_ROUTE_GET) \ + SW_API_DESC(SW_API_IP_WCMP_ENTRY_SET) \ + SW_API_DESC(SW_API_IP_WCMP_ENTRY_GET) \ + SW_API_DESC(SW_API_IP_RFS_IP4_SET) \ + SW_API_DESC(SW_API_IP_RFS_IP6_SET) \ + SW_API_DESC(SW_API_IP_RFS_IP4_DEL) \ + SW_API_DESC(SW_API_IP_RFS_IP6_DEL) \ + SW_API_DESC(SW_API_IP_DEFAULT_FLOW_CMD_SET) \ + SW_API_DESC(SW_API_IP_DEFAULT_FLOW_CMD_GET) \ + SW_API_DESC(SW_API_IP_DEFAULT_RT_FLOW_CMD_SET) \ + SW_API_DESC(SW_API_IP_DEFAULT_RT_FLOW_CMD_GET) \ + SW_API_DESC(SW_API_IP_VIS_ARP_SG_CFG_GET) \ + SW_API_DESC(SW_API_IP_VIS_ARP_SG_CFG_SET) \ + SW_API_DESC(SW_API_IP_NETWORK_ROUTE_GET) \ + 
SW_API_DESC(SW_API_IP_NETWORK_ROUTE_ADD) \ + SW_API_DESC(SW_API_IP_INTF_GET) \ + SW_API_DESC(SW_API_IP_INTF_SET) \ + SW_API_DESC(SW_API_IP_VSI_INTF_GET) \ + SW_API_DESC(SW_API_IP_VSI_INTF_SET) \ + SW_API_DESC(SW_API_IP_NEXTHOP_GET) \ + SW_API_DESC(SW_API_IP_NEXTHOP_SET) \ + SW_API_DESC(SW_API_IP_VSI_SG_SET) \ + SW_API_DESC(SW_API_IP_VSI_SG_GET) \ + SW_API_DESC(SW_API_IP_PORT_SG_SET) \ + SW_API_DESC(SW_API_IP_PORT_SG_GET) \ + SW_API_DESC(SW_API_IP_PUB_IP_SET) \ + SW_API_DESC(SW_API_IP_PUB_IP_GET) \ + SW_API_DESC(SW_API_IP_NETWORK_ROUTE_DEL) \ + SW_API_DESC(SW_API_IP_PORT_INTF_GET) \ + SW_API_DESC(SW_API_IP_PORT_INTF_SET) \ + SW_API_DESC(SW_API_IP_PORT_MAC_GET) \ + SW_API_DESC(SW_API_IP_PORT_MAC_SET) \ + SW_API_DESC(SW_API_IP_ROUTE_MISS_GET) \ + SW_API_DESC(SW_API_IP_ROUTE_MISS_SET) \ + SW_API_DESC(SW_API_IP_PORT_ARP_SG_SET) \ + SW_API_DESC(SW_API_IP_PORT_ARP_SG_GET) \ + SW_API_DESC(SW_API_IP_VSI_MC_MODE_SET) \ + SW_API_DESC(SW_API_IP_VSI_MC_MODE_GET) \ + SW_API_DESC(SW_API_GLOBAL_CTRL_GET) \ + SW_API_DESC(SW_API_GLOBAL_CTRL_SET) +#else +#define IP_API +#define IP_API_PARAM +#endif + +#ifdef IN_FLOW +#define FLOW_API \ + SW_API_DEF(SW_API_FLOW_STATUS_SET, fal_flow_status_set), \ + SW_API_DEF(SW_API_FLOW_STATUS_GET, fal_flow_status_get), \ + SW_API_DEF(SW_API_FLOW_AGE_TIMER_SET, fal_flow_age_timer_set), \ + SW_API_DEF(SW_API_FLOW_AGE_TIMER_GET, fal_flow_age_timer_get), \ + SW_API_DEF(SW_API_FLOW_CTRL_SET, fal_flow_mgmt_set), \ + SW_API_DEF(SW_API_FLOW_CTRL_GET, fal_flow_mgmt_get), \ + SW_API_DEF(SW_API_FLOW_ENTRY_ADD, fal_flow_entry_add), \ + SW_API_DEF(SW_API_FLOW_ENTRY_DEL, fal_flow_entry_del), \ + SW_API_DEF(SW_API_FLOW_ENTRY_GET, fal_flow_entry_get), \ + SW_API_DEF(SW_API_FLOW_GLOBAL_CFG_GET, fal_flow_global_cfg_get), \ + SW_API_DEF(SW_API_FLOW_GLOBAL_CFG_SET, fal_flow_global_cfg_set), \ + SW_API_DEF(SW_API_FLOW_HOST_ADD, fal_flow_host_add), \ + SW_API_DEF(SW_API_FLOW_HOST_GET, fal_flow_host_get), \ + SW_API_DEF(SW_API_FLOW_HOST_DEL, fal_flow_host_del), \ + 
SW_API_DEF(SW_API_FLOWENTRY_NEXT, fal_flow_entry_next), + +#define FLOW_API_PARAM \ + SW_API_DESC(SW_API_FLOW_STATUS_SET) \ + SW_API_DESC(SW_API_FLOW_STATUS_GET) \ + SW_API_DESC(SW_API_FLOW_AGE_TIMER_SET) \ + SW_API_DESC(SW_API_FLOW_AGE_TIMER_GET) \ + SW_API_DESC(SW_API_FLOW_CTRL_SET) \ + SW_API_DESC(SW_API_FLOW_CTRL_GET) \ + SW_API_DESC(SW_API_FLOW_ENTRY_ADD) \ + SW_API_DESC(SW_API_FLOW_ENTRY_DEL) \ + SW_API_DESC(SW_API_FLOW_ENTRY_GET) \ + SW_API_DESC(SW_API_FLOW_GLOBAL_CFG_GET) \ + SW_API_DESC(SW_API_FLOW_GLOBAL_CFG_SET) \ + SW_API_DESC(SW_API_FLOW_HOST_ADD) \ + SW_API_DESC(SW_API_FLOW_HOST_GET) \ + SW_API_DESC(SW_API_FLOW_HOST_DEL) \ + SW_API_DESC(SW_API_FLOWENTRY_NEXT) +#else +#define FLOW_API +#define FLOW_API_PARAM +#endif + +#ifdef IN_NAT +#define NAT_API \ + SW_API_DEF(SW_API_NAT_ADD, fal_nat_add), \ + SW_API_DEF(SW_API_NAT_DEL, fal_nat_del), \ + SW_API_DEF(SW_API_NAT_GET, fal_nat_get), \ + SW_API_DEF(SW_API_NAT_NEXT, fal_nat_next), \ + SW_API_DEF(SW_API_NAT_COUNTER_BIND, fal_nat_counter_bind), \ + SW_API_DEF(SW_API_NAPT_ADD, fal_napt_add), \ + SW_API_DEF(SW_API_NAPT_DEL, fal_napt_del), \ + SW_API_DEF(SW_API_NAPT_GET, fal_napt_get), \ + SW_API_DEF(SW_API_NAPT_NEXT, fal_napt_next), \ + SW_API_DEF(SW_API_NAPT_COUNTER_BIND, fal_napt_counter_bind), \ + SW_API_DEF(SW_API_FLOW_ADD, fal_flow_add), \ + SW_API_DEF(SW_API_FLOW_DEL, fal_flow_del), \ + SW_API_DEF(SW_API_FLOW_GET, fal_flow_get), \ + SW_API_DEF(SW_API_FLOW_NEXT, fal_flow_next), \ + SW_API_DEF(SW_API_FLOW_COUNTER_BIND, fal_flow_counter_bind), \ + SW_API_DEF(SW_API_NAT_STATUS_SET, fal_nat_status_set), \ + SW_API_DEF(SW_API_NAT_STATUS_GET, fal_nat_status_get), \ + SW_API_DEF(SW_API_NAT_HASH_MODE_SET, fal_nat_hash_mode_set), \ + SW_API_DEF(SW_API_NAT_HASH_MODE_GET, fal_nat_hash_mode_get), \ + SW_API_DEF(SW_API_NAPT_STATUS_SET, fal_napt_status_set), \ + SW_API_DEF(SW_API_NAPT_STATUS_GET, fal_napt_status_get), \ + SW_API_DEF(SW_API_NAPT_MODE_SET, fal_napt_mode_set), \ + SW_API_DEF(SW_API_NAPT_MODE_GET, 
fal_napt_mode_get), \ + SW_API_DEF(SW_API_PRV_BASE_ADDR_SET, fal_nat_prv_base_addr_set), \ + SW_API_DEF(SW_API_PRV_BASE_ADDR_GET, fal_nat_prv_base_addr_get), \ + SW_API_DEF(SW_API_PRV_ADDR_MODE_SET, fal_nat_prv_addr_mode_set), \ + SW_API_DEF(SW_API_PRV_ADDR_MODE_GET, fal_nat_prv_addr_mode_get), \ + SW_API_DEF(SW_API_PUB_ADDR_ENTRY_ADD, fal_nat_pub_addr_add), \ + SW_API_DEF(SW_API_PUB_ADDR_ENTRY_DEL, fal_nat_pub_addr_del), \ + SW_API_DEF(SW_API_PUB_ADDR_ENTRY_NEXT, fal_nat_pub_addr_next), \ + SW_API_DEF(SW_API_NAT_UNK_SESSION_CMD_SET, fal_nat_unk_session_cmd_set), \ + SW_API_DEF(SW_API_NAT_UNK_SESSION_CMD_GET, fal_nat_unk_session_cmd_get), \ + SW_API_DEF(SW_API_PRV_BASE_MASK_SET, fal_nat_prv_base_mask_set), \ + SW_API_DEF(SW_API_PRV_BASE_MASK_GET, fal_nat_prv_base_mask_get), \ + SW_API_DEF(SW_API_NAT_GLOBAL_SET, fal_nat_global_set), \ + SW_API_DEF(SW_API_FLOW_COOKIE_SET, fal_flow_cookie_set), \ + SW_API_DEF(SW_API_FLOW_RFS_SET, fal_flow_rfs_set), + +#define NAT_API_PARAM \ + SW_API_DESC(SW_API_NAT_ADD) \ + SW_API_DESC(SW_API_NAT_DEL) \ + SW_API_DESC(SW_API_NAT_GET) \ + SW_API_DESC(SW_API_NAT_NEXT) \ + SW_API_DESC(SW_API_NAT_COUNTER_BIND) \ + SW_API_DESC(SW_API_NAPT_ADD) \ + SW_API_DESC(SW_API_NAPT_DEL) \ + SW_API_DESC(SW_API_NAPT_GET) \ + SW_API_DESC(SW_API_NAPT_NEXT) \ + SW_API_DESC(SW_API_NAPT_COUNTER_BIND) \ + SW_API_DESC(SW_API_FLOW_ADD) \ + SW_API_DESC(SW_API_FLOW_DEL) \ + SW_API_DESC(SW_API_FLOW_GET) \ + SW_API_DESC(SW_API_FLOW_NEXT) \ + SW_API_DESC(SW_API_FLOW_COUNTER_BIND) \ + SW_API_DESC(SW_API_NAT_STATUS_SET) \ + SW_API_DESC(SW_API_NAT_STATUS_GET) \ + SW_API_DESC(SW_API_NAT_HASH_MODE_SET) \ + SW_API_DESC(SW_API_NAT_HASH_MODE_GET) \ + SW_API_DESC(SW_API_NAPT_STATUS_SET) \ + SW_API_DESC(SW_API_NAPT_STATUS_GET) \ + SW_API_DESC(SW_API_NAPT_MODE_SET) \ + SW_API_DESC(SW_API_NAPT_MODE_GET) \ + SW_API_DESC(SW_API_PRV_BASE_ADDR_SET) \ + SW_API_DESC(SW_API_PRV_BASE_ADDR_GET) \ + SW_API_DESC(SW_API_PRV_ADDR_MODE_SET) \ + SW_API_DESC(SW_API_PRV_ADDR_MODE_GET) \ + 
SW_API_DESC(SW_API_PUB_ADDR_ENTRY_ADD) \ + SW_API_DESC(SW_API_PUB_ADDR_ENTRY_DEL) \ + SW_API_DESC(SW_API_PUB_ADDR_ENTRY_NEXT) \ + SW_API_DESC(SW_API_NAT_UNK_SESSION_CMD_SET) \ + SW_API_DESC(SW_API_NAT_UNK_SESSION_CMD_GET) \ + SW_API_DESC(SW_API_PRV_BASE_MASK_SET) \ + SW_API_DESC(SW_API_PRV_BASE_MASK_GET) \ + SW_API_DESC(SW_API_NAT_GLOBAL_SET) \ + SW_API_DESC(SW_API_FLOW_COOKIE_SET) \ + SW_API_DESC(SW_API_FLOW_RFS_SET) +#else +#define NAT_API +#define NAT_API_PARAM +#endif + +#ifdef IN_TRUNK +#define TRUNK_API \ + SW_API_DEF(SW_API_TRUNK_GROUP_SET, fal_trunk_group_set), \ + SW_API_DEF(SW_API_TRUNK_GROUP_GET, fal_trunk_group_get), \ + SW_API_DEF(SW_API_TRUNK_HASH_SET, fal_trunk_hash_mode_set), \ + SW_API_DEF(SW_API_TRUNK_HASH_GET, fal_trunk_hash_mode_get), \ + SW_API_DEF(SW_API_TRUNK_MAN_SA_SET, fal_trunk_manipulate_sa_set), \ + SW_API_DEF(SW_API_TRUNK_MAN_SA_GET, fal_trunk_manipulate_sa_get), \ + SW_API_DEF(SW_API_TRUNK_FAILOVER_EN_SET, fal_trunk_failover_enable), \ + SW_API_DEF(SW_API_TRUNK_FAILOVER_EN_GET, fal_trunk_failover_status_get), + +#define TRUNK_API_PARAM \ + SW_API_DESC(SW_API_TRUNK_GROUP_SET) \ + SW_API_DESC(SW_API_TRUNK_GROUP_GET) \ + SW_API_DESC(SW_API_TRUNK_HASH_SET) \ + SW_API_DESC(SW_API_TRUNK_HASH_GET) \ + SW_API_DESC(SW_API_TRUNK_MAN_SA_SET)\ + SW_API_DESC(SW_API_TRUNK_MAN_SA_GET)\ + SW_API_DESC(SW_API_TRUNK_FAILOVER_EN_SET)\ + SW_API_DESC(SW_API_TRUNK_FAILOVER_EN_GET) +#else +#define TRUNK_API +#define TRUNK_API_PARAM +#endif + +#ifdef IN_INTERFACECONTROL +#define INTERFACECTRL_API \ + SW_API_DEF(SW_API_MAC_MODE_SET, fal_interface_mac_mode_set), \ + SW_API_DEF(SW_API_MAC_MODE_GET, fal_interface_mac_mode_get), \ + SW_API_DEF(SW_API_PORT_3AZ_STATUS_SET, fal_port_3az_status_set), \ + SW_API_DEF(SW_API_PORT_3AZ_STATUS_GET, fal_port_3az_status_get), \ + SW_API_DEF(SW_API_PHY_MODE_SET, fal_interface_phy_mode_set), \ + SW_API_DEF(SW_API_PHY_MODE_GET, fal_interface_phy_mode_get), \ + SW_API_DEF(SW_API_FX100_CTRL_SET, fal_interface_fx100_ctrl_set), \ + 
SW_API_DEF(SW_API_FX100_CTRL_GET, fal_interface_fx100_ctrl_get), \ + SW_API_DEF(SW_API_FX100_STATUS_GET, fal_interface_fx100_status_get),\ + SW_API_DEF(SW_API_MAC06_EXCH_SET, fal_interface_mac06_exch_set),\ + SW_API_DEF(SW_API_MAC06_EXCH_GET, fal_interface_mac06_exch_get), + +#define INTERFACECTRL_API_PARAM \ + SW_API_DESC(SW_API_MAC_MODE_SET) \ + SW_API_DESC(SW_API_MAC_MODE_GET) \ + SW_API_DESC(SW_API_PORT_3AZ_STATUS_SET) \ + SW_API_DESC(SW_API_PORT_3AZ_STATUS_GET) \ + SW_API_DESC(SW_API_PHY_MODE_SET) \ + SW_API_DESC(SW_API_PHY_MODE_GET) \ + SW_API_DESC(SW_API_FX100_CTRL_SET) \ + SW_API_DESC(SW_API_FX100_CTRL_GET) \ + SW_API_DESC(SW_API_FX100_STATUS_GET) \ + SW_API_DESC(SW_API_MAC06_EXCH_SET) \ + SW_API_DESC(SW_API_MAC06_EXCH_GET) + +#else +#define INTERFACECTRL_API +#define INTERFACECTRL_API_PARAM +#endif + +#ifdef IN_VSI +#define VSI_API \ + SW_API_DEF(SW_API_VSI_ALLOC, fal_vsi_alloc), \ + SW_API_DEF(SW_API_VSI_FREE, fal_vsi_free), \ + SW_API_DEF(SW_API_PORT_VSI_SET, fal_port_vsi_set), \ + SW_API_DEF(SW_API_PORT_VSI_GET, fal_port_vsi_get), \ + SW_API_DEF(SW_API_PORT_VLAN_VSI_SET, fal_port_vlan_vsi_set), \ + SW_API_DEF(SW_API_PORT_VLAN_VSI_GET, fal_port_vlan_vsi_get), \ + SW_API_DEF(SW_API_VSI_TBL_DUMP, fal_vsi_tbl_dump), \ + SW_API_DEF(SW_API_VSI_NEWADDR_LRN_GET, fal_vsi_newaddr_lrn_get), \ + SW_API_DEF(SW_API_VSI_NEWADDR_LRN_SET, fal_vsi_newaddr_lrn_set), \ + SW_API_DEF(SW_API_VSI_STAMOVE_SET, fal_vsi_stamove_set), \ + SW_API_DEF(SW_API_VSI_STAMOVE_GET,fal_vsi_stamove_get), \ + SW_API_DEF(SW_API_VSI_MEMBER_SET, fal_vsi_member_set), \ + SW_API_DEF(SW_API_VSI_MEMBER_GET, fal_vsi_member_get), \ + SW_API_DEF(SW_API_VSI_COUNTER_GET,fal_vsi_counter_get), \ + SW_API_DEF(SW_API_VSI_COUNTER_CLEANUP,fal_vsi_counter_cleanup), + + +#define VSI_API_PARAM \ + SW_API_DESC(SW_API_VSI_ALLOC) \ + SW_API_DESC(SW_API_VSI_FREE) \ + SW_API_DESC(SW_API_PORT_VSI_SET) \ + SW_API_DESC(SW_API_PORT_VSI_GET) \ + SW_API_DESC(SW_API_PORT_VLAN_VSI_SET) \ + 
SW_API_DESC(SW_API_PORT_VLAN_VSI_GET) \ + SW_API_DESC(SW_API_VSI_TBL_DUMP) \ + SW_API_DESC(SW_API_VSI_NEWADDR_LRN_GET) \ + SW_API_DESC(SW_API_VSI_NEWADDR_LRN_SET) \ + SW_API_DESC(SW_API_VSI_STAMOVE_SET) \ + SW_API_DESC(SW_API_VSI_STAMOVE_GET) \ + SW_API_DESC(SW_API_VSI_MEMBER_SET) \ + SW_API_DESC(SW_API_VSI_MEMBER_GET) \ + SW_API_DESC(SW_API_VSI_COUNTER_GET) \ + SW_API_DESC(SW_API_VSI_COUNTER_CLEANUP) + +#else +#define VSI_API +#define VSI_API_PARAM +#endif + +#ifdef IN_QM +#define QM_API \ + SW_API_DEF(SW_API_UCAST_QUEUE_BASE_PROFILE_SET, fal_ucast_queue_base_profile_set), \ + SW_API_DEF(SW_API_UCAST_QUEUE_BASE_PROFILE_GET, fal_ucast_queue_base_profile_get), \ + SW_API_DEF(SW_API_UCAST_PRIORITY_CLASS_SET, fal_ucast_priority_class_set), \ + SW_API_DEF(SW_API_UCAST_PRIORITY_CLASS_GET, fal_ucast_priority_class_get), \ + SW_API_DEF(SW_API_MCAST_PRIORITY_CLASS_SET, fal_port_mcast_priority_class_set), \ + SW_API_DEF(SW_API_MCAST_PRIORITY_CLASS_GET, fal_port_mcast_priority_class_get), \ + SW_API_DEF(SW_API_QUEUE_FLUSH, fal_queue_flush), \ + SW_API_DEF(SW_API_UCAST_HASH_MAP_SET, fal_ucast_hash_map_set), \ + SW_API_DEF(SW_API_UCAST_HASH_MAP_GET, fal_ucast_hash_map_get), \ + SW_API_DEF(SW_API_UCAST_DFLT_HASH_MAP_SET, fal_ucast_default_hash_set), \ + SW_API_DEF(SW_API_UCAST_DFLT_HASH_MAP_GET, fal_ucast_default_hash_get), \ + SW_API_DEF(SW_API_MCAST_CPUCODE_CLASS_SET, fal_mcast_cpu_code_class_set), \ + SW_API_DEF(SW_API_MCAST_CPUCODE_CLASS_GET, fal_mcast_cpu_code_class_get), \ + SW_API_DEF(SW_API_AC_CTRL_SET, fal_ac_ctrl_set), \ + SW_API_DEF(SW_API_AC_CTRL_GET, fal_ac_ctrl_get), \ + SW_API_DEF(SW_API_AC_PRE_BUFFER_SET, fal_ac_prealloc_buffer_set), \ + SW_API_DEF(SW_API_AC_PRE_BUFFER_GET, fal_ac_prealloc_buffer_get), \ + SW_API_DEF(SW_API_QUEUE_GROUP_SET, fal_ac_queue_group_set), \ + SW_API_DEF(SW_API_QUEUE_GROUP_GET, fal_ac_queue_group_get), \ + SW_API_DEF(SW_API_STATIC_THRESH_SET, fal_ac_static_threshold_set), \ + SW_API_DEF(SW_API_STATIC_THRESH_GET, 
fal_ac_static_threshold_get), \ + SW_API_DEF(SW_API_DYNAMIC_THRESH_SET, fal_ac_dynamic_threshold_set), \ + SW_API_DEF(SW_API_DYNAMIC_THRESH_GET, fal_ac_dynamic_threshold_get), \ + SW_API_DEF(SW_API_GOURP_BUFFER_SET, fal_ac_group_buffer_set), \ + SW_API_DEF(SW_API_GOURP_BUFFER_GET, fal_ac_group_buffer_get), \ + SW_API_DEF(SW_API_QUEUE_CNT_CTRL_GET, fal_queue_counter_ctrl_get), \ + SW_API_DEF(SW_API_QUEUE_CNT_CTRL_SET, fal_queue_counter_ctrl_set), \ + SW_API_DEF(SW_API_QUEUE_CNT_GET, fal_queue_counter_get), \ + SW_API_DEF(SW_API_QUEUE_CNT_CLEANUP, fal_queue_counter_cleanup), \ + SW_API_DEF(SW_API_QM_ENQUEUE_CTRL_SET, fal_qm_enqueue_ctrl_set), \ + SW_API_DEF(SW_API_QM_ENQUEUE_CTRL_GET, fal_qm_enqueue_ctrl_get), \ + SW_API_DEF(SW_API_QM_SOURCE_PROFILE_SET, fal_qm_port_source_profile_set), \ + SW_API_DEF(SW_API_QM_SOURCE_PROFILE_GET, fal_qm_port_source_profile_get), + +#define QM_API_PARAM \ + SW_API_DESC(SW_API_UCAST_QUEUE_BASE_PROFILE_SET) \ + SW_API_DESC(SW_API_UCAST_QUEUE_BASE_PROFILE_GET) \ + SW_API_DESC(SW_API_UCAST_PRIORITY_CLASS_SET) \ + SW_API_DESC(SW_API_UCAST_PRIORITY_CLASS_GET) \ + SW_API_DESC(SW_API_MCAST_PRIORITY_CLASS_SET) \ + SW_API_DESC(SW_API_MCAST_PRIORITY_CLASS_GET) \ + SW_API_DESC(SW_API_QUEUE_FLUSH) \ + SW_API_DESC(SW_API_UCAST_HASH_MAP_SET) \ + SW_API_DESC(SW_API_UCAST_HASH_MAP_GET) \ + SW_API_DESC(SW_API_UCAST_DFLT_HASH_MAP_SET) \ + SW_API_DESC(SW_API_UCAST_DFLT_HASH_MAP_GET) \ + SW_API_DESC(SW_API_MCAST_CPUCODE_CLASS_SET) \ + SW_API_DESC(SW_API_MCAST_CPUCODE_CLASS_GET) \ + SW_API_DESC(SW_API_AC_CTRL_SET) \ + SW_API_DESC(SW_API_AC_CTRL_GET) \ + SW_API_DESC(SW_API_AC_PRE_BUFFER_SET) \ + SW_API_DESC(SW_API_AC_PRE_BUFFER_GET) \ + SW_API_DESC(SW_API_QUEUE_GROUP_SET) \ + SW_API_DESC(SW_API_QUEUE_GROUP_GET) \ + SW_API_DESC(SW_API_STATIC_THRESH_SET) \ + SW_API_DESC(SW_API_STATIC_THRESH_GET) \ + SW_API_DESC(SW_API_DYNAMIC_THRESH_SET) \ + SW_API_DESC(SW_API_DYNAMIC_THRESH_GET) \ + SW_API_DESC(SW_API_GOURP_BUFFER_SET) \ + 
SW_API_DESC(SW_API_GOURP_BUFFER_GET) \ + SW_API_DESC(SW_API_QUEUE_CNT_CTRL_GET) \ + SW_API_DESC(SW_API_QUEUE_CNT_CTRL_SET) \ + SW_API_DESC(SW_API_QUEUE_CNT_GET) \ + SW_API_DESC(SW_API_QUEUE_CNT_CLEANUP) \ + SW_API_DESC(SW_API_QM_ENQUEUE_CTRL_SET) \ + SW_API_DESC(SW_API_QM_ENQUEUE_CTRL_GET) \ + SW_API_DESC(SW_API_QM_SOURCE_PROFILE_SET) \ + SW_API_DESC(SW_API_QM_SOURCE_PROFILE_GET) + +#else +#define QM_API +#define QM_API_PARAM +#endif + + +#ifdef IN_PPPOE +#define PPPOE_API \ + SW_API_DEF(SW_API_PPPOE_CMD_SET, fal_pppoe_cmd_set), \ + SW_API_DEF(SW_API_PPPOE_CMD_GET, fal_pppoe_cmd_get), \ + SW_API_DEF(SW_API_PPPOE_STATUS_SET, fal_pppoe_status_set), \ + SW_API_DEF(SW_API_PPPOE_STATUS_GET, fal_pppoe_status_get), \ + SW_API_DEF(SW_API_PPPOE_SESSION_ADD, fal_pppoe_session_add), \ + SW_API_DEF(SW_API_PPPOE_SESSION_DEL, fal_pppoe_session_del), \ + SW_API_DEF(SW_API_PPPOE_SESSION_GET, fal_pppoe_session_get), \ + SW_API_DEF(SW_API_PPPOE_SESSION_TABLE_ADD, fal_pppoe_session_table_add), \ + SW_API_DEF(SW_API_PPPOE_SESSION_TABLE_DEL, fal_pppoe_session_table_del), \ + SW_API_DEF(SW_API_PPPOE_SESSION_TABLE_GET, fal_pppoe_session_table_get), \ + SW_API_DEF(SW_API_PPPOE_SESSION_ID_SET, fal_pppoe_session_id_set), \ + SW_API_DEF(SW_API_PPPOE_SESSION_ID_GET, fal_pppoe_session_id_get), \ + SW_API_DEF(SW_API_RTD_PPPOE_EN_SET, fal_rtd_pppoe_en_set), \ + SW_API_DEF(SW_API_RTD_PPPOE_EN_GET, fal_rtd_pppoe_en_get), \ + SW_API_DEF(SW_API_PPPOE_EN_SET, fal_pppoe_l3intf_enable), \ + SW_API_DEF(SW_API_PPPOE_EN_GET, fal_pppoe_l3intf_status_get), + +#define PPPOE_API_PARAM \ + SW_API_DESC(SW_API_PPPOE_CMD_SET) \ + SW_API_DESC(SW_API_PPPOE_CMD_GET) \ + SW_API_DESC(SW_API_PPPOE_STATUS_SET) \ + SW_API_DESC(SW_API_PPPOE_STATUS_GET) \ + SW_API_DESC(SW_API_PPPOE_SESSION_ADD) \ + SW_API_DESC(SW_API_PPPOE_SESSION_DEL) \ + SW_API_DESC(SW_API_PPPOE_SESSION_GET) \ + SW_API_DESC(SW_API_PPPOE_SESSION_TABLE_ADD) \ + SW_API_DESC(SW_API_PPPOE_SESSION_TABLE_DEL) \ + SW_API_DESC(SW_API_PPPOE_SESSION_TABLE_GET) \ + 
SW_API_DESC(SW_API_PPPOE_SESSION_ID_SET) \ + SW_API_DESC(SW_API_PPPOE_SESSION_ID_GET) \ + SW_API_DESC(SW_API_RTD_PPPOE_EN_SET) \ + SW_API_DESC(SW_API_RTD_PPPOE_EN_GET) \ + SW_API_DESC(SW_API_PPPOE_EN_SET) \ + SW_API_DESC(SW_API_PPPOE_EN_GET) + +#else +#define PPPOE_API +#define PPPOE_API_PARAM +#endif + +#ifdef IN_BM +#define BM_API \ + SW_API_DEF(SW_API_BM_CTRL_SET, fal_port_bm_ctrl_set), \ + SW_API_DEF(SW_API_BM_CTRL_GET, fal_port_bm_ctrl_get), \ + SW_API_DEF(SW_API_BM_PORTGROUP_MAP_SET, fal_port_bufgroup_map_set), \ + SW_API_DEF(SW_API_BM_PORTGROUP_MAP_GET, fal_port_bufgroup_map_get), \ + SW_API_DEF(SW_API_BM_GROUP_BUFFER_SET, fal_bm_bufgroup_buffer_set), \ + SW_API_DEF(SW_API_BM_GROUP_BUFFER_GET, fal_bm_bufgroup_buffer_get), \ + SW_API_DEF(SW_API_BM_PORT_RSVBUFFER_SET, fal_bm_port_reserved_buffer_set), \ + SW_API_DEF(SW_API_BM_PORT_RSVBUFFER_GET, fal_bm_port_reserved_buffer_get), \ + SW_API_DEF(SW_API_BM_STATIC_THRESH_SET, fal_bm_port_static_thresh_set), \ + SW_API_DEF(SW_API_BM_STATIC_THRESH_GET, fal_bm_port_static_thresh_get), \ + SW_API_DEF(SW_API_BM_DYNAMIC_THRESH_SET, fal_bm_port_dynamic_thresh_set), \ + SW_API_DEF(SW_API_BM_DYNAMIC_THRESH_GET, fal_bm_port_dynamic_thresh_get), \ + SW_API_DEF(SW_API_BM_PORT_COUNTER_GET, fal_bm_port_counter_get), + +#define BM_API_PARAM \ + SW_API_DESC(SW_API_BM_CTRL_SET) \ + SW_API_DESC(SW_API_BM_CTRL_GET) \ + SW_API_DESC(SW_API_BM_PORTGROUP_MAP_SET) \ + SW_API_DESC(SW_API_BM_PORTGROUP_MAP_GET) \ + SW_API_DESC(SW_API_BM_GROUP_BUFFER_SET) \ + SW_API_DESC(SW_API_BM_GROUP_BUFFER_GET) \ + SW_API_DESC(SW_API_BM_PORT_RSVBUFFER_SET) \ + SW_API_DESC(SW_API_BM_PORT_RSVBUFFER_GET) \ + SW_API_DESC(SW_API_BM_STATIC_THRESH_SET) \ + SW_API_DESC(SW_API_BM_STATIC_THRESH_GET) \ + SW_API_DESC(SW_API_BM_DYNAMIC_THRESH_SET) \ + SW_API_DESC(SW_API_BM_DYNAMIC_THRESH_GET) \ + SW_API_DESC(SW_API_BM_PORT_COUNTER_GET) + +#else +#define BM_API +#define BM_API_PARAM +#endif + +/*qca808x_start*/ +#define REG_API \ + SW_API_DEF(SW_API_PHY_GET, 
fal_phy_get), \ + SW_API_DEF(SW_API_PHY_SET, fal_phy_set), \ +/*qca808x_end*/\ + SW_API_DEF(SW_API_REG_GET, fal_reg_get), \ + SW_API_DEF(SW_API_REG_SET, fal_reg_set), \ + SW_API_DEF(SW_API_PSGMII_REG_GET, fal_psgmii_reg_get), \ + SW_API_DEF(SW_API_PSGMII_REG_SET, fal_psgmii_reg_set), \ + SW_API_DEF(SW_API_REG_FIELD_GET, fal_reg_field_get), \ + SW_API_DEF(SW_API_REG_FIELD_SET, fal_reg_field_set), \ + SW_API_DEF(SW_API_REG_DUMP, fal_reg_dump), \ + SW_API_DEF(SW_API_DBG_REG_DUMP, fal_dbg_reg_dump),\ + SW_API_DEF(SW_API_DBG_PSGMII_SELF_TEST, fal_debug_psgmii_self_test), \ + SW_API_DEF(SW_API_PHY_DUMP, fal_phy_dump), \ + SW_API_DEF(SW_API_UNIPHY_REG_GET, fal_uniphy_reg_get), \ + SW_API_DEF(SW_API_UNIPHY_REG_SET, fal_uniphy_reg_set),\ +/*qca808x_start*/\ + /*end of REG_API*/ +#define REG_API_PARAM \ + SW_API_DESC(SW_API_PHY_GET) \ + SW_API_DESC(SW_API_PHY_SET)\ +/*qca808x_end*/\ + SW_API_DESC(SW_API_REG_GET) \ + SW_API_DESC(SW_API_REG_SET) \ + SW_API_DESC(SW_API_PSGMII_REG_GET) \ + SW_API_DESC(SW_API_PSGMII_REG_SET) \ + SW_API_DESC(SW_API_REG_FIELD_GET) \ + SW_API_DESC(SW_API_REG_FIELD_SET) \ + SW_API_DESC(SW_API_REG_DUMP) \ + SW_API_DESC(SW_API_DBG_REG_DUMP) \ + SW_API_DESC(SW_API_DBG_PSGMII_SELF_TEST) \ + SW_API_DESC(SW_API_PHY_DUMP) \ + SW_API_DESC(SW_API_UNIPHY_REG_GET) \ + SW_API_DESC(SW_API_UNIPHY_REG_SET)\ +/*qca808x_start*/\ +/*end of REG_API_PARAM*/ +/*qca808x_end*/ +#ifdef IN_CTRLPKT +#define CTRLPKT_API \ + SW_API_DEF(SW_API_MGMTCTRL_ETHTYPE_PROFILE_SET, fal_mgmtctrl_ethtype_profile_set), \ + SW_API_DEF(SW_API_MGMTCTRL_ETHTYPE_PROFILE_GET, fal_mgmtctrl_ethtype_profile_get), \ + SW_API_DEF(SW_API_MGMTCTRL_RFDB_PROFILE_SET, fal_mgmtctrl_rfdb_profile_set), \ + SW_API_DEF(SW_API_MGMTCTRL_RFDB_PROFILE_GET, fal_mgmtctrl_rfdb_profile_get), \ + SW_API_DEF(SW_API_MGMTCTRL_CTRLPKT_PROFILE_ADD, fal_mgmtctrl_ctrlpkt_profile_add), \ + SW_API_DEF(SW_API_MGMTCTRL_CTRLPKT_PROFILE_DEL, fal_mgmtctrl_ctrlpkt_profile_del), \ + SW_API_DEF(SW_API_MGMTCTRL_CTRLPKT_PROFILE_GETFIRST, 
fal_mgmtctrl_ctrlpkt_profile_getfirst), \ + SW_API_DEF(SW_API_MGMTCTRL_CTRLPKT_PROFILE_GETNEXT, fal_mgmtctrl_ctrlpkt_profile_getnext), + +#define CTRLPKT_API_PARAM \ + SW_API_DESC(SW_API_MGMTCTRL_ETHTYPE_PROFILE_SET) \ + SW_API_DESC(SW_API_MGMTCTRL_ETHTYPE_PROFILE_GET) \ + SW_API_DESC(SW_API_MGMTCTRL_RFDB_PROFILE_SET) \ + SW_API_DESC(SW_API_MGMTCTRL_RFDB_PROFILE_GET) \ + SW_API_DESC(SW_API_MGMTCTRL_CTRLPKT_PROFILE_ADD) \ + SW_API_DESC(SW_API_MGMTCTRL_CTRLPKT_PROFILE_DEL) \ + SW_API_DESC(SW_API_MGMTCTRL_CTRLPKT_PROFILE_GETFIRST) \ + SW_API_DESC(SW_API_MGMTCTRL_CTRLPKT_PROFILE_GETNEXT) +#else +#define CTRLPKT_API +#define CTRLPKT_API_PARAM +#endif + +#ifdef IN_SERVCODE +#define SERVCODE_API \ + SW_API_DEF(SW_API_SERVCODE_CONFIG_SET, fal_servcode_config_set), \ + SW_API_DEF(SW_API_SERVCODE_CONFIG_GET, fal_servcode_config_get), \ + SW_API_DEF(SW_API_SERVCODE_LOOPCHECK_EN, fal_servcode_loopcheck_en), \ + SW_API_DEF(SW_API_SERVCODE_LOOPCHECK_STATUS_GET, fal_servcode_loopcheck_status_get), + +#define SERVCODE_API_PARAM \ + SW_API_DESC(SW_API_SERVCODE_CONFIG_SET) \ + SW_API_DESC(SW_API_SERVCODE_CONFIG_GET) \ + SW_API_DESC(SW_API_SERVCODE_LOOPCHECK_EN) \ + SW_API_DESC(SW_API_SERVCODE_LOOPCHECK_STATUS_GET) +#else +#define SERVCODE_API +#define SERVCODE_API_PARAM +#endif + +#ifdef IN_RSS_HASH +#define RSS_HASH_API \ + SW_API_DEF(SW_API_RSS_HASH_CONFIG_SET, fal_rss_hash_config_set), \ + SW_API_DEF(SW_API_RSS_HASH_CONFIG_GET, fal_rss_hash_config_get), + +#define RSS_HASH_API_PARAM \ + SW_API_DESC(SW_API_RSS_HASH_CONFIG_SET) \ + SW_API_DESC(SW_API_RSS_HASH_CONFIG_GET) +#else +#define RSS_HASH_API +#define RSS_HASH_API_PARAM +#endif + +#ifdef IN_SHAPER +#define SHAPER_API \ + SW_API_DEF(SW_API_PORT_SHAPER_TIMESLOT_SET, fal_port_shaper_timeslot_set), \ + SW_API_DEF(SW_API_PORT_SHAPER_TIMESLOT_GET, fal_port_shaper_timeslot_get), \ + SW_API_DEF(SW_API_FLOW_SHAPER_TIMESLOT_SET, fal_flow_shaper_timeslot_set), \ + SW_API_DEF(SW_API_FLOW_SHAPER_TIMESLOT_GET, 
fal_flow_shaper_timeslot_get), \ + SW_API_DEF(SW_API_QUEUE_SHAPER_TIMESLOT_SET, fal_queue_shaper_timeslot_set), \ + SW_API_DEF(SW_API_QUEUE_SHAPER_TIMESLOT_GET, fal_queue_shaper_timeslot_get), \ + SW_API_DEF(SW_API_PORT_SHAPER_TOKEN_NUMBER_SET, fal_port_shaper_token_number_set), \ + SW_API_DEF(SW_API_PORT_SHAPER_TOKEN_NUMBER_GET, fal_port_shaper_token_number_get), \ + SW_API_DEF(SW_API_FLOW_SHAPER_TOKEN_NUMBER_SET, fal_flow_shaper_token_number_set), \ + SW_API_DEF(SW_API_FLOW_SHAPER_TOKEN_NUMBER_GET, fal_flow_shaper_token_number_get), \ + SW_API_DEF(SW_API_QUEUE_SHAPER_TOKEN_NUMBER_SET, fal_queue_shaper_token_number_set), \ + SW_API_DEF(SW_API_QUEUE_SHAPER_TOKEN_NUMBER_GET, fal_queue_shaper_token_number_get), \ + SW_API_DEF(SW_API_PORT_SHAPER_SET, fal_port_shaper_set), \ + SW_API_DEF(SW_API_PORT_SHAPER_GET,fal_port_shaper_get), \ + SW_API_DEF(SW_API_FLOW_SHAPER_SET, fal_flow_shaper_set), \ + SW_API_DEF(SW_API_FLOW_SHAPER_GET,fal_flow_shaper_get), \ + SW_API_DEF(SW_API_QUEUE_SHAPER_SET, fal_queue_shaper_set), \ + SW_API_DEF(SW_API_QUEUE_SHAPER_GET,fal_queue_shaper_get), \ + SW_API_DEF(SW_API_SHAPER_IPG_PRE_SET, fal_shaper_ipg_preamble_length_set), \ + SW_API_DEF(SW_API_SHAPER_IPG_PRE_GET,fal_shaper_ipg_preamble_length_get), + + +#define SHAPER_API_PARAM \ + SW_API_DESC(SW_API_PORT_SHAPER_TIMESLOT_SET) \ + SW_API_DESC(SW_API_PORT_SHAPER_TIMESLOT_GET) \ + SW_API_DESC(SW_API_FLOW_SHAPER_TIMESLOT_SET) \ + SW_API_DESC(SW_API_FLOW_SHAPER_TIMESLOT_GET) \ + SW_API_DESC(SW_API_QUEUE_SHAPER_TIMESLOT_SET) \ + SW_API_DESC(SW_API_QUEUE_SHAPER_TIMESLOT_GET) \ + SW_API_DESC(SW_API_PORT_SHAPER_TOKEN_NUMBER_SET) \ + SW_API_DESC(SW_API_PORT_SHAPER_TOKEN_NUMBER_GET) \ + SW_API_DESC(SW_API_FLOW_SHAPER_TOKEN_NUMBER_SET) \ + SW_API_DESC(SW_API_FLOW_SHAPER_TOKEN_NUMBER_GET) \ + SW_API_DESC(SW_API_QUEUE_SHAPER_TOKEN_NUMBER_SET) \ + SW_API_DESC(SW_API_QUEUE_SHAPER_TOKEN_NUMBER_GET) \ + SW_API_DESC(SW_API_PORT_SHAPER_SET) \ + SW_API_DESC(SW_API_PORT_SHAPER_GET) \ + 
SW_API_DESC(SW_API_FLOW_SHAPER_SET) \ + SW_API_DESC(SW_API_FLOW_SHAPER_GET) \ + SW_API_DESC(SW_API_QUEUE_SHAPER_SET) \ + SW_API_DESC(SW_API_QUEUE_SHAPER_GET) \ + SW_API_DESC(SW_API_SHAPER_IPG_PRE_SET) \ + SW_API_DESC(SW_API_SHAPER_IPG_PRE_GET) + +#else +#define SHAPER_API +#define SHAPER_API_PARAM +#endif + + +#ifdef IN_POLICER +#define POLICER_API \ + SW_API_DEF(SW_API_POLICER_TIMESLOT_SET, fal_policer_timeslot_set), \ + SW_API_DEF(SW_API_POLICER_TIMESLOT_GET, fal_policer_timeslot_get), \ + SW_API_DEF(SW_API_POLICER_PORT_COUNTER_GET, fal_port_policer_counter_get), \ + SW_API_DEF(SW_API_POLICER_ACL_COUNTER_GET, fal_acl_policer_counter_get), \ + SW_API_DEF(SW_API_POLICER_COMPENSATION_SET, fal_port_policer_compensation_byte_set), \ + SW_API_DEF(SW_API_POLICER_COMPENSATION_GET, fal_port_policer_compensation_byte_get), \ + SW_API_DEF(SW_API_POLICER_PORT_ENTRY_SET, fal_port_policer_entry_set), \ + SW_API_DEF(SW_API_POLICER_PORT_ENTRY_GET, fal_port_policer_entry_get), \ + SW_API_DEF(SW_API_POLICER_ACL_ENTRY_SET, fal_acl_policer_entry_set), \ + SW_API_DEF(SW_API_POLICER_ACL_ENTRY_GET,fal_acl_policer_entry_get), \ + SW_API_DEF(SW_API_POLICER_GLOBAL_COUNTER_GET, fal_policer_global_counter_get), + +#define POLICER_API_PARAM \ + SW_API_DESC(SW_API_POLICER_TIMESLOT_SET) \ + SW_API_DESC(SW_API_POLICER_TIMESLOT_GET) \ + SW_API_DESC(SW_API_POLICER_PORT_COUNTER_GET) \ + SW_API_DESC(SW_API_POLICER_ACL_COUNTER_GET) \ + SW_API_DESC(SW_API_POLICER_COMPENSATION_SET) \ + SW_API_DESC(SW_API_POLICER_COMPENSATION_GET) \ + SW_API_DESC(SW_API_POLICER_PORT_ENTRY_SET) \ + SW_API_DESC(SW_API_POLICER_PORT_ENTRY_GET) \ + SW_API_DESC(SW_API_POLICER_ACL_ENTRY_SET) \ + SW_API_DESC(SW_API_POLICER_ACL_ENTRY_GET) \ + SW_API_DESC(SW_API_POLICER_GLOBAL_COUNTER_GET) + +#else +#define POLICER_API +#define POLICER_API_PARAM +#endif + +#ifdef IN_PTP +#define PTP_API \ + SW_API_DEF(SW_API_PTP_CONFIG_SET, fal_ptp_config_set), \ + SW_API_DEF(SW_API_PTP_CONFIG_GET, fal_ptp_config_get), \ + 
SW_API_DEF(SW_API_PTP_REFERENCE_CLOCK_SET, fal_ptp_reference_clock_set), \ + SW_API_DEF(SW_API_PTP_REFERENCE_CLOCK_GET, fal_ptp_reference_clock_get), \ + SW_API_DEF(SW_API_PTP_RX_TIMESTAMP_MODE_SET, fal_ptp_rx_timestamp_mode_set), \ + SW_API_DEF(SW_API_PTP_RX_TIMESTAMP_MODE_GET, fal_ptp_rx_timestamp_mode_get), \ + SW_API_DEF(SW_API_PTP_TIMESTAMP_GET, fal_ptp_timestamp_get), \ + SW_API_DEF(SW_API_PTP_PKT_TIMESTAMP_SET, fal_ptp_pkt_timestamp_set), \ + SW_API_DEF(SW_API_PTP_PKT_TIMESTAMP_GET, fal_ptp_pkt_timestamp_get), \ + SW_API_DEF(SW_API_PTP_GRANDMASTER_MODE_SET, fal_ptp_grandmaster_mode_set), \ + SW_API_DEF(SW_API_PTP_GRANDMASTER_MODE_GET, fal_ptp_grandmaster_mode_get), \ + SW_API_DEF(SW_API_PTP_RTC_TIME_SET, fal_ptp_rtc_time_set), \ + SW_API_DEF(SW_API_PTP_RTC_TIME_GET, fal_ptp_rtc_time_get), \ + SW_API_DEF(SW_API_PTP_RTC_TIME_CLEAR, fal_ptp_rtc_time_clear), \ + SW_API_DEF(SW_API_PTP_RTC_ADJTIME_SET, fal_ptp_rtc_adjtime_set), \ + SW_API_DEF(SW_API_PTP_RTC_ADJFREQ_SET, fal_ptp_rtc_adjfreq_set), \ + SW_API_DEF(SW_API_PTP_RTC_ADJFREQ_GET, fal_ptp_rtc_adjfreq_get), \ + SW_API_DEF(SW_API_PTP_LINK_DELAY_SET, fal_ptp_link_delay_set), \ + SW_API_DEF(SW_API_PTP_LINK_DELAY_GET, fal_ptp_link_delay_get), \ + SW_API_DEF(SW_API_PTP_SECURITY_SET, fal_ptp_security_set), \ + SW_API_DEF(SW_API_PTP_SECURITY_GET, fal_ptp_security_get), \ + SW_API_DEF(SW_API_PTP_PPS_SIGNAL_CONTROL_SET, fal_ptp_pps_signal_control_set), \ + SW_API_DEF(SW_API_PTP_PPS_SIGNAL_CONTROL_GET, fal_ptp_pps_signal_control_get), \ + SW_API_DEF(SW_API_PTP_RX_CRC_RECALC_SET, fal_ptp_rx_crc_recalc_enable), \ + SW_API_DEF(SW_API_PTP_RX_CRC_RECALC_GET, fal_ptp_rx_crc_recalc_status_get), \ + SW_API_DEF(SW_API_PTP_ASYM_CORRECTION_SET, fal_ptp_asym_correction_set), \ + SW_API_DEF(SW_API_PTP_ASYM_CORRECTION_GET, fal_ptp_asym_correction_get), \ + SW_API_DEF(SW_API_PTP_OUTPUT_WAVEFORM_SET, fal_ptp_output_waveform_set), \ + SW_API_DEF(SW_API_PTP_OUTPUT_WAVEFORM_GET, fal_ptp_output_waveform_get), \ + 
SW_API_DEF(SW_API_PTP_RTC_TIME_SNAPSHOT_SET, fal_ptp_rtc_time_snapshot_enable), \ + SW_API_DEF(SW_API_PTP_RTC_TIME_SNAPSHOT_GET, fal_ptp_rtc_time_snapshot_status_get), \ + SW_API_DEF(SW_API_PTP_INCREMENT_SYNC_FROM_CLOCK_SET, \ + fal_ptp_increment_sync_from_clock_enable), \ + SW_API_DEF(SW_API_PTP_INCREMENT_SYNC_FROM_CLOCK_GET, \ + fal_ptp_increment_sync_from_clock_status_get), \ + SW_API_DEF(SW_API_PTP_TOD_UART_SET, fal_ptp_tod_uart_set), \ + SW_API_DEF(SW_API_PTP_TOD_UART_GET, fal_ptp_tod_uart_get), \ + SW_API_DEF(SW_API_PTP_ENHANCED_TIMESTAMP_ENGINE_SET, fal_ptp_enhanced_timestamp_engine_set), \ + SW_API_DEF(SW_API_PTP_ENHANCED_TIMESTAMP_ENGINE_GET, fal_ptp_enhanced_timestamp_engine_get), \ + SW_API_DEF(SW_API_PTP_TRIGGER_SET, fal_ptp_trigger_set), \ + SW_API_DEF(SW_API_PTP_TRIGGER_GET, fal_ptp_trigger_get), \ + SW_API_DEF(SW_API_PTP_CAPTURE_SET, fal_ptp_capture_set), \ + SW_API_DEF(SW_API_PTP_CAPTURE_GET, fal_ptp_capture_get), \ + SW_API_DEF(SW_API_PTP_INTERRUPT_SET, fal_ptp_interrupt_set), \ + SW_API_DEF(SW_API_PTP_INTERRUPT_GET, fal_ptp_interrupt_get), + +#define PTP_API_PARAM \ + SW_API_DESC(SW_API_PTP_CONFIG_SET) \ + SW_API_DESC(SW_API_PTP_CONFIG_GET) \ + SW_API_DESC(SW_API_PTP_REFERENCE_CLOCK_SET) \ + SW_API_DESC(SW_API_PTP_REFERENCE_CLOCK_GET) \ + SW_API_DESC(SW_API_PTP_RX_TIMESTAMP_MODE_SET) \ + SW_API_DESC(SW_API_PTP_RX_TIMESTAMP_MODE_GET) \ + SW_API_DESC(SW_API_PTP_TIMESTAMP_GET) \ + SW_API_DESC(SW_API_PTP_PKT_TIMESTAMP_SET) \ + SW_API_DESC(SW_API_PTP_PKT_TIMESTAMP_GET) \ + SW_API_DESC(SW_API_PTP_GRANDMASTER_MODE_SET) \ + SW_API_DESC(SW_API_PTP_GRANDMASTER_MODE_GET) \ + SW_API_DESC(SW_API_PTP_RTC_TIME_SET) \ + SW_API_DESC(SW_API_PTP_RTC_TIME_GET) \ + SW_API_DESC(SW_API_PTP_RTC_TIME_CLEAR) \ + SW_API_DESC(SW_API_PTP_RTC_ADJTIME_SET) \ + SW_API_DESC(SW_API_PTP_RTC_ADJFREQ_SET) \ + SW_API_DESC(SW_API_PTP_RTC_ADJFREQ_GET) \ + SW_API_DESC(SW_API_PTP_LINK_DELAY_SET) \ + SW_API_DESC(SW_API_PTP_LINK_DELAY_GET) \ + SW_API_DESC(SW_API_PTP_SECURITY_SET) \ + 
SW_API_DESC(SW_API_PTP_SECURITY_GET) \ + SW_API_DESC(SW_API_PTP_PPS_SIGNAL_CONTROL_SET) \ + SW_API_DESC(SW_API_PTP_PPS_SIGNAL_CONTROL_GET) \ + SW_API_DESC(SW_API_PTP_RX_CRC_RECALC_SET) \ + SW_API_DESC(SW_API_PTP_RX_CRC_RECALC_GET) \ + SW_API_DESC(SW_API_PTP_ASYM_CORRECTION_SET) \ + SW_API_DESC(SW_API_PTP_ASYM_CORRECTION_GET) \ + SW_API_DESC(SW_API_PTP_OUTPUT_WAVEFORM_SET) \ + SW_API_DESC(SW_API_PTP_OUTPUT_WAVEFORM_GET) \ + SW_API_DESC(SW_API_PTP_RTC_TIME_SNAPSHOT_SET) \ + SW_API_DESC(SW_API_PTP_RTC_TIME_SNAPSHOT_GET) \ + SW_API_DESC(SW_API_PTP_INCREMENT_SYNC_FROM_CLOCK_SET) \ + SW_API_DESC(SW_API_PTP_INCREMENT_SYNC_FROM_CLOCK_GET) \ + SW_API_DESC(SW_API_PTP_TOD_UART_SET) \ + SW_API_DESC(SW_API_PTP_TOD_UART_GET) \ + SW_API_DESC(SW_API_PTP_ENHANCED_TIMESTAMP_ENGINE_SET) \ + SW_API_DESC(SW_API_PTP_ENHANCED_TIMESTAMP_ENGINE_GET) \ + SW_API_DESC(SW_API_PTP_TRIGGER_SET) \ + SW_API_DESC(SW_API_PTP_TRIGGER_GET) \ + SW_API_DESC(SW_API_PTP_CAPTURE_SET) \ + SW_API_DESC(SW_API_PTP_CAPTURE_GET) \ + SW_API_DESC(SW_API_PTP_INTERRUPT_SET) \ + SW_API_DESC(SW_API_PTP_INTERRUPT_GET) +#else +#define PTP_API +#define PTP_API_PARAM +#endif + +#ifdef IN_SFP +#define SFP_API \ + SW_API_DEF(SW_API_SFP_DATA_GET, fal_sfp_eeprom_data_get), \ + SW_API_DEF(SW_API_SFP_DATA_SET, fal_sfp_eeprom_data_set), \ + SW_API_DEF(SW_API_SFP_DEV_TYPE_GET, fal_sfp_device_type_get), \ + SW_API_DEF(SW_API_SFP_TRANSC_CODE_GET, fal_sfp_transceiver_code_get), \ + SW_API_DEF(SW_API_SFP_RATE_ENCODE_GET, fal_sfp_rate_encode_get), \ + SW_API_DEF(SW_API_SFP_LINK_LENGTH_GET, fal_sfp_link_length_get), \ + SW_API_DEF(SW_API_SFP_VENDOR_INFO_GET, fal_sfp_vendor_info_get), \ + SW_API_DEF(SW_API_SFP_LASER_WAVELENGTH_GET, fal_sfp_laser_wavelength_get), \ + SW_API_DEF(SW_API_SFP_OPTION_GET, fal_sfp_option_get), \ + SW_API_DEF(SW_API_SFP_CTRL_RATE_GET, fal_sfp_ctrl_rate_get), \ + SW_API_DEF(SW_API_SFP_ENHANCED_CFG_GET, fal_sfp_enhanced_cfg_get), \ + SW_API_DEF(SW_API_SFP_DIAG_THRESHOLD_GET, fal_sfp_diag_internal_threshold_get), 
\ + SW_API_DEF(SW_API_SFP_DIAG_CAL_CONST_GET, fal_sfp_diag_extenal_calibration_const_get), \ + SW_API_DEF(SW_API_SFP_DIAG_REALTIME_GET, fal_sfp_diag_realtime_get), \ + SW_API_DEF(SW_API_SFP_DIAG_CTRL_STATUS_GET, fal_sfp_diag_ctrl_status_get), \ + SW_API_DEF(SW_API_SFP_DIAG_ALARM_WARN_FLAG_GET, fal_sfp_diag_alarm_warning_flag_get), \ + SW_API_DEF(SW_API_SFP_CHECKCODE_GET, fal_sfp_checkcode_get), + +#define SFP_API_PARAM \ + SW_API_DESC(SW_API_SFP_DATA_GET) \ + SW_API_DESC(SW_API_SFP_DATA_SET) \ + SW_API_DESC(SW_API_SFP_DEV_TYPE_GET) \ + SW_API_DESC(SW_API_SFP_TRANSC_CODE_GET) \ + SW_API_DESC(SW_API_SFP_RATE_ENCODE_GET) \ + SW_API_DESC(SW_API_SFP_LINK_LENGTH_GET) \ + SW_API_DESC(SW_API_SFP_VENDOR_INFO_GET) \ + SW_API_DESC(SW_API_SFP_LASER_WAVELENGTH_GET) \ + SW_API_DESC(SW_API_SFP_OPTION_GET) \ + SW_API_DESC(SW_API_SFP_CTRL_RATE_GET) \ + SW_API_DESC(SW_API_SFP_ENHANCED_CFG_GET) \ + SW_API_DESC(SW_API_SFP_DIAG_THRESHOLD_GET) \ + SW_API_DESC(SW_API_SFP_DIAG_CAL_CONST_GET) \ + SW_API_DESC(SW_API_SFP_DIAG_REALTIME_GET) \ + SW_API_DESC(SW_API_SFP_DIAG_CTRL_STATUS_GET) \ + SW_API_DESC(SW_API_SFP_DIAG_ALARM_WARN_FLAG_GET) \ + SW_API_DESC(SW_API_SFP_CHECKCODE_GET) +#else +#define SFP_API +#define SFP_API_PARAM +#endif + +/*qca808x_start*/ +#define SSDK_API \ +/*qca808x_end*/\ + SW_API_DEF(SW_API_SWITCH_RESET, fal_reset), \ + SW_API_DEF(SW_API_SSDK_CFG, fal_ssdk_cfg), \ + SW_API_DEF(SW_API_MODULE_FUNC_CTRL_SET, fal_module_func_ctrl_set), \ + SW_API_DEF(SW_API_MODULE_FUNC_CTRL_GET, fal_module_func_ctrl_get), \ + /*qca808x_start*/\ + PORTCONTROL_API \ +/*qca808x_end*/\ + VLAN_API \ + PORTVLAN_API \ + FDB_API \ + ACL_API \ + QOS_API \ + IGMP_API \ + LEAKY_API \ + MIRROR_API \ + RATE_API \ + STP_API \ + MIB_API \ + MISC_API \ + LED_API \ + COSMAP_API \ + SEC_API \ + IP_API \ + NAT_API \ + FLOW_API \ + TRUNK_API \ + INTERFACECTRL_API \ + VSI_API \ + QM_API \ + BM_API \ + PPPOE_API \ +/*qca808x_start*/\ + REG_API \ +/*qca808x_end*/\ + CTRLPKT_API \ + SERVCODE_API \ + RSS_HASH_API \ 
+ POLICER_API \ + SHAPER_API \ + PTP_API \ + SFP_API \ +/*qca808x_start*/\ + SW_API_DEF(SW_API_MAX, NULL), + + +#define SSDK_PARAM \ +/*qca808x_end*/\ + SW_PARAM_DEF(SW_API_SWITCH_RESET, SW_UINT32, 4, SW_PARAM_IN, "Dev ID"), \ + SW_PARAM_DEF(SW_API_SSDK_CFG, SW_UINT32, 4, SW_PARAM_IN, "Dev ID"), \ + SW_PARAM_DEF(SW_API_SSDK_CFG, SW_SSDK_CFG, sizeof(ssdk_cfg_t), SW_PARAM_PTR|SW_PARAM_OUT, "ssdk configuration"), \ + SW_PARAM_DEF(SW_API_MODULE_FUNC_CTRL_SET, SW_UINT32, 4, SW_PARAM_IN, "Dev ID"), \ + SW_PARAM_DEF(SW_API_MODULE_FUNC_CTRL_SET, SW_MODULE, 4, SW_PARAM_IN, "Module"), \ + SW_PARAM_DEF(SW_API_MODULE_FUNC_CTRL_SET, SW_FUNC_CTRL, sizeof(fal_func_ctrl_t), SW_PARAM_PTR|SW_PARAM_IN, "Function bitmap"), \ + SW_PARAM_DEF(SW_API_MODULE_FUNC_CTRL_GET, SW_UINT32, 4, SW_PARAM_IN, "Dev ID"), \ + SW_PARAM_DEF(SW_API_MODULE_FUNC_CTRL_GET, SW_MODULE, 4, SW_PARAM_IN, "Module"), \ + SW_PARAM_DEF(SW_API_MODULE_FUNC_CTRL_GET, SW_FUNC_CTRL, sizeof(fal_func_ctrl_t), SW_PARAM_PTR|SW_PARAM_OUT, "Function bitmap"), \ + MIB_API_PARAM \ + LEAKY_API_PARAM \ + MISC_API_PARAM \ + IGMP_API_PARAM \ + MIRROR_API_PARAM \ +/*qca808x_start*/\ + PORTCONTROL_API_PARAM \ +/*qca808x_end*/\ + PORTVLAN_API_PARAM \ + VLAN_API_PARAM \ + FDB_API_PARAM \ + QOS_API_PARAM \ + RATE_API_PARAM \ + STP_API_PARAM \ + ACL_API_PARAM \ + LED_API_PARAM \ + COSMAP_API_PARAM \ + SEC_API_PARAM \ + IP_API_PARAM \ + NAT_API_PARAM \ + FLOW_API_PARAM \ + TRUNK_API_PARAM \ + INTERFACECTRL_API_PARAM \ + VSI_API_PARAM \ + QM_API_PARAM \ + BM_API_PARAM \ + PPPOE_API_PARAM \ +/*qca808x_start*/\ + REG_API_PARAM \ +/*qca808x_end*/\ + CTRLPKT_API_PARAM \ + SERVCODE_API_PARAM \ + RSS_HASH_API_PARAM \ + POLICER_API_PARAM \ + SHAPER_API_PARAM \ + PTP_API_PARAM \ + SFP_API_PARAM \ +/*qca808x_start*/\ + SW_PARAM_DEF(SW_API_MAX, SW_UINT32, 4, SW_PARAM_IN, "Dev ID"), + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + + +#endif /* _FAL_API_H_ */ +/*qca808x_end*/ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_bm.h 
b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_bm.h new file mode 100755 index 000000000..b8931053e --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_bm.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/** + * @defgroup fal_qos FAL_BM + * @{ + */ +#ifndef _FAL_BM_H_ +#define _FAL_BM_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + +typedef struct +{ + a_uint16_t max_thresh; /* Static Maximum threshold */ + a_uint16_t resume_off; /*resume offset */ +} fal_bm_static_cfg_t; + +typedef struct +{ + a_uint8_t weight; /* port weight in the shared group */ + a_uint16_t shared_ceiling; /* Maximum shared buffers */ + a_uint16_t resume_off; /*resume offset */ + a_uint16_t resume_min_thresh; /* Minumum thresh for resume */ +} fal_bm_dynamic_cfg_t; + +enum { + FUNC_PORT_BUFGROUP_MAP_GET = 0, + FUNC_BM_PORT_RESERVED_BUFFER_GET, + FUNC_BM_BUFGROUP_BUFFER_GET, + FUNC_BM_PORT_DYNAMIC_THRESH_GET, + FUNC_PORT_BM_CTRL_GET, + FUNC_BM_BUFGROUP_BUFFER_SET, + FUNC_PORT_BUFGROUP_MAP_SET, + FUNC_BM_PORT_STATIC_THRESH_GET, + FUNC_BM_PORT_RESERVED_BUFFER_SET, + FUNC_BM_PORT_STATIC_THRESH_SET, + FUNC_BM_PORT_DYNAMIC_THRESH_SET, + FUNC_PORT_BM_CTRL_SET, + FUNC_PORT_TDM_CTRL_SET, + FUNC_PORT_TDM_TICK_CFG_SET, + FUNC_BM_PORT_COUNTER_GET, +}; + +typedef struct +{ + a_uint64_t drop_byte_counter; /*drop byte due to overload*/ + a_uint32_t drop_packet_counter; /*drop packet due to overload*/ + a_uint64_t fc_drop_byte_counter; /*drop byte due to fc*/ + a_uint32_t fc_drop_packet_counter; /*drop packet due to fc*/ + a_uint32_t used_counter; /*total used buffer counter for the port*/ + a_uint32_t react_counter; /*react used buffer counter for the port*/ +} fal_bm_port_counter_t; + +sw_error_t +fal_port_bm_ctrl_set(a_uint32_t dev_id, fal_port_t port, a_bool_t enable); + +sw_error_t +fal_port_bm_ctrl_get(a_uint32_t dev_id, fal_port_t port, a_bool_t *enable); + +sw_error_t +fal_port_bufgroup_map_set(a_uint32_t dev_id, fal_port_t port, + a_uint8_t group); + +sw_error_t +fal_port_bufgroup_map_get(a_uint32_t dev_id, fal_port_t port, + a_uint8_t *group); + +sw_error_t +fal_bm_bufgroup_buffer_set(a_uint32_t dev_id, a_uint8_t 
group, + a_uint16_t buff_num); + +sw_error_t +fal_bm_bufgroup_buffer_get(a_uint32_t dev_id, a_uint8_t group, + a_uint16_t *buff_num); + +sw_error_t +fal_bm_port_reserved_buffer_set(a_uint32_t dev_id, fal_port_t port, + a_uint16_t prealloc_buff, a_uint16_t react_buff); + +sw_error_t +fal_bm_port_reserved_buffer_get(a_uint32_t dev_id, fal_port_t port, + a_uint16_t *prealloc_buff, a_uint16_t *react_buff); + +sw_error_t +fal_bm_port_static_thresh_set(a_uint32_t dev_id, fal_port_t port, + fal_bm_static_cfg_t *cfg); + +sw_error_t +fal_bm_port_static_thresh_get(a_uint32_t dev_id, fal_port_t port, + fal_bm_static_cfg_t *cfg); + +sw_error_t +fal_bm_port_dynamic_thresh_set(a_uint32_t dev_id, fal_port_t port, + fal_bm_dynamic_cfg_t *cfg); + +sw_error_t +fal_bm_port_dynamic_thresh_get(a_uint32_t dev_id, fal_port_t port, + fal_bm_dynamic_cfg_t *cfg); + +sw_error_t +fal_bm_port_counter_get(a_uint32_t dev_id, fal_port_t port, + fal_bm_port_counter_t *counter); + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _PORT_BM_H_ */ +/** + * @} + */ + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_cosmap.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_cosmap.h new file mode 100755 index 000000000..c8a9eff09 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_cosmap.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +/** + * @defgroup fal_cosmap FAL_COSMAP + * @{ + */ +#ifndef _FAL_COSMAP_H_ +#define _FAL_COSMAP_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + + typedef struct + { + a_bool_t remark_dscp; + a_bool_t remark_up; + a_bool_t remark_dei; + a_uint8_t g_dscp; + a_uint8_t y_dscp; + a_uint8_t g_up; + a_uint8_t y_up; + a_uint8_t g_dei; + a_uint8_t y_dei; + } fal_egress_remark_table_t; + + sw_error_t + fal_cosmap_dscp_to_pri_set(a_uint32_t dev_id, a_uint32_t dscp, + a_uint32_t pri); + + sw_error_t + fal_cosmap_dscp_to_pri_get(a_uint32_t dev_id, a_uint32_t dscp, + a_uint32_t * pri); + + sw_error_t + fal_cosmap_dscp_to_dp_set(a_uint32_t dev_id, a_uint32_t dscp, + a_uint32_t dp); + + sw_error_t + fal_cosmap_dscp_to_dp_get(a_uint32_t dev_id, a_uint32_t dscp, + a_uint32_t * dp); + + sw_error_t + fal_cosmap_up_to_pri_set(a_uint32_t dev_id, a_uint32_t up, + a_uint32_t pri); + + sw_error_t + fal_cosmap_up_to_pri_get(a_uint32_t dev_id, a_uint32_t up, + a_uint32_t * pri); + + sw_error_t + fal_cosmap_up_to_dp_set(a_uint32_t dev_id, a_uint32_t up, + a_uint32_t dp); + + sw_error_t + fal_cosmap_up_to_dp_get(a_uint32_t dev_id, a_uint32_t up, + a_uint32_t * dp); + + sw_error_t + fal_cosmap_dscp_to_ehpri_set(a_uint32_t dev_id, a_uint32_t dscp, + a_uint32_t pri); + + sw_error_t + fal_cosmap_dscp_to_ehpri_get(a_uint32_t dev_id, a_uint32_t dscp, + a_uint32_t * pri); + + sw_error_t + fal_cosmap_dscp_to_ehdp_set(a_uint32_t dev_id, a_uint32_t dscp, + a_uint32_t dp); + + sw_error_t + fal_cosmap_dscp_to_ehdp_get(a_uint32_t dev_id, a_uint32_t dscp, + a_uint32_t * dp); + + sw_error_t + 
fal_cosmap_up_to_ehpri_set(a_uint32_t dev_id, a_uint32_t up, + a_uint32_t pri); + + sw_error_t + fal_cosmap_up_to_ehpri_get(a_uint32_t dev_id, a_uint32_t up, + a_uint32_t * pri); + + sw_error_t + fal_cosmap_up_to_ehdp_set(a_uint32_t dev_id, a_uint32_t up, + a_uint32_t dp); + + sw_error_t + fal_cosmap_up_to_ehdp_get(a_uint32_t dev_id, a_uint32_t up, + a_uint32_t * dp); + + sw_error_t + fal_cosmap_pri_to_queue_set(a_uint32_t dev_id, a_uint32_t pri, + a_uint32_t queue); + + sw_error_t + fal_cosmap_pri_to_queue_get(a_uint32_t dev_id, a_uint32_t pri, + a_uint32_t * queue); + + sw_error_t + fal_cosmap_pri_to_ehqueue_set(a_uint32_t dev_id, a_uint32_t pri, + a_uint32_t queue); + + sw_error_t + fal_cosmap_pri_to_ehqueue_get(a_uint32_t dev_id, a_uint32_t pri, + a_uint32_t * queue); + + sw_error_t + fal_cosmap_egress_remark_set(a_uint32_t dev_id, a_uint32_t tbl_id, + fal_egress_remark_table_t * tbl); + + sw_error_t + fal_cosmap_egress_remark_get(a_uint32_t dev_id, a_uint32_t tbl_id, + fal_egress_remark_table_t * tbl); +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_COSMAP_H_ */ + +/** + * @} + */ + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_ctrlpkt.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_ctrlpkt.h new file mode 100755 index 000000000..e2419d157 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_ctrlpkt.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +/** + * @defgroup fal_ctrlpkt FAL_CTRLPKT + * @{ + */ +#ifndef _FAL_CTRLPKT_H_ +#define _FAL_CTRLPKT_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + +typedef struct { + fal_fwd_cmd_t action; /* the action when condition matched */ + a_bool_t sg_bypass; /* check if sg_bypass when condition matched */ + a_bool_t l2_filter_bypass; /* check if l2_filter_bypass when condition matched */ + a_bool_t in_stp_bypass; /* check if in_stp_bypass when condition matched */ + a_bool_t in_vlan_fltr_bypass; /* check if in_vlan_fltr_bypass when condition matched */ +} fal_ctrlpkt_action_t; + +typedef struct +{ + a_bool_t mgt_eapol; /* eapol protocol management type */ + a_bool_t mgt_pppoe; /* pppoe protocol management type */ + a_bool_t mgt_igmp; /* igmp protocol management type */ + a_bool_t mgt_arp_req; /* arp request protocol management type */ + a_bool_t mgt_arp_rep; /* arp response protocol management type */ + a_bool_t mgt_dhcp4; /* dhcp4 protocol management type */ + a_bool_t mgt_mld; /* mld protocol management type */ + a_bool_t mgt_ns; /* ns protocol management type */ + a_bool_t mgt_na; /* na protocol management type */ + a_bool_t mgt_dhcp6; /* dhcp6 protocol management type */ +} fal_ctrlpkt_protocol_type_t; + +typedef struct { + fal_ctrlpkt_action_t action; /* the all action when condition matched */ + fal_pbmp_t port_map; /* the condition port bitmap */ + a_uint32_t ethtype_profile_bitmap; /* the condition ethtype_profile bitmap */ + a_uint32_t rfdb_profile_bitmap; /* the condition rfdb_profile bitmap */ + fal_ctrlpkt_protocol_type_t protocol_types; /* the 
condition protocol types */ +} fal_ctrlpkt_profile_t; + +enum { + FUNC_MGMTCTRL_ETHTYPE_PROFILE_SET = 0, + FUNC_MGMTCTRL_ETHTYPE_PROFILE_GET, + FUNC_MGMTCTRL_RFDB_PROFILE_SET, + FUNC_MGMTCTRL_RFDB_PROFILE_GET, + FUNC_MGMTCTRL_CTRLPKT_PROFILE_ADD, + FUNC_MGMTCTRL_CTRLPKT_PROFILE_DEL, + FUNC_MGMTCTRL_CTRLPKT_PROFILE_GETFIRST, + FUNC_MGMTCTRL_CTRLPKT_PROFILE_GETNEXT, +}; + +sw_error_t fal_mgmtctrl_ethtype_profile_set(a_uint32_t dev_id, a_uint32_t profile_id, a_uint32_t ethtype); +sw_error_t fal_mgmtctrl_ethtype_profile_get(a_uint32_t dev_id, a_uint32_t profile_id, a_uint32_t * ethtype); + +sw_error_t fal_mgmtctrl_rfdb_profile_set(a_uint32_t dev_id, a_uint32_t profile_id, fal_mac_addr_t *addr); +sw_error_t fal_mgmtctrl_rfdb_profile_get(a_uint32_t dev_id, a_uint32_t profile_id, fal_mac_addr_t *addr); + +sw_error_t fal_mgmtctrl_ctrlpkt_profile_add(a_uint32_t dev_id, fal_ctrlpkt_profile_t *ctrlpkt); +sw_error_t fal_mgmtctrl_ctrlpkt_profile_del(a_uint32_t dev_id, fal_ctrlpkt_profile_t *ctrlpkt); +sw_error_t fal_mgmtctrl_ctrlpkt_profile_getfirst(a_uint32_t dev_id, fal_ctrlpkt_profile_t *ctrlpkt); +sw_error_t fal_mgmtctrl_ctrlpkt_profile_getnext(a_uint32_t dev_id, fal_ctrlpkt_profile_t *ctrlpkt); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_CTRLPKT_H_ */ +/** + * @} + */ + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_fdb.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_fdb.h new file mode 100755 index 000000000..be205d4fc --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_fdb.h @@ -0,0 +1,364 @@ +/* + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +/** + * @defgroup fal_fdb FAL_FDB + * @{ + */ +#ifndef _FAL_FDB_H_ +#define _FAL_FDB_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + + /** + @details Fields description: + + portmap_en - If value of portmap_en is A_TRUE then port.map is valid + otherwise port.id is valid. + + + leaky_en - If value of leaky_en is A_TRUE then packets which + destination address equals addr in this entry would be leaky. + + + mirror_en - If value of mirror_en is A_TRUE then packets which + destination address equals addr in this entry would be mirrored. + + + clone_en - If value of clone_en is A_TRUE which means this address is + a mac clone address. + @brief This structure defines the Fdb entry. 
+ + */ + typedef struct + { + fal_mac_addr_t addr; /* mac address of fdb entry */ + a_uint16_t fid; /* vlan_id/vsi value of fdb entry */ + fal_fwd_cmd_t dacmd; /* source address command */ + fal_fwd_cmd_t sacmd; /* dest address command */ + union + { + fal_port_t id; /* union value is port id value */ + fal_pbmp_t map; /* union value is bitmap value */ + } port; + a_bool_t portmap_en; /* use port bitmap or not */ + a_bool_t is_multicast; /* if it is a multicast mac fdb entry */ + a_bool_t static_en; /* enable static or not */ + a_bool_t leaky_en; /* enable leaky or not */ + a_bool_t mirror_en; /* enable mirror or not */ + a_bool_t clone_en; /* enable clone or not */ + a_bool_t cross_pt_state; /* cross port state */ + a_bool_t da_pri_en; /* enable da pri or not */ + a_uint8_t da_queue; /* da queue value */ + a_bool_t white_list_en; /* enable white list or not */ + a_bool_t load_balance_en; /* enable load balance value or not */ + a_uint8_t load_balance; /* load balance value */ + a_bool_t entry_valid; /* check if entry is value */ + a_bool_t lookup_valid; /* check if entry is lookup */ + } fal_fdb_entry_t; + +#define FAL_FDB_DEL_STATIC 0x1 + + typedef struct + { + a_bool_t port_en; /* enable port value matching or not */ + a_bool_t fid_en; /* enable fid value matching or not */ + a_bool_t multicast_en; /* enable multicast value matching or not */ + } fal_fdb_op_t; + + typedef enum + { + INVALID_VLAN_SVL=0, + INVALID_VLAN_IVL + } fal_fdb_smode; + +enum { + FUNC_FDB_ENTRY_ADD = 0, + FUNC_FDB_ENTRY_FLUSH, + FUNC_FDB_ENTRY_DEL_BYPORT, + FUNC_FDB_ENTRY_DEL_BYMAC, + FUNC_FDB_ENTRY_GETFIRST, + FUNC_FDB_ENTRY_GETNEXT, + FUNC_FDB_ENTRY_SEARCH, + FUNC_FDB_PORT_LEARN_SET, + FUNC_FDB_PORT_LEARN_GET, + FUNC_FDB_PORT_LEARNING_CTRL_SET, + FUNC_FDB_PORT_LEARNING_CTRL_GET, + FUNC_FDB_PORT_STAMOVE_CTRL_SET, + FUNC_FDB_PORT_STAMOVE_CTRL_GET, + FUNC_FDB_AGING_CTRL_SET, + FUNC_FDB_AGING_CTRL_GET, + FUNC_FDB_LEARNING_CTRL_SET, + FUNC_FDB_LEARNING_CTRL_GET, + FUNC_FDB_AGING_TIME_SET, + 
FUNC_FDB_AGING_TIME_GET, + FUNC_FDB_ENTRY_GETNEXT_BYINDEX, + FUNC_FDB_ENTRY_EXTEND_GETNEXT, + FUNC_FDB_ENTRY_EXTEND_GETFIRST, + FUNC_FDB_ENTRY_UPDATE_BYPORT, + FUNC_PORT_FDB_LEARN_LIMIT_SET, + FUNC_PORT_FDB_LEARN_LIMIT_GET, + FUNC_PORT_FDB_LEARN_EXCEED_CMD_SET, + FUNC_PORT_FDB_LEARN_EXCEED_CMD_GET, + FUNC_FDB_PORT_LEARNED_MAC_COUNTER_GET, + FUNC_FDB_PORT_ADD, + FUNC_FDB_PORT_DEL, + FUNC_FDB_PORT_MACLIMIT_CTRL_SET, + FUNC_FDB_PORT_MACLIMIT_CTRL_GET, + FUNC_FDB_DEL_BY_FID, +}; + + typedef struct + { + fal_mac_addr_t addr; + a_uint16_t fid; + a_uint8_t load_balance; + } fal_fdb_rfs_t; + + typedef struct + { + a_bool_t enable; /* enable port learn limit or not */ + a_uint32_t limit_num; /* port learn limit number */ + fal_fwd_cmd_t action; /* the action when port learn number exceed limit*/ + } fal_maclimit_ctrl_t; + + sw_error_t + fal_fdb_rfs_set(a_uint32_t dev_id, fal_fdb_rfs_t * entry); + + sw_error_t + fal_fdb_rfs_del(a_uint32_t dev_id, fal_fdb_rfs_t * entry); + + sw_error_t + fal_fdb_entry_add(a_uint32_t dev_id, const fal_fdb_entry_t * entry); + + + + sw_error_t + fal_fdb_entry_flush(a_uint32_t dev_id, a_uint32_t flag); + + + + sw_error_t + fal_fdb_entry_del_byport(a_uint32_t dev_id, a_uint32_t port_id, a_uint32_t flag); + + + + sw_error_t + fal_fdb_entry_del_bymac(a_uint32_t dev_id, const fal_fdb_entry_t *entry); + + + + sw_error_t + fal_fdb_entry_getfirst(a_uint32_t dev_id, fal_fdb_entry_t * entry); + + + + sw_error_t + fal_fdb_entry_getnext(a_uint32_t dev_id, fal_fdb_entry_t * entry); + + + + sw_error_t + fal_fdb_entry_search(a_uint32_t dev_id, fal_fdb_entry_t * entry); + + + + sw_error_t + fal_fdb_port_learn_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable); + + + sw_error_t + fal_fdb_port_learning_ctrl_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable, fal_fwd_cmd_t cmd); + + + sw_error_t + fal_fdb_port_learning_ctrl_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t *enable, fal_fwd_cmd_t *cmd); + + + sw_error_t + 
fal_fdb_port_stamove_ctrl_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable, fal_fwd_cmd_t cmd); + + + sw_error_t + fal_fdb_port_stamove_ctrl_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t *enable, fal_fwd_cmd_t *cmd); + + + sw_error_t + fal_fdb_port_learn_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t *enable); + + + sw_error_t + fal_fdb_aging_ctrl_set(a_uint32_t dev_id, a_bool_t enable); + + + sw_error_t + fal_fdb_aging_ctrl_get(a_uint32_t dev_id, a_bool_t * enable); + + + sw_error_t + fal_fdb_learning_ctrl_set(a_uint32_t dev_id, a_bool_t enable); + + + sw_error_t + fal_fdb_learning_ctrl_get(a_uint32_t dev_id, a_bool_t * enable); + + + sw_error_t + fal_fdb_vlan_ivl_svl_set(a_uint32_t dev_id, fal_fdb_smode smode); + + + sw_error_t + fal_fdb_vlan_ivl_svl_get(a_uint32_t dev_id, fal_fdb_smode * smode); + + + sw_error_t + fal_fdb_aging_time_set(a_uint32_t dev_id, a_uint32_t * time); + + + + sw_error_t + fal_fdb_aging_time_get(a_uint32_t dev_id, a_uint32_t * time); + + + sw_error_t + fal_fdb_entry_getnext_byindex(a_uint32_t dev_id, a_uint32_t * iterator, fal_fdb_entry_t * entry); + + + sw_error_t + fal_fdb_entry_extend_getnext(a_uint32_t dev_id, fal_fdb_op_t * option, + fal_fdb_entry_t * entry); + + + sw_error_t + fal_fdb_entry_extend_getfirst(a_uint32_t dev_id, fal_fdb_op_t * option, + fal_fdb_entry_t * entry); + + + sw_error_t + fal_fdb_entry_update_byport(a_uint32_t dev_id, fal_port_t old_port, fal_port_t new_port, + a_uint32_t fid, fal_fdb_op_t * option); + + + sw_error_t + fal_fdb_port_learned_mac_counter_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * cnt); + + + sw_error_t + fal_port_fdb_learn_limit_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable, a_uint32_t cnt); + + + sw_error_t + fal_port_fdb_learn_limit_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable, a_uint32_t * cnt); + + + sw_error_t + fal_port_fdb_learn_exceed_cmd_set(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t cmd); + + + sw_error_t 
+ fal_port_fdb_learn_exceed_cmd_get(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t * cmd); + + + sw_error_t + fal_fdb_learn_limit_set(a_uint32_t dev_id, a_bool_t enable, a_uint32_t cnt); + + + sw_error_t + fal_fdb_learn_limit_get(a_uint32_t dev_id, a_bool_t * enable, a_uint32_t * cnt); + + + sw_error_t + fal_fdb_learn_exceed_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd); + + + sw_error_t + fal_fdb_learn_exceed_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd); + + + sw_error_t + fal_fdb_resv_add(a_uint32_t dev_id, fal_fdb_entry_t * entry); + + sw_error_t + fal_fdb_resv_del(a_uint32_t dev_id, fal_fdb_entry_t * entry); + + + sw_error_t + fal_fdb_resv_find(a_uint32_t dev_id, fal_fdb_entry_t * entry); + + + sw_error_t + fal_fdb_resv_iterate(a_uint32_t dev_id, a_uint32_t * iterator, fal_fdb_entry_t * entry); + + + sw_error_t + fal_fdb_port_learn_static_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable); + + + sw_error_t + fal_fdb_port_learn_static_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable); + + sw_error_t + fal_fdb_port_add(a_uint32_t dev_id, a_uint32_t fid, fal_mac_addr_t * addr, fal_port_t port_id); + + sw_error_t + fal_fdb_port_del(a_uint32_t dev_id, a_uint32_t fid, fal_mac_addr_t * addr, fal_port_t port_id); + + sw_error_t + fal_fdb_port_maclimit_ctrl_set(a_uint32_t dev_id, fal_port_t port_id, fal_maclimit_ctrl_t * maclimit_ctrl); + + sw_error_t + fal_fdb_port_maclimit_ctrl_get(a_uint32_t dev_id, fal_port_t port_id, fal_maclimit_ctrl_t * maclimit_ctrl); + + sw_error_t + fal_fdb_entry_del_byfid(a_uint32_t dev_id, a_uint16_t fid, a_uint32_t flag); + +#define fal_fdb_add fal_fdb_entry_add +#define fal_fdb_del_all fal_fdb_entry_flush +#define fal_fdb_del_by_port fal_fdb_entry_del_byport +#define fal_fdb_del_by_mac fal_fdb_entry_del_bymac +#define fal_fdb_first fal_fdb_entry_getfirst +#define fal_fdb_next fal_fdb_entry_getnext +#define fal_fdb_find fal_fdb_entry_search +#define fal_fdb_age_ctrl_set fal_fdb_aging_ctrl_set +#define 
fal_fdb_age_ctrl_get fal_fdb_aging_ctrl_get +#define fal_fdb_age_time_set fal_fdb_aging_time_set +#define fal_fdb_age_time_get fal_fdb_aging_time_get +#define fal_fdb_iterate fal_fdb_entry_getnext_byindex +#define fal_fdb_extend_next fal_fdb_entry_extend_getnext +#define fal_fdb_extend_first fal_fdb_entry_extend_getfirst +#define fal_fdb_transfer fal_fdb_entry_update_byport + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_FDB_H_ */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_flow.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_flow.h new file mode 100755 index 000000000..66070f783 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_flow.h @@ -0,0 +1,236 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/** + * @defgroup fal_flow + * @{ + */ +#ifndef _FAL_FLOW_H_ +#define _FAL_FLOW_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" +#include "fal/fal_ip.h" + +typedef enum { + FAL_FLOW_L3_UNICAST = 0, + FAL_FLOW_L2_UNICAST, + FAL_FLOW_MCAST, +} fal_flow_pkt_type_t; + +typedef enum { + FAL_FLOW_LAN_TO_LAN_DIR = 0, + FAL_FLOW_LAN_TO_WAN_DIR, + FAL_FLOW_WAN_TO_LAN_DIR, + FAL_FLOW_WAN_TO_WAN_DIR, + FAL_FLOW_UNKOWN_DIR_DIR, +} fal_flow_direction_t; + +typedef enum { + FAL_FLOW_FORWARD = 0, + FAL_FLOW_SNAT, + FAL_FLOW_DNAT, + FAL_FLOW_ROUTE, + FAL_FLOW_BRIDGE, +} fal_flow_fwd_type_t; + +/* FLOW entry type field */ +#define FAL_FLOW_IP4_5TUPLE_ADDR 0x1 +#define FAL_FLOW_IP6_5TUPLE_ADDR 0x2 +#define FAL_FLOW_IP4_3TUPLE_ADDR 0x4 +#define FAL_FLOW_IP6_3TUPLE_ADDR 0x8 + +#define FAL_FLOW_OP_MODE_KEY 0x0 +#define FAL_FLOW_OP_MODE_INDEX 0x1 +#define FAL_FLOW_OP_MODE_FLUSH 0x2 + +typedef struct { + fal_fwd_cmd_t miss_action; /* flow mismatch action*/ + a_bool_t frag_bypass_en; /*0 for disable and 1 for enable*/ + a_bool_t tcp_spec_bypass_en; /*0 for disable and 1 for enable*/ + a_bool_t all_bypass_en; /*0 for disable and 1 for enable*/ + a_uint8_t key_sel; /*0 for source ip address and 1 for destination ip address*/ +} fal_flow_mgmt_t; + +typedef struct { + a_uint32_t entry_id; /*entry index*/ + a_uint8_t entry_type; /*1:ipv4 5 tuple, 2:ipv6 5 tuple, 4:ipv4 3 tuple, 8:ipv6 3 tuple*/ + a_uint8_t host_addr_type; /*0:souce ip index, 1:destination ip index*/ + a_uint16_t host_addr_index; /*host table entry index*/ + a_uint8_t protocol; /*1:tcp, 2:udp, 3:udp-lite, 0:other*/ + a_uint8_t age; /*aging value*/ + a_bool_t src_intf_valid; /*source interface check valid*/ + a_uint8_t src_intf_index; /*souce l3 interface*/ + a_uint8_t fwd_type; /*forward type*/ + a_uint16_t snat_nexthop; /*nexthop index for snat*/ + a_uint16_t snat_srcport; /*new source l4 port*/ + a_uint16_t dnat_nexthop; /*nexthop index for dnat*/ 
+ a_uint16_t dnat_dstport; /*new destination l4 port*/ + a_uint16_t route_nexthop; /*nexthop index for route*/ + a_bool_t port_valid; /*route port valid*/ + fal_port_t route_port; /*port for route*/ + fal_port_t bridge_port; /*port for l2 bridge*/ + a_bool_t deacclr_en; /*0 for disable and 1 for enable*/ + a_bool_t copy_tocpu_en; /*0 for disable and 1 for enable*/ + a_uint8_t syn_toggle; /*update by host*/ + a_uint8_t pri_profile; /*flow qos index*/ + a_uint8_t sevice_code; /*service code for bypass*/ + a_uint8_t ip_type; /*0 for ipv4 and 1 for ipv6*/ + union { + fal_ip4_addr_t ipv4; + fal_ip6_addr_t ipv6; + } flow_ip; + a_uint16_t src_port; /*l4 source port*/ + a_uint16_t dst_port; /*l4 destination port*/ + a_uint32_t tree_id; /*for qos*/ + a_uint32_t pkt_counter; /*flow packet counter*/ + a_uint64_t byte_counter; /*flow byte counter*/ +} fal_flow_entry_t; + +typedef struct { + fal_fwd_cmd_t src_if_check_action; /*source inferface check fail action*/ + a_bool_t src_if_check_deacclr_en; /*0 for disable and 1 for enable*/ + a_bool_t service_loop_en; /*0 for disable and 1 for enable*/ + fal_fwd_cmd_t service_loop_action; /*0 for disable and 1 for enable*/ + a_bool_t service_loop_deacclr_en; /*0 for disable and 1 for enable*/ + fal_fwd_cmd_t flow_deacclr_action; /*flow de acceleration action*/ + fal_fwd_cmd_t sync_mismatch_action; /*sync toggle mismatch action*/ + a_bool_t sync_mismatch_deacclr_en; /*0 for disable and 1 for enable*/ + a_uint8_t hash_mode_0; /*0 crc10, 1 xor, 2 crc16*/ + a_uint8_t hash_mode_1; /*0 crc10, 1 xor, 2 crc16*/ + a_bool_t flow_mismatch_copy_escape_en; /*0 for disable and 1 for enable*/ +} fal_flow_global_cfg_t; + +typedef struct { + fal_flow_entry_t flow_entry; + fal_host_entry_t host_entry; +} fal_flow_host_entry_t; + +typedef struct { + a_uint16_t age_time; /* age value*/ + a_uint16_t unit; /*0:second 1:cycle 2:million cycle*/ +} fal_flow_age_timer_t; + +enum { + FUNC_FLOW_HOST_ADD = 0, + FUNC_FLOW_ENTRY_GET, + FUNC_FLOW_ENTRY_DEL, + 
FUNC_FLOW_STATUS_GET, + FUNC_FLOW_CTRL_SET, + FUNC_FLOW_AGE_TIMER_GET, + FUNC_FLOW_STATUS_SET, + FUNC_FLOW_HOST_GET, + FUNC_FLOW_HOST_DEL, + FUNC_FLOW_CTRL_GET, + FUNC_FLOW_AGE_TIMER_SET, + FUNC_FLOW_ENTRY_ADD, + FUNC_FLOW_GLOBAL_CFG_GET, + FUNC_FLOW_GLOBAL_CFG_SET, + FUNC_FLOW_ENTRY_NEXT, +}; + +sw_error_t +fal_flow_status_set(a_uint32_t dev_id, a_bool_t enable); + +sw_error_t +fal_flow_status_get(a_uint32_t dev_id, a_bool_t *enable); + +sw_error_t +fal_flow_age_timer_set(a_uint32_t dev_id, fal_flow_age_timer_t *age_timer); + +sw_error_t +fal_flow_age_timer_get(a_uint32_t dev_id, fal_flow_age_timer_t *age_timer); + +sw_error_t +fal_flow_mgmt_set( + a_uint32_t dev_id, + fal_flow_pkt_type_t type, + fal_flow_direction_t dir, + fal_flow_mgmt_t *mgmt); + +sw_error_t +fal_flow_mgmt_get( + a_uint32_t dev_id, + fal_flow_pkt_type_t type, + fal_flow_direction_t dir, + fal_flow_mgmt_t *mgmt); + +sw_error_t +fal_flow_entry_add( + a_uint32_t dev_id, + a_uint32_t add_mode, /*index or hash*/ + fal_flow_entry_t *flow_entry); + +sw_error_t +fal_flow_entry_del( + a_uint32_t dev_id, + a_uint32_t del_mode, + fal_flow_entry_t *flow_entry); + +sw_error_t +fal_flow_entry_get( + a_uint32_t dev_id, + a_uint32_t get_mode, + fal_flow_entry_t *flow_entry); + +sw_error_t +fal_flow_entry_next( + a_uint32_t dev_id, + a_uint32_t next_mode, + fal_flow_entry_t *flow_entry); + +sw_error_t +fal_flow_host_add( + a_uint32_t dev_id, + a_uint32_t add_mode, + fal_flow_host_entry_t *flow_host_entry); + +sw_error_t +fal_flow_host_del( + a_uint32_t dev_id, + a_uint32_t del_mode, + fal_flow_host_entry_t *flow_host_entry); + +sw_error_t +fal_flow_host_get( + a_uint32_t dev_id, + a_uint32_t get_mode, + fal_flow_host_entry_t *flow_host_entry); + +sw_error_t +fal_flow_global_cfg_get( + a_uint32_t dev_id, + fal_flow_global_cfg_t *cfg); + +sw_error_t +fal_flow_global_cfg_set( + a_uint32_t dev_id, + fal_flow_global_cfg_t *cfg); + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_FLOW_H_ */ + +/** + 
* @} + */ + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_igmp.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_igmp.h new file mode 100755 index 000000000..f41fdf781 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_igmp.h @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/** + * @defgroup fal_igmp FAL_IGMP + * @{ + */ +#ifndef _FAL_IGMP_H_ +#define _FAL_IGMP_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" +#include "fal/fal_multi.h" + + + sw_error_t + fal_port_igmps_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable); + + + + sw_error_t + fal_port_igmps_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t *enable); + + + + sw_error_t + fal_igmp_mld_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd); + + + + sw_error_t + fal_igmp_mld_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd); + + + + sw_error_t + fal_port_igmp_mld_join_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable); + + + + sw_error_t + fal_port_igmp_mld_join_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable); + + + + sw_error_t + fal_port_igmp_mld_leave_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable); + + + + sw_error_t + fal_port_igmp_mld_leave_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable); + + + + sw_error_t + fal_igmp_mld_rp_set(a_uint32_t dev_id, fal_pbmp_t pts); + + + + sw_error_t + fal_igmp_mld_rp_get(a_uint32_t dev_id, fal_pbmp_t * pts); + + + + sw_error_t + fal_igmp_mld_entry_creat_set(a_uint32_t dev_id, a_bool_t enable); + + + + sw_error_t + fal_igmp_mld_entry_creat_get(a_uint32_t dev_id, a_bool_t * enable); + + + sw_error_t + fal_igmp_mld_entry_static_set(a_uint32_t dev_id, a_bool_t static_en); + + + sw_error_t + fal_igmp_mld_entry_static_get(a_uint32_t dev_id, a_bool_t * static_en); + + + sw_error_t + fal_igmp_mld_entry_leaky_set(a_uint32_t dev_id, a_bool_t enable); + + + sw_error_t + fal_igmp_mld_entry_leaky_get(a_uint32_t dev_id, a_bool_t * enable); + + + sw_error_t + fal_igmp_mld_entry_v3_set(a_uint32_t dev_id, a_bool_t enable); + + + sw_error_t + fal_igmp_mld_entry_v3_get(a_uint32_t dev_id, a_bool_t * enable); + + + sw_error_t + fal_igmp_mld_entry_queue_set(a_uint32_t dev_id, a_bool_t enable, a_uint32_t queue); + + 
+ sw_error_t + fal_igmp_mld_entry_queue_get(a_uint32_t dev_id, a_bool_t * enable, a_uint32_t * queue); + + + sw_error_t + fal_port_igmp_mld_learn_limit_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable, a_uint32_t cnt); + + + sw_error_t + fal_port_igmp_mld_learn_limit_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable, a_uint32_t * cnt); + + + sw_error_t + fal_port_igmp_mld_learn_exceed_cmd_set(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t cmd); + + + sw_error_t + fal_port_igmp_mld_learn_exceed_cmd_get(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t * cmd); + + sw_error_t + fal_igmp_sg_entry_set(a_uint32_t dev_id, fal_igmp_sg_entry_t * entry); + + sw_error_t + fal_igmp_sg_entry_clear(a_uint32_t dev_id, fal_igmp_sg_entry_t * entry); + + sw_error_t + fal_igmp_sg_entry_show(a_uint32_t dev_id); + + sw_error_t + fal_igmp_sg_entry_query(a_uint32_t dev_id, fal_igmp_sg_info_t * info); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_IGMP_H_ */ + +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_init.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_init.h new file mode 100755 index 000000000..6510291e3 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_init.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2014, 2016-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/*qca808x_start*/ +/** + * @defgroup fal_init FAL_INIT + * @{ + */ +#ifndef _FAL_INIT_H_ +#define _FAL_INIT_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "ssdk_init.h" +/*qca808x_end*/ +enum{ + FAL_MODULE_ACL, + FAL_MODULE_VSI, + FAL_MODULE_IP, + FAL_MODULE_FLOW, + FAL_MODULE_QM, + FAL_MODULE_QOS, + FAL_MODULE_BM, + FAL_MODULE_SERVCODE, + FAL_MODULE_RSS_HASH, + FAL_MODULE_PPPOE, + FAL_MODULE_SHAPER, + FAL_MODULE_PORTCTRL, + FAL_MODULE_MIB, + FAL_MODULE_MIRROR, + FAL_MODULE_FDB, + FAL_MODULE_STP, + FAL_MODULE_TRUNK, + FAL_MODULE_PORTVLAN, + FAL_MODULE_CTRLPKT, + FAL_MODULE_SEC, + FAL_MODULE_POLICER, + FAL_MODULE_PTP, + FAL_MODULE_MAX, +}; + +typedef struct +{ + a_uint32_t bitmap[3]; +}fal_func_ctrl_t; + + +sw_error_t fal_init(a_uint32_t dev_id, ssdk_init_cfg * cfg); +sw_error_t fal_reset(a_uint32_t dev_id); +/*qca808x_start*/ +sw_error_t fal_ssdk_cfg(a_uint32_t dev_id, ssdk_cfg_t *ssdk_cfg); +/*qca808x_end*/ +sw_error_t fal_cleanup(void); +sw_error_t fal_module_func_ctrl_set(a_uint32_t dev_id, + a_uint32_t module, fal_func_ctrl_t *func_ctrl); +sw_error_t fal_module_func_ctrl_get(a_uint32_t dev_id, + a_uint32_t module, fal_func_ctrl_t *func_ctrl); +/*qca808x_start*/ +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _FAL_INIT_H_ */ +/** + * @} + */ + /*qca808x_end*/ \ No newline at end of file diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_interface_ctrl.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_interface_ctrl.h new file mode 100755 index 000000000..40ac18b5b --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_interface_ctrl.h @@ -0,0 +1,183 @@ 
+/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +/** + * @defgroup fal_interface_ctrl FAL_INTERFACE_CONTROL + * @{ + */ +#ifndef _FAL_INTERFACECTRL_H_ +#define _FAL_INTERFACECTRL_H_ + +#ifdef __cplusplus +extern "c" { +#endif + +#include "common/sw.h" +#include "fal/fal_type.h" + + typedef enum { + FAL_MAC_MODE_RGMII = 0, + FAL_MAC_MODE_GMII, + FAL_MAC_MODE_MII, + FAL_MAC_MODE_SGMII, + FAL_MAC_MODE_FIBER, + FAL_MAC_MODE_RMII, + FAL_MAC_MODE_DEFAULT + } + fal_interface_mac_mode_t; + + typedef enum + { + FAL_INTERFACE_CLOCK_MAC_MODE = 0, + FAL_INTERFACE_CLOCK_PHY_MODE = 1, + } fal_interface_clock_mode_t; + + typedef struct + { + a_bool_t txclk_delay_cmd; + a_bool_t rxclk_delay_cmd; + a_uint32_t txclk_delay_sel; + a_uint32_t rxclk_delay_sel; + } fal_mac_rgmii_config_t; + + typedef struct + { + a_bool_t master_mode; + a_bool_t slave_mode; + a_bool_t clock_inverse; + a_bool_t pipe_rxclk_sel; + } fal_mac_rmii_config_t; + + typedef struct + { + fal_interface_clock_mode_t clock_mode; + a_uint32_t txclk_select; + a_uint32_t rxclk_select; + } fal_mac_gmii_config_t; + + typedef struct + { + fal_interface_clock_mode_t clock_mode; + a_uint32_t txclk_select; + a_uint32_t rxclk_select; + } fal_mac_mii_config_t; + + typedef 
struct + { + fal_interface_clock_mode_t clock_mode; + a_bool_t auto_neg; + a_bool_t force_speed; + a_bool_t prbs_enable; + a_bool_t rem_phy_lpbk; + } fal_mac_sgmii_config_t; + + typedef struct + { + a_bool_t auto_neg; + a_bool_t fx100_enable; + } fal_mac_fiber_config_t; + + typedef struct + { + fal_interface_mac_mode_t mac_mode; + union + { + fal_mac_rgmii_config_t rgmii; + fal_mac_gmii_config_t gmii; + fal_mac_mii_config_t mii; + fal_mac_sgmii_config_t sgmii; + fal_mac_rmii_config_t rmii; + fal_mac_fiber_config_t fiber; + } config; + } fal_mac_config_t; + + typedef struct + { + fal_interface_mac_mode_t mac_mode; + a_bool_t txclk_delay_cmd; + a_bool_t rxclk_delay_cmd; + a_uint32_t txclk_delay_sel; + a_uint32_t rxclk_delay_sel; + } fal_phy_config_t; + + typedef enum + { + Fx100BASE_MODE = 2, + Fx100BASE_BUTT = 0xffff, + } fx100_ctrl_link_mode_t; + + typedef enum + { + FX100_SERDS_MODE = 1, + Fx100_SERDS_BUTT = 0xffff, + } sgmii_fiber_mode_t; + +#define FX100_HALF_DUPLEX 0 +#define FX100_FULL_DUPLEX 1 + + typedef struct + { + fx100_ctrl_link_mode_t link_mode; + a_bool_t overshoot; + a_bool_t loopback; + a_bool_t fd_mode; + a_bool_t col_test; + sgmii_fiber_mode_t sgmii_fiber_mode; + a_bool_t crs_ctrl; + a_bool_t loopback_ctrl; + a_bool_t crs_col_100_ctrl; + a_bool_t loop_en; + } fal_fx100_ctrl_config_t; + + sw_error_t + fal_port_3az_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable); + + sw_error_t + fal_port_3az_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable); + + sw_error_t + fal_interface_mac_mode_set(a_uint32_t dev_id, fal_port_t port_id, fal_mac_config_t * config); + + sw_error_t + fal_interface_mac_mode_get(a_uint32_t dev_id, fal_port_t port_id, fal_mac_config_t * config); + + sw_error_t + fal_interface_phy_mode_set(a_uint32_t dev_id, a_uint32_t phy_id, fal_phy_config_t * config); + + sw_error_t + fal_interface_phy_mode_get(a_uint32_t dev_id, a_uint32_t phy_id, fal_phy_config_t * config); + + sw_error_t + 
fal_interface_fx100_ctrl_set(a_uint32_t dev_id, fal_fx100_ctrl_config_t * config); + + sw_error_t + fal_interface_fx100_ctrl_get(a_uint32_t dev_id, fal_fx100_ctrl_config_t * config); + + sw_error_t + fal_interface_fx100_status_get(a_uint32_t dev_id, a_uint32_t* status); + + sw_error_t + fal_interface_mac06_exch_set(a_uint32_t dev_id, a_bool_t enable); + + sw_error_t + fal_interface_mac06_exch_get(a_uint32_t dev_id, a_bool_t* enable); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_INTERFACECTRL_H_ */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_ip.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_ip.h new file mode 100755 index 000000000..9303971a4 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_ip.h @@ -0,0 +1,633 @@ +/* + * Copyright (c) 2014, 2015, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/** + * @defgroup fal_ip FAL_IP + * @{ + */ +#ifndef _FAL_IP_H_ +#define _FAL_IP_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" +#include "fal_multi.h" + + /* IP WCMP hash key flags */ +#define FAL_WCMP_HASH_KEY_SIP 0x1 +#define FAL_WCMP_HASH_KEY_DIP 0x2 +#define FAL_WCMP_HASH_KEY_SPORT 0x4 +#define FAL_WCMP_HASH_KEY_DPORT 0x8 + + /* IP entry operation flags */ +#define FAL_IP_ENTRY_ID_EN 0x1 +#define FAL_IP_ENTRY_INTF_EN 0x2 +#define FAL_IP_ENTRY_PORT_EN 0x4 +#define FAL_IP_ENTRY_STATUS_EN 0x8 +#define FAL_IP_ENTRY_IPADDR_EN 0x10 +#define FAL_IP_ENTRY_ALL_EN 0x20 + + /* IP host entry structure flags field */ +#define FAL_IP_IP4_ADDR 0x1 +#define FAL_IP_IP6_ADDR 0x2 +#define FAL_IP_CPU_ADDR 0x4 +#define FAL_IP_IP4_ADDR_MCAST 0x8 +#define FAL_IP_IP6_ADDR_MCAST 0x10 + +typedef struct { + a_uint8_t vsi; /*vsi value for l2 multicast*/ + fal_ip4_addr_t sip4_addr; /*source ipv4 address*/ + fal_ip6_addr_t sip6_addr; /*source ipv4 address*/ +} fal_host_mcast_t; + +typedef struct +{ + a_uint32_t rx_pkt_counter; /*rx packet counter*/ + a_uint64_t rx_byte_counter; /*rx byte counter*/ + a_uint32_t rx_drop_pkt_counter; /*rx drop packet counter*/ + a_uint64_t rx_drop_byte_counter; /*rx drop byte counter*/ + a_uint32_t tx_pkt_counter; /*tx packet counter*/ + a_uint64_t tx_byte_counter; /*tx byte counter*/ + a_uint32_t tx_drop_pkt_counter; /*tx drop packet counter*/ + a_uint64_t tx_drop_byte_counter; /*tx drop byte counter*/ +} fal_ip_counter_t; + +typedef struct +{ + a_uint32_t entry_id; /*index for host table*/ + a_uint32_t flags; /*1:ipv4 uni 2:ipv6 uni 8:ipv4 multi 0x10:ipv6 multi*/ + a_uint32_t status; /* valid status: 0 or 1*/ + fal_ip4_addr_t ip4_addr; /* ipv4 address */ + fal_ip6_addr_t ip6_addr; /* ipv6 address */ + fal_mac_addr_t mac_addr; /* unused for ppe */ + a_uint32_t intf_id; /* unused for ppe */ + a_uint32_t lb_num; /* unused for ppe */ + a_uint32_t vrf_id; /* unused for ppe */ + 
a_uint32_t expect_vid; /* unused for ppe */ + fal_port_t port_id; /* unused for ppe */ + a_bool_t mirror_en; /* unused for ppe */ + a_bool_t counter_en; /* unused for ppe */ + a_uint32_t counter_id; /* unused for ppe */ + a_uint32_t packet; /* unused for ppe */ + a_uint32_t byte; /* unused for ppe */ + a_bool_t pppoe_en; /* unused for ppe */ + a_uint32_t pppoe_id; /* unused for ppe */ + fal_fwd_cmd_t action; /*forward action*/ + a_uint32_t dst_info; /*bit 12:13: 1.nexthop, 2.port id, 3.port bitmap*/ + a_uint8_t syn_toggle; /* sync toggle */ + a_uint8_t lan_wan; /*0: ip over lan side ; 1: ip over wan side*/ + fal_host_mcast_t mcast_info; /* multicast information */ +} fal_host_entry_t; + + typedef enum + { + FAL_MAC_IP_GUARD = 0, + FAL_MAC_IP_PORT_GUARD, + FAL_MAC_IP_VLAN_GUARD, + FAL_MAC_IP_PORT_VLAN_GUARD, + FAL_NO_SOURCE_GUARD, + } fal_source_guard_mode_t; + + typedef enum + { + FAL_DEFAULT_FLOW_FORWARD = 0, + FAL_DEFAULT_FLOW_DROP, + FAL_DEFAULT_FLOW_RDT_TO_CPU, + FAL_DEFAULT_FLOW_ADMIT_ALL, + } fal_default_flow_cmd_t; + + typedef enum + { + FAL_FLOW_LAN_TO_LAN = 0, + FAL_FLOW_WAN_TO_LAN, + FAL_FLOW_LAN_TO_WAN, + FAL_FLOW_WAN_TO_WAN, + } fal_flow_type_t; + + typedef enum + { + FAL_ARP_LEARN_LOCAL = 0, + FAL_ARP_LEARN_ALL, + } fal_arp_learn_mode_t; + + /* IP host entry auto learn arp packets type */ +#define FAL_ARP_LEARN_REQ 0x1 +#define FAL_ARP_LEARN_ACK 0x2 + + typedef struct + { + a_uint32_t entry_id; + a_uint32_t vrf_id; + a_uint16_t vid_low; + a_uint16_t vid_high; + fal_mac_addr_t mac_addr; + a_bool_t ip4_route; + a_bool_t ip6_route; + } fal_intf_mac_entry_t; + + typedef struct + { + a_uint32_t nh_nr; + a_uint32_t nh_id[16]; + } fal_ip_wcmp_t; + + typedef struct + { + fal_mac_addr_t mac_addr; + fal_ip4_addr_t ip4_addr; + a_uint32_t vid; + a_uint8_t load_balance; + } fal_ip4_rfs_t; + + typedef struct + { + fal_mac_addr_t mac_addr; + fal_ip6_addr_t ip6_addr; + a_uint32_t vid; + a_uint8_t load_balance; + } fal_ip6_rfs_t; + + typedef struct + { + a_bool_t 
valid; + a_uint32_t vrf_id; + fal_addr_type_t ip_version; /*0 for IPv4 and 1 for IPv6*/ + a_uint32_t droute_type; /*0 for ARP and 1 for WCMP*/ + a_uint32_t index;/*when droute_type equals 0, means ARP entry index or means WCMP indexs*/ + } fal_default_route_t; + + typedef struct + { + a_bool_t valid; + a_uint32_t vrf_id; + a_uint32_t ip_version; /*0 for IPv4 and 1 for IPv6*/ + union { + fal_ip4_addr_t ip4_addr; + fal_ip6_addr_t ip6_addr; + }route_addr; + a_uint32_t prefix_length;/*For IPv4, up to 32 and for IPv6, up to 128*/ + } fal_host_route_t; + +typedef struct +{ + a_bool_t ipv4_arp_sg_en; /*0 for disable and 1 for enable*/ + fal_fwd_cmd_t ipv4_arp_sg_vio_action; /* check fail action for arp source guard */ + a_bool_t ipv4_arp_sg_port_en; /* source port based arp source guard enable */ + a_bool_t ipv4_arp_sg_svlan_en; /* source svlan based arp source guard enable */ + a_bool_t ipv4_arp_sg_cvlan_en; /* source cvlan based arp source guard enable */ + fal_fwd_cmd_t ipv4_arp_src_unk_action; /* unknown action for arp source guard */ + a_bool_t ip_nd_sg_en; /*0 for disable and 1 for enable*/ + fal_fwd_cmd_t ip_nd_sg_vio_action; /* check fail action for nd source guard */ + a_bool_t ip_nd_sg_port_en; /* source port based nd source guard enable */ + a_bool_t ip_nd_sg_svlan_en; /* source svlan based nd source guard enable */ + a_bool_t ip_nd_sg_cvlan_en; /* source cvlan based nd source guard enable */ + fal_fwd_cmd_t ip_nd_src_unk_action; /* unknown action for nd source guard */ +} fal_arp_sg_cfg_t; + +typedef enum +{ + FAL_MC_MODE_GV = 0, /*not support igmpv3 source filter*/ + FAL_MC_MODE_SGV /*support igmpv3 source filter*/ +} fal_mc_mode_t; + +typedef struct +{ + a_bool_t l2_ipv4_mc_en; /*0 for disable and 1 for enable*/ + fal_mc_mode_t l2_ipv4_mc_mode; /*two modes*/ + a_bool_t l2_ipv6_mc_en; /*0 for disable and 1 for enable*/ + fal_mc_mode_t l2_ipv6_mc_mode; /*same with IPv4*/ +} fal_mc_mode_cfg_t; + +typedef struct +{ + a_uint8_t type; /*0 for IPv4 and 1 for IPv6*/ 
+ fal_fwd_cmd_t action; /* forward action */ + a_uint32_t dst_info; /*bit 12:13: 1.nexthop, 2.port id, 3.port bitmap*/ + a_uint8_t lan_wan; /* 0:ip over lan side; 1:ip over wan side */ + union { + fal_ip4_addr_t ip4_addr; /* ipv4 address */ + fal_ip6_addr_t ip6_addr; /* ipv6 address */ + } route_addr; + union { + fal_ip4_addr_t ip4_addr_mask; /* ipv4 address mask */ + fal_ip6_addr_t ip6_addr_mask; /* ipv6 address mask */ + } route_addr_mask; +} fal_network_route_entry_t; + +typedef struct { + a_uint16_t mru; /* Maximum Receive Unit*/ + a_uint16_t mtu; /* Maximum Transmission Unit*/ + a_bool_t ttl_dec_bypass_en; /* Bypass TTL Decrement enable*/ + a_bool_t ipv4_uc_route_en; /*0 for disble and 1 for enable*/ + a_bool_t ipv6_uc_route_en; /*0 for disble and 1 for enable*/ + a_bool_t icmp_trigger_en; /* ICMP trigger flag enable*/ + fal_fwd_cmd_t ttl_exceed_action; /*action for ttl 0*/ + a_bool_t ttl_exceed_deacclr_en; /*0 for disble and 1 for enable*/ + a_uint8_t mac_addr_bitmap; /* bitmap for mac address*/ + fal_mac_addr_t mac_addr; /* mac address */ + fal_ip_counter_t counter; /* interface related counter */ +} fal_intf_entry_t; + +typedef struct +{ + a_bool_t l3_if_valid; /*0 for disable and 1 for enable*/ + a_uint32_t l3_if_index; /*index for interface table*/ +} fal_intf_id_t; + +typedef enum +{ + FAL_NEXTHOP_L3 = 0, + FAL_NEXTHOP_VP, +} fal_nexthop_type_t; + +typedef struct +{ + fal_nexthop_type_t type; /* 0: L3 1:port*/ + a_uint8_t vsi; /* output vsi value if type is 0 */ + fal_port_t port; /* destination port */ + a_uint32_t if_index; /* egress interface index */ + a_bool_t ip_to_me_en; /* 0 for disable and 1 for enable*/ + a_uint8_t pub_ip_index; /*index to public ip address*/ + a_uint8_t stag_fmt; /* 0: untag 1:tagged*/ + a_uint16_t svid; /*svlan id*/ + a_int8_t ctag_fmt; /* 0: untag 1:tagged*/ + a_uint16_t cvid; /* cvlan id */ + fal_mac_addr_t mac_addr; /* mac address */ + fal_ip4_addr_t dnat_ip; /*dnat ip address*/ +} fal_ip_nexthop_t; + +typedef struct +{ + 
a_bool_t ipv4_sg_en; /*0 for disable and 1 for enable*/ + fal_fwd_cmd_t ipv4_sg_vio_action; /* check fail action for ipv4 source guard */ + a_bool_t ipv4_sg_port_en; /* source port based ipv4 source guard enable */ + a_bool_t ipv4_sg_svlan_en; /* source svlan based ipv4 source guard enable */ + a_bool_t ipv4_sg_cvlan_en; /* source cvlan based ipv4 source guard enable */ + fal_fwd_cmd_t ipv4_src_unk_action; /* unknown action for ipv4 source guard */ + a_bool_t ipv6_sg_en; /*0 for disable and 1 for enable*/ + fal_fwd_cmd_t ipv6_sg_vio_action; /* check fail action for ipv6 source guard */ + a_bool_t ipv6_sg_port_en; /* source port based ipv6 source guard enable */ + a_bool_t ipv6_sg_svlan_en; /* source svlan based ipv6 source guard enable */ + a_bool_t ipv6_sg_cvlan_en; /* source cvlan based ipv6 source guard enable */ + fal_fwd_cmd_t ipv6_src_unk_action; /* unknown action for ipv6 source guard */ +} fal_sg_cfg_t; + +typedef struct +{ + fal_ip4_addr_t pub_ip_addr; /*public ip address*/ +} fal_ip_pub_addr_t; + +typedef struct { + a_bool_t valid; /* valid flag */ + fal_mac_addr_t mac_addr; /* mac address */ +} fal_macaddr_entry_t; + +typedef struct +{ + fal_fwd_cmd_t mru_fail_action; /*mru check fail action*/ + a_bool_t mru_deacclr_en; /*0 for disable and 1 for enable*/ + fal_fwd_cmd_t mtu_fail_action; /*mtu check fail action*/ + a_bool_t mtu_deacclr_en; /*0 for disable and 1 for enable*/ + fal_fwd_cmd_t mtu_nonfrag_fail_action; /*mtu check fail action for non-fragment */ + a_bool_t mtu_df_deacclr_en; /*0 for disable and 1 for enable*/ + fal_fwd_cmd_t prefix_bc_action; /*0 forward, 1 drop, 2 copy, 3 rdt_cpu*/ + a_bool_t prefix_deacclr_en; /*0 for disable and 1 for enable*/ + fal_fwd_cmd_t icmp_rdt_action; /*0 forward, 1 drop, 2 copy, 3 rdt_cpu*/ + a_bool_t icmp_rdt_deacclr_en; /*0 for disable and 1 for enable*/ + a_uint8_t hash_mode_0; /*0 crc10, 1 xor, 2 crc16*/ + a_uint8_t hash_mode_1; /*0 crc10, 1 xor, 2 crc16*/ +} fal_ip_global_cfg_t; + +enum { + 
FUNC_IP_NETWORK_ROUTE_GET = 0, + FUNC_IP_HOST_ADD, + FUNC_IP_VSI_SG_CFG_GET, + FUNC_IP_PUB_ADDR_SET, + FUNC_IP_PORT_SG_CFG_SET, + FUNC_IP_PORT_INTF_GET, + FUNC_IP_VSI_ARP_SG_CFG_SET, + FUNC_IP_PUB_ADDR_GET, + FUNC_IP_PORT_INTF_SET, + FUNC_IP_VSI_SG_CFG_SET, + FUNC_IP_HOST_NEXT, + FUNC_IP_PORT_MACADDR_SET, + FUNC_IP_VSI_INTF_GET, + FUNC_IP_NETWORK_ROUTE_ADD, + FUNC_IP_PORT_SG_CFG_GET, + FUNC_IP_INTF_GET, + FUNC_IP_NETWORK_ROUTE_DEL, + FUNC_IP_HOST_DEL, + FUNC_IP_ROUTE_MISMATCH_GET, + FUNC_IP_VSI_ARP_SG_CFG_GET, + FUNC_IP_PORT_ARP_SG_CFG_SET, + FUNC_IP_VSI_MC_MODE_SET, + FUNC_IP_VSI_INTF_SET, + FUNC_IP_NEXTHOP_GET, + FUNC_IP_ROUTE_MISMATCH_SET, + FUNC_IP_HOST_GET, + FUNC_IP_INTF_SET, + FUNC_IP_VSI_MC_MODE_GET, + FUNC_IP_PORT_MACADDR_GET, + FUNC_IP_PORT_ARP_SG_CFG_GET, + FUNC_IP_NEXTHOP_SET, + FUNC_IP_GLOBAL_CTRL_GET, + FUNC_IP_GLOBAL_CTRL_SET, +}; + + sw_error_t + fal_ip_host_add(a_uint32_t dev_id, fal_host_entry_t * host_entry); + + sw_error_t + fal_ip_host_del(a_uint32_t dev_id, a_uint32_t del_mode, + fal_host_entry_t * host_entry); + + sw_error_t + fal_ip_host_get(a_uint32_t dev_id, a_uint32_t get_mode, + fal_host_entry_t * host_entry); + + sw_error_t + fal_ip_host_next(a_uint32_t dev_id, a_uint32_t next_mode, + fal_host_entry_t * host_entry); + + sw_error_t + fal_ip_host_counter_bind(a_uint32_t dev_id, a_uint32_t entry_id, + a_uint32_t cnt_id, a_bool_t enable); + + sw_error_t + fal_ip_host_pppoe_bind(a_uint32_t dev_id, a_uint32_t entry_id, + a_uint32_t pppoe_id, a_bool_t enable); + + sw_error_t + fal_ip_pt_arp_learn_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t flags); + + sw_error_t + fal_ip_pt_arp_learn_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * flags); + + sw_error_t + fal_ip_arp_learn_set(a_uint32_t dev_id, fal_arp_learn_mode_t mode); + + sw_error_t + fal_ip_arp_learn_get(a_uint32_t dev_id, fal_arp_learn_mode_t * mode); + + sw_error_t + fal_ip_source_guard_set(a_uint32_t dev_id, fal_port_t port_id, + fal_source_guard_mode_t mode); + + 
sw_error_t + fal_ip_source_guard_get(a_uint32_t dev_id, fal_port_t port_id, + fal_source_guard_mode_t * mode); + + sw_error_t + fal_ip_arp_guard_set(a_uint32_t dev_id, fal_port_t port_id, + fal_source_guard_mode_t mode); + + sw_error_t + fal_ip_arp_guard_get(a_uint32_t dev_id, fal_port_t port_id, + fal_source_guard_mode_t * mode); + + sw_error_t + fal_ip_route_status_set(a_uint32_t dev_id, a_bool_t enable); + + sw_error_t + fal_ip_route_status_get(a_uint32_t dev_id, a_bool_t * enable); + + sw_error_t + fal_ip_intf_entry_add(a_uint32_t dev_id, fal_intf_mac_entry_t * entry); + + sw_error_t + fal_ip_intf_entry_del(a_uint32_t dev_id, a_uint32_t del_mode, + fal_intf_mac_entry_t * entry); + + sw_error_t + fal_ip_intf_entry_next(a_uint32_t dev_id, a_uint32_t next_mode, + fal_intf_mac_entry_t * entry); + + sw_error_t + fal_ip_unk_source_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd); + + sw_error_t + fal_ip_unk_source_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd); + + sw_error_t + fal_arp_unk_source_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd); + + sw_error_t + fal_arp_unk_source_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd); + + sw_error_t + fal_ip_age_time_set(a_uint32_t dev_id, a_uint32_t * time); + + sw_error_t + fal_ip_age_time_get(a_uint32_t dev_id, a_uint32_t * time); + + sw_error_t + fal_ip_wcmp_entry_set(a_uint32_t dev_id, a_uint32_t wcmp_id, fal_ip_wcmp_t * wcmp); + + sw_error_t + fal_ip_wcmp_entry_get(a_uint32_t dev_id, a_uint32_t wcmp_id, fal_ip_wcmp_t * wcmp); + + sw_error_t + fal_ip_wcmp_hash_mode_set(a_uint32_t dev_id, a_uint32_t hash_mode); + + sw_error_t + fal_ip_rfs_ip4_rule_set(a_uint32_t dev_id, fal_ip4_rfs_t * rfs); + + sw_error_t + fal_ip_rfs_ip6_rule_set(a_uint32_t dev_id, fal_ip6_rfs_t * rfs); + + sw_error_t + fal_ip_rfs_ip4_rule_del(a_uint32_t dev_id, fal_ip4_rfs_t * rfs); + + sw_error_t + fal_ip_rfs_ip6_rule_del(a_uint32_t dev_id, fal_ip6_rfs_t * rfs); + + sw_error_t + fal_ip_wcmp_hash_mode_get(a_uint32_t dev_id, a_uint32_t * hash_mode); + + 
sw_error_t + fal_ip_vrf_base_addr_set(a_uint32_t dev_id, a_uint32_t vrf_id, fal_ip4_addr_t addr); + + sw_error_t + fal_ip_vrf_base_addr_get(a_uint32_t dev_id, a_uint32_t vrf_id, fal_ip4_addr_t * addr); + + sw_error_t + fal_ip_vrf_base_mask_set(a_uint32_t dev_id, a_uint32_t vrf_id, fal_ip4_addr_t addr); + + sw_error_t + fal_ip_vrf_base_mask_get(a_uint32_t dev_id, a_uint32_t vrf_id, fal_ip4_addr_t * addr); + + sw_error_t + fal_ip_default_route_set(a_uint32_t dev_id, a_uint32_t droute_id, + fal_default_route_t * entry); + + sw_error_t + fal_ip_default_route_get(a_uint32_t dev_id, a_uint32_t droute_id, + fal_default_route_t * entry); + + sw_error_t + fal_ip_host_route_set(a_uint32_t dev_id, a_uint32_t hroute_id, + fal_host_route_t * entry); + + sw_error_t + fal_ip_host_route_get(a_uint32_t dev_id, a_uint32_t hroute_id, + fal_host_route_t * entry); + + sw_error_t + fal_ip_wcmp_entry_set(a_uint32_t dev_id, a_uint32_t wcmp_id, + fal_ip_wcmp_t * wcmp); + + sw_error_t + fal_ip_wcmp_entry_get(a_uint32_t dev_id, a_uint32_t wcmp_id, + fal_ip_wcmp_t * wcmp); + + sw_error_t + fal_default_flow_cmd_set(a_uint32_t dev_id, a_uint32_t vrf_id, + fal_flow_type_t type, fal_default_flow_cmd_t cmd); + + sw_error_t + fal_default_flow_cmd_get(a_uint32_t dev_id, a_uint32_t vrf_id, + fal_flow_type_t type, fal_default_flow_cmd_t * cmd); + + sw_error_t + fal_default_rt_flow_cmd_set(a_uint32_t dev_id, a_uint32_t vrf_id, + fal_flow_type_t type, fal_default_flow_cmd_t cmd); + + sw_error_t + fal_default_rt_flow_cmd_get(a_uint32_t dev_id, a_uint32_t vrf_id, + fal_flow_type_t type, fal_default_flow_cmd_t * cmd); + +sw_error_t +fal_ip_vsi_arp_sg_cfg_get(a_uint32_t dev_id, a_uint32_t vsi, + fal_arp_sg_cfg_t *arp_sg_cfg); + +sw_error_t +fal_ip_vsi_arp_sg_cfg_set(a_uint32_t dev_id, a_uint32_t vsi, + fal_arp_sg_cfg_t *arp_sg_cfg); + +sw_error_t +fal_ip_network_route_add(a_uint32_t dev_id, a_uint32_t index, + fal_network_route_entry_t *entry); + +sw_error_t +fal_ip_network_route_get(a_uint32_t dev_id, + 
a_uint32_t index, a_uint8_t type, + fal_network_route_entry_t *entry); + +sw_error_t +fal_ip_network_route_del(a_uint32_t dev_id, + a_uint32_t index, a_uint8_t type); + +sw_error_t +fal_ip_intf_set(a_uint32_t dev_id, + a_uint32_t index, + fal_intf_entry_t *entry); + +sw_error_t +fal_ip_intf_get(a_uint32_t dev_id, + a_uint32_t index, + fal_intf_entry_t *entry); + +sw_error_t +fal_ip_vsi_intf_set(a_uint32_t dev_id, a_uint32_t vsi, fal_intf_id_t *id); + +sw_error_t +fal_ip_vsi_intf_get(a_uint32_t dev_id, a_uint32_t vsi, fal_intf_id_t *id); + +sw_error_t +fal_ip_port_intf_set(a_uint32_t dev_id, fal_port_t port_id, fal_intf_id_t *id); + +sw_error_t +fal_ip_port_intf_get(a_uint32_t dev_id, fal_port_t port_id, fal_intf_id_t *id); + +sw_error_t +fal_ip_nexthop_set(a_uint32_t dev_id, a_uint32_t index, + fal_ip_nexthop_t *entry); + +sw_error_t +fal_ip_nexthop_get(a_uint32_t dev_id, a_uint32_t index, + fal_ip_nexthop_t *entry); + +sw_error_t +fal_ip_vsi_sg_cfg_get(a_uint32_t dev_id, a_uint32_t vsi, + fal_sg_cfg_t *sg_cfg); + +sw_error_t +fal_ip_vsi_sg_cfg_set(a_uint32_t dev_id, a_uint32_t vsi, + fal_sg_cfg_t *sg_cfg); + +sw_error_t +fal_ip_port_sg_cfg_set(a_uint32_t dev_id, fal_port_t port_id, + fal_sg_cfg_t *sg_cfg); + +sw_error_t +fal_ip_port_sg_cfg_get(a_uint32_t dev_id, fal_port_t port_id, + fal_sg_cfg_t *sg_cfg); + +sw_error_t +fal_ip_pub_addr_set(a_uint32_t dev_id, a_uint32_t index, + fal_ip_pub_addr_t *entry); + +sw_error_t +fal_ip_pub_addr_get(a_uint32_t dev_id, a_uint32_t index, + fal_ip_pub_addr_t *entry); + +sw_error_t +fal_ip_port_macaddr_set(a_uint32_t dev_id, fal_port_t port_id, + fal_macaddr_entry_t *macaddr); + +sw_error_t +fal_ip_port_macaddr_get(a_uint32_t dev_id, fal_port_t port_id, + fal_macaddr_entry_t *macaddr); + +sw_error_t +fal_ip_route_mismatch_action_set(a_uint32_t dev_id, fal_fwd_cmd_t action); + +sw_error_t +fal_ip_route_mismatch_action_get(a_uint32_t dev_id, fal_fwd_cmd_t *action); + +sw_error_t +fal_ip_port_arp_sg_cfg_set(a_uint32_t dev_id, 
fal_port_t port_id, + fal_arp_sg_cfg_t *arp_sg_cfg); + +sw_error_t +fal_ip_port_arp_sg_cfg_get(a_uint32_t dev_id, fal_port_t port_id, + fal_arp_sg_cfg_t *arp_sg_cfg); + +sw_error_t +fal_ip_vsi_mc_mode_set(a_uint32_t dev_id, a_uint32_t vsi, + fal_mc_mode_cfg_t *cfg); + +sw_error_t +fal_ip_vsi_mc_mode_get(a_uint32_t dev_id, a_uint32_t vsi, + fal_mc_mode_cfg_t *cfg); + +sw_error_t +fal_ip_global_ctrl_get(a_uint32_t dev_id, fal_ip_global_cfg_t *cfg); + +sw_error_t +fal_ip_global_ctrl_set(a_uint32_t dev_id, fal_ip_global_cfg_t *cfg); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_IP_H_ */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_leaky.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_leaky.h new file mode 100755 index 000000000..255eac486 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_leaky.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +/** + * @defgroup fal_leaky FAL_LEAKY + * @{ + */ +#ifndef _FAL_LEAKY_H_ +#define _FAL_LEAKY_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + + /** + @brief This enum defines the leaky control mode. 
+ */ + typedef enum { + FAL_LEAKY_PORT_CTRL = 0, /**< control leaky through port which packets received*/ + FAL_LEAKY_FDB_CTRL, /**< control leaky through fdb entry*/ + FAL_LEAKY_CTRL_MODE_BUTT + } + fal_leaky_ctrl_mode_t; + + + + sw_error_t + fal_uc_leaky_mode_set(a_uint32_t dev_id, + fal_leaky_ctrl_mode_t ctrl_mode); + + + + sw_error_t + fal_uc_leaky_mode_get(a_uint32_t dev_id, + fal_leaky_ctrl_mode_t * ctrl_mode); + + + + sw_error_t + fal_mc_leaky_mode_set(a_uint32_t dev_id, + fal_leaky_ctrl_mode_t ctrl_mode); + + + + sw_error_t + fal_mc_leaky_mode_get(a_uint32_t dev_id, + fal_leaky_ctrl_mode_t * ctrl_mode); + + + + sw_error_t + fal_port_arp_leaky_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable); + + + + sw_error_t + fal_port_arp_leaky_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable); + + + + sw_error_t + fal_port_uc_leaky_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable); + + + + sw_error_t + fal_port_uc_leaky_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable); + + + + sw_error_t + fal_port_mc_leaky_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable); + + + + sw_error_t + fal_port_mc_leaky_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable); + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_LEAKY_H_ */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_led.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_led.h new file mode 100644 index 000000000..e0c78aa3d --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_led.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2014,2020 The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +/** + * @defgroup fal_led FAL_LED + * @{ + */ +#ifndef _FAL_LED_H_ +#define _FAL_LED_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + + /** + @brief This enum defines the led group. + */ + typedef enum { + LED_LAN_PORT_GROUP = 0, /**< control lan ports*/ + LED_WAN_PORT_GROUP, /**< control wan ports*/ + LED_MAC_PORT_GROUP, /**< control mac ports*/ + LED_GROUP_BUTT + } + led_pattern_group_t; + + /** + @brief This enum defines the led pattern id, each ports has three led + and pattern0 relates to led0, pattern1 relates to led1, pattern2 relates to led2. + */ + typedef a_uint32_t led_pattern_id_t; + + + /** + @brief This enum defines the led control pattern mode. + */ + typedef enum + { + LED_ALWAYS_OFF = 0, + LED_ALWAYS_BLINK, + LED_ALWAYS_ON, + LED_PATTERN_MAP_EN, + LED_PATTERN_MODE_BUTT + } led_pattern_mode_t; + + +#define FULL_DUPLEX_LIGHT_EN 0 +#define HALF_DUPLEX_LIGHT_EN 1 +#define POWER_ON_LIGHT_EN 2 +#define LINK_1000M_LIGHT_EN 3 +#define LINK_100M_LIGHT_EN 4 +#define LINK_10M_LIGHT_EN 5 +#define COLLISION_BLINK_EN 6 +#define RX_TRAFFIC_BLINK_EN 7 +#define TX_TRAFFIC_BLINK_EN 8 +#define LINKUP_OVERRIDE_EN 9 +#define LED_ACTIVE_HIGH 10 +#define LINK_2500M_LIGHT_EN 11 + + + /** + @brief This enum defines the led control pattern map. + */ + typedef a_uint32_t led_pattern_map_t; + + + /** + @brief This enum defines the led control pattern mode. 
+ */ + typedef enum + { + LED_BLINK_2HZ = 0, + LED_BLINK_4HZ, + LED_BLINK_8HZ, + LED_BLINK_TXRX, /**< Frequency relates to speed, 1000M-8HZ,100M->4HZ,10M->2HZ,Others->4HZ */ + LED_BLINK_FREQ_BUTT + } led_blink_freq_t; + + + typedef struct + { + led_pattern_mode_t mode; + led_pattern_map_t map; + led_blink_freq_t freq; + } led_ctrl_pattern_t; + + + + + + sw_error_t + fal_led_ctrl_pattern_set(a_uint32_t dev_id, led_pattern_group_t group, + led_pattern_id_t id, led_ctrl_pattern_t * pattern); + + + + sw_error_t + fal_led_ctrl_pattern_get(a_uint32_t dev_id, led_pattern_group_t group, + led_pattern_id_t id, led_ctrl_pattern_t * pattern); + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_LED_H_ */ +/** + * @} + */ + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_mib.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_mib.h new file mode 100755 index 000000000..aa6e354c4 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_mib.h @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/** + * @defgroup fal_mib FAL_MIB + * @{ + */ +#ifndef _FAL_MIB_H +#define _FAL_MIB_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + + /**@brief This structure defines the mib infomation. + */ + typedef struct + { + a_uint32_t RxBroad; + a_uint32_t RxPause; + a_uint32_t RxMulti; + a_uint32_t RxFcsErr; + a_uint32_t RxAllignErr; + a_uint32_t RxRunt; + a_uint32_t RxFragment; + a_uint32_t Rx64Byte; + a_uint32_t Rx128Byte; + a_uint32_t Rx256Byte; + a_uint32_t Rx512Byte; + a_uint32_t Rx1024Byte; + a_uint32_t Rx1518Byte; + a_uint32_t RxMaxByte; + a_uint32_t RxTooLong; + a_uint32_t RxGoodByte_lo; /**< low 32 bits of RxGoodByte statistc item */ + a_uint32_t RxGoodByte_hi; /**< high 32 bits of RxGoodByte statistc item*/ + a_uint32_t RxBadByte_lo; /**< low 32 bits of RxBadByte statistc item */ + a_uint32_t RxBadByte_hi; /**< high 32 bits of RxBadByte statistc item */ + a_uint32_t RxOverFlow; + a_uint32_t Filtered; + a_uint32_t TxBroad; + a_uint32_t TxPause; + a_uint32_t TxMulti; + a_uint32_t TxUnderRun; + a_uint32_t Tx64Byte; + a_uint32_t Tx128Byte; + a_uint32_t Tx256Byte; + a_uint32_t Tx512Byte; + a_uint32_t Tx1024Byte; + a_uint32_t Tx1518Byte; + a_uint32_t TxMaxByte; + a_uint32_t TxOverSize; + a_uint32_t TxByte_lo; /**< low 32 bits of TxByte statistc item */ + a_uint32_t TxByte_hi; /**< high 32 bits of TxByte statistc item */ + a_uint32_t TxCollision; + a_uint32_t TxAbortCol; + a_uint32_t TxMultiCol; + a_uint32_t TxSingalCol; + a_uint32_t TxExcDefer; + a_uint32_t TxDefer; + a_uint32_t TxLateCol; + a_uint32_t RxUniCast; + a_uint32_t TxUniCast; + a_uint32_t RxJumboFcsErr; /* add for Hawkeye*/ + a_uint32_t RxJumboAligenErr; /* add for Hawkeye*/ + a_uint32_t Rx14To63; /*add for ipq60xx lpbk port*/ + a_uint32_t RxTooLongByte_lo; /*add for ipq60xx lpbk port*/ + a_uint32_t RxTooLongByte_hi; /*add for ipq60xx lpbk port*/ + a_uint32_t RxRuntByte_lo; /*add for ipq60xx lpbk port*/ + a_uint32_t 
RxRuntByte_hi; /*add for ipq60xx lpbk port*/ + } fal_mib_info_t; + +/*define structure for software with 64bit*/ +typedef struct +{ + a_uint64_t RxBroad; + a_uint64_t RxPause; + a_uint64_t RxMulti; + a_uint64_t RxFcsErr; + a_uint64_t RxAllignErr; + a_uint64_t RxRunt; + a_uint64_t RxFragment; + a_uint64_t Rx64Byte; + a_uint64_t Rx128Byte; + a_uint64_t Rx256Byte; + a_uint64_t Rx512Byte; + a_uint64_t Rx1024Byte; + a_uint64_t Rx1518Byte; + a_uint64_t RxMaxByte; + a_uint64_t RxTooLong; + a_uint64_t RxGoodByte; + a_uint64_t RxBadByte; + a_uint64_t RxOverFlow; /* no this counter for Hawkeye*/ + a_uint64_t Filtered; /*no this counter for Hawkeye*/ + a_uint64_t TxBroad; + a_uint64_t TxPause; + a_uint64_t TxMulti; + a_uint64_t TxUnderRun; + a_uint64_t Tx64Byte; + a_uint64_t Tx128Byte; + a_uint64_t Tx256Byte; + a_uint64_t Tx512Byte; + a_uint64_t Tx1024Byte; + a_uint64_t Tx1518Byte; + a_uint64_t TxMaxByte; + a_uint64_t TxOverSize; /*no this counter for Hawkeye*/ + a_uint64_t TxByte; + a_uint64_t TxCollision; + a_uint64_t TxAbortCol; + a_uint64_t TxMultiCol; + a_uint64_t TxSingalCol; + a_uint64_t TxExcDefer; + a_uint64_t TxDefer; + a_uint64_t TxLateCol; + a_uint64_t RxUniCast; + a_uint64_t TxUniCast; + a_uint64_t RxJumboFcsErr; /* add for Hawkeye*/ + a_uint64_t RxJumboAligenErr; /* add for Hawkeye*/ + a_uint64_t Rx14To63; /*add for ipq60xx lpbk port*/ + a_uint64_t RxTooLongByte; /*add for ipq60xx lpbk port*/ + a_uint64_t RxRuntByte; /*add for ipq60xx lpbk port*/ +} fal_mib_counter_t; + +typedef struct +{ + a_uint64_t RxFrame; + a_uint64_t RxByte; + a_uint64_t RxByteGood; + a_uint64_t RxBroadGood; + a_uint64_t RxMultiGood; + a_uint64_t RxFcsErr; + a_uint64_t RxRuntErr; + a_uint64_t RxJabberError; + a_uint64_t RxUndersizeGood; + a_uint64_t RxOversizeGood; + a_uint64_t Rx64Byte; + a_uint64_t Rx128Byte; + a_uint64_t Rx256Byte; + a_uint64_t Rx512Byte; + a_uint64_t Rx1024Byte; + a_uint64_t RxMaxByte; + a_uint64_t RxUnicastGood; + a_uint64_t RxLengthError; + a_uint64_t 
RxOutOfRangeError; + a_uint64_t RxPause; + a_uint64_t RxOverFlow; + a_uint64_t RxVLANFrameGoodBad; + a_uint64_t RxWatchDogError; + a_uint64_t RxLPIUsec; + a_uint64_t RxLPITran; + a_uint64_t RxDropFrameGoodBad; + a_uint64_t RxDropByteGoodBad; + + a_uint64_t TxByte; + a_uint64_t TxFrame; + a_uint64_t TxBroadGood; + a_uint64_t TxMultiGood; + a_uint64_t Tx64Byte; + a_uint64_t Tx128Byte; + a_uint64_t Tx256Byte; + a_uint64_t Tx512Byte; + a_uint64_t Tx1024Byte; + a_uint64_t TxMaxByte; + a_uint64_t TxUnicast; + a_uint64_t TxMulti; + a_uint64_t TxBroad; + a_uint64_t TxUnderFlowError; + a_uint64_t TxByteGood; + a_uint64_t TxFrameGood; + a_uint64_t TxPause; + a_uint64_t TxVLANFrameGood; + a_uint64_t TxLPIUsec; + a_uint64_t TxLPITran; +} fal_xgmib_info_t; + + enum + { + /*mib*/ + FUNC_GET_MIB_INFO = 0 , + FUNC_GET_RX_MIB_INFO, + FUNC_GET_TX_MIB_INFO, + FUNC_GET_XGMIB_INFO, + FUNC_GET_TX_XGMIB_INFO, + FUNC_GET_RX_XGMIB_INFO, + FUNC_MIB_STATUS_SET, + FUNC_MIB_STATUS_GET, + FUNC_MIB_PORT_FLUSH_COUNTERS, + FUNC_MIB_CPUKEEP_SET, + FUNC_MIB_CPUKEEP_GET, + }; + + sw_error_t + fal_get_mib_info(a_uint32_t dev_id, fal_port_t port_id, + fal_mib_info_t * mib_info ); + + sw_error_t + fal_get_xgmib_info(a_uint32_t dev_id, fal_port_t port_id, + fal_xgmib_info_t * mib_Info); + + + sw_error_t + fal_mib_status_set(a_uint32_t dev_id, a_bool_t enable); + + + + sw_error_t + fal_mib_status_get(a_uint32_t dev_id, a_bool_t * enable); + + + sw_error_t + fal_mib_port_flush_counters(a_uint32_t dev_id, fal_port_t port_id); + + + sw_error_t + fal_mib_cpukeep_set(a_uint32_t dev_id, a_bool_t enable); + + sw_error_t + fal_mib_cpukeep_get(a_uint32_t dev_id, a_bool_t * enable); + +sw_error_t +fal_mib_counter_get(a_uint32_t dev_id, fal_port_t port_id, + fal_mib_counter_t *mib_counter); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _FAL_MIB_H */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_mirror.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_mirror.h new 
file mode 100755 index 000000000..498bf86fc --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_mirror.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +/** + * @defgroup fal_mirror FAL_MIRROR + * @{ + */ +#ifndef _FAL_MIRROR_H_ +#define _FAL_MIRROR_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + + +typedef struct +{ + fal_port_t port_id; + a_uint32_t priority; +} fal_mirr_analysis_config_t; + +typedef enum +{ + FAL_MIRR_INGRESS= 0, + FAL_MIRR_EGRESS, + FAL_MIRR_BOTH, +} fal_mirr_direction_t; + +enum +{ + FUNC_MIRR_ANALYSIS_PORT_SET = 0, + FUNC_MIRR_ANALYSIS_PORT_GET, + FUNC_MIRR_PORT_IN_SET, + FUNC_MIRR_PORT_IN_GET, + FUNC_MIRR_PORT_EG_SET, + FUNC_MIRR_PORT_EG_GET, + FUNC_MIRR_ANALYSIS_CONFIG_SET, + FUNC_MIRR_ANALYSIS_CONFIG_GET, +}; + +sw_error_t +fal_mirr_analysis_port_set(a_uint32_t dev_id, fal_port_t port_id); + +sw_error_t +fal_mirr_analysis_port_get(a_uint32_t dev_id, fal_port_t * port_id); + +sw_error_t +fal_mirr_port_in_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable); +sw_error_t +fal_mirr_port_in_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable); +sw_error_t 
+fal_mirr_port_eg_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable); +sw_error_t +fal_mirr_port_eg_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable); + +sw_error_t +fal_mirr_analysis_config_set(a_uint32_t dev_id, fal_mirr_direction_t direction, fal_mirr_analysis_config_t * config); + +sw_error_t +fal_mirr_analysis_config_get(a_uint32_t dev_id, fal_mirr_direction_t direction, fal_mirr_analysis_config_t * config); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _PORT_MIRROR_H_ */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_misc.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_misc.h new file mode 100755 index 000000000..e5e40b670 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_misc.h @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/** + * @defgroup fal_gen FAL_MISC + * @{ + */ +#ifndef _FAL_MISC_H_ +#define _FAL_MISC_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + + + typedef enum + { + FAL_LOOP_CHECK_1MS = 0, + FAL_LOOP_CHECK_10MS, + FAL_LOOP_CHECK_100MS, + FAL_LOOP_CHECK_500MS, + } fal_loop_check_time_t; + + typedef struct + { + a_bool_t rx_counter_en; /* Enable/disable virtual port rx counter */ + a_bool_t vp_uni_tx_counter_en; /* Enable/disable virtual port unicast tx counter */ + a_bool_t port_mc_tx_counter_en; /* Enable/disable physical port multicast tx counter */ + a_bool_t port_tx_counter_en; /* Enable/disable physical port tx counter */ + } fal_counter_en_t; + + sw_error_t + fal_debug_port_counter_enable(a_uint32_t dev_id, fal_port_t port_id, fal_counter_en_t * cnt_en); + + sw_error_t + fal_debug_port_counter_status_get(a_uint32_t dev_id, fal_port_t port_id, fal_counter_en_t * cnt_en); + + /* define switch interrupt type bitmap */ +#define FAL_SWITCH_INTR_LINK_STATUS 0x1 /* up/down/speed/duplex status */ + + sw_error_t fal_arp_status_set(a_uint32_t dev_id, a_bool_t enable); + + + + sw_error_t fal_arp_status_get(a_uint32_t dev_id, a_bool_t * enable); + + + + sw_error_t fal_frame_max_size_set(a_uint32_t dev_id, a_uint32_t size); + + + + sw_error_t fal_frame_max_size_get(a_uint32_t dev_id, a_uint32_t * size); + + + + sw_error_t + fal_port_unk_sa_cmd_set(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t cmd); + + + + sw_error_t + fal_port_unk_sa_cmd_get(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t * cmd); + + + + sw_error_t + fal_port_unk_uc_filter_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable); + + + + sw_error_t + fal_port_unk_uc_filter_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable); + + + + sw_error_t + fal_port_unk_mc_filter_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable); + + + + sw_error_t + fal_port_unk_mc_filter_get(a_uint32_t dev_id, 
fal_port_t port_id, + a_bool_t * enable); + + + sw_error_t + fal_port_bc_filter_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable); + + + sw_error_t + fal_port_bc_filter_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable); + + + sw_error_t + fal_cpu_port_status_set(a_uint32_t dev_id, a_bool_t enable); + + + + sw_error_t + fal_cpu_port_status_get(a_uint32_t dev_id, a_bool_t * enable); + + + + sw_error_t + fal_bc_to_cpu_port_set(a_uint32_t dev_id, a_bool_t enable); + + + + sw_error_t + fal_bc_to_cpu_port_get(a_uint32_t dev_id, a_bool_t * enable); + + + + sw_error_t + fal_port_dhcp_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable); + + + + sw_error_t + fal_port_dhcp_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable); + + + sw_error_t + fal_arp_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd); + + + sw_error_t + fal_arp_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd); + + + sw_error_t + fal_eapol_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd); + + + sw_error_t + fal_eapol_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd); + + + sw_error_t + fal_eapol_status_set(a_uint32_t dev_id, a_uint32_t port_id, a_bool_t enable); + + sw_error_t + fal_eapol_status_get(a_uint32_t dev_id, a_uint32_t port_id, a_bool_t * enable); + + sw_error_t + fal_ripv1_status_set(a_uint32_t dev_id, a_bool_t enable); + + sw_error_t + fal_ripv1_status_get(a_uint32_t dev_id, a_bool_t * enable); + + + sw_error_t + fal_port_arp_req_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable); + + + sw_error_t + fal_port_arp_req_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t *enable); + + + sw_error_t + fal_port_arp_ack_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable); + + + sw_error_t + fal_port_arp_ack_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t *enable); + + + sw_error_t + fal_intr_mask_set(a_uint32_t dev_id, a_uint32_t intr_mask); + + + sw_error_t + fal_intr_mask_get(a_uint32_t dev_id, a_uint32_t * intr_mask); + + 
+ sw_error_t + fal_intr_status_get(a_uint32_t dev_id, a_uint32_t * intr_status); + + + sw_error_t + fal_intr_status_clear(a_uint32_t dev_id, a_uint32_t intr_status); + + + sw_error_t + fal_intr_port_link_mask_set(a_uint32_t dev_id, a_uint32_t port_id, a_uint32_t intr_mask); + + + sw_error_t + fal_intr_port_link_mask_get(a_uint32_t dev_id, a_uint32_t port_id, a_uint32_t * intr_mask); + + + sw_error_t + fal_intr_port_link_status_get(a_uint32_t dev_id, a_uint32_t port_id, a_uint32_t * intr_mask); + + + sw_error_t + fal_intr_mask_mac_linkchg_set(a_uint32_t dev_id, a_uint32_t port_id, a_bool_t enable); + + + sw_error_t + fal_intr_mask_mac_linkchg_get(a_uint32_t dev_id, a_uint32_t port_id, a_bool_t * enable); + + sw_error_t + fal_intr_status_mac_linkchg_get(a_uint32_t dev_id, fal_pbmp_t *port_bitmap); + + sw_error_t + fal_cpu_vid_en_set(a_uint32_t dev_id, a_bool_t enable); + + sw_error_t + fal_cpu_vid_en_get(a_uint32_t dev_id, a_bool_t * enable); + + sw_error_t + fal_intr_status_mac_linkchg_clear(a_uint32_t dev_id); + + sw_error_t + fal_global_macaddr_set(a_uint32_t dev_id, fal_mac_addr_t * addr); + + sw_error_t + fal_global_macaddr_get(a_uint32_t dev_id, fal_mac_addr_t * addr); + + sw_error_t + fal_lldp_status_set(a_uint32_t dev_id, a_bool_t enable); + + + sw_error_t + fal_lldp_status_get(a_uint32_t dev_id, a_bool_t * enable); + + sw_error_t + fal_frame_crc_reserve_set(a_uint32_t dev_id, a_bool_t enable); + + sw_error_t + fal_frame_crc_reserve_get(a_uint32_t dev_id, a_bool_t * enable); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_MISC_H_ */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_multi.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_multi.h new file mode 100755 index 000000000..fb651d0e8 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_multi.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +#ifndef _FAL_MULTI_H_ +#define _FAL_MULTI_H_ + +/*supports 32 entries*/ +#define FAL_IGMP_SG_ENTRY_MAX 32 + +typedef enum +{ + FAL_ADDR_IPV4 = 0, + FAL_ADDR_IPV6 +} fal_addr_type_t; + +typedef struct +{ + fal_addr_type_t type; + union + { + fal_ip4_addr_t ip4_addr; + fal_ip6_addr_t ip6_addr; + } u; +} fal_igmp_sg_addr_t; + +typedef struct +{ + fal_igmp_sg_addr_t source; + fal_igmp_sg_addr_t group; + fal_pbmp_t port_map; + a_uint32_t vlan_id; +} fal_igmp_sg_entry_t; + +//#define MULTI_DEBUG_ +#ifdef MULTI_DEBUG_ +#define MULTI_DEBUG(x...) aos_printk(x) +#else +#define MULTI_DEBUG(x...) 
+#endif + +#define FAL_ACL_LIST_MULTICAST 55 +#define FAL_MULTICAST_PRI 5 + +#define MULT_ACTION_SET 0 +#define MULT_ACTION_CLEAR 1 + +// static a_uint32_t rule_nr=1; + +typedef struct +{ + a_uint8_t index; //MAX is 32 + fal_igmp_sg_entry_t entry; //Stores the specific ACL rule info +} multi_acl_info_t; + +typedef struct +{ + a_uint8_t cnt; //MAX is 32 + multi_acl_info_t acl_info[FAL_IGMP_SG_ENTRY_MAX]; //Stores the all ACL rule info +} fal_igmp_sg_info_t; + +#endif diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_nat.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_nat.h new file mode 100755 index 000000000..9c6991196 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_nat.h @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2014, 2015, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/** + * @defgroup fal_nat FAL_NAT + * @{ + */ +#ifndef _FAL_NAT_H_ +#define _FAL_NAT_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + + +#define FAL_NAT_ENTRY_PROTOCOL_TCP 0x1 +#define FAL_NAT_ENTRY_PROTOCOL_UDP 0x2 +#define FAL_NAT_ENTRY_PROTOCOL_PPTP 0x4 +#define FAL_NAT_ENTRY_PROTOCOL_ANY 0x8 +#define FAL_NAT_ENTRY_TRANS_IPADDR_INDEX 0x10 +#define FAL_NAT_ENTRY_PORT_CHECK 0x20 +#define FAL_NAT_HASH_KEY_PORT 0x40 +#define FAL_NAT_HASH_KEY_IPADDR 0x80 + + + /* NAT entry operation flags */ +#define FAL_NAT_ENTRY_ID_EN 0x1 +#define FAL_NAT_ENTRY_SRC_IPADDR_EN 0x2 +#define FAL_NAT_ENTRY_TRANS_IPADDR_EN 0x4 +#define FAL_NAT_ENTRY_KEY_EN 0x8 +#define FAL_NAT_ENTRY_PUBLIC_IP_EN 0x10 +#define FAL_NAT_ENTRY_SOURCE_IP_EN 0x20 +#define FAL_NAT_ENTRY_AGE_EN 0x40 +#define FAL_NAT_ENTRY_SYNC_EN 0x80 + + + typedef struct + { + a_uint32_t entry_id; + a_uint32_t flags; + a_uint32_t status; + fal_ip4_addr_t src_addr; + fal_ip4_addr_t dst_addr; + a_uint16_t src_port; + a_uint16_t dst_port; + fal_ip4_addr_t trans_addr; + a_uint16_t trans_port; + a_uint16_t rsv; + a_bool_t mirror_en; + a_bool_t counter_en; + a_uint32_t counter_id; + a_uint32_t ingress_packet; + a_uint32_t ingress_byte; + a_uint32_t egress_packet; + a_uint32_t egress_byte; + fal_fwd_cmd_t action; + a_uint32_t load_balance; + a_uint32_t flow_cookie; + a_uint32_t vrf_id; + a_uint32_t aging_sync; + a_bool_t priority_en; + a_uint32_t priority_val; + } fal_napt_entry_t; + + typedef struct + { + a_uint32_t proto; /*1 tcp; 2 udp*/ + fal_ip4_addr_t src_addr; + fal_ip4_addr_t dst_addr; + a_uint16_t src_port; + a_uint16_t dst_port; + a_uint32_t flow_cookie; + } fal_flow_cookie_t; + + typedef struct + { + a_uint32_t proto; /*1 tcp; 2 udp*/ + fal_ip4_addr_t src_addr; + fal_ip4_addr_t dst_addr; + a_uint16_t src_port; + a_uint16_t dst_port; + a_uint8_t load_balance; + } fal_flow_rfs_t; + + typedef struct + { + a_uint32_t entry_id; + a_uint32_t flags; + 
a_uint32_t status; + fal_ip4_addr_t src_addr; + fal_ip4_addr_t trans_addr; + a_uint16_t port_num; + a_uint16_t port_range; + a_uint32_t slct_idx; + a_bool_t mirror_en; + a_bool_t counter_en; + a_uint32_t counter_id; + a_uint32_t ingress_packet; + a_uint32_t ingress_byte; + a_uint32_t egress_packet; + a_uint32_t egress_byte; + fal_fwd_cmd_t action; + a_uint32_t vrf_id; + } fal_nat_entry_t; + + + typedef enum + { + FAL_NAPT_FULL_CONE = 0, + FAL_NAPT_STRICT_CONE, + FAL_NAPT_PORT_STRICT, + FAL_NAPT_SYNMETRIC, + } fal_napt_mode_t; + + + typedef struct + { + a_uint32_t entry_id; + fal_ip4_addr_t pub_addr; + } fal_nat_pub_addr_t; + + + sw_error_t + fal_nat_add(a_uint32_t dev_id, fal_nat_entry_t * nat_entry); + + + sw_error_t + fal_nat_del(a_uint32_t dev_id, a_uint32_t del_mode, fal_nat_entry_t * nat_entry); + + + sw_error_t + fal_nat_get(a_uint32_t dev_id, a_uint32_t get_mode, fal_nat_entry_t * nat_entry); + + + sw_error_t + fal_nat_next(a_uint32_t dev_id, a_uint32_t get_mode, fal_nat_entry_t * nat_entry); + + + sw_error_t + fal_nat_counter_bind(a_uint32_t dev_id, a_uint32_t entry_id, a_uint32_t cnt_id, a_bool_t enable); + + + sw_error_t + fal_napt_add(a_uint32_t dev_id, fal_napt_entry_t * napt_entry); + + + sw_error_t + fal_napt_del(a_uint32_t dev_id, a_uint32_t del_mode, fal_napt_entry_t * napt_entry); + + + sw_error_t + fal_napt_get(a_uint32_t dev_id, a_uint32_t get_mode, fal_napt_entry_t * napt_entry); + + + sw_error_t + fal_napt_next(a_uint32_t dev_id, a_uint32_t next_mode, fal_napt_entry_t * napt_entry); + + + sw_error_t + fal_napt_counter_bind(a_uint32_t dev_id, a_uint32_t entry_id, a_uint32_t cnt_id, a_bool_t enable); + + + sw_error_t + fal_flow_add(a_uint32_t dev_id, fal_napt_entry_t * napt_entry); + + + sw_error_t + fal_flow_del(a_uint32_t dev_id, a_uint32_t del_mode, fal_napt_entry_t * napt_entry); + + + sw_error_t + fal_flow_get(a_uint32_t dev_id, a_uint32_t get_mode, fal_napt_entry_t * napt_entry); + + + sw_error_t + fal_flow_next(a_uint32_t dev_id, 
a_uint32_t next_mode, fal_napt_entry_t * napt_entry); + + + sw_error_t + fal_flow_counter_bind(a_uint32_t dev_id, a_uint32_t entry_id, a_uint32_t cnt_id, a_bool_t enable); + + + sw_error_t + fal_nat_status_set(a_uint32_t dev_id, a_bool_t enable); + + + sw_error_t + fal_nat_status_get(a_uint32_t dev_id, a_bool_t * enable); + + + sw_error_t + fal_nat_hash_mode_set(a_uint32_t dev_id, a_uint32_t mode); + + + sw_error_t + fal_nat_hash_mode_get(a_uint32_t dev_id, a_uint32_t * mode); + + + sw_error_t + fal_napt_status_set(a_uint32_t dev_id, a_bool_t enable); + + + sw_error_t + fal_napt_status_get(a_uint32_t dev_id, a_bool_t * enable); + + + sw_error_t + fal_napt_mode_set(a_uint32_t dev_id, fal_napt_mode_t mode); + + + sw_error_t + fal_napt_mode_get(a_uint32_t dev_id, fal_napt_mode_t * mode); + + + sw_error_t + fal_napt_mode_get(a_uint32_t dev_id, fal_napt_mode_t * mode); + + + sw_error_t + fal_nat_prv_base_addr_set(a_uint32_t dev_id, fal_ip4_addr_t addr); + + + sw_error_t + fal_nat_prv_base_addr_get(a_uint32_t dev_id, fal_ip4_addr_t * addr); + + sw_error_t + fal_nat_prv_base_mask_set(a_uint32_t dev_id, fal_ip4_addr_t addr); + + sw_error_t + fal_nat_prv_base_mask_get(a_uint32_t dev_id, fal_ip4_addr_t * addr); + + + sw_error_t + fal_nat_prv_addr_mode_set(a_uint32_t dev_id, a_bool_t map_en); + + + sw_error_t + fal_nat_prv_addr_mode_get(a_uint32_t dev_id, a_bool_t * map_en); + + + sw_error_t + fal_nat_pub_addr_add(a_uint32_t dev_id, fal_nat_pub_addr_t * entry); + + + sw_error_t + fal_nat_pub_addr_del(a_uint32_t dev_id, a_uint32_t del_mode, fal_nat_pub_addr_t * entry); + + + sw_error_t + fal_nat_pub_addr_next(a_uint32_t dev_id, a_uint32_t next_mode, fal_nat_pub_addr_t * entry); + + + sw_error_t + fal_nat_unk_session_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd); + + + sw_error_t + fal_nat_unk_session_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd); + + sw_error_t + fal_nat_global_set(a_uint32_t dev_id, a_bool_t enable, + a_bool_t sync_cnt_enable, a_uint32_t portbmp); + + 
sw_error_t + fal_flow_cookie_set(a_uint32_t dev_id, fal_flow_cookie_t * flow_cookie); + + sw_error_t + fal_flow_rfs_set(a_uint32_t dev_id, a_uint8_t action, fal_flow_rfs_t * rfs); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_NAT_H_ */ + +/** + * @} + */ + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_policer.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_policer.h new file mode 100755 index 000000000..7a646348f --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_policer.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/** + * @defgroup fal_policer FAL_POLICER + * @{ + */ +#ifndef _FAL_POLICER_H_ +#define _FAL_POLICER_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + +typedef struct +{ + a_bool_t meter_en; /* meter enable or disable */ + a_bool_t couple_en; /* two buckets coupling enable or disable*/ + a_uint32_t color_mode; /* color aware or color blind */ + a_uint32_t frame_type; /* frame type, bit0:unicast;bit1: unkown unicast;bit2:multicast;bit3: unknown multicast; bit4:broadcast */ + a_uint32_t meter_mode; + a_uint32_t meter_unit; /* 0:byte based; 1:packet based*/ + a_uint32_t cir; /* committed information rate */ + a_uint32_t cbs; /* committed burst size */ + a_uint32_t eir; /* excess information rate */ + a_uint32_t ebs; /* excess burst size */ +} fal_policer_config_t; + +typedef struct +{ + a_bool_t yellow_priority_en; /* yellow traffic internal priority change enable*/ + a_bool_t yellow_drop_priority_en; /* yellow traffic internal drop priority change enable*/ + a_bool_t yellow_pcp_en; /* yellow traffic pcp change enable*/ + a_bool_t yellow_dei_en; /* yellow traffic dei change enable*/ + a_uint32_t yellow_priority; /* yellow traffic internal priority value*/ + a_uint32_t yellow_drop_priority; /* yellow traffic internal drop priority value*/ + a_uint32_t yellow_pcp; /* yellow traffic pcp value*/ + a_uint32_t yellow_dei; /* yellow traffic dei value*/ + fal_fwd_cmd_t red_action; /* red traffic drop or forward*/ + a_bool_t red_priority_en; /* red traffic internal priority change enable*/ + a_bool_t red_drop_priority_en; /* red traffic internal drop priority change enable*/ + a_bool_t red_pcp_en; /* red traffic pcp change enable*/ + a_bool_t red_dei_en; /* red traffic dei change enable*/ + a_uint32_t red_priority; /* red traffic internal priority value*/ + a_uint32_t red_drop_priority; /* red traffic internal drop priority value*/ + a_uint32_t red_pcp; /* red traffic pcp value*/ + a_uint32_t 
red_dei; /* red traffic dei value*/ +}fal_policer_action_t; + +typedef struct +{ + a_uint32_t green_packet_counter; /*green packet counter */ + a_uint64_t green_byte_counter; /*green byte counter */ + a_uint32_t yellow_packet_counter; /*yellow packet counter */ + a_uint64_t yellow_byte_counter; /*yellow byte counter */ + a_uint32_t red_packet_counter; /*red packet counter */ + a_uint64_t red_byte_counter; /*red byte counter */ +} fal_policer_counter_t; + +typedef struct +{ + a_uint32_t policer_drop_packet_counter; /*drop packet counter by policer*/ + a_uint64_t policer_drop_byte_counter; /*drop byte counter by policer */ + a_uint32_t policer_forward_packet_counter; /*forward packet counter by policer*/ + a_uint64_t policer_forward_byte_counter; /*forward byte counter by policer*/ + a_uint32_t policer_bypass_packet_counter; /*bypass packet counter by policer*/ + a_uint64_t policer_bypass_byte_counter; /*bypass byte counter by policer */ +} fal_policer_global_counter_t; + +enum +{ + FUNC_ADPT_ACL_POLICER_COUNTER_GET = 0, + FUNC_ADPT_PORT_POLICER_COUNTER_GET, + FUNC_ADPT_PORT_COMPENSATION_BYTE_GET, + FUNC_ADPT_PORT_POLICER_ENTRY_GET, + FUNC_ADPT_PORT_POLICER_ENTRY_SET, + FUNC_ADPT_ACL_POLICER_ENTRY_GET, + FUNC_ADPT_ACL_POLICER_ENTRY_SET, + FUNC_ADPT_POLICER_TIME_SLOT_GET, + FUNC_ADPT_PORT_COMPENSATION_BYTE_SET, + FUNC_ADPT_POLICER_TIME_SLOT_SET, + FUNC_ADPT_POLICER_GLOBAL_COUNTER_GET, +}; + + +sw_error_t +fal_port_policer_entry_set(a_uint32_t dev_id, fal_port_t port_id, + fal_policer_config_t *policer, fal_policer_action_t *action); + +sw_error_t +fal_port_policer_entry_get(a_uint32_t dev_id, fal_port_t port_id, + fal_policer_config_t *policer, fal_policer_action_t *atcion); + +sw_error_t +fal_acl_policer_entry_set(a_uint32_t dev_id, a_uint32_t index, + fal_policer_config_t *policer, fal_policer_action_t *action); + +sw_error_t +fal_acl_policer_entry_get(a_uint32_t dev_id, a_uint32_t index, + fal_policer_config_t *policer, fal_policer_action_t *action); + +sw_error_t 
+fal_port_policer_counter_get(a_uint32_t dev_id, fal_port_t port_id, + fal_policer_counter_t *counter); + +sw_error_t +fal_acl_policer_counter_get(a_uint32_t dev_id, a_uint32_t index, + fal_policer_counter_t *counter); + +sw_error_t +fal_port_policer_compensation_byte_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t length); + +sw_error_t +fal_port_policer_compensation_byte_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t *length); + +sw_error_t +fal_policer_timeslot_set(a_uint32_t dev_id, a_uint32_t timeslot); + +sw_error_t +fal_policer_timeslot_get(a_uint32_t dev_id, a_uint32_t *timeslot); + +sw_error_t +fal_policer_global_counter_get(a_uint32_t dev_id, + fal_policer_global_counter_t *counter); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_POLICER_H_ */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_port_ctrl.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_port_ctrl.h new file mode 100755 index 000000000..2b63059df --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_port_ctrl.h @@ -0,0 +1,777 @@ +/* + * Copyright (c) 2014, 2016-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +/*qca808x_start*/ +/** + * @defgroup fal_port_ctrl FAL_PORT_CONTROL + * @{ + */ +#ifndef _FAL_PORTCTRL_H_ +#define _FAL_PORTCTRL_H_ + +#ifdef __cplusplus +extern "c" { +#endif + +#include "sw.h" +#include "fal_type.h" + + typedef enum { + FAL_HALF_DUPLEX = 0, + FAL_FULL_DUPLEX, + FAL_DUPLEX_BUTT = 0xffff + } + fal_port_duplex_t; + + typedef enum + { + FAL_SPEED_10 = 10, + FAL_SPEED_100 = 100, + FAL_SPEED_1000 = 1000, + FAL_SPEED_2500 = 2500, + FAL_SPEED_5000 = 5000, + FAL_SPEED_10000 = 10000, + FAL_SPEED_BUTT = 0xffff, + } fal_port_speed_t; + + typedef enum + { + FAL_CABLE_STATUS_NORMAL = 0, + FAL_CABLE_STATUS_SHORT = 1, + FAL_CABLE_STATUS_OPENED = 2, + FAL_CABLE_STATUS_INVALID = 3, + FAL_CABLE_STATUS_CROSSOVERA = 4, + FAL_CABLE_STATUS_CROSSOVERB = 5, + FAL_CABLE_STATUS_CROSSOVERC = 6, + FAL_CABLE_STATUS_CROSSOVERD = 7, + FAL_CABLE_STATUS_LOW_MISMATCH = 8, + FAL_CABLE_STATUS_HIGH_MISMATCH = 9, + FAL_CABLE_STATUS_BUTT = 0xffff, + } fal_cable_status_t; + +#define FAL_ENABLE 1 +#define FAL_DISABLE 0 +#define FAL_MAX_PORT_NUMBER 8 + +//phy autoneg adv +#define FAL_PHY_ADV_10T_HD 0x01 +#define FAL_PHY_ADV_10T_FD 0x02 +#define FAL_PHY_ADV_100TX_HD 0x04 +#define FAL_PHY_ADV_100TX_FD 0x08 +//#define FAL_PHY_ADV_1000T_HD 0x100 +#define FAL_PHY_ADV_1000T_FD 0x200 +#define FAL_PHY_ADV_1000BX_HD 0x400 +#define FAL_PHY_ADV_1000BX_FD 0x800 +#define FAL_PHY_ADV_2500T_FD 0x1000 +#define FAL_PHY_ADV_5000T_FD 0x2000 +#define FAL_PHY_ADV_10000T_FD 0x4000 + +#define FAL_PHY_ADV_10G_R_FD 0x8000 + +#define FAL_PHY_ADV_FE_SPEED_ALL \ + (FAL_PHY_ADV_10T_HD | FAL_PHY_ADV_10T_FD | FAL_PHY_ADV_100TX_HD |\ + FAL_PHY_ADV_100TX_FD) + +#define FAL_PHY_ADV_GE_SPEED_ALL \ + (FAL_PHY_ADV_10T_HD | FAL_PHY_ADV_10T_FD | FAL_PHY_ADV_100TX_HD |\ + FAL_PHY_ADV_100TX_FD | FAL_PHY_ADV_1000T_FD) + +#define FAL_PHY_ADV_BX_SPEED_ALL \ + (FAL_PHY_ADV_1000BX_HD | FAL_PHY_ADV_1000BX_FD |FAL_PHY_ADV_10G_R_FD) + +#define FAL_PHY_ADV_XGE_SPEED_ALL \ + (FAL_PHY_ADV_2500T_FD | FAL_PHY_ADV_5000T_FD | 
FAL_PHY_ADV_10000T_FD) + +#define FAL_PHY_ADV_PAUSE 0x10 +#define FAL_PHY_ADV_ASY_PAUSE 0x20 +#define FAL_PHY_FE_ADV_ALL \ + (FAL_PHY_ADV_FE_SPEED_ALL | FAL_PHY_ADV_PAUSE | FAL_PHY_ADV_ASY_PAUSE) +#define FAL_PHY_GE_ADV_ALL \ + (FAL_PHY_ADV_GE_SPEED_ALL | FAL_PHY_ADV_PAUSE | FAL_PHY_ADV_ASY_PAUSE) + +#define FAL_PHY_COMBO_ADV_ALL \ + (FAL_PHY_ADV_BX_SPEED_ALL | FAL_PHY_ADV_GE_SPEED_ALL | FAL_PHY_ADV_XGE_SPEED_ALL |\ + FAL_PHY_ADV_PAUSE | FAL_PHY_ADV_ASY_PAUSE) + +//phy capablity +#define FAL_PHY_AUTONEG_CAPS 0x01 +#define FAL_PHY_100T2_HD_CAPS 0x02 +#define FAL_PHY_100T2_FD_CAPS 0x04 +#define FAL_PHY_10T_HD_CAPS 0x08 +#define FAL_PHY_10T_FD_CAPS 0x10 +#define FAL_PHY_100X_HD_CAPS 0x20 +#define FAL_PHY_100X_FD_CAPS 0x40 +#define FAL_PHY_100T4_CAPS 0x80 +//#define FAL_PHY_1000T_HD_CAPS 0x100 +#define FAL_PHY_1000T_FD_CAPS 0x200 +//#define FAL_PHY_1000X_HD_CAPS 0x400 +#define FAL_PHY_1000X_FD_CAPS 0x800 + +//phy partner capablity +#define FAL_PHY_PART_10T_HD 0x1 +#define FAL_PHY_PART_10T_FD 0x2 +#define FAL_PHY_PART_100TX_HD 0x4 +#define FAL_PHY_PART_100TX_FD 0x8 +//#define FAL_PHY_PART_1000T_HD 0x10 +#define FAL_PHY_PART_1000T_FD 0x20 + +//phy interrupt flag +#define FAL_PHY_INTR_SPEED_CHANGE 0x1 +#define FAL_PHY_INTR_DUPLEX_CHANGE 0x2 +#define FAL_PHY_INTR_STATUS_UP_CHANGE 0x4 +#define FAL_PHY_INTR_STATUS_DOWN_CHANGE 0x8 +#define FAL_PHY_INTR_BX_FX_STATUS_UP_CHANGE 0x10 +#define FAL_PHY_INTR_BX_FX_STATUS_DOWN_CHANGE 0x20 +#define FAL_PHY_INTR_MEDIA_STATUS_CHANGE 0x40 +#define FAL_PHY_INTR_WOL_STATUS 0x80 +#define FAL_PHY_INTR_POE_STATUS 0x100 + + typedef enum + { + FAL_NO_HEADER_EN = 0, + FAL_ONLY_MANAGE_FRAME_EN, + FAL_ALL_TYPE_FRAME_EN + } fal_port_header_mode_t; + + typedef struct + { + a_uint16_t pair_a_status; + a_uint16_t pair_b_status; + a_uint16_t pair_c_status; + a_uint16_t pair_d_status; + a_uint32_t pair_a_len; + a_uint32_t pair_b_len; + a_uint32_t pair_c_len; + a_uint32_t pair_d_len; + } fal_port_cdt_t; + +/*below is new add for malibu phy*/ + +/** Phy 
mdix mode */ +typedef enum { + PHY_MDIX_AUTO = 0, /**< Auto MDI/MDIX */ + PHY_MDIX_MDI = 1, /**< Fixed MDI */ + PHY_MDIX_MDIX = 2 /**< Fixed MDIX */ +} fal_port_mdix_mode_t; + +/** Phy mdix status */ +typedef enum { + PHY_MDIX_STATUS_MDI = 0, /**< Fixed MDI */ + PHY_MDIX_STATUS_MDIX = 1 /**< Fixed MDIX */ + +} fal_port_mdix_status_t; + +/** Phy master mode */ +typedef enum { + PHY_MASTER_MASTER = 0, /**< Phy manual MASTER configuration */ + PHY_MASTER_SLAVE = 1, /**< Phy manual SLAVE configuration */ + PHY_MASTER_AUTO = 2 /**< Phy automatic MASTER/SLAVE configuration */ +} fal_port_master_t; +/*qca808x_end*/ +/** Phy preferred medium type */ +typedef enum { + PHY_MEDIUM_COPPER = 0, /**< Copper */ + PHY_MEDIUM_FIBER = 1, /**< Fiber */ + +} fal_port_medium_t; + +/** Phy pages */ +typedef enum { + PHY_SGBX_PAGES = 0, /**< sgbx pages */ + PHY_COPPER_PAGES = 1 /**< copper pages */ + +} fal_port_reg_pages_t; + + +/** Phy preferred Fiber mode */ +typedef enum { + PHY_FIBER_100FX = 0, /**< 100FX fiber mode */ + PHY_FIBER_1000BX = 1, /**< 1000BX fiber mode */ + PHY_FIBER_10G_R = 2, /**< 10G-R fiber mode */ + +} fal_port_fiber_mode_t; + +/** Phy reset status */ +typedef enum { + PHY_RESET_DONE = 0, /**< Phy reset done */ + PHY_RESET_BUSY = 1 /**< Phy still in reset process */ +} fal_port_reset_status_t; + +/** Phy auto-negotiation status */ +typedef enum { + PHY_AUTO_NEG_STATUS_BUSY = 0, /**< Phy still in auto-negotiation process */ + PHY_AUTO_NEG_STATUS_DONE = 1 /**< Phy auto-negotiation done */ +} fal_port_auto_neg_status_t; +/*qca808x_start*/ + +/** Phy interface mode */ + typedef enum { + PHY_PSGMII_BASET = 0, + /**< PSGMII mode */ + PHY_PSGMII_BX1000 = 1, + /**< PSGMII BX1000 mode */ + PHY_PSGMII_FX100 = 2, + /**< PSGMII FX100 mode */ + PHY_PSGMII_AMDET = 3, + /**< PSGMII Auto mode */ + PHY_SGMII_BASET = 4, + /**< SGMII mode */ + PORT_QSGMII, + /**>24)&0xff) +#define FAL_PORT_ID_VALUE(port_id) ((port_id)&0xffffff) +#define FAL_PORT_ID(type, value) (((type)<<24)|(value)) 
+ +#define FAL_IS_PPORT(port_id) (((FAL_PORT_ID_TYPE(port_id))==FAL_PORT_TYPE_PPORT)?1:0) +#define FAL_IS_TRUNK(port_id) (((FAL_PORT_ID_TYPE(port_id))==FAL_PORT_TYPE_TRUNK)?1:0) +#define FAL_IS_VPORT(port_id) (((FAL_PORT_ID_TYPE(port_id))==FAL_PORT_TYPE_VPORT)?1:0) + + +#if (SW_MAX_NR_PORT <= 32) + typedef a_uint32_t fal_pbmp_t; +#else + typedef a_uint64_t fal_pbmp_t; +#endif + + typedef struct + { + a_uint8_t uc[6]; + } fal_mac_addr_t; + + typedef a_uint32_t fal_ip4_addr_t; + + typedef struct + { + a_uint32_t ul[4]; + } fal_ip6_addr_t; + + /** + @brief This enum defines several forwarding command type. + * Field description: + FAL_MAC_FRWRD - packets are normally forwarded + FAL_MAC_DROP - packets are dropped + FAL_MAC_CPY_TO_CPU - packets are copyed to cpu + FAL_MAC_RDT_TO_CPU - packets are redirected to cpu + */ + typedef enum + { + FAL_MAC_FRWRD = 0, /**< packets are normally forwarded */ + FAL_MAC_DROP, /**< packets are dropped */ + FAL_MAC_CPY_TO_CPU, /**< packets are copyed to cpu */ + FAL_MAC_RDT_TO_CPU /**< packets are redirected to cpu */ + } fal_fwd_cmd_t; + + typedef enum + { + FAL_BYTE_BASED = 0, + FAL_FRAME_BASED, + FAL_RATE_MODE_BUTT + } fal_traffic_unit_t; + + typedef a_uint32_t fal_queue_t; + +#define FAL_SVL_FID 0xffff + + + /** + @brief This enum defines packets transmitted out vlan tagged mode. 
+ */ + typedef enum + { + FAL_EG_UNMODIFIED = 0, /**< egress transmit packets unmodified */ + FAL_EG_UNTAGGED, /**< egress transmit packets without vlan tag*/ + FAL_EG_TAGGED, /**< egress transmit packets with vlan tag */ + FAL_EG_HYBRID, /**< egress transmit packets in hybrid tag mode */ + FAL_EG_UNTOUCHED, + FAL_EG_MODE_BUTT + } fal_pt_1q_egmode_t; + +#define FAL_NEXT_ENTRY_FIRST_ID 0xffffffff + + typedef struct{ + a_uint32_t reg_count; + a_uint32_t reg_base; + a_uint32_t reg_end; + a_uint32_t reg_value[256]; + a_int8_t reg_name[32]; + }fal_reg_dump_t; + + typedef struct{ + a_uint32_t reg_count; + a_uint32_t reg_addr[32]; + a_uint32_t reg_value[32]; + a_int8_t reg_name[32]; + }fal_debug_reg_dump_t; + + typedef struct{ + a_uint32_t phy_count; + a_uint32_t phy_base; + a_uint32_t phy_end; + a_uint16_t phy_value[256]; + a_int8_t phy_name[32]; + }fal_phy_dump_t; + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_TYPE_H_ */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_uk_if.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_uk_if.h new file mode 100755 index 000000000..18a8bd761 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_uk_if.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2014,2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#ifndef _FAL_UK_IF_H_ +#define _FAL_UK_IF_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "sw.h" +#include "fal_type.h" +#include "ssdk_init.h" + + sw_error_t + sw_uk_exec(a_uint32_t api_id, ...); + + sw_error_t + ssdk_init(a_uint32_t dev_id, ssdk_init_cfg * cfg); + + sw_error_t + ssdk_cleanup(void); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_UK_IF_H_ */ + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_vlan.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_vlan.h new file mode 100755 index 000000000..a1397a592 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_vlan.h @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/** + * @defgroup fal_vlan FAL_VLAN + * @{ + */ +#ifndef _FAL_VLAN_H +#define _FAL_VLAN_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + + /** + @brief This structure defines vlan entry. + */ + typedef struct + { + a_uint16_t vid; /**< vlan entry id */ + a_uint16_t fid; /**< filter data base id*/ + fal_pbmp_t mem_ports; /**< member port bit map */ + fal_pbmp_t tagged_ports; /**< bit map of tagged infomation for member port*/ + fal_pbmp_t untagged_ports; /**< bit map of untagged infomation for member port*/ + fal_pbmp_t unmodify_ports;/**< bit map of unmodified infomation for member port*/ + fal_pbmp_t u_ports; + a_bool_t learn_dis; /**< disable address learning*/ + a_bool_t vid_pri_en; /**< enable 802.1p*/ + a_uint8_t vid_pri; /**< vlaue of 802.1p when enable vid_pri_en*/ + } fal_vlan_t; + + + sw_error_t + fal_vlan_entry_append(a_uint32_t dev_id, fal_vlan_t * vlan_entry); + + + + sw_error_t + fal_vlan_create(a_uint32_t dev_id, a_uint32_t vlan_id); + + + + sw_error_t + fal_vlan_next(a_uint32_t dev_id, a_uint32_t vlan_id, fal_vlan_t * p_vlan); + + + + sw_error_t + fal_vlan_find(a_uint32_t dev_id, a_uint32_t vlan_id, fal_vlan_t * p_vlan); + + + + sw_error_t + fal_vlan_member_update(a_uint32_t dev_id, a_uint32_t vlan_id, + fal_pbmp_t member, fal_pbmp_t u_member); + + + + sw_error_t + fal_vlan_delete(a_uint32_t dev_id, a_uint32_t vlan_id); + + + + sw_error_t + fal_vlan_reset(a_uint32_t dev_id); + + + sw_error_t + fal_vlan_flush(a_uint32_t dev_id); + + + sw_error_t + fal_vlan_init(a_uint32_t dev_id); + + + sw_error_t + fal_vlan_cleanup(void); + + + sw_error_t + fal_vlan_fid_set(a_uint32_t dev_id, a_uint32_t vlan_id, a_uint32_t fid); + + + sw_error_t + fal_vlan_fid_get(a_uint32_t dev_id, a_uint32_t vlan_id, a_uint32_t * fid); + + + sw_error_t + fal_vlan_member_add(a_uint32_t dev_id, a_uint32_t vlan_id, + fal_port_t port_id, fal_pt_1q_egmode_t port_info); + + + sw_error_t + 
fal_vlan_member_del(a_uint32_t dev_id, a_uint32_t vlan_id, fal_port_t port_id); + + + sw_error_t + fal_vlan_learning_state_set(a_uint32_t dev_id, a_uint32_t vlan_id, + a_bool_t enable); + + + sw_error_t + fal_vlan_learning_state_get(a_uint32_t dev_id, a_uint32_t vlan_id, + a_bool_t * enable); + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _FAL_VLAN_H */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_vsi.h b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_vsi.h new file mode 100755 index 000000000..c6543be6f --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/fal/fal_vsi.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/** + * @defgroup fal_stp FAL_VSI + * @{ + */ +#ifndef _FAL_VSI_H_ +#define _FAL_VSI_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "common/sw.h" +#include "fal/fal_type.h" + +#define FAL_VSI_INVALID 0xffff +#define FAL_VLAN_INVALID 0xffff + +typedef struct{ + a_uint32_t lrn_en; /*0: disable new address learn, 1: enable new address learn*/ + fal_fwd_cmd_t action;/*0:forward, 1:drop, 2: copy to CPU, 3: redirect to CPU*/ +}fal_vsi_newaddr_lrn_t; + +typedef struct{ + a_uint32_t stamove_en;/*0:disable station move, 1: enable station move*/ + fal_fwd_cmd_t action;/*0:forward, 1:drop, 2: copy to CPU, 3: redirect to CPU*/ +}fal_vsi_stamove_t; + +typedef struct{ + a_uint32_t member_ports;/*VSI member ports for known unicast and multicast*/ + a_uint32_t uuc_ports;/*VSI member ports for unknown unicast*/ + a_uint32_t umc_ports;/*VSI member ports for unknown multicast*/ + a_uint32_t bc_ports;/*VSI member ports for broadcast*/ +}fal_vsi_member_t; + +typedef struct +{ + a_uint32_t rx_packet_counter; + a_uint64_t rx_byte_counter; + a_uint32_t tx_packet_counter; + a_uint64_t tx_byte_counter; + a_uint32_t fwd_packet_counter; + a_uint64_t fwd_byte_counter; + a_uint32_t drop_packet_counter; + a_uint64_t drop_byte_counter; +}fal_vsi_counter_t; + + +enum{ + FUNC_PORT_VLAN_VSI_SET, + FUNC_PORT_VLAN_VSI_GET, + FUNC_PORT_VSI_SET, + FUNC_PORT_VSI_GET, + FUNC_VSI_STAMOVE_SET, + FUNC_VSI_STAMOVE_GET, + FUNC_VSI_NEWADDR_LRN_SET, + FUNC_VSI_NEWADDR_LRN_GET, + FUNC_VSI_MEMBER_SET, + FUNC_VSI_MEMBER_GET, + FUNC_VSI_COUNTER_GET, + FUNC_VSI_COUNTER_CLEANUP, +}; + +sw_error_t +fal_vsi_alloc(a_uint32_t dev_id, a_uint32_t *vsi); + +sw_error_t +fal_vsi_free(a_uint32_t dev_id, a_uint32_t vsi); + +sw_error_t +fal_port_vsi_set(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t vsi_id); + +sw_error_t +fal_port_vsi_get(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t *vsi_id); + +sw_error_t +fal_port_vlan_vsi_set(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t 
stag_vid, a_uint32_t ctag_vid, a_uint32_t vsi_id); + +sw_error_t +fal_port_vlan_vsi_get(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t stag_vid, a_uint32_t ctag_vid, a_uint32_t *vsi_id); + +sw_error_t +fal_vsi_tbl_dump(a_uint32_t dev_id); + +sw_error_t +fal_vsi_newaddr_lrn_set(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_newaddr_lrn_t *newaddr_lrn); + +sw_error_t +fal_vsi_newaddr_lrn_get(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_newaddr_lrn_t *newaddr_lrn); + +sw_error_t +fal_vsi_stamove_set(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_stamove_t *stamove); + +sw_error_t +fal_vsi_stamove_get(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_stamove_t *stamove); + +sw_error_t +fal_vsi_member_set(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_member_t *vsi_member); + +sw_error_t +fal_vsi_member_get(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_member_t *vsi_member); + +sw_error_t +fal_vsi_counter_get(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_counter_t *counter); + +sw_error_t +fal_vsi_counter_cleanup(a_uint32_t dev_id, a_uint32_t vsi_id); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _FAL_VSI_H_ */ + +/** + * @} + */ + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/init/ssdk_init.h b/feeds/ipq807x/qca-ssdk-shell/src/include/init/ssdk_init.h new file mode 100755 index 000000000..de0e27482 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/init/ssdk_init.h @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2014, 2017-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +/*qca808x_start*/ +#ifndef _SSDK_INIT_H_ +#define _SSDK_INIT_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "sw.h" +/*qca808x_end*/ +#include "fal_led.h" + +/*qca808x_start*/ + typedef enum { + HSL_MDIO = 1, + HSL_HEADER, + } + hsl_access_mode; + + typedef enum + { + HSL_NO_CPU = 0, + HSL_CPU_1, + HSL_CPU_2, + HSL_CPU_1_PLUS, + } hsl_init_mode; + typedef sw_error_t + (*mdio_reg_set) (a_uint32_t dev_id, a_uint32_t phy_addr, a_uint32_t reg, + a_uint16_t data); + + typedef sw_error_t + (*mdio_reg_get) (a_uint32_t dev_id, a_uint32_t phy_addr, a_uint32_t reg, + a_uint16_t * data); + + typedef sw_error_t + (*i2c_reg_set) (a_uint32_t dev_id, a_uint32_t phy_addr, a_uint32_t reg, + a_uint16_t data); + + typedef sw_error_t + (*i2c_reg_get) (a_uint32_t dev_id, a_uint32_t phy_addr, a_uint32_t reg, + a_uint16_t * data); +/*qca808x_end*/ + typedef sw_error_t + (*hdr_reg_set) (a_uint32_t dev_id, a_uint32_t reg_addr, a_uint8_t *reg_data, a_uint32_t len); + + typedef sw_error_t + (*hdr_reg_get) (a_uint32_t dev_id, a_uint32_t reg_addr, a_uint8_t *reg_data, a_uint32_t len); + typedef sw_error_t + (*psgmii_reg_set) (a_uint32_t dev_id, a_uint32_t reg_addr, a_uint8_t *reg_data, a_uint32_t len); + + typedef sw_error_t + (*psgmii_reg_get) (a_uint32_t dev_id, a_uint32_t reg_addr, a_uint8_t *reg_data, a_uint32_t len); + + typedef sw_error_t + (*uniphy_reg_set) (a_uint32_t dev_id, a_uint32_t index, a_uint32_t reg_addr, a_uint8_t *reg_data, a_uint32_t len); + + typedef sw_error_t + (*uniphy_reg_get) (a_uint32_t dev_id, a_uint32_t index, a_uint32_t reg_addr, a_uint8_t *reg_data, a_uint32_t len); + + typedef void 
(*mii_reg_set)(a_uint32_t reg, a_uint32_t val); + + typedef a_uint32_t (*mii_reg_get)(a_uint32_t reg); +/*qca808x_start*/ + typedef struct + { + mdio_reg_set mdio_set; + mdio_reg_get mdio_get; +/*qca808x_end*/ + hdr_reg_set header_reg_set; + hdr_reg_get header_reg_get; + psgmii_reg_set psgmii_reg_set; + psgmii_reg_get psgmii_reg_get; + uniphy_reg_set uniphy_reg_set; + uniphy_reg_get uniphy_reg_get; + mii_reg_set mii_reg_set; + mii_reg_get mii_reg_get; +/*qca808x_start*/ + i2c_reg_set i2c_set; + i2c_reg_get i2c_get; + } hsl_reg_func; +/*qca808x_end*/ + + typedef struct + { + a_bool_t mac0_rgmii; + a_bool_t mac5_rgmii; + a_bool_t rx_delay_s0; + a_bool_t rx_delay_s1; + a_bool_t tx_delay_s0; + a_bool_t tx_delay_s1; + a_bool_t rgmii_rxclk_delay; + a_bool_t rgmii_txclk_delay; + a_bool_t phy4_rx_delay; + a_bool_t phy4_tx_delay; + } garuda_init_spec_cfg; +/*qca808x_start*/ + typedef enum + { + CHIP_UNSPECIFIED = 0, + CHIP_ATHENA, + CHIP_GARUDA, + CHIP_SHIVA, + CHIP_HORUS, + CHIP_ISIS, + CHIP_ISISC, + CHIP_DESS, + CHIP_HPPE, + } ssdk_chip_type; +/*qca808x_end*/ + typedef struct + { + a_uint32_t cpu_bmp; + a_uint32_t lan_bmp; + a_uint32_t wan_bmp; + a_uint32_t inner_bmp; + } ssdk_port_cfg; + + typedef struct + { + a_uint32_t led_num; + a_uint32_t led_source_id; + led_ctrl_pattern_t led_pattern; + + } led_source_cfg_t; +/*qca808x_start*/ +typedef struct +{ + hsl_init_mode cpu_mode; + hsl_access_mode reg_mode; + hsl_reg_func reg_func; + + ssdk_chip_type chip_type; + a_uint32_t chip_revision; + /* os specific parameter */ + /* when uk_if based on netlink, it's netlink protocol type*/ + /* when uk_if based on ioctl, it's minor device number, major number + is always 10(misc device) */ + a_uint32_t nl_prot; + /* chip specific parameter */ + void * chip_spec_cfg; +/*qca808x_end*/ + /* port cfg */ + ssdk_port_cfg port_cfg; + a_uint32_t mac_mode; + a_uint32_t led_source_num; + led_source_cfg_t led_source_cfg[15]; +/*qca808x_start*/ + a_uint32_t phy_id; + a_uint32_t mac_mode1; + 
a_uint32_t mac_mode2; +} ssdk_init_cfg; +/*qca808x_end*/ +#if defined ATHENA +#define def_init_cfg {.reg_mode = HSL_MDIO, .cpu_mode = HSL_CPU_2}; +#elif defined GARUDA + +#define def_init_cfg_cpu2 {.reg_mode = HSL_MDIO, .cpu_mode = HSL_CPU_2,}; + +#define def_init_spec_cfg_cpu2 {.mac0_rgmii = A_TRUE, .mac5_rgmii = A_TRUE, \ + .rx_delay_s0 = A_FALSE, .rx_delay_s1 = A_FALSE, \ + .tx_delay_s0 = A_TRUE, .tx_delay_s1 = A_FALSE,\ + .rgmii_rxclk_delay = A_TRUE, .rgmii_txclk_delay = A_TRUE,\ + .phy4_rx_delay = A_TRUE, .phy4_tx_delay = A_TRUE,} + +#define def_init_cfg_cpu1 {.reg_mode = HSL_MDIO, .cpu_mode = HSL_CPU_1,}; + +#define def_init_spec_cfg_cpu1 {.mac0_rgmii = A_TRUE, .mac5_rgmii = A_FALSE, \ + .rx_delay_s0 = A_FALSE, .rx_delay_s1 = A_FALSE, \ + .tx_delay_s0 = A_TRUE, .tx_delay_s1 = A_FALSE,\ + .rgmii_rxclk_delay = A_TRUE, .rgmii_txclk_delay = A_TRUE, \ + .phy4_rx_delay = A_TRUE, .phy4_tx_delay = A_TRUE,} + +#define def_init_cfg_cpu1plus {.reg_mode = HSL_MDIO, .cpu_mode = HSL_CPU_1_PLUS,}; + +#define def_init_spec_cfg_cpu1plus {.mac0_rgmii = A_TRUE, .mac5_rgmii = A_FALSE, \ + .rx_delay_s0 = A_FALSE, .rx_delay_s1 = A_FALSE, \ + .tx_delay_s0 = A_FALSE, .tx_delay_s1 = A_FALSE,\ + .rgmii_rxclk_delay = A_TRUE, .rgmii_txclk_delay = A_TRUE, \ + .phy4_rx_delay = A_TRUE, .phy4_tx_delay = A_TRUE,} + +#define def_init_cfg_nocpu {.reg_mode = HSL_MDIO, .cpu_mode = HSL_NO_CPU,}; + +#define def_init_spec_cfg_nocpu { .mac0_rgmii = A_FALSE, .mac5_rgmii = A_FALSE, \ + .rx_delay_s0 = A_FALSE, .rx_delay_s1 = A_FALSE, \ + .tx_delay_s0 = A_FALSE, .tx_delay_s1 = A_FALSE,\ + .rgmii_rxclk_delay = A_TRUE, .rgmii_txclk_delay = A_TRUE, \ + .phy4_rx_delay = A_TRUE, .phy4_tx_delay = A_TRUE,} + +#define def_init_cfg_cpu1_gmii {.reg_mode = HSL_MDIO, .cpu_mode = HSL_CPU_1,}; + +#define def_init_spec_cfg_cpu1_gmii {.mac0_rgmii = A_FALSE, .mac5_rgmii = A_FALSE, \ + .rx_delay_s0 = A_FALSE, .rx_delay_s1 = A_FALSE, \ + .tx_delay_s0 = A_TRUE, .tx_delay_s1 = A_FALSE,\ + .rgmii_rxclk_delay = A_TRUE, 
.rgmii_txclk_delay = A_TRUE, \ + .phy4_rx_delay = A_TRUE, .phy4_tx_delay = A_TRUE,} + +#define def_init_cfg def_init_cfg_cpu2 +#define def_init_spec_cfg def_init_spec_cfg_cpu2 + +#elif defined SHIVA +#define def_init_cfg {.reg_mode = HSL_MDIO, .cpu_mode = HSL_CPU_2}; +#elif defined HORUS +#define def_init_cfg {.reg_mode = HSL_MDIO, .cpu_mode = HSL_CPU_2}; +#elif defined ISIS +#define def_init_cfg {.reg_mode = HSL_MDIO, .cpu_mode = HSL_CPU_2}; +#elif defined ISISC +/*qca808x_start*/ +#define def_init_cfg {.reg_mode = HSL_MDIO, .cpu_mode = HSL_CPU_2}; +/*qca808x_end*/ +#endif + typedef struct + { + a_bool_t in_acl; + a_bool_t in_fdb; + a_bool_t in_igmp; + a_bool_t in_leaky; + a_bool_t in_led; + a_bool_t in_mib; + a_bool_t in_mirror; + a_bool_t in_misc; + a_bool_t in_portcontrol; + a_bool_t in_portvlan; + a_bool_t in_qos; + a_bool_t in_rate; + a_bool_t in_stp; + a_bool_t in_vlan; + a_bool_t in_reduced_acl; + a_bool_t in_ip; + a_bool_t in_nat; + a_bool_t in_cosmap; + a_bool_t in_sec; + a_bool_t in_trunk; + a_bool_t in_nathelper; + a_bool_t in_interfacectrl; + } ssdk_features; +/*qca808x_start*/ +#define CFG_STR_SIZE 20 + typedef struct + { + a_uint8_t build_ver[CFG_STR_SIZE]; + a_uint8_t build_date[CFG_STR_SIZE]; + + a_uint8_t chip_type[CFG_STR_SIZE]; //GARUDA + a_uint8_t cpu_type[CFG_STR_SIZE]; //mips + a_uint8_t os_info[CFG_STR_SIZE]; //OS=linux OS_VER=2_6 + + a_bool_t fal_mod; + a_bool_t kernel_mode; + a_bool_t uk_if; +/*qca808x_end*/ + ssdk_features features; +/*qca808x_start*/ + ssdk_init_cfg init_cfg; + } ssdk_cfg_t; + sw_error_t + ssdk_init(a_uint32_t dev_id, ssdk_init_cfg *cfg); +/*qca808x_end*/ + sw_error_t + ssdk_hsl_access_mode_set(a_uint32_t dev_id, hsl_access_mode reg_mode); +/*qca808x_start*/ +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _SSDK_INIT_H */ +/*qca808x_end*/ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/init/ssdk_plat.h b/feeds/ipq807x/qca-ssdk-shell/src/include/init/ssdk_plat.h new file mode 100755 index 
000000000..7cbeaa8b3 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/init/ssdk_plat.h @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef __SSDK_PLAT_H +#define __SSDK_PLAT_H + +#ifndef BIT +#define BIT(_n) (1UL << (_n)) +#endif + + +#ifndef BITS +#define BITS(_s, _n) (((1UL << (_n)) - 1) << _s) +#endif + +/* Atheros specific MII registers */ +#define QCA_MII_MMD_ADDR 0x0d +#define QCA_MII_MMD_DATA 0x0e +#define QCA_MII_DBG_ADDR 0x1d +#define QCA_MII_DBG_DATA 0x1e + +#define AR8327_REG_CTRL 0x0000 +#define AR8327_CTRL_REVISION BITS(0, 8) +#define AR8327_CTRL_REVISION_S 0 +#define AR8327_CTRL_VERSION BITS(8, 8) +#define AR8327_CTRL_VERSION_S 8 +#define AR8327_CTRL_RESET BIT(31) + +#define AR8327_REG_LED_CTRL_0 0x50 +#define AR8327_REG_LED_CTRL_1 0x54 +#define AR8327_REG_LED_CTRL_2 0x58 +#define AR8327_REG_LED_CTRL_3 0x5c + +#define AR8327_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) + +#define AR8327_PORT_STATUS_SPEED BITS(0,2) +#define AR8327_PORT_STATUS_SPEED_S 0 +#define AR8327_PORT_STATUS_TXMAC BIT(2) +#define AR8327_PORT_STATUS_RXMAC BIT(3) +#define AR8327_PORT_STATUS_TXFLOW BIT(4) +#define AR8327_PORT_STATUS_RXFLOW BIT(5) +#define AR8327_PORT_STATUS_DUPLEX BIT(6) +#define 
AR8327_PORT_STATUS_LINK_UP BIT(8) +#define AR8327_PORT_STATUS_LINK_AUTO BIT(9) +#define AR8327_PORT_STATUS_LINK_PAUSE BIT(10) + +#define AR8327_REG_PAD0_CTRL 0x4 +#define AR8327_REG_PAD5_CTRL 0x8 +#define AR8327_REG_PAD6_CTRL 0xc +#define AR8327_PAD_CTRL_MAC_MII_RXCLK_SEL BIT(0) +#define AR8327_PAD_CTRL_MAC_MII_TXCLK_SEL BIT(1) +#define AR8327_PAD_CTRL_MAC_MII_EN BIT(2) +#define AR8327_PAD_CTRL_MAC_GMII_RXCLK_SEL BIT(4) +#define AR8327_PAD_CTRL_MAC_GMII_TXCLK_SEL BIT(5) +#define AR8327_PAD_CTRL_MAC_GMII_EN BIT(6) +#define AR8327_PAD_CTRL_SGMII_EN BIT(7) +#define AR8327_PAD_CTRL_PHY_MII_RXCLK_SEL BIT(8) +#define AR8327_PAD_CTRL_PHY_MII_TXCLK_SEL BIT(9) +#define AR8327_PAD_CTRL_PHY_MII_EN BIT(10) +#define AR8327_PAD_CTRL_PHY_GMII_PIPE_RXCLK_SEL BIT(11) +#define AR8327_PAD_CTRL_PHY_GMII_RXCLK_SEL BIT(12) +#define AR8327_PAD_CTRL_PHY_GMII_TXCLK_SEL BIT(13) +#define AR8327_PAD_CTRL_PHY_GMII_EN BIT(14) +#define AR8327_PAD_CTRL_PHYX_GMII_EN BIT(16) +#define AR8327_PAD_CTRL_PHYX_RGMII_EN BIT(17) +#define AR8327_PAD_CTRL_PHYX_MII_EN BIT(18) +#define AR8327_PAD_CTRL_RGMII_RXCLK_DELAY_SEL BITS(20, 2) +#define AR8327_PAD_CTRL_RGMII_RXCLK_DELAY_SEL_S 20 +#define AR8327_PAD_CTRL_RGMII_TXCLK_DELAY_SEL BITS(22, 2) +#define AR8327_PAD_CTRL_RGMII_TXCLK_DELAY_SEL_S 22 +#define AR8327_PAD_CTRL_RGMII_RXCLK_DELAY_EN BIT(24) +#define AR8327_PAD_CTRL_RGMII_TXCLK_DELAY_EN BIT(25) +#define AR8327_PAD_CTRL_RGMII_EN BIT(26) + +#define AR8327_REG_POS 0x10 +#define AR8327_POS_POWER_ON_SEL BIT(31) +#define AR8327_POS_LED_OPEN_EN BIT(24) +#define AR8327_POS_SERDES_AEN BIT(7) + +#define AR8327_REG_PAD_SGMII_CTRL 0xe0 +#define AR8327_PAD_SGMII_CTRL_MODE_CTRL BITS(22, 2) +#define AR8327_PAD_SGMII_CTRL_MODE_CTRL_S 22 +#define AR8327_PAD_SGMII_CTRL_EN_SD BIT(4) +#define AR8327_PAD_SGMII_CTRL_EN_TX BIT(3) +#define AR8327_PAD_SGMII_CTRL_EN_RX BIT(2) +#define AR8327_PAD_SGMII_CTRL_EN_PLL BIT(1) +#define AR8327_PAD_SGMII_CTRL_EN_LCKDT BIT(0) + +#define AR8327_REG_PAD_MAC_PWR_SEL 0x0e4 +#define 
AR8327_PAD_MAC_PWR_RGMII0_1_8V BIT(19) +#define AR8327_PAD_MAC_PWR_RGMII1_1_8V BIT(18) + +#define AR8327_NUM_PHYS 5 +#define AR8327_PORT_CPU 0 +#define AR8327_NUM_PORTS 8 +#define AR8327_MAX_VLANS 128 + +enum { + AR8327_PORT_SPEED_10M = 0, + AR8327_PORT_SPEED_100M = 1, + AR8327_PORT_SPEED_1000M = 2, + AR8327_PORT_SPEED_NONE = 3, +}; + +enum { + QCA_VER_AR8216 = 0x01, + QCA_VER_AR8236 = 0x03, + QCA_VER_AR8316 = 0x10, + QCA_VER_AR8327 = 0x12, + QCA_VER_AR8337 = 0x13 +}; + +/*poll mib per 2secs*/ +#define QCA_PHY_MIB_WORK_DELAY 20000 +#define QCA_MIB_ITEM_NUMBER 41 + +struct qca_phy_priv { + struct phy_device *phy; + struct switch_dev sw_dev; + a_uint8_t version; + a_uint8_t revision; + a_uint32_t (*mii_read)(a_uint32_t reg); + void (*mii_write)(a_uint32_t reg, a_uint32_t val); + void (*phy_dbg_write)(a_uint32_t dev_id, a_uint32_t phy_addr, + a_uint16_t dbg_addr, a_uint16_t dbg_data); + void (*phy_mmd_write)(a_uint32_t dev_id, a_uint32_t phy_addr, + a_uint16_t addr, a_uint16_t data); + void (*phy_write)(a_uint32_t dev_id, a_uint32_t phy_addr, + a_uint32_t reg, a_uint16_t data); + + bool init; + struct mutex reg_mutex; + struct mutex mib_lock; + struct delayed_work mib_dwork; + u64 *mib_counters; + /* dump buf */ + a_uint8_t buf[2048]; + + /* VLAN database */ + bool vlan; /* True: 1q vlan mode, False: port vlan mode */ + a_uint16_t vlan_id[AR8327_MAX_VLANS]; + a_uint8_t vlan_table[AR8327_MAX_VLANS]; + a_uint8_t vlan_tagged; + a_uint16_t pvid[AR8327_NUM_PORTS]; + +}; + + +#define qca_phy_priv_get(_dev) \ + container_of(_dev, struct qca_phy_priv, sw_dev) + +static int +miibus_get(void); +uint32_t +qca_ar8216_mii_read(int reg); +void +qca_ar8216_mii_write(int reg, uint32_t val); +static sw_error_t +qca_ar8327_phy_write(a_uint32_t dev_id, a_uint32_t phy_addr, + a_uint32_t reg, a_uint16_t data); +static void +qca_ar8327_mmd_write(a_uint32_t dev_id, a_uint32_t phy_addr, + a_uint16_t addr, a_uint16_t data); +static void +qca_ar8327_phy_dbg_write(a_uint32_t dev_id, a_uint32_t 
phy_addr, + a_uint16_t dbg_addr, a_uint16_t dbg_data); +#endif diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/ref/ref_api.h b/feeds/ipq807x/qca-ssdk-shell/src/include/ref/ref_api.h new file mode 100755 index 000000000..ec7b557b1 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/ref/ref_api.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _REF_API_H_ +#define _REF_API_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#ifdef IN_VLAN +#define REF_VLAN_API \ + SW_API_DEF(SW_API_LAN_WAN_CFG_SET, qca_lan_wan_cfg_set), \ + SW_API_DEF(SW_API_LAN_WAN_CFG_GET, qca_lan_wan_cfg_get), + +#define REF_VLAN_API_PARAM \ + SW_API_DESC(SW_API_LAN_WAN_CFG_SET) \ + SW_API_DESC(SW_API_LAN_WAN_CFG_GET) + +#else +#define REF_VLAN_API +#define REF_VLAN_API_PARAM +#endif + +#define SSDK_REF_API \ + REF_VLAN_API + +#define SSDK_REF_PARAM \ + REF_VLAN_API_PARAM + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _REF_API_H_ */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/ref/ref_vlan.h b/feeds/ipq807x/qca-ssdk-shell/src/include/ref/ref_vlan.h new file mode 100755 index 000000000..d8e124ba5 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/ref/ref_vlan.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +/** + * @defgroup ref_vlan REF_VLAN + * @{ + */ +#ifndef _REF_VLAN_H +#define _REF_VLAN_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "sw.h" +#include "fal_type.h" + +typedef struct { + fal_port_t port_id; /* port id */ + a_uint32_t vid; /* vlan id */ + a_bool_t is_wan_port; /* belong to wan port */ + a_bool_t valid; /* valid or not */ +} qca_lan_wan_port_info; + +typedef struct { + a_bool_t lan_only_mode; + qca_lan_wan_port_info v_port_info[SW_MAX_NR_PORT]; +} qca_lan_wan_cfg_t; + +sw_error_t +qca_lan_wan_cfg_set(a_uint32_t dev_id, qca_lan_wan_cfg_t *lan_wan_cfg); + +sw_error_t +qca_lan_wan_cfg_get(a_uint32_t dev_id, qca_lan_wan_cfg_t *lan_wan_cfg); + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _REF_VLAN_H */ +/** + * @} + */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_lock.h b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_lock.h new file mode 100755 index 000000000..d09f012f4 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_lock.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _AOS_LOCK_H +#define _AOS_LOCK_H + + +#ifdef KERNEL_MODULE +#include "sal/os/linux/aos_lock_pvt.h" +#else +#include "sal/os/linux_user/aos_lock_pvt.h" +#endif + + +typedef aos_lock_pvt_t aos_lock_t; + + +#define aos_lock_init(lock) __aos_lock_init(lock) + + +#define aos_lock(lock) __aos_lock(lock) + + +#define aos_unlock(lock) __aos_unlock(lock) + + +#define aos_irq_save(flags) __aos_irq_save(flags) + + +#define aos_irq_restore(flags) __aos_irq_restore(flags) + + +#define aos_default_unlock __aos_default_unlock + + +#endif diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_mem.h b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_mem.h new file mode 100755 index 000000000..0006bbc52 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_mem.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2014,2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _AOS_MEM_H +#define _AOS_MEM_H + +#include "aos_types.h" +#ifdef KERNEL_MODULE +#include "aos_mem_pvt.h" +#else +#include "aos_mem_pvt.h" +#endif + +/** + * @g aos_mem mem + * @{ + * + * @ig shim_ext + */ + +/** + * @brief Allocate a memory buffer. Note it's a non-blocking call. + * This call can block. 
+ * + * @param[in] size buffer size + * + * @return Buffer pointer or NULL if there's not enough memory. + */ +static inline void * +aos_mem_alloc(aos_size_t size) +{ + return __aos_mem_alloc(size); +} + +/** + * @brief Free malloc'ed buffer + * + * @param[in] buf buffer pointer allocated by aos_alloc() + * @param[in] size buffer size + */ +static inline void +aos_mem_free(void *buf) +{ + __aos_mem_free(buf); +} + +/** + * @brief Move a memory buffer + * + * @param[in] dst destination address + * @param[in] src source address + * @param[in] size buffer size + */ +static inline void +aos_mem_copy(void *dst, void *src, aos_size_t size) +{ + __aos_mem_copy(dst, src, size); +} + +/** + * @brief Fill a memory buffer + * + * @param[in] buf buffer to be filled + * @param[in] b byte to fill + * @param[in] size buffer size + */ +static inline void +aos_mem_set(void *buf, a_uint8_t b, aos_size_t size) +{ + __aos_mem_set(buf, b, size); +} + +/** + * @brief Zero a memory buffer + * + * @param[in] buf buffer to be zeroed + * @param[in] size buffer size + */ +static inline void +aos_mem_zero(void *buf, aos_size_t size) +{ + __aos_mem_zero(buf, size); +} + +/** + * @brief Compare two memory buffers + * + * @param[in] buf1 first buffer + * @param[in] buf2 second buffer + * @param[in] size buffer size + * + * @retval 0 equal + * @retval 1 not equal + */ +static inline int +aos_mem_cmp(void *buf1, void *buf2, aos_size_t size) +{ + return __aos_mem_cmp(buf1, buf2, size); +} + +/** + * @} + */ + +#endif diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_timer.h b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_timer.h new file mode 100755 index 000000000..81aa0bf07 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_timer.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +#ifndef _AOS_TIMER_H +#define _AOS_TIMER_H + +#include "sal/os/aos_types.h" +#ifdef KERNEL_MODULE +#include "sal/os/linux/aos_timer_pvt.h" +#else +#include "sal/os/linux_user/aos_timer_pvt.h" +#endif + + +typedef __aos_timer_t aos_timer_t; + + +/* + * Delay in microseconds + */ +static inline void +aos_udelay(int usecs) +{ + return __aos_udelay(usecs); +} + +/* + * Delay in milliseconds. + */ +static inline void +aos_mdelay(int msecs) +{ + return __aos_mdelay(msecs); +} + + +#endif + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_types.h b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_types.h new file mode 100755 index 000000000..a80e5d057 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/aos_types.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2014,2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _AOS_TYPES_H +#define _AOS_TYPES_H + +#ifdef KERNEL_MODULE +#include "aos_types_pvt.h" +#else +#include "aos_types_pvt.h" +#endif + +#ifndef NULL +#define NULL 0 +#endif + +/** + * @g aos_types types + * @{ + * + * @ig shim_ext + */ +/* + *@ basic data types. + */ +typedef enum +{ + A_FALSE, + A_TRUE +} a_bool_t; + +typedef __a_uint8_t a_uint8_t; +typedef __a_int8_t a_int8_t; +typedef __a_uint16_t a_uint16_t; +typedef __a_int16_t a_int16_t; +typedef __a_uint32_t a_uint32_t; +typedef __a_int32_t a_int32_t; +typedef __a_uint64_t a_uint64_t; +typedef __a_int64_t a_int64_t; +typedef unsigned long a_ulong_t; +typedef char a_char_t; + +typedef void * acore_t; + +/** + * @brief Platform/bus generic handle. Used for bus specific functions. + */ +typedef __aos_device_t aos_device_t; + +/** + * @brief size of an object + */ +typedef __aos_size_t aos_size_t; + +/** + * @brief Generic status to be used by acore. + */ +typedef enum +{ + A_STATUS_OK, + A_STATUS_FAILED, + A_STATUS_ENOENT, + A_STATUS_ENOMEM, + A_STATUS_EINVAL, + A_STATUS_EINPROGRESS, + A_STATUS_ENOTSUPP, + A_STATUS_EBUSY, +} a_status_t; + +/* + * An ecore needs to provide a table of all pci device/vendor id's it + * supports + * + * This table should be terminated by a NULL entry , i.e. 
{0} + */ +typedef struct +{ + a_uint32_t vendor; + a_uint32_t device; + a_uint32_t subvendor; + a_uint32_t subdevice; +} aos_pci_dev_id_t; + +#define AOS_PCI_ANY_ID (~0) + +/* + * Typically core's can use this macro to create a table of various device + * ID's + */ +#define AOS_PCI_DEVICE(_vendor, _device) \ + (_vendor), (_device), AOS_PCI_ANY_ID, AOS_PCI_ANY_ID + + +typedef __aos_iomem_t aos_iomem_t; +/* + * These define the hw resources the OS has allocated for the device + * Note that start defines a mapped area. + */ +typedef enum +{ + AOS_RESOURCE_TYPE_MEM, + AOS_RESOURCE_TYPE_IO, +} aos_resource_type_t; + +typedef struct +{ + a_uint32_t start; + a_uint32_t end; + aos_resource_type_t type; +} aos_resource_t; + +#define AOS_DEV_ID_TABLE_MAX 256 + +typedef union +{ + aos_pci_dev_id_t *pci; + void *raw; +} aos_bus_reg_data_t; + +typedef void *aos_attach_data_t; + +#define AOS_REGIONS_MAX 5 + +typedef enum +{ + AOS_BUS_TYPE_PCI = 1, + AOS_BUS_TYPE_GENERIC, +} aos_bus_type_t; + +typedef enum +{ + AOS_IRQ_NONE, + AOS_IRQ_HANDLED, +} aos_irq_resp_t; + +typedef enum +{ + AOS_DMA_MASK_32BIT, + AOS_DMA_MASK_64BIT, +} aos_dma_mask_t; + + +/** + * @brief DMA directions + */ +typedef enum +{ + AOS_DMA_TO_DEVICE = 0, /**< Data is transfered from device to memory */ + AOS_DMA_FROM_DEVICE, /**< Data is transfered from memory to device */ +} aos_dma_dir_t; + +/* + * Protoypes shared between public and private headers + */ + + +/* + * work queue(kernel thread) function callback + */ +typedef void (*aos_work_func_t)(void *); + +/** + * @brief Prototype of the critical region function that is to be + * executed with spinlock held and interrupt disalbed + */ +typedef a_bool_t (*aos_irqlocked_func_t)(void *); + +/** + * @brief Prototype of timer function + */ +typedef void (*aos_timer_func_t)(void *); + +#endif diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_lock_pvt.h b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_lock_pvt.h new file 
mode 100755 index 000000000..a8894761f --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_lock_pvt.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _AOS_LOCK_PVT_H +#define _AOS_LOCK_PVT_H + +#include +#include "sal/os/aos_types.h" + + +typedef pthread_mutex_t aos_lock_pvt_t; + + +#define __aos_lock_init(lock) \ + pthread_mutex_init(lock, NULL); \ + pthread_mutexattr_setpshared(lock, PTHREAD_PROCESS_SHARED) + + +#define __aos_lock(lock) pthread_mutex_lock(lock) + + +#define __aos_unlock(lock) pthread_mutex_unlock(lock) + + +#define __aos_irq_save(flags) + + +#define __aos_irq_restore(flags) + + +#define __aos_default_unlock PTHREAD_MUTEX_INITIALIZER + + +#endif /*_AOS_LOCK_PVT_H*/ + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_mem_pvt.h b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_mem_pvt.h new file mode 100755 index 000000000..281e65dab --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_mem_pvt.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _AOS_MEM_PVT_H +#define _AOS_MEM_PVT_H + +#include +#include + +static inline void *__aos_mem_alloc(aos_size_t size) +{ + return (malloc(size)); +} + +static inline void __aos_mem_free(void *buf) +{ + free(buf); +} + +/* move a memory buffer */ +static inline void +__aos_mem_copy(void *dst, void *src, aos_size_t size) +{ + memcpy(dst, src, size); +} + +/* set a memory buffer */ +static inline void +__aos_mem_set(void *buf, a_uint8_t b, aos_size_t size) +{ + memset(buf, b, size); +} + +/* zero a memory buffer */ +static inline void +__aos_mem_zero(void *buf, aos_size_t size) +{ + memset(buf, 0, size); +} + +/* compare two memory buffers */ +static inline int +__aos_mem_cmp(void *buf1, void *buf2, aos_size_t size) +{ + return (memcmp(buf1, buf2, size) == 0) ? 0 : 1; +} + + + +#endif /*_AOS_MEM_PVT_H*/ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_timer_pvt.h b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_timer_pvt.h new file mode 100755 index 000000000..58055eb71 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_timer_pvt.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _AOS_TIMER_PVT_H +#define _AOS_TIMER_PVT_H + +#include + +typedef int __aos_timer_t; + +static inline void +__aos_udelay(int usecs) +{ + usleep(usecs); + return; +} + +static inline void +__aos_mdelay(int msecs) +{ + usleep(1000*msecs); + return; +} + +#endif /*_AOS_TIMER_PVT_H*/ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_types_pvt.h b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_types_pvt.h new file mode 100755 index 000000000..fc71a871f --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/os/linux_user/aos_types_pvt.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _AOS_PVTTYPES_H +#define _AOS_PVTTYPES_H + +#include +#include +/* + * Private definitions of general data types + */ + +typedef void* __aos_device_t; +typedef int __aos_size_t; +typedef int __aos_iomem_t; + +typedef __u8 __a_uint8_t; +typedef __s8 __a_int8_t; +typedef __u16 __a_uint16_t; +typedef __s16 __a_int16_t; +typedef __u32 __a_uint32_t; +typedef __s32 __a_int32_t; +typedef __u64 __a_uint64_t; +typedef __s64 __a_int64_t; + + +#define aos_printk printf + + +#endif diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/sal/sd/linux/uk_interface/sw_api_us.h b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/sd/linux/uk_interface/sw_api_us.h new file mode 100755 index 000000000..d589d59fd --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/sd/linux/uk_interface/sw_api_us.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2014, 2017-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#ifndef _SW_API_US_H +#define _SW_API_US_H + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +#include "sw.h" + + sw_error_t sw_uk_init(a_uint32_t nl_prot); + + sw_error_t sw_uk_cleanup(void); + + sw_error_t sw_uk_if(unsigned long arg_val[SW_MAX_API_PARAM]); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _SW_API_INTERFACE_H */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/sal/sd/sd.h b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/sd/sd.h new file mode 100755 index 000000000..7187f482a --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/sal/sd/sd.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#ifndef _SD_H_ +#define _SD_H_ + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + + sw_error_t + sd_reg_mdio_set(a_uint32_t dev_id, a_uint32_t phy, a_uint32_t reg, + a_uint16_t data); + + sw_error_t + sd_reg_mdio_get(a_uint32_t dev_id, a_uint32_t phy, a_uint32_t reg, + a_uint16_t * data); + + sw_error_t + sd_reg_hdr_set(a_uint32_t dev_id, a_uint32_t reg_addr, + a_uint8_t * reg_data, a_uint32_t len); + + sw_error_t + sd_reg_hdr_get(a_uint32_t dev_id, a_uint32_t reg_addr, + a_uint8_t * reg_data, a_uint32_t len); + sw_error_t + sd_reg_uniphy_set(a_uint32_t dev_id, a_uint32_t index, + a_uint32_t reg_addr, a_uint8_t * reg_data, a_uint32_t len); + + sw_error_t + sd_reg_uniphy_get(a_uint32_t dev_id, a_uint32_t index, + a_uint32_t reg_addr, a_uint8_t * reg_data, a_uint32_t len); + + void + sd_reg_mii_set(a_uint32_t reg, a_uint32_t val); + + a_uint32_t + sd_reg_mii_get(a_uint32_t reg); + + sw_error_t sd_init(a_uint32_t dev_id, ssdk_init_cfg * cfg); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* _SD_H_ */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell.h b/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell.h new file mode 100755 index 000000000..1575e1b9b --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SW_SHELL_H +#define _SW_SHELL_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include "sw.h" +#include "sw_api.h" +#include "ssdk_init.h" + + extern a_ulong_t *ioctl_buf; + extern ssdk_init_cfg init_cfg; + extern ssdk_cfg_t ssdk_cfg; + +#define IOCTL_BUF_SIZE 2048 +#define CMDSTR_BUF_SIZE 1024 +#define CMDSTR_ARGS_MAX 128 +#define dprintf cmd_print + extern sw_error_t cmd_exec_api(a_ulong_t *arg_val); + extern void cmd_print(char *fmt, ...); + void cmd_print_error(sw_error_t rtn); + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _SW_SHELL_H */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_config.h b/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_config.h new file mode 100755 index 000000000..8ddab7b68 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_config.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SHELL_CONFIG_H_ +#define _SHELL_CONFIG_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "sw.h" +#include "sw_ioctl.h" +#include "sw_api.h" + +#define SW_CMD_SET_DEVID (SW_API_MAX + 1) +#define SW_CMD_VLAN_SHOW (SW_API_MAX + 2) +#define SW_CMD_FDB_SHOW (SW_API_MAX + 3) +#define SW_CMD_RESV_FDB_SHOW (SW_API_MAX + 4) +#define SW_CMD_HOST_SHOW (SW_API_MAX + 5) +#define SW_CMD_NAT_SHOW (SW_API_MAX + 6) +#define SW_CMD_NAPT_SHOW (SW_API_MAX + 7) +#define SW_CMD_INTFMAC_SHOW (SW_API_MAX + 8) +#define SW_CMD_PUBADDR_SHOW (SW_API_MAX + 9) +#define SW_CMD_FLOW_SHOW (SW_API_MAX + 10) +#define SW_CMD_HOST_IPV4_SHOW (SW_API_MAX + 11) +#define SW_CMD_HOST_IPV6_SHOW (SW_API_MAX + 12) +#define SW_CMD_HOST_IPV4M_SHOW (SW_API_MAX + 13) +#define SW_CMD_HOST_IPV6M_SHOW (SW_API_MAX + 14) +#define SW_CMD_CTRLPKT_SHOW (SW_API_MAX + 15) +#define SW_CMD_FLOW_IPV43T_SHOW (SW_API_MAX + 16) +#define SW_CMD_FLOW_IPV63T_SHOW (SW_API_MAX + 17) +#define SW_CMD_FLOW_IPV45T_SHOW (SW_API_MAX + 18) +#define SW_CMD_FLOW_IPV65T_SHOW (SW_API_MAX + 19) +#define SW_CMD_PT_VLAN_TRANS_ADV_SHOW (SW_API_MAX + 20) +#define SW_CMD_MAX (SW_API_MAX + 21) + +#define MAX_SUB_CMD_DES_NUM 120 + +#define SW_API_INVALID 0 + + struct sub_cmd_des_t + { + char *sub_name; + char *sub_act; + char *sub_memo; + char *sub_usage; + int sub_api; + sw_error_t (*sub_func) (); + }; + struct cmd_des_t + { + char *name; + char *memo; + struct sub_cmd_des_t sub_cmd_des[MAX_SUB_CMD_DES_NUM]; + }; + extern struct cmd_des_t gcmd_des[]; + +#define GCMD_DES gcmd_des + +#define GCMD_NAME(cmd_nr) GCMD_DES[cmd_nr].name +#define GCMD_MEMO(cmd_nr) GCMD_DES[cmd_nr].memo + +#define 
GCMD_SUB_NAME(cmd_nr, sub_cmd_nr) GCMD_DES[cmd_nr].sub_cmd_des[sub_cmd_nr].sub_name +#define GCMD_SUB_ACT(cmd_nr, sub_cmd_nr) GCMD_DES[cmd_nr].sub_cmd_des[sub_cmd_nr].sub_act +#define GCMD_SUB_MEMO(cmd_nr, sub_cmd_nr) GCMD_DES[cmd_nr].sub_cmd_des[sub_cmd_nr].sub_memo +#define GCMD_SUB_USAGE(cmd_nr, sub_cmd_nr) GCMD_DES[cmd_nr].sub_cmd_des[sub_cmd_nr].sub_usage +#define GCMD_SUB_API(cmd_nr, sub_cmd_nr) GCMD_DES[cmd_nr].sub_cmd_des[sub_cmd_nr].sub_api +#define GCMD_SUB_FUNC(cmd_nr, sub_cmd_nr) GCMD_DES[cmd_nr].sub_cmd_des[sub_cmd_nr].sub_func + +#define GCMD_DESC_VALID(cmd_nr) GCMD_NAME(cmd_nr) +#define GCMD_SUB_DESC_VALID(cmd_nr, sub_cmd_nr) GCMD_SUB_API(cmd_nr, sub_cmd_nr) + + +#define GCMD_DESC_NO_MATCH 0xffffffff + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _SHELL_CONFIG_H_ */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_io.h b/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_io.h new file mode 100755 index 000000000..2121db881 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_io.h @@ -0,0 +1,1039 @@ +/* + * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +/*qca808x_start*/ +#ifndef _SHELL_IO_H +#define _SHELL_IO_H + +#include "sw.h" +#include "sw_api.h" +#include "fal.h" +/*qca808x_end*/ +#include "ref_vlan.h" + +/*qca808x_start*/ +#define SW_TYPE_DEF(type, parser, show) {type, parser, show} +typedef struct +{ + sw_data_type_e data_type; + sw_error_t(*param_check) (); + void (*show_func) (); +} sw_data_type_t; + +void set_talk_mode(int mode); +int get_talk_mode(void); +void set_full_cmdstrp(char **cmdstrp); +int +get_jump(void); +sw_data_type_t * cmd_data_type_find(sw_data_type_e type); +void cmd_strtol(char *str, a_uint32_t * arg_val); + +sw_error_t __cmd_data_check_complex(char *info, char *defval, char *usage, + sw_error_t(*chk_func)(), void *arg_val, + a_uint32_t size); + +sw_error_t cmd_data_check_portid(char *cmdstr, fal_port_t * val, a_uint32_t size); + +sw_error_t cmd_data_check_portmap(char *cmdstr, fal_pbmp_t * val, a_uint32_t size); +sw_error_t cmd_data_check_confirm(char *cmdstr, a_bool_t def, a_bool_t * val, a_uint32_t size); + +sw_error_t cmd_data_check_uint64(char *cmd_str, a_uint64_t * arg_val, + a_uint32_t size); +sw_error_t cmd_data_check_uint32(char *cmd_str, a_uint32_t * arg_val, + a_uint32_t size); +sw_error_t cmd_data_check_uint16(char *cmd_str, a_uint32_t * arg_val, + a_uint32_t size); +sw_error_t cmd_data_check_uint8(char *cmd_str, a_uint32_t * arg_val, + a_uint32_t size); +sw_error_t cmd_data_check_enable(char *cmd_str, a_uint32_t * arg_val, + a_uint32_t size); +sw_error_t cmd_data_check_pbmp(char *cmd_str, a_uint32_t * arg_val, + a_uint32_t size); +sw_error_t cmd_data_check_duplex(char *cmd_str, a_uint32_t * arg_val, + a_uint32_t size); +sw_error_t cmd_data_check_speed(char *cmd_str, a_uint32_t * arg_val, + a_uint32_t size); +/*qca808x_end*/ +sw_error_t cmd_data_check_1qmode(char *cmd_str, a_uint32_t * arg_val, + a_uint32_t size); +sw_error_t cmd_data_check_egmode(char *cmd_str, a_uint32_t * arg_val, + a_uint32_t size); +/*qca808x_start*/ +sw_error_t cmd_data_check_capable(char 
*cmd_str, a_uint32_t * arg_val, + a_uint32_t size); +/*qca808x_end*/ +sw_error_t cmd_data_check_fdbentry(char *cmdstr, void *val, a_uint32_t size); +sw_error_t cmd_data_check_maclimit_ctrl(char *cmdstr, void *val, a_uint32_t size); +/*qca808x_start*/ +sw_error_t cmd_data_check_macaddr(char *cmdstr, void *val, a_uint32_t size); + +void cmd_data_print_uint64(a_uint8_t * param_name, a_uint64_t * buf, + a_uint32_t size); +void cmd_data_print_uint32(a_char_t * param_name, a_uint32_t * buf, + a_uint32_t size); +void cmd_data_print_uint16(a_char_t * param_name, a_uint32_t * buf, + a_uint32_t size); +void cmd_data_print_uint8(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +void cmd_data_print_enable(a_char_t * param_name, a_uint32_t * buf, + a_uint32_t size); +void cmd_data_print_pbmp(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +void cmd_data_print_duplex(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +void cmd_data_print_speed(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +/*qca808x_end*/ +sw_error_t cmd_data_check_vlan(char *cmdstr, fal_vlan_t * val, a_uint32_t size); +void cmd_data_print_vlan(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +sw_error_t cmd_data_check_lan_wan_cfg(char *cmd_str, void *arg_val, a_uint32_t size); + +void cmd_data_print_lan_wan_cfg(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void cmd_data_print_mib(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +void cmd_data_print_mib_cntr(a_uint8_t * param_name, a_uint64_t * buf, + a_uint32_t size); +void cmd_data_print_xgmib(a_uint8_t * param_name, a_uint64_t * buf, + a_uint64_t size); +void cmd_data_print_1qmode(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +void cmd_data_print_egmode(a_char_t * param_name, a_uint32_t * buf, + a_uint32_t size); +/*qca808x_start*/ +void cmd_data_print_capable(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +/*qca808x_end*/ +void 
cmd_data_print_maclimit_ctrl(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +/*qca808x_start*/ +void cmd_data_print_macaddr(a_char_t * param_name, a_uint32_t * buf, + a_uint32_t size); +/*qca808x_end*/ +sw_error_t cmd_data_check_qos_sch(char *cmdstr, fal_sch_mode_t * val, + a_uint32_t size); +void cmd_data_print_qos_sch(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +sw_error_t cmd_data_check_qos_pt(char *cmdstr, fal_qos_mode_t * val, + a_uint32_t size); +void cmd_data_print_qos_pt(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +sw_error_t cmd_data_check_storm(char *cmdstr, fal_storm_type_t * val, + a_uint32_t size); +void cmd_data_print_storm(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +sw_error_t cmd_data_check_stp_state(char *cmdstr, fal_stp_state_t * val, + a_uint32_t size); +void cmd_data_print_stp_state(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +sw_error_t cmd_data_check_leaky(char *cmdstr, fal_leaky_ctrl_mode_t * val, + a_uint32_t size); +void cmd_data_print_leaky(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); + +sw_error_t cmd_data_check_uinta(char *cmdstr, a_uint32_t * val, + a_uint32_t size); +void cmd_data_print_uinta(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); +sw_error_t cmd_data_check_maccmd(char *cmdstr, fal_fwd_cmd_t * val, + a_uint32_t size); +void cmd_data_print_maccmd(a_char_t * param_name, a_uint32_t * buf, + a_uint32_t size); +sw_error_t cmd_data_check_flowcmd(char *cmdstr, fal_default_flow_cmd_t * val, + a_uint32_t size); +void cmd_data_print_flowcmd(a_char_t *param_name, a_uint32_t * buf, + a_uint32_t size); +sw_error_t cmd_data_check_flowtype(char *cmdstr, fal_flow_type_t * val, + a_uint32_t size); +void cmd_data_print_flowtype(a_char_t *param_name, a_uint32_t * buf, + a_uint32_t size); +sw_error_t cmd_data_check_aclrule(char *info, void *val, a_uint32_t size); + +void cmd_data_print_aclrule(a_char_t * param_name, a_uint32_t * 
buf, + a_uint32_t size); + +sw_error_t +cmd_data_check_ledpattern(char *info, void * val, a_uint32_t size); + +void +cmd_data_print_ledpattern(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); + +sw_error_t +cmd_data_check_mirr_analy_cfg(char *info, void *val, a_uint32_t size); +void +cmd_data_print_mirr_analy_cfg(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +sw_error_t +cmd_data_check_mirr_direction(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); +void +cmd_data_print_mirr_direction(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +sw_error_t +cmd_data_check_invlan_mode(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); +void +cmd_data_print_invlan_mode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +sw_error_t +cmd_data_check_vlan_propagation(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); +void +cmd_data_print_vlan_propagation(a_char_t * param_name, a_uint32_t * buf, a_uint32_t size); +sw_error_t +cmd_data_check_vlan_translation(char *info, fal_vlan_trans_entry_t *val, a_uint32_t size); +void +cmd_data_print_vlan_translation(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +sw_error_t +cmd_data_check_qinq_mode(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); +void +cmd_data_print_qinq_mode(a_char_t * param_name, a_uint32_t * buf, a_uint32_t size); +sw_error_t +cmd_data_check_qinq_role(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); +void +cmd_data_print_qinq_role(a_char_t * param_name, a_uint32_t * buf, a_uint32_t size); +/*qca808x_start*/ +void +cmd_data_print_cable_status(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +void +cmd_data_print_cable_len(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +void +cmd_data_print_ssdk_cfg(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +/*qca808x_end*/ +sw_error_t +cmd_data_check_hdrmode(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +void +cmd_data_print_hdrmode(a_uint8_t * 
param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_fdboperation(char *cmd_str, void * val, a_uint32_t size); + +sw_error_t +cmd_data_check_pppoe(char *cmd_str, void * val, a_uint32_t size); + +sw_error_t +cmd_data_check_pppoe_less(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_pppoe(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_udf_type(char *cmdstr, fal_acl_udf_type_t * arg_val, a_uint32_t size); + +void +cmd_data_print_udf_type(a_char_t * param_name, a_uint32_t * buf, + a_uint32_t size); + +sw_error_t +cmd_data_check_udf_pkt_type(a_char_t *cmdstr, fal_acl_udf_pkt_type_t * arg_val, a_uint32_t size); + +void +cmd_data_print_udf_pkt_type(a_char_t * param_name, a_uint32_t * buf, + a_uint32_t size); + +sw_error_t +cmd_data_check_host_entry(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_host_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_arp_learn_mode(char *cmd_str, fal_arp_learn_mode_t * arg_val, + a_uint32_t size); + +void +cmd_data_print_arp_learn_mode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ip_guard_mode(char *cmd_str, fal_source_guard_mode_t * arg_val, a_uint32_t size); + +void +cmd_data_print_ip_guard_mode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_nat_entry(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_nat_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_napt_entry(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_napt_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_flow_entry(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_flow_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_napt_mode(char *cmd_str, 
fal_napt_mode_t * arg_val, a_uint32_t size); + +void +cmd_data_print_napt_mode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_intf_mac_entry(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_intf_mac_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ip4addr(char *cmdstr, void * val, a_uint32_t size); + +void +cmd_data_print_ip4addr(a_char_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ip6addr(char *cmdstr, void * val, a_uint32_t size); + +void +cmd_data_print_ip6addr(a_char_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_pub_addr_entry(char *cmd_str, void * val, a_uint32_t size); + + +void +cmd_data_print_pub_addr_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + + +sw_error_t +cmd_data_check_egress_shaper(char *cmd_str, void * val, a_uint32_t size); + + +void +cmd_data_print_egress_shaper(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + + +sw_error_t +cmd_data_check_acl_policer(char *cmd_str, void * val, a_uint32_t size); + + +void +cmd_data_print_acl_policer(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + + +sw_error_t +cmd_data_check_port_policer(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_port_policer(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_mac_config(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_mac_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +sw_error_t +cmd_data_check_phy_config(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_phy_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void cmd_data_print_fdbentry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_fdb_smode(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +void 
+cmd_data_print_fdb_smode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_fdb_ctrl_mode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +sw_error_t +cmd_data_check_fx100_config(char *cmd_str, void * arg_val, a_uint32_t size); + +void +cmd_data_print_fx100_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_multi(char *info, void *val, a_uint32_t size); +void +cmd_data_print_multi(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_sec_mac(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_sec_ip(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_sec_ip4(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_sec_ip6(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_sec_tcp(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_sec_udp(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_sec_icmp4(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_sec_icmp6(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_remark_entry(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_remark_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_default_route_entry(char *cmd_str, void * val, a_uint32_t size); + +sw_error_t +cmd_data_check_u_qmap(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_default_route_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_host_route_entry(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_host_route_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_arp_sg(char *cmd_str, void * 
val, a_uint32_t size); + +void +cmd_data_print_arp_sg(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_intf(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_intf(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_flow_age(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_flow_age(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_flow_ctrl(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_flow_ctrl(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ac_static_thresh(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_ac_static_thresh(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ac_dynamic_thresh(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_ac_dynamic_thresh(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ac_group_buff(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_ac_group_buff(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ac_ctrl(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_ac_ctrl(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ac_obj(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_ac_obj(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_vsi_intf(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_vsi_intf(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ip_pub(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_ip_pub(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ip_mcmode(char *cmd_str, void * val, a_uint32_t size); + 
+void +cmd_data_print_ip_mcmode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ip_portmac(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_ip_portmac(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ip_sg(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_ip_sg(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_nexthop(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_nexthop(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_network_route(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_network_route(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ip_wcmp_entry(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_ip_wcmp_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ip4_rfs_entry(char *cmd_str, void * val, a_uint32_t size); +sw_error_t +cmd_data_check_ip6_rfs_entry(char *cmd_str, void * val, a_uint32_t size); +sw_error_t +cmd_data_check_flow_age_entry(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_flow_age_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +sw_error_t +cmd_data_check_flow_ctrl_entry(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_flow_ctrl_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_flow(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_flow(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_flow_host(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_flow_host(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ip_global(char *cmd_str, void * val, a_uint32_t size); + +void 
+cmd_data_print_ip_global(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_flow_global(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_flow_global(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_l3_parser(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_l3_parser(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_l4_parser(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_l4_parser(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_exp_ctrl(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_exp_ctrl(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_port_group(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_port_group(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_port_pri(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_port_pri(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_port_remark(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_port_remark(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_cosmap(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_cosmap(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_queue_scheduler(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_queue_scheduler(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ring_queue(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_ring_queue(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_bm_static_thresh(char *cmd_str, void * val, a_uint32_t size); + +void 
+cmd_data_print_bm_static_thresh(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_queue_cnt(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_bm_dynamic_thresh(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_bm_dynamic_thresh(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_bm_port_counter(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_flow_cookie(char *cmd_str, void * val, a_uint32_t size); + +sw_error_t +cmd_data_check_fdb_rfs(char *cmd_str, void * val, a_uint32_t size); +sw_error_t +cmd_data_check_flow_rfs(char *cmd_str, void * val, a_uint32_t size); +/*qca808x_start*/ +sw_error_t +cmd_data_check_crossover_mode(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_crossover_status(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); +/*qca808x_end*/ +sw_error_t +cmd_data_check_prefer_medium(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_fiber_mode(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); +/*qca808x_start*/ +sw_error_t +cmd_data_check_interface_mode(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); +/*qca808x_end*/ +sw_error_t +cmd_data_check_port_eee_config(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_port_eee_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +sw_error_t +cmd_data_check_src_filter_config(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +void +cmd_data_print_src_filter_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_switch_port_loopback_config(char *cmd_str, void * val, + a_uint32_t size); +void +cmd_data_print_switch_port_loopback_config(a_uint8_t * param_name, + a_uint32_t * buf, a_uint32_t size); +sw_error_t +cmd_data_check_newadr_lrn(char *cmd_str, void * val, a_uint32_t size); + +void 
+cmd_data_print_newaddr_lrn_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_stamove(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_stamove_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_vsi_member(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_vsi_member_entry(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_vsi_counter(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_mtu_entry(char *cmd_str, void * val, a_uint32_t size); + +sw_error_t +cmd_data_check_mru_entry(char *cmd_str, void * val, a_uint32_t size); +/*qca808x_start*/ +void +cmd_data_print_crossover_mode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_crossover_status(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +/*qca808x_end*/ + +void +cmd_data_print_prefer_medium(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_fiber_mode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +/*qca808x_start*/ + +void +cmd_data_print_interface_mode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_counter_info(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_register_info(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_phy_register_info(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_debug_register_info(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); +/*qca808x_end*/ + +void +cmd_data_print_mtu_info(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_mru_info(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_global_qinqmode(char *info, void *val, a_uint32_t size); + +void 
+cmd_data_print_global_qinqmode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_port_qinqmode(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_port_qinqmode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_tpid(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_tpid(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ingress_filter(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ingress_filter(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_port_default_vid_en(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_port_default_vid_en(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_port_vlan_tag(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_port_vlan_tag(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_port_vlan_direction(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +void +cmd_data_print_port_vlan_direction(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_port_vlan_translation_adv_rule(char *info, fal_vlan_trans_adv_rule_t *val, a_uint32_t size); + +void +cmd_data_print_port_vlan_translation_adv_rule(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_port_vlan_translation_adv_action(char *info, fal_vlan_trans_adv_action_t *val, a_uint32_t size); + +void +cmd_data_print_port_vlan_translation_adv_action(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_port_vlan_counter(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_tag_propagation(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_tag_propagation(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t 
+cmd_data_check_egress_vsi_tag(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_egress_vsi_tag(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_egress_mode(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_egress_mode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ctrlpkt_profile(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ctrlpkt_profile(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_servcode_config(char *info, fal_servcode_config_t *val, a_uint32_t size); + +void +cmd_data_print_servcode_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_rss_hash_mode(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_rss_hash_config(char *info, fal_rss_hash_config_t *val, a_uint32_t size); + +void +cmd_data_print_rss_hash_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_port_policer_config(char *cmd_str, void * val, a_uint32_t size); + +sw_error_t +cmd_data_check_policer_cmd_config(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_port_policer_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_policer_cmd_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_acl_policer_config(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_acl_policer_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_policer_counter_infor(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_policer_global_counter_infor(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_port_scheduler_resource(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t 
+cmd_data_check_port_shaper_token_config(char *cmd_str, void * val, a_uint32_t size); + +sw_error_t +cmd_data_check_shaper_token_config(char *cmd_str, void * val, a_uint32_t size); + +sw_error_t +cmd_data_check_port_shaper_config(char *cmd_str, void * val, a_uint32_t size); + +sw_error_t +cmd_data_check_shaper_config(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_port_shaper_token_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_shaper_token_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_port_shaper_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_shaper_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_module(char *cmd_str, a_uint32_t * arg_val, a_uint32_t size); + +void +cmd_data_print_module(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_func_ctrl(char *cmd_str, void * val, a_uint32_t size); + +void +cmd_data_print_func_ctrl(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +void +cmd_data_print_module_func_ctrl(a_uint32_t module, fal_func_ctrl_t *p); + +sw_error_t +cmd_data_check_debug_port_counter_status(char *info, fal_counter_en_t *val, a_uint32_t size); + +void +cmd_data_print_debug_port_counter_status(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_config(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_config(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_reference_clock(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_reference_clock(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_rx_timestamp_mode(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_rx_timestamp_mode(a_uint8_t * param_name, a_uint32_t * buf, 
a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_direction(char *info, void *val, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_pkt_info(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_pkt_info(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_time(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_time(a_char_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_grandmaster_mode(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_grandmaster_mode(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_security(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_security(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_pps_sig_ctrl(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_pps_sig_ctrl(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_asym_correction(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_asym_correction(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_waveform(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_waveform(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_tod_uart(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_tod_uart(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_enhanced_timestamp_engine(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_enhanced_timestamp_engine(a_uint8_t * param_name, a_uint32_t * buf, + a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_trigger(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_trigger(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t 
+cmd_data_check_ptp_capture(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_capture(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_ptp_interrupt(char *info, void *val, a_uint32_t size); + +void +cmd_data_print_ptp_interrupt(a_uint8_t * param_name, a_uint32_t * buf, a_uint32_t size); + +sw_error_t +cmd_data_check_sfp_ccode_type(char *cmdstr, fal_sfp_cc_type_t *arg_val, a_uint32_t size); + +sw_error_t +cmd_data_check_sfp_data(char *cmd_str, void *arg_val, a_uint32_t size); + +void +cmd_data_print_sfp_data(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_dev_type(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_transc_code(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_rate_encode(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_link_length(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_vendor_info(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_laser_wavelength(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_option(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_ctrl_rate(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_enhanced_cfg(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_diag_threshold(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_diag_cal_const(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_diag_realtime(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_ctrl_status(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); + +void +cmd_data_print_sfp_alarm_warn_flag(a_uint8_t *param_name, a_ulong_t *buf, a_uint32_t size); 
+/*qca808x_start*/ +#endif +/*qca808x_end*/ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_lib.h b/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_lib.h new file mode 100755 index 000000000..a6f824a97 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_lib.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _SW_SHELL_LIB_H +#define _SW_SHELL_LIB_H + +#ifdef __cplusplus +extern "C" { +#endif + + int next_cmd(char *out_cmd); + ssize_t getline(char **lineptr, size_t *n, FILE *stream); + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + +#endif /* _SW_SHELL_LIB_H */ + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_sw.h b/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_sw.h new file mode 100755 index 000000000..6eaa9caef --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/include/shell/shell_sw.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2014, 2017-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/*qca808x_start*/ +#ifndef _SHELL_SW_H_ +#define _SHELL_SW_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include "sw.h" + + int get_devid(void); + int set_devid(int dev_id); + sw_error_t cmd_set_devid(a_ulong_t *arg_val); +/*qca808x_end*/ + sw_error_t cmd_show_fdb(a_ulong_t *arg_val); + sw_error_t cmd_show_vlan(a_ulong_t *arg_val); + sw_error_t cmd_show_resv_fdb(a_ulong_t *arg_val); + sw_error_t cmd_show_host(a_ulong_t *arg_val); + sw_error_t cmd_show_host_ipv4(a_ulong_t *arg_val); + sw_error_t cmd_show_host_ipv6(a_ulong_t *arg_val); + sw_error_t cmd_show_host_ipv4M(a_ulong_t *arg_val); + sw_error_t cmd_show_host_ipv6M(a_ulong_t *arg_val); + sw_error_t cmd_show_flow_ipv4_3tuple(a_ulong_t *arg_val); + sw_error_t cmd_show_flow_ipv4_5tuple(a_ulong_t *arg_val); + sw_error_t cmd_show_flow_ipv6_3tuple(a_ulong_t *arg_val); + sw_error_t cmd_show_flow_ipv6_5tuple(a_ulong_t *arg_val); + sw_error_t cmd_show_nat(a_ulong_t *arg_val); + sw_error_t cmd_show_napt(a_ulong_t *arg_val); + sw_error_t cmd_show_intfmac(a_ulong_t *arg_val); + sw_error_t cmd_show_pubaddr(a_ulong_t *arg_val); + sw_error_t cmd_show_flow(a_ulong_t *arg_val); + sw_error_t cmd_show_ctrlpkt(a_ulong_t *arg_val); + sw_error_t cmd_show_ptvlan_entry(a_ulong_t *arg_val); +/*qca808x_start*/ +#ifdef __cplusplus +} +#endif /* __cplusplus */ +/*qca808x_start*/ + +#endif /* _SHELL_SW_H_ */ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/make/components.mk b/feeds/ipq807x/qca-ssdk-shell/src/make/components.mk 
new file mode 100755 index 000000000..71f9fb86e --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/make/components.mk @@ -0,0 +1,36 @@ + +ifeq (linux, $(OS)) + ifeq (KSLIB, $(MODULE_TYPE)) + ifeq (TRUE, $(KERNEL_MODE)) + COMPONENTS = HSL SAL INIT UTIL REF + ifeq (TRUE, $(FAL)) + COMPONENTS += FAL + endif + else + COMPONENTS = HSL SAL INIT REF + endif + + ifeq (TRUE, $(UK_IF)) + COMPONENTS += API + endif + endif + + ifeq (USLIB, $(MODULE_TYPE)) + ifneq (TRUE, $(KERNEL_MODE)) + COMPONENTS = HSL SAL INIT UTIL REF + ifeq (TRUE, $(FAL)) + COMPONENTS += FAL + endif + else + COMPONENTS = UK_IF SAL REF + endif + + ifeq (TRUE, $(UK_IF)) + COMPONENTS += API + endif + endif + + ifeq (SHELL, $(MODULE_TYPE)) + COMPONENTS = SHELL + endif +endif diff --git a/feeds/ipq807x/qca-ssdk-shell/src/make/config.mk b/feeds/ipq807x/qca-ssdk-shell/src/make/config.mk new file mode 100755 index 000000000..027c9bba3 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/make/config.mk @@ -0,0 +1,91 @@ + +include $(PRJ_PATH)/config + +ifndef SYS_PATH + $(error SYS_PATH isn't defined!) +endif + +ifndef TOOL_PATH + $(error TOOL_PATH isn't defined!) +endif + +#define cpu type such as PPC MIPS ARM X86 +ifndef CPU + CPU=mips +endif + +#define os type such as linux netbsd vxworks +ifndef OS + OS=linux +endif + +ifndef OS_VER + OS_VER=2_6 +endif + +#support chip type such as ATHENA GARUDA +ifndef CHIP_TYPE + SUPPORT_CHIP = GARUDA +else + ifeq (GARUDA, $(CHIP_TYPE)) + SUPPORT_CHIP = GARUDA + endif + + ifeq (ATHENA, $(CHIP_TYPE)) + SUPPORT_CHIP = ATHENA + endif + + ifeq (SHIVA, $(CHIP_TYPE)) + SUPPORT_CHIP = SHIVA + endif + + ifeq (HORUS, $(CHIP_TYPE)) + SUPPORT_CHIP = HORUS + endif + + ifeq (ISIS, $(CHIP_TYPE)) + SUPPORT_CHIP = ISIS + endif + + ifeq (ISISC, $(CHIP_TYPE)) + SUPPORT_CHIP = ISISC + endif + + ifeq (ALL_CHIP, $(CHIP_TYPE)) + ifneq (TRUE, $(FAL)) + $(error FAL must be TRUE when CHIP_TYPE is defined as ALL_CHIP!) 
+ endif + SUPPORT_CHIP = GARUDA SHIVA HORUS ISIS ISISC + endif + + ifndef SUPPORT_CHIP + $(error defined CHIP_TYPE isn't supported!) + endif +endif + +#define compile tool prefix +ifndef TOOLPREFIX + TOOLPREFIX=$(CPU)-$(OS)-uclibc- +endif + +DEBUG_ON=FALSE +OPT_FLAG= +LD_FLAG= + +SHELLOBJ=ssdk_sh +US_MOD=ssdk_us +KS_MOD=ssdk_ks + +ifeq (TRUE, $(KERNEL_MODE)) + RUNMODE=km +else + RUNMODE=um +endif + +BLD_DIR=$(PRJ_PATH)/build/$(OS) +BIN_DIR=$(PRJ_PATH)/build/bin + +VER=2.0.0 +BUILD_NUMBER=$(shell cat $(PRJ_PATH)/make/.build_number) +VERSION=$(VER) +BUILD_DATE=$(shell date -u +%F-%T) diff --git a/feeds/ipq807x/qca-ssdk-shell/src/make/defs.mk b/feeds/ipq807x/qca-ssdk-shell/src/make/defs.mk new file mode 100755 index 000000000..7d1c75bb9 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/make/defs.mk @@ -0,0 +1,28 @@ +DST_DIR=$(BLD_DIR)/$(MODULE_TYPE) + +SUB_DIR=$(patsubst %/, %, $(dir $(wildcard ./*/Makefile))) + +ifeq (,$(findstring $(LIB), $(COMPONENTS))) + SRC_LIST= +endif + +SRC_FILE=$(addprefix $(PRJ_PATH)/$(LOC_DIR)/, $(SRC_LIST)) + +OBJ_LIST=$(SRC_LIST:.c=.o) +OBJ_FILE=$(addprefix $(DST_DIR)/, $(OBJ_LIST)) + +DEP_LIST=$(SRC_LIST:.c=.d) +DEP_FILE=$(addprefix $(DST_DIR)/, $(DEP_LIST)) + +vpath %.c $(PRJ_PATH)/$(LOC_DIR) +vpath %.c $(PRJ_PATH)/app/nathelper/linux +vpath %.c $(PRJ_PATH)/app/nathelper/linux/lib +vpath %.o $(DST_DIR) +vpath %.d $(DST_DIR) + +DEP_LOOP=$(foreach i, $(SUB_DIR), $(MAKE) -C $(i) dep || exit 1;) +OBJ_LOOP=$(foreach i, $(SUB_DIR), $(MAKE) -C $(i) obj || exit 1;) +CLEAN_LOOP=$(foreach i, $(SUB_DIR), $(MAKE) -C $(i) clean;) +CLEAN_OBJ_LOOP=$(foreach i, $(SUB_DIR), $(MAKE) -C $(i) clean_o;) +CLEAN_DEP_LOOP=$(foreach i, $(SUB_DIR), $(MAKE) -C $(i) clean_d;) + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/make/linux_opt.mk b/feeds/ipq807x/qca-ssdk-shell/src/make/linux_opt.mk new file mode 100755 index 000000000..e9a98079b --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/make/linux_opt.mk @@ -0,0 +1,328 @@ +ifeq (TRUE, $(IN_ACL)) + 
MODULE_CFLAG += -DIN_ACL +endif + +ifeq (TRUE, $(IN_FDB)) + MODULE_CFLAG += -DIN_FDB +endif + +ifeq (TRUE, $(IN_IGMP)) + MODULE_CFLAG += -DIN_IGMP +endif + +ifeq (TRUE, $(IN_LEAKY)) + MODULE_CFLAG += -DIN_LEAKY +endif + +ifeq (TRUE, $(IN_LED)) + MODULE_CFLAG += -DIN_LED +endif + +ifeq (TRUE, $(IN_MIB)) + MODULE_CFLAG += -DIN_MIB +endif + +ifeq (TRUE, $(IN_MIRROR)) + MODULE_CFLAG += -DIN_MIRROR +endif + +ifeq (TRUE, $(IN_MISC)) + MODULE_CFLAG += -DIN_MISC +endif + +ifeq (TRUE, $(IN_PORTCONTROL)) + MODULE_CFLAG += -DIN_PORTCONTROL +endif + +ifeq (TRUE, $(IN_PORTVLAN)) + MODULE_CFLAG += -DIN_PORTVLAN +endif + +ifeq (TRUE, $(IN_QOS)) + MODULE_CFLAG += -DIN_QOS +endif + +ifeq (TRUE, $(IN_RATE)) + MODULE_CFLAG += -DIN_RATE +endif + +ifeq (TRUE, $(IN_STP)) + MODULE_CFLAG += -DIN_STP +endif + +ifeq (TRUE, $(IN_VLAN)) + MODULE_CFLAG += -DIN_VLAN +endif + +ifeq (TRUE, $(IN_REDUCED_ACL)) + MODULE_CFLAG += -DIN_REDUCED_ACL +endif + +ifeq (TRUE, $(IN_COSMAP)) + MODULE_CFLAG += -DIN_COSMAP +endif + +ifeq (TRUE, $(IN_IP)) + MODULE_CFLAG += -DIN_IP +endif + +ifeq (TRUE, $(IN_NAT)) + MODULE_CFLAG += -DIN_NAT +endif + +ifeq (TRUE, $(IN_TRUNK)) + MODULE_CFLAG += -DIN_TRUNK +endif + +ifeq (TRUE, $(IN_SEC)) + MODULE_CFLAG += -DIN_SEC +endif + +ifeq (TRUE, $(IN_QM)) + MODULE_CFLAG += -DIN_QM +endif + +ifeq (TRUE, $(IN_BM)) + MODULE_CFLAG += -DIN_BM +endif + +ifeq (TRUE, $(IN_FLOW)) + MODULE_CFLAG += -DIN_FLOW +endif + +ifeq (TRUE, $(IN_NAT_HELPER)) + MODULE_CFLAG += -DIN_NAT_HELPER +endif + +ifeq (TRUE, $(IN_INTERFACECONTROL)) + MODULE_CFLAG += -DIN_INTERFACECONTROL +endif + +ifeq (TRUE, $(IN_CTRLPKT)) + MODULE_CFLAG += -DIN_CTRLPKT +endif + +ifeq (TRUE, $(IN_SERVCODE)) + MODULE_CFLAG += -DIN_SERVCODE +endif + +ifeq (TRUE, $(IN_RSS_HASH)) + MODULE_CFLAG += -DIN_RSS_HASH +endif + +ifeq (TRUE, $(IN_MACBLOCK)) + MODULE_CFLAG += -DIN_MACBLOCK +endif + +ifeq (TRUE, $(IN_VSI)) + MODULE_CFLAG += -DIN_VSI +endif + +ifeq (TRUE, $(IN_POLICER)) + MODULE_CFLAG += -DIN_POLICER +endif + +ifeq (TRUE, 
$(IN_SHAPER)) + MODULE_CFLAG += -DIN_SHAPER +endif + +ifeq (TRUE, $(IN_PTP)) + MODULE_CFLAG += -DIN_PTP +endif + +ifeq (TRUE, $(IN_SFP)) + MODULE_CFLAG += -DIN_SFP +endif + +ifeq (TRUE, $(IN_PPPOE)) + MODULE_CFLAG += -DIN_PPPOE +endif + +ifneq (TRUE, $(FAL)) + MODULE_CFLAG += -DHSL_STANDALONG +endif + +ifeq (TRUE, $(UK_IF)) + MODULE_CFLAG += -DUK_IF +endif + +#ifdef UK_NL_PROT + MODULE_CFLAG += -DUK_NL_PROT=$(UK_NL_PROT) +#endif + +#ifdef UK_MINOR_DEV + MODULE_CFLAG += -DUK_MINOR_DEV=$(UK_MINOR_DEV) +#endif + +ifeq (TRUE, $(API_LOCK)) + MODULE_CFLAG += -DAPI_LOCK +endif + +ifeq (TRUE, $(REG_ACCESS_SPEEDUP)) + MODULE_CFLAG += -DREG_ACCESS_SPEEDUP +endif + +ifeq (TRUE, $(DEBUG_ON)) + MODULE_CFLAG += -g +endif + +MODULE_CFLAG += $(OPT_FLAG) -Wall -Werror -DVERSION=\"$(VERSION)\" -DBUILD_DATE=\"$(BUILD_DATE)\" -DCPU=\"$(CPU)\" -DOS=\"$(OS)\" + +MODULE_INC += -I$(PRJ_PATH)/include \ + -I$(PRJ_PATH)/include/common \ + -I$(PRJ_PATH)/include/api \ + -I$(PRJ_PATH)/include/fal \ + -I$(PRJ_PATH)/include/ref \ + -I$(PRJ_PATH)/include/hsl \ + -I$(PRJ_PATH)/include/hsl/phy \ + -I$(PRJ_PATH)/include/sal/os \ + -I$(PRJ_PATH)/include/sal/os/linux_user \ + -I$(PRJ_PATH)/include/sal/sd \ + -I$(PRJ_PATH)/include/sal/sd/linux/hydra_howl \ + -I$(PRJ_PATH)/include/sal/sd/linux/uk_interface \ + -I$(PRJ_PATH)/include/init + +ifneq (,$(findstring ATHENA, $(SUPPORT_CHIP))) + MODULE_INC += -I$(PRJ_PATH)/include/hsl/athena + MODULE_CFLAG += -DATHENA +endif + +ifneq (,$(findstring GARUDA, $(SUPPORT_CHIP))) + MODULE_INC += -I$(PRJ_PATH)/include/hsl/garuda + MODULE_CFLAG += -DGARUDA +endif + +ifneq (,$(findstring SHIVA, $(SUPPORT_CHIP))) + MODULE_INC += -I$(PRJ_PATH)/include/hsl/shiva + MODULE_CFLAG += -DSHIVA +endif + +ifneq (,$(findstring HORUS, $(SUPPORT_CHIP))) + MODULE_INC += -I$(PRJ_PATH)/include/hsl/horus + MODULE_CFLAG += -DHORUS +endif + +ifneq (,$(findstring ISIS, $(SUPPORT_CHIP))) + ifneq (ISISC, $(SUPPORT_CHIP)) + MODULE_INC += -I$(PRJ_PATH)/include/hsl/isis + MODULE_CFLAG += -DISIS + 
endif +endif + +ifneq (,$(findstring ISISC, $(SUPPORT_CHIP))) + MODULE_INC += -I$(PRJ_PATH)/include/hsl/isisc + MODULE_CFLAG += -DISISC +endif + +# check for GCC version +ifeq (4, $(GCC_VER)) + MODULE_CFLAG += -DGCCV4 +endif + +ifeq (KSLIB, $(MODULE_TYPE)) + + ifeq (3_4, $(OS_VER)) + MODULE_CFLAG += -DKVER34 + MODULE_CFLAG += -DKVER32 + MODULE_CFLAG += -DLNX26_22 + MODULE_INC += -I$(SYS_PATH) \ + -I$(SYS_PATH)/include \ + -I$(SYS_PATH)/source/include \ + -I$(SYS_PATH)/source/arch/arm/mach-msm/include \ + -I$(SYS_PATH)/source/arch/arm/include \ + -I$(SYS_PATH)/source/arch/arm/include/asm \ + -I$(SYS_PATH)/arch/arm/include/generated \ + -I$(SYS_PATH)/source/arch/arm/include/asm/mach \ + -I$(SYS_PATH)/usr/include + + endif + + ifeq (3_2, $(OS_VER)) + MODULE_CFLAG += -DKVER32 + MODULE_CFLAG += -DLNX26_22 + ifeq (mips, $(CPU)) + MODULE_INC += -I$(SYS_PATH) \ + -I$(SYS_PATH)/include \ + -I$(SYS_PATH)/arch/mips/include \ + -I$(SYS_PATH)/arch/mips/include/asm/mach-ar7240 \ + -I$(SYS_PATH)/arch/mips/include/asm/mach-generic \ + -I$(SYS_PATH)/arch/mips/include/asm/mach-ar7 \ + -I$(SYS_PATH)/usr/include + + #CPU_CFLAG = -G 0 -mno-abicalls -fno-pic -pipe -mabi=32 -march=mips32r2 + ifndef CPU_CFLAG + CPU_CFLAG = -Wstrict-prototypes -fomit-frame-pointer -G 0 -mno-abicalls -fno-strict-aliasing \ + -O2 -fno-pic -pipe -mabi=32 -march=mips32r2 -DMODULE -mlong-calls -DEXPORT_SYMTAB + endif + else + MODULE_INC += -I$(SYS_PATH) \ + -I$(SYS_PATH)/include \ + -I$(SYS_PATH)/arch/arm/include \ + -I$(SYS_PATH)/arch/arm/include/asm \ + -I$(SYS_PATH)/arch/arm/mach-fv16xx/include \ + -I$(SYS_PATH)/arch/arm/include/generated \ + -I$(SYS_PATH)/include/generated \ + -I$(SYS_PATH)/usr/include + endif + + + endif + + ifeq (2_6, $(OS_VER)) + MODULE_CFLAG += -DKVER26 + MODULE_CFLAG += -DLNX26_22 + ifeq (mips, $(CPU)) + MODULE_INC += -I$(SYS_PATH) \ + -I$(SYS_PATH)/include \ + -I$(SYS_PATH)/arch/mips/include \ + -I$(SYS_PATH)/arch/mips/include/asm/mach-ar7240 \ + 
-I$(SYS_PATH)/arch/mips/include/asm/mach-generic \ + -I$(SYS_PATH)/usr/include + + #CPU_CFLAG = -G 0 -mno-abicalls -fno-pic -pipe -mabi=32 -march=mips32r2 + ifndef CPU_CFLAG + CPU_CFLAG = -Wstrict-prototypes -fomit-frame-pointer -G 0 -mno-abicalls -fno-strict-aliasing \ + -O2 -fno-pic -pipe -mabi=32 -march=mips32r2 -DMODULE -mlong-calls -DEXPORT_SYMTAB + endif + else + MODULE_INC += -I$(SYS_PATH) \ + -I$(SYS_PATH)/include \ + -I$(SYS_PATH)/arch/arm/include \ + -I$(SYS_PATH)/arch/arm/include/asm \ + -I$(SYS_PATH)/arch/arm/mach-fv16xx/include \ + -I$(SYS_PATH)/arch/arm/include/generated \ + -I$(SYS_PATH)/include/generated \ + -I$(SYS_PATH)/usr/include + endif + + + endif + + MODULE_CFLAG += -D__KERNEL__ -DKERNEL_MODULE $(CPU_CFLAG) + + +endif + +ifeq (SHELL, $(MODULE_TYPE)) + MODULE_INC += -I$(PRJ_PATH)/include/shell + + ifeq (2_6, $(OS_VER)) + MODULE_CFLAG += -DKVER26 + else + MODULE_CFLAG += -DKVER24 + endif + + ifneq (TRUE, $(KERNEL_MODE)) + MODULE_CFLAG += -DUSER_MODE + endif + +endif + +ifneq (TRUE, $(KERNEL_MODE)) + ifneq (SHELL, $(MODULE_TYPE)) + MODULE_CFLAG += -DUSER_MODE + endif +endif + +EXTRA_CFLAGS += $(MODULE_INC) $(MODULE_CFLAG) -fpie +EXTRA_LDFLAGS += -pie diff --git a/feeds/ipq807x/qca-ssdk-shell/src/make/target.mk b/feeds/ipq807x/qca-ssdk-shell/src/make/target.mk new file mode 100755 index 000000000..ddb3bd12f --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/make/target.mk @@ -0,0 +1,49 @@ + +include $(PRJ_PATH)/make/$(OS)_opt.mk + +include $(PRJ_PATH)/make/tools.mk + +obj: $(OBJ_LIST) + $(OBJ_LOOP) + +dep: build_dir $(DEP_LIST) + $(DEP_LOOP) + +$(OBJ_LIST): %.o : %.c %.d + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -c $< -o $(DST_DIR)/$@ + +$(DEP_LIST) : %.d : %.c + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) -MM $< > $(DST_DIR)/$@.tmp + sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $(DST_DIR)/$@.tmp > $(DST_DIR)/$@ + $(RM) -f $(DST_DIR)/$@.tmp; + +build_dir: $(DST_DIR) + +$(DST_DIR): + $(MKDIR) -p $(DST_DIR) + +.PHONY: clean +clean: clean_o clean_d + $(CLEAN_LOOP) + 
+.PHONY: clean_o +clean_o: clean_obj + $(CLEAN_OBJ_LOOP) + +.PHONY: clean_d +clean_d: clean_dep + $(CLEAN_DEP_LOOP) + +clean_obj: +ifneq (,$(word 1, $(OBJ_FILE))) + $(RM) -f $(OBJ_FILE) +endif + +clean_dep: +ifneq (,$(word 1, $(DEP_FILE))) + $(RM) -f $(DEP_FILE) +endif + +ifneq (,$(word 1, $(DEP_FILE))) + sinclude $(DEP_FILE) +endif diff --git a/feeds/ipq807x/qca-ssdk-shell/src/make/tools.mk b/feeds/ipq807x/qca-ssdk-shell/src/make/tools.mk new file mode 100755 index 000000000..6ed387298 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/make/tools.mk @@ -0,0 +1,12 @@ + +ifeq (linux, $(OS)) + CC=$(TOOL_PATH)/$(TOOLPREFIX)gcc + AR=$(TOOL_PATH)/$(TOOLPREFIX)ar + LD=$(TOOL_PATH)/$(TOOLPREFIX)ld + STRIP=$(TOOL_PATH)/$(TOOLPREFIX)strip + MAKE=make -S + CP=cp + MKDIR=mkdir + RM=rm + PERL=perl +endif diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/api/Makefile b/feeds/ipq807x/qca-ssdk-shell/src/src/api/Makefile new file mode 100755 index 000000000..25c788a90 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/api/Makefile @@ -0,0 +1,12 @@ +LOC_DIR=src/sal +LIB=API + +include $(PRJ_PATH)/make/config.mk + +SRC_LIST=$(wildcard *.c) + +include $(PRJ_PATH)/make/components.mk +include $(PRJ_PATH)/make/defs.mk +include $(PRJ_PATH)/make/target.mk + +all: dep obj \ No newline at end of file diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/api/api_access.c b/feeds/ipq807x/qca-ssdk-shell/src/src/api/api_access.c new file mode 100755 index 000000000..3c7b334e5 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/api/api_access.c @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2014,2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +/*qca808x_start*/ +#include "sw.h" +#include "fal.h" +/*qca808x_end*/ +#include "ref_vlan.h" +#if (defined(KERNEL_MODULE)) +#include "hsl.h" +#include "hsl_dev.h" +#if defined ATHENA +#include "fal_igmp.h" +#include "fal_leaky.h" +#include "athena_mib.h" +#include "athena_port_ctrl.h" +#include "athena_portvlan.h" +#include "athena_fdb.h" +#include "athena_vlan.h" +#include "athena_init.h" +#include "athena_reg_access.h" +#include "athena_reg.h" +#elif defined GARUDA +#include "garuda_mib.h" +#include "garuda_qos.h" +#include "garuda_rate.h" +#include "garuda_port_ctrl.h" +#include "garuda_portvlan.h" +#include "garuda_fdb.h" +#include "garuda_vlan.h" +#include "garuda_mirror.h" +#include "garuda_stp.h" +#include "garuda_misc.h" +#include "garuda_leaky.h" +#include "garuda_igmp.h" +#include "garuda_acl.h" +#include "garuda_led.h" +#include "garuda_init.h" +#include "garuda_reg_access.h" +#include "garuda_reg.h" +#elif defined SHIVA +#include "shiva_mib.h" +#include "shiva_qos.h" +#include "shiva_rate.h" +#include "shiva_port_ctrl.h" +#include "shiva_portvlan.h" +#include "shiva_fdb.h" +#include "shiva_vlan.h" +#include "shiva_mirror.h" +#include "shiva_stp.h" +#include "shiva_misc.h" +#include "shiva_leaky.h" +#include "shiva_igmp.h" +#include "shiva_acl.h" +#include "shiva_led.h" +#include "shiva_init.h" +#include "shiva_reg_access.h" +#include "shiva_reg.h" +#elif defined HORUS +#include "horus_mib.h" +#include "horus_qos.h" +#include "horus_rate.h" 
+#include "horus_port_ctrl.h" +#include "horus_portvlan.h" +#include "horus_fdb.h" +#include "horus_vlan.h" +#include "horus_mirror.h" +#include "horus_stp.h" +#include "horus_misc.h" +#include "horus_leaky.h" +#include "horus_igmp.h" +#include "horus_led.h" +#include "horus_init.h" +#include "horus_reg_access.h" +#include "horus_reg.h" +#elif defined ISIS +#include "isis_mib.h" +#include "isis_qos.h" +#include "isis_cosmap.h" +#include "isis_rate.h" +#include "isis_port_ctrl.h" +#include "isis_portvlan.h" +#include "isis_fdb.h" +#include "isis_vlan.h" +#include "isis_mirror.h" +#include "isis_stp.h" +#include "isis_misc.h" +#include "isis_leaky.h" +#include "isis_igmp.h" +#include "isis_acl.h" +#include "isis_led.h" +#include "isis_cosmap.h" +#include "isis_ip.h" +#include "isis_nat.h" +#include "isis_trunk.h" +#include "isis_sec.h" +#include "isis_interface_ctrl.h" +#include "isis_init.h" +#include "isis_reg_access.h" +#include "isis_reg.h" +#elif defined ISISC +#include "isisc_mib.h" +#include "isisc_qos.h" +#include "isisc_cosmap.h" +#include "isisc_rate.h" +#include "isisc_port_ctrl.h" +#include "isisc_portvlan.h" +#include "isisc_fdb.h" +#include "isisc_vlan.h" +#include "isisc_mirror.h" +#include "isisc_stp.h" +#include "isisc_misc.h" +#include "isisc_leaky.h" +#include "isisc_igmp.h" +#include "isisc_acl.h" +#include "isisc_led.h" +#include "isisc_cosmap.h" +#include "isisc_ip.h" +#include "isisc_nat.h" +#include "isisc_trunk.h" +#include "isisc_sec.h" +#include "isisc_interface_ctrl.h" +#include "isisc_init.h" +#include "isisc_reg_access.h" +#include "isisc_reg.h" +#endif +#endif +/*qca808x_start*/ +#include "sw_api.h" +#include "api_desc.h" +/*qca808x_end*/ +#if (((!defined(USER_MODE)) && defined(KERNEL_MODULE)) || (defined(USER_MODE) && (!defined(KERNEL_MODULE)))) +#ifdef HSL_STANDALONG +#if defined ATHENA +#include "athena_api.h" +#elif defined GARUDA +#include "garuda_api.h" +#elif defined SHIVA +#include "shiva_api.h" +#elif defined HORUS +#include 
"horus_api.h" +#elif defined ISIS +#include "isis_api.h" +#elif defined ISISC +#include "isisc_api.h" +#endif +#else +#include "ref_api.h" +#include "fal_api.h" +#endif +#elif (defined(USER_MODE)) +#if defined ATHENA +#include "athena_api.h" +#elif defined GARUDA +#include "garuda_api.h" +#elif defined SHIVA +#include "shiva_api.h" +#elif defined HORUS +#include "horus_api.h" +#elif defined ISIS +#include "isis_api.h" +#elif defined ISISC +#include "isisc_api.h" +#endif +#else +#include "ref_api.h" +/*qca808x_start*/ +#include "fal_api.h" +/*qca808x_end*/ +#endif +/*qca808x_start*/ +static sw_api_func_t sw_api_func[] = { +/*qca808x_end*/ + SSDK_REF_API +/*qca808x_start*/ + SSDK_API }; +static sw_api_param_t sw_api_param[] = { +/*qca808x_end*/ + SSDK_REF_PARAM +/*qca808x_start*/ + SSDK_PARAM }; + +sw_api_func_t * +sw_api_func_find(a_uint32_t api_id) +{ + a_uint32_t i = 0; + static a_uint32_t save = 0; + + if(api_id == sw_api_func[save].api_id) + return &sw_api_func[save]; + + do + { + if (api_id == sw_api_func[i].api_id) + { + save = i; + return &sw_api_func[i]; + } + + } + while (++i < (sizeof(sw_api_func)/sizeof(sw_api_func[0]))); + + return NULL; +} + +sw_api_param_t * +sw_api_param_find(a_uint32_t api_id) +{ + a_uint32_t i = 0; + static a_uint32_t save = 0; + + if(api_id == sw_api_param[save].api_id) + return &sw_api_param[save]; + + do + { + if (api_id == sw_api_param[i].api_id) + { + save = i; + return &sw_api_param[i]; + } + } + while (++i < (sizeof(sw_api_param)/sizeof(sw_api_param[0]))); + + return NULL; +} + +a_uint32_t +sw_api_param_nums(a_uint32_t api_id) +{ + a_uint32_t i = 0; + sw_api_param_t *p = NULL; + static sw_api_param_t *savep = NULL; + static a_uint32_t save = 0; + + p = sw_api_param_find(api_id); + if (!p) + { + return 0; + } + + if (p == savep) + { + return save; + } + + savep = p; + while (api_id == p->api_id) + { + p++; + i++; + } + + /*error*/ + if(i >= sizeof(sw_api_param)/sizeof(sw_api_param[0])) + { + savep = NULL; + save = 0; + return 
0; + } + save = i; + + return i; +} + +sw_error_t +sw_api_get(sw_api_t *sw_api) +{ + if(!sw_api) + return SW_FAIL; + + if ((sw_api->api_fp = sw_api_func_find(sw_api->api_id)) == NULL) + return SW_NOT_SUPPORTED; + + if ((sw_api->api_pp = sw_api_param_find(sw_api->api_id)) == NULL) + return SW_NOT_SUPPORTED; + + if((sw_api->api_nr = sw_api_param_nums(sw_api->api_id)) == 0) + return SW_NOT_SUPPORTED; + + return SW_OK; +} +/*qca808x_end*/ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/Makefile b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/Makefile new file mode 100755 index 000000000..34612d390 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/Makefile @@ -0,0 +1,141 @@ +LOC_DIR=src/fal_uk +LIB=UK_IF + +include $(PRJ_PATH)/make/config.mk + +SRC_LIST=fal_init.c fal_uk_if.c fal_reg_access.c + +ifeq (TRUE, $(IN_ACL)) + SRC_LIST += fal_acl.c +endif + +ifeq (TRUE, $(IN_FDB)) + SRC_LIST += fal_fdb.c +endif + +ifeq (TRUE, $(IN_IGMP)) + SRC_LIST += fal_igmp.c +endif + +ifeq (TRUE, $(IN_LEAKY)) + SRC_LIST += fal_leaky.c +endif + +ifeq (TRUE, $(IN_LED)) + SRC_LIST += fal_led.c +endif + +ifeq (TRUE, $(IN_MIB)) + SRC_LIST += fal_mib.c +endif + +ifeq (TRUE, $(IN_MIRROR)) + SRC_LIST += fal_mirror.c +endif + +ifeq (TRUE, $(IN_MISC)) + SRC_LIST += fal_misc.c +endif + +ifeq (TRUE, $(IN_PORTCONTROL)) + SRC_LIST += fal_port_ctrl.c +endif + +ifeq (TRUE, $(IN_PORTVLAN)) + SRC_LIST += fal_portvlan.c +endif + +ifeq (TRUE, $(IN_QOS)) + SRC_LIST += fal_qos.c +endif + +ifeq (TRUE, $(IN_RATE)) + SRC_LIST += fal_rate.c +endif + +ifeq (TRUE, $(IN_STP)) + SRC_LIST += fal_stp.c +endif + +ifeq (TRUE, $(IN_VLAN)) + SRC_LIST += fal_vlan.c +endif + +ifeq (TRUE, $(IN_COSMAP)) + SRC_LIST += fal_cosmap.c +endif + +ifeq (TRUE, $(IN_IP)) + SRC_LIST += fal_ip.c +endif + +ifeq (TRUE, $(IN_NAT)) + SRC_LIST += fal_nat.c +endif + +ifeq (TRUE, $(IN_SEC)) + SRC_LIST += fal_sec.c +endif + +ifeq (TRUE, $(IN_TRUNK)) + SRC_LIST += fal_trunk.c +endif + +ifeq (TRUE, $(IN_INTERFACECONTROL)) + 
SRC_LIST += fal_interface_ctrl.c +endif + +ifeq (TRUE, $(IN_CTRLPKT)) + SRC_LIST += fal_ctrlpkt.c +endif + +ifeq (TRUE, $(IN_SERVCODE)) + SRC_LIST += fal_servcode.c +endif + +ifeq (TRUE, $(IN_RSS_HASH)) + SRC_LIST += fal_rss_hash.c +endif + +ifeq (TRUE, $(IN_VSI)) + SRC_LIST += fal_vsi.c +endif + +ifeq (TRUE, $(IN_QM)) + SRC_LIST += fal_qm.c +endif + + +ifeq (TRUE, $(IN_FLOW)) + SRC_LIST += fal_flow.c +endif + +ifeq (TRUE, $(IN_PPPOE)) + SRC_LIST += fal_pppoe.c +endif + +ifeq (TRUE, $(IN_BM)) + SRC_LIST += fal_bm.c +endif + +ifeq (TRUE, $(IN_SHAPER)) + SRC_LIST += fal_shaper.c +endif + +ifeq (TRUE, $(IN_POLICER)) + SRC_LIST += fal_policer.c +endif + +ifeq (TRUE, $(IN_PTP)) + SRC_LIST += fal_ptp.c +endif + +ifeq (TRUE, $(IN_SFP)) + SRC_LIST += fal_sfp.c +endif + +include $(PRJ_PATH)/make/components.mk +include $(PRJ_PATH)/make/defs.mk +include $(PRJ_PATH)/make/target.mk + +all: dep obj diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_acl.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_acl.c new file mode 100755 index 000000000..9f97bf300 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_acl.c @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +#include "sw_ioctl.h" +#include "fal_acl.h" +#include "fal_uk_if.h" + +sw_error_t +fal_acl_list_creat(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t prio) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_LIST_CREAT, dev_id, list_id, prio); + return rv; +} + +sw_error_t +fal_acl_list_destroy(a_uint32_t dev_id, a_uint32_t list_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_LIST_DESTROY, dev_id, list_id); + return rv; +} + +sw_error_t +fal_acl_rule_add(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t rule_id, + a_uint32_t rule_nr, fal_acl_rule_t * rule) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_RULE_ADD, dev_id, list_id, rule_id, + rule_nr, rule); + return rv; +} + +sw_error_t +fal_acl_rule_delete(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t rule_id, + a_uint32_t rule_nr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_RULE_DELETE, dev_id, list_id, rule_id, rule_nr); + return rv; +} + +sw_error_t +fal_acl_rule_query(a_uint32_t dev_id, a_uint32_t list_id, a_uint32_t rule_id, + fal_acl_rule_t * rule) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_RULE_QUERY, dev_id, list_id, rule_id, rule); + return rv; +} + +sw_error_t +fal_acl_list_bind(a_uint32_t dev_id, a_uint32_t list_id, + fal_acl_direc_t direc, fal_acl_bind_obj_t obj_t, + a_uint32_t obj_idx) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_LIST_BIND, dev_id, list_id, direc, obj_t, obj_idx); + return rv; +} + +sw_error_t +fal_acl_list_unbind(a_uint32_t dev_id, a_uint32_t list_id, + fal_acl_direc_t direc, fal_acl_bind_obj_t obj_t, + a_uint32_t obj_idx) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_LIST_UNBIND, dev_id, list_id, direc, obj_t, obj_idx); + return rv; +} + +sw_error_t +fal_acl_status_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_STATUS_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_acl_status_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_STATUS_GET, 
dev_id, enable); + return rv; +} + +sw_error_t +fal_acl_list_dump(a_uint32_t dev_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_LIST_DUMP, dev_id); + return rv; +} + +sw_error_t +fal_acl_rule_dump(a_uint32_t dev_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_RULE_DUMP, dev_id); + return rv; +} + +sw_error_t +fal_acl_port_udf_profile_set(a_uint32_t dev_id, fal_port_t port_id, + fal_acl_udf_type_t udf_type, a_uint32_t offset, a_uint32_t length) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_PT_UDF_PROFILE_SET, dev_id, port_id, udf_type, offset, length); + return rv; +} + +sw_error_t +fal_acl_port_udf_profile_get(a_uint32_t dev_id, fal_port_t port_id, + fal_acl_udf_type_t udf_type, a_uint32_t * offset, a_uint32_t * length) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_PT_UDF_PROFILE_GET, dev_id, port_id, + udf_type, offset, length); + return rv; +} + +sw_error_t +fal_acl_rule_active(a_uint32_t dev_id, a_uint32_t list_id, + a_uint32_t rule_id, a_uint32_t rule_nr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_RULE_ACTIVE, dev_id, list_id, rule_id, rule_nr); + return rv; +} + +sw_error_t +fal_acl_rule_deactive(a_uint32_t dev_id, a_uint32_t list_id, + a_uint32_t rule_id, a_uint32_t rule_nr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_RULE_DEACTIVE, dev_id, list_id, rule_id, rule_nr); + return rv; +} + +sw_error_t +fal_acl_rule_src_filter_sts_set(a_uint32_t dev_id, + a_uint32_t rule_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_RULE_SRC_FILTER_STS_SET, dev_id, rule_id, enable); + return rv; +} + +sw_error_t +fal_acl_rule_src_filter_sts_get(a_uint32_t dev_id, + a_uint32_t rule_id, a_bool_t* enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_RULE_SRC_FILTER_STS_GET, dev_id, rule_id, enable); + return rv; +} + +sw_error_t +fal_acl_udf_profile_set(a_uint32_t dev_id, fal_acl_udf_pkt_type_t pkt_type, + a_uint32_t udf_idx, fal_acl_udf_type_t udf_type, a_uint32_t offset) +{ + sw_error_t rv; + + rv = 
sw_uk_exec(SW_API_ACL_UDF_SET, dev_id, pkt_type, udf_idx, udf_type, offset); + return rv; +} + +sw_error_t +fal_acl_udf_profile_get(a_uint32_t dev_id, fal_acl_udf_pkt_type_t pkt_type, + a_uint32_t udf_idx, fal_acl_udf_type_t *udf_type, a_uint32_t *offset) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ACL_UDF_GET, dev_id, pkt_type, udf_idx, udf_type, offset); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_bm.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_bm.c new file mode 100755 index 000000000..c822a867b --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_bm.c @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_bm.h" +#include "fal_uk_if.h" + +sw_error_t +fal_port_bufgroup_map_get(a_uint32_t dev_id, fal_port_t port, + a_uint8_t *group) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_PORTGROUP_MAP_GET, dev_id, port, group); + return rv; +} + +sw_error_t +fal_port_bufgroup_map_set(a_uint32_t dev_id, fal_port_t port, + a_uint8_t group) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_PORTGROUP_MAP_SET, dev_id, port, group); + return rv; +} + +sw_error_t +fal_bm_port_reserved_buffer_get(a_uint32_t dev_id, fal_port_t port, + a_uint16_t *prealloc_buff, a_uint16_t *react_buff) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_PORT_RSVBUFFER_GET, dev_id, port, + prealloc_buff, react_buff); + return rv; +} + +sw_error_t +fal_bm_port_reserved_buffer_set(a_uint32_t dev_id, fal_port_t port, + a_uint16_t prealloc_buff, a_uint16_t react_buff) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_PORT_RSVBUFFER_SET, dev_id, port, + prealloc_buff, react_buff); + return rv; +} + +sw_error_t +fal_bm_bufgroup_buffer_get(a_uint32_t dev_id, a_uint8_t group, + a_uint16_t *buff_num) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_GROUP_BUFFER_GET, dev_id, group, + buff_num); + return rv; +} + +sw_error_t +fal_bm_bufgroup_buffer_set(a_uint32_t dev_id, a_uint8_t group, + a_uint16_t buff_num) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_GROUP_BUFFER_SET, dev_id, group, + buff_num); + return rv; +} + +sw_error_t +fal_bm_port_dynamic_thresh_get(a_uint32_t dev_id, fal_port_t port, + fal_bm_dynamic_cfg_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_DYNAMIC_THRESH_GET, dev_id, port, + cfg); + return rv; +} + +sw_error_t +fal_bm_port_dynamic_thresh_set(a_uint32_t dev_id, fal_port_t port, + fal_bm_dynamic_cfg_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_DYNAMIC_THRESH_SET, dev_id, port, + cfg); + return rv; +} + +sw_error_t +fal_port_bm_ctrl_get(a_uint32_t dev_id, fal_port_t port, a_bool_t *enable) +{ + 
sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_CTRL_GET, dev_id, port, + enable); + return rv; +} + +sw_error_t +fal_port_bm_ctrl_set(a_uint32_t dev_id, fal_port_t port, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_CTRL_SET, dev_id, port, + enable); + return rv; +} + +sw_error_t +fal_bm_port_static_thresh_get(a_uint32_t dev_id, fal_port_t port, + fal_bm_static_cfg_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_STATIC_THRESH_GET, dev_id, port, + cfg); + return rv; +} + +sw_error_t +fal_bm_port_static_thresh_set(a_uint32_t dev_id, fal_port_t port, + fal_bm_static_cfg_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_STATIC_THRESH_SET, dev_id, port, + cfg); + return rv; +} + +sw_error_t +fal_bm_port_counter_get(a_uint32_t dev_id, fal_port_t port, + fal_bm_port_counter_t *counter) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BM_PORT_COUNTER_GET, dev_id, port, + counter); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_cosmap.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_cosmap.c new file mode 100755 index 000000000..9b086884a --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_cosmap.c @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_cosmap.h" +#include "fal_uk_if.h" + + +sw_error_t +fal_cosmap_dscp_to_pri_set(a_uint32_t dev_id, a_uint32_t dscp, a_uint32_t pri) +{ + sw_error_t rv; + + rv= sw_uk_exec(SW_API_COSMAP_DSCP_TO_PRI_SET, dev_id, dscp, pri); + return rv; +} + +sw_error_t +fal_cosmap_dscp_to_pri_get(a_uint32_t dev_id, a_uint32_t dscp, a_uint32_t * pri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_DSCP_TO_PRI_GET, dev_id, dscp, pri); + return rv; +} + +sw_error_t +fal_cosmap_dscp_to_dp_set(a_uint32_t dev_id, a_uint32_t dscp, a_uint32_t dp) +{ + sw_error_t rv; + + rv= sw_uk_exec(SW_API_COSMAP_DSCP_TO_DP_SET, dev_id, dscp, dp); + return rv; +} + +sw_error_t +fal_cosmap_dscp_to_dp_get(a_uint32_t dev_id, a_uint32_t dscp, a_uint32_t * dp) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_DSCP_TO_DP_GET, dev_id, dscp, dp); + return rv; +} + +sw_error_t +fal_cosmap_up_to_pri_set(a_uint32_t dev_id, a_uint32_t up, a_uint32_t pri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_UP_TO_PRI_SET, dev_id, up, pri); + return rv; +} + +sw_error_t +fal_cosmap_up_to_pri_get(a_uint32_t dev_id, a_uint32_t up, a_uint32_t * pri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_UP_TO_PRI_GET, dev_id, up, pri); + return rv; +} + +sw_error_t +fal_cosmap_up_to_dp_set(a_uint32_t dev_id, a_uint32_t up, a_uint32_t dp) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_UP_TO_DP_SET, dev_id, up, dp); + return rv; +} + +sw_error_t +fal_cosmap_up_to_dp_get(a_uint32_t dev_id, a_uint32_t up, a_uint32_t * dp) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_UP_TO_DP_GET, dev_id, up, dp); + return rv; 
+} + +sw_error_t +fal_cosmap_pri_to_queue_set(a_uint32_t dev_id, a_uint32_t pri, + a_uint32_t queue) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_PRI_TO_QU_SET, dev_id, pri, queue); + return rv; +} + +sw_error_t +fal_cosmap_pri_to_queue_get(a_uint32_t dev_id, a_uint32_t pri, + a_uint32_t * queue) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_PRI_TO_QU_GET, dev_id, pri, queue); + return rv; +} + +sw_error_t +fal_cosmap_pri_to_ehqueue_set(a_uint32_t dev_id, a_uint32_t pri, + a_uint32_t queue) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_PRI_TO_EHQU_SET, dev_id, pri, queue); + return rv; +} + +sw_error_t +fal_cosmap_pri_to_ehqueue_get(a_uint32_t dev_id, a_uint32_t pri, + a_uint32_t * queue) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_PRI_TO_EHQU_GET, dev_id, pri, queue); + return rv; +} + +sw_error_t +fal_cosmap_egress_remark_set(a_uint32_t dev_id, a_uint32_t tbl_id, + fal_egress_remark_table_t * tbl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_EG_REMARK_SET, dev_id, tbl_id, tbl); + return rv; +} + +sw_error_t +fal_cosmap_egress_remark_get(a_uint32_t dev_id, a_uint32_t tbl_id, + fal_egress_remark_table_t * tbl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_EG_REMARK_GET, dev_id, tbl_id, tbl); + return rv; +} + +sw_error_t +fal_cosmap_dscp_to_ehpri_set(a_uint32_t dev_id, a_uint32_t dscp, a_uint32_t pri) +{ + sw_error_t rv; + + rv= sw_uk_exec(SW_API_COSMAP_DSCP_TO_EHPRI_SET, dev_id, dscp, pri); + return rv; +} + +sw_error_t +fal_cosmap_dscp_to_ehpri_get(a_uint32_t dev_id, a_uint32_t dscp, a_uint32_t * pri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_DSCP_TO_EHPRI_GET, dev_id, dscp, pri); + return rv; +} + +sw_error_t +fal_cosmap_dscp_to_ehdp_set(a_uint32_t dev_id, a_uint32_t dscp, a_uint32_t dp) +{ + sw_error_t rv; + + rv= sw_uk_exec(SW_API_COSMAP_DSCP_TO_EHDP_SET, dev_id, dscp, dp); + return rv; +} + +sw_error_t +fal_cosmap_dscp_to_ehdp_get(a_uint32_t dev_id, a_uint32_t dscp, a_uint32_t * dp) +{ + 
sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_DSCP_TO_EHDP_GET, dev_id, dscp, dp); + return rv; +} + +sw_error_t +fal_cosmap_up_to_ehpri_set(a_uint32_t dev_id, a_uint32_t up, a_uint32_t pri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_UP_TO_EHPRI_SET, dev_id, up, pri); + return rv; +} + +sw_error_t +fal_cosmap_up_to_ehpri_get(a_uint32_t dev_id, a_uint32_t up, a_uint32_t * pri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_UP_TO_EHPRI_GET, dev_id, up, pri); + return rv; +} + +sw_error_t +fal_cosmap_up_to_ehdp_set(a_uint32_t dev_id, a_uint32_t up, a_uint32_t dp) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_UP_TO_EHDP_SET, dev_id, up, dp); + return rv; +} + +sw_error_t +fal_cosmap_up_to_ehdp_get(a_uint32_t dev_id, a_uint32_t up, a_uint32_t * dp) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_UP_TO_EHDP_GET, dev_id, up, dp); + return rv; +} \ No newline at end of file diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ctrlpkt.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ctrlpkt.c new file mode 100755 index 000000000..af661c9fe --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ctrlpkt.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_ctrlpkt.h" +#include "fal_uk_if.h" + +sw_error_t +fal_mgmtctrl_ethtype_profile_set(a_uint32_t dev_id, a_uint32_t profile_id, a_uint32_t ethtype) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MGMTCTRL_ETHTYPE_PROFILE_SET, dev_id, profile_id, + (a_uint32_t) ethtype); + return rv; +} + +sw_error_t +fal_mgmtctrl_ethtype_profile_get(a_uint32_t dev_id, a_uint32_t profile_id, a_uint32_t * ethtype) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MGMTCTRL_ETHTYPE_PROFILE_GET, dev_id, profile_id, + ethtype); + return rv; +} + +sw_error_t +fal_mgmtctrl_rfdb_profile_set(a_uint32_t dev_id, a_uint32_t profile_id, fal_mac_addr_t *addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MGMTCTRL_RFDB_PROFILE_SET, dev_id, profile_id, + addr); + return rv; +} + +sw_error_t +fal_mgmtctrl_rfdb_profile_get(a_uint32_t dev_id, a_uint32_t profile_id, fal_mac_addr_t *addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MGMTCTRL_RFDB_PROFILE_GET, dev_id, profile_id, + addr); + return rv; +} + +sw_error_t +fal_mgmtctrl_ctrlpkt_profile_add(a_uint32_t dev_id, fal_ctrlpkt_profile_t *ctrlpkt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MGMTCTRL_CTRLPKT_PROFILE_ADD, dev_id, ctrlpkt); + return rv; +} + +sw_error_t +fal_mgmtctrl_ctrlpkt_profile_del(a_uint32_t dev_id, fal_ctrlpkt_profile_t *ctrlpkt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MGMTCTRL_CTRLPKT_PROFILE_DEL, dev_id, ctrlpkt); + return rv; +} + +sw_error_t +fal_mgmtctrl_ctrlpkt_profile_getfirst(a_uint32_t dev_id, fal_ctrlpkt_profile_t *ctrlpkt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MGMTCTRL_CTRLPKT_PROFILE_GETFIRST, dev_id, ctrlpkt); + return rv; +} + +sw_error_t +fal_mgmtctrl_ctrlpkt_profile_getnext(a_uint32_t dev_id, fal_ctrlpkt_profile_t *ctrlpkt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MGMTCTRL_CTRLPKT_PROFILE_GETNEXT, dev_id, ctrlpkt); + return rv; +} diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_fdb.c 
b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_fdb.c new file mode 100755 index 000000000..1f62e278b --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_fdb.c @@ -0,0 +1,459 @@ +/* + * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_fdb.h" +#include "fal_uk_if.h" + +sw_error_t +fal_fdb_entry_add(a_uint32_t dev_id, const fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_ADD, dev_id, entry); + return rv; +} + +sw_error_t +fal_fdb_entry_flush(a_uint32_t dev_id, a_uint32_t flag) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_DELALL, dev_id, flag); + return rv; +} + +sw_error_t +fal_fdb_entry_del_byport(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t flag) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_DELPORT, dev_id, port_id, flag); + return rv; +} + +sw_error_t +fal_fdb_entry_del_bymac(a_uint32_t dev_id, const fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_DELMAC, dev_id, entry); + return rv; +} + +sw_error_t +fal_fdb_entry_getfirst(a_uint32_t dev_id, fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_FIRST, dev_id, entry); + return rv; +} + +sw_error_t +fal_fdb_entry_getnext(a_uint32_t dev_id, fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_NEXT, dev_id, entry); + return rv; +} + +sw_error_t +fal_fdb_entry_search(a_uint32_t dev_id, fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_FIND, dev_id, entry); + return rv; +} + +sw_error_t +fal_fdb_port_learn_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PT_LEARN_SET, dev_id, port_id, + (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_fdb_port_learn_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PT_LEARN_GET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_fdb_port_learning_ctrl_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable, fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PT_NEWADDR_LEARN_SET, dev_id, port_id, + enable, cmd); + return rv; +} + +sw_error_t 
+fal_fdb_port_learning_ctrl_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t *enable, fal_fwd_cmd_t *cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PT_NEWADDR_LEARN_GET, dev_id, port_id, + enable, cmd); + return rv; +} + +sw_error_t +fal_fdb_port_stamove_ctrl_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable, fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PT_STAMOVE_SET, dev_id, port_id, + enable, cmd); + return rv; +} + +sw_error_t +fal_fdb_port_stamove_ctrl_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t *enable, fal_fwd_cmd_t *cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PT_STAMOVE_GET, dev_id, port_id, + enable, cmd); + return rv; +} + +sw_error_t +fal_fdb_aging_ctrl_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_AGE_CTRL_SET, dev_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_fdb_aging_ctrl_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_AGE_CTRL_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_fdb_learning_ctrl_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_LEARN_CTRL_SET, dev_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_fdb_learning_ctrl_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_LEARN_CTRL_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_fdb_vlan_ivl_svl_set(a_uint32_t dev_id, fal_fdb_smode smode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_VLAN_IVL_SVL_SET, dev_id, (a_uint32_t) smode); + return rv; +} + +sw_error_t +fal_fdb_vlan_ivl_svl_get(a_uint32_t dev_id, fal_fdb_smode* smode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_VLAN_IVL_SVL_GET, dev_id, smode); + return rv; +} + +sw_error_t +fal_fdb_aging_time_set(a_uint32_t dev_id, a_uint32_t * time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_AGE_TIME_SET, dev_id, time); + return rv; +} + +sw_error_t 
+fal_fdb_aging_time_get(a_uint32_t dev_id, a_uint32_t * time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_AGE_TIME_GET, dev_id, time); + return rv; +} + +sw_error_t +fal_fdb_entry_getnext_byindex(a_uint32_t dev_id, a_uint32_t * iterator, fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_ITERATE, dev_id, iterator, entry); + return rv; +} + +sw_error_t +fal_fdb_entry_extend_getnext(a_uint32_t dev_id, fal_fdb_op_t * option, + fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_EXTEND_NEXT, dev_id, option, entry); + return rv; +} + +sw_error_t +fal_fdb_entry_extend_getfirst(a_uint32_t dev_id, fal_fdb_op_t * option, + fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_EXTEND_FIRST, dev_id, option, entry); + return rv; +} + +sw_error_t +fal_fdb_entry_update_byport(a_uint32_t dev_id, fal_port_t old_port, fal_port_t new_port, + a_uint32_t fid, fal_fdb_op_t * option) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_TRANSFER, dev_id, old_port, new_port, fid, option); + return rv; +} + +sw_error_t +fal_fdb_port_learned_mac_counter_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * cnt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FDB_LEARN_COUNTER_GET, dev_id, port_id, cnt); + return rv; +} + +sw_error_t +fal_port_fdb_learn_limit_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable, a_uint32_t cnt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FDB_LEARN_LIMIT_SET, dev_id, port_id, enable, cnt); + return rv; +} + +sw_error_t +fal_port_fdb_learn_limit_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable, a_uint32_t * cnt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FDB_LEARN_LIMIT_GET, dev_id, port_id, enable, cnt); + return rv; +} + +sw_error_t +fal_port_fdb_learn_exceed_cmd_set(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FDB_LEARN_EXCEED_CMD_SET, dev_id, port_id, (a_uint32_t)cmd); + return 
rv; +} + +sw_error_t +fal_port_fdb_learn_exceed_cmd_get(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FDB_LEARN_EXCEED_CMD_GET, dev_id, port_id, cmd); + return rv; +} + +sw_error_t +fal_fdb_learn_limit_set(a_uint32_t dev_id, a_bool_t enable, a_uint32_t cnt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_LEARN_LIMIT_SET, dev_id, enable, cnt); + return rv; +} + +sw_error_t +fal_fdb_learn_limit_get(a_uint32_t dev_id, a_bool_t * enable, a_uint32_t * cnt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_LEARN_LIMIT_GET, dev_id, enable, cnt); + return rv; +} + +sw_error_t +fal_fdb_learn_exceed_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_LEARN_EXCEED_CMD_SET, dev_id, (a_uint32_t)cmd); + return rv; +} + +sw_error_t +fal_fdb_learn_exceed_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_LEARN_EXCEED_CMD_GET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_fdb_resv_add(a_uint32_t dev_id, fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_RESV_ADD, dev_id, entry); + return rv; +} + +sw_error_t +fal_fdb_resv_del(a_uint32_t dev_id, fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_RESV_DEL, dev_id, entry); + return rv; +} + +sw_error_t +fal_fdb_resv_find(a_uint32_t dev_id, fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_RESV_FIND, dev_id, entry); + return rv; +} + +sw_error_t +fal_fdb_resv_iterate(a_uint32_t dev_id, a_uint32_t * iterator, fal_fdb_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_RESV_ITERATE, dev_id, iterator, entry); + return rv; +} + +sw_error_t +fal_fdb_port_learn_static_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PT_LEARN_STATIC_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t 
+fal_fdb_port_learn_static_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PT_LEARN_STATIC_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_fdb_port_add(a_uint32_t dev_id, a_uint32_t fid, fal_mac_addr_t * addr, fal_port_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PORT_ADD, dev_id, fid, addr, port_id); + return rv; +} + +sw_error_t +fal_fdb_port_del(a_uint32_t dev_id, a_uint32_t fid, fal_mac_addr_t * addr, fal_port_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PORT_DEL, dev_id, fid, addr, port_id); + return rv; +} + +sw_error_t +fal_fdb_rfs_set(a_uint32_t dev_id, fal_fdb_rfs_t *rfs) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_RFS_SET, dev_id, rfs); + return rv; +} + +sw_error_t +fal_fdb_rfs_del(a_uint32_t dev_id, fal_fdb_rfs_t *rfs) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_RFS_DEL, dev_id, rfs); + return rv; +} + +sw_error_t +fal_fdb_port_maclimit_ctrl_set(a_uint32_t dev_id, fal_port_t port_id, fal_maclimit_ctrl_t * maclimit_ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PT_MACLIMIT_CTRL_SET, dev_id, port_id, maclimit_ctrl); + return rv; +} + +sw_error_t +fal_fdb_port_maclimit_ctrl_get(a_uint32_t dev_id, fal_port_t port_id, fal_maclimit_ctrl_t * maclimit_ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_PT_MACLIMIT_CTRL_GET, dev_id, port_id, maclimit_ctrl); + return rv; +} + +sw_error_t +fal_fdb_entry_del_byfid(a_uint32_t dev_id, a_uint16_t fid, a_uint32_t flag) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FDB_DEL_BY_FID, dev_id, fid, flag); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_flow.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_flow.c new file mode 100755 index 000000000..8276c0eae --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_flow.c @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_flow.h" +#include "fal_uk_if.h" + + +sw_error_t +fal_flow_status_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_STATUS_SET, dev_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_flow_status_get(a_uint32_t dev_id, a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_STATUS_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_flow_age_timer_set(a_uint32_t dev_id, fal_flow_age_timer_t *age_timer) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_AGE_TIMER_SET, dev_id, age_timer); + return rv; +} + +sw_error_t +fal_flow_age_timer_get(a_uint32_t dev_id, fal_flow_age_timer_t *age_timer) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_AGE_TIMER_GET, dev_id, age_timer); + return rv; +} + +sw_error_t +fal_flow_mgmt_set( + a_uint32_t dev_id, + fal_flow_pkt_type_t type, + fal_flow_direction_t dir, + fal_flow_mgmt_t *mgmt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_CTRL_SET, dev_id, type, dir, mgmt); + return rv; +} + +sw_error_t +fal_flow_mgmt_get( + a_uint32_t dev_id, + fal_flow_pkt_type_t type, + fal_flow_direction_t dir, + fal_flow_mgmt_t *mgmt) +{ + sw_error_t rv; + + rv = 
sw_uk_exec(SW_API_FLOW_CTRL_GET, dev_id, type, dir, mgmt); + return rv; +} + +sw_error_t +fal_flow_entry_add( + a_uint32_t dev_id, + a_uint32_t add_mode, /*index or hash*/ + fal_flow_entry_t *flow_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_ENTRY_ADD, dev_id, add_mode, flow_entry); + return rv; +} + +sw_error_t +fal_flow_entry_del( + a_uint32_t dev_id, + a_uint32_t del_mode, + fal_flow_entry_t *flow_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_ENTRY_DEL, dev_id, del_mode, flow_entry); + return rv; +} + +sw_error_t +fal_flow_entry_next( + a_uint32_t dev_id, + a_uint32_t next_mode, + fal_flow_entry_t *flow_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOWENTRY_NEXT, dev_id, next_mode, flow_entry); + return rv; +} + +sw_error_t +fal_flow_entry_get( + a_uint32_t dev_id, + a_uint32_t get_mode, + fal_flow_entry_t *flow_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_ENTRY_GET, dev_id, get_mode, flow_entry); + return rv; +} + +sw_error_t +fal_flow_global_cfg_get( + a_uint32_t dev_id, + fal_flow_global_cfg_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_GLOBAL_CFG_GET, dev_id, cfg); + return rv; +} + +sw_error_t +fal_flow_global_cfg_set( + a_uint32_t dev_id, + fal_flow_global_cfg_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_GLOBAL_CFG_SET, dev_id, cfg); + return rv; +} + +sw_error_t +fal_flow_host_add( + a_uint32_t dev_id, + a_uint32_t add_mode, /*index or hash*/ + fal_flow_host_entry_t *flow_host_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_HOST_ADD, dev_id, add_mode, flow_host_entry); + return rv; +} + +sw_error_t +fal_flow_host_del( + a_uint32_t dev_id, + a_uint32_t del_mode, + fal_flow_host_entry_t *flow_host_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_HOST_DEL, dev_id, del_mode, flow_host_entry); + return rv; +} + +sw_error_t +fal_flow_host_get( + a_uint32_t dev_id, + a_uint32_t get_mode, + fal_flow_host_entry_t *flow_host_entry) +{ + sw_error_t rv; + + rv = 
sw_uk_exec(SW_API_FLOW_HOST_GET, dev_id, get_mode, flow_host_entry); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_igmp.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_igmp.c new file mode 100755 index 000000000..b0728fd97 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_igmp.c @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_igmp.h" +#include "fal_uk_if.h" + +sw_error_t +fal_port_igmps_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_IGMPS_MODE_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_igmps_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_IGMPS_MODE_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_igmp_mld_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_MLD_CMD_SET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_igmp_mld_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_MLD_CMD_GET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_port_igmp_mld_join_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_PT_JOIN_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_igmp_mld_join_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_PT_JOIN_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_igmp_mld_leave_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_PT_LEAVE_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_igmp_mld_leave_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_PT_LEAVE_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_igmp_mld_rp_set(a_uint32_t dev_id, fal_pbmp_t pts) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_RP_SET, dev_id, pts); + return rv; +} + +sw_error_t +fal_igmp_mld_rp_get(a_uint32_t dev_id, fal_pbmp_t * pts) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_RP_GET, dev_id, pts); + return rv; +} 
+ +sw_error_t +fal_igmp_mld_entry_creat_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_ENTRY_CREAT_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_igmp_mld_entry_creat_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_ENTRY_CREAT_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_igmp_mld_entry_static_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_ENTRY_STATIC_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_igmp_mld_entry_static_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_ENTRY_STATIC_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_igmp_mld_entry_leaky_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_ENTRY_LEAKY_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_igmp_mld_entry_leaky_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_ENTRY_LEAKY_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_igmp_mld_entry_v3_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_ENTRY_V3_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_igmp_mld_entry_v3_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_ENTRY_V3_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_igmp_mld_entry_queue_set(a_uint32_t dev_id, a_bool_t enable, a_uint32_t queue) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_ENTRY_QUEUE_SET, dev_id, enable, queue); + return rv; +} + +sw_error_t +fal_igmp_mld_entry_queue_get(a_uint32_t dev_id, a_bool_t * enable, a_uint32_t * queue) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_ENTRY_QUEUE_GET, dev_id, enable, queue); + return rv; +} + +sw_error_t +fal_port_igmp_mld_learn_limit_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable, a_uint32_t cnt) +{ 
+ sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_IGMP_LEARN_LIMIT_SET, dev_id, port_id, enable, cnt); + return rv; +} + +sw_error_t +fal_port_igmp_mld_learn_limit_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable, a_uint32_t * cnt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_IGMP_LEARN_LIMIT_GET, dev_id, port_id, enable, cnt); + return rv; +} + +sw_error_t +fal_port_igmp_mld_learn_exceed_cmd_set(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_IGMP_LEARN_EXCEED_CMD_SET, dev_id, port_id, cmd); + return rv; +} + +sw_error_t +fal_port_igmp_mld_learn_exceed_cmd_get(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_IGMP_LEARN_EXCEED_CMD_GET, dev_id, port_id, cmd); + return rv; +} + +sw_error_t +fal_igmp_sg_entry_set(a_uint32_t dev_id, fal_igmp_sg_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_SG_ENTRY_SET, dev_id, entry); + return rv; +} + +sw_error_t +fal_igmp_sg_entry_clear(a_uint32_t dev_id, fal_igmp_sg_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_SG_ENTRY_CLEAR, dev_id, entry); + return rv; +} + +sw_error_t +fal_igmp_sg_entry_show(a_uint32_t dev_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_SG_ENTRY_SHOW, dev_id); + return rv; +} + +sw_error_t +fal_igmp_sg_entry_query(a_uint32_t dev_id, fal_igmp_sg_info_t * info) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IGMP_SG_ENTRY_QUERY, dev_id, info); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_init.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_init.c new file mode 100755 index 000000000..0654a9c94 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_init.c @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +/*qca808x_start*/ +#include "sw.h" +#include "sw_ioctl.h" +#include "ssdk_init.h" +#include "fal_init.h" +#include "fal_uk_if.h" +/*qca808x_end*/ +sw_error_t +fal_reset(a_uint32_t dev_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SWITCH_RESET, dev_id); + return rv; +} +/*qca808x_start*/ +sw_error_t +fal_ssdk_cfg(a_uint32_t dev_id, ssdk_cfg_t *ssdk_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SSDK_CFG, dev_id, ssdk_cfg); + return rv; +} +/*qca808x_end*/ +sw_error_t +fal_module_func_ctrl_set(a_uint32_t dev_id, a_uint32_t module, fal_func_ctrl_t *func_ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MODULE_FUNC_CTRL_SET, dev_id, module, func_ctrl); + return rv; +} + +sw_error_t +fal_module_func_ctrl_get(a_uint32_t dev_id, a_uint32_t module, fal_func_ctrl_t *func_ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MODULE_FUNC_CTRL_GET, dev_id, module, func_ctrl); + return rv; +} + + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_interface_ctrl.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_interface_ctrl.c new file mode 100755 index 000000000..ef26a72c6 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_interface_ctrl.c @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2014, The Linux 
Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_interface_ctrl.h" +#include "fal_uk_if.h" + +sw_error_t +fal_port_3az_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_3AZ_STATUS_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_3az_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_3AZ_STATUS_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_interface_mac_mode_set(a_uint32_t dev_id, fal_port_t port_id, fal_mac_config_t * config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MAC_MODE_SET, dev_id, port_id, config); + return rv; +} + +sw_error_t +fal_interface_mac_mode_get(a_uint32_t dev_id, fal_port_t port_id, fal_mac_config_t * config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MAC_MODE_GET, dev_id, port_id, config); + return rv; +} + +sw_error_t +fal_interface_phy_mode_set(a_uint32_t dev_id, a_uint32_t phy_id, fal_phy_config_t * config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PHY_MODE_SET, dev_id, phy_id, config); + return rv; +} + +sw_error_t +fal_interface_phy_mode_get(a_uint32_t dev_id, a_uint32_t phy_id, 
fal_phy_config_t * config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PHY_MODE_GET, dev_id, phy_id, config); + return rv; +} + +sw_error_t +fal_interface_fx100_ctrl_set(a_uint32_t dev_id, fal_fx100_ctrl_config_t * config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FX100_CTRL_SET, dev_id, config); + return rv; +} + +sw_error_t +fal_interface_fx100_ctrl_get(a_uint32_t dev_id, fal_fx100_ctrl_config_t * config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FX100_CTRL_GET, dev_id, config); + return rv; +} + +sw_error_t +fal_interface_fx100_status_get(a_uint32_t dev_id, a_uint32_t *status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FX100_STATUS_GET, dev_id, status); + return rv; +} + +sw_error_t +fal_interface_mac06_exch_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MAC06_EXCH_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_interface_mac06_exch_get(a_uint32_t dev_id, a_bool_t* enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MAC06_EXCH_GET, dev_id, enable); + return rv; +} diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ip.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ip.c new file mode 100755 index 000000000..59f9cd6eb --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ip.c @@ -0,0 +1,728 @@ +/* + * Copyright (c) 2014, 2015, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_ip.h" +#include "fal_uk_if.h" + + +sw_error_t +fal_ip_host_add(a_uint32_t dev_id, fal_host_entry_t * host_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_HOST_ADD, dev_id, host_entry); + return rv; +} + +sw_error_t +fal_ip_host_del(a_uint32_t dev_id, a_uint32_t del_mode, fal_host_entry_t * host_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_HOST_DEL, dev_id, del_mode, host_entry); + return rv; +} + +sw_error_t +fal_ip_host_get(a_uint32_t dev_id, a_uint32_t get_mode, fal_host_entry_t * host_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_HOST_GET, dev_id, get_mode, host_entry); + return rv; +} + +sw_error_t +fal_ip_host_next(a_uint32_t dev_id, a_uint32_t next_mode, fal_host_entry_t * host_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_HOST_NEXT, dev_id, next_mode, host_entry); + return rv; +} + +sw_error_t +fal_ip_host_counter_bind(a_uint32_t dev_id, a_uint32_t entry_id, a_uint32_t cnt_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_HOST_COUNTER_BIND, dev_id, entry_id, cnt_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_ip_host_pppoe_bind(a_uint32_t dev_id, a_uint32_t entry_id, a_uint32_t pppoe_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_HOST_PPPOE_BIND, dev_id, entry_id, pppoe_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_ip_pt_arp_learn_set(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t flags) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PT_ARP_LEARN_SET, dev_id, port_id, flags); + return rv; +} + +sw_error_t +fal_ip_pt_arp_learn_get(a_uint32_t 
dev_id, fal_port_t port_id, a_uint32_t * flags) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PT_ARP_LEARN_GET, dev_id, port_id, flags); + return rv; +} + +sw_error_t +fal_ip_arp_learn_set(a_uint32_t dev_id, fal_arp_learn_mode_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_ARP_LEARN_SET, dev_id, (a_uint32_t)mode); + return rv; +} + +sw_error_t +fal_ip_arp_learn_get(a_uint32_t dev_id, fal_arp_learn_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_ARP_LEARN_GET, dev_id, mode); + return rv; +} + +sw_error_t +fal_ip_source_guard_set(a_uint32_t dev_id, fal_port_t port_id, fal_source_guard_mode_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_SOURCE_GUARD_SET, dev_id, port_id, (a_uint32_t)mode); + return rv; +} + +sw_error_t +fal_ip_source_guard_get(a_uint32_t dev_id, fal_port_t port_id, fal_source_guard_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_SOURCE_GUARD_GET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_ip_arp_guard_set(a_uint32_t dev_id, fal_port_t port_id, fal_source_guard_mode_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_ARP_GUARD_SET, dev_id, port_id, (a_uint32_t)mode); + return rv; +} + +sw_error_t +fal_ip_arp_guard_get(a_uint32_t dev_id, fal_port_t port_id, fal_source_guard_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_ARP_GUARD_GET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_ip_route_status_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_ROUTE_STATUS_SET, dev_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_ip_route_status_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_ROUTE_STATUS_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_ip_intf_entry_add(a_uint32_t dev_id, fal_intf_mac_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_INTF_ENTRY_ADD, dev_id, entry); + return rv; +} + +sw_error_t 
+fal_ip_intf_entry_del(a_uint32_t dev_id, a_uint32_t del_mode, fal_intf_mac_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_INTF_ENTRY_DEL, dev_id, del_mode, entry); + return rv; +} + +sw_error_t +fal_ip_intf_entry_next(a_uint32_t dev_id, a_uint32_t next_mode, fal_intf_mac_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_INTF_ENTRY_NEXT, dev_id, next_mode, entry); + return rv; +} + +sw_error_t +fal_ip_unk_source_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_UNK_SOURCE_CMD_SET, dev_id, (a_uint32_t) cmd); + return rv; +} + +sw_error_t +fal_ip_unk_source_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_UNK_SOURCE_CMD_GET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_arp_unk_source_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ARP_UNK_SOURCE_CMD_SET, dev_id, (a_uint32_t) cmd); + return rv; +} + +sw_error_t +fal_arp_unk_source_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ARP_UNK_SOURCE_CMD_GET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_ip_age_time_set(a_uint32_t dev_id, a_uint32_t * time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_AGE_TIME_SET, dev_id, time); + return rv; +} + +sw_error_t +fal_ip_age_time_get(a_uint32_t dev_id, a_uint32_t * time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_AGE_TIME_GET, dev_id, time); + return rv; +} + +sw_error_t +fal_ip_wcmp_hash_mode_set(a_uint32_t dev_id, a_uint32_t hash_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_WCMP_HASH_MODE_SET, dev_id, hash_mode); + return rv; +} + +sw_error_t +fal_ip_wcmp_hash_mode_get(a_uint32_t dev_id, a_uint32_t * hash_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_WCMP_HASH_MODE_GET, dev_id, hash_mode); + return rv; +} + +sw_error_t +fal_ip_vrf_base_addr_set(a_uint32_t dev_id, a_uint32_t vrf_id, fal_ip4_addr_t addr) +{ + sw_error_t rv; + 
+ rv = sw_uk_exec(SW_API_IP_VRF_BASE_ADDR_SET, dev_id, vrf_id, addr); + return rv; +} + +sw_error_t +fal_ip_vrf_base_addr_get(a_uint32_t dev_id, a_uint32_t vrf_id, fal_ip4_addr_t * addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_VRF_BASE_ADDR_GET, dev_id, vrf_id, addr); + return rv; +} + +sw_error_t +fal_ip_vrf_base_mask_set(a_uint32_t dev_id, a_uint32_t vrf_id, fal_ip4_addr_t addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_VRF_BASE_MASK_SET, dev_id, vrf_id, addr); + return rv; +} + +sw_error_t +fal_ip_vrf_base_mask_get(a_uint32_t dev_id, a_uint32_t vrf_id, fal_ip4_addr_t * addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_VRF_BASE_MASK_GET, dev_id, vrf_id, addr); + return rv; +} + +sw_error_t +fal_ip_default_route_set(a_uint32_t dev_id, a_uint32_t droute_id, + fal_default_route_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_DEFAULT_ROUTE_SET, dev_id, droute_id, entry); + return rv; +} + +sw_error_t +fal_ip_default_route_get(a_uint32_t dev_id, a_uint32_t droute_id, + fal_default_route_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_DEFAULT_ROUTE_GET, dev_id, droute_id, entry); + return rv; +} + +sw_error_t +fal_ip_host_route_set(a_uint32_t dev_id, a_uint32_t hroute_id, + fal_host_route_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_HOST_ROUTE_SET, dev_id, hroute_id, entry); + return rv; +} + +sw_error_t +fal_ip_host_route_get(a_uint32_t dev_id, a_uint32_t hroute_id, + fal_host_route_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_HOST_ROUTE_GET, dev_id, hroute_id, entry); + return rv; +} + +sw_error_t +fal_ip_wcmp_entry_set(a_uint32_t dev_id, a_uint32_t wcmp_id, + fal_ip_wcmp_t * wcmp) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_WCMP_ENTRY_SET, dev_id, wcmp_id, wcmp); + return rv; +} + + +sw_error_t +fal_ip_wcmp_entry_get(a_uint32_t dev_id, a_uint32_t wcmp_id, + fal_ip_wcmp_t * wcmp) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_WCMP_ENTRY_GET, dev_id, wcmp_id, wcmp); + return rv; 
+} + +sw_error_t +fal_ip_rfs_ip4_rule_set(a_uint32_t dev_id, + fal_ip4_rfs_t * rfs) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_RFS_IP4_SET, dev_id, rfs); + return rv; +} + +sw_error_t +fal_ip_rfs_ip6_rule_set(a_uint32_t dev_id, + fal_ip6_rfs_t * rfs) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_RFS_IP6_SET, dev_id, rfs); + return rv; +} + +sw_error_t +fal_ip_rfs_ip4_rule_del(a_uint32_t dev_id, + fal_ip4_rfs_t * rfs) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_RFS_IP4_DEL, dev_id, rfs); + return rv; +} + +sw_error_t +fal_ip_rfs_ip6_rule_del(a_uint32_t dev_id, + fal_ip6_rfs_t * rfs) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_RFS_IP6_DEL, dev_id, rfs); + return rv; +} + +sw_error_t +fal_default_flow_cmd_set(a_uint32_t dev_id, a_uint32_t vrf_id, + fal_flow_type_t type, fal_default_flow_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_DEFAULT_FLOW_CMD_SET, dev_id, vrf_id, type, (a_uint32_t) cmd); + return rv; +} + +sw_error_t +fal_default_flow_cmd_get(a_uint32_t dev_id, a_uint32_t vrf_id, + fal_flow_type_t type, fal_default_flow_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_DEFAULT_FLOW_CMD_GET, dev_id, vrf_id, type, cmd); + return rv; +} + +sw_error_t +fal_default_rt_flow_cmd_set(a_uint32_t dev_id, a_uint32_t vrf_id, + fal_flow_type_t type, fal_default_flow_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_DEFAULT_RT_FLOW_CMD_SET, dev_id, vrf_id, type, (a_uint32_t) cmd); + return rv; +} + +sw_error_t +fal_default_rt_flow_cmd_get(a_uint32_t dev_id, a_uint32_t vrf_id, + fal_flow_type_t type, fal_default_flow_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_DEFAULT_RT_FLOW_CMD_GET, dev_id, vrf_id, type, cmd); + return rv; +} + +sw_error_t +fal_ip_vsi_arp_sg_cfg_set(a_uint32_t dev_id, a_uint32_t vsi, + fal_arp_sg_cfg_t *arp_sg_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_VIS_ARP_SG_CFG_SET, dev_id, vsi, arp_sg_cfg); + return rv; +} + +sw_error_t +fal_ip_vsi_arp_sg_cfg_get(a_uint32_t 
dev_id, a_uint32_t vsi, + fal_arp_sg_cfg_t *arp_sg_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_VIS_ARP_SG_CFG_GET, dev_id, vsi, arp_sg_cfg); + return rv; +} + +sw_error_t +fal_ip_network_route_add(a_uint32_t dev_id, + a_uint32_t index, + fal_network_route_entry_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_NETWORK_ROUTE_ADD, dev_id, index, entry); + return rv; +} + +sw_error_t +fal_ip_network_route_get(a_uint32_t dev_id, + a_uint32_t index, a_uint8_t type, + fal_network_route_entry_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_NETWORK_ROUTE_GET, dev_id, index, type, entry); + return rv; +} + +sw_error_t +fal_ip_intf_set( + a_uint32_t dev_id, + a_uint32_t index, + fal_intf_entry_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_INTF_SET, dev_id, index, entry); + return rv; +} + +sw_error_t +fal_ip_intf_get( + a_uint32_t dev_id, + a_uint32_t index, + fal_intf_entry_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_INTF_GET, dev_id, index, entry); + return rv; +} + +sw_error_t +fal_ip_vsi_intf_set(a_uint32_t dev_id, a_uint32_t vsi, fal_intf_id_t *id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_VSI_INTF_SET, dev_id, vsi, id); + return rv; +} + +sw_error_t +fal_ip_vsi_intf_get(a_uint32_t dev_id, a_uint32_t vsi, fal_intf_id_t *id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_VSI_INTF_GET, dev_id, vsi, id); + return rv; +} + +sw_error_t +fal_ip_port_intf_set(a_uint32_t dev_id, fal_port_t port_id, fal_intf_id_t *id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PORT_INTF_SET, dev_id, port_id, id); + return rv; +} + +sw_error_t +fal_ip_port_intf_get(a_uint32_t dev_id, fal_port_t port_id, fal_intf_id_t *id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PORT_INTF_GET, dev_id, port_id, id); + return rv; +} + +sw_error_t +fal_ip_nexthop_set(a_uint32_t dev_id, a_uint32_t index, + fal_ip_nexthop_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_NEXTHOP_SET, dev_id, index, entry); + return rv; 
+} + +sw_error_t +fal_ip_nexthop_get(a_uint32_t dev_id, a_uint32_t index, + fal_ip_nexthop_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_NEXTHOP_GET, dev_id, index, entry); + return rv; +} + +sw_error_t +fal_ip_port_sg_cfg_set(a_uint32_t dev_id, fal_port_t port_id, + fal_sg_cfg_t *sg_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PORT_SG_SET, dev_id, port_id, sg_cfg); + return rv; +} + +sw_error_t +fal_ip_port_sg_cfg_get(a_uint32_t dev_id, fal_port_t port_id, + fal_sg_cfg_t *sg_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PORT_SG_GET, dev_id, port_id, sg_cfg); + return rv; +} + +sw_error_t +fal_ip_vsi_sg_cfg_get(a_uint32_t dev_id, a_uint32_t vsi, + fal_sg_cfg_t *sg_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_VSI_SG_GET, dev_id, vsi, sg_cfg); + return rv; +} + +sw_error_t +fal_ip_vsi_sg_cfg_set(a_uint32_t dev_id, a_uint32_t vsi, + fal_sg_cfg_t *sg_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_VSI_SG_SET, dev_id, vsi, sg_cfg); + return rv; +} + +sw_error_t +fal_ip_pub_addr_set(a_uint32_t dev_id, a_uint32_t index, + fal_ip_pub_addr_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PUB_IP_SET, dev_id, index, entry); + return rv; +} + +sw_error_t +fal_ip_network_route_del(a_uint32_t dev_id, a_uint32_t index, a_uint8_t type) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_NETWORK_ROUTE_DEL, dev_id, index, type); + return rv; +} + +sw_error_t +fal_ip_pub_addr_get(a_uint32_t dev_id, a_uint32_t index, fal_ip_pub_addr_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PUB_IP_GET, dev_id, index, entry); + return rv; +} + +sw_error_t + fal_ip_port_macaddr_set(a_uint32_t dev_id, fal_port_t port_id, + fal_macaddr_entry_t *macaddr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PORT_MAC_SET, dev_id, port_id, macaddr); + return rv; +} + +sw_error_t +fal_ip_port_macaddr_get(a_uint32_t dev_id, fal_port_t port_id, + fal_macaddr_entry_t *macaddr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PORT_MAC_GET, 
dev_id, port_id, macaddr); + return rv; +} + +sw_error_t +fal_ip_route_mismatch_action_set(a_uint32_t dev_id, fal_fwd_cmd_t action) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_ROUTE_MISS_SET, dev_id, (a_uint32_t)action); + return rv; +} + +sw_error_t +fal_ip_route_mismatch_action_get(a_uint32_t dev_id, fal_fwd_cmd_t *action) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_ROUTE_MISS_GET, dev_id, action); + return rv; +} + +sw_error_t +fal_ip_port_arp_sg_cfg_set(a_uint32_t dev_id, fal_port_t port_id, + fal_arp_sg_cfg_t *arp_sg_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PORT_ARP_SG_SET, dev_id, port_id, arp_sg_cfg); + return rv; +} + +sw_error_t +fal_ip_port_arp_sg_cfg_get(a_uint32_t dev_id, fal_port_t port_id, + fal_arp_sg_cfg_t *arp_sg_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_PORT_ARP_SG_GET, dev_id, port_id, arp_sg_cfg); + return rv; +} + +sw_error_t +fal_ip_vsi_mc_mode_set(a_uint32_t dev_id, a_uint32_t vsi, + fal_mc_mode_cfg_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_VSI_MC_MODE_SET, dev_id, vsi, cfg); + return rv; +} + +sw_error_t +fal_ip_vsi_mc_mode_get(a_uint32_t dev_id, a_uint32_t vsi, + fal_mc_mode_cfg_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_IP_VSI_MC_MODE_GET, dev_id, vsi, cfg); + return rv; +} + +sw_error_t +fal_ip_global_ctrl_get(a_uint32_t dev_id, fal_ip_global_cfg_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_GLOBAL_CTRL_GET, dev_id, cfg); + return rv; +} + +sw_error_t +fal_ip_global_ctrl_set(a_uint32_t dev_id, fal_ip_global_cfg_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_GLOBAL_CTRL_SET, dev_id, cfg); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_leaky.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_leaky.c new file mode 100755 index 000000000..917fd4497 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_leaky.c @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_leaky.h" +#include "fal_uk_if.h" + +sw_error_t +fal_uc_leaky_mode_set(a_uint32_t dev_id, fal_leaky_ctrl_mode_t ctrl_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UC_LEAKY_MODE_SET, dev_id, ctrl_mode); + return rv; +} + +sw_error_t +fal_uc_leaky_mode_get(a_uint32_t dev_id, fal_leaky_ctrl_mode_t * ctrl_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UC_LEAKY_MODE_GET, dev_id, ctrl_mode); + return rv; +} + +sw_error_t +fal_mc_leaky_mode_set(a_uint32_t dev_id, fal_leaky_ctrl_mode_t ctrl_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MC_LEAKY_MODE_SET, dev_id, ctrl_mode); + return rv; +} + +sw_error_t +fal_mc_leaky_mode_get(a_uint32_t dev_id, fal_leaky_ctrl_mode_t * ctrl_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MC_LEAKY_MODE_GET, dev_id, ctrl_mode); + return rv; +} + +sw_error_t +fal_port_arp_leaky_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ARP_LEAKY_MODE_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_arp_leaky_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ARP_LEAKY_MODE_GET, dev_id, port_id, enable); + 
return rv; +} + +sw_error_t +fal_port_uc_leaky_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_UC_LEAKY_MODE_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_uc_leaky_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_UC_LEAKY_MODE_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_mc_leaky_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MC_LEAKY_MODE_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_mc_leaky_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MC_LEAKY_MODE_GET, dev_id, port_id, enable); + return rv; +} diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_led.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_led.c new file mode 100755 index 000000000..996ea2a90 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_led.c @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_led.h" +#include "fal_uk_if.h" + + +sw_error_t +fal_led_ctrl_pattern_set(a_uint32_t dev_id, led_pattern_group_t group, + led_pattern_id_t id, led_ctrl_pattern_t * pattern) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_LED_PATTERN_SET, dev_id, group, + id, pattern); + return rv; +} + +sw_error_t +fal_led_ctrl_pattern_get(a_uint32_t dev_id, led_pattern_group_t group, + led_pattern_id_t id, led_ctrl_pattern_t * pattern) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_LED_PATTERN_GET, dev_id, group, + id, pattern); + return rv; +} diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_mib.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_mib.c new file mode 100755 index 000000000..aa24b7413 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_mib.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_mib.h" +#include "fal_uk_if.h" + +sw_error_t +fal_get_mib_info(a_uint32_t dev_id, fal_port_t port_id, + fal_mib_info_t * mib_Info) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MIB_GET, dev_id, port_id, mib_Info); + return rv; +} + +sw_error_t +fal_get_xgmib_info(a_uint32_t dev_id, fal_port_t port_id, + fal_xgmib_info_t * mib_Info) +{ + sw_error_t rv; + rv = sw_uk_exec(SW_API_PT_XGMIB_GET, dev_id, port_id, mib_Info); + return rv; +} +sw_error_t +fal_mib_status_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIB_STATUS_SET, dev_id, (a_uint32_t)enable); + return rv; +} + +sw_error_t +fal_mib_status_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIB_STATUS_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_mib_port_flush_counters(a_uint32_t dev_id, fal_port_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MIB_FLUSH_COUNTERS, dev_id, port_id); + return rv; +} + +sw_error_t +fal_mib_cpukeep_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIB_CPU_KEEP_SET, dev_id, (a_uint32_t)enable); + return rv; +} + +sw_error_t +fal_mib_cpukeep_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIB_CPU_KEEP_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_mib_counter_get(a_uint32_t dev_id, fal_port_t port_id, + fal_mib_counter_t * mib_Info) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MIB_COUNTER_GET, dev_id, port_id, mib_Info); + return rv; +} diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_mirror.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_mirror.c new file mode 100755 index 000000000..32b03a52a --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_mirror.c @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_mirror.h" +#include "fal_uk_if.h" + +sw_error_t +fal_mirr_analysis_port_set(a_uint32_t dev_id, fal_port_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIRROR_ANALY_PT_SET, dev_id, port_id); + return rv; +} + +sw_error_t +fal_mirr_analysis_port_get(a_uint32_t dev_id, fal_port_t * port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIRROR_ANALY_PT_GET, dev_id, + port_id); + return rv; +} + +sw_error_t +fal_mirr_port_in_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIRROR_IN_PT_SET, dev_id, port_id, + (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_mirr_port_in_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIRROR_IN_PT_GET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_mirr_port_eg_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIRROR_EG_PT_SET, dev_id, port_id, + (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_mirr_port_eg_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIRROR_EG_PT_GET, 
dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_mirr_analysis_config_set(a_uint32_t dev_id, fal_mirr_direction_t direction, fal_mirr_analysis_config_t * config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIRROR_ANALYSIS_CONFIG_SET, dev_id, direction, config); + return rv; +} + +sw_error_t +fal_mirr_analysis_config_get(a_uint32_t dev_id, fal_mirr_direction_t direction, fal_mirr_analysis_config_t * config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MIRROR_ANALYSIS_CONFIG_GET, dev_id, direction, config); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_misc.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_misc.c new file mode 100755 index 000000000..de7d21148 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_misc.c @@ -0,0 +1,500 @@ +/* + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_misc.h" +#include "fal_uk_if.h" + +sw_error_t +fal_arp_status_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ARP_STATUS_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_arp_status_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ARP_STATUS_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_frame_max_size_set(a_uint32_t dev_id, a_uint32_t size) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FRAME_MAX_SIZE_SET, dev_id, size); + return rv; +} + +sw_error_t +fal_frame_max_size_get(a_uint32_t dev_id, a_uint32_t * size) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FRAME_MAX_SIZE_GET, dev_id, size); + return rv; +} + +sw_error_t +fal_port_unk_sa_cmd_set(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_UNK_SA_CMD_SET, dev_id, port_id, + cmd); + return rv; +} + +sw_error_t +fal_port_unk_sa_cmd_get(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_UNK_SA_CMD_GET, dev_id, port_id, + cmd); + return rv; +} + +sw_error_t +fal_port_unk_uc_filter_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_UNK_UC_FILTER_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_unk_uc_filter_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_UNK_UC_FILTER_GET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_unk_mc_filter_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_UNK_MC_FILTER_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_unk_mc_filter_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = 
sw_uk_exec(SW_API_PT_UNK_MC_FILTER_GET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_bc_filter_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_BC_FILTER_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_bc_filter_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_BC_FILTER_GET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_cpu_port_status_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_CPU_PORT_STATUS_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_cpu_port_status_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_CPU_PORT_STATUS_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_bc_to_cpu_port_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BC_TO_CPU_PORT_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_bc_to_cpu_port_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BC_TO_CPU_PORT_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_port_dhcp_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DHCP_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_dhcp_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DHCP_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_arp_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ARP_CMD_SET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_arp_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_ARP_CMD_GET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_eapol_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + 
rv = sw_uk_exec(SW_API_EAPOL_CMD_SET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_eapol_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_EAPOL_CMD_GET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_eapol_status_set(a_uint32_t dev_id, a_uint32_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_EAPOL_STATUS_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_eapol_status_get(a_uint32_t dev_id, a_uint32_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_EAPOL_STATUS_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_ripv1_status_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RIPV1_STATUS_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_ripv1_status_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RIPV1_STATUS_GET, dev_id, enable); + return rv; +} + + +sw_error_t +fal_port_arp_req_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_ARP_REQ_STATUS_SET, dev_id, port_id, enable); + return rv; +} + + +sw_error_t +fal_port_arp_req_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_ARP_REQ_STATUS_GET, dev_id, port_id, enable); + return rv; +} + + +sw_error_t +fal_port_arp_ack_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_ARP_ACK_STATUS_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_arp_ack_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_ARP_ACK_STATUS_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_intr_mask_set(a_uint32_t dev_id, a_uint32_t intr_mask) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_INTR_MASK_SET, dev_id, intr_mask); + return rv; +} 
+ +sw_error_t +fal_intr_mask_get(a_uint32_t dev_id, a_uint32_t * intr_mask) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_INTR_MASK_GET, dev_id, intr_mask); + return rv; +} + +sw_error_t +fal_intr_status_get(a_uint32_t dev_id, a_uint32_t * intr_status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_INTR_STATUS_GET, dev_id, intr_status); + return rv; +} + +sw_error_t +fal_intr_status_clear(a_uint32_t dev_id, a_uint32_t intr_status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_INTR_STATUS_CLEAR, dev_id, intr_status); + return rv; +} + +sw_error_t +fal_intr_port_link_mask_set(a_uint32_t dev_id, a_uint32_t port_id, a_uint32_t intr_mask) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_INTR_PORT_LINK_MASK_SET, dev_id, port_id, intr_mask); + return rv; +} + +sw_error_t +fal_intr_port_link_mask_get(a_uint32_t dev_id, a_uint32_t port_id, a_uint32_t * intr_mask) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_INTR_PORT_LINK_MASK_GET, dev_id, port_id, intr_mask); + return rv; +} + +sw_error_t +fal_intr_port_link_status_get(a_uint32_t dev_id, a_uint32_t port_id, a_uint32_t * intr_mask) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_INTR_PORT_LINK_STATUS_GET, dev_id, port_id, intr_mask); + return rv; +} + +sw_error_t +fal_intr_mask_mac_linkchg_set(a_uint32_t dev_id, a_uint32_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_INTR_MASK_MAC_LINKCHG_SET, dev_id, port_id, enable); + return rv; +} + + +sw_error_t +fal_intr_mask_mac_linkchg_get(a_uint32_t dev_id, a_uint32_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_INTR_MASK_MAC_LINKCHG_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_intr_status_mac_linkchg_get(a_uint32_t dev_id, fal_pbmp_t *port_bitmap) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_INTR_STATUS_MAC_LINKCHG_GET, dev_id, port_bitmap); + return rv; +} + +sw_error_t +fal_cpu_vid_en_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_CPU_VID_EN_SET, dev_id, 
enable); + return rv; +} + +sw_error_t +fal_cpu_vid_en_get(a_uint32_t dev_id, a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_CPU_VID_EN_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_intr_status_mac_linkchg_clear(a_uint32_t dev_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_INTR_STATUS_MAC_LINKCHG_CLEAR, dev_id); + return rv; +} + +sw_error_t +fal_global_macaddr_set(a_uint32_t dev_id, fal_mac_addr_t * addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_GLOBAL_MACADDR_SET, dev_id, addr); + return rv; +} + +sw_error_t +fal_global_macaddr_get(a_uint32_t dev_id, fal_mac_addr_t * addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_GLOBAL_MACADDR_GET, dev_id, addr); + return rv; +} + +sw_error_t +fal_lldp_status_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_LLDP_STATUS_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_lldp_status_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_LLDP_STATUS_GET, dev_id, enable); + return rv; +} + + +sw_error_t +fal_frame_crc_reserve_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FRAME_CRC_RESERVE_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_frame_crc_reserve_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FRAME_CRC_RESERVE_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_debug_port_counter_enable(a_uint32_t dev_id, fal_port_t port_id, fal_counter_en_t * cnt_en) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_DEBUG_PORT_COUNTER_ENABLE, dev_id, port_id, cnt_en); + return rv; +} + +sw_error_t +fal_debug_port_counter_status_get(a_uint32_t dev_id, fal_port_t port_id, fal_counter_en_t * cnt_en) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_DEBUG_PORT_COUNTER_STATUS_GET, dev_id, port_id, cnt_en); + return rv; +} diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_nat.c 
b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_nat.c new file mode 100755 index 000000000..ec5d0586b --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_nat.c @@ -0,0 +1,360 @@ +/* + * Copyright (c) 2014, 2015, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_nat.h" +#include "fal_uk_if.h" + +sw_error_t +fal_nat_add(a_uint32_t dev_id, fal_nat_entry_t * nat_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_ADD, dev_id, nat_entry); + return rv; +} + + +sw_error_t +fal_nat_del(a_uint32_t dev_id, a_uint32_t del_mode, fal_nat_entry_t * nat_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_DEL, dev_id, del_mode, nat_entry); + return rv; +} + + +sw_error_t +fal_nat_get(a_uint32_t dev_id, a_uint32_t get_mode, fal_nat_entry_t * nat_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_GET, dev_id, get_mode, nat_entry); + return rv; +} + +sw_error_t +fal_nat_next(a_uint32_t dev_id, a_uint32_t next_mode, fal_nat_entry_t * nat_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_NEXT, dev_id, next_mode, nat_entry); + return rv; +} + +sw_error_t +fal_nat_counter_bind(a_uint32_t dev_id, a_uint32_t entry_id, a_uint32_t cnt_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_COUNTER_BIND, dev_id, entry_id, cnt_id, enable); + return rv; +} + + +sw_error_t +fal_napt_add(a_uint32_t dev_id, fal_napt_entry_t * napt_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAPT_ADD, dev_id, napt_entry); + return rv; +} + +sw_error_t +fal_napt_del(a_uint32_t dev_id, a_uint32_t del_mode, fal_napt_entry_t * napt_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAPT_DEL, dev_id, del_mode, napt_entry); + return rv; +} + +sw_error_t +fal_napt_get(a_uint32_t dev_id, a_uint32_t get_mode, fal_napt_entry_t * napt_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAPT_GET, dev_id, get_mode, napt_entry); + return rv; +} + +sw_error_t +fal_napt_next(a_uint32_t dev_id, a_uint32_t next_mode, fal_napt_entry_t * napt_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAPT_NEXT, dev_id, next_mode, napt_entry); + return rv; +} + +sw_error_t +fal_napt_counter_bind(a_uint32_t dev_id, a_uint32_t entry_id, a_uint32_t cnt_id, a_bool_t enable) +{ + 
sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAPT_COUNTER_BIND, dev_id, entry_id, cnt_id, enable); + return rv; +} + +sw_error_t +fal_flow_add(a_uint32_t dev_id, fal_napt_entry_t * napt_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_ADD, dev_id, napt_entry); + return rv; +} + +sw_error_t +fal_flow_del(a_uint32_t dev_id, a_uint32_t del_mode, fal_napt_entry_t * napt_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_DEL, dev_id, del_mode, napt_entry); + return rv; +} + +sw_error_t +fal_flow_get(a_uint32_t dev_id, a_uint32_t get_mode, fal_napt_entry_t * napt_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_GET, dev_id, get_mode, napt_entry); + return rv; +} + +sw_error_t +fal_flow_next(a_uint32_t dev_id, a_uint32_t next_mode, fal_napt_entry_t * napt_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_NEXT, dev_id, next_mode, napt_entry); + return rv; +} + +sw_error_t +fal_flow_counter_bind(a_uint32_t dev_id, a_uint32_t entry_id, a_uint32_t cnt_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_COUNTER_BIND, dev_id, entry_id, cnt_id, enable); + return rv; +} + +sw_error_t +fal_nat_status_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_STATUS_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_nat_status_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_STATUS_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_nat_hash_mode_set(a_uint32_t dev_id, a_uint32_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_HASH_MODE_SET, dev_id, mode); + return rv; +} + +sw_error_t +fal_nat_hash_mode_get(a_uint32_t dev_id, a_uint32_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_HASH_MODE_GET, dev_id, mode); + return rv; +} + +sw_error_t +fal_napt_status_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAPT_STATUS_SET, dev_id, enable); + return rv; +} + +sw_error_t 
+fal_napt_status_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAPT_STATUS_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_napt_mode_set(a_uint32_t dev_id, fal_napt_mode_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAPT_MODE_SET, dev_id, mode); + return rv; +} + +sw_error_t +fal_napt_mode_get(a_uint32_t dev_id, fal_napt_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAPT_MODE_GET, dev_id, mode); + return rv; +} + +sw_error_t +fal_nat_prv_base_addr_set(a_uint32_t dev_id, fal_ip4_addr_t addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PRV_BASE_ADDR_SET, dev_id, addr); + return rv; +} + +sw_error_t +fal_nat_prv_base_addr_get(a_uint32_t dev_id, fal_ip4_addr_t * addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PRV_BASE_ADDR_GET, dev_id, addr); + return rv; +} + +sw_error_t +fal_nat_prv_base_mask_set(a_uint32_t dev_id, fal_ip4_addr_t addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PRV_BASE_MASK_SET, dev_id, addr); + return rv; +} + +sw_error_t +fal_nat_prv_base_mask_get(a_uint32_t dev_id, fal_ip4_addr_t * addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PRV_BASE_MASK_GET, dev_id, addr); + return rv; +} + +sw_error_t +fal_nat_prv_addr_mode_set(a_uint32_t dev_id, a_bool_t map_en) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PRV_ADDR_MODE_SET, dev_id, map_en); + return rv; +} + +sw_error_t +fal_nat_prv_addr_mode_get(a_uint32_t dev_id, a_bool_t * map_en) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PRV_ADDR_MODE_GET, dev_id, map_en); + return rv; +} + +sw_error_t +fal_nat_pub_addr_add(a_uint32_t dev_id, fal_nat_pub_addr_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PUB_ADDR_ENTRY_ADD, dev_id, entry); + return rv; +} + +sw_error_t +fal_nat_pub_addr_del(a_uint32_t dev_id, a_uint32_t del_mode, fal_nat_pub_addr_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PUB_ADDR_ENTRY_DEL, dev_id, del_mode, entry); + return rv; +} + +sw_error_t 
+fal_nat_pub_addr_next(a_uint32_t dev_id, a_uint32_t next_mode, fal_nat_pub_addr_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PUB_ADDR_ENTRY_NEXT, dev_id, next_mode, entry); + return rv; +} + +sw_error_t +fal_nat_unk_session_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_UNK_SESSION_CMD_SET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_nat_unk_session_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_UNK_SESSION_CMD_GET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_nat_global_set(a_uint32_t dev_id, a_bool_t enable, + a_bool_t sync_cnt_enable, a_uint32_t portbmp) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NAT_GLOBAL_SET, dev_id, enable, + sync_cnt_enable, portbmp); + return rv; +} + +sw_error_t +fal_flow_cookie_set(a_uint32_t dev_id, fal_flow_cookie_t * flow_cookie) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_COOKIE_SET, dev_id, flow_cookie); + return rv; +} + +sw_error_t +fal_flow_rfs_set(a_uint32_t dev_id, a_uint8_t action, fal_flow_rfs_t * rfs) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_RFS_SET, dev_id, action, rfs); + return rv; +} + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_policer.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_policer.c new file mode 100755 index 000000000..3897562ad --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_policer.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_policer.h" +#include "fal_uk_if.h" + + +sw_error_t +fal_policer_timeslot_set(a_uint32_t dev_id, a_uint32_t timeslot) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_POLICER_TIMESLOT_SET, dev_id, timeslot); + return rv; +} + +sw_error_t +fal_policer_timeslot_get(a_uint32_t dev_id, a_uint32_t *timeslot) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_POLICER_TIMESLOT_GET, dev_id, timeslot); + return rv; +} + +sw_error_t +fal_port_policer_counter_get(a_uint32_t dev_id, fal_port_t port_id, + fal_policer_counter_t *counter) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_POLICER_PORT_COUNTER_GET, dev_id, port_id, + counter); + return rv; +} + +sw_error_t +fal_acl_policer_counter_get(a_uint32_t dev_id, a_uint32_t index, + fal_policer_counter_t *counter) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_POLICER_ACL_COUNTER_GET, dev_id, index, + counter); + return rv; +} + +sw_error_t +fal_port_policer_compensation_byte_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t length) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_POLICER_COMPENSATION_SET, dev_id, port_id, length); + return rv; +} + +sw_error_t +fal_port_policer_compensation_byte_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t *length) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_POLICER_COMPENSATION_GET, dev_id, port_id, + length); + return rv; +} + +sw_error_t +fal_port_policer_entry_set(a_uint32_t dev_id, fal_port_t port_id, + fal_policer_config_t *policer, fal_policer_action_t *action) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_POLICER_PORT_ENTRY_SET, dev_id, port_id, + policer, action); + return rv; +} 
+ +sw_error_t +fal_port_policer_entry_get(a_uint32_t dev_id, fal_port_t port_id, + fal_policer_config_t *policer, fal_policer_action_t *action) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_POLICER_PORT_ENTRY_GET, dev_id, port_id, + policer, action); + return rv; +} + +sw_error_t +fal_acl_policer_entry_set(a_uint32_t dev_id, a_uint32_t index, + fal_policer_config_t *policer, fal_policer_action_t *action) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_POLICER_ACL_ENTRY_SET, dev_id, index, + policer, action); + return rv; +} + +sw_error_t +fal_acl_policer_entry_get(a_uint32_t dev_id, a_uint32_t index, + fal_policer_config_t *policer, fal_policer_action_t *action) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_POLICER_ACL_ENTRY_GET, dev_id, index, + policer, action); + return rv; +} + +sw_error_t +fal_policer_global_counter_get(a_uint32_t dev_id,fal_policer_global_counter_t *counter) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_POLICER_GLOBAL_COUNTER_GET, dev_id, + counter); + return rv; +} + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_port_ctrl.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_port_ctrl.c new file mode 100755 index 000000000..018b38535 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_port_ctrl.c @@ -0,0 +1,956 @@ +/* + * Copyright (c) 2014,2016-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +/*qca808x_start*/ +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_port_ctrl.h" +#include "fal_uk_if.h" + +sw_error_t +fal_port_duplex_set(a_uint32_t dev_id, fal_port_t port_id, + fal_port_duplex_t duplex) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DUPLEX_SET, dev_id, port_id, + duplex); + return rv; +} + +sw_error_t +fal_port_duplex_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_duplex_t * pduplex) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DUPLEX_GET, dev_id, port_id, pduplex); + return rv; +} + +sw_error_t +fal_port_speed_set(a_uint32_t dev_id, fal_port_t port_id, + fal_port_speed_t speed) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_SPEED_SET, dev_id, port_id, + speed); + return rv; +} + +sw_error_t +fal_port_speed_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_speed_t * pspeed) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_SPEED_GET, dev_id, port_id, pspeed); + return rv; +} + +sw_error_t +fal_port_autoneg_status_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_AN_GET, dev_id, port_id, status); + return rv; +} + +sw_error_t +fal_port_autoneg_enable(a_uint32_t dev_id, fal_port_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_AN_ENABLE, dev_id, port_id); + return rv; +} + +sw_error_t +fal_port_autoneg_restart(a_uint32_t dev_id, fal_port_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_AN_RESTART, dev_id, port_id); + return rv; +} + +sw_error_t +fal_port_autoneg_adv_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t autoadv) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_AN_ADV_SET, 
dev_id, port_id, autoadv); + return rv; +} + +sw_error_t +fal_port_autoneg_adv_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * autoadv) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_AN_ADV_GET, dev_id, port_id, autoadv); + return rv; +} +/*qca808x_end*/ +sw_error_t +fal_port_hdr_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_HDR_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_hdr_status_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_HDR_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_flowctrl_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FLOWCTRL_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_flowctrl_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FLOWCTRL_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_flowctrl_forcemode_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FLOWCTRL_MODE_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_flowctrl_forcemode_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FLOWCTRL_MODE_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_powersave_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_POWERSAVE_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_powersave_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_POWERSAVE_GET, dev_id, port_id, enable); + return rv; +} +/*qca808x_start*/ +sw_error_t +fal_port_hibernate_set(a_uint32_t dev_id, fal_port_t 
port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_HIBERNATE_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_hibernate_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_HIBERNATE_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_cdt(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t mdi_pair, + a_uint32_t *cable_status, a_uint32_t *cable_len) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_CDT, dev_id, port_id, mdi_pair, + cable_status, cable_len); + return rv; +} +/*qca808x_end*/ +sw_error_t +fal_port_rxhdr_mode_set(a_uint32_t dev_id, fal_port_t port_id, + fal_port_header_mode_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_RXHDR_SET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_port_rxhdr_mode_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_header_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_RXHDR_GET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_port_txhdr_mode_set(a_uint32_t dev_id, fal_port_t port_id, + fal_port_header_mode_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_TXHDR_SET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_port_txhdr_mode_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_header_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_TXHDR_GET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_header_type_set(a_uint32_t dev_id, a_bool_t enable, a_uint32_t type) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_HEADER_TYPE_SET, dev_id, enable, type); + return rv; +} + +sw_error_t +fal_header_type_get(a_uint32_t dev_id, a_bool_t * enable, a_uint32_t * type) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_HEADER_TYPE_GET, dev_id, enable, type); + return rv; +} + +sw_error_t +fal_port_txmac_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = 
sw_uk_exec(SW_API_TXMAC_STATUS_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_txmac_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TXMAC_STATUS_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_rxmac_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RXMAC_STATUS_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_rxmac_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RXMAC_STATUS_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_txfc_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TXFC_STATUS_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_txfc_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TXFC_STATUS_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_rxfc_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RXFC_STATUS_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_rxfc_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RXFC_STATUS_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_bp_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BP_STATUS_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_bp_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_BP_STATUS_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_link_forcemode_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t 
enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_LINK_MODE_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_link_forcemode_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_LINK_MODE_GET, dev_id, port_id, enable); + return rv; +} +/*qca808x_start*/ +sw_error_t +fal_port_link_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_LINK_STATUS_GET, dev_id, port_id, status); + return rv; +} +/*qca808x_end*/ +sw_error_t +fal_ports_link_status_get(a_uint32_t dev_id, a_uint32_t * status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTS_LINK_STATUS_GET, dev_id, status); + return rv; +} + +sw_error_t +fal_port_mac_loopback_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MAC_LOOPBACK_SET, dev_id, port_id, enable); + return rv; +} + + +sw_error_t +fal_port_mac_loopback_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MAC_LOOPBACK_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_congestion_drop_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t queue_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_CONGESTION_DROP_SET, dev_id, port_id, queue_id, enable); + return rv; +} + + +sw_error_t +fal_port_congestion_drop_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t queue_id, a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_CONGESTION_DROP_GET, dev_id, port_id, queue_id, enable); + return rv; +} + +sw_error_t +fal_ring_flow_ctrl_thres_set(a_uint32_t dev_id, a_uint32_t ring_id, + a_uint8_t on_thres, a_uint8_t off_thres) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_RING_FLOW_CTRL_THRES_SET, dev_id, ring_id, on_thres, off_thres); + return rv; +} + + +sw_error_t +fal_ring_flow_ctrl_thres_get(a_uint32_t dev_id, a_uint32_t ring_id, 
+ a_uint8_t *on_thres, a_uint8_t *off_thres) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_RING_FLOW_CTRL_THRES_GET, dev_id, ring_id, on_thres, off_thres); + return rv; +} +/*qca808x_start*/ +sw_error_t +fal_port_8023az_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_8023AZ_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_8023az_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_8023AZ_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_mdix_set(a_uint32_t dev_id, fal_port_t port_id, + fal_port_mdix_mode_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MDIX_SET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_port_mdix_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_mdix_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MDIX_GET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_port_mdix_status_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_mdix_status_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MDIX_STATUS_GET, dev_id, port_id, mode); + return rv; +} +/*qca808x_end*/ +sw_error_t +fal_port_combo_prefer_medium_set(a_uint32_t dev_id, fal_port_t port_id, + fal_port_medium_t medium) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_COMBO_PREFER_MEDIUM_SET, dev_id, port_id, medium); + return rv; +} + +sw_error_t +fal_port_combo_prefer_medium_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_medium_t * medium) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_COMBO_PREFER_MEDIUM_GET, dev_id, port_id, medium); + return rv; +} + +sw_error_t +fal_port_combo_medium_status_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_medium_t * medium) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_COMBO_MEDIUM_STATUS_GET, dev_id, port_id, medium); + return rv; +} + +sw_error_t +fal_port_combo_fiber_mode_set(a_uint32_t dev_id, 
fal_port_t port_id, + fal_port_fiber_mode_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_COMBO_FIBER_MODE_SET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_port_combo_fiber_mode_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_fiber_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_COMBO_FIBER_MODE_GET, dev_id, port_id, mode); + return rv; +} +/*qca808x_start*/ +sw_error_t +fal_port_local_loopback_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_LOCAL_LOOPBACK_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_local_loopback_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_LOCAL_LOOPBACK_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_remote_loopback_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_REMOTE_LOOPBACK_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_remote_loopback_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_REMOTE_LOOPBACK_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_reset(a_uint32_t dev_id, fal_port_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_RESET, dev_id, port_id); + return rv; +} + +sw_error_t +fal_port_power_off(a_uint32_t dev_id, fal_port_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_POWER_OFF, dev_id, port_id); + return rv; +} + +sw_error_t +fal_port_power_on(a_uint32_t dev_id, fal_port_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_POWER_ON, dev_id, port_id); + return rv; +} + + sw_error_t + fal_port_magic_frame_mac_set (a_uint32_t dev_id, fal_port_t port_id, + fal_mac_addr_t * mac) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MAGIC_FRAME_MAC_SET, dev_id, port_id, mac); + return rv; + +} + + 
sw_error_t + fal_port_magic_frame_mac_get (a_uint32_t dev_id, fal_port_t port_id, + fal_mac_addr_t * mac) +{ + + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MAGIC_FRAME_MAC_GET, dev_id, port_id, mac); + return rv; + + +} + sw_error_t + fal_port_phy_id_get (a_uint32_t dev_id, fal_port_t port_id, + a_uint16_t * org_id, a_uint16_t * rev_id) + { + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_PHY_ID_GET, dev_id, port_id, org_id, rev_id); + return rv; + } + sw_error_t + fal_port_wol_status_set (a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_WOL_STATUS_SET, dev_id, port_id, enable); + return rv; + + } + sw_error_t + fal_port_wol_status_get (a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) + + { + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_WOL_STATUS_GET, dev_id, port_id,enable); + return rv; + } + /*qca808x_end*/ +sw_error_t +fal_port_interface_mode_set (a_uint32_t dev_id, fal_port_t port_id, + fal_port_interface_mode_t mode) +{ + sw_error_t rv; + rv = sw_uk_exec(SW_API_PT_INTERFACE_MODE_SET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_port_interface_mode_get (a_uint32_t dev_id, fal_port_t port_id, + fal_port_interface_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_INTERFACE_MODE_GET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_port_interface_mode_apply (a_uint32_t dev_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_INTERFACE_MODE_APPLY, dev_id); + return rv; +} +/*qca808x_start*/ +sw_error_t +fal_port_interface_mode_status_get (a_uint32_t dev_id, fal_port_t port_id, + fal_port_interface_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_INTERFACE_MODE_STATUS_GET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_debug_phycounter_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_DEBUG_PHYCOUNTER_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t 
+fal_debug_phycounter_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_DEBUG_PHYCOUNTER_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_debug_phycounter_show(a_uint32_t dev_id, fal_port_t port_id, + fal_port_counter_info_t * port_counter_info) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_DEBUG_PHYCOUNTER_SHOW, dev_id, port_id, port_counter_info); + return rv; +} +/*qca808x_end*/ +sw_error_t +fal_port_mtu_set(a_uint32_t dev_id, fal_port_t port_id, + fal_mtu_ctrl_t *ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MTU_SET, dev_id, port_id, ctrl); + return rv; +} + +sw_error_t +fal_port_mtu_get(a_uint32_t dev_id, fal_port_t port_id, + fal_mtu_ctrl_t *ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MTU_GET, dev_id, port_id, ctrl); + return rv; +} + +sw_error_t +fal_port_mru_set(a_uint32_t dev_id, fal_port_t port_id, + fal_mru_ctrl_t *ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MRU_SET, dev_id, port_id, ctrl); + return rv; +} + +sw_error_t +fal_port_mru_get(a_uint32_t dev_id, fal_port_t port_id, + fal_mru_ctrl_t *ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MRU_GET, dev_id, port_id, ctrl); + return rv; +} + +sw_error_t +fal_port_source_filter_enable(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_SOURCE_FILTER_SET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_source_filter_status_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_SOURCE_FILTER_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_max_frame_size_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t max_frame) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FRAME_MAX_SIZE_SET, dev_id, port_id, + max_frame); + return rv; +} + +sw_error_t +fal_port_max_frame_size_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t* 
max_frame) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FRAME_MAX_SIZE_GET, dev_id, port_id, max_frame); + return rv; +} +sw_error_t +fal_port_interface_3az_status_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_INTERFACE_3AZ_STATUS_SET, dev_id, port_id, enable); + + return rv; +} +sw_error_t +fal_port_interface_3az_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_INTERFACE_3AZ_STATUS_GET, dev_id, port_id, enable); + + return rv; + + return rv; +} +sw_error_t +fal_port_promisc_mode_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_PROMISC_MODE_SET, dev_id, port_id, enable); + + return rv; +} + +sw_error_t +fal_port_promisc_mode_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_PROMISC_MODE_GET, dev_id, port_id, enable); + + return rv; +} +sw_error_t +fal_port_interface_eee_cfg_set(a_uint32_t dev_id, fal_port_t port_id, + fal_port_eee_cfg_t *port_eee_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_INTERFACE_EEE_CFG_SET, dev_id, port_id, port_eee_cfg); + + return rv; +} +sw_error_t +fal_port_interface_eee_cfg_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_eee_cfg_t *port_eee_cfg) + +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_INTERFACE_EEE_CFG_GET, dev_id, port_id, port_eee_cfg); + + return rv; +} + +sw_error_t +fal_port_source_filter_config_set(a_uint32_t dev_id, + fal_port_t port_id, fal_src_filter_config_t *src_filter_config) + +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_SOURCE_FILTER_CONFIG_SET, dev_id, port_id, + src_filter_config); + + return rv; +} + +sw_error_t +fal_port_source_filter_config_get(a_uint32_t dev_id, + fal_port_t port_id, fal_src_filter_config_t *src_filter_config) + +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_SOURCE_FILTER_CONFIG_GET, dev_id, port_id, + 
src_filter_config); + + return rv; +} + +sw_error_t +fal_switch_port_loopback_set(a_uint32_t dev_id, fal_port_t port_id, + fal_loopback_config_t *loopback_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_SWITCH_PORT_LOOPBACK_SET, dev_id, + port_id, loopback_cfg); + + return rv; +} +sw_error_t +fal_switch_port_loopback_get(a_uint32_t dev_id, fal_port_t port_id, + fal_loopback_config_t *loopback_cfg) + +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_SWITCH_PORT_LOOPBACK_GET, dev_id, port_id, + loopback_cfg); + + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_portvlan.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_portvlan.c new file mode 100755 index 000000000..8aeea7225 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_portvlan.c @@ -0,0 +1,754 @@ +/* + * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_portvlan.h" +#include "fal_uk_if.h" + +sw_error_t +fal_port_1qmode_set(a_uint32_t dev_id, fal_port_t port_id, + fal_pt_1qmode_t port_1qmode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_ING_MODE_SET, dev_id, port_id, + (a_uint32_t) port_1qmode); + return rv; +} + +sw_error_t +fal_port_1qmode_get(a_uint32_t dev_id, fal_port_t port_id, + fal_pt_1qmode_t * pport_1qmode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_ING_MODE_GET, dev_id, port_id, + pport_1qmode); + return rv; +} + +sw_error_t +fal_port_egvlanmode_set(a_uint32_t dev_id, fal_port_t port_id, + fal_pt_1q_egmode_t port_egvlanmode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_EG_MODE_SET, dev_id, port_id, + (a_uint32_t) port_egvlanmode); + return rv; +} + +sw_error_t +fal_port_egvlanmode_get(a_uint32_t dev_id, fal_port_t port_id, + fal_pt_1q_egmode_t * pport_egvlanmode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_EG_MODE_GET, dev_id, port_id, + pport_egvlanmode); + return rv; +} + +sw_error_t +fal_portvlan_member_add(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t mem_port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_MEM_ADD, dev_id, port_id, + (a_uint32_t) mem_port_id); + return rv; +} + +sw_error_t +fal_portvlan_member_del(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t mem_port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_MEM_DEL, dev_id, port_id, + (a_uint32_t) mem_port_id); + return rv; +} + +sw_error_t +fal_portvlan_member_update(a_uint32_t dev_id, fal_port_t port_id, + fal_pbmp_t mem_port_map) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_MEM_UPDATE, dev_id, port_id, + (a_uint32_t) mem_port_map); + return rv; +} + +sw_error_t +fal_portvlan_member_get(a_uint32_t dev_id, fal_port_t port_id, + fal_pbmp_t * mem_port_map) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_MEM_GET, dev_id, port_id, + mem_port_map); + return rv; +} + +sw_error_t 
+fal_port_default_vid_set(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t vid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DEF_VID_SET, dev_id, port_id, + vid); + return rv; +} + +sw_error_t +fal_port_default_vid_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * vid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DEF_VID_GET, dev_id, port_id, + vid); + return rv; +} + +sw_error_t +fal_port_force_default_vid_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FORCE_DEF_VID_SET, dev_id, port_id, + (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_port_force_default_vid_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FORCE_DEF_VID_GET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_force_portvlan_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FORCE_PORTVLAN_SET, dev_id, port_id, + (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_port_force_portvlan_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_FORCE_PORTVLAN_GET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_port_nestvlan_set(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_NESTVLAN_SET, dev_id, port_id, + (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_port_nestvlan_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_NESTVLAN_GET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_nestvlan_tpid_set(a_uint32_t dev_id, a_uint32_t tpid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NESTVLAN_TPID_SET, dev_id, tpid); + return rv; +} + +sw_error_t +fal_nestvlan_tpid_get(a_uint32_t dev_id, a_uint32_t * tpid) +{ + sw_error_t rv; + + rv = 
sw_uk_exec(SW_API_NESTVLAN_TPID_GET, dev_id, tpid); + return rv; +} + +sw_error_t +fal_port_invlan_mode_set(a_uint32_t dev_id, fal_port_t port_id, + fal_pt_invlan_mode_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_IN_VLAN_MODE_SET, dev_id, port_id, (a_uint32_t) mode); + return rv; +} + +sw_error_t +fal_port_invlan_mode_get(a_uint32_t dev_id, fal_port_t port_id, + fal_pt_invlan_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_IN_VLAN_MODE_GET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_port_tls_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_TLS_SET, dev_id, port_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_port_tls_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_TLS_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_pri_propagation_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_PRI_PROPAGATION_SET, dev_id, port_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_port_pri_propagation_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_PRI_PROPAGATION_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_default_svid_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t vid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DEF_SVID_SET, dev_id, port_id, vid); + return rv; +} + +sw_error_t +fal_port_default_svid_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * vid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DEF_SVID_GET, dev_id, port_id, vid); + return rv; +} + +sw_error_t +fal_port_default_cvid_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t vid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DEF_CVID_SET, dev_id, port_id, vid); + return rv; +} + +sw_error_t 
+fal_port_default_cvid_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * vid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DEF_CVID_GET, dev_id, port_id, vid); + return rv; +} + +sw_error_t +fal_port_vlan_propagation_set(a_uint32_t dev_id, fal_port_t port_id, + fal_vlan_propagation_mode_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_PROPAGATION_SET, dev_id, port_id, (a_uint32_t)mode); + return rv; +} + +sw_error_t +fal_port_vlan_propagation_get(a_uint32_t dev_id, fal_port_t port_id, + fal_vlan_propagation_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_PROPAGATION_GET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_port_vlan_trans_add(a_uint32_t dev_id, fal_port_t port_id, fal_vlan_trans_entry_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_TRANS_ADD, dev_id, port_id, entry); + return rv; +} + +sw_error_t +fal_port_vlan_trans_del(a_uint32_t dev_id, fal_port_t port_id, fal_vlan_trans_entry_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_TRANS_DEL, dev_id, port_id, entry); + return rv; +} + +sw_error_t +fal_port_vlan_trans_get(a_uint32_t dev_id, fal_port_t port_id, fal_vlan_trans_entry_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_TRANS_GET, dev_id, port_id, entry); + return rv; +} + +sw_error_t +fal_qinq_mode_set(a_uint32_t dev_id, fal_qinq_mode_t mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QINQ_MODE_SET, dev_id, (a_uint32_t)mode); + return rv; +} + +sw_error_t +fal_qinq_mode_get(a_uint32_t dev_id, fal_qinq_mode_t * mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QINQ_MODE_GET, dev_id, mode); + return rv; +} + +sw_error_t +fal_port_qinq_role_set(a_uint32_t dev_id, fal_port_t port_id, fal_qinq_port_role_t role) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_QINQ_ROLE_SET, dev_id, port_id, (a_uint32_t)role); + return rv; +} + +sw_error_t +fal_port_qinq_role_get(a_uint32_t dev_id, fal_port_t port_id, fal_qinq_port_role_t * role) +{ + 
sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_QINQ_ROLE_GET, dev_id, port_id, role); + return rv; +} + +sw_error_t +fal_port_vlan_trans_iterate(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * iterator, fal_vlan_trans_entry_t * entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_TRANS_ITERATE, dev_id, port_id, + iterator,entry); + return rv; +} + +sw_error_t +fal_port_mac_vlan_xlt_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MAC_VLAN_XLT_SET, dev_id, port_id, (a_uint32_t)enable); + return rv; +} + +sw_error_t +fal_port_mac_vlan_xlt_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_MAC_VLAN_XLT_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_netisolate_set(a_uint32_t dev_id, a_uint32_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NETISOLATE_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_netisolate_get(a_uint32_t dev_id, a_uint32_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_NETISOLATE_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_eg_trans_filter_bypass_en_set(a_uint32_t dev_id, a_uint32_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_EG_FLTR_BYPASS_EN_SET, dev_id, enable); + return rv; +} + +sw_error_t +fal_eg_trans_filter_bypass_en_get(a_uint32_t dev_id, a_uint32_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_EG_FLTR_BYPASS_EN_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_port_vrf_id_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t vrf_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VRF_ID_SET, dev_id, port_id, vrf_id); + return rv; +} + +sw_error_t +fal_port_vrf_id_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * vrf_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VRF_ID_GET, dev_id, port_id, vrf_id); + return rv; +} + +sw_error_t +fal_global_qinq_mode_set(a_uint32_t dev_id, fal_global_qinq_mode_t 
*mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_GLOBAL_QINQ_MODE_SET, dev_id, mode); + return rv; +} + +sw_error_t +fal_global_qinq_mode_get(a_uint32_t dev_id, fal_global_qinq_mode_t *mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_GLOBAL_QINQ_MODE_GET, dev_id, mode); + return rv; +} + +sw_error_t +fal_port_qinq_mode_set(a_uint32_t dev_id, fal_port_t port_id, fal_port_qinq_role_t *mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_QINQ_MODE_SET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_port_qinq_mode_get(a_uint32_t dev_id, fal_port_t port_id, fal_port_qinq_role_t *mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_QINQ_MODE_GET, dev_id, port_id, mode); + return rv; +} + +sw_error_t +fal_ingress_tpid_set(a_uint32_t dev_id, fal_tpid_t *tpid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TPID_SET, dev_id, tpid); + return rv; +} + +sw_error_t +fal_ingress_tpid_get(a_uint32_t dev_id, fal_tpid_t *tpid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TPID_GET, dev_id, tpid); + return rv; +} + +sw_error_t +fal_egress_tpid_set(a_uint32_t dev_id, fal_tpid_t *tpid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_EGRESS_TPID_SET, dev_id, tpid); + return rv; +} + +sw_error_t +fal_egress_tpid_get(a_uint32_t dev_id, fal_tpid_t *tpid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_EGRESS_TPID_GET, dev_id, tpid); + return rv; +} + +sw_error_t +fal_port_ingress_vlan_filter_set(a_uint32_t dev_id, fal_port_t port_id, fal_ingress_vlan_filter_t *filter) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_INGRESS_VLAN_FILTER_SET, dev_id, port_id, filter); + return rv; +} + +sw_error_t +fal_port_ingress_vlan_filter_get(a_uint32_t dev_id, fal_port_t port_id, fal_ingress_vlan_filter_t *filter) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_INGRESS_VLAN_FILTER_GET, dev_id, port_id, filter); + return rv; +} + +sw_error_t +fal_port_default_vlantag_set(a_uint32_t dev_id, fal_port_t port_id, + fal_port_vlan_direction_t direction, 
fal_port_default_vid_enable_t *default_vid_en, + fal_port_vlan_tag_t *default_tag) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DEFAULT_VLANTAG_SET, dev_id, port_id, (a_uint32_t) direction, + default_vid_en, default_tag); + return rv; +} + +sw_error_t +fal_port_default_vlantag_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_vlan_direction_t direction, fal_port_default_vid_enable_t *default_vid_en, + fal_port_vlan_tag_t *default_tag) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_DEFAULT_VLANTAG_GET, dev_id, port_id, (a_uint32_t) direction, + default_vid_en, default_tag); + return rv; +} + +sw_error_t +fal_port_tag_propagation_set(a_uint32_t dev_id, fal_port_t port_id, fal_port_vlan_direction_t direction, + fal_vlantag_propagation_t *prop) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_TAG_PROPAGATION_SET, dev_id, port_id, (a_uint32_t) direction, prop); + return rv; +} + +sw_error_t +fal_port_tag_propagation_get(a_uint32_t dev_id, fal_port_t port_id, fal_port_vlan_direction_t direction, + fal_vlantag_propagation_t *prop) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_TAG_PROPAGATION_GET, dev_id, port_id, (a_uint32_t) direction, prop); + return rv; +} + +sw_error_t +fal_port_vlan_xlt_miss_cmd_set(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_XLT_MISS_CMD_SET, dev_id, port_id, (a_uint32_t) cmd); + return rv; +} + +sw_error_t +fal_port_vlan_xlt_miss_cmd_get(a_uint32_t dev_id, fal_port_t port_id, + fal_fwd_cmd_t *cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_XLT_MISS_CMD_GET, dev_id, port_id, cmd); + return rv; +} + +sw_error_t +fal_port_vlantag_egmode_set(a_uint32_t dev_id, fal_port_t port_id, + fal_vlantag_egress_mode_t *port_egvlanmode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLANTAG_EGMODE_SET, dev_id, port_id, port_egvlanmode); + return rv; +} + +sw_error_t +fal_port_vlantag_egmode_get(a_uint32_t dev_id, fal_port_t port_id, + fal_vlantag_egress_mode_t 
*port_egvlanmode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLANTAG_EGMODE_GET, dev_id, port_id, port_egvlanmode); + return rv; +} + +sw_error_t +fal_port_vsi_egmode_set(a_uint32_t dev_id, a_uint32_t vsi, a_uint32_t port_id, fal_pt_1q_egmode_t egmode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VSI_EGMODE_SET, dev_id, vsi, port_id, (a_uint32_t) egmode); + return rv; +} + +sw_error_t +fal_port_vsi_egmode_get(a_uint32_t dev_id, a_uint32_t vsi, a_uint32_t port_id, fal_pt_1q_egmode_t * egmode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VSI_EGMODE_GET, dev_id, vsi, port_id, egmode); + return rv; +} + +sw_error_t +fal_port_vlantag_vsi_egmode_enable(a_uint32_t dev_id, fal_port_t port_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLANTAG_VSI_EGMODE_EN_SET, dev_id, port_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_port_vlantag_vsi_egmode_status_get(a_uint32_t dev_id, fal_port_t port_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLANTAG_VSI_EGMODE_EN_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_port_vlan_trans_adv_add(a_uint32_t dev_id, fal_port_t port_id, fal_port_vlan_direction_t direction, + fal_vlan_trans_adv_rule_t * rule, fal_vlan_trans_adv_action_t * action) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_TRANS_ADV_ADD, dev_id, port_id, (a_uint32_t) direction, + rule, action); + return rv; +} + +sw_error_t +fal_port_vlan_trans_adv_del(a_uint32_t dev_id, fal_port_t port_id, fal_port_vlan_direction_t direction, + fal_vlan_trans_adv_rule_t * rule, fal_vlan_trans_adv_action_t * action) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_TRANS_ADV_DEL, dev_id, port_id, (a_uint32_t) direction, + rule, action); + return rv; +} + +sw_error_t +fal_port_vlan_trans_adv_getfirst(a_uint32_t dev_id, fal_port_t port_id, fal_port_vlan_direction_t direction, + fal_vlan_trans_adv_rule_t * rule, fal_vlan_trans_adv_action_t * action) +{ + sw_error_t rv; + + rv = 
sw_uk_exec(SW_API_PT_VLAN_TRANS_ADV_GETFIRST, dev_id, port_id, (a_uint32_t) direction, + rule, action); + return rv; +} + +sw_error_t +fal_port_vlan_trans_adv_getnext(a_uint32_t dev_id, fal_port_t port_id, fal_port_vlan_direction_t direction, + fal_vlan_trans_adv_rule_t * rule, fal_vlan_trans_adv_action_t * action) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_TRANS_ADV_GETNEXT, dev_id, port_id, (a_uint32_t) direction, + rule, action); + return rv; +} + +sw_error_t +fal_port_vlan_counter_get(a_uint32_t dev_id, a_uint32_t cnt_index, fal_port_vlan_counter_t * counter) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_COUNTER_GET, dev_id, cnt_index, counter); + return rv; +} + +sw_error_t +fal_port_vlan_counter_cleanup(a_uint32_t dev_id, a_uint32_t cnt_index) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PT_VLAN_COUNTER_CLEANUP, dev_id, cnt_index); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_pppoe.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_pppoe.c new file mode 100755 index 000000000..4c7817c33 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_pppoe.c @@ -0,0 +1,167 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_pppoe.h" +#include "fal_uk_if.h" + +sw_error_t +fal_pppoe_cmd_set(a_uint32_t dev_id, fal_fwd_cmd_t cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_CMD_SET, dev_id, (a_uint32_t) cmd); + return rv; +} + +sw_error_t +fal_pppoe_cmd_get(a_uint32_t dev_id, fal_fwd_cmd_t * cmd) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_CMD_GET, dev_id, cmd); + return rv; +} + +sw_error_t +fal_pppoe_status_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_STATUS_SET, dev_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_pppoe_status_get(a_uint32_t dev_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_STATUS_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_pppoe_session_add(a_uint32_t dev_id, a_uint32_t session_id, a_bool_t strip_hdr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_SESSION_ADD, dev_id, session_id, (a_uint32_t)strip_hdr); + return rv; +} + +sw_error_t +fal_pppoe_session_del(a_uint32_t dev_id, a_uint32_t session_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_SESSION_DEL, dev_id, session_id); + return rv; +} + +sw_error_t +fal_pppoe_session_get(a_uint32_t dev_id, a_uint32_t session_id, a_bool_t * strip_hdr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_SESSION_GET, dev_id, session_id, strip_hdr); + return rv; +} + +sw_error_t +fal_pppoe_session_table_add(a_uint32_t dev_id, fal_pppoe_session_t * session_tbl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_SESSION_TABLE_ADD, dev_id, session_tbl); + return rv; +} + +sw_error_t +fal_pppoe_session_table_del(a_uint32_t dev_id, fal_pppoe_session_t * session_tbl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_SESSION_TABLE_DEL, dev_id, session_tbl); + return rv; +} + +sw_error_t +fal_pppoe_session_table_get(a_uint32_t dev_id, fal_pppoe_session_t * session_tbl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_SESSION_TABLE_GET, 
dev_id, session_tbl); + return rv; +} + +sw_error_t +fal_pppoe_session_id_set(a_uint32_t dev_id, a_uint32_t index, + a_uint32_t id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_SESSION_ID_SET, dev_id, index, id); + return rv; +} + +sw_error_t +fal_pppoe_session_id_get(a_uint32_t dev_id, a_uint32_t index, + a_uint32_t * id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_SESSION_ID_GET, dev_id, index, id); + return rv; +} + +sw_error_t +fal_rtd_pppoe_en_set(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RTD_PPPOE_EN_SET, dev_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_rtd_pppoe_en_get(a_uint32_t dev_id, a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RTD_PPPOE_EN_GET, dev_id, enable); + return rv; +} + +sw_error_t +fal_pppoe_l3intf_enable(a_uint32_t dev_id, a_uint32_t l3_if, a_uint32_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_EN_SET, dev_id, l3_if, (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_pppoe_l3intf_status_get(a_uint32_t dev_id, a_uint32_t l3_if, a_uint32_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PPPOE_EN_GET, dev_id, l3_if, enable); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ptp.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ptp.c new file mode 100755 index 000000000..4ad27e5c8 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_ptp.c @@ -0,0 +1,497 @@ +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_ptp.h" +#include "fal_uk_if.h" + +sw_error_t +fal_ptp_config_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_config_t *config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_CONFIG_SET, dev_id, port_id, + config); + return rv; +} + +sw_error_t +fal_ptp_config_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_config_t *config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_CONFIG_GET, dev_id, port_id, + config); + return rv; +} + +sw_error_t +fal_ptp_reference_clock_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_reference_clock_t ref_clock) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_REFERENCE_CLOCK_SET, dev_id, port_id, + ref_clock); + return rv; +} + +sw_error_t +fal_ptp_reference_clock_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_reference_clock_t *ref_clock) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_REFERENCE_CLOCK_GET, dev_id, port_id, + ref_clock); + return rv; +} + +sw_error_t +fal_ptp_rx_timestamp_mode_set(a_uint32_t dev_id, + a_uint32_t port_id, fal_ptp_rx_timestamp_mode_t ts_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RX_TIMESTAMP_MODE_SET, dev_id, port_id, + ts_mode); + return rv; +} + +sw_error_t +fal_ptp_rx_timestamp_mode_get(a_uint32_t dev_id, + a_uint32_t port_id, fal_ptp_rx_timestamp_mode_t *ts_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RX_TIMESTAMP_MODE_GET, dev_id, port_id, + ts_mode); + return rv; +} + +sw_error_t +fal_ptp_timestamp_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_direction_t direction, + fal_ptp_pkt_info_t *pkt_info, + fal_ptp_time_t *time) +{ + sw_error_t rv; 
+ + rv = sw_uk_exec(SW_API_PTP_TIMESTAMP_GET, dev_id, port_id, + direction, pkt_info, time); + return rv; +} + +sw_error_t +fal_ptp_pkt_timestamp_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_time_t *time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_PKT_TIMESTAMP_SET, dev_id, port_id, + time); + return rv; +} + +sw_error_t +fal_ptp_pkt_timestamp_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_time_t *time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_PKT_TIMESTAMP_GET, dev_id, port_id, + time); + return rv; +} + +sw_error_t +fal_ptp_grandmaster_mode_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_grandmaster_mode_t *gm_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_GRANDMASTER_MODE_SET, dev_id, port_id, + gm_mode); + return rv; +} + +sw_error_t +fal_ptp_grandmaster_mode_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_grandmaster_mode_t *gm_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_GRANDMASTER_MODE_GET, dev_id, port_id, + gm_mode); + return rv; +} + +sw_error_t +fal_ptp_rtc_time_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_time_t *time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RTC_TIME_GET, dev_id, port_id, + time); + return rv; +} + +sw_error_t +fal_ptp_rtc_time_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_time_t *time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RTC_TIME_SET, dev_id, port_id, + time); + return rv; +} + +sw_error_t +fal_ptp_rtc_time_clear(a_uint32_t dev_id, a_uint32_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RTC_TIME_CLEAR, dev_id, port_id); + return rv; +} + +sw_error_t +fal_ptp_rtc_adjtime_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_time_t *time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RTC_ADJTIME_SET, dev_id, port_id, + time); + return rv; +} + +sw_error_t +fal_ptp_rtc_adjfreq_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_time_t *time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RTC_ADJFREQ_SET, 
dev_id, port_id, + time); + return rv; +} + +sw_error_t +fal_ptp_rtc_adjfreq_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_time_t *time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RTC_ADJFREQ_GET, dev_id, port_id, + time); + return rv; +} + +sw_error_t +fal_ptp_link_delay_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_time_t *time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_LINK_DELAY_SET, dev_id, port_id, + time); + return rv; +} + +sw_error_t +fal_ptp_link_delay_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_time_t *time) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_LINK_DELAY_GET, dev_id, port_id, + time); + return rv; +} + +sw_error_t +fal_ptp_security_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_security_t *sec) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_SECURITY_SET, dev_id, port_id, + sec); + return rv; +} + +sw_error_t +fal_ptp_security_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_security_t *sec) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_SECURITY_GET, dev_id, port_id, + sec); + return rv; +} + +sw_error_t +fal_ptp_pps_signal_control_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_pps_signal_control_t *sig_control) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_PPS_SIGNAL_CONTROL_SET, dev_id, port_id, + sig_control); + return rv; +} + +sw_error_t +fal_ptp_pps_signal_control_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_pps_signal_control_t *sig_control) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_PPS_SIGNAL_CONTROL_GET, dev_id, port_id, + sig_control); + return rv; +} + +sw_error_t +fal_ptp_rx_crc_recalc_enable(a_uint32_t dev_id, a_uint32_t port_id, + a_bool_t status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RX_CRC_RECALC_SET, dev_id, port_id, + status); + return rv; +} + +sw_error_t +fal_ptp_rx_crc_recalc_status_get(a_uint32_t dev_id, a_uint32_t port_id, + a_bool_t *status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RX_CRC_RECALC_GET, dev_id, 
port_id, + status); + return rv; +} + +sw_error_t +fal_ptp_asym_correction_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_asym_correction_t *asym_cf) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_ASYM_CORRECTION_SET, dev_id, port_id, + asym_cf); + return rv; +} + +sw_error_t +fal_ptp_asym_correction_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_asym_correction_t* asym_cf) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_ASYM_CORRECTION_GET, dev_id, port_id, + asym_cf); + return rv; +} + +sw_error_t +fal_ptp_output_waveform_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_output_waveform_t *waveform) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_OUTPUT_WAVEFORM_SET, dev_id, port_id, + waveform); + return rv; +} + +sw_error_t +fal_ptp_output_waveform_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_output_waveform_t *waveform) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_OUTPUT_WAVEFORM_GET, dev_id, port_id, + waveform); + return rv; +} + +sw_error_t +fal_ptp_rtc_time_snapshot_enable(a_uint32_t dev_id, a_uint32_t port_id, + a_bool_t status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RTC_TIME_SNAPSHOT_SET, dev_id, port_id, + status); + return rv; +} + +sw_error_t +fal_ptp_rtc_time_snapshot_status_get(a_uint32_t dev_id, a_uint32_t port_id, + a_bool_t *status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_RTC_TIME_SNAPSHOT_GET, dev_id, port_id, + status); + return rv; +} + +sw_error_t +fal_ptp_increment_sync_from_clock_enable(a_uint32_t dev_id, + a_uint32_t port_id, a_bool_t status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_INCREMENT_SYNC_FROM_CLOCK_SET, dev_id, port_id, + status); + return rv; +} + +sw_error_t +fal_ptp_increment_sync_from_clock_status_get(a_uint32_t dev_id, + a_uint32_t port_id, a_bool_t *status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_INCREMENT_SYNC_FROM_CLOCK_GET, dev_id, port_id, + status); + return rv; +} + +sw_error_t +fal_ptp_tod_uart_set(a_uint32_t dev_id, a_uint32_t 
port_id, + fal_ptp_tod_uart_t *tod_uart) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_TOD_UART_SET, dev_id, port_id, + tod_uart); + return rv; +} + +sw_error_t +fal_ptp_tod_uart_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_tod_uart_t *tod_uart) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_TOD_UART_GET, dev_id, port_id, + tod_uart); + return rv; +} + +sw_error_t +fal_ptp_enhanced_timestamp_engine_set(a_uint32_t dev_id, + a_uint32_t port_id, fal_ptp_direction_t direction, + fal_ptp_enhanced_ts_engine_t *ts_engine) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_ENHANCED_TIMESTAMP_ENGINE_SET, dev_id, port_id, + direction, ts_engine); + return rv; +} + +sw_error_t +fal_ptp_enhanced_timestamp_engine_get(a_uint32_t dev_id, + a_uint32_t port_id, fal_ptp_direction_t direction, + fal_ptp_enhanced_ts_engine_t *ts_engine) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_ENHANCED_TIMESTAMP_ENGINE_GET, dev_id, port_id, + direction, ts_engine); + return rv; +} + +sw_error_t +fal_ptp_trigger_set(a_uint32_t dev_id, a_uint32_t port_id, + a_uint32_t trigger_id, fal_ptp_trigger_t *triger) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_TRIGGER_SET, dev_id, port_id, + trigger_id, triger); + return rv; +} + +sw_error_t +fal_ptp_trigger_get(a_uint32_t dev_id, a_uint32_t port_id, + a_uint32_t trigger_id, fal_ptp_trigger_t *triger) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_TRIGGER_GET, dev_id, port_id, + trigger_id, triger); + return rv; +} + +sw_error_t +fal_ptp_capture_set(a_uint32_t dev_id, a_uint32_t port_id, + a_uint32_t capture_id, fal_ptp_capture_t *capture) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_CAPTURE_SET, dev_id, port_id, + capture_id, capture); + return rv; +} + +sw_error_t +fal_ptp_capture_get(a_uint32_t dev_id, a_uint32_t port_id, + a_uint32_t capture_id, fal_ptp_capture_t *capture) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_CAPTURE_GET, dev_id, port_id, + capture_id, capture); + return rv; +} + +sw_error_t 
+fal_ptp_interrupt_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_interrupt_t *interrupt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_INTERRUPT_SET, dev_id, port_id, + interrupt); + return rv; +} + +sw_error_t +fal_ptp_interrupt_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_ptp_interrupt_t *interrupt) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PTP_INTERRUPT_GET, dev_id, port_id, + interrupt); + return rv; +} + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_qm.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_qm.c new file mode 100755 index 000000000..09b8c341e --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_qm.c @@ -0,0 +1,404 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_qm.h" +#include "fal_uk_if.h" + +sw_error_t +fal_ac_ctrl_set( + a_uint32_t dev_id, + fal_ac_obj_t *obj, + fal_ac_ctrl_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_AC_CTRL_SET, dev_id, obj, cfg); + return rv; +} + +sw_error_t +fal_ac_ctrl_get( + a_uint32_t dev_id, + fal_ac_obj_t *obj, + fal_ac_ctrl_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_AC_CTRL_GET, dev_id, obj, cfg); + return rv; +} + +sw_error_t +fal_ac_prealloc_buffer_set( + a_uint32_t dev_id, + fal_ac_obj_t *obj, + a_uint16_t num) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_AC_PRE_BUFFER_SET, dev_id, obj, num); + return rv; +} + +sw_error_t +fal_ac_prealloc_buffer_get( + a_uint32_t dev_id, + fal_ac_obj_t *obj, + a_uint16_t *num) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_AC_PRE_BUFFER_GET, dev_id, obj, num); + return rv; +} + +sw_error_t +fal_ac_queue_group_set( + a_uint32_t dev_id, + a_uint32_t queue_id, + a_uint8_t group_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_GROUP_SET, dev_id, queue_id, group_id); + return rv; +} + +sw_error_t +fal_ac_queue_group_get( + a_uint32_t dev_id, + a_uint32_t queue_id, + a_uint8_t *group_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_GROUP_GET, dev_id, queue_id, group_id); + return rv; +} + +sw_error_t +fal_ac_static_threshold_set( + a_uint32_t dev_id, + fal_ac_obj_t *obj, + fal_ac_static_threshold_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_STATIC_THRESH_SET, dev_id, obj, cfg); + return rv; +} + +sw_error_t +fal_ac_static_threshold_get( + a_uint32_t dev_id, + fal_ac_obj_t *obj, + fal_ac_static_threshold_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_STATIC_THRESH_GET, dev_id, obj, cfg); + return rv; +} + +sw_error_t +fal_ac_dynamic_threshold_set( + a_uint32_t dev_id, + a_uint32_t queue_id, + fal_ac_dynamic_threshold_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_DYNAMIC_THRESH_SET, dev_id, queue_id, cfg); + return rv; +} + 
+sw_error_t +fal_ac_dynamic_threshold_get( + a_uint32_t dev_id, + a_uint32_t queue_id, + fal_ac_dynamic_threshold_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_DYNAMIC_THRESH_GET, dev_id, queue_id, cfg); + return rv; +} + +sw_error_t +fal_ac_group_buffer_set( + a_uint32_t dev_id, + a_uint8_t group_id, + fal_ac_group_buffer_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_GOURP_BUFFER_SET, dev_id, group_id, cfg); + return rv; +} + +sw_error_t +fal_ac_group_buffer_get( + a_uint32_t dev_id, + a_uint8_t group_id, + fal_ac_group_buffer_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_GOURP_BUFFER_GET, dev_id, group_id, cfg); + return rv; +} + +sw_error_t +fal_ucast_queue_base_profile_set( + a_uint32_t dev_id, + fal_ucast_queue_dest_t *queue_dest, + a_uint32_t queue_base, a_uint8_t profile) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UCAST_QUEUE_BASE_PROFILE_SET, dev_id, queue_dest, queue_base, profile); + return rv; +} + +sw_error_t +fal_ucast_queue_base_profile_get( + a_uint32_t dev_id, + fal_ucast_queue_dest_t *queue_dest, + a_uint32_t *queue_base, a_uint8_t *profile) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UCAST_QUEUE_BASE_PROFILE_GET, dev_id, queue_dest, queue_base, profile); + return rv; +} + +sw_error_t +fal_ucast_priority_class_set( + a_uint32_t dev_id, + a_uint8_t profile, + a_uint8_t priority, + a_uint8_t class) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UCAST_PRIORITY_CLASS_SET, dev_id, profile, priority, class); + return rv; +} + +sw_error_t +fal_ucast_priority_class_get( + a_uint32_t dev_id, + a_uint8_t profile, + a_uint8_t priority, + a_uint8_t *class) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UCAST_PRIORITY_CLASS_GET, dev_id, profile, priority, class); + return rv; +} + +sw_error_t +fal_ucast_hash_map_set( + a_uint32_t dev_id, + a_uint8_t profile, + a_uint8_t rss_hash, + a_int8_t queue_hash) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UCAST_HASH_MAP_SET, dev_id, profile, rss_hash, queue_hash); + return rv; +} + 
+sw_error_t +fal_ucast_hash_map_get( + a_uint32_t dev_id, + a_uint8_t profile, + a_uint8_t rss_hash, + a_int8_t *queue_hash) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UCAST_HASH_MAP_GET, dev_id, profile, rss_hash, queue_hash); + return rv; +} + +sw_error_t +fal_mcast_cpu_code_class_set( + a_uint32_t dev_id, + a_uint8_t cpu_code, + a_uint8_t queue_class) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MCAST_CPUCODE_CLASS_SET, dev_id, cpu_code, queue_class); + return rv; +} + +sw_error_t +fal_mcast_cpu_code_class_get( + a_uint32_t dev_id, + a_uint8_t cpu_code, + a_uint8_t *queue_class) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MCAST_CPUCODE_CLASS_GET, dev_id, cpu_code, queue_class); + return rv; +} + +sw_error_t +fal_port_mcast_priority_class_set( + a_uint32_t dev_id, + fal_port_t port, + a_uint8_t priority, + a_uint8_t queue_class) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MCAST_PRIORITY_CLASS_SET, dev_id, port, priority, queue_class); + return rv; +} + +sw_error_t +fal_port_mcast_priority_class_get( + a_uint32_t dev_id, + fal_port_t port, + a_uint8_t priority, + a_uint8_t *queue_class) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_MCAST_PRIORITY_CLASS_GET, dev_id, port, priority, queue_class); + return rv; +} + +sw_error_t +fal_queue_flush( + a_uint32_t dev_id, + fal_port_t port, + a_uint16_t queue_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_FLUSH, dev_id, port, queue_id); + return rv; +} + +sw_error_t +fal_ucast_default_hash_set( + a_uint32_t dev_id, + a_uint8_t hash_value) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UCAST_DFLT_HASH_MAP_SET, dev_id, hash_value); + return rv; +} + +sw_error_t +fal_ucast_default_hash_get( + a_uint32_t dev_id, + a_uint8_t *hash_value) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UCAST_DFLT_HASH_MAP_GET, dev_id, hash_value); + return rv; +} + +sw_error_t +fal_queue_counter_ctrl_set(a_uint32_t dev_id, a_bool_t cnt_en) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_CNT_CTRL_SET, dev_id, cnt_en); + 
return rv; +} + +sw_error_t +fal_queue_counter_ctrl_get(a_uint32_t dev_id, a_bool_t *cnt_en) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_CNT_CTRL_GET, dev_id, cnt_en); + return rv; +} + +sw_error_t +fal_queue_counter_get( + a_uint32_t dev_id, + a_uint32_t queue_id, + fal_queue_stats_t *info) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_CNT_GET, dev_id, queue_id, info); + return rv; +} + +sw_error_t +fal_queue_counter_cleanup(a_uint32_t dev_id, a_uint32_t queue_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_CNT_CLEANUP, dev_id, queue_id); + return rv; +} + +sw_error_t +fal_qm_enqueue_ctrl_set(a_uint32_t dev_id, a_uint32_t queue_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QM_ENQUEUE_CTRL_SET, dev_id, queue_id, enable); + return rv; +} + +sw_error_t +fal_qm_enqueue_ctrl_get(a_uint32_t dev_id, a_uint32_t queue_id, a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QM_ENQUEUE_CTRL_GET, dev_id, queue_id, enable); + return rv; +} + +sw_error_t +fal_qm_port_source_profile_set( + a_uint32_t dev_id, fal_port_t port, + a_uint32_t src_profile) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QM_SOURCE_PROFILE_SET, dev_id, port, src_profile); + return rv; +} + +sw_error_t +fal_qm_port_source_profile_get( + a_uint32_t dev_id, fal_port_t port, + a_uint32_t *src_profile) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QM_SOURCE_PROFILE_GET, dev_id, port, src_profile); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_qos.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_qos.c new file mode 100644 index 000000000..e48faa3c9 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_qos.c @@ -0,0 +1,612 @@ +/* + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_qos.h" +#include "fal_uk_if.h" + +sw_error_t +fal_qos_sch_mode_set(a_uint32_t dev_id, + fal_sch_mode_t mode, const a_uint32_t weight[]) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_SCH_MODE_SET, dev_id, mode, + weight); + return rv; +} + +sw_error_t +fal_qos_sch_mode_get(a_uint32_t dev_id, + fal_sch_mode_t * mode, a_uint32_t weight[]) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_SCH_MODE_GET, dev_id, mode, + weight); + return rv; +} + +sw_error_t +fal_qos_queue_tx_buf_status_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_QU_TX_BUF_ST_SET, dev_id, port_id, + (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_qos_queue_tx_buf_status_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_QU_TX_BUF_ST_GET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_qos_queue_tx_buf_nr_set(a_uint32_t dev_id, fal_port_t port_id, + fal_queue_t queue_id, a_uint32_t * number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_QU_TX_BUF_NR_SET, dev_id, port_id, queue_id, + number); + return rv; +} + +sw_error_t 
+fal_qos_queue_tx_buf_nr_get(a_uint32_t dev_id, fal_port_t port_id, + fal_queue_t queue_id, a_uint32_t * number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_QU_TX_BUF_NR_GET, dev_id, port_id, queue_id, + number); + return rv; +} + +sw_error_t +fal_qos_port_tx_buf_status_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_TX_BUF_ST_SET, dev_id, port_id, + (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_qos_port_tx_buf_status_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_TX_BUF_ST_GET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_qos_port_red_en_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_RED_EN_SET, dev_id, port_id, + (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_qos_port_red_en_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t* enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_RED_EN_GET, dev_id, port_id, + enable); + return rv; +} + +sw_error_t +fal_qos_port_tx_buf_nr_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_TX_BUF_NR_SET, dev_id, port_id, + number); + return rv; +} + +sw_error_t +fal_qos_port_tx_buf_nr_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_TX_BUF_NR_GET, dev_id, port_id, + number); + return rv; +} + +sw_error_t +fal_qos_port_rx_buf_nr_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_RX_BUF_NR_SET, dev_id, port_id, + number); + return rv; +} + +sw_error_t +fal_qos_port_rx_buf_nr_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_RX_BUF_NR_GET, dev_id, port_id, + number); + return rv; +} + +sw_error_t 
+fal_cosmap_up_queue_set(a_uint32_t dev_id, a_uint32_t up, fal_queue_t queue) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_UP_QU_SET, dev_id, up, + (a_uint32_t) queue); + return rv; +} + +sw_error_t +fal_cosmap_up_queue_get(a_uint32_t dev_id, a_uint32_t up, + fal_queue_t * queue) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_UP_QU_GET, dev_id, up, + queue); + return rv; +} + +sw_error_t +fal_cosmap_dscp_queue_set(a_uint32_t dev_id, a_uint32_t dscp, fal_queue_t queue) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_DSCP_QU_SET, dev_id, dscp, + (a_uint32_t) queue); + return rv; +} + +sw_error_t +fal_cosmap_dscp_queue_get(a_uint32_t dev_id, a_uint32_t dscp, + fal_queue_t * queue) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_COSMAP_DSCP_QU_GET, dev_id, dscp, + queue); + return rv; +} + +sw_error_t +fal_qos_port_mode_set(a_uint32_t dev_id, fal_port_t port_id, + fal_qos_mode_t mode, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_MODE_SET, dev_id, port_id, mode, + (a_uint32_t) enable); + return rv; +} + +sw_error_t +fal_qos_port_mode_get(a_uint32_t dev_id, fal_port_t port_id, + fal_qos_mode_t mode, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_MODE_GET, dev_id, port_id, mode, + enable); + return rv; +} + +sw_error_t +fal_qos_port_mode_pri_set(a_uint32_t dev_id, fal_port_t port_id, + fal_qos_mode_t mode, a_uint32_t pri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_MODE_PRI_SET, dev_id, port_id, mode, pri); + return rv; +} + +sw_error_t +fal_qos_port_mode_pri_get(a_uint32_t dev_id, fal_port_t port_id, + fal_qos_mode_t mode, a_uint32_t * pri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_MODE_PRI_GET, dev_id, port_id, mode, + pri); + return rv; +} + +sw_error_t +fal_qos_port_default_up_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t up) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_DEF_UP_SET, dev_id, port_id, up); + return rv; +} + +sw_error_t 
+fal_qos_port_default_up_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * up) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_DEF_UP_GET, dev_id, port_id, up); + return rv; +} + +sw_error_t +fal_qos_port_sch_mode_set(a_uint32_t dev_id, a_uint32_t port_id, + fal_sch_mode_t mode, const a_uint32_t weight[]) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_SCH_MODE_SET, dev_id, port_id, mode, + weight); + return rv; +} + +sw_error_t +fal_qos_port_sch_mode_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sch_mode_t * mode, a_uint32_t weight[]) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_SCH_MODE_GET, dev_id, port_id, mode, + weight); + return rv; +} + +sw_error_t +fal_qos_port_default_spri_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t spri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_DEF_SPRI_SET, dev_id, port_id, spri); + return rv; +} + +sw_error_t +fal_qos_port_default_spri_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * spri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_DEF_SPRI_GET, dev_id, port_id, spri); + return rv; +} + +sw_error_t +fal_qos_port_default_cpri_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t cpri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_DEF_CPRI_SET, dev_id, port_id, cpri); + return rv; +} + +sw_error_t +fal_qos_port_default_cpri_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * cpri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_DEF_CPRI_GET, dev_id, port_id, cpri); + return rv; +} + +sw_error_t +fal_qos_port_force_spri_status_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_FORCE_SPRI_ST_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_qos_port_force_spri_status_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t* enable) +{ + sw_error_t rv; + rv = sw_uk_exec(SW_API_QOS_PT_FORCE_SPRI_ST_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t 
+fal_qos_port_force_cpri_status_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_FORCE_CPRI_ST_SET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_qos_port_force_cpri_status_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t* enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PT_FORCE_CPRI_ST_GET, dev_id, port_id, enable); + return rv; +} + +sw_error_t +fal_qos_queue_remark_table_set(a_uint32_t dev_id, fal_port_t port_id, + fal_queue_t queue_id, a_uint32_t tbl_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_QUEUE_REMARK_SET, dev_id, port_id, queue_id, tbl_id, enable); + return rv; +} + + +sw_error_t +fal_qos_queue_remark_table_get(a_uint32_t dev_id, fal_port_t port_id, + fal_queue_t queue_id, a_uint32_t * tbl_id, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_QUEUE_REMARK_GET, dev_id, port_id, queue_id, tbl_id, enable); + return rv; +} + +sw_error_t +fal_qos_port_group_set(a_uint32_t dev_id, fal_port_t port_id, + fal_qos_group_t *group) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_GROUP_SET, dev_id, port_id, group); + return rv; +} + +sw_error_t +fal_qos_port_group_get(a_uint32_t dev_id, fal_port_t port_id, + fal_qos_group_t *group) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_GROUP_GET, dev_id, port_id, group); + return rv; +} + +sw_error_t +fal_qos_port_pri_precedence_set(a_uint32_t dev_id, fal_port_t port_id, + fal_qos_pri_precedence_t *pri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_PRI_SET, dev_id, port_id, pri); + return rv; +} + +sw_error_t +fal_qos_port_pri_precedence_get(a_uint32_t dev_id, fal_port_t port_id, + fal_qos_pri_precedence_t *pri) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_PRI_GET, dev_id, port_id, pri); + return rv; +} + +sw_error_t +fal_qos_port_remark_set(a_uint32_t dev_id, fal_port_t port_id, + fal_qos_remark_enable_t *remark) +{ + sw_error_t rv; + + rv = 
sw_uk_exec(SW_API_QOS_PORT_REMARK_SET, dev_id, port_id, remark); + return rv; +} + +sw_error_t +fal_qos_port_remark_get(a_uint32_t dev_id, fal_port_t port_id, + fal_qos_remark_enable_t *remark) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_REMARK_GET, dev_id, port_id, remark); + return rv; +} + +sw_error_t +fal_qos_cosmap_pcp_set(a_uint32_t dev_id, a_uint8_t group_id, + a_uint8_t pcp, fal_qos_cosmap_t *cosmap) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PCP_MAP_SET, dev_id, group_id, pcp, cosmap); + return rv; +} + +sw_error_t +fal_qos_cosmap_pcp_get(a_uint32_t dev_id, a_uint8_t group_id, + a_uint8_t pcp, fal_qos_cosmap_t *cosmap) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PCP_MAP_GET, dev_id, group_id, pcp, cosmap); + return rv; +} + +sw_error_t +fal_qos_cosmap_flow_set(a_uint32_t dev_id, a_uint8_t group_id, + a_uint16_t flow, fal_qos_cosmap_t *cosmap) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_FLOW_MAP_SET, dev_id, group_id, flow, cosmap); + return rv; +} + +sw_error_t +fal_qos_cosmap_flow_get(a_uint32_t dev_id, a_uint8_t group_id, + a_uint16_t flow, fal_qos_cosmap_t *cosmap) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_FLOW_MAP_GET, dev_id, group_id, flow, cosmap); + return rv; +} + +sw_error_t +fal_qos_cosmap_dscp_set(a_uint32_t dev_id, a_uint8_t group_id, + a_uint8_t dscp, fal_qos_cosmap_t *cosmap) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_DSCP_MAP_SET, dev_id, group_id, dscp, cosmap); + return rv; +} + +sw_error_t +fal_qos_cosmap_dscp_get(a_uint32_t dev_id, a_uint8_t group_id, + a_uint8_t dscp, fal_qos_cosmap_t *cosmap) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_DSCP_MAP_GET, dev_id, group_id, dscp, cosmap); + return rv; +} + +sw_error_t +fal_queue_scheduler_set(a_uint32_t dev_id, a_uint32_t node_id, + fal_queue_scheduler_level_t level, fal_port_t port_id, + fal_qos_scheduler_cfg_t *scheduler_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_QUEUE_SCHEDULER_SET, dev_id, node_id, level, port_id, 
scheduler_cfg); + return rv; +} + +sw_error_t +fal_queue_scheduler_get(a_uint32_t dev_id, a_uint32_t node_id, + fal_queue_scheduler_level_t level, fal_port_t *port_id, + fal_qos_scheduler_cfg_t *scheduler_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_QUEUE_SCHEDULER_GET, dev_id, node_id, level, port_id, scheduler_cfg); + return rv; +} + +sw_error_t +fal_edma_ring_queue_map_set(a_uint32_t dev_id, + a_uint32_t ring_id, fal_queue_bmp_t *queue_bmp) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_RING_QUEUE_MAP_SET, dev_id, ring_id, queue_bmp); + return rv; +} + +sw_error_t +fal_edma_ring_queue_map_get(a_uint32_t dev_id, + a_uint32_t ring_id, fal_queue_bmp_t *queue_bmp) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_RING_QUEUE_MAP_GET, dev_id, ring_id, queue_bmp); + return rv; +} + +sw_error_t +fal_port_queues_get(a_uint32_t dev_id, + fal_port_t port_id, fal_queue_bmp_t *queue_bmp) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_QUEUES_GET, dev_id, port_id, queue_bmp); + return rv; +} + +sw_error_t +fal_scheduler_dequeue_ctrl_set(a_uint32_t dev_id, a_uint32_t queue_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_SCHEDULER_DEQUEU_CTRL_SET, dev_id, queue_id, enable); + return rv; +} + +sw_error_t +fal_scheduler_dequeue_ctrl_get(a_uint32_t dev_id, a_uint32_t queue_id, a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_SCHEDULER_DEQUEU_CTRL_GET, dev_id, queue_id, enable); + return rv; +} + +sw_error_t +fal_port_scheduler_cfg_reset(a_uint32_t dev_id, fal_port_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_SCHEDULER_CFG_RESET, dev_id, port_id); + return rv; +} + +sw_error_t +fal_port_scheduler_resource_get( + a_uint32_t dev_id, + fal_port_t port_id, + fal_portscheduler_resource_t *cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QOS_PORT_SCHEDULER_RESOURCE_GET, dev_id, port_id, cfg); + return rv; +} \ No newline at end of file diff --git 
a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_rate.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_rate.c new file mode 100755 index 000000000..f53ea1bbf --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_rate.c @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_rate.h" +#include "fal_uk_if.h" + +sw_error_t +fal_rate_queue_egrl_set(a_uint32_t dev_id, fal_port_t port_id, + fal_queue_t queue_id, a_uint32_t * speed, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_QU_EGRL_SET, dev_id, port_id, queue_id, + speed, enable); + return rv; +} + +sw_error_t +fal_rate_queue_egrl_get(a_uint32_t dev_id, fal_port_t port_id, + fal_queue_t queue_id, a_uint32_t * speed, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_QU_EGRL_GET, dev_id, port_id, queue_id, + speed, enable); + return rv; +} + +sw_error_t +fal_rate_port_egrl_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * speed, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PT_EGRL_SET, dev_id, port_id, + speed, enable); + return rv; +} + +sw_error_t +fal_rate_port_egrl_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * speed, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PT_EGRL_GET, dev_id, port_id, + speed, enable); + return rv; +} + +sw_error_t +fal_rate_port_inrl_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * speed, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PT_INRL_SET, dev_id, port_id, + speed, enable); + return rv; +} + +sw_error_t +fal_rate_port_inrl_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * speed, a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PT_INRL_GET, dev_id, port_id, + speed, enable); + return rv; +} + +sw_error_t +fal_storm_ctrl_frame_set(a_uint32_t dev_id, fal_port_t port_id, + fal_storm_type_t frame_type, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_STORM_CTRL_FRAME_SET, dev_id, port_id, + frame_type, enable); + return rv; +} + +sw_error_t +fal_storm_ctrl_frame_get(a_uint32_t dev_id, fal_port_t port_id, + fal_storm_type_t frame_type, a_bool_t * enable) +{ + sw_error_t rv; + + rv = 
sw_uk_exec(SW_API_STORM_CTRL_FRAME_GET, dev_id, port_id, + frame_type, enable); + return rv; +} + +sw_error_t +fal_storm_ctrl_rate_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * rate) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_STORM_CTRL_RATE_SET, dev_id, port_id, + rate); + return rv; +} + +sw_error_t +fal_storm_ctrl_rate_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t * rate) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_STORM_CTRL_RATE_GET, dev_id, port_id, + rate); + return rv; +} + +sw_error_t +fal_rate_port_policer_set(a_uint32_t dev_id, fal_port_t port_id, + fal_port_policer_t * policer) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PORT_POLICER_SET, dev_id, port_id, + policer); + return rv; +} + +sw_error_t +fal_rate_port_policer_get(a_uint32_t dev_id, fal_port_t port_id, + fal_port_policer_t * policer) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PORT_POLICER_GET, dev_id, port_id, + policer); + return rv; +} + +sw_error_t +fal_rate_port_shaper_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable, fal_egress_shaper_t * shaper) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PORT_SHAPER_SET, dev_id, port_id, + enable, shaper); + return rv; +} + +sw_error_t +fal_rate_port_shaper_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t * enable, fal_egress_shaper_t * shaper) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PORT_SHAPER_GET, dev_id, port_id, + enable, shaper); + return rv; +} + +sw_error_t +fal_rate_queue_shaper_set(a_uint32_t dev_id, fal_port_t port_id, + fal_queue_t queue_id, a_bool_t enable, + fal_egress_shaper_t * shaper) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_QUEUE_SHAPER_SET, dev_id, port_id, queue_id, + enable, shaper); + return rv; +} + +sw_error_t +fal_rate_queue_shaper_get(a_uint32_t dev_id, fal_port_t port_id, + fal_queue_t queue_id, a_bool_t * enable, + fal_egress_shaper_t * shaper) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_QUEUE_SHAPER_GET, dev_id, port_id, 
queue_id, + enable, shaper); + return rv; +} + +sw_error_t +fal_rate_acl_policer_set(a_uint32_t dev_id, a_uint32_t policer_id, + fal_acl_policer_t * policer) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_ACL_POLICER_SET, dev_id, policer_id, policer); + return rv; +} + +sw_error_t +fal_rate_acl_policer_get(a_uint32_t dev_id, a_uint32_t policer_id, + fal_acl_policer_t * policer) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_ACL_POLICER_GET, dev_id, policer_id, policer); + return rv; +} + +sw_error_t +fal_rate_port_add_rate_byte_set(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PT_ADDRATEBYTE_SET, dev_id, port_id, number); + return rv; +} + +sw_error_t +fal_rate_port_add_rate_byte_get(a_uint32_t dev_id, fal_port_t port_id, + a_uint32_t *number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PT_ADDRATEBYTE_GET, dev_id, port_id, number); + return rv; +} + +sw_error_t +fal_rate_port_gol_flow_en_set(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PT_GOL_FLOW_EN_SET, dev_id, port_id, enable); + return rv; +} + + +sw_error_t +fal_rate_port_gol_flow_en_get(a_uint32_t dev_id, fal_port_t port_id, + a_bool_t* enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RATE_PT_GOL_FLOW_EN_GET, dev_id, port_id, enable); + return rv; +} + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_reg_access.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_reg_access.c new file mode 100755 index 000000000..6825836b5 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_reg_access.c @@ -0,0 +1,159 @@ +/* + * Copyright (c) 2014, 2017-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/*qca808x_start*/ +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_reg_access.h" +#include "fal_uk_if.h" + +sw_error_t +fal_phy_get(a_uint32_t dev_id, a_uint32_t phy_addr, + a_uint32_t reg, a_uint16_t * value) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PHY_GET, dev_id, phy_addr, reg, value); + return rv; +} + +sw_error_t +fal_phy_set(a_uint32_t dev_id, a_uint32_t phy_addr, + a_uint32_t reg, a_uint16_t value) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PHY_SET, dev_id, phy_addr, reg, value); + return rv; +} +/*qca808x_end*/ +sw_error_t +fal_reg_get(a_uint32_t dev_id, a_uint32_t reg_addr, a_uint8_t value[], + a_uint32_t value_len) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_REG_GET, dev_id, reg_addr, value, value_len); + return rv; +} + +sw_error_t +fal_reg_set(a_uint32_t dev_id, a_uint32_t reg_addr, a_uint8_t value[], + a_uint32_t value_len) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_REG_SET, dev_id, reg_addr, value, value_len); + return rv; +} + +sw_error_t +fal_psgmii_reg_get(a_uint32_t dev_id, a_uint32_t reg_addr, a_uint8_t value[], + a_uint32_t value_len) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PSGMII_REG_GET, dev_id, reg_addr, value, value_len); + return rv; +} + +sw_error_t +fal_psgmii_reg_set(a_uint32_t dev_id, a_uint32_t reg_addr, a_uint8_t value[], + a_uint32_t value_len) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PSGMII_REG_SET, dev_id, reg_addr, value, value_len); + return rv; +} + +sw_error_t 
+fal_reg_field_get(a_uint32_t dev_id, a_uint32_t reg_addr, + a_uint32_t bit_offset, a_uint32_t field_len, + a_uint8_t value[], a_uint32_t value_len) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_REG_FIELD_GET, dev_id, reg_addr, bit_offset, field_len, value, value_len); + return rv; +} + + +sw_error_t +fal_reg_field_set(a_uint32_t dev_id, a_uint32_t reg_addr, + a_uint32_t bit_offset, a_uint32_t field_len, + const a_uint8_t value[], a_uint32_t value_len) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_REG_FIELD_SET, dev_id, reg_addr, bit_offset, field_len, value, value_len); + return rv; +} + +sw_error_t +fal_reg_dump(a_uint32_t dev_id, a_uint32_t reg_idx,fal_reg_dump_t *reg_dump) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_REG_DUMP, dev_id, reg_idx, reg_dump); + return rv; +} + + +sw_error_t +fal_dbg_reg_dump(a_uint32_t dev_id,fal_reg_dump_t *reg_dump) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_DBG_REG_DUMP, dev_id, reg_dump); + return rv; +} + +sw_error_t +fal_debug_psgmii_self_test(a_uint32_t dev_id, a_bool_t enable, + a_uint32_t times, a_uint32_t * result) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_DBG_PSGMII_SELF_TEST, dev_id, enable, times, result); + return rv; +} +sw_error_t +fal_phy_dump(a_uint32_t dev_id, a_uint32_t phy_addr, a_uint32_t reg_idx,fal_phy_dump_t *phy_dump) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PHY_DUMP, dev_id, phy_addr, reg_idx, phy_dump); + return rv; +} + +sw_error_t +fal_uniphy_reg_get(a_uint32_t dev_id, a_uint32_t index, a_uint32_t reg_addr, + a_uint8_t value[], a_uint32_t value_len) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UNIPHY_REG_GET, dev_id, index, reg_addr, value, value_len); + return rv; +} + +sw_error_t +fal_uniphy_reg_set(a_uint32_t dev_id, a_uint32_t index, a_uint32_t reg_addr, + a_uint8_t value[], a_uint32_t value_len) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_UNIPHY_REG_SET, dev_id, index, reg_addr, value, value_len); + return rv; +} diff --git 
a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_rss_hash.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_rss_hash.c new file mode 100755 index 000000000..28b67b5a3 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_rss_hash.c @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_rss_hash.h" +#include "fal_uk_if.h" + +sw_error_t +fal_rss_hash_config_set(a_uint32_t dev_id, fal_rss_hash_mode_t mode, fal_rss_hash_config_t * config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RSS_HASH_CONFIG_SET, dev_id, mode, + config); + return rv; +} + +sw_error_t +fal_rss_hash_config_get(a_uint32_t dev_id, fal_rss_hash_mode_t mode, fal_rss_hash_config_t * config) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_RSS_HASH_CONFIG_GET, dev_id, mode, + config); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_sec.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_sec.c new file mode 100755 index 000000000..60627a40d --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_sec.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_sec.h" +#include "fal_uk_if.h" + +sw_error_t +fal_sec_norm_item_set(a_uint32_t dev_id, fal_norm_item_t item, void * value) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SEC_NORM_SET, dev_id, item, value); + return rv; +} + +sw_error_t +fal_sec_norm_item_get(a_uint32_t dev_id, fal_norm_item_t item, void * value) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SEC_NORM_GET, dev_id, item, value); + return rv; +} + +sw_error_t +fal_sec_l3_excep_ctrl_set(a_uint32_t dev_id, a_uint32_t excep_type, fal_l3_excep_ctrl_t *ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SEC_EXP_CTRL_SET, dev_id, excep_type, ctrl); + return rv; +} + +sw_error_t +fal_sec_l3_excep_ctrl_get(a_uint32_t dev_id, a_uint32_t excep_type, fal_l3_excep_ctrl_t *ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SEC_EXP_CTRL_GET, dev_id, excep_type, ctrl); + return rv; +} + +sw_error_t +fal_sec_l4_excep_parser_ctrl_get(a_uint32_t dev_id, fal_l4_excep_parser_ctrl *ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SEC_L4_PARSER_CTRL_GET, dev_id, ctrl); + return rv; +} + +sw_error_t +fal_sec_l4_excep_parser_ctrl_set(a_uint32_t dev_id, fal_l4_excep_parser_ctrl *ctrl) +{ + sw_error_t rv; + + rv = 
sw_uk_exec(SW_API_SEC_L4_PARSER_CTRL_SET, dev_id, ctrl); + return rv; +} + +sw_error_t +fal_sec_l3_excep_parser_ctrl_set(a_uint32_t dev_id, fal_l3_excep_parser_ctrl *ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SEC_L3_PARSER_CTRL_SET, dev_id, ctrl); + return rv; +} + +sw_error_t +fal_sec_l3_excep_parser_ctrl_get(a_uint32_t dev_id, fal_l3_excep_parser_ctrl *ctrl) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SEC_L3_PARSER_CTRL_GET, dev_id, ctrl); + return rv; +} + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_servcode.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_servcode.c new file mode 100755 index 000000000..5bc9194ea --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_servcode.c @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_servcode.h" +#include "fal_uk_if.h" + +sw_error_t fal_servcode_config_set(a_uint32_t dev_id, a_uint32_t servcode_index, + fal_servcode_config_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SERVCODE_CONFIG_SET, dev_id, servcode_index, + entry); + return rv; +} + +sw_error_t fal_servcode_config_get(a_uint32_t dev_id, a_uint32_t servcode_index, + fal_servcode_config_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SERVCODE_CONFIG_GET, dev_id, servcode_index, + entry); + return rv; +} + +sw_error_t fal_servcode_loopcheck_en(a_uint32_t dev_id, a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SERVCODE_LOOPCHECK_EN, dev_id, (a_uint32_t) enable); + return rv; +} + +sw_error_t fal_servcode_loopcheck_status_get(a_uint32_t dev_id, a_bool_t *enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SERVCODE_LOOPCHECK_STATUS_GET, dev_id, enable); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_sfp.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_sfp.c new file mode 100755 index 000000000..9b320c040 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_sfp.c @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_sfp.h" +#include "fal_uk_if.h" + +sw_error_t +fal_sfp_eeprom_data_get(a_uint32_t dev_id, a_uint32_t port_id, fal_sfp_data_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_DATA_GET, dev_id, port_id, entry); + return rv; +} + +sw_error_t +fal_sfp_eeprom_data_set(a_uint32_t dev_id, a_uint32_t port_id, fal_sfp_data_t *entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_DATA_SET, dev_id, port_id, entry); + + return rv; +} + +sw_error_t +fal_sfp_device_type_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_dev_type_t *sfp_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_DEV_TYPE_GET, dev_id, port_id, + sfp_id); + return rv; +} + +sw_error_t +fal_sfp_transceiver_code_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_transc_code_t *transc_code) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_TRANSC_CODE_GET, dev_id, port_id, + transc_code); + return rv; +} + +sw_error_t +fal_sfp_rate_encode_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_rate_encode_t *encode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_RATE_ENCODE_GET, dev_id, port_id, + encode); + return rv; +} + +sw_error_t +fal_sfp_link_length_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_link_length_t *link_len) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_LINK_LENGTH_GET, dev_id, port_id, + link_len); + return rv; +} + +sw_error_t +fal_sfp_vendor_info_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_vendor_info_t *vender_info) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_VENDOR_INFO_GET, dev_id, port_id, + vender_info); + return rv; +} + +sw_error_t 
+fal_sfp_laser_wavelength_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_laser_wavelength_t *laser_wavelen) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_LASER_WAVELENGTH_GET, dev_id, port_id, + laser_wavelen); + return rv; +} + +sw_error_t +fal_sfp_option_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_option_t *option) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_OPTION_GET, dev_id, port_id, + option); + return rv; +} + +sw_error_t +fal_sfp_ctrl_rate_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_rate_t *rate_limit) + +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_CTRL_RATE_GET, dev_id, port_id, + rate_limit); + return rv; +} +sw_error_t +fal_sfp_enhanced_cfg_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_enhanced_cfg_t *enhanced_feature) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_ENHANCED_CFG_GET, dev_id, port_id, + enhanced_feature); + return rv; +} + +sw_error_t +fal_sfp_diag_internal_threshold_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_internal_threshold_t *threshold) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_DIAG_THRESHOLD_GET, dev_id, port_id, + threshold); + return rv; +} + +sw_error_t +fal_sfp_diag_extenal_calibration_const_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_cal_const_t *cal_const) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_DIAG_CAL_CONST_GET, dev_id, port_id, + cal_const); + return rv; +} + +sw_error_t +fal_sfp_diag_realtime_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_realtime_diag_t *real_diag) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_DIAG_REALTIME_GET, dev_id, port_id, + real_diag); + return rv; +} + +sw_error_t +fal_sfp_diag_ctrl_status_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_ctrl_status_t *ctrl_status) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_DIAG_CTRL_STATUS_GET, dev_id, port_id, + ctrl_status); + return rv; +} + +sw_error_t +fal_sfp_diag_alarm_warning_flag_get(a_uint32_t dev_id, a_uint32_t port_id, + 
fal_sfp_alarm_warn_flag_t *alarm_warn_flag) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_DIAG_ALARM_WARN_FLAG_GET, dev_id, port_id, + alarm_warn_flag); + return rv; +} + +sw_error_t +fal_sfp_checkcode_get(a_uint32_t dev_id, a_uint32_t port_id, + fal_sfp_cc_type_t cc_type, a_uint8_t *ccode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SFP_CHECKCODE_GET, dev_id, port_id, (a_ulong_t)cc_type, + ccode); + return rv; +} diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_shaper.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_shaper.c new file mode 100755 index 000000000..e579307fe --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_shaper.c @@ -0,0 +1,221 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_shaper.h" +#include "fal_uk_if.h" + + +sw_error_t +fal_port_shaper_timeslot_set(a_uint32_t dev_id, a_uint32_t timeslot) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_SHAPER_TIMESLOT_SET, dev_id, timeslot); + return rv; +} + +sw_error_t +fal_port_shaper_timeslot_get(a_uint32_t dev_id, a_uint32_t *timeslot) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_SHAPER_TIMESLOT_GET, dev_id, timeslot); + return rv; +} + +sw_error_t +fal_flow_shaper_timeslot_set(a_uint32_t dev_id, a_uint32_t timeslot) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_SHAPER_TIMESLOT_SET, dev_id, timeslot); + return rv; +} + +sw_error_t +fal_flow_shaper_timeslot_get(a_uint32_t dev_id, a_uint32_t *timeslot) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_SHAPER_TIMESLOT_GET, dev_id, timeslot); + return rv; +} + +sw_error_t +fal_queue_shaper_timeslot_set(a_uint32_t dev_id, a_uint32_t timeslot) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_SHAPER_TIMESLOT_SET, dev_id, timeslot); + return rv; +} + +sw_error_t +fal_queue_shaper_timeslot_get(a_uint32_t dev_id, a_uint32_t *timeslot) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_SHAPER_TIMESLOT_GET, dev_id, timeslot); + return rv; +} + +sw_error_t +fal_port_shaper_token_number_set(a_uint32_t dev_id, fal_port_t port_id, + fal_shaper_token_number_t * token_number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_SHAPER_TOKEN_NUMBER_SET, dev_id, port_id, + token_number); + return rv; +} + +sw_error_t +fal_port_shaper_token_number_get(a_uint32_t dev_id, fal_port_t port_id, + fal_shaper_token_number_t * token_number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_SHAPER_TOKEN_NUMBER_GET, dev_id, port_id, + token_number); + return rv; +} + +sw_error_t +fal_flow_shaper_token_number_set(a_uint32_t dev_id, a_uint32_t flow_id, + fal_shaper_token_number_t * token_number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_SHAPER_TOKEN_NUMBER_SET, dev_id, 
flow_id, + token_number); + return rv; +} + +sw_error_t +fal_flow_shaper_token_number_get(a_uint32_t dev_id, a_uint32_t flow_id, + fal_shaper_token_number_t * token_number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_SHAPER_TOKEN_NUMBER_GET, dev_id, flow_id, + token_number); + return rv; +} + +sw_error_t +fal_queue_shaper_token_number_set(a_uint32_t dev_id, a_uint32_t queue_id, + fal_shaper_token_number_t * token_number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_SHAPER_TOKEN_NUMBER_SET, dev_id, queue_id, + token_number); + return rv; +} + +sw_error_t +fal_queue_shaper_token_number_get(a_uint32_t dev_id, a_uint32_t queue_id, + fal_shaper_token_number_t * token_number) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_SHAPER_TOKEN_NUMBER_GET, dev_id, queue_id, + token_number); + return rv; +} + +sw_error_t +fal_port_shaper_set(a_uint32_t dev_id, fal_port_t port_id, fal_shaper_config_t * shaper) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_SHAPER_SET, dev_id, port_id, + shaper); + return rv; +} + +sw_error_t +fal_port_shaper_get(a_uint32_t dev_id, fal_port_t port_id, fal_shaper_config_t * shaper) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_SHAPER_GET, dev_id, port_id, + shaper); + return rv; +} + +sw_error_t +fal_flow_shaper_set(a_uint32_t dev_id, a_uint32_t flow_id, fal_shaper_config_t * shaper) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_SHAPER_SET, dev_id, flow_id, + shaper); + return rv; +} + +sw_error_t +fal_flow_shaper_get(a_uint32_t dev_id, a_uint32_t flow_id, fal_shaper_config_t * shaper) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_FLOW_SHAPER_GET, dev_id, flow_id, + shaper); + return rv; +} + +sw_error_t +fal_queue_shaper_set(a_uint32_t dev_id, a_uint32_t queue_id, fal_shaper_config_t * shaper) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_SHAPER_SET, dev_id, queue_id, + shaper); + return rv; +} + +sw_error_t +fal_queue_shaper_get(a_uint32_t dev_id, a_uint32_t queue_id, fal_shaper_config_t * shaper) +{ + 
sw_error_t rv; + + rv = sw_uk_exec(SW_API_QUEUE_SHAPER_GET, dev_id, queue_id, + shaper); + return rv; +} + +sw_error_t +fal_shaper_ipg_preamble_length_set(a_uint32_t dev_id, a_uint32_t ipg_pre_length) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SHAPER_IPG_PRE_SET, dev_id, ipg_pre_length); + return rv; +} + +sw_error_t +fal_shaper_ipg_preamble_length_get(a_uint32_t dev_id, a_uint32_t *ipg_pre_length) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_SHAPER_IPG_PRE_GET, dev_id, ipg_pre_length); + return rv; +} + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_stp.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_stp.c new file mode 100755 index 000000000..dce40792a --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_stp.c @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_stp.h" +#include "fal_uk_if.h" + +sw_error_t +fal_stp_port_state_set(a_uint32_t dev_id, a_uint32_t st_id, + fal_port_t port_id, fal_stp_state_t state) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_STP_PT_STATE_SET, dev_id, st_id, port_id, + (a_uint32_t) state); + return rv; +} + +sw_error_t +fal_stp_port_state_get(a_uint32_t dev_id, a_uint32_t st_id, + fal_port_t port_id, fal_stp_state_t * state) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_STP_PT_STATE_GET, dev_id, st_id, port_id, + state); + return rv; +} diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_trunk.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_trunk.c new file mode 100755 index 000000000..d509d41f8 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_trunk.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2014, 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_trunk.h" +#include "fal_uk_if.h" + + +sw_error_t +fal_trunk_group_set(a_uint32_t dev_id, a_uint32_t trunk_id, + a_bool_t enable, fal_pbmp_t member) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TRUNK_GROUP_SET, dev_id, trunk_id, enable, + (a_uint32_t) member); + return rv; +} + +sw_error_t +fal_trunk_group_get(a_uint32_t dev_id, a_uint32_t trunk_id, + a_bool_t * enable, fal_pbmp_t * member) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TRUNK_GROUP_GET, dev_id, trunk_id, enable, + member); + return rv; +} + +sw_error_t +fal_trunk_hash_mode_set(a_uint32_t dev_id, a_uint32_t hash_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TRUNK_HASH_SET, dev_id, hash_mode); + return rv; +} + +sw_error_t +fal_trunk_hash_mode_get(a_uint32_t dev_id, a_uint32_t * hash_mode) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TRUNK_HASH_GET, dev_id, hash_mode); + return rv; +} + +sw_error_t +fal_trunk_manipulate_sa_set(a_uint32_t dev_id, fal_mac_addr_t * addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TRUNK_MAN_SA_SET, dev_id, addr); + return rv; +} + +sw_error_t +fal_trunk_manipulate_sa_get(a_uint32_t dev_id, fal_mac_addr_t * addr) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TRUNK_MAN_SA_GET, dev_id, addr); + return rv; +} + +sw_error_t +fal_trunk_failover_enable(a_uint32_t dev_id, a_bool_t failover) + +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TRUNK_FAILOVER_EN_SET, dev_id, failover); + return rv; +} + +sw_error_t +fal_trunk_failover_status_get(a_uint32_t dev_id, a_bool_t *failover) + +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_TRUNK_FAILOVER_EN_GET, dev_id, failover); + return rv; +} + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_uk_if.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_uk_if.c new file mode 100755 index 000000000..891ef938d --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_uk_if.c @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2014, 2017, The Linux 
Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +#include +#include "sw.h" +#include "ssdk_init.h" +#include "sw_api.h" +#include "sw_api_us.h" +#include "api_access.h" + +sw_error_t +sw_uk_exec(a_uint32_t api_id, ...) +{ + unsigned long value[SW_MAX_API_PARAM] = { 0 }; + unsigned long rtn = SW_OK, i; + va_list arg_ptr; + a_uint32_t nr_param = 0; + + if((nr_param = sw_api_param_nums(api_id)) == 0) + { + return SW_NOT_SUPPORTED; + } + + value[0] = (unsigned long)api_id; + value[1] = (unsigned long)&rtn; + + va_start(arg_ptr, api_id); + for (i = 0; i < nr_param; i++) + { + value[i + 2] = va_arg(arg_ptr, unsigned long); + } + va_end(arg_ptr); + sw_uk_if(value); + + return rtn; +} + +sw_error_t +ssdk_init(a_uint32_t dev_id, ssdk_init_cfg * cfg) +{ + sw_error_t rv; + + rv = sw_uk_init(cfg->nl_prot); + return rv; +} + +sw_error_t +ssdk_cleanup(void) +{ + sw_error_t rv; + + rv = sw_uk_cleanup(); + return rv; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_vlan.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_vlan.c new file mode 100755 index 000000000..eef60b186 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_vlan.c @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + + +#include "sw.h" +#include "sw_ioctl.h" +#include "fal_vlan.h" +#include "fal_uk_if.h" + +sw_error_t +fal_vlan_entry_append(a_uint32_t dev_id, fal_vlan_t * vlan_entry) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_APPEND, dev_id, vlan_entry); + return rv; +} + + +sw_error_t +fal_vlan_create(a_uint32_t dev_id, a_uint32_t vlan_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_ADD, dev_id, vlan_id); + return rv; +} + +sw_error_t +fal_vlan_next(a_uint32_t dev_id, a_uint32_t vlan_id, fal_vlan_t * p_vlan) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_NEXT, dev_id, vlan_id, p_vlan); + return rv; +} + +sw_error_t +fal_vlan_find(a_uint32_t dev_id, a_uint32_t vlan_id, fal_vlan_t * p_vlan) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_FIND, dev_id, vlan_id, p_vlan); + return rv; +} + +sw_error_t +fal_vlan_member_update(a_uint32_t dev_id, a_uint32_t vlan_id, + fal_pbmp_t member, fal_pbmp_t u_member) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_MEM_UPDATE, dev_id, vlan_id, member, + u_member); + return rv; +} + +sw_error_t +fal_vlan_delete(a_uint32_t dev_id, a_uint32_t vlan_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_DEL, dev_id, vlan_id); + return rv; +} + +sw_error_t +fal_vlan_flush(a_uint32_t dev_id) 
+{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_FLUSH, dev_id); + return rv; +} + +sw_error_t +fal_vlan_fid_set(a_uint32_t dev_id, a_uint32_t vlan_id, a_uint32_t fid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_FID_SET, dev_id, vlan_id, fid); + return rv; +} + +sw_error_t +fal_vlan_fid_get(a_uint32_t dev_id, a_uint32_t vlan_id, a_uint32_t * fid) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_FID_GET, dev_id, vlan_id, fid); + return rv; +} + +sw_error_t +fal_vlan_member_add(a_uint32_t dev_id, a_uint32_t vlan_id, + fal_port_t port_id, fal_pt_1q_egmode_t port_info) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_MEMBER_ADD, dev_id, vlan_id, port_id, port_info); + return rv; +} + +sw_error_t +fal_vlan_member_del(a_uint32_t dev_id, a_uint32_t vlan_id, fal_port_t port_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_MEMBER_DEL, dev_id, vlan_id, port_id); + return rv; +} + +sw_error_t +fal_vlan_learning_state_set(a_uint32_t dev_id, a_uint32_t vlan_id, + a_bool_t enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_LEARN_STATE_SET, dev_id, vlan_id, enable); + return rv; +} + +sw_error_t +fal_vlan_learning_state_get(a_uint32_t dev_id, a_uint32_t vlan_id, + a_bool_t * enable) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VLAN_LEARN_STATE_GET, dev_id, vlan_id, enable); + return rv; +} + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_vsi.c b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_vsi.c new file mode 100755 index 000000000..ddfe8c623 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/fal_uk/fal_vsi.c @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. 
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ +#include "sw_ioctl.h" +#include "fal_vsi.h" +#include "fal_uk_if.h" + +sw_error_t +fal_port_vsi_get(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t *vsi_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_VSI_GET, dev_id, port_id, vsi_id); + return rv; +} + +sw_error_t +fal_port_vlan_vsi_set(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t stag_vid, a_uint32_t ctag_vid, a_uint32_t vsi_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_VLAN_VSI_SET, dev_id, port_id, stag_vid, ctag_vid, vsi_id); + return rv; +} + +sw_error_t +fal_vsi_free(a_uint32_t dev_id, a_uint32_t vsi) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VSI_FREE, dev_id, vsi); + return rv; +} + +sw_error_t +fal_port_vlan_vsi_get(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t stag_vid, a_uint32_t ctag_vid, a_uint32_t *vsi_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_VLAN_VSI_GET, dev_id, port_id, stag_vid, ctag_vid, vsi_id); + return rv; +} + +sw_error_t +fal_port_vsi_set(a_uint32_t dev_id, fal_port_t port_id, a_uint32_t vsi_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_PORT_VSI_SET, dev_id, port_id, vsi_id); + return rv; +} + +sw_error_t +fal_vsi_alloc(a_uint32_t dev_id, a_uint32_t *vsi) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VSI_ALLOC, dev_id, vsi); + return rv; +} + +sw_error_t +fal_vsi_tbl_dump(a_uint32_t dev_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VSI_TBL_DUMP, dev_id); + return rv; +} + +sw_error_t +fal_vsi_stamove_set(a_uint32_t dev_id, 
a_uint32_t vsi_id, fal_vsi_stamove_t *stamove) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VSI_STAMOVE_SET, dev_id, vsi_id, stamove); + return rv; +} +sw_error_t +fal_vsi_stamove_get(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_stamove_t *stamove) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VSI_STAMOVE_GET, dev_id, vsi_id, stamove); + return rv; +} +sw_error_t +fal_vsi_newaddr_lrn_get(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_newaddr_lrn_t *newaddr_lrn) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VSI_NEWADDR_LRN_GET, dev_id, vsi_id, newaddr_lrn); + return rv; +} +sw_error_t +fal_vsi_newaddr_lrn_set(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_newaddr_lrn_t *newaddr_lrn) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VSI_NEWADDR_LRN_SET, dev_id, vsi_id, newaddr_lrn); + return rv; +} +sw_error_t +fal_vsi_member_get(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_member_t *vsi_member) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VSI_MEMBER_GET, dev_id, vsi_id, vsi_member); + return rv; +} +sw_error_t +fal_vsi_member_set(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_member_t *vsi_member) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VSI_MEMBER_SET, dev_id, vsi_id, vsi_member); + return rv; +} + +sw_error_t +fal_vsi_counter_get(a_uint32_t dev_id, a_uint32_t vsi_id, fal_vsi_counter_t *counter) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VSI_COUNTER_GET, dev_id, vsi_id, counter); + return rv; +} + +sw_error_t +fal_vsi_counter_cleanup(a_uint32_t dev_id, a_uint32_t vsi_id) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_VSI_COUNTER_CLEANUP, dev_id, vsi_id); + return rv; +} + + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/ref/Makefile b/feeds/ipq807x/qca-ssdk-shell/src/src/ref/Makefile new file mode 100755 index 000000000..f92ae6c45 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/ref/Makefile @@ -0,0 +1,16 @@ +LOC_DIR=src/ref +LIB=REF + +include $(PRJ_PATH)/make/config.mk + +SRC_LIST= + +ifeq (TRUE, $(IN_VLAN)) + SRC_LIST += ref_vlan.c +endif 
+ +include $(PRJ_PATH)/make/components.mk +include $(PRJ_PATH)/make/defs.mk +include $(PRJ_PATH)/make/target.mk + +all: dep obj diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/ref/ref_vlan.c b/feeds/ipq807x/qca-ssdk-shell/src/src/ref/ref_vlan.c new file mode 100755 index 000000000..d1af39455 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/ref/ref_vlan.c @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include "sw.h" +#include "sw_ioctl.h" +#include "ref_vlan.h" +#include "fal_uk_if.h" + +sw_error_t +qca_lan_wan_cfg_set(a_uint32_t dev_id, qca_lan_wan_cfg_t *lan_wan_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_LAN_WAN_CFG_SET, dev_id, lan_wan_cfg); + return rv; +} + +sw_error_t +qca_lan_wan_cfg_get(a_uint32_t dev_id, qca_lan_wan_cfg_t *lan_wan_cfg) +{ + sw_error_t rv; + + rv = sw_uk_exec(SW_API_LAN_WAN_CFG_GET, dev_id, lan_wan_cfg); + return rv; +} diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/sal/Makefile b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/Makefile new file mode 100755 index 000000000..805ae0f63 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/Makefile @@ -0,0 +1,12 @@ +LOC_DIR=src/sal +LIB=SAL + +include $(PRJ_PATH)/make/config.mk + +SRC_LIST=$(wildcard *.c) + +include $(PRJ_PATH)/make/components.mk +include $(PRJ_PATH)/make/defs.mk +include $(PRJ_PATH)/make/target.mk + +all: dep obj diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/Makefile b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/Makefile new file mode 100755 index 000000000..74c50e2eb --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/Makefile @@ -0,0 +1,12 @@ +LOC_DIR=src/sal/sd +LIB=SAL + +include $(PRJ_PATH)/make/config.mk + +SRC_LIST=$(wildcard *.c) + +include $(PRJ_PATH)/make/components.mk +include $(PRJ_PATH)/make/defs.mk +include $(PRJ_PATH)/make/target.mk + +all: dep obj \ No newline at end of file diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/Makefile b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/Makefile new file mode 100755 index 000000000..a038efc6d --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/Makefile @@ -0,0 +1,12 @@ +LOC_DIR=src/sal/sd/linux +LIB=SAL + +include $(PRJ_PATH)/make/config.mk + +SRC_LIST=$(wildcard *.c) + +include $(PRJ_PATH)/make/components.mk +include $(PRJ_PATH)/make/defs.mk +include $(PRJ_PATH)/make/target.mk + +all: dep obj \ No newline at end of file diff --git 
a/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/Makefile b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/Makefile new file mode 100755 index 000000000..a4bac32fe --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/Makefile @@ -0,0 +1,34 @@ +LOC_DIR=src/sal/sd/linux/uk_interface +LIB=SAL + +include $(PRJ_PATH)/make/config.mk + +SRC_LIST= + +ifeq (TRUE, $(UK_IF)) +ifeq (KSLIB, $(MODULE_TYPE)) + ifeq (TRUE, $(UK_NETLINK)) + SRC_LIST=sw_api_ks_netlink.c + endif + + ifeq (TRUE, $(UK_IOCTL)) + SRC_LIST=sw_api_ks_ioctl.c + endif +endif + +ifeq (USLIB, $(MODULE_TYPE)) + ifeq (TRUE, $(UK_NETLINK)) + SRC_LIST=sw_api_us_netlink.c + endif + + ifeq (TRUE, $(UK_IOCTL)) + SRC_LIST=sw_api_us_ioctl.c + endif +endif +endif + +include $(PRJ_PATH)/make/components.mk +include $(PRJ_PATH)/make/defs.mk +include $(PRJ_PATH)/make/target.mk + +all: dep obj \ No newline at end of file diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/sw_api_us_ioctl.c b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/sw_api_us_ioctl.c new file mode 100755 index 000000000..6e977726f --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/sw_api_us_ioctl.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2014, 2017-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "sw.h" +#include "sw_api.h" +#include "sw_api_us.h" + +#define MISC_CHR_DEV 10 +static int glb_socket_fd = 0; + +sw_error_t +sw_uk_if(unsigned long arg_val[SW_MAX_API_PARAM]) +{ + ioctl(glb_socket_fd, SIOCDEVPRIVATE, arg_val); + return SW_OK; +} + +#ifndef SHELL_DEV +#define SHELL_DEV "/dev/switch_ssdk" +#endif +#define MISC_DEV "/proc/misc" + +static int sw_device_minor_get(a_uint32_t *device_minor) +{ + char buf[200] = {0}; + FILE *fp; + char *p; + + fp = fopen(MISC_DEV, "r"); + if (!fp) { + printf("failed to open %s\n", MISC_DEV); + return -1; + } + fseek(fp, 0, SEEK_SET); + while (fgets(buf, 200, fp) != NULL) { + p = strstr(buf, "switch_ssdk"); + if (p) { + sscanf(buf,"%d",device_minor); + fclose(fp); + return 0; + } + } + + fclose(fp); + return -1; +} + +static void sw_device_check(void) +{ + struct stat buf; + a_uint32_t file_minor; + a_uint32_t device_minor; + int rv; + + memset(&buf, 0, sizeof(buf)); + + if (stat( SHELL_DEV, &buf) < 0) { + printf("failed to stat!\n"); + return; + } + if (S_ISCHR(buf.st_mode)) { + file_minor = minor(buf.st_rdev); + rv = sw_device_minor_get(&device_minor); + if (!rv) { + if (device_minor != file_minor) + printf("device:%x file:%x mismatch!\n", + device_minor, file_minor); + else + printf("device:%x file:%x match!\n", + device_minor, file_minor); + } + } + +} + + +sw_error_t +sw_uk_init(a_uint32_t nl_prot) +{ + if (!glb_socket_fd) + { + /* even mknod fail we not quit, perhaps the device node exist already */ +#if defined UK_MINOR_DEV + mknod(SHELL_DEV, S_IFCHR, makedev(MISC_CHR_DEV, UK_MINOR_DEV)); 
+#else + mknod(SHELL_DEV, S_IFCHR, makedev(MISC_CHR_DEV, nl_prot)); +#endif + if ((glb_socket_fd = open(SHELL_DEV, O_RDWR)) < 0) + { + sw_device_check(); + return SW_INIT_ERROR; + } + } + + return SW_OK; +} + +sw_error_t +sw_uk_cleanup(void) +{ + close(glb_socket_fd); + glb_socket_fd = 0; +#if 0 + remove("/dev/switch_ssdk"); +#endif + return SW_OK; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/sw_api_us_netlink.c b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/sw_api_us_netlink.c new file mode 100755 index 000000000..d3a448000 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/linux/uk_interface/sw_api_us_netlink.c @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include "sw.h" +#include "sw_api.h" +#include "sw_api_us.h" + +#define SSDK_SOCK_SEND_TRY_NUM 1000 +#define SSDK_SOCK_RCV_TRY_NUM 1000 +#define SSDK_SOCK_FD_NUM 16 +typedef struct +{ + a_uint32_t ssdk_sock_pid; + a_int32_t ssdk_sock_fd; +} ssdk_sock_t; +ssdk_sock_t ssdk_sock[SSDK_SOCK_FD_NUM]; + +static a_uint32_t ssdk_sock_prot = 0; +static struct nlmsghdr *nl_hdr = NULL; +#if defined(API_LOCK) +static aos_lock_t ssdk_sock_lock; +#define SOCK_LOCKER_INIT aos_lock_init(&ssdk_sock_lock) +#define SOCK_LOCKER_LOCK aos_lock(&ssdk_sock_lock) +#define SOCK_LOCKER_UNLOCK aos_unlock(&ssdk_sock_lock) +#else +#define SOCK_LOCKER_INIT +#define SOCK_LOCKER_LOCK +#define SOCK_LOCKER_UNLOCK +#endif + +static ssdk_sock_t * +ssdk_sock_alloc(a_uint32_t pid) +{ + a_uint32_t i; + + for (i = 0; i < SSDK_SOCK_FD_NUM; i++) + { + if (!ssdk_sock[i].ssdk_sock_pid) + { + return &ssdk_sock[i]; + } + else + { + if (0 != kill(ssdk_sock[i].ssdk_sock_pid, 0)) + { + return &ssdk_sock[i]; + } + } + } + + return NULL; +} + +static ssdk_sock_t * +ssdk_sock_find(a_uint32_t pid) +{ + a_uint32_t i; + + for (i = 0; i < SSDK_SOCK_FD_NUM; i++) + { + if (ssdk_sock[i].ssdk_sock_pid == pid) + { + return &ssdk_sock[i]; + } + } + + return NULL; +} + +sw_error_t +sw_uk_if(a_uint32_t arg_val[SW_MAX_API_PARAM]) +{ + struct sockaddr_nl src_addr; + struct sockaddr_nl dest_addr; + struct msghdr msg; + struct iovec iov; + struct nlmsghdr *nlh; + ssdk_sock_t * sock; + a_int32_t sock_fd; + a_uint32_t curr_pid; + sw_error_t rv = SW_OK; + a_uint32_t i, j, flag; + + curr_pid = getpid(); + + SOCK_LOCKER_LOCK; + sock = ssdk_sock_find(curr_pid); + if (!sock) + { + sock = ssdk_sock_alloc(curr_pid); + if (!sock) + { + SW_OUT_ON_ERROR(SW_NO_RESOURCE); + } + + sock_fd = socket(PF_NETLINK, SOCK_RAW, ssdk_sock_prot); + aos_mem_set(&src_addr, 0, sizeof(src_addr)); + src_addr.nl_family = AF_NETLINK; + src_addr.nl_pid = curr_pid; + src_addr.nl_groups = 0; + bind(sock_fd, (struct 
sockaddr*)&src_addr, sizeof(src_addr)); + + sock->ssdk_sock_fd = sock_fd; + sock->ssdk_sock_pid = curr_pid; + } + else + { + sock_fd = sock->ssdk_sock_fd; + } + + aos_mem_set(&dest_addr, 0, sizeof(dest_addr)); + dest_addr.nl_family = AF_NETLINK; + dest_addr.nl_pid = 0; + dest_addr.nl_groups = 0; + + nlh = nl_hdr; + aos_mem_set(nlh, 0, NLMSG_SPACE(SW_MAX_PAYLOAD)); + nlh->nlmsg_len = NLMSG_SPACE(SW_MAX_PAYLOAD); + nlh->nlmsg_pid = curr_pid; + nlh->nlmsg_flags = 0; + aos_mem_copy(NLMSG_DATA(nlh), arg_val, SW_MAX_PAYLOAD); + + iov.iov_base = (void *)nlh; + iov.iov_len = nlh->nlmsg_len; + + aos_mem_set(&msg, 0, sizeof(msg)); + msg.msg_name = (void *)&dest_addr; + msg.msg_namelen = sizeof(dest_addr); + msg.msg_iov = &iov; + msg.msg_iovlen = 1; + + for (i = 0; i < SSDK_SOCK_SEND_TRY_NUM; i++) + { + if (0 < sendmsg(sock_fd, &msg, MSG_DONTWAIT)) + { + break; + } + } + + if (SSDK_SOCK_SEND_TRY_NUM <= i) + { + SW_OUT_ON_ERROR(SW_TIMEOUT); + } + + flag = 0; + aos_mem_set(nlh, 0, NLMSG_SPACE(SW_MAX_PAYLOAD)); + for (i = 0; i < SSDK_SOCK_RCV_TRY_NUM; i++) + { + for (j = 0; j < 1000; j++) + { + if (0 < recvmsg(sock_fd, &msg, MSG_DONTWAIT)) + { + flag = 1; + break; + } + } + + if (flag) + { + break; + } + else + { + aos_mdelay(10); + } + } + + if (SSDK_SOCK_RCV_TRY_NUM <= i) + { + SW_OUT_ON_ERROR(SW_TIMEOUT); + } + +out: + SOCK_LOCKER_UNLOCK; + return rv; +} + +sw_error_t +sw_uk_init(a_uint32_t nl_prot) +{ + if (!nl_hdr) + { + nl_hdr = (struct nlmsghdr *)aos_mem_alloc(NLMSG_SPACE(SW_MAX_PAYLOAD)); + } + + if (!nl_hdr) + { + return SW_NO_RESOURCE; + } + +#if defined UK_NL_PROT + ssdk_sock_prot = UK_NL_PROT; +#else + ssdk_sock_prot = nl_prot; +#endif + SOCK_LOCKER_INIT; + aos_mem_zero(ssdk_sock, sizeof(ssdk_sock_t) * SSDK_SOCK_FD_NUM); + return SW_OK; +} + +sw_error_t +sw_uk_cleanup(void) +{ + aos_mem_zero(ssdk_sock, sizeof(ssdk_sock_t) * SSDK_SOCK_FD_NUM); + + if (nl_hdr) + { + aos_mem_free(nl_hdr); + nl_hdr = NULL; + } + + return SW_OK; +} + diff --git 
a/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/sd.c b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/sd.c new file mode 100755 index 000000000..9b7037c95 --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/sal/sd/sd.c @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2014, 2017, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + + +#include "sw.h" +#include "ssdk_init.h" +#include "sd.h" +#include "sw_api.h" +#if ((!defined(KERNEL_MODULE)) && defined(UK_IF)) +#include "sw_api_us.h" +#endif + +mdio_reg_set ssdk_mdio_set = NULL; +mdio_reg_get ssdk_mdio_get = NULL; +hdr_reg_set ssdk_hdr_reg_set = NULL; +hdr_reg_get ssdk_hdr_reg_get = NULL; +uniphy_reg_set ssdk_uniphy_reg_set = NULL; +uniphy_reg_get ssdk_uniphy_reg_get = NULL; +mii_reg_set ssdk_mii_reg_set = NULL; +mii_reg_get ssdk_mii_reg_get = NULL; + +sw_error_t +sd_reg_mdio_set(a_uint32_t dev_id, a_uint32_t phy, a_uint32_t reg, + a_uint16_t data) +{ + sw_error_t rv = SW_OK; + + if (NULL != ssdk_mdio_set) + { + rv = ssdk_mdio_set(dev_id, phy, reg, data); + } + else + { + return SW_NOT_SUPPORTED; + } + + return rv; +} + +sw_error_t +sd_reg_mdio_get(a_uint32_t dev_id, a_uint32_t phy, a_uint32_t reg, a_uint16_t * data) +{ + sw_error_t rv = SW_OK; + + if (NULL != ssdk_mdio_get) + { + rv = ssdk_mdio_get(dev_id, phy, reg, data); + } + else + { + return SW_NOT_SUPPORTED; + } + + return rv; +} + +sw_error_t +sd_reg_hdr_set(a_uint32_t dev_id, a_uint32_t reg_addr, a_uint8_t * reg_data, a_uint32_t len) +{ + sw_error_t rv; + + if (NULL != ssdk_hdr_reg_set) + { + rv = ssdk_hdr_reg_set(dev_id, reg_addr, reg_data, len); + } + else + { + return SW_NOT_SUPPORTED; + } + + return rv; +} + +sw_error_t +sd_reg_hdr_get(a_uint32_t dev_id, a_uint32_t reg_addr, a_uint8_t * reg_data, a_uint32_t len) +{ + sw_error_t rv; + + if (NULL != ssdk_hdr_reg_get) + { + rv = ssdk_hdr_reg_get(dev_id, reg_addr, reg_data, len); + } + else + { + return SW_NOT_SUPPORTED; + } + + return rv; +} + +sw_error_t +sd_reg_uniphy_set(a_uint32_t dev_id, a_uint32_t index, a_uint32_t reg_addr, + a_uint8_t * reg_data, a_uint32_t len) +{ + sw_error_t rv; + + if (NULL != ssdk_uniphy_reg_set) + { + rv = ssdk_uniphy_reg_set(dev_id, index, reg_addr, reg_data, len); + } + else + { + return SW_NOT_SUPPORTED; + } + + return rv; +} + +sw_error_t +sd_reg_uniphy_get(a_uint32_t dev_id, a_uint32_t 
index, a_uint32_t reg_addr, + a_uint8_t * reg_data, a_uint32_t len) +{ + sw_error_t rv; + + if (NULL != ssdk_uniphy_reg_get) + { + rv = ssdk_uniphy_reg_get(dev_id, index, reg_addr, reg_data, len); + } + else + { + return SW_NOT_SUPPORTED; + } + + return rv; +} + +void +sd_reg_mii_set(a_uint32_t reg, a_uint32_t val) +{ + if (NULL != ssdk_mii_reg_set) + { + ssdk_mii_reg_set(reg, val); + } +} + +a_uint32_t +sd_reg_mii_get(a_uint32_t reg) +{ + a_uint32_t value = 0; + + if (NULL != ssdk_mii_reg_get) + { + value = ssdk_mii_reg_get(reg); + } + + return value; +} + +sw_error_t +sd_init(a_uint32_t dev_id, ssdk_init_cfg * cfg) +{ + if (NULL != cfg->reg_func.mdio_set) + { + ssdk_mdio_set = cfg->reg_func.mdio_set; + } + + if (NULL != cfg->reg_func.mdio_get) + { + ssdk_mdio_get = cfg->reg_func.mdio_get; + } + + if (NULL != cfg->reg_func.header_reg_set) + { + ssdk_hdr_reg_set = cfg->reg_func.header_reg_set; + } + + if (NULL != cfg->reg_func.header_reg_get) + { + ssdk_hdr_reg_get = cfg->reg_func.header_reg_get; + } + if (NULL != cfg->reg_func.uniphy_reg_set) + { + ssdk_uniphy_reg_set = cfg->reg_func.uniphy_reg_set; + } + + if (NULL != cfg->reg_func.uniphy_reg_get) + { + ssdk_uniphy_reg_get = cfg->reg_func.uniphy_reg_get; + } + + if (NULL != cfg->reg_func.mii_reg_set) + { + ssdk_mii_reg_set = cfg->reg_func.mii_reg_set; + } + + if (NULL != cfg->reg_func.mii_reg_get) + { + ssdk_mii_reg_get = cfg->reg_func.mii_reg_get; + } + + return SW_OK; +} + diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/shell/Makefile b/feeds/ipq807x/qca-ssdk-shell/src/src/shell/Makefile new file mode 100755 index 000000000..668a48a4a --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/shell/Makefile @@ -0,0 +1,24 @@ +LOC_DIR=src/shell +LIB=SHELL + +include $(PRJ_PATH)/make/config.mk +include $(PRJ_PATH)/make/components.mk + +SRC_LIST=$(wildcard *.c) +ifeq (,$(findstring SHELL, $(COMPONENTS))) +all: dep obj +else +all: dep obj lib +endif + +include $(PRJ_PATH)/make/defs.mk +include 
$(PRJ_PATH)/make/target.mk + +ifeq (TRUE, $(API_LOCK)) + PT_LIB=-lpthread +else + PT_LIB= +endif + +lib: + $(CC) $(CFLAGS) $(EXTRA_CFLAGS) $(LDFLAGS) $(EXTRA_LDFLAGS) $(OBJ_FILE) $(BIN_DIR)/$(US_MOD)_$(RUNMODE).a -o $(DST_DIR)/$(SHELLOBJ) $(PT_LIB) diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell.c b/feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell.c new file mode 100755 index 000000000..fe583ba0c --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell.c @@ -0,0 +1,858 @@ +/* + * Copyright (c) 2014, 2017-2018, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +/*qca808x_start*/ +#include +#include +#include +#include "shell.h" +#include "shell_io.h" +#include "shell_sw.h" +#include "shell_lib.h" +#include "shell_config.h" +#include "api_access.h" +#include "fal_uk_if.h" + +a_ulong_t *ioctl_buf = NULL; +ssdk_init_cfg init_cfg = def_init_cfg; +ssdk_cfg_t ssdk_cfg; +static a_uint32_t flag = 0; + +static a_ulong_t *ioctl_argp; +static FILE * out_fd; +char dev_id_path[] = "/sys/ssdk/dev_id"; +#ifndef SSDK_STR +#define SSDK_STR "SSDK" +#endif +static char *err_info[] = +{ + "Operation succeeded", /*SW_OK*/ + "Operation failed", /*SW_FAIL*/ + "Illegal value ", /*SW_BAD_VALUE*/ + "Value is out of range ", /*SW_OUT_OF_RANGE*/ + "Illegal parameter(s) ", /*SW_BAD_PARAM*/ + "Illegal pointer value ", /*SW_BAD_PTR*/ + "Wrong length", /*SW_BAD_LEN*/ + "Wrong state of state machine ", /*SW_BAD_STATE*/ + "Read operation failed ", /*SW_READ_ERROR*/ + "Write operation failed ", /*SW_WRITE_ERROR*/ + "Fail in creating an entry ", /*SW_CREATE_ERROR*/ + "Fail in deleteing an entry ", /*SW_DELETE_ERROR*/ + "Entry not found ", /*SW_NOT_FOUND*/ + "The parameter(s) is the same ", /*SW_NO_CHANGE*/ + "No more entry found ", /*SW_NO_MORE*/ + "No such entry ", /*SW_NO_SUCH*/ + "Tried to create existing entry ", /*SW_ALREADY_EXIST*/ + "Table is full ", /*SW_FULL*/ + "Table is empty ", /*SW_EMPTY*/ + "This request is not support ", /*SW_NOT_SUPPORTED*/ + "This request is not implemented", /*SW_NOT_IMPLEMENTED*/ + "The item is not initialized ", /*SW_NOT_INITIALIZED*/ + "Operation is still running", /*SW_BUSY*/ + "Operation Time Out ", /*SW_TIMEOUT*/ + "Operation is disabled ", /*SW_DISABLE*/ + "Resource not available (memory ...)", /*SW_NO_RESOURCE*/ + "Error occured while INIT process", /*SW_INIT_ERROR*/ + "The other side is not ready yet", /*SW_NOT_READY */ + "Cpu memory allocation failed. ", /*SW_OUT_OF_MEM */ + "Operation has been aborted. 
", /*SW_ABORTED*/ +} ; + +void +cmd_print_error(sw_error_t rtn) +{ + dprintf("\n%s\n\n", err_info[abs(rtn)]); +} + +void +cmd_print(char *fmt, ...) +{ + va_list args; + + va_start(args, fmt); + if(out_fd) + vfprintf(out_fd, fmt, args); + else + vfprintf(stdout, fmt, args); + va_end(args); +} + +static sw_error_t +cmd_input_parser(a_ulong_t *arg_val, a_uint32_t arg_index, sw_api_param_t *pp) +{ + a_int16_t i; + a_ulong_t *pbuf; + a_uint16_t rtn_size = 1; + sw_api_param_t *pptmp = pp; + + pbuf = ioctl_buf + rtn_size; /*reserve for return value */ + + for (i = 0; i < arg_index; i++) + { + pptmp = pp + i; + if (pptmp->param_type & SW_PARAM_PTR) + { + pbuf += (pptmp->data_size + 3) / 4; + } + } + if ((pbuf - ioctl_buf + (pptmp->data_size + 3) / 4) > (IOCTL_BUF_SIZE/4)) + { + return SW_NO_RESOURCE; + } + + *arg_val = (a_ulong_t) pbuf; + + return SW_OK; +} + +static sw_error_t +cmd_api_func(sw_api_func_t *fp, a_uint32_t nr_param, a_ulong_t * args) +{ + a_ulong_t *p = &args[2]; + sw_error_t rv; + sw_error_t(*func) (); + + func = fp->func; + + switch (nr_param) + { + case 0: + rv = (func) (); + break; + case 1: + rv = (func) (p[0]); + break; + case 2: + rv = (func) (p[0], p[1]); + break; + case 3: + rv = (func) (p[0], p[1], p[2]); + break; + case 4: + rv = (func) (p[0], p[1], p[2], p[3]); + break; + case 5: + rv = (func) (p[0], p[1], p[2], p[3], p[4]); + break; + case 6: + rv = (func) (p[0], p[1], p[2], p[3], p[4], p[5]); + break; + case 7: + rv = (func) (p[0], p[1], p[2], p[3], p[4], p[5], p[6]); + break; + case 8: + rv = (func) (p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); + break; + case 9: + rv = (func) (p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]); + break; + case 10: + rv = (func) (p[0], p[1], p[2], p[3], p[4], p[5], + p[6], p[7], p[8], p[9]); + break; + default: + rv = SW_OUT_OF_RANGE; + } + + *(a_ulong_t *) args[1] = rv; + + return rv; +} + +static sw_error_t +cmd_api_output(sw_api_param_t *pp, a_uint32_t nr_param, a_ulong_t * args) +{ + a_uint16_t i; + 
a_ulong_t *pbuf; + a_uint16_t rtn_size = 1; + sw_error_t rtn = (sw_error_t) (*ioctl_buf); + sw_api_param_t *pptmp = NULL; + + if (rtn != SW_OK) + { + cmd_print_error(rtn); + return rtn; + } + + pbuf = ioctl_buf + rtn_size; + for (i = 0; i < nr_param; i++) + { + pptmp = pp + i; + if (pptmp->param_type & SW_PARAM_PTR) + { + + if (pptmp->param_type & SW_PARAM_OUT) + { + + sw_data_type_t *data_type; + if (!(data_type = cmd_data_type_find(pptmp->data_type))) + return SW_NO_SUCH; + + if (data_type->show_func) + { + data_type->show_func(pptmp->param_name, pbuf, pptmp->data_size); +/*qca808x_end*/ + if(strcmp((a_char_t *)pptmp->param_name, "Function bitmap") == 0) + { + cmd_data_print_module_func_ctrl(args[3], (fal_func_ctrl_t *)pbuf); + } +/*qca808x_start*/ + } + else + { + dprintf("\n Error, not define output print function!"); + } + } + + if ((pbuf - ioctl_buf + + (pptmp->data_size + 3) / 4) > (IOCTL_BUF_SIZE/4)) + return SW_NO_RESOURCE; + + pbuf += (pptmp->data_size + 3) / 4; + + } + } + return SW_OK; +} + +void +cmd_strtol(char *str, a_uint32_t * arg_val) +{ + if (str[0] == '0' && (str[1] == 'x' || str[1] == 'X')) + sscanf(str, "%x", arg_val); + else + sscanf(str, "%d", arg_val); +} + +static sw_error_t +cmd_parse_api(char **cmd_str, a_ulong_t * arg_val) +{ + char *tmp_str; + a_uint32_t arg_index, arg_start = 2, reserve_index = 1; /*reserve for dev_id */ + a_uint32_t last_param_in = 0; + a_ulong_t *temp; + void *pentry; + sw_api_param_t *pptmp = NULL; + sw_api_t sw_api; + a_uint32_t ignorecnt = 0, jump = 0; + sw_api.api_id = arg_val[0]; + SW_RTN_ON_ERROR(sw_api_get(&sw_api)); + + /*set device id */ + arg_val[arg_start] = get_devid(); + + for (arg_index = reserve_index; arg_index < sw_api.api_nr; arg_index++) + { + tmp_str = NULL; + pptmp = sw_api.api_pp + arg_index; + + if (!(pptmp->param_type & SW_PARAM_IN)) + { + ignorecnt++; + } + + if (pptmp->param_type & SW_PARAM_IN) + { + tmp_str = cmd_str[arg_index - reserve_index - ignorecnt + jump]; + last_param_in = 
arg_index; + if((pptmp->api_id == 314) && last_param_in == 2) last_param_in = 4;//SW_API_FDB_EXTEND_NEXT wr + if((pptmp->api_id == 327) && last_param_in == 2) last_param_in = 4;//SW_API_FDB_EXTEND_FIRST wr + } + temp = &arg_val[arg_start + arg_index]; + + sw_data_type_t *data_type; + if (!(data_type = cmd_data_type_find(pptmp->data_type))) + return SW_NO_SUCH; + + pentry = temp; + if (pptmp->param_type & SW_PARAM_PTR) + { + if (cmd_input_parser(temp, arg_index, sw_api.api_pp) != SW_OK) + return SW_NO_RESOURCE; + + pentry = (void *) *temp; + } + + if (pptmp->param_type & SW_PARAM_IN) + { +#if 1 + if(pptmp->param_type & SW_PARAM_PTR) //quiet mode + { + if(!get_talk_mode()) + set_full_cmdstrp((char **)(cmd_str + (last_param_in - reserve_index) + jump)); + } +#endif + /*check and convert input param */ + if (data_type->param_check != NULL) + { + if (data_type->param_check(tmp_str, pentry, pptmp->data_size) != SW_OK) + return SW_BAD_PARAM; + if(!get_talk_mode() && (pptmp->param_type & SW_PARAM_PTR)) { + if (get_jump()) + jump += get_jump() -1; + } + + } + } + } + + /*superfluous args */ + /* + if(cmd_str[last_param_in] != NULL) + return SW_BAD_PARAM; + */ + + return SW_OK; +} + +static sw_error_t +cmd_parse_sw(char **cmd_str, a_ulong_t * arg_val) +{ + char *tmp_str; + a_uint32_t arg_index = 0, tmp = 0; + a_uint32_t api_id = arg_val[0]; + + tmp_str = cmd_str[arg_index]; + while (tmp_str) + { + arg_index++; + cmd_strtol(tmp_str, &tmp); + arg_val[arg_index] = tmp; + tmp_str = cmd_str[arg_index]; + } + + /*args number check */ + if ( (arg_index == 0 && ( api_id == SW_CMD_VLAN_SHOW || + api_id == SW_CMD_FDB_SHOW || + api_id == SW_CMD_RESV_FDB_SHOW || + api_id == SW_CMD_HOST_SHOW || + api_id == SW_CMD_HOST_IPV4_SHOW || + api_id == SW_CMD_HOST_IPV6_SHOW || + api_id == SW_CMD_HOST_IPV4M_SHOW || + api_id == SW_CMD_HOST_IPV6M_SHOW || + api_id == SW_CMD_FLOW_IPV43T_SHOW || + api_id == SW_CMD_FLOW_IPV63T_SHOW || + api_id == SW_CMD_FLOW_IPV45T_SHOW || + api_id == 
SW_CMD_FLOW_IPV65T_SHOW || + api_id == SW_CMD_NAT_SHOW || + api_id == SW_CMD_NAPT_SHOW || + api_id == SW_CMD_FLOW_SHOW || + api_id == SW_CMD_CTRLPKT_SHOW || + api_id == SW_CMD_INTFMAC_SHOW || + api_id == SW_CMD_PUBADDR_SHOW )) || + ( arg_index == 1 && api_id == SW_CMD_SET_DEVID) || + ( arg_index == 2 && api_id == SW_CMD_PT_VLAN_TRANS_ADV_SHOW) ) + return SW_OK; + + return SW_BAD_PARAM; +} + +/*user command api*/ +sw_error_t +cmd_exec_api(a_ulong_t *arg_val) +{ + sw_error_t rv; + sw_api_t sw_api; + + sw_api.api_id = arg_val[0]; + SW_RTN_ON_ERROR(sw_api_get(&sw_api)); + + /*save cmd return value */ + arg_val[1] = (a_ulong_t) ioctl_buf; + /*save set device id */ + arg_val[2] = get_devid(); + + rv = cmd_api_func(sw_api.api_fp, sw_api.api_nr, arg_val); + SW_RTN_ON_ERROR(rv); + + rv = cmd_api_output(sw_api.api_pp, sw_api.api_nr, arg_val); + SW_RTN_ON_ERROR(rv); + + return rv; +} + + +void +cmd_print_usage (int cmd_index, int cmd_index_sub) +{ + if(GCMD_NAME(cmd_index)) + dprintf("usage: %s", GCMD_NAME(cmd_index)); + + if (GCMD_SUB_NAME(cmd_index, cmd_index_sub)) + dprintf(" %s", GCMD_SUB_NAME(cmd_index, cmd_index_sub)); + + if(GCMD_SUB_ACT(cmd_index, cmd_index_sub) && GCMD_SUB_USAGE(cmd_index, cmd_index_sub)) + dprintf(" %s %s\n\n", GCMD_SUB_ACT(cmd_index, cmd_index_sub), + GCMD_SUB_USAGE(cmd_index, cmd_index_sub)); +} +/* + main function + input args: + arg_val[0] = cmd_num + arg_val[1] = rtn_code + arg_val[2] = dev_id + arg_val[3] = dbg_cmd_num or other +*/ + +/*command string lookup*/ +a_uint32_t +cmd_lookup(char **cmd_str, int *cmd_index, int *cmd_index_sub) +{ + a_uint32_t no, sub_no; + a_uint32_t cmd_deepth = 0; + + *cmd_index = GCMD_DESC_NO_MATCH; + *cmd_index_sub = GCMD_DESC_NO_MATCH; + + if (cmd_str[0] == NULL) + return cmd_deepth; + + for (no = 0; GCMD_DESC_VALID(no); no++) + { + if (strcasecmp(cmd_str[0], GCMD_NAME(no))) + continue; + + for (sub_no = 0; GCMD_SUB_DESC_VALID(no, sub_no); sub_no++) + { + if (cmd_str[1] != NULL && cmd_str[2] != NULL) + { + + if 
(GCMD_SUB_NAME(no, sub_no) && GCMD_SUB_ACT(no, sub_no) + && !strcasecmp(cmd_str[1], GCMD_SUB_NAME(no, sub_no)) + && !strcasecmp(cmd_str[2], GCMD_SUB_ACT(no, sub_no))) + { + *cmd_index = no; + *cmd_index_sub = sub_no; + cmd_deepth = 3; + return cmd_deepth; + } + + else if (!GCMD_SUB_NAME(no, sub_no) && GCMD_SUB_ACT(no, sub_no) + && !strcasecmp(cmd_str[1], GCMD_SUB_ACT(no, sub_no))) + { + *cmd_index = no; + *cmd_index_sub = sub_no; + cmd_deepth = 2; + return cmd_deepth; + } + } + else if (cmd_str[1] != NULL && cmd_str[2] == NULL) + { + + if (!GCMD_SUB_NAME(no, sub_no) && GCMD_SUB_ACT(no, sub_no) + && !strcasecmp(cmd_str[1], GCMD_SUB_ACT(no, sub_no))) + { + *cmd_index = no; + *cmd_index_sub = sub_no; + cmd_deepth = 2; + return cmd_deepth; + } + } + } + } + + return cmd_deepth; +} + +static a_ulong_t * +cmd_parse(char *cmd_str, int *cmd_index, int *cmd_index_sub) +{ + int cmd_nr = 0; + a_ulong_t *arg_val = ioctl_argp; + char *tmp_str[CMDSTR_ARGS_MAX], *str_save; + + if (cmd_str == NULL) + return NULL; + + memset(arg_val, 0, CMDSTR_ARGS_MAX * sizeof (a_ulong_t)); + + /* split string into array */ + if ((tmp_str[cmd_nr] = (void *) strtok_r(cmd_str, " ", &str_save)) == NULL) + return NULL; + + /*handle help */ + if (!strcasecmp(tmp_str[cmd_nr], "help")) + { + dprintf("input ? 
get help\n\n"); + return NULL; + } + + while (tmp_str[cmd_nr]) + { + if (++cmd_nr == 3) + break; + tmp_str[cmd_nr] = (void *) strtok_r(NULL, " ", &str_save); + } + + /*commond string lookup */ + int cmd_depth = cmd_lookup(tmp_str, cmd_index, cmd_index_sub); + + if (*cmd_index == GCMD_DESC_NO_MATCH || *cmd_index_sub == GCMD_DESC_NO_MATCH) + { + dprintf("invalid or incomplete command.\n\n"); + return NULL; + } + + /*parse param */ + cmd_nr = 0; + if (cmd_depth == 2) + { + tmp_str[cmd_nr] = tmp_str[2]; + cmd_nr++; + } + + tmp_str[cmd_nr] = (void *) strtok_r(NULL, " ", &str_save); + while (tmp_str[cmd_nr]) + { + if (++cmd_nr == CMDSTR_ARGS_MAX) + break; + tmp_str[cmd_nr] = (void *) strtok_r(NULL, " ", &str_save); + } + + arg_val[0] = GCMD_SUB_API(*cmd_index, *cmd_index_sub); + arg_val[1] = (a_ulong_t) ioctl_buf; + + int rtn_code; + if (arg_val[0] < SW_API_MAX) + { + /*api command parse */ + rtn_code = cmd_parse_api(tmp_str, arg_val); + + } + else if (arg_val[0] > SW_API_MAX) + { + /*user command parse */ + rtn_code = cmd_parse_sw(tmp_str, arg_val); + + } + else + { + rtn_code = SW_BAD_PARAM; + } + + if(rtn_code != SW_OK) + { + cmd_print_error(rtn_code); + + if(rtn_code == SW_BAD_PARAM) + cmd_print_usage(*cmd_index, *cmd_index_sub); + + return NULL; + } + + return arg_val; +} + +static int +cmd_exec(a_ulong_t *arg_val, int cmd_index, int cmd_index_sub) +{ + a_uint32_t api_id = arg_val[0]; + sw_error_t rtn = SW_OK; + + if( api_id < SW_API_MAX ) + { + rtn = cmd_exec_api(arg_val); + + } + else if ((api_id > SW_API_MAX ) && (api_id < SW_CMD_MAX)) + { + if (GCMD_SUB_FUNC(cmd_index, cmd_index_sub)) + rtn = GCMD_SUB_FUNC(cmd_index, cmd_index_sub)(arg_val); + } + else + { + rtn = SW_BAD_PARAM; + } + + if(rtn != SW_OK) + cmd_print_error(rtn); + else + dprintf("\noperation done.\n\n"); + + return 0; +} + +static sw_error_t +cmd_socket_init(int dev_id) +{ + sw_error_t rv; + + init_cfg.cpu_mode = HSL_CPU_1; + init_cfg.reg_mode = HSL_MDIO; +#if defined UK_MINOR_DEV + 
init_cfg.nl_prot = UK_MINOR_DEV; +#else + init_cfg.nl_prot = 30; +#endif + init_cfg.chip_type=CHIP_UNSPECIFIED; +/*qca808x_end*/ + init_cfg.reg_func.mdio_set = NULL; + init_cfg.reg_func.mdio_get = NULL; +/*qca808x_start*/ + rv = ssdk_init(dev_id, &init_cfg); + if (SW_OK == rv) + { + dprintf("\n %s Init OK!", SSDK_STR); + } + else + { + dprintf("\n %s Init Fail! RV[%d]", SSDK_STR, rv); + } + + if (flag == 0) + { + aos_mem_set(&ssdk_cfg, 0 ,sizeof(ssdk_cfg_t)); + rv = sw_uk_exec(SW_API_SSDK_CFG, dev_id, &ssdk_cfg); + flag = 1; + } + return rv; +} + +static sw_error_t +cmd_init(void) +{ + ioctl_buf = (a_ulong_t *) malloc(IOCTL_BUF_SIZE); + ioctl_argp = (a_ulong_t *) malloc(CMDSTR_ARGS_MAX * sizeof (a_ulong_t)); + FILE *dev_id_fd = NULL; + int dev_id_value = 0; + if((dev_id_fd = fopen(dev_id_path, "r")) != NULL) + { + fscanf(dev_id_fd, "%d", &dev_id_value); + } + + set_devid(dev_id_value); + cmd_socket_init(dev_id_value); + + return SW_OK; +} + +static sw_error_t +cmd_exit(void) +{ + free(ioctl_buf); + free(ioctl_argp); + ssdk_cleanup(); + flag = 0; + return SW_OK; +} + +static sw_error_t +cmd_run_one(char *cmd_str) +{ + a_ulong_t *arg_list; + int cmd_index = 0, cmd_index_sub = 0; + + if ((arg_list = cmd_parse(cmd_str, &cmd_index, &cmd_index_sub)) != NULL) + { + cmd_exec(arg_list, cmd_index, cmd_index_sub); + } + + return SW_OK; +} + +int +cmd_is_batch(const char *cmd_str) +{ + char batch_cmd[] = "run"; + + if(!strncmp(cmd_str, batch_cmd, strlen(batch_cmd))) + return 1; + return 0; +} + +static void +cmd_batch_help(void) +{ + dprintf("usage:run \n"); +} + +static sw_error_t +cmd_run_batch (char *cmd_str) +{ + FILE *in_fd = NULL; + char * line = NULL, *str_save; + char *tmp_str[3]; + + if (cmd_str == NULL) + return SW_BAD_PARAM; + + /*usage: run cmd result*/ + if((tmp_str[0] = (void *) strtok_r(cmd_str, " ", &str_save)) == NULL) + return SW_BAD_PARAM; + + /*check again*/ + if(!cmd_is_batch(tmp_str[0])) + return SW_BAD_PARAM; + + if((tmp_str[1] = (void *) strtok_r(NULL, 
" ", &str_save))== NULL) + return SW_BAD_PARAM; + if((tmp_str[2] = (void *) strtok_r(NULL, " ", &str_save))== NULL) + return SW_BAD_PARAM; + + if((in_fd = fopen(tmp_str[1], "r")) == NULL) + { + dprintf("can't open cmd file %s\n", tmp_str[1]); + return SW_FAIL; + } + if((out_fd = fopen(tmp_str[2], "w+")) == NULL) + { + dprintf("can't open result file %s\n", tmp_str[2]); + return SW_FAIL; + } + + size_t len = 0; + ssize_t read; + + set_talk_mode(0); + while ((read = getline(&line, &len, in_fd)) != -1) + { + //dprintf("(%d)%s",read, line); + if (read <= 1 ) + { + continue; + } + + if (line[strlen(line)-1] == '\n') + { + line[strlen(line)-1] = '\0'; + } + + if (!strncmp(line, "echo", 4)) + { + dprintf("%s\n", line+strlen("echo ")); + continue; + } + else + { + dprintf("%s\n", line); + } + cmd_run_one(line); + } + set_talk_mode(1); + + if (line) free(line); + + fclose(out_fd); + fclose(in_fd); + out_fd = 0; + in_fd =0; + + return SW_OK; + +} + +static sw_error_t +cmd_args(char *cmd_str, int argc, const char *argv[]) +{ + /*quiet mode*/ + set_talk_mode(0); + + if(cmd_is_batch(argv[1])) + { + if(argc != 4) + { + cmd_batch_help(); + return SW_FAIL; + } + + snprintf(cmd_str, CMDSTR_BUF_SIZE, "%s %s %s", argv[1], argv[2], argv[3]); + cmd_run_batch(cmd_str); + + } + else + { + int argi; + for(argi = 1; argi < argc; argi++) + { + strlcat(cmd_str, argv[argi], CMDSTR_BUF_SIZE); + strlcat(cmd_str, " ", CMDSTR_BUF_SIZE); + } + cmd_run_one(cmd_str); + } + + return SW_OK; +} + +int +cmd_is_exit(char *cmd_str) +{ + if ((!strcasecmp(cmd_str, "q")) || (!strcasecmp(cmd_str, "quit"))) + { + return 1; + } + return 0; +} + +void cmd_welcome() +{ + char *ver = "", *date = ""; +#ifdef VERSION + ver = VERSION; +#endif + +#ifdef BUILD_DATE + date = BUILD_DATE; +#endif + dprintf("\n Welcome to %s Shell version: %s, at %s.\n", SSDK_STR, ver, date); +} + +/* Dummy function to avoid linker complaints */ +void __aeabi_unwind_cpp_pr0(void) +{ +}; +void __aeabi_unwind_cpp_pr1(void) +{ +}; + +int 
+main(int argc, const char *argv[]) +{ + char cmd_str[CMDSTR_BUF_SIZE]; + cmd_init(); + + if(argc > 1) + { + memset(cmd_str, 0, sizeof(cmd_str)); + cmd_args(cmd_str, argc, argv); + cmd_exit(); + return 0; + } + + cmd_welcome(); + + /*main loop*/ + while (1) + { + memset(cmd_str, 0, sizeof(cmd_str)); + + if(next_cmd(cmd_str) == 0)/*loop through if '\n'*/ + continue; + + if (cmd_is_exit(cmd_str)) + break; + + if(cmd_is_batch(cmd_str)) + { + if(cmd_run_batch(cmd_str)!= SW_OK) + cmd_batch_help(); + } + else + { + cmd_run_one(cmd_str); + } + } + + cmd_exit(); + return 0; +} +/*qca808x_end*/ diff --git a/feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell_config.c b/feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell_config.c new file mode 100755 index 000000000..e123f9b4a --- /dev/null +++ b/feeds/ipq807x/qca-ssdk-shell/src/src/shell/shell_config.c @@ -0,0 +1,1451 @@ +/* + * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved. + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ +/*qca808x_start*/ +#include "shell_config.h" +#include "shell_sw.h" + + +/*cmdline tree descript*/ +struct cmd_des_t gcmd_des[] = +{ + /*port ctrl*/ +/*qca808x_end*/ +#ifdef IN_PORTCONTROL +/*qca808x_start*/ + { + "port", "config port control", + { + {"duplex", "get", "get duplex mode of a port", "" , SW_API_PT_DUPLEX_GET, + NULL}, + {"duplex", "set", "set duplex mode of a port", " ", + SW_API_PT_DUPLEX_SET, NULL}, + {"speed", "get", "get speed mode of a port", "", SW_API_PT_SPEED_GET, + NULL}, + {"speed", "set", "set speed mode of a port", + " <10|100|1000|2500|5000|10000>", SW_API_PT_SPEED_SET, NULL}, + {"autoAdv", "get", "get auto-negotiation advertisement of a port", "", + SW_API_PT_AN_ADV_GET, NULL}, + {"autoAdv", "set", "set auto-negotiation advertisement of a port", + " ", SW_API_PT_AN_ADV_SET, NULL}, + {"autoNeg", "get", "get auto-negotiation status of a port", "", + SW_API_PT_AN_GET, NULL}, + {"autoNeg", "enable", "enable auto-negotiation of a port", "", + SW_API_PT_AN_ENABLE, NULL}, + {"autoNeg", "restart", "restart auto-negotiation process of a port", "", + SW_API_PT_AN_RESTART, NULL}, +/*qca808x_end*/ + {"header", "set", "set atheros header/tag status of a port", + " ", SW_API_PT_HDR_SET, NULL}, + {"header", "get", "get atheros header/tag status of a port", "", + SW_API_PT_HDR_GET, NULL}, + {"txhdr", "set", "set tx frame atheros header/tag status of a port", + " ", SW_API_PT_TXHDR_SET, NULL}, + {"txhdr", "get", "get tx frame atheros header/tag status of a port", "", + SW_API_PT_TXHDR_GET, NULL}, + {"rxhdr", "set", "set rx frame atheros header/tag status of a port", + " ", SW_API_PT_RXHDR_SET, NULL}, + {"rxhdr", "get", "get rx frame atheros header/tag status of a port", "", + SW_API_PT_RXHDR_GET, NULL}, + {"hdrtype", "set", "set atheros header/tag type", + " ", SW_API_HEADER_TYPE_SET, NULL}, + {"hdrtype", "get", "get atheros header/tag type", "", + SW_API_HEADER_TYPE_GET, NULL}, + {"flowCtrl", "set", "set flow control status of a port", + " ", 
SW_API_PT_FLOWCTRL_SET, NULL}, + {"flowCtrl", "get", "get flow control status of a port", "", + SW_API_PT_FLOWCTRL_GET, NULL}, + {"flowCtrlforcemode", "set", "set flow control force mode of a port", + " ", SW_API_PT_FLOWCTRL_MODE_SET, NULL}, + {"flowCtrlforcemode", "get", "get flow control force mode of a port", + "", SW_API_PT_FLOWCTRL_MODE_GET, NULL}, + {"powersave", "set", "set powersave status of a port", + " ", SW_API_PT_POWERSAVE_SET, NULL}, + {"powersave", "get", "get powersave status of a port", "", + SW_API_PT_POWERSAVE_GET, NULL}, +/*qca808x_start*/ + {"hibernate", "set", "set hibernate status of a port", + " ", SW_API_PT_HIBERNATE_SET, NULL}, + {"hibernate", "get", "get hibernate status of a port", "", + SW_API_PT_HIBERNATE_GET, NULL}, + {"cdt", "run", "run cable diagnostic test of a port", + " ", SW_API_PT_CDT, NULL}, +/*qca808x_end*/ + {"txmacstatus", "set", "set txmac status of a port", + " ", SW_API_TXMAC_STATUS_SET, NULL}, + {"txmacstatus", "get", "get txmac status of a port", "", + SW_API_TXMAC_STATUS_GET, NULL}, + {"rxmacstatus", "set", "set rxmac status of a port", " ", + SW_API_RXMAC_STATUS_SET, NULL}, + {"rxmacstatus", "get", "get rxmac status of a port", "", + SW_API_RXMAC_STATUS_GET, NULL}, + {"txfcstatus", "set", "set tx flow control status of a port", + " ", SW_API_TXFC_STATUS_SET, NULL}, + {"txfcstatus", "get", "get tx flow control status of a port", "", + SW_API_TXFC_STATUS_GET, NULL}, + {"rxfcstatus", "set", "set rx flow control status of a port", + " ", SW_API_RXFC_STATUS_SET, NULL}, + {"rxfcstatus", "get", "get rx flow control status of a port", "", + SW_API_RXFC_STATUS_GET, NULL}, + {"bpstatus", "set", "set back pressure status of a port", + " ", SW_API_BP_STATUS_SET, NULL}, + {"bpstatus", "get", "get back pressure status of a port", "", + SW_API_BP_STATUS_GET, NULL}, + {"linkforcemode", "set", "set link force mode of a port", + " ", SW_API_PT_LINK_MODE_SET, NULL}, + {"linkforcemode", "get", "get link force mode of a port", "", + 
SW_API_PT_LINK_MODE_GET, NULL}, +/*qca808x_start*/ + {"linkstatus", "get", "get link status of a port", "", + SW_API_PT_LINK_STATUS_GET, NULL}, +/*qca808x_end*/ + {"macLoopback", "set", "set mac level loop back mode of port", + " ", SW_API_PT_MAC_LOOPBACK_SET, NULL}, + {"macLoopback", "get", "get mac level loop back mode of port", "", + SW_API_PT_MAC_LOOPBACK_GET, NULL}, + {"ptslinkstatus", "get", "get link status of all ports", "", + SW_API_PTS_LINK_STATUS_GET, NULL}, + {"congedrop", "set", "set congestion drop of port queue", + " ", SW_API_PT_CONGESTION_DROP_SET, + NULL}, + {"congedrop", "get", "get congestion drop of port queue", + " ", SW_API_PT_CONGESTION_DROP_GET, NULL}, + {"ringfcthres", "set", "set ring flow ctrl of ring", + " ", SW_API_PT_RING_FLOW_CTRL_THRES_SET, + NULL}, + {"ringfcthres", "get", "get ring flow ctrl of ring", "", + SW_API_PT_RING_FLOW_CTRL_THRES_GET, NULL}, +/*qca808x_start*/ + {"Ieee8023az", "set", "set 8023az status of a port", " ", + SW_API_PT_8023AZ_SET, NULL}, + {"Ieee8023az", "get", "get 8023az status of a port", "", + SW_API_PT_8023AZ_GET, NULL}, + {"crossover", "set", "set crossover mode of a port", " ", + SW_API_PT_MDIX_SET, NULL}, + {"crossover", "get", "get crossover mode of a port", "", + SW_API_PT_MDIX_GET, NULL}, + {"crossover", "status", "get current crossover status of a port", "", + SW_API_PT_MDIX_STATUS_GET, NULL}, +/*qca808x_end*/ + {"preferMedium", "set", "set prefer medium of a combo port", + " ", SW_API_PT_COMBO_PREFER_MEDIUM_SET, NULL}, + {"preferMedium", "get", "get prefer medium of a combo port", "", + SW_API_PT_COMBO_PREFER_MEDIUM_GET, NULL}, + {"mediumType", "get", "get current medium status of a combo port", "", + SW_API_PT_COMBO_MEDIUM_STATUS_GET, NULL}, + {"fiberMode", "set", "set fiber mode of a combo fiber port", + " <100fx|1000bx |10g_r>", SW_API_PT_COMBO_FIBER_MODE_SET, NULL}, + {"fiberMode", "get", "get fiber mode of a combo fiber port", "", + SW_API_PT_COMBO_FIBER_MODE_GET, NULL}, +/*qca808x_start*/ + 
{"localLoopback", "set", "set local loopback of a port", + " ", SW_API_PT_LOCAL_LOOPBACK_SET, NULL}, + {"localLoopback", "get", "get local loopback of a port", "", + SW_API_PT_LOCAL_LOOPBACK_GET, NULL}, + {"remoteLoopback", "set", "set remote loopback of a port", + " ", SW_API_PT_REMOTE_LOOPBACK_SET, NULL}, + {"remoteLoopback", "get", "get remote loopback of a port", "", + SW_API_PT_REMOTE_LOOPBACK_GET, NULL}, + {"reset", "set", "reset phy of a port", "", SW_API_PT_RESET, NULL}, + {"poweroff", "set", "power off phy of a port", "", + SW_API_PT_POWER_OFF, NULL}, + {"poweron", "set", "power on phy of a port", "", SW_API_PT_POWER_ON, NULL}, + {"magicFrameMac", "set", "set magic frame mac address of a port", + " ", SW_API_PT_MAGIC_FRAME_MAC_SET, NULL}, + {"magicFrameMac", "get", "get magic frame mac address of a port", "", + SW_API_PT_MAGIC_FRAME_MAC_GET, NULL}, + {"phyId", "get", "get phy id of a port", "", SW_API_PT_PHY_ID_GET, NULL}, + {"wolstatus", "set", "set wol status of a port", " ", + SW_API_PT_WOL_STATUS_SET, NULL}, + {"wolstatus", "get", "get wol status of a port", "", + SW_API_PT_WOL_STATUS_GET, NULL}, +/*qca808x_end*/ + {"interfaceMode", "set", "set interface mode of phy", " ",\ + SW_API_PT_INTERFACE_MODE_SET, NULL}, + {"interfaceMode", "get", "get interface mode of phy", "", + SW_API_PT_INTERFACE_MODE_GET, NULL}, + {"interfaceMode", "apply", "apply interface mode","", + SW_API_PT_INTERFACE_MODE_APPLY, NULL}, +/*qca808x_start*/ + {"interfaceMode", "status", "get current interface mode of phy", "", + SW_API_PT_INTERFACE_MODE_STATUS_GET, NULL}, +/*qca808x_end*/ + {"interface3az", "set", "set interface and phy 3az info", + " ", SW_API_PT_INTERFACE_3AZ_STATUS_SET, NULL}, + {"interface3az", "get", "get interface and phy 3az info", "", + SW_API_PT_INTERFACE_3AZ_STATUS_GET, NULL}, + {"promiscmode", "set", "set port promisc mode", " ", + SW_API_PT_PROMISC_MODE_SET, NULL}, + {"promiscmode", "get", "get port promisc mode", "", + SW_API_PT_PROMISC_MODE_GET, NULL}, + 
{"mtu", "set", "set port mtu value", "", SW_API_PT_MTU_SET, NULL}, + {"mtu", "get", "get port mtu value", "", SW_API_PT_MTU_GET, NULL}, + {"mru", "set", "set port mru value", "", SW_API_PT_MRU_SET, NULL}, + {"mru", "get", "get port mru value", "", SW_API_PT_MRU_GET, NULL}, + {"srcfilter", "set", "set port source filter bypass", " ", + SW_API_PT_SOURCE_FILTER_SET, NULL}, + {"srcfilter", "get", "get port source filter bypass", "", + SW_API_PT_SOURCE_FILTER_GET, NULL}, + {"frameMaxSize", "get", "get port frame max size", "", + SW_API_PT_FRAME_MAX_SIZE_GET}, + {"frameMaxSize", "set", "set port frame max size", " ", + SW_API_PT_FRAME_MAX_SIZE_SET}, + {"eeecfg", "set", "set interface eee info", "", + SW_API_PT_INTERFACE_EEE_CFG_SET, NULL}, + {"eeecfg", "get", "get interface eee info", "", + SW_API_PT_INTERFACE_EEE_CFG_GET, NULL}, + {"srcfiltercfg", "set", "set port source filter configure", "", + SW_API_PT_SOURCE_FILTER_CONFIG_SET, NULL}, + {"srcfiltercfg", "get", "get port source filter configure", "", + SW_API_PT_SOURCE_FILTER_CONFIG_GET, NULL}, + {"switchportloopback", "set", "set switch port loopback", "", + SW_API_PT_SWITCH_PORT_LOOPBACK_SET, NULL}, + {"switchportloopback", "get", "get switch port loopback", "", + SW_API_PT_SWITCH_PORT_LOOPBACK_GET, NULL}, +/*qca808x_start*/ + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL},/*end of desc*/ + }, + }, +/*qca808x_end*/ +#endif + + /*vlan*/ +#ifdef IN_VLAN + { + "vlan", "config VLAN table", + { + {"entry", "create", "create a VLAN entry", "", SW_API_VLAN_ADD, NULL}, + {"entry", "del", "delete a VLAN entryn", "", SW_API_VLAN_DEL, NULL}, + {"entry", "update", "update port member of a VLAN entry", + " <0>", SW_API_VLAN_MEM_UPDATE, NULL}, + {"entry", "find", "find a VLAN entry by VLAN id", "", SW_API_VLAN_FIND, NULL}, + {"entry", "next", "find next VLAN entry by VLAN id", + "",SW_API_VLAN_NEXT, NULL}, + {"entry", "append", "append a VLAN entry", "", SW_API_VLAN_APPEND, NULL}, + {"entry", "flush", "flush all VLAN entries", 
"",SW_API_VLAN_FLUSH, NULL}, + {"entry", "show", "show whole VLAN entries", "", SW_CMD_VLAN_SHOW, cmd_show_vlan}, + {"fid", "set", "set VLAN entry fid", " ",SW_API_VLAN_FID_SET, NULL}, + {"fid", "get", "get VLAN entry fid", "",SW_API_VLAN_FID_GET, NULL}, + {"member", "add", "add VLAN entry member", + " ", + SW_API_VLAN_MEMBER_ADD, NULL}, + {"member", "del", "del VLAN entry member", + " ", SW_API_VLAN_MEMBER_DEL, NULL}, + {"learnsts", "set", "set VLAN entry learn status", + " ", SW_API_VLAN_LEARN_STATE_SET, NULL}, + {"learnsts", "get", "get VLAN entry learn status", + "", SW_API_VLAN_LEARN_STATE_GET, NULL}, + {"lan_wan_cfg", "set", "set lan & wan configuration", + "", SW_API_LAN_WAN_CFG_SET, NULL}, + {"lan_wan_cfg", "get", "get lan & wan configuration", + "", SW_API_LAN_WAN_CFG_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /*portvlan*/ +#ifdef IN_PORTVLAN + { + "portVlan", "config port base VLAN", + { + {"ingress", "get", "get ingress VLAN mode of a port", + "", SW_API_PT_ING_MODE_GET, NULL}, + {"ingress", "set", "set ingress VLAN mode of a port", + " ", SW_API_PT_ING_MODE_SET, NULL}, + {"egress", "get", "get egress VLAN mode of a port", + "", SW_API_PT_EG_MODE_GET, NULL}, + {"egress", "set", "set egress VLAN mode of a port", + " ", + SW_API_PT_EG_MODE_SET, NULL}, + {"member", "add", "add a member to the port based VLAN of a port", + " ", SW_API_PT_VLAN_MEM_ADD, NULL}, + {"member", "del", "delete a member from the port based VLAN of a port", + " ", SW_API_PT_VLAN_MEM_DEL, NULL}, + {"member", "update", "update members of the port based VLAN of a port", + " ", SW_API_PT_VLAN_MEM_UPDATE, NULL}, + {"member", "get", "get members of the port based VLAN of a port", + "", SW_API_PT_VLAN_MEM_GET, NULL}, + {"defaultVid", "get", "get default VLAN id of a port", + "", SW_API_PT_DEF_VID_GET, NULL}, + {"defaultVid", "set", "set default VLAN id of a port", + " ", SW_API_PT_DEF_VID_SET, NULL}, + {"forceVid", "set", "set VLAN id 
enforcement status of a port", + " ", SW_API_PT_FORCE_DEF_VID_SET, NULL}, + {"forceVid", "get", "get VLAN id enforcement status of a port", + "", SW_API_PT_FORCE_DEF_VID_GET, NULL}, + {"forceMode", "set", "set port based VLAN enforcement status of a port", + " ", SW_API_PT_FORCE_PORTVLAN_SET, NULL}, + {"forceMode", "get", "get port based VLAN enforcement status of a port", + "", SW_API_PT_FORCE_PORTVLAN_GET, NULL}, + {"nestVlan", "set", "set nest VLAN status of a port", + " ", SW_API_PT_NESTVLAN_SET, NULL}, + {"nestVlan", "get", "get nest VLAN status of a port", + "", SW_API_PT_NESTVLAN_GET, NULL}, + {"sVlanTPID", "set", "set service VLAN tpid", + "", SW_API_NESTVLAN_TPID_SET, NULL}, + {"sVlanTPID", "get", "get service VLAN tpid", + "", SW_API_NESTVLAN_TPID_GET, NULL}, + /*shiva*/ + {"invlan", "set", "set port invlan mode", + " ", + SW_API_PT_IN_VLAN_MODE_SET, NULL}, + {"invlan", "get", "get port invlan mode", + "", SW_API_PT_IN_VLAN_MODE_GET, NULL}, + {"tlsMode", "set", "set TLS mode", + " ", SW_API_PT_TLS_SET, NULL}, + {"tlsMode", "get", "get TLS mode", + "", SW_API_PT_TLS_GET, NULL}, + {"priPropagation", "set", "set priority propagation", + " ", SW_API_PT_PRI_PROPAGATION_SET, NULL}, + {"priPropagation", "get", "get priority propagation", + "", SW_API_PT_PRI_PROPAGATION_GET, NULL}, + {"defaultSVid", "set", "set default SVID", + " ", SW_API_PT_DEF_SVID_SET, NULL}, + {"defaultSVid", "get", "get default SVID", + "", SW_API_PT_DEF_SVID_GET, NULL}, + {"defaultCVid", "set", "set default CVID", + " ", SW_API_PT_DEF_CVID_SET, NULL}, + {"defaultCVid", "get", "get default CVID", + "", SW_API_PT_DEF_CVID_GET, NULL}, + {"vlanPropagation", "set", "set vlan propagation", + " ", SW_API_PT_VLAN_PROPAGATION_SET, NULL}, + {"vlanPropagation", "get", "get vlan propagation", + "", SW_API_PT_VLAN_PROPAGATION_GET, NULL}, + {"translation", "add", "add vlan translation", + "", SW_API_PT_VLAN_TRANS_ADD, NULL}, + {"translation", "del", "del vlan translation", + "", 
SW_API_PT_VLAN_TRANS_DEL, NULL}, + {"translation", "get", "get vlan translation", + "", SW_API_PT_VLAN_TRANS_GET, NULL}, + {"translation", "iterate", "iterate vlan translation tables", + " ", SW_API_PT_VLAN_TRANS_ITERATE, NULL}, + {"qinqMode", "set", "set qinq mode", + "", SW_API_QINQ_MODE_SET, NULL}, + {"qinqMode", "get", "get qinq mode", + "", SW_API_QINQ_MODE_GET, NULL}, + {"qinqRole", "set", "set qinq role", + " ", SW_API_PT_QINQ_ROLE_SET, NULL}, + {"qinqRole", "get", "get qinq role", + "", SW_API_PT_QINQ_ROLE_GET, NULL}, + {"macvlanxlt", "set", "set mac vlan xlt status", + " ", SW_API_PT_MAC_VLAN_XLT_SET, NULL}, + {"macvlanxlt", "get", "set mac vlan xlt status", + "", SW_API_PT_MAC_VLAN_XLT_GET, NULL}, + {"netiso", "set", "enable public/private net isolate", + "", SW_API_NETISOLATE_SET, NULL}, + {"netiso", "get", "get public/private net isolate status", + "", SW_API_NETISOLATE_GET, NULL}, + {"egbypass", "set", "enable egress translation filter bypass", + "", SW_API_EG_FLTR_BYPASS_EN_SET, NULL}, + {"egbypass", "get", "get the status of egress translation filter bypass", + "", SW_API_EG_FLTR_BYPASS_EN_GET, NULL}, + {"ptvrfid", "set", "set port VRF ID", + " ", SW_API_PT_VRF_ID_SET, NULL}, + {"ptvrfid", "get", "get port VRF ID", + "", SW_API_PT_VRF_ID_GET, NULL}, + {"globalqinqmode", "set", "set global qinq mode", + "", SW_API_GLOBAL_QINQ_MODE_SET, NULL}, + {"globalqinqmode", "get", "get global qinq mode", + "", SW_API_GLOBAL_QINQ_MODE_GET, NULL}, + {"ptqinqmode", "set", "set port qinq mode", + "", SW_API_PORT_QINQ_MODE_SET, NULL}, + {"ptqinqmode", "get", "get port qinq mode", + "", SW_API_PORT_QINQ_MODE_GET, NULL}, + {"intpid", "set", "set ingress tpid", "", SW_API_TPID_SET, NULL}, + {"intpid", "get", "get ingress tpid", "", SW_API_TPID_GET, NULL}, + {"egtpid", "set", "set egress tpid", "", SW_API_EGRESS_TPID_SET, NULL}, + {"egtpid", "get", "get egress tpid", "", SW_API_EGRESS_TPID_GET, NULL}, + {"ingressfilter", "set", "set port ingress filter", + "", 
SW_API_PT_INGRESS_VLAN_FILTER_SET, NULL}, + {"ingressfilter", "get", "get port ingress filter", + "", SW_API_PT_INGRESS_VLAN_FILTER_GET, NULL}, + {"defaultvlantag", "set", "set port default vlan tag", + " ", SW_API_PT_DEFAULT_VLANTAG_SET, NULL}, + {"defaultvlantag", "get", "get port default vlan tag", + " ", SW_API_PT_DEFAULT_VLANTAG_GET, NULL}, + {"tagpropagation", "set", "set port tag propagation", + " ", SW_API_PT_TAG_PROPAGATION_SET, NULL}, + {"tagpropagation", "get", "get port tag propagation", + " ", SW_API_PT_TAG_PROPAGATION_GET, NULL}, + {"egmode", "set", "set port egress vlan mode", + "", SW_API_PT_VLANTAG_EGMODE_SET, NULL}, + {"egmode", "get", "get port egress vlan mode", + "", SW_API_PT_VLANTAG_EGMODE_GET, NULL}, + {"translationmissaction", "set", "set port xlt miss command", + " ", + SW_API_PT_VLAN_XLT_MISS_CMD_SET, NULL}, + {"translationmissaction", "get", "get port xlt miss command", + "", SW_API_PT_VLAN_XLT_MISS_CMD_GET, NULL}, + {"vsiegmode", "set", "set a vsi port egress tag", + " ", + SW_API_PT_VSI_EGMODE_SET, NULL}, + {"vsiegmode", "get", "get a vsi port egress tag", + " ", SW_API_PT_VSI_EGMODE_GET, NULL}, + {"vsiegmodeen", "set", "set port vlan tag vsi egress mode enable or not", + " ", SW_API_PT_VLANTAG_VSI_EGMODE_EN_SET, NULL}, + {"vsiegmodeen", "get", "get port vlan tag vsi egress mode enable or not", + "", SW_API_PT_VLANTAG_VSI_EGMODE_EN_GET, NULL}, + {"translationAdv", "add", "add a vlan translation entry based on port and direction", + " ", SW_API_PT_VLAN_TRANS_ADV_ADD, NULL}, + {"translationAdv", "del", "del a vlan translation entry based on port and direction", + " ", SW_API_PT_VLAN_TRANS_ADV_DEL, NULL}, + {"translationAdv", "getfirst", "get first vlan entry based on port and direction", + " ", SW_API_PT_VLAN_TRANS_ADV_GETFIRST, NULL}, + {"translationAdv", "getnext", "get next vlan entry based on port and direction", + " ", SW_API_PT_VLAN_TRANS_ADV_GETNEXT, NULL}, + {"translationAdv", "show", "get all vlan entries based on port and 
direction", + " <1:ingress|2:egress>", + SW_CMD_PT_VLAN_TRANS_ADV_SHOW, cmd_show_ptvlan_entry}, + {"counter", "set", "clean up port vlan counter", + "", SW_API_PT_VLAN_COUNTER_CLEANUP, NULL}, + {"counter", "get", "get port vlan counter", + "", SW_API_PT_VLAN_COUNTER_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /*fdb*/ +#ifdef IN_FDB + { + "fdb", "config FDB table", + { + {"entry", "add", "add a FDB entry", "", SW_API_FDB_ADD, NULL}, + {"entry", "del", "delete a FDB entry", "", SW_API_FDB_DELMAC, NULL}, + {"entry", "flush", "flush all FDB entries", "<0:dynamic only|1:dynamic and static>", SW_API_FDB_DELALL, NULL}, + {"entry", "show", "show whole FDB entries", "", SW_CMD_FDB_SHOW, cmd_show_fdb}, + {"entry", "find", "find a FDB entry", "", SW_API_FDB_FIND, NULL}, + {"entry", "iterate", "iterate all FDB entries", "", SW_API_FDB_ITERATE, NULL}, + {"entry", "extendnext", "find next FDB entry in extend mode", "", SW_API_FDB_EXTEND_NEXT, NULL}, + {"entry", "extendfirst", "find first FDB entry in extend mode", "", SW_API_FDB_EXTEND_FIRST, NULL}, + {"entry", "transfer", "transfer port info in FDB entry", " ", SW_API_FDB_TRANSFER, NULL}, + {"portEntry", "flush", "flush all FDB entries by a port", " <0:dynamic only|1:dynamic and static>", SW_API_FDB_DELPORT, NULL}, + {"firstEntry", "find", "find the first FDB entry", "", SW_API_FDB_FIRST, NULL}, + {"nextEntry", "find", "find next FDB entry", "", SW_API_FDB_NEXT, NULL}, + {"portLearn", "set", "set FDB entry learning status of a port", " ", SW_API_FDB_PT_LEARN_SET, NULL}, + {"portLearn", "get", "get FDB entry learning status of a port", "", SW_API_FDB_PT_LEARN_GET, NULL}, + {"ptLearnCtrl", "set", "set new address learning and forward", " ", SW_API_FDB_PT_NEWADDR_LEARN_SET, NULL}, + {"ptLearnCtrl", "get", "get new address learning and forward", "", SW_API_FDB_PT_NEWADDR_LEARN_GET, NULL}, + {"ptStationMove", "set", "set station move learning and forward", " ", 
SW_API_FDB_PT_STAMOVE_SET, NULL}, + {"ptStationMove", "get", "get station move learning and forward", "", SW_API_FDB_PT_STAMOVE_GET, NULL}, + {"ageCtrl", "set", "set FDB entry aging status", "", SW_API_FDB_AGE_CTRL_SET, NULL}, + {"ageCtrl", "get", "get FDB entry aging status", "", SW_API_FDB_AGE_CTRL_GET, NULL}, + {"learnCtrl", "set", "set FDB entry learn status", "", SW_API_FDB_LEARN_CTRL_SET, NULL}, + {"learnCtrl", "get", "get FDB entry learn status", "", SW_API_FDB_LEARN_CTRL_GET, NULL}, + {"vlansmode", "set", "set FDB vlan search mode", "", SW_API_FDB_VLAN_IVL_SVL_SET, NULL}, + {"vlansmode", "get", "get FDB vlan search mode", "", SW_API_FDB_VLAN_IVL_SVL_GET, NULL}, + {"ageTime", "set", "set FDB entry aging time", "", SW_API_FDB_AGE_TIME_SET, NULL}, + {"ageTime", "get", "get FDB entry aging time", "", SW_API_FDB_AGE_TIME_GET, NULL}, + {"ptlearncounter", "get", "get port FDB entry learn counter", "", SW_API_PT_FDB_LEARN_COUNTER_GET, NULL}, + {"ptlearnlimit", "set", "set port FDB entry learn limit", " ", SW_API_PT_FDB_LEARN_LIMIT_SET, NULL}, + {"ptlearnlimit", "get", "get port FDB entry learn limit", "", SW_API_PT_FDB_LEARN_LIMIT_GET, NULL}, + {"ptlearnexceedcmd", "set", "set port forwarding cmd when exceed learn limit", " ", SW_API_PT_FDB_LEARN_EXCEED_CMD_SET, NULL}, + {"ptlearnexceedcmd", "get", "get port forwarding cmd when exceed learn limit", "", SW_API_PT_FDB_LEARN_EXCEED_CMD_GET, NULL}, + {"learnlimit", "set", "set FDB entry learn limit", " ", SW_API_FDB_LEARN_LIMIT_SET, NULL}, + {"learnlimit", "get", "get FDB entry learn limit", "", SW_API_FDB_LEARN_LIMIT_GET, NULL}, + {"learnexceedcmd", "set", "set forwarding cmd when exceed learn limit", "", SW_API_FDB_LEARN_EXCEED_CMD_SET, NULL}, + {"learnexceedcmd", "get", "get forwarding cmd when exceed learn limit", "", SW_API_FDB_LEARN_EXCEED_CMD_GET, NULL}, + {"resventry", "add", "add a reserve FDB entry", "", SW_API_FDB_RESV_ADD, NULL}, + {"resventry", "del", "delete reserve a FDB entry", "", SW_API_FDB_RESV_DEL, 
NULL}, + {"resventry", "find", "find a reserve FDB entry", "", SW_API_FDB_RESV_FIND, NULL}, + {"resventry", "iterate", "iterate all reserve FDB entries", "", SW_API_FDB_RESV_ITERATE, NULL}, + {"resventry", "show", "show whole resv FDB entries", "", SW_CMD_RESV_FDB_SHOW, cmd_show_resv_fdb}, + {"ptLearnstatic", "set", "set FDB entry learning static status of a port", " ", SW_API_FDB_PT_LEARN_STATIC_SET, NULL}, + {"ptLearnStatic", "get", "get FDB entry learning static status of a port", "", SW_API_FDB_PT_LEARN_STATIC_GET, NULL}, + {"port", "add", "add one port to a FDB entry", " ", SW_API_FDB_PORT_ADD, NULL}, + {"port", "del", "del one port from a FDB entry", " ", SW_API_FDB_PORT_DEL, NULL}, + {"fdbrfs", "set", "add a FDB rfs", "", SW_API_FDB_RFS_SET, NULL}, + {"fdbrfs", "del", "delete a FDB rfs", "", SW_API_FDB_RFS_DEL, NULL}, + {"ptmaclimitctrl", "set", "set port maclimit ctrl", "", SW_API_FDB_PT_MACLIMIT_CTRL_SET, NULL}, + {"ptmaclimitctrl", "get", "get port maclimit ctrl", "", SW_API_FDB_PT_MACLIMIT_CTRL_GET, NULL}, + {"fidEntry", "flush", "flush all FDB entries by a fid", " <0:dynamic only|1:dynamic and static>", SW_API_FDB_DEL_BY_FID, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /*acl*/ +#ifdef IN_ACL + { + "acl", "config ACL", + { + {"list", "create", "create an ACL list", " ", SW_API_ACL_LIST_CREAT, NULL}, + {"list", "destroy", "destroy an ACL list", "", SW_API_ACL_LIST_DESTROY, NULL}, + {"list", "bind", "bind an ACL list to a port", " <0-0:direction> <0-0:objtype> ", SW_API_ACL_LIST_BIND, NULL}, + {"list", "unbind", "unbind an ACL list from a port", " <0-0:direction> <0-0:objtype> ", SW_API_ACL_LIST_UNBIND, NULL}, + {"rule", "add", "add ACL rules to an ACL list", " ", SW_API_ACL_RULE_ADD, NULL}, + {"rule", "del", "delete ACL rules from an ACL list", " ", SW_API_ACL_RULE_DELETE, NULL}, + {"rule", "query", "query a ACL rule", " ", SW_API_ACL_RULE_QUERY, NULL}, + {"rule", "active", "active ACL rules in an ACL 
list", " ", SW_API_ACL_RULE_ACTIVE, NULL}, + {"rule", "deactive", "deactive ACL rules in an ACL list", " ", SW_API_ACL_RULE_DEACTIVE, NULL}, + {"srcfiltersts", "set", "set status of ACL rules source filter", " ", SW_API_ACL_RULE_SRC_FILTER_STS_SET, NULL}, + {"srcfiltersts", "get", "get status of ACL rules source filter", "", SW_API_ACL_RULE_SRC_FILTER_STS_GET, NULL}, + {"status", "set", "set status of ACL engine", "", SW_API_ACL_STATUS_SET, NULL}, + {"status", "get", "get status of ACL engine", "", SW_API_ACL_STATUS_GET, NULL}, + {"udfprofile", "set", "set port udf profile", " ", SW_API_ACL_PT_UDF_PROFILE_SET, NULL}, + {"udfprofile", "get", "get port udf profile", " ", SW_API_ACL_PT_UDF_PROFILE_GET, NULL}, + {"udf", "set", "set udf", " <0-3> ", SW_API_ACL_UDF_SET, NULL}, + {"udf", "get", "get udf", " <0-3>", SW_API_ACL_UDF_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /*qos*/ +#ifdef IN_QOS + { + "qos", "config Qos", + { + {"schMode", "set", "set traffic scheduling mode", " ", SW_API_QOS_SCH_MODE_SET, NULL}, + {"schMode", "get", "get traffic scheduling mode", "", SW_API_QOS_SCH_MODE_GET, NULL}, + {"qTxBufSts", "set", "set queue tx buffer counting status of a port", " ", SW_API_QOS_QU_TX_BUF_ST_SET, NULL}, + {"qTxBufSts", "get", "get queue tx buffer counting status of a port", "", SW_API_QOS_QU_TX_BUF_ST_GET, NULL}, + {"qTxBufNr", "set", "set queue tx buffer number", " ", SW_API_QOS_QU_TX_BUF_NR_SET, NULL}, + {"qTxBufNr", "get", "get queue tx buffer number", " ", SW_API_QOS_QU_TX_BUF_NR_GET, NULL}, + {"ptTxBufSts", "set", "set port tx buffer counting status of a port", " ", SW_API_QOS_PT_TX_BUF_ST_SET, NULL}, + {"ptTxBufSts", "get", "get port tx buffer counting status of a port", "", SW_API_QOS_PT_TX_BUF_ST_GET, NULL}, + {"ptRedEn", "set", "set status of port wred of a port", " ", SW_API_QOS_PT_RED_EN_SET, NULL}, + {"ptRedEn", "get", "get status of port wred of a port", "", SW_API_QOS_PT_RED_EN_GET, NULL}, + 
{"ptTxBufNr", "set", "set port tx buffer number", " ", SW_API_QOS_PT_TX_BUF_NR_SET, NULL}, + {"ptTxBufNr", "get", "get port tx buffer number", "", SW_API_QOS_PT_TX_BUF_NR_GET, NULL}, + {"ptRxBufNr", "set", "set port rx buffer number", " ", SW_API_QOS_PT_RX_BUF_NR_SET, NULL}, + {"ptRxBufNr", "get", "get port rx buffer number", "", SW_API_QOS_PT_RX_BUF_NR_GET, NULL}, + {"up2q", "set", "set user priority to queue mapping", " ", SW_API_COSMAP_UP_QU_SET, NULL}, + {"up2q", "get", "get user priority to queue mapping", "", SW_API_COSMAP_UP_QU_GET, NULL}, + {"dscp2q", "set", "set dscp to queue mapping", " ", SW_API_COSMAP_DSCP_QU_SET, NULL}, + {"dscp2q", "get", "get dscp to queue mapping", "", SW_API_COSMAP_DSCP_QU_GET, NULL}, + {"ptMode", "set", "set Qos mode of a port", " ", SW_API_QOS_PT_MODE_SET, NULL}, + {"ptMode", "get", "get Qos mode of a port", " ", SW_API_QOS_PT_MODE_GET, NULL}, + {"ptModePri", "set", "set the priority of Qos modes of a port", " ", SW_API_QOS_PT_MODE_PRI_SET, NULL}, + {"ptModePri", "get", "get the priority of Qos modes of a port", " ", SW_API_QOS_PT_MODE_PRI_GET, NULL}, + {"ptDefaultUp", "set", "set default user priority for received frames of a port", " ", SW_API_QOS_PORT_DEF_UP_SET, NULL}, + {"ptDefaultUp", "get", "get default user priority for received frames of a port", "", SW_API_QOS_PORT_DEF_UP_GET, NULL}, + {"ptschMode", "set", "set port traffic scheduling mode", " ", SW_API_QOS_PORT_SCH_MODE_SET, NULL}, + {"ptschMode", "get", "get port traffic scheduling mode", "", SW_API_QOS_PORT_SCH_MODE_GET, NULL}, + {"ptDefaultSpri", "set", "set default stag priority for received frames of a port", " ", SW_API_QOS_PT_DEF_SPRI_SET, NULL}, + {"ptDefaultSpri", "get", "get default stag priority for received frames of a port", "", SW_API_QOS_PT_DEF_SPRI_GET, NULL}, + {"ptDefaultCpri", "set", "set default ctag priority for received frames of a port", " ", SW_API_QOS_PT_DEF_CPRI_SET, NULL}, + {"ptDefaultCpri", "get", "get default ctag priority for received 
frames of a port", "", SW_API_QOS_PT_DEF_CPRI_GET, NULL}, + {"ptFSpriSts", "set", "set port force Stag priority status for received frames of a port", " ", SW_API_QOS_PT_FORCE_SPRI_ST_SET, NULL}, + {"ptFSpriSts", "get", "get port force Stag priority status for received frames of a port", "", SW_API_QOS_PT_FORCE_SPRI_ST_GET, NULL}, + {"ptFCpriSts", "set", "set port force Ctag priority status for received frames of a port", " ", SW_API_QOS_PT_FORCE_CPRI_ST_SET, NULL}, + {"ptFCpriSts", "get", "get port force Ctag priority status for received frames of a port", "", SW_API_QOS_PT_FORCE_CPRI_ST_GET, NULL}, + {"ptQuRemark", "set", "set egress queue based remark", " ", SW_API_QOS_QUEUE_REMARK_SET, NULL}, + {"ptQuRemark", "get", "get egress queue based remark", " ", SW_API_QOS_QUEUE_REMARK_GET, NULL}, + {"ptgroup", "set", "set port group", " ", SW_API_QOS_PORT_GROUP_SET, NULL}, + {"ptgroup", "get", "get port group", "", SW_API_QOS_PORT_GROUP_GET, NULL}, + {"ptpriprece", "set", "set port priority precedence", "", SW_API_QOS_PORT_PRI_SET, NULL}, + {"ptPriprece", "get", "get port priority precedence", "", SW_API_QOS_PORT_PRI_GET, NULL}, + {"ptremark", "set", "set port remark", " ", SW_API_QOS_PORT_REMARK_SET, NULL}, + {"ptremark", "get", "get port remark", "", SW_API_QOS_PORT_REMARK_GET, NULL}, + {"pcpmap", "set", "set pcp map", " ", SW_API_QOS_PCP_MAP_SET, NULL}, + {"pcpmap", "get", "get pcp map", " ", SW_API_QOS_PCP_MAP_GET, NULL}, + {"flowmap", "set", "set flow map", " ", SW_API_QOS_FLOW_MAP_SET, NULL}, + {"flowmap", "get", "get flow map", " ", SW_API_QOS_FLOW_MAP_GET, NULL}, + {"dscpmap", "set", "set dscp map", " ", SW_API_QOS_DSCP_MAP_SET, NULL}, + {"dscpmap", "get", "get dscp map", " ", SW_API_QOS_DSCP_MAP_GET, NULL}, + {"qscheduler", "set", "set queue scheduler", " ", SW_API_QOS_QUEUE_SCHEDULER_SET, NULL}, + {"qscheduler", "get", "get queue scheduler", " ", SW_API_QOS_QUEUE_SCHEDULER_GET, NULL}, + {"ringqueue", "set", "set ring queue map", " ", 
SW_API_QOS_RING_QUEUE_MAP_SET, NULL}, + {"ringqueue", "get", "get ring queue map", "", SW_API_QOS_RING_QUEUE_MAP_GET, NULL}, + {"portqueues", "get", "get queues belong to port", "", SW_API_QOS_PORT_QUEUES_GET, NULL}, + {"dequeue", "get", "dequeue control get", "", SW_API_QOS_SCHEDULER_DEQUEU_CTRL_GET, NULL}, + {"dequeue", "set", "dequeue control set", " ", SW_API_QOS_SCHEDULER_DEQUEU_CTRL_SET, NULL}, + {"portscheduler", "reset", "reset queue scheduler config", "", SW_API_QOS_PORT_SCHEDULER_CFG_RESET, NULL}, + {"schedulerresource", "get", "get port scheduler resource", "", SW_API_QOS_PORT_SCHEDULER_RESOURCE_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /*igmp*/ +#ifdef IN_IGMP + { + "igmp", "config IGMP/MLD", + { + {"mode", "set", "set IGMP/MLD snooping status of a port", " ", SW_API_PT_IGMPS_MODE_SET, NULL}, + {"mode", "get", "get port IGMP/MLD snooping status", "", SW_API_PT_IGMPS_MODE_GET, NULL}, + {"cmd", "set", "set IGMP/MLD frames forwarding command", "", SW_API_IGMP_MLD_CMD_SET, NULL}, + {"cmd", "get", "get IGMP/MLD frames forwarding command", "", SW_API_IGMP_MLD_CMD_GET, NULL}, + {"portJoin", "set", "set IGMP/MLD hardware joining status", " ", SW_API_IGMP_PT_JOIN_SET, NULL}, + {"portJoin", "get", "get IGMP/MLD hardware joining status", "", SW_API_IGMP_PT_JOIN_GET, NULL}, + {"portLeave", "set", "set IGMP/MLD hardware leaving status", " ", SW_API_IGMP_PT_LEAVE_SET, NULL}, + {"portLeave", "get", "get IGMP/MLD hardware leaving status", "", SW_API_IGMP_PT_LEAVE_GET, NULL}, + {"rp", "set", "set IGMP/MLD router ports", "", SW_API_IGMP_RP_SET, NULL}, + {"rp", "get", "get IGMP/MLD router ports", "", SW_API_IGMP_RP_GET, NULL}, + {"createStatus", "set", "set IGMP/MLD ability for creating entry", "", SW_API_IGMP_ENTRY_CREAT_SET, NULL}, + {"createStatus", "get", "get IGMP/MLD ability for creating entry", "", SW_API_IGMP_ENTRY_CREAT_GET, NULL}, + {"static", "set", "set IGMP/MLD static status for creating entry", "", 
SW_API_IGMP_ENTRY_STATIC_SET, NULL}, + {"static", "get", "get IGMP/MLD static status for creating entry", "", SW_API_IGMP_ENTRY_STATIC_GET, NULL}, + {"leaky", "set", "set IGMP/MLD leaky status for creating entry", "", SW_API_IGMP_ENTRY_LEAKY_SET, NULL}, + {"leaky", "get", "get IGMP/MLD leaky status for creating entry", "", SW_API_IGMP_ENTRY_LEAKY_GET, NULL}, + {"version3", "set", "set IGMP v3/MLD v2 status for creating entry", "", SW_API_IGMP_ENTRY_V3_SET, NULL}, + {"version3", "get", "get IGMP v3/MLD v2 status for creating entry", "", SW_API_IGMP_ENTRY_V3_GET, NULL}, + {"queue", "set", "set IGMP/MLD queue status for creating entry", " ", SW_API_IGMP_ENTRY_QUEUE_SET, NULL}, + {"queue", "get", "get IGMP/MLD queue status for creating entry", "", SW_API_IGMP_ENTRY_QUEUE_GET, NULL}, + {"ptlearnlimit", "set", "set port Multicast entry learn limit", " ", SW_API_PT_IGMP_LEARN_LIMIT_SET, NULL}, + {"ptlearnlimit", "get", "get port Multicast entry learn limit", "", SW_API_PT_IGMP_LEARN_LIMIT_GET, NULL}, + {"ptlearnexceedcmd", "set", "set port forwarding cmd when exceed multicast learn limit", " ", SW_API_PT_IGMP_LEARN_EXCEED_CMD_SET, NULL}, + {"ptlearnexceedcmd", "get", "get port forwarding cmd when exceed multicast learn limit", "", SW_API_PT_IGMP_LEARN_EXCEED_CMD_GET, NULL}, + {"multi", "set", "set igmp/mld entry", "", SW_API_IGMP_SG_ENTRY_SET, NULL}, + {"multi", "clear", "clear igmp/mld entry", "", SW_API_IGMP_SG_ENTRY_CLEAR, NULL}, + {"multi", "show", "show all igmp/mld entry", "", SW_API_IGMP_SG_ENTRY_SHOW, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /*leaky*/ +#ifdef IN_LEAKY + { + "leaky", "config leaky", + { + {"ucMode", "set", "set unicast packets leaky mode", "", SW_API_UC_LEAKY_MODE_SET, NULL}, + {"ucMode", "get", "get unicast packets leaky mode", "", SW_API_UC_LEAKY_MODE_GET, NULL}, + {"mcMode", "set", "set multicast packets leaky mode", "", SW_API_MC_LEAKY_MODE_SET, NULL}, + {"mcMode", "get", "get multicast 
packets leaky mode", "", SW_API_MC_LEAKY_MODE_GET, NULL}, + {"arpMode", "set", "set arp packets leaky mode", " ", SW_API_ARP_LEAKY_MODE_SET, NULL}, + {"arpMode", "get", "get arp packets leaky mode", "", SW_API_ARP_LEAKY_MODE_GET, NULL}, + {"ptUcMode", "set", "set unicast packets leaky status of a port", " ", SW_API_PT_UC_LEAKY_MODE_SET, NULL}, + {"ptUcMode", "get", "get unicast packets leaky status of a port", "", SW_API_PT_UC_LEAKY_MODE_GET, NULL}, + {"ptMcMode", "set", "set multicast packets leaky status of a port", " ", SW_API_PT_MC_LEAKY_MODE_SET, NULL}, + {"ptMcMode", "get", "get multicast packets leaky status of a port", "", SW_API_PT_MC_LEAKY_MODE_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /*mirror*/ +#ifdef IN_MIRROR + { + "mirror", "config mirror", + { + {"analyPt", "set", "set mirror analysis port", "", SW_API_MIRROR_ANALY_PT_SET, NULL}, + {"analyPt", "get", "get mirror analysis port", "", SW_API_MIRROR_ANALY_PT_GET, NULL}, + {"ptIngress", "set", "set ingress mirror status of a port", " ", SW_API_MIRROR_IN_PT_SET, NULL}, + {"ptIngress", "get", "get ingress mirror status of a port", "", SW_API_MIRROR_IN_PT_GET, NULL}, + {"ptEgress", "set", "set egress mirror status of a port", " ", SW_API_MIRROR_EG_PT_SET, NULL}, + {"ptEgress", "get", "get egress mirror status of a port", "", SW_API_MIRROR_EG_PT_GET, NULL}, + {"analyCfg", "set", "set analysis configure", "", SW_API_MIRROR_ANALYSIS_CONFIG_SET, NULL}, + {"analyCfg", "get", "get analysis configure", "", SW_API_MIRROR_ANALYSIS_CONFIG_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /*rate*/ +#ifdef IN_RATE + { + "rate", "config rate limit", + { + {"qEgress", "set", "set egress rate limit of a queue", " ", SW_API_RATE_QU_EGRL_SET, NULL}, + {"qEgress", "get", "get egress rate limit of a queue", " ", SW_API_RATE_QU_EGRL_GET, NULL}, + {"ptEgress", "set", "set egress rate limit of a port", " ", 
SW_API_RATE_PT_EGRL_SET, NULL}, + {"ptEgress", "get", "get egress rate limit of a port", "", SW_API_RATE_PT_EGRL_GET, NULL}, + {"ptIngress", "set", "set ingress rate limit of a port", " ", SW_API_RATE_PT_INRL_SET, NULL}, + {"ptIngress", "get", "get ingress rate limit of a port", "", SW_API_RATE_PT_INRL_GET, NULL}, + {"stormCtrl", "set", "set storm control status of a particular frame type", " ", SW_API_STORM_CTRL_FRAME_SET, NULL}, + {"stormCtrl", "get", "get storm control status of a particular frame type", " ", SW_API_STORM_CTRL_FRAME_GET, NULL}, + {"stormCtrlRate", "set", "set storm ctrl rate", " ", SW_API_STORM_CTRL_RATE_SET, NULL}, + {"stormCtrlRate", "get", "get storm ctrl rate", "", SW_API_STORM_CTRL_RATE_GET, NULL}, + {"portpolicer", "set", "set port policer", "", SW_API_RATE_PORT_POLICER_SET, NULL}, + {"portpolicer", "get", "get port policer", "", SW_API_RATE_PORT_POLICER_GET, NULL}, + {"portshaper", "set", "set port egress shaper", " ", SW_API_RATE_PORT_SHAPER_SET, NULL}, + {"portshaper", "get", "get port egress shaper", "", SW_API_RATE_PORT_SHAPER_GET, NULL}, + {"queueshaper", "set", "set queue egress shaper", " ", SW_API_RATE_QUEUE_SHAPER_SET, NULL}, + {"queueshaper", "get", "get queue egress shaper", " ", SW_API_RATE_QUEUE_SHAPER_GET, NULL}, + {"aclpolicer", "set", "set acl policer", "", SW_API_RATE_ACL_POLICER_SET, NULL}, + {"aclpolicer", "get", "get acl policer", "", SW_API_RATE_ACL_POLICER_GET, NULL}, + {"ptAddRateByte", "set", "set add_rate_byte when cal rate ", " ", SW_API_RATE_PT_ADDRATEBYTE_SET, NULL}, + {"ptAddRateByte", "get", "get add_rate_byte when cal rate ", "", SW_API_RATE_PT_ADDRATEBYTE_GET, NULL}, + {"ptgolflowen", "set", "set status of port globle flow control", " ", SW_API_RATE_PT_GOL_FLOW_EN_SET, NULL}, + {"ptgolflowen", "get", "get status of port globle flow control", "", SW_API_RATE_PT_GOL_FLOW_EN_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + +#ifdef IN_SEC + { + "sec", "config 
security", + { + {"mac", "set", "set MAC layer related security", " ", SW_API_SEC_MAC_SET, NULL}, + {"mac", "get", "get MAC layer related security", "", SW_API_SEC_MAC_GET, NULL}, + {"ip", "set", "set IP layer related security", " ", SW_API_SEC_IP_SET, NULL}, + {"ip", "get", "get IP layer related security", "", SW_API_SEC_IP_GET, NULL}, + {"ip4", "set", "set IP4 related security", " ", SW_API_SEC_IP4_SET, NULL}, + {"ip4", "get", "get IP4 related security", "", SW_API_SEC_IP4_GET, NULL}, + {"ip6", "set", "set IP6 related security", " ", SW_API_SEC_IP6_SET, NULL}, + {"ip6", "get", "get IP6 related security", "", SW_API_SEC_IP6_GET, NULL}, + {"tcp", "set", "set TCP related security", " ", SW_API_SEC_TCP_SET, NULL}, + {"tcp", "get", "get TCP related security", "", SW_API_SEC_TCP_GET, NULL}, + {"udp", "set", "set UDP related security", " ", SW_API_SEC_UDP_SET, NULL}, + {"udp", "get", "get UDP related security", "", SW_API_SEC_UDP_GET, NULL}, + {"icmp4", "set", "set ICMP4 related security", " ", SW_API_SEC_ICMP4_SET, NULL}, + {"icmp4", "get", "get ICMP4 related security", "", SW_API_SEC_ICMP4_GET, NULL}, + {"icmp6", "set", "set ICMP6 related security", " ", SW_API_SEC_ICMP6_SET, NULL}, + {"icmp6", "get", "get ICMP6 related security", "", SW_API_SEC_ICMP6_GET, NULL}, + {"l3parser", "get", "get l3 parser ctrl", " ", SW_API_SEC_L3_PARSER_CTRL_GET, NULL}, + {"l3parser", "set", "set l3 parser ctrl", " ", SW_API_SEC_L3_PARSER_CTRL_SET, NULL}, + {"l4parser", "get", "get l4 parser ctrl", " ", SW_API_SEC_L4_PARSER_CTRL_GET, NULL}, + {"l4parser", "set", "set l4 parser ctrl", " ", SW_API_SEC_L4_PARSER_CTRL_SET, NULL}, + {"expctrl", "get", "get l3 exp ctrl", "", SW_API_SEC_EXP_CTRL_GET, NULL}, + {"expctrl", "set", "set l3 exp ctrl", "", SW_API_SEC_EXP_CTRL_SET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /*stp*/ +#ifdef IN_STP + { + "stp", "config STP", + { + {"portState", "set", "set STP state of a port", " ", 
SW_API_STP_PT_STATE_SET, NULL}, + {"portState", "get", "get STP state of a port", " ", SW_API_STP_PT_STATE_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /*mib*/ +#ifdef IN_MIB + { + "mib", "show MIB statistics information", + { + {"statistics", "get", "get statistics information of a port", "", SW_API_PT_MIB_GET, NULL}, + {"status", "set", "set mib status", "", SW_API_MIB_STATUS_SET, NULL}, + {"status", "get", "get mib status", "", SW_API_MIB_STATUS_GET, NULL}, + {"counters", "flush", "flush counters of a port", "", SW_API_PT_MIB_FLUSH_COUNTERS, NULL}, + {"cpuKeep", "set", "set cpu keep bit", "", SW_API_MIB_CPU_KEEP_SET, NULL}, + {"cpuKeep", "get", "get cpu keep bit", "", SW_API_MIB_CPU_KEEP_GET, NULL}, + {"xgstatistics","get", "get statistics information of a xg_port", "",SW_API_PT_XGMIB_GET}, + {"counter", "get", "get counter information of a port", "", SW_API_PT_MIB_COUNTER_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /* led */ +#ifdef IN_LED + { + "led", "set/get led control pattern", + { + {"ctrlpattern", "set", "set led control pattern", " ", SW_API_LED_PATTERN_SET, NULL}, + {"ctrlpattern", "get", "get led control pattern", " ", SW_API_LED_PATTERN_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /* cosmap */ +#ifdef IN_COSMAP + { + "cosmap", "set/get cosmap table", + { + {"dscp2pri", "set", "set dscp to priority map table", " ", SW_API_COSMAP_DSCP_TO_PRI_SET, NULL}, + {"dscp2pri", "get", "get dscp to priority map table", "", SW_API_COSMAP_DSCP_TO_PRI_GET, NULL}, + {"dscp2dp", "set", "set dscp to dp map table", " ", SW_API_COSMAP_DSCP_TO_DP_SET, NULL}, + {"dscp2dp", "get", "get dscp to dp map table", "", SW_API_COSMAP_DSCP_TO_DP_GET, NULL}, + {"up2pri", "set", "set dot1p to priority map table", " ", SW_API_COSMAP_UP_TO_PRI_SET, NULL}, + {"up2pri", "get", "get dot1p to priority map table", "", 
SW_API_COSMAP_UP_TO_PRI_GET, NULL}, + {"up2dp", "set", "set dot1p to dp map table", " ", SW_API_COSMAP_UP_TO_DP_SET, NULL}, + {"up2dp", "get", "get dot1p to dp map table", "", SW_API_COSMAP_UP_TO_DP_GET, NULL}, + {"dscp2ehpri", "set", "set dscp to priority map table for WAN port", " ", SW_API_COSMAP_DSCP_TO_EHPRI_SET, NULL}, + {"dscp2ehpri", "get", "get dscp to priority map table for WAN port", "", SW_API_COSMAP_DSCP_TO_EHPRI_GET, NULL}, + {"dscp2ehdp", "set", "set dscp to dp map table for WAN port", " ", SW_API_COSMAP_DSCP_TO_EHDP_SET, NULL}, + {"dscp2ehdp", "get", "get dscp to dp map table for WAN port", "", SW_API_COSMAP_DSCP_TO_EHDP_GET, NULL}, + {"up2ehpri", "set", "set dot1p to priority map table for WAN port", " ", SW_API_COSMAP_UP_TO_EHPRI_SET, NULL}, + {"up2ehpri", "get", "get dot1p to priority map table for WAN port", "", SW_API_COSMAP_UP_TO_EHPRI_GET, NULL}, + {"up2ehdp", "set", "set dot1p to dp map table for WAN port", " ", SW_API_COSMAP_UP_TO_EHDP_SET, NULL}, + {"up2ehdp", "get", "get dot1p to dp map table for WAN port", "", SW_API_COSMAP_UP_TO_EHDP_GET, NULL}, + {"pri2q", "set", "set priority to queue mapping", " ", SW_API_COSMAP_PRI_TO_QU_SET, NULL}, + {"pri2q", "get", "get priority to queue mapping", "", SW_API_COSMAP_PRI_TO_QU_GET, NULL}, + {"pri2ehq", "set", "set priority to enhanced queue mapping", " ", SW_API_COSMAP_PRI_TO_EHQU_SET, NULL}, + {"pri2ehq", "get", "get priority to enhanced queue mapping", "", SW_API_COSMAP_PRI_TO_EHQU_GET, NULL}, + {"egRemark", "set", "set egress remark table", "", SW_API_COSMAP_EG_REMARK_SET, NULL}, + {"egRemark", "get", "get egress remark table", "", SW_API_COSMAP_EG_REMARK_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /*misc*/ +#ifdef IN_MISC + { + "misc", "config miscellaneous", + { + {"arp", "set", "set arp packets hardware identification status", "", SW_API_ARP_STATUS_SET, NULL}, + {"arp", "get", "get arp packets hardware identification status", "", 
SW_API_ARP_STATUS_GET, NULL}, + {"frameMaxSize", "set", "set the maximal received frame size of the device", "", SW_API_FRAME_MAX_SIZE_SET, NULL}, + {"frameMaxSize", "get", "get the maximal received frame size of the device", "", SW_API_FRAME_MAX_SIZE_GET, NULL}, + {"ptUnkSaCmd", "set", "set forwarding command for frames with unknown source address", " ", SW_API_PT_UNK_SA_CMD_SET, NULL}, + {"ptUnkSaCmd", "get", "get forwarding command for frames with unknown source address", "", SW_API_PT_UNK_SA_CMD_GET, NULL}, + {"ptUnkUcFilter", "set", "set flooding status of unknown unicast frames", " ", SW_API_PT_UNK_UC_FILTER_SET, NULL}, + {"ptUnkUcFilter", "get", "get flooding status of unknown unicast frames", "", SW_API_PT_UNK_UC_FILTER_GET, NULL}, + {"ptUnkMcFilter", "set", "set flooding status of unknown multicast frames", " ", SW_API_PT_UNK_MC_FILTER_SET, NULL}, + {"ptUnkMcFilter", "get", "get flooding status of unknown multicast frames", "", SW_API_PT_UNK_MC_FILTER_GET, NULL}, + {"ptBcFilter", "set", "set flooding status of broadcast frames", " ", SW_API_PT_BC_FILTER_SET, NULL}, + {"ptBcFilter", "get", "get flooding status of broadcast frames", "", SW_API_PT_BC_FILTER_GET, NULL}, + {"cpuPort", "set", "set cpu port status", "", SW_API_CPU_PORT_STATUS_SET, NULL}, + {"cpuPort", "get", "get cpu port status", "", SW_API_CPU_PORT_STATUS_GET, NULL}, + {"bctoCpu", "set", "set broadcast frames to Cpu port status", "", SW_API_BC_TO_CPU_PORT_SET, NULL}, + {"bctoCpu", "get", "get broadcast frames to Cpu port status", "", SW_API_BC_TO_CPU_PORT_GET, NULL}, + {"PppoeCmd", "set", "set pppoe frames forwarding command", "", SW_API_PPPOE_CMD_SET, NULL}, + {"PppoeCmd", "get", "get pppoe frames forwarding command", "", SW_API_PPPOE_CMD_GET, NULL}, + {"Pppoe", "set", "set pppoe frames hardware identification status", "", SW_API_PPPOE_STATUS_SET, NULL}, + {"Pppoe", "get", "get pppoe frames hardware identification status", "", SW_API_PPPOE_STATUS_GET, NULL}, + {"ptDhcp", "set", "set dhcp 
frames hardware identification status", " ", SW_API_PT_DHCP_SET, NULL}, + {"ptDhcp", "get", "get dhcp frames hardware identification status", "", SW_API_PT_DHCP_GET, NULL}, + {"arpcmd", "set", "set arp packets forwarding command", "", SW_API_ARP_CMD_SET, NULL}, + {"arpcmd", "get", "get arp packets forwarding command", "", SW_API_ARP_CMD_GET, NULL}, + {"eapolcmd", "set", "set eapol packets forwarding command", "", SW_API_EAPOL_CMD_SET, NULL}, + {"eapolcmd", "get", "get eapol packets forwarding command", "", SW_API_EAPOL_CMD_GET, NULL}, + {"pppoesession", "add", "add a pppoe session entry", " ", SW_API_PPPOE_SESSION_ADD, NULL}, + {"pppoesession", "del", "del a pppoe session entry", "", SW_API_PPPOE_SESSION_DEL, NULL}, + {"pppoesession", "get", "get a pppoe session entry", "", SW_API_PPPOE_SESSION_GET, NULL}, + {"eapolstatus", "set", "set eapol frames hardware identification status", " ", SW_API_EAPOL_STATUS_SET, NULL}, + {"eapolstatus", "get", "get eapol frames hardware identification status", "", SW_API_EAPOL_STATUS_GET, NULL}, + {"rip", "set", "set rip packets hardware identification status", "", SW_API_RIPV1_STATUS_SET, NULL}, + {"rip", "get", "get rip packets hardware identification status", "", SW_API_RIPV1_STATUS_GET, NULL}, + {"ptarpreq", "set", "set arp request packets hardware identification status", " ", SW_API_PT_ARP_REQ_STATUS_SET, NULL}, + {"ptarpreq", "get", "get arp request packets hardware identification status", "", SW_API_PT_ARP_REQ_STATUS_GET, NULL}, + {"ptarpack", "set", "set arp ack packets hardware identification status", " ", SW_API_PT_ARP_ACK_STATUS_SET, NULL}, + {"ptarpack", "get", "get arp ack packets hardware identification status", "", SW_API_PT_ARP_ACK_STATUS_GET, NULL}, + {"extendpppoe", "add", "add a pppoe session entry", "", SW_API_PPPOE_SESSION_TABLE_ADD, NULL}, + {"extendpppoe", "del", "del a pppoe session entry", "", SW_API_PPPOE_SESSION_TABLE_DEL, NULL}, + {"extendpppoe", "get", "get a pppoe session entry", "", 
SW_API_PPPOE_SESSION_TABLE_GET, NULL}, + {"pppoeid", "set", "set a pppoe session id entry", " ", SW_API_PPPOE_SESSION_ID_SET, NULL}, + {"pppoeid", "get", "get a pppoe session id entry", "", SW_API_PPPOE_SESSION_ID_GET, NULL}, + {"intrmask", "set", "set switch interrupt mask", "", SW_API_INTR_MASK_SET, NULL}, + {"intrmask", "get", "get switch interrupt mask", "", SW_API_INTR_MASK_GET, NULL}, + {"intrstatus", "get", "get switch interrupt status", "", SW_API_INTR_STATUS_GET, NULL}, + {"intrstatus", "clear", "clear switch interrupt status", "", SW_API_INTR_STATUS_CLEAR, NULL}, + {"intrportlinkmask", "set", "set link interrupt mask of a port", " ", SW_API_INTR_PORT_LINK_MASK_SET, NULL}, + {"intrportlinkmask", "get", "get link interrupt mask of a port", "", SW_API_INTR_PORT_LINK_MASK_GET, NULL}, + {"intrportlinkstatus", "get", "get link interrupt status of a port", "", SW_API_INTR_PORT_LINK_STATUS_GET, NULL}, + {"intrmaskmaclinkchg", "set", "set switch interrupt mask for mac link change", " ", SW_API_INTR_MASK_MAC_LINKCHG_SET, NULL}, + {"intrmaskmaclinkchg", "get", "get switch interrupt mask for mac link change", "", SW_API_INTR_MASK_MAC_LINKCHG_GET, NULL}, + {"intrstatusmaclinkchg", "get", "get switch interrupt status for mac link change", "", SW_API_INTR_STATUS_MAC_LINKCHG_GET, NULL}, + {"intrstatusmaclinkchg", "clear", "clear switch interrupt status for mac link change", "", SW_API_INTR_STATUS_MAC_LINKCHG_CLEAR, NULL}, + {"cpuVid", "set", "set to_cpu vid status", "", SW_API_CPU_VID_EN_SET, NULL}, + {"cpuVid", "get", "get to_cpu vid status", "", SW_API_CPU_VID_EN_GET, NULL}, + {"rtdPppoe", "set", "set RM_RTD_PPPOE_EN status", "", SW_API_RTD_PPPOE_EN_SET, NULL}, + {"rtdPppoe", "get", "get RM_RTD_PPPOE_EN status", "", SW_API_RTD_PPPOE_EN_GET, NULL}, + {"pppoeen", "set", "set a l3 interface pppoe status", " ", SW_API_PPPOE_EN_SET, NULL}, + {"pppoeen", "get", "get a l3 interface pppoe status", "", SW_API_PPPOE_EN_GET, NULL}, + {"glomacaddr", "set", "set global macaddr", 
"", SW_API_GLOBAL_MACADDR_SET, NULL}, + {"glomacaddr", "get", "get global macaddr", "", SW_API_GLOBAL_MACADDR_GET, NULL}, + {"lldp", "set", "set lldp frames hardware identification status", "", SW_API_LLDP_STATUS_SET, NULL}, + {"lldp", "get", "get lldp frames hardware identification status", "", SW_API_LLDP_STATUS_GET, NULL}, + {"framecrc", "set", "set frame crc reserve enable", "", SW_API_FRAME_CRC_RESERVE_SET, NULL}, + {"framecrc", "get", "get frame crc reserve enable", "", SW_API_FRAME_CRC_RESERVE_GET, NULL}, + {NULL, NULL, NULL, NULL, SW_API_INVALID, NULL}/*end of desc*/ + }, + }, +#endif + + /* IP */ +#ifdef IN_IP + { + "ip", "config ip", + { + {"hostentry", "add", "add host entry", "", SW_API_IP_HOST_ADD, NULL}, + {"hostentry", "del", "del host entry", "", SW_API_IP_HOST_DEL, NULL}, + {"hostentry", "get", "get host entry", "", SW_API_IP_HOST_GET, NULL}, + {"hostentry", "next", "next host entry", "", SW_API_IP_HOST_NEXT, NULL}, + {"hostentry", "show", "show whole host entries", "", SW_CMD_HOST_SHOW, cmd_show_host}, + {"hostipv4entry", "show", "show whole ipv4U host entries", "", SW_CMD_HOST_IPV4_SHOW, cmd_show_host_ipv4}, + {"hostipv6entry", "show", "show whole ipv6u host entries", "", SW_CMD_HOST_IPV6_SHOW, cmd_show_host_ipv6}, + {"hostipv4Mentry", "show", "show whole ipv4M host entries", "", SW_CMD_HOST_IPV4M_SHOW, cmd_show_host_ipv4M}, + {"hostipv6Mentry", "show", "show whole ipv6M host entries", "", SW_CMD_HOST_IPV6M_SHOW, cmd_show_host_ipv6M}, + {"hostentry", "bindcnt", "bind counter to host entry", " ", SW_API_IP_HOST_COUNTER_BIND, NULL}, + {"hostentry", "bindpppoe", "bind pppoe to host entry", " ", SW_API_IP_HOST_PPPOE_BIND, NULL}, + {"ptarplearn", "set", "set port arp learn flag, bit0 req bit1 ack", " ", SW_API_IP_PT_ARP_LEARN_SET, NULL}, + {"ptarplearn", "get", "get port arp learn flag, bit0 req bit1 ack", "", SW_API_IP_PT_ARP_LEARN_GET, NULL}, + {"arplearn", "set", "set arp learn mode", "", SW_API_IP_ARP_LEARN_SET, NULL}, + {"arplearn", "get", "get 
arp learn mode", "", SW_API_IP_ARP_LEARN_GET, NULL}, + {"ptipsrcguard", "set", "set ip source guard mode", " ", SW_API_IP_SOURCE_GUARD_SET, NULL}, + {"ptipsrcguard", "get", "get ip source guard mode", "", SW_API_IP_SOURCE_GUARD_GET, NULL}, + {"ptarpsrcguard", "set", "set arp source guard mode", " ", SW_API_IP_ARP_GUARD_SET, NULL}, + {"ptarpsrcguard", "get", "get arp source guard mode", "", SW_API_IP_ARP_GUARD_GET, NULL}, + {"routestatus", "set", "set ip route status", "", SW_API_IP_ROUTE_STATUS_SET, NULL}, + {"routestatus", "get", "get ip route status", "", SW_API_IP_ROUTE_STATUS_GET, NULL}, + {"intfentry", "add", "add interface mac address", "", SW_API_IP_INTF_ENTRY_ADD, NULL}, + {"intfentry", "del", "del interface mac address", "", SW_API_IP_INTF_ENTRY_DEL, NULL}, + {"intfentry", "show", "show whole interface mac entries", "", SW_CMD_INTFMAC_SHOW, cmd_show_intfmac}, + {"ipunksrc", "set", "set ip unkown source command", "", SW_API_IP_UNK_SOURCE_CMD_SET, NULL}, + {"ipunksrc", "get", "get ip unkown source command", "", SW_API_IP_UNK_SOURCE_CMD_GET, NULL}, + {"arpunksrc", "set", "set arp unkown source command", "", SW_API_ARP_UNK_SOURCE_CMD_SET, NULL}, + {"arpunksrc", "get", "get arp unkown source command", "", SW_API_ARP_UNK_SOURCE_CMD_GET, NULL}, + {"ipagetime", "set", "set dynamic ip entry age time", "